1
0
mirror of https://github.com/moparisthebest/SickRage synced 2024-08-13 16:53:54 -04:00

Added code to perform disposal of the bs4 class when finished, as well as perform garbage collection afterwards; this fixed a memory leak issue SR was experiencing during backlog/manual/failed searches.

This commit is contained in:
echel0n 2014-07-21 15:16:04 -07:00
parent f340f9b073
commit 4a29476415
12 changed files with 83 additions and 29 deletions

View File

@ -21,6 +21,7 @@ import traceback
import datetime
import urlparse
import time
import gc
import sickbeard
import generic
from sickbeard.common import Quality, cpu_presets
@ -174,7 +175,11 @@ class BitSoupProvider(generic.TorrentProvider):
torrent_table = html.find('table', attrs={'class': 'koptekst'})
torrent_rows = torrent_table.find_all('tr') if torrent_table else []
# cleanup memory
html.decompose()
gc.collect()
#Continue only if one Release is found
if len(torrent_rows) < 2:
logger.log(u"The Data returned from " + self.name + " do not contains any torrent",

View File

@ -21,6 +21,7 @@ import traceback
import datetime
import urlparse
import time
import gc
import sickbeard
import generic
from sickbeard.common import Quality, cpu_presets
@ -199,6 +200,11 @@ class FreshOnTVProvider(generic.TorrentProvider):
torrent_table = html.find('table', attrs={'class': 'frame'})
torrent_rows = torrent_table.findChildren('tr') if torrent_table else []
# cleanup memory
html.decompose()
gc.collect()
#Continue only if one Release is found
if len(torrent_rows) < 2:
logger.log(u"The Data returned from " + self.name + " do not contains any torrent",

View File

@ -22,6 +22,7 @@ import re
import traceback
import datetime
import urlparse
import gc
import sickbeard
import generic
from sickbeard.common import Quality, cpu_presets
@ -203,6 +204,10 @@ class HDTorrentsProvider(generic.TorrentProvider):
#Get first entry in table
entries = html.find_all('td', attrs={'align': 'center'})
# cleanup memory
html.decompose()
gc.collect()
if not entries:
logger.log(u"The Data returned from " + self.name + " do not contains any torrent",
logger.DEBUG)

View File

@ -21,6 +21,7 @@ import re
import traceback
import datetime
import urlparse
import gc
import sickbeard
import generic
from sickbeard.common import Quality, cpu_presets
@ -182,6 +183,10 @@ class IPTorrentsProvider(generic.TorrentProvider):
torrent_table = html.find('table', attrs={'class': 'torrents'})
torrents = torrent_table.find_all('tr') if torrent_table else []
# cleanup memory
html.decompose()
gc.collect()
#Continue only if one Release is found
if len(torrents) < 2:
logger.log(u"The Data returned from " + self.name + " do not contains any torrent",

View File

@ -19,7 +19,7 @@
from __future__ import with_statement
import time
import gc
import sys
import os
import traceback
@ -120,8 +120,12 @@ class KATProvider(generic.TorrentProvider):
return None
try:
soup = BeautifulSoup(data, features=["html5lib", "permissive"])
file_table = soup.find('table', attrs={'class': 'torrentFileList'})
html = BeautifulSoup(data, features=["html5lib", "permissive"])
file_table = html.find('table', attrs={'class': 'torrentFileList'})
# cleanup memory
html.decompose()
gc.collect()
if not file_table:
return None
@ -248,11 +252,15 @@ class KATProvider(generic.TorrentProvider):
continue
try:
soup = BeautifulSoup(html, features=["html5lib", "permissive"])
html = BeautifulSoup(html, features=["html5lib", "permissive"])
torrent_table = soup.find('table', attrs={'class': 'data'})
torrent_table = html.find('table', attrs={'class': 'data'})
torrent_rows = torrent_table.find_all('tr') if torrent_table else []
# cleanup memory
html.decompose()
gc.collect()
#Continue only if one Release is found
if len(torrent_rows) < 2:
logger.log(u"The data returned from " + self.name + " does not contain any torrents",
@ -284,7 +292,7 @@ class KATProvider(generic.TorrentProvider):
logger.DEBUG)
continue
#Check number video files = episode in season and find the real Quality for full season torrent analyzing files in torrent
#Check number video files = episode in season and find the real Quality for full season torrent analyzing files in torrent
if mode == 'Season' and search_mode == 'sponly':
ep_number = int(epcount / len(set(allPossibleShowNames(self.show))))
title = self._find_season_quality(title, link, ep_number)

View File

@ -25,6 +25,7 @@ import traceback
import urllib, urlparse
import re
import datetime
import gc
import sickbeard
import generic
@ -152,11 +153,15 @@ class PublicHDProvider(generic.TorrentProvider):
html = os.linesep.join([s for s in html.splitlines() if not optreg.search(s)])
try:
soup = BeautifulSoup(html, features=["html5lib", "permissive"])
html = BeautifulSoup(html, features=["html5lib", "permissive"])
torrent_table = soup.find('table', attrs={'id': 'torrbg'})
torrent_table = html.find('table', attrs={'id': 'torrbg'})
torrent_rows = torrent_table.find_all('tr') if torrent_table else []
# cleanup memory
html.decompose()
gc.collect()
#Continue only if one Release is found
if len(torrent_rows) < 2:
logger.log(u"The Data returned from " + self.name + " do not contains any torrent",

View File

@ -22,6 +22,7 @@ import re
import traceback
import datetime
import urlparse
import gc
import sickbeard
import generic
from sickbeard.common import Quality, cpu_presets
@ -203,6 +204,10 @@ class SCCProvider(generic.TorrentProvider):
torrent_table = html.find('table', attrs={'id': 'torrents-table'})
torrent_rows = torrent_table.find_all('tr') if torrent_table else []
# cleanup memory
html.decompose()
gc.collect()
#Continue only if at least one Release is found
if len(torrent_rows) < 2:
if html.title:
@ -222,10 +227,14 @@ class SCCProvider(generic.TorrentProvider):
url = all_urls[1]
else:
url = all_urls[0]
title = link.string
if re.search('\.\.\.', title):
details_html = BeautifulSoup(self.getURL(self.url + "/" + link['href']))
title = re.search('(?<=").+(?<!")', details_html.title.string).group(0)
details_html.decompose()
gc.collect()
download_url = self.urls['download'] % url['href']
id = int(link['href'].replace('details?id=', ''))
seeders = int(result.find('td', attrs={'class': 'ttr_seeders'}).string)

View File

@ -21,6 +21,7 @@ import traceback
import datetime
import urlparse
import time
import gc
import sickbeard
import generic
from sickbeard.common import Quality, cpu_presets
@ -175,6 +176,10 @@ class TorrentBytesProvider(generic.TorrentProvider):
torrent_table = html.find('table', attrs={'border': '1'})
torrent_rows = torrent_table.find_all('tr') if torrent_table else []
# cleanup memory
html.decompose()
gc.collect()
#Continue only if one Release is found
if len(torrent_rows) < 2:
logger.log(u"The Data returned from " + self.name + " do not contains any torrent",

View File

@ -21,6 +21,7 @@ import traceback
import datetime
import urlparse
import time
import gc
import sickbeard
import generic
from sickbeard.common import Quality, cpu_presets
@ -178,6 +179,10 @@ class TorrentLeechProvider(generic.TorrentProvider):
torrent_table = html.find('table', attrs={'id': 'torrenttable'})
torrent_rows = torrent_table.find_all('tr') if torrent_table else []
# cleanup memory
html.decompose()
gc.collect()
#Continue only if one Release is found
if len(torrent_rows) < 2:
logger.log(u"The Data returned from " + self.name + " do not contains any torrent",

View File

@ -441,7 +441,6 @@ def get_xem_absolute_numbering_for_show(indexer_id, indexer):
xem_refresh(indexer_id, indexer)
result = {}
myDB = db.DBConnection()
rows = myDB.select(

View File

@ -227,11 +227,6 @@ class TVShow(object):
def getEpisode(self, season=None, episode=None, file=None, noCreate=False, absolute_number=None, forceUpdate=False):
# Load XEM data to DB for show
sickbeard.scene_numbering.xem_refresh(self.indexerid, self.indexer, force=forceUpdate)
ep = None
# if we get an anime get the real season and episode
if self.is_anime and absolute_number and not season and not episode:
myDB = db.DBConnection()
@ -269,21 +264,23 @@ class TVShow(object):
else:
ep = TVEpisode(self, season, episode)
# get scene absolute numbering
ep.scene_absolute_number = sickbeard.scene_numbering.get_scene_absolute_numbering(self.indexerid,
self.indexer,
ep.absolute_number)
# get scene season and episode numbering
ep.scene_season, ep.scene_episode = sickbeard.scene_numbering.get_scene_numbering(self.indexerid,
self.indexer,
season, episode)
if ep != None:
# Load XEM data to DB for show
sickbeard.scene_numbering.xem_refresh(self.indexerid, self.indexer, force=forceUpdate)
# get scene absolute numbering
ep.scene_absolute_number = sickbeard.scene_numbering.get_scene_absolute_numbering(self.indexerid,
self.indexer,
ep.absolute_number)
# get scene season and episode numbering
ep.scene_season, ep.scene_episode = sickbeard.scene_numbering.get_scene_numbering(self.indexerid,
self.indexer,
season, episode)
self.episodes[season][episode] = ep
epObj = self.episodes[season][episode]
return epObj
return self.episodes[season][episode]
def should_update(self, update_date=datetime.date.today()):

View File

@ -22,6 +22,7 @@ from __future__ import with_statement
import unittest
import sys, os.path
import urlparse
import gc
sys.path.append(os.path.abspath('..'))
sys.path.append(os.path.abspath('../lib'))
@ -40,11 +41,15 @@ class TorrentBasicTests(test.SickbeardTestDBCase):
if not html:
return
soup = BeautifulSoup(html, features=["html5lib", "permissive"])
html = BeautifulSoup(html, features=["html5lib", "permissive"])
torrent_table = soup.find('table', attrs={'class': 'data'})
torrent_table = html.find('table', attrs={'class': 'data'})
torrent_rows = torrent_table.find_all('tr') if torrent_table else []
# cleanup memory
html.decompose()
gc.collect()
#Continue only if one Release is found
if len(torrent_rows) < 2:
print(u"The data returned does not contain any torrents")