Here are examples of the Python API requests.compat.urljoin, taken from open source projects. By voting up you can indicate which examples are most useful and appropriate.
64 Examples
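For reference, urljoin(base, url) resolves its second argument against the first using standard URL-resolution rules, which explains the different call styles in the examples below: a path with a leading slash replaces the base's path entirely, a relative path replaces only the last segment, and a trailing slash on the base keeps its last segment. A minimal sketch of that behavior (the example.com and other.org URLs are illustrative only):

from requests.compat import urljoin  # the standard library urljoin, re-exported by requests

# A leading slash on the relative part replaces the base path entirely.
urljoin('https://hd4free.xyz', '/searchapi.php')       # 'https://hd4free.xyz/searchapi.php'

# Without a trailing slash on the base, the last path segment is replaced.
urljoin('https://example.com/gui', 'token.html')       # 'https://example.com/token.html'

# With a trailing slash, the relative part is appended under it.
urljoin('https://example.com/gui/', 'token.html')      # 'https://example.com/gui/token.html'

# An absolute URL as the second argument wins outright.
urljoin('https://example.com/', 'http://other.org/x')  # 'http://other.org/x'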
Example 1
Project: SickRage Source File: hd4free.py
def __init__(self):
    TorrentProvider.__init__(self, "HD4Free")

    self.url = 'https://hd4free.xyz'
    self.urls = {'search': urljoin(self.url, '/searchapi.php')}

    self.freeleech = None
    self.username = None
    self.api_key = None
    self.minseed = None
    self.minleech = None

    self.cache = tvcache.TVCache(self, min_time=10)  # Only poll HD4Free every 10 minutes max
Example 2
def _request(self, url, method="GET", **kwargs):
    """Send HTTP request to Beam."""
    response = self.http_session.request(
        method,
        urljoin(self.PATH, url.lstrip('/')),
        headers={"X-CSRF-Token": self.csrf_token},
        **kwargs
    )

    if self.csrf_token is None:
        self.csrf_token = response.headers.get("X-CSRF-Token")
    elif response.status_code == 461:
        # Token expired; store the fresh one and return the retried request
        # (the original discarded the retry's result).
        self.csrf_token = response.headers.get("X-CSRF-Token")
        return self._request(url, method, **kwargs)

    try:
        return response.json()
    except Exception:
        return response.text
Example 3
Project: SickRage Source File: womble.py
def __init__(self):
    NZBProvider.__init__(self, 'Womble\'s Index')

    self.public = True

    self.url = 'http://newshost.co.za'
    self.urls = {'rss': urljoin(self.url, 'rss')}

    self.supports_backlog = False

    self.cache = WombleCache(self, min_time=20)
Example 4
@property
def short_link(self):
    """Return a short link to the submission.

    The short link points to a page on the short_domain that redirects to
    the main. http://redd.it/y3r8u is a short link for reddit.com.

    """
    return urljoin(self.reddit_session.config.short_domain, self.id)
Example 5
@restrict_access(scope='vote')
def vote(self, direction=0):
    """Vote for the given item in the direction specified.

    :returns: The json response from the server.

    """
    url = self.reddit_session.config['vote']
    data = {'id': self.fullname,
            'dir': six.text_type(direction)}
    if self.reddit_session.user:
        # pylint: disable-msg=W0212
        urls = [urljoin(self.reddit_session.user._url, 'disliked'),
                urljoin(self.reddit_session.user._url, 'liked')]
        # pylint: enable-msg=W0212
        self.reddit_session.evict(urls)
    return self.reddit_session.request_json(url, data=data)
Example 6
Project: SickRage Source File: shazbat.py
def __init__(self):
    TorrentProvider.__init__(self, 'Shazbat.tv')

    self.supports_backlog = False

    self.passkey = None
    self.options = None

    self.cache = ShazbatCache(self, min_time=20)

    self.url = 'http://www.shazbat.tv'
    self.urls = {
        'login': urljoin(self.url, 'login'),
        'rss_recent': urljoin(self.url, 'rss/recent'),
        # 'rss_queue': urljoin(self.url, 'rss/download_queue'),
        # 'rss_followed': urljoin(self.url, 'rss/followed')
    }
Example 7
Project: livepythonconsole-app-engine Source File: internal.py
def _raise_redirect_exceptions(response):
    """Return the new url or None if there are no redirects.

    Raise exceptions if appropriate.

    """
    if response.status_code != 302:
        return None
    new_url = urljoin(response.url, response.headers['location'])
    if 'reddits/search?q=' in new_url:  # Handle non-existent subreddit
        subreddit = new_url.rsplit('=', 1)[1]
        raise InvalidSubreddit('`{0}` is not a valid subreddit'
                               .format(subreddit))
    elif 'random' not in response.url:
        raise RedirectException(response.url, new_url)
    return new_url
Example 8
Project: SickRage Source File: hdbits.py
def __init__(self):
    TorrentProvider.__init__(self, "HDBits")

    self.username = None
    self.passkey = None

    self.cache = HDBitsCache(self, min_time=15)  # only poll HDBits every 15 minutes max

    self.url = 'https://hdbits.org'
    self.urls = {
        'search': urljoin(self.url, '/api/torrents'),
        'rss': urljoin(self.url, '/api/torrents'),
        'download': urljoin(self.url, '/download.php')
    }
Example 9
def _http_request(self, url, data=None, headers=None):
    """Get content for received url."""
    if not url.startswith("http"):
        url = requests.compat.urljoin(self._base_url, url)
    headers = {} if headers is None else headers
    headers.update(self._get_oauth_headers(url))
    return super(MaaSHttpService, self)._http_request(url, data, headers)
Example 10
@restrict_access(scope=None, login=True)
def report(self):
    """Report this object to the moderators.

    :returns: The json response from the server.

    """
    url = self.reddit_session.config['report']
    data = {'id': self.fullname}
    response = self.reddit_session.request_json(url, data=data)
    # Reported objects are automatically hidden as well
    # pylint: disable-msg=W0212
    urls = [self.reddit_session.config['user'],
            urljoin(self.reddit_session.user._url, 'hidden')]
    # pylint: enable-msg=W0212
    self.reddit_session.evict(urls)
    return response
Example 11
Project: SickRage Source File: binsearch.py
def __init__(self):
    NZBProvider.__init__(self, 'BinSearch')

    self.url = 'https://www.binsearch.info'
    self.urls = {'rss': urljoin(self.url, 'rss.php')}

    self.public = True
    self.supports_backlog = False

    self.cache = BinSearchCache(self, min_time=30)  # only poll Binsearch every 30 minutes max
Example 12
Project: SickRage Source File: newpct.py
def __init__(self):
    TorrentProvider.__init__(self, 'Newpct')

    self.onlyspasearch = None

    self.url = 'http://www.newpct.com'
    self.urls = {'search': urljoin(self.url, 'index.php')}

    self.cache = tvcache.TVCache(self, min_time=20)
Example 13
Project: SickRage Source File: kat.py
def __init__(self):
    TorrentProvider.__init__(self, "KickAssTorrents")

    self.public = True
    self.confirmed = True
    self.minseed = None
    self.minleech = None

    self.url = "https://kat.cr"
    self.urls = {"search": urljoin(self.url, "%s/")}
    self.custom_url = None

    self.cache = tvcache.TVCache(self, search_params={"RSS": ["tv", "anime"]})
Example 14
def _get_section(subpath=''):
    """Return function to generate various non-subreddit listings."""
    def _section(self, sort='new', time='all', *args, **kwargs):
        """Return a get_content generator for some RedditContentObject type.

        :param sort: Specify the sort order of the results if applicable.
        :param time: Specify the time-period to return submissions if
            applicable.

        The additional parameters are passed directly into
        :meth:`.get_content`. Note: the `url` parameter cannot be altered.

        """
        kwargs.setdefault('params', {})
        kwargs['params'].setdefault('sort', sort)
        kwargs['params'].setdefault('t', time)
        url = urljoin(self._url, subpath)  # pylint: disable-msg=W0212
        return self.reddit_session.get_content(url, *args, **kwargs)
    return _section
Example 15
def get_url(self, url, post_data=None, params=None, timeout=30, **kwargs):  # pylint: disable=too-many-arguments
    """
    returns='content' means the caller wants the torrent file itself (for the torrent
    client), so we must first parse the returned page to extract the real torrent URL.
    """
    trickery = kwargs.pop('returns', '')
    if trickery == 'content':
        kwargs['returns'] = 'text'
        data = super(newpctProvider, self).get_url(url, post_data=post_data, params=params, timeout=timeout, **kwargs)
        url = re.search(r'http://tumejorserie.com/descargar/.+\.torrent', data, re.DOTALL).group()
        url = urljoin(self.url, url.rsplit('=', 1)[-1])

    kwargs['returns'] = trickery
    return super(newpctProvider, self).get_url(url, post_data=post_data, params=params,
                                               timeout=timeout, **kwargs)
Example 16
def _get_auth(self):
    """
    Makes a request to the token url to get a CSRF token
    """
    try:
        self.response = self.session.get(urljoin(self.url, 'token.html'), verify=False)
        self.response.raise_for_status()
        self.auth = re.findall("<div.*?>(.*?)</", self.response.text)[0]
    except Exception as error:
        sickbeard.helpers.handle_requests_exception(error)
        self.auth = None

    return self.auth
Example 17
@staticmethod
def convert_url(url, params):
    # noinspection PyBroadException
    try:
        return urljoin(url, '{type}/{q}/{page}/{orderby}/{category}/'.format(**params)), {}
    except Exception:
        return url.replace('search', 's/'), params
Example 18
Project: SickRage Source File: sab.py
def getSabAccesMethod(host=None):
    '''
    Find out how we should connect to SAB

    :param host: hostname where SAB lives
    :return: (boolean, string) with True if method was successful
    '''
    params = {'mode': 'auth', 'output': 'json'}
    url = urljoin(host, 'api')
    data = helpers.getURL(url, params=params, session=session, returns='json', verify=False)
    if not data:
        return False, data

    return _checkSabResponse(data)
Example 19
@restrict_access(scope=None, login=True)
def hide(self, unhide=False):
    """Hide object in the context of the logged in user.

    :returns: The json response from the server.

    """
    url = self.reddit_session.config['unhide' if unhide else 'hide']
    data = {'id': self.fullname,
            'executed': 'unhide' if unhide else 'hide'}
    response = self.reddit_session.request_json(url, data=data)
    # pylint: disable-msg=W0212
    urls = [urljoin(self.reddit_session.user._url, 'hidden')]
    # pylint: enable-msg=W0212
    self.reddit_session.evict(urls)
    return response
Example 20
def _http_request(self, url, data=None, headers=None):
    """Get content for received url."""
    if not url.startswith("http"):
        url = requests.compat.urljoin(self._base_url, url)
    request_action = requests.get if not data else requests.post
    if not data:
        LOG.debug('Getting metadata from: %s', url)
    else:
        LOG.debug('Posting data to %s', url)
    response = request_action(url=url, data=data, headers=headers,
                              verify=self._verify_https_request())
    response.raise_for_status()
    return response.content
Example 21
def __init__(self, reddit_session, json_dict):
    super(Submission, self).__init__(reddit_session, json_dict)
    self.permalink = urljoin(reddit_session.config['reddit_url'],
                             self.permalink)
    self._comment_sort = None
    self._comments_by_id = {}
    self._comments = None
    self._orphaned = {}
    self._replaced_more = False
Example 22
Project: python-scrapinghub Source File: legacy.py
def _build_url(self, method, format):
    """Returns full url for given method and format"""
    from requests.compat import urljoin
    # TODO: verify method's format support
    try:
        base_path = self.API_METHODS[method]
    except KeyError:
        raise APIError("Unknown method: {0}".format(method))
    else:
        path = "{0}.{1}".format(base_path, format)
        return urljoin(self.url, path)
Example 23
def __init__(self, host=None, username=None, password=None):
    """
    Initializes the utorrent client class and sets the url, username, and password
    """
    super(uTorrentAPI, self).__init__('uTorrent', host, username, password)
    self.url = urljoin(self.host, 'gui/')
Example 24
Project: SickRage Source File: speedcd.py
def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals, too-many-branches
    results = []
    if not self.login():
        return results

    # http://speed.cd/browse.php?c49=1&c50=1&c52=1&c41=1&c55=1&c2=1&c30=1&freeleech=on&search=arrow&d=on
    # Search Params
    search_params = {
        'c30': 1,  # Anime
        'c41': 1,  # TV/Packs
        'c49': 1,  # TV/HD
        'c50': 1,  # TV/Sports
        'c52': 1,  # TV/B-Ray
        'c55': 1,  # TV/Kids
        'search': '',
    }

    # Units
    units = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']

    def process_column_header(td):
        result = ''
        img = td.find('img')
        if img:
            result = img.get('alt')
        if not result:
            result = td.get_text(strip=True)
        return result

    if self.freeleech:
        search_params['freeleech'] = 'on'

    for mode in search_strings:
        items = []
        logger.log(u"Search Mode: {0}".format(mode), logger.DEBUG)

        for search_string in search_strings[mode]:
            if mode != 'RSS':
                logger.log(u"Search string: {0}".format
                           (search_string.decode("utf-8")), logger.DEBUG)

            search_params['search'] = search_string

            data = self.get_url(self.urls['search'], params=search_params, returns='text')
            if not data:
                continue

            with BS4Parser(data, 'html5lib') as html:
                torrent_table = html.find('div', class_='boxContent')
                torrent_table = torrent_table.find('table') if torrent_table else []
                torrent_rows = torrent_table('tr') if torrent_table else []

                # Continue only if at least one Release is found
                if len(torrent_rows) < 2:
                    logger.log(u"Data returned from provider does not contain any torrents", logger.DEBUG)
                    continue

                labels = [process_column_header(label) for label in torrent_rows[0]('th')]

                # Skip column headers
                for result in torrent_rows[1:]:
                    try:
                        cells = result('td')

                        title = cells[labels.index('Title')].find('a', class_='torrent').get_text()
                        download_url = urljoin(self.url, cells[labels.index('Download') - 1].a['href'])
                        if not all([title, download_url]):
                            continue

                        seeders = try_int(cells[labels.index('Seeders') - 1].get_text(strip=True))
                        leechers = try_int(cells[labels.index('Leechers') - 1].get_text(strip=True))

                        # Filter unseeded torrent
                        if seeders < self.minseed or leechers < self.minleech:
                            if mode != 'RSS':
                                logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers), logger.DEBUG)
                            continue

                        torrent_size = cells[labels.index('Size') - 1].get_text()
                        torrent_size = torrent_size[:-2] + ' ' + torrent_size[-2:]
                        size = convert_size(torrent_size, units=units) or -1

                        item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': ''}
                        if mode != 'RSS':
                            logger.log(u"Found result: {0} with {1} seeders and {2} leechers".format(title, seeders, leechers), logger.DEBUG)

                        items.append(item)
                    except StandardError:
                        continue

        # For each search mode sort all the items by seeders if available
        items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
        results += items

    return results
Example 25
Project: SickRage Source File: abnormal.py
def __init__(self):
    # Provider Init
    TorrentProvider.__init__(self, 'ABNormal')

    # Credentials
    self.username = None
    self.password = None

    # Torrent Stats
    self.minseed = None
    self.minleech = None

    # URLs
    self.url = 'https://abnormal.ws'
    self.urls = {
        'login': urljoin(self.url, 'login.php'),
        'search': urljoin(self.url, 'torrents.php'),
    }

    # Proper Strings
    self.proper_strings = ['PROPER']

    # Cache
    self.cache = tvcache.TVCache(self, min_time=30)
Example 26
Project: SickRage Source File: thepiratebay.py
def __init__(self):
    # Provider Init
    TorrentProvider.__init__(self, "ThePirateBay")

    # Credentials
    self.public = True

    # Torrent Stats
    self.minseed = None
    self.minleech = None
    self.confirmed = True

    # URLs
    self.url = "https://thepiratebay.se"
    self.urls = {
        "rss": [urljoin(self.url, "browse/208/0/4"), urljoin(self.url, "browse/205/0/4")],
        "search": urljoin(self.url, "search"),
    }
    self.custom_url = None

    # Proper Strings

    # Cache
    self.cache = tvcache.TVCache(self, min_time=30)  # only poll ThePirateBay every 30 minutes max

    self.magnet_regex = re.compile(r'magnet:\?xt=urn:btih:\w{32,40}(:?&dn=[\w. %+-]+)*(:?&tr=(:?tcp|https?|udp)[\w%. +-]+)*')
Example 27
Project: SickRage Source File: speedcd.py
def login(self):
    if any(dict_from_cookiejar(self.session.cookies).values()):
        return True

    login_params = {
        'username': self.username,
        'password': self.password,
    }

    # Yay lets add another request to the process since they are unreasonable.
    response = self.get_url(self.url, returns='text')
    with BS4Parser(response, 'html5lib') as html:
        form = html.find('form', id='loginform')
        if form:
            self.urls['login'] = urljoin(self.url, form['action'])

    response = self.get_url(self.urls['login'], post_data=login_params, returns='text')
    if not response:
        logger.log(u"Unable to connect to provider", logger.WARNING)
        return False

    if re.search('Incorrect username or Password. Please try again.', response):
        logger.log(u"Invalid username or password. Check your settings", logger.WARNING)
        return False

    return True
Example 28
Project: SickRage Source File: torrentbytes.py
def __init__(self):
    # Provider Init
    TorrentProvider.__init__(self, "TorrentBytes")

    # Credentials
    self.username = None
    self.password = None

    # Torrent Stats
    self.minseed = None
    self.minleech = None
    self.freeleech = False

    # URLs
    self.url = "https://www.torrentbytes.net"
    self.urls = {
        "login": urljoin(self.url, "takelogin.php"),
        "search": urljoin(self.url, "browse.php")
    }

    # Proper Strings
    self.proper_strings = ["PROPER", "REPACK"]

    # Cache
    self.cache = tvcache.TVCache(self)
Example 29
Project: SickRage Source File: abnormal.py
def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals, too-many-branches
    results = []
    if not self.login():
        return results

    # Search Params
    search_params = {
        'cat[]': ['TV|SD|VOSTFR', 'TV|HD|VOSTFR', 'TV|SD|VF', 'TV|HD|VF', 'TV|PACK|FR', 'TV|PACK|VOSTFR', 'TV|EMISSIONS', 'ANIME'],
        # Both ASC and DESC are available for sort direction
        'way': 'DESC'
    }

    # Units
    units = ['O', 'KO', 'MO', 'GO', 'TO', 'PO']

    for mode in search_strings:
        items = []
        logger.log('Search Mode: {0}'.format(mode), logger.DEBUG)

        for search_string in search_strings[mode]:
            if mode != 'RSS':
                logger.log('Search string: {0}'.format
                           (search_string.decode('utf-8')), logger.DEBUG)

            # Sorting: Available parameters: ReleaseName, Seeders, Leechers, Snatched, Size
            search_params['order'] = ('Seeders', 'Time')[mode == 'RSS']
            search_params['search'] = re.sub(r'[()]', '', search_string)
            data = self.get_url(self.urls['search'], params=search_params, returns='text')
            if not data:
                continue

            with BS4Parser(data, 'html5lib') as html:
                torrent_table = html.find(class_='torrent_table')
                torrent_rows = torrent_table('tr') if torrent_table else []

                # Continue only if at least one Release is found
                if len(torrent_rows) < 2:
                    logger.log('Data returned from provider does not contain any torrents', logger.DEBUG)
                    continue

                # Catégorie, Release, Date, DL, Size, C, S, L
                labels = [label.get_text(strip=True) for label in torrent_rows[0]('td')]

                # Skip column headers
                for result in torrent_rows[1:]:
                    cells = result('td')
                    if len(cells) < len(labels):
                        continue

                    try:
                        title = cells[labels.index('Release')].get_text(strip=True)
                        download_url = urljoin(self.url, cells[labels.index('DL')].find('a', class_='tooltip')['href'])
                        if not all([title, download_url]):
                            continue

                        seeders = try_int(cells[labels.index('S')].get_text(strip=True))
                        leechers = try_int(cells[labels.index('L')].get_text(strip=True))

                        # Filter unseeded torrent
                        if seeders < self.minseed or leechers < self.minleech:
                            if mode != 'RSS':
                                logger.log('Discarding torrent because it doesn\'t meet the minimum seeders or leechers: {0} (S:{1} L:{2})'.format
                                           (title, seeders, leechers), logger.DEBUG)
                            continue

                        size_index = labels.index('Size') if 'Size' in labels else labels.index('Taille')
                        torrent_size = cells[size_index].get_text()
                        size = convert_size(torrent_size, units=units) or -1

                        item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': ''}
                        if mode != 'RSS':
                            logger.log('Found result: {0} with {1} seeders and {2} leechers'.format
                                       (title, seeders, leechers), logger.DEBUG)

                        items.append(item)
                    except StandardError:
                        continue

        # For each search mode sort all the items by seeders if available
        items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
        results += items

    return results
Example 30
Project: SickRage Source File: iptorrents.py
def login(self):
    cookie_dict = dict_from_cookiejar(self.session.cookies)
    if cookie_dict.get('uid') and cookie_dict.get('pass'):
        return True

    if self.cookies:
        success, status = self.add_cookies_from_ui()
        if not success:
            logger.log(status, logger.INFO)
            return False

    login_params = {'username': self.username,
                    'password': self.password,
                    'login': 'submit'}

    login_url = self.urls['login']
    if self.custom_url:
        if not validators.url(self.custom_url):
            logger.log("Invalid custom url: {0}".format(self.custom_url), logger.WARNING)
            return False

        login_url = urljoin(self.custom_url, self.urls['login'].split(self.url)[1])

    self.get_url(login_url, returns='text')
    response = self.get_url(login_url, post_data=login_params, returns='text')
    if not response:
        logger.log(u"Unable to connect to provider", logger.WARNING)
        return False

    # Invalid username and password combination
    if re.search('Invalid username and password combination', response):
        logger.log(u"Invalid username or password. Check your settings", logger.WARNING)
        return False

    # You tried too often, please try again after 2 hours!
    if re.search('You tried too often', response):
        logger.log(u"You tried too often, please try again after 2 hours! Disable IPTorrents for at least 2 hours", logger.WARNING)
        return False

    # Captcha!
    if re.search('Captcha verification failed.', response):
        logger.log(u"Stupid captcha", logger.WARNING)
        return False

    return True
Example 31
Project: SickRage Source File: torrentbytes.py
def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals, too-many-branches, too-many-statements
    results = []
    if not self.login():
        return results

    search_params = {
        "c41": 1, "c33": 1, "c38": 1, "c32": 1, "c37": 1
    }

    for mode in search_strings:
        items = []
        logger.log("Search Mode: {0}".format(mode), logger.DEBUG)

        for search_string in search_strings[mode]:
            if mode != "RSS":
                logger.log("Search string: {0}".format
                           (search_string.decode("utf-8")), logger.DEBUG)

            search_params["search"] = search_string
            data = self.get_url(self.urls["search"], params=search_params, returns="text")
            if not data:
                logger.log("No data returned from provider", logger.DEBUG)
                continue

            with BS4Parser(data, "html5lib") as html:
                torrent_table = html.find("table", border="1")
                torrent_rows = torrent_table("tr") if torrent_table else []

                # Continue only if at least one Release is found
                if len(torrent_rows) < 2:
                    logger.log("Data returned from provider does not contain any torrents", logger.DEBUG)
                    continue

                # "Type", "Name", "Files", "Comm.", "Added", "TTL", "Size", "Snatched", "Seeders", "Leechers"
                labels = [label.get_text(strip=True) for label in torrent_rows[0]("td")]

                for result in torrent_rows[1:]:
                    try:
                        cells = result("td")

                        download_url = urljoin(self.url, cells[labels.index("Name")].find("a", href=re.compile(r"download.php\?id="))["href"])
                        title_element = cells[labels.index("Name")].find("a", href=re.compile(r"details.php\?id="))
                        title = title_element.get("title", "") or title_element.get_text(strip=True)
                        if not all([title, download_url]):
                            continue

                        if self.freeleech:
                            # Free leech torrents are marked with green [F L] in the title (i.e. <font color=green>[F L]</font>)
                            freeleech = cells[labels.index("Name")].find("font", color="green")
                            if not freeleech or freeleech.get_text(strip=True) != "[F\xa0L]":
                                continue

                        seeders = try_int(cells[labels.index("Seeders")].get_text(strip=True))
                        leechers = try_int(cells[labels.index("Leechers")].get_text(strip=True))

                        # Filter unseeded torrent
                        if seeders < self.minseed or leechers < self.minleech:
                            if mode != "RSS":
                                logger.log("Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format
                                           (title, seeders, leechers), logger.DEBUG)
                            continue

                        # Need size for failed downloads handling
                        torrent_size = cells[labels.index("Size")].get_text(strip=True)
                        size = convert_size(torrent_size) or -1

                        item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': ''}
                        if mode != "RSS":
                            logger.log("Found result: {0} with {1} seeders and {2} leechers".format
                                       (title, seeders, leechers), logger.DEBUG)

                        items.append(item)
                    except (AttributeError, TypeError):
                        continue

        # For each search mode sort all the items by seeders if available
        items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
        results += items

    return results
Example 32
Project: SickRage Source File: thepiratebay.py
def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals, too-many-branches, too-many-statements
    results = []
    """
    205 = SD, 208 = HD, 200 = All Videos
    https://pirateproxy.pl/s/?q=Game of Thrones&type=search&orderby=7&page=0&category=200
    """
    search_params = {
        "q": "",
        "type": "search",
        "orderby": 7,
        "page": 0,
        "category": 200
    }

    # Units
    units = ["B", "KIB", "MIB", "GIB"]

    def process_column_header(th):
        text = ""
        if th.a:
            text = th.a.get_text(strip=True)
        if not text:
            text = th.get_text(strip=True)
        return text

    for mode in search_strings:
        items = []
        logger.log("Search Mode: {0}".format(mode), logger.DEBUG)

        for search_string in search_strings[mode]:
            search_urls = (self.urls["search"], self.urls["rss"])[mode == "RSS"]
            if not isinstance(search_urls, list):
                search_urls = [search_urls]

            for search_url in search_urls:
                if self.custom_url:
                    if not validators.url(self.custom_url):
                        logger.log("Invalid custom url: {0}".format(self.custom_url), logger.WARNING)
                        return results
                    search_url = urljoin(self.custom_url, search_url.split(self.url)[1])

                if mode != "RSS":
                    search_params["q"] = search_string
                    logger.log("Search string: {}".format
                               (search_string.decode("utf-8")), logger.DEBUG)

                    # Prevents a 302 redirect, since there is always a 301 from .se to the best mirror having an extra
                    # redirect is excessive on the provider and spams the debug log unnecessarily
                    search_url, search_params = self.convert_url(search_url, search_params)
                    data = self.get_url(search_url, params=search_params, returns="text")
                else:
                    data = self.get_url(search_url, returns="text")

                if not data:
                    logger.log("URL did not return data, maybe try a custom url, or a different one", logger.DEBUG)
                    continue

                with BS4Parser(data, "html5lib") as html:
                    torrent_table = html.find("table", id="searchResult")
                    torrent_rows = torrent_table("tr") if torrent_table else []

                    # Continue only if at least one Release is found
                    if len(torrent_rows) < 2:
                        logger.log("Data returned from provider does not contain any torrents", logger.DEBUG)
                        continue

                    labels = [process_column_header(label) for label in torrent_rows[0]("th")]

                    # Skip column headers
                    for result in torrent_rows[1:]:
                        try:
                            cells = result("td")

                            # Funky js on page messing up titles, this fixes that
                            title = result.find(class_="detLink")['title'].split('Details for ', 1)[-1]
                            download_url = result.find(title="Download this torrent using magnet")["href"] + self._custom_trackers
                            if not self.magnet_regex.match(download_url):
                                logger.log("Got an invalid magnet: {0}".format(download_url))
                                logger.log("Invalid ThePirateBay proxy please try another one", logger.DEBUG)
                                continue

                            if not all([title, download_url]):
                                continue

                            seeders = try_int(cells[labels.index("SE")].get_text(strip=True))
                            leechers = try_int(cells[labels.index("LE")].get_text(strip=True))

                            # Filter unseeded torrent
                            if seeders < self.minseed or leechers < self.minleech:
                                if mode != "RSS":
                                    logger.log("Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format
                                               (title, seeders, leechers), logger.DEBUG)
                                continue

                            # Accept Torrent only from Good People for every Episode Search
                            if self.confirmed and not result.find(alt=re.compile(r"VIP|Trusted")):
                                if mode != "RSS":
                                    logger.log("Found result: {0} but that doesn't seem like a trusted result so I'm ignoring it".format(title), logger.DEBUG)
                                continue

                            # Convert size after all possible skip scenarios
                            torrent_size = re.sub(r".*Size ([\d.]+).+([KMGT]iB).*", r"\1 \2", result.find(class_="detDesc").get_text(strip=True))
                            size = convert_size(torrent_size, units=units) or -1

                            item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': ''}
                            if mode != "RSS":
                                logger.log("Found result: {0} with {1} seeders and {2} leechers".format
                                           (title, seeders, leechers), logger.DEBUG)

                            items.append(item)
                        except StandardError:
                            continue

        # For each search mode sort all the items by seeders if available
        items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
        results += items

    return results
Example 33
Project: SickRage Source File: ilovetorrents.py
def __init__(self):
    # Provider Init
    TorrentProvider.__init__(self, "ILoveTorrents")

    # URLs
    self.url = 'https://www.ilovetorrents.me/'
    self.urls = {
        'login': urljoin(self.url, "takelogin.php"),
        'detail': urljoin(self.url, "details.php?id=%s"),
        'search': urljoin(self.url, "browse.php"),
        'download': urljoin(self.url, "%s"),
    }

    # Credentials
    self.username = None
    self.password = None

    # Torrent Stats
    self.minseed = None
    self.minleech = None

    # Proper Strings
    self.proper_strings = ["PROPER", "REPACK", "REAL"]

    # Cache
    self.cache = tvcache.TVCache(self)
Example 34
Project: SickRage Source File: alpharatio.py
def __init__(self):
    # Provider Init
    TorrentProvider.__init__(self, "AlphaRatio")

    # Credentials
    self.username = None
    self.password = None

    # Torrent Stats
    self.minseed = None
    self.minleech = None

    # URLs
    self.url = "http://alpharatio.cc"
    self.urls = {
        "login": urljoin(self.url, "login.php"),
        "search": urljoin(self.url, "torrents.php"),
    }

    # Proper Strings
    self.proper_strings = ["PROPER", "REPACK"]

    # Cache
    self.cache = tvcache.TVCache(self)
Example 35
Project: SickRage Source File: iptorrents.py
def search(self, search_params, age=0, ep_obj=None):  # pylint: disable=too-many-locals, too-many-branches, too-many-statements
    results = []
    if not self.login():
        return results

    freeleech = '&free=on' if self.freeleech else ''

    for mode in search_params:
        items = []
        logger.log(u"Search Mode: {0}".format(mode), logger.DEBUG)
        for search_string in search_params[mode]:
            if mode != 'RSS':
                logger.log(u"Search string: {0}".format
                           (search_string.decode("utf-8")), logger.DEBUG)

            # URL with 50 tv-show results, or max 150 if adjusted in IPTorrents profile
            search_url = self.urls['search'] % (self.categories, freeleech, search_string)
            search_url += ';o=seeders' if mode != 'RSS' else ''

            if self.custom_url:
                if not validators.url(self.custom_url):
                    logger.log("Invalid custom url: {0}".format(self.custom_url), logger.WARNING)
                    return results
                search_url = urljoin(self.custom_url, search_url.split(self.url)[1])

            data = self.get_url(search_url, returns='text')
            if not data:
                continue

            try:
                data = re.sub(r'(?im)<button.+?</button>', '', data, 0)
                with BS4Parser(data, 'html5lib') as html:
                    if not html:
                        logger.log(u"No data returned from provider", logger.DEBUG)
                        continue

                    if html.find(text='No Torrents Found!'):
                        logger.log(u"Data returned from provider does not contain any torrents", logger.DEBUG)
                        continue

                    torrent_table = html.find('table', id='torrents')
                    torrents = torrent_table('tr') if torrent_table else []

                    # Continue only if one Release is found
                    if len(torrents) < 2:
                        logger.log(u"Data returned from provider does not contain any torrents", logger.DEBUG)
                        continue

                    for result in torrents[1:]:
                        try:
                            title = result('td')[1].find('a').text
                            download_url = urljoin(search_url, result('td')[3].find('a')['href'])
                            seeders = int(result.find('td', class_='ac t_seeders').text)
                            leechers = int(result.find('td', class_='ac t_leechers').text)
                            torrent_size = result('td')[5].text
                            size = convert_size(torrent_size) or -1
                        except (AttributeError, TypeError, KeyError):
                            continue

                        if not all([title, download_url]):
                            continue

                        # Filter unseeded torrent
                        if seeders < self.minseed or leechers < self.minleech:
                            if mode != 'RSS':
                                logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format
                                           (title, seeders, leechers), logger.DEBUG)
                            continue

                        item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': ''}
                        if mode != 'RSS':
                            logger.log(u"Found result: {0} with {1} seeders and {2} leechers".format(title, seeders, leechers), logger.DEBUG)

                        items.append(item)
            except Exception as e:
                logger.log(u"Failed parsing provider. Error: {0!r}".format(ex(e)), logger.ERROR)

        # For each search mode sort all the items by seeders if available
        items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
        results += items

    return results
Example 36
Project: SickRage Source File: kat.py
def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-branches, too-many-locals, too-many-statements
    results = []

    anime = (self.show and self.show.anime) or (ep_obj and ep_obj.show and ep_obj.show.anime) or False
    search_params = {
        "q": "",
        "field": "seeders",
        "sorder": "desc",
        "rss": 1,
        "category": ("tv", "anime")[anime]
    }

    for mode in search_strings:
        items = []
        logger.log("Search Mode: {0}".format(mode), logger.DEBUG)

        for search_string in search_strings[mode]:
            search_params["q"] = search_string if mode != "RSS" else ""
            search_params["field"] = "seeders" if mode != "RSS" else "time_add"

            if mode != "RSS":
                logger.log("Search string: {0}".format
                           (search_string.decode("utf-8")), logger.DEBUG)

            search_url = self.urls["search"] % ("usearch" if mode != "RSS" else search_string)
            if self.custom_url:
                if not validators.url(self.custom_url):
                    logger.log("Invalid custom url: {0}".format(self.custom_url), logger.WARNING)
                    return results
                search_url = urljoin(self.custom_url, search_url.split(self.url)[1])

            data = self.get_url(search_url, params=search_params, returns="text")
            if not data:
                logger.log("URL did not return results/data, if the results are on the site maybe try a custom url, or a different one", logger.DEBUG)
                continue

            if not data.startswith("<?xml"):
                logger.log("Expected xml but got something else, is your mirror failing?", logger.INFO)
                continue

            with BS4Parser(data, "html5lib") as html:
                for item in html("item"):
                    try:
                        title = item.title.get_text(strip=True)
                        # Use the torcache link kat provides,
                        # unless it is not torcache or we are not using blackhole
                        # because we want to use magnets if connecting direct to client
                        # so that proxies work.
                        download_url = item.enclosure["url"]
                        if sickbeard.TORRENT_METHOD != "blackhole" or "torcache" not in download_url:
                            download_url = item.find("torrent:magneturi").next.replace("CDATA", "").strip("[!]") + self._custom_trackers

                        if not (title and download_url):
                            continue

                        seeders = try_int(item.find("torrent:seeds").get_text(strip=True))
                        leechers = try_int(item.find("torrent:peers").get_text(strip=True))

                        # Filter unseeded torrent
                        if seeders < self.minseed or leechers < self.minleech:
                            if mode != "RSS":
                                logger.log("Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format
                                           (title, seeders, leechers), logger.DEBUG)
                            continue

                        verified = bool(try_int(item.find("torrent:verified").get_text(strip=True)))
                        if self.confirmed and not verified:
                            if mode != "RSS":
                                logger.log("Found result " + title + " but that doesn't seem like a verified result so I'm ignoring it", logger.DEBUG)
                            continue

                        torrent_size = item.find("torrent:contentlength").get_text(strip=True)
                        size = convert_size(torrent_size) or -1
                        info_hash = item.find("torrent:infohash").get_text(strip=True)

                        item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': info_hash}
                        if mode != "RSS":
                            logger.log("Found result: {0} with {1} seeders and {2} leechers".format(title, seeders, leechers), logger.DEBUG)

                        items.append(item)
                    except (AttributeError, TypeError, KeyError, ValueError):
                        continue

        # For each search mode sort all the items by seeders if available
        items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
        results += items

    return results
Example 37
def __init__(self, host=None, username=None, password=None):
    """
    Initializes the DownloadStation client

    params: :host: Url to the Download Station API
            :username: Username to use for authentication
            :password: Password to use for authentication
    """
    super(DownloadStationAPI, self).__init__('DownloadStation', host, username, password)

    self.urls = {
        'login': urljoin(self.host, 'webapi/auth.cgi'),
        'task': urljoin(self.host, 'webapi/DownloadStation/task.cgi'),
    }

    self.url = self.urls['task']

    generic_errors = {
        100: 'Unknown error',
        101: 'Invalid parameter',
        102: 'The requested API does not exist',
        103: 'The requested method does not exist',
        104: 'The requested version does not support the functionality',
        105: 'The logged in session does not have permission',
        106: 'Session timeout',
        107: 'Session interrupted by duplicate login',
    }
    self.error_map = {
        'create': {
            400: 'File upload failed',
            401: 'Max number of tasks reached',
            402: 'Destination denied',
            403: 'Destination does not exist',
            404: 'Invalid task id',
            405: 'Invalid task action',
            406: 'No default destination',
            407: 'Set destination failed',
            408: 'File does not exist'
        },
        'login': {
            400: 'No such account or incorrect password',
            401: 'Account disabled',
            402: 'Permission denied',
            403: '2-step verification code required',
            404: 'Failed to authenticate 2-step verification code'
        }
    }
    for api_method in self.error_map:
        self.error_map[api_method].update(generic_errors)

    self._task_post_data = {
        'api': 'SYNO.DownloadStation.Task',
        'version': '1',
        'method': 'create',
        'session': 'DownloadStation',
    }
Example 38
Project: SickRage Source File: pushbullet.py
def get_devices(self, pushbullet_api):
    logger.log('Testing Pushbullet authentication and retrieving the device list.', logger.DEBUG)
    headers = {'Access-Token': pushbullet_api}
    return helpers.getURL(urljoin(self.url, 'devices'), session=self.session, headers=headers, returns='text') or {}
Example 39
Project: SickRage Source File: alpharatio.py
def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals, too-many-branches
    results = []
    if not self.login():
        return results

    # Search Params
    search_params = {
        "searchstr": "",
        "filter_cat[1]": 1,
        "filter_cat[2]": 1,
        "filter_cat[3]": 1,
        "filter_cat[4]": 1,
        "filter_cat[5]": 1
    }

    # Units
    units = ["B", "KB", "MB", "GB", "TB", "PB"]

    def process_column_header(td):
        result = ""
        if td.a and td.a.img:
            result = td.a.img.get("title", td.a.get_text(strip=True))
        if not result:
            result = td.get_text(strip=True)
        return result

    for mode in search_strings:
        items = []
        logger.log("Search Mode: {0}".format(mode), logger.DEBUG)

        for search_string in search_strings[mode]:
            if mode != "RSS":
                logger.log("Search string: {0}".format
                           (search_string.decode("utf-8")), logger.DEBUG)

            search_params["searchstr"] = search_string
            search_url = self.urls["search"]
            data = self.get_url(search_url, params=search_params, returns="text")
            if not data:
                logger.log("No data returned from provider", logger.DEBUG)
                continue

            with BS4Parser(data, "html5lib") as html:
                torrent_table = html.find("table", id="torrent_table")
                torrent_rows = torrent_table("tr") if torrent_table else []

                # Continue only if at least one Release is found
                if len(torrent_rows) < 2:
                    logger.log("Data returned from provider does not contain any torrents", logger.DEBUG)
                    continue

                # "", "", "Name /Year", "Files", "Time", "Size", "Snatches", "Seeders", "Leechers"
                labels = [process_column_header(label) for label in torrent_rows[0]("td")]

                # Skip column headers
                for result in torrent_rows[1:]:
                    cells = result("td")
                    if len(cells) < len(labels):
                        continue

                    try:
                        title = cells[labels.index("Name /Year")].find("a", dir="ltr").get_text(strip=True)
                        download_url = urljoin(self.url, cells[labels.index("Name /Year")].find("a", title="Download")["href"])
                        if not all([title, download_url]):
                            continue

                        seeders = try_int(cells[labels.index("Seeders")].get_text(strip=True))
                        leechers = try_int(cells[labels.index("Leechers")].get_text(strip=True))

                        # Filter unseeded torrent
                        if seeders < self.minseed or leechers < self.minleech:
                            if mode != "RSS":
                                logger.log("Discarding torrent because it doesn't meet the"
                                           " minimum seeders or leechers: {0} (S:{1} L:{2})".format
                                           (title, seeders, leechers), logger.DEBUG)
                            continue

                        torrent_size = cells[labels.index("Size")].get_text(strip=True)
                        size = convert_size(torrent_size, units=units) or -1

                        item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': ''}
                        if mode != "RSS":
                            logger.log("Found result: {0} with {1} seeders and {2} leechers".format
                                       (title, seeders, leechers), logger.DEBUG)

                        items.append(item)
                    except StandardError:
                        continue

        # For each search mode sort all the items by seeders if available
        items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
        results += items

    return results
Example 40
Project: SickRage Source File: morethantv.py
def __init__(self):
    # Provider Init
    TorrentProvider.__init__(self, "MoreThanTV")

    # Credentials
    self.username = None
    self.password = None
    self._uid = None
    self._hash = None

    # Torrent Stats
    self.minseed = None
    self.minleech = None
    self.freeleech = None

    # URLs
    self.url = 'https://www.morethan.tv/'
    self.urls = {
        'login': urljoin(self.url, 'login.php'),
        'search': urljoin(self.url, 'torrents.php'),
    }

    # Proper Strings
    self.proper_strings = ['PROPER', 'REPACK']

    # Cache
    self.cache = tvcache.TVCache(self)
Example 41
Project: SickRage Source File: morethantv.py
def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals, too-many-branches
    results = []
    if not self.login():
        return results

    # Search Params
    search_params = {
        'tags_type': 1,
        'order_by': 'time',
        'order_way': 'desc',
        'action': 'basic',
        'searchsubmit': 1,
        'searchstr': ''
    }

    # Units
    units = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']

    def process_column_header(td):
        result = ''
        if td.a and td.a.img:
            result = td.a.img.get('title', td.a.get_text(strip=True))
        if not result:
            result = td.get_text(strip=True)
        return result

    for mode in search_strings:
        items = []
        logger.log(u"Search Mode: {0}".format(mode), logger.DEBUG)

        for search_string in search_strings[mode]:
            if mode != 'RSS':
                logger.log(u"Search string: {0}".format
                           (search_string.decode("utf-8")), logger.DEBUG)

            search_params['searchstr'] = search_string

            data = self.get_url(self.urls['search'], params=search_params, returns='text')
            if not data:
                logger.log(u"No data returned from provider", logger.DEBUG)
                continue

            with BS4Parser(data, 'html5lib') as html:
                torrent_table = html.find('table', class_='torrent_table')
                torrent_rows = torrent_table('tr') if torrent_table else []

                # Continue only if at least one Release is found
                if len(torrent_rows) < 2:
                    logger.log(u"Data returned from provider does not contain any torrents", logger.DEBUG)
                    continue

                labels = [process_column_header(label) for label in torrent_rows[0]('td')]

                # Skip column headers
                for result in torrent_rows[1:]:
                    try:
                        # skip if torrent has been nuked due to poor quality
                        if result.find('img', alt='Nuked'):
                            continue

                        title = result.find('a', title='View torrent').get_text(strip=True)
                        download_url = urljoin(self.url, result.find('span', title='Download').parent['href'])
                        if not all([title, download_url]):
                            continue

                        cells = result('td')
                        seeders = try_int(cells[labels.index('Seeders')].get_text(strip=True))
                        leechers = try_int(cells[labels.index('Leechers')].get_text(strip=True))

                        # Filter unseeded torrent
                        if seeders < self.minseed or leechers < self.minleech:
                            if mode != 'RSS':
                                logger.log(u"Discarding torrent because it doesn't meet the"
                                           u" minimum seeders or leechers: {0} (S:{1} L:{2})".format
                                           (title, seeders, leechers), logger.DEBUG)
                            continue

                        torrent_size = cells[labels.index('Size')].get_text(strip=True)
                        size = convert_size(torrent_size, units=units) or -1

                        item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': ''}
                        if mode != 'RSS':
                            logger.log(u"Found result: {0} with {1} seeders and {2} leechers".format
                                       (title, seeders, leechers), logger.DEBUG)

                        items.append(item)
                    except StandardError:
                        continue

        # For each search mode sort all the items by seeders if available
        items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
        results += items

    return results
Example 42
Project: SickRage Source File: pushbullet.py
def get_channels(self, pushbullet_api):
    """Fetches the list of channels a given access key has permissions to push to"""
    logger.log('Testing Pushbullet authentication and retrieving the channel list.', logger.DEBUG)
    headers = {'Access-Token': pushbullet_api}
    return helpers.getURL(urljoin(self.url, 'channels'), session=self.session, headers=headers, returns='text') or {}
Example 43
Project: SickRage Source File: bitcannon.py
def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-branches, too-many-locals
    results = []

    url = "http://localhost:3000/"
    if self.custom_url:
        if not validators.url(self.custom_url, require_tld=False):
            logger.log("Invalid custom url set, please check your settings", logger.WARNING)
            return results
        url = self.custom_url

    search_params = {}

    anime = ep_obj and ep_obj.show and ep_obj.show.anime
    search_params["category"] = ("tv", "anime")[bool(anime)]

    if self.api_key:
        search_params["apiKey"] = self.api_key

    for mode in search_strings:
        items = []
        logger.log("Search Mode: {0}".format(mode), logger.DEBUG)
        for search_string in search_strings[mode]:
            search_params["q"] = search_string
            if mode != "RSS":
                logger.log("Search string: {0}".format
                           (search_string.decode('utf-8')), logger.DEBUG)

            search_url = urljoin(url, "api/search")
            parsed_json = self.get_url(search_url, params=search_params, returns="json")
            if not parsed_json:
                logger.log("No data returned from provider", logger.DEBUG)
                continue

            if not self._check_auth_from_data(parsed_json):
                return results

            for result in parsed_json.pop("torrents", {}):
                try:
                    title = result.pop("title", "")

                    info_hash = result.pop("infoHash", "")
                    download_url = "magnet:?xt=urn:btih:" + info_hash
                    if not all([title, download_url, info_hash]):
                        continue

                    swarm = result.pop("swarm", None)
                    if swarm:
                        seeders = try_int(swarm.pop("seeders", 0))
                        leechers = try_int(swarm.pop("leechers", 0))
                    else:
                        seeders = leechers = 0

                    if seeders < self.minseed or leechers < self.minleech:
                        if mode != "RSS":
                            logger.log("Discarding torrent because it doesn't meet the "
                                       "minimum seeders or leechers: {0} (S:{1} L:{2})".format
                                       (title, seeders, leechers), logger.DEBUG)
                        continue

                    size = convert_size(result.pop("size", -1)) or -1

                    item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': ''}
                    if mode != "RSS":
                        logger.log("Found result: {0} with {1} seeders and {2} leechers".format
                                   (title, seeders, leechers), logger.DEBUG)

                    items.append(item)
                except (AttributeError, TypeError, KeyError, ValueError):
                    continue

        # For each search mode sort all the items by seeders if available
        items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
        results += items

    return results
Example 44
Project: SickRage Source File: newznab.py
def get_newznab_categories(self, just_caps=False):
    """
    Uses the newznab provider url and apikey to get the capabilities.
    Makes use of the default newznab caps param. e.g. http://yournewznab/api?t=caps&apikey=skdfiw7823sdkdsfjsfk
    Returns a tuple with (success or not, array with dicts [{'id': '5070', 'name': 'Anime'},
    {'id': '5080', 'name': 'Documentary'}, {'id': '5020', 'name': 'Foreign'}...etc], error message)
    """
    return_categories = []

    if not self._check_auth():
        return False, return_categories, 'Provider requires auth and your key is not set'

    url_params = {'t': 'caps'}
    if self.needs_auth and self.key:
        url_params['apikey'] = self.key

    data = self.get_url(urljoin(self.url, 'api'), params=url_params, returns='text')
    if not data:
        error_string = 'Error getting caps xml for [{0}]'.format(self.name)
        logger.log(error_string, logger.WARNING)
        return False, return_categories, error_string

    with BS4Parser(data, 'html5lib') as html:
        if not html.find('categories'):
            error_string = 'Error parsing caps xml for [{0}]'.format(self.name)
            logger.log(error_string, logger.DEBUG)
            return False, return_categories, error_string

        self.set_caps(html.find('searching'))
        if just_caps:
            return True, return_categories, 'Just checking caps!'

        for category in html('category'):
            if 'TV' in category.get('name', '') and category.get('id', ''):
                return_categories.append({'id': category['id'], 'name': category['name']})
                for subcat in category('subcat'):
                    if subcat.get('name', '') and subcat.get('id', ''):
                        return_categories.append({'id': subcat['id'], 'name': subcat['name']})

        return True, return_categories, ''

    error_string = 'Error getting xml for [{0}]'.format(self.name)
    logger.log(error_string, logger.WARNING)
    return False, return_categories, error_string
Example 45
Project: SickRage Source File: newznab.py
def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-arguments, too-many-locals, too-many-branches, too-many-statements
    """
    Searches indexer using the params in search_strings, either for latest releases, or a string/id search
    Returns: list of results in dict form
    """
    results = []
    if not self._check_auth():
        return results

    # gingadaddy has no caps.
    if not self.caps and 'gingadaddy' not in self.url:
        self.get_newznab_categories(just_caps=True)

    if not self.caps and 'gingadaddy' not in self.url:
        return results

    for mode in search_strings:
        torznab = False
        search_params = {
            't': 'tvsearch' if 'tvdbid' in str(self.cap_tv_search) else 'search',
            'limit': 100,
            'offset': 0,
            'cat': self.catIDs.strip(', ') or '5030,5040',
            'maxage': sickbeard.USENET_RETENTION
        }

        if self.needs_auth and self.key:
            search_params['apikey'] = self.key

        if mode != 'RSS':
            if search_params['t'] == 'tvsearch':
                search_params['tvdbid'] = ep_obj.show.indexerid

            if ep_obj.show.air_by_date or ep_obj.show.sports:
                date_str = str(ep_obj.airdate)
                search_params['season'] = date_str.partition('-')[0]
                search_params['ep'] = date_str.partition('-')[2].replace('-', '/')
            else:
                search_params['season'] = ep_obj.scene_season
                search_params['ep'] = ep_obj.scene_episode

        if mode == 'Season':
            search_params.pop('ep', '')

        items = []
        logger.log('Search Mode: {0}'.format(mode), logger.DEBUG)
        for search_string in search_strings[mode]:
            if mode != 'RSS':
                logger.log('Search string: {0}'.format
                           (search_string.decode('utf-8')), logger.DEBUG)

            if search_params['t'] != 'tvsearch':
                search_params['q'] = search_string

            time.sleep(cpu_presets[sickbeard.CPU_PRESET])
            data = self.get_url(urljoin(self.url, 'api'), params=search_params, returns='text')
            if not data:
                break

            with BS4Parser(data, 'html5lib') as html:
                if not self._checkAuthFromData(html):
                    break

                try:
                    torznab = 'xmlns:torznab' in html.rss.attrs
                except AttributeError:
                    torznab = False

                for item in html('item'):
                    try:
                        title = item.title.get_text(strip=True)
                        download_url = None
                        if item.link:
                            if validators.url(item.link.get_text(strip=True), require_tld=False):
                                download_url = item.link.get_text(strip=True)
                            elif validators.url(item.link.next.strip(), require_tld=False):
                                download_url = item.link.next.strip()

                        if not download_url and item.enclosure:
                            if validators.url(item.enclosure.get('url', '').strip(), require_tld=False):
                                download_url = item.enclosure.get('url', '').strip()

                        if not (title and download_url):
                            continue

                        seeders = leechers = None
                        if 'gingadaddy' in self.url:
                            size_regex = re.search(r'\d*.?\d* [KMGT]B', str(item.description))
                            item_size = size_regex.group() if size_regex else -1
                        else:
                            item_size = item.size.get_text(strip=True) if item.size else -1
                            for attr in item('newznab:attr') + item('torznab:attr'):
                                item_size = attr['value'] if attr['name'] == 'size' else item_size
                                seeders = try_int(attr['value']) if attr['name'] == 'seeders' else seeders
                                leechers = try_int(attr['value']) if attr['name'] == 'peers' else leechers

                        if not item_size or (torznab and (seeders is None or leechers is None)):
                            continue

                        size = convert_size(item_size) or -1

                        result = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers}
                        items.append(result)
                    except StandardError:
                        continue

            # Since we aren't using the search string,
            # break out of the search string loop
            if 'tvdbid' in search_params:
                break

        if torznab:
            results.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
        results += items

    return results
Example 46
Project: SickRage Source File: filelist.py
def __init__(self):
    # Provider Init
    TorrentProvider.__init__(self, "FileList")

    # Credentials
    self.username = None
    self.password = None

    # Torrent Stats
    self.minseed = None
    self.minleech = None

    # URLs
    self.url = "http://filelist.ro"
    self.urls = {
        "login": urljoin(self.url, "takelogin.php"),
        "search": urljoin(self.url, "browse.php"),
    }

    # Proper Strings
    self.proper_strings = ["PROPER", "REPACK"]

    # Cache
    self.cache = tvcache.TVCache(self)
Example 47
Project: SickRage Source File: scc.py
def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals,too-many-branches, too-many-statements
    results = []
    if not self.login():
        return results

    for mode in search_strings:
        items = []
        if mode != 'RSS':
            logger.log(u"Search Mode: {0}".format(mode), logger.DEBUG)
        for search_string in search_strings[mode]:
            if mode != 'RSS':
                logger.log(u"Search string: {0}".format
                           (search_string.decode("utf-8")), logger.DEBUG)

            search_url = self.urls['search'] % (quote(search_string), self.categories[mode])

            try:
                data = self.get_url(search_url, returns='text')
                time.sleep(cpu_presets[sickbeard.CPU_PRESET])
            except Exception as e:
                logger.log(u"Unable to fetch data. Error: {0}".format(repr(e)), logger.WARNING)
                continue  # without this, 'data' would be unbound below when the request raises

            if not data:
                continue

            with BS4Parser(data, 'html5lib') as html:
                torrent_table = html.find('table', id='torrents-table')
                torrent_rows = torrent_table('tr') if torrent_table else []

                # Continue only if at least one Release is found
                if len(torrent_rows) < 2:
                    logger.log(u"Data returned from provider does not contain any torrents", logger.DEBUG)
                    continue

                for result in torrent_table('tr')[1:]:
                    try:
                        link = result.find('td', class_='ttr_name').find('a')
                        url = result.find('td', class_='td_dl').find('a')

                        title = link.string
                        if re.search(r'\.\.\.', title):
                            data = self.get_url(urljoin(self.url, link['href']), returns='text')
                            if data:
                                with BS4Parser(data) as details_html:
                                    title = re.search('(?<=").+(?<!")', details_html.title.string).group(0)
                        download_url = self.urls['download'] % url['href']
                        seeders = int(result.find('td', class_='ttr_seeders').string)
                        leechers = int(result.find('td', class_='ttr_leechers').string)
                        torrent_size = result.find('td', class_='ttr_size').contents[0]
                        size = convert_size(torrent_size) or -1
                    except (AttributeError, TypeError):
                        continue

                    if not all([title, download_url]):
                        continue

                    # Filter unseeded torrent
                    if seeders < self.minseed or leechers < self.minleech:
                        if mode != 'RSS':
                            logger.log(u"Discarding torrent because it doesn't meet the minimum seeders or leechers: {0} (S:{1} L:{2})".format(title, seeders, leechers), logger.DEBUG)
                        continue

                    item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': ''}
                    if mode != 'RSS':
                        logger.log(u"Found result: {0} with {1} seeders and {2} leechers".format(title, seeders, leechers), logger.DEBUG)

                    items.append(item)

        # For each search mode sort all the items by seeders if available
        items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
        results += items

    return results
Example 48
Project: SickRage Source File: pushbullet.py
def _sendPushbullet(  # pylint: disable=too-many-arguments
        self, pushbullet_api=None, pushbullet_device=None, pushbullet_channel=None, event=None, message=None, link=None, force=False):

    if not (sickbeard.USE_PUSHBULLET or force):
        return False

    pushbullet_api = pushbullet_api or sickbeard.PUSHBULLET_API
    pushbullet_device = pushbullet_device or sickbeard.PUSHBULLET_DEVICE
    pushbullet_channel = pushbullet_channel or sickbeard.PUSHBULLET_CHANNEL

    logger.log('Pushbullet event: {0!r}'.format(event), logger.DEBUG)
    logger.log('Pushbullet message: {0!r}'.format(message), logger.DEBUG)
    logger.log('Pushbullet api: {0!r}'.format(pushbullet_api), logger.DEBUG)
    logger.log('Pushbullet devices: {0!r}'.format(pushbullet_device), logger.DEBUG)

    post_data = {
        'title': event,
        'body': message,
        'type': 'link' if link else 'note'
    }

    if link:
        post_data['url'] = link

    headers = {'Access-Token': pushbullet_api}

    if pushbullet_device:
        post_data['device_iden'] = pushbullet_device
    elif pushbullet_channel:
        post_data['channel_tag'] = pushbullet_channel

    response = helpers.getURL(urljoin(self.url, 'pushes'), session=self.session, post_data=post_data, headers=headers, returns='json') or {}

    failed = response.pop('error', {})
    if failed:
        logger.log('Pushbullet notification failed: {0}'.format(failed.pop('message')), logger.WARNING)
    else:
        logger.log('Pushbullet notification sent.', logger.DEBUG)

    return False if failed else True
Example 49
Project: SickRage Source File: filelist.py
def search(self, search_strings, age=0, ep_obj=None):  # pylint: disable=too-many-locals, too-many-branches
    results = []
    if not self.login():
        return results

    # Search Params
    search_params = {
        "search": "",
        "cat": 0
    }

    # Units
    units = ["B", "KB", "MB", "GB", "TB", "PB"]

    def process_column_header(td):
        result = ""
        if td.a and td.a.img:
            result = td.a.img.get("title", td.a.get_text(strip=True))
        if not result:
            result = td.get_text(strip=True)
        return result

    for mode in search_strings:
        items = []
        logger.log("Search Mode: {0}".format(mode), logger.DEBUG)

        for search_string in search_strings[mode]:
            if mode != "RSS":
                logger.log("Search string: {search}".format
                           (search=search_string.decode("utf-8")), logger.DEBUG)

            search_params["search"] = search_string
            search_url = self.urls["search"]
            data = self.get_url(search_url, params=search_params, returns="text")
            if not data:
                logger.log("No data returned from provider", logger.DEBUG)
                continue

            with BS4Parser(data, "html5lib") as html:
                torrent_rows = html.find_all("div", class_="torrentrow")

                # Continue only if at least one Release is found
                if not torrent_rows:
                    logger.log("Data returned from provider does not contain any torrents", logger.DEBUG)
                    continue

                # "Type", "Name", "Download", "Files", "Comments", "Added", "Size", "Snatched", "Seeders", "Leechers", "Upped by"
                labels = []

                columns = html.find_all("div", class_="colhead")
                for column in columns:
                    lbl = column.get_text(strip=True)
                    if lbl:
                        labels.append(str(lbl))
                    else:
                        lbl = column.find("img")
                        if lbl:
                            if lbl.has_attr("alt"):
                                lbl = lbl['alt']
                                labels.append(str(lbl))
                        else:
                            lbl = "Download"
                            labels.append(lbl)

                # Skip column headers
                for result in torrent_rows:
                    cells = result.find_all("div", class_="torrenttable")
                    if len(cells) < len(labels):
                        continue

                    try:
                        title = cells[labels.index("Name")].find("a").find("b").get_text(strip=True)
                        download_url = urljoin(self.url, cells[labels.index("Download")].find("a")["href"])
                        if not all([title, download_url]):
                            continue

                        seeders = try_int(cells[labels.index("Seeders")].find("span").get_text(strip=True))
                        leechers = try_int(cells[labels.index("Leechers")].find("span").get_text(strip=True))

                        # Filter unseeded torrent
                        if seeders < self.minseed or leechers < self.minleech:
                            if mode != "RSS":
                                logger.log("Discarding torrent because it doesn't meet the"
                                           " minimum seeders or leechers: {0} (S:{1} L:{2})".format
                                           (title, seeders, leechers), logger.DEBUG)
                            continue

                        torrent_size = cells[labels.index("Size")].find("span").get_text(strip=True)
                        size = convert_size(torrent_size, units=units, sep='') or -1

                        item = {'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'hash': None}
                        if mode != "RSS":
                            logger.log("Found result: {0} with {1} seeders and {2} leechers".format
                                       (title, seeders, leechers), logger.DEBUG)

                        items.append(item)
                    except StandardError:
                        continue

        # For each search mode sort all the items by seeders if available
        items.sort(key=lambda d: try_int(d.get('seeders', 0)), reverse=True)
        results += items

    return results
Example 50
Project: SickRage Source File: speedcd.py
def __init__(self):
    # Provider Init
    TorrentProvider.__init__(self, "Speedcd")

    # Credentials
    self.username = None
    self.password = None

    # Torrent Stats
    self.minseed = None
    self.minleech = None
    self.freeleech = False

    # URLs
    self.url = 'https://speed.cd'
    self.urls = {
        'login': urljoin(self.url, 'takeElogin.php'),
        'search': urljoin(self.url, 'browse.php'),
    }

    # Proper Strings
    self.proper_strings = ['PROPER', 'REPACK']

    # Cache
    self.cache = tvcache.TVCache(self)