Drop support for Python2. Resolves #84 (#144)

Diego Heras 2020-08-18 20:41:32 +02:00 committed by GitHub
parent e0303cff4e
commit 4897c77738
14 changed files with 10 additions and 1293 deletions

View File

@@ -1,24 +1,23 @@
 language: python
-dist: trusty
+dist: bionic
 sudo: false
 cache: pip
 matrix:
   include:
-    - python: 2.7
-      env: NOVA_DIR=nova
-    - python: 3.4
-      env: NOVA_DIR=nova3
+    - python: 3.5
+    - python: 3.6
+    - python: 3.7
+    - python: 3.8
     - python: nightly
-      env: NOVA_DIR=nova3
 install:
   - pip install pycodestyle pyflakes
 script:
-  - cd "$TRAVIS_BUILD_DIR/$NOVA_DIR/engines"
+  - cd "$TRAVIS_BUILD_DIR/nova3/engines"
   - python -m compileall *.py
   - pyflakes *.py
 # skipping E265, fixing it will break plugin usage on older qbt instances (< v4.1.2)
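For contributors who want to run the same checks before pushing, here is a rough local equivalent of the CI script above — a sketch only; it assumes pyflakes is installed (`pip install pycodestyle pyflakes`) and that it is run from the repository root:

```python
import compileall
import glob
import subprocess

# Byte-compile every plugin to surface syntax errors,
# as `python -m compileall *.py` does in CI.
compileall.compile_dir("nova3/engines", quiet=1)

# Lint the same files with pyflakes; check=True fails loudly on findings.
files = glob.glob("nova3/engines/*.py")
subprocess.run(["pyflakes"] + files, check=True)
```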

View File

@@ -3,9 +3,11 @@ Search Plugins
 [![TravisCI Status](https://travis-ci.org/qbittorrent/search-plugins.svg?branch=master)](https://travis-ci.org/qbittorrent/search-plugins)
-This repository contains search plugins for the search feature in qBittorrent.
+This repository contains search plugins for the search feature in [qBittorrent](https://github.com/qbittorrent/qBittorrent).
-:warning: We are going to remove support for Python2 soon. Please upgrade to Python3 to be prepared and get a better experience.
+:warning: We removed support for Python 2. Please upgrade to Python 3 to continue using the search function.
 Jackett search plugin is enabled by default but you have to install an external program to make it work. You can disable the Jackett search plugin or [install Jackett](https://github.com/qbittorrent/search-plugins/wiki/How-to-configure-Jackett-plugin).
 Most probably, you want to head over to the [wiki](https://github.com/qbittorrent/search-plugins/wiki):
 * [List of unofficial search plugins](https://github.com/qbittorrent/search-plugins/wiki/Unofficial-search-plugins)

View File

@@ -1,79 +0,0 @@
#VERSION: 1.10
# AUTHORS: nindogo
# CONTRIBUTORS: Diego de las Heras (ngosang@hotmail.es)
try:
# python3
from html.parser import HTMLParser
except ImportError:
# python2
from HTMLParser import HTMLParser
# qBt
from novaprinter import prettyPrinter
from helpers import retrieve_url
class eztv(object):
name = "EZTV"
url = 'https://eztv.io'
supported_categories = {'all': 'all', 'tv': 'tv'}
class MyHtmlParser(HTMLParser):
""" Sub-class for parsing results """
A, TD, TR, TABLE = ('a', 'td', 'tr', 'table')
def __init__(self, url):
HTMLParser.__init__(self)
self.url = url
self.in_table_row = False
self.current_item = {}
def handle_starttag(self, tag, attrs):
params = dict(attrs)
if (params.get('class') == 'forum_header_border'
and params.get('name') == 'hover'):
self.in_table_row = True
self.current_item = {}
self.current_item['seeds'] = -1
self.current_item['leech'] = -1
self.current_item['size'] = -1
self.current_item['engine_url'] = self.url
if (tag == self.A
and self.in_table_row and params.get('class') == 'magnet'):
self.current_item['link'] = params.get('href')
if (tag == self.A
and self.in_table_row and params.get('class') == 'epinfo'):
self.current_item['desc_link'] = self.url + params.get('href')
self.current_item['name'] = params.get('title').split(' (')[0]
def handle_data(self, data):
data = data.replace(',', '')
if (self.in_table_row
and (data.endswith(' KB') or data.endswith(' MB') or data.endswith(' GB'))):
self.current_item['size'] = data
elif self.in_table_row and data.isnumeric():
self.current_item['seeds'] = int(data)
def handle_endtag(self, tag):
if self.in_table_row and tag == self.TR:
prettyPrinter(self.current_item)
self.in_table_row = False
def search(self, what, cat='all'):
query = self.url + '/search/' + what.replace('%20', '-')
eztv_html = retrieve_url(query)
eztv_parser = self.MyHtmlParser(self.url)
eztv_parser.feed(eztv_html)
eztv_parser.close()
if __name__ == '__main__':
eztv_se = eztv()
eztv_se.search('Acre', 'all')
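Each engine ultimately hands a flat dict to prettyPrinter. Inferred from the fields this file and the plugins below populate, the expected shape looks roughly like this (all values here are made up for illustration):

```python
# Result dict as the plugins in this commit build it, with placeholder values.
result = {
    'name': 'Example.Show.S01E01',                 # torrent title
    'link': 'magnet:?xt=urn:btih:' + '0' * 40,     # magnet URI or .torrent URL
    'size': '1.2 GB',                              # -1 when unknown
    'seeds': 42,                                   # -1 when unknown
    'leech': 7,                                    # -1 when unknown
    'engine_url': 'https://eztv.io',               # matches the engine's url attribute
    'desc_link': 'https://eztv.io/ep/1/example/',  # description page
}
# prettyPrinter(result)  # inside a plugin, this prints one search result row
```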

View File

@@ -1,205 +0,0 @@
#VERSION: 3.4
# AUTHORS: Diego de las Heras (ngosang@hotmail.es)
# CONTRIBUTORS: ukharley
# hannsen (github.com/hannsen)
import json
import os
import xml.etree.ElementTree
try:
# python3
from urllib.parse import urlencode, unquote
from urllib import request as urllib_request
from http.cookiejar import CookieJar
except ImportError:
# python2
from urllib import urlencode, unquote
import urllib2 as urllib_request
from cookielib import CookieJar
# qBt
from novaprinter import prettyPrinter
from helpers import download_file
###############################################################################
# load configuration from file
CONFIG_FILE = 'jackett.json'
CONFIG_PATH = os.path.join(os.path.dirname(os.path.realpath(__file__)), CONFIG_FILE)
CONFIG_DATA = {
'api_key': 'YOUR_API_KEY_HERE', # jackett api
'tracker_first': False, # (False/True) add tracker name to beginning of search result
'url': 'http://127.0.0.1:9117', # jackett url
}
def load_configuration():
global CONFIG_PATH, CONFIG_DATA
try:
# try to load user data from file
with open(CONFIG_PATH) as f:
CONFIG_DATA = json.load(f)
except ValueError:
# if the file exists but is malformed, add a flag
CONFIG_DATA['malformed'] = True
except Exception:
# if file doesn't exist, we create it
with open(CONFIG_PATH, 'w') as f:
f.write(json.dumps(CONFIG_DATA, indent=4, sort_keys=True))
# do some checks
if any(item not in CONFIG_DATA for item in ['api_key', 'tracker_first', 'url']):
CONFIG_DATA['malformed'] = True
load_configuration()
###############################################################################
class jackett(object):
name = 'Jackett'
url = CONFIG_DATA['url'] if CONFIG_DATA['url'][-1] != '/' else CONFIG_DATA['url'][:-1]
api_key = CONFIG_DATA['api_key']
supported_categories = {
'all': None,
'anime': ['5070'],
'books': ['8000'],
'games': ['1000', '4000'],
'movies': ['2000'],
'music': ['3000'],
'software': ['4000'],
'tv': ['5000'],
}
def download_torrent(self, download_url):
# fix for some indexers with magnet link inside .torrent file
if download_url.startswith('magnet:?'):
print(download_url + " " + download_url)
response = self.get_response(download_url)
if response is not None and response.startswith('magnet:?'):
print(response + " " + download_url)
else:
print(download_file(download_url))
def search(self, what, cat='all'):
what = unquote(what)
category = self.supported_categories[cat.lower()]
# check for malformed configuration
if 'malformed' in CONFIG_DATA:
self.handle_error("malformed configuration file", what)
return
# check api_key
if self.api_key == "YOUR_API_KEY_HERE":
self.handle_error("api key error", what)
return
# prepare jackett url
params = [
('apikey', self.api_key),
('q', what)
]
if category is not None:
params.append(('cat', ','.join(category)))
params = urlencode(params)
jackett_url = self.url + "/api/v2.0/indexers/all/results/torznab/api?%s" % params
response = self.get_response(jackett_url)
if response is None:
self.handle_error("connection error", what)
return
# process search results
response_xml = xml.etree.ElementTree.fromstring(response)
for result in response_xml.find('channel').findall('item'):
res = {}
title = result.find('title')
if title is not None:
title = title.text
else:
continue
tracker = result.find('jackettindexer')
tracker = '' if tracker is None else tracker.text
if CONFIG_DATA['tracker_first']:
res['name'] = '[%s] %s' % (tracker, title)
else:
res['name'] = '%s [%s]' % (title, tracker)
res['link'] = result.find(self.generate_xpath('magneturl'))
if res['link'] is not None:
res['link'] = res['link'].attrib['value']
else:
res['link'] = result.find('link')
if res['link'] is not None:
res['link'] = res['link'].text
else:
continue
res['size'] = result.find('size')
res['size'] = -1 if res['size'] is None else (res['size'].text + ' B')
res['seeds'] = result.find(self.generate_xpath('seeders'))
res['seeds'] = -1 if res['seeds'] is None else int(res['seeds'].attrib['value'])
res['leech'] = result.find(self.generate_xpath('peers'))
res['leech'] = -1 if res['leech'] is None else int(res['leech'].attrib['value'])
if res['seeds'] != -1 and res['leech'] != -1:
res['leech'] -= res['seeds']
res['desc_link'] = result.find('comments')
if res['desc_link'] is not None:
res['desc_link'] = res['desc_link'].text
else:
res['desc_link'] = result.find('guid')
res['desc_link'] = '' if res['desc_link'] is None else res['desc_link'].text
# note: engine_url can't be changed, torrent download stops working
res['engine_url'] = self.url
prettyPrinter(self.escape_pipe(res))
def generate_xpath(self, tag):
return './{http://torznab.com/schemas/2015/feed}attr[@name="%s"]' % tag
# Safety measure until it's fixed in prettyPrinter
def escape_pipe(self, dictionary):
for key in dictionary.keys():
if isinstance(dictionary[key], str):
dictionary[key] = dictionary[key].replace('|', '%7C')
return dictionary
def get_response(self, query):
response = None
try:
# we can't use helpers.retrieve_url because of redirects
# we need the cookie processor to handle redirects
opener = urllib_request.build_opener(urllib_request.HTTPCookieProcessor(CookieJar()))
response = opener.open(query).read().decode('utf-8')
except urllib_request.HTTPError as e:
# if the page returns a magnet redirect, used in download_torrent
if e.code == 302:
response = e.url
except Exception:
pass
return response
def handle_error(self, error_msg, what):
# we need to print the search text to be displayed in qBittorrent when
# 'Torrent names only' is enabled
prettyPrinter({
'seeds': -1,
'size': -1,
'leech': -1,
'engine_url': self.url,
'link': self.url,
'desc_link': 'https://github.com/qbittorrent/search-plugins/wiki/How-to-configure-Jackett-plugin', # noqa
'name': "Jackett: %s! Right-click this row and select 'Open description page' to open help. Configuration file: '%s' Search: '%s'" % (error_msg, CONFIG_PATH, what) # noqa
})
if __name__ == "__main__":
jackett_se = jackett()
jackett_se.search("ubuntu server", 'software')
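load_configuration() above writes a default jackett.json next to the plugin on first run. A sketch of a filled-in config, generated the same way the plugin does — the API key below is a placeholder, not a real Jackett key:

```python
import json

# Same keys the plugin validates ('api_key', 'tracker_first', 'url').
config = {
    'api_key': '0123456789abcdef0123456789abcdef',  # from the Jackett dashboard
    'tracker_first': False,          # True puts the tracker name first in results
    'url': 'http://127.0.0.1:9117',  # trailing slash optional; the plugin strips it
}
with open('jackett.json', 'w') as f:
    f.write(json.dumps(config, indent=4, sort_keys=True))
```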

View File

@@ -1,170 +0,0 @@
#VERSION: 2.2
#AUTHORS: Vikas Yadav (https://github.com/v1k45 | http://v1k45.com)
#CONTRIBUTORS: Diego de las Heras (ngosang@hotmail.es)
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import re
try:
# python3
from html.parser import HTMLParser
except ImportError:
# python2
from HTMLParser import HTMLParser
# qBt
from helpers import retrieve_url
from novaprinter import prettyPrinter
class leetx(object):
url = "https://1337x.to"
name = "1337x"
supported_categories = {
'all': 'All',
'movies': 'Movies',
'tv': 'TV',
'music': 'Music',
'games': 'Games',
'anime': 'Anime',
'software': 'Apps'
}
class MyHtmlParser(HTMLParser):
""" Sub-class for parsing results """
A, TABLE, TR, TD, SPAN = ('a', 'table', 'tr', 'td', 'span')
def __init__(self, results, url):
HTMLParser.__init__(self)
self.results = results
self.url = url
self.current_result = {}
self.current_item = None
self.inside_table = False
self.inside_row = False
def handle_starttag(self, tag, attrs):
# are we inside the results table body or not
# if we are not inside the table, no need to process any further
self.inside_table = self.inside_table or tag == self.TABLE
if not self.inside_table:
return
# convert attrs tuple to dictionary
attrs = dict(attrs)
# for torrent name and link
link = attrs.get('href', '')
if tag == self.A and link.startswith('/torrent'):
self.current_result['link'] = self.url + link
self.current_result['desc_link'] = self.url + link
self.current_result['engine_url'] = self.url
self.current_item = 'name'
# to ignore uploader name attached to the torrent size in span tag
if tag == self.SPAN:
self.current_item = None
# if this is a <td> there can be seeds, leeches or size inside it.
if tag == self.TD:
self.inside_row = True
# find the appropriate data key using the td's class name
for item in ['seeds', 'leech', 'size']:
if item in attrs.get('class', ''):
self.current_item = item
break
def handle_data(self, data):
# if we are not inside the table, no need to process any further
if not self.inside_table:
return
# do not process data if we are not inside the table body
if self.current_item:
prev_value = self.current_result.get(self.current_item, '')
self.current_result[self.current_item] = prev_value + data
def handle_endtag(self, tag):
# are we inside the results table body or not
# if we are not inside the table, no need to process any further
if tag == self.TABLE:
self.inside_table = False
if not self.inside_table:
return
# exiting the table data and maybe moving td or tr element
if self.inside_row and tag == self.TD:
self.inside_row = False
self.current_item = None
# exiting the tr element means all necessary data for a torrent has been
# extracted; save the result and reset the parser state.
if self.current_result and tag == self.TR:
if 'size' in self.current_result:
self.current_result['size'] = self.current_result['size'].replace(',', '')
# skip malformed names (e.g. with @)
if 'name' in self.current_result:
prettyPrinter(self.current_result)
self.results.append('a')
self.current_result = {}
self.current_item = None
def download_torrent(self, download_url):
# since 1337x does not provide torrent links in the search results,
# we will have to fetch the page and extract the magnet link
torrent_page = retrieve_url(download_url)
magnet_match = re.search(r"href\s*\=\s*\"(magnet[^\"]+)\"", torrent_page)
if magnet_match and magnet_match.groups():
print(magnet_match.groups()[0] + " " + download_url)
else:
raise Exception('Error, please fill a bug report!')
def search(self, what, cat='all'):
cat = cat.lower()
# decide which type of search to perform based on category
search_page = "search" if cat == 'all' else 'category-search'
search_url = "{url}/{search_page}/{search_query}/".format(
url=self.url, search_page=search_page, search_query=what)
# apply search category to url, if any.
if cat != 'all':
search_url += self.supported_categories[cat] + "/"
# try up to 15 pages (20 * 15 = 300 results) and stop when no more results are found
results_list = []
parser = self.MyHtmlParser(results_list, self.url)
page = 1
while page < 16:
# download the page
html = retrieve_url(search_url + str(page) + '/')
parser.feed(html)
if len(results_list) < 1:
break
del results_list[:]
page += 1
parser.close()
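download_torrent() above relies on qBittorrent's stdout contract: print a single line of the form "&lt;link&gt; &lt;original_url&gt;". Reduced to a sketch (the magnet URI and page URL are made up):

```python
def emit_download(link, download_url):
    # qBittorrent parses one line: the magnet/.torrent link, a space, the source URL
    print(link + " " + download_url)

emit_download("magnet:?xt=urn:btih:" + "0" * 40,
              "https://1337x.to/torrent/1/example/")
```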

View File

@@ -1,104 +0,0 @@
#VERSION: 2.4
# AUTHORS: Christophe Dumez (chris@qbittorrent.org)
# Douman (custparasite@gmx.se)
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from novaprinter import prettyPrinter
from helpers import retrieve_url, download_file
from HTMLParser import HTMLParser
from re import compile as re_compile
class legittorrents(object):
url = 'http://www.legittorrents.info'
name = 'Legit Torrents'
supported_categories = {'all': '0', 'movies': '1', 'tv': '13',
'music': '2', 'games': '3', 'anime': '5', 'books': '6'}
def download_torrent(self, info):
print(download_file(info))
class MyHtmlParseWithBlackJack(HTMLParser):
""" Parser class """
def __init__(self, url):
HTMLParser.__init__(self)
self.url = url
self.current_item = None
self.save_item_key = None
def handle_starttag(self, tag, attrs):
""" Parser's start tag handler """
if self.current_item:
params = dict(attrs)
if tag == "a":
link = params["href"]
if link.startswith("index") and "title" in params:
# description link
self.current_item["name"] = params["title"][14:]
self.current_item["desc_link"] = "/".join((self.url, link))
elif link.startswith("download"):
self.current_item["link"] = "/".join((self.url, link))
elif tag == "td":
if ("width" in params and params["width"] == "30"
and "leech" not in self.current_item):
self.save_item_key = "leech" if "seeds" in self.current_item else "seeds"
elif tag == "tr":
self.current_item = {}
self.current_item["size"] = ""
self.current_item["engine_url"] = self.url
def handle_endtag(self, tag):
""" Parser's end tag handler """
if self.current_item and tag == "tr":
if len(self.current_item) > 4:
prettyPrinter(self.current_item)
self.current_item = None
def handle_data(self, data):
""" Parser's data handler """
if self.save_item_key:
self.current_item[self.save_item_key] = data.strip()
self.save_item_key = None
def search(self, what, cat='all'):
""" Performs search """
query = "".join((self.url, "/index.php?page=torrents&search=", what, "&category=",
self.supported_categories.get(cat, '0'), "&active=1"))
get_table = re_compile(r'(?s)<table\sclass="lista".*>(.*)</table>')
data = get_table.search(retrieve_url(query)).group(0)
# extract links to the next ten pages of results
next_pages = re_compile('(?m)<option value="(.*)">[0-9]+</option>')
next_pages = ["".join((self.url, page)) for page in next_pages.findall(data)[:10]]
parser = self.MyHtmlParseWithBlackJack(self.url)
parser.feed(data)
parser.close()
for page in next_pages:
parser.feed(get_table.search(retrieve_url(page)).group(0))
parser.close()
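search() above narrows the HTML to the results table with a DOTALL regex before feeding the parser. The trick in isolation, run against made-up markup:

```python
import re

# (?s) makes '.' match newlines, so the whole table body is captured.
get_table = re.compile(r'(?s)<table\sclass="lista".*>(.*)</table>')
html = '<html><body><table class="lista">\n<tr><td>row</td></tr>\n</table></body></html>'
match = get_table.search(html)
if match:
    print(match.group(0))  # only the table markup reaches the parser
```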

View File

@@ -1,133 +0,0 @@
#VERSION: 4.5
# AUTHORS: Lima66
# CONTRIBUTORS: Diego de las Heras (ngosang@hotmail.es)
import re
try:
# python3
from html.parser import HTMLParser
from urllib.parse import quote
except ImportError:
# python2
from HTMLParser import HTMLParser
from urllib import quote
# qBt
from novaprinter import prettyPrinter
from helpers import retrieve_url
# Fix invalid certificate in Windows
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
class limetorrents(object):
url = "https://limetor.com"
name = "LimeTorrents"
supported_categories = {'all': 'all',
'anime': 'anime',
'software': 'applications',
'games': 'games',
'movies': 'movies',
'music': 'music',
'tv': 'tv'}
class MyHtmlParser(HTMLParser):
""" Sub-class for parsing results """
def error(self, message):
pass
A, TD, TR, HREF = ('a', 'td', 'tr', 'href')
def __init__(self, url):
HTMLParser.__init__(self)
self.url = url
self.current_item = {} # dict for found item
self.item_name = None # key's name in current_item dict
self.page_empty = 22000
self.inside_tr = False
self.findTable = False
self.parser_class = {"tdnormal": "size", # class
"tdseed": "seeds",
"tdleech": "leech"}
def handle_starttag(self, tag, attrs):
params = dict(attrs)
if params.get('class') == 'table2':
self.findTable = True
if tag == self.TR and self.findTable and (params.get('bgcolor') == '#F4F4F4' or params.get('bgcolor') == '#FFFFFF'): # noqa
self.inside_tr = True
self.current_item = {}
if not self.inside_tr:
return
if self.inside_tr and tag == self.TD:
if "class" in params:
self.item_name = self.parser_class.get(params["class"], None)
if self.item_name:
self.current_item[self.item_name] = -1
if self.inside_tr and tag == self.A and self.HREF in params:
link = params["href"]
if link.startswith("http://itorrents.org/torrent/"):
self.current_item["engine_url"] = self.url
self.item_name = "name"
elif link.endswith(".html"):
try:
safe_link = quote(self.url + link, safe='/:')
except KeyError:
safe_link = self.url + link
self.current_item["link"] = safe_link
self.current_item["desc_link"] = safe_link
def handle_data(self, data):
if self.inside_tr and self.item_name:
if self.item_name == 'size' and (data.endswith('MB') or data.endswith('GB')):
self.current_item[self.item_name] = data.strip().replace(',', '')
elif not self.item_name == 'size':
self.current_item[self.item_name] = data.strip().replace(',', '')
self.item_name = None
def handle_endtag(self, tag):
if tag == 'table':
self.findTable = False
if self.inside_tr and tag == self.TR:
self.inside_tr = False
self.item_name = None
array_length = len(self.current_item)
if array_length < 1:
return
prettyPrinter(self.current_item)
self.current_item = {}
def download_torrent(self, info):
# since limetorrents provides torrent links in itorrent (cloudflare protected),
# we have to fetch the info page and extract the magnet link
info_page = retrieve_url(info)
magnet_match = re.search(r"href\s*\=\s*\"(magnet[^\"]+)\"", info_page)
if magnet_match and magnet_match.groups():
print(magnet_match.groups()[0] + " " + info)
else:
raise Exception('Error, please fill a bug report!')
def search(self, query, cat='all'):
""" Performs search """
query = query.replace("%20", "-")
category = self.supported_categories[cat]
parser = self.MyHtmlParser(self.url)
page = 1
while True:
page_url = "{0}/search/{1}/{2}/seeds/{3}/".format(self.url, category, query, page)
html = retrieve_url(page_url)
lunghezza_html = len(html)  # page length in characters; below page_empty means no results
if page > 6 or lunghezza_html <= parser.page_empty:
return
parser.feed(html)
page += 1
parser.close()
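The module-wide ssl patch above disables certificate verification for every HTTPS request the plugin process makes. A narrower variant, if only a single fetch needs it (a sketch; the URL is illustrative):

```python
import ssl
from urllib.request import urlopen

# Unverified context for one request instead of patching the module default.
ctx = ssl._create_unverified_context()
page = urlopen("https://limetor.com/", context=ctx).read()
```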

View File

@@ -1,191 +0,0 @@
#VERSION: 2.20
# AUTHORS: Fabien Devaux (fab@gnux.info)
# CONTRIBUTORS: Christophe Dumez (chris@qbittorrent.org)
# Arthur (custparasite@gmx.se)
# Diego de las Heras (ngosang@hotmail.es)
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
try:
# python3
from html.parser import HTMLParser
except ImportError:
# python2
from HTMLParser import HTMLParser
# qBt
from novaprinter import prettyPrinter
from helpers import download_file, retrieve_url
# Fix invalid certificate in Windows
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
class piratebay(object):
""" Search engine class """
url = 'https://pirateproxy.live'
name = 'The Pirate Bay'
supported_categories = {'all': '0', 'music': '100', 'movies': '200',
'games': '400', 'software': '300'}
def download_torrent(self, info):
""" Downloader """
print(download_file(info))
class MyHtmlParser(HTMLParser):
""" Parser class """
def __init__(self, results, url):
HTMLParser.__init__(self)
self.results = results
self.url = url
self.current_item = None
self.save_item = None
self.result_table = False # table with results is found
self.result_tbody = False
self.add_query = True
self.result_query = False
self.list_searches = []  # pagination links collected in handle_starttag
def handle_start_tag_default(self, attrs):
""" Default handler for start tag dispatcher """
pass
def handle_start_tag_a(self, attrs):
""" Handler for start tag a """
params = dict(attrs)
link = params["href"].replace(self.url, '')
if link.startswith("/torrent"):
self.current_item["desc_link"] = "".join((self.url, link))
self.save_item = "name"
elif link.startswith("magnet"):
self.current_item["link"] = link
# end of the 'name' item
self.current_item['name'] = self.current_item['name'].strip()
self.save_item = None
def handle_start_tag_font(self, attrs):
""" Handler for start tag font """
for attr in attrs:
if attr[1] == "detDesc":
self.save_item = "size"
break
def handle_start_tag_td(self, attrs):
""" Handler for start tag td """
for attr in attrs:
if attr[1] == "right":
if "seeds" in self.current_item.keys():
self.save_item = "leech"
else:
self.save_item = "seeds"
break
def handle_starttag(self, tag, attrs):
""" Parser's start tag handler """
if self.current_item:
dispatcher = getattr(self,
"_".join(("handle_start_tag", tag)),
self.handle_start_tag_default)
dispatcher(attrs)
elif self.result_tbody:
if tag == "tr":
self.current_item = {"engine_url": self.url}
elif tag == "table":
self.result_table = "searchResult" == attrs[0][1]
elif self.add_query:
if self.result_query and tag == "a":
if len(self.list_searches) < 10:
self.list_searches.append(attrs[0][1])
else:
self.add_query = False
self.result_query = False
elif tag == "div":
self.result_query = "center" == attrs[0][1]
def handle_endtag(self, tag):
""" Parser's end tag handler """
if self.result_tbody:
if tag == "tr":
if 'size' in self.current_item:
# clean up size
temp_data = self.current_item['size'].split()
if "Size" in temp_data:
indx = temp_data.index("Size")
self.current_item['size'] = (temp_data[indx + 1] + " "
+ temp_data[indx + 2])
else:
self.current_item['size'] = -1
# return result
prettyPrinter(self.current_item)
self.results.append('a')
self.current_item = None
elif tag == "font":
self.save_item = None
elif tag == "table":
self.result_table = self.result_tbody = False
elif self.result_table:
if tag == "thead":
self.result_tbody = True
elif tag == "table":
self.result_table = self.result_tbody = False
elif self.add_query and self.result_query:
if tag == "div":
self.add_query = self.result_query = False
def handle_data(self, data):
""" Parser's data handler """
if self.save_item:
if (self.save_item == "size" or self.save_item == "name"):
if self.save_item not in self.current_item:
self.current_item[self.save_item] = ''
self.current_item[self.save_item] += " " + data
else:
self.current_item[self.save_item] = data
self.save_item = None
def search(self, what, cat='all'):
""" Performs search """
cat = cat.lower()
# try up to 10 pages (10 * 30 = 300 results) and stop when no more results are found
results_list = []
parser = self.MyHtmlParser(results_list, self.url)
page = 1
while page < 11:
# prepare the query; the 7 in the path sorts results by seeders
page_url = "{0}/search/{1}/{2}/7/{3}".format(self.url, what, page,
self.supported_categories[cat])
html = retrieve_url(page_url)
parser.feed(html)
if len(results_list) < 1:
break
del results_list[:]
page += 1
parser.close()
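MyHtmlParser above routes start tags through getattr to per-tag handlers. The dispatch pattern in isolation (a sketch):

```python
class TagDispatcher:
    def handle_start_tag_default(self, attrs):
        pass  # unknown tags are ignored

    def handle_start_tag_a(self, attrs):
        print("anchor:", dict(attrs))

    def handle_starttag(self, tag, attrs):
        # look up handle_start_tag_<tag>, falling back to the default
        dispatcher = getattr(self,
                             "_".join(("handle_start_tag", tag)),
                             self.handle_start_tag_default)
        dispatcher(attrs)

TagDispatcher().handle_starttag("a", [("href", "/torrent/1")])
```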

View File

@@ -1,90 +0,0 @@
#VERSION: 2.12
# AUTHORS: b0nk
# CONTRIBUTORS: Diego de las Heras (ngosang@hotmail.es)
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import json
import time
try:
# python3
from urllib.parse import urlencode, unquote
except ImportError:
# python2
from urllib import urlencode, unquote
# qBt
from novaprinter import prettyPrinter
from helpers import retrieve_url
class rarbg(object):
url = 'https://rarbg.to'
name = 'RARBG'
supported_categories = {
'all': '4;14;17;18;23;25;27;28;32;33;40;41;42;44;45;46;47;48;49;50;51;52;53;54',
'movies': '14;17;42;44;45;46;47;48;50;51;52;54',
'tv': '18;41;49',
'music': '23;25',
'games': '27;28;32;40;53',
'software': '33'
}
def search(self, what, cat='all'):
base_url = "https://torrentapi.org/pubapi_v2.php?%s"
app_id = "qbittorrent"
# get token
params = urlencode({'get_token': 'get_token', 'app_id': app_id})
response = retrieve_url(base_url % params)
j = json.loads(response)
token = j['token']
time.sleep(2.1)  # torrentapi allows roughly one request every two seconds
# get response json
what = unquote(what)
category = self.supported_categories[cat]
params = urlencode({'mode': 'search',
'search_string': what,
'ranked': 0,
'category': category,
'limit': 100,
'sort': 'seeders',
'format': 'json_extended',
'token': token,
'app_id': app_id})
response = retrieve_url(base_url % params)
j = json.loads(response)
# parse results
for result in j['torrent_results']:
res = {'link': result['download'],
'name': result['title'],
'size': str(result['size']) + " B",
'seeds': result['seeders'],
'leech': result['leechers'],
'engine_url': self.url,
'desc_link': result['info_page'] + "&app_id=" + app_id}
prettyPrinter(res)
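The torrentapi flow above is two requests: fetch a short-lived token, wait out the rate limit, then search with token= appended. A compressed sketch of the URL construction (the token value is made up; real calls need network access):

```python
from urllib.parse import urlencode

base_url = "https://torrentapi.org/pubapi_v2.php?%s"
token = "abcdef0123"  # placeholder; really returned by the get_token call

params = urlencode({'mode': 'search', 'search_string': 'ubuntu server',
                    'sort': 'seeders', 'format': 'json_extended',
                    'token': token, 'app_id': 'qbittorrent'})
print(base_url % params)  # the search request the plugin issues
```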

View File

@@ -1,101 +0,0 @@
#VERSION: 2.1
# AUTHORS: Douman (custparasite@gmx.se)
# CONTRIBUTORS: Diego de las Heras (ngosang@hotmail.es)
from novaprinter import prettyPrinter
from helpers import retrieve_url, download_file
from re import compile as re_compile
from HTMLParser import HTMLParser
class torlock(object):
url = "https://www.torlock.com"
name = "TorLock"
supported_categories = {'all': 'all',
'anime': 'anime',
'software': 'software',
'games': 'game',
'movies': 'movie',
'music': 'music',
'tv': 'television',
'books': 'ebooks'}
def download_torrent(self, info):
print(download_file(info))
class MyHtmlParser(HTMLParser):
""" Sub-class for parsing results """
def __init__(self, url):
HTMLParser.__init__(self)
self.url = url
self.article_found = False # true when <article> with results is found
self.item_found = False
self.item_bad = False # set to True for malicious links
self.current_item = None # dict for found item
self.item_name = None # key's name in current_item dict
self.parser_class = {"ts": "size",
"tul": "seeds",
"tdl": "leech"}
def handle_starttag(self, tag, attrs):
params = dict(attrs)
if self.item_found:
if tag == "td":
if "class" in params:
self.item_name = self.parser_class.get(params["class"], None)
if self.item_name:
self.current_item[self.item_name] = ""
elif self.article_found and tag == "a":
if "href" in params:
link = params["href"]
if link.startswith("/torrent"):
self.current_item["desc_link"] = "".join((self.url, link))
self.current_item["link"] = "".join((self.url, "/tor/",
link.split('/')[2], ".torrent"))
self.current_item["engine_url"] = self.url
self.item_found = True
self.item_name = "name"
self.current_item["name"] = ""
self.item_bad = "rel" in params and params["rel"] == "nofollow"
elif tag == "article":
self.article_found = True
self.current_item = {}
def handle_data(self, data):
if self.item_name:
self.current_item[self.item_name] += data
def handle_endtag(self, tag):
if tag == "article":
self.article_found = False
elif self.item_name and (tag == "a" or tag == "td"):
self.item_name = None
elif self.item_found and tag == "tr":
self.item_found = False
if not self.item_bad:
prettyPrinter(self.current_item)
self.current_item = {}
def search(self, query, cat='all'):
""" Performs search """
query = query.replace("%20", "-")
parser = self.MyHtmlParser(self.url)
page = "".join((self.url, "/", self.supported_categories[cat],
"/torrents/", query, ".html?sort=seeds&page=1"))
html = retrieve_url(page)
parser.feed(html)
counter = 1
additional_pages = re_compile(r"/{0}/torrents/{1}.html\?sort=seeds&page=[0-9]+"
.format(self.supported_categories[cat], query))
list_searches = additional_pages.findall(html)[:-1]  # drop the last match; it is the "next page" link
for page in map(lambda link: "".join((self.url, link)), list_searches):
html = retrieve_url(page)
parser.feed(html)
counter += 1
if counter > 3:
break
parser.close()

View File

@@ -1,82 +0,0 @@
#VERSION: 1.0
# AUTHORS: Dessalines
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the author nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import json
try:
# python3
from urllib.parse import urlencode
except ImportError:
# python2
from urllib import urlencode
# qBt
from novaprinter import prettyPrinter
from helpers import retrieve_url
class torrentscsv(object):
url = 'https://torrents-csv.ml'
name = 'torrents-csv'
supported_categories = {'all': ''}
# initialize trackers for magnet links
trackers_list = [
'udp://tracker.coppersurfer.tk:6969/announce',
'udp://tracker.leechers-paradise.org:6969/announce',
'udp://tracker.opentrackr.org:1337/announce',
'udp://tracker.openbittorrent.com:80/announce',
'udp://exodus.desync.com:6969/announce',
'udp://9.rarbg.me:2710/announce',
'udp://9.rarbg.to:2710/announce',
'udp://tracker.tiny-vps.com:6969/announce',
'udp://retracker.lanta-net.ru:2710/announce',
'udp://open.demonii.si:1337/announce'
]
trackers = '&'.join(urlencode({'tr': tracker}) for tracker in trackers_list)
def search(self, what, cat='all'):
search_url = "{}/service/search?size=300&q={}".format(self.url, what)
desc_url = "{}/#/search/torrent/{}/1".format(self.url, what)
# get response json
response = retrieve_url(search_url)
response_json = json.loads(response)
# parse results
for result in response_json:
res = {'link': self.download_link(result),
'name': result['name'],
'size': str(result['size_bytes']) + " B",
'seeds': result['seeders'],
'leech': result['leechers'],
'engine_url': self.url,
'desc_link': desc_url}
prettyPrinter(res)
def download_link(self, result):
return "magnet:?xt=urn:btih:{}&{}&{}".format(
result['infohash'], urlencode({'dn': result['name']}), self.trackers)
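What download_link() above assembles, shown for a made-up result (a sketch; the infohash is a placeholder):

```python
from urllib.parse import urlencode

infohash = "0123456789abcdef0123456789abcdef01234567"  # placeholder
name = "Example Torrent"
trackers = "&".join(urlencode({'tr': t}) for t in
                    ['udp://tracker.opentrackr.org:1337/announce'])
magnet = "magnet:?xt=urn:btih:{}&{}&{}".format(
    infohash, urlencode({'dn': name}), trackers)
print(magnet)
# magnet:?xt=urn:btih:0123...&dn=Example+Torrent&tr=udp%3A%2F%2F...
```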

View File

@@ -1,10 +0,0 @@
eztv: 1.10
jackett: 3.4
leetx: 2.2
legittorrents: 2.4
limetorrents: 4.5
piratebay: 2.20
rarbg: 2.12
torlock: 2.1
torrentscsv: 1.0
zooqle: 1.13

View File

@@ -1,119 +0,0 @@
#VERSION: 1.13
# AUTHORS: Kanishk Singh (https://github.com/ArionMiles/)
# CONTRIBUTORS: affaff (https://github.com/affaff)
# Copyright (c) 2017 Kanishk Singh
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from xml.dom import minidom
from novaprinter import prettyPrinter
from io import StringIO
import gzip
user_agent = 'Mozilla/5.0 (X11; Linux i686; rv:38.0) Gecko/20100101 Firefox/38.0'
headers = {'User-Agent': user_agent}
try:
from urllib2 import urlopen, Request, URLError
except ImportError:
from urllib.request import urlopen, Request, URLError
def retrieve_url_nodecode(url):
""" Return the content of the url page as a string """
req = Request(url, headers=headers)
try:
response = urlopen(req)
except URLError as err:
print(" ".join(("Connection error:", str(err.reason))))
print(" ".join(("URL:", url)))
return ""
dat = response.read()
# Check for the gzip magic number (a str comparison, so Python 2 only)
if dat[:2] == '\037\213':
# Data is gzip encoded, decode it
compressedstream = StringIO(dat)
gzipper = gzip.GzipFile(fileobj=compressedstream)
extracted_data = gzipper.read()
dat = extracted_data
return dat
return dat
class zooqle(object):
""" Search engine class """
url = 'https://zooqle.com'
name = 'Zooqle'
supported_categories = {'all': 'all',
'movies': 'Movies',
'tv': 'TV',
'music': 'Music',
'games': 'Games',
'anime': 'Anime',
'software': 'Apps',
'books': 'Books'}
def search(self, what, cat="all"):
""" Performs search """
page = 1
while page < 11:
query = "".join((self.url, "/search?q=", what,
"+category%3A", self.supported_categories[cat], "&fmt=rss"))
if page > 1:
query = query + "&pg=" + str(page)
response = retrieve_url_nodecode(query)
xmldoc = minidom.parseString(response)
itemlist = xmldoc.getElementsByTagName('item')
if len(itemlist) == 0:
return
for item in itemlist:
zooqle_dict = {"engine_url": self.url}
zooqle_dict['name'] = (item.getElementsByTagName('title')[0]
.childNodes[0].data)
zooqle_dict["size"] = (item.getElementsByTagName('enclosure')[0]
.attributes['length'].childNodes[0].data)
if zooqle_dict["size"] == '0':
zooqle_dict["link"] = (item.getElementsByTagName('torrent:magnetURI')[0]
.childNodes[0].data)
else:
zooqle_dict["link"] = (item.getElementsByTagName('enclosure')[0]
.attributes['url'].value)
zooqle_dict["desc_link"] = (item.getElementsByTagName('link')[0]
.childNodes[0].data)
zooqle_dict["leech"] = (item.getElementsByTagName('torrent:peers')[0]
.childNodes[0].data)
if not zooqle_dict["leech"].isdigit():
zooqle_dict["leech"] = ''
zooqle_dict["seeds"] = (item.getElementsByTagName('torrent:seeds')[0]
.childNodes[0].data)
if not zooqle_dict["seeds"].isdigit():
zooqle_dict["seeds"] = ''
prettyPrinter(zooqle_dict)
totalResultVal = (xmldoc.getElementsByTagName('opensearch:totalResults')[0]
.childNodes[0].data)
startIndex = (xmldoc.getElementsByTagName('opensearch:startIndex')[0]
.childNodes[0].data)
itemsPerPage = (xmldoc.getElementsByTagName('opensearch:itemsPerPage')[0]
.childNodes[0].data)
if (int(startIndex) + int(itemsPerPage)) > int(totalResultVal):
return
page += 1
return
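One caveat in retrieve_url_nodecode() above: the magic-number check compares the response against the str '\037\213', which only matches under Python 2; on Python 3 the response is bytes. A Python 3-safe version of the same idea (a sketch):

```python
import gzip
import io

def maybe_gunzip(dat):
    # b'\x1f\x8b' is the gzip magic number; compare bytes to bytes.
    if dat[:2] == b'\x1f\x8b':
        return gzip.GzipFile(fileobj=io.BytesIO(dat)).read()
    return dat
```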