diff --git a/.travis.yml b/.travis.yml
index e0dc03e..3538c40 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -15,9 +15,11 @@ matrix:
env: NOVA_DIR=nova3
install:
- - pip install pyflakes
+ - pip install pycodestyle pyflakes
script:
- - cd "$TRAVIS_BUILD_DIR"
- - python -m compileall "$NOVA_DIR/engines/"*.py
- - pyflakes "$NOVA_DIR/engines/"*.py
+ - cd "$TRAVIS_BUILD_DIR/$NOVA_DIR/engines"
+ - python -m compileall *.py
+ - pyflakes *.py
+ # skipping E265, fixing it will break plugin usage on older qbt instances (< v4.1.2)
+ - pycodestyle --ignore=E265,W503 --max-line-length=100 --statistics *.py
diff --git a/nova/engines/btdb.py b/nova/engines/btdb.py
index 6777899..62913d5 100644
--- a/nova/engines/btdb.py
+++ b/nova/engines/btdb.py
@@ -1,6 +1,6 @@
-#VERSION: 1.03
-#AUTHORS: Charles Worthing
-#CONTRIBUTORS: Diego de las Heras (ngosang@hotmail.es)
+#VERSION: 1.04
+# AUTHORS: Charles Worthing
+# CONTRIBUTORS: Diego de las Heras (ngosang@hotmail.es)
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
@@ -27,10 +27,11 @@
# POSSIBILITY OF SUCH DAMAGE.
from HTMLParser import HTMLParser
-#qBt
+# qBt
from novaprinter import prettyPrinter
from helpers import download_file, retrieve_url
+
class btdb(object):
""" Search engine class """
url = 'https://btdb.to'
@@ -46,9 +47,9 @@ class btdb(object):
HTMLParser.__init__(self)
self.results = results
self.url = url
- self.current_item = {} # One torrent result
+ self.current_item = {} # One torrent result
self.add_query = True
- self.torrent_info_index = 0 # Count of the meta data encountered
+ self.torrent_info_index = 0 # Count of the meta data encountered
self.torrent_info_array = []
self.meta_data_grabbing = 0
self.meta_data_array = []
@@ -85,15 +86,15 @@ class btdb(object):
if link.startswith("magnet:"):
self.magnet_link = link
- def handle_endtag(self, tag):
+ def handle_endtag(self, tag):
if tag == "script":
return
if tag == "div":
if self.meta_data_grabbing > 0:
-
- self.torrent_no_files = self.meta_data_array[2] # Not used
- self.torrent_date_added = self.meta_data_array[4] # Not used
- self.torrent_popularity = self.meta_data_array[6] # Not used
+
+ self.torrent_no_files = self.meta_data_array[2] # Not used
+ self.torrent_date_added = self.meta_data_array[4] # Not used
+ self.torrent_popularity = self.meta_data_array[6] # Not used
self.current_item["size"] = self.meta_data_array[0]
self.current_item["name"] = self.torrent_name
diff --git a/nova/engines/eztv.py b/nova/engines/eztv.py
index 05a8c8f..4213767 100644
--- a/nova/engines/eztv.py
+++ b/nova/engines/eztv.py
@@ -1,6 +1,6 @@
-#VERSION: 1.00
-#AUTHORS: nindogo
-#CONTRIBUTORS: Diego de las Heras (ngosang@hotmail.es)
+#VERSION: 1.01
+# AUTHORS: nindogo
+# CONTRIBUTORS: Diego de las Heras (ngosang@hotmail.es)
try:
# python3
@@ -13,6 +13,7 @@ except ImportError:
from novaprinter import prettyPrinter
from helpers import retrieve_url
+
class eztv(object):
name = "EZTV"
url = 'https://eztv.ag'
@@ -45,13 +46,16 @@ class eztv(object):
self.current_item['desc_link'] = self.url + params.get('href')
self.current_item['name'] = params.get('title').split(' (')[0]
- if tag == self.TD and params.get('class') == 'forum_thread_post_end' and params.get('align') == 'center':
+ if (tag == self.TD
+ and params.get('class') == 'forum_thread_post_end'
+ and params.get('align') == 'center'):
prettyPrinter(self.current_item)
self.in_table_row = False
def handle_data(self, data):
data = data.replace(',', '')
- if self.in_table_row and (data.endswith('MB') or data.endswith('GB') or data.endswith('KB')):
+ if (self.in_table_row
+ and (data.endswith('MB') or data.endswith('GB') or data.endswith('KB'))):
self.current_item['size'] = data
if self.in_table_row and (data.isalnum() or data == '-'):
@@ -65,13 +69,14 @@ class eztv(object):
self.in_table_row = False
def search(self, what, cat='all'):
- query = self.url + '/search/' + what.replace('%20','-')
+ query = self.url + '/search/' + what.replace('%20', '-')
eztv_html = retrieve_url(query)
eztv_parser = self.MyHtmlParser(self.url)
eztv_parser.feed(eztv_html)
eztv_parser.close()
+
if __name__ == '__main__':
eztv_se = eztv()
eztv_se.search('Acre', 'all')
diff --git a/nova/engines/legittorrents.py b/nova/engines/legittorrents.py
index 5cacfc8..d9be445 100644
--- a/nova/engines/legittorrents.py
+++ b/nova/engines/legittorrents.py
@@ -1,6 +1,6 @@
-#VERSION: 2.02
-#AUTHORS: Christophe Dumez (chris@qbittorrent.org)
-# Douman (custparasite@gmx.se)
+#VERSION: 2.03
+# AUTHORS: Christophe Dumez (chris@qbittorrent.org)
+# Douman (custparasite@gmx.se)
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
@@ -26,16 +26,17 @@
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
-
from novaprinter import prettyPrinter
from helpers import retrieve_url, download_file
from HTMLParser import HTMLParser
from re import compile as re_compile
+
class legittorrents(object):
url = 'http://www.legittorrents.info'
name = 'Legit Torrents'
- supported_categories = {'all': '0', 'movies': '1', 'tv': '13', 'music': '2', 'games': '3', 'anime': '5', 'books': '6'}
+ supported_categories = {'all': '0', 'movies': '1', 'tv': '13',
+ 'music': '2', 'games': '3', 'anime': '5', 'books': '6'}
def download_torrent(self, info):
print(download_file(info))
@@ -55,13 +56,14 @@ class legittorrents(object):
if tag == "a":
link = params["href"]
if link.startswith("index") and "title" in params:
- #description link
+ # description link
self.current_item["name"] = params["title"][14:]
self.current_item["desc_link"] = "/".join((self.url, link))
elif link.startswith("download"):
self.current_item["link"] = "/".join((self.url, link))
elif tag == "td":
- if "width" in params and params["width"] == "30" and not "leech" in self.current_item:
+ if ("width" in params and params["width"] == "30"
+ and "leech" not in self.current_item):
self.save_item_key = "leech" if "seeds" in self.current_item else "seeds"
elif tag == "tr":
@@ -84,11 +86,12 @@ class legittorrents(object):
def search(self, what, cat='all'):
""" Performs search """
- query = "".join((self.url, "/index.php?page=torrents&search=", what, "&category=", self.supported_categories.get(cat, '0'), "&active=1"))
+ query = "".join((self.url, "/index.php?page=torrents&search=", what, "&category=",
+ self.supported_categories.get(cat, '0'), "&active=1"))
-        get_table = re_compile('(?s)<table class="lista" width="100%">(.*)</table>')
+        get_table = re_compile(r'(?s)<table class="lista" width="100%">(.*)</table>')
data = get_table.search(retrieve_url(query)).group(0)
- #extract first ten pages of next results
+ # extract first ten pages of next results
         next_pages = re_compile('(?m)<option value="(.*)">[0-9]+</option>')
next_pages = ["".join((self.url, page)) for page in next_pages.findall(data)[:10]]
diff --git a/nova/engines/piratebay.py b/nova/engines/piratebay.py
index ba047d1..b7ee284 100644
--- a/nova/engines/piratebay.py
+++ b/nova/engines/piratebay.py
@@ -1,8 +1,8 @@
-#VERSION: 2.15
-#AUTHORS: Fabien Devaux (fab@gnux.info)
-#CONTRIBUTORS: Christophe Dumez (chris@qbittorrent.org)
-# Arthur (custparasite@gmx.se)
-# Diego de las Heras (ngosang@hotmail.es)
+#VERSION: 2.16
+# AUTHORS: Fabien Devaux (fab@gnux.info)
+# CONTRIBUTORS: Christophe Dumez (chris@qbittorrent.org)
+# Arthur (custparasite@gmx.se)
+# Diego de las Heras (ngosang@hotmail.es)
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
@@ -29,15 +29,17 @@
# POSSIBILITY OF SUCH DAMAGE.
from HTMLParser import HTMLParser
-#qBt
+# qBt
from novaprinter import prettyPrinter
from helpers import download_file, retrieve_url
+
class piratebay(object):
""" Search engine class """
url = 'https://thepiratebay.org'
name = 'The Pirate Bay'
- supported_categories = {'all': '0', 'music': '100', 'movies': '200', 'games': '400', 'software': '300'}
+ supported_categories = {'all': '0', 'music': '100', 'movies': '200',
+ 'games': '400', 'software': '300'}
def download_torrent(self, info):
""" Downloader """
@@ -51,7 +53,7 @@ class piratebay(object):
self.url = url
self.current_item = None
self.save_item = None
- self.result_table = False #table with results is found
+ self.result_table = False # table with results is found
self.result_tbody = False
self.add_query = True
self.result_query = False
@@ -93,12 +95,14 @@ class piratebay(object):
def handle_starttag(self, tag, attrs):
""" Parser's start tag handler """
if self.current_item:
- dispatcher = getattr(self, "_".join(("handle_start_tag", tag)), self.handle_start_tag_default)
+ dispatcher = getattr(self,
+ "_".join(("handle_start_tag", tag)),
+ self.handle_start_tag_default)
dispatcher(attrs)
elif self.result_tbody:
if tag == "tr":
- self.current_item = {"engine_url" : self.url}
+ self.current_item = {"engine_url": self.url}
elif tag == "table":
self.result_table = "searchResult" == attrs[0][1]
@@ -141,7 +145,8 @@ class piratebay(object):
temp_data = data.split()
if "Size" in temp_data:
indx = temp_data.index("Size")
- self.current_item[self.save_item] = temp_data[indx + 1] + " " + temp_data[indx + 2]
+ self.current_item[self.save_item] = (temp_data[indx + 1] + " "
+ + temp_data[indx + 2])
elif self.save_item == "name":
# names with special characters like '&' are splitted in several pieces
@@ -153,10 +158,9 @@ class piratebay(object):
self.current_item[self.save_item] = data
self.save_item = None
-
def search(self, what, cat='all'):
""" Performs search """
- #prepare query. 7 is filtering by seeders
+ # prepare query. 7 is filtering by seeders
cat = cat.lower()
query = "/".join((self.url, "search", what, "0", "7", self.supported_categories[cat]))
diff --git a/nova/engines/torlock.py b/nova/engines/torlock.py
index f154bf0..d71e17c 100644
--- a/nova/engines/torlock.py
+++ b/nova/engines/torlock.py
@@ -1,23 +1,24 @@
-#VERSION: 2.0
-#AUTHORS: Douman (custparasite@gmx.se)
-#CONTRIBUTORS: Diego de las Heras (ngosang@hotmail.es)
+#VERSION: 2.1
+# AUTHORS: Douman (custparasite@gmx.se)
+# CONTRIBUTORS: Diego de las Heras (ngosang@hotmail.es)
from novaprinter import prettyPrinter
from helpers import retrieve_url, download_file
from re import compile as re_compile
from HTMLParser import HTMLParser
+
class torlock(object):
url = "https://www.torlock.com"
name = "TorLock"
- supported_categories = {'all' : 'all',
- 'anime' : 'anime',
- 'software' : 'software',
- 'games' : 'game',
- 'movies' : 'movie',
- 'music' : 'music',
- 'tv' : 'television',
- 'books' : 'ebooks'}
+ supported_categories = {'all': 'all',
+ 'anime': 'anime',
+ 'software': 'software',
+ 'games': 'game',
+ 'movies': 'movie',
+ 'music': 'music',
+ 'tv': 'television',
+ 'books': 'ebooks'}
def download_torrent(self, info):
print(download_file(info))
@@ -27,14 +28,14 @@ class torlock(object):
def __init__(self, url):
HTMLParser.__init__(self)
self.url = url
-            self.article_found = False #true when <article> with results is found
+            self.article_found = False  # true when <article> with results is found
self.item_found = False
- self.item_bad = False #set to True for malicious links
- self.current_item = None #dict for found item
- self.item_name = None #key's name in current_item dict
- self.parser_class = {"ts" : "size",
- "tul" : "seeds",
- "tdl" : "leech"}
+ self.item_bad = False # set to True for malicious links
+ self.current_item = None # dict for found item
+ self.item_name = None # key's name in current_item dict
+ self.parser_class = {"ts": "size",
+ "tul": "seeds",
+ "tdl": "leech"}
def handle_starttag(self, tag, attrs):
params = dict(attrs)
@@ -50,7 +51,8 @@ class torlock(object):
link = params["href"]
if link.startswith("/torrent"):
self.current_item["desc_link"] = "".join((self.url, link))
- self.current_item["link"] = "".join((self.url, "/tor/", link.split('/')[2], ".torrent"))
+ self.current_item["link"] = "".join((self.url, "/tor/",
+ link.split('/')[2], ".torrent"))
self.current_item["engine_url"] = self.url
self.item_found = True
self.item_name = "name"
@@ -81,13 +83,15 @@ class torlock(object):
query = query.replace("%20", "-")
parser = self.MyHtmlParser(self.url)
- page = "".join((self.url, "/", self.supported_categories[cat], "/torrents/", query, ".html?sort=seeds&page=1"))
+ page = "".join((self.url, "/", self.supported_categories[cat],
+ "/torrents/", query, ".html?sort=seeds&page=1"))
html = retrieve_url(page)
parser.feed(html)
counter = 1
- additional_pages = re_compile("/{0}/torrents/{1}.html\?sort=seeds&page=[0-9]+".format(self.supported_categories[cat], query))
- list_searches = additional_pages.findall(html)[:-1] #last link is next(i.e. second)
+ additional_pages = re_compile(r"/{0}/torrents/{1}.html\?sort=seeds&page=[0-9]+"
+ .format(self.supported_categories[cat], query))
+ list_searches = additional_pages.findall(html)[:-1] # last link is next(i.e. second)
for page in map(lambda link: "".join((self.url, link)), list_searches):
html = retrieve_url(page)
parser.feed(html)
diff --git a/nova/engines/versions.txt b/nova/engines/versions.txt
index 13233ec..934ecd6 100644
--- a/nova/engines/versions.txt
+++ b/nova/engines/versions.txt
@@ -1,6 +1,6 @@
-btdb: 1.03
-eztv: 1.00
-legittorrents: 2.02
-piratebay: 2.15
-torlock: 2.0
-zooqle: 1.12
+btdb: 1.04
+eztv: 1.01
+legittorrents: 2.03
+piratebay: 2.16
+torlock: 2.1
+zooqle: 1.13
diff --git a/nova/engines/zooqle.py b/nova/engines/zooqle.py
index 6e5f7f9..39a32ad 100644
--- a/nova/engines/zooqle.py
+++ b/nova/engines/zooqle.py
@@ -1,6 +1,6 @@
-#VERSION: 1.12
-#AUTHORS: Kanishk Singh (https://github.com/ArionMiles/)
-#CONTRIBUTORS: affaff (https://github.com/affaff)
+#VERSION: 1.13
+# AUTHORS: Kanishk Singh (https://github.com/ArionMiles/)
+# CONTRIBUTORS: affaff (https://github.com/affaff)
# Copyright (c) 2017 Kanishk Singh
@@ -25,11 +25,12 @@
from xml.dom import minidom
from novaprinter import prettyPrinter
-user_agent = 'Mozilla/5.0 (X11; Linux i686; rv:38.0) Gecko/20100101 Firefox/38.0'
-headers = {'User-Agent': user_agent}
-
from io import StringIO
import gzip
+
+user_agent = 'Mozilla/5.0 (X11; Linux i686; rv:38.0) Gecko/20100101 Firefox/38.0'
+headers = {'User-Agent': user_agent}
+
try:
from urllib2 import urlopen, Request, URLError
except ImportError:
@@ -38,7 +39,7 @@ except ImportError:
def retrieve_url_nodecode(url):
""" Return the content of the url page as a string """
- req = Request(url, headers = headers)
+ req = Request(url, headers=headers)
try:
response = urlopen(req)
except URLError as errno:
@@ -56,50 +57,63 @@ def retrieve_url_nodecode(url):
return dat
return dat
+
class zooqle(object):
""" Search engine class """
url = 'https://zooqle.com'
name = 'Zooqle'
- supported_categories = {'all' : 'all',
- 'movies' : 'Movies',
- 'tv' : 'TV',
- 'music' : 'Music',
- 'games' : 'Games',
- 'anime' : 'Anime',
- 'software' : 'Apps',
- 'books' : 'Books'}
+ supported_categories = {'all': 'all',
+ 'movies': 'Movies',
+ 'tv': 'TV',
+ 'music': 'Music',
+ 'games': 'Games',
+ 'anime': 'Anime',
+ 'software': 'Apps',
+ 'books': 'Books'}
+
def search(self, what, cat="all"):
""" Performs search """
page = 1
while page < 11:
- query = "".join((self.url, "/search?q=", what, "+category%3A", self.supported_categories[cat], "&fmt=rss"))
- if( page>1 ):
- query = query + "&pg=" + str (page)
+ query = "".join((self.url, "/search?q=", what,
+ "+category%3A", self.supported_categories[cat], "&fmt=rss"))
+ if page > 1:
+ query = query + "&pg=" + str(page)
response = retrieve_url_nodecode(query)
xmldoc = minidom.parseString(response)
itemlist = xmldoc.getElementsByTagName('item')
- if( len(itemlist ) ==0):
+ if len(itemlist) == 0:
return
for item in itemlist:
- zooqle_dict = zooqle_dict = {"engine_url" : self.url}
- zooqle_dict['name'] = item.getElementsByTagName('title')[0].childNodes[0].data
- zooqle_dict["size"] = item.getElementsByTagName('enclosure')[0].attributes['length'].childNodes[0].data
- if( zooqle_dict["size"]=='0'):
- zooqle_dict["link"] = item.getElementsByTagName('torrent:magnetURI')[0].childNodes[0].data
+ zooqle_dict = zooqle_dict = {"engine_url": self.url}
+ zooqle_dict['name'] = (item.getElementsByTagName('title')[0]
+ .childNodes[0].data)
+ zooqle_dict["size"] = (item.getElementsByTagName('enclosure')[0]
+ .attributes['length'].childNodes[0].data)
+ if zooqle_dict["size"] == '0':
+ zooqle_dict["link"] = (item.getElementsByTagName('torrent:magnetURI')[0]
+ .childNodes[0].data)
else:
- zooqle_dict["link"] = item.getElementsByTagName('enclosure')[0].attributes['url'].value
- zooqle_dict["desc_link"] = item.getElementsByTagName('link')[0].childNodes[0].data
- zooqle_dict["leech"] = item.getElementsByTagName('torrent:peers')[0].childNodes[0].data
+ zooqle_dict["link"] = (item.getElementsByTagName('enclosure')[0]
+ .attributes['url'].value)
+ zooqle_dict["desc_link"] = (item.getElementsByTagName('link')[0]
+ .childNodes[0].data)
+ zooqle_dict["leech"] = (item.getElementsByTagName('torrent:peers')[0]
+ .childNodes[0].data)
if not zooqle_dict["leech"].isdigit():
zooqle_dict["leech"] = ''
- zooqle_dict["seeds"] = item.getElementsByTagName('torrent:seeds')[0].childNodes[0].data
+ zooqle_dict["seeds"] = (item.getElementsByTagName('torrent:seeds')[0]
+ .childNodes[0].data)
if not zooqle_dict["seeds"].isdigit():
zooqle_dict["seeds"] = ''
prettyPrinter(zooqle_dict)
- totalResultVal = xmldoc.getElementsByTagName('opensearch:totalResults')[0].childNodes[0].data
- startIndex = xmldoc.getElementsByTagName('opensearch:startIndex')[0].childNodes[0].data
- itemsPerPage = xmldoc.getElementsByTagName('opensearch:itemsPerPage')[0].childNodes[0].data
- if( ( int(startIndex) + int(itemsPerPage) > int( totalResultVal ))):
+ totalResultVal = (xmldoc.getElementsByTagName('opensearch:totalResults')[0]
+ .childNodes[0].data)
+ startIndex = (xmldoc.getElementsByTagName('opensearch:startIndex')[0]
+ .childNodes[0].data)
+ itemsPerPage = (xmldoc.getElementsByTagName('opensearch:itemsPerPage')[0]
+ .childNodes[0].data)
+ if (int(startIndex) + int(itemsPerPage)) > int(totalResultVal):
return
page += 1
return
diff --git a/nova3/engines/btdb.py b/nova3/engines/btdb.py
index a362871..d17003d 100644
--- a/nova3/engines/btdb.py
+++ b/nova3/engines/btdb.py
@@ -1,6 +1,6 @@
-#VERSION: 1.03
-#AUTHORS: Charles Worthing
-#CONTRIBUTORS: Diego de las Heras (ngosang@hotmail.es)
+#VERSION: 1.04
+# AUTHORS: Charles Worthing
+# CONTRIBUTORS: Diego de las Heras (ngosang@hotmail.es)
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
@@ -27,10 +27,11 @@
# POSSIBILITY OF SUCH DAMAGE.
from html.parser import HTMLParser
-#qBt
+# qBt
from novaprinter import prettyPrinter
from helpers import download_file, retrieve_url
+
class btdb(object):
""" Search engine class """
url = 'https://btdb.to'
@@ -46,9 +47,9 @@ class btdb(object):
HTMLParser.__init__(self)
self.results = results
self.url = url
- self.current_item = {} # One torrent result
+ self.current_item = {} # One torrent result
self.add_query = True
- self.torrent_info_index = 0 # Count of the meta data encountered
+ self.torrent_info_index = 0 # Count of the meta data encountered
self.torrent_info_array = []
self.meta_data_grabbing = 0
self.meta_data_array = []
@@ -85,15 +86,14 @@ class btdb(object):
if link.startswith("magnet:"):
self.magnet_link = link
- def handle_endtag(self, tag):
+ def handle_endtag(self, tag):
if tag == "script":
return
if tag == "div":
if self.meta_data_grabbing > 0:
-
- self.torrent_no_files = self.meta_data_array[2] # Not used
- self.torrent_date_added = self.meta_data_array[4] # Not used
- self.torrent_popularity = self.meta_data_array[6] # Not used
+ self.torrent_no_files = self.meta_data_array[2] # Not used
+ self.torrent_date_added = self.meta_data_array[4] # Not used
+ self.torrent_popularity = self.meta_data_array[6] # Not used
self.current_item["size"] = self.meta_data_array[0]
self.current_item["name"] = self.torrent_name
diff --git a/nova3/engines/eztv.py b/nova3/engines/eztv.py
index 05a8c8f..b352dde 100644
--- a/nova3/engines/eztv.py
+++ b/nova3/engines/eztv.py
@@ -1,6 +1,6 @@
-#VERSION: 1.00
-#AUTHORS: nindogo
-#CONTRIBUTORS: Diego de las Heras (ngosang@hotmail.es)
+#VERSION: 1.01
+# AUTHORS: nindogo
+# CONTRIBUTORS: Diego de las Heras (ngosang@hotmail.es)
try:
# python3
@@ -13,6 +13,7 @@ except ImportError:
from novaprinter import prettyPrinter
from helpers import retrieve_url
+
class eztv(object):
name = "EZTV"
url = 'https://eztv.ag'
@@ -32,26 +33,32 @@ class eztv(object):
def handle_starttag(self, tag, attrs):
params = dict(attrs)
- if (params.get('class') == 'forum_header_border' and params.get('name') == 'hover'):
+ if (params.get('class') == 'forum_header_border'
+ and params.get('name') == 'hover'):
self.in_table_row = True
self.current_item = {}
self.current_item['leech'] = -1
self.current_item['engine_url'] = self.url
- if tag == self.A and self.in_table_row and params.get('class') == 'magnet':
+ if (tag == self.A
+ and self.in_table_row and params.get('class') == 'magnet'):
self.current_item['link'] = params.get('href')
- if tag == self.A and self.in_table_row and params.get('class') == 'epinfo':
+ if (tag == self.A
+ and self.in_table_row and params.get('class') == 'epinfo'):
self.current_item['desc_link'] = self.url + params.get('href')
self.current_item['name'] = params.get('title').split(' (')[0]
- if tag == self.TD and params.get('class') == 'forum_thread_post_end' and params.get('align') == 'center':
+ if (tag == self.TD
+ and params.get('class') == 'forum_thread_post_end'
+ and params.get('align') == 'center'):
prettyPrinter(self.current_item)
self.in_table_row = False
def handle_data(self, data):
data = data.replace(',', '')
- if self.in_table_row and (data.endswith('MB') or data.endswith('GB') or data.endswith('KB')):
+ if (self.in_table_row
+ and (data.endswith('MB') or data.endswith('GB') or data.endswith('KB'))):
self.current_item['size'] = data
if self.in_table_row and (data.isalnum() or data == '-'):
@@ -65,13 +72,14 @@ class eztv(object):
self.in_table_row = False
def search(self, what, cat='all'):
- query = self.url + '/search/' + what.replace('%20','-')
+ query = self.url + '/search/' + what.replace('%20', '-')
eztv_html = retrieve_url(query)
eztv_parser = self.MyHtmlParser(self.url)
eztv_parser.feed(eztv_html)
eztv_parser.close()
+
if __name__ == '__main__':
eztv_se = eztv()
eztv_se.search('Acre', 'all')
diff --git a/nova3/engines/legittorrents.py b/nova3/engines/legittorrents.py
index 16095c7..dae9bf7 100644
--- a/nova3/engines/legittorrents.py
+++ b/nova3/engines/legittorrents.py
@@ -1,6 +1,6 @@
-#VERSION: 2.02
-#AUTHORS: Christophe Dumez (chris@qbittorrent.org)
-# Douman (custparasite@gmx.se)
+#VERSION: 2.03
+# AUTHORS: Christophe Dumez (chris@qbittorrent.org)
+# Douman (custparasite@gmx.se)
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
@@ -26,16 +26,18 @@
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
-
from novaprinter import prettyPrinter
from helpers import retrieve_url, download_file
from html.parser import HTMLParser
from re import compile as re_compile
+
class legittorrents(object):
url = 'http://www.legittorrents.info'
name = 'Legit Torrents'
- supported_categories = {'all': '0', 'movies': '1', 'tv': '13', 'music': '2', 'games': '3', 'anime': '5', 'books': '6'}
+ supported_categories = {'all': '0', 'movies': '1', 'tv': '13',
+ 'music': '2', 'games': '3', 'anime': '5',
+ 'books': '6'}
def download_torrent(self, info):
print(download_file(info))
@@ -55,13 +57,14 @@ class legittorrents(object):
if tag == "a":
link = params["href"]
if link.startswith("index") and "title" in params:
- #description link
+ # description link
self.current_item["name"] = params["title"][14:]
self.current_item["desc_link"] = "/".join((self.url, link))
elif link.startswith("download"):
self.current_item["link"] = "/".join((self.url, link))
elif tag == "td":
- if "width" in params and params["width"] == "30" and not "leech" in self.current_item:
+ if ("width" in params
+ and params["width"] == "30" and "leech" not in self.current_item):
self.save_item_key = "leech" if "seeds" in self.current_item else "seeds"
elif tag == "tr":
@@ -84,11 +87,12 @@ class legittorrents(object):
def search(self, what, cat='all'):
""" Performs search """
- query = "".join((self.url, "/index.php?page=torrents&search=", what, "&category=", self.supported_categories.get(cat, '0'), "&active=1"))
+ query = "".join((self.url, "/index.php?page=torrents&search=", what, "&category=",
+ self.supported_categories.get(cat, '0'), "&active=1"))
-        get_table = re_compile('(?s)<table class="lista" width="100%">(.*)</table>')
+        get_table = re_compile(r'(?s)<table class="lista" width="100%">(.*)</table>')
data = get_table.search(retrieve_url(query)).group(0)
- #extract first ten pages of next results
+ # extract first ten pages of next results
         next_pages = re_compile('(?m)<option value="(.*)">[0-9]+</option>')
next_pages = ["".join((self.url, page)) for page in next_pages.findall(data)[:10]]
diff --git a/nova3/engines/piratebay.py b/nova3/engines/piratebay.py
index 3beab98..6b78254 100644
--- a/nova3/engines/piratebay.py
+++ b/nova3/engines/piratebay.py
@@ -1,8 +1,8 @@
-#VERSION: 2.15
-#AUTHORS: Fabien Devaux (fab@gnux.info)
-#CONTRIBUTORS: Christophe Dumez (chris@qbittorrent.org)
-# Arthur (custparasite@gmx.se)
-# Diego de las Heras (ngosang@hotmail.es)
+#VERSION: 2.16
+# AUTHORS: Fabien Devaux (fab@gnux.info)
+# CONTRIBUTORS: Christophe Dumez (chris@qbittorrent.org)
+# Arthur (custparasite@gmx.se)
+# Diego de las Heras (ngosang@hotmail.es)
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
@@ -29,15 +29,17 @@
# POSSIBILITY OF SUCH DAMAGE.
from html.parser import HTMLParser
-#qBt
+# qBt
from novaprinter import prettyPrinter
from helpers import download_file, retrieve_url
+
class piratebay(object):
""" Search engine class """
url = 'https://thepiratebay.org'
name = 'The Pirate Bay'
- supported_categories = {'all': '0', 'music': '100', 'movies': '200', 'games': '400', 'software': '300'}
+ supported_categories = {'all': '0', 'music': '100', 'movies': '200',
+ 'games': '400', 'software': '300'}
def download_torrent(self, info):
""" Downloader """
@@ -51,7 +53,7 @@ class piratebay(object):
self.url = url
self.current_item = None
self.save_item = None
- self.result_table = False #table with results is found
+ self.result_table = False # table with results is found
self.result_tbody = False
self.add_query = True
self.result_query = False
@@ -93,12 +95,14 @@ class piratebay(object):
def handle_starttag(self, tag, attrs):
""" Parser's start tag handler """
if self.current_item:
- dispatcher = getattr(self, "_".join(("handle_start_tag", tag)), self.handle_start_tag_default)
+ dispatcher = getattr(self,
+ "_".join(("handle_start_tag", tag)),
+ self.handle_start_tag_default)
dispatcher(attrs)
elif self.result_tbody:
if tag == "tr":
- self.current_item = {"engine_url" : self.url}
+ self.current_item = {"engine_url": self.url}
elif tag == "table":
self.result_table = "searchResult" == attrs[0][1]
@@ -141,7 +145,8 @@ class piratebay(object):
temp_data = data.split()
if "Size" in temp_data:
indx = temp_data.index("Size")
- self.current_item[self.save_item] = temp_data[indx + 1] + " " + temp_data[indx + 2]
+ self.current_item[self.save_item] = (temp_data[indx + 1] + " "
+ + temp_data[indx + 2])
elif self.save_item == "name":
# names with special characters like '&' are splitted in several pieces
@@ -153,10 +158,9 @@ class piratebay(object):
self.current_item[self.save_item] = data
self.save_item = None
-
def search(self, what, cat='all'):
""" Performs search """
- #prepare query. 7 is filtering by seeders
+ # prepare query. 7 is filtering by seeders
cat = cat.lower()
query = "/".join((self.url, "search", what, "0", "7", self.supported_categories[cat]))
diff --git a/nova3/engines/torlock.py b/nova3/engines/torlock.py
index a1afbf1..6e68bdd 100644
--- a/nova3/engines/torlock.py
+++ b/nova3/engines/torlock.py
@@ -1,23 +1,24 @@
-#VERSION: 2.0
-#AUTHORS: Douman (custparasite@gmx.se)
-#CONTRIBUTORS: Diego de las Heras (ngosang@hotmail.es)
+#VERSION: 2.1
+# AUTHORS: Douman (custparasite@gmx.se)
+# CONTRIBUTORS: Diego de las Heras (ngosang@hotmail.es)
from novaprinter import prettyPrinter
from helpers import retrieve_url, download_file
from re import compile as re_compile
from html.parser import HTMLParser
+
class torlock(object):
url = "https://www.torlock.com"
name = "TorLock"
- supported_categories = {'all' : 'all',
- 'anime' : 'anime',
- 'software' : 'software',
- 'games' : 'game',
- 'movies' : 'movie',
- 'music' : 'music',
- 'tv' : 'television',
- 'books' : 'ebooks'}
+ supported_categories = {'all': 'all',
+ 'anime': 'anime',
+ 'software': 'software',
+ 'games': 'game',
+ 'movies': 'movie',
+ 'music': 'music',
+ 'tv': 'television',
+ 'books': 'ebooks'}
def download_torrent(self, info):
print(download_file(info))
@@ -27,14 +28,14 @@ class torlock(object):
def __init__(self, url):
HTMLParser.__init__(self)
self.url = url
-            self.article_found = False #true when <article> with results is found
+            self.article_found = False  # true when <article> with results is found
self.item_found = False
- self.item_bad = False #set to True for malicious links
- self.current_item = None #dict for found item
- self.item_name = None #key's name in current_item dict
- self.parser_class = {"ts" : "size",
- "tul" : "seeds",
- "tdl" : "leech"}
+ self.item_bad = False # set to True for malicious links
+ self.current_item = None # dict for found item
+ self.item_name = None # key's name in current_item dict
+ self.parser_class = {"ts": "size",
+ "tul": "seeds",
+ "tdl": "leech"}
def handle_starttag(self, tag, attrs):
params = dict(attrs)
@@ -50,7 +51,8 @@ class torlock(object):
link = params["href"]
if link.startswith("/torrent"):
self.current_item["desc_link"] = "".join((self.url, link))
- self.current_item["link"] = "".join((self.url, "/tor/", link.split('/')[2], ".torrent"))
+ self.current_item["link"] = "".join((self.url, "/tor/",
+ link.split('/')[2], ".torrent"))
self.current_item["engine_url"] = self.url
self.item_found = True
self.item_name = "name"
@@ -81,13 +83,15 @@ class torlock(object):
query = query.replace("%20", "-")
parser = self.MyHtmlParser(self.url)
- page = "".join((self.url, "/", self.supported_categories[cat], "/torrents/", query, ".html?sort=seeds&page=1"))
+ page = "".join((self.url, "/", self.supported_categories[cat],
+ "/torrents/", query, ".html?sort=seeds&page=1"))
html = retrieve_url(page)
parser.feed(html)
counter = 1
- additional_pages = re_compile("/{0}/torrents/{1}.html\?sort=seeds&page=[0-9]+".format(self.supported_categories[cat], query))
- list_searches = additional_pages.findall(html)[:-1] #last link is next(i.e. second)
+ additional_pages = re_compile(r"/{0}/torrents/{1}.html\?sort=seeds&page=[0-9]+"
+ .format(self.supported_categories[cat], query))
+ list_searches = additional_pages.findall(html)[:-1] # last link is next(i.e. second)
for page in map(lambda link: "".join((self.url, link)), list_searches):
html = retrieve_url(page)
parser.feed(html)
diff --git a/nova3/engines/versions.txt b/nova3/engines/versions.txt
index 13233ec..934ecd6 100644
--- a/nova3/engines/versions.txt
+++ b/nova3/engines/versions.txt
@@ -1,6 +1,6 @@
-btdb: 1.03
-eztv: 1.00
-legittorrents: 2.02
-piratebay: 2.15
-torlock: 2.0
-zooqle: 1.12
+btdb: 1.04
+eztv: 1.01
+legittorrents: 2.03
+piratebay: 2.16
+torlock: 2.1
+zooqle: 1.13
diff --git a/nova3/engines/zooqle.py b/nova3/engines/zooqle.py
index 6e5f7f9..39a32ad 100644
--- a/nova3/engines/zooqle.py
+++ b/nova3/engines/zooqle.py
@@ -1,6 +1,6 @@
-#VERSION: 1.12
-#AUTHORS: Kanishk Singh (https://github.com/ArionMiles/)
-#CONTRIBUTORS: affaff (https://github.com/affaff)
+#VERSION: 1.13
+# AUTHORS: Kanishk Singh (https://github.com/ArionMiles/)
+# CONTRIBUTORS: affaff (https://github.com/affaff)
# Copyright (c) 2017 Kanishk Singh
@@ -25,11 +25,12 @@
from xml.dom import minidom
from novaprinter import prettyPrinter
-user_agent = 'Mozilla/5.0 (X11; Linux i686; rv:38.0) Gecko/20100101 Firefox/38.0'
-headers = {'User-Agent': user_agent}
-
from io import StringIO
import gzip
+
+user_agent = 'Mozilla/5.0 (X11; Linux i686; rv:38.0) Gecko/20100101 Firefox/38.0'
+headers = {'User-Agent': user_agent}
+
try:
from urllib2 import urlopen, Request, URLError
except ImportError:
@@ -38,7 +39,7 @@ except ImportError:
def retrieve_url_nodecode(url):
""" Return the content of the url page as a string """
- req = Request(url, headers = headers)
+ req = Request(url, headers=headers)
try:
response = urlopen(req)
except URLError as errno:
@@ -56,50 +57,63 @@ def retrieve_url_nodecode(url):
return dat
return dat
+
class zooqle(object):
""" Search engine class """
url = 'https://zooqle.com'
name = 'Zooqle'
- supported_categories = {'all' : 'all',
- 'movies' : 'Movies',
- 'tv' : 'TV',
- 'music' : 'Music',
- 'games' : 'Games',
- 'anime' : 'Anime',
- 'software' : 'Apps',
- 'books' : 'Books'}
+ supported_categories = {'all': 'all',
+ 'movies': 'Movies',
+ 'tv': 'TV',
+ 'music': 'Music',
+ 'games': 'Games',
+ 'anime': 'Anime',
+ 'software': 'Apps',
+ 'books': 'Books'}
+
def search(self, what, cat="all"):
""" Performs search """
page = 1
while page < 11:
- query = "".join((self.url, "/search?q=", what, "+category%3A", self.supported_categories[cat], "&fmt=rss"))
- if( page>1 ):
- query = query + "&pg=" + str (page)
+ query = "".join((self.url, "/search?q=", what,
+ "+category%3A", self.supported_categories[cat], "&fmt=rss"))
+ if page > 1:
+ query = query + "&pg=" + str(page)
response = retrieve_url_nodecode(query)
xmldoc = minidom.parseString(response)
itemlist = xmldoc.getElementsByTagName('item')
- if( len(itemlist ) ==0):
+ if len(itemlist) == 0:
return
for item in itemlist:
- zooqle_dict = zooqle_dict = {"engine_url" : self.url}
- zooqle_dict['name'] = item.getElementsByTagName('title')[0].childNodes[0].data
- zooqle_dict["size"] = item.getElementsByTagName('enclosure')[0].attributes['length'].childNodes[0].data
- if( zooqle_dict["size"]=='0'):
- zooqle_dict["link"] = item.getElementsByTagName('torrent:magnetURI')[0].childNodes[0].data
+                zooqle_dict = {"engine_url": self.url}
+ zooqle_dict['name'] = (item.getElementsByTagName('title')[0]
+ .childNodes[0].data)
+ zooqle_dict["size"] = (item.getElementsByTagName('enclosure')[0]
+ .attributes['length'].childNodes[0].data)
+ if zooqle_dict["size"] == '0':
+ zooqle_dict["link"] = (item.getElementsByTagName('torrent:magnetURI')[0]
+ .childNodes[0].data)
else:
- zooqle_dict["link"] = item.getElementsByTagName('enclosure')[0].attributes['url'].value
- zooqle_dict["desc_link"] = item.getElementsByTagName('link')[0].childNodes[0].data
- zooqle_dict["leech"] = item.getElementsByTagName('torrent:peers')[0].childNodes[0].data
+ zooqle_dict["link"] = (item.getElementsByTagName('enclosure')[0]
+ .attributes['url'].value)
+ zooqle_dict["desc_link"] = (item.getElementsByTagName('link')[0]
+ .childNodes[0].data)
+ zooqle_dict["leech"] = (item.getElementsByTagName('torrent:peers')[0]
+ .childNodes[0].data)
if not zooqle_dict["leech"].isdigit():
zooqle_dict["leech"] = ''
- zooqle_dict["seeds"] = item.getElementsByTagName('torrent:seeds')[0].childNodes[0].data
+ zooqle_dict["seeds"] = (item.getElementsByTagName('torrent:seeds')[0]
+ .childNodes[0].data)
if not zooqle_dict["seeds"].isdigit():
zooqle_dict["seeds"] = ''
prettyPrinter(zooqle_dict)
- totalResultVal = xmldoc.getElementsByTagName('opensearch:totalResults')[0].childNodes[0].data
- startIndex = xmldoc.getElementsByTagName('opensearch:startIndex')[0].childNodes[0].data
- itemsPerPage = xmldoc.getElementsByTagName('opensearch:itemsPerPage')[0].childNodes[0].data
- if( ( int(startIndex) + int(itemsPerPage) > int( totalResultVal ))):
+ totalResultVal = (xmldoc.getElementsByTagName('opensearch:totalResults')[0]
+ .childNodes[0].data)
+ startIndex = (xmldoc.getElementsByTagName('opensearch:startIndex')[0]
+ .childNodes[0].data)
+ itemsPerPage = (xmldoc.getElementsByTagName('opensearch:itemsPerPage')[0]
+ .childNodes[0].data)
+ if (int(startIndex) + int(itemsPerPage)) > int(totalResultVal):
return
page += 1
return