searx/searx/engines/nyaa.py

"""
2017-08-31 21:32:30 +02:00
Nyaa.si (Anime Bittorrent tracker)
2016-03-24 19:24:37 +01:00
2017-08-31 21:32:30 +02:00
@website http://www.nyaa.si/
2016-03-24 19:24:37 +01:00
@provide-api no
@using-api no
@results HTML
@stable no (HTML can change)
@parse url, title, content, seed, leech, torrentfile
"""
from lxml import html
from searx.engines.xpath import extract_text
from searx.url_utils import urlencode
from searx.utils import get_torrent_size

# engine dependent config
categories = ['files', 'images', 'videos', 'music']
paging = True

# search-url
base_url = 'http://www.nyaa.si/'
search_url = base_url + '?page=search&{query}&offset={offset}'

# xpath queries
xpath_results = '//table[contains(@class, "torrent-list")]//tr[not(th)]'
xpath_category = './/td[1]/a[1]'
xpath_title = './/td[2]/a[last()]'
xpath_torrent_links = './/td[3]/a'
xpath_filesize = './/td[4]/text()'
xpath_seeds = './/td[6]/text()'
xpath_leeches = './/td[7]/text()'
xpath_downloads = './/td[8]/text()'

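# The selectors above assume nyaa.si's current result-table layout: td[1]
# category link, td[2] title link, td[3] torrent/magnet links, td[4] file
# size, td[6] seeders, td[7] leechers, td[8] completed downloads (column 5
# is not used).  If nyaa.si changes this markup, the engine breaks.
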

# do search-request
def request(query, params):
    query = urlencode({'term': query})
    params['url'] = search_url.format(query=query, offset=params['pageno'])

    return params

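# Example (illustration only): for the query "one punch man" on page 1,
# request() above produces
#   http://www.nyaa.si/?page=search&term=one+punch+man&offset=1
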

# get response from search-request
def response(resp):
    results = []

    dom = html.fromstring(resp.text)

    for result in dom.xpath(xpath_results):
        # defaults
        filesize = 0
        seed = 0
        leech = 0
        downloads = 0
        magnet_link = ""
        torrent_link = ""

        # category in which our torrent belongs
        category = result.xpath(xpath_category)[0].attrib.get('title')

        # torrent title
        page_a = result.xpath(xpath_title)[0]
        title = extract_text(page_a)

        # link to the page
        href = base_url + page_a.attrib.get('href')

        for link in result.xpath(xpath_torrent_links):
            url = link.attrib.get('href')
            if 'magnet' in url:
                # link to the magnet
                magnet_link = url
            else:
                # link to the torrent file
                torrent_link = url

        # get seeders and leechers
        try:
            seed = int(result.xpath(xpath_seeds)[0])
            leech = int(result.xpath(xpath_leeches)[0])
        except (ValueError, IndexError):
            pass

        # let's try to calculate the torrent size
        try:
            filesize_info = result.xpath(xpath_filesize)[0]
            filesize, filesize_multiplier = filesize_info.split()
            filesize = get_torrent_size(filesize, filesize_multiplier)
        except (ValueError, IndexError):
            pass

        # torrent downloads count
        try:
            downloads = result.xpath(xpath_downloads)[0]
        except IndexError:
            pass

        # content string contains all information not included into template
        content = 'Category: "{category}". Downloaded {downloads} times.'
        content = content.format(category=category, downloads=downloads)

        results.append({'url': href,
                        'title': title,
                        'content': content,
                        'seed': seed,
                        'leech': leech,
                        'filesize': filesize,
                        'torrentfile': torrent_link,
                        'magnetlink': magnet_link,
                        'template': 'torrent.html'})

    return results
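

# Minimal sketch of how this request/response pair could be exercised on its
# own (assumptions: searx is importable, the `requests` package stands in for
# searx's HTTP layer, and a plain dict is enough for the request params).
# Illustration only -- searx normally drives engines through its own search
# machinery.
if __name__ == '__main__':
    import requests

    params = request('ghost in the shell', {'pageno': 1})
    resp = requests.get(params['url'])
    for r in response(resp):
        print(r['title'], r['seed'], r['leech'], r['magnetlink'] or r['torrentfile'])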