From c2092b9fcea3aa80d144a46f8ac9a5f15afce25f Mon Sep 17 00:00:00 2001
From: pw3t
Date: Sun, 29 Dec 2013 16:21:20 +0100
Subject: [PATCH 1/4] [enh] 1st version of filecrop engine, to discover how searx works

---
 .gitignore                |  3 ++
 searx/engines/filecrop.py | 74 +++++++++++++++++++++++++++++++++++++++
 2 files changed, 77 insertions(+)
 create mode 100644 searx/engines/filecrop.py

diff --git a/.gitignore b/.gitignore
index 4cc20423..76ae1ca2 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,2 +1,5 @@
 env
 engines.cfg
+
+*.pyc
+*/*.pyc
\ No newline at end of file

diff --git a/searx/engines/filecrop.py b/searx/engines/filecrop.py
new file mode 100644
index 00000000..df0ce417
--- /dev/null
+++ b/searx/engines/filecrop.py
@@ -0,0 +1,74 @@
+from json import loads
+from urllib import urlencode
+from searx.utils import html_to_text
+from HTMLParser import HTMLParser
+
+url = 'http://www.filecrop.com/'
+search_url = url + '/search.php?w={query}&size_i=0&size_f=100000000&engine_r=1&engine_d=1&engine_e=1&engine_4=1&engine_m=1'
+
+class FilecropResultParser(HTMLParser):
+    def __init__(self):
+        HTMLParser.__init__(self)
+        self.__start_processing = False
+
+        self.results = []
+        self.result = {}
+
+        self.tr_counter = 0
+        self.data_counter = 0
+
+    def handle_starttag(self, tag, attrs):
+
+        if tag == 'tr':
+            if ('bgcolor', '#edeff5') in attrs or ('bgcolor', '#ffffff') in attrs:
+                self.__start_processing = True
+
+        if not self.__start_processing:
+            return
+
+        if tag == 'label':
+            self.result['title'] = [attr[1] for attr in attrs if attr[0] == 'title'][0]
+        elif tag == 'a' and ('rel', 'nofollow') in attrs and ('class', 'sourcelink') in attrs:
+            if 'content' in self.result:
+                self.result['content'] += [attr[1] for attr in attrs if attr[0] == 'title'][0]
+            else:
+                self.result['content'] = [attr[1] for attr in attrs if attr[0] == 'title'][0]
+            self.result['content'] += ' '
+        elif tag == 'a':
+            self.result['url'] = url + [attr[1] for attr in attrs if attr[0] == 'href'][0]
+
+    def handle_endtag(self, tag):
+        if self.__start_processing is False:
+            return
+
+        if tag == 'tr':
+            self.tr_counter += 1
+
+            if self.tr_counter == 2:
+                self.__start_processing = False
+                self.tr_counter = 0
+                self.data_counter = 0
+                self.results.append(self.result)
+                self.result = {}
+
+    def handle_data(self, data):
+        if not self.__start_processing:
+            return
+        print data
+
+        if 'content' in self.result:
+            self.result['content'] += data + ' '
+        else:
+            self.result['content'] = data + ' '
+
+        self.data_counter += 1
+
+def request(query, params):
+    params['url'] = search_url.format(query=urlencode({'q': query}))
+    return params
+
+def response(resp):
+    parser = FilecropResultParser()
+    parser.feed(resp.text)
+
+    return parser.results

From b71bddad06140474a35f7792bf395eee5414c45a Mon Sep 17 00:00:00 2001
From: pw3t
Date: Sun, 29 Dec 2013 16:21:20 +0100
Subject: [PATCH 2/4] [enh] 1st version of filecrop engine, to discover how searx works

---
 .gitignore                |  3 ++
 searx/engines/filecrop.py | 73 +++++++++++++++++++++++++++++++++++++++
 2 files changed, 76 insertions(+)
 create mode 100644 searx/engines/filecrop.py

diff --git a/.gitignore b/.gitignore
index 4cc20423..76ae1ca2 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,2 +1,5 @@
 env
 engines.cfg
+
+*.pyc
+*/*.pyc
\ No newline at end of file

diff --git a/searx/engines/filecrop.py b/searx/engines/filecrop.py
new file mode 100644
index 00000000..b2adff23
--- /dev/null
+++ b/searx/engines/filecrop.py
@@ -0,0 +1,73 @@
+from json import loads
+from urllib import urlencode
+from searx.utils import html_to_text
+from HTMLParser import HTMLParser
+
+url = 'http://www.filecrop.com/'
+search_url = url + '/search.php?w={query}&size_i=0&size_f=100000000&engine_r=1&engine_d=1&engine_e=1&engine_4=1&engine_m=1'
+
+class FilecropResultParser(HTMLParser):
+    def __init__(self):
+        HTMLParser.__init__(self)
+        self.__start_processing = False
+
+        self.results = []
+        self.result = {}
+
+        self.tr_counter = 0
+        self.data_counter = 0
+
+    def handle_starttag(self, tag, attrs):
+
+        if tag == 'tr':
+            if ('bgcolor', '#edeff5') in attrs or ('bgcolor', '#ffffff') in attrs:
+                self.__start_processing = True
+
+        if not self.__start_processing:
+            return
+
+        if tag == 'label':
+            self.result['title'] = [attr[1] for attr in attrs if attr[0] == 'title'][0]
+        elif tag == 'a' and ('rel', 'nofollow') in attrs and ('class', 'sourcelink') in attrs:
+            if 'content' in self.result:
+                self.result['content'] += [attr[1] for attr in attrs if attr[0] == 'title'][0]
+            else:
+                self.result['content'] = [attr[1] for attr in attrs if attr[0] == 'title'][0]
+            self.result['content'] += ' '
+        elif tag == 'a':
+            self.result['url'] = url + [attr[1] for attr in attrs if attr[0] == 'href'][0]
+
+    def handle_endtag(self, tag):
+        if self.__start_processing is False:
+            return
+
+        if tag == 'tr':
+            self.tr_counter += 1
+
+            if self.tr_counter == 2:
+                self.__start_processing = False
+                self.tr_counter = 0
+                self.data_counter = 0
+                self.results.append(self.result)
+                self.result = {}
+
+    def handle_data(self, data):
+        if not self.__start_processing:
+            return
+
+        if 'content' in self.result:
+            self.result['content'] += data + ' '
+        else:
+            self.result['content'] = data + ' '
+
+        self.data_counter += 1
+
+def request(query, params):
+    params['url'] = search_url.format(query=urlencode({'q': query}))
+    return params
+
+def response(resp):
+    parser = FilecropResultParser()
+    parser.feed(resp.text)
+
+    return parser.results

From a492ca6dedc477655f5cfcfb67b845779b825348 Mon Sep 17 00:00:00 2001
From: pw3t
Date: Sun, 29 Dec 2013 21:39:23 +0100
Subject: [PATCH 3/4] [enh] add support for yacy engine (localhost)

---
 searx/engines/yacy.py | 38 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 38 insertions(+)
 create mode 100644 searx/engines/yacy.py

diff --git a/searx/engines/yacy.py b/searx/engines/yacy.py
new file mode 100644
index 00000000..e24edde5
--- /dev/null
+++ b/searx/engines/yacy.py
@@ -0,0 +1,38 @@
+from json import loads
+from urllib import urlencode, quote
+
+url = 'http://localhost:8090'
+search_url = '/yacysearch.json?{query}&maximumRecords=10'
+
+def request(query, params):
+    params['url'] = url + search_url.format(query=urlencode({'query':query}))
+    return params
+
+def response(resp):
+    raw_search_results = loads(resp.text)
+
+    if not len(raw_search_results):
+        return []
+
+    search_results = raw_search_results.get('channels', {})[0].get('items', [])
+
+    results = []
+
+    for result in search_results:
+        tmp_result = {}
+        tmp_result['title'] = result['title']
+        tmp_result['url'] = result['link']
+        tmp_result['content'] = ''
+
+        if len(result['description']):
+            tmp_result['content'] += result['description'] +"<br>"
" + + if len(result['pubDate']): + tmp_result['content'] += result['pubDate'] + "
" + + if result['size'] != '-1': + tmp_result['content'] += result['sizename'] + + results.append(tmp_result) + + return results From 6f2b8aca5e36196f6370951d915b06356a2549db Mon Sep 17 00:00:00 2001 From: pw3t Date: Sun, 29 Dec 2013 21:43:13 +0100 Subject: [PATCH 4/4] [fix] fix an error in the query --- searx/engines/filecrop.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/searx/engines/filecrop.py b/searx/engines/filecrop.py index b2adff23..a1a933db 100644 --- a/searx/engines/filecrop.py +++ b/searx/engines/filecrop.py @@ -4,7 +4,7 @@ from searx.utils import html_to_text from HTMLParser import HTMLParser url = 'http://www.filecrop.com/' -search_url = url + '/search.php?w={query}&size_i=0&size_f=100000000&engine_r=1&engine_d=1&engine_e=1&engine_4=1&engine_m=1' +search_url = url + '/search.php?{query}&size_i=0&size_f=100000000&engine_r=1&engine_d=1&engine_e=1&engine_4=1&engine_m=1' class FilecropResultParser(HTMLParser): def __init__(self): @@ -63,7 +63,7 @@ class FilecropResultParser(HTMLParser): self.data_counter += 1 def request(query, params): - params['url'] = search_url.format(query=urlencode({'q': query})) + params['url'] = search_url.format(query=urlencode({'w' :query})) return params def response(resp):