Merge branch 'google-images-fix'

Adam Tauber 2020-07-07 22:01:06 +02:00
commit 9f5cd28dba
2 changed files with 377 additions and 373 deletions

searx/engines/google.py

@@ -1,210 +1,207 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
"""Google (Web)

:website:     https://www.google.com
:provide-api: yes (https://developers.google.com/custom-search/)

:using-api:   not the official one, since it needs registration to another service
:results:     HTML
:stable:      no
:parse:       url, title, content, number_of_results, answer, suggestion, correction

For a detailed description of the *REST-full* API see: `Query Parameter
Definitions`_.

.. _Query Parameter Definitions:
   https://developers.google.com/custom-search/docs/xml_results#WebSearch_Query_Parameter_Definitions
"""

# pylint: disable=invalid-name, missing-function-docstring

from flask_babel import gettext
from lxml import html
from searx.engines.xpath import extract_text
from searx import logger
from searx.url_utils import urlencode, urlparse
from searx.utils import match_language, eval_xpath

logger = logger.getChild('google engine')

# engine dependent config
categories = ['general']
paging = True
language_support = True
use_locale_domain = True
time_range_support = True
safesearch = True

supported_languages_url = 'https://www.google.com/preferences?#languages'

# based on https://en.wikipedia.org/wiki/List_of_Google_domains and tests
google_domains = {
    'BG': 'google.bg',      # Bulgaria
    'CZ': 'google.cz',      # Czech Republic
    'DE': 'google.de',      # Germany
    'DK': 'google.dk',      # Denmark
    'AT': 'google.at',      # Austria
    'CH': 'google.ch',      # Switzerland
    'GR': 'google.gr',      # Greece
    'AU': 'google.com.au',  # Australia
    'CA': 'google.ca',      # Canada
    'GB': 'google.co.uk',   # United Kingdom
    'ID': 'google.co.id',   # Indonesia
    'IE': 'google.ie',      # Ireland
    'IN': 'google.co.in',   # India
    'MY': 'google.com.my',  # Malaysia
    'NZ': 'google.co.nz',   # New Zealand
    'PH': 'google.com.ph',  # Philippines
    'SG': 'google.com.sg',  # Singapore
    # 'US': 'google.us',    # United States, redirect to .com
    'ZA': 'google.co.za',   # South Africa
    'AR': 'google.com.ar',  # Argentina
    'CL': 'google.cl',      # Chile
    'ES': 'google.es',      # Spain
    'MX': 'google.com.mx',  # Mexico
    'EE': 'google.ee',      # Estonia
    'FI': 'google.fi',      # Finland
    'BE': 'google.be',      # Belgium
    'FR': 'google.fr',      # France
    'IL': 'google.co.il',   # Israel
    'HR': 'google.hr',      # Croatia
    'HU': 'google.hu',      # Hungary
    'IT': 'google.it',      # Italy
    'JP': 'google.co.jp',   # Japan
    'KR': 'google.co.kr',   # South Korea
    'LT': 'google.lt',      # Lithuania
    'LV': 'google.lv',      # Latvia
    'NO': 'google.no',      # Norway
    'NL': 'google.nl',      # Netherlands
    'PL': 'google.pl',      # Poland
    'BR': 'google.com.br',  # Brazil
    'PT': 'google.pt',      # Portugal
    'RO': 'google.ro',      # Romania
    'RU': 'google.ru',      # Russia
    'SK': 'google.sk',      # Slovakia
    'SI': 'google.si',      # Slovenia
    'SE': 'google.se',      # Sweden
    'TH': 'google.co.th',   # Thailand
    'TR': 'google.com.tr',  # Turkey
    'UA': 'google.com.ua',  # Ukraine
    # 'CN': 'google.cn',    # China, only from China ?
    'HK': 'google.com.hk',  # Hong Kong
    'TW': 'google.com.tw'   # Taiwan
}

time_range_dict = {
    'day': 'd',
    'week': 'w',
    'month': 'm',
    'year': 'y'
}

# Filter results. 0: None, 1: Moderate, 2: Strict
filter_mapping = {
    0: 'off',
    1: 'medium',
    2: 'high'
}
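
# Note (added for illustration, not part of the engine): request() below
# appends these two mappings as URL parameters, e.g. a 'week' time range
# becomes '&tbs=qdr%3Aw' and a safesearch level of 1 becomes '&safe=medium'.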

# specific xpath variables
# ------------------------

# google results are grouped into <div class="g" ../>
results_xpath = '//div[@class="g"]'

# google *sections* are not usual *results*, we ignore them
g_section_with_header = './g-section-with-header'

# the title is a h3 tag relative to the result group
title_xpath = './/h3[1]'

# in the result group there is a <div class="r" ../>, its first child is a <a
# href=...> (on some results, the <a> is the first "descendant", not "child")
href_xpath = './/div[@class="r"]//a/@href'

# in the result group there is a <div class="s" ../> containing the *content*
content_xpath = './/div[@class="s"]'

# Suggestions are links placed in a *card-section*, we extract only the text
# from the links, not the links themselves.
suggestion_xpath = '//div[contains(@class, "card-section")]//a'

# Since google does *auto-correction* on the first query, these are not really
# *spelling suggestions*; we use them anyway.
spelling_suggestion_xpath = '//div[@class="med"]/p/a'
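
# Schematic sketch (added for illustration, attributes abridged) of the result
# markup the selectors above expect::
#
#     <div class="g">
#       <div class="r">
#         <a href="https://example.org/"><h3>Title</h3></a>
#       </div>
#       <div class="s">... the result's content / snippet ...</div>
#     </div>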


def extract_text_from_dom(result, xpath):
    """Returns extract_text on the first result selected by the xpath, or None."""
    r = eval_xpath(result, xpath)
    if len(r) > 0:
        return extract_text(r[0])
    return None


def get_lang_country(params, lang_list, custom_aliases):
    """Returns a tuple with *language* on its first and *country* on its second
    position."""
    language = params['language']
    if language == 'all':
        language = 'en-US'

    language_array = language.split('-')

    if len(language_array) == 2:
        country = language_array[1]
    else:
        country = language_array[0].upper()

    language = match_language(language, lang_list, custom_aliases)
    lang_country = '%s-%s' % (language, country)
    if lang_country == 'en-EN':
        lang_country = 'en'

    return language, country, lang_country
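
# Illustrative examples (added, not part of the engine), assuming Google's
# language list resolves 'de-DE' to 'de':
#
#     get_lang_country({'language': 'de-DE'}, supported_languages, language_aliases)
#     # --> ('de', 'DE', 'de-DE')
#     get_lang_country({'language': 'all'}, supported_languages, language_aliases)
#     # --> ('en', 'US', 'en-US')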


def request(query, params):
    """Google search request"""

    offset = (params['pageno'] - 1) * 10
    language, country, lang_country = get_lang_country(
        # pylint: disable=undefined-variable
        params, supported_languages, language_aliases
    )
    subdomain = 'www.' + google_domains.get(country.upper(), 'google.com')

    # https://www.google.de/search?q=corona&hl=de-DE&lr=lang_de&start=0&tbs=qdr%3Ad&safe=medium
    query_url = 'https://' + subdomain + '/search' + "?" + urlencode({
        'q': query,
        'hl': lang_country,
        'lr': "lang_" + language,
        'ie': "utf8",
        'oe': "utf8",
        'start': offset,
    })

    if params['time_range'] in time_range_dict:
        query_url += '&' + urlencode({'tbs': 'qdr:' + time_range_dict[params['time_range']]})
    if params['safesearch']:
        query_url += '&' + urlencode({'safe': filter_mapping[params['safesearch']]})

    params['url'] = query_url
    logger.debug("query_url --> %s", query_url)

    # en-US,en;q=0.8,en;q=0.5
    params['headers']['Accept-Language'] = (
        lang_country + ',' + language + ';q=0.8,' + language + ';q=0.5'
    )
    logger.debug("HTTP header Accept-Language --> %s",
                 params['headers']['Accept-Language'])
    params['headers']['Accept'] = (
        'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'
    )
    # params['google_subdomain'] = subdomain

    return params


def response(resp):
    """Get response from google's search request"""
    results = []

    # detect google sorry
@@ -215,68 +212,53 @@ def response(resp):
    if resp_url.path.startswith('/sorry'):
        raise RuntimeWarning(gettext('CAPTCHA required'))

    # which subdomain ?
    # subdomain = resp.search_params.get('google_subdomain')

    # convert the text to dom
    dom = html.fromstring(resp.text)

    # results --> answer
    answer = eval_xpath(dom, '//div[contains(@class, "LGOjhe")]//text()')
    if answer:
        results.append({'answer': ' '.join(answer)})
    else:
        logger.debug("did not find 'answer'")

    # results --> number_of_results
    try:
        _txt = eval_xpath(dom, '//div[@id="result-stats"]//text()')[0]
        _digit = ''.join([n for n in _txt if n.isdigit()])
        number_of_results = int(_digit)
        results.append({'number_of_results': number_of_results})
    except Exception as e:  # pylint: disable=broad-except
        logger.debug("did not find 'number_of_results'")
        logger.error(e, exc_info=True)
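
    # Worked example (added for illustration): if the first text node under
    # div#result-stats is "About 1,410,000 results", the digit filter yields
    # '1410000' and number_of_results becomes 1410000.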
 
    # parse results
    for result in eval_xpath(dom, results_xpath):

        # google *sections*
        if extract_text(eval_xpath(result, g_section_with_header)):
            logger.debug("ignoring <g-section-with-header>")
            continue

        try:
            title = extract_text(eval_xpath(result, title_xpath)[0])
            url = eval_xpath(result, href_xpath)[0]
            content = extract_text_from_dom(result, content_xpath)
            results.append({
                'url': url,
                'title': title,
                'content': content
            })
        except Exception as e:  # pylint: disable=broad-except
            logger.error(e, exc_info=True)
            # from lxml import etree
            # logger.debug(etree.tostring(result, pretty_print=True))
            # import pdb
            # pdb.set_trace()
            continue
    # parse suggestion
@@ -290,102 +272,16 @@ def response(resp):
    # return results
    return results


# get supported languages from their site
def _fetch_supported_languages(resp):
    ret_val = {}
    dom = html.fromstring(resp.text)

    radio_buttons = eval_xpath(dom, '//*[@id="langSec"]//input[@name="lang"]')

    for x in radio_buttons:
        name = x.get("data-name")
        code = x.get("value")
        ret_val[code] = {"name": name}

    return ret_val
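
# Illustrative shape of the returned mapping (added; the actual codes and
# names depend on Google's preferences page at fetch time)::
#
#     {'en': {'name': 'English'}, 'de': {'name': 'Deutsch'}, ...}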

searx/engines/google_images.py

@@ -1,97 +1,205 @@
""" # SPDX-License-Identifier: AGPL-3.0-or-later
Google (Images) """Google (Images)
@website https://www.google.com :website: https://images.google.com (redirected to subdomain www.)
@provide-api yes (https://developers.google.com/custom-search/) :provide-api: yes (https://developers.google.com/custom-search/)
:using-api: not the offical, since it needs registration to another service
:results: HTML
:stable: no
:template: images.html
:parse: url, title, content, source, thumbnail_src, img_src
For detailed description of the *REST-full* API see: `Query Parameter
Definitions`_.
.. _admonition:: Content-Security-Policy (CSP)
This engine needs to allow images from the `data URLs`_ (prefixed with the
``data:` scheme).::
Header set Content-Security-Policy "img-src 'self' data: ;"
.. _Query Parameter Definitions:
https://developers.google.com/custom-search/docs/xml_results#WebSearch_Query_Parameter_Definitions
@using-api no
@results HTML chunks with JSON inside
@stable no
@parse url, title, img_src
""" """
from datetime import date, timedelta
from json import loads
from lxml import html from lxml import html
from searx.url_utils import urlencode from flask_babel import gettext
from searx import logger
from searx.url_utils import urlencode, urlparse
from searx.utils import eval_xpath
from searx.engines.xpath import extract_text
# pylint: disable=unused-import
from searx.engines.google import (
supported_languages_url
, _fetch_supported_languages
)
# pylint: enable=unused-import
from searx.engines.google import (
get_lang_country
, google_domains
, time_range_dict
)
logger = logger.getChild('google images')
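
# Note (added for illustration): the unused-import block above re-exports
# supported_languages_url and _fetch_supported_languages because searx's
# engine loader looks these hooks up on each engine module; this engine
# shares the language list of the google (Web) engine.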

# engine dependent config
categories = ['images']
paging = False
language_support = True
use_locale_domain = True
time_range_support = True
safesearch = True

filter_mapping = {
    0: 'images',
    1: 'active',
    2: 'active'
}
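
# Note (added for illustration): Google's image search only distinguishes
# SafeSearch off ('images') from on ('active'), so both the Moderate (1) and
# Strict (2) levels map to 'active' in filter_mapping above.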


def scrap_out_thumbs(dom):
    """Scrap out thumbnail data from <script> tags."""
    ret_val = dict()
    for script in eval_xpath(dom, '//script[contains(., "_setImgSrc(")]'):
        _script = script.text
        # _setImgSrc('0','data:image\/jpeg;base64,\/9j\/4AAQSkZJR ....');
        _thumb_no, _img_data = _script[len("_setImgSrc("):-2].split(",", 1)
        _thumb_no = _thumb_no.replace("'", "")
        _img_data = _img_data.replace("'", "")
        _img_data = _img_data.replace(r"\/", r"/")
        ret_val[_thumb_no] = _img_data.replace(r"\x3d", "=")
    return ret_val
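
# Worked example (added for illustration): given a script whose text is
#
#     _setImgSrc('42','data:image\/jpeg;base64,\/9j\/4AAQ');
#
# scrap_out_thumbs() returns {'42': 'data:image/jpeg;base64,/9j/4AAQ'}.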


def request(query, params):
    """Google-Image search request"""

    language, country, lang_country = get_lang_country(
        # pylint: disable=undefined-variable
        params, supported_languages, language_aliases
    )
    subdomain = 'www.' + google_domains.get(country.upper(), 'google.com')

    query_url = 'https://' + subdomain + '/search' + "?" + urlencode({
        'q': query,
        'tbm': "isch",
        'hl': lang_country,
        'lr': "lang_" + language,
        'ie': "utf8",
        'oe': "utf8",
        'num': 30,
    })

    if params['time_range'] in time_range_dict:
        query_url += '&' + urlencode({'tbs': 'qdr:' + time_range_dict[params['time_range']]})
    if params['safesearch']:
        query_url += '&' + urlencode({'safe': filter_mapping[params['safesearch']]})

    params['url'] = query_url
    logger.debug("query_url --> %s", query_url)

    params['headers']['Accept-Language'] = (
        "%s,%s;q=0.8,%s;q=0.5" % (lang_country, language, language))
    logger.debug(
        "HTTP Accept-Language --> %s", params['headers']['Accept-Language'])
    params['headers']['Accept'] = (
        'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'
    )
    # params['google_subdomain'] = subdomain

    return params


def response(resp):
    """Get response from google's search request"""
    results = []

    # detect google sorry
    resp_url = urlparse(resp.url)
    if resp_url.netloc == 'sorry.google.com' or resp_url.path == '/sorry/IndexRedirect':
        raise RuntimeWarning('sorry.google.com')

    if resp_url.path.startswith('/sorry'):
        raise RuntimeWarning(gettext('CAPTCHA required'))

    # which subdomain ?
    # subdomain = resp.search_params.get('google_subdomain')

    # convert the text to dom
    dom = html.fromstring(resp.text)
    img_bas64_map = scrap_out_thumbs(dom)
    # parse results
    #
    # root element::
    #     <div id="islmp" ..>
    # result div per image::
    #     <div jsmodel="tTXmib"> / <div jsaction="..." data-id="..."
    #     The data-id matches to an item in a json-data structure in::
    #         <script nonce="I+vqelcy/01CKiBJi5Z1Ow">AF_initDataCallback({key: 'ds:1', ... data:function(){return [ ...
    #     In this structure the link to the original PNG, JPG or whatever is given
    #     (we do not blow out the link there, you could still implement that)
    # first link per image-div contains a <img> with the data-iid for base64 encoded image data::
    #     <img class="rg_i Q4LuWd" data-iid="0"
    # second link per image-div is the target link::
    #     <a class="VFACy kGQAp" href="https://en.wikipedia.org/wiki/The_Sacrament_of_the_Last_Supper">
    # the second link also contains two div tags with the *description* and *publisher*::
    #     <div class="WGvvNb">The Sacrament of the Last Supper ...</div>
    #     <div class="fxgdke">en.wikipedia.org</div>

    root = eval_xpath(dom, '//div[@id="islmp"]')
    if not root:
        logger.error("did not find root element id='islmp'")
        return results

    root = root[0]
    for img_node in eval_xpath(root, './/img[contains(@class, "rg_i")]'):

        try:
            img_alt = eval_xpath(img_node, '@alt')[0]

            img_base64_id = eval_xpath(img_node, '@data-iid')
            if img_base64_id:
                img_base64_id = img_base64_id[0]
                thumbnail_src = img_bas64_map[img_base64_id]
            else:
                thumbnail_src = eval_xpath(img_node, '@src')
                if not thumbnail_src:
                    thumbnail_src = eval_xpath(img_node, '@data-src')
                if thumbnail_src:
                    thumbnail_src = thumbnail_src[0]
                else:
                    thumbnail_src = ''

            link_node = eval_xpath(img_node, '../../../a[2]')[0]
            url = eval_xpath(link_node, '@href')[0]

            pub_nodes = eval_xpath(link_node, './div/div')
            pub_descr = img_alt
            pub_source = ''
            if pub_nodes:
                pub_descr = extract_text(pub_nodes[0])
                pub_source = extract_text(pub_nodes[1])

            results.append({
                'url': url,
                'title': img_alt,
                'content': pub_descr,
                'source': pub_source,
                'img_src': url,
                # 'img_format': img_format,
                'thumbnail_src': thumbnail_src,
                'template': 'images.html'
            })
        except Exception as e:  # pylint: disable=broad-except
            logger.error(e, exc_info=True)
            # from lxml import etree
            # logger.debug(etree.tostring(img_node, pretty_print=True))
            # import pdb
            # pdb.set_trace()
            continue

    return results