searx/searx/engines/google_news.py

# SPDX-License-Identifier: AGPL-3.0-or-later
"""Google (News)
For detailed description of the *REST-full* API see: `Query Parameter
Definitions`_. Not all parameters can be applied:
- num_ : the number of search results is ignored
- save_ : is ignored / Google-News results are always *SafeSearch*
.. _Query Parameter Definitions:
https://developers.google.com/custom-search/docs/xml_results#WebSearch_Query_Parameter_Definitions
.. _num: https://developers.google.com/custom-search/docs/xml_results#numsp
.. _save: https://developers.google.com/custom-search/docs/xml_results#safesp
"""
# pylint: disable=invalid-name, missing-function-docstring

import binascii
import re
from urllib.parse import urlencode
from base64 import b64decode
from random import random

from lxml import html

from searx import logger
from searx.utils import (
    eval_xpath,
    eval_xpath_list,
    eval_xpath_getindex,
    extract_text,
)

# pylint: disable=unused-import
from searx.engines.google import (
    supported_languages_url,
    _fetch_supported_languages,
)
# pylint: enable=unused-import

from searx.engines.google import (
    get_lang_info,
    detect_google_sorry,
)

# about
about = {
    "website": 'https://news.google.com',
    "wikidata_id": 'Q12020',
    "official_api_documentation": 'https://developers.google.com/custom-search',
    "use_official_api": False,
    "require_api_key": False,
    "results": 'HTML',
}

logger = logger.getChild('google news')

# Compared to other google engines, google-news supports time ranges
# differently: the time range is appended to the search term.
time_range_dict = {
    'day': 'when:1d',
    'week': 'when:7d',
    'month': 'when:1m',
    'year': 'when:1y',
}
# engine dependent config
categories = ['news']
paging = False
use_locale_domain = True
time_range_support = True
# Google-News results are always *SafeSearch*. Option 'safesearch' is set to
# False here, otherwise the checker will report safesearch errors::
#
#   safesearch : results are identical for safesearch=0 and safesearch=2
safesearch = False


def request(query, params):
    """Google-News search request"""

    lang_info = get_lang_info(
        # pylint: disable=undefined-variable
        params, supported_languages, language_aliases, False
    )

    # google news has only one domain
    lang_info['subdomain'] = 'news.google.com'

    ceid = "%s:%s" % (lang_info['country'], lang_info['language'])

    # google news redirects en to en-US
    if lang_info['params']['hl'] == 'en':
        lang_info['params']['hl'] = 'en-US'

    # Unlike other google engines, google-news includes the time range in the
    # search term (see time_range_dict above).
    if params['time_range']:
        query += ' ' + time_range_dict[params['time_range']]

    query_url = 'https://' + lang_info['subdomain'] + '/search' + "?" + urlencode({
        'q': query,
        **lang_info['params'],
        'ie': "utf8",
        'oe': "utf8",
        'ucbcb': 1,
        'gl': lang_info['country'],
    }) + ('&ceid=%s' % ceid)  # ceid includes a ':' character which must not be urlencoded
logger.debug("query_url --> %s", query_url)
params['url'] = query_url
logger.debug("HTTP header Accept-Language --> %s", lang_info.get('Accept-Language'))
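    # A CONSENT cookie with a "PENDING+<number>" value mimics the cookie Google
    # itself sets and avoids a redirect to the consent.google.com interstitial
    # shown in EU countries.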
    params['cookies']['CONSENT'] = "PENDING+" + str(random() * 100)
    params['headers'].update(lang_info['headers'])
    params['headers']['Accept'] = (
        'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8'
    )

    return params


def response(resp):
    """Get response from google's search request"""
    results = []

    detect_google_sorry(resp)

    # convert the text to dom
    dom = html.fromstring(resp.text)

    for result in eval_xpath_list(dom, '//div[@class="xrnccd"]'):

        # The first <a> tag in the <article> contains the link to the
        # article.  The href attribute of the <a> is a google-internal link
        # we can't use.  The real link is hidden in the jslog attribute:
        #
        #   <a ...
        #      jslog="95014; 4:https://www.cnn.com/.../index.html; track:click"
        #      href="./articles/CAIiENu3nGS...?hl=en-US&amp;gl=US&amp;ceid=US%3Aen"
        #      ... />
        jslog = eval_xpath_getindex(result, './article/a/@jslog', 0)
        url = re.findall('http[^;]*', jslog)
        if url:
            url = url[0]
        else:
            # The real URL is base64 encoded in the json attribute:
            # jslog="95014; 5:W251bGwsbnVsbCxudW...giXQ==; track:click"
            jslog = jslog.split(";")[1].split(':')[1].strip()
            try:
                # b64decode() requires the input length to be a multiple of
                # four, so restore any stripped '=' padding first
                padding = (4 - (len(jslog) % 4)) * "="
                jslog = b64decode(jslog + padding)
            except binascii.Error:
                # URL can't be read, skip this result
                continue

            # now we have: b'[null, ... null, "https://www.cnn.com/.../index.html"]'
            url = re.findall('http[^;"]*', str(jslog))[0]

        # The first <h3> tag in the <article> contains the title of the link
        title = extract_text(eval_xpath(result, './article/h3[1]'))

        # The first <div> tag in the <article> contains the content of the link
        content = extract_text(eval_xpath(result, './article/div[1]'))

        # The second <div> tag contains the origin publisher and the
        # publishing date
        pub_date = extract_text(eval_xpath(result, './article/div[2]//time'))
        pub_origin = extract_text(eval_xpath(result, './article/div[2]//a'))

        pub_info = []
        if pub_origin:
            pub_info.append(pub_origin)
        if pub_date:
            # The pub_date is mostly a relative string like 'yesterday', not a
            # real timezone-aware date or time, so we can't use publishedDate.
            pub_info.append(pub_date)
        pub_info = ', '.join(pub_info)
        if pub_info:
            content = pub_info + ': ' + content
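        # e.g. with pub_origin "CNN" and pub_date "Yesterday", the teaser
        # becomes: "CNN, Yesterday: <content of the first <div>>"
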
        # The image URL is located in a preceding sibling <img> tag, e.g.:
        # "https://lh3.googleusercontent.com/DjhQh7DMszk.....z=-p-h100-w100"
        # These URLs are long but not personalized (double-checked via tor).
        img_src = extract_text(result.xpath('preceding-sibling::a/figure/img/@src'))

        results.append({
            'url': url,
            'title': title,
            'content': content,
            'img_src': img_src,
        })

    # return results
    return results