# searx/searx/engines/youtube_noapi.py
# SPDX-License-Identifier: AGPL-3.0-or-later
"""
Youtube (Videos)
"""
from functools import reduce
from json import loads, dumps
from urllib.parse import quote_plus
from random import random
# about
about = {
    "website": 'https://www.youtube.com/',
    "wikidata_id": 'Q866',
    "official_api_documentation": 'https://developers.google.com/youtube/v3/docs/search/list?apix=true',
    "use_official_api": False,
    "require_api_key": False,
    "results": 'HTML',
}

# engine dependent config
categories = ['videos', 'music']
paging = True
language_support = False
time_range_support = True

# search-url
base_url = 'https://www.youtube.com/results'
# ucbcb=1 is passed to skip the EU consent interstitial page
search_url = base_url + '?search_query={query}&page={page}&ucbcb=1'
time_range_url = '&sp=EgII{time_range}%253D%253D'
# the key seems to be constant
next_page_url = 'https://www.youtube.com/youtubei/v1/search?key=AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8'
# opaque filter codes understood by the results page's `sp` parameter
time_range_dict = {'day': 'Ag',
                   'week': 'Aw',
                   'month': 'BA',
                   'year': 'BQ'}
embedded_url = '<iframe width="540" height="304" ' +\
    'data-src="https://www.youtube-nocookie.com/embed/{videoid}" ' +\
    'frameborder="0" allowfullscreen></iframe>'
base_youtube_url = 'https://www.youtube.com/watch?v='
# do search-request
def request(query, params):
    """Build the outgoing request for a YouTube search.

    The first result page is fetched with a plain GET on the public results
    page; follow-up pages POST a continuation token (stored in
    ``engine_data['next_page_token']`` by a previous response) to the
    internal ``youtubei`` API endpoint.
    """
    # A random "PENDING+<n>" consent cookie avoids the EU consent redirect.
    params['cookies']['CONSENT'] = "PENDING+" + str(random() * 100)
    if not params['engine_data'].get('next_page_token'):
        params['url'] = search_url.format(query=quote_plus(query), page=params['pageno'])
        if params['time_range'] in time_range_dict:
            params['url'] += time_range_url.format(time_range=time_range_dict[params['time_range']])
    else:
        params['url'] = next_page_url
        params['method'] = 'POST'
        params['data'] = dumps({
            'context': {"client": {"clientName": "WEB", "clientVersion": "2.20210310.12.01"}},
            'continuation': params['engine_data']['next_page_token'],
        })
        params['headers']['Content-Type'] = 'application/json'

    return params
# get response from search-request
def response(resp):
    """Dispatch parsing of a search response.

    Continuation requests (identified by ``engine_data`` in the search
    params) return JSON; first-page requests return an HTML page.
    """
    if resp.search_params.get('engine_data'):
        return parse_next_page_response(resp.text)
    return parse_first_page_response(resp.text)
def parse_next_page_response(response_text):
    """Parse the JSON answer of the internal ``youtubei`` continuation API.

    Returns a list of video result dicts plus, when present, one
    ``next_page_token`` engine-data entry for the following page.
    """
    results = []
    result_json = loads(response_text)
    for section in (result_json['onResponseReceivedCommands'][0]
                    .get('appendContinuationItemsAction')['continuationItems'][0]
                    .get('itemSectionRenderer')['contents']):
        if 'videoRenderer' not in section:
            continue
        section = section['videoRenderer']
        content = "-"
        if 'descriptionSnippet' in section:
            content = ' '.join(x['text'] for x in section['descriptionSnippet']['runs'])
        results.append({
            'url': base_youtube_url + section['videoId'],
            'title': ' '.join(x['text'] for x in section['title']['runs']),
            'content': content,
            'author': section['ownerText']['runs'][0]['text'],
            'length': section['lengthText']['simpleText'],
            'template': 'videos.html',
            'embedded': embedded_url.format(videoid=section['videoId']),
            'thumbnail': section['thumbnail']['thumbnails'][-1]['url'],
        })
    # The second continuation item optionally carries the token for the next
    # page; any missing key along the path just means there is no next page.
    try:
        token = result_json['onResponseReceivedCommands'][0]\
            .get('appendContinuationItemsAction')['continuationItems'][1]\
            .get('continuationItemRenderer')['continuationEndpoint']\
            .get('continuationCommand')['token']
        results.append({
            "engine_data": token,
            "key": "next_page_token",
        })
    except (KeyError, IndexError, TypeError):
        pass

    return results
def parse_first_page_response(response_text):
    """Parse the HTML of the public results page.

    The page embeds its data as a JavaScript ``ytInitialData`` object; this
    extracts that JSON blob and walks it for video results and the
    continuation token of the next page.
    """
    results = []
    # Cut the JSON object out of the inline <script> that assigns ytInitialData.
    results_data = response_text[response_text.find('ytInitialData'):]
    results_data = results_data[results_data.find('{'):results_data.find(';</script>')]
    results_json = loads(results_data) if results_data else {}
    sections = results_json.get('contents', {})\
                           .get('twoColumnSearchResultsRenderer', {})\
                           .get('primaryContents', {})\
                           .get('sectionListRenderer', {})\
                           .get('contents', [])

    for section in sections:
        if "continuationItemRenderer" in section:
            next_page_token = section["continuationItemRenderer"]\
                .get("continuationEndpoint", {})\
                .get("continuationCommand", {})\
                .get("token", "")
            if next_page_token:
                results.append({
                    "engine_data": next_page_token,
                    "key": "next_page_token",
                })
        for video_container in section.get('itemSectionRenderer', {}).get('contents', []):
            video = video_container.get('videoRenderer', {})
            videoid = video.get('videoId')
            if videoid is not None:
                url = base_youtube_url + videoid
                thumbnail = 'https://i.ytimg.com/vi/' + videoid + '/hqdefault.jpg'
                title = get_text_from_json(video.get('title', {}))
                content = get_text_from_json(video.get('descriptionSnippet', {}))
                embedded = embedded_url.format(videoid=videoid)
                author = get_text_from_json(video.get('ownerText', {}))
                length = get_text_from_json(video.get('lengthText', {}))

                # append result
                results.append({'url': url,
                                'title': title,
                                'content': content,
                                'author': author,
                                'length': length,
                                'template': 'videos.html',
                                'embedded': embedded,
                                'thumbnail': thumbnail})

    # return results
    return results
def get_text_from_json(element):
    """Extract plain text from a YouTube JSON text element.

    Handles both the ``runs`` form (a list of text fragments) and the
    ``simpleText`` form; returns ``''`` when neither is present.
    """
    if 'runs' in element:
        # str.join is the idiomatic (and faster) way to concatenate fragments
        return ''.join(run.get('text', '') for run in element['runs'])
    return element.get('simpleText', '')