# coding: utf-8
# youtube_dl/extractor/googlesearch.py (ytdl-nightly)
from __future__ import unicode_literals

import itertools
import re

from .common import SearchInfoExtractor
from ..compat import (
    compat_urllib_parse,
)
class GoogleSearchIE(SearchInfoExtractor):
    """Search extractor for Google Video search (``gvsearchN:query`` URLs).

    Inherits the search-URL parsing from SearchInfoExtractor; this class
    only implements result-page scraping via _get_n_results.
    """
    IE_DESC = 'Google Video search'
    # Upper bound enforced by SearchInfoExtractor for 'gvsearchall'-style
    # queries; Google will not page results indefinitely.
    _MAX_RESULTS = 1000
    IE_NAME = 'video.google:search'
    _SEARCH_KEY = 'gvsearch'
    _TEST = {
        'url': 'gvsearch15:python language',
        'info_dict': {
            'id': 'python language',
            'title': 'python language',
        },
        'playlist_count': 15,
    }

    def _get_n_results(self, query, n):
        """Return a playlist dict with up to *n* video-result URL entries.

        Pages through Google's video-search results (10 hits per page)
        until either *n* entries have been collected or there is no
        "next page" link left in the markup.
        """
        entries = []
        res = {
            '_type': 'playlist',
            'id': query,
            'title': query,
        }
        for pagenum in itertools.count():
            # tbm=vid restricts results to videos; start= is the 0-based
            # offset of the first hit on the page (10 hits per page).
            result_url = (
                'http://www.google.com/search?tbm=vid&q=%s&start=%s&hl=en'
                % (compat_urllib_parse.quote_plus(query), pagenum * 10))
            webpage = self._download_webpage(
                result_url, 'gvsearch:' + query,
                note='Downloading result page ' + str(pagenum + 1))

            for hit_idx, mobj in enumerate(re.finditer(
                    r'<h3 class="r"><a href="([^"]+)"', webpage)):
                # Skip playlists: real video hits carry a matching
                # 1-based "vidthumb" thumbnail anchor in the page.
                if not re.search(r'id="vidthumb%d"' % (hit_idx + 1), webpage):
                    continue
                entries.append({
                    '_type': 'url',
                    'url': mobj.group(1),
                })

            # Stop when we have enough entries or the "next" pager link
            # (id="pnnext") is absent, i.e. this was the last page.
            if (len(entries) >= n) or not re.search(r'id="pnnext"', webpage):
                res['entries'] = entries[:n]
                return res