# yt-dlp/youtube_dl/extractor/fourtube.py

from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..compat import (
    compat_urllib_request,
)
from ..utils import (
    parse_duration,
    parse_iso8601,
    str_to_int,
)


class FourTubeIE(InfoExtractor):
    IE_NAME = '4tube'
    _VALID_URL = r'https?://(?:www\.)?4tube\.com/videos/(?P<id>\d+)'

    _TEST = {
        'url': 'http://www.4tube.com/videos/209733/hot-babe-holly-michaels-gets-her-ass-stuffed-by-black',
        'md5': '6516c8ac63b03de06bc8eac14362db4f',
        'info_dict': {
            'id': '209733',
            'ext': 'mp4',
            'title': 'Hot Babe Holly Michaels gets her ass stuffed by black',
            'uploader': 'WCP Club',
            'uploader_id': 'wcp-club',
            'upload_date': '20131031',
            'timestamp': 1383263892,
            'duration': 583,
            'view_count': int,
            'like_count': int,
            'categories': list,
            'age_limit': 18,
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
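
        # Basic metadata is read from the page's <meta> tags (schema.org
        # VideoObject fields) and from the uploader avatar link markup.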
        title = self._html_search_meta('name', webpage)
        timestamp = parse_iso8601(self._html_search_meta(
            'uploadDate', webpage))
        thumbnail = self._html_search_meta('thumbnailUrl', webpage)
        uploader_id = self._html_search_regex(
            r'<a class="img-avatar" href="[^"]+/channels/([^/"]+)" title="Go to [^"]+ page">',
            webpage, 'uploader id')
        uploader = self._html_search_regex(
            r'<a class="img-avatar" href="[^"]+/channels/[^/"]+" title="Go to ([^"]+) page">',
            webpage, 'uploader')
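
        # The categories/tags list is optional; scrape the "Categories / Tags"
        # block when present, otherwise leave categories as None.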
        categories_html = self._search_regex(
            r'(?s)><i class="icon icon-tag"></i>\s*Categories / Tags\s*.*?<ul class="list">(.*?)</ul>',
            webpage, 'categories', fatal=False)
        categories = None
        if categories_html:
            categories = [
                c.strip() for c in re.findall(
                    r'(?s)<li><a.*?>(.*?)</a>', categories_html)]
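
        # View and like counts are exposed as itemprop="interactionCount"
        # meta tags ("UserPlays:N" / "UserLikes:N"); both are optional.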
        view_count = str_to_int(self._search_regex(
            r'<meta itemprop="interactionCount" content="UserPlays:([0-9,]+)">',
            webpage, 'view count', fatal=False))
        like_count = str_to_int(self._search_regex(
            r'<meta itemprop="interactionCount" content="UserLikes:([0-9,]+)">',
            webpage, 'like count', fatal=False))
        duration = parse_duration(self._html_search_meta('duration', webpage))
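
        # The player is initialized by an immediately-invoked function whose
        # argument list carries the media id and what the extractor treats as
        # the available quality levels.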
        params_js = self._search_regex(
            r'\$\.ajax\(url,\ opts\);\s*\}\s*\}\)\(([0-9,\[\] ]+)\)',
            webpage, 'initialization parameters'
        )
        params = self._parse_json('[%s]' % params_js, video_id)
        media_id = params[0]
        sources = ['%s' % p for p in params[2]]
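
        # Tokenized media URLs come from an empty POST to the tkn.4tube.com
        # endpoint, which returns one token per requested quality.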
        token_url = 'http://tkn.4tube.com/{0}/desktop/{1}'.format(
            media_id, '+'.join(sources))
        headers = {
            b'Content-Type': b'application/x-www-form-urlencoded',
            b'Origin': b'http://www.4tube.com',
        }
        token_req = compat_urllib_request.Request(token_url, b'{}', headers)
        tokens = self._download_json(token_req, video_id)
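
        # Build one format per quality level; each entry's 'token' value is
        # used directly as the video URL.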
        formats = [{
            'url': tokens[format]['token'],
            'format_id': format + 'p',
            'resolution': format + 'p',
            'quality': int(format),
        } for format in sources]

        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'categories': categories,
            'thumbnail': thumbnail,
            'uploader': uploader,
            'uploader_id': uploader_id,
            'timestamp': timestamp,
            'like_count': like_count,
            'view_count': view_count,
            'duration': duration,
            'age_limit': 18,
        }