import re
import json
import xml.etree.ElementTree

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    unified_strdate,
)


class ArteTvIE(InfoExtractor):
    """
    There are two sources of video in arte.tv: videos.arte.tv and
    www.arte.tv/guide; the extraction process is different for each one.
    The videos expire in 7 days, so we can't add tests.
    """
    _EMISSION_URL = r'(?:http://)?www\.arte\.tv/guide/(?P<lang>fr|de)/(?:(?:sendungen|emissions)/)?(?P<id>.*?)/(?P<name>.*?)(\?.*)?'
    _VIDEOS_URL = r'(?:http://)?videos\.arte\.tv/(?P<lang>fr|de)/.*-(?P<id>.*?)\.html'
    _LIVE_URL = r'index-[0-9]+\.html$'

    IE_NAME = u'arte.tv'

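    # suitable() is overridden because this extractor matches two distinct
    # URL patterns (_EMISSION_URL and _VIDEOS_URL) instead of a single _VALID_URL.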
    @classmethod
    def suitable(cls, url):
        return any(re.match(regex, url) for regex in (cls._EMISSION_URL, cls._VIDEOS_URL))

    # TODO implement Live Stream
    # from ..utils import compat_urllib_parse
    # def extractLiveStream(self, url):
    #     video_lang = url.split('/')[-4]
    #     info = self.grep_webpage(
    #         url,
    #         r'src="(.*?/videothek_js.*?\.js)',
    #         0,
    #         [
    #             (1, 'url', u'Invalid URL: %s' % url)
    #         ]
    #     )
    #     http_host = url.split('/')[2]
    #     next_url = 'http://%s%s' % (http_host, compat_urllib_parse.unquote(info.get('url')))
    #     info = self.grep_webpage(
    #         next_url,
    #         r'(s_artestras_scst_geoFRDE_' + video_lang + '.*?)\'.*?' +
    #             '(http://.*?\.swf).*?' +
    #             '(rtmp://.*?)\'',
    #         re.DOTALL,
    #         [
    #             (1, 'path', u'could not extract video path: %s' % url),
    #             (2, 'player', u'could not extract video player: %s' % url),
    #             (3, 'url', u'could not extract video url: %s' % url)
    #         ]
    #     )
    #     video_url = u'%s/%s' % (info.get('url'), info.get('path'))

    def _real_extract(self, url):
        mobj = re.match(self._EMISSION_URL, url)
        if mobj is not None:
            lang = mobj.group('lang')
            # This is not a real id, it can be for example AJT for the news
            # http://www.arte.tv/guide/fr/emissions/AJT/arte-journal
            video_id = mobj.group('id')
            return self._extract_emission(url, video_id, lang)

        mobj = re.match(self._VIDEOS_URL, url)
        if mobj is not None:
            video_id = mobj.group('id')
            lang = mobj.group('lang')
            return self._extract_video(url, video_id, lang)

        if re.search(self._LIVE_URL, url) is not None:
            raise ExtractorError(u'Arte live streams are not yet supported, sorry')
            # self.extractLiveStream(url)
            # return

    def _extract_emission(self, url, video_id, lang):
        """Extract from www.arte.tv/guide"""
        webpage = self._download_webpage(url, video_id)
        json_url = self._html_search_regex(r'arte_vp_url="(.*?)"', webpage, 'json url')

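        # 'arte_vp_url' apparently points to the JSON description used by the
        # Arte video player; all the metadata below is read from that JSON.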
        json_info = self._download_webpage(json_url, video_id, 'Downloading info json')
        self.report_extraction(video_id)
        info = json.loads(json_info)
        player_info = info['videoJsonPlayer']

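        # The player JSON uses short field names: 'VID' (id), 'VTI' (title),
        # 'VDE' (description), 'VDA' (date) and 'VSR' (available streams).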
        info_dict = {'id': player_info['VID'],
                     'title': player_info['VTI'],
                     'description': player_info['VDE'],
                     'upload_date': unified_strdate(player_info['VDA'].split(' ')[0]),
                     'thumbnail': player_info['programImage'],
                     'ext': 'flv',
                     }

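        # Each stream in 'VSR' carries a 'versionCode' such as 'VF', 'VOF' or
        # 'VF-ST...' for French and 'VA', 'VOA' or 'VA-ST...' for German;
        # _match_lang keeps only the versions matching the language of the URL.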
        formats = player_info['VSR'].values()
        def _match_lang(f):
            # Return true if that format is in the language of the url
            if lang == 'fr':
                l = 'F'
            elif lang == 'de':
                l = 'A'
            regexes = [r'VO?%s' % l, r'V%s-ST.' % l]
            return any(re.match(r, f['versionCode']) for r in regexes)
        # Some formats may not be in the same language as the url
        formats = filter(_match_lang, formats)
        # We order the formats by quality
        formats = sorted(formats, key=lambda f: int(f['height']))
        # Pick the best quality
        format_info = formats[-1]
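        # RTMP streams are described by a 'streamer' (the RTMP base URL) plus a
        # play path in 'url', which gets an 'mp4:' prefix; plain HTTP streams
        # just provide a direct 'url'.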
        if format_info['mediaType'] == u'rtmp':
            info_dict['url'] = format_info['streamer']
            info_dict['play_path'] = 'mp4:' + format_info['url']
        else:
            info_dict['url'] = format_info['url']

        return info_dict

    def _extract_video(self, url, video_id, lang):
        """Extract from videos.arte.tv"""
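        # The page URL is first rewritten into a 'do_delegate ... asPlayerXml'
        # reference XML, which lists a per-language <video> node whose 'ref'
        # attribute points to the actual player configuration XML.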
        ref_xml_url = url.replace('/videos/', '/do_delegate/videos/')
        ref_xml_url = ref_xml_url.replace('.html', ',view,asPlayerXml.xml')
        ref_xml = self._download_webpage(ref_xml_url, video_id, note=u'Downloading metadata')
        ref_xml_doc = xml.etree.ElementTree.fromstring(ref_xml)
        config_node = ref_xml_doc.find('.//video[@lang="%s"]' % lang)
        config_xml_url = config_node.attrib['ref']
        config_xml = self._download_webpage(config_xml_url, video_id, note=u'Downloading configuration')

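        # The configuration XML lists one <url quality="..."> element per
        # available stream; 'hd' is ranked above everything else and the last
        # (best) entry is kept.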
        video_urls = list(re.finditer(r'<url quality="(?P<quality>.*?)">(?P<url>.*?)</url>', config_xml))
        def _key(m):
            quality = m.group('quality')
            if quality == 'hd':
                return 2
            else:
                return 1
        # We pick the best quality
        video_urls = sorted(video_urls, key=_key)
        video_url = video_urls[-1].group('url')

        title = self._html_search_regex(r'<name>(.*?)</name>', config_xml, 'title')
        thumbnail = self._html_search_regex(r'<firstThumbnailUrl>(.*?)</firstThumbnailUrl>',
                                            config_xml, 'thumbnail')
        return {'id': video_id,
                'title': title,
                'thumbnail': thumbnail,
                'url': video_url,
                'ext': 'flv',
                }