mirror of https://github.com/yt-dlp/yt-dlp.git
Move gametrailers IE into its own file
This commit is contained in:
parent
b05654f0e3
commit
9f4e6bbaeb
|
@ -26,6 +26,7 @@ from .extractor.common import InfoExtractor, SearchInfoExtractor
|
||||||
from .extractor.ard import ARDIE
|
from .extractor.ard import ARDIE
|
||||||
from .extractor.arte import ArteTvIE
|
from .extractor.arte import ArteTvIE
|
||||||
from .extractor.dailymotion import DailymotionIE
|
from .extractor.dailymotion import DailymotionIE
|
||||||
|
from .extractor.gametrailers import GametrailersIE
|
||||||
from .extractor.metacafe import MetacafeIE
|
from .extractor.metacafe import MetacafeIE
|
||||||
from .extractor.statigram import StatigramIE
|
from .extractor.statigram import StatigramIE
|
||||||
from .extractor.photobucket import PhotobucketIE
|
from .extractor.photobucket import PhotobucketIE
|
||||||
|
@ -2910,56 +2911,6 @@ class Vbox7IE(InfoExtractor):
|
||||||
'thumbnail': thumbnail_url,
|
'thumbnail': thumbnail_url,
|
||||||
}]
|
}]
|
||||||
|
|
||||||
class GametrailersIE(InfoExtractor):
    """Extractor for gametrailers.com videos, reviews and full episodes.

    Downloads the page to find the MTV-style ``mgid`` content id, then
    queries the site's mrss/mediagen feeds for metadata and stream URLs.
    """
    _VALID_URL = r'http://www.gametrailers.com/(?P<type>videos|reviews|full-episodes)/(?P<id>.*?)/(?P<title>.*)'

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)
        video_id = mobj.group('id')
        video_type = mobj.group('type')
        webpage = self._download_webpage(url, video_id)
        # Full-episode pages embed the mgid in a different attribute than
        # videos/reviews pages.
        if video_type == 'full-episodes':
            mgid_re = r'data-video="(?P<mgid>mgid:.*?)"'
        else:
            mgid_re = r'data-contentId=\'(?P<mgid>mgid:.*?)\''
        mgid = self._search_regex(mgid_re, webpage, u'mgid')
        data = compat_urllib_parse.urlencode({'uri': mgid, 'acceptMethods': 'fms'})

        info_page = self._download_webpage('http://www.gametrailers.com/feeds/mrss?' + data,
                                           video_id, u'Downloading video info')
        links_webpage = self._download_webpage('http://www.gametrailers.com/feeds/mediagen/?' + data,
                                               video_id, u'Downloading video urls info')

        self.report_extraction(video_id)
        info_re = r'''<title><!\[CDATA\[(?P<title>.*?)\]\]></title>.*
                      <description><!\[CDATA\[(?P<description>.*?)\]\]></description>.*
                      <image>.*
                          <url>(?P<thumb>.*?)</url>.*
                      </image>'''

        m_info = re.search(info_re, info_page, re.VERBOSE | re.DOTALL)
        if m_info is None:
            raise ExtractorError(u'Unable to extract video info')
        video_title = m_info.group('title')
        video_description = m_info.group('description')
        video_thumb = m_info.group('thumb')

        m_urls = list(re.finditer(r'<src>(?P<url>.*)</src>', links_webpage))
        # BUG FIX: this branch raised ExtractError (undefined name, so a
        # NameError masked the real failure) with a typo'd message
        # ('Unable to extrat video url'); also simplified the empty check.
        if not m_urls:
            raise ExtractorError(u'Unable to extract video url')
        # They are sorted from worst to best quality
        video_url = m_urls[-1].group('url')

        return {'url': video_url,
                'id': video_id,
                'title': video_title,
                # Videos are actually flv not mp4
                'ext': 'flv',
                'thumbnail': video_thumb,
                'description': video_description,
                }
|
|
||||||
|
|
||||||
def gen_extractors():
|
def gen_extractors():
|
||||||
""" Return a list of an instance of every supported extractor.
|
""" Return a list of an instance of every supported extractor.
|
||||||
|
|
|
@ -0,0 +1,59 @@
|
||||||
|
import re
|
||||||
|
|
||||||
|
from .common import InfoExtractor
|
||||||
|
from ..utils import (
|
||||||
|
compat_urllib_parse,
|
||||||
|
|
||||||
|
ExtractorError,
|
||||||
|
)
|
||||||
|
|
||||||
|
class GametrailersIE(InfoExtractor):
    """Extracts videos, reviews and full episodes from gametrailers.com.

    Resolves the page's ``mgid`` content id and fetches metadata plus
    stream URLs from the site's mrss and mediagen feeds.
    """
    _VALID_URL = r'http://www.gametrailers.com/(?P<type>videos|reviews|full-episodes)/(?P<id>.*?)/(?P<title>.*)'

    def _real_extract(self, url):
        match = re.match(self._VALID_URL, url)
        if match is None:
            raise ExtractorError(u'Invalid URL: %s' % url)
        video_id = match.group('id')
        page = self._download_webpage(url, video_id)

        # The mgid lives in a different attribute on full-episode pages.
        if match.group('type') == 'full-episodes':
            mgid_pattern = r'data-video="(?P<mgid>mgid:.*?)"'
        else:
            mgid_pattern = r'data-contentId=\'(?P<mgid>mgid:.*?)\''
        mgid = self._search_regex(mgid_pattern, page, u'mgid')
        query = compat_urllib_parse.urlencode({'uri': mgid, 'acceptMethods': 'fms'})

        info_page = self._download_webpage(
            'http://www.gametrailers.com/feeds/mrss?' + query,
            video_id, u'Downloading video info')
        links_page = self._download_webpage(
            'http://www.gametrailers.com/feeds/mediagen/?' + query,
            video_id, u'Downloading video urls info')

        self.report_extraction(video_id)
        metadata = re.search(
            r'''<title><!\[CDATA\[(?P<title>.*?)\]\]></title>.*
                <description><!\[CDATA\[(?P<description>.*?)\]\]></description>.*
                <image>.*
                    <url>(?P<thumb>.*?)</url>.*
                </image>''',
            info_page, re.VERBOSE | re.DOTALL)
        if metadata is None:
            raise ExtractorError(u'Unable to extract video info')

        sources = list(re.finditer(r'<src>(?P<url>.*)</src>', links_page))
        if not sources:
            raise ExtractorError(u'Unable to extract video url')

        return {
            # They are sorted from worst to best quality
            'url': sources[-1].group('url'),
            'id': video_id,
            'title': metadata.group('title'),
            # Videos are actually flv not mp4
            'ext': 'flv',
            'thumbnail': metadata.group('thumb'),
            'description': metadata.group('description'),
        }
|
Loading…
Reference in New Issue