#!/usr/bin/env python

from __future__ import unicode_literals

# Allow direct execution
import os
import sys
import unittest
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from test.helper import (
    assertGreaterEqual,
    expect_warnings,
    get_params,
    gettestcases,
    expect_info_dict,
    try_rm,
    report_warning,
)


import hashlib
import io
import json
import socket

import youtube_dl.YoutubeDL
from youtube_dl.compat import (
    compat_http_client,
    compat_urllib_error,
    compat_HTTPError,
)
from youtube_dl.utils import (
    DownloadError,
    ExtractorError,
    format_bytes,
    UnavailableVideoError,
)
from youtube_dl.extractor import get_info_extractor

RETRIES = 3

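# A YoutubeDL subclass instrumented for the tests below: it records every
# processed info_dict for later inspection and promotes warnings to hard
# errors so unexpected extractor warnings fail the test.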
class YoutubeDL(youtube_dl.YoutubeDL):
    def __init__(self, *args, **kwargs):
        self.to_stderr = self.to_screen
        self.processed_info_dicts = []
        super(YoutubeDL, self).__init__(*args, **kwargs)

    def report_warning(self, message):
        # Don't accept warnings during tests
        raise ExtractorError(message)

    def process_info(self, info_dict):
        self.processed_info_dicts.append(info_dict)
        return super(YoutubeDL, self).process_info(info_dict)


def _file_md5(fn):
    with open(fn, 'rb') as f:
        return hashlib.md5(f.read()).hexdigest()


defs = gettestcases()
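# Each entry in `defs` comes from an extractor's _TEST/_TESTS definition.
# Illustrative shape only (the field values below are made up); test_template
# further down consumes keys such as 'url', 'md5', 'skip', 'add_ie', 'params'
# and 'playlist_mincount', and requires 'id' and 'ext' inside 'info_dict':
#
#     {
#         'name': 'SomeExtractor',
#         'url': 'http://example.com/watch/42',
#         'md5': '<expected md5 of the downloaded file>',
#         'info_dict': {
#             'id': '42',
#             'ext': 'mp4',
#             'title': 'Expected title',
#         },
#     }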


class TestDownload(unittest.TestCase):
    # Parallel testing in nosetests. See
    # http://nose.readthedocs.org/en/latest/doc_tests/test_multiprocess/multiprocess.html
    _multiprocess_shared_ = True

    maxDiff = None

    def __str__(self):
        """Identify each test with the `add_ie` attribute, if available."""

        def strclass(cls):
            """From 2.7's unittest; 2.6 had _strclass so we can't import it."""
            return '%s.%s' % (cls.__module__, cls.__name__)

        add_ie = getattr(self, self._testMethodName).add_ie
        return '%s (%s)%s:' % (self._testMethodName,
                               strclass(self.__class__),
                               ' [%s]' % add_ie if add_ie else '')

    def setUp(self):
        self.defs = defs


# Dynamically generate tests
def generator(test_case, tname):

    def test_template(self):
        ie = youtube_dl.extractor.get_info_extractor(test_case['name'])()
        other_ies = [get_info_extractor(ie_key)() for ie_key in test_case.get('add_ie', [])]
        is_playlist = any(k.startswith('playlist') for k in test_case)
        test_cases = test_case.get(
            'playlist', [] if is_playlist else [test_case])

        def print_skipping(reason):
            print('Skipping %s: %s' % (test_case['name'], reason))
            self.skipTest(reason)

        if not ie.working():
            print_skipping('IE marked as not _WORKING')

        for tc in test_cases:
            info_dict = tc.get('info_dict', {})
            if not (info_dict.get('id') and info_dict.get('ext')):
                raise Exception('Test definition incorrect. The output file cannot be known. Are both \'id\' and \'ext\' keys present?')

        if 'skip' in test_case:
            print_skipping(test_case['skip'])

        for other_ie in other_ies:
            if not other_ie.working():
                print_skipping('test depends on %sIE, marked as not WORKING' % other_ie.ie_key())

        params = get_params(test_case.get('params', {}))
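        # Prefix output filenames with the test name so files from different
        # (possibly concurrently running) tests cannot clash.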
        params['outtmpl'] = tname + '_' + params['outtmpl']
        if is_playlist and 'playlist' not in test_case:
            params.setdefault('extract_flat', 'in_playlist')
            params.setdefault('playlistend', test_case.get('playlist_mincount'))
            params.setdefault('skip_download', True)

        ydl = YoutubeDL(params, auto_init=False)
        ydl.add_default_info_extractors()
        finished_hook_called = set()

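        # Record every file the progress hook reports as finished; the
        # download checks below assert that each expected filename is in here.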
        def _hook(status):
            if status['status'] == 'finished':
                finished_hook_called.add(status['filename'])
        ydl.add_progress_hook(_hook)
        expect_warnings(ydl, test_case.get('expected_warnings', []))

        def get_tc_filename(tc):
            return ydl.prepare_filename(tc.get('info_dict', {}))

        res_dict = None

        def try_rm_tcs_files(tcs=None):
            if tcs is None:
                tcs = test_cases
            for tc in tcs:
                tc_filename = get_tc_filename(tc)
                try_rm(tc_filename)
                try_rm(tc_filename + '.part')
                try_rm(os.path.splitext(tc_filename)[0] + '.info.json')
        try_rm_tcs_files()
        try:
            try_num = 1
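            # Retry extraction on network-related errors; after RETRIES
            # attempts a warning is reported and the test returns without
            # failing.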
            while True:
                try:
                    # We're not using .download here since that is just a shim
                    # for outside error handling, and returns the exit code
                    # instead of the result dict.
                    res_dict = ydl.extract_info(
                        test_case['url'],
                        force_generic_extractor=params.get('force_generic_extractor', False))
                except (DownloadError, ExtractorError) as err:
                    # Check if the exception is not a network related one
                    if not err.exc_info[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError, compat_http_client.BadStatusLine) or (err.exc_info[0] == compat_HTTPError and err.exc_info[1].code == 503):
                        raise

                    if try_num == RETRIES:
                        report_warning('%s failed due to network errors, skipping...' % tname)
                        return

                    print('Retrying: {0} failed tries\n\n##########\n\n'.format(try_num))

                    try_num += 1
                else:
                    break

            if is_playlist:
                self.assertTrue(res_dict['_type'] in ['playlist', 'multi_video'])
                self.assertTrue('entries' in res_dict)
                expect_info_dict(self, res_dict, test_case.get('info_dict', {}))

            if 'playlist_mincount' in test_case:
                assertGreaterEqual(
                    self,
                    len(res_dict['entries']),
                    test_case['playlist_mincount'],
                    'Expected at least %d in playlist %s, but got only %d' % (
                        test_case['playlist_mincount'], test_case['url'],
                        len(res_dict['entries'])))
            if 'playlist_count' in test_case:
                self.assertEqual(
                    len(res_dict['entries']),
                    test_case['playlist_count'],
                    'Expected %d entries in playlist %s, but got %d.' % (
                        test_case['playlist_count'],
                        test_case['url'],
                        len(res_dict['entries']),
                    ))
            if 'playlist_duration_sum' in test_case:
                got_duration = sum(e['duration'] for e in res_dict['entries'])
                self.assertEqual(
                    test_case['playlist_duration_sum'], got_duration)

            # Generalize both playlists and single videos to unified format for
            # simplicity
            if 'entries' not in res_dict:
                res_dict['entries'] = [res_dict]

            for tc_num, tc in enumerate(test_cases):
                tc_res_dict = res_dict['entries'][tc_num]
                # First, check test cases' data against extracted data alone
                expect_info_dict(self, tc_res_dict, tc.get('info_dict', {}))
                # Now, check downloaded file consistency
                tc_filename = get_tc_filename(tc)
                if not test_case.get('params', {}).get('skip_download', False):
                    self.assertTrue(os.path.exists(tc_filename), msg='Missing file ' + tc_filename)
                    self.assertTrue(tc_filename in finished_hook_called)
                    expected_minsize = tc.get('file_minsize', 10000)
                    if expected_minsize is not None:
                        if params.get('test'):
                            expected_minsize = max(expected_minsize, 10000)
                        got_fsize = os.path.getsize(tc_filename)
                        assertGreaterEqual(
                            self, got_fsize, expected_minsize,
                            'Expected %s to be at least %s, but it\'s only %s ' %
                            (tc_filename, format_bytes(expected_minsize),
                             format_bytes(got_fsize)))
                    if 'md5' in tc:
                        md5_for_file = _file_md5(tc_filename)
                        self.assertEqual(tc['md5'], md5_for_file)
                # Finally, check test cases' data again but this time against
                # extracted data from info JSON file written during processing
                info_json_fn = os.path.splitext(tc_filename)[0] + '.info.json'
                self.assertTrue(
                    os.path.exists(info_json_fn),
                    'Missing info file %s' % info_json_fn)
                with io.open(info_json_fn, encoding='utf-8') as infof:
                    info_dict = json.load(infof)
                expect_info_dict(self, info_dict, tc.get('info_dict', {}))
        finally:
            try_rm_tcs_files()
            if is_playlist and res_dict is not None and res_dict.get('entries'):
                # Remove all other files that may have been extracted if the
                # extractor returns full results even with extract_flat
                res_tcs = [{'info_dict': e} for e in res_dict['entries']]
                try_rm_tcs_files(res_tcs)

    return test_template


# And add them to TestDownload
for n, test_case in enumerate(defs):
    tname = 'test_' + str(test_case['name'])
    i = 1
    while hasattr(TestDownload, tname):
        tname = 'test_%s_%d' % (test_case['name'], i)
        i += 1
    test_method = generator(test_case, tname)
    test_method.__name__ = str(tname)
    ie_list = test_case.get('add_ie')
    test_method.add_ie = ie_list and ','.join(ie_list)
    setattr(TestDownload, test_method.__name__, test_method)
    del test_method


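# Run everything, or a single generated test, e.g. (the exact method name
# depends on the test case):
#     python test/test_download.py TestDownload.test_YourExtractor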
if __name__ == '__main__':
unittest.main()