mirror of https://github.com/yt-dlp/yt-dlp
Compare commits
66 Commits
8463fb510a
...
168e72dcd3
Author | SHA1 | Date |
---|---|---|
github-actions[bot] | 168e72dcd3 | 1 month ago |
Simon Sawicki | ff07792676 | 1 month ago |
bashonly | 216f6a3cb5 | 1 month ago |
bashonly | b19ae095fd | 1 month ago |
Simon Sawicki | 9590cc6b47 | 1 month ago |
luiso1979 | 79a451e576 | 1 month ago |
Leo Heitmann Ruiz | df0e138fc0 | 1 month ago |
bashonly | 2e94602f24 | 2 months ago |
bashonly | 4af9d5c2f6 | 2 months ago |
John Victor | 36b240f9a7 | 2 months ago |
bashonly | fc53ec13ff | 2 months ago |
Dmitry Meyer | 2ab2651a4a | 2 months ago |
bashonly | b15b0c1d21 | 2 months ago |
bashonly | c8a61a9100 | 2 months ago |
Mozi | f2fd449b46 | 2 months ago |
Tomoka1 | 9415f1a5ef | 2 months ago |
bashonly | a48cc86d6f | 2 months ago |
bytedream | 954e57e405 | 2 months ago |
Dong Heon Hee | 9073ae6458 | 2 months ago |
Offert4324 | 4cd9e251b9 | 2 months ago |
bashonly | 0ae16ceb18 | 2 months ago |
bashonly | 443e206ec4 | 2 months ago |
bashonly | 4c3b7a0769 | 2 months ago |
bashonly | 16be117729 | 2 months ago |
trainman261 | b49d5ffc53 | 2 months ago |
HobbyistDev | 36baaa10e0 | 2 months ago |
Kacper Michajłow | 02f93ff51b | 2 months ago |
Mozi | c59de48e2b | 2 months ago |
Mozi | 0284f1fee2 | 2 months ago |
bashonly | e8032503b9 | 2 months ago |
bashonly | 97362712a1 | 2 months ago |
bashonly | 246571ae1d | 2 months ago |
Simon Sawicki | 32abfb00bd | 2 months ago |
pukkandan | c305a25c1b | 2 months ago |
pukkandan | e3a3ed8a98 | 2 months ago |
pukkandan | a25a424323 | 2 months ago |
sepro | 86e3b82261 | 2 months ago |
pukkandan | e7b17fce14 | 2 months ago |
bashonly | a2d0840739 | 2 months ago |
pukkandan | 86a972033e | 2 months ago |
bashonly | 50c2935231 | 2 months ago |
bashonly | 0df63cce69 | 2 months ago |
bashonly | 63f685f341 | 2 months ago |
Simon Sawicki | 3699eeb67c | 2 months ago |
Simon Sawicki | 979ce2e786 | 2 months ago |
bashonly | 58dd0f8d1e | 2 months ago |
bashonly | cb61e20c26 | 2 months ago |
bashonly | 9c42b7eef5 | 2 months ago |
coletdjnz | e5d4f11104 | 2 months ago |
src-tinkerer | bc2b8c0596 | 2 months ago |
sta1us | aa7e9ae4f4 | 2 months ago |
Shreyas Minocha | 07f5b2f757 | 2 months ago |
Daniel Vogt | ff349ff94a | 2 months ago |
Hasan Rüzgar | f859ed3ba1 | 2 months ago |
Aron Buzinkay | 17d248a587 | 2 months ago |
sepro | 388c979ac6 | 2 months ago |
sepro | 22e4dfacb6 | 2 months ago |
Trustin | 86d2f4d248 | 2 months ago |
coletdjnz | 52f5be1f1e | 2 months ago |
coletdjnz | 0b81d4d252 | 2 months ago |
coletdjnz | f849d77ab5 | 2 months ago |
bashonly | f2868b26e9 | 2 months ago |
bashonly | be77923ffe | 2 months ago |
bashonly | 8c05b3ebae | 2 months ago |
jazz1611 | 0da66980d3 | 2 months ago |
bashonly | 17b96974a3 | 2 months ago |
File diff suppressed because one or more lines are too long
Before Width: | Height: | Size: 24 KiB After Width: | Height: | Size: 15 KiB |
@ -0,0 +1,26 @@
|
|||||||
|
#!/usr/bin/env python3
"""Insert a new release entry at the top of Changelog.md.

Reads the current version via devscripts.utils.read_version and prepends a
generated changelog section for it, keeping all previous releases intact.
"""

# Allow direct execution
import os
import sys

# Make the repository root importable so `devscripts.*` resolves when this
# file is run directly (it lives one level below the repo root).
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

from pathlib import Path

from devscripts.make_changelog import create_changelog, create_parser
from devscripts.utils import read_file, read_version, write_file

# Always run after devscripts/update-version.py, and run before `make doc|pypi-files|tar|all`

if __name__ == '__main__':
    parser = create_parser()
    parser.description = 'Update an existing changelog file with an entry for a new release'
    parser.add_argument(
        '--changelog-path', type=Path, default=Path(__file__).parent.parent / 'Changelog.md',
        help='path to the Changelog file')
    args = parser.parse_args()
    new_entry = create_changelog(args)

    # Split the file at the first release heading ('\n### '); everything before
    # it is the static header. If no heading exists, `sep` is '' and the new
    # entry is simply appended after the header.
    header, sep, changelog = read_file(args.changelog_path).partition('\n### ')
    # Re-assemble as: header + '### <version>\n<new entry>\n' + previous releases.
    write_file(args.changelog_path, f'{header}{sep}{read_version()}\n{new_entry}\n{sep}{changelog}')
|
@ -0,0 +1,444 @@
|
|||||||
|
import http.cookies
|
||||||
|
import re
|
||||||
|
import xml.etree.ElementTree
|
||||||
|
|
||||||
|
import pytest
|
||||||
|
|
||||||
|
from yt_dlp.utils import dict_get, int_or_none, str_or_none
|
||||||
|
from yt_dlp.utils.traversal import traverse_obj
|
||||||
|
|
||||||
|
# Shared fixture for the traversal tests below. The keys deliberately cover
# the value kinds `traverse_obj` must handle: non-str keys (int, float),
# `None` and `...` sentinels, a list of dicts, a tuple of dicts, and an
# empty dict (which several tests expect to be pruned from results).
_TEST_DATA = {
    100: 100,
    1.2: 1.2,
    'str': 'str',
    'None': None,
    '...': ...,
    'urls': [
        {'index': 0, 'url': 'https://www.example.com/0'},
        {'index': 1, 'url': 'https://www.example.com/1'},
    ],
    'data': (
        {'index': 2},
        {'index': 3},
    ),
    'dict': {},
}
|
||||||
|
|
||||||
|
|
||||||
|
class TestTraversal:
    """Behavioral test-suite for `yt_dlp.utils.traversal.traverse_obj`.

    Each assertion's trailing string doubles as its specification; the
    assertions are grouped by feature (path kinds, branching, `default`
    handling, `expected_type`, string traversal, `re.Match`/etree/Morsel
    support, and the `all`/`any` unbranching keys).
    """

    def test_traversal_base(self):
        """Basic path kinds: tuple/list/iterable/single keys and `None`."""
        assert traverse_obj(_TEST_DATA, ('str',)) == 'str', \
            'allow tuple path'
        assert traverse_obj(_TEST_DATA, ['str']) == 'str', \
            'allow list path'
        assert traverse_obj(_TEST_DATA, (value for value in ("str",))) == 'str', \
            'allow iterable path'
        assert traverse_obj(_TEST_DATA, 'str') == 'str', \
            'single items should be treated as a path'
        assert traverse_obj(_TEST_DATA, 100) == 100, \
            'allow int path'
        assert traverse_obj(_TEST_DATA, 1.2) == 1.2, \
            'allow float path'
        assert traverse_obj(_TEST_DATA, None) == _TEST_DATA, \
            '`None` should not perform any modification'

    def test_traversal_ellipsis(self):
        """`...` branches into every child (discarding `None` and empty dicts)."""
        assert traverse_obj(_TEST_DATA, ...) == [x for x in _TEST_DATA.values() if x not in (None, {})], \
            '`...` should give all non discarded values'
        assert traverse_obj(_TEST_DATA, ('urls', 0, ...)) == list(_TEST_DATA['urls'][0].values()), \
            '`...` selection for dicts should select all values'
        assert traverse_obj(_TEST_DATA, (..., ..., 'url')) == ['https://www.example.com/0', 'https://www.example.com/1'], \
            'nested `...` queries should work'
        assert traverse_obj(_TEST_DATA, (..., ..., 'index')) == list(range(4)), \
            '`...` query result should be flattened'
        assert traverse_obj(iter(range(4)), ...) == list(range(4)), \
            '`...` should accept iterables'

    def test_traversal_function(self):
        """Callable keys act as `(key, value)` filters; errors are swallowed."""
        filter_func = lambda x, y: x == 'urls' and isinstance(y, list)
        assert traverse_obj(_TEST_DATA, filter_func) == [_TEST_DATA['urls']], \
            'function as query key should perform a filter based on (key, value)'
        assert traverse_obj(_TEST_DATA, lambda _, x: isinstance(x[0], str)) == ['str'], \
            'exceptions in the query function should be catched'
        assert traverse_obj(iter(range(4)), lambda _, x: x % 2 == 0) == [0, 2], \
            'function key should accept iterables'
        # Wrong function signature should raise (debug mode)
        with pytest.raises(Exception):
            traverse_obj(_TEST_DATA, lambda a: ...)
        with pytest.raises(Exception):
            traverse_obj(_TEST_DATA, lambda a, b, c: ...)

    def test_traversal_set(self):
        """Set keys: `{type}` filters by type, `{function}` transforms values."""
        # transformation/type, like `expected_type`
        assert traverse_obj(_TEST_DATA, (..., {str.upper}, )) == ['STR'], \
            'Function in set should be a transformation'
        assert traverse_obj(_TEST_DATA, (..., {str})) == ['str'], \
            'Type in set should be a type filter'
        assert traverse_obj(_TEST_DATA, (..., {str, int})) == [100, 'str'], \
            'Multiple types in set should be a type filter'
        assert traverse_obj(_TEST_DATA, {dict}) == _TEST_DATA, \
            'A single set should be wrapped into a path'
        assert traverse_obj(_TEST_DATA, (..., {str.upper})) == ['STR'], \
            'Transformation function should not raise'
        expected = [x for x in map(str_or_none, _TEST_DATA.values()) if x is not None]
        assert traverse_obj(_TEST_DATA, (..., {str_or_none})) == expected, \
            'Function in set should be a transformation'
        assert traverse_obj(_TEST_DATA, ('fail', {lambda _: 'const'})) == 'const', \
            'Function in set should always be called'
        # Sets with length < 1 or > 1 not including only types should raise
        with pytest.raises(Exception):
            traverse_obj(_TEST_DATA, set())
        with pytest.raises(Exception):
            traverse_obj(_TEST_DATA, {str.upper, str})

    def test_traversal_slice(self):
        """`slice` keys index sequences; they are a no-op failure on dicts."""
        _SLICE_DATA = [0, 1, 2, 3, 4]

        assert traverse_obj(_TEST_DATA, ('dict', slice(1))) is None, \
            'slice on a dictionary should not throw'
        assert traverse_obj(_SLICE_DATA, slice(1)) == _SLICE_DATA[:1], \
            'slice key should apply slice to sequence'
        assert traverse_obj(_SLICE_DATA, slice(1, 2)) == _SLICE_DATA[1:2], \
            'slice key should apply slice to sequence'
        assert traverse_obj(_SLICE_DATA, slice(1, 4, 2)) == _SLICE_DATA[1:4:2], \
            'slice key should apply slice to sequence'

    def test_traversal_alternatives(self):
        """Multiple positional paths are tried in order until one matches."""
        assert traverse_obj(_TEST_DATA, 'fail', 'str') == 'str', \
            'multiple `paths` should be treated as alternative paths'
        assert traverse_obj(_TEST_DATA, 'str', 100) == 'str', \
            'alternatives should exit early'
        assert traverse_obj(_TEST_DATA, 'fail', 'fail') is None, \
            'alternatives should return `default` if exhausted'
        assert traverse_obj(_TEST_DATA, (..., 'fail'), 100) == 100, \
            'alternatives should track their own branching return'
        assert traverse_obj(_TEST_DATA, ('dict', ...), ('data', ...)) == list(_TEST_DATA['data']), \
            'alternatives on empty objects should search further'

    def test_traversal_branching_nesting(self):
        """Tuples/lists inside a path branch; deeper nesting forms sub-paths."""
        assert traverse_obj(_TEST_DATA, ('urls', (3, 0), 'url')) == ['https://www.example.com/0'], \
            'tuple as key should be treated as branches'
        assert traverse_obj(_TEST_DATA, ('urls', [3, 0], 'url')) == ['https://www.example.com/0'], \
            'list as key should be treated as branches'
        assert traverse_obj(_TEST_DATA, ('urls', ((1, 'fail'), (0, 'url')))) == ['https://www.example.com/0'], \
            'double nesting in path should be treated as paths'
        assert traverse_obj(['0', [1, 2]], [(0, 1), 0]) == [1], \
            'do not fail early on branching'
        expected = ['https://www.example.com/0', 'https://www.example.com/1']
        assert traverse_obj(_TEST_DATA, ('urls', ((0, ('fail', 'url')), (1, 'url')))) == expected, \
            'tripple nesting in path should be treated as branches'
        assert traverse_obj(_TEST_DATA, ('urls', ('fail', (..., 'url')))) == expected, \
            'ellipsis as branch path start gets flattened'

    def test_traversal_dict(self):
        """Dict keys build a result dict, pruning failed/empty entries unless `default` is set."""
        assert traverse_obj(_TEST_DATA, {0: 100, 1: 1.2}) == {0: 100, 1: 1.2}, \
            'dict key should result in a dict with the same keys'
        expected = {0: 'https://www.example.com/0'}
        assert traverse_obj(_TEST_DATA, {0: ('urls', 0, 'url')}) == expected, \
            'dict key should allow paths'
        expected = {0: ['https://www.example.com/0']}
        assert traverse_obj(_TEST_DATA, {0: ('urls', (3, 0), 'url')}) == expected, \
            'tuple in dict path should be treated as branches'
        assert traverse_obj(_TEST_DATA, {0: ('urls', ((1, 'fail'), (0, 'url')))}) == expected, \
            'double nesting in dict path should be treated as paths'
        expected = {0: ['https://www.example.com/1', 'https://www.example.com/0']}
        assert traverse_obj(_TEST_DATA, {0: ('urls', ((1, ('fail', 'url')), (0, 'url')))}) == expected, \
            'tripple nesting in dict path should be treated as branches'
        assert traverse_obj(_TEST_DATA, {0: 'fail'}) == {}, \
            'remove `None` values when top level dict key fails'
        assert traverse_obj(_TEST_DATA, {0: 'fail'}, default=...) == {0: ...}, \
            'use `default` if key fails and `default`'
        assert traverse_obj(_TEST_DATA, {0: 'dict'}) == {}, \
            'remove empty values when dict key'
        assert traverse_obj(_TEST_DATA, {0: 'dict'}, default=...) == {0: ...}, \
            'use `default` when dict key and `default`'
        assert traverse_obj(_TEST_DATA, {0: {0: 'fail'}}) == {}, \
            'remove empty values when nested dict key fails'
        assert traverse_obj(None, {0: 'fail'}) == {}, \
            'default to dict if pruned'
        assert traverse_obj(None, {0: 'fail'}, default=...) == {0: ...}, \
            'default to dict if pruned and default is given'
        assert traverse_obj(_TEST_DATA, {0: {0: 'fail'}}, default=...) == {0: {0: ...}}, \
            'use nested `default` when nested dict key fails and `default`'
        assert traverse_obj(_TEST_DATA, {0: ('dict', ...)}) == {}, \
            'remove key if branch in dict key not successful'

    def test_traversal_default(self):
        """`default` semantics; note branching failures yield `[]`, not `default`."""
        _DEFAULT_DATA = {'None': None, 'int': 0, 'list': []}

        assert traverse_obj(_DEFAULT_DATA, 'fail') is None, \
            'default value should be `None`'
        assert traverse_obj(_DEFAULT_DATA, 'fail', 'fail', default=...) == ..., \
            'chained fails should result in default'
        assert traverse_obj(_DEFAULT_DATA, 'None', 'int') == 0, \
            'should not short cirquit on `None`'
        assert traverse_obj(_DEFAULT_DATA, 'fail', default=1) == 1, \
            'invalid dict key should result in `default`'
        assert traverse_obj(_DEFAULT_DATA, 'None', default=1) == 1, \
            '`None` is a deliberate sentinel and should become `default`'
        assert traverse_obj(_DEFAULT_DATA, ('list', 10)) is None, \
            '`IndexError` should result in `default`'
        assert traverse_obj(_DEFAULT_DATA, (..., 'fail'), default=1) == 1, \
            'if branched but not successful return `default` if defined, not `[]`'
        assert traverse_obj(_DEFAULT_DATA, (..., 'fail'), default=None) is None, \
            'if branched but not successful return `default` even if `default` is `None`'
        assert traverse_obj(_DEFAULT_DATA, (..., 'fail')) == [], \
            'if branched but not successful return `[]`, not `default`'
        assert traverse_obj(_DEFAULT_DATA, ('list', ...)) == [], \
            'if branched but object is empty return `[]`, not `default`'
        assert traverse_obj(None, ...) == [], \
            'if branched but object is `None` return `[]`, not `default`'
        assert traverse_obj({0: None}, (0, ...)) == [], \
            'if branched but state is `None` return `[]`, not `default`'

    @pytest.mark.parametrize('path', [
        ('fail', ...),
        (..., 'fail'),
        100 * ('fail',) + (...,),
        (...,) + 100 * ('fail',),
    ])
    def test_traversal_branching(self, path):
        """Interaction of branching paths with alternatives and `default`."""
        assert traverse_obj({}, path) == [], \
            'if branched but state is `None`, return `[]` (not `default`)'
        assert traverse_obj({}, 'fail', path) == [], \
            'if branching in last alternative and previous did not match, return `[]` (not `default`)'
        assert traverse_obj({0: 'x'}, 0, path) == 'x', \
            'if branching in last alternative and previous did match, return single value'
        assert traverse_obj({0: 'x'}, path, 0) == 'x', \
            'if branching in first alternative and non-branching path does match, return single value'
        assert traverse_obj({}, path, 'fail') is None, \
            'if branching in first alternative and non-branching path does not match, return `default`'

    def test_traversal_expected_type(self):
        """`expected_type` filters (when a type) or transforms (when callable)."""
        _EXPECTED_TYPE_DATA = {'str': 'str', 'int': 0}

        assert traverse_obj(_EXPECTED_TYPE_DATA, 'str', expected_type=str) == 'str', \
            'accept matching `expected_type` type'
        assert traverse_obj(_EXPECTED_TYPE_DATA, 'str', expected_type=int) is None, \
            'reject non matching `expected_type` type'
        assert traverse_obj(_EXPECTED_TYPE_DATA, 'int', expected_type=lambda x: str(x)) == '0', \
            'transform type using type function'
        assert traverse_obj(_EXPECTED_TYPE_DATA, 'str', expected_type=lambda _: 1 / 0) is None, \
            'wrap expected_type fuction in try_call'
        assert traverse_obj(_EXPECTED_TYPE_DATA, ..., expected_type=str) == ['str'], \
            'eliminate items that expected_type fails on'
        assert traverse_obj(_TEST_DATA, {0: 100, 1: 1.2}, expected_type=int) == {0: 100}, \
            'type as expected_type should filter dict values'
        assert traverse_obj(_TEST_DATA, {0: 100, 1: 1.2, 2: 'None'}, expected_type=str_or_none) == {0: '100', 1: '1.2'}, \
            'function as expected_type should transform dict values'
        assert traverse_obj(_TEST_DATA, ({0: 1.2}, 0, {int_or_none}), expected_type=int) == 1, \
            'expected_type should not filter non final dict values'
        assert traverse_obj(_TEST_DATA, {0: {0: 100, 1: 'str'}}, expected_type=int) == {0: {0: 100}}, \
            'expected_type should transform deep dict values'
        assert traverse_obj(_TEST_DATA, [({0: '...'}, {0: '...'})], expected_type=type(...)) == [{0: ...}, {0: ...}], \
            'expected_type should transform branched dict values'
        assert traverse_obj({1: {3: 4}}, [(1, 2), 3], expected_type=int) == [4], \
            'expected_type regression for type matching in tuple branching'
        assert traverse_obj(_TEST_DATA, ['data', ...], expected_type=int) == [], \
            'expected_type regression for type matching in dict result'

    def test_traversal_get_all(self):
        """`get_all=False` returns only the first branch match."""
        _GET_ALL_DATA = {'key': [0, 1, 2]}

        assert traverse_obj(_GET_ALL_DATA, ('key', ...), get_all=False) == 0, \
            'if not `get_all`, return only first matching value'
        assert traverse_obj(_GET_ALL_DATA, ..., get_all=False) == [0, 1, 2], \
            'do not overflatten if not `get_all`'

    def test_traversal_casesense(self):
        """`casesense=False` allows case-insensitive dict-key matching."""
        _CASESENSE_DATA = {
            'KeY': 'value0',
            0: {
                'KeY': 'value1',
                0: {'KeY': 'value2'},
            },
        }

        assert traverse_obj(_CASESENSE_DATA, 'key') is None, \
            'dict keys should be case sensitive unless `casesense`'
        assert traverse_obj(_CASESENSE_DATA, 'keY', casesense=False) == 'value0', \
            'allow non matching key case if `casesense`'
        assert traverse_obj(_CASESENSE_DATA, [0, ('keY',)], casesense=False) == ['value1'], \
            'allow non matching key case in branch if `casesense`'
        assert traverse_obj(_CASESENSE_DATA, [0, ([0, 'keY'],)], casesense=False) == ['value2'], \
            'allow non matching key case in branch path if `casesense`'

    def test_traversal_traverse_string(self):
        """`traverse_string=True` lets paths index into string values."""
        _TRAVERSE_STRING_DATA = {'str': 'str', 1.2: 1.2}

        assert traverse_obj(_TRAVERSE_STRING_DATA, ('str', 0)) is None, \
            'do not traverse into string if not `traverse_string`'
        assert traverse_obj(_TRAVERSE_STRING_DATA, ('str', 0), traverse_string=True) == 's', \
            'traverse into string if `traverse_string`'
        assert traverse_obj(_TRAVERSE_STRING_DATA, (1.2, 1), traverse_string=True) == '.', \
            'traverse into converted data if `traverse_string`'
        assert traverse_obj(_TRAVERSE_STRING_DATA, ('str', ...), traverse_string=True) == 'str', \
            '`...` should result in string (same value) if `traverse_string`'
        assert traverse_obj(_TRAVERSE_STRING_DATA, ('str', slice(0, None, 2)), traverse_string=True) == 'sr', \
            '`slice` should result in string if `traverse_string`'
        assert traverse_obj(_TRAVERSE_STRING_DATA, ('str', lambda i, v: i or v == "s"), traverse_string=True) == 'str', \
            'function should result in string if `traverse_string`'
        assert traverse_obj(_TRAVERSE_STRING_DATA, ('str', (0, 2)), traverse_string=True) == ['s', 'r'], \
            'branching should result in list if `traverse_string`'
        assert traverse_obj({}, (0, ...), traverse_string=True) == [], \
            'branching should result in list if `traverse_string`'
        assert traverse_obj({}, (0, lambda x, y: True), traverse_string=True) == [], \
            'branching should result in list if `traverse_string`'
        assert traverse_obj({}, (0, slice(1)), traverse_string=True) == [], \
            'branching should result in list if `traverse_string`'

    def test_traversal_re(self):
        """`re.Match` objects traverse by group number, group name, or `...`."""
        mobj = re.fullmatch(r'0(12)(?P<group>3)(4)?', '0123')
        assert traverse_obj(mobj, ...) == [x for x in mobj.groups() if x is not None], \
            '`...` on a `re.Match` should give its `groups()`'
        assert traverse_obj(mobj, lambda k, _: k in (0, 2)) == ['0123', '3'], \
            'function on a `re.Match` should give groupno, value starting at 0'
        assert traverse_obj(mobj, 'group') == '3', \
            'str key on a `re.Match` should give group with that name'
        assert traverse_obj(mobj, 2) == '3', \
            'int key on a `re.Match` should give group with that name'
        assert traverse_obj(mobj, 'gRoUp', casesense=False) == '3', \
            'str key on a `re.Match` should respect casesense'
        assert traverse_obj(mobj, 'fail') is None, \
            'failing str key on a `re.Match` should return `default`'
        assert traverse_obj(mobj, 'gRoUpS', casesense=False) is None, \
            'failing str key on a `re.Match` should return `default`'
        assert traverse_obj(mobj, 8) is None, \
            'failing int key on a `re.Match` should return `default`'
        assert traverse_obj(mobj, lambda k, _: k in (0, 'group')) == ['0123', '3'], \
            'function on a `re.Match` should give group name as well'

    def test_traversal_xml_etree(self):
        """Element trees traverse by tag, index, xpath, `@attr`, and `text()`."""
        etree = xml.etree.ElementTree.fromstring('''<?xml version="1.0"?>
        <data>
            <country name="Liechtenstein">
                <rank>1</rank>
                <year>2008</year>
                <gdppc>141100</gdppc>
                <neighbor name="Austria" direction="E"/>
                <neighbor name="Switzerland" direction="W"/>
            </country>
            <country name="Singapore">
                <rank>4</rank>
                <year>2011</year>
                <gdppc>59900</gdppc>
                <neighbor name="Malaysia" direction="N"/>
            </country>
            <country name="Panama">
                <rank>68</rank>
                <year>2011</year>
                <gdppc>13600</gdppc>
                <neighbor name="Costa Rica" direction="W"/>
                <neighbor name="Colombia" direction="E"/>
            </country>
        </data>''')
        assert traverse_obj(etree, '') == etree, \
            'empty str key should return the element itself'
        assert traverse_obj(etree, 'country') == list(etree), \
            'str key should lead all children with that tag name'
        assert traverse_obj(etree, ...) == list(etree), \
            '`...` as key should return all children'
        assert traverse_obj(etree, lambda _, x: x[0].text == '4') == [etree[1]], \
            'function as key should get element as value'
        assert traverse_obj(etree, lambda i, _: i == 1) == [etree[1]], \
            'function as key should get index as key'
        assert traverse_obj(etree, 0) == etree[0], \
            'int key should return the nth child'
        expected = ['Austria', 'Switzerland', 'Malaysia', 'Costa Rica', 'Colombia']
        assert traverse_obj(etree, './/neighbor/@name') == expected, \
            '`@<attribute>` at end of path should give that attribute'
        assert traverse_obj(etree, '//neighbor/@fail') == [None, None, None, None, None], \
            '`@<nonexistant>` at end of path should give `None`'
        assert traverse_obj(etree, ('//neighbor/@', 2)) == {'name': 'Malaysia', 'direction': 'N'}, \
            '`@` should give the full attribute dict'
        assert traverse_obj(etree, '//year/text()') == ['2008', '2011', '2011'], \
            '`text()` at end of path should give the inner text'
        assert traverse_obj(etree, '//*[@direction]/@direction') == ['E', 'W', 'N', 'W', 'E'], \
            'full Python xpath features should be supported'
        assert traverse_obj(etree, (0, '@name')) == 'Liechtenstein', \
            'special transformations should act on current element'
        assert traverse_obj(etree, ('country', 0, ..., 'text()', {int_or_none})) == [1, 2008, 141100], \
            'special transformations should act on current element'

    def test_traversal_unbranching(self):
        """`all`/`any` collapse a branched result back into a single value."""
        assert traverse_obj(_TEST_DATA, [(100, 1.2), all]) == [100, 1.2], \
            '`all` should give all results as list'
        assert traverse_obj(_TEST_DATA, [(100, 1.2), any]) == 100, \
            '`any` should give the first result'
        assert traverse_obj(_TEST_DATA, [100, all]) == [100], \
            '`all` should give list if non branching'
        assert traverse_obj(_TEST_DATA, [100, any]) == 100, \
            '`any` should give single item if non branching'
        assert traverse_obj(_TEST_DATA, [('dict', 'None', 100), all]) == [100], \
            '`all` should filter `None` and empty dict'
        assert traverse_obj(_TEST_DATA, [('dict', 'None', 100), any]) == 100, \
            '`any` should filter `None` and empty dict'
        assert traverse_obj(_TEST_DATA, [{
            'all': [('dict', 'None', 100, 1.2), all],
            'any': [('dict', 'None', 100, 1.2), any],
        }]) == {'all': [100, 1.2], 'any': 100}, \
            '`all`/`any` should apply to each dict path separately'
        assert traverse_obj(_TEST_DATA, [{
            'all': [('dict', 'None', 100, 1.2), all],
            'any': [('dict', 'None', 100, 1.2), any],
        }], get_all=False) == {'all': [100, 1.2], 'any': 100}, \
            '`all`/`any` should apply to dict regardless of `get_all`'
        assert traverse_obj(_TEST_DATA, [('dict', 'None', 100, 1.2), all, {float}]) is None, \
            '`all` should reset branching status'
        assert traverse_obj(_TEST_DATA, [('dict', 'None', 100, 1.2), any, {float}]) is None, \
            '`any` should reset branching status'
        assert traverse_obj(_TEST_DATA, [('dict', 'None', 100, 1.2), all, ..., {float}]) == [1.2], \
            '`all` should allow further branching'
        assert traverse_obj(_TEST_DATA, [('dict', 'None', 'urls', 'data'), any, ..., 'index']) == [0, 1], \
            '`any` should allow further branching'

    def test_traversal_morsel(self):
        """`http.cookies.Morsel` exposes its attributes plus key/value to traversal."""
        values = {
            'expires': 'a',
            'path': 'b',
            'comment': 'c',
            'domain': 'd',
            'max-age': 'e',
            'secure': 'f',
            'httponly': 'g',
            'version': 'h',
            'samesite': 'i',
        }
        morsel = http.cookies.Morsel()
        morsel.set('item_key', 'item_value', 'coded_value')
        morsel.update(values)
        # `key`/`value` are Morsel properties, not dict entries; expected last
        # to match the iteration order traverse_obj produces for a Morsel
        values['key'] = 'item_key'
        values['value'] = 'item_value'

        for key, value in values.items():
            assert traverse_obj(morsel, key) == value, \
                'Morsel should provide access to all values'
        assert traverse_obj(morsel, ...) == list(values.values()), \
            '`...` should yield all values'
        assert traverse_obj(morsel, lambda k, v: True) == list(values.values()), \
            'function key should yield all values'
        assert traverse_obj(morsel, [(None,), any]) == morsel, \
            'Morsel should not be implicitly changed to dict on usage'
|
||||||
|
|
||||||
|
|
||||||
|
class TestDictGet:
    """Test-suite for `yt_dlp.utils.dict_get`."""

    def test_dict_get(self):
        """Single/tuple keys, defaults, and the `skip_false_values` flag."""
        FALSE_VALUES = {
            'none': None,
            'false': False,
            'zero': 0,
            'empty_string': '',
            'empty_list': [],
        }
        # One truthy entry alongside every falsy kind
        d = {**FALSE_VALUES, 'a': 42}

        # Single-key lookup with and without an explicit default
        assert dict_get(d, 'a') == 42
        assert dict_get(d, 'b') is None
        assert dict_get(d, 'b', 42) == 42

        # A tuple of keys returns the first truthy hit, in order
        assert dict_get(d, ('a',)) == 42
        assert dict_get(d, ('b', 'a')) == 42
        assert dict_get(d, ('b', 'c', 'a', 'd')) == 42
        assert dict_get(d, ('b', 'c')) is None
        assert dict_get(d, ('b', 'c'), 42) == 42

        # Falsy values are skipped by default; `skip_false_values=False`
        # makes them count as hits
        for key, false_value in FALSE_VALUES.items():
            assert dict_get(d, ('b', 'c', key)) is None
            assert dict_get(d, ('b', 'c', key), skip_false_values=False) == false_value
|
@ -0,0 +1,54 @@
|
|||||||
|
import json
|
||||||
|
|
||||||
|
from .common import InfoExtractor
|
||||||
|
from ..utils import (
|
||||||
|
extract_attributes,
|
||||||
|
float_or_none,
|
||||||
|
get_element_html_by_id,
|
||||||
|
parse_iso8601,
|
||||||
|
)
|
||||||
|
from ..utils.traversal import traverse_obj
|
||||||
|
|
||||||
|
|
||||||
|
class FathomIE(InfoExtractor):
    """Extractor for fathom.video share pages."""

    _VALID_URL = r'https?://(?:www\.)?fathom\.video/share/(?P<id>[^/?#&]+)'
    _TESTS = [{
        'url': 'https://fathom.video/share/G9mkjkspnohVVZ_L5nrsoPycyWcB8y7s',
        'md5': '0decd5343b8f30ae268625e79a02b60f',
        'info_dict': {
            'id': '47200596',
            'ext': 'mp4',
            'title': 'eCom Inucbator - Coaching Session',
            'duration': 8125.380507,
            'timestamp': 1699048914,
            'upload_date': '20231103',
        },
    }, {
        'url': 'https://fathom.video/share/mEws3bybftHL2QLymxYEDeE21vtLxGVm',
        'md5': '4f5cb382126c22d1aba8a939f9c49690',
        'info_dict': {
            'id': '46812957',
            'ext': 'mp4',
            'title': 'Jon, Lawrence, Neman chat about practice',
            'duration': 3571.517847,
            'timestamp': 1698933600,
            'upload_date': '20231102',
        },
    }]

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        # Page state is embedded as JSON in the `data-page` attribute of the
        # #app element (Inertia.js-style payload — presumably stable; verify
        # against the live page if extraction breaks)
        props = traverse_obj(
            get_element_html_by_id('app', webpage), ({extract_attributes}, 'data-page', {json.loads}, 'props'))
        # Numeric internal call id is used as the video id, not the share slug
        video_id = str(props['call']['id'])

        return {
            'id': video_id,
            'formats': self._extract_m3u8_formats(props['call']['video_url'], video_id, 'mp4'),
            # Metadata is best-effort: missing keys are simply omitted
            **traverse_obj(props, {
                'title': ('head', 'title', {str}),
                'duration': ('duration', {float_or_none}),
                'timestamp': ('call', 'started_at', {parse_iso8601}),
            }),
        }
|
@ -0,0 +1,461 @@
|
|||||||
|
import json
|
||||||
|
import textwrap
|
||||||
|
import urllib.parse
|
||||||
|
import uuid
|
||||||
|
|
||||||
|
from .common import InfoExtractor
|
||||||
|
from ..utils import (
|
||||||
|
ExtractorError,
|
||||||
|
determine_ext,
|
||||||
|
filter_dict,
|
||||||
|
get_first,
|
||||||
|
int_or_none,
|
||||||
|
parse_iso8601,
|
||||||
|
update_url,
|
||||||
|
url_or_none,
|
||||||
|
variadic,
|
||||||
|
)
|
||||||
|
from ..utils.traversal import traverse_obj
|
||||||
|
|
||||||
|
|
||||||
|
class LoomIE(InfoExtractor):
    """Extractor for single loom.com share/embed videos.

    Talks to two Loom backends:
    - the GraphQL API (metadata, chapters, transcript, CDN URL), and
    - the campaigns/sessions URL API (raw and transcoded media URLs).
    Password-protected videos are supported via --video-password.
    """
    IE_NAME = 'loom'
    _VALID_URL = r'https?://(?:www\.)?loom\.com/(?:share|embed)/(?P<id>[\da-f]{32})'
    _EMBED_REGEX = [rf'<iframe[^>]+\bsrc=["\'](?P<url>{_VALID_URL})']
    _TESTS = [{
        # m3u8 raw-url, mp4 transcoded-url, cdn url == raw-url, json subs only
        'url': 'https://www.loom.com/share/43d05f362f734614a2e81b4694a3a523',
        'md5': 'bfc2d7e9c2e0eb4813212230794b6f42',
        'info_dict': {
            'id': '43d05f362f734614a2e81b4694a3a523',
            'ext': 'mp4',
            'title': 'A Ruler for Windows - 28 March 2022',
            'uploader': 'wILLIAM PIP',
            'upload_date': '20220328',
            'timestamp': 1648454238,
            'duration': 27,
        },
    }, {
        # webm raw-url, mp4 transcoded-url, cdn url == transcoded-url, no subs
        'url': 'https://www.loom.com/share/c43a642f815f4378b6f80a889bb73d8d',
        'md5': '70f529317be8cf880fcc2c649a531900',
        'info_dict': {
            'id': 'c43a642f815f4378b6f80a889bb73d8d',
            'ext': 'webm',
            'title': 'Lilah Nielsen Intro Video',
            'uploader': 'Lilah Nielsen',
            'upload_date': '20200826',
            'timestamp': 1598480716,
            'duration': 20,
        },
    }, {
        # m3u8 raw-url, mp4 transcoded-url, cdn url == raw-url, vtt sub and json subs
        'url': 'https://www.loom.com/share/9458bcbf79784162aa62ffb8dd66201b',
        'md5': '51737ec002969dd28344db4d60b9cbbb',
        'info_dict': {
            'id': '9458bcbf79784162aa62ffb8dd66201b',
            'ext': 'mp4',
            'title': 'Sharing screen with gpt-4',
            'description': 'Sharing screen with GPT 4 vision model and asking questions to guide through blender.',
            'uploader': 'Suneel Matham',
            'chapters': 'count:3',
            'upload_date': '20231109',
            'timestamp': 1699518978,
            'duration': 93,
        },
    }, {
        # mpd raw-url, mp4 transcoded-url, cdn url == raw-url, no subs
        'url': 'https://www.loom.com/share/24351eb8b317420289b158e4b7e96ff2',
        'info_dict': {
            'id': '24351eb8b317420289b158e4b7e96ff2',
            'ext': 'webm',
            'title': 'OMFG clown',
            'description': 'md5:285c5ee9d62aa087b7e3271b08796815',
            'uploader': 'MrPumkin B',
            'upload_date': '20210924',
            'timestamp': 1632519618,
            'duration': 210,
        },
        'params': {'skip_download': 'dash'},
    }, {
        # password-protected
        'url': 'https://www.loom.com/share/50e26e8aeb7940189dff5630f95ce1f4',
        'md5': '5cc7655e7d55d281d203f8ffd14771f7',
        'info_dict': {
            'id': '50e26e8aeb7940189dff5630f95ce1f4',
            'ext': 'mp4',
            'title': 'iOS Mobile Upload',
            'uploader': 'Simon Curran',
            'upload_date': '20200520',
            'timestamp': 1590000123,
            'duration': 35,
        },
        'params': {'videopassword': 'seniorinfants2'},
    }, {
        # embed, transcoded-url endpoint sends empty JSON response
        'url': 'https://www.loom.com/embed/ddcf1c1ad21f451ea7468b1e33917e4e',
        'md5': '8488817242a0db1cb2ad0ea522553cf6',
        'info_dict': {
            'id': 'ddcf1c1ad21f451ea7468b1e33917e4e',
            'ext': 'mp4',
            'title': 'CF Reset User\'s Password',
            'uploader': 'Aimee Heintz',
            'upload_date': '20220707',
            'timestamp': 1657216459,
            'duration': 181,
        },
        'expected_warnings': ['Failed to parse JSON'],
    }]
    _WEBPAGE_TESTS = [{
        'url': 'https://www.loom.com/community/e1229802a8694a09909e8ba0fbb6d073-pg',
        'md5': 'ec838cd01b576cf0386f32e1ae424609',
        'info_dict': {
            'id': 'e1229802a8694a09909e8ba0fbb6d073',
            'ext': 'mp4',
            'title': 'Rexie Jane Cimafranca - Founder\'s Presentation',
            'uploader': 'Rexie Cimafranca',
            'upload_date': '20230213',
            'duration': 247,
            'timestamp': 1676274030,
        },
    }]

    # Extra per-operation GraphQL variables merged into every request's payload
    _GRAPHQL_VARIABLES = {
        'GetVideoSource': {
            'acceptableMimes': ['DASH', 'M3U8', 'MP4'],
        },
    }
    # Raw GraphQL documents, keyed by operationName, sent verbatim to the API.
    # NOTE(review): the exact leading whitespace inside these literals could not
    # be recovered from the mangled source; the query semantics are unaffected.
    _GRAPHQL_QUERIES = {
        'GetVideoSSR': textwrap.dedent('''\
            query GetVideoSSR($videoId: ID!, $password: String) {
              getVideo(id: $videoId, password: $password) {
                __typename
                ... on PrivateVideo {
                  id
                  status
                  message
                  __typename
                }
                ... on VideoPasswordMissingOrIncorrect {
                  id
                  message
                  __typename
                }
                ... on RegularUserVideo {
                  id
                  __typename
                  createdAt
                  description
                  download_enabled
                  folder_id
                  is_protected
                  needs_password
                  owner {
                    display_name
                    __typename
                  }
                  privacy
                  s3_id
                  name
                  video_properties {
                    avgBitRate
                    client
                    camera_enabled
                    client_version
                    duration
                    durationMs
                    format
                    height
                    microphone_enabled
                    os
                    os_version
                    recordingClient
                    recording_type
                    recording_version
                    screen_type
                    tab_audio
                    trim_duration
                    width
                    __typename
                  }
                  playable_duration
                  source_duration
                  visibility
                }
              }
            }\n'''),
        'GetVideoSource': textwrap.dedent('''\
            query GetVideoSource($videoId: ID!, $password: String, $acceptableMimes: [CloudfrontVideoAcceptableMime]) {
              getVideo(id: $videoId, password: $password) {
                ... on RegularUserVideo {
                  id
                  nullableRawCdnUrl(acceptableMimes: $acceptableMimes, password: $password) {
                    url
                    __typename
                  }
                  __typename
                }
                __typename
              }
            }\n'''),
        'FetchVideoTranscript': textwrap.dedent('''\
            query FetchVideoTranscript($videoId: ID!, $password: String) {
              fetchVideoTranscript(videoId: $videoId, password: $password) {
                ... on VideoTranscriptDetails {
                  id
                  video_id
                  source_url
                  captions_source_url
                  __typename
                }
                ... on GenericError {
                  message
                  __typename
                }
                __typename
              }
            }\n'''),
        'FetchChapters': textwrap.dedent('''\
            query FetchChapters($videoId: ID!, $password: String) {
              fetchVideoChapters(videoId: $videoId, password: $password) {
                ... on VideoChapters {
                  video_id
                  content
                  __typename
                }
                ... on EmptyChaptersPayload {
                  content
                  __typename
                }
                ... on InvalidRequestWarning {
                  message
                  __typename
                }
                ... on Error {
                  message
                  __typename
                }
                __typename
              }
            }\n'''),
    }
    # Apollo client version string Loom's web player currently reports
    _APOLLO_GRAPHQL_VERSION = '0a1856c'

    def _call_graphql_api(self, operations, video_id, note=None, errnote=None):
        """POST one or more named GraphQL operations as a batched request.

        `operations` may be a single operation name or a list of names; the
        response is the corresponding JSON list. The video password (if any)
        is injected into every operation's variables.
        """
        password = self.get_param('videopassword')
        return self._download_json(
            'https://www.loom.com/graphql', video_id, note or 'Downloading GraphQL JSON',
            errnote or 'Failed to download GraphQL JSON', headers={
                'Accept': 'application/json',
                'Content-Type': 'application/json',
                'x-loom-request-source': f'loom_web_{self._APOLLO_GRAPHQL_VERSION}',
                'apollographql-client-name': 'web',
                'apollographql-client-version': self._APOLLO_GRAPHQL_VERSION,
            }, data=json.dumps([{
                'operationName': operation_name,
                'variables': {
                    'videoId': video_id,
                    'password': password,
                    **self._GRAPHQL_VARIABLES.get(operation_name, {}),
                },
                'query': self._GRAPHQL_QUERIES[operation_name],
            } for operation_name in variadic(operations)], separators=(',', ':')).encode())

    def _call_url_api(self, endpoint, video_id):
        """Query the sessions API (`raw-url` or `transcoded-url` endpoint).

        Returns the media URL, or None on failure (the request is non-fatal
        because either endpoint may legitimately error for some videos).
        """
        response = self._download_json(
            f'https://www.loom.com/api/campaigns/sessions/{video_id}/{endpoint}', video_id,
            f'Downloading {endpoint} JSON', f'Failed to download {endpoint} JSON', fatal=False,
            headers={'Accept': 'application/json', 'Content-Type': 'application/json'},
            data=json.dumps({
                'anonID': str(uuid.uuid4()),
                'deviceID': None,
                'force_original': False,  # HTTP error 401 if True
                'password': self.get_param('videopassword'),
            }, separators=(',', ':')).encode())
        return traverse_obj(response, ('url', {url_or_none}))

    def _extract_formats(self, video_id, metadata, gql_data):
        """Collect formats from the raw, transcoded and (deduped) CDN URLs."""
        formats = []
        # Resolution/audio hints applied only to progressive HTTP formats below
        video_properties = traverse_obj(metadata, ('video_properties', {
            'width': ('width', {int_or_none}),
            'height': ('height', {int_or_none}),
            'acodec': ('microphone_enabled', {lambda x: 'none' if x is False else None}),
        }))

        def get_formats(format_url, format_id, quality):
            # Generator: dispatches on the URL's extension (m3u8/mpd/progressive)
            if not format_url:
                return
            ext = determine_ext(format_url)
            # The query string carries auth tokens that must be propagated to segments
            query = urllib.parse.urlparse(format_url).query

            if ext == 'm3u8':
                # Extract pre-merged HLS formats to avoid buggy parsing of metadata in split playlists
                format_url = format_url.replace('-split.m3u8', '.m3u8')
                m3u8_formats = self._extract_m3u8_formats(
                    format_url, video_id, 'mp4', m3u8_id=f'hls-{format_id}', fatal=False, quality=quality)
                for fmt in m3u8_formats:
                    yield {
                        **fmt,
                        'url': update_url(fmt['url'], query=query),
                        'extra_param_to_segment_url': query,
                    }

            elif ext == 'mpd':
                dash_formats = self._extract_mpd_formats(
                    format_url, video_id, mpd_id=f'dash-{format_id}', fatal=False)
                for fmt in dash_formats:
                    yield {
                        **fmt,
                        'extra_param_to_segment_url': query,
                        'quality': quality,
                    }

            else:
                yield {
                    'url': format_url,
                    'ext': ext,
                    'format_id': f'http-{format_id}',
                    'quality': quality,
                    **video_properties,
                }

        raw_url = self._call_url_api('raw-url', video_id)
        formats.extend(get_formats(raw_url, 'raw', quality=1))  # original quality

        transcoded_url = self._call_url_api('transcoded-url', video_id)
        formats.extend(get_formats(transcoded_url, 'transcoded', quality=-1))  # transcoded quality

        cdn_url = get_first(gql_data, ('data', 'getVideo', 'nullableRawCdnUrl', 'url', {url_or_none}))
        # cdn_url is usually a dupe, but the raw-url/transcoded-url endpoints could return errors
        valid_urls = [update_url(url, query=None) for url in (raw_url, transcoded_url) if url]
        if cdn_url and update_url(cdn_url, query=None) not in valid_urls:
            formats.extend(get_formats(cdn_url, 'cdn', quality=0))  # could be original or transcoded

        return formats

    def _real_extract(self, url):
        video_id = self._match_id(url)
        metadata = get_first(
            self._call_graphql_api('GetVideoSSR', video_id, 'Downloading GraphQL metadata JSON'),
            ('data', 'getVideo', {dict})) or {}

        if metadata.get('__typename') == 'VideoPasswordMissingOrIncorrect':
            if not self.get_param('videopassword'):
                raise ExtractorError(
                    'This video is password-protected, use the --video-password option', expected=True)
            raise ExtractorError('Invalid video password', expected=True)

        # Batch the remaining three operations into a single HTTP request
        gql_data = self._call_graphql_api(['FetchChapters', 'FetchVideoTranscript', 'GetVideoSource'], video_id)
        duration = traverse_obj(metadata, ('video_properties', 'duration', {int_or_none}))

        return {
            'id': video_id,
            'duration': duration,
            'chapters': self._extract_chapters_from_description(
                get_first(gql_data, ('data', 'fetchVideoChapters', 'content', {str})), duration) or None,
            'formats': self._extract_formats(video_id, metadata, gql_data),
            'subtitles': filter_dict({
                # Loom exposes transcripts/captions without language tags; assume English
                'en': traverse_obj(gql_data, (
                    ..., 'data', 'fetchVideoTranscript',
                    ('source_url', 'captions_source_url'), {
                        'url': {url_or_none},
                    })) or None,
            }),
            **traverse_obj(metadata, {
                'title': ('name', {str}),
                'description': ('description', {str}),
                'uploader': ('owner', 'display_name', {str}),
                'timestamp': ('createdAt', {parse_iso8601}),
            }),
        }
|
||||||
|
|
||||||
|
|
||||||
|
class LoomFolderIE(InfoExtractor):
    """Extractor for loom.com share folders; recursively yields LoomIE entries."""
    IE_NAME = 'loom:folder'
    _VALID_URL = r'https?://(?:www\.)?loom\.com/share/folder/(?P<id>[\da-f]{32})'
    _TESTS = [{
        # 2 subfolders, no videos in root
        'url': 'https://www.loom.com/share/folder/997db4db046f43e5912f10dc5f817b5c',
        'playlist_mincount': 16,
        'info_dict': {
            'id': '997db4db046f43e5912f10dc5f817b5c',
            'title': 'Blending Lessons',
        },
    }, {
        # only videos, no subfolders
        'url': 'https://www.loom.com/share/folder/9a8a87f6b6f546d9a400c8e7575ff7f2',
        'playlist_mincount': 12,
        'info_dict': {
            'id': '9a8a87f6b6f546d9a400c8e7575ff7f2',
            'title': 'List A- a, i, o',
        },
    }, {
        # videos in root and empty subfolder
        'url': 'https://www.loom.com/share/folder/886e534218c24fd292e97e9563078cc4',
        'playlist_mincount': 21,
        'info_dict': {
            'id': '886e534218c24fd292e97e9563078cc4',
            'title': 'Medicare Agent Training videos',
        },
    }, {
        # videos in root and videos in subfolders
        'url': 'https://www.loom.com/share/folder/b72c4ecdf04745da9403926d80a40c38',
        'playlist_mincount': 21,
        'info_dict': {
            'id': 'b72c4ecdf04745da9403926d80a40c38',
            'title': 'Quick Altos Q & A Tutorials',
        },
    }, {
        # recursive folder extraction
        'url': 'https://www.loom.com/share/folder/8b458a94e0e4449b8df9ea7a68fafc4e',
        'playlist_count': 23,
        'info_dict': {
            'id': '8b458a94e0e4449b8df9ea7a68fafc4e',
            'title': 'Sezer Texting Guide',
        },
    }, {
        # more than 50 videos in 1 folder
        'url': 'https://www.loom.com/share/folder/e056a91d290d47ca9b00c9d1df56c463',
        'playlist_mincount': 61,
        'info_dict': {
            'id': 'e056a91d290d47ca9b00c9d1df56c463',
            'title': 'User Videos',
        },
    }, {
        # many subfolders
        'url': 'https://www.loom.com/share/folder/c2dde8cc67454f0e99031677279d8954',
        'playlist_mincount': 75,
        'info_dict': {
            'id': 'c2dde8cc67454f0e99031677279d8954',
            'title': 'Honors 1',
        },
    }, {
        'url': 'https://www.loom.com/share/folder/bae17109a68146c7803454f2893c8cf8/Edpuzzle',
        'only_matching': True,
    }]

    def _extract_folder_data(self, folder_id):
        # One API call returns both the folder's videos and its subfolders;
        # 'limit' is oversized so pagination is never needed in practice
        return self._download_json(
            f'https://www.loom.com/v1/folders/{folder_id}', folder_id,
            'Downloading folder info JSON', query={'limit': '10000'})

    def _extract_folder_entries(self, folder_id, initial_folder_data=None):
        """Yield url_results for all videos in the folder, depth-first.

        `initial_folder_data` lets the caller reuse an already-downloaded
        payload for the root folder instead of fetching it twice.
        """
        folder_data = initial_folder_data or self._extract_folder_data(folder_id)

        for video in traverse_obj(folder_data, ('videos', lambda _, v: v['id'])):
            video_id = video['id']
            yield self.url_result(
                f'https://www.loom.com/share/{video_id}', LoomIE, video_id, video.get('name'))

        # Recurse into subfolders; skip self-references to avoid infinite loops
        for subfolder_id in traverse_obj(folder_data, (
                'folders', lambda _, v: v['id'] != folder_id, 'id', {str})):
            yield from self._extract_folder_entries(subfolder_id)

    def _real_extract(self, url):
        playlist_id = self._match_id(url)
        playlist_data = self._extract_folder_data(playlist_id)

        return self.playlist_result(
            self._extract_folder_entries(playlist_id, playlist_data), playlist_id,
            traverse_obj(playlist_data, ('folder', 'name', {str.strip})))
|
@ -1,67 +1,153 @@
|
|||||||
|
import urllib.parse
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
unified_strdate,
|
filter_dict,
|
||||||
update_url_query,
|
parse_iso8601,
|
||||||
urlencode_postdata,
|
traverse_obj,
|
||||||
|
try_call,
|
||||||
|
url_or_none,
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
||||||
class MediciIE(InfoExtractor):
    """Extractor for medici.tv and edu.medici.tv.

    Uses the Satie JSON API; a prior page request sets the CSRF cookie.
    Auth is read from the 'auth._token.mAuth' cookie, so full videos need
    --cookies (the API only serves previews otherwise).
    """
    _VALID_URL = r'https?://(?:(?P<sub>www|edu)\.)?medici\.tv/[a-z]{2}/[\w.-]+/(?P<id>[^/?#&]+)'
    _TESTS = [{
        'url': 'https://www.medici.tv/en/operas/thomas-ades-the-exterminating-angel-calixto-bieito-opera-bastille-paris',
        'md5': 'd483f74e7a7a9eac0dbe152ab189050d',
        'info_dict': {
            'id': '8032',
            'ext': 'mp4',
            'title': 'Thomas Adès\'s The Exterminating Angel',
            'description': 'md5:708ae6350dadc604225b4a6e32482bab',
            'thumbnail': r're:https://.+/.+\.jpg',
            'upload_date': '20240304',
            'timestamp': 1709561766,
            'display_id': 'thomas-ades-the-exterminating-angel-calixto-bieito-opera-bastille-paris',
        },
        'expected_warnings': [r'preview'],
    }, {
        'url': 'https://edu.medici.tv/en/operas/wagner-lohengrin-paris-opera-kirill-serebrennikov-piotr-beczala-kwangchul-youn-johanni-van-oostrum',
        'md5': '4ef3f4079a6e1c617584463a9eb84f99',
        'info_dict': {
            'id': '7900',
            'ext': 'mp4',
            'title': 'Wagner\'s Lohengrin',
            'description': 'md5:a384a62937866101f86902f21752cd89',
            'thumbnail': r're:https://.+/.+\.jpg',
            'upload_date': '20231017',
            'timestamp': 1697554771,
            'display_id': 'wagner-lohengrin-paris-opera-kirill-serebrennikov-piotr-beczala-kwangchul-youn-johanni-van-oostrum',
        },
        'expected_warnings': [r'preview'],
    }, {
        'url': 'https://www.medici.tv/en/concerts/sergey-smbatyan-conducts-mansurian-chouchane-siranossian-mario-brunello',
        'md5': '9dd757e53b22b2511e85ea9ea60e4815',
        'info_dict': {
            'id': '5712',
            'ext': 'mp4',
            'title': 'Sergey Smbatyan conducts Tigran Mansurian — With Chouchane Siranossian and Mario Brunello',
            'thumbnail': r're:https://.+/.+\.jpg',
            'description': 'md5:9411fe44c874bb10e9af288c65816e41',
            'upload_date': '20200323',
            'timestamp': 1584975600,
            'display_id': 'sergey-smbatyan-conducts-mansurian-chouchane-siranossian-mario-brunello',
        },
        'expected_warnings': [r'preview'],
    }, {
        'url': 'https://www.medici.tv/en/ballets/carmen-ballet-choregraphie-de-jiri-bubenicek-teatro-dellopera-di-roma',
        'md5': '40f5e76cb701a97a6d7ba23b62c49990',
        'info_dict': {
            'id': '7857',
            'ext': 'mp4',
            'title': 'Carmen by Jiří Bubeníček after Roland Petit, music by Bizet, de Falla, Castelnuovo-Tedesco, and Bonolis',
            'thumbnail': r're:https://.+/.+\.jpg',
            'description': 'md5:0f15a15611ed748020c769873e10a8bb',
            'upload_date': '20240223',
            'timestamp': 1708707600,
            'display_id': 'carmen-ballet-choregraphie-de-jiri-bubenicek-teatro-dellopera-di-roma',
        },
        'expected_warnings': [r'preview'],
    }, {
        'url': 'https://www.medici.tv/en/documentaries/la-sonnambula-liege-2023-documentaire',
        'md5': '87ff198018ce79a34757ab0dd6f21080',
        'info_dict': {
            'id': '7513',
            'ext': 'mp4',
            'title': 'La Sonnambula',
            'thumbnail': r're:https://.+/.+\.jpg',
            'description': 'md5:0caf9109a860fd50cd018df062a67f34',
            'upload_date': '20231103',
            'timestamp': 1699010830,
            'display_id': 'la-sonnambula-liege-2023-documentaire',
        },
        'expected_warnings': [r'preview'],
    }, {
        'url': 'https://edu.medici.tv/en/masterclasses/yvonne-loriod-olivier-messiaen',
        'md5': 'fb5dcec46d76ad20fbdbaabb01da191d',
        'info_dict': {
            'id': '3024',
            'ext': 'mp4',
            'title': 'Olivier Messiaen and Yvonne Loriod, pianists and teachers',
            'thumbnail': r're:https://.+/.+\.jpg',
            'description': 'md5:aab948e2f7690214b5c28896c83f1fc1',
            'upload_date': '20150223',
            'timestamp': 1424706608,
            'display_id': 'yvonne-loriod-olivier-messiaen',
        },
        'skip': 'Requires authentication; preview starts in the middle',
    }, {
        'url': 'https://www.medici.tv/en/jazz/makaya-mccraven-la-rochelle',
        'md5': '4cc279a8b06609782747c8f50beea2b3',
        'info_dict': {
            'id': '7922',
            'ext': 'mp4',
            'title': 'NEW: Makaya McCraven in La Rochelle',
            'thumbnail': r're:https://.+/.+\.jpg',
            'description': 'md5:b5a8aaeb6993d8ccb18bde8abb8aa8d2',
            'upload_date': '20231228',
            'timestamp': 1703754863,
            'display_id': 'makaya-mccraven-la-rochelle',
        },
        'expected_warnings': [r'preview'],
    }]

    def _real_extract(self, url):
        display_id, subdomain = self._match_valid_url(url).group('id', 'sub')
        # Side effect only: the page response sets the csrftoken cookie needed by the API
        self._request_webpage(url, display_id, 'Requesting CSRF token cookie')

        # edu.medici.tv uses a separate 'edu-satie' API prefix
        subdomain = 'edu-' if subdomain == 'edu' else ''
        origin = f'https://{urllib.parse.parse.urlparse(url).hostname}' if False else f'https://{urllib.parse.urlparse(url).hostname}'

        data = self._download_json(
            f'https://api.medici.tv/{subdomain}satie/edito/movie-file/{display_id}/', display_id,
            headers=filter_dict({
                # Cookie value is URL-encoded; absent when the user is not logged in
                'Authorization': try_call(
                    lambda: urllib.parse.unquote(self._get_cookies(url)['auth._token.mAuth'].value)),
                'Device-Type': 'web',
                'Origin': origin,
                'Referer': f'{origin}/',
                'Accept': 'application/json, text/plain, */*',
            }))

        if not traverse_obj(data, ('video', 'is_full_video')) and traverse_obj(
                data, ('video', 'is_limited_by_user_access')):
            self.report_warning(
                'The full video is for subscribers only. Only previews will be downloaded. If you '
                'have used the --cookies-from-browser option, try using the --cookies option instead')

        formats, subtitles = self._extract_m3u8_formats_and_subtitles(
            data['video']['video_url'], display_id, 'mp4')

        return {
            'id': str(data['id']),
            'display_id': display_id,
            'formats': formats,
            'subtitles': subtitles,
            **traverse_obj(data, {
                'title': ('title', {str}),
                'description': ('subtitle', {str}),
                'thumbnail': ('picture', {url_or_none}),
                'timestamp': ('date_publish', {parse_iso8601}),
            }),
        }
||||||
|
@ -0,0 +1,112 @@
|
|||||||
|
import json
|
||||||
|
import urllib.parse
|
||||||
|
|
||||||
|
from .common import InfoExtractor
|
||||||
|
from ..utils import determine_ext, int_or_none, url_or_none
|
||||||
|
from ..utils.traversal import traverse_obj
|
||||||
|
|
||||||
|
|
||||||
|
class SharePointIE(InfoExtractor):
    """Extractor for videos hosted on *.sharepoint.com (share links and stream.aspx)."""
    _BASE_URL_RE = r'https?://[\w-]+\.sharepoint\.com/'
    _VALID_URL = [
        # ':v:' share links: the id is a fixed-length 46-char token
        rf'{_BASE_URL_RE}:v:/[a-z]/(?:[^/?#]+/)*(?P<id>[^/?#]{{46}})/?(?:$|[?#])',
        # stream.aspx player links: the id is the document path in the query string
        rf'{_BASE_URL_RE}(?!:v:)(?:[^/?#]+/)*stream\.aspx\?(?:[^#]+&)?id=(?P<id>[^&#]+)',
    ]
    _TESTS = [{
        'url': 'https://lut-my.sharepoint.com/:v:/g/personal/juha_eerola_student_lab_fi/EUrAmrktb4ZMhUcY9J2PqMEBD_9x_l0DyYWVgAvp-TTOMw?e=ZpQOOw',
        'md5': '2950821d0d4937a0a76373782093b435',
        'info_dict': {
            'id': '01EQRS7EKKYCNLSLLPQZGIKRYY6SOY7KGB',
            'display_id': 'EUrAmrktb4ZMhUcY9J2PqMEBD_9x_l0DyYWVgAvp-TTOMw',
            'ext': 'mp4',
            'title': 'CmvpJST',
            'duration': 54.567,
            'thumbnail': r're:https://.+/thumbnail',
            'uploader_id': '8dcec565-a956-4b91-95e5-bacfb8bc015f',
        },
    }, {
        'url': 'https://greaternyace.sharepoint.com/:v:/s/acementornydrive/ETski5eAfNVEoPRZUAyy1wEBpLgVFYWso5bjbZjfBLlPUg?e=PQUfVb',
        'md5': 'c496a01644223273bff12e93e501afd1',
        'info_dict': {
            'id': '01QI4AVTZ3ESFZPAD42VCKB5CZKAGLFVYB',
            'display_id': 'ETski5eAfNVEoPRZUAyy1wEBpLgVFYWso5bjbZjfBLlPUg',
            'ext': 'mp4',
            'title': '930103681233985536',
            'duration': 3797.326,
            'thumbnail': r're:https://.+/thumbnail',
        },
    }, {
        'url': 'https://lut-my.sharepoint.com/personal/juha_eerola_student_lab_fi/_layouts/15/stream.aspx?id=%2Fpersonal%2Fjuha_eerola_student_lab_fi%2FDocuments%2FM-DL%2FCmvpJST.mp4&ga=1&referrer=StreamWebApp.Web&referrerScenario=AddressBarCopied.view',
        'info_dict': {
            'id': '01EQRS7EKKYCNLSLLPQZGIKRYY6SOY7KGB',
            'display_id': '/personal/juha_eerola_student_lab_fi/Documents/M-DL/CmvpJST.mp4',
            'ext': 'mp4',
            'title': 'CmvpJST',
            'duration': 54.567,
            'thumbnail': r're:https://.+/thumbnail',
            'uploader_id': '8dcec565-a956-4b91-95e5-bacfb8bc015f',
        },
        'skip': 'Session cookies needed',
    }, {
        'url': 'https://izoobasisschool.sharepoint.com/:v:/g/Eaqleq8COVBIvIPvod0U27oBypC6aWOkk8ptuDpmJ6arHw',
        'only_matching': True,
    }, {
        'url': 'https://uskudaredutr-my.sharepoint.com/:v:/g/personal/songul_turkaydin_uskudar_edu_tr/EbTf-VRUIbtGuIN73tx1MuwBCHBOmNcWNqSLw61Fd2_o0g?e=n5Vkof',
        'only_matching': True,
    }, {
        'url': 'https://epam-my.sharepoint.com/:v:/p/dzmitry_tamashevich/Ec4ZOs-rATZHjFYZWVxjczEB649FCoYFKDV_x3RxZiWAGA?e=4hswgA',
        'only_matching': True,
    }, {
        'url': 'https://microsoft.sharepoint.com/:v:/t/MicrosoftSPARKRecordings-MSFTInternal/EWCyeqByVWBAt8wDvNZdV-UB0BvU5YVbKm0UHgdrUlI6dg?e=QbPck6',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        display_id = urllib.parse.unquote(self._match_id(url))
        webpage, urlh = self._download_webpage_handle(url, display_id)
        # A redirect to the Microsoft login page means the share requires auth
        if urllib.parse.urlparse(urlh.url).hostname == 'login.microsoftonline.com':
            self.raise_login_required(
                'Session cookies are required for this URL and can be passed '
                'with the --cookies option. The --cookies-from-browser option will not work', method=None)

        # Player config is embedded in the page as the g_fileInfo JS object
        video_data = self._search_json(r'g_fileInfo\s*=', webpage, 'player config', display_id)
        video_id = video_data['VroomItemId']

        # Build the manifest URL from '.transformUrl': replace its last path
        # segment with 'videomanifest' and append the cTag/access params
        parsed_url = urllib.parse.urlparse(video_data['.transformUrl'])
        base_media_url = urllib.parse.urlunparse(parsed_url._replace(
            path=urllib.parse.urljoin(f'{parsed_url.path}/', '../videomanifest'),
            query=urllib.parse.urlencode({
                **urllib.parse.parse_qs(parsed_url.query),
                'cTag': video_data['.ctag'],
                'action': 'Access',
                'part': 'index',
            }, doseq=True)))

        # Web player adds more params to the format URLs but we still get all formats without them
        formats = self._extract_mpd_formats(
            base_media_url, video_id, mpd_id='dash', query={'format': 'dash'}, fatal=False)
        for hls_type in ('hls', 'hls-vnext'):
            formats.extend(self._extract_m3u8_formats(
                base_media_url, video_id, 'mp4', m3u8_id=hls_type,
                query={'format': hls_type}, fatal=False, quality=-2))

        # The original file download, if the share permits it, is the best quality
        if video_url := traverse_obj(video_data, ('downloadUrl', {url_or_none})):
            formats.append({
                'url': video_url,
                'ext': determine_ext(video_data.get('extension') or video_data.get('name')),
                'quality': 1,
                'format_id': 'source',
                'filesize': int_or_none(video_data.get('size')),
                'vcodec': 'none' if video_data.get('isAudio') is True else None,
            })

        return {
            'id': video_id,
            'formats': formats,
            'title': video_data.get('title') or video_data.get('displayName'),
            'display_id': display_id,
            'uploader_id': video_data.get('authorId'),
            # Duration is reported in 100-nanosecond ticks; convert to seconds
            'duration': traverse_obj(video_data, (
                'MediaServiceFastMetadata', {json.loads}, 'media', 'duration', {lambda x: x / 10000000})),
            'thumbnail': url_or_none(video_data.get('thumbnailUrl')),
        }
|
@ -0,0 +1,221 @@
|
|||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import io
|
||||||
|
import math
|
||||||
|
import urllib.parse
|
||||||
|
|
||||||
|
from ._helper import InstanceStoreMixin, select_proxy
|
||||||
|
from .common import (
|
||||||
|
Features,
|
||||||
|
Request,
|
||||||
|
Response,
|
||||||
|
register_preference,
|
||||||
|
register_rh,
|
||||||
|
)
|
||||||
|
from .exceptions import (
|
||||||
|
CertificateVerifyError,
|
||||||
|
HTTPError,
|
||||||
|
IncompleteRead,
|
||||||
|
ProxyError,
|
||||||
|
SSLError,
|
||||||
|
TransportError,
|
||||||
|
)
|
||||||
|
from .impersonate import ImpersonateRequestHandler, ImpersonateTarget
|
||||||
|
from ..dependencies import curl_cffi
|
||||||
|
from ..utils import int_or_none
|
||||||
|
|
||||||
|
# Fail fast when the optional curl_cffi dependency is missing entirely
if curl_cffi is None:
    raise ImportError('curl_cffi is not installed')

curl_cffi_version = tuple(
    int_or_none(segment, default=0) for segment in curl_cffi.__version__.split('.'))

# Exactly one vetted release is supported; tag the version string so that
# diagnostics show it as unsupported before bailing out
if curl_cffi_version != (0, 5, 10):
    curl_cffi._yt_dlp__version = f'{curl_cffi.__version__} (unsupported)'
    raise ImportError('Only curl_cffi 0.5.10 is supported')

# Deferred until after the version gate, so an unsupported install never
# gets partially imported
import curl_cffi.requests
from curl_cffi.const import CurlECode, CurlOpt
|
||||||
|
|
||||||
|
|
||||||
|
class CurlCFFIResponseReader(io.IOBase):
    """File-like wrapper that incrementally drains a streaming curl_cffi response."""

    def __init__(self, response: curl_cffi.requests.Response):
        self._response = response
        self._iterator = response.iter_content()
        self._buffer = b''
        # Total payload bytes pulled from the wire so far
        self.bytes_read = 0

    def readable(self):
        return True

    def read(self, size=None):
        try:
            # Pull chunks until the request can be satisfied or the stream ends
            while self._iterator is not None and not (size is not None and len(self._buffer) >= size):
                chunk = next(self._iterator, None)
                if chunk is None:
                    self._iterator = None
                else:
                    self._buffer += chunk
                    self.bytes_read += len(chunk)

            cut = len(self._buffer) if size is None else size
            data, self._buffer = self._buffer[:cut], self._buffer[cut:]

            # "free" the curl instance once the response is fully drained.
            # curl_cffi doesn't do this automatically and only allows one open
            # response per thread
            if self._iterator is None and not self._buffer:
                self.close()
            return data
        except BaseException:
            # Any failure mid-read leaves the stream unusable; release it
            self.close()
            raise

    def close(self):
        if not self.closed:
            self._response.close()
            self._buffer = b''
        super().close()
|
||||||
|
|
||||||
|
|
||||||
|
class CurlCFFIResponseAdapter(Response):
    """Adapt a streaming curl_cffi response to the framework's Response interface."""
    fp: CurlCFFIResponseReader

    def __init__(self, response: curl_cffi.requests.Response):
        super().__init__(
            fp=CurlCFFIResponseReader(response),
            headers=response.headers,
            url=response.url,
            status=response.status_code)

    def read(self, amt=None):
        try:
            return self.fp.read(amt)
        except curl_cffi.requests.errors.RequestsError as e:
            if e.code != CurlECode.PARTIAL_FILE:
                raise TransportError(cause=e) from e
            # The transfer stopped short; report how much is missing when the
            # server advertised a Content-Length
            content_length = int_or_none(e.response.headers.get('Content-Length'))
            remaining = content_length - self.fp.bytes_read if content_length is not None else None
            raise IncompleteRead(
                partial=self.fp.bytes_read,
                expected=remaining,
                cause=e) from e
|
||||||
|
|
||||||
|
|
||||||
|
@register_rh
class CurlCFFIRH(ImpersonateRequestHandler, InstanceStoreMixin):
    """Request handler backed by curl_cffi, supporting browser impersonation.

    Sessions are cached per-cookiejar via InstanceStoreMixin.
    """
    RH_NAME = 'curl_cffi'
    _SUPPORTED_URL_SCHEMES = ('http', 'https')
    _SUPPORTED_FEATURES = (Features.NO_PROXY, Features.ALL_PROXY)
    _SUPPORTED_PROXY_SCHEMES = ('http', 'https', 'socks4', 'socks4a', 'socks5', 'socks5h')
    # Impersonate targets this handler can satisfy, mapped to the concrete
    # curl_cffi browser profiles that implement them
    _SUPPORTED_IMPERSONATE_TARGET_MAP = {
        ImpersonateTarget('chrome', '110', 'windows', '10'): curl_cffi.requests.BrowserType.chrome110,
        ImpersonateTarget('chrome', '107', 'windows', '10'): curl_cffi.requests.BrowserType.chrome107,
        ImpersonateTarget('chrome', '104', 'windows', '10'): curl_cffi.requests.BrowserType.chrome104,
        ImpersonateTarget('chrome', '101', 'windows', '10'): curl_cffi.requests.BrowserType.chrome101,
        ImpersonateTarget('chrome', '100', 'windows', '10'): curl_cffi.requests.BrowserType.chrome100,
        ImpersonateTarget('chrome', '99', 'windows', '10'): curl_cffi.requests.BrowserType.chrome99,
        ImpersonateTarget('edge', '101', 'windows', '10'): curl_cffi.requests.BrowserType.edge101,
        ImpersonateTarget('edge', '99', 'windows', '10'): curl_cffi.requests.BrowserType.edge99,
        ImpersonateTarget('safari', '15.5', 'macos', '12'): curl_cffi.requests.BrowserType.safari15_5,
        ImpersonateTarget('safari', '15.3', 'macos', '11'): curl_cffi.requests.BrowserType.safari15_3,
        ImpersonateTarget('chrome', '99', 'android', '12'): curl_cffi.requests.BrowserType.chrome99_android,
    }

    def _create_instance(self, cookiejar=None):
        # Invoked by InstanceStoreMixin when no cached session exists for this cookiejar
        return curl_cffi.requests.Session(cookies=cookiejar)

    def _check_extensions(self, extensions):
        super()._check_extensions(extensions)
        # Consume the extensions this handler understands; 'impersonate' was
        # already validated by ImpersonateRequestHandler._check_extensions
        extensions.pop('impersonate', None)
        extensions.pop('cookiejar', None)
        extensions.pop('timeout', None)

    def _send(self, request: Request):
        """Perform the request via curl_cffi and return a streaming response adapter."""
        max_redirects_exceeded = False
        # Don't attach the shared cookiejar when the request carries an
        # explicit Cookie header (avoids sending cookies from both sources)
        session: curl_cffi.requests.Session = self._get_instance(
            cookiejar=self._get_cookiejar(request) if 'cookie' not in request.headers else None)

        if self.verbose:
            session.curl.setopt(CurlOpt.VERBOSE, 1)

        proxies = self._get_proxies(request)
        if 'no' in proxies:
            session.curl.setopt(CurlOpt.NOPROXY, proxies['no'])
            proxies.pop('no', None)

        # curl doesn't support per protocol proxies, so we select the one that matches the request protocol
        proxy = select_proxy(request.url, proxies=proxies)
        if proxy:
            session.curl.setopt(CurlOpt.PROXY, proxy)
            scheme = urllib.parse.urlparse(request.url).scheme.lower()
            if scheme != 'http':
                # Enable HTTP CONNECT for HTTPS urls.
                # Don't use CONNECT for http for compatibility with urllib behaviour.
                # See: https://curl.se/libcurl/c/CURLOPT_HTTPPROXYTUNNEL.html
                session.curl.setopt(CurlOpt.HTTPPROXYTUNNEL, 1)

        headers = self._get_impersonate_headers(request)

        if self._client_cert:
            session.curl.setopt(CurlOpt.SSLCERT, self._client_cert['client_certificate'])
            client_certificate_key = self._client_cert.get('client_certificate_key')
            client_certificate_password = self._client_cert.get('client_certificate_password')
            if client_certificate_key:
                session.curl.setopt(CurlOpt.SSLKEY, client_certificate_key)
            if client_certificate_password:
                session.curl.setopt(CurlOpt.KEYPASSWD, client_certificate_password)

        timeout = self._calculate_timeout(request)

        # set CURLOPT_LOW_SPEED_LIMIT and CURLOPT_LOW_SPEED_TIME to act as a read timeout. [1]
        # curl_cffi does not currently do this. [2]
        # Note: CURLOPT_LOW_SPEED_TIME is in seconds, so we need to round up to the nearest second. [3]
        # [1] https://unix.stackexchange.com/a/305311
        # [2] https://github.com/yifeikong/curl_cffi/issues/156
        # [3] https://curl.se/libcurl/c/CURLOPT_LOW_SPEED_TIME.html
        session.curl.setopt(CurlOpt.LOW_SPEED_LIMIT, 1)  # 1 byte per second
        session.curl.setopt(CurlOpt.LOW_SPEED_TIME, math.ceil(timeout))

        try:
            curl_response = session.request(
                method=request.method,
                url=request.url,
                headers=headers,
                data=request.data,
                verify=self.verify,
                max_redirects=5,
                timeout=timeout,
                impersonate=self._SUPPORTED_IMPERSONATE_TARGET_MAP.get(
                    self._get_request_target(request)),
                interface=self.source_address,
                stream=True
            )
        except curl_cffi.requests.errors.RequestsError as e:
            # Translate curl error codes into the framework's exception hierarchy
            if e.code == CurlECode.PEER_FAILED_VERIFICATION:
                raise CertificateVerifyError(cause=e) from e

            elif e.code == CurlECode.SSL_CONNECT_ERROR:
                raise SSLError(cause=e) from e

            elif e.code == CurlECode.TOO_MANY_REDIRECTS:
                # Keep the last response; the flag lets HTTPError below
                # report the redirect loop
                max_redirects_exceeded = True
                curl_response = e.response

            elif e.code == CurlECode.PROXY:
                raise ProxyError(cause=e) from e
            else:
                raise TransportError(cause=e) from e

        response = CurlCFFIResponseAdapter(curl_response)

        if not 200 <= response.status < 300:
            raise HTTPError(response, redirect_loop=max_redirects_exceeded)

        return response
|
||||||
|
|
||||||
|
|
||||||
|
@register_preference(CurlCFFIRH)
def curl_cffi_preference(rh, request):
    """Rank the curl_cffi handler below the default request handlers."""
    return -100
|
@ -0,0 +1,141 @@
|
|||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import re
|
||||||
|
from abc import ABC
|
||||||
|
from dataclasses import dataclass
|
||||||
|
from typing import Any
|
||||||
|
|
||||||
|
from .common import RequestHandler, register_preference
|
||||||
|
from .exceptions import UnsupportedRequest
|
||||||
|
from ..compat.types import NoneType
|
||||||
|
from ..utils import classproperty, join_nonempty
|
||||||
|
from ..utils.networking import std_headers
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass(order=True, frozen=True)
class ImpersonateTarget:
    """
    A browser-impersonation target.

    Parameters:
    @param client: the client to impersonate
    @param version: the client version to impersonate
    @param os: the client OS to impersonate
    @param os_version: the client OS version to impersonate

    A field left as None acts as a wildcard and matches any value.
    """
    client: str | None = None
    version: str | None = None
    os: str | None = None
    os_version: str | None = None

    def __post_init__(self):
        # A version is meaningless without its parent field being set
        if self.version and not self.client:
            raise ValueError('client is required if version is set')
        if self.os_version and not self.os:
            raise ValueError('os is required if os_version is set')

    def __contains__(self, target: ImpersonateTarget):
        if not isinstance(target, ImpersonateTarget):
            return False
        # Each field pair matches when either side is a wildcard (None)
        # or both hold the same value
        return all(
            ours is None or theirs is None or ours == theirs
            for ours, theirs in (
                (self.client, target.client),
                (self.version, target.version),
                (self.os, target.os),
                (self.os_version, target.os_version),
            ))

    def __str__(self):
        return f'{join_nonempty(self.client, self.version)}:{join_nonempty(self.os, self.os_version)}'.rstrip(':')

    @classmethod
    def from_str(cls, target: str):
        """Parse a 'client[-version][:os[-os_version]]' string into a target."""
        mobj = re.fullmatch(r'(?:(?P<client>[^:-]+)(?:-(?P<version>[^:-]+))?)?(?::(?:(?P<os>[^:-]+)(?:-(?P<os_version>[^:-]+))?)?)?', target)
        if not mobj:
            raise ValueError(f'Invalid impersonate target "{target}"')
        return cls(**mobj.groupdict())
|
||||||
|
|
||||||
|
|
||||||
|
class ImpersonateRequestHandler(RequestHandler, ABC):
    """
    Base class for request handlers that support browser impersonation.

    Provides validation of the impersonate extension for use in
    _check_extensions.

    An impersonate target is described by client, version, os and os_version;
    see the ImpersonateTarget class for details.

    The following may be defined:
     - `_SUPPORTED_IMPERSONATE_TARGET_MAP`: a dict mapping supported targets to custom object.
       Any Request with an impersonate target not in this list will raise an UnsupportedRequest.
       Set to None to disable this check.
       Note: Entries are in order of preference

    Parameters:
    @param impersonate: the default impersonate target to use for requests.
                        Set to None to disable impersonation.
    """
    _SUPPORTED_IMPERSONATE_TARGET_MAP: dict[ImpersonateTarget, Any] = {}

    def __init__(self, *, impersonate: ImpersonateTarget = None, **kwargs):
        super().__init__(**kwargs)
        self.impersonate = impersonate

    def _check_impersonate_target(self, target: ImpersonateTarget):
        assert isinstance(target, (ImpersonateTarget, NoneType))
        # Nothing to validate when no target is requested, or when this
        # handler advertises no targets at all
        if target is None or not self.supported_targets:
            return
        if not self.is_supported_target(target):
            raise UnsupportedRequest(f'Unsupported impersonate target: {target}')

    def _check_extensions(self, extensions):
        super()._check_extensions(extensions)
        if 'impersonate' in extensions:
            self._check_impersonate_target(extensions.get('impersonate'))

    def _validate(self, request):
        super()._validate(request)
        self._check_impersonate_target(self.impersonate)

    def _resolve_target(self, target: ImpersonateTarget | None):
        """Resolve a (possibly partial) target to the first matching supported target."""
        if target is None:
            return None
        for candidate in self.supported_targets:
            if target in candidate:
                if self.verbose:
                    self._logger.stdout(
                        f'{self.RH_NAME}: resolved impersonate target {target} to {candidate}')
                return candidate
        return None

    @classproperty
    def supported_targets(self) -> tuple[ImpersonateTarget, ...]:
        return tuple(self._SUPPORTED_IMPERSONATE_TARGET_MAP.keys())

    def is_supported_target(self, target: ImpersonateTarget):
        assert isinstance(target, ImpersonateTarget)
        return self._resolve_target(target) is not None

    def _get_request_target(self, request):
        """Get the resolved impersonate target for a request, if any."""
        # A per-request extension takes priority over the handler default
        return self._resolve_target(request.extensions.get('impersonate') or self.impersonate)

    def _get_impersonate_headers(self, request):
        headers = self._merge_headers(request.headers)
        if self._get_request_target(request) is not None:
            # remove all headers present in std_headers
            # todo: change this to not depend on std_headers
            for name, value in std_headers.items():
                if headers.get(name) == value:
                    del headers[name]
        return headers
|
||||||
|
|
||||||
|
|
||||||
|
@register_preference(ImpersonateRequestHandler)
def impersonate_preference(rh, request):
    """Strongly prefer impersonation-capable handlers when impersonation is requested."""
    return 1000 if (request.extensions.get('impersonate') or rh.impersonate) else 0
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue