mirror of https://github.com/yt-dlp/yt-dlp.git

[compat, networking] Deprecate old functions (#2861)
Authored by: coletdjnz, pukkandan
commit 3d2623a898 (parent: 227bf1a33b)
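
Every hunk in this commit applies the same mechanical migration, so it may help to see the before/after shape in one place. The following sketch is illustrative only (it is not part of the commit); `url`, `data` and `headers` stand in for whatever a call site actually uses. Old style, built on the now-deprecated helpers:

    import urllib.error
    from yt_dlp.utils import sanitized_Request

    try:
        res = ydl.urlopen(sanitized_Request(url, data, headers))
        final_url = res.geturl()                    # redirect-resolved URL
        length = res.info().get('Content-Length')  # response headers
    except urllib.error.HTTPError as err:
        if err.code == 404:
            pass  # handle missing resource

New style, using the networking layer this commit migrates to:

    from yt_dlp.networking import Request
    from yt_dlp.networking.exceptions import HTTPError

    try:
        res = ydl.urlopen(Request(url, data=data, headers=headers))
        final_url = res.url                           # replaces geturl()
        length = res.headers.get('Content-Length')   # replaces info()
    except HTTPError as err:
        if err.status == 404:                         # replaces err.code
            pass  # handle missing resource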
@@ -160,7 +160,7 @@ def generator(test_case, tname):
                             force_generic_extractor=params.get('force_generic_extractor', False))
                     except (DownloadError, ExtractorError) as err:
                         # Check if the exception is not a network related one
-                        if not isinstance(err.exc_info[1], (TransportError, UnavailableVideoError)) or (isinstance(err.exc_info[1], HTTPError) and err.exc_info[1].code == 503):
+                        if not isinstance(err.exc_info[1], (TransportError, UnavailableVideoError)) or (isinstance(err.exc_info[1], HTTPError) and err.exc_info[1].status == 503):
                             err.msg = f'{getattr(err, "msg", err)} ({tname})'
                             raise
@@ -1057,14 +1057,15 @@ class TestYoutubeDLNetworking:
         urllib_req = urllib.request.Request('http://foo.bar', data=b'test', method='PUT', headers={'X-Test': '1'})
         urllib_req.add_unredirected_header('Cookie', 'bob=bob')
         urllib_req.timeout = 2
-        req = ydl.urlopen(urllib_req).request
-        assert req.url == urllib_req.get_full_url()
-        assert req.data == urllib_req.data
-        assert req.method == urllib_req.get_method()
-        assert 'X-Test' in req.headers
-        assert 'Cookie' in req.headers
-        assert req.extensions.get('timeout') == 2
+        with warnings.catch_warnings():
+            warnings.simplefilter('ignore', category=DeprecationWarning)
+            req = ydl.urlopen(urllib_req).request
+            assert req.url == urllib_req.get_full_url()
+            assert req.data == urllib_req.data
+            assert req.method == urllib_req.get_method()
+            assert 'X-Test' in req.headers
+            assert 'Cookie' in req.headers
+            assert req.extensions.get('timeout') == 2

         with pytest.raises(AssertionError):
             ydl.urlopen(None)

@@ -1362,7 +1363,9 @@ class TestResponse:

     def test_compat(self):
         res = Response(io.BytesIO(b''), url='test://', status=404, headers={'test': 'test'})
-        assert res.code == res.getcode() == res.status
-        assert res.geturl() == res.url
-        assert res.info() is res.headers
-        assert res.getheader('test') == res.get_header('test')
+        with warnings.catch_warnings():
+            warnings.simplefilter('ignore', category=DeprecationWarning)
+            assert res.code == res.getcode() == res.status
+            assert res.geturl() == res.url
+            assert res.info() is res.headers
+            assert res.getheader('test') == res.get_header('test')
@@ -8,11 +8,13 @@ import pytest

 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

+import contextlib
 import io
 import platform
 import random
 import ssl
 import urllib.error
+import warnings

 from yt_dlp.cookies import YoutubeDLCookieJar
 from yt_dlp.dependencies import certifi
@@ -202,20 +204,58 @@ class TestNetworkingExceptions:
         assert isinstance(error, HTTPError)
         assert isinstance(error, urllib.error.HTTPError)

-        assert error.code == 403
-        assert error.getcode() == 403
-        assert error.hdrs is error.response.headers
-        assert error.info() is error.response.headers
-        assert error.headers is error.response.headers
-        assert error.filename == error.response.url
-        assert error.url == error.response.url
-        assert error.geturl() == error.response.url
+        @contextlib.contextmanager
+        def raises_deprecation_warning():
+            with warnings.catch_warnings(record=True) as w:
+                warnings.simplefilter('always')
+                yield
+
+                if len(w) == 0:
+                    pytest.fail('Did not raise DeprecationWarning')
+                if len(w) > 1:
+                    pytest.fail(f'Raised multiple warnings: {w}')
+
+                if not issubclass(w[-1].category, DeprecationWarning):
+                    pytest.fail(f'Expected DeprecationWarning, got {w[-1].category}')
+                w.clear()
+
+        with raises_deprecation_warning():
+            assert error.code == 403
+
+        with raises_deprecation_warning():
+            assert error.getcode() == 403
+
+        with raises_deprecation_warning():
+            assert error.hdrs is error.response.headers
+
+        with raises_deprecation_warning():
+            assert error.info() is error.response.headers
+
+        with raises_deprecation_warning():
+            assert error.headers is error.response.headers
+
+        with raises_deprecation_warning():
+            assert error.filename == error.response.url
+
+        with raises_deprecation_warning():
+            assert error.url == error.response.url
+
+        with raises_deprecation_warning():
+            assert error.geturl() == error.response.url

         # Passthrough file operations
-        assert error.read() == b'test'
-        assert not error.closed
-        # Technically Response operations are also passed through, which should not be used.
-        assert error.get_header('test') == 'test'
+        with raises_deprecation_warning():
+            assert error.read() == b'test'
+
+        with raises_deprecation_warning():
+            assert not error.closed
+
+        with raises_deprecation_warning():
+            # Technically Response operations are also passed through, which should not be used.
+            assert error.get_header('test') == 'test'
+
+        # Should not raise a warning
+        error.close()

     @pytest.mark.skipif(
         platform.python_implementation() == 'PyPy', reason='garbage collector works differently in pypy')
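The rewritten tests above assert not only that the legacy accessors still return the right values, but that each one now emits exactly one DeprecationWarning. A minimal standalone version of that pattern (a sketch; `legacy_call` is a hypothetical callable):

    import warnings

    def assert_deprecated(legacy_call):
        # Record every warning raised while legacy_call() runs
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter('always')  # report duplicates too
            result = legacy_call()
        assert len(w) == 1, f'expected exactly one warning, got {len(w)}'
        assert issubclass(w[-1].category, DeprecationWarning)
        return result

Conversely, tests that merely exercise the old accessors silence the noise with warnings.simplefilter('ignore', category=DeprecationWarning), as in the TestYoutubeDLNetworking hunk above.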
@@ -33,7 +33,7 @@ from .extractor import gen_extractor_classes, get_info_extractor
 from .extractor.common import UnsupportedURLIE
 from .extractor.openload import PhantomJSwrapper
 from .minicurses import format_text
-from .networking import Request, RequestDirector
+from .networking import HEADRequest, Request, RequestDirector
 from .networking.common import _REQUEST_HANDLERS
 from .networking.exceptions import (
     HTTPError,
@@ -41,6 +41,7 @@ from .networking.exceptions import (
     RequestError,
     SSLError,
     _CompatHTTPError,
+    network_exceptions,
 )
 from .plugins import directories as plugin_directories
 from .postprocessor import _PLUGIN_CLASSES as plugin_pps
@@ -80,7 +81,6 @@ from .utils import (
     ExtractorError,
     FormatSorter,
     GeoRestrictedError,
-    HEADRequest,
     ISO3166Utils,
     LazyList,
     MaxDownloadsReached,
@@ -122,7 +122,6 @@ from .utils import (
     locked_file,
     make_archive_id,
     make_dir,
-    network_exceptions,
     number_of_digits,
     orderedSet,
     orderedSet_from_options,
@@ -135,7 +134,6 @@ from .utils import (
     sanitize_filename,
     sanitize_path,
     sanitize_url,
-    std_headers,
     str_or_none,
     strftime_or_none,
     subtitles_filename,
@@ -158,6 +156,7 @@ from .utils.networking import (
     HTTPHeaderDict,
     clean_headers,
     clean_proxies,
+    std_headers,
 )
 from .version import CHANNEL, RELEASE_GIT_HEAD, VARIANT, __version__

@@ -4019,6 +4018,9 @@ class YoutubeDL:
         if isinstance(req, str):
             req = Request(req)
         elif isinstance(req, urllib.request.Request):
+            self.deprecation_warning(
+                'Passing a urllib.request.Request object to YoutubeDL.urlopen() is deprecated. '
+                'Use yt_dlp.networking.common.Request instead.')
             req = urllib_req_to_req(req)
         assert isinstance(req, Request)

@@ -4242,7 +4244,7 @@ class YoutubeDL:
                 ret.append((thumb_filename, thumb_filename_final))
                 t['filepath'] = thumb_filename
             except network_exceptions as err:
-                if isinstance(err, urllib.error.HTTPError) and err.code == 404:
+                if isinstance(err, HTTPError) and err.status == 404:
                     self.to_screen(f'[info] {thumb_display_id.title()} does not exist')
                 else:
                     self.report_warning(f'Unable to download {thumb_display_id}: {err}')
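With the urlopen() change above, YoutubeDL still accepts a urllib.request.Request but converts it via urllib_req_to_req() after emitting a deprecation warning. A usage sketch under those assumptions (the URL and header are placeholders):

    import urllib.request
    from yt_dlp import YoutubeDL
    from yt_dlp.networking import Request

    ydl = YoutubeDL()

    # Deprecated: still works, but now warns and is converted internally
    ydl.urlopen(urllib.request.Request('https://example.com', headers={'X-Test': '1'}))

    # Preferred
    ydl.urlopen(Request('https://example.com', headers={'X-Test': '1'}))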
@@ -57,11 +57,11 @@ from .utils import (
     read_stdin,
     render_table,
     setproctitle,
-    std_headers,
     traverse_obj,
     variadic,
     write_string,
 )
+from .utils.networking import std_headers
 from .YoutubeDL import YoutubeDL

 _IN_CLI = False
@@ -8,7 +8,6 @@ compat_str = str

 compat_b64decode = base64.b64decode

-compat_HTTPError = urllib.error.HTTPError
 compat_urlparse = urllib.parse
 compat_parse_qs = urllib.parse.parse_qs
 compat_urllib_parse_unquote = urllib.parse.unquote
@@ -70,6 +70,7 @@ compat_html_parser_HTMLParseError = compat_HTMLParseError
 compat_HTMLParser = compat_html_parser_HTMLParser = html.parser.HTMLParser
 compat_http_client = http.client
 compat_http_server = http.server
+compat_HTTPError = urllib.error.HTTPError
 compat_input = input
 compat_integer_types = (int, )
 compat_itertools_count = itertools.count
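The two compat hunks above move compat_HTTPError from the actively-deprecated module (_deprecated.py) into the legacy shims (_legacy.py); the name stays importable, it is simply no longer the blessed spelling. Assuming the usual compat re-export behaviour, the practical difference for callers is only which import they should write:

    # Kept working for backwards compatibility (legacy shim):
    from yt_dlp.compat import compat_HTTPError   # alias of urllib.error.HTTPError

    # What new code should use instead:
    from yt_dlp.networking.exceptions import HTTPError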
@@ -10,6 +10,7 @@ import uuid

 from .fragment import FragmentFD
 from ..compat import functools
+from ..networking import Request
 from ..postprocessor.ffmpeg import EXT_TO_OUT_FORMATS, FFmpegPostProcessor
 from ..utils import (
     Popen,
@@ -25,7 +26,6 @@ from ..utils import (
     encodeFilename,
     find_available_port,
     remove_end,
-    sanitized_Request,
     traverse_obj,
 )

@@ -357,13 +357,12 @@ class Aria2cFD(ExternalFD):
             'method': method,
             'params': [f'token:{rpc_secret}', *params],
         }).encode('utf-8')
-        request = sanitized_Request(
+        request = Request(
             f'http://localhost:{rpc_port}/jsonrpc',
             data=d, headers={
                 'Content-Type': 'application/json',
                 'Content-Length': f'{len(d)}',
-                'Ytdl-request-proxy': '__noproxy__',
-            })
+            }, proxies={'all': None})
         with self.ydl.urlopen(request) as r:
             resp = json.load(r)
         assert resp.get('id') == sanitycheck, 'Something went wrong with RPC server'
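Note the proxy change in the aria2c hunk: the private 'Ytdl-request-proxy: __noproxy__' header is replaced by the Request-level proxies mapping, where None disables proxying for that single request. A sketch under those assumptions (`rpc_port` and `payload` are placeholders for the values the downloader computes):

    from yt_dlp.networking import Request

    # Force a direct connection for this one request, regardless of --proxy:
    req = Request(
        f'http://localhost:{rpc_port}/jsonrpc', data=payload,
        headers={'Content-Type': 'application/json'},
        proxies={'all': None})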
@@ -3,11 +3,11 @@ import io
 import itertools
 import struct
 import time
-import urllib.error
 import urllib.parse

 from .fragment import FragmentFD
 from ..compat import compat_etree_fromstring
+from ..networking.exceptions import HTTPError
 from ..utils import fix_xml_ampersands, xpath_text


@@ -312,7 +312,7 @@ class F4mFD(FragmentFD):
         self.to_screen('[%s] Downloading f4m manifest' % self.FD_NAME)

         urlh = self.ydl.urlopen(self._prepare_url(info_dict, man_url))
-        man_url = urlh.geturl()
+        man_url = urlh.url
         # Some manifests may be malformed, e.g. prosiebensat1 generated manifests
         # (see https://github.com/ytdl-org/youtube-dl/issues/6215#issuecomment-121704244
         # and https://github.com/ytdl-org/youtube-dl/issues/7823)
@@ -407,8 +407,8 @@ class F4mFD(FragmentFD):
                     if box_type == b'mdat':
                         self._append_fragment(ctx, box_data)
                         break
-            except urllib.error.HTTPError as err:
-                if live and (err.code == 404 or err.code == 410):
+            except HTTPError as err:
+                if live and (err.status == 404 or err.status == 410):
                     # We didn't keep up with the live window. Continue
                     # with the next available fragment.
                     msg = 'Fragment %d unavailable' % frag_i
@@ -1,24 +1,19 @@
 import concurrent.futures
 import contextlib
-import http.client
 import json
 import math
 import os
 import struct
 import time
-import urllib.error

 from .common import FileDownloader
 from .http import HttpFD
 from ..aes import aes_cbc_decrypt_bytes, unpad_pkcs7
 from ..compat import compat_os_name
-from ..utils import (
-    DownloadError,
-    RetryManager,
-    encodeFilename,
-    sanitized_Request,
-    traverse_obj,
-)
+from ..networking import Request
+from ..networking.exceptions import HTTPError, IncompleteRead
+from ..utils import DownloadError, RetryManager, encodeFilename, traverse_obj
+from ..utils.networking import HTTPHeaderDict


 class HttpQuietDownloader(HttpFD):
@@ -75,7 +70,7 @@ class FragmentFD(FileDownloader):

     def _prepare_url(self, info_dict, url):
         headers = info_dict.get('http_headers')
-        return sanitized_Request(url, None, headers) if headers else url
+        return Request(url, None, headers) if headers else url

     def _prepare_and_start_frag_download(self, ctx, info_dict):
         self._prepare_frag_download(ctx)
@@ -457,7 +452,7 @@ class FragmentFD(FileDownloader):

             frag_index = ctx['fragment_index'] = fragment['frag_index']
             ctx['last_error'] = None
-            headers = info_dict.get('http_headers', {}).copy()
+            headers = HTTPHeaderDict(info_dict.get('http_headers'))
             byte_range = fragment.get('byte_range')
             if byte_range:
                 headers['Range'] = 'bytes=%d-%d' % (byte_range['start'], byte_range['end'] - 1)
@@ -477,7 +472,7 @@ class FragmentFD(FileDownloader):
                     if not self._download_fragment(
                             ctx, fragment['url'], info_dict, headers, info_dict.get('request_data')):
                         return
-                except (urllib.error.HTTPError, http.client.IncompleteRead) as err:
+                except (HTTPError, IncompleteRead) as err:
                     retry.error = err
                     continue
                 except DownloadError:  # has own retry settings
@@ -75,7 +75,7 @@ class HlsFD(FragmentFD):
         self.to_screen('[%s] Downloading m3u8 manifest' % self.FD_NAME)

         urlh = self.ydl.urlopen(self._prepare_url(info_dict, man_url))
-        man_url = urlh.geturl()
+        man_url = urlh.url
         s = urlh.read().decode('utf-8', 'ignore')

         can_download, message = self.can_download(s, info_dict, self.params.get('allow_unplayable_formats')), None
@@ -1,10 +1,14 @@
 import os
 import random
 import time
-import urllib.error

 from .common import FileDownloader
-from ..networking.exceptions import CertificateVerifyError, TransportError
+from ..networking import Request
+from ..networking.exceptions import (
+    CertificateVerifyError,
+    HTTPError,
+    TransportError,
+)
 from ..utils import (
     ContentTooShortError,
     RetryManager,
@@ -14,10 +18,10 @@ from ..utils import (
     encodeFilename,
     int_or_none,
     parse_http_range,
-    sanitized_Request,
     try_call,
     write_xattr,
 )
+from ..utils.networking import HTTPHeaderDict


 class HttpFD(FileDownloader):
@@ -36,10 +40,7 @@ class HttpFD(FileDownloader):
         ctx.stream = None

         # Disable compression
-        headers = {'Accept-Encoding': 'identity'}
-        add_headers = info_dict.get('http_headers')
-        if add_headers:
-            headers.update(add_headers)
+        headers = HTTPHeaderDict({'Accept-Encoding': 'identity'}, info_dict.get('http_headers'))

         is_test = self.params.get('test', False)
         chunk_size = self._TEST_FILE_SIZE if is_test else (
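The hunk above collapses a manual three-step header merge into a single HTTPHeaderDict(...) construction. Assuming the merge semantics the new code relies on — positional mappings merged left to right, None arguments skipped, keys matched case-insensitively — a small sketch:

    from yt_dlp.utils.networking import HTTPHeaderDict

    base = {'Accept-Encoding': 'identity'}
    override = {'accept-encoding': 'gzip'}

    headers = HTTPHeaderDict(base, override, None)  # None is ignored
    assert headers['Accept-Encoding'] == 'gzip'     # later mapping wins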
@@ -110,10 +111,10 @@ class HttpFD(FileDownloader):
             if try_call(lambda: range_end >= ctx.content_len):
                 range_end = ctx.content_len - 1

-            request = sanitized_Request(url, request_data, headers)
+            request = Request(url, request_data, headers)
             has_range = range_start is not None
             if has_range:
-                request.add_header('Range', f'bytes={int(range_start)}-{int_or_none(range_end) or ""}')
+                request.headers['Range'] = f'bytes={int(range_start)}-{int_or_none(range_end) or ""}'
             # Establish connection
             try:
                 ctx.data = self.ydl.urlopen(request)
@@ -144,17 +145,17 @@ class HttpFD(FileDownloader):
                         self.report_unable_to_resume()
                         ctx.resume_len = 0
                         ctx.open_mode = 'wb'
-                ctx.data_len = ctx.content_len = int_or_none(ctx.data.info().get('Content-length', None))
-            except urllib.error.HTTPError as err:
-                if err.code == 416:
+                ctx.data_len = ctx.content_len = int_or_none(ctx.data.headers.get('Content-length', None))
+            except HTTPError as err:
+                if err.status == 416:
                     # Unable to resume (requested range not satisfiable)
                     try:
                         # Open the connection again without the range header
                         ctx.data = self.ydl.urlopen(
-                            sanitized_Request(url, request_data, headers))
-                        content_length = ctx.data.info()['Content-Length']
-                    except urllib.error.HTTPError as err:
-                        if err.code < 500 or err.code >= 600:
+                            Request(url, request_data, headers))
+                        content_length = ctx.data.headers['Content-Length']
+                    except HTTPError as err:
+                        if err.status < 500 or err.status >= 600:
                             raise
                         else:
                             # Examine the reported length
@@ -182,7 +183,7 @@ class HttpFD(FileDownloader):
                         ctx.resume_len = 0
                         ctx.open_mode = 'wb'
                         return
-                elif err.code < 500 or err.code >= 600:
+                elif err.status < 500 or err.status >= 600:
                     # Unexpected HTTP error
                     raise
                 raise RetryDownload(err)
@@ -198,9 +199,9 @@ class HttpFD(FileDownloader):
                 ctx.stream = None

         def download():
-            data_len = ctx.data.info().get('Content-length')
+            data_len = ctx.data.headers.get('Content-length')

-            if ctx.data.info().get('Content-encoding'):
+            if ctx.data.headers.get('Content-encoding'):
                 # Content-encoding is present, Content-length is not reliable anymore as we are
                 # doing auto decompression. (See: https://github.com/yt-dlp/yt-dlp/pull/6176)
                 data_len = None
@@ -345,7 +346,7 @@ class HttpFD(FileDownloader):

             # Update file modification time
             if self.params.get('updatetime', True):
-                info_dict['filetime'] = self.try_utime(ctx.filename, ctx.data.info().get('last-modified', None))
+                info_dict['filetime'] = self.try_utime(ctx.filename, ctx.data.headers.get('last-modified', None))

             self._hook_progress({
                 'downloaded_bytes': byte_counter,
@@ -2,9 +2,9 @@ import binascii
 import io
 import struct
 import time
-import urllib.error

 from .fragment import FragmentFD
+from ..networking.exceptions import HTTPError
 from ..utils import RetryManager

 u8 = struct.Struct('>B')
@@ -271,7 +271,7 @@ class IsmFD(FragmentFD):
                         write_piff_header(ctx['dest_stream'], info_dict['_download_params'])
                         extra_state['ism_track_written'] = True
                     self._append_fragment(ctx, frag_content)
-                except urllib.error.HTTPError as err:
+                except HTTPError as err:
                     retry.error = err
                     continue

@@ -5,13 +5,8 @@ import time
 from . import get_suitable_downloader
 from .common import FileDownloader
 from .external import FFmpegFD
-from ..utils import (
-    DownloadError,
-    WebSocketsWrapper,
-    sanitized_Request,
-    str_or_none,
-    try_get,
-)
+from ..networking import Request
+from ..utils import DownloadError, WebSocketsWrapper, str_or_none, try_get


 class NiconicoDmcFD(FileDownloader):
@@ -33,7 +28,7 @@ class NiconicoDmcFD(FileDownloader):
         heartbeat_data = heartbeat_info_dict['data'].encode()
         heartbeat_interval = heartbeat_info_dict.get('interval', 30)

-        request = sanitized_Request(heartbeat_url, heartbeat_data)
+        request = Request(heartbeat_url, heartbeat_data)

         def heartbeat():
             try:
@@ -1,8 +1,8 @@
 import json
 import time
-import urllib.error

 from .fragment import FragmentFD
+from ..networking.exceptions import HTTPError
 from ..utils import (
     RegexNotFoundError,
     RetryManager,
@@ -10,6 +10,7 @@ from ..utils import (
     int_or_none,
     try_get,
 )
+from ..utils.networking import HTTPHeaderDict


 class YoutubeLiveChatFD(FragmentFD):
@@ -37,10 +38,7 @@ class YoutubeLiveChatFD(FragmentFD):
         start_time = int(time.time() * 1000)

         def dl_fragment(url, data=None, headers=None):
-            http_headers = info_dict.get('http_headers', {})
-            if headers:
-                http_headers = http_headers.copy()
-                http_headers.update(headers)
+            http_headers = HTTPHeaderDict(info_dict.get('http_headers'), headers)
             return self._download_fragment(ctx, url, info_dict, http_headers, data)

         def parse_actions_replay(live_chat_continuation):
@@ -129,7 +127,7 @@ class YoutubeLiveChatFD(FragmentFD):
                             or frag_index == 1 and try_refresh_replay_beginning
                             or parse_actions_replay)
                     return (True, *func(live_chat_continuation))
-                except urllib.error.HTTPError as err:
+                except HTTPError as err:
                     retry.error = err
                     continue
         return False, None, None, None
@@ -22,7 +22,6 @@ from ..utils import (
     int_or_none,
     intlist_to_bytes,
     OnDemandPagedList,
-    request_to_url,
     time_seconds,
     traverse_obj,
     update_url_query,
@@ -137,7 +136,7 @@ class AbemaLicenseHandler(urllib.request.BaseHandler):
         return intlist_to_bytes(aes_ecb_decrypt(encvideokey, enckey))

     def abematv_license_open(self, url):
-        url = request_to_url(url)
+        url = url.get_full_url() if isinstance(url, urllib.request.Request) else url
         ticket = urllib.parse.urlparse(url).netloc
         response_data = self._get_videokey_from_ticket(ticket)
         return urllib.response.addinfourl(io.BytesIO(response_data), headers={
@@ -6,10 +6,8 @@ import random

 from .common import InfoExtractor
 from ..aes import aes_cbc_decrypt_bytes, unpad_pkcs7
-from ..compat import (
-    compat_HTTPError,
-    compat_b64decode,
-)
+from ..compat import compat_b64decode
+from ..networking.exceptions import HTTPError
 from ..utils import (
     ass_subtitles_timecode,
     bytes_to_intlist,
@@ -142,9 +140,9 @@ Format: Marked,Start,End,Style,Name,MarginL,MarginR,MarginV,Effect,Text'''
             self._HEADERS = {'authorization': 'Bearer ' + access_token}
         except ExtractorError as e:
             message = None
-            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401:
+            if isinstance(e.cause, HTTPError) and e.cause.status == 401:
                 resp = self._parse_json(
-                    e.cause.read().decode(), None, fatal=False) or {}
+                    e.cause.response.read().decode(), None, fatal=False) or {}
                 message = resp.get('message') or resp.get('code')
             self.report_warning(message or self._LOGIN_ERR_MESSAGE)

@@ -195,14 +193,14 @@ Format: Marked,Start,End,Style,Name,MarginL,MarginR,MarginV,Effect,Text'''
                 })
                 break
             except ExtractorError as e:
-                if not isinstance(e.cause, compat_HTTPError):
+                if not isinstance(e.cause, HTTPError):
                     raise e

-                if e.cause.code == 401:
+                if e.cause.status == 401:
                     # This usually goes away with a different random pkcs1pad, so retry
                     continue

-                error = self._parse_json(e.cause.read(), video_id)
+                error = self._parse_json(e.cause.response.read(), video_id)
                 message = error.get('message')
                 if e.cause.code == 403 and error.get('code') == 'player-bad-geolocation-country':
                     self.raise_geo_restricted(msg=message)
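The adn.py hunks above show the extractor-side half of the migration: an error body is no longer read from the exception object itself but from its .response attribute. As a generic sketch (`self`, `api_url` and `video_id` assume the usual InfoExtractor context):

    from ..networking.exceptions import HTTPError

    try:
        data = self._download_json(api_url, video_id)
    except ExtractorError as e:
        if isinstance(e.cause, HTTPError) and e.cause.status == 401:
            # body moved from e.cause.read() to e.cause.response.read()
            detail = self._parse_json(
                e.cause.response.read().decode(), video_id, fatal=False)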
@ -2,11 +2,11 @@ import getpass
|
||||||
import json
|
import json
|
||||||
import re
|
import re
|
||||||
import time
|
import time
|
||||||
import urllib.error
|
|
||||||
import xml.etree.ElementTree as etree
|
import xml.etree.ElementTree as etree
|
||||||
|
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
from ..compat import compat_urlparse
|
from ..compat import compat_urlparse
|
||||||
|
from ..networking.exceptions import HTTPError
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
NO_DEFAULT,
|
NO_DEFAULT,
|
||||||
ExtractorError,
|
ExtractorError,
|
||||||
|
@@ -1394,7 +1394,7 @@ class AdobePassIE(InfoExtractor):    # XXX: Conventionally, base classes should end with BaseIE/InfoExtractor
             form_page, urlh = form_page_res
             post_url = self._html_search_regex(r'<form[^>]+action=(["\'])(?P<url>.+?)\1', form_page, 'post url', group='url')
             if not re.match(r'https?://', post_url):
-                post_url = compat_urlparse.urljoin(urlh.geturl(), post_url)
+                post_url = compat_urlparse.urljoin(urlh.url, post_url)
             form_data = self._hidden_inputs(form_page)
             form_data.update(data)
             return self._download_webpage_handle(
@@ -1619,7 +1619,7 @@ class AdobePassIE(InfoExtractor):    # XXX: Conventionally, base classes should end with BaseIE/InfoExtractor
                     hidden_data['history'] = 1

                     provider_login_page_res = self._download_webpage_handle(
-                        urlh.geturl(), video_id, 'Sending first bookend',
+                        urlh.url, video_id, 'Sending first bookend',
                         query=hidden_data)

                     provider_association_redirect, urlh = post_form(
@@ -1629,7 +1629,7 @@ class AdobePassIE(InfoExtractor):    # XXX: Conventionally, base classes should end with BaseIE/InfoExtractor
                     })

                     provider_refresh_redirect_url = extract_redirect_url(
-                        provider_association_redirect, url=urlh.geturl())
+                        provider_association_redirect, url=urlh.url)

                     last_bookend_page, urlh = self._download_webpage_handle(
                         provider_refresh_redirect_url, video_id,
@@ -1638,7 +1638,7 @@ class AdobePassIE(InfoExtractor):    # XXX: Conventionally, base classes should end with BaseIE/InfoExtractor
                     hidden_data['history'] = 3

                     mvpd_confirm_page_res = self._download_webpage_handle(
-                        urlh.geturl(), video_id, 'Sending final bookend',
+                        urlh.url, video_id, 'Sending final bookend',
                         query=hidden_data)

                     post_form(mvpd_confirm_page_res, 'Confirming Login')
@@ -1652,7 +1652,7 @@ class AdobePassIE(InfoExtractor):    # XXX: Conventionally, base classes should end with BaseIE/InfoExtractor
                     hidden_data['history_val'] = 1

                     provider_login_redirect_page_res = self._download_webpage_handle(
-                        urlh.geturl(), video_id, 'Sending First Bookend',
+                        urlh.url, video_id, 'Sending First Bookend',
                         query=hidden_data)

                     provider_login_redirect_page, urlh = provider_login_redirect_page_res
@@ -1680,7 +1680,7 @@ class AdobePassIE(InfoExtractor):    # XXX: Conventionally, base classes should end with BaseIE/InfoExtractor
                     })

                     provider_refresh_redirect_url = extract_redirect_url(
-                        provider_association_redirect, url=urlh.geturl())
+                        provider_association_redirect, url=urlh.url)

                     last_bookend_page, urlh = self._download_webpage_handle(
                         provider_refresh_redirect_url, video_id,
@@ -1690,7 +1690,7 @@ class AdobePassIE(InfoExtractor):    # XXX: Conventionally, base classes should end with BaseIE/InfoExtractor
                     hidden_data['history_val'] = 3

                     mvpd_confirm_page_res = self._download_webpage_handle(
-                        urlh.geturl(), video_id, 'Sending Final Bookend',
+                        urlh.url, video_id, 'Sending Final Bookend',
                         query=hidden_data)

                     post_form(mvpd_confirm_page_res, 'Confirming Login')
@@ -1699,7 +1699,7 @@ class AdobePassIE(InfoExtractor):    # XXX: Conventionally, base classes should end with BaseIE/InfoExtractor
                 # based redirect that should be followed.
                 provider_redirect_page, urlh = provider_redirect_page_res
                 provider_refresh_redirect_url = extract_redirect_url(
-                    provider_redirect_page, url=urlh.geturl())
+                    provider_redirect_page, url=urlh.url)
                 if provider_refresh_redirect_url:
                     provider_redirect_page_res = self._download_webpage_handle(
                         provider_refresh_redirect_url, video_id,
@@ -1724,7 +1724,7 @@ class AdobePassIE(InfoExtractor):    # XXX: Conventionally, base classes should end with BaseIE/InfoExtractor
                         'requestor_id': requestor_id,
                     }), headers=mvpd_headers)
             except ExtractorError as e:
-                if not mso_id and isinstance(e.cause, urllib.error.HTTPError) and e.cause.code == 401:
+                if not mso_id and isinstance(e.cause, HTTPError) and e.cause.status == 401:
                     raise_mvpd_required()
                 raise
             if '<pendingLogout' in session:
@@ -1,8 +1,8 @@
 import urllib.parse

 from .common import InfoExtractor
+from ..networking import HEADRequest
 from ..utils import (
-    HEADRequest,
     ExtractorError,
     determine_ext,
     scale_thumbnails_to_max_format_width,
@@ -121,7 +121,7 @@ class Ant1NewsGrEmbedIE(Ant1NewsGrBaseIE):
         canonical_url = self._request_webpage(
             HEADRequest(url), video_id,
             note='Resolve canonical player URL',
-            errnote='Could not resolve canonical player URL').geturl()
+            errnote='Could not resolve canonical player URL').url
         _, netloc, _, _, query, _ = urllib.parse.urlparse(canonical_url)
         cid = urllib.parse.parse_qs(query)['cid'][0]

@@ -1,16 +1,16 @@
 import json
 import re
-import urllib.error
 import urllib.parse

 from .common import InfoExtractor
 from .naver import NaverBaseIE
 from .youtube import YoutubeBaseInfoExtractor, YoutubeIE
-from ..compat import compat_HTTPError, compat_urllib_parse_unquote
+from ..compat import compat_urllib_parse_unquote
+from ..networking import HEADRequest
+from ..networking.exceptions import HTTPError
 from ..utils import (
     KNOWN_EXTENSIONS,
     ExtractorError,
-    HEADRequest,
     bug_reports_message,
     clean_html,
     dict_get,
@@ -899,7 +899,7 @@ class YoutubeWebArchiveIE(InfoExtractor):
                 video_id, note='Fetching archived video file url', expected_status=True)
         except ExtractorError as e:
             # HTTP Error 404 is expected if the video is not saved.
-            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 404:
+            if isinstance(e.cause, HTTPError) and e.cause.status == 404:
                 self.raise_no_formats(
                     'The requested video is not archived, indexed, or there is an issue with web.archive.org (try again later)', expected=True)
             else:
@@ -926,7 +926,7 @@ class YoutubeWebArchiveIE(InfoExtractor):
             info['thumbnails'] = self._extract_thumbnails(video_id)

         if urlh:
-            url = compat_urllib_parse_unquote(urlh.geturl())
+            url = compat_urllib_parse_unquote(urlh.url)
             video_file_url_qs = parse_qs(url)
             # Attempt to recover any ext & format info from playback url & response headers
             format = {'url': url, 'filesize': int_or_none(urlh.headers.get('x-archive-orig-content-length'))}
@@ -1052,7 +1052,7 @@ class VLiveWebArchiveIE(InfoExtractor):
             try:
                 return self._download_webpage(f'https://web.archive.org/web/{timestamp}id_/{url}', video_id, **kwargs)
             except ExtractorError as e:
-                if isinstance(e.cause, urllib.error.HTTPError) and e.cause.code == 404:
+                if isinstance(e.cause, HTTPError) and e.cause.status == 404:
                     raise ExtractorError('Page was not archived', expected=True)
                 retry.error = e
                 continue
@@ -1,5 +1,5 @@
 from .common import InfoExtractor
-from ..compat import compat_HTTPError
+from ..networking.exceptions import HTTPError
 from ..utils import (
     ExtractorError,
     int_or_none,
@@ -34,8 +34,8 @@ class AtresPlayerIE(InfoExtractor):
     _API_BASE = 'https://api.atresplayer.com/'

     def _handle_error(self, e, code):
-        if isinstance(e.cause, compat_HTTPError) and e.cause.code == code:
-            error = self._parse_json(e.cause.read(), None)
+        if isinstance(e.cause, HTTPError) and e.cause.status == code:
+            error = self._parse_json(e.cause.response.read(), None)
             if error.get('error') == 'required_registered':
                 self.raise_login_required()
             raise ExtractorError(error['error_description'], expected=True)
@@ -2,11 +2,11 @@ import functools
 import itertools
 import json
 import re
-import urllib.error
 import xml.etree.ElementTree

 from .common import InfoExtractor
-from ..compat import compat_HTTPError, compat_str, compat_urlparse
+from ..compat import compat_str, compat_urlparse
+from ..networking.exceptions import HTTPError
 from ..utils import (
     ExtractorError,
     OnDemandPagedList,
@@ -277,7 +277,7 @@ class BBCCoUkIE(InfoExtractor):
             post_url, None, 'Logging in', data=urlencode_postdata(login_form),
             headers={'Referer': self._LOGIN_URL})

-        if self._LOGIN_URL in urlh.geturl():
+        if self._LOGIN_URL in urlh.url:
             error = clean_html(get_element_by_class('form-message', response))
             if error:
                 raise ExtractorError(
@@ -388,8 +388,8 @@ class BBCCoUkIE(InfoExtractor):
                         href, programme_id, ext='mp4', entry_protocol='m3u8_native',
                         m3u8_id=format_id, fatal=False)
                 except ExtractorError as e:
-                    if not (isinstance(e.exc_info[1], urllib.error.HTTPError)
-                            and e.exc_info[1].code in (403, 404)):
+                    if not (isinstance(e.exc_info[1], HTTPError)
+                            and e.exc_info[1].status in (403, 404)):
                         raise
                     fmts = []
                 formats.extend(fmts)
@@ -472,7 +472,7 @@ class BBCCoUkIE(InfoExtractor):

                 return programme_id, title, description, duration, formats, subtitles
         except ExtractorError as ee:
-            if not (isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 404):
+            if not (isinstance(ee.cause, HTTPError) and ee.cause.status == 404):
                 raise

         # fallback to legacy playlist
@@ -983,7 +983,7 @@ class BBCIE(BBCCoUkIE):    # XXX: Do not subclass from concrete IE
                 # Some playlist URL may fail with 500, at the same time
                 # the other one may work fine (e.g.
                 # http://www.bbc.com/turkce/haberler/2015/06/150615_telabyad_kentin_cogu)
-                if isinstance(e.cause, compat_HTTPError) and e.cause.code == 500:
+                if isinstance(e.cause, HTTPError) and e.cause.status == 500:
                     continue
                 raise
             if entry:
@@ -4,11 +4,11 @@ import hashlib
 import itertools
 import math
 import time
-import urllib.error
 import urllib.parse

 from .common import InfoExtractor, SearchInfoExtractor
 from ..dependencies import Cryptodome
+from ..networking.exceptions import HTTPError
 from ..utils import (
     ExtractorError,
     GeoRestrictedError,
@@ -614,7 +614,7 @@ class BilibiliSpaceVideoIE(BilibiliSpaceBaseIE):
                 response = self._download_json('https://api.bilibili.com/x/space/wbi/arc/search',
                                                playlist_id, note=f'Downloading page {page_idx}', query=query)
             except ExtractorError as e:
-                if isinstance(e.cause, urllib.error.HTTPError) and e.cause.code == 412:
+                if isinstance(e.cause, HTTPError) and e.cause.status == 412:
                     raise ExtractorError(
                         'Request is blocked by server (412), please add cookies, wait and try later.', expected=True)
                 raise
@@ -2,9 +2,9 @@ import functools
 import re

 from .common import InfoExtractor
+from ..networking import HEADRequest
 from ..utils import (
     ExtractorError,
-    HEADRequest,
     OnDemandPagedList,
     clean_html,
     get_element_by_class,
@@ -1,6 +1,6 @@
 from .adobepass import AdobePassIE
+from ..networking import HEADRequest
 from ..utils import (
-    HEADRequest,
     extract_attributes,
     float_or_none,
     get_element_html_by_class,
@@ -155,7 +155,7 @@ class BravoTVIE(AdobePassIE):
         chapters = None

         m3u8_url = self._request_webpage(HEADRequest(
-            update_url_query(f'{tp_url}/stream.m3u8', query)), video_id, 'Checking m3u8 URL').geturl()
+            update_url_query(f'{tp_url}/stream.m3u8', query)), video_id, 'Checking m3u8 URL').url
         if 'mpeg_cenc' in m3u8_url:
             self.report_drm(video_id)
         formats, subtitles = self._extract_m3u8_formats_and_subtitles(m3u8_url, video_id, 'mp4', m3u8_id='hls')
@@ -7,10 +7,10 @@ from .adobepass import AdobePassIE
 from .common import InfoExtractor
 from ..compat import (
     compat_etree_fromstring,
-    compat_HTTPError,
     compat_parse_qs,
     compat_urlparse,
 )
+from ..networking.exceptions import HTTPError
 from ..utils import (
     clean_html,
     dict_get,
@@ -915,8 +915,8 @@ class BrightcoveNewIE(BrightcoveNewBaseIE):
                 json_data = self._download_json(api_url, video_id, headers=headers)
                 break
             except ExtractorError as e:
-                if isinstance(e.cause, compat_HTTPError) and e.cause.code in (401, 403):
-                    json_data = self._parse_json(e.cause.read().decode(), video_id)[0]
+                if isinstance(e.cause, HTTPError) and e.cause.status in (401, 403):
+                    json_data = self._parse_json(e.cause.response.read().decode(), video_id)[0]
                     message = json_data.get('message') or json_data['error_code']
                     if json_data.get('error_subcode') == 'CLIENT_GEO':
                         self.raise_geo_restricted(msg=message)
@@ -64,7 +64,7 @@ class CanalplusIE(InfoExtractor):
         # response = self._request_webpage(
         #     HEADRequest(fmt_url), video_id,
         #     'Checking if the video is georestricted')
-        # if '/blocage' in response.geturl():
+        # if '/blocage' in response.url:
         #     raise ExtractorError(
         #         'The video is not available in your country',
         #         expected=True)
@@ -7,9 +7,9 @@ import zlib
 from .anvato import AnvatoIE
 from .common import InfoExtractor
 from .paramountplus import ParamountPlusIE
+from ..networking import HEADRequest
 from ..utils import (
     ExtractorError,
-    HEADRequest,
     UserNotLive,
     determine_ext,
     float_or_none,
@@ -1,20 +1,20 @@
 import re

 from .common import InfoExtractor
-from ..compat import (
-    compat_urllib_parse_unquote,
-    compat_urllib_parse_urlparse,
-)
+from ..compat import compat_urllib_parse_unquote, compat_urllib_parse_urlparse
+from ..networking import Request
 from ..utils import (
     ExtractorError,
     float_or_none,
-    sanitized_Request,
     str_or_none,
     traverse_obj,
     urlencode_postdata,
-    USER_AGENTS,
 )

+USER_AGENTS = {
+    'Safari': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27',
+}
+

 class CeskaTelevizeIE(InfoExtractor):
     _VALID_URL = r'https?://(?:www\.)?ceskatelevize\.cz/(?:ivysilani|porady|zive)/(?:[^/?#&]+/)*(?P<id>[^/#?]+)'
@@ -97,7 +97,7 @@ class CeskaTelevizeIE(InfoExtractor):
     def _real_extract(self, url):
         playlist_id = self._match_id(url)
         webpage, urlh = self._download_webpage_handle(url, playlist_id)
-        parsed_url = compat_urllib_parse_urlparse(urlh.geturl())
+        parsed_url = compat_urllib_parse_urlparse(urlh.url)
         site_name = self._og_search_property('site_name', webpage, fatal=False, default='Česká televize')
         playlist_title = self._og_search_title(webpage, default=None)
         if site_name and playlist_title:
@@ -163,16 +163,16 @@ class CeskaTelevizeIE(InfoExtractor):
         entries = []
 
         for user_agent in (None, USER_AGENTS['Safari']):
-            req = sanitized_Request(
+            req = Request(
                 'https://www.ceskatelevize.cz/ivysilani/ajax/get-client-playlist/',
                 data=urlencode_postdata(data))
 
-            req.add_header('Content-type', 'application/x-www-form-urlencoded')
-            req.add_header('x-addr', '127.0.0.1')
-            req.add_header('X-Requested-With', 'XMLHttpRequest')
+            req.headers['Content-type'] = 'application/x-www-form-urlencoded'
+            req.headers['x-addr'] = '127.0.0.1'
+            req.headers['X-Requested-With'] = 'XMLHttpRequest'
             if user_agent:
-                req.add_header('User-Agent', user_agent)
-            req.add_header('Referer', url)
+                req.headers['User-Agent'] = user_agent
+            req.headers['Referer'] = url
 
             playlistpage = self._download_json(req, playlist_id, fatal=False)
 
@@ -183,8 +183,8 @@ class CeskaTelevizeIE(InfoExtractor):
         if playlist_url == 'error_region':
             raise ExtractorError(NOT_AVAILABLE_STRING, expected=True)
 
-        req = sanitized_Request(compat_urllib_parse_unquote(playlist_url))
-        req.add_header('Referer', url)
+        req = Request(compat_urllib_parse_unquote(playlist_url))
+        req.headers['Referer'] = url
 
         playlist = self._download_json(req, playlist_id, fatal=False)
         if not playlist:
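Note: `sanitized_Request` gives way to `yt_dlp.networking.Request`, and headers become a mutable, case-insensitive mapping instead of going through `add_header()`. A small sketch of the new construction (URL and header values are placeholders):

    from yt_dlp.networking import Request
    from yt_dlp.utils import urlencode_postdata

    req = Request(
        'https://example.com/api',  # placeholder endpoint
        data=urlencode_postdata({'key': 'value'}),
        headers={'X-Requested-With': 'XMLHttpRequest'})

    # headers behave like a dict; no add_header() needed
    req.headers['Referer'] = 'https://example.com/'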
@@ -1,6 +1,6 @@
 import json
-import urllib.error
 
 from .common import InfoExtractor
+from ..networking.exceptions import HTTPError
 from ..utils import (
     ExtractorError,
     float_or_none,
@@ -40,7 +40,7 @@ class CinetecaMilanoIE(InfoExtractor):
                 'Authorization': try_get(self._get_cookies('https://www.cinetecamilano.it'), lambda x: f'Bearer {x["cnt-token"].value}') or ''
             })
         except ExtractorError as e:
-            if ((isinstance(e.cause, urllib.error.HTTPError) and e.cause.code == 500)
+            if ((isinstance(e.cause, HTTPError) and e.cause.status == 500)
                     or isinstance(e.cause, json.JSONDecodeError)):
                 self.raise_login_required(method='cookies')
             raise
@@ -33,7 +33,7 @@ class CiscoWebexIE(InfoExtractor):
         if rcid:
             webpage = self._download_webpage(url, None, note='Getting video ID')
             url = self._search_regex(self._VALID_URL, webpage, 'redirection url', group='url')
-        url = self._request_webpage(url, None, note='Resolving final URL').geturl()
+        url = self._request_webpage(url, None, note='Resolving final URL').url
         mobj = self._match_valid_url(url)
         subdomain = mobj.group('subdomain')
         siteurl = mobj.group('siteurl_1') or mobj.group('siteurl_2')
@@ -49,7 +49,7 @@ class CiscoWebexIE(InfoExtractor):
             'https://%s.webex.com/webappng/api/v1/recordings/%s/stream' % (subdomain, video_id),
             video_id, headers=headers, query={'siteurl': siteurl}, expected_status=(403, 429))
 
-        if urlh.getcode() == 403:
+        if urlh.status == 403:
             if stream['code'] == 53004:
                 self.raise_login_required()
             if stream['code'] == 53005:
@@ -59,7 +59,7 @@ class CiscoWebexIE(InfoExtractor):
                     'This video is protected by a password, use the --video-password option', expected=True)
                 raise ExtractorError(f'{self.IE_NAME} said: {stream["code"]} - {stream["message"]}', expected=True)
 
-        if urlh.getcode() == 429:
+        if urlh.status == 429:
             self.raise_login_required(
                 f'{self.IE_NAME} asks you to solve a CAPTCHA. Solve CAPTCHA in browser and',
                 method='cookies')
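Note: on the response side, the urllib-era accessor methods give way to plain attributes: `geturl()` becomes `.url` and `getcode()` becomes `.status`, as the hunks above show. A sketch (the function name is illustrative):

    def describe(response):
        """Summarize a yt_dlp networking Response.

        Old urllib-style calls (response.geturl(), response.getcode())
        are deprecated in favour of plain attributes.
        """
        return f'{response.status} <- {response.url}'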
@@ -31,8 +31,12 @@ from ..compat import (
 from ..cookies import LenientSimpleCookie
 from ..downloader.f4m import get_base_url, remove_encrypted_media
 from ..downloader.hls import HlsFD
-from ..networking.common import HEADRequest, Request
-from ..networking.exceptions import network_exceptions
+from ..networking import HEADRequest, Request
+from ..networking.exceptions import (
+    HTTPError,
+    IncompleteRead,
+    network_exceptions,
+)
 from ..utils import (
     IDENTITY,
     JSON_LD_RE,
@@ -729,7 +733,7 @@ class InfoExtractor:
             e.ie = e.ie or self.IE_NAME,
             e.traceback = e.traceback or sys.exc_info()[2]
             raise
-        except http.client.IncompleteRead as e:
+        except IncompleteRead as e:
             raise ExtractorError('A network error has occurred.', cause=e, expected=True, video_id=self.get_temp_id(url))
         except (KeyError, StopIteration) as e:
             raise ExtractorError('An extractor error has occurred.', cause=e, video_id=self.get_temp_id(url))
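Note: truncated transfers are likewise wrapped by the networking layer; extractor code now catches `yt_dlp.networking.exceptions.IncompleteRead` instead of `http.client.IncompleteRead`, so handlers work regardless of which backend raised. A sketch of retry logic in that style (the `fetch` callable is a stand-in):

    from yt_dlp.networking.exceptions import IncompleteRead

    def fetch_with_retries(fetch, tries=3):
        """Call fetch() and retry on backend-agnostic IncompleteRead
        errors, up to `tries` attempts."""
        for attempt in range(1, tries + 1):
            try:
                return fetch()
            except IncompleteRead:
                if attempt == tries:
                    raise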
@@ -788,16 +792,19 @@ class InfoExtractor:
 
     @staticmethod
     def __can_accept_status_code(err, expected_status):
-        assert isinstance(err, urllib.error.HTTPError)
+        assert isinstance(err, HTTPError)
         if expected_status is None:
             return False
         elif callable(expected_status):
-            return expected_status(err.code) is True
+            return expected_status(err.status) is True
         else:
-            return err.code in variadic(expected_status)
+            return err.status in variadic(expected_status)
 
     def _create_request(self, url_or_request, data=None, headers=None, query=None):
         if isinstance(url_or_request, urllib.request.Request):
+            self._downloader.deprecation_warning(
+                'Passing a urllib.request.Request to _create_request() is deprecated. '
+                'Use yt_dlp.networking.common.Request instead.')
             url_or_request = urllib_req_to_req(url_or_request)
         elif not isinstance(url_or_request, Request):
             url_or_request = Request(url_or_request)
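Note: `_create_request()` keeps accepting `urllib.request.Request` but now warns and converts it through `urllib_req_to_req()`; new code should build a `yt_dlp.networking.Request` directly. A minimal before/after sketch (the URL and payload are placeholders):

    import urllib.request

    from yt_dlp.networking import Request

    # before: a stdlib request object (now triggers a deprecation
    # warning when passed into the extractor/networking layer)
    legacy = urllib.request.Request('https://example.com/api', data=b'payload')

    # after: the native request type, carrying the same information
    native = Request('https://example.com/api', data=b'payload')
    assert native.data == legacy.data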
@@ -839,7 +846,7 @@ class InfoExtractor:
         try:
             return self._downloader.urlopen(self._create_request(url_or_request, data, headers, query))
         except network_exceptions as err:
-            if isinstance(err, urllib.error.HTTPError):
+            if isinstance(err, HTTPError):
                 if self.__can_accept_status_code(err, expected_status):
                     return err.response
 
@@ -973,11 +980,11 @@ class InfoExtractor:
         if prefix is not None:
             webpage_bytes = prefix + webpage_bytes
         if self.get_param('dump_intermediate_pages', False):
-            self.to_screen('Dumping request to ' + urlh.geturl())
+            self.to_screen('Dumping request to ' + urlh.url)
             dump = base64.b64encode(webpage_bytes).decode('ascii')
             self._downloader.to_screen(dump)
         if self.get_param('write_pages'):
-            filename = self._request_dump_filename(urlh.geturl(), video_id)
+            filename = self._request_dump_filename(urlh.url, video_id)
             self.to_screen(f'Saving request to {filename}')
             with open(filename, 'wb') as outf:
                 outf.write(webpage_bytes)
@@ -1109,7 +1116,7 @@ class InfoExtractor:
         while True:
             try:
                 return self.__download_webpage(url_or_request, video_id, note, errnote, None, fatal, *args, **kwargs)
-            except http.client.IncompleteRead as e:
+            except IncompleteRead as e:
                 try_count += 1
                 if try_count >= tries:
                     raise e
@@ -1806,7 +1813,7 @@ class InfoExtractor:
             return []
 
         manifest, urlh = res
-        manifest_url = urlh.geturl()
+        manifest_url = urlh.url
 
         return self._parse_f4m_formats(
             manifest, manifest_url, video_id, preference=preference, quality=quality, f4m_id=f4m_id,
@@ -1965,7 +1972,7 @@ class InfoExtractor:
             return [], {}
 
         m3u8_doc, urlh = res
-        m3u8_url = urlh.geturl()
+        m3u8_url = urlh.url
 
         return self._parse_m3u8_formats_and_subtitles(
             m3u8_doc, m3u8_url, ext=ext, entry_protocol=entry_protocol,
@@ -2243,7 +2250,7 @@ class InfoExtractor:
             return [], {}
 
         smil, urlh = res
-        smil_url = urlh.geturl()
+        smil_url = urlh.url
 
         namespace = self._parse_smil_namespace(smil)
 
@@ -2266,7 +2273,7 @@ class InfoExtractor:
             return {}
 
         smil, urlh = res
-        smil_url = urlh.geturl()
+        smil_url = urlh.url
 
         return self._parse_smil(smil, smil_url, video_id, f4m_params=f4m_params)
 
@@ -2458,7 +2465,7 @@ class InfoExtractor:
             return []
 
         xspf, urlh = res
-        xspf_url = urlh.geturl()
+        xspf_url = urlh.url
 
         return self._parse_xspf(
             xspf, playlist_id, xspf_url=xspf_url,
@@ -2529,7 +2536,7 @@ class InfoExtractor:
             return [], {}
 
         # We could have been redirected to a new url when we retrieved our mpd file.
-        mpd_url = urlh.geturl()
+        mpd_url = urlh.url
         mpd_base_url = base_url(mpd_url)
 
         return self._parse_mpd_formats_and_subtitles(
@@ -2900,7 +2907,7 @@ class InfoExtractor:
         if ism_doc is None:
             return [], {}
 
-        return self._parse_ism_formats_and_subtitles(ism_doc, urlh.geturl(), ism_id)
+        return self._parse_ism_formats_and_subtitles(ism_doc, urlh.url, ism_id)
 
     def _parse_ism_formats_and_subtitles(self, ism_doc, ism_url, ism_id=None):
         """
@@ -4,7 +4,7 @@ import re
 import time
 
 from .common import InfoExtractor
-from ..compat import compat_HTTPError
+from ..networking.exceptions import HTTPError
 from ..utils import (
     determine_ext,
     float_or_none,
@@ -113,7 +113,7 @@ class CrackleIE(InfoExtractor):
                     errnote='Unable to download media JSON')
             except ExtractorError as e:
                 # 401 means geo restriction, trying next country
-                if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401:
+                if isinstance(e.cause, HTTPError) and e.cause.status == 401:
                     continue
                 raise
 
@@ -1,7 +1,7 @@
 import base64
-import urllib.error
 
 from .common import InfoExtractor
+from ..networking.exceptions import HTTPError
 from ..utils import (
     ExtractorError,
     float_or_none,
@@ -114,7 +114,7 @@ class CrunchyrollBaseIE(InfoExtractor):
             result = self._call_base_api(
                 path, internal_id, lang, f'Downloading {note} JSON ({self._API_ENDPOINT})', query=query)
         except ExtractorError as error:
-            if isinstance(error.cause, urllib.error.HTTPError) and error.cause.code == 404:
+            if isinstance(error.cause, HTTPError) and error.cause.status == 404:
                 return None
             raise
 
@@ -1,10 +1,8 @@
 import time
 
 from .common import InfoExtractor
-from ..utils import (
-    int_or_none,
-    HEADRequest,
-)
+from ..networking import HEADRequest
+from ..utils import int_or_none
 
 
 class CultureUnpluggedIE(InfoExtractor):
@@ -1,9 +1,9 @@
 import hashlib
 import re
 import time
-import urllib.error
 
 from .common import InfoExtractor
+from ..networking.exceptions import HTTPError
 from ..utils import (
     ExtractorError,
     classproperty,
@@ -105,7 +105,7 @@ class DacastVODIE(DacastBaseIE):
                 formats = self._extract_m3u8_formats(hls_url, video_id, 'mp4', m3u8_id='hls')
             except ExtractorError as e:
                 # CDN will randomly respond with 403
-                if isinstance(e.cause, urllib.error.HTTPError) and e.cause.code == 403:
+                if isinstance(e.cause, HTTPError) and e.cause.status == 403:
                     retry.error = e
                     continue
                 raise
@@ -3,7 +3,7 @@ import json
 import re
 
 from .common import InfoExtractor
-from ..compat import compat_HTTPError
+from ..networking.exceptions import HTTPError
 from ..utils import (
     ExtractorError,
     OnDemandPagedList,
@@ -68,9 +68,9 @@ class DailymotionBaseInfoExtractor(InfoExtractor):
                 None, 'Downloading Access Token',
                 data=urlencode_postdata(data))['access_token']
         except ExtractorError as e:
-            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 400:
+            if isinstance(e.cause, HTTPError) and e.cause.status == 400:
                 raise ExtractorError(self._parse_json(
-                    e.cause.read().decode(), xid)['error_description'], expected=True)
+                    e.cause.response.read().decode(), xid)['error_description'], expected=True)
             raise
         self._set_dailymotion_cookie('access_token' if username else 'client_token', token)
         self._HEADERS['Authorization'] = 'Bearer ' + token
@@ -3,8 +3,8 @@ import string
 
 from .discoverygo import DiscoveryGoBaseIE
 from ..compat import compat_urllib_parse_unquote
+from ..networking.exceptions import HTTPError
 from ..utils import ExtractorError
-from ..compat import compat_HTTPError
 
 
 class DiscoveryIE(DiscoveryGoBaseIE):
@@ -100,9 +100,9 @@ class DiscoveryIE(DiscoveryGoBaseIE):
                 self._API_BASE_URL + 'streaming/video/' + video_id,
                 display_id, 'Downloading streaming JSON metadata', headers=headers)
         except ExtractorError as e:
-            if isinstance(e.cause, compat_HTTPError) and e.cause.code in (401, 403):
+            if isinstance(e.cause, HTTPError) and e.cause.status in (401, 403):
                 e_description = self._parse_json(
-                    e.cause.read().decode(), display_id)['description']
+                    e.cause.response.read().decode(), display_id)['description']
                 if 'resource not available for country' in e_description:
                     self.raise_geo_restricted(countries=self._GEO_COUNTRIES)
                 if 'Authorized Networks' in e_description:
@@ -2,7 +2,7 @@ import json
 import uuid
 
 from .common import InfoExtractor
-from ..compat import compat_HTTPError
+from ..networking.exceptions import HTTPError
 from ..utils import (
     determine_ext,
     ExtractorError,
@@ -39,7 +39,7 @@ class DPlayBaseIE(InfoExtractor):
         return f'Bearer {token}'
 
     def _process_errors(self, e, geo_countries):
-        info = self._parse_json(e.cause.read().decode('utf-8'), None)
+        info = self._parse_json(e.cause.response.read().decode('utf-8'), None)
         error = info['errors'][0]
         error_code = error.get('code')
         if error_code == 'access.denied.geoblocked':
@@ -87,7 +87,7 @@ class DPlayBaseIE(InfoExtractor):
                     'include': 'images,primaryChannel,show,tags'
                 })
         except ExtractorError as e:
-            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 400:
+            if isinstance(e.cause, HTTPError) and e.cause.status == 400:
                 self._process_errors(e, geo_countries)
             raise
         video_id = video['data']['id']
@@ -99,7 +99,7 @@ class DPlayBaseIE(InfoExtractor):
             streaming = self._download_video_playback_info(
                 disco_base, video_id, headers)
         except ExtractorError as e:
-            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:
+            if isinstance(e.cause, HTTPError) and e.cause.status == 403:
                 self._process_errors(e, geo_countries)
             raise
         for format_dict in streaming:
@@ -2,7 +2,7 @@ import functools
 import re
 
 from .common import InfoExtractor
-from ..compat import compat_HTTPError
+from ..networking.exceptions import HTTPError
 from ..utils import (
     ExtractorError,
     int_or_none,
@@ -111,8 +111,8 @@ class EaglePlatformIE(InfoExtractor):
             response = super(EaglePlatformIE, self)._download_json(
                 url_or_request, video_id, *args, **kwargs)
         except ExtractorError as ee:
-            if isinstance(ee.cause, compat_HTTPError):
-                response = self._parse_json(ee.cause.read().decode('utf-8'), video_id)
+            if isinstance(ee.cause, HTTPError):
+                response = self._parse_json(ee.cause.response.read().decode('utf-8'), video_id)
                 self._handle_error(response)
             raise
         return response
@@ -1,10 +1,6 @@
 from .common import InfoExtractor
-from ..utils import (
-    float_or_none,
-    int_or_none,
-    parse_iso8601,
-    sanitized_Request,
-)
+from ..networking import Request
+from ..utils import float_or_none, int_or_none, parse_iso8601
 
 
 class EitbIE(InfoExtractor):
@@ -54,7 +50,7 @@ class EitbIE(InfoExtractor):
 
         hls_url = media.get('HLS_SURL')
         if hls_url:
-            request = sanitized_Request(
+            request = Request(
                 'http://mam.eitb.eus/mam/REST/ServiceMultiweb/DomainRestrictedSecurity/TokenAuth/',
                 headers={'Referer': url})
             token_data = self._download_json(
@@ -52,7 +52,7 @@ class EpornerIE(InfoExtractor):
 
         webpage, urlh = self._download_webpage_handle(url, display_id)
 
-        video_id = self._match_id(urlh.geturl())
+        video_id = self._match_id(urlh.url)
 
         hash = self._search_regex(
             r'hash\s*[:=]\s*["\']([\da-f]{32})', webpage, 'hash')
@@ -8,6 +8,8 @@ from ..compat import (
     compat_str,
     compat_urllib_parse_unquote,
 )
+from ..networking import Request
+from ..networking.exceptions import network_exceptions
 from ..utils import (
     ExtractorError,
     clean_html,
@@ -19,11 +21,9 @@ from ..utils import (
     int_or_none,
     js_to_json,
     merge_dicts,
-    network_exceptions,
     parse_count,
     parse_qs,
     qualities,
-    sanitized_Request,
     traverse_obj,
     try_get,
     url_or_none,
@@ -319,7 +319,7 @@ class FacebookIE(InfoExtractor):
     }
 
     def _perform_login(self, username, password):
-        login_page_req = sanitized_Request(self._LOGIN_URL)
+        login_page_req = Request(self._LOGIN_URL)
         self._set_cookie('facebook.com', 'locale', 'en_US')
         login_page = self._download_webpage(login_page_req, None,
                                             note='Downloading login page',
@@ -340,8 +340,8 @@ class FacebookIE(InfoExtractor):
             'timezone': '-60',
             'trynum': '1',
         }
-        request = sanitized_Request(self._LOGIN_URL, urlencode_postdata(login_form))
-        request.add_header('Content-Type', 'application/x-www-form-urlencoded')
+        request = Request(self._LOGIN_URL, urlencode_postdata(login_form))
+        request.headers['Content-Type'] = 'application/x-www-form-urlencoded'
         try:
             login_results = self._download_webpage(request, None,
                                                    note='Logging in', errnote='unable to fetch login page')
@@ -367,8 +367,8 @@ class FacebookIE(InfoExtractor):
             'h': h,
             'name_action_selected': 'dont_save',
         }
-        check_req = sanitized_Request(self._CHECKPOINT_URL, urlencode_postdata(check_form))
-        check_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
+        check_req = Request(self._CHECKPOINT_URL, urlencode_postdata(check_form))
+        check_req.headers['Content-Type'] = 'application/x-www-form-urlencoded'
         check_response = self._download_webpage(check_req, None,
                                                 note='Confirming login')
         if re.search(r'id="checkpointSubmitButton"', check_response) is not None:
@@ -3,11 +3,11 @@ import re
 from .common import InfoExtractor
 from ..compat import compat_parse_qs
 from ..dependencies import websockets
+from ..networking import Request
 from ..utils import (
     ExtractorError,
     WebSocketsWrapper,
     js_to_json,
-    sanitized_Request,
     traverse_obj,
     update_url_query,
     urlencode_postdata,
@@ -57,7 +57,7 @@ class FC2IE(InfoExtractor):
         }
 
         login_data = urlencode_postdata(login_form_strs)
-        request = sanitized_Request(
+        request = Request(
             'https://secure.id.fc2.com/index.php?mode=login&switch_language=en', login_data)
 
         login_results = self._download_webpage(request, None, note='Logging in', errnote='Unable to log in')
@@ -66,7 +66,7 @@ class FC2IE(InfoExtractor):
             return False
 
         # this is also needed
-        login_redir = sanitized_Request('http://id.fc2.com/?mode=redirect&login=done')
+        login_redir = Request('http://id.fc2.com/?mode=redirect&login=done')
         self._download_webpage(
             login_redir, None, note='Login redirect', errnote='Login redirect failed')
 
@@ -1,8 +1,6 @@
 from .common import InfoExtractor
-from ..compat import (
-    compat_str,
-    compat_HTTPError,
-)
+from ..compat import compat_str
+from ..networking.exceptions import HTTPError
 from ..utils import (
     qualities,
     strip_or_none,
@@ -40,8 +38,8 @@ class FilmOnIE(InfoExtractor):
                 'https://www.filmon.com/api/vod/movie?id=%s' % video_id,
                 video_id)['response']
         except ExtractorError as e:
-            if isinstance(e.cause, compat_HTTPError):
-                errmsg = self._parse_json(e.cause.read().decode(), video_id)['reason']
+            if isinstance(e.cause, HTTPError):
+                errmsg = self._parse_json(e.cause.response.read().decode(), video_id)['reason']
                 raise ExtractorError('%s said: %s' % (self.IE_NAME, errmsg), expected=True)
             raise
 
@@ -124,8 +122,8 @@ class FilmOnChannelIE(InfoExtractor):
             channel_data = self._download_json(
                 'http://www.filmon.com/api-v2/channel/' + channel_id, channel_id)['data']
         except ExtractorError as e:
-            if isinstance(e.cause, compat_HTTPError):
-                errmsg = self._parse_json(e.cause.read().decode(), channel_id)['message']
+            if isinstance(e.cause, HTTPError):
+                errmsg = self._parse_json(e.cause.response.read().decode(), channel_id)['message']
                 raise ExtractorError('%s said: %s' % (self.IE_NAME, errmsg), expected=True)
             raise
 
@@ -3,10 +3,10 @@ import uuid
 
 from .common import InfoExtractor
 from ..compat import (
-    compat_HTTPError,
     compat_str,
     compat_urllib_parse_unquote,
 )
+from ..networking.exceptions import HTTPError
 from ..utils import (
     ExtractorError,
     int_or_none,
@@ -68,9 +68,9 @@ class FOXIE(InfoExtractor):
                 'https://api3.fox.com/v2.0/' + path,
                 video_id, data=data, headers=headers)
         except ExtractorError as e:
-            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:
+            if isinstance(e.cause, HTTPError) and e.cause.status == 403:
                 entitlement_issues = self._parse_json(
-                    e.cause.read().decode(), video_id)['entitlementIssues']
+                    e.cause.response.read().decode(), video_id)['entitlementIssues']
                 for e in entitlement_issues:
                     if e.get('errorCode') == 1005:
                         raise ExtractorError(
@@ -123,8 +123,8 @@ class FOXIE(InfoExtractor):
         try:
             m3u8_url = self._download_json(release_url, video_id)['playURL']
         except ExtractorError as e:
-            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:
-                error = self._parse_json(e.cause.read().decode(), video_id)
+            if isinstance(e.cause, HTTPError) and e.cause.status == 403:
+                error = self._parse_json(e.cause.response.read().decode(), video_id)
                 if error.get('exception') == 'GeoLocationBlocked':
                     self.raise_geo_restricted(countries=['US'])
                 raise ExtractorError(error['description'], expected=True)
@@ -1,6 +1,7 @@
 from .common import InfoExtractor
 from .uplynk import UplynkPreplayIE
-from ..utils import HEADRequest, float_or_none, make_archive_id, smuggle_url
+from ..networking import HEADRequest
+from ..utils import float_or_none, make_archive_id, smuggle_url
 
 
 class FoxSportsIE(InfoExtractor):
@@ -35,7 +36,7 @@ class FoxSportsIE(InfoExtractor):
             'x-api-key': 'cf289e299efdfa39fb6316f259d1de93',
         })
         preplay_url = self._request_webpage(
-            HEADRequest(data['url']), video_id, 'Fetching preplay URL').geturl()
+            HEADRequest(data['url']), video_id, 'Fetching preplay URL').url
 
         return {
             '_type': 'url_transparent',
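Note: the recurring idiom above, issuing a `HEADRequest` and reading the final `.url` off the response, is how extractors resolve redirects without downloading a body. A standalone sketch of that idiom (the `YoutubeDL` instantiation and URL are illustrative):

    from yt_dlp import YoutubeDL
    from yt_dlp.networking import HEADRequest

    def resolve_redirect(url):
        """Follow redirects with a HEAD request; return the final URL."""
        with YoutubeDL() as ydl:
            return ydl.urlopen(HEADRequest(url)).url

    # resolve_redirect('https://example.com/short-link')  # placeholder URL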
@@ -1,5 +1,5 @@
-from ..utils import HEADRequest
 from .common import InfoExtractor
+from ..networking import HEADRequest
 
 
 class FujiTVFODPlus7IE(InfoExtractor):
@@ -3,7 +3,7 @@ import re
 import string
 
 from .common import InfoExtractor
-from ..compat import compat_HTTPError
+from ..networking.exceptions import HTTPError
 from ..utils import (
     ExtractorError,
     determine_ext,
@@ -46,8 +46,8 @@ class FunimationBaseIE(InfoExtractor):
                 }))
             FunimationBaseIE._TOKEN = data['token']
         except ExtractorError as e:
-            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401:
-                error = self._parse_json(e.cause.read().decode(), None)['error']
+            if isinstance(e.cause, HTTPError) and e.cause.status == 401:
+                error = self._parse_json(e.cause.response.read().decode(), None)['error']
                 raise ExtractorError(error, expected=True)
             raise
 
@@ -2,13 +2,8 @@ import re
 
 from .common import InfoExtractor
 from .kaltura import KalturaIE
-from ..utils import (
-    HEADRequest,
-    remove_start,
-    sanitized_Request,
-    smuggle_url,
-    urlencode_postdata,
-)
+from ..networking import HEADRequest, Request
+from ..utils import remove_start, smuggle_url, urlencode_postdata
 
 
 class GDCVaultIE(InfoExtractor):
@@ -138,8 +133,8 @@ class GDCVaultIE(InfoExtractor):
             'password': password,
         }
 
-        request = sanitized_Request(login_url, urlencode_postdata(login_form))
-        request.add_header('Content-Type', 'application/x-www-form-urlencoded')
+        request = Request(login_url, urlencode_postdata(login_form))
+        request.headers['Content-Type'] = 'application/x-www-form-urlencoded'
         self._download_webpage(request, display_id, 'Logging in')
         start_page = self._download_webpage(webpage_url, display_id, 'Getting authenticated video page')
         self._download_webpage(logout_url, display_id, 'Logging out')
@@ -163,7 +158,7 @@ class GDCVaultIE(InfoExtractor):
             video_url = 'http://www.gdcvault.com' + direct_url
             # resolve the url so that we can detect the correct extension
             video_url = self._request_webpage(
-                HEADRequest(video_url), video_id).geturl()
+                HEADRequest(video_url), video_id).url
 
             return {
                 'id': video_id,
@@ -2431,7 +2431,7 @@ class GenericIE(InfoExtractor):
                 'Accept-Encoding': 'identity',
                 **smuggled_data.get('http_headers', {})
             })
-        new_url = full_response.geturl()
+        new_url = full_response.url
         url = urllib.parse.urlparse(url)._replace(scheme=urllib.parse.urlparse(new_url).scheme).geturl()
         if new_url != extract_basic_auth(url)[0]:
             self.report_following_redirect(new_url)
@@ -2529,12 +2529,12 @@ class GenericIE(InfoExtractor):
                 return self.playlist_result(
                     self._parse_xspf(
                         doc, video_id, xspf_url=url,
-                        xspf_base_url=full_response.geturl()),
+                        xspf_base_url=full_response.url),
                     video_id)
             elif re.match(r'(?i)^(?:{[^}]+})?MPD$', doc.tag):
                 info_dict['formats'], info_dict['subtitles'] = self._parse_mpd_formats_and_subtitles(
                     doc,
-                    mpd_base_url=full_response.geturl().rpartition('/')[0],
+                    mpd_base_url=full_response.url.rpartition('/')[0],
                     mpd_url=url)
                 self._extra_manifest_info(info_dict, url)
                 self.report_detected('DASH manifest')
@@ -2572,7 +2572,7 @@ class GenericIE(InfoExtractor):
         info_dict = types.MappingProxyType(info_dict)  # Prevents accidental mutation
         video_id = traverse_obj(info_dict, 'display_id', 'id') or self._generic_id(url)
         url, smuggled_data = unsmuggle_url(url, {})
-        actual_url = urlh.geturl() if urlh else url
+        actual_url = urlh.url if urlh else url
 
         # Sometimes embedded video player is hidden behind percent encoding
         # (e.g. https://github.com/ytdl-org/youtube-dl/issues/2448)
@@ -8,8 +8,8 @@ from .common import InfoExtractor
 from ..compat import (
     compat_str,
 )
+from ..networking import HEADRequest
 from ..utils import (
-    HEADRequest,
     ExtractorError,
     float_or_none,
     orderedSet,
@@ -228,7 +228,7 @@ class GoogleDriveIE(InfoExtractor):
                 # Using original URLs may result in redirect loop due to
                 # google.com's cookies mistakenly used for googleusercontent.com
                 # redirect URLs (see #23919).
-                'url': urlh.geturl(),
+                'url': urlh.url,
                 'ext': determine_ext(title, 'mp4').lower(),
                 'format_id': 'source',
                 'quality': 1,
@@ -126,7 +126,7 @@ class HKETVIE(InfoExtractor):
             # If we ever wanted to provide the final resolved URL that
             # does not require cookies, albeit with a shorter lifespan:
             # urlh = self._downloader.urlopen(file_url)
-            # resolved_url = urlh.geturl()
+            # resolved_url = urlh.url
             label = fmt.get('label')
             h = self._FORMAT_HEIGHTS.get(label)
             w = h * width // height if h and width and height else None
@@ -1,11 +1,7 @@
 from .common import InfoExtractor
 from ..compat import compat_b64decode
-from ..utils import (
-    ExtractorError,
-    HEADRequest,
-    sanitized_Request,
-    urlencode_postdata,
-)
+from ..networking import HEADRequest, Request
+from ..utils import ExtractorError, urlencode_postdata
 
 
 class HotNewHipHopIE(InfoExtractor):
@@ -36,9 +32,9 @@ class HotNewHipHopIE(InfoExtractor):
             ('mediaType', 's'),
             ('mediaId', video_id),
         ])
-        r = sanitized_Request(
+        r = Request(
             'http://www.hotnewhiphop.com/ajax/media/getActions/', data=reqdata)
-        r.add_header('Content-Type', 'application/x-www-form-urlencoded')
+        r.headers['Content-Type'] = 'application/x-www-form-urlencoded'
         mkd = self._download_json(
             r, video_id, note='Requesting media key',
             errnote='Could not download media key')
@@ -50,7 +46,7 @@ class HotNewHipHopIE(InfoExtractor):
         req = self._request_webpage(
             redirect_req, video_id,
             note='Resolving final URL', errnote='Could not resolve final URL')
-        video_url = req.geturl()
+        video_url = req.url
         if video_url.endswith('.html'):
             raise ExtractorError('Redirect failed')
 
@@ -6,7 +6,8 @@ import time
 import uuid
 
 from .common import InfoExtractor
-from ..compat import compat_HTTPError, compat_str
+from ..compat import compat_str
+from ..networking.exceptions import HTTPError
 from ..utils import (
     ExtractorError,
     determine_ext,
@@ -233,7 +234,7 @@ class HotStarIE(HotStarBaseIE):
                     'height': int_or_none(playback_set.get('height')),
                 }]
             except ExtractorError as e:
-                if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:
+                if isinstance(e.cause, HTTPError) and e.cause.status == 403:
                     geo_restricted = True
                 continue
 
@@ -1,13 +1,13 @@
 import json
 
 from .common import InfoExtractor
-from ..compat import compat_HTTPError
+from ..networking import Request
+from ..networking.exceptions import HTTPError
 from ..utils import (
     clean_html,
     ExtractorError,
     int_or_none,
     parse_age_limit,
-    sanitized_Request,
     try_get,
 )
 
@@ -42,7 +42,7 @@ class HRTiBaseIE(InfoExtractor):
             'application_version': self._APP_VERSION
         }
 
-        req = sanitized_Request(self._API_URL, data=json.dumps(app_data).encode('utf-8'))
+        req = Request(self._API_URL, data=json.dumps(app_data).encode('utf-8'))
         req.get_method = lambda: 'PUT'
 
         resources = self._download_json(
@@ -73,8 +73,8 @@ class HRTiBaseIE(InfoExtractor):
                 self._login_url, None, note='Logging in', errnote='Unable to log in',
                 data=json.dumps(auth_data).encode('utf-8'))
         except ExtractorError as e:
-            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 406:
-                auth_info = self._parse_json(e.cause.read().encode('utf-8'), None)
+            if isinstance(e.cause, HTTPError) and e.cause.status == 406:
+                auth_info = self._parse_json(e.cause.response.read().encode('utf-8'), None)
             else:
                 raise
 
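Note: one wrinkle in the hunk above is that the code still overrides `get_method` to force a PUT, a urllib idiom carried over verbatim. The native `Request` class instead takes the verb as a keyword argument, so the equivalent would look like this sketch (URL and payload are placeholders):

    import json

    from yt_dlp.networking import Request

    # urllib idiom kept in the hunk above:
    #   req.get_method = lambda: 'PUT'
    # native equivalent:
    req = Request(
        'https://example.com/api/resource',  # placeholder URL
        data=json.dumps({'key': 'value'}).encode(),
        method='PUT')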
@@ -1,8 +1,9 @@
 import re
-import urllib.error
+import urllib.parse
 
 from .common import InfoExtractor
 from ..compat import compat_parse_qs
+from ..networking.exceptions import HTTPError
 from ..utils import (
     ExtractorError,
     determine_ext,
@@ -27,9 +28,9 @@ class IGNBaseIE(InfoExtractor):
         try:
             return self._call_api(slug)
         except ExtractorError as e:
-            if isinstance(e.cause, urllib.error.HTTPError) and e.cause.code == 404:
+            if isinstance(e.cause, HTTPError) and e.cause.status == 404:
                 e.cause.args = e.cause.args or [
-                    e.cause.geturl(), e.cause.getcode(), e.cause.reason]
+                    e.cause.response.url, e.cause.status, e.cause.reason]
                 raise ExtractorError(
                     'Content not found: expired?', cause=e.cause,
                     expected=True)
@@ -226,7 +227,7 @@ class IGNVideoIE(IGNBaseIE):
             parsed_url._replace(path=parsed_url.path.rsplit('/', 1)[0] + '/embed'))
 
         webpage, urlh = self._download_webpage_handle(embed_url, video_id)
-        new_url = urlh.geturl()
+        new_url = urlh.url
         ign_url = compat_parse_qs(
             urllib.parse.urlparse(new_url).query).get('url', [None])[-1]
         if ign_url:
@@ -323,14 +324,14 @@ class IGNArticleIE(IGNBaseIE):
         try:
             return self._call_api(slug)
         except ExtractorError as e:
-            if isinstance(e.cause, urllib.error.HTTPError):
+            if isinstance(e.cause, HTTPError):
                 e.cause.args = e.cause.args or [
-                    e.cause.geturl(), e.cause.getcode(), e.cause.reason]
-                if e.cause.code == 404:
+                    e.cause.response.url, e.cause.status, e.cause.reason]
+                if e.cause.status == 404:
                     raise ExtractorError(
                         'Content not found: expired?', cause=e.cause,
                         expected=True)
-                elif e.cause.code == 503:
+                elif e.cause.status == 503:
                     self.report_warning(error_to_compat_str(e.cause))
                     return
             raise
@@ -1,7 +1,7 @@
 import json
 
 from .common import InfoExtractor
-from ..compat import compat_HTTPError
+from ..networking.exceptions import HTTPError
 from ..utils import (
     ExtractorError,
     int_or_none,
@@ -52,9 +52,9 @@ class ImgGamingBaseIE(InfoExtractor):
             return self._call_api(
                 stream_path, media_id)['playerUrlCallback']
         except ExtractorError as e:
-            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:
+            if isinstance(e.cause, HTTPError) and e.cause.status == 403:
                 raise ExtractorError(
-                    self._parse_json(e.cause.read().decode(), media_id)['messages'][0],
+                    self._parse_json(e.cause.response.read().decode(), media_id)['messages'][0],
                     expected=True)
             raise
 
@@ -3,9 +3,9 @@ import itertools
 import json
 import re
 import time
-import urllib.error
 
 from .common import InfoExtractor
+from ..networking.exceptions import HTTPError
 from ..utils import (
     ExtractorError,
     decode_base_n,
@@ -442,7 +442,7 @@ class InstagramIE(InstagramBaseIE):
         shared_data = self._search_json(
             r'window\._sharedData\s*=', webpage, 'shared data', video_id, fatal=False) or {}
 
-        if shared_data and self._LOGIN_URL not in urlh.geturl():
+        if shared_data and self._LOGIN_URL not in urlh.url:
             media.update(traverse_obj(
                 shared_data, ('entry_data', 'PostPage', 0, 'graphql', 'shortcode_media'),
                 ('entry_data', 'PostPage', 0, 'media'), expected_type=dict) or {})
@@ -589,7 +589,7 @@ class InstagramPlaylistBaseIE(InstagramBaseIE):
             except ExtractorError as e:
                 # if it's an error caused by a bad query, and there are
                 # more GIS templates to try, ignore it and keep trying
-                if isinstance(e.cause, urllib.error.HTTPError) and e.cause.code == 403:
+                if isinstance(e.cause, HTTPError) and e.cause.status == 403:
                     if gis_tmpl != gis_tmpls[-1]:
                         continue
                 raise
@@ -81,7 +81,7 @@ class IPrimaIE(InfoExtractor):
             note='Logging in')
 
         # a profile may need to be selected first, even when there is only a single one
-        if '/profile-select' in login_handle.geturl():
+        if '/profile-select' in login_handle.url:
             profile_id = self._search_regex(
                 r'data-identifier\s*=\s*["\']?(\w+)', profile_select_html, 'profile id')
 
@@ -89,7 +89,7 @@ class IPrimaIE(InfoExtractor):
             f'{self._AUTH_ROOT}/user/profile-select-perform/{profile_id}', None,
             query={'continueUrl': '/user/login?redirect_uri=/user/'}, note='Selecting profile')
 
-        code = traverse_obj(login_handle.geturl(), ({parse_qs}, 'code', 0))
+        code = traverse_obj(login_handle.url, ({parse_qs}, 'code', 0))
         if not code:
             raise ExtractorError('Login failed', expected=True)
 
@ -1,5 +1,5 @@
|
||||||
from .common import InfoExtractor
|
from .common import InfoExtractor
|
||||||
from ..compat import compat_HTTPError
|
from ..networking.exceptions import HTTPError
|
||||||
from ..utils import (
|
from ..utils import (
|
||||||
ExtractorError,
|
ExtractorError,
|
||||||
int_or_none,
|
int_or_none,
|
||||||
|
@@ -101,8 +101,8 @@ class KakaoIE(InfoExtractor):
                 cdn_api_base, video_id, query=query,
                 note='Downloading video URL for profile %s' % profile_name)
         except ExtractorError as e:
-            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:
-                resp = self._parse_json(e.cause.read().decode(), video_id)
+            if isinstance(e.cause, HTTPError) and e.cause.status == 403:
+                resp = self._parse_json(e.cause.response.read().decode(), video_id)
                 if resp.get('code') == 'GeoBlocked':
                     self.raise_geo_restricted()
             raise
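The Kakao hunk shows the error-handling side of the migration: compat_HTTPError gives way to the networking HTTPError, the status code moves from .code to .status, and the error body is read from the attached .response rather than from the exception itself. A hedged sketch of the new shape, constructed by hand (URL, status and body are invented):

    import io

    from yt_dlp.networking import Response
    from yt_dlp.networking.exceptions import HTTPError

    # Simulate what a request handler raises on a 403; extractors see this
    # wrapped as e.cause inside `except ExtractorError as e`.
    resp = Response(io.BytesIO(b'{"code": "GeoBlocked"}'), url='http://example.com/api', status=403, headers={})
    try:
        raise HTTPError(resp)
    except HTTPError as err:
        assert err.status == 403                 # formerly err.code
        body = err.response.read().decode()      # formerly err.read()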
@@ -1,7 +1,6 @@
 from .common import InfoExtractor
-
+from ..networking import HEADRequest
 from ..utils import (
-    HEADRequest,
     UserNotLive,
     float_or_none,
     merge_dicts,
@@ -91,7 +91,7 @@ class KuwoIE(KuwoBaseIE):
         webpage, urlh = self._download_webpage_handle(
             url, song_id, note='Download song detail info',
             errnote='Unable to get song detail info')
-        if song_id not in urlh.geturl() or '对不起，该歌曲由于版权问题已被下线，将返回网站首页' in webpage:
+        if song_id not in urlh.url or '对不起，该歌曲由于版权问题已被下线，将返回网站首页' in webpage:
             raise ExtractorError('this song has been offline because of copyright issues', expected=True)
 
         song_name = self._html_search_regex(
@@ -1,13 +1,8 @@
 import re
 
 from .common import InfoExtractor
-from ..utils import (
-    float_or_none,
-    HEADRequest,
-    int_or_none,
-    parse_duration,
-    unified_strdate,
-)
+from ..networking import HEADRequest
+from ..utils import float_or_none, int_or_none, parse_duration, unified_strdate
 
 
 class LA7IE(InfoExtractor):
@@ -3,9 +3,9 @@ import json
 import urllib.parse
 
 from .common import InfoExtractor
+from ..networking import HEADRequest
 from ..utils import (
     ExtractorError,
-    HEADRequest,
     OnDemandPagedList,
     UnsupportedError,
     determine_ext,
@@ -266,7 +266,7 @@ class LBRYIE(LBRYBaseIE):
             # HEAD request returns redirect response to m3u8 URL if available
             final_url = self._request_webpage(
                 HEADRequest(streaming_url), display_id, headers=headers,
-                note='Downloading streaming redirect url info').geturl()
+                note='Downloading streaming redirect url info').url
 
         elif result.get('value_type') == 'stream':
             claim_id, is_live = result['signing_channel']['claim_id'], True
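HEADRequest also changes home in this commit, moving from ..utils into the networking package. It is simply a Request whose method is pinned to HEAD, which is why extractors like the one above use it to resolve redirects without downloading a body. A small sketch under that assumption (placeholder URL):

    from yt_dlp.networking import HEADRequest

    req = HEADRequest('http://example.com/stream')
    assert req.method == 'HEAD'  # the method is fixed by the subclass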
@@ -25,7 +25,7 @@ class LecturioBaseIE(InfoExtractor):
             self._LOGIN_URL, None, 'Downloading login popup')
 
         def is_logged(url_handle):
-            return self._LOGIN_URL not in url_handle.geturl()
+            return self._LOGIN_URL not in url_handle.url
 
         # Already logged in
         if is_logged(urlh):
@@ -1,7 +1,7 @@
 import uuid
 
 from .common import InfoExtractor
-from ..compat import compat_HTTPError
+from ..networking.exceptions import HTTPError
 from ..utils import (
     ExtractorError,
     int_or_none,
@@ -75,7 +75,7 @@ class LEGOIE(InfoExtractor):
                 'videoId': '%s_%s' % (uuid.UUID(video_id), locale),
             }, headers=self.geo_verification_headers())
         except ExtractorError as e:
-            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 451:
+            if isinstance(e.cause, HTTPError) and e.cause.status == 451:
                 self.raise_geo_restricted(countries=countries)
             raise
 
@@ -1,7 +1,7 @@
 import re
 
 from .common import InfoExtractor
-from ..compat import compat_HTTPError
+from ..networking.exceptions import HTTPError
 from ..utils import (
     determine_ext,
     float_or_none,
@@ -69,8 +69,8 @@ class LimelightBaseIE(InfoExtractor):
                 item_id, 'Downloading PlaylistService %s JSON' % method,
                 fatal=fatal, headers=headers)
         except ExtractorError as e:
-            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:
-                error = self._parse_json(e.cause.read().decode(), item_id)['detail']['contentAccessPermission']
+            if isinstance(e.cause, HTTPError) and e.cause.status == 403:
+                error = self._parse_json(e.cause.response.read().decode(), item_id)['detail']['contentAccessPermission']
                 if error == 'CountryDisabled':
                     self.raise_geo_restricted()
                 raise ExtractorError(error, expected=True)
@@ -2,11 +2,8 @@ import json
 import random
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_b64decode,
-    compat_HTTPError,
-    compat_str,
-)
+from ..compat import compat_b64decode, compat_str
+from ..networking.exceptions import HTTPError
 from ..utils import (
     clean_html,
     ExtractorError,
@@ -107,7 +104,7 @@ class LinuxAcademyIE(InfoExtractor):
                 'sso': 'true',
             })
 
-        login_state_url = urlh.geturl()
+        login_state_url = urlh.url
 
         try:
             login_page = self._download_webpage(
@@ -119,8 +116,8 @@ class LinuxAcademyIE(InfoExtractor):
                     'Referer': login_state_url,
                 })
         except ExtractorError as e:
-            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401:
-                error = self._parse_json(e.cause.read(), None)
+            if isinstance(e.cause, HTTPError) and e.cause.status == 401:
+                error = self._parse_json(e.cause.response.read(), None)
                 message = error.get('description') or error['code']
                 raise ExtractorError(
                     '%s said: %s' % (self.IE_NAME, message), expected=True)
@@ -137,7 +134,7 @@ class LinuxAcademyIE(InfoExtractor):
             })
 
         access_token = self._search_regex(
-            r'access_token=([^=&]+)', urlh.geturl(),
+            r'access_token=([^=&]+)', urlh.url,
             'access token', default=None)
         if not access_token:
             access_token = self._parse_json(
@@ -171,7 +171,7 @@ class MediasiteIE(InfoExtractor):
         query = mobj.group('query')
 
         webpage, urlh = self._download_webpage_handle(url, resource_id)  # XXX: add UrlReferrer?
-        redirect_url = urlh.geturl()
+        redirect_url = urlh.url
 
         # XXX: might have also extracted UrlReferrer and QueryString from the html
         service_path = compat_urlparse.urljoin(redirect_url, self._html_search_regex(
@@ -1,14 +1,14 @@
 import re
 
 from .common import InfoExtractor
+from ..networking import HEADRequest
 from ..utils import (
+    ExtractorError,
     clean_html,
     determine_ext,
-    ExtractorError,
     extract_attributes,
     get_element_by_class,
     get_element_html_by_id,
-    HEADRequest,
     parse_qs,
     unescapeHTML,
     unified_timestamp,
@@ -160,5 +160,5 @@ class MegaTVComEmbedIE(MegaTVComBaseIE):
         canonical_url = self._request_webpage(
             HEADRequest(canonical_url), video_id,
             note='Resolve canonical URL',
-            errnote='Could not resolve canonical URL').geturl()
+            errnote='Could not resolve canonical URL').url
         return self.url_result(canonical_url, MegaTVComIE.ie_key(), video_id)
@@ -1,9 +1,9 @@
 import base64
 import time
-import urllib.error
 import uuid
 
 from .common import InfoExtractor
+from ..networking.exceptions import HTTPError
 from ..utils import (
     ExtractorError,
     int_or_none,
@@ -86,8 +86,8 @@ class MGTVIE(InfoExtractor):
                 'type': 'pch5'
             }, headers=self.geo_verification_headers())['data']
         except ExtractorError as e:
-            if isinstance(e.cause, urllib.error.HTTPError) and e.cause.code == 401:
-                error = self._parse_json(e.cause.read().decode(), None)
+            if isinstance(e.cause, HTTPError) and e.cause.status == 401:
+                error = self._parse_json(e.cause.response.read().decode(), None)
                 if error.get('code') == 40005:
                     self.raise_geo_restricted(countries=self._GEO_COUNTRIES)
                 raise ExtractorError(error['msg'], expected=True)
@@ -106,7 +106,7 @@ class MindsIE(MindsBaseIE):
         if poster:
             urlh = self._request_webpage(poster, video_id, fatal=False)
             if urlh:
-                thumbnail = urlh.geturl()
+                thumbnail = urlh.url
 
         return {
             'id': video_id,
@@ -2,12 +2,8 @@ import random
 
 from .common import InfoExtractor
 from ..compat import compat_urlparse
-from ..utils import (
-    xpath_text,
-    int_or_none,
-    ExtractorError,
-    sanitized_Request,
-)
+from ..networking import Request
+from ..utils import ExtractorError, int_or_none, xpath_text
 
 
 class MioMioIE(InfoExtractor):
@@ -61,7 +57,7 @@ class MioMioIE(InfoExtractor):
             'http://www.miomio.tv/mioplayer/mioplayerconfigfiles/xml.php?id=%s&r=%s' % (id, random.randint(100, 999)),
             video_id)
 
-        vid_config_request = sanitized_Request(
+        vid_config_request = Request(
             'http://www.miomio.tv/mioplayer/mioplayerconfigfiles/sina.php?{0}'.format(xml_config),
             headers=http_headers)
 
@@ -2,16 +2,15 @@ import re
 
 from .common import InfoExtractor
 from ..compat import compat_str
+from ..networking import HEADRequest, Request
 from ..utils import (
     ExtractorError,
+    RegexNotFoundError,
     find_xpath_attr,
     fix_xml_ampersands,
     float_or_none,
-    HEADRequest,
     int_or_none,
     join_nonempty,
-    RegexNotFoundError,
-    sanitized_Request,
     strip_or_none,
     timeconvert,
     try_get,
@@ -51,15 +50,15 @@ class MTVServicesInfoExtractor(InfoExtractor):
 
     def _extract_mobile_video_formats(self, mtvn_id):
         webpage_url = self._MOBILE_TEMPLATE % mtvn_id
-        req = sanitized_Request(webpage_url)
+        req = Request(webpage_url)
         # Otherwise we get a webpage that would execute some javascript
-        req.add_header('User-Agent', 'curl/7')
+        req.headers['User-Agent'] = 'curl/7'
         webpage = self._download_webpage(req, mtvn_id,
                                          'Downloading mobile page')
         metrics_url = unescapeHTML(self._search_regex(r'<a href="(http://metrics.+?)"', webpage, 'url'))
         req = HEADRequest(metrics_url)
         response = self._request_webpage(req, mtvn_id, 'Resolving url')
-        url = response.geturl()
+        url = response.url
         # Transform the url to get the best quality:
         url = re.sub(r'.+pxE=mp4', 'http://mtvnmobile.vo.llnwd.net/kip0/_pxn=0+_pxK=18639+_pxE=mp4', url, 1)
         return [{'url': url, 'ext': 'mp4'}]
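Two replacements meet in this hunk: sanitized_Request becomes the networking Request, and the urllib-style add_header() call becomes a plain assignment into the req.headers mapping. A sketch of the equivalent standalone usage (URL and header values are illustrative only):

    from yt_dlp.networking import Request

    req = Request('http://example.com/mobile', headers={'X-Test': '1'})
    req.headers['User-Agent'] = 'curl/7'  # replaces req.add_header('User-Agent', 'curl/7')

    assert req.headers['User-Agent'] == 'curl/7'
    assert req.url == 'http://example.com/mobile'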
@@ -6,9 +6,9 @@ from .common import InfoExtractor
 from .theplatform import ThePlatformIE, default_ns
 from .adobepass import AdobePassIE
 from ..compat import compat_urllib_parse_unquote
+from ..networking import HEADRequest
 from ..utils import (
     ExtractorError,
-    HEADRequest,
     RegexNotFoundError,
     UserNotLive,
     clean_html,
@@ -1,8 +1,8 @@
 import itertools
 import json
-import urllib.error
 
 from .common import InfoExtractor
+from ..networking.exceptions import HTTPError
 from ..utils import ExtractorError, make_archive_id, parse_iso8601, remove_start
 
 _BASE_URL_RE = r'https?://(?:www\.|beta\.)?(?:watchnebula\.com|nebula\.app|nebula\.tv)'
@@ -48,7 +48,7 @@ class NebulaBaseIE(InfoExtractor):
             return inner_call()
         except ExtractorError as exc:
             # if 401 or 403, attempt credential re-auth and retry
-            if exc.cause and isinstance(exc.cause, urllib.error.HTTPError) and exc.cause.code in (401, 403):
+            if exc.cause and isinstance(exc.cause, HTTPError) and exc.cause.status in (401, 403):
                 self.to_screen(f'Reauthenticating to Nebula and retrying, because last {auth_type} call resulted in error {exc.cause.code}')
                 self._perform_login()
                 return inner_call()
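The retry-on-auth-failure idiom above ports directly: test exc.cause against the networking HTTPError and branch on .status. A generic sketch of the same idea, where call_api and refresh_login are hypothetical stand-ins for the extractor's own helpers:

    from yt_dlp.networking.exceptions import HTTPError

    def call_with_reauth(call_api, refresh_login):
        # call_api/refresh_login are placeholders for the extractor's API
        # wrapper and login routine; real extractor code catches
        # ExtractorError and inspects its .cause instead.
        try:
            return call_api()
        except HTTPError as err:
            if err.status in (401, 403):  # credentials expired or rejected
                refresh_login()
                return call_api()
            raise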
@@ -11,6 +11,7 @@ from random import randint
 from .common import InfoExtractor
 from ..aes import aes_ecb_encrypt, pkcs7_padding
 from ..compat import compat_urllib_parse_urlencode
+from ..networking import Request
 from ..utils import (
     ExtractorError,
     bytes_to_intlist,
@@ -18,7 +19,6 @@ from ..utils import (
     float_or_none,
     int_or_none,
     intlist_to_bytes,
-    sanitized_Request,
     try_get,
 )
 
@@ -146,8 +146,8 @@ class NetEaseMusicBaseIE(InfoExtractor):
         return int(round(ms / 1000.0))
 
     def query_api(self, endpoint, video_id, note):
-        req = sanitized_Request('%s%s' % (self._API_BASE, endpoint))
-        req.add_header('Referer', self._API_BASE)
+        req = Request('%s%s' % (self._API_BASE, endpoint))
+        req.headers['Referer'] = self._API_BASE
         return self._download_json(req, video_id, note)
 
 
@@ -8,10 +8,8 @@ import time
 from urllib.parse import urlparse
 
 from .common import InfoExtractor, SearchInfoExtractor
-from ..compat import (
-    compat_HTTPError,
-)
 from ..dependencies import websockets
+from ..networking.exceptions import HTTPError
 from ..utils import (
     ExtractorError,
     OnDemandPagedList,
@@ -396,7 +394,7 @@ class NiconicoIE(InfoExtractor):
             webpage, handle = self._download_webpage_handle(
                 'https://www.nicovideo.jp/watch/' + video_id, video_id)
             if video_id.startswith('so'):
                video_id = self._match_id(handle.url)
@@ -407,9 +405,9 @@ class NiconicoIE(InfoExtractor):
                     'https://www.nicovideo.jp/api/watch/v3/%s?_frontendId=6&_frontendVersion=0&actionTrackId=AAAAAAAAAA_%d' % (video_id, round(time.time() * 1000)), video_id,
                     note='Downloading API JSON', errnote='Unable to fetch data')['data']
             except ExtractorError:
-                if not isinstance(e.cause, compat_HTTPError):
+                if not isinstance(e.cause, HTTPError):
                     raise
-                webpage = e.cause.read().decode('utf-8', 'replace')
+                webpage = e.cause.response.read().decode('utf-8', 'replace')
                 error_msg = self._html_search_regex(
                     r'(?s)<section\s+class="(?:(?:ErrorMessage|WatchExceptionPage-message)\s*)+">(.+?)</section>',
                     webpage, 'error reason', default=None)
@@ -742,7 +740,7 @@ class NiconicoHistoryIE(NiconicoPlaylistBaseIE):
         try:
             mylist = self._call_api(list_id, 'list', {'pageSize': 1})
         except ExtractorError as e:
-            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401:
+            if isinstance(e.cause, HTTPError) and e.cause.status == 401:
                 self.raise_login_required('You have to be logged in to get your history')
             raise
         return self.playlist_result(self._entries(list_id), list_id, **self._parse_owner(mylist))
@@ -951,8 +949,8 @@ class NiconicoLiveIE(InfoExtractor):
             'frontend_id': traverse_obj(embedded_data, ('site', 'frontendId')) or '9',
         })
 
-        hostname = remove_start(urlparse(urlh.geturl()).hostname, 'sp.')
-        cookies = try_get(urlh.geturl(), self._downloader._calc_cookies)
+        hostname = remove_start(urlparse(urlh.url).hostname, 'sp.')
+        cookies = try_get(urlh.url, self._downloader._calc_cookies)
         latency = try_get(self._configuration_arg('latency'), lambda x: x[0])
         if latency not in self._KNOWN_LATENCY:
             latency = 'high'
@@ -51,7 +51,7 @@ class NJPWWorldIE(InfoExtractor):
             data=urlencode_postdata({'login_id': username, 'pw': password}),
             headers={'Referer': 'https://front.njpwworld.com/auth'})
         # /auth/login will return 302 for successful logins
-        if urlh.geturl() == self._LOGIN_URL:
+        if urlh.url == self._LOGIN_URL:
             self.report_warning('unable to login')
             return False
 
@@ -1,9 +1,9 @@
 import re
 
 from .common import InfoExtractor
+from ..networking import Request
 from ..utils import (
     ExtractorError,
-    sanitized_Request,
     urlencode_postdata,
     xpath_text,
     xpath_with_ns,
@@ -36,8 +36,8 @@ class NosVideoIE(InfoExtractor):
             'op': 'download1',
             'method_free': 'Continue to Video',
         }
-        req = sanitized_Request(url, urlencode_postdata(fields))
-        req.add_header('Content-type', 'application/x-www-form-urlencoded')
+        req = Request(url, urlencode_postdata(fields))
+        req.headers['Content-type'] = 'application/x-www-form-urlencoded'
         webpage = self._download_webpage(req, video_id,
                                          'Downloading download page')
         if re.search(self._FILE_DELETED_REGEX, webpage) is not None:
@@ -4,10 +4,8 @@ from .brightcove import (
 )
 from .common import InfoExtractor
 from ..compat import compat_str
-from ..utils import (
-    ExtractorError,
-    sanitized_Request,
-)
+from ..networking import Request
+from ..utils import ExtractorError
 
 
 class NownessBaseIE(InfoExtractor):
@@ -40,7 +38,7 @@ class NownessBaseIE(InfoExtractor):
 
     def _api_request(self, url, request_path):
         display_id = self._match_id(url)
-        request = sanitized_Request(
+        request = Request(
             'http://api.nowness.com/api/' + request_path % display_id,
             headers={
                 'X-Nowness-Language': 'zh-cn' if 'cn.nowness.com' in url else 'en-us',
@@ -3,7 +3,8 @@ import random
 import re
 
 from .common import InfoExtractor
-from ..compat import compat_HTTPError, compat_str
+from ..compat import compat_str
+from ..networking.exceptions import HTTPError
 from ..utils import (
     ExtractorError,
     determine_ext,
@@ -148,7 +149,7 @@ class NRKIE(NRKBaseIE):
         try:
             return self._call_api(f'playback/{item}/program/{video_id}', video_id, item, query=query)
         except ExtractorError as e:
-            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 400:
+            if isinstance(e.cause, HTTPError) and e.cause.status == 400:
                 return self._call_api(f'playback/{item}/{video_id}', video_id, item, query=query)
             raise
 
@@ -1,7 +1,7 @@
 import json
-import urllib.error
 
 from .common import InfoExtractor
+from ..networking.exceptions import HTTPError
 from ..utils import (
     ExtractorError,
     GeoRestrictedError,
@@ -74,8 +74,8 @@ class OnDemandChinaEpisodeIE(InfoExtractor):
             f'https://odkmedia.io/odc/api/v2/playback/{video_info["id"]}/', display_id,
             headers={'Authorization': '', 'service-name': 'odc'})
         except ExtractorError as e:
-            if isinstance(e.cause, urllib.error.HTTPError):
-                error_data = self._parse_json(e.cause.read(), display_id)['detail']
+            if isinstance(e.cause, HTTPError):
+                error_data = self._parse_json(e.cause.response.read(), display_id)['detail']
                 raise GeoRestrictedError(error_data)
 
         formats, subtitles = [], {}
@@ -7,9 +7,9 @@ from ..compat import (
     compat_urllib_parse_unquote,
     compat_urllib_parse_urlparse,
 )
+from ..networking import HEADRequest
 from ..utils import (
     ExtractorError,
-    HEADRequest,
     float_or_none,
     int_or_none,
     qualities,
@@ -448,7 +448,7 @@ class OdnoklassnikiIE(InfoExtractor):
         json_data = self._parse_json(unescapeHTML(json_data), video_id) or {}
 
         redirect_url = self._request_webpage(HEADRequest(
-            json_data['videoSrc']), video_id, 'Requesting download URL').geturl()
+            json_data['videoSrc']), video_id, 'Requesting download URL').url
         self._clear_cookies(redirect_url)
 
         return {
@@ -2,11 +2,11 @@ import functools
 import re
 
 from .common import InfoExtractor
+from ..networking import HEADRequest
 from ..utils import (
     clean_html,
     determine_ext,
     float_or_none,
-    HEADRequest,
     InAdvancePagedList,
     int_or_none,
     join_nonempty,
@@ -44,7 +44,7 @@ class OwnCloudIE(InfoExtractor):
         webpage, urlh = self._download_webpage_handle(url, video_id)
 
         if re.search(r'<label[^>]+for="password"', webpage):
-            webpage = self._verify_video_password(webpage, urlh.geturl(), video_id)
+            webpage = self._verify_video_password(webpage, urlh.url, video_id)
 
         hidden_inputs = self._hidden_inputs(webpage)
         title = hidden_inputs.get('filename')
@@ -1,10 +1,7 @@
 import json
 
 from .common import InfoExtractor
-from ..compat import (
-    # compat_str,
-    compat_HTTPError,
-)
+from ..networking.exceptions import HTTPError
 from ..utils import (
     clean_html,
     ExtractorError,
@@ -54,8 +51,8 @@ class PacktPubIE(PacktPubBaseIE):
                 'password': password,
             }).encode())['data']['access']
         except ExtractorError as e:
-            if isinstance(e.cause, compat_HTTPError) and e.cause.code in (400, 401, 404):
-                message = self._parse_json(e.cause.read().decode(), None)['message']
+            if isinstance(e.cause, HTTPError) and e.cause.status in (400, 401, 404):
+                message = self._parse_json(e.cause.response.read().decode(), None)['message']
                 raise ExtractorError(message, expected=True)
             raise
 
@@ -70,7 +67,7 @@ class PacktPubIE(PacktPubBaseIE):
                 'https://services.packtpub.com/products-v1/products/%s/%s/%s' % (course_id, chapter_id, video_id), video_id,
                 'Downloading JSON video', headers=headers)['data']
         except ExtractorError as e:
-            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 400:
+            if isinstance(e.cause, HTTPError) and e.cause.status == 400:
                 self.raise_login_required('This video is locked')
             raise
 
@@ -1,10 +1,10 @@
 import itertools
-from urllib.error import HTTPError
 
 from .common import InfoExtractor
 from .vimeo import VimeoIE
 
 from ..compat import compat_urllib_parse_unquote
+from ..networking.exceptions import HTTPError
 from ..utils import (
     clean_html,
     determine_ext,
@@ -37,9 +37,9 @@ class PatreonBaseIE(InfoExtractor):
                 item_id, note='Downloading API JSON' if not note else note,
                 query=query, fatal=fatal, headers=headers)
         except ExtractorError as e:
-            if not isinstance(e.cause, HTTPError) or mimetype2ext(e.cause.headers.get('Content-Type')) != 'json':
+            if not isinstance(e.cause, HTTPError) or mimetype2ext(e.cause.response.headers.get('Content-Type')) != 'json':
                 raise
-            err_json = self._parse_json(self._webpage_read_content(e.cause, None, item_id), item_id, fatal=False)
+            err_json = self._parse_json(self._webpage_read_content(e.cause.response, None, item_id), item_id, fatal=False)
             err_message = traverse_obj(err_json, ('errors', ..., 'detail'), get_all=False)
             if err_message:
                 raise ExtractorError(f'Patreon said: {err_message}', expected=True)
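Header inspection moves the same way as body reads: the exception no longer doubles as a response object, so e.cause.headers becomes e.cause.response.headers. A minimal sketch of probing an error body's Content-Type the new way (all values below are placeholders):

    import io

    from yt_dlp.networking import Response
    from yt_dlp.networking.exceptions import HTTPError

    resp = Response(
        io.BytesIO(b'{"errors": [{"detail": "not found"}]}'),
        url='http://example.com/api', status=404,
        headers={'Content-Type': 'application/json'})
    err = HTTPError(resp)

    # The response, not the exception, now carries headers and body
    if err.response.headers.get('Content-Type') == 'application/json':
        detail = err.response.read().decode()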
@@ -3,7 +3,7 @@ import re
 import urllib.parse
 
 from .common import InfoExtractor
-from ..compat import compat_HTTPError
+from ..networking.exceptions import HTTPError
 from ..utils import (
     ExtractorError,
     float_or_none,
@@ -83,8 +83,8 @@ class PelotonIE(InfoExtractor):
                 }).encode(),
                 headers={'Content-Type': 'application/json', 'User-Agent': 'web'})
         except ExtractorError as e:
-            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401:
-                json_string = self._webpage_read_content(e.cause, None, video_id)
+            if isinstance(e.cause, HTTPError) and e.cause.status == 401:
+                json_string = self._webpage_read_content(e.cause.response, None, video_id)
                 res = self._parse_json(json_string, video_id)
                 raise ExtractorError(res['message'], expected=res['message'] == 'Login failed')
             else:
@@ -96,8 +96,8 @@ class PelotonIE(InfoExtractor):
                 'https://api.onepeloton.com/api/subscription/stream', video_id, note='Downloading token',
                 data=json.dumps({}).encode(), headers={'Content-Type': 'application/json'})
         except ExtractorError as e:
-            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:
-                json_string = self._webpage_read_content(e.cause, None, video_id)
+            if isinstance(e.cause, HTTPError) and e.cause.status == 403:
+                json_string = self._webpage_read_content(e.cause.response, None, video_id)
                 res = self._parse_json(json_string, video_id)
                 raise ExtractorError(res['message'], expected=res['message'] == 'Stream limit reached')
             else:
@@ -109,7 +109,7 @@ class PelotonIE(InfoExtractor):
         try:
             self._start_session(video_id)
         except ExtractorError as e:
-            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401:
+            if isinstance(e.cause, HTTPError) and e.cause.status == 401:
                 self._login(video_id)
                 self._start_session(video_id)
             else:
@@ -69,7 +69,7 @@ class PiaproIE(InfoExtractor):
         if urlh is False:
             login_ok = False
         else:
-            parts = compat_urlparse.urlparse(urlh.geturl())
+            parts = compat_urlparse.urlparse(urlh.url)
             if parts.path != '/':
                 login_ok = False
         if not login_ok:
@@ -78,7 +78,7 @@ class PladformIE(InfoExtractor):
                 expected=True)
 
         if not video:
-            targetUrl = self._request_webpage(url, video_id, note='Resolving final URL').geturl()
+            targetUrl = self._request_webpage(url, video_id, note='Resolving final URL').url
             if targetUrl == url:
                 raise ExtractorError('Can\'t parse page')
             return self.url_result(targetUrl)
@@ -36,7 +36,7 @@ class PlatziBaseIE(InfoExtractor):
             headers={'Referer': self._LOGIN_URL})
 
         # login succeeded
-        if 'platzi.com/login' not in urlh.geturl():
+        if 'platzi.com/login' not in urlh.url:
             return
 
         login_error = self._webpage_read_content(
@@ -1,13 +1,9 @@
 import json
 
 from .common import InfoExtractor
-from ..compat import compat_HTTPError
-from ..utils import (
-    clean_html,
-    ExtractorError,
-    int_or_none,
-    PUTRequest,
-)
+from ..networking import PUTRequest
+from ..networking.exceptions import HTTPError
+from ..utils import ExtractorError, clean_html, int_or_none
 
 
 class PlayPlusTVIE(InfoExtractor):
@@ -47,9 +43,9 @@ class PlayPlusTVIE(InfoExtractor):
         try:
             self._token = self._download_json(req, None)['token']
         except ExtractorError as e:
-            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401:
+            if isinstance(e.cause, HTTPError) and e.cause.status == 401:
                 raise ExtractorError(self._parse_json(
-                    e.cause.read(), None)['errorMessage'], expected=True)
+                    e.cause.response.read(), None)['errorMessage'], expected=True)
             raise
 
         self._profile = self._call_api('Profiles')['list'][0]['_id']
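PUTRequest follows HEADRequest out of utils and into the networking package; it is a Request with the method pinned to PUT. A brief sketch (endpoint and payload are invented for illustration):

    import json

    from yt_dlp.networking import PUTRequest

    req = PUTRequest(
        'http://example.com/api/token',  # placeholder endpoint
        data=json.dumps({'card': 'dummy'}).encode(),
        headers={'Content-Type': 'application/json'})
    assert req.method == 'PUT'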
@@ -3,11 +3,12 @@ import itertools
 import math
 import operator
 import re
-import urllib.request
 
 from .common import InfoExtractor
 from .openload import PhantomJSwrapper
-from ..compat import compat_HTTPError, compat_str
+from ..compat import compat_str
+from ..networking import Request
+from ..networking.exceptions import HTTPError
 from ..utils import (
     NO_DEFAULT,
     ExtractorError,
@@ -46,8 +47,8 @@ class PornHubBaseIE(InfoExtractor):
                 r'document\.cookie\s*=\s*["\']RNKEY=',
                 r'document\.location\.reload\(true\)')):
             url_or_request = args[0]
-            url = (url_or_request.get_full_url()
-                   if isinstance(url_or_request, urllib.request.Request)
+            url = (url_or_request.url
+                   if isinstance(url_or_request, Request)
                    else url_or_request)
             phantom = PhantomJSwrapper(self, required_version='2.0')
             phantom.get(url, html=webpage)
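With urllib.request.Request gone from the branch above, both request objects and plain strings reduce to a URL through the same attribute, since the networking Request exposes .url directly and the old get_full_url() special case disappears. A sketch of that normalization in isolation (hypothetical helper, placeholder URLs):

    from yt_dlp.networking import Request

    def as_url(url_or_request):
        # Mirrors the inline expression in the extractor above
        return url_or_request.url if isinstance(url_or_request, Request) else url_or_request

    assert as_url('http://example.com/a') == 'http://example.com/a'
    assert as_url(Request('http://example.com/b')) == 'http://example.com/b'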
@@ -602,7 +603,7 @@ class PornHubPagedPlaylistBaseIE(PornHubPlaylistBaseIE):
             base_url, item_id, note, query={'page': num})
 
         def is_404(e):
-            return isinstance(e.cause, compat_HTTPError) and e.cause.code == 404
+            return isinstance(e.cause, HTTPError) and e.cause.status == 404
 
         base_url = url
         has_page = page is not None
@@ -1,8 +1,6 @@
 from .common import InfoExtractor
-from ..compat import (
-    compat_HTTPError,
-    compat_str,
-)
+from ..compat import compat_str
+from ..networking.exceptions import HTTPError
 from ..utils import (
     ExtractorError,
     int_or_none,
@@ -72,7 +70,7 @@ class PuhuTVIE(InfoExtractor):
                 display_id, 'Downloading video JSON',
                 headers=self.geo_verification_headers())
         except ExtractorError as e:
-            if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403:
+            if isinstance(e.cause, HTTPError) and e.cause.status == 403:
                 self.raise_geo_restricted()
             raise
 
@@ -41,7 +41,7 @@ class RadikoBaseIE(InfoExtractor):
                 'x-radiko-device': 'pc',
                 'x-radiko-user': 'dummy_user',
             })
-        auth1_header = auth1_handle.info()
+        auth1_header = auth1_handle.headers
 
         auth_token = auth1_header['X-Radiko-AuthToken']
         kl = int(auth1_header['X-Radiko-KeyLength'])
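The same deprecation sweep covers info(): a Response exposes its header mapping as .headers, with info() kept only as a deprecated alias for the same object. A short sketch (header names mirror the Radiko code above; the values are dummies):

    import io

    from yt_dlp.networking import Response

    res = Response(io.BytesIO(b''), url='http://example.com/auth1', status=200,
                   headers={'X-Radiko-AuthToken': 'dummy', 'X-Radiko-KeyLength': '16'})

    auth_header = res.headers  # replaces res.info()
    kl = int(auth_header['X-Radiko-KeyLength'])
    assert res.info() is res.headers  # the alias returns the very same mapping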
@@ -1,5 +1,5 @@
 from .common import InfoExtractor
-from ..compat import compat_HTTPError
+from ..networking.exceptions import HTTPError
 from ..utils import (
     determine_ext,
     ExtractorError,
@@ -74,8 +74,8 @@ class RadioCanadaIE(InfoExtractor):
             return self._download_json(
                 'https://services.radio-canada.ca/media/' + path, video_id, query=query)
         except ExtractorError as e:
-            if isinstance(e.cause, compat_HTTPError) and e.cause.code in (401, 422):
-                data = self._parse_json(e.cause.read().decode(), None)
+            if isinstance(e.cause, HTTPError) and e.cause.status in (401, 422):
+                data = self._parse_json(e.cause.response.read().decode(), None)
                 error = data.get('error_description') or data['errorMessage']['text']
                 raise ExtractorError(error, expected=True)
             raise
@@ -1,9 +1,9 @@
 import re
 
 from .common import InfoExtractor
+from ..networking import HEADRequest
 from ..utils import (
     ExtractorError,
-    HEADRequest,
     base_url,
     clean_html,
     extract_attributes,
Some files were not shown because too many files have changed in this diff.