mirror of https://github.com/yt-dlp/yt-dlp.git
Merge branch 'yt-dlp:master' into generic
This commit is contained in:
commit
a111b3ab58
|
@ -18,7 +18,7 @@ body:
|
|||
options:
|
||||
- label: I'm reporting a broken site
|
||||
required: true
|
||||
- label: I've verified that I'm running yt-dlp version **2022.11.11** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
|
||||
- label: I've verified that I'm running yt-dlp version **2023.01.06** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
|
||||
required: true
|
||||
- label: I've checked that all provided URLs are playable in a browser with the same IP and same login details
|
||||
required: true
|
||||
|
@ -62,7 +62,7 @@ body:
|
|||
[debug] Command-line config: ['-vU', 'test:youtube']
|
||||
[debug] Portable config "yt-dlp.conf": ['-i']
|
||||
[debug] Encodings: locale cp65001, fs utf-8, pref cp65001, out utf-8, error utf-8, screen utf-8
|
||||
[debug] yt-dlp version 2022.11.11 [9d339c4] (win32_exe)
|
||||
[debug] yt-dlp version 2023.01.06 [9d339c4] (win32_exe)
|
||||
[debug] Python 3.8.10 (CPython 64bit) - Windows-10-10.0.22000-SP0
|
||||
[debug] Checking exe version: ffmpeg -bsfs
|
||||
[debug] Checking exe version: ffprobe -bsfs
|
||||
|
@ -70,8 +70,8 @@ body:
|
|||
[debug] Optional libraries: Cryptodome-3.15.0, brotli-1.0.9, certifi-2022.06.15, mutagen-1.45.1, sqlite3-2.6.0, websockets-10.3
|
||||
[debug] Proxy map: {}
|
||||
[debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest
|
||||
Latest version: 2022.11.11, Current version: 2022.11.11
|
||||
yt-dlp is up to date (2022.11.11)
|
||||
Latest version: 2023.01.06, Current version: 2023.01.06
|
||||
yt-dlp is up to date (2023.01.06)
|
||||
<more lines>
|
||||
render: shell
|
||||
validations:
|
||||
|
|
|
@ -18,7 +18,7 @@ body:
|
|||
options:
|
||||
- label: I'm reporting a new site support request
|
||||
required: true
|
||||
- label: I've verified that I'm running yt-dlp version **2022.11.11** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
|
||||
- label: I've verified that I'm running yt-dlp version **2023.01.06** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
|
||||
required: true
|
||||
- label: I've checked that all provided URLs are playable in a browser with the same IP and same login details
|
||||
required: true
|
||||
|
@ -74,7 +74,7 @@ body:
|
|||
[debug] Command-line config: ['-vU', 'test:youtube']
|
||||
[debug] Portable config "yt-dlp.conf": ['-i']
|
||||
[debug] Encodings: locale cp65001, fs utf-8, pref cp65001, out utf-8, error utf-8, screen utf-8
|
||||
[debug] yt-dlp version 2022.11.11 [9d339c4] (win32_exe)
|
||||
[debug] yt-dlp version 2023.01.06 [9d339c4] (win32_exe)
|
||||
[debug] Python 3.8.10 (CPython 64bit) - Windows-10-10.0.22000-SP0
|
||||
[debug] Checking exe version: ffmpeg -bsfs
|
||||
[debug] Checking exe version: ffprobe -bsfs
|
||||
|
@ -82,8 +82,8 @@ body:
|
|||
[debug] Optional libraries: Cryptodome-3.15.0, brotli-1.0.9, certifi-2022.06.15, mutagen-1.45.1, sqlite3-2.6.0, websockets-10.3
|
||||
[debug] Proxy map: {}
|
||||
[debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest
|
||||
Latest version: 2022.11.11, Current version: 2022.11.11
|
||||
yt-dlp is up to date (2022.11.11)
|
||||
Latest version: 2023.01.06, Current version: 2023.01.06
|
||||
yt-dlp is up to date (2023.01.06)
|
||||
<more lines>
|
||||
render: shell
|
||||
validations:
|
||||
|
|
|
@ -18,7 +18,7 @@ body:
|
|||
options:
|
||||
- label: I'm requesting a site-specific feature
|
||||
required: true
|
||||
- label: I've verified that I'm running yt-dlp version **2022.11.11** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
|
||||
- label: I've verified that I'm running yt-dlp version **2023.01.06** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
|
||||
required: true
|
||||
- label: I've checked that all provided URLs are playable in a browser with the same IP and same login details
|
||||
required: true
|
||||
|
@ -70,7 +70,7 @@ body:
|
|||
[debug] Command-line config: ['-vU', 'test:youtube']
|
||||
[debug] Portable config "yt-dlp.conf": ['-i']
|
||||
[debug] Encodings: locale cp65001, fs utf-8, pref cp65001, out utf-8, error utf-8, screen utf-8
|
||||
[debug] yt-dlp version 2022.11.11 [9d339c4] (win32_exe)
|
||||
[debug] yt-dlp version 2023.01.06 [9d339c4] (win32_exe)
|
||||
[debug] Python 3.8.10 (CPython 64bit) - Windows-10-10.0.22000-SP0
|
||||
[debug] Checking exe version: ffmpeg -bsfs
|
||||
[debug] Checking exe version: ffprobe -bsfs
|
||||
|
@ -78,8 +78,8 @@ body:
|
|||
[debug] Optional libraries: Cryptodome-3.15.0, brotli-1.0.9, certifi-2022.06.15, mutagen-1.45.1, sqlite3-2.6.0, websockets-10.3
|
||||
[debug] Proxy map: {}
|
||||
[debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest
|
||||
Latest version: 2022.11.11, Current version: 2022.11.11
|
||||
yt-dlp is up to date (2022.11.11)
|
||||
Latest version: 2023.01.06, Current version: 2023.01.06
|
||||
yt-dlp is up to date (2023.01.06)
|
||||
<more lines>
|
||||
render: shell
|
||||
validations:
|
||||
|
|
|
@ -18,7 +18,7 @@ body:
|
|||
options:
|
||||
- label: I'm reporting a bug unrelated to a specific site
|
||||
required: true
|
||||
- label: I've verified that I'm running yt-dlp version **2022.11.11** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
|
||||
- label: I've verified that I'm running yt-dlp version **2023.01.06** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
|
||||
required: true
|
||||
- label: I've checked that all provided URLs are playable in a browser with the same IP and same login details
|
||||
required: true
|
||||
|
@ -55,7 +55,7 @@ body:
|
|||
[debug] Command-line config: ['-vU', 'test:youtube']
|
||||
[debug] Portable config "yt-dlp.conf": ['-i']
|
||||
[debug] Encodings: locale cp65001, fs utf-8, pref cp65001, out utf-8, error utf-8, screen utf-8
|
||||
[debug] yt-dlp version 2022.11.11 [9d339c4] (win32_exe)
|
||||
[debug] yt-dlp version 2023.01.06 [9d339c4] (win32_exe)
|
||||
[debug] Python 3.8.10 (CPython 64bit) - Windows-10-10.0.22000-SP0
|
||||
[debug] Checking exe version: ffmpeg -bsfs
|
||||
[debug] Checking exe version: ffprobe -bsfs
|
||||
|
@ -63,8 +63,8 @@ body:
|
|||
[debug] Optional libraries: Cryptodome-3.15.0, brotli-1.0.9, certifi-2022.06.15, mutagen-1.45.1, sqlite3-2.6.0, websockets-10.3
|
||||
[debug] Proxy map: {}
|
||||
[debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest
|
||||
Latest version: 2022.11.11, Current version: 2022.11.11
|
||||
yt-dlp is up to date (2022.11.11)
|
||||
Latest version: 2023.01.06, Current version: 2023.01.06
|
||||
yt-dlp is up to date (2023.01.06)
|
||||
<more lines>
|
||||
render: shell
|
||||
validations:
|
||||
|
|
|
@ -20,7 +20,7 @@ body:
|
|||
required: true
|
||||
- label: I've looked through the [README](https://github.com/yt-dlp/yt-dlp#readme)
|
||||
required: true
|
||||
- label: I've verified that I'm running yt-dlp version **2022.11.11** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
|
||||
- label: I've verified that I'm running yt-dlp version **2023.01.06** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
|
||||
required: true
|
||||
- label: I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar issues **including closed ones**. DO NOT post duplicates
|
||||
required: true
|
||||
|
@ -51,7 +51,7 @@ body:
|
|||
[debug] Command-line config: ['-vU', 'test:youtube']
|
||||
[debug] Portable config "yt-dlp.conf": ['-i']
|
||||
[debug] Encodings: locale cp65001, fs utf-8, pref cp65001, out utf-8, error utf-8, screen utf-8
|
||||
[debug] yt-dlp version 2022.11.11 [9d339c4] (win32_exe)
|
||||
[debug] yt-dlp version 2023.01.06 [9d339c4] (win32_exe)
|
||||
[debug] Python 3.8.10 (CPython 64bit) - Windows-10-10.0.22000-SP0
|
||||
[debug] Checking exe version: ffmpeg -bsfs
|
||||
[debug] Checking exe version: ffprobe -bsfs
|
||||
|
@ -59,7 +59,7 @@ body:
|
|||
[debug] Optional libraries: Cryptodome-3.15.0, brotli-1.0.9, certifi-2022.06.15, mutagen-1.45.1, sqlite3-2.6.0, websockets-10.3
|
||||
[debug] Proxy map: {}
|
||||
[debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest
|
||||
Latest version: 2022.11.11, Current version: 2022.11.11
|
||||
yt-dlp is up to date (2022.11.11)
|
||||
Latest version: 2023.01.06, Current version: 2023.01.06
|
||||
yt-dlp is up to date (2023.01.06)
|
||||
<more lines>
|
||||
render: shell
|
||||
|
|
|
@ -26,7 +26,7 @@ body:
|
|||
required: true
|
||||
- label: I've looked through the [README](https://github.com/yt-dlp/yt-dlp#readme)
|
||||
required: true
|
||||
- label: I've verified that I'm running yt-dlp version **2022.11.11** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
|
||||
- label: I've verified that I'm running yt-dlp version **2023.01.06** ([update instructions](https://github.com/yt-dlp/yt-dlp#update)) or later (specify commit)
|
||||
required: true
|
||||
- label: I've searched the [bugtracker](https://github.com/yt-dlp/yt-dlp/issues?q=) for similar questions **including closed ones**. DO NOT post duplicates
|
||||
required: true
|
||||
|
@ -57,7 +57,7 @@ body:
|
|||
[debug] Command-line config: ['-vU', 'test:youtube']
|
||||
[debug] Portable config "yt-dlp.conf": ['-i']
|
||||
[debug] Encodings: locale cp65001, fs utf-8, pref cp65001, out utf-8, error utf-8, screen utf-8
|
||||
[debug] yt-dlp version 2022.11.11 [9d339c4] (win32_exe)
|
||||
[debug] yt-dlp version 2023.01.06 [9d339c4] (win32_exe)
|
||||
[debug] Python 3.8.10 (CPython 64bit) - Windows-10-10.0.22000-SP0
|
||||
[debug] Checking exe version: ffmpeg -bsfs
|
||||
[debug] Checking exe version: ffprobe -bsfs
|
||||
|
@ -65,7 +65,7 @@ body:
|
|||
[debug] Optional libraries: Cryptodome-3.15.0, brotli-1.0.9, certifi-2022.06.15, mutagen-1.45.1, sqlite3-2.6.0, websockets-10.3
|
||||
[debug] Proxy map: {}
|
||||
[debug] Fetching release info: https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest
|
||||
Latest version: 2022.11.11, Current version: 2022.11.11
|
||||
yt-dlp is up to date (2022.11.11)
|
||||
Latest version: 2023.01.06, Current version: 2023.01.06
|
||||
yt-dlp is up to date (2023.01.06)
|
||||
<more lines>
|
||||
render: shell
|
||||
|
|
|
@ -2,8 +2,6 @@
|
|||
|
||||
### Description of your *pull request* and other information
|
||||
|
||||
</details>
|
||||
|
||||
<!--
|
||||
|
||||
Explanation of your *pull request* in arbitrary form goes here. Please **make sure the description explains the purpose and effect** of your *pull request* and is worded well enough to be understood. Provide as much **context and examples** as possible
|
||||
|
@ -41,3 +39,5 @@ Fixes #
|
|||
- [ ] New extractor ([Piracy websites will not be accepted](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#is-the-website-primarily-used-for-piracy))
|
||||
- [ ] Core bug fix/improvement
|
||||
- [ ] New feature (It is strongly [recommended to open an issue first](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#adding-new-feature-or-making-overarching-changes))
|
||||
|
||||
</details>
|
||||
|
|
|
@ -12,13 +12,13 @@ jobs:
|
|||
fail-fast: false
|
||||
matrix:
|
||||
os: [ubuntu-latest]
|
||||
# CPython 3.9 is in quick-test
|
||||
python-version: ['3.7', '3.10', 3.11-dev, pypy-3.7, pypy-3.8]
|
||||
# CPython 3.11 is in quick-test
|
||||
python-version: ['3.8', '3.9', '3.10', pypy-3.7, pypy-3.8]
|
||||
run-tests-ext: [sh]
|
||||
include:
|
||||
# atleast one of each CPython/PyPy tests must be in windows
|
||||
- os: windows-latest
|
||||
python-version: '3.8'
|
||||
python-version: '3.7'
|
||||
run-tests-ext: bat
|
||||
- os: windows-latest
|
||||
python-version: pypy-3.9
|
||||
|
@ -33,5 +33,6 @@ jobs:
|
|||
run: pip install pytest
|
||||
- name: Run tests
|
||||
continue-on-error: False
|
||||
run: ./devscripts/run_tests.${{ matrix.run-tests-ext }} core
|
||||
# Linter is in quick-test
|
||||
run: |
|
||||
python3 -m yt_dlp -v || true # Print debug head
|
||||
./devscripts/run_tests.${{ matrix.run-tests-ext }} core
|
||||
|
|
|
@ -10,24 +10,23 @@ jobs:
|
|||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- name: Set up Python
|
||||
- name: Set up Python 3.11
|
||||
uses: actions/setup-python@v4
|
||||
with:
|
||||
python-version: 3.9
|
||||
python-version: '3.11'
|
||||
- name: Install test requirements
|
||||
run: pip install pytest pycryptodomex
|
||||
- name: Run tests
|
||||
run: ./devscripts/run_tests.sh core
|
||||
run: |
|
||||
python3 -m yt_dlp -v || true
|
||||
./devscripts/run_tests.sh core
|
||||
flake8:
|
||||
name: Linter
|
||||
if: "!contains(github.event.head_commit.message, 'ci skip all')"
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- name: Set up Python
|
||||
uses: actions/setup-python@v4
|
||||
with:
|
||||
python-version: 3.9
|
||||
- uses: actions/setup-python@v4
|
||||
- name: Install flake8
|
||||
run: pip install flake8
|
||||
- name: Make lazy extractors
|
||||
|
|
|
@ -30,6 +30,7 @@ cookies
|
|||
*.f4v
|
||||
*.flac
|
||||
*.flv
|
||||
*.gif
|
||||
*.jpeg
|
||||
*.jpg
|
||||
*.m4a
|
||||
|
@ -71,6 +72,7 @@ dist/
|
|||
zip/
|
||||
tmp/
|
||||
venv/
|
||||
.venv/
|
||||
completions/
|
||||
|
||||
# Misc
|
||||
|
@ -119,9 +121,5 @@ yt-dlp.zip
|
|||
*/extractor/lazy_extractors.py
|
||||
|
||||
# Plugins
|
||||
ytdlp_plugins/extractor/*
|
||||
!ytdlp_plugins/extractor/__init__.py
|
||||
!ytdlp_plugins/extractor/sample.py
|
||||
ytdlp_plugins/postprocessor/*
|
||||
!ytdlp_plugins/postprocessor/__init__.py
|
||||
!ytdlp_plugins/postprocessor/sample.py
|
||||
ytdlp_plugins/
|
||||
yt-dlp-plugins
|
||||
|
|
|
@ -351,8 +351,9 @@ Say you extracted a list of thumbnails into `thumbnail_data` and want to iterate
|
|||
```python
|
||||
thumbnail_data = data.get('thumbnails') or []
|
||||
thumbnails = [{
|
||||
'url': item['url']
|
||||
} for item in thumbnail_data] # correct
|
||||
'url': item['url'],
|
||||
'height': item.get('h'),
|
||||
} for item in thumbnail_data if item.get('url')] # correct
|
||||
```
|
||||
|
||||
and not like:
|
||||
|
@ -360,12 +361,27 @@ and not like:
|
|||
```python
|
||||
thumbnail_data = data.get('thumbnails')
|
||||
thumbnails = [{
|
||||
'url': item['url']
|
||||
'url': item['url'],
|
||||
'height': item.get('h'),
|
||||
} for item in thumbnail_data] # incorrect
|
||||
```
|
||||
|
||||
In this case, `thumbnail_data` will be `None` if the field was not found and this will cause the loop `for item in thumbnail_data` to raise a fatal error. Using `or []` avoids this error and results in setting an empty list in `thumbnails` instead.
|
||||
|
||||
Alternately, this can be further simplified by using `traverse_obj`
|
||||
|
||||
```python
|
||||
thumbnails = [{
|
||||
'url': item['url'],
|
||||
'height': item.get('h'),
|
||||
} for item in traverse_obj(data, ('thumbnails', lambda _, v: v['url']))]
|
||||
```
|
||||
|
||||
or, even better,
|
||||
|
||||
```python
|
||||
thumbnails = traverse_obj(data, ('thumbnails', ..., {'url': 'url', 'height': 'h'}))
|
||||
```
|
||||
|
||||
### Provide fallbacks
|
||||
|
||||
|
|
26
CONTRIBUTORS
26
CONTRIBUTORS
|
@ -3,6 +3,7 @@ shirt-dev (collaborator)
|
|||
coletdjnz/colethedj (collaborator)
|
||||
Ashish0804 (collaborator)
|
||||
nao20010128nao/Lesmiscore (collaborator)
|
||||
bashonly (collaborator)
|
||||
h-h-h-h
|
||||
pauldubois98
|
||||
nixxo
|
||||
|
@ -295,7 +296,6 @@ Mehavoid
|
|||
winterbird-code
|
||||
yashkc2025
|
||||
aldoridhoni
|
||||
bashonly
|
||||
jacobtruman
|
||||
masta79
|
||||
palewire
|
||||
|
@ -357,3 +357,27 @@ SG5
|
|||
the-marenga
|
||||
tkgmomosheep
|
||||
vitkhab
|
||||
glensc
|
||||
synthpop123
|
||||
tntmod54321
|
||||
milkknife
|
||||
Bnyro
|
||||
CapacitorSet
|
||||
stelcodes
|
||||
skbeh
|
||||
muddi900
|
||||
digitall
|
||||
chengzhicn
|
||||
mexus
|
||||
JChris246
|
||||
redraskal
|
||||
Spicadox
|
||||
barsnick
|
||||
docbender
|
||||
KurtBestor
|
||||
Chrissi2812
|
||||
FrederikNS
|
||||
gschizas
|
||||
JC-Chung
|
||||
mzhou
|
||||
OndrejBakan
|
||||
|
|
151
Changelog.md
151
Changelog.md
|
@ -11,6 +11,157 @@
|
|||
-->
|
||||
|
||||
|
||||
### 2023.01.06
|
||||
|
||||
* Fix config locations by [Grub4k](https://github.com/Grub4k), [coletdjnz](https://github.com/coletdjnz), [pukkandan](https://github.com/pukkandan)
|
||||
* [downloader/aria2c] Disable native progress
|
||||
* [utils] `mimetype2ext`: `weba` is not standard
|
||||
* [utils] `windows_enable_vt_mode`: Better error handling
|
||||
* [build] Add minimal `pyproject.toml`
|
||||
* [update] Fix updater file removal on windows by [Grub4K](https://github.com/Grub4K)
|
||||
* [cleanup] Misc fixes and cleanup
|
||||
* [extractor/aitube] Add extractor by [HobbyistDev](https://github.com/HobbyistDev)
|
||||
* [extractor/drtv] Add series extractors by [FrederikNS](https://github.com/FrederikNS)
|
||||
* [extractor/volejtv] Add extractor by [HobbyistDev](https://github.com/HobbyistDev)
|
||||
* [extractor/xanimu] Add extractor by [JChris246](https://github.com/JChris246)
|
||||
* [extractor/youtube] Retry manifest refresh for live-from-start by [mzhou](https://github.com/mzhou)
|
||||
* [extractor/biliintl] Add `/media` to `VALID_URL` by [HobbyistDev](https://github.com/HobbyistDev)
|
||||
* [extractor/biliIntl] Add fallback to `video_data` by [HobbyistDev](https://github.com/HobbyistDev)
|
||||
* [extractor/crunchyroll:show] Add `language` to entries by [Chrissi2812](https://github.com/Chrissi2812)
|
||||
* [extractor/joj] Fix extractor by [OndrejBakan](https://github.com/OndrejBakan), [pukkandan](https://github.com/pukkandan)
|
||||
* [extractor/nbc] Update graphql query by [jacobtruman](https://github.com/jacobtruman)
|
||||
* [extractor/reddit] Add subreddit as `channel_id` by [gschizas](https://github.com/gschizas)
|
||||
* [extractor/tiktok] Add `TikTokLive` extractor by [JC-Chung](https://github.com/JC-Chung)
|
||||
|
||||
### 2023.01.02
|
||||
|
||||
* **Improve plugin architecture** by [Grub4K](https://github.com/Grub4K), [coletdjnz](https://github.com/coletdjnz), [flashdagger](https://github.com/flashdagger), [pukkandan](https://github.com/pukkandan)
|
||||
* Plugins can be loaded in any distribution of yt-dlp (binary, pip, source, etc.) and can be distributed and installed as packages. See [the readme](https://github.com/yt-dlp/yt-dlp/tree/05997b6e98e638d97d409c65bb5eb86da68f3b64#plugins) for more information
|
||||
* Add `--compat-options 2021,2022`
|
||||
* This allows devs to change defaults and make other potentially breaking changes more easily. If you need everything to work exactly as-is, put Use `--compat 2022` in your config to guard against future compat changes.
|
||||
* [downloader/aria2c] Native progress for aria2c via RPC by [Lesmiscore](https://github.com/Lesmiscore), [pukkandan](https://github.com/pukkandan)
|
||||
* Merge youtube-dl: Upto [commit/195f22f](https://github.com/ytdl-org/youtube-dl/commit/195f22f6) by [Grub4k](https://github.com/Grub4k), [pukkandan](https://github.com/pukkandan)
|
||||
* Add pre-processor stage `video`
|
||||
* Let `--parse/replace-in-metadata` run at any post-processing stage
|
||||
* Add `--enable-file-urls` by [coletdjnz](https://github.com/coletdjnz)
|
||||
* Add new field `aspect_ratio`
|
||||
* Add `ac4` to known codecs
|
||||
* Add `weba` to known extensions
|
||||
* [FFmpegVideoConvertor] Add `gif` to `--recode-video`
|
||||
* Add message when there are no subtitles/thumbnails
|
||||
* Deprioritize HEVC-over-FLV formats by [Lesmiscore](https://github.com/Lesmiscore)
|
||||
* Make early reject of `--match-filter` stricter
|
||||
* Fix `--cookies-from-browser` CLI parsing
|
||||
* Fix `original_url` in playlists
|
||||
* Fix bug in writing playlist info-json
|
||||
* Fix bugs in `PlaylistEntries`
|
||||
* [downloader/ffmpeg] Fix headers for video+audio formats by [Grub4K](https://github.com/Grub4K), [bashonly](https://github.com/bashonly)
|
||||
* [extractor] Add a way to distinguish IEs that returns only videos
|
||||
* [extractor] Implement universal format sorting and deprecate `_sort_formats`
|
||||
* [extractor] Let `_extract_format` functions obey `--ignore-no-formats`
|
||||
* [extractor/generic] Add `fragment_query` extractor arg for DASH and HLS by [bashonly](https://github.com/bashonly), [pukkandan](https://github.com/pukkandan)
|
||||
* [extractor/generic] Decode unicode-escaped embed URLs by [bashonly](https://github.com/bashonly)
|
||||
* [extractor/generic] Don't report redirect to https
|
||||
* [extractor/generic] Fix JSON LD manifest extraction by [bashonly](https://github.com/bashonly), [pukkandan](https://github.com/pukkandan)
|
||||
* [extractor/generic] Use `Accept-Encoding: identity` for initial request by [coletdjnz](https://github.com/coletdjnz)
|
||||
* [FormatSort] Add `mov` to `vext`
|
||||
* [jsinterp] Escape regex that looks like nested set
|
||||
* [webvtt] Handle premature EOF by [flashdagger](https://github.com/flashdagger)
|
||||
* [utils] `classproperty`: Add cache support
|
||||
* [utils] `get_exe_version`: Detect broken executables by [dirkf](https://github.com/dirkf), [pukkandan](https://github.com/pukkandan)
|
||||
* [utils] `js_to_json`: Fix bug in [f55523c](https://github.com/yt-dlp/yt-dlp/commit/f55523c) by [ChillingPepper](https://github.com/ChillingPepper), [pukkandan](https://github.com/pukkandan)
|
||||
* [utils] Make `ExtractorError` mutable
|
||||
* [utils] Move `FileDownloader.parse_bytes` into utils
|
||||
* [utils] Move format sorting code into `utils`
|
||||
* [utils] `windows_enable_vt_mode`: Proper implementation by [Grub4K](https://github.com/Grub4K)
|
||||
* [update] Workaround [#5632](https://github.com/yt-dlp/yt-dlp/issues/5632)
|
||||
* [docs] Improvements
|
||||
* [cleanup] Misc fixes and cleanup
|
||||
* [cleanup] Use `random.choices` by [freezboltz](https://github.com/freezboltz)
|
||||
* [extractor/airtv] Add extractor by [HobbyistDev](https://github.com/HobbyistDev)
|
||||
* [extractor/amazonminitv] Add extractors by [GautamMKGarg](https://github.com/GautamMKGarg), [nyuszika7h](https://github.com/nyuszika7h)
|
||||
* [extractor/beatbump] Add extractors by [Bobscorn](https://github.com/Bobscorn), [pukkandan](https://github.com/pukkandan)
|
||||
* [extractor/europarl] Add EuroParlWebstream extractor by [HobbyistDev](https://github.com/HobbyistDev)
|
||||
* [extractor/kanal2] Add extractor by [bashonly](https://github.com/bashonly), [glensc](https://github.com/glensc), [pukkandan](https://github.com/pukkandan)
|
||||
* [extractor/kankanews] Add extractor by [synthpop123](https://github.com/synthpop123)
|
||||
* [extractor/kick] Add extractor by [bashonly](https://github.com/bashonly)
|
||||
* [extractor/mediastream] Add extractor by [HobbyistDev](https://github.com/HobbyistDev), [elyse0](https://github.com/elyse0)
|
||||
* [extractor/noice] Add NoicePodcast extractor by [HobbyistDev](https://github.com/HobbyistDev)
|
||||
* [extractor/oneplace] Add OnePlacePodcast extractor by [HobbyistDev](https://github.com/HobbyistDev)
|
||||
* [extractor/rumble] Add RumbleIE extractor by [flashdagger](https://github.com/flashdagger)
|
||||
* [extractor/screencastify] Add extractor by [bashonly](https://github.com/bashonly)
|
||||
* [extractor/trtcocuk] Add extractor by [HobbyistDev](https://github.com/HobbyistDev)
|
||||
* [extractor/Veoh] Add user extractor by [tntmod54321](https://github.com/tntmod54321)
|
||||
* [extractor/videoken] Add extractors by [bashonly](https://github.com/bashonly)
|
||||
* [extractor/webcamerapl] Add extractor by [milkknife](https://github.com/milkknife)
|
||||
* [extractor/amazon] Add `AmazonReviews` extractor by [bashonly](https://github.com/bashonly)
|
||||
* [extractor/netverse] Add `NetverseSearch` extractor by [HobbyistDev](https://github.com/HobbyistDev)
|
||||
* [extractor/vimeo] Add `VimeoProIE` by [bashonly](https://github.com/bashonly), [pukkandan](https://github.com/pukkandan)
|
||||
* [extractor/xiami] Remove extractors by [synthpop123](https://github.com/synthpop123)
|
||||
* [extractor/youtube] Add `piped.video` by [Bnyro](https://github.com/Bnyro)
|
||||
* [extractor/youtube] Consider language in format de-duplication
|
||||
* [extractor/youtube] Extract DRC formats
|
||||
* [extractor/youtube] Fix `ytuser:`
|
||||
* [extractor/youtube] Fix bug in handling of music URLs
|
||||
* [extractor/youtube] Subtitles cannot be translated to `und`
|
||||
* [extractor/youtube:tab] Extract metadata from channel items by [coletdjnz](https://github.com/coletdjnz)
|
||||
* [extractor/ARD] Add vtt subtitles by [CapacitorSet](https://github.com/CapacitorSet)
|
||||
* [extractor/ArteTV] Extract chapters by [bashonly](https://github.com/bashonly), [iw0nderhow](https://github.com/iw0nderhow)
|
||||
* [extractor/bandcamp] Add `album_artist` by [stelcodes](https://github.com/stelcodes)
|
||||
* [extractor/bilibili] Fix `--no-playlist` for anthology
|
||||
* [extractor/bilibili] Improve `_VALID_URL` by [skbeh](https://github.com/skbeh)
|
||||
* [extractor/biliintl:series] Make partial download of series faster
|
||||
* [extractor/BiliLive] Fix extractor
|
||||
* [extractor/brightcove] Add `BrightcoveNewBaseIE` and fix embed extraction
|
||||
* [extractor/cda] Support premium and misc improvements by [selfisekai](https://github.com/selfisekai)
|
||||
* [extractor/ciscowebex] Support password-protected videos by [damianoamatruda](https://github.com/damianoamatruda)
|
||||
* [extractor/curiositystream] Fix auth by [mnn](https://github.com/mnn)
|
||||
* [extractor/embedly] Handle vimeo embeds
|
||||
* [extractor/fifa] Fix Preplay extraction by [dirkf](https://github.com/dirkf)
|
||||
* [extractor/foxsports] Fix extractor by [bashonly](https://github.com/bashonly)
|
||||
* [extractor/gronkh] Fix `_VALID_URL` by [muddi900](https://github.com/muddi900)
|
||||
* [extractor/hotstar] Improve format metadata
|
||||
* [extractor/iqiyi] Fix `Iq` JS regex by [bashonly](https://github.com/bashonly)
|
||||
* [extractor/la7] Improve extractor by [nixxo](https://github.com/nixxo)
|
||||
* [extractor/mediaset] Better embed detection and error messages by [nixxo](https://github.com/nixxo)
|
||||
* [extractor/mixch] Support `--wait-for-video`
|
||||
* [extractor/naver] Improve `_VALID_URL` for `NaverNowIE` by [bashonly](https://github.com/bashonly)
|
||||
* [extractor/naver] Treat fan subtitles as separate language
|
||||
* [extractor/netverse] Extract comments by [HobbyistDev](https://github.com/HobbyistDev)
|
||||
* [extractor/nosnl] Add support for /video by [HobbyistDev](https://github.com/HobbyistDev)
|
||||
* [extractor/odnoklassniki] Extract subtitles by [bashonly](https://github.com/bashonly)
|
||||
* [extractor/pinterest] Fix extractor by [bashonly](https://github.com/bashonly)
|
||||
* [extractor/plutotv] Fix videos with non-zero start by [digitall](https://github.com/digitall)
|
||||
* [extractor/polskieradio] Adapt to next.js redesigns by [selfisekai](https://github.com/selfisekai)
|
||||
* [extractor/reddit] Add vcodec to fallback format by [chengzhicn](https://github.com/chengzhicn)
|
||||
* [extractor/reddit] Extract crossposted media by [bashonly](https://github.com/bashonly)
|
||||
* [extractor/reddit] Extract video embeds in text posts by [bashonly](https://github.com/bashonly)
|
||||
* [extractor/rutube] Support private videos by [mexus](https://github.com/mexus)
|
||||
* [extractor/sibnet] Separate from VKIE
|
||||
* [extractor/slideslive] Fix extractor by [Grub4K](https://github.com/Grub4K), [bashonly](https://github.com/bashonly)
|
||||
* [extractor/slideslive] Support embeds and slides by [Grub4K](https://github.com/Grub4K), [bashonly](https://github.com/bashonly), [pukkandan](https://github.com/pukkandan)
|
||||
* [extractor/soundcloud] Support user permalink by [nosoop](https://github.com/nosoop)
|
||||
* [extractor/spankbang] Fix extractor by [JChris246](https://github.com/JChris246)
|
||||
* [extractor/stv] Detect DRM
|
||||
* [extractor/swearnet] Fix description bug
|
||||
* [extractor/tencent] Fix geo-restricted video by [elyse0](https://github.com/elyse0)
|
||||
* [extractor/tiktok] Fix subs, `DouyinIE`, improve `_VALID_URL` by [bashonly](https://github.com/bashonly)
|
||||
* [extractor/tiktok] Update `_VALID_URL`, add `api_hostname` arg by [bashonly](https://github.com/bashonly)
|
||||
* [extractor/tiktok] Update API hostname by [redraskal](https://github.com/redraskal)
|
||||
* [extractor/twitcasting] Fix videos with password by [Spicadox](https://github.com/Spicadox), [bashonly](https://github.com/bashonly)
|
||||
* [extractor/twitter] Heed `--no-playlist` for multi-video tweets by [Grub4K](https://github.com/Grub4K), [bashonly](https://github.com/bashonly)
|
||||
* [extractor/twitter] Refresh guest token when expired by [Grub4K](https://github.com/Grub4K), [bashonly](https://github.com/bashonly)
|
||||
* [extractor/twitter:spaces] Add `Referer` to m3u8 by [nixxo](https://github.com/nixxo)
|
||||
* [extractor/udemy] Fix lectures that have no URL and detect DRM
|
||||
* [extractor/unsupported] Add more URLs
|
||||
* [extractor/urplay] Support for audio-only formats by [barsnick](https://github.com/barsnick)
|
||||
* [extractor/wistia] Improve extension detection by [Grub4k](https://github.com/Grub4k), [bashonly](https://github.com/bashonly), [pukkandan](https://github.com/pukkandan)
|
||||
* [extractor/yle_areena] Support restricted videos by [docbender](https://github.com/docbender)
|
||||
* [extractor/youku] Fix extractor by [KurtBestor](https://github.com/KurtBestor)
|
||||
* [extractor/youporn] Fix metadata by [marieell](https://github.com/marieell)
|
||||
* [extractor/redgifs] Fix bug in [8c188d5](https://github.com/yt-dlp/yt-dlp/commit/8c188d5d09177ed213a05c900d3523867c5897fd)
|
||||
|
||||
|
||||
### 2022.11.11
|
||||
|
||||
* Merge youtube-dl: Upto [commit/de39d12](https://github.com/ytdl-org/youtube-dl/commit/de39d128)
|
||||
|
|
|
@ -42,7 +42,7 @@ You can also find lists of all [contributors of yt-dlp](CONTRIBUTORS) and [autho
|
|||
* Improved/fixed support for HiDive, HotStar, Hungama, LBRY, LinkedInLearning, Mxplayer, SonyLiv, TV2, Vimeo, VLive etc
|
||||
|
||||
|
||||
## [Lesmiscore](https://github.com/Lesmiscore) (nao20010128nao)
|
||||
## [Lesmiscore](https://github.com/Lesmiscore) <sub><sup>(nao20010128nao)</sup></sub>
|
||||
|
||||
**Bitcoin**: bc1qfd02r007cutfdjwjmyy9w23rjvtls6ncve7r3s
|
||||
**Monacoin**: mona1q3tf7dzvshrhfe3md379xtvt2n22duhglv5dskr
|
||||
|
@ -50,3 +50,10 @@ You can also find lists of all [contributors of yt-dlp](CONTRIBUTORS) and [autho
|
|||
* Download live from start to end for YouTube
|
||||
* Added support for new websites AbemaTV, mildom, PixivSketch, skeb, radiko, voicy, mirrativ, openrec, whowatch, damtomo, 17.live, mixch etc
|
||||
* Improved/fixed support for fc2, YahooJapanNews, tver, iwara etc
|
||||
|
||||
|
||||
## [bashonly](https://github.com/bashonly)
|
||||
|
||||
* `--cookies-from-browser` support for Firefox containers
|
||||
* Added support for new websites Genius, Kick, NBCStations, Triller, VideoKen etc
|
||||
* Improved/fixed support for Anvato, Brightcove, Instagram, ParamountPlus, Reddit, SlidesLive, TikTok, Twitter, Vimeo etc
|
||||
|
|
4
Makefile
4
Makefile
|
@ -17,8 +17,8 @@ pypi-files: AUTHORS Changelog.md LICENSE README.md README.txt supportedsites \
|
|||
clean-test:
|
||||
rm -rf test/testdata/sigs/player-*.js tmp/ *.annotations.xml *.aria2 *.description *.dump *.frag \
|
||||
*.frag.aria2 *.frag.urls *.info.json *.live_chat.json *.meta *.part* *.tmp *.temp *.unknown_video *.ytdl \
|
||||
*.3gp *.ape *.ass *.avi *.desktop *.f4v *.flac *.flv *.jpeg *.jpg *.m4a *.m4v *.mhtml *.mkv *.mov *.mp3 *.mp4 \
|
||||
*.mpga *.oga *.ogg *.opus *.png *.sbv *.srt *.swf *.swp *.tt *.ttml *.url *.vtt *.wav *.webloc *.webm *.webp
|
||||
*.3gp *.ape *.ass *.avi *.desktop *.f4v *.flac *.flv *.gif *.jpeg *.jpg *.m4a *.m4v *.mhtml *.mkv *.mov *.mp3 \
|
||||
*.mp4 *.mpga *.oga *.ogg *.opus *.png *.sbv *.srt *.swf *.swp *.tt *.ttml *.url *.vtt *.wav *.webloc *.webm *.webp
|
||||
clean-dist:
|
||||
rm -rf yt-dlp.1.temp.md yt-dlp.1 README.txt MANIFEST build/ dist/ .coverage cover/ yt-dlp.tar.gz completions/ \
|
||||
yt_dlp/extractor/lazy_extractors.py *.spec CONTRIBUTING.md.tmp yt-dlp yt-dlp.exe yt_dlp.egg-info/ AUTHORS .mailmap
|
||||
|
|
213
README.md
213
README.md
|
@ -10,7 +10,7 @@
|
|||
[![Discord](https://img.shields.io/discord/807245652072857610?color=blue&labelColor=555555&label=&logo=discord&style=for-the-badge)](https://discord.gg/H5MNcFW63r "Discord")
|
||||
[![Supported Sites](https://img.shields.io/badge/-Supported_Sites-brightgreen.svg?style=for-the-badge)](supportedsites.md "Supported Sites")
|
||||
[![License: Unlicense](https://img.shields.io/badge/-Unlicense-blue.svg?style=for-the-badge)](LICENSE "License")
|
||||
[![CI Status](https://img.shields.io/github/workflow/status/yt-dlp/yt-dlp/Core%20Tests/master?label=Tests&style=for-the-badge)](https://github.com/yt-dlp/yt-dlp/actions "CI Status")
|
||||
[![CI Status](https://img.shields.io/github/actions/workflow/status/yt-dlp/yt-dlp/core.yml?branch=master&label=Tests&style=for-the-badge)](https://github.com/yt-dlp/yt-dlp/actions "CI Status")
|
||||
[![Commits](https://img.shields.io/github/commit-activity/m/yt-dlp/yt-dlp?label=commits&style=for-the-badge)](https://github.com/yt-dlp/yt-dlp/commits "Commit History")
|
||||
[![Last Commit](https://img.shields.io/github/last-commit/yt-dlp/yt-dlp/master?label=&style=for-the-badge&display_timestamp=committer)](https://github.com/yt-dlp/yt-dlp/commits "Commit History")
|
||||
|
||||
|
@ -61,6 +61,8 @@ yt-dlp is a [youtube-dl](https://github.com/ytdl-org/youtube-dl) fork based on t
|
|||
* [Modifying metadata examples](#modifying-metadata-examples)
|
||||
* [EXTRACTOR ARGUMENTS](#extractor-arguments)
|
||||
* [PLUGINS](#plugins)
|
||||
* [Installing Plugins](#installing-plugins)
|
||||
* [Developing Plugins](#developing-plugins)
|
||||
* [EMBEDDING YT-DLP](#embedding-yt-dlp)
|
||||
* [Embedding examples](#embedding-examples)
|
||||
* [DEPRECATED OPTIONS](#deprecated-options)
|
||||
|
@ -74,13 +76,13 @@ yt-dlp is a [youtube-dl](https://github.com/ytdl-org/youtube-dl) fork based on t
|
|||
|
||||
# NEW FEATURES
|
||||
|
||||
* Merged with **youtube-dl v2021.12.17+ [commit/de39d12](https://github.com/ytdl-org/youtube-dl/commit/de39d128)** <!--([exceptions](https://github.com/yt-dlp/yt-dlp/issues/21))--> and **youtube-dlc v2020.11.11-3+ [commit/f9401f2](https://github.com/blackjack4494/yt-dlc/commit/f9401f2a91987068139c5f757b12fc711d4c0cee)**: You get all the features and patches of [youtube-dlc](https://github.com/blackjack4494/yt-dlc) in addition to the latest [youtube-dl](https://github.com/ytdl-org/youtube-dl)
|
||||
* Merged with **youtube-dl v2021.12.17+ [commit/195f22f](https://github.com/ytdl-org/youtube-dl/commit/195f22f)** <!--([exceptions](https://github.com/yt-dlp/yt-dlp/issues/21))--> and **youtube-dlc v2020.11.11-3+ [commit/f9401f2](https://github.com/blackjack4494/yt-dlc/commit/f9401f2a91987068139c5f757b12fc711d4c0cee)**: You get all the features and patches of [youtube-dlc](https://github.com/blackjack4494/yt-dlc) in addition to the latest [youtube-dl](https://github.com/ytdl-org/youtube-dl)
|
||||
|
||||
* **[SponsorBlock Integration](#sponsorblock-options)**: You can mark/remove sponsor sections in YouTube videos by utilizing the [SponsorBlock](https://sponsor.ajay.app) API
|
||||
|
||||
* **[Format Sorting](#sorting-formats)**: The default format sorting options have been changed so that higher resolution and better codecs will be now preferred instead of simply using larger bitrate. Furthermore, you can now specify the sort order using `-S`. This allows for much easier format selection than what is possible by simply using `--format` ([examples](#format-selection-examples))
|
||||
|
||||
* **Merged with animelover1984/youtube-dl**: You get most of the features and improvements from [animelover1984/youtube-dl](https://github.com/animelover1984/youtube-dl) including `--write-comments`, `BiliBiliSearch`, `BilibiliChannel`, Embedding thumbnail in mp4/ogg/opus, playlist infojson etc. Note that the NicoNico livestreams are not available. See [#31](https://github.com/yt-dlp/yt-dlp/pull/31) for details.
|
||||
* **Merged with animelover1984/youtube-dl**: You get most of the features and improvements from [animelover1984/youtube-dl](https://github.com/animelover1984/youtube-dl) including `--write-comments`, `BiliBiliSearch`, `BilibiliChannel`, Embedding thumbnail in mp4/ogg/opus, playlist infojson etc. Note that NicoNico livestreams are not available. See [#31](https://github.com/yt-dlp/yt-dlp/pull/31) for details.
|
||||
|
||||
* **YouTube improvements**:
|
||||
* Supports Clips, Stories (`ytstories:<channel UCID>`), Search (including filters)**\***, YouTube Music Search, Channel-specific search, Search prefixes (`ytsearch:`, `ytsearchdate:`)**\***, Mixes, YouTube Music Albums/Channels ([except self-uploaded music](https://github.com/yt-dlp/yt-dlp/issues/723)), and Feeds (`:ytfav`, `:ytwatchlater`, `:ytsubs`, `:ythistory`, `:ytrec`, `:ytnotif`)
|
||||
|
@ -151,12 +153,15 @@ Some of yt-dlp's default options are different from that of youtube-dl and youtu
|
|||
* When `--embed-subs` and `--write-subs` are used together, the subtitles are written to disk and also embedded in the media file. You can use just `--embed-subs` to embed the subs and automatically delete the separate file. See [#630 (comment)](https://github.com/yt-dlp/yt-dlp/issues/630#issuecomment-893659460) for more info. `--compat-options no-keep-subs` can be used to revert this
|
||||
* `certifi` will be used for SSL root certificates, if installed. If you want to use system certificates (e.g. self-signed), use `--compat-options no-certifi`
|
||||
* yt-dlp's sanitization of invalid characters in filenames is different/smarter than in youtube-dl. You can use `--compat-options filename-sanitization` to revert to youtube-dl's behavior
|
||||
* yt-dlp tries to parse the external downloader outputs into the standard progress output if possible (Currently implemented: [~~aria2c~~](https://github.com/yt-dlp/yt-dlp/issues/5931)). You can use `--compat-options no-external-downloader-progress` to get the downloader output as-is
|
||||
|
||||
For ease of use, a few more compat options are available:
|
||||
|
||||
* `--compat-options all`: Use all compat options (Do NOT use)
|
||||
* `--compat-options youtube-dl`: Same as `--compat-options all,-multistreams`
|
||||
* `--compat-options youtube-dlc`: Same as `--compat-options all,-no-live-chat,-no-youtube-channel-redirect`
|
||||
* `--compat-options 2021`: Same as `--compat-options 2022,no-certifi,filename-sanitization,no-youtube-prefer-utc-upload-date`
|
||||
* `--compat-options 2022`: Same as `--compat-options no-external-downloader-progress`. Use this to enable all future compat options
|
||||
|
||||
|
||||
# INSTALLATION
|
||||
|
@ -179,7 +184,7 @@ You can use `yt-dlp -U` to update if you are [using the release binaries](#relea
|
|||
|
||||
If you [installed with PIP](https://github.com/yt-dlp/yt-dlp/wiki/Installation#with-pip), simply re-run the same command that was used to install the program
|
||||
|
||||
For other third-party package managers, see [the wiki](https://github.com/yt-dlp/yt-dlp/wiki/Installation) or refer their documentation
|
||||
For other third-party package managers, see [the wiki](https://github.com/yt-dlp/yt-dlp/wiki/Installation#third-party-package-managers) or refer to their documentation
|
||||
|
||||
|
||||
<!-- MANPAGE: BEGIN EXCLUDED SECTION -->
|
||||
|
@ -217,7 +222,7 @@ File|Description
|
|||
<!-- MANPAGE: END EXCLUDED SECTION -->
|
||||
|
||||
|
||||
Note: The manpages, shell completion files etc. are available in the [source tarball](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp.tar.gz)
|
||||
**Note**: The manpages, shell completion files etc. are available in the [source tarball](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp.tar.gz)
|
||||
|
||||
## DEPENDENCIES
|
||||
Python versions 3.7+ (CPython and PyPy) are supported. Other versions and implementations may or may not work correctly.
|
||||
|
@ -233,8 +238,9 @@ While all the other dependencies are optional, `ffmpeg` and `ffprobe` are highly
|
|||
|
||||
* [**ffmpeg** and **ffprobe**](https://www.ffmpeg.org) - Required for [merging separate video and audio files](#format-selection) as well as for various [post-processing](#post-processing-options) tasks. License [depends on the build](https://www.ffmpeg.org/legal.html)
|
||||
|
||||
<!-- TODO: ffmpeg has merged this patch. Remove this note once there is new release -->
|
||||
**Note**: There are some regressions in newer ffmpeg versions that causes various issues when used alongside yt-dlp. Since ffmpeg is such an important dependency, we provide [custom builds](https://github.com/yt-dlp/FFmpeg-Builds#ffmpeg-static-auto-builds) with patches for these issues at [yt-dlp/FFmpeg-Builds](https://github.com/yt-dlp/FFmpeg-Builds). See [the readme](https://github.com/yt-dlp/FFmpeg-Builds#patches-applied) for details on the specific issues solved by these builds
|
||||
There are bugs in ffmpeg that causes various issues when used alongside yt-dlp. Since ffmpeg is such an important dependency, we provide [custom builds](https://github.com/yt-dlp/FFmpeg-Builds#ffmpeg-static-auto-builds) with patches for some of these issues at [yt-dlp/FFmpeg-Builds](https://github.com/yt-dlp/FFmpeg-Builds). See [the readme](https://github.com/yt-dlp/FFmpeg-Builds#patches-applied) for details on the specific issues solved by these builds
|
||||
|
||||
**Important**: What you need is ffmpeg *binary*, **NOT** [the python package of the same name](https://pypi.org/project/ffmpeg)
|
||||
|
||||
### Networking
|
||||
* [**certifi**](https://github.com/certifi/python-certifi)\* - Provides Mozilla's root certificate bundle. Licensed under [MPLv2](https://github.com/certifi/python-certifi/blob/master/LICENSE)
|
||||
|
@ -281,7 +287,7 @@ On some systems, you may need to use `py` or `python` instead of `python3`.
|
|||
|
||||
`pyinst.py` accepts any arguments that can be passed to `pyinstaller`, such as `--onefile/-F` or `--onedir/-D`, which is further [documented here](https://pyinstaller.org/en/stable/usage.html#what-to-generate).
|
||||
|
||||
Note that pyinstaller with versions below 4.4 [do not support](https://github.com/pyinstaller/pyinstaller#requirements-and-tested-platforms) Python installed from the Windows store without using a virtual environment.
|
||||
**Note**: Pyinstaller versions below 4.4 [do not support](https://github.com/pyinstaller/pyinstaller#requirements-and-tested-platforms) Python installed from the Windows store without using a virtual environment.
|
||||
|
||||
**Important**: Running `pyinstaller` directly **without** using `pyinst.py` is **not** officially supported. This may or may not work correctly.
|
||||
|
||||
|
@ -414,6 +420,8 @@ You can also fork the project on GitHub and run your fork's [build workflow](.gi
|
|||
--source-address IP Client-side IP address to bind to
|
||||
-4, --force-ipv4 Make all connections via IPv4
|
||||
-6, --force-ipv6 Make all connections via IPv6
|
||||
--enable-file-urls Enable file:// URLs. This is disabled by
|
||||
default for security reasons.
|
||||
|
||||
## Geo-restriction:
|
||||
--geo-verification-proxy URL Use this proxy to verify the IP address for
|
||||
|
@ -432,23 +440,25 @@ You can also fork the project on GitHub and run your fork's [build workflow](.gi
|
|||
explicitly provided IP block in CIDR notation
|
||||
|
||||
## Video Selection:
|
||||
-I, --playlist-items ITEM_SPEC Comma separated playlist_index of the videos
|
||||
-I, --playlist-items ITEM_SPEC Comma separated playlist_index of the items
|
||||
to download. You can specify a range using
|
||||
"[START]:[STOP][:STEP]". For backward
|
||||
compatibility, START-STOP is also supported.
|
||||
Use negative indices to count from the right
|
||||
and negative STEP to download in reverse
|
||||
order. E.g. "-I 1:3,7,-5::2" used on a
|
||||
playlist of size 15 will download the videos
|
||||
playlist of size 15 will download the items
|
||||
at index 1,2,3,7,11,13,15
|
||||
--min-filesize SIZE Do not download any videos smaller than
|
||||
--min-filesize SIZE Abort download if filesize is smaller than
|
||||
SIZE, e.g. 50k or 44.6M
|
||||
--max-filesize SIZE Abort download if filesize is larger than
|
||||
SIZE, e.g. 50k or 44.6M
|
||||
--max-filesize SIZE Do not download any videos larger than SIZE,
|
||||
e.g. 50k or 44.6M
|
||||
--date DATE Download only videos uploaded on this date.
|
||||
The date can be "YYYYMMDD" or in the format
|
||||
[now|today|yesterday][-N[day|week|month|year]].
|
||||
E.g. --date today-2weeks
|
||||
E.g. "--date today-2weeks" downloads
|
||||
only videos uploaded on the same day two
|
||||
weeks ago
|
||||
--datebefore DATE Download only videos uploaded on or before
|
||||
this date. The date formats accepted is the
|
||||
same as --date
|
||||
|
@ -491,9 +501,9 @@ You can also fork the project on GitHub and run your fork's [build workflow](.gi
|
|||
a file that is in the archive
|
||||
--break-on-reject Stop the download process when encountering
|
||||
a file that has been filtered out
|
||||
--break-per-input --break-on-existing, --break-on-reject,
|
||||
--max-downloads, and autonumber resets per
|
||||
input URL
|
||||
--break-per-input Alters --max-downloads, --break-on-existing,
|
||||
--break-on-reject, and autonumber to reset
|
||||
per input URL
|
||||
--no-break-per-input --break-on-existing and similar options
|
||||
terminates the entire download queue
|
||||
--skip-playlist-after-errors N Number of allowed failures until the rest of
|
||||
|
@ -525,8 +535,8 @@ You can also fork the project on GitHub and run your fork's [build workflow](.gi
|
|||
linear=1::2 --retry-sleep fragment:exp=1:20
|
||||
--skip-unavailable-fragments Skip unavailable fragments for DASH,
|
||||
hlsnative and ISM downloads (default)
|
||||
(Alias: --no-abort-on-unavailable-fragment)
|
||||
--abort-on-unavailable-fragment
|
||||
(Alias: --no-abort-on-unavailable-fragments)
|
||||
--abort-on-unavailable-fragments
|
||||
Abort download if a fragment is unavailable
|
||||
(Alias: --no-skip-unavailable-fragments)
|
||||
--keep-fragments Keep downloaded fragments on disk after
|
||||
|
@ -725,7 +735,7 @@ You can also fork the project on GitHub and run your fork's [build workflow](.gi
|
|||
screen, optionally prefixed with when to
|
||||
print it, separated by a ":". Supported
|
||||
values of "WHEN" are the same as that of
|
||||
--use-postprocessor, and "video" (default).
|
||||
--use-postprocessor (default: video).
|
||||
Implies --quiet. Implies --simulate unless
|
||||
--no-simulate or later stages of WHEN are
|
||||
used. This option can be used multiple times
|
||||
|
@ -893,11 +903,11 @@ You can also fork the project on GitHub and run your fork's [build workflow](.gi
|
|||
specific bitrate like 128K (default 5)
|
||||
--remux-video FORMAT Remux the video into another container if
|
||||
necessary (currently supported: avi, flv,
|
||||
mkv, mov, mp4, webm, aac, aiff, alac, flac,
|
||||
m4a, mka, mp3, ogg, opus, vorbis, wav). If
|
||||
target container does not support the
|
||||
video/audio codec, remuxing will fail. You
|
||||
can specify multiple rules; e.g.
|
||||
gif, mkv, mov, mp4, webm, aac, aiff, alac,
|
||||
flac, m4a, mka, mp3, ogg, opus, vorbis,
|
||||
wav). If target container does not support
|
||||
the video/audio codec, remuxing will fail.
|
||||
You can specify multiple rules; e.g.
|
||||
"aac>m4a/mov>mp4/mkv" will remux aac to m4a,
|
||||
mov to mp4 and anything else to mkv
|
||||
--recode-video FORMAT Re-encode the video into another format if
|
||||
|
@ -952,13 +962,18 @@ You can also fork the project on GitHub and run your fork's [build workflow](.gi
|
|||
mkv/mka video files
|
||||
--no-embed-info-json Do not embed the infojson as an attachment
|
||||
to the video file
|
||||
--parse-metadata FROM:TO Parse additional metadata like title/artist
|
||||
--parse-metadata [WHEN:]FROM:TO
|
||||
Parse additional metadata like title/artist
|
||||
from other fields; see "MODIFYING METADATA"
|
||||
for details
|
||||
--replace-in-metadata FIELDS REGEX REPLACE
|
||||
for details. Supported values of "WHEN" are
|
||||
the same as that of --use-postprocessor
|
||||
(default: pre_process)
|
||||
--replace-in-metadata [WHEN:]FIELDS REGEX REPLACE
|
||||
Replace text in a metadata field using the
|
||||
given regex. This option can be used
|
||||
multiple times
|
||||
multiple times. Supported values of "WHEN"
|
||||
are the same as that of --use-postprocessor
|
||||
(default: pre_process)
|
||||
--xattrs Write metadata to the video file's xattrs
|
||||
(using dublin core and xdg standards)
|
||||
--concat-playlist POLICY Concatenate videos in a playlist. One of
|
||||
|
@ -979,18 +994,18 @@ You can also fork the project on GitHub and run your fork's [build workflow](.gi
|
|||
--ffmpeg-location PATH Location of the ffmpeg binary; either the
|
||||
path to the binary or its containing directory
|
||||
--exec [WHEN:]CMD Execute a command, optionally prefixed with
|
||||
when to execute it (after_move if
|
||||
unspecified), separated by a ":". Supported
|
||||
values of "WHEN" are the same as that of
|
||||
--use-postprocessor. Same syntax as the
|
||||
output template can be used to pass any
|
||||
field as arguments to the command. After
|
||||
download, an additional field "filepath"
|
||||
that contains the final path of the
|
||||
downloaded file is also available, and if no
|
||||
fields are passed, %(filepath)q is appended
|
||||
to the end of the command. This option can
|
||||
be used multiple times
|
||||
when to execute it, separated by a ":".
|
||||
Supported values of "WHEN" are the same as
|
||||
that of --use-postprocessor (default:
|
||||
after_move). Same syntax as the output
|
||||
template can be used to pass any field as
|
||||
arguments to the command. After download, an
|
||||
additional field "filepath" that contains
|
||||
the final path of the downloaded file is
|
||||
also available, and if no fields are passed,
|
||||
%(filepath,_filename|)q is appended to the
|
||||
end of the command. This option can be used
|
||||
multiple times
|
||||
--no-exec Remove any previously defined --exec
|
||||
--convert-subs FORMAT Convert the subtitles to another format
|
||||
(currently supported: ass, lrc, srt, vtt)
|
||||
|
@ -1028,14 +1043,16 @@ You can also fork the project on GitHub and run your fork's [build workflow](.gi
|
|||
postprocessor is invoked. It can be one of
|
||||
"pre_process" (after video extraction),
|
||||
"after_filter" (after video passes filter),
|
||||
"before_dl" (before each video download),
|
||||
"post_process" (after each video download;
|
||||
default), "after_move" (after moving video
|
||||
file to it's final locations), "after_video"
|
||||
(after downloading and processing all
|
||||
formats of a video), or "playlist" (at end
|
||||
of playlist). This option can be used
|
||||
multiple times to add different postprocessors
|
||||
"video" (after --format; before
|
||||
--print/--output), "before_dl" (before each
|
||||
video download), "post_process" (after each
|
||||
video download; default), "after_move"
|
||||
(after moving video file to it's final
|
||||
locations), "after_video" (after downloading
|
||||
and processing all formats of a video), or
|
||||
"playlist" (at end of playlist). This option
|
||||
can be used multiple times to add different
|
||||
postprocessors
|
||||
|
||||
## SponsorBlock Options:
|
||||
Make chapter entries for, or remove various segments (sponsor,
|
||||
|
@ -1046,10 +1063,10 @@ Make chapter entries for, or remove various segments (sponsor,
|
|||
for, separated by commas. Available
|
||||
categories are sponsor, intro, outro,
|
||||
selfpromo, preview, filler, interaction,
|
||||
music_offtopic, poi_highlight, chapter, all and
|
||||
default (=all). You can prefix the category
|
||||
with a "-" to exclude it. See [1] for
|
||||
description of the categories. E.g.
|
||||
music_offtopic, poi_highlight, chapter, all
|
||||
and default (=all). You can prefix the
|
||||
category with a "-" to exclude it. See [1]
|
||||
for description of the categories. E.g.
|
||||
--sponsorblock-mark all,-preview
|
||||
[1] https://wiki.sponsor.ajay.app/w/Segment_Categories
|
||||
--sponsorblock-remove CATS SponsorBlock categories to be removed from
|
||||
|
@ -1058,7 +1075,7 @@ Make chapter entries for, or remove various segments (sponsor,
|
|||
remove takes precedence. The syntax and
|
||||
available categories are the same as for
|
||||
--sponsorblock-mark except that "default"
|
||||
refers to "all,-filler" and poi_highlight and
|
||||
refers to "all,-filler" and poi_highlight,
|
||||
chapter are not available
|
||||
--sponsorblock-chapter-title TEMPLATE
|
||||
An output template for the title of the
|
||||
|
@ -1102,16 +1119,22 @@ You can configure yt-dlp by placing any supported command line option to a confi
|
|||
* `yt-dlp.conf` in the home path given by `-P`
|
||||
* If `-P` is not given, the current directory is searched
|
||||
1. **User Configuration**:
|
||||
* `${XDG_CONFIG_HOME}/yt-dlp/config` (recommended on Linux/macOS)
|
||||
* `${XDG_CONFIG_HOME}/yt-dlp.conf`
|
||||
* `${XDG_CONFIG_HOME}/yt-dlp/config` (recommended on Linux/macOS)
|
||||
* `${XDG_CONFIG_HOME}/yt-dlp/config.txt`
|
||||
* `${APPDATA}/yt-dlp.conf`
|
||||
* `${APPDATA}/yt-dlp/config` (recommended on Windows)
|
||||
* `${APPDATA}/yt-dlp/config.txt`
|
||||
* `~/yt-dlp.conf`
|
||||
* `~/yt-dlp.conf.txt`
|
||||
* `~/.yt-dlp/config`
|
||||
* `~/.yt-dlp/config.txt`
|
||||
|
||||
See also: [Notes about environment variables](#notes-about-environment-variables)
|
||||
1. **System Configuration**:
|
||||
* `/etc/yt-dlp.conf`
|
||||
* `/etc/yt-dlp/config`
|
||||
* `/etc/yt-dlp/config.txt`
|
||||
|
||||
E.g. with the following configuration file yt-dlp will always extract the audio, not copy the mtime, use a proxy and save all videos under `YouTube` directory in your home directory:
|
||||
```
|
||||
|
@ -1130,7 +1153,7 @@ E.g. with the following configuration file yt-dlp will always extract the audio,
|
|||
-o ~/YouTube/%(title)s.%(ext)s
|
||||
```
|
||||
|
||||
Note that options in configuration file are just the same options aka switches used in regular command line calls; thus there **must be no whitespace** after `-` or `--`, e.g. `-o` or `--proxy` but not `- o` or `-- proxy`. They must also be quoted when necessary as-if it were a UNIX shell.
|
||||
**Note**: Options in configuration file are just the same options aka switches used in regular command line calls; thus there **must be no whitespace** after `-` or `--`, e.g. `-o` or `--proxy` but not `- o` or `-- proxy`. They must also be quoted when necessary as-if it were a UNIX shell.
|
||||
|
||||
You can use `--ignore-config` if you want to disable all configuration files for a particular yt-dlp run. If `--ignore-config` is found inside any configuration file, no further configuration will be loaded. For example, having the option in the portable configuration file prevents loading of home, user, and system configurations. Additionally, (for backward compatibility) if `--ignore-config` is found inside the system configuration file, the user configuration is not loaded.
|
||||
|
||||
|
@ -1206,7 +1229,7 @@ Additionally, you can set different output templates for the various metadata fi
|
|||
|
||||
<a id="outtmpl-postprocess-note"></a>
|
||||
|
||||
Note: Due to post-processing (i.e. merging etc.), the actual output filename might differ. Use `--print after_move:filepath` to get the name after all post-processing is complete.
|
||||
**Note**: Due to post-processing (i.e. merging etc.), the actual output filename might differ. Use `--print after_move:filepath` to get the name after all post-processing is complete.
|
||||
|
||||
The available fields are:
|
||||
|
||||
|
@ -1327,7 +1350,7 @@ Available only in `--sponsorblock-chapter-title`:
|
|||
|
||||
Each aforementioned sequence when referenced in an output template will be replaced by the actual value corresponding to the sequence name. E.g. for `-o %(title)s-%(id)s.%(ext)s` and an mp4 video with title `yt-dlp test video` and id `BaW_jenozKc`, this will result in a `yt-dlp test video-BaW_jenozKc.mp4` file created in the current directory.
|
||||
|
||||
Note that some of the sequences are not guaranteed to be present since they depend on the metadata obtained by a particular extractor. Such sequences will be replaced with placeholder value provided with `--output-na-placeholder` (`NA` by default).
|
||||
**Note**: Some of the sequences are not guaranteed to be present since they depend on the metadata obtained by a particular extractor. Such sequences will be replaced with placeholder value provided with `--output-na-placeholder` (`NA` by default).
|
||||
|
||||
**Tip**: Look at the `-j` output to identify which fields are available for the particular URL
|
||||
|
||||
|
@ -1468,7 +1491,7 @@ Also filtering work for comparisons `=` (equals), `^=` (starts with), `$=` (ends
|
|||
|
||||
Any string comparison may be prefixed with negation `!` in order to produce an opposite comparison, e.g. `!*=` (does not contain). The comparand of a string comparison needs to be quoted with either double or single quotes if it contains spaces or special characters other than `._-`.
|
||||
|
||||
Note that none of the aforementioned meta fields are guaranteed to be present since this solely depends on the metadata obtained by particular extractor, i.e. the metadata offered by the website. Any other field made available by the extractor can also be used for filtering.
|
||||
**Note**: None of the aforementioned meta fields are guaranteed to be present since this solely depends on the metadata obtained by particular extractor, i.e. the metadata offered by the website. Any other field made available by the extractor can also be used for filtering.
|
||||
|
||||
Formats for which the value is not known are excluded unless you put a question mark (`?`) after the operator. You can combine format filters, so `-f "[height<=?720][tbr>500]"` selects up to 720p videos (or videos where the height is not known) with a bitrate of at least 500 KBit/s. You can also use the filters with `all` to download all formats that satisfy the filter, e.g. `-f "all[vcodec=none]"` selects all audio-only formats.
|
||||
|
||||
|
@ -1488,7 +1511,7 @@ The available fields are:
|
|||
- `source`: The preference of the source
|
||||
- `proto`: Protocol used for download (`https`/`ftps` > `http`/`ftp` > `m3u8_native`/`m3u8` > `http_dash_segments`> `websocket_frag` > `mms`/`rtsp` > `f4f`/`f4m`)
|
||||
- `vcodec`: Video Codec (`av01` > `vp9.2` > `vp9` > `h265` > `h264` > `vp8` > `h263` > `theora` > other)
|
||||
- `acodec`: Audio Codec (`flac`/`alac` > `wav`/`aiff` > `opus` > `vorbis` > `aac` > `mp4a` > `mp3` > `eac3` > `ac3` > `dts` > other)
|
||||
- `acodec`: Audio Codec (`flac`/`alac` > `wav`/`aiff` > `opus` > `vorbis` > `aac` > `mp4a` > `mp3` > `ac4` > `eac3` > `ac3` > `dts` > other)
|
||||
- `codec`: Equivalent to `vcodec,acodec`
|
||||
- `vext`: Video Extension (`mp4` > `mov` > `webm` > `flv` > other). If `--prefer-free-formats` is used, `webm` is preferred.
|
||||
- `aext`: Audio Extension (`m4a` > `aac` > `mp3` > `ogg` > `opus` > `webm` > other). If `--prefer-free-formats` is used, the order changes to `ogg` > `opus` > `webm` > `mp3` > `m4a` > `aac`
|
||||
|
@ -1721,7 +1744,7 @@ Some extractors accept additional arguments which can be passed using `--extract
|
|||
The following extractors use this feature:
|
||||
|
||||
#### youtube
|
||||
* `lang`: Language code to prefer translated metadata of this language (case-sensitive). By default, the video primary language metadata is preferred, with a fallback to `en` translated. See [youtube.py](https://github.com/yt-dlp/yt-dlp/blob/c26f9b991a0681fd3ea548d535919cec1fbbd430/yt_dlp/extractor/youtube.py#L381-L390) for list of supported content language codes
|
||||
* `lang`: Prefer translated metadata (`title`, `description` etc) of this language code (case-sensitive). By default, the video primary language metadata is preferred, with a fallback to `en` translated. See [youtube.py](https://github.com/yt-dlp/yt-dlp/blob/c26f9b991a0681fd3ea548d535919cec1fbbd430/yt_dlp/extractor/youtube.py#L381-L390) for list of supported content language codes
|
||||
* `skip`: One or more of `hls`, `dash` or `translated_subs` to skip extraction of the m3u8 manifests, dash manifests and [auto-translated subtitles](https://github.com/yt-dlp/yt-dlp/issues/4090#issuecomment-1158102032) respectively
|
||||
* `player_client`: Clients to extract video data from. The main clients are `web`, `android` and `ios` with variants `_music`, `_embedded`, `_embedscreen`, `_creator` (e.g. `web_embedded`); and `mweb` and `tv_embedded` (agegate bypass) with no variants. By default, `android,web` is used, but `tv_embedded` and `creator` variants are added as required for age-gated videos. Similarly, the music variants are added for `music.youtube.com` urls. You can use `all` to use all the clients, and `default` for the default clients.
|
||||
* `player_skip`: Skip some network requests that are generally needed for robust extraction. One or more of `configs` (skip client configs), `webpage` (skip initial webpage), `js` (skip js player). While these options can help reduce the number of requests needed or avoid some rate-limiting, they could cause some issues. See [#860](https://github.com/yt-dlp/yt-dlp/pull/860) for more details
|
||||
|
@ -1736,6 +1759,9 @@ The following extractors use this feature:
|
|||
* `skip`: One or more of `webpage` (skip initial webpage download), `authcheck` (allow the download of playlists requiring authentication when no initial webpage is downloaded. This may cause unwanted behavior, see [#1122](https://github.com/yt-dlp/yt-dlp/pull/1122) for more details)
|
||||
* `approximate_date`: Extract approximate `upload_date` and `timestamp` in flat-playlist. This may cause date-based filters to be slightly off
|
||||
|
||||
#### generic
|
||||
* `fragment_query`: Passthrough any query in mpd/m3u8 manifest URLs to their fragments. Does not apply to ffmpeg
|
||||
|
||||
#### funimation
|
||||
* `language`: Audio languages to extract, e.g. `funimation:language=english,japanese`
|
||||
* `version`: The video version to extract - `uncut` or `simulcast`
|
||||
|
@ -1762,6 +1788,7 @@ The following extractors use this feature:
|
|||
* `dr`: dynamic range to ignore - one or more of `sdr`, `hdr10`, `dv`
|
||||
|
||||
#### tiktok
|
||||
* `api_hostname`: Hostname to use for mobile API requests, e.g. `api-h2.tiktokv.com`
|
||||
* `app_version`: App version to call mobile APIs with - should be set along with `manifest_app_version`, e.g. `20.2.1`
|
||||
* `manifest_app_version`: Numeric app version to call mobile APIs with, e.g. `221`
|
||||
|
||||
|
@ -1771,26 +1798,78 @@ The following extractors use this feature:
|
|||
#### twitter
|
||||
* `force_graphql`: Force usage of the GraphQL API. By default it will only be used if login cookies are provided
|
||||
|
||||
NOTE: These options may be changed/removed in the future without concern for backward compatibility
|
||||
**Note**: These options may be changed/removed in the future without concern for backward compatibility
|
||||
|
||||
<!-- MANPAGE: MOVE "INSTALLATION" SECTION HERE -->
|
||||
|
||||
|
||||
# PLUGINS
|
||||
|
||||
Plugins are loaded from `<root-dir>/ytdlp_plugins/<type>/__init__.py`; where `<root-dir>` is the directory of the binary (`<root-dir>/yt-dlp`), or the root directory of the module if you are running directly from source-code (`<root dir>/yt_dlp/__main__.py`). Plugins are currently not supported for the `pip` version
|
||||
Note that **all** plugins are imported even if not invoked, and that **there are no checks** performed on plugin code. **Use plugins at your own risk and only if you trust the code!**
|
||||
|
||||
Plugins can be of `<type>`s `extractor` or `postprocessor`. Extractor plugins do not need to be enabled from the CLI and are automatically invoked when the input URL is suitable for it. Postprocessor plugins can be invoked using `--use-postprocessor NAME`.
|
||||
Plugins can be of `<type>`s `extractor` or `postprocessor`.
|
||||
- Extractor plugins do not need to be enabled from the CLI and are automatically invoked when the input URL is suitable for it.
|
||||
- Extractor plugins take priority over builtin extractors.
|
||||
- Postprocessor plugins can be invoked using `--use-postprocessor NAME`.
|
||||
|
||||
See [ytdlp_plugins](ytdlp_plugins) for example plugins.
|
||||
|
||||
Note that **all** plugins are imported even if not invoked, and that **there are no checks** performed on plugin code. Use plugins at your own risk and only if you trust the code
|
||||
Plugins are loaded from the namespace packages `yt_dlp_plugins.extractor` and `yt_dlp_plugins.postprocessor`.
|
||||
|
||||
If you are a plugin author, add [ytdlp-plugins](https://github.com/topics/ytdlp-plugins) as a topic to your repository for discoverability
|
||||
In other words, the file structure on the disk looks something like:
|
||||
|
||||
yt_dlp_plugins/
|
||||
extractor/
|
||||
myplugin.py
|
||||
postprocessor/
|
||||
myplugin.py
|
||||
|
||||
yt-dlp looks for these `yt_dlp_plugins` namespace folders in many locations (see below) and loads in plugins from **all** of them.
|
||||
|
||||
See the [wiki for some known plugins](https://github.com/yt-dlp/yt-dlp/wiki/Plugins)
|
||||
|
||||
## Installing Plugins
|
||||
|
||||
Plugins can be installed using various methods and locations.
|
||||
|
||||
1. **Configuration directories**:
|
||||
Plugin packages (containing a `yt_dlp_plugins` namespace folder) can be dropped into the following standard [configuration locations](#configuration):
|
||||
* **User Plugins**
|
||||
* `${XDG_CONFIG_HOME}/yt-dlp/plugins/<package name>/yt_dlp_plugins/` (recommended on Linux/macOS)
|
||||
* `${XDG_CONFIG_HOME}/yt-dlp-plugins/<package name>/yt_dlp_plugins/`
|
||||
* `${APPDATA}/yt-dlp/plugins/<package name>/yt_dlp_plugins/` (recommended on Windows)
|
||||
* `${APPDATA}/yt-dlp-plugins/<package name>/yt_dlp_plugins/`
|
||||
* `~/.yt-dlp/plugins/<package name>/yt_dlp_plugins/`
|
||||
* `~/yt-dlp-plugins/<package name>/yt_dlp_plugins/`
|
||||
* **System Plugins**
|
||||
* `/etc/yt-dlp/plugins/<package name>/yt_dlp_plugins/`
|
||||
* `/etc/yt-dlp-plugins/<package name>/yt_dlp_plugins/`
|
||||
2. **Executable location**: Plugin packages can similarly be installed in a `yt-dlp-plugins` directory under the executable location:
|
||||
* Binary: where `<root-dir>/yt-dlp.exe`, `<root-dir>/yt-dlp-plugins/<package name>/yt_dlp_plugins/`
|
||||
* Source: where `<root-dir>/yt_dlp/__main__.py`, `<root-dir>/yt-dlp-plugins/<package name>/yt_dlp_plugins/`
|
||||
|
||||
3. **pip and other locations in `PYTHONPATH`**
|
||||
* Plugin packages can be installed and managed using `pip`. See [yt-dlp-sample-plugins](https://github.com/yt-dlp/yt-dlp-sample-plugins) for an example.
|
||||
* Note: plugin files between plugin packages installed with pip must have unique filenames.
|
||||
* Any path in `PYTHONPATH` is searched in for the `yt_dlp_plugins` namespace folder.
|
||||
* Note: This does not apply for Pyinstaller/py2exe builds.
|
||||
|
||||
|
||||
`.zip`, `.egg` and `.whl` archives containing a `yt_dlp_plugins` namespace folder in their root are also supported as plugin packages.
|
||||
* e.g. `${XDG_CONFIG_HOME}/yt-dlp/plugins/mypluginpkg.zip` where `mypluginpkg.zip` contains `yt_dlp_plugins/<type>/myplugin.py`
|
||||
|
||||
Run yt-dlp with `--verbose` to check if the plugin has been loaded.
|
||||
|
||||
## Developing Plugins
|
||||
|
||||
See the [yt-dlp-sample-plugins](https://github.com/yt-dlp/yt-dlp-sample-plugins) repo for a template plugin package and the [Plugin Development](https://github.com/yt-dlp/yt-dlp/wiki/Plugin-Development) section of the wiki for a plugin development guide.
|
||||
|
||||
All public classes with a name ending in `IE`/`PP` are imported from each file for extractors and postprocessors repectively. This respects underscore prefix (e.g. `_MyBasePluginIE` is private) and `__all__`. Modules can similarly be excluded by prefixing the module name with an underscore (e.g. `_myplugin.py`).
|
||||
|
||||
To replace an existing extractor with a subclass of one, set the `plugin_name` class keyword argument (e.g. `class MyPluginIE(ABuiltInIE, plugin_name='myplugin')` will replace `ABuiltInIE` with `MyPluginIE`). Since the extractor replaces the parent, you should exclude the subclass extractor from being imported separately by making it private using one of the methods described above.
|
||||
|
||||
If you are a plugin author, add [yt-dlp-plugins](https://github.com/topics/yt-dlp-plugins) as a topic to your repository for discoverability.
|
||||
|
||||
See the [Developer Instructions](https://github.com/yt-dlp/yt-dlp/blob/master/CONTRIBUTING.md#developer-instructions) on how to write and test an extractor.
|
||||
|
||||
# EMBEDDING YT-DLP
|
||||
|
||||
|
|
|
@ -40,8 +40,12 @@ def main():
|
|||
|
||||
_ALL_CLASSES = get_all_ies() # Must be before import
|
||||
|
||||
import yt_dlp.plugins
|
||||
from yt_dlp.extractor.common import InfoExtractor, SearchInfoExtractor
|
||||
|
||||
# Filter out plugins
|
||||
_ALL_CLASSES = [cls for cls in _ALL_CLASSES if not cls.__module__.startswith(f'{yt_dlp.plugins.PACKAGE_NAME}.')]
|
||||
|
||||
DummyInfoExtractor = type('InfoExtractor', (InfoExtractor,), {'IE_NAME': NO_ATTR})
|
||||
module_src = '\n'.join((
|
||||
MODULE_TEMPLATE,
|
||||
|
|
|
@ -0,0 +1,5 @@
|
|||
[build-system]
|
||||
build-backend = 'setuptools.build_meta'
|
||||
# https://github.com/yt-dlp/yt-dlp/issues/5941
|
||||
# https://github.com/pypa/distutils/issues/17
|
||||
requires = ['setuptools > 50']
|
|
@ -26,7 +26,7 @@ markers =
|
|||
|
||||
[tox:tox]
|
||||
skipsdist = true
|
||||
envlist = py{36,37,38,39,310},pypy{36,37,38,39}
|
||||
envlist = py{36,37,38,39,310,311},pypy{36,37,38,39}
|
||||
skip_missing_interpreters = true
|
||||
|
||||
[testenv] # tox
|
||||
|
|
8
setup.py
8
setup.py
|
@ -1,8 +1,12 @@
|
|||
#!/usr/bin/env python3
|
||||
|
||||
import os.path
|
||||
import subprocess
|
||||
# Allow execution from anywhere
|
||||
import os
|
||||
import sys
|
||||
|
||||
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
|
||||
|
||||
import subprocess
|
||||
import warnings
|
||||
|
||||
try:
|
||||
|
|
|
@ -51,6 +51,8 @@
|
|||
- **afreecatv:live**: [<abbr title="netrc machine"><em>afreecatv</em></abbr>] afreecatv.com
|
||||
- **afreecatv:user**
|
||||
- **AirMozilla**
|
||||
- **AirTV**
|
||||
- **AitubeKZVideo**
|
||||
- **AliExpressLive**
|
||||
- **AlJazeera**
|
||||
- **Allocine**
|
||||
|
@ -60,6 +62,10 @@
|
|||
- **Alura**: [<abbr title="netrc machine"><em>alura</em></abbr>]
|
||||
- **AluraCourse**: [<abbr title="netrc machine"><em>aluracourse</em></abbr>]
|
||||
- **Amara**
|
||||
- **AmazonMiniTV**
|
||||
- **amazonminitv:season**: Amazon MiniTV Series, "minitv:season:" prefix
|
||||
- **amazonminitv:series**
|
||||
- **AmazonReviews**
|
||||
- **AmazonStore**
|
||||
- **AMCNetworks**
|
||||
- **AmericasTestKitchen**
|
||||
|
@ -130,6 +136,8 @@
|
|||
- **BBVTV**: [<abbr title="netrc machine"><em>bbvtv</em></abbr>]
|
||||
- **BBVTVLive**: [<abbr title="netrc machine"><em>bbvtv</em></abbr>]
|
||||
- **BBVTVRecordings**: [<abbr title="netrc machine"><em>bbvtv</em></abbr>]
|
||||
- **BeatBumpPlaylist**
|
||||
- **BeatBumpVideo**
|
||||
- **Beatport**
|
||||
- **Beeg**
|
||||
- **BehindKink**
|
||||
|
@ -157,7 +165,7 @@
|
|||
- **BilibiliSpacePlaylist**
|
||||
- **BilibiliSpaceVideo**
|
||||
- **BiliIntl**: [<abbr title="netrc machine"><em>biliintl</em></abbr>]
|
||||
- **BiliIntlSeries**: [<abbr title="netrc machine"><em>biliintl</em></abbr>]
|
||||
- **biliIntl:series**: [<abbr title="netrc machine"><em>biliintl</em></abbr>]
|
||||
- **BiliLive**
|
||||
- **BioBioChileTV**
|
||||
- **Biography**
|
||||
|
@ -345,6 +353,8 @@
|
|||
- **DrTuber**
|
||||
- **drtv**
|
||||
- **drtv:live**
|
||||
- **drtv:season**
|
||||
- **drtv:series**
|
||||
- **DTube**
|
||||
- **duboku**: www.duboku.io
|
||||
- **duboku:list**: www.duboku.io entire series
|
||||
|
@ -387,6 +397,7 @@
|
|||
- **ESPNCricInfo**
|
||||
- **EsriVideo**
|
||||
- **Europa**
|
||||
- **EuroParlWebstream**
|
||||
- **EuropeanTour**
|
||||
- **Eurosport**
|
||||
- **EUScreen**
|
||||
|
@ -599,6 +610,8 @@
|
|||
- **JWPlatform**
|
||||
- **Kakao**
|
||||
- **Kaltura**
|
||||
- **Kanal2**
|
||||
- **KankaNews**
|
||||
- **Karaoketv**
|
||||
- **KarriereVideos**
|
||||
- **Katsomo**
|
||||
|
@ -607,8 +620,10 @@
|
|||
- **Ketnet**
|
||||
- **khanacademy**
|
||||
- **khanacademy:unit**
|
||||
- **Kick**
|
||||
- **Kicker**
|
||||
- **KickStarter**
|
||||
- **KickVOD**
|
||||
- **KinjaEmbed**
|
||||
- **KinoPoisk**
|
||||
- **KompasVideo**
|
||||
|
@ -709,6 +724,7 @@
|
|||
- **Mediasite**
|
||||
- **MediasiteCatalog**
|
||||
- **MediasiteNamedCatalog**
|
||||
- **MediaStream**
|
||||
- **MediaWorksNZVOD**
|
||||
- **Medici**
|
||||
- **megaphone.fm**: megaphone.fm embedded players
|
||||
|
@ -845,6 +861,7 @@
|
|||
- **NetPlusTVRecordings**: [<abbr title="netrc machine"><em>netplus</em></abbr>]
|
||||
- **Netverse**
|
||||
- **NetversePlaylist**
|
||||
- **NetverseSearch**: "netsearch:" prefix
|
||||
- **Netzkino**
|
||||
- **Newgrounds**
|
||||
- **Newgrounds:playlist**
|
||||
|
@ -887,6 +904,7 @@
|
|||
- **njoy:embed**
|
||||
- **NJPWWorld**: [<abbr title="netrc machine"><em>njpwworld</em></abbr>] 新日本プロレスワールド
|
||||
- **NobelPrize**
|
||||
- **NoicePodcast**
|
||||
- **NonkTube**
|
||||
- **NoodleMagazine**
|
||||
- **Noovo**
|
||||
|
@ -933,6 +951,7 @@
|
|||
- **on24**: ON24
|
||||
- **OnDemandKorea**
|
||||
- **OneFootball**
|
||||
- **OnePlacePodcast**
|
||||
- **onet.pl**
|
||||
- **onet.tv**
|
||||
- **onet.tv:channel**
|
||||
|
@ -1022,11 +1041,13 @@
|
|||
- **PokerGoCollection**: [<abbr title="netrc machine"><em>pokergo</em></abbr>]
|
||||
- **PolsatGo**
|
||||
- **PolskieRadio**
|
||||
- **polskieradio:audition**
|
||||
- **polskieradio:category**
|
||||
- **polskieradio:kierowcow**
|
||||
- **polskieradio:legacy**
|
||||
- **polskieradio:player**
|
||||
- **polskieradio:podcast**
|
||||
- **polskieradio:podcast:list**
|
||||
- **PolskieRadioCategory**
|
||||
- **Popcorntimes**
|
||||
- **PopcornTV**
|
||||
- **PornCom**
|
||||
|
@ -1155,6 +1176,7 @@
|
|||
- **rtvslo.si**
|
||||
- **RUHD**
|
||||
- **Rule34Video**
|
||||
- **Rumble**
|
||||
- **RumbleChannel**
|
||||
- **RumbleEmbed**
|
||||
- **Ruptly**
|
||||
|
@ -1189,6 +1211,7 @@
|
|||
- **screen.yahoo:search**: Yahoo screen search; "yvsearch:" prefix
|
||||
- **Screen9**
|
||||
- **Screencast**
|
||||
- **Screencastify**
|
||||
- **ScreencastOMatic**
|
||||
- **ScrippsNetworks**
|
||||
- **scrippsnetworks:watch**
|
||||
|
@ -1212,6 +1235,7 @@
|
|||
- **ShugiinItvLive**: 衆議院インターネット審議中継
|
||||
- **ShugiinItvLiveRoom**: 衆議院インターネット審議中継 (中継)
|
||||
- **ShugiinItvVod**: 衆議院インターネット審議中継 (ビデオライブラリ)
|
||||
- **SibnetEmbed**
|
||||
- **simplecast**
|
||||
- **simplecast:episode**
|
||||
- **simplecast:podcast**
|
||||
|
@ -1227,7 +1251,7 @@
|
|||
- **skynewsarabia:video**
|
||||
- **SkyNewsAU**
|
||||
- **Slideshare**
|
||||
- **SlidesLive**: (**Currently broken**)
|
||||
- **SlidesLive**
|
||||
- **Slutload**
|
||||
- **Smotrim**
|
||||
- **Snotr**
|
||||
|
@ -1241,6 +1265,7 @@
|
|||
- **soundcloud:set**: [<abbr title="netrc machine"><em>soundcloud</em></abbr>]
|
||||
- **soundcloud:trackstation**: [<abbr title="netrc machine"><em>soundcloud</em></abbr>]
|
||||
- **soundcloud:user**: [<abbr title="netrc machine"><em>soundcloud</em></abbr>]
|
||||
- **soundcloud:user:permalink**: [<abbr title="netrc machine"><em>soundcloud</em></abbr>]
|
||||
- **SoundcloudEmbed**
|
||||
- **soundgasm**
|
||||
- **soundgasm:profile**
|
||||
|
@ -1352,10 +1377,14 @@
|
|||
- **ThisAmericanLife**
|
||||
- **ThisAV**
|
||||
- **ThisOldHouse**
|
||||
- **ThisVid**
|
||||
- **ThisVidMember**
|
||||
- **ThisVidPlaylist**
|
||||
- **ThreeSpeak**
|
||||
- **ThreeSpeakUser**
|
||||
- **TikTok**
|
||||
- **tiktok:effect**: (**Currently broken**)
|
||||
- **tiktok:live**
|
||||
- **tiktok:sound**: (**Currently broken**)
|
||||
- **tiktok:tag**: (**Currently broken**)
|
||||
- **tiktok:user**: (**Currently broken**)
|
||||
|
@ -1383,6 +1412,7 @@
|
|||
- **TrovoChannelClip**: All Clips of a trovo.live channel; "trovoclip:" prefix
|
||||
- **TrovoChannelVod**: All VODs of a trovo.live channel; "trovovod:" prefix
|
||||
- **TrovoVod**
|
||||
- **TrtCocukVideo**
|
||||
- **TrueID**
|
||||
- **TruNews**
|
||||
- **Truth**
|
||||
|
@ -1483,6 +1513,7 @@
|
|||
- **VeeHD**
|
||||
- **Veo**
|
||||
- **Veoh**
|
||||
- **veoh:user**
|
||||
- **Vesti**: Вести.Ru
|
||||
- **Vevo**
|
||||
- **VevoPlaylist**
|
||||
|
@ -1502,6 +1533,11 @@
|
|||
- **video.sky.it:live**
|
||||
- **VideoDetective**
|
||||
- **videofy.me**
|
||||
- **VideoKen**
|
||||
- **VideoKenCategory**
|
||||
- **VideoKenPlayer**
|
||||
- **VideoKenPlaylist**
|
||||
- **VideoKenTopic**
|
||||
- **videomore**
|
||||
- **videomore:season**
|
||||
- **videomore:video**
|
||||
|
@ -1521,6 +1557,7 @@
|
|||
- **vimeo:group**: [<abbr title="netrc machine"><em>vimeo</em></abbr>]
|
||||
- **vimeo:likes**: [<abbr title="netrc machine"><em>vimeo</em></abbr>] Vimeo user likes
|
||||
- **vimeo:ondemand**: [<abbr title="netrc machine"><em>vimeo</em></abbr>]
|
||||
- **vimeo:pro**: [<abbr title="netrc machine"><em>vimeo</em></abbr>]
|
||||
- **vimeo:review**: [<abbr title="netrc machine"><em>vimeo</em></abbr>] Review pages on vimeo
|
||||
- **vimeo:user**: [<abbr title="netrc machine"><em>vimeo</em></abbr>]
|
||||
- **vimeo:watchlater**: [<abbr title="netrc machine"><em>vimeo</em></abbr>] Vimeo watch later list, ":vimeowatchlater" keyword (requires authentication)
|
||||
|
@ -1549,6 +1586,7 @@
|
|||
- **VoiceRepublic**
|
||||
- **voicy**
|
||||
- **voicy:channel**
|
||||
- **VolejTV**
|
||||
- **Voot**
|
||||
- **VootSeries**
|
||||
- **VoxMedia**
|
||||
|
@ -1591,6 +1629,7 @@
|
|||
- **WDRElefant**
|
||||
- **WDRPage**
|
||||
- **web.archive:youtube**: web.archive.org saved youtube videos, "ytarchive:" prefix
|
||||
- **Webcamerapl**
|
||||
- **Webcaster**
|
||||
- **WebcasterFeed**
|
||||
- **WebOfStories**
|
||||
|
@ -1604,6 +1643,7 @@
|
|||
- **wikimedia.org**
|
||||
- **Willow**
|
||||
- **WimTV**
|
||||
- **WinSportsVideo**
|
||||
- **Wistia**
|
||||
- **WistiaChannel**
|
||||
- **WistiaPlaylist**
|
||||
|
@ -1618,16 +1658,13 @@
|
|||
- **WWE**
|
||||
- **wyborcza:video**
|
||||
- **WyborczaPodcast**
|
||||
- **Xanimu**
|
||||
- **XBef**
|
||||
- **XboxClips**
|
||||
- **XFileShare**: XFileShare based sites: Aparat, ClipWatching, GoUnlimited, GoVid, HolaVid, Streamty, TheVideoBee, Uqload, VidBom, vidlo, VidLocker, VidShare, VUp, WolfStream, XVideoSharing
|
||||
- **XHamster**
|
||||
- **XHamsterEmbed**
|
||||
- **XHamsterUser**
|
||||
- **xiami:album**: 虾米音乐 - 专辑
|
||||
- **xiami:artist**: 虾米音乐 - 歌手
|
||||
- **xiami:collection**: 虾米音乐 - 精选集
|
||||
- **xiami:song**: 虾米音乐
|
||||
- **ximalaya**: 喜马拉雅FM
|
||||
- **ximalaya:album**: 喜马拉雅FM 专辑
|
||||
- **xinpianchang**: xinpianchang.com
|
||||
|
|
|
@ -0,0 +1,227 @@
|
|||
#!/usr/bin/env python3
|
||||
|
||||
# Allow direct execution
|
||||
import os
|
||||
import sys
|
||||
import unittest
|
||||
import unittest.mock
|
||||
|
||||
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
|
||||
import contextlib
|
||||
import itertools
|
||||
from pathlib import Path
|
||||
|
||||
from yt_dlp.compat import compat_expanduser
|
||||
from yt_dlp.options import create_parser, parseOpts
|
||||
from yt_dlp.utils import Config, get_executable_path
|
||||
|
||||
ENVIRON_DEFAULTS = {
|
||||
'HOME': None,
|
||||
'XDG_CONFIG_HOME': '/_xdg_config_home/',
|
||||
'USERPROFILE': 'C:/Users/testing/',
|
||||
'APPDATA': 'C:/Users/testing/AppData/Roaming/',
|
||||
'HOMEDRIVE': 'C:/',
|
||||
'HOMEPATH': 'Users/testing/',
|
||||
}
|
||||
|
||||
|
||||
@contextlib.contextmanager
|
||||
def set_environ(**kwargs):
|
||||
saved_environ = os.environ.copy()
|
||||
|
||||
for name, value in {**ENVIRON_DEFAULTS, **kwargs}.items():
|
||||
if value is None:
|
||||
os.environ.pop(name, None)
|
||||
else:
|
||||
os.environ[name] = value
|
||||
|
||||
yield
|
||||
|
||||
os.environ.clear()
|
||||
os.environ.update(saved_environ)
|
||||
|
||||
|
||||
def _generate_expected_groups():
|
||||
xdg_config_home = os.getenv('XDG_CONFIG_HOME') or compat_expanduser('~/.config')
|
||||
appdata_dir = os.getenv('appdata')
|
||||
home_dir = compat_expanduser('~')
|
||||
return {
|
||||
'Portable': [
|
||||
Path(get_executable_path(), 'yt-dlp.conf'),
|
||||
],
|
||||
'Home': [
|
||||
Path('yt-dlp.conf'),
|
||||
],
|
||||
'User': [
|
||||
Path(xdg_config_home, 'yt-dlp.conf'),
|
||||
Path(xdg_config_home, 'yt-dlp', 'config'),
|
||||
Path(xdg_config_home, 'yt-dlp', 'config.txt'),
|
||||
*((
|
||||
Path(appdata_dir, 'yt-dlp.conf'),
|
||||
Path(appdata_dir, 'yt-dlp', 'config'),
|
||||
Path(appdata_dir, 'yt-dlp', 'config.txt'),
|
||||
) if appdata_dir else ()),
|
||||
Path(home_dir, 'yt-dlp.conf'),
|
||||
Path(home_dir, 'yt-dlp.conf.txt'),
|
||||
Path(home_dir, '.yt-dlp', 'config'),
|
||||
Path(home_dir, '.yt-dlp', 'config.txt'),
|
||||
],
|
||||
'System': [
|
||||
Path('/etc/yt-dlp.conf'),
|
||||
Path('/etc/yt-dlp/config'),
|
||||
Path('/etc/yt-dlp/config.txt'),
|
||||
]
|
||||
}
|
||||
|
||||
|
||||
class TestConfig(unittest.TestCase):
|
||||
maxDiff = None
|
||||
|
||||
@set_environ()
|
||||
def test_config__ENVIRON_DEFAULTS_sanity(self):
|
||||
expected = make_expected()
|
||||
self.assertCountEqual(
|
||||
set(expected), expected,
|
||||
'ENVIRON_DEFAULTS produces non unique names')
|
||||
|
||||
def test_config_all_environ_values(self):
|
||||
for name, value in ENVIRON_DEFAULTS.items():
|
||||
for new_value in (None, '', '.', value or '/some/dir'):
|
||||
with set_environ(**{name: new_value}):
|
||||
self._simple_grouping_test()
|
||||
|
||||
def test_config_default_expected_locations(self):
|
||||
files, _ = self._simple_config_test()
|
||||
self.assertEqual(
|
||||
files, make_expected(),
|
||||
'Not all expected locations have been checked')
|
||||
|
||||
def test_config_default_grouping(self):
|
||||
self._simple_grouping_test()
|
||||
|
||||
def _simple_grouping_test(self):
|
||||
expected_groups = make_expected_groups()
|
||||
for name, group in expected_groups.items():
|
||||
for index, existing_path in enumerate(group):
|
||||
result, opts = self._simple_config_test(existing_path)
|
||||
expected = expected_from_expected_groups(expected_groups, existing_path)
|
||||
self.assertEqual(
|
||||
result, expected,
|
||||
f'The checked locations do not match the expected ({name}, {index})')
|
||||
self.assertEqual(
|
||||
opts.outtmpl['default'], '1',
|
||||
f'The used result value was incorrect ({name}, {index})')
|
||||
|
||||
def _simple_config_test(self, *stop_paths):
|
||||
encountered = 0
|
||||
paths = []
|
||||
|
||||
def read_file(filename, default=[]):
|
||||
nonlocal encountered
|
||||
path = Path(filename)
|
||||
paths.append(path)
|
||||
if path in stop_paths:
|
||||
encountered += 1
|
||||
return ['-o', f'{encountered}']
|
||||
|
||||
with ConfigMock(read_file):
|
||||
_, opts, _ = parseOpts([], False)
|
||||
|
||||
return paths, opts
|
||||
|
||||
@set_environ()
|
||||
def test_config_early_exit_commandline(self):
|
||||
self._early_exit_test(0, '--ignore-config')
|
||||
|
||||
@set_environ()
|
||||
def test_config_early_exit_files(self):
|
||||
for index, _ in enumerate(make_expected(), 1):
|
||||
self._early_exit_test(index)
|
||||
|
||||
def _early_exit_test(self, allowed_reads, *args):
|
||||
reads = 0
|
||||
|
||||
def read_file(filename, default=[]):
|
||||
nonlocal reads
|
||||
reads += 1
|
||||
|
||||
if reads > allowed_reads:
|
||||
self.fail('The remaining config was not ignored')
|
||||
elif reads == allowed_reads:
|
||||
return ['--ignore-config']
|
||||
|
||||
with ConfigMock(read_file):
|
||||
parseOpts(args, False)
|
||||
|
||||
@set_environ()
|
||||
def test_config_override_commandline(self):
|
||||
self._override_test(0, '-o', 'pass')
|
||||
|
||||
@set_environ()
|
||||
def test_config_override_files(self):
|
||||
for index, _ in enumerate(make_expected(), 1):
|
||||
self._override_test(index)
|
||||
|
||||
def _override_test(self, start_index, *args):
|
||||
index = 0
|
||||
|
||||
def read_file(filename, default=[]):
|
||||
nonlocal index
|
||||
index += 1
|
||||
|
||||
if index > start_index:
|
||||
return ['-o', 'fail']
|
||||
elif index == start_index:
|
||||
return ['-o', 'pass']
|
||||
|
||||
with ConfigMock(read_file):
|
||||
_, opts, _ = parseOpts(args, False)
|
||||
|
||||
self.assertEqual(
|
||||
opts.outtmpl['default'], 'pass',
|
||||
'The earlier group did not override the later ones')
|
||||
|
||||
|
||||
@contextlib.contextmanager
|
||||
def ConfigMock(read_file=None):
|
||||
with unittest.mock.patch('yt_dlp.options.Config') as mock:
|
||||
mock.return_value = Config(create_parser())
|
||||
if read_file is not None:
|
||||
mock.read_file = read_file
|
||||
|
||||
yield mock
|
||||
|
||||
|
||||
def make_expected(*filepaths):
|
||||
return expected_from_expected_groups(_generate_expected_groups(), *filepaths)
|
||||
|
||||
|
||||
def make_expected_groups(*filepaths):
|
||||
return _filter_expected_groups(_generate_expected_groups(), filepaths)
|
||||
|
||||
|
||||
def expected_from_expected_groups(expected_groups, *filepaths):
|
||||
return list(itertools.chain.from_iterable(
|
||||
_filter_expected_groups(expected_groups, filepaths).values()))
|
||||
|
||||
|
||||
def _filter_expected_groups(expected, filepaths):
|
||||
if not filepaths:
|
||||
return expected
|
||||
|
||||
result = {}
|
||||
for group, paths in expected.items():
|
||||
new_paths = []
|
||||
for path in paths:
|
||||
new_paths.append(path)
|
||||
if path in filepaths:
|
||||
break
|
||||
|
||||
result[group] = new_paths
|
||||
|
||||
return result
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
|
@ -155,6 +155,38 @@ class TestJSInterpreter(unittest.TestCase):
|
|||
self.assertEqual(jsi.call_function('z'), 5)
|
||||
self.assertEqual(jsi.call_function('y'), 2)
|
||||
|
||||
def test_if(self):
|
||||
jsi = JSInterpreter('''
|
||||
function x() {
|
||||
let a = 9;
|
||||
if (0==0) {a++}
|
||||
return a
|
||||
}''')
|
||||
self.assertEqual(jsi.call_function('x'), 10)
|
||||
|
||||
jsi = JSInterpreter('''
|
||||
function x() {
|
||||
if (0==0) {return 10}
|
||||
}''')
|
||||
self.assertEqual(jsi.call_function('x'), 10)
|
||||
|
||||
jsi = JSInterpreter('''
|
||||
function x() {
|
||||
if (0!=0) {return 1}
|
||||
else {return 10}
|
||||
}''')
|
||||
self.assertEqual(jsi.call_function('x'), 10)
|
||||
|
||||
""" # Unsupported
|
||||
jsi = JSInterpreter('''
|
||||
function x() {
|
||||
if (0!=0) {return 1}
|
||||
else if (1==0) {return 2}
|
||||
else {return 10}
|
||||
}''')
|
||||
self.assertEqual(jsi.call_function('x'), 10)
|
||||
"""
|
||||
|
||||
def test_for_loop(self):
|
||||
jsi = JSInterpreter('''
|
||||
function x() { a=0; for (i=0; i-10; i++) {a++} return a }
|
||||
|
|
|
@ -0,0 +1,73 @@
|
|||
import importlib
|
||||
import os
|
||||
import shutil
|
||||
import sys
|
||||
import unittest
|
||||
from pathlib import Path
|
||||
|
||||
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
||||
TEST_DATA_DIR = Path(os.path.dirname(os.path.abspath(__file__)), 'testdata')
|
||||
sys.path.append(str(TEST_DATA_DIR))
|
||||
importlib.invalidate_caches()
|
||||
|
||||
from yt_dlp.plugins import PACKAGE_NAME, directories, load_plugins
|
||||
|
||||
|
||||
class TestPlugins(unittest.TestCase):
|
||||
|
||||
TEST_PLUGIN_DIR = TEST_DATA_DIR / PACKAGE_NAME
|
||||
|
||||
def test_directories_containing_plugins(self):
|
||||
self.assertIn(self.TEST_PLUGIN_DIR, map(Path, directories()))
|
||||
|
||||
def test_extractor_classes(self):
|
||||
for module_name in tuple(sys.modules):
|
||||
if module_name.startswith(f'{PACKAGE_NAME}.extractor'):
|
||||
del sys.modules[module_name]
|
||||
plugins_ie = load_plugins('extractor', 'IE')
|
||||
|
||||
self.assertIn(f'{PACKAGE_NAME}.extractor.normal', sys.modules.keys())
|
||||
self.assertIn('NormalPluginIE', plugins_ie.keys())
|
||||
|
||||
# don't load modules with underscore prefix
|
||||
self.assertFalse(
|
||||
f'{PACKAGE_NAME}.extractor._ignore' in sys.modules.keys(),
|
||||
'loaded module beginning with underscore')
|
||||
self.assertNotIn('IgnorePluginIE', plugins_ie.keys())
|
||||
|
||||
# Don't load extractors with underscore prefix
|
||||
self.assertNotIn('_IgnoreUnderscorePluginIE', plugins_ie.keys())
|
||||
|
||||
# Don't load extractors not specified in __all__ (if supplied)
|
||||
self.assertNotIn('IgnoreNotInAllPluginIE', plugins_ie.keys())
|
||||
self.assertIn('InAllPluginIE', plugins_ie.keys())
|
||||
|
||||
def test_postprocessor_classes(self):
|
||||
plugins_pp = load_plugins('postprocessor', 'PP')
|
||||
self.assertIn('NormalPluginPP', plugins_pp.keys())
|
||||
|
||||
def test_importing_zipped_module(self):
|
||||
zip_path = TEST_DATA_DIR / 'zipped_plugins.zip'
|
||||
shutil.make_archive(str(zip_path)[:-4], 'zip', str(zip_path)[:-4])
|
||||
sys.path.append(str(zip_path)) # add zip to search paths
|
||||
importlib.invalidate_caches() # reset the import caches
|
||||
|
||||
try:
|
||||
for plugin_type in ('extractor', 'postprocessor'):
|
||||
package = importlib.import_module(f'{PACKAGE_NAME}.{plugin_type}')
|
||||
self.assertIn(zip_path / PACKAGE_NAME / plugin_type, map(Path, package.__path__))
|
||||
|
||||
plugins_ie = load_plugins('extractor', 'IE')
|
||||
self.assertIn('ZippedPluginIE', plugins_ie.keys())
|
||||
|
||||
plugins_pp = load_plugins('postprocessor', 'PP')
|
||||
self.assertIn('ZippedPluginPP', plugins_pp.keys())
|
||||
|
||||
finally:
|
||||
sys.path.remove(str(zip_path))
|
||||
os.remove(zip_path)
|
||||
importlib.invalidate_caches() # reset the import caches
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
unittest.main()
|
|
@ -105,6 +105,7 @@ from yt_dlp.utils import (
|
|||
sanitized_Request,
|
||||
shell_quote,
|
||||
smuggle_url,
|
||||
str_or_none,
|
||||
str_to_int,
|
||||
strip_jsonp,
|
||||
strip_or_none,
|
||||
|
@ -954,6 +955,85 @@ class TestUtil(unittest.TestCase):
|
|||
)
|
||||
self.assertEqual(escape_url('http://vimeo.com/56015672#at=0'), 'http://vimeo.com/56015672#at=0')
|
||||
|
||||
def test_js_to_json_vars_strings(self):
|
||||
self.assertDictEqual(
|
||||
json.loads(js_to_json(
|
||||
'''{
|
||||
'null': a,
|
||||
'nullStr': b,
|
||||
'true': c,
|
||||
'trueStr': d,
|
||||
'false': e,
|
||||
'falseStr': f,
|
||||
'unresolvedVar': g,
|
||||
}''',
|
||||
{
|
||||
'a': 'null',
|
||||
'b': '"null"',
|
||||
'c': 'true',
|
||||
'd': '"true"',
|
||||
'e': 'false',
|
||||
'f': '"false"',
|
||||
'g': 'var',
|
||||
}
|
||||
)),
|
||||
{
|
||||
'null': None,
|
||||
'nullStr': 'null',
|
||||
'true': True,
|
||||
'trueStr': 'true',
|
||||
'false': False,
|
||||
'falseStr': 'false',
|
||||
'unresolvedVar': 'var'
|
||||
}
|
||||
)
|
||||
|
||||
self.assertDictEqual(
|
||||
json.loads(js_to_json(
|
||||
'''{
|
||||
'int': a,
|
||||
'intStr': b,
|
||||
'float': c,
|
||||
'floatStr': d,
|
||||
}''',
|
||||
{
|
||||
'a': '123',
|
||||
'b': '"123"',
|
||||
'c': '1.23',
|
||||
'd': '"1.23"',
|
||||
}
|
||||
)),
|
||||
{
|
||||
'int': 123,
|
||||
'intStr': '123',
|
||||
'float': 1.23,
|
||||
'floatStr': '1.23',
|
||||
}
|
||||
)
|
||||
|
||||
self.assertDictEqual(
|
||||
json.loads(js_to_json(
|
||||
'''{
|
||||
'object': a,
|
||||
'objectStr': b,
|
||||
'array': c,
|
||||
'arrayStr': d,
|
||||
}''',
|
||||
{
|
||||
'a': '{}',
|
||||
'b': '"{}"',
|
||||
'c': '[]',
|
||||
'd': '"[]"',
|
||||
}
|
||||
)),
|
||||
{
|
||||
'object': {},
|
||||
'objectStr': '{}',
|
||||
'array': [],
|
||||
'arrayStr': '[]',
|
||||
}
|
||||
)
|
||||
|
||||
def test_js_to_json_realworld(self):
|
||||
inp = '''{
|
||||
'clip':{'provider':'pseudo'}
|
||||
|
@ -1874,6 +1954,8 @@ Line 1
|
|||
vcodecs=[None], acodecs=[None], vexts=['webm'], aexts=['m4a']), 'mkv')
|
||||
self.assertEqual(get_compatible_ext(
|
||||
vcodecs=[None], acodecs=[None], vexts=['webm'], aexts=['webm']), 'webm')
|
||||
self.assertEqual(get_compatible_ext(
|
||||
vcodecs=[None], acodecs=[None], vexts=['webm'], aexts=['weba']), 'webm')
|
||||
|
||||
self.assertEqual(get_compatible_ext(
|
||||
vcodecs=['h264'], acodecs=['mp4a'], vexts=['mov'], aexts=['m4a']), 'mp4')
|
||||
|
@ -1934,6 +2016,29 @@ Line 1
|
|||
msg='function as query key should perform a filter based on (key, value)')
|
||||
self.assertCountEqual(traverse_obj(_TEST_DATA, lambda _, x: isinstance(x[0], str)), {'str'},
|
||||
msg='exceptions in the query function should be catched')
|
||||
if __debug__:
|
||||
with self.assertRaises(Exception, msg='Wrong function signature should raise in debug'):
|
||||
traverse_obj(_TEST_DATA, lambda a: ...)
|
||||
with self.assertRaises(Exception, msg='Wrong function signature should raise in debug'):
|
||||
traverse_obj(_TEST_DATA, lambda a, b, c: ...)
|
||||
|
||||
# Test set as key (transformation/type, like `expected_type`)
|
||||
self.assertEqual(traverse_obj(_TEST_DATA, (..., {str.upper}, )), ['STR'],
|
||||
msg='Function in set should be a transformation')
|
||||
self.assertEqual(traverse_obj(_TEST_DATA, (..., {str})), ['str'],
|
||||
msg='Type in set should be a type filter')
|
||||
self.assertEqual(traverse_obj(_TEST_DATA, {dict}), _TEST_DATA,
|
||||
msg='A single set should be wrapped into a path')
|
||||
self.assertEqual(traverse_obj(_TEST_DATA, (..., {str.upper})), ['STR'],
|
||||
msg='Transformation function should not raise')
|
||||
self.assertEqual(traverse_obj(_TEST_DATA, (..., {str_or_none})),
|
||||
[item for item in map(str_or_none, _TEST_DATA.values()) if item is not None],
|
||||
msg='Function in set should be a transformation')
|
||||
if __debug__:
|
||||
with self.assertRaises(Exception, msg='Sets with length != 1 should raise in debug'):
|
||||
traverse_obj(_TEST_DATA, set())
|
||||
with self.assertRaises(Exception, msg='Sets with length != 1 should raise in debug'):
|
||||
traverse_obj(_TEST_DATA, {str.upper, str})
|
||||
|
||||
# Test alternative paths
|
||||
self.assertEqual(traverse_obj(_TEST_DATA, 'fail', 'str'), 'str',
|
||||
|
@ -2025,6 +2130,20 @@ Line 1
|
|||
msg='wrap expected_type fuction in try_call')
|
||||
self.assertEqual(traverse_obj(_EXPECTED_TYPE_DATA, ..., expected_type=str), ['str'],
|
||||
msg='eliminate items that expected_type fails on')
|
||||
self.assertEqual(traverse_obj(_TEST_DATA, {0: 100, 1: 1.2}, expected_type=int), {0: 100},
|
||||
msg='type as expected_type should filter dict values')
|
||||
self.assertEqual(traverse_obj(_TEST_DATA, {0: 100, 1: 1.2, 2: 'None'}, expected_type=str_or_none), {0: '100', 1: '1.2'},
|
||||
msg='function as expected_type should transform dict values')
|
||||
self.assertEqual(traverse_obj(_TEST_DATA, ({0: 1.2}, 0, {int_or_none}), expected_type=int), 1,
|
||||
msg='expected_type should not filter non final dict values')
|
||||
self.assertEqual(traverse_obj(_TEST_DATA, {0: {0: 100, 1: 'str'}}, expected_type=int), {0: {0: 100}},
|
||||
msg='expected_type should transform deep dict values')
|
||||
self.assertEqual(traverse_obj(_TEST_DATA, [({0: '...'}, {0: '...'})], expected_type=type(...)), [{0: ...}, {0: ...}],
|
||||
msg='expected_type should transform branched dict values')
|
||||
self.assertEqual(traverse_obj({1: {3: 4}}, [(1, 2), 3], expected_type=int), [4],
|
||||
msg='expected_type regression for type matching in tuple branching')
|
||||
self.assertEqual(traverse_obj(_TEST_DATA, ['data', ...], expected_type=int), [],
|
||||
msg='expected_type regression for type matching in dict result')
|
||||
|
||||
# Test get_all behavior
|
||||
_GET_ALL_DATA = {'key': [0, 1, 2]}
|
||||
|
@ -2108,6 +2227,8 @@ Line 1
|
|||
msg='failing str key on a `re.Match` should return `default`')
|
||||
self.assertEqual(traverse_obj(mobj, 8), None,
|
||||
msg='failing int key on a `re.Match` should return `default`')
|
||||
self.assertEqual(traverse_obj(mobj, lambda k, _: k in (0, 'group')), ['0123', '3'],
|
||||
msg='function on a `re.Match` should give group name as well')
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
|
|
|
@ -134,6 +134,10 @@ _NSIG_TESTS = [
|
|||
'https://www.youtube.com/s/player/7a062b77/player_ias.vflset/en_US/base.js',
|
||||
'NRcE3y3mVtm_cV-W', 'VbsCYUATvqlt5w',
|
||||
),
|
||||
(
|
||||
'https://www.youtube.com/s/player/dac945fd/player_ias.vflset/en_US/base.js',
|
||||
'o8BkRxXhuYsBCWi6RplPdP', '3Lx32v_hmzTm6A',
|
||||
),
|
||||
]
|
||||
|
||||
|
||||
|
|
|
@ -0,0 +1,5 @@
|
|||
from yt_dlp.extractor.common import InfoExtractor
|
||||
|
||||
|
||||
class IgnorePluginIE(InfoExtractor):
|
||||
pass
|
|
@ -0,0 +1,12 @@
|
|||
from yt_dlp.extractor.common import InfoExtractor
|
||||
|
||||
|
||||
class IgnoreNotInAllPluginIE(InfoExtractor):
|
||||
pass
|
||||
|
||||
|
||||
class InAllPluginIE(InfoExtractor):
|
||||
pass
|
||||
|
||||
|
||||
__all__ = ['InAllPluginIE']
|
|
@ -0,0 +1,9 @@
|
|||
from yt_dlp.extractor.common import InfoExtractor
|
||||
|
||||
|
||||
class NormalPluginIE(InfoExtractor):
|
||||
pass
|
||||
|
||||
|
||||
class _IgnoreUnderscorePluginIE(InfoExtractor):
|
||||
pass
|
|
@ -0,0 +1,5 @@
|
|||
from yt_dlp.postprocessor.common import PostProcessor
|
||||
|
||||
|
||||
class NormalPluginPP(PostProcessor):
|
||||
pass
|
|
@ -0,0 +1,5 @@
|
|||
from yt_dlp.extractor.common import InfoExtractor
|
||||
|
||||
|
||||
class ZippedPluginIE(InfoExtractor):
|
||||
pass
|
|
@ -0,0 +1,5 @@
|
|||
from yt_dlp.postprocessor.common import PostProcessor
|
||||
|
||||
|
||||
class ZippedPluginPP(PostProcessor):
|
||||
pass
|
|
@ -32,7 +32,8 @@ from .extractor import gen_extractor_classes, get_info_extractor
|
|||
from .extractor.common import UnsupportedURLIE
|
||||
from .extractor.openload import PhantomJSwrapper
|
||||
from .minicurses import format_text
|
||||
from .postprocessor import _PLUGIN_CLASSES as plugin_postprocessors
|
||||
from .plugins import directories as plugin_directories
|
||||
from .postprocessor import _PLUGIN_CLASSES as plugin_pps
|
||||
from .postprocessor import (
|
||||
EmbedThumbnailPP,
|
||||
FFmpegFixupDuplicateMoovPP,
|
||||
|
@ -317,6 +318,7 @@ class YoutubeDL:
|
|||
If not provided and the key is encrypted, yt-dlp will ask interactively
|
||||
prefer_insecure: Use HTTP instead of HTTPS to retrieve information.
|
||||
(Only supported by some extractors)
|
||||
enable_file_urls: Enable file:// URLs. This is disabled by default for security reasons.
|
||||
http_headers: A dictionary of custom headers to be used for all requests
|
||||
proxy: URL of the proxy server to use
|
||||
geo_verification_proxy: URL of the proxy to use for IP address verification
|
||||
|
@ -584,7 +586,6 @@ class YoutubeDL:
|
|||
self._playlist_urls = set()
|
||||
self.cache = Cache(self)
|
||||
|
||||
windows_enable_vt_mode()
|
||||
stdout = sys.stderr if self.params.get('logtostderr') else sys.stdout
|
||||
self._out_files = Namespace(
|
||||
out=stdout,
|
||||
|
@ -593,6 +594,12 @@ class YoutubeDL:
|
|||
console=None if compat_os_name == 'nt' else next(
|
||||
filter(supports_terminal_sequences, (sys.stderr, sys.stdout)), None)
|
||||
)
|
||||
|
||||
try:
|
||||
windows_enable_vt_mode()
|
||||
except Exception as e:
|
||||
self.write_debug(f'Failed to enable VT mode: {e}')
|
||||
|
||||
self._allow_colors = Namespace(**{
|
||||
type_: not self.params.get('no_color') and supports_terminal_sequences(stream)
|
||||
for type_, stream in self._out_files.items_ if type_ != 'console'
|
||||
|
@ -1068,7 +1075,7 @@ class YoutubeDL:
|
|||
# correspondingly that is not what we want since we need to keep
|
||||
# '%%' intact for template dict substitution step. Working around
|
||||
# with boundary-alike separator hack.
|
||||
sep = ''.join([random.choice(ascii_letters) for _ in range(32)])
|
||||
sep = ''.join(random.choices(ascii_letters, k=32))
|
||||
outtmpl = outtmpl.replace('%%', f'%{sep}%').replace('$$', f'${sep}$')
|
||||
|
||||
# outtmpl should be expand_path'ed before template dict substitution
|
||||
|
@ -1626,8 +1633,8 @@ class YoutubeDL:
|
|||
if result_type in ('url', 'url_transparent'):
|
||||
ie_result['url'] = sanitize_url(
|
||||
ie_result['url'], scheme='http' if self.params.get('prefer_insecure') else 'https')
|
||||
if ie_result.get('original_url'):
|
||||
extra_info.setdefault('original_url', ie_result['original_url'])
|
||||
if ie_result.get('original_url') and not extra_info.get('original_url'):
|
||||
extra_info = {'original_url': ie_result['original_url'], **extra_info}
|
||||
|
||||
extract_flat = self.params.get('extract_flat', False)
|
||||
if ((extract_flat == 'in_playlist' and 'playlist' in extra_info)
|
||||
|
@ -1770,7 +1777,7 @@ class YoutubeDL:
|
|||
return {
|
||||
**info,
|
||||
'playlist_index': 0,
|
||||
'__last_playlist_index': max(ie_result['requested_entries'] or (0, 0)),
|
||||
'__last_playlist_index': max(ie_result.get('requested_entries') or (0, 0)),
|
||||
'extractor': ie_result['extractor'],
|
||||
'extractor_key': ie_result['extractor_key'],
|
||||
}
|
||||
|
@ -1862,11 +1869,10 @@ class YoutubeDL:
|
|||
self.to_screen('[download] Downloading item %s of %s' % (
|
||||
self._format_screen(i + 1, self.Styles.ID), self._format_screen(n_entries, self.Styles.EMPHASIS)))
|
||||
|
||||
extra.update({
|
||||
entry_result = self.__process_iterable_entry(entry, download, collections.ChainMap({
|
||||
'playlist_index': playlist_index,
|
||||
'playlist_autonumber': i + 1,
|
||||
})
|
||||
entry_result = self.__process_iterable_entry(entry, download, extra)
|
||||
}, extra))
|
||||
if not entry_result:
|
||||
failures += 1
|
||||
if failures >= max_failures:
|
||||
|
@ -2405,11 +2411,7 @@ class YoutubeDL:
|
|||
def _fill_common_fields(self, info_dict, final=True):
|
||||
# TODO: move sanitization here
|
||||
if final:
|
||||
title = info_dict.get('title', NO_DEFAULT)
|
||||
if title is NO_DEFAULT:
|
||||
raise ExtractorError('Missing "title" field in extractor result',
|
||||
video_id=info_dict['id'], ie=info_dict['extractor'])
|
||||
info_dict['fulltitle'] = title
|
||||
title = info_dict['fulltitle'] = info_dict.get('title')
|
||||
if not title:
|
||||
if title == '':
|
||||
self.write_debug('Extractor gave empty title. Creating a generic title')
|
||||
|
@ -2977,6 +2979,16 @@ class YoutubeDL:
|
|||
|
||||
# Does nothing under normal operation - for backward compatibility of process_info
|
||||
self.post_extract(info_dict)
|
||||
|
||||
def replace_info_dict(new_info):
|
||||
nonlocal info_dict
|
||||
if new_info == info_dict:
|
||||
return
|
||||
info_dict.clear()
|
||||
info_dict.update(new_info)
|
||||
|
||||
new_info, _ = self.pre_process(info_dict, 'video')
|
||||
replace_info_dict(new_info)
|
||||
self._num_downloads += 1
|
||||
|
||||
# info_dict['_filename'] needs to be set for backward compatibility
|
||||
|
@ -3090,13 +3102,6 @@ class YoutubeDL:
|
|||
for link_type, should_write in write_links.items()):
|
||||
return
|
||||
|
||||
def replace_info_dict(new_info):
|
||||
nonlocal info_dict
|
||||
if new_info == info_dict:
|
||||
return
|
||||
info_dict.clear()
|
||||
info_dict.update(new_info)
|
||||
|
||||
new_info, files_to_move = self.pre_process(info_dict, 'before_dl', files_to_move)
|
||||
replace_info_dict(new_info)
|
||||
|
||||
|
@ -3123,7 +3128,7 @@ class YoutubeDL:
|
|||
fd, success = None, True
|
||||
if info_dict.get('protocol') or info_dict.get('url'):
|
||||
fd = get_suitable_downloader(info_dict, self.params, to_stdout=temp_filename == '-')
|
||||
if fd is not FFmpegFD and (
|
||||
if fd is not FFmpegFD and 'no-direct-merge' not in self.params['compat_opts'] and (
|
||||
info_dict.get('section_start') or info_dict.get('section_end')):
|
||||
msg = ('This format cannot be partially downloaded' if FFmpegFD.available()
|
||||
else 'You have requested downloading the video partially, but ffmpeg is not installed')
|
||||
|
@ -3388,6 +3393,7 @@ class YoutubeDL:
|
|||
reject = lambda k, v: v is None or k.startswith('__') or k in {
|
||||
'requested_downloads', 'requested_formats', 'requested_subtitles', 'requested_entries',
|
||||
'entries', 'filepath', '_filename', 'infojson_filename', 'original_url', 'playlist_autonumber',
|
||||
'_format_sort_fields',
|
||||
}
|
||||
else:
|
||||
reject = lambda k, v: False
|
||||
|
@ -3457,6 +3463,7 @@ class YoutubeDL:
|
|||
return infodict
|
||||
|
||||
def run_all_pps(self, key, info, *, additional_pps=None):
|
||||
if key != 'video':
|
||||
self._forceprint(key, info)
|
||||
for pp in (additional_pps or []) + self._pps[key]:
|
||||
info = self.run_pp(pp, info)
|
||||
|
@ -3726,7 +3733,10 @@ class YoutubeDL:
|
|||
|
||||
# These imports can be slow. So import them only as needed
|
||||
from .extractor.extractors import _LAZY_LOADER
|
||||
from .extractor.extractors import _PLUGIN_CLASSES as plugin_extractors
|
||||
from .extractor.extractors import (
|
||||
_PLUGIN_CLASSES as plugin_ies,
|
||||
_PLUGIN_OVERRIDES as plugin_ie_overrides
|
||||
)
|
||||
|
||||
def get_encoding(stream):
|
||||
ret = str(getattr(stream, 'encoding', 'missing (%s)' % type(stream).__name__))
|
||||
|
@ -3771,10 +3781,6 @@ class YoutubeDL:
|
|||
write_debug('Lazy loading extractors is forcibly disabled')
|
||||
else:
|
||||
write_debug('Lazy loading extractors is disabled')
|
||||
if plugin_extractors or plugin_postprocessors:
|
||||
write_debug('Plugins: %s' % [
|
||||
'%s%s' % (klass.__name__, '' if klass.__name__ == name else f' as {name}')
|
||||
for name, klass in itertools.chain(plugin_extractors.items(), plugin_postprocessors.items())])
|
||||
if self.params['compat_opts']:
|
||||
write_debug('Compatibility options: %s' % ', '.join(self.params['compat_opts']))
|
||||
|
||||
|
@ -3808,6 +3814,21 @@ class YoutubeDL:
|
|||
proxy_map.update(handler.proxies)
|
||||
write_debug(f'Proxy map: {proxy_map}')
|
||||
|
||||
for plugin_type, plugins in {'Extractor': plugin_ies, 'Post-Processor': plugin_pps}.items():
|
||||
display_list = ['%s%s' % (
|
||||
klass.__name__, '' if klass.__name__ == name else f' as {name}')
|
||||
for name, klass in plugins.items()]
|
||||
if plugin_type == 'Extractor':
|
||||
display_list.extend(f'{plugins[-1].IE_NAME.partition("+")[2]} ({parent.__name__})'
|
||||
for parent, plugins in plugin_ie_overrides.items())
|
||||
if not display_list:
|
||||
continue
|
||||
write_debug(f'{plugin_type} Plugins: {", ".join(sorted(display_list))}')
|
||||
|
||||
plugin_dirs = plugin_directories()
|
||||
if plugin_dirs:
|
||||
write_debug(f'Plugin directories: {plugin_dirs}')
|
||||
|
||||
# Not implemented
|
||||
if False and self.params.get('call_home'):
|
||||
ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode()
|
||||
|
@ -3857,8 +3878,11 @@ class YoutubeDL:
|
|||
# https://github.com/ytdl-org/youtube-dl/issues/8227)
|
||||
file_handler = urllib.request.FileHandler()
|
||||
|
||||
if not self.params.get('enable_file_urls'):
|
||||
def file_open(*args, **kwargs):
|
||||
raise urllib.error.URLError('file:// scheme is explicitly disabled in yt-dlp for security reasons')
|
||||
raise urllib.error.URLError(
|
||||
'file:// URLs are explicitly disabled in yt-dlp for security reasons. '
|
||||
'Use --enable-file-urls to enable at your own risk.')
|
||||
file_handler.file_open = file_open
|
||||
|
||||
opener = urllib.request.build_opener(
|
||||
|
@ -3921,7 +3945,7 @@ class YoutubeDL:
|
|||
elif not self.params.get('overwrites', True) and os.path.exists(descfn):
|
||||
self.to_screen(f'[info] {label.title()} description is already present')
|
||||
elif ie_result.get('description') is None:
|
||||
self.report_warning(f'There\'s no {label} description to write')
|
||||
self.to_screen(f'[info] There\'s no {label} description to write')
|
||||
return False
|
||||
else:
|
||||
try:
|
||||
|
@ -3937,15 +3961,18 @@ class YoutubeDL:
|
|||
''' Write subtitles to file and return list of (sub_filename, final_sub_filename); or None if error'''
|
||||
ret = []
|
||||
subtitles = info_dict.get('requested_subtitles')
|
||||
if not subtitles or not (self.params.get('writesubtitles') or self.params.get('writeautomaticsub')):
|
||||
if not (self.params.get('writesubtitles') or self.params.get('writeautomaticsub')):
|
||||
# subtitles download errors are already managed as troubles in relevant IE
|
||||
# that way it will silently go on when used with unsupporting IE
|
||||
return ret
|
||||
|
||||
elif not subtitles:
|
||||
self.to_screen('[info] There\'s no subtitles for the requested languages')
|
||||
return ret
|
||||
sub_filename_base = self.prepare_filename(info_dict, 'subtitle')
|
||||
if not sub_filename_base:
|
||||
self.to_screen('[info] Skipping writing video subtitles')
|
||||
return ret
|
||||
|
||||
for sub_lang, sub_info in subtitles.items():
|
||||
sub_format = sub_info['ext']
|
||||
sub_filename = subtitles_filename(filename, sub_lang, sub_format, info_dict.get('ext'))
|
||||
|
@ -3992,6 +4019,9 @@ class YoutubeDL:
|
|||
thumbnails, ret = [], []
|
||||
if write_all or self.params.get('writethumbnail', False):
|
||||
thumbnails = info_dict.get('thumbnails') or []
|
||||
if not thumbnails:
|
||||
self.to_screen(f'[info] There\'s no {label} thumbnails to download')
|
||||
return ret
|
||||
multiple = write_all and len(thumbnails) > 1
|
||||
|
||||
if thumb_filename_base is None:
|
||||
|
|
|
@ -91,12 +91,11 @@ def get_urls(urls, batchfile, verbose):
|
|||
|
||||
|
||||
def print_extractor_information(opts, urls):
|
||||
# Importing GenericIE is currently slow since it imports other extractors
|
||||
# TODO: Move this back to module level after generalization of embed detection
|
||||
from .extractor.generic import GenericIE
|
||||
|
||||
out = ''
|
||||
if opts.list_extractors:
|
||||
# Importing GenericIE is currently slow since it imports YoutubeIE
|
||||
from .extractor.generic import GenericIE
|
||||
|
||||
urls = dict.fromkeys(urls, False)
|
||||
for ie in list_extractor_classes(opts.age_limit):
|
||||
out += ie.IE_NAME + (' (CURRENTLY BROKEN)' if not ie.working() else '') + '\n'
|
||||
|
@ -333,7 +332,7 @@ def validate_options(opts):
|
|||
mobj = range_ != '-' and re.fullmatch(r'([^-]+)?\s*-\s*([^-]+)?', range_)
|
||||
dur = mobj and (parse_timestamp(mobj.group(1) or '0'), parse_timestamp(mobj.group(2) or 'inf'))
|
||||
if None in (dur or [None]):
|
||||
raise ValueError(f'invalid {name} time range "{regex}". Must be of the form *start-end')
|
||||
raise ValueError(f'invalid {name} time range "{regex}". Must be of the form "*start-end"')
|
||||
ranges.append(dur)
|
||||
continue
|
||||
try:
|
||||
|
@ -351,7 +350,7 @@ def validate_options(opts):
|
|||
mobj = re.fullmatch(r'''(?x)
|
||||
(?P<name>[^+:]+)
|
||||
(?:\s*\+\s*(?P<keyring>[^:]+))?
|
||||
(?:\s*:\s*(?P<profile>.+?))?
|
||||
(?:\s*:\s*(?!:)(?P<profile>.+?))?
|
||||
(?:\s*::\s*(?P<container>.+))?
|
||||
''', opts.cookiesfrombrowser)
|
||||
if mobj is None:
|
||||
|
@ -387,10 +386,12 @@ def validate_options(opts):
|
|||
raise ValueError(f'{cmd} is invalid; {err}')
|
||||
yield action
|
||||
|
||||
parse_metadata = opts.parse_metadata or []
|
||||
if opts.metafromtitle is not None:
|
||||
parse_metadata.append('title:%s' % opts.metafromtitle)
|
||||
opts.parse_metadata = list(itertools.chain(*map(metadataparser_actions, parse_metadata)))
|
||||
opts.parse_metadata.setdefault('pre_process', []).append('title:%s' % opts.metafromtitle)
|
||||
opts.parse_metadata = {
|
||||
k: list(itertools.chain(*map(metadataparser_actions, v)))
|
||||
for k, v in opts.parse_metadata.items()
|
||||
}
|
||||
|
||||
# Other options
|
||||
if opts.playlist_items is not None:
|
||||
|
@ -562,11 +563,11 @@ def validate_options(opts):
|
|||
def get_postprocessors(opts):
|
||||
yield from opts.add_postprocessors
|
||||
|
||||
if opts.parse_metadata:
|
||||
for when, actions in opts.parse_metadata.items():
|
||||
yield {
|
||||
'key': 'MetadataParser',
|
||||
'actions': opts.parse_metadata,
|
||||
'when': 'pre_process'
|
||||
'actions': actions,
|
||||
'when': when
|
||||
}
|
||||
sponsorblock_query = opts.sponsorblock_mark | opts.sponsorblock_remove
|
||||
if sponsorblock_query:
|
||||
|
@ -702,7 +703,7 @@ def parse_options(argv=None):
|
|||
|
||||
postprocessors = list(get_postprocessors(opts))
|
||||
|
||||
print_only = bool(opts.forceprint) and all(k not in opts.forceprint for k in POSTPROCESS_WHEN[2:])
|
||||
print_only = bool(opts.forceprint) and all(k not in opts.forceprint for k in POSTPROCESS_WHEN[3:])
|
||||
any_getting = any(getattr(opts, k) for k in (
|
||||
'dumpjson', 'dump_single_json', 'getdescription', 'getduration', 'getfilename',
|
||||
'getformat', 'getid', 'getthumbnail', 'gettitle', 'geturl'
|
||||
|
@ -854,6 +855,7 @@ def parse_options(argv=None):
|
|||
'legacyserverconnect': opts.legacy_server_connect,
|
||||
'nocheckcertificate': opts.no_check_certificate,
|
||||
'prefer_insecure': opts.prefer_insecure,
|
||||
'enable_file_urls': opts.enable_file_urls,
|
||||
'http_headers': opts.headers,
|
||||
'proxy': opts.proxy,
|
||||
'socket_timeout': opts.socket_timeout,
|
||||
|
|
|
@ -5,6 +5,7 @@ import os
|
|||
import re
|
||||
import shutil
|
||||
import traceback
|
||||
import urllib.parse
|
||||
|
||||
from .utils import expand_path, traverse_obj, version_tuple, write_json_file
|
||||
from .version import __version__
|
||||
|
@ -22,11 +23,9 @@ class Cache:
|
|||
return expand_path(res)
|
||||
|
||||
def _get_cache_fn(self, section, key, dtype):
|
||||
assert re.match(r'^[a-zA-Z0-9_.-]+$', section), \
|
||||
'invalid section %r' % section
|
||||
assert re.match(r'^[a-zA-Z0-9_.-]+$', key), 'invalid key %r' % key
|
||||
return os.path.join(
|
||||
self._get_root_dir(), section, f'{key}.{dtype}')
|
||||
assert re.match(r'^[\w.-]+$', section), f'invalid section {section!r}'
|
||||
key = urllib.parse.quote(key, safe='').replace('%', ',') # encode non-ascii characters
|
||||
return os.path.join(self._get_root_dir(), section, f'{key}.{dtype}')
|
||||
|
||||
@property
|
||||
def enabled(self):
|
||||
|
|
|
@ -20,6 +20,7 @@ from ..utils import (
|
|||
RetryManager,
|
||||
classproperty,
|
||||
decodeArgument,
|
||||
deprecation_warning,
|
||||
encodeFilename,
|
||||
format_bytes,
|
||||
join_nonempty,
|
||||
|
@ -180,7 +181,9 @@ class FileDownloader:
|
|||
@staticmethod
|
||||
def parse_bytes(bytestr):
|
||||
"""Parse a string indicating a byte quantity into an integer."""
|
||||
parse_bytes(bytestr)
|
||||
deprecation_warning('yt_dlp.FileDownloader.parse_bytes is deprecated and '
|
||||
'may be removed in the future. Use yt_dlp.utils.parse_bytes instead')
|
||||
return parse_bytes(bytestr)
|
||||
|
||||
def slow_down(self, start_time, now, byte_counter):
|
||||
"""Sleep if the download speed is over the rate limit."""
|
||||
|
|
|
@ -1,8 +1,9 @@
|
|||
import time
|
||||
import urllib.parse
|
||||
|
||||
from . import get_suitable_downloader
|
||||
from .fragment import FragmentFD
|
||||
from ..utils import urljoin
|
||||
from ..utils import update_url_query, urljoin
|
||||
|
||||
|
||||
class DashSegmentsFD(FragmentFD):
|
||||
|
@ -40,7 +41,12 @@ class DashSegmentsFD(FragmentFD):
|
|||
self._prepare_and_start_frag_download(ctx, fmt)
|
||||
ctx['start'] = real_start
|
||||
|
||||
fragments_to_download = self._get_fragments(fmt, ctx)
|
||||
extra_query = None
|
||||
extra_param_to_segment_url = info_dict.get('extra_param_to_segment_url')
|
||||
if extra_param_to_segment_url:
|
||||
extra_query = urllib.parse.parse_qs(extra_param_to_segment_url)
|
||||
|
||||
fragments_to_download = self._get_fragments(fmt, ctx, extra_query)
|
||||
|
||||
if real_downloader:
|
||||
self.to_screen(
|
||||
|
@ -57,7 +63,7 @@ class DashSegmentsFD(FragmentFD):
|
|||
fragments = fragments(ctx) if callable(fragments) else fragments
|
||||
return [next(iter(fragments))] if self.params.get('test') else fragments
|
||||
|
||||
def _get_fragments(self, fmt, ctx):
|
||||
def _get_fragments(self, fmt, ctx, extra_query):
|
||||
fragment_base_url = fmt.get('fragment_base_url')
|
||||
fragments = self._resolve_fragments(fmt['fragments'], ctx)
|
||||
|
||||
|
@ -70,6 +76,8 @@ class DashSegmentsFD(FragmentFD):
|
|||
if not fragment_url:
|
||||
assert fragment_base_url
|
||||
fragment_url = urljoin(fragment_base_url, fragment['path'])
|
||||
if extra_query:
|
||||
fragment_url = update_url_query(fragment_url, extra_query)
|
||||
|
||||
yield {
|
||||
'frag_index': frag_index,
|
||||
|
|
|
@ -1,9 +1,11 @@
|
|||
import enum
|
||||
import json
|
||||
import os.path
|
||||
import re
|
||||
import subprocess
|
||||
import sys
|
||||
import time
|
||||
import uuid
|
||||
|
||||
from .fragment import FragmentFD
|
||||
from ..compat import functools
|
||||
|
@ -20,8 +22,10 @@ from ..utils import (
|
|||
determine_ext,
|
||||
encodeArgument,
|
||||
encodeFilename,
|
||||
find_available_port,
|
||||
handle_youtubedl_headers,
|
||||
remove_end,
|
||||
sanitized_Request,
|
||||
traverse_obj,
|
||||
)
|
||||
|
||||
|
@ -60,7 +64,6 @@ class ExternalFD(FragmentFD):
|
|||
}
|
||||
if filename != '-':
|
||||
fsize = os.path.getsize(encodeFilename(tmpfilename))
|
||||
self.to_screen(f'\r[{self.get_basename()}] Downloaded {fsize} bytes')
|
||||
self.try_rename(tmpfilename, filename)
|
||||
status.update({
|
||||
'downloaded_bytes': fsize,
|
||||
|
@ -129,8 +132,7 @@ class ExternalFD(FragmentFD):
|
|||
self._debug_cmd(cmd)
|
||||
|
||||
if 'fragments' not in info_dict:
|
||||
_, stderr, returncode = Popen.run(
|
||||
cmd, text=True, stderr=subprocess.PIPE if self._CAPTURE_STDERR else None)
|
||||
_, stderr, returncode = self._call_process(cmd, info_dict)
|
||||
if returncode and stderr:
|
||||
self.to_stderr(stderr)
|
||||
return returncode
|
||||
|
@ -140,7 +142,7 @@ class ExternalFD(FragmentFD):
|
|||
retry_manager = RetryManager(self.params.get('fragment_retries'), self.report_retry,
|
||||
frag_index=None, fatal=not skip_unavailable_fragments)
|
||||
for retry in retry_manager:
|
||||
_, stderr, returncode = Popen.run(cmd, text=True, stderr=subprocess.PIPE)
|
||||
_, stderr, returncode = self._call_process(cmd, info_dict)
|
||||
if not returncode:
|
||||
break
|
||||
# TODO: Decide whether to retry based on error code
|
||||
|
@ -172,6 +174,9 @@ class ExternalFD(FragmentFD):
|
|||
self.try_remove(encodeFilename('%s.frag.urls' % tmpfilename))
|
||||
return 0
|
||||
|
||||
def _call_process(self, cmd, info_dict):
|
||||
return Popen.run(cmd, text=True, stderr=subprocess.PIPE)
|
||||
|
||||
|
||||
class CurlFD(ExternalFD):
|
||||
AVAILABLE_OPT = '-V'
|
||||
|
@ -256,6 +261,15 @@ class Aria2cFD(ExternalFD):
|
|||
def _aria2c_filename(fn):
|
||||
return fn if os.path.isabs(fn) else f'.{os.path.sep}{fn}'
|
||||
|
||||
def _call_downloader(self, tmpfilename, info_dict):
|
||||
# FIXME: Disabled due to https://github.com/yt-dlp/yt-dlp/issues/5931
|
||||
if False and 'no-external-downloader-progress' not in self.params.get('compat_opts', []):
|
||||
info_dict['__rpc'] = {
|
||||
'port': find_available_port() or 19190,
|
||||
'secret': str(uuid.uuid4()),
|
||||
}
|
||||
return super()._call_downloader(tmpfilename, info_dict)
|
||||
|
||||
def _make_cmd(self, tmpfilename, info_dict):
|
||||
cmd = [self.exe, '-c',
|
||||
'--console-log-level=warn', '--summary-interval=0', '--download-result=hide',
|
||||
|
@ -276,6 +290,12 @@ class Aria2cFD(ExternalFD):
|
|||
cmd += self._bool_option('--show-console-readout', 'noprogress', 'false', 'true', '=')
|
||||
cmd += self._configuration_args()
|
||||
|
||||
if '__rpc' in info_dict:
|
||||
cmd += [
|
||||
'--enable-rpc',
|
||||
f'--rpc-listen-port={info_dict["__rpc"]["port"]}',
|
||||
f'--rpc-secret={info_dict["__rpc"]["secret"]}']
|
||||
|
||||
# aria2c strips out spaces from the beginning/end of filenames and paths.
|
||||
# We work around this issue by adding a "./" to the beginning of the
|
||||
# filename and relative path, and adding a "/" at the end of the path.
|
||||
|
@ -304,6 +324,88 @@ class Aria2cFD(ExternalFD):
|
|||
cmd += ['--', info_dict['url']]
|
||||
return cmd
|
||||
|
||||
def aria2c_rpc(self, rpc_port, rpc_secret, method, params=()):
|
||||
# Does not actually need to be UUID, just unique
|
||||
sanitycheck = str(uuid.uuid4())
|
||||
d = json.dumps({
|
||||
'jsonrpc': '2.0',
|
||||
'id': sanitycheck,
|
||||
'method': method,
|
||||
'params': [f'token:{rpc_secret}', *params],
|
||||
}).encode('utf-8')
|
||||
request = sanitized_Request(
|
||||
f'http://localhost:{rpc_port}/jsonrpc',
|
||||
data=d, headers={
|
||||
'Content-Type': 'application/json',
|
||||
'Content-Length': f'{len(d)}',
|
||||
'Ytdl-request-proxy': '__noproxy__',
|
||||
})
|
||||
with self.ydl.urlopen(request) as r:
|
||||
resp = json.load(r)
|
||||
assert resp.get('id') == sanitycheck, 'Something went wrong with RPC server'
|
||||
return resp['result']
|
||||
|
||||
def _call_process(self, cmd, info_dict):
|
||||
if '__rpc' not in info_dict:
|
||||
return super()._call_process(cmd, info_dict)
|
||||
|
||||
send_rpc = functools.partial(self.aria2c_rpc, info_dict['__rpc']['port'], info_dict['__rpc']['secret'])
|
||||
started = time.time()
|
||||
|
||||
fragmented = 'fragments' in info_dict
|
||||
frag_count = len(info_dict['fragments']) if fragmented else 1
|
||||
status = {
|
||||
'filename': info_dict.get('_filename'),
|
||||
'status': 'downloading',
|
||||
'elapsed': 0,
|
||||
'downloaded_bytes': 0,
|
||||
'fragment_count': frag_count if fragmented else None,
|
||||
'fragment_index': 0 if fragmented else None,
|
||||
}
|
||||
self._hook_progress(status, info_dict)
|
||||
|
||||
def get_stat(key, *obj, average=False):
|
||||
val = tuple(filter(None, map(float, traverse_obj(obj, (..., ..., key))))) or [0]
|
||||
return sum(val) / (len(val) if average else 1)
|
||||
|
||||
with Popen(cmd, text=True, stdout=subprocess.DEVNULL, stderr=subprocess.PIPE) as p:
|
||||
# Add a small sleep so that RPC client can receive response,
|
||||
# or the connection stalls infinitely
|
||||
time.sleep(0.2)
|
||||
retval = p.poll()
|
||||
while retval is None:
|
||||
# We don't use tellStatus as we won't know the GID without reading stdout
|
||||
# Ref: https://aria2.github.io/manual/en/html/aria2c.html#aria2.tellActive
|
||||
active = send_rpc('aria2.tellActive')
|
||||
completed = send_rpc('aria2.tellStopped', [0, frag_count])
|
||||
|
||||
downloaded = get_stat('totalLength', completed) + get_stat('completedLength', active)
|
||||
speed = get_stat('downloadSpeed', active)
|
||||
total = frag_count * get_stat('totalLength', active, completed, average=True)
|
||||
if total < downloaded:
|
||||
total = None
|
||||
|
||||
status.update({
|
||||
'downloaded_bytes': int(downloaded),
|
||||
'speed': speed,
|
||||
'total_bytes': None if fragmented else total,
|
||||
'total_bytes_estimate': total,
|
||||
'eta': (total - downloaded) / (speed or 1),
|
||||
'fragment_index': min(frag_count, len(completed) + 1) if fragmented else None,
|
||||
'elapsed': time.time() - started
|
||||
})
|
||||
self._hook_progress(status, info_dict)
|
||||
|
||||
if not active and len(completed) >= frag_count:
|
||||
send_rpc('aria2.shutdown')
|
||||
retval = p.wait()
|
||||
break
|
||||
|
||||
time.sleep(0.1)
|
||||
retval = p.poll()
|
||||
|
||||
return '', p.stderr.read(), retval
|
||||
|
||||
|
||||
class HttpieFD(ExternalFD):
|
||||
AVAILABLE_OPT = '--version'
|
||||
|
@ -342,7 +444,6 @@ class FFmpegFD(ExternalFD):
|
|||
and cls.can_download(info_dict))
|
||||
|
||||
def _call_downloader(self, tmpfilename, info_dict):
|
||||
urls = [f['url'] for f in info_dict.get('requested_formats', [])] or [info_dict['url']]
|
||||
ffpp = FFmpegPostProcessor(downloader=self)
|
||||
if not ffpp.available:
|
||||
self.report_error('m3u8 download detected but ffmpeg could not be found. Please install')
|
||||
|
@ -372,16 +473,6 @@ class FFmpegFD(ExternalFD):
|
|||
# http://trac.ffmpeg.org/ticket/6125#comment:10
|
||||
args += ['-seekable', '1' if seekable else '0']
|
||||
|
||||
http_headers = None
|
||||
if info_dict.get('http_headers'):
|
||||
youtubedl_headers = handle_youtubedl_headers(info_dict['http_headers'])
|
||||
http_headers = [
|
||||
# Trailing \r\n after each HTTP header is important to prevent warning from ffmpeg/avconv:
|
||||
# [http @ 00000000003d2fa0] No trailing CRLF found in HTTP header.
|
||||
'-headers',
|
||||
''.join(f'{key}: {val}\r\n' for key, val in youtubedl_headers.items())
|
||||
]
|
||||
|
||||
env = None
|
||||
proxy = self.params.get('proxy')
|
||||
if proxy:
|
||||
|
@ -434,21 +525,26 @@ class FFmpegFD(ExternalFD):
|
|||
|
||||
start_time, end_time = info_dict.get('section_start') or 0, info_dict.get('section_end')
|
||||
|
||||
for i, url in enumerate(urls):
|
||||
if http_headers is not None and re.match(r'^https?://', url):
|
||||
args += http_headers
|
||||
selected_formats = info_dict.get('requested_formats') or [info_dict]
|
||||
for i, fmt in enumerate(selected_formats):
|
||||
if fmt.get('http_headers') and re.match(r'^https?://', fmt['url']):
|
||||
headers_dict = handle_youtubedl_headers(fmt['http_headers'])
|
||||
# Trailing \r\n after each HTTP header is important to prevent warning from ffmpeg/avconv:
|
||||
# [http @ 00000000003d2fa0] No trailing CRLF found in HTTP header.
|
||||
args.extend(['-headers', ''.join(f'{key}: {val}\r\n' for key, val in headers_dict.items())])
|
||||
|
||||
if start_time:
|
||||
args += ['-ss', str(start_time)]
|
||||
if end_time:
|
||||
args += ['-t', str(end_time - start_time)]
|
||||
|
||||
args += self._configuration_args((f'_i{i + 1}', '_i')) + ['-i', url]
|
||||
args += self._configuration_args((f'_i{i + 1}', '_i')) + ['-i', fmt['url']]
|
||||
|
||||
if not (start_time or end_time) or not self.params.get('force_keyframes_at_cuts'):
|
||||
args += ['-c', 'copy']
|
||||
|
||||
if info_dict.get('requested_formats') or protocol == 'http_dash_segments':
|
||||
for (i, fmt) in enumerate(info_dict.get('requested_formats') or [info_dict]):
|
||||
for i, fmt in enumerate(selected_formats):
|
||||
stream_number = fmt.get('manifest_stream_number', 0)
|
||||
args.extend(['-map', f'{i}:{stream_number}'])
|
||||
|
||||
|
@ -488,8 +584,9 @@ class FFmpegFD(ExternalFD):
|
|||
args.append(encodeFilename(ffpp._ffmpeg_filename_argument(tmpfilename), True))
|
||||
self._debug_cmd(args)
|
||||
|
||||
piped = any(fmt['url'] in ('-', 'pipe:') for fmt in selected_formats)
|
||||
with Popen(args, stdin=subprocess.PIPE, env=env) as proc:
|
||||
if url in ('-', 'pipe:'):
|
||||
if piped:
|
||||
self.on_process_started(proc, proc.stdin)
|
||||
try:
|
||||
retval = proc.wait()
|
||||
|
@ -499,7 +596,7 @@ class FFmpegFD(ExternalFD):
|
|||
# produces a file that is playable (this is mostly useful for live
|
||||
# streams). Note that Windows is not affected and produces playable
|
||||
# files (see https://github.com/ytdl-org/youtube-dl/issues/8300).
|
||||
if isinstance(e, KeyboardInterrupt) and sys.platform != 'win32' and url not in ('-', 'pipe:'):
|
||||
if isinstance(e, KeyboardInterrupt) and sys.platform != 'win32' and not piped:
|
||||
proc.communicate_or_kill(b'q')
|
||||
else:
|
||||
proc.kill(timeout=None)
|
||||
|
|
|
@ -21,7 +21,8 @@ from .youtube import ( # Youtube is moved to the top to improve performance
|
|||
YoutubeYtBeIE,
|
||||
YoutubeYtUserIE,
|
||||
YoutubeWatchLaterIE,
|
||||
YoutubeShortsAudioPivotIE
|
||||
YoutubeShortsAudioPivotIE,
|
||||
YoutubeConsentRedirectIE,
|
||||
)
|
||||
|
||||
from .abc import (
|
||||
|
@ -78,6 +79,8 @@ from .agora import (
|
|||
WyborczaVideoIE,
|
||||
)
|
||||
from .airmozilla import AirMozillaIE
|
||||
from .airtv import AirTVIE
|
||||
from .aitube import AitubeKZVideoIE
|
||||
from .aljazeera import AlJazeeraIE
|
||||
from .alphaporno import AlphaPornoIE
|
||||
from .amara import AmaraIE
|
||||
|
@ -86,7 +89,15 @@ from .alura import (
|
|||
AluraCourseIE
|
||||
)
|
||||
from .amcnetworks import AMCNetworksIE
|
||||
from .amazon import AmazonStoreIE
|
||||
from .amazon import (
|
||||
AmazonStoreIE,
|
||||
AmazonReviewsIE,
|
||||
)
|
||||
from .amazonminitv import (
|
||||
AmazonMiniTVIE,
|
||||
AmazonMiniTVSeasonIE,
|
||||
AmazonMiniTVSeriesIE,
|
||||
)
|
||||
from .americastestkitchen import (
|
||||
AmericasTestKitchenIE,
|
||||
AmericasTestKitchenSeasonIE,
|
||||
|
@ -178,6 +189,10 @@ from .bbc import (
|
|||
from .beeg import BeegIE
|
||||
from .behindkink import BehindKinkIE
|
||||
from .bellmedia import BellMediaIE
|
||||
from .beatbump import (
|
||||
BeatBumpVideoIE,
|
||||
BeatBumpPlaylistIE,
|
||||
)
|
||||
from .beatport import BeatportIE
|
||||
from .berufetv import BerufeTVIE
|
||||
from .bet import BetIE
|
||||
|
@ -461,6 +476,8 @@ from .drtuber import DrTuberIE
|
|||
from .drtv import (
|
||||
DRTVIE,
|
||||
DRTVLiveIE,
|
||||
DRTVSeasonIE,
|
||||
DRTVSeriesIE,
|
||||
)
|
||||
from .dtube import DTubeIE
|
||||
from .dvtv import DVTVIE
|
||||
|
@ -531,7 +548,7 @@ from .espn import (
|
|||
ESPNCricInfoIE,
|
||||
)
|
||||
from .esri import EsriVideoIE
|
||||
from .europa import EuropaIE
|
||||
from .europa import EuropaIE, EuroParlWebstreamIE
|
||||
from .europeantour import EuropeanTourIE
|
||||
from .eurosport import EurosportIE
|
||||
from .euscreen import EUScreenIE
|
||||
|
@ -820,6 +837,8 @@ from .joj import JojIE
|
|||
from .jwplatform import JWPlatformIE
|
||||
from .kakao import KakaoIE
|
||||
from .kaltura import KalturaIE
|
||||
from .kanal2 import Kanal2IE
|
||||
from .kankanews import KankaNewsIE
|
||||
from .karaoketv import KaraoketvIE
|
||||
from .karrierevideos import KarriereVideosIE
|
||||
from .keezmovies import KeezMoviesIE
|
||||
|
@ -829,6 +848,10 @@ from .khanacademy import (
|
|||
KhanAcademyIE,
|
||||
KhanAcademyUnitIE,
|
||||
)
|
||||
from .kick import (
|
||||
KickIE,
|
||||
KickVODIE,
|
||||
)
|
||||
from .kicker import KickerIE
|
||||
from .kickstarter import KickStarterIE
|
||||
from .kinja import KinjaEmbedIE
|
||||
|
@ -976,6 +999,10 @@ from .mediasite import (
|
|||
MediasiteCatalogIE,
|
||||
MediasiteNamedCatalogIE,
|
||||
)
|
||||
from .mediastream import (
|
||||
MediaStreamIE,
|
||||
WinSportsVideoIE,
|
||||
)
|
||||
from .mediaworksnz import MediaWorksNZVODIE
|
||||
from .medici import MediciIE
|
||||
from .megaphone import MegaphoneIE
|
||||
|
@ -1144,6 +1171,7 @@ from .neteasemusic import (
|
|||
from .netverse import (
|
||||
NetverseIE,
|
||||
NetversePlaylistIE,
|
||||
NetverseSearchIE,
|
||||
)
|
||||
from .newgrounds import (
|
||||
NewgroundsIE,
|
||||
|
@ -1205,6 +1233,7 @@ from .nintendo import NintendoIE
|
|||
from .nitter import NitterIE
|
||||
from .njpwworld import NJPWWorldIE
|
||||
from .nobelprize import NobelPrizeIE
|
||||
from .noice import NoicePodcastIE
|
||||
from .nonktube import NonkTubeIE
|
||||
from .noodlemagazine import NoodleMagazineIE
|
||||
from .noovo import NoovoIE
|
||||
|
@ -1270,6 +1299,7 @@ from .on24 import On24IE
|
|||
from .ondemandkorea import OnDemandKoreaIE
|
||||
from .onefootball import OneFootballIE
|
||||
from .onenewsnz import OneNewsNZIE
|
||||
from .oneplace import OnePlacePodcastIE
|
||||
from .onet import (
|
||||
OnetIE,
|
||||
OnetChannelIE,
|
||||
|
@ -1392,6 +1422,8 @@ from .pokergo import (
|
|||
from .polsatgo import PolsatGoIE
|
||||
from .polskieradio import (
|
||||
PolskieRadioIE,
|
||||
PolskieRadioLegacyIE,
|
||||
PolskieRadioAuditionIE,
|
||||
PolskieRadioCategoryIE,
|
||||
PolskieRadioPlayerIE,
|
||||
PolskieRadioPodcastIE,
|
||||
|
@ -1524,7 +1556,10 @@ from .rokfin import (
|
|||
)
|
||||
from .roosterteeth import RoosterTeethIE, RoosterTeethSeriesIE
|
||||
from .rottentomatoes import RottenTomatoesIE
|
||||
from .rozhlas import RozhlasIE
|
||||
from .rozhlas import (
|
||||
RozhlasIE,
|
||||
RozhlasVltavaIE,
|
||||
)
|
||||
from .rte import RteIE, RteRadioIE
|
||||
from .rtlnl import (
|
||||
RtlNlIE,
|
||||
|
@ -1561,6 +1596,7 @@ from .ruhd import RUHDIE
|
|||
from .rule34video import Rule34VideoIE
|
||||
from .rumble import (
|
||||
RumbleEmbedIE,
|
||||
RumbleIE,
|
||||
RumbleChannelIE,
|
||||
)
|
||||
from .rutube import (
|
||||
|
@ -1603,6 +1639,7 @@ from .savefrom import SaveFromIE
|
|||
from .sbs import SBSIE
|
||||
from .screen9 import Screen9IE
|
||||
from .screencast import ScreencastIE
|
||||
from .screencastify import ScreencastifyIE
|
||||
from .screencastomatic import ScreencastOMaticIE
|
||||
from .scrippsnetworks import (
|
||||
ScrippsNetworksWatchIE,
|
||||
|
@ -1632,6 +1669,7 @@ from .shared import (
|
|||
VivoIE,
|
||||
)
|
||||
from .sharevideos import ShareVideosEmbedIE
|
||||
from .sibnet import SibnetEmbedIE
|
||||
from .shemaroome import ShemarooMeIE
|
||||
from .showroomlive import ShowRoomLiveIE
|
||||
from .simplecast import (
|
||||
|
@ -1679,6 +1717,7 @@ from .soundcloud import (
|
|||
SoundcloudSetIE,
|
||||
SoundcloudRelatedIE,
|
||||
SoundcloudUserIE,
|
||||
SoundcloudUserPermalinkIE,
|
||||
SoundcloudTrackStationIE,
|
||||
SoundcloudPlaylistIE,
|
||||
SoundcloudSearchIE,
|
||||
|
@ -1840,6 +1879,11 @@ from .theweatherchannel import TheWeatherChannelIE
|
|||
from .thisamericanlife import ThisAmericanLifeIE
|
||||
from .thisav import ThisAVIE
|
||||
from .thisoldhouse import ThisOldHouseIE
|
||||
from .thisvid import (
|
||||
ThisVidIE,
|
||||
ThisVidMemberIE,
|
||||
ThisVidPlaylistIE,
|
||||
)
|
||||
from .threespeak import (
|
||||
ThreeSpeakIE,
|
||||
ThreeSpeakUserIE,
|
||||
|
@ -1852,6 +1896,7 @@ from .tiktok import (
|
|||
TikTokEffectIE,
|
||||
TikTokTagIE,
|
||||
TikTokVMIE,
|
||||
TikTokLiveIE,
|
||||
DouyinIE,
|
||||
)
|
||||
from .tinypic import TinyPicIE
|
||||
|
@ -1889,6 +1934,7 @@ from .trovo import (
|
|||
TrovoChannelVodIE,
|
||||
TrovoChannelClipIE,
|
||||
)
|
||||
from .trtcocuk import TrtCocukVideoIE
|
||||
from .trueid import TrueIDIE
|
||||
from .trunews import TruNewsIE
|
||||
from .truth import TruthIE
|
||||
|
@ -2002,6 +2048,10 @@ from .twitter import (
|
|||
TwitterSpacesIE,
|
||||
TwitterShortenerIE,
|
||||
)
|
||||
from .txxx import (
|
||||
TxxxIE,
|
||||
PornTopIE,
|
||||
)
|
||||
from .udemy import (
|
||||
UdemyIE,
|
||||
UdemyCourseIE
|
||||
|
@ -2072,6 +2122,13 @@ from .videocampus_sachsen import (
|
|||
)
|
||||
from .videodetective import VideoDetectiveIE
|
||||
from .videofyme import VideofyMeIE
|
||||
from .videoken import (
|
||||
VideoKenIE,
|
||||
VideoKenPlayerIE,
|
||||
VideoKenPlaylistIE,
|
||||
VideoKenCategoryIE,
|
||||
VideoKenTopicIE,
|
||||
)
|
||||
from .videomore import (
|
||||
VideomoreIE,
|
||||
VideomoreVideoIE,
|
||||
|
@ -2096,6 +2153,7 @@ from .vimeo import (
|
|||
VimeoGroupsIE,
|
||||
VimeoLikesIE,
|
||||
VimeoOndemandIE,
|
||||
VimeoProIE,
|
||||
VimeoReviewIE,
|
||||
VimeoUserIE,
|
||||
VimeoWatchLaterIE,
|
||||
|
@ -2138,6 +2196,7 @@ from .voicy import (
|
|||
VoicyIE,
|
||||
VoicyChannelIE,
|
||||
)
|
||||
from .volejtv import VolejTVIE
|
||||
from .voot import (
|
||||
VootIE,
|
||||
VootSeriesIE,
|
||||
|
@ -2183,6 +2242,7 @@ from .wdr import (
|
|||
WDRElefantIE,
|
||||
WDRMobileIE,
|
||||
)
|
||||
from .webcamerapl import WebcameraplIE
|
||||
from .webcaster import (
|
||||
WebcasterIE,
|
||||
WebcasterFeedIE,
|
||||
|
@ -2219,6 +2279,7 @@ from .wsj import (
|
|||
WSJArticleIE,
|
||||
)
|
||||
from .wwe import WWEIE
|
||||
from .xanimu import XanimuIE
|
||||
from .xbef import XBefIE
|
||||
from .xboxclips import XboxClipsIE
|
||||
from .xfileshare import XFileShareIE
|
||||
|
@ -2227,12 +2288,6 @@ from .xhamster import (
|
|||
XHamsterEmbedIE,
|
||||
XHamsterUserIE,
|
||||
)
|
||||
from .xiami import (
|
||||
XiamiSongIE,
|
||||
XiamiAlbumIE,
|
||||
XiamiArtistIE,
|
||||
XiamiCollectionIE
|
||||
)
|
||||
from .ximalaya import (
|
||||
XimalayaIE,
|
||||
XimalayaAlbumIE
|
||||
|
|
|
@ -168,7 +168,7 @@ Format: Marked,Start,End,Style,Name,MarginL,MarginR,MarginV,Effect,Text'''
|
|||
}, data=b'')['token']
|
||||
|
||||
links_url = try_get(options, lambda x: x['video']['url']) or (video_base_url + 'link')
|
||||
self._K = ''.join([random.choice('0123456789abcdef') for _ in range(16)])
|
||||
self._K = ''.join(random.choices('0123456789abcdef', k=16))
|
||||
message = bytes_to_intlist(json.dumps({
|
||||
'k': self._K,
|
||||
't': token,
|
||||
|
|
|
@ -0,0 +1,96 @@
|
|||
from .common import InfoExtractor
|
||||
from .youtube import YoutubeIE
|
||||
from ..utils import (
|
||||
determine_ext,
|
||||
int_or_none,
|
||||
mimetype2ext,
|
||||
parse_iso8601,
|
||||
traverse_obj
|
||||
)
|
||||
|
||||
|
||||
class AirTVIE(InfoExtractor):
|
||||
_VALID_URL = r'https?://www\.air\.tv/watch\?v=(?P<id>\w+)'
|
||||
_TESTS = [{
|
||||
# without youtube_id
|
||||
'url': 'https://www.air.tv/watch?v=W87jcWleSn2hXZN47zJZsQ',
|
||||
'info_dict': {
|
||||
'id': 'W87jcWleSn2hXZN47zJZsQ',
|
||||
'ext': 'mp4',
|
||||
'release_date': '20221003',
|
||||
'release_timestamp': 1664792603,
|
||||
'channel_id': 'vgfManQlRQKgoFQ8i8peFQ',
|
||||
'title': 'md5:c12d49ed367c3dadaa67659aff43494c',
|
||||
'upload_date': '20221003',
|
||||
'duration': 151,
|
||||
'view_count': int,
|
||||
'thumbnail': 'https://cdn-sp-gcs.air.tv/videos/W/8/W87jcWleSn2hXZN47zJZsQ/b13fc56464f47d9d62a36d110b9b5a72-4096x2160_9.jpg',
|
||||
'timestamp': 1664792603,
|
||||
}
|
||||
}, {
|
||||
# with youtube_id
|
||||
'url': 'https://www.air.tv/watch?v=sv57EC8tRXG6h8dNXFUU1Q',
|
||||
'info_dict': {
|
||||
'id': '2ZTqmpee-bQ',
|
||||
'ext': 'mp4',
|
||||
'comment_count': int,
|
||||
'tags': 'count:11',
|
||||
'channel_follower_count': int,
|
||||
'like_count': int,
|
||||
'uploader': 'Newsflare',
|
||||
'thumbnail': 'https://i.ytimg.com/vi_webp/2ZTqmpee-bQ/maxresdefault.webp',
|
||||
'availability': 'public',
|
||||
'title': 'Geese Chase Alligator Across Golf Course',
|
||||
'uploader_id': 'NewsflareBreaking',
|
||||
'channel_url': 'https://www.youtube.com/channel/UCzSSoloGEz10HALUAbYhngQ',
|
||||
'description': 'md5:99b21d9cea59330149efbd9706e208f5',
|
||||
'age_limit': 0,
|
||||
'channel_id': 'UCzSSoloGEz10HALUAbYhngQ',
|
||||
'uploader_url': 'http://www.youtube.com/user/NewsflareBreaking',
|
||||
'view_count': int,
|
||||
'categories': ['News & Politics'],
|
||||
'live_status': 'not_live',
|
||||
'playable_in_embed': True,
|
||||
'channel': 'Newsflare',
|
||||
'duration': 37,
|
||||
'upload_date': '20180511',
|
||||
}
|
||||
}]
|
||||
|
||||
def _get_formats_and_subtitle(self, json_data, video_id):
|
||||
formats, subtitles = [], {}
|
||||
for source in traverse_obj(json_data, 'sources', 'sources_desktop', ...):
|
||||
ext = determine_ext(source.get('src'), mimetype2ext(source.get('type')))
|
||||
if ext == 'm3u8':
|
||||
fmts, subs = self._extract_m3u8_formats_and_subtitles(source.get('src'), video_id)
|
||||
formats.extend(fmts)
|
||||
self._merge_subtitles(subs, target=subtitles)
|
||||
else:
|
||||
formats.append({'url': source.get('src'), 'ext': ext})
|
||||
return formats, subtitles
|
||||
|
||||
def _real_extract(self, url):
|
||||
display_id = self._match_id(url)
|
||||
webpage = self._download_webpage(url, display_id)
|
||||
|
||||
nextjs_json = self._search_nextjs_data(webpage, display_id)['props']['pageProps']['initialState']['videos'][display_id]
|
||||
if nextjs_json.get('youtube_id'):
|
||||
return self.url_result(
|
||||
f'https://www.youtube.com/watch?v={nextjs_json.get("youtube_id")}', YoutubeIE)
|
||||
|
||||
formats, subtitles = self._get_formats_and_subtitle(nextjs_json, display_id)
|
||||
return {
|
||||
'id': display_id,
|
||||
'title': nextjs_json.get('title') or self._html_search_meta('og:title', webpage),
|
||||
'formats': formats,
|
||||
'subtitles': subtitles,
|
||||
'description': nextjs_json.get('description') or None,
|
||||
'duration': int_or_none(nextjs_json.get('duration')),
|
||||
'thumbnails': [
|
||||
{'url': thumbnail}
|
||||
for thumbnail in traverse_obj(nextjs_json, ('default_thumbnails', ...))],
|
||||
'channel_id': traverse_obj(nextjs_json, 'channel', 'channel_slug'),
|
||||
'timestamp': parse_iso8601(nextjs_json.get('created')),
|
||||
'release_timestamp': parse_iso8601(nextjs_json.get('published')),
|
||||
'view_count': int_or_none(nextjs_json.get('views')),
|
||||
}
|
|
@ -0,0 +1,60 @@
|
|||
from .common import InfoExtractor
|
||||
from ..utils import int_or_none, merge_dicts
|
||||
|
||||
|
||||
class AitubeKZVideoIE(InfoExtractor):
|
||||
_VALID_URL = r'https?://aitube\.kz/(?:video|embed/)\?(?:[^\?]+)?id=(?P<id>[\w-]+)'
|
||||
_TESTS = [{
|
||||
# id paramater as first parameter
|
||||
'url': 'https://aitube.kz/video?id=9291d29b-c038-49a1-ad42-3da2051d353c&playlistId=d55b1f5f-ef2a-4f23-b646-2a86275b86b7&season=1',
|
||||
'info_dict': {
|
||||
'id': '9291d29b-c038-49a1-ad42-3da2051d353c',
|
||||
'ext': 'mp4',
|
||||
'duration': 2174.0,
|
||||
'channel_id': '94962f73-013b-432c-8853-1bd78ca860fe',
|
||||
'like_count': int,
|
||||
'channel': 'ASTANA TV',
|
||||
'comment_count': int,
|
||||
'view_count': int,
|
||||
'description': 'Смотреть любимые сериалы и видео, поделиться видео и сериалами с друзьями и близкими',
|
||||
'thumbnail': 'https://cdn.static02.aitube.kz/kz.aitudala.aitube.staticaccess/files/ddf2a2ff-bee3-409b-b5f2-2a8202bba75b',
|
||||
'upload_date': '20221102',
|
||||
'timestamp': 1667370519,
|
||||
'title': 'Ангел хранитель 1 серия',
|
||||
'channel_follower_count': int,
|
||||
}
|
||||
}, {
|
||||
# embed url
|
||||
'url': 'https://aitube.kz/embed/?id=9291d29b-c038-49a1-ad42-3da2051d353c',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
# id parameter is not as first paramater
|
||||
'url': 'https://aitube.kz/video?season=1&id=9291d29b-c038-49a1-ad42-3da2051d353c&playlistId=d55b1f5f-ef2a-4f23-b646-2a86275b86b7',
|
||||
'only_matching': True,
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
video_id = self._match_id(url)
|
||||
webpage = self._download_webpage(url, video_id)
|
||||
|
||||
nextjs_data = self._search_nextjs_data(webpage, video_id)['props']['pageProps']['videoInfo']
|
||||
json_ld_data = self._search_json_ld(webpage, video_id)
|
||||
|
||||
formats, subtitles = self._extract_m3u8_formats_and_subtitles(
|
||||
f'https://api-http.aitube.kz/kz.aitudala.aitube.staticaccess/video/{video_id}/video', video_id)
|
||||
|
||||
return merge_dicts({
|
||||
'id': video_id,
|
||||
'title': nextjs_data.get('title') or self._html_search_meta(['name', 'og:title'], webpage),
|
||||
'description': nextjs_data.get('description'),
|
||||
'formats': formats,
|
||||
'subtitles': subtitles,
|
||||
'view_count': (nextjs_data.get('viewCount')
|
||||
or int_or_none(self._html_search_meta('ya:ovs:views_total', webpage))),
|
||||
'like_count': nextjs_data.get('likeCount'),
|
||||
'channel': nextjs_data.get('channelTitle'),
|
||||
'channel_id': nextjs_data.get('channelId'),
|
||||
'thumbnail': nextjs_data.get('coverUrl'),
|
||||
'comment_count': nextjs_data.get('commentCount'),
|
||||
'channel_follower_count': int_or_none(nextjs_data.get('channelSubscriberCount')),
|
||||
}, json_ld_data)
|
|
@ -1,5 +1,17 @@
|
|||
import re
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..utils import ExtractorError, int_or_none
|
||||
from ..utils import (
|
||||
ExtractorError,
|
||||
clean_html,
|
||||
float_or_none,
|
||||
get_element_by_attribute,
|
||||
get_element_by_class,
|
||||
int_or_none,
|
||||
js_to_json,
|
||||
traverse_obj,
|
||||
url_or_none,
|
||||
)
|
||||
|
||||
|
||||
class AmazonStoreIE(InfoExtractor):
|
||||
|
@ -9,7 +21,7 @@ class AmazonStoreIE(InfoExtractor):
|
|||
'url': 'https://www.amazon.co.uk/dp/B098XNCHLD/',
|
||||
'info_dict': {
|
||||
'id': 'B098XNCHLD',
|
||||
'title': 'md5:dae240564cbb2642170c02f7f0d7e472',
|
||||
'title': str,
|
||||
},
|
||||
'playlist_mincount': 1,
|
||||
'playlist': [{
|
||||
|
@ -20,28 +32,32 @@ class AmazonStoreIE(InfoExtractor):
|
|||
'thumbnail': r're:^https?://.*\.jpg$',
|
||||
'duration': 34,
|
||||
},
|
||||
}]
|
||||
}],
|
||||
'expected_warnings': ['Unable to extract data'],
|
||||
}, {
|
||||
'url': 'https://www.amazon.in/Sony-WH-1000XM4-Cancelling-Headphones-Bluetooth/dp/B0863TXGM3',
|
||||
'info_dict': {
|
||||
'id': 'B0863TXGM3',
|
||||
'title': 'md5:d1d3352428f8f015706c84b31e132169',
|
||||
'title': str,
|
||||
},
|
||||
'playlist_mincount': 4,
|
||||
'expected_warnings': ['Unable to extract data'],
|
||||
}, {
|
||||
'url': 'https://www.amazon.com/dp/B0845NXCXF/',
|
||||
'info_dict': {
|
||||
'id': 'B0845NXCXF',
|
||||
'title': 'md5:f3fa12779bf62ddb6a6ec86a360a858e',
|
||||
'title': str,
|
||||
},
|
||||
'playlist-mincount': 1,
|
||||
'expected_warnings': ['Unable to extract data'],
|
||||
}, {
|
||||
'url': 'https://www.amazon.es/Samsung-Smartphone-s-AMOLED-Quad-c%C3%A1mara-espa%C3%B1ola/dp/B08WX337PQ',
|
||||
'info_dict': {
|
||||
'id': 'B08WX337PQ',
|
||||
'title': 'md5:f3fa12779bf62ddb6a6ec86a360a858e',
|
||||
'title': str,
|
||||
},
|
||||
'playlist_mincount': 1,
|
||||
'expected_warnings': ['Unable to extract data'],
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
|
@ -52,7 +68,7 @@ class AmazonStoreIE(InfoExtractor):
|
|||
try:
|
||||
data_json = self._search_json(
|
||||
r'var\s?obj\s?=\s?jQuery\.parseJSON\(\'', webpage, 'data', id,
|
||||
transform_source=lambda x: x.replace(R'\\u', R'\u'))
|
||||
transform_source=js_to_json)
|
||||
except ExtractorError as e:
|
||||
retry.error = e
|
||||
|
||||
|
@ -66,3 +82,89 @@ class AmazonStoreIE(InfoExtractor):
|
|||
'width': int_or_none(video.get('videoWidth')),
|
||||
} for video in (data_json.get('videos') or []) if video.get('isVideo') and video.get('url')]
|
||||
return self.playlist_result(entries, playlist_id=id, playlist_title=data_json.get('title'))
|
||||
|
||||
|
||||
class AmazonReviewsIE(InfoExtractor):
|
||||
_VALID_URL = r'https?://(?:www\.)?amazon\.(?:[a-z]{2,3})(?:\.[a-z]{2})?/gp/customer-reviews/(?P<id>[^/&#$?]+)'
|
||||
_TESTS = [{
|
||||
'url': 'https://www.amazon.com/gp/customer-reviews/R10VE9VUSY19L3/ref=cm_cr_arp_d_rvw_ttl',
|
||||
'info_dict': {
|
||||
'id': 'R10VE9VUSY19L3',
|
||||
'ext': 'mp4',
|
||||
'title': 'Get squad #Suspicious',
|
||||
'description': 'md5:7012695052f440a1e064e402d87e0afb',
|
||||
'uploader': 'Kimberly Cronkright',
|
||||
'average_rating': 1.0,
|
||||
'thumbnail': r're:^https?://.*\.jpg$',
|
||||
},
|
||||
'expected_warnings': ['Review body was not found in webpage'],
|
||||
}, {
|
||||
'url': 'https://www.amazon.com/gp/customer-reviews/R10VE9VUSY19L3/ref=cm_cr_arp_d_rvw_ttl?language=es_US',
|
||||
'info_dict': {
|
||||
'id': 'R10VE9VUSY19L3',
|
||||
'ext': 'mp4',
|
||||
'title': 'Get squad #Suspicious',
|
||||
'description': 'md5:7012695052f440a1e064e402d87e0afb',
|
||||
'uploader': 'Kimberly Cronkright',
|
||||
'average_rating': 1.0,
|
||||
'thumbnail': r're:^https?://.*\.jpg$',
|
||||
},
|
||||
'expected_warnings': ['Review body was not found in webpage'],
|
||||
}, {
|
||||
'url': 'https://www.amazon.in/gp/customer-reviews/RV1CO8JN5VGXV/',
|
||||
'info_dict': {
|
||||
'id': 'RV1CO8JN5VGXV',
|
||||
'ext': 'mp4',
|
||||
'title': 'Not sure about its durability',
|
||||
'description': 'md5:1a252c106357f0a3109ebf37d2e87494',
|
||||
'uploader': 'Shoaib Gulzar',
|
||||
'average_rating': 2.0,
|
||||
'thumbnail': r're:^https?://.*\.jpg$',
|
||||
},
|
||||
'expected_warnings': ['Review body was not found in webpage'],
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
video_id = self._match_id(url)
|
||||
|
||||
for retry in self.RetryManager():
|
||||
webpage = self._download_webpage(url, video_id)
|
||||
review_body = get_element_by_attribute('data-hook', 'review-body', webpage)
|
||||
if not review_body:
|
||||
retry.error = ExtractorError('Review body was not found in webpage', expected=True)
|
||||
|
||||
formats, subtitles = [], {}
|
||||
|
||||
manifest_url = self._search_regex(
|
||||
r'data-video-url="([^"]+)"', review_body, 'm3u8 url', default=None)
|
||||
if url_or_none(manifest_url):
|
||||
fmts, subtitles = self._extract_m3u8_formats_and_subtitles(
|
||||
manifest_url, video_id, 'mp4', fatal=False)
|
||||
formats.extend(fmts)
|
||||
|
||||
video_url = self._search_regex(
|
||||
r'<input[^>]+\bvalue="([^"]+)"[^>]+\bclass="video-url"', review_body, 'mp4 url', default=None)
|
||||
if url_or_none(video_url):
|
||||
formats.append({
|
||||
'url': video_url,
|
||||
'ext': 'mp4',
|
||||
'format_id': 'http-mp4',
|
||||
})
|
||||
|
||||
if not formats:
|
||||
self.raise_no_formats('No video found for this customer review', expected=True)
|
||||
|
||||
return {
|
||||
'id': video_id,
|
||||
'title': (clean_html(get_element_by_attribute('data-hook', 'review-title', webpage))
|
||||
or self._html_extract_title(webpage)),
|
||||
'description': clean_html(traverse_obj(re.findall(
|
||||
r'<span(?:\s+class="cr-original-review-content")?>(.+?)</span>', review_body), -1)),
|
||||
'uploader': clean_html(get_element_by_class('a-profile-name', webpage)),
|
||||
'average_rating': float_or_none(clean_html(get_element_by_attribute(
|
||||
'data-hook', 'review-star-rating', webpage) or '').partition(' ')[0]),
|
||||
'thumbnail': self._search_regex(
|
||||
r'data-thumbnail-url="([^"]+)"', review_body, 'thumbnail', default=None),
|
||||
'formats': formats,
|
||||
'subtitles': subtitles,
|
||||
}
|
||||
|
|
|
@ -0,0 +1,290 @@
|
|||
import json
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..utils import ExtractorError, int_or_none, traverse_obj, try_get
|
||||
|
||||
|
||||
class AmazonMiniTVBaseIE(InfoExtractor):
|
||||
def _real_initialize(self):
|
||||
self._download_webpage(
|
||||
'https://www.amazon.in/minitv', None,
|
||||
note='Fetching guest session cookies')
|
||||
AmazonMiniTVBaseIE.session_id = self._get_cookies('https://www.amazon.in')['session-id'].value
|
||||
|
||||
def _call_api(self, asin, data=None, note=None):
|
||||
device = {'clientId': 'ATVIN', 'deviceLocale': 'en_GB'}
|
||||
if data:
|
||||
data['variables'].update({
|
||||
'contentType': 'VOD',
|
||||
'sessionIdToken': self.session_id,
|
||||
**device,
|
||||
})
|
||||
|
||||
resp = self._download_json(
|
||||
f'https://www.amazon.in/minitv/api/web/{"graphql" if data else "prs"}',
|
||||
asin, note=note, headers={'Content-Type': 'application/json'},
|
||||
data=json.dumps(data).encode() if data else None,
|
||||
query=None if data else {
|
||||
'deviceType': 'A1WMMUXPCUJL4N',
|
||||
'contentId': asin,
|
||||
**device,
|
||||
})
|
||||
|
||||
if resp.get('errors'):
|
||||
raise ExtractorError(f'MiniTV said: {resp["errors"][0]["message"]}')
|
||||
elif not data:
|
||||
return resp
|
||||
return resp['data'][data['operationName']]
|
||||
|
||||
|
||||
class AmazonMiniTVIE(AmazonMiniTVBaseIE):
|
||||
_VALID_URL = r'(?:https?://(?:www\.)?amazon\.in/minitv/tp/|amazonminitv:(?:amzn1\.dv\.gti\.)?)(?P<id>[a-f0-9-]+)'
|
||||
_TESTS = [{
|
||||
'url': 'https://www.amazon.in/minitv/tp/75fe3a75-b8fe-4499-8100-5c9424344840?referrer=https%3A%2F%2Fwww.amazon.in%2Fminitv',
|
||||
'info_dict': {
|
||||
'id': 'amzn1.dv.gti.75fe3a75-b8fe-4499-8100-5c9424344840',
|
||||
'ext': 'mp4',
|
||||
'title': 'May I Kiss You?',
|
||||
'language': 'Hindi',
|
||||
'thumbnail': r're:^https?://.*\.jpg$',
|
||||
'description': 'md5:a549bfc747973e04feb707833474e59d',
|
||||
'release_timestamp': 1644710400,
|
||||
'release_date': '20220213',
|
||||
'duration': 846,
|
||||
'chapters': 'count:2',
|
||||
'series': 'Couple Goals',
|
||||
'series_id': 'amzn1.dv.gti.56521d46-b040-4fd5-872e-3e70476a04b0',
|
||||
'season': 'Season 3',
|
||||
'season_number': 3,
|
||||
'season_id': 'amzn1.dv.gti.20331016-d9b9-4968-b991-c89fa4927a36',
|
||||
'episode': 'May I Kiss You?',
|
||||
'episode_number': 2,
|
||||
'episode_id': 'amzn1.dv.gti.75fe3a75-b8fe-4499-8100-5c9424344840',
|
||||
},
|
||||
}, {
|
||||
'url': 'https://www.amazon.in/minitv/tp/280d2564-584f-452f-9c98-7baf906e01ab?referrer=https%3A%2F%2Fwww.amazon.in%2Fminitv',
|
||||
'info_dict': {
|
||||
'id': 'amzn1.dv.gti.280d2564-584f-452f-9c98-7baf906e01ab',
|
||||
'ext': 'mp4',
|
||||
'title': 'Jahaan',
|
||||
'language': 'Hindi',
|
||||
'thumbnail': r're:^https?://.*\.jpg',
|
||||
'description': 'md5:05eb765a77bf703f322f120ec6867339',
|
||||
'release_timestamp': 1647475200,
|
||||
'release_date': '20220317',
|
||||
'duration': 783,
|
||||
'chapters': [],
|
||||
},
|
||||
}, {
|
||||
'url': 'https://www.amazon.in/minitv/tp/280d2564-584f-452f-9c98-7baf906e01ab',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'amazonminitv:amzn1.dv.gti.280d2564-584f-452f-9c98-7baf906e01ab',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'amazonminitv:280d2564-584f-452f-9c98-7baf906e01ab',
|
||||
'only_matching': True,
|
||||
}]
|
||||
|
||||
_GRAPHQL_QUERY_CONTENT = '''
|
||||
query content($sessionIdToken: String!, $deviceLocale: String, $contentId: ID!, $contentType: ContentType!, $clientId: String) {
|
||||
content(
|
||||
applicationContextInput: {deviceLocale: $deviceLocale, sessionIdToken: $sessionIdToken, clientId: $clientId}
|
||||
contentId: $contentId
|
||||
contentType: $contentType
|
||||
) {
|
||||
contentId
|
||||
name
|
||||
... on Episode {
|
||||
contentId
|
||||
vodType
|
||||
name
|
||||
images
|
||||
description {
|
||||
synopsis
|
||||
contentLengthInSeconds
|
||||
}
|
||||
publicReleaseDateUTC
|
||||
audioTracks
|
||||
seasonId
|
||||
seriesId
|
||||
seriesName
|
||||
seasonNumber
|
||||
episodeNumber
|
||||
timecode {
|
||||
endCreditsTime
|
||||
}
|
||||
}
|
||||
... on MovieContent {
|
||||
contentId
|
||||
vodType
|
||||
name
|
||||
description {
|
||||
synopsis
|
||||
contentLengthInSeconds
|
||||
}
|
||||
images
|
||||
publicReleaseDateUTC
|
||||
audioTracks
|
||||
}
|
||||
}
|
||||
}'''
|
||||
|
||||
def _real_extract(self, url):
|
||||
asin = f'amzn1.dv.gti.{self._match_id(url)}'
|
||||
prs = self._call_api(asin, note='Downloading playback info')
|
||||
|
||||
formats, subtitles = [], {}
|
||||
for type_, asset in prs['playbackAssets'].items():
|
||||
if not traverse_obj(asset, 'manifestUrl'):
|
||||
continue
|
||||
if type_ == 'hls':
|
||||
m3u8_fmts, m3u8_subs = self._extract_m3u8_formats_and_subtitles(
|
||||
asset['manifestUrl'], asin, ext='mp4', entry_protocol='m3u8_native',
|
||||
m3u8_id=type_, fatal=False)
|
||||
formats.extend(m3u8_fmts)
|
||||
subtitles = self._merge_subtitles(subtitles, m3u8_subs)
|
||||
elif type_ == 'dash':
|
||||
mpd_fmts, mpd_subs = self._extract_mpd_formats_and_subtitles(
|
||||
asset['manifestUrl'], asin, mpd_id=type_, fatal=False)
|
||||
formats.extend(mpd_fmts)
|
||||
subtitles = self._merge_subtitles(subtitles, mpd_subs)
|
||||
else:
|
||||
self.report_warning(f'Unknown asset type: {type_}')
|
||||
|
||||
title_info = self._call_api(
|
||||
asin, note='Downloading title info', data={
|
||||
'operationName': 'content',
|
||||
'variables': {'contentId': asin},
|
||||
'query': self._GRAPHQL_QUERY_CONTENT,
|
||||
})
|
||||
credits_time = try_get(title_info, lambda x: x['timecode']['endCreditsTime'] / 1000)
|
||||
is_episode = title_info.get('vodType') == 'EPISODE'
|
||||
|
||||
return {
|
||||
'id': asin,
|
||||
'title': title_info.get('name'),
|
||||
'formats': formats,
|
||||
'subtitles': subtitles,
|
||||
'language': traverse_obj(title_info, ('audioTracks', 0)),
|
||||
'thumbnails': [{
|
||||
'id': type_,
|
||||
'url': url,
|
||||
} for type_, url in (title_info.get('images') or {}).items()],
|
||||
'description': traverse_obj(title_info, ('description', 'synopsis')),
|
||||
'release_timestamp': int_or_none(try_get(title_info, lambda x: x['publicReleaseDateUTC'] / 1000)),
|
||||
'duration': traverse_obj(title_info, ('description', 'contentLengthInSeconds')),
|
||||
'chapters': [{
|
||||
'start_time': credits_time,
|
||||
'title': 'End Credits',
|
||||
}] if credits_time else [],
|
||||
'series': title_info.get('seriesName'),
|
||||
'series_id': title_info.get('seriesId'),
|
||||
'season_number': title_info.get('seasonNumber'),
|
||||
'season_id': title_info.get('seasonId'),
|
||||
'episode': title_info.get('name') if is_episode else None,
|
||||
'episode_number': title_info.get('episodeNumber'),
|
||||
'episode_id': asin if is_episode else None,
|
||||
}
|
||||
|
||||
|
||||
class AmazonMiniTVSeasonIE(AmazonMiniTVBaseIE):
|
||||
IE_NAME = 'amazonminitv:season'
|
||||
_VALID_URL = r'amazonminitv:season:(?:amzn1\.dv\.gti\.)?(?P<id>[a-f0-9-]+)'
|
||||
IE_DESC = 'Amazon MiniTV Series, "minitv:season:" prefix'
|
||||
_TESTS = [{
|
||||
'url': 'amazonminitv:season:amzn1.dv.gti.0aa996eb-6a1b-4886-a342-387fbd2f1db0',
|
||||
'playlist_mincount': 6,
|
||||
'info_dict': {
|
||||
'id': 'amzn1.dv.gti.0aa996eb-6a1b-4886-a342-387fbd2f1db0',
|
||||
},
|
||||
}, {
|
||||
'url': 'amazonminitv:season:0aa996eb-6a1b-4886-a342-387fbd2f1db0',
|
||||
'only_matching': True,
|
||||
}]
|
||||
|
||||
_GRAPHQL_QUERY = '''
|
||||
query getEpisodes($sessionIdToken: String!, $clientId: String, $episodeOrSeasonId: ID!, $deviceLocale: String) {
|
||||
getEpisodes(
|
||||
applicationContextInput: {sessionIdToken: $sessionIdToken, deviceLocale: $deviceLocale, clientId: $clientId}
|
||||
episodeOrSeasonId: $episodeOrSeasonId
|
||||
) {
|
||||
episodes {
|
||||
... on Episode {
|
||||
contentId
|
||||
name
|
||||
images
|
||||
seriesName
|
||||
seasonId
|
||||
seriesId
|
||||
seasonNumber
|
||||
episodeNumber
|
||||
description {
|
||||
synopsis
|
||||
contentLengthInSeconds
|
||||
}
|
||||
publicReleaseDateUTC
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
'''
|
||||
|
||||
def _entries(self, asin):
|
||||
season_info = self._call_api(
|
||||
asin, note='Downloading season info', data={
|
||||
'operationName': 'getEpisodes',
|
||||
'variables': {'episodeOrSeasonId': asin},
|
||||
'query': self._GRAPHQL_QUERY,
|
||||
})
|
||||
|
||||
for episode in season_info['episodes']:
|
||||
yield self.url_result(
|
||||
f'amazonminitv:{episode["contentId"]}', AmazonMiniTVIE, episode['contentId'])
|
||||
|
||||
def _real_extract(self, url):
|
||||
asin = f'amzn1.dv.gti.{self._match_id(url)}'
|
||||
return self.playlist_result(self._entries(asin), asin)
|
||||
|
||||
|
||||
class AmazonMiniTVSeriesIE(AmazonMiniTVBaseIE):
|
||||
IE_NAME = 'amazonminitv:series'
|
||||
_VALID_URL = r'amazonminitv:series:(?:amzn1\.dv\.gti\.)?(?P<id>[a-f0-9-]+)'
|
||||
_TESTS = [{
|
||||
'url': 'amazonminitv:series:amzn1.dv.gti.56521d46-b040-4fd5-872e-3e70476a04b0',
|
||||
'playlist_mincount': 3,
|
||||
'info_dict': {
|
||||
'id': 'amzn1.dv.gti.56521d46-b040-4fd5-872e-3e70476a04b0',
|
||||
},
|
||||
}, {
|
||||
'url': 'amazonminitv:series:56521d46-b040-4fd5-872e-3e70476a04b0',
|
||||
'only_matching': True,
|
||||
}]
|
||||
|
||||
_GRAPHQL_QUERY = '''
|
||||
query getSeasons($sessionIdToken: String!, $deviceLocale: String, $episodeOrSeasonOrSeriesId: ID!, $clientId: String) {
|
||||
getSeasons(
|
||||
applicationContextInput: {deviceLocale: $deviceLocale, sessionIdToken: $sessionIdToken, clientId: $clientId}
|
||||
episodeOrSeasonOrSeriesId: $episodeOrSeasonOrSeriesId
|
||||
) {
|
||||
seasons {
|
||||
seasonId
|
||||
}
|
||||
}
|
||||
}
|
||||
'''
|
||||
|
||||
def _entries(self, asin):
|
||||
season_info = self._call_api(
|
||||
asin, note='Downloading series info', data={
|
||||
'operationName': 'getSeasons',
|
||||
'variables': {'episodeOrSeasonOrSeriesId': asin},
|
||||
'query': self._GRAPHQL_QUERY,
|
||||
})
|
||||
|
||||
for season in season_info['seasons']:
|
||||
yield self.url_result(f'amazonminitv:season:{season["seasonId"]}', AmazonMiniTVSeasonIE, season['seasonId'])
|
||||
|
||||
def _real_extract(self, url):
|
||||
asin = f'amzn1.dv.gti.{self._match_id(url)}'
|
||||
return self.playlist_result(self._entries(asin), asin)
|
|
@ -46,6 +46,9 @@ class ARDMediathekBaseIE(InfoExtractor):
|
|||
subtitles['de'] = [{
|
||||
'ext': 'ttml',
|
||||
'url': subtitle_url,
|
||||
}, {
|
||||
'ext': 'vtt',
|
||||
'url': subtitle_url.replace('/ebutt/', '/webvtt/') + '.vtt',
|
||||
}]
|
||||
|
||||
return {
|
||||
|
@ -286,16 +289,16 @@ class ARDMediathekIE(ARDMediathekBaseIE):
|
|||
class ARDIE(InfoExtractor):
|
||||
_VALID_URL = r'(?P<mainurl>https?://(?:www\.)?daserste\.de/(?:[^/?#&]+/)+(?P<id>[^/?#&]+))\.html'
|
||||
_TESTS = [{
|
||||
# available till 7.01.2022
|
||||
'url': 'https://www.daserste.de/information/talk/maischberger/videos/maischberger-die-woche-video100.html',
|
||||
'md5': '867d8aa39eeaf6d76407c5ad1bb0d4c1',
|
||||
# available till 7.12.2023
|
||||
'url': 'https://www.daserste.de/information/talk/maischberger/videos/maischberger-video-424.html',
|
||||
'md5': 'a438f671e87a7eba04000336a119ccc4',
|
||||
'info_dict': {
|
||||
'id': 'maischberger-die-woche-video100',
|
||||
'display_id': 'maischberger-die-woche-video100',
|
||||
'id': 'maischberger-video-424',
|
||||
'display_id': 'maischberger-video-424',
|
||||
'ext': 'mp4',
|
||||
'duration': 3687.0,
|
||||
'title': 'maischberger. die woche vom 7. Januar 2021',
|
||||
'upload_date': '20210107',
|
||||
'duration': 4452.0,
|
||||
'title': 'maischberger am 07.12.2022',
|
||||
'upload_date': '20221207',
|
||||
'thumbnail': r're:^https?://.*\.jpg$',
|
||||
},
|
||||
}, {
|
||||
|
|
|
@ -65,6 +65,21 @@ class ArteTVIE(ArteTVBaseIE):
|
|||
}, {
|
||||
'url': 'https://api.arte.tv/api/player/v2/config/de/LIVE',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'https://www.arte.tv/de/videos/110203-006-A/zaz/',
|
||||
'info_dict': {
|
||||
'id': '110203-006-A',
|
||||
'chapters': 'count:16',
|
||||
'description': 'md5:cf592f1df52fe52007e3f8eac813c084',
|
||||
'alt_title': 'Zaz',
|
||||
'title': 'Baloise Session 2022',
|
||||
'timestamp': 1668445200,
|
||||
'duration': 4054,
|
||||
'thumbnail': 'https://api-cdn.arte.tv/img/v2/image/ubQjmVCGyRx3hmBuZEK9QZ/940x530',
|
||||
'upload_date': '20221114',
|
||||
'ext': 'mp4',
|
||||
},
|
||||
'expected_warnings': ['geo restricted']
|
||||
}]
|
||||
|
||||
_GEO_BYPASS = True
|
||||
|
@ -180,10 +195,6 @@ class ArteTVIE(ArteTVBaseIE):
|
|||
else:
|
||||
self.report_warning(f'Skipping stream with unknown protocol {stream["protocol"]}')
|
||||
|
||||
# TODO: chapters from stream['segments']?
|
||||
# The JS also looks for chapters in config['data']['attributes']['chapters'],
|
||||
# but I am yet to find a video having those
|
||||
|
||||
formats.extend(secondary_formats)
|
||||
self._remove_duplicate_formats(formats)
|
||||
|
||||
|
@ -205,6 +216,11 @@ class ArteTVIE(ArteTVBaseIE):
|
|||
{'url': image['url'], 'id': image.get('caption')}
|
||||
for image in metadata.get('images') or [] if url_or_none(image.get('url'))
|
||||
],
|
||||
# TODO: chapters may also be in stream['segments']?
|
||||
'chapters': traverse_obj(config, ('data', 'attributes', 'chapters', 'elements', ..., {
|
||||
'start_time': 'startTime',
|
||||
'title': 'title',
|
||||
})) or None,
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@ -29,11 +29,18 @@ class BandcampIE(InfoExtractor):
|
|||
'info_dict': {
|
||||
'id': '1812978515',
|
||||
'ext': 'mp3',
|
||||
'title': "youtube-dl \"'/\\ä↭ - youtube-dl \"'/\\ä↭ - youtube-dl test song \"'/\\ä↭",
|
||||
'title': 'youtube-dl "\'/\\ä↭ - youtube-dl "\'/\\ä↭ - youtube-dl test song "\'/\\ä↭',
|
||||
'duration': 9.8485,
|
||||
'uploader': 'youtube-dl "\'/\\ä↭',
|
||||
'upload_date': '20121129',
|
||||
'timestamp': 1354224127,
|
||||
'track': 'youtube-dl "\'/\\ä↭ - youtube-dl test song "\'/\\ä↭',
|
||||
'album_artist': 'youtube-dl "\'/\\ä↭',
|
||||
'track_id': '1812978515',
|
||||
'artist': 'youtube-dl "\'/\\ä↭',
|
||||
'uploader_url': 'https://youtube-dl.bandcamp.com',
|
||||
'uploader_id': 'youtube-dl',
|
||||
'thumbnail': 'https://f4.bcbits.com/img/a3216802731_5.jpg',
|
||||
},
|
||||
'_skip': 'There is a limit of 200 free downloads / month for the test song'
|
||||
}, {
|
||||
|
@ -41,7 +48,8 @@ class BandcampIE(InfoExtractor):
|
|||
'url': 'http://benprunty.bandcamp.com/track/lanius-battle',
|
||||
'info_dict': {
|
||||
'id': '2650410135',
|
||||
'ext': 'aiff',
|
||||
'ext': 'm4a',
|
||||
'acodec': r're:[fa]lac',
|
||||
'title': 'Ben Prunty - Lanius (Battle)',
|
||||
'thumbnail': r're:^https?://.*\.jpg$',
|
||||
'uploader': 'Ben Prunty',
|
||||
|
@ -54,7 +62,10 @@ class BandcampIE(InfoExtractor):
|
|||
'track_number': 1,
|
||||
'track_id': '2650410135',
|
||||
'artist': 'Ben Prunty',
|
||||
'album_artist': 'Ben Prunty',
|
||||
'album': 'FTL: Advanced Edition Soundtrack',
|
||||
'uploader_url': 'https://benprunty.bandcamp.com',
|
||||
'uploader_id': 'benprunty',
|
||||
},
|
||||
}, {
|
||||
# no free download, mp3 128
|
||||
|
@ -75,7 +86,34 @@ class BandcampIE(InfoExtractor):
|
|||
'track_number': 5,
|
||||
'track_id': '2584466013',
|
||||
'artist': 'Mastodon',
|
||||
'album_artist': 'Mastodon',
|
||||
'album': 'Call of the Mastodon',
|
||||
'uploader_url': 'https://relapsealumni.bandcamp.com',
|
||||
'uploader_id': 'relapsealumni',
|
||||
},
|
||||
}, {
|
||||
# track from compilation album (artist/album_artist difference)
|
||||
'url': 'https://diskotopia.bandcamp.com/track/safehouse',
|
||||
'md5': '19c5337bca1428afa54129f86a2f6a69',
|
||||
'info_dict': {
|
||||
'id': '1978174799',
|
||||
'ext': 'mp3',
|
||||
'title': 'submerse - submerse - Safehouse',
|
||||
'thumbnail': r're:^https?://.*\.jpg$',
|
||||
'uploader': 'submerse',
|
||||
'timestamp': 1480779297,
|
||||
'upload_date': '20161203',
|
||||
'release_timestamp': 1481068800,
|
||||
'release_date': '20161207',
|
||||
'duration': 154.066,
|
||||
'track': 'submerse - Safehouse',
|
||||
'track_number': 3,
|
||||
'track_id': '1978174799',
|
||||
'artist': 'submerse',
|
||||
'album_artist': 'Diskotopia',
|
||||
'album': 'DSK F/W 2016-2017 Free Compilation',
|
||||
'uploader_url': 'https://diskotopia.bandcamp.com',
|
||||
'uploader_id': 'diskotopia',
|
||||
},
|
||||
}]
|
||||
|
||||
|
@ -121,6 +159,9 @@ class BandcampIE(InfoExtractor):
|
|||
embed = self._extract_data_attr(webpage, title, 'embed', False)
|
||||
current = tralbum.get('current') or {}
|
||||
artist = embed.get('artist') or current.get('artist') or tralbum.get('artist')
|
||||
album_artist = self._html_search_regex(
|
||||
r'<h3 class="albumTitle">[\S\s]*?by\s*<span>\s*<a href="[^>]+">\s*([^>]+?)\s*</a>',
|
||||
webpage, 'album artist', fatal=False)
|
||||
timestamp = unified_timestamp(
|
||||
current.get('publish_date') or tralbum.get('album_publish_date'))
|
||||
|
||||
|
@ -205,6 +246,7 @@ class BandcampIE(InfoExtractor):
|
|||
'track_id': track_id,
|
||||
'artist': artist,
|
||||
'album': embed.get('album_title'),
|
||||
'album_artist': album_artist,
|
||||
'formats': formats,
|
||||
}
|
||||
|
||||
|
|
|
@ -0,0 +1,101 @@
|
|||
from .common import InfoExtractor
|
||||
from .youtube import YoutubeIE, YoutubeTabIE
|
||||
|
||||
|
||||
class BeatBumpVideoIE(InfoExtractor):
|
||||
_VALID_URL = r'https://beatbump\.ml/listen\?id=(?P<id>[\w-]+)'
|
||||
_TESTS = [{
|
||||
'url': 'https://beatbump.ml/listen?id=MgNrAu2pzNs',
|
||||
'md5': '5ff3fff41d3935b9810a9731e485fe66',
|
||||
'info_dict': {
|
||||
'id': 'MgNrAu2pzNs',
|
||||
'ext': 'mp4',
|
||||
'uploader_url': 'http://www.youtube.com/channel/UC-pWHpBjdGG69N9mM2auIAA',
|
||||
'artist': 'Stephen',
|
||||
'thumbnail': 'https://i.ytimg.com/vi_webp/MgNrAu2pzNs/maxresdefault.webp',
|
||||
'channel_url': 'https://www.youtube.com/channel/UC-pWHpBjdGG69N9mM2auIAA',
|
||||
'upload_date': '20190312',
|
||||
'categories': ['Music'],
|
||||
'playable_in_embed': True,
|
||||
'duration': 169,
|
||||
'like_count': int,
|
||||
'alt_title': 'Voyeur Girl',
|
||||
'view_count': int,
|
||||
'track': 'Voyeur Girl',
|
||||
'uploader': 'Stephen - Topic',
|
||||
'title': 'Voyeur Girl',
|
||||
'channel_follower_count': int,
|
||||
'uploader_id': 'UC-pWHpBjdGG69N9mM2auIAA',
|
||||
'age_limit': 0,
|
||||
'availability': 'public',
|
||||
'live_status': 'not_live',
|
||||
'album': 'it\'s too much love to know my dear',
|
||||
'channel': 'Stephen',
|
||||
'comment_count': int,
|
||||
'description': 'md5:7ae382a65843d6df2685993e90a8628f',
|
||||
'tags': 'count:11',
|
||||
'creator': 'Stephen',
|
||||
'channel_id': 'UC-pWHpBjdGG69N9mM2auIAA',
|
||||
}
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
id_ = self._match_id(url)
|
||||
return self.url_result(f'https://music.youtube.com/watch?v={id_}', YoutubeIE, id_)
|
||||
|
||||
|
||||
class BeatBumpPlaylistIE(InfoExtractor):
|
||||
_VALID_URL = r'https://beatbump\.ml/(?:release\?id=|artist/|playlist/)(?P<id>[\w-]+)'
|
||||
_TESTS = [{
|
||||
'url': 'https://beatbump.ml/release?id=MPREb_gTAcphH99wE',
|
||||
'playlist_count': 50,
|
||||
'info_dict': {
|
||||
'id': 'OLAK5uy_l1m0thk3g31NmIIz_vMIbWtyv7eZixlH0',
|
||||
'availability': 'unlisted',
|
||||
'view_count': int,
|
||||
'title': 'Album - Royalty Free Music Library V2 (50 Songs)',
|
||||
'description': '',
|
||||
'tags': [],
|
||||
'modified_date': '20221223',
|
||||
}
|
||||
}, {
|
||||
'url': 'https://beatbump.ml/artist/UC_aEa8K-EOJ3D6gOs7HcyNg',
|
||||
'playlist_mincount': 1,
|
||||
'params': {'flatplaylist': True},
|
||||
'info_dict': {
|
||||
'id': 'UC_aEa8K-EOJ3D6gOs7HcyNg',
|
||||
'uploader_url': 'https://www.youtube.com/channel/UC_aEa8K-EOJ3D6gOs7HcyNg',
|
||||
'channel_url': 'https://www.youtube.com/channel/UC_aEa8K-EOJ3D6gOs7HcyNg',
|
||||
'uploader_id': 'UC_aEa8K-EOJ3D6gOs7HcyNg',
|
||||
'channel_follower_count': int,
|
||||
'title': 'NoCopyrightSounds - Videos',
|
||||
'uploader': 'NoCopyrightSounds',
|
||||
'description': 'md5:cd4fd53d81d363d05eee6c1b478b491a',
|
||||
'channel': 'NoCopyrightSounds',
|
||||
'tags': 'count:12',
|
||||
'channel_id': 'UC_aEa8K-EOJ3D6gOs7HcyNg',
|
||||
},
|
||||
}, {
|
||||
'url': 'https://beatbump.ml/playlist/VLPLRBp0Fe2GpgmgoscNFLxNyBVSFVdYmFkq',
|
||||
'playlist_mincount': 1,
|
||||
'params': {'flatplaylist': True},
|
||||
'info_dict': {
|
||||
'id': 'PLRBp0Fe2GpgmgoscNFLxNyBVSFVdYmFkq',
|
||||
'uploader_url': 'https://www.youtube.com/@NoCopyrightSounds',
|
||||
'description': 'Providing you with copyright free / safe music for gaming, live streaming, studying and more!',
|
||||
'view_count': int,
|
||||
'channel_url': 'https://www.youtube.com/@NoCopyrightSounds',
|
||||
'uploader_id': 'UC_aEa8K-EOJ3D6gOs7HcyNg',
|
||||
'title': 'NCS : All Releases 💿',
|
||||
'uploader': 'NoCopyrightSounds',
|
||||
'availability': 'public',
|
||||
'channel': 'NoCopyrightSounds',
|
||||
'tags': [],
|
||||
'modified_date': '20221225',
|
||||
'channel_id': 'UC_aEa8K-EOJ3D6gOs7HcyNg',
|
||||
}
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
id_ = self._match_id(url)
|
||||
return self.url_result(f'https://music.youtube.com/browse/{id_}', YoutubeTabIE, id_)
|
|
@ -16,13 +16,16 @@ from ..utils import (
|
|||
format_field,
|
||||
int_or_none,
|
||||
make_archive_id,
|
||||
merge_dicts,
|
||||
mimetype2ext,
|
||||
parse_count,
|
||||
parse_qs,
|
||||
qualities,
|
||||
smuggle_url,
|
||||
srt_subtitles_timecode,
|
||||
str_or_none,
|
||||
traverse_obj,
|
||||
unsmuggle_url,
|
||||
url_or_none,
|
||||
urlencode_postdata,
|
||||
)
|
||||
|
@ -303,7 +306,8 @@ class BiliBiliIE(BilibiliBaseIE):
|
|||
getter=lambda entry: f'https://www.bilibili.com/video/{video_id}?p={entry["page"]}')
|
||||
|
||||
if is_anthology:
|
||||
title += f' p{part_id:02d} {traverse_obj(page_list_json, ((part_id or 1) - 1, "part")) or ""}'
|
||||
part_id = part_id or 1
|
||||
title += f' p{part_id:02d} {traverse_obj(page_list_json, (part_id - 1, "part")) or ""}'
|
||||
|
||||
aid = video_data.get('aid')
|
||||
old_video_id = format_field(aid, None, f'%s_part{part_id or 1}')
|
||||
|
@ -880,16 +884,12 @@ class BiliIntlBaseIE(InfoExtractor):
|
|||
|
||||
return formats
|
||||
|
||||
def _extract_video_info(self, video_data, *, ep_id=None, aid=None):
|
||||
def _parse_video_metadata(self, video_data):
|
||||
return {
|
||||
'id': ep_id or aid,
|
||||
'title': video_data.get('title_display') or video_data.get('title'),
|
||||
'thumbnail': video_data.get('cover'),
|
||||
'episode_number': int_or_none(self._search_regex(
|
||||
r'^E(\d+)(?:$| - )', video_data.get('title_display') or '', 'episode number', default=None)),
|
||||
'formats': self._get_formats(ep_id=ep_id, aid=aid),
|
||||
'subtitles': self._get_subtitles(ep_id=ep_id, aid=aid),
|
||||
'extractor_key': BiliIntlIE.ie_key(),
|
||||
}
|
||||
|
||||
def _perform_login(self, username, password):
|
||||
|
@ -935,6 +935,10 @@ class BiliIntlIE(BiliIntlBaseIE):
|
|||
'title': 'E2 - The First Night',
|
||||
'thumbnail': r're:^https://pic\.bstarstatic\.com/ogv/.+\.png$',
|
||||
'episode_number': 2,
|
||||
'upload_date': '20201009',
|
||||
'episode': 'Episode 2',
|
||||
'timestamp': 1602259500,
|
||||
'description': 'md5:297b5a17155eb645e14a14b385ab547e',
|
||||
}
|
||||
}, {
|
||||
# Non-Bstation page
|
||||
|
@ -945,6 +949,10 @@ class BiliIntlIE(BiliIntlBaseIE):
|
|||
'title': 'E3 - Who?',
|
||||
'thumbnail': r're:^https://pic\.bstarstatic\.com/ogv/.+\.png$',
|
||||
'episode_number': 3,
|
||||
'description': 'md5:e1a775e71a35c43f141484715470ad09',
|
||||
'episode': 'Episode 3',
|
||||
'upload_date': '20211219',
|
||||
'timestamp': 1639928700,
|
||||
}
|
||||
}, {
|
||||
# Subtitle with empty content
|
||||
|
@ -957,6 +965,17 @@ class BiliIntlIE(BiliIntlBaseIE):
|
|||
'episode_number': 140,
|
||||
},
|
||||
'skip': 'According to the copyright owner\'s request, you may only watch the video after you log in.'
|
||||
}, {
|
||||
'url': 'https://www.bilibili.tv/en/video/2041863208',
|
||||
'info_dict': {
|
||||
'id': '2041863208',
|
||||
'ext': 'mp4',
|
||||
'timestamp': 1670874843,
|
||||
'description': 'Scheduled for April 2023.\nStudio: ufotable',
|
||||
'thumbnail': r're:https?://pic[-\.]bstarstatic.+/ugc/.+\.jpg$',
|
||||
'upload_date': '20221212',
|
||||
'title': 'Kimetsu no Yaiba Season 3 Official Trailer - Bstation',
|
||||
}
|
||||
}, {
|
||||
'url': 'https://www.biliintl.com/en/play/34613/341736',
|
||||
'only_matching': True,
|
||||
|
@ -974,42 +993,78 @@ class BiliIntlIE(BiliIntlBaseIE):
|
|||
'only_matching': True,
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
season_id, ep_id, aid = self._match_valid_url(url).group('season_id', 'ep_id', 'aid')
|
||||
video_id = ep_id or aid
|
||||
def _make_url(video_id, series_id=None):
|
||||
if series_id:
|
||||
return f'https://www.bilibili.tv/en/play/{series_id}/{video_id}'
|
||||
return f'https://www.bilibili.tv/en/video/{video_id}'
|
||||
|
||||
def _extract_video_metadata(self, url, video_id, season_id):
|
||||
url, smuggled_data = unsmuggle_url(url, {})
|
||||
if smuggled_data.get('title'):
|
||||
return smuggled_data
|
||||
|
||||
webpage = self._download_webpage(url, video_id)
|
||||
# Bstation layout
|
||||
initial_data = (
|
||||
self._search_json(r'window\.__INITIAL_(?:DATA|STATE)__\s*=', webpage, 'preload state', video_id, default={})
|
||||
or self._search_nuxt_data(webpage, video_id, '__initialState', fatal=False, traverse=None))
|
||||
video_data = traverse_obj(
|
||||
initial_data, ('OgvVideo', 'epDetail'), ('UgcVideo', 'videoData'), ('ugc', 'archive'), expected_type=dict)
|
||||
initial_data, ('OgvVideo', 'epDetail'), ('UgcVideo', 'videoData'), ('ugc', 'archive'), expected_type=dict) or {}
|
||||
|
||||
if season_id and not video_data:
|
||||
# Non-Bstation layout, read through episode list
|
||||
season_json = self._call_api(f'/web/v2/ogv/play/episodes?season_id={season_id}&platform=web', video_id)
|
||||
video_data = traverse_obj(season_json,
|
||||
('sections', ..., 'episodes', lambda _, v: str(v['episode_id']) == ep_id),
|
||||
expected_type=dict, get_all=False)
|
||||
return self._extract_video_info(video_data or {}, ep_id=ep_id, aid=aid)
|
||||
video_data = traverse_obj(season_json, (
|
||||
'sections', ..., 'episodes', lambda _, v: str(v['episode_id']) == video_id
|
||||
), expected_type=dict, get_all=False)
|
||||
|
||||
# XXX: webpage metadata may not accurate, it just used to not crash when video_data not found
|
||||
return merge_dicts(
|
||||
self._parse_video_metadata(video_data), self._search_json_ld(webpage, video_id), {
|
||||
'title': self._html_search_meta('og:title', webpage),
|
||||
'description': self._html_search_meta('og:description', webpage)
|
||||
})
|
||||
|
||||
def _real_extract(self, url):
|
||||
season_id, ep_id, aid = self._match_valid_url(url).group('season_id', 'ep_id', 'aid')
|
||||
video_id = ep_id or aid
|
||||
|
||||
return {
|
||||
'id': video_id,
|
||||
**self._extract_video_metadata(url, video_id, season_id),
|
||||
'formats': self._get_formats(ep_id=ep_id, aid=aid),
|
||||
'subtitles': self.extract_subtitles(ep_id=ep_id, aid=aid),
|
||||
}
|
||||
|
||||
|
||||
class BiliIntlSeriesIE(BiliIntlBaseIE):
|
||||
_VALID_URL = r'https?://(?:www\.)?bili(?:bili\.tv|intl\.com)/(?:[a-zA-Z]{2}/)?play/(?P<id>\d+)/?(?:[?#]|$)'
|
||||
IE_NAME = 'biliIntl:series'
|
||||
_VALID_URL = r'https?://(?:www\.)?bili(?:bili\.tv|intl\.com)/(?:[a-zA-Z]{2}/)?(?:play|media)/(?P<id>\d+)/?(?:[?#]|$)'
|
||||
_TESTS = [{
|
||||
'url': 'https://www.bilibili.tv/en/play/34613',
|
||||
'playlist_mincount': 15,
|
||||
'info_dict': {
|
||||
'id': '34613',
|
||||
'title': 'Fly Me to the Moon',
|
||||
'description': 'md5:a861ee1c4dc0acfad85f557cc42ac627',
|
||||
'categories': ['Romance', 'Comedy', 'Slice of life'],
|
||||
'title': 'TONIKAWA: Over the Moon For You',
|
||||
'description': 'md5:297b5a17155eb645e14a14b385ab547e',
|
||||
'categories': ['Slice of life', 'Comedy', 'Romance'],
|
||||
'thumbnail': r're:^https://pic\.bstarstatic\.com/ogv/.+\.png$',
|
||||
'view_count': int,
|
||||
},
|
||||
'params': {
|
||||
'skip_download': True,
|
||||
},
|
||||
}, {
|
||||
'url': 'https://www.bilibili.tv/en/media/1048837',
|
||||
'info_dict': {
|
||||
'id': '1048837',
|
||||
'title': 'SPY×FAMILY',
|
||||
'description': 'md5:b4434eb1a9a97ad2bccb779514b89f17',
|
||||
'categories': ['Adventure', 'Action', 'Comedy'],
|
||||
'thumbnail': r're:^https://pic\.bstarstatic\.com/ogv/.+\.jpg$',
|
||||
'view_count': int,
|
||||
},
|
||||
'playlist_mincount': 25,
|
||||
}, {
|
||||
'url': 'https://www.biliintl.com/en/play/34613',
|
||||
'only_matching': True,
|
||||
|
@ -1020,9 +1075,12 @@ class BiliIntlSeriesIE(BiliIntlBaseIE):
|
|||
|
||||
def _entries(self, series_id):
|
||||
series_json = self._call_api(f'/web/v2/ogv/play/episodes?season_id={series_id}&platform=web', series_id)
|
||||
for episode in traverse_obj(series_json, ('sections', ..., 'episodes', ...), expected_type=dict, default=[]):
|
||||
episode_id = str(episode.get('episode_id'))
|
||||
yield self._extract_video_info(episode, ep_id=episode_id)
|
||||
for episode in traverse_obj(series_json, ('sections', ..., 'episodes', ...), expected_type=dict):
|
||||
episode_id = str(episode['episode_id'])
|
||||
yield self.url_result(smuggle_url(
|
||||
BiliIntlIE._make_url(episode_id, series_id),
|
||||
self._parse_video_metadata(episode)
|
||||
), BiliIntlIE, episode_id)
|
||||
|
||||
def _real_extract(self, url):
|
||||
series_id = self._match_id(url)
|
||||
|
@ -1034,7 +1092,7 @@ class BiliIntlSeriesIE(BiliIntlBaseIE):
|
|||
|
||||
|
||||
class BiliLiveIE(InfoExtractor):
|
||||
_VALID_URL = r'https?://live.bilibili.com/(?P<id>\d+)'
|
||||
_VALID_URL = r'https?://live.bilibili.com/(?:blanc/)?(?P<id>\d+)'
|
||||
|
||||
_TESTS = [{
|
||||
'url': 'https://live.bilibili.com/196',
|
||||
|
@ -1050,6 +1108,9 @@ class BiliLiveIE(InfoExtractor):
|
|||
}, {
|
||||
'url': 'https://live.bilibili.com/196?broadcast_type=0&is_room_feed=1?spm_id_from=333.999.space_home.strengthen_live_card.click',
|
||||
'only_matching': True
|
||||
}, {
|
||||
'url': 'https://live.bilibili.com/blanc/196',
|
||||
'only_matching': True
|
||||
}]
|
||||
|
||||
_FORMATS = {
|
||||
|
@ -1111,6 +1172,7 @@ class BiliLiveIE(InfoExtractor):
|
|||
'thumbnail': room_data.get('user_cover'),
|
||||
'timestamp': stream_data.get('live_time'),
|
||||
'formats': formats,
|
||||
'is_live': True,
|
||||
'http_headers': {
|
||||
'Referer': url,
|
||||
},
|
||||
|
|
|
@ -4,6 +4,7 @@ import datetime
|
|||
import hashlib
|
||||
import hmac
|
||||
import json
|
||||
import random
|
||||
import re
|
||||
|
||||
from .common import InfoExtractor
|
||||
|
@ -27,11 +28,10 @@ class CDAIE(InfoExtractor):
|
|||
_VALID_URL = r'https?://(?:(?:www\.)?cda\.pl/video|ebd\.cda\.pl/[0-9]+x[0-9]+)/(?P<id>[0-9a-z]+)'
|
||||
_NETRC_MACHINE = 'cdapl'
|
||||
|
||||
_BASE_URL = 'http://www.cda.pl/'
|
||||
_BASE_URL = 'https://www.cda.pl'
|
||||
_BASE_API_URL = 'https://api.cda.pl'
|
||||
_API_HEADERS = {
|
||||
'Accept': 'application/vnd.cda.public+json',
|
||||
'User-Agent': 'pl.cda 1.0 (version 1.2.88 build 15306; Android 9; Xiaomi Redmi 3S)',
|
||||
}
|
||||
# hardcoded in the app
|
||||
_LOGIN_REQUEST_AUTH = 'Basic YzU3YzBlZDUtYTIzOC00MWQwLWI2NjQtNmZmMWMxY2Y2YzVlOklBTm95QlhRRVR6U09MV1hnV3MwMW0xT2VyNWJNZzV4clRNTXhpNGZJUGVGZ0lWUlo5UGVYTDhtUGZaR1U1U3Q'
|
||||
|
@ -101,6 +101,38 @@ class CDAIE(InfoExtractor):
|
|||
}, **kwargs)
|
||||
|
||||
def _perform_login(self, username, password):
|
||||
app_version = random.choice((
|
||||
'1.2.88 build 15306',
|
||||
'1.2.174 build 18469',
|
||||
))
|
||||
android_version = random.randrange(8, 14)
|
||||
phone_model = random.choice((
|
||||
# x-kom.pl top selling Android smartphones, as of 2022-12-26
|
||||
# https://www.x-kom.pl/g-4/c/1590-smartfony-i-telefony.html?f201-system-operacyjny=61322-android
|
||||
'ASUS ZenFone 8',
|
||||
'Motorola edge 20 5G',
|
||||
'Motorola edge 30 neo 5G',
|
||||
'Motorola moto g22',
|
||||
'OnePlus Nord 2T 5G',
|
||||
'Samsung Galaxy A32 SM‑A325F',
|
||||
'Samsung Galaxy M13',
|
||||
'Samsung Galaxy S20 FE 5G',
|
||||
'Xiaomi 11T',
|
||||
'Xiaomi POCO M4 Pro',
|
||||
'Xiaomi Redmi 10',
|
||||
'Xiaomi Redmi 10C',
|
||||
'Xiaomi Redmi 9C NFC',
|
||||
'Xiaomi Redmi Note 10 Pro',
|
||||
'Xiaomi Redmi Note 11 Pro',
|
||||
'Xiaomi Redmi Note 11',
|
||||
'Xiaomi Redmi Note 11S 5G',
|
||||
'Xiaomi Redmi Note 11S',
|
||||
'realme 10',
|
||||
'realme 9 Pro+',
|
||||
'vivo Y33s',
|
||||
))
|
||||
self._API_HEADERS['User-Agent'] = f'pl.cda 1.0 (version {app_version}; Android {android_version}; {phone_model})'
|
||||
|
||||
cached_bearer = self.cache.load(self._BEARER_CACHE, username) or {}
|
||||
if cached_bearer.get('valid_until', 0) > datetime.datetime.now().timestamp() + 5:
|
||||
self._API_HEADERS['Authorization'] = f'Bearer {cached_bearer["token"]}'
|
||||
|
@ -138,9 +170,6 @@ class CDAIE(InfoExtractor):
|
|||
meta = self._download_json(
|
||||
f'{self._BASE_API_URL}/video/{video_id}', video_id, headers=self._API_HEADERS)['video']
|
||||
|
||||
if meta.get('premium') and not meta.get('premium_free'):
|
||||
self.report_drm(video_id)
|
||||
|
||||
uploader = traverse_obj(meta, 'author', 'login')
|
||||
|
||||
formats = [{
|
||||
|
@ -151,6 +180,10 @@ class CDAIE(InfoExtractor):
|
|||
'filesize': quality.get('length'),
|
||||
} for quality in meta['qualities'] if quality.get('file')]
|
||||
|
||||
if meta.get('premium') and not meta.get('premium_free') and not formats:
|
||||
raise ExtractorError(
|
||||
'Video requires CDA Premium - subscription needed', expected=True)
|
||||
|
||||
return {
|
||||
'id': video_id,
|
||||
'title': meta.get('title'),
|
||||
|
@ -167,10 +200,10 @@ class CDAIE(InfoExtractor):
|
|||
def _web_extract(self, video_id, url):
|
||||
self._set_cookie('cda.pl', 'cda.player', 'html5')
|
||||
webpage = self._download_webpage(
|
||||
self._BASE_URL + '/video/' + video_id, video_id)
|
||||
f'{self._BASE_URL}/video/{video_id}/vfilm', video_id)
|
||||
|
||||
if 'Ten film jest dostępny dla użytkowników premium' in webpage:
|
||||
raise ExtractorError('This video is only available for premium users.', expected=True)
|
||||
self.raise_login_required('This video is only available for premium users')
|
||||
|
||||
if re.search(r'niedostępn[ey] w(?: |\s+)Twoim kraju\s*<', webpage):
|
||||
self.raise_geo_restricted()
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
from .common import InfoExtractor
|
||||
from ..utils import (
|
||||
ExtractorError,
|
||||
int_or_none,
|
||||
try_get,
|
||||
unified_timestamp,
|
||||
|
@ -38,11 +39,30 @@ class CiscoWebexIE(InfoExtractor):
|
|||
siteurl = mobj.group('siteurl_1') or mobj.group('siteurl_2')
|
||||
video_id = mobj.group('id')
|
||||
|
||||
stream = self._download_json(
|
||||
password = self.get_param('videopassword')
|
||||
|
||||
headers = {'Accept': 'application/json'}
|
||||
if password:
|
||||
headers['accessPwd'] = password
|
||||
|
||||
stream, urlh = self._download_json_handle(
|
||||
'https://%s.webex.com/webappng/api/v1/recordings/%s/stream' % (subdomain, video_id),
|
||||
video_id, fatal=False, query={'siteurl': siteurl})
|
||||
if not stream:
|
||||
self.raise_login_required(method='cookies')
|
||||
video_id, headers=headers, query={'siteurl': siteurl}, expected_status=(403, 429))
|
||||
|
||||
if urlh.status == 403:
|
||||
if stream['code'] == 53004:
|
||||
self.raise_login_required()
|
||||
if stream['code'] == 53005:
|
||||
if password:
|
||||
raise ExtractorError('Wrong password', expected=True)
|
||||
raise ExtractorError(
|
||||
'This video is protected by a password, use the --video-password option', expected=True)
|
||||
raise ExtractorError(f'{self.IE_NAME} said: {stream["code"]} - {stream["message"]}', expected=True)
|
||||
|
||||
if urlh.status == 429:
|
||||
self.raise_login_required(
|
||||
f'{self.IE_NAME} asks you to solve a CAPTCHA. Solve CAPTCHA in browser and',
|
||||
method='cookies')
|
||||
|
||||
video_id = stream.get('recordUUID') or video_id
|
||||
|
||||
|
@ -78,7 +98,7 @@ class CiscoWebexIE(InfoExtractor):
|
|||
'title': stream['recordName'],
|
||||
'description': stream.get('description'),
|
||||
'uploader': stream.get('ownerDisplayName'),
|
||||
'uploader_id': stream.get('ownerUserName') or stream.get('ownerId'), # mail or id
|
||||
'uploader_id': stream.get('ownerUserName') or stream.get('ownerId'),
|
||||
'timestamp': unified_timestamp(stream.get('createTime')),
|
||||
'duration': int_or_none(stream.get('duration'), 1000),
|
||||
'webpage_url': 'https://%s.webex.com/recordingservice/sites/%s/recording/playback/%s' % (subdomain, siteurl, video_id),
|
||||
|
|
|
@ -32,6 +32,7 @@ from ..utils import (
|
|||
FormatSorter,
|
||||
GeoRestrictedError,
|
||||
GeoUtils,
|
||||
HEADRequest,
|
||||
LenientJSONDecoder,
|
||||
RegexNotFoundError,
|
||||
RetryManager,
|
||||
|
@ -71,6 +72,7 @@ from ..utils import (
|
|||
str_to_int,
|
||||
strip_or_none,
|
||||
traverse_obj,
|
||||
truncate_string,
|
||||
try_call,
|
||||
try_get,
|
||||
unescapeHTML,
|
||||
|
@ -79,6 +81,7 @@ from ..utils import (
|
|||
update_Request,
|
||||
update_url_query,
|
||||
url_basename,
|
||||
urlhandle_detect_ext,
|
||||
url_or_none,
|
||||
urljoin,
|
||||
variadic,
|
||||
|
@ -674,7 +677,8 @@ class InfoExtractor:
|
|||
for _ in range(2):
|
||||
try:
|
||||
self.initialize()
|
||||
self.write_debug('Extracting URL: %s' % url)
|
||||
self.to_screen('Extracting URL: %s' % (
|
||||
url if self.get_param('verbose') else truncate_string(url, 100, 20)))
|
||||
ie_result = self._real_extract(url)
|
||||
if ie_result is None:
|
||||
return None
|
||||
|
@ -692,16 +696,10 @@ class InfoExtractor:
|
|||
except UnsupportedError:
|
||||
raise
|
||||
except ExtractorError as e:
|
||||
kwargs = {
|
||||
'video_id': e.video_id or self.get_temp_id(url),
|
||||
'ie': self.IE_NAME,
|
||||
'tb': e.traceback or sys.exc_info()[2],
|
||||
'expected': e.expected,
|
||||
'cause': e.cause
|
||||
}
|
||||
if hasattr(e, 'countries'):
|
||||
kwargs['countries'] = e.countries
|
||||
raise type(e)(e.orig_msg, **kwargs)
|
||||
e.video_id = e.video_id or self.get_temp_id(url),
|
||||
e.ie = e.ie or self.IE_NAME,
|
||||
e.traceback = e.traceback or sys.exc_info()[2]
|
||||
raise
|
||||
except http.client.IncompleteRead as e:
|
||||
raise ExtractorError('A network error has occurred.', cause=e, expected=True, video_id=self.get_temp_id(url))
|
||||
except (KeyError, StopIteration) as e:
|
||||
|
@ -1266,10 +1264,9 @@ class InfoExtractor:
|
|||
Like _search_regex, but strips HTML tags and unescapes entities.
|
||||
"""
|
||||
res = self._search_regex(pattern, string, name, default, fatal, flags, group)
|
||||
if res:
|
||||
return clean_html(res).strip()
|
||||
else:
|
||||
return res
|
||||
if isinstance(res, tuple):
|
||||
return tuple(map(clean_html, res))
|
||||
return clean_html(res)
|
||||
|
||||
def _get_netrc_login_info(self, netrc_machine=None):
|
||||
username = None
|
||||
|
@ -1400,10 +1397,16 @@ class InfoExtractor:
|
|||
# And then there are the jokers who advertise that they use RTA, but actually don't.
|
||||
AGE_LIMIT_MARKERS = [
|
||||
r'Proudly Labeled <a href="http://www\.rtalabel\.org/" title="Restricted to Adults">RTA</a>',
|
||||
r'>[^<]*you acknowledge you are at least (\d+) years old',
|
||||
r'>\s*(?:18\s+U(?:\.S\.C\.|SC)\s+)?(?:§+\s*)?2257\b',
|
||||
]
|
||||
if any(re.search(marker, html) for marker in AGE_LIMIT_MARKERS):
|
||||
return 18
|
||||
return 0
|
||||
|
||||
age_limit = 0
|
||||
for marker in AGE_LIMIT_MARKERS:
|
||||
mobj = re.search(marker, html)
|
||||
if mobj:
|
||||
age_limit = max(age_limit, int(traverse_obj(mobj, 1, default=18)))
|
||||
return age_limit
|
||||
|
||||
def _media_rating_search(self, html):
|
||||
# See http://www.tjg-designs.com/WP/metadata-code-examples-adding-metadata-to-your-web-pages/
|
||||
|
@ -1763,6 +1766,9 @@ class InfoExtractor:
|
|||
def _extract_f4m_formats(self, manifest_url, video_id, preference=None, quality=None, f4m_id=None,
|
||||
transform_source=lambda s: fix_xml_ampersands(s).strip(),
|
||||
fatal=True, m3u8_id=None, data=None, headers={}, query={}):
|
||||
if self.get_param('ignore_no_formats_error'):
|
||||
fatal = False
|
||||
|
||||
res = self._download_xml_handle(
|
||||
manifest_url, video_id, 'Downloading f4m manifest',
|
||||
'Unable to download f4m manifest',
|
||||
|
@ -1912,6 +1918,17 @@ class InfoExtractor:
|
|||
errnote=None, fatal=True, live=False, data=None, headers={},
|
||||
query={}):
|
||||
|
||||
if self.get_param('ignore_no_formats_error'):
|
||||
fatal = False
|
||||
|
||||
if not m3u8_url:
|
||||
if errnote is not False:
|
||||
errnote = errnote or 'Failed to obtain m3u8 URL'
|
||||
if fatal:
|
||||
raise ExtractorError(errnote, video_id=video_id)
|
||||
self.report_warning(f'{errnote}{bug_reports_message()}')
|
||||
return [], {}
|
||||
|
||||
res = self._download_webpage_handle(
|
||||
m3u8_url, video_id,
|
||||
note='Downloading m3u8 information' if note is None else note,
|
||||
|
@ -2163,13 +2180,23 @@ class InfoExtractor:
|
|||
return self._parse_m3u8_vod_duration(m3u8_vod or '', video_id)
|
||||
|
||||
def _parse_m3u8_vod_duration(self, m3u8_vod, video_id):
|
||||
if '#EXT-X-PLAYLIST-TYPE:VOD' not in m3u8_vod:
|
||||
if '#EXT-X-ENDLIST' not in m3u8_vod:
|
||||
return None
|
||||
|
||||
return int(sum(
|
||||
float(line[len('#EXTINF:'):].split(',')[0])
|
||||
for line in m3u8_vod.splitlines() if line.startswith('#EXTINF:'))) or None
|
||||
|
||||
def _extract_mpd_vod_duration(
|
||||
self, mpd_url, video_id, note=None, errnote=None, data=None, headers={}, query={}):
|
||||
|
||||
mpd_doc = self._download_xml(
|
||||
mpd_url, video_id,
|
||||
note='Downloading MPD VOD manifest' if note is None else note,
|
||||
errnote='Failed to download VOD manifest' if errnote is None else errnote,
|
||||
fatal=False, data=data, headers=headers, query=query) or {}
|
||||
return int_or_none(parse_duration(mpd_doc.get('mediaPresentationDuration')))
|
||||
|
||||
@staticmethod
|
||||
def _xpath_ns(path, namespace=None):
|
||||
if not namespace:
|
||||
|
@ -2183,6 +2210,9 @@ class InfoExtractor:
|
|||
return '/'.join(out)
|
||||
|
||||
def _extract_smil_formats_and_subtitles(self, smil_url, video_id, fatal=True, f4m_params=None, transform_source=None):
|
||||
if self.get_param('ignore_no_formats_error'):
|
||||
fatal = False
|
||||
|
||||
res = self._download_smil(smil_url, video_id, fatal=fatal, transform_source=transform_source)
|
||||
if res is False:
|
||||
assert not fatal
|
||||
|
@ -2293,7 +2323,8 @@ class InfoExtractor:
|
|||
height = int_or_none(medium.get('height'))
|
||||
proto = medium.get('proto')
|
||||
ext = medium.get('ext')
|
||||
src_ext = determine_ext(src)
|
||||
src_ext = determine_ext(src, default_ext=None) or ext or urlhandle_detect_ext(
|
||||
self._request_webpage(HEADRequest(src), video_id, note='Requesting extension info', fatal=False))
|
||||
streamer = medium.get('streamer') or base
|
||||
|
||||
if proto == 'rtmp' or streamer.startswith('rtmp'):
|
||||
|
@ -2458,6 +2489,10 @@ class InfoExtractor:
|
|||
def _extract_mpd_formats_and_subtitles(
|
||||
self, mpd_url, video_id, mpd_id=None, note=None, errnote=None,
|
||||
fatal=True, data=None, headers={}, query={}):
|
||||
|
||||
if self.get_param('ignore_no_formats_error'):
|
||||
fatal = False
|
||||
|
||||
res = self._download_xml_handle(
|
||||
mpd_url, video_id,
|
||||
note='Downloading MPD manifest' if note is None else note,
|
||||
|
@ -2827,6 +2862,9 @@ class InfoExtractor:
|
|||
return fmts
|
||||
|
||||
def _extract_ism_formats_and_subtitles(self, ism_url, video_id, ism_id=None, note=None, errnote=None, fatal=True, data=None, headers={}, query={}):
|
||||
if self.get_param('ignore_no_formats_error'):
|
||||
fatal = False
|
||||
|
||||
res = self._download_xml_handle(
|
||||
ism_url, video_id,
|
||||
note='Downloading ISM manifest' if note is None else note,
|
||||
|
@ -3196,7 +3234,7 @@ class InfoExtractor:
|
|||
|
||||
def _find_jwplayer_data(self, webpage, video_id=None, transform_source=js_to_json):
|
||||
mobj = re.search(
|
||||
r'(?s)jwplayer\((?P<quote>[\'"])[^\'" ]+(?P=quote)\)(?!</script>).*?\.setup\s*\((?P<options>[^)]+)\)',
|
||||
r'''(?s)jwplayer\s*\(\s*(?P<q>'|")(?!(?P=q)).+(?P=q)\s*\)(?!</script>).*?\.\s*setup\s*\(\s*(?P<options>(?:\([^)]*\)|[^)])+)\s*\)''',
|
||||
webpage)
|
||||
if mobj:
|
||||
try:
|
||||
|
@ -3217,19 +3255,20 @@ class InfoExtractor:
|
|||
|
||||
def _parse_jwplayer_data(self, jwplayer_data, video_id=None, require_title=True,
|
||||
m3u8_id=None, mpd_id=None, rtmp_params=None, base_url=None):
|
||||
# JWPlayer backward compatibility: flattened playlists
|
||||
# https://github.com/jwplayer/jwplayer/blob/v7.4.3/src/js/api/config.js#L81-L96
|
||||
if 'playlist' not in jwplayer_data:
|
||||
jwplayer_data = {'playlist': [jwplayer_data]}
|
||||
|
||||
entries = []
|
||||
if not isinstance(jwplayer_data, dict):
|
||||
return entries
|
||||
|
||||
# JWPlayer backward compatibility: single playlist item
|
||||
playlist_items = jwplayer_data.get('playlist')
|
||||
# JWPlayer backward compatibility: single playlist item/flattened playlists
|
||||
# https://github.com/jwplayer/jwplayer/blob/v7.7.0/src/js/playlist/playlist.js#L10
|
||||
if not isinstance(jwplayer_data['playlist'], list):
|
||||
jwplayer_data['playlist'] = [jwplayer_data['playlist']]
|
||||
# https://github.com/jwplayer/jwplayer/blob/v7.4.3/src/js/api/config.js#L81-L96
|
||||
if not isinstance(playlist_items, list):
|
||||
playlist_items = (playlist_items or jwplayer_data, )
|
||||
|
||||
for video_data in jwplayer_data['playlist']:
|
||||
for video_data in playlist_items:
|
||||
if not isinstance(video_data, dict):
|
||||
continue
|
||||
# JWPlayer backward compatibility: flattened sources
|
||||
# https://github.com/jwplayer/jwplayer/blob/v7.4.3/src/js/playlist/item.js#L29-L35
|
||||
if 'sources' not in video_data:
|
||||
|
@ -3267,6 +3306,13 @@ class InfoExtractor:
|
|||
'timestamp': int_or_none(video_data.get('pubdate')),
|
||||
'duration': float_or_none(jwplayer_data.get('duration') or video_data.get('duration')),
|
||||
'subtitles': subtitles,
|
||||
'alt_title': clean_html(video_data.get('subtitle')), # attributes used e.g. by Tele5 ...
|
||||
'genre': clean_html(video_data.get('genre')),
|
||||
'channel': clean_html(dict_get(video_data, ('category', 'channel'))),
|
||||
'season_number': int_or_none(video_data.get('season')),
|
||||
'episode_number': int_or_none(video_data.get('episode')),
|
||||
'release_year': int_or_none(video_data.get('releasedate')),
|
||||
'age_limit': int_or_none(video_data.get('age_restriction')),
|
||||
}
|
||||
# https://github.com/jwplayer/jwplayer/blob/master/src/js/utils/validator.js#L32
|
||||
if len(formats) == 1 and re.search(r'^(?:http|//).*(?:youtube\.com|youtu\.be)/.+', formats[0]['url']):
|
||||
|
@ -3284,7 +3330,7 @@ class InfoExtractor:
|
|||
|
||||
def _parse_jwplayer_formats(self, jwplayer_sources_data, video_id=None,
|
||||
m3u8_id=None, mpd_id=None, rtmp_params=None, base_url=None):
|
||||
urls = []
|
||||
urls = set()
|
||||
formats = []
|
||||
for source in jwplayer_sources_data:
|
||||
if not isinstance(source, dict):
|
||||
|
@ -3293,14 +3339,14 @@ class InfoExtractor:
|
|||
base_url, self._proto_relative_url(source.get('file')))
|
||||
if not source_url or source_url in urls:
|
||||
continue
|
||||
urls.append(source_url)
|
||||
urls.add(source_url)
|
||||
source_type = source.get('type') or ''
|
||||
ext = mimetype2ext(source_type) or determine_ext(source_url)
|
||||
if source_type == 'hls' or ext == 'm3u8':
|
||||
if source_type == 'hls' or ext == 'm3u8' or 'format=m3u8-aapl' in source_url:
|
||||
formats.extend(self._extract_m3u8_formats(
|
||||
source_url, video_id, 'mp4', entry_protocol='m3u8_native',
|
||||
m3u8_id=m3u8_id, fatal=False))
|
||||
elif source_type == 'dash' or ext == 'mpd':
|
||||
elif source_type == 'dash' or ext == 'mpd' or 'format=mpd-time-csf' in source_url:
|
||||
formats.extend(self._extract_mpd_formats(
|
||||
source_url, video_id, mpd_id=mpd_id, fatal=False))
|
||||
elif ext == 'smil':
|
||||
|
@ -3315,13 +3361,12 @@ class InfoExtractor:
|
|||
'ext': ext,
|
||||
})
|
||||
else:
|
||||
format_id = str_or_none(source.get('label'))
|
||||
height = int_or_none(source.get('height'))
|
||||
if height is None:
|
||||
if height is None and format_id:
|
||||
# Often no height is provided but there is a label in
|
||||
# format like "1080p", "720p SD", or 1080.
|
||||
height = int_or_none(self._search_regex(
|
||||
r'^(\d{3,4})[pP]?(?:\b|$)', str(source.get('label') or ''),
|
||||
'height', default=None))
|
||||
height = parse_resolution(format_id).get('height')
|
||||
a_format = {
|
||||
'url': source_url,
|
||||
'width': int_or_none(source.get('width')),
|
||||
|
@ -3329,6 +3374,7 @@ class InfoExtractor:
|
|||
'tbr': int_or_none(source.get('bitrate'), scale=1000),
|
||||
'filesize': int_or_none(source.get('filesize')),
|
||||
'ext': ext,
|
||||
'format_id': format_id
|
||||
}
|
||||
if source_url.startswith('rtmp'):
|
||||
a_format['ext'] = 'flv'
|
||||
|
@ -3422,13 +3468,17 @@ class InfoExtractor:
|
|||
continue
|
||||
t['name'] = cls.ie_key()
|
||||
yield t
|
||||
if getattr(cls, '__wrapped__', None):
|
||||
yield from cls.__wrapped__.get_testcases(include_onlymatching)
|
||||
|
||||
@classmethod
|
||||
def get_webpage_testcases(cls):
|
||||
tests = vars(cls).get('_WEBPAGE_TESTS', [])
|
||||
for t in tests:
|
||||
t['name'] = cls.ie_key()
|
||||
return tests
|
||||
yield t
|
||||
if getattr(cls, '__wrapped__', None):
|
||||
yield from cls.__wrapped__.get_webpage_testcases()
|
||||
|
||||
@classproperty(cache=True)
|
||||
def age_limit(cls):
|
||||
|
@ -3474,7 +3524,7 @@ class InfoExtractor:
|
|||
elif cls.IE_DESC:
|
||||
desc += f' {cls.IE_DESC}'
|
||||
if cls.SEARCH_KEY:
|
||||
desc += f'; "{cls.SEARCH_KEY}:" prefix'
|
||||
desc += f'{";" if cls.IE_DESC else ""} "{cls.SEARCH_KEY}:" prefix'
|
||||
if search_examples:
|
||||
_COUNTS = ('', '5', '10', 'all')
|
||||
desc += f' (e.g. "{cls.SEARCH_KEY}{random.choice(_COUNTS)}:{random.choice(search_examples)}")'
|
||||
|
@ -3690,10 +3740,12 @@ class InfoExtractor:
|
|||
if plugin_name:
|
||||
mro = inspect.getmro(cls)
|
||||
super_class = cls.__wrapped__ = mro[mro.index(cls) + 1]
|
||||
cls.IE_NAME, cls.ie_key = f'{super_class.IE_NAME}+{plugin_name}', super_class.ie_key
|
||||
cls.PLUGIN_NAME, cls.ie_key = plugin_name, super_class.ie_key
|
||||
cls.IE_NAME = f'{super_class.IE_NAME}+{plugin_name}'
|
||||
while getattr(super_class, '__wrapped__', None):
|
||||
super_class = super_class.__wrapped__
|
||||
setattr(sys.modules[super_class.__module__], super_class.__name__, cls)
|
||||
_PLUGIN_OVERRIDES[super_class].append(cls)
|
||||
|
||||
return super().__init_subclass__(**kwargs)
|
||||
|
||||
|
@ -3750,3 +3802,6 @@ class UnsupportedURLIE(InfoExtractor):
|
|||
|
||||
def _real_extract(self, url):
|
||||
raise UnsupportedError(url)
|
||||
|
||||
|
||||
_PLUGIN_OVERRIDES = collections.defaultdict(list)
|
||||
|
|
|
@ -182,7 +182,7 @@ class CrunchyrollBetaIE(CrunchyrollBaseIE):
|
|||
self.to_screen(
|
||||
'To get all formats of a hardsub language, use '
|
||||
'"--extractor-args crunchyrollbeta:hardsub=<language_code or all>". '
|
||||
'See https://github.com/yt-dlp/yt-dlp#crunchyrollbeta for more info',
|
||||
'See https://github.com/yt-dlp/yt-dlp#crunchyrollbeta-crunchyroll for more info',
|
||||
only_once=True)
|
||||
else:
|
||||
full_format_langs = set(map(str.lower, available_formats))
|
||||
|
@ -291,7 +291,8 @@ class CrunchyrollBetaShowIE(CrunchyrollBaseIE):
|
|||
'season_id': episode.get('season_id'),
|
||||
'season_number': episode.get('season_number'),
|
||||
'episode': episode.get('title'),
|
||||
'episode_number': episode.get('sequence_number')
|
||||
'episode_number': episode.get('sequence_number'),
|
||||
'language': episode.get('audio_locale'),
|
||||
}
|
||||
|
||||
return self.playlist_result(entries(), internal_id, series_response.get('title'))
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
import re
|
||||
import urllib.parse
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..compat import compat_str
|
||||
|
@ -23,7 +24,7 @@ class CuriosityStreamBaseIE(InfoExtractor):
|
|||
auth_cookie = self._get_cookies('https://curiositystream.com').get('auth_token')
|
||||
if auth_cookie:
|
||||
self.write_debug('Obtained auth_token cookie')
|
||||
self._auth_token = auth_cookie.value
|
||||
self._auth_token = urllib.parse.unquote(auth_cookie.value)
|
||||
if self._auth_token:
|
||||
headers['X-Auth-Token'] = self._auth_token
|
||||
result = self._download_json(
|
||||
|
@ -54,8 +55,11 @@ class CuriosityStreamIE(CuriosityStreamBaseIE):
|
|||
'description': 'Vint Cerf, Google\'s Chief Internet Evangelist, describes how he and Bob Kahn created the internet.',
|
||||
'channel': 'Curiosity Stream',
|
||||
'categories': ['Technology', 'Interview'],
|
||||
'average_rating': 96.79,
|
||||
'average_rating': float,
|
||||
'series_id': '2',
|
||||
'thumbnail': r're:https://img.curiositystream.com/.+\.jpg',
|
||||
'tags': [],
|
||||
'duration': 158
|
||||
},
|
||||
'params': {
|
||||
# m3u8 download
|
||||
|
|
|
@ -78,7 +78,7 @@ class DiscoveryIE(DiscoveryGoBaseIE):
|
|||
'Downloading token JSON metadata', query={
|
||||
'authRel': 'authorization',
|
||||
'client_id': '3020a40c2356a645b4b4',
|
||||
'nonce': ''.join([random.choice(string.ascii_letters) for _ in range(32)]),
|
||||
'nonce': ''.join(random.choices(string.ascii_letters, k=32)),
|
||||
'redirectUri': 'https://www.discovery.com/',
|
||||
})['access_token']
|
||||
|
||||
|
|
|
@ -2,22 +2,24 @@ import binascii
|
|||
import hashlib
|
||||
import re
|
||||
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..aes import aes_cbc_decrypt_bytes, unpad_pkcs7
|
||||
from ..compat import compat_urllib_parse_unquote
|
||||
from ..utils import (
|
||||
ExtractorError,
|
||||
int_or_none,
|
||||
float_or_none,
|
||||
int_or_none,
|
||||
mimetype2ext,
|
||||
str_or_none,
|
||||
traverse_obj,
|
||||
try_get,
|
||||
unified_timestamp,
|
||||
update_url_query,
|
||||
url_or_none,
|
||||
)
|
||||
|
||||
SERIES_API = 'https://production-cdn.dr-massive.com/api/page?device=web_browser&item_detail_expand=all&lang=da&max_list_prefetch=3&path=%s'
|
||||
|
||||
|
||||
class DRTVIE(InfoExtractor):
|
||||
_VALID_URL = r'''(?x)
|
||||
|
@ -141,13 +143,13 @@ class DRTVIE(InfoExtractor):
|
|||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
video_id = self._match_id(url)
|
||||
raw_video_id = self._match_id(url)
|
||||
|
||||
webpage = self._download_webpage(url, video_id)
|
||||
webpage = self._download_webpage(url, raw_video_id)
|
||||
|
||||
if '>Programmet er ikke længere tilgængeligt' in webpage:
|
||||
raise ExtractorError(
|
||||
'Video %s is not available' % video_id, expected=True)
|
||||
'Video %s is not available' % raw_video_id, expected=True)
|
||||
|
||||
video_id = self._search_regex(
|
||||
(r'data-(?:material-identifier|episode-slug)="([^"]+)"',
|
||||
|
@ -182,6 +184,11 @@ class DRTVIE(InfoExtractor):
|
|||
data = self._download_json(
|
||||
programcard_url, video_id, 'Downloading video JSON', query=query)
|
||||
|
||||
supplementary_data = {}
|
||||
if re.search(r'_\d+$', raw_video_id):
|
||||
supplementary_data = self._download_json(
|
||||
SERIES_API % f'/episode/{raw_video_id}', raw_video_id, fatal=False) or {}
|
||||
|
||||
title = str_or_none(data.get('Title')) or re.sub(
|
||||
r'\s*\|\s*(?:TV\s*\|\s*DR|DRTV)$', '',
|
||||
self._og_search_title(webpage))
|
||||
|
@ -313,8 +320,8 @@ class DRTVIE(InfoExtractor):
|
|||
'season': str_or_none(data.get('SeasonTitle')),
|
||||
'season_number': int_or_none(data.get('SeasonNumber')),
|
||||
'season_id': str_or_none(data.get('SeasonUrn')),
|
||||
'episode': str_or_none(data.get('EpisodeTitle')),
|
||||
'episode_number': int_or_none(data.get('EpisodeNumber')),
|
||||
'episode': traverse_obj(supplementary_data, ('entries', 0, 'item', 'contextualTitle')) or str_or_none(data.get('EpisodeTitle')),
|
||||
'episode_number': traverse_obj(supplementary_data, ('entries', 0, 'item', 'episodeNumber')) or int_or_none(data.get('EpisodeNumber')),
|
||||
'release_year': int_or_none(data.get('ProductionYear')),
|
||||
}
|
||||
|
||||
|
@ -372,3 +379,92 @@ class DRTVLiveIE(InfoExtractor):
|
|||
'formats': formats,
|
||||
'is_live': True,
|
||||
}
|
||||
|
||||
|
||||
class DRTVSeasonIE(InfoExtractor):
|
||||
IE_NAME = 'drtv:season'
|
||||
_VALID_URL = r'https?://(?:www\.)?(?:dr\.dk|dr-massive\.com)/drtv/saeson/(?P<display_id>[\w-]+)_(?P<id>\d+)'
|
||||
_GEO_COUNTRIES = ['DK']
|
||||
_TESTS = [{
|
||||
'url': 'https://www.dr.dk/drtv/saeson/frank-and-kastaniegaarden_9008',
|
||||
'info_dict': {
|
||||
'id': '9008',
|
||||
'display_id': 'frank-and-kastaniegaarden',
|
||||
'title': 'Frank & Kastaniegaarden',
|
||||
'series': 'Frank & Kastaniegaarden',
|
||||
},
|
||||
'playlist_mincount': 8
|
||||
}, {
|
||||
'url': 'https://www.dr.dk/drtv/saeson/frank-and-kastaniegaarden_8761',
|
||||
'info_dict': {
|
||||
'id': '8761',
|
||||
'display_id': 'frank-and-kastaniegaarden',
|
||||
'title': 'Frank & Kastaniegaarden',
|
||||
'series': 'Frank & Kastaniegaarden',
|
||||
},
|
||||
'playlist_mincount': 19
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
display_id, season_id = self._match_valid_url(url).group('display_id', 'id')
|
||||
data = self._download_json(SERIES_API % f'/saeson/{display_id}_{season_id}', display_id)
|
||||
|
||||
entries = [{
|
||||
'_type': 'url',
|
||||
'url': f'https://www.dr.dk/drtv{episode["path"]}',
|
||||
'ie_key': DRTVIE.ie_key(),
|
||||
'title': episode.get('title'),
|
||||
'episode': episode.get('episodeName'),
|
||||
'description': episode.get('shortDescription'),
|
||||
'series': traverse_obj(data, ('entries', 0, 'item', 'title')),
|
||||
'season_number': traverse_obj(data, ('entries', 0, 'item', 'seasonNumber')),
|
||||
'episode_number': episode.get('episodeNumber'),
|
||||
} for episode in traverse_obj(data, ('entries', 0, 'item', 'episodes', 'items'))]
|
||||
|
||||
return {
|
||||
'_type': 'playlist',
|
||||
'id': season_id,
|
||||
'display_id': display_id,
|
||||
'title': traverse_obj(data, ('entries', 0, 'item', 'title')),
|
||||
'series': traverse_obj(data, ('entries', 0, 'item', 'title')),
|
||||
'entries': entries,
|
||||
'season_number': traverse_obj(data, ('entries', 0, 'item', 'seasonNumber'))
|
||||
}
|
||||
|
||||
|
||||
class DRTVSeriesIE(InfoExtractor):
|
||||
IE_NAME = 'drtv:series'
|
||||
_VALID_URL = r'https?://(?:www\.)?(?:dr\.dk|dr-massive\.com)/drtv/serie/(?P<display_id>[\w-]+)_(?P<id>\d+)'
|
||||
_GEO_COUNTRIES = ['DK']
|
||||
_TESTS = [{
|
||||
'url': 'https://www.dr.dk/drtv/serie/frank-and-kastaniegaarden_6954',
|
||||
'info_dict': {
|
||||
'id': '6954',
|
||||
'display_id': 'frank-and-kastaniegaarden',
|
||||
'title': 'Frank & Kastaniegaarden',
|
||||
'series': 'Frank & Kastaniegaarden',
|
||||
},
|
||||
'playlist_mincount': 15
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
display_id, series_id = self._match_valid_url(url).group('display_id', 'id')
|
||||
data = self._download_json(SERIES_API % f'/serie/{display_id}_{series_id}', display_id)
|
||||
|
||||
entries = [{
|
||||
'_type': 'url',
|
||||
'url': f'https://www.dr.dk/drtv{season.get("path")}',
|
||||
'ie_key': DRTVSeasonIE.ie_key(),
|
||||
'title': season.get('title'),
|
||||
'series': traverse_obj(data, ('entries', 0, 'item', 'title')),
|
||||
'season_number': traverse_obj(data, ('entries', 0, 'item', 'seasonNumber'))
|
||||
} for season in traverse_obj(data, ('entries', 0, 'item', 'show', 'seasons', 'items'))]
|
||||
|
||||
return {
|
||||
'_type': 'playlist',
|
||||
'id': series_id,
|
||||
'display_id': display_id,
|
||||
'title': traverse_obj(data, ('entries', 0, 'item', 'title')),
|
||||
'series': traverse_obj(data, ('entries', 0, 'item', 'title')),
|
||||
'entries': entries
|
||||
}
|
||||
|
|
|
@ -1,24 +1,80 @@
|
|||
import re
|
||||
import urllib.parse
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..compat import compat_urllib_parse_unquote
|
||||
from .youtube import YoutubeTabIE
|
||||
from ..utils import parse_qs, smuggle_url, traverse_obj
|
||||
|
||||
|
||||
class EmbedlyIE(InfoExtractor):
|
||||
_VALID_URL = r'https?://(?:www|cdn\.)?embedly\.com/widgets/media\.html\?(?:[^#]*?&)?url=(?P<id>[^#&]+)'
|
||||
_VALID_URL = r'https?://(?:www|cdn\.)?embedly\.com/widgets/media\.html\?(?:[^#]*?&)?(?:src|url)=(?:[^#&]+)'
|
||||
_TESTS = [{
|
||||
'url': 'https://cdn.embedly.com/widgets/media.html?src=http%3A%2F%2Fwww.youtube.com%2Fembed%2Fvideoseries%3Flist%3DUUGLim4T2loE5rwCMdpCIPVg&url=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DSU4fj_aEMVw%26list%3DUUGLim4T2loE5rwCMdpCIPVg&image=http%3A%2F%2Fi.ytimg.com%2Fvi%2FSU4fj_aEMVw%2Fhqdefault.jpg&key=8ee8a2e6a8cc47aab1a5ee67f9a178e0&type=text%2Fhtml&schema=youtube&autoplay=1',
|
||||
'info_dict': {
|
||||
'id': 'UUGLim4T2loE5rwCMdpCIPVg',
|
||||
'modified_date': '20221225',
|
||||
'view_count': int,
|
||||
'uploader_url': 'https://www.youtube.com/@TraciHinesMusic',
|
||||
'channel_id': 'UCGLim4T2loE5rwCMdpCIPVg',
|
||||
'uploader': 'TraciJHines',
|
||||
'channel_url': 'https://www.youtube.com/@TraciHinesMusic',
|
||||
'channel': 'TraciJHines',
|
||||
'availability': 'public',
|
||||
'uploader_id': 'UCGLim4T2loE5rwCMdpCIPVg',
|
||||
'description': '',
|
||||
'tags': [],
|
||||
'title': 'Uploads from TraciJHines',
|
||||
},
|
||||
'playlist_mincount': 10,
|
||||
}, {
|
||||
'url': 'https://cdn.embedly.com/widgets/media.html?src=http%3A%2F%2Fwww.youtube.com%2Fembed%2Fvideoseries%3Flist%3DUUGLim4T2loE5rwCMdpCIPVg&url=https%3A%2F%2Fwww.youtube.com%2Fwatch%3Fv%3DSU4fj_aEMVw%26list%3DUUGLim4T2loE5rwCMdpCIPVg&image=http%3A%2F%2Fi.ytimg.com%2Fvi%2FSU4fj_aEMVw%2Fhqdefault.jpg&key=8ee8a2e6a8cc47aab1a5ee67f9a178e0&type=text%2Fhtml&schema=youtube&autoplay=1',
|
||||
'params': {'noplaylist': True},
|
||||
'info_dict': {
|
||||
'id': 'SU4fj_aEMVw',
|
||||
'ext': 'mp4',
|
||||
'title': 'I\'m on Patreon!',
|
||||
'age_limit': 0,
|
||||
'categories': ['Entertainment'],
|
||||
'thumbnail': 'https://i.ytimg.com/vi_webp/SU4fj_aEMVw/maxresdefault.webp',
|
||||
'live_status': 'not_live',
|
||||
'playable_in_embed': True,
|
||||
'channel': 'TraciJHines',
|
||||
'uploader_id': 'TraciJHines',
|
||||
'channel_url': 'https://www.youtube.com/channel/UCGLim4T2loE5rwCMdpCIPVg',
|
||||
'uploader_url': 'http://www.youtube.com/user/TraciJHines',
|
||||
'upload_date': '20150211',
|
||||
'duration': 282,
|
||||
'availability': 'public',
|
||||
'channel_follower_count': int,
|
||||
'tags': 'count:39',
|
||||
'view_count': int,
|
||||
'comment_count': int,
|
||||
'channel_id': 'UCGLim4T2loE5rwCMdpCIPVg',
|
||||
'like_count': int,
|
||||
'uploader': 'TraciJHines',
|
||||
'description': 'md5:8af6425f50bd46fbf29f3db0fc3a8364',
|
||||
'chapters': list,
|
||||
|
||||
},
|
||||
}, {
|
||||
'url': 'https://cdn.embedly.com/widgets/media.html?src=https://player.vimeo.com/video/1234567?h=abcdefgh',
|
||||
'only_matching': True,
|
||||
}]
|
||||
|
||||
@classmethod
|
||||
def _extract_embed_urls(cls, url, webpage):
|
||||
# Bypass suitable check
|
||||
def _extract_from_webpage(cls, url, webpage):
|
||||
# Bypass "ie=cls" and suitable check
|
||||
for mobj in re.finditer(r'class=["\']embedly-card["\'][^>]href=["\'](?P<url>[^"\']+)', webpage):
|
||||
yield mobj.group('url')
|
||||
yield cls.url_result(mobj.group('url'))
|
||||
|
||||
for mobj in re.finditer(r'class=["\']embedly-embed["\'][^>]src=["\'][^"\']*url=(?P<url>[^&]+)', webpage):
|
||||
yield urllib.parse.unquote(mobj.group('url'))
|
||||
yield cls.url_result(urllib.parse.unquote(mobj.group('url')))
|
||||
|
||||
def _real_extract(self, url):
|
||||
return self.url_result(compat_urllib_parse_unquote(self._match_id(url)))
|
||||
qs = parse_qs(url)
|
||||
src = urllib.parse.unquote(traverse_obj(qs, ('url', 0)) or '')
|
||||
if src and YoutubeTabIE.suitable(src):
|
||||
return self.url_result(src, YoutubeTabIE)
|
||||
return self.url_result(smuggle_url(
|
||||
urllib.parse.unquote(traverse_obj(qs, ('src', 0), ('url', 0))),
|
||||
{'http_headers': {'Referer': url}}))
|
||||
|
|
|
@ -3,6 +3,7 @@ from ..utils import (
|
|||
int_or_none,
|
||||
orderedSet,
|
||||
parse_duration,
|
||||
parse_iso8601,
|
||||
parse_qs,
|
||||
qualities,
|
||||
unified_strdate,
|
||||
|
@ -87,3 +88,86 @@ class EuropaIE(InfoExtractor):
|
|||
'view_count': view_count,
|
||||
'formats': formats
|
||||
}
|
||||
|
||||
|
||||
class EuroParlWebstreamIE(InfoExtractor):
|
||||
_VALID_URL = r'''(?x)
|
||||
https?://(?:multimedia|webstreaming)\.europarl\.europa\.eu/[^/#?]+/
|
||||
(?:embed/embed\.html\?event=|(?!video)[^/#?]+/[\w-]+_)(?P<id>[\w-]+)
|
||||
'''
|
||||
_TESTS = [{
|
||||
'url': 'https://multimedia.europarl.europa.eu/pl/webstreaming/plenary-session_20220914-0900-PLENARY',
|
||||
'info_dict': {
|
||||
'id': 'bcaa1db4-76ef-7e06-8da7-839bd0ad1dbe',
|
||||
'ext': 'mp4',
|
||||
'release_timestamp': 1663137900,
|
||||
'title': 'Plenary session',
|
||||
'release_date': '20220914',
|
||||
},
|
||||
'params': {
|
||||
'skip_download': True,
|
||||
}
|
||||
}, {
|
||||
'url': 'https://multimedia.europarl.europa.eu/pl/webstreaming/eu-cop27-un-climate-change-conference-in-sharm-el-sheikh-egypt-ep-delegation-meets-with-ngo-represen_20221114-1600-SPECIAL-OTHER',
|
||||
'info_dict': {
|
||||
'id': 'a8428de8-b9cd-6a2e-11e4-3805d9c9ff5c',
|
||||
'ext': 'mp4',
|
||||
'release_timestamp': 1668434400,
|
||||
'release_date': '20221114',
|
||||
'title': 'md5:d3550280c33cc70e0678652e3d52c028',
|
||||
},
|
||||
'params': {
|
||||
'skip_download': True,
|
||||
}
|
||||
}, {
|
||||
# embed webpage
|
||||
'url': 'https://webstreaming.europarl.europa.eu/ep/embed/embed.html?event=20220914-0900-PLENARY&language=en&autoplay=true&logo=true',
|
||||
'info_dict': {
|
||||
'id': 'bcaa1db4-76ef-7e06-8da7-839bd0ad1dbe',
|
||||
'ext': 'mp4',
|
||||
'title': 'Plenary session',
|
||||
'release_date': '20220914',
|
||||
'release_timestamp': 1663137900,
|
||||
},
|
||||
'params': {
|
||||
'skip_download': True,
|
||||
}
|
||||
}, {
|
||||
# live webstream
|
||||
'url': 'https://multimedia.europarl.europa.eu/en/webstreaming/euroscola_20221115-1000-SPECIAL-EUROSCOLA',
|
||||
'info_dict': {
|
||||
'ext': 'mp4',
|
||||
'id': '510eda7f-ba72-161b-7ee7-0e836cd2e715',
|
||||
'release_timestamp': 1668502800,
|
||||
'title': 'Euroscola 2022-11-15 19:21',
|
||||
'release_date': '20221115',
|
||||
'live_status': 'is_live',
|
||||
},
|
||||
'skip': 'not live anymore'
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
display_id = self._match_id(url)
|
||||
|
||||
json_info = self._download_json(
|
||||
'https://vis-api.vuplay.co.uk/event/external', display_id,
|
||||
query={
|
||||
'player_key': 'europarl|718f822c-a48c-4841-9947-c9cb9bb1743c',
|
||||
'external_id': display_id,
|
||||
})
|
||||
|
||||
formats, subtitles = self._extract_mpd_formats_and_subtitles(json_info['streaming_url'], display_id)
|
||||
fmts, subs = self._extract_m3u8_formats_and_subtitles(
|
||||
json_info['streaming_url'].replace('.mpd', '.m3u8'), display_id)
|
||||
|
||||
formats.extend(fmts)
|
||||
self._merge_subtitles(subs, target=subtitles)
|
||||
|
||||
return {
|
||||
'id': json_info['id'],
|
||||
'title': json_info.get('title'),
|
||||
'formats': formats,
|
||||
'subtitles': subtitles,
|
||||
'release_timestamp': parse_iso8601(json_info.get('published_start')),
|
||||
'is_live': 'LIVE' in json_info.get('state', '')
|
||||
}
|
||||
|
|
|
@ -1,10 +1,10 @@
|
|||
import contextlib
|
||||
import os
|
||||
|
||||
from ..utils import load_plugins
|
||||
from ..plugins import load_plugins
|
||||
|
||||
# NB: Must be before other imports so that plugins can be correctly injected
|
||||
_PLUGIN_CLASSES = load_plugins('extractor', 'IE', {})
|
||||
_PLUGIN_CLASSES = load_plugins('extractor', 'IE')
|
||||
|
||||
_LAZY_LOADER = False
|
||||
if not os.environ.get('YTDLP_NO_LAZY_EXTRACTORS'):
|
||||
|
@ -24,3 +24,5 @@ if not _LAZY_LOADER:
|
|||
|
||||
globals().update(_PLUGIN_CLASSES)
|
||||
_ALL_CLASSES[:0] = _PLUGIN_CLASSES.values()
|
||||
|
||||
from .common import _PLUGIN_OVERRIDES # noqa: F401
|
||||
|
|
|
@ -17,8 +17,10 @@ class FifaIE(InfoExtractor):
|
|||
'description': 'md5:f4520d0ee80529c8ba4134a7d692ff8b',
|
||||
'ext': 'mp4',
|
||||
'categories': ['FIFA Tournaments'],
|
||||
'thumbnail': 'https://digitalhub.fifa.com/transform/fa6f0b3e-a2e9-4cf7-9f32-53c57bcb7360/2006_Final_ITA_FRA',
|
||||
'thumbnail': 'https://digitalhub.fifa.com/transform/135e2656-3a51-407b-8810-6c34bec5b59b/FMR_2006_Italy_France_Final_Hero',
|
||||
'duration': 8165,
|
||||
'release_timestamp': 1152403200,
|
||||
'release_date': '20060709',
|
||||
},
|
||||
'params': {'skip_download': 'm3u8'},
|
||||
}, {
|
||||
|
@ -54,7 +56,7 @@ class FifaIE(InfoExtractor):
|
|||
webpage = self._download_webpage(url, video_id)
|
||||
|
||||
preconnect_link = self._search_regex(
|
||||
r'<link[^>]+rel\s*=\s*"preconnect"[^>]+href\s*=\s*"([^"]+)"', webpage, 'Preconnect Link')
|
||||
r'<link\b[^>]+\brel\s*=\s*"preconnect"[^>]+href\s*=\s*"([^"]+)"', webpage, 'Preconnect Link')
|
||||
|
||||
video_details = self._download_json(
|
||||
f'{preconnect_link}/sections/videoDetails/{video_id}', video_id, 'Downloading Video Details', fatal=False)
|
||||
|
@ -62,22 +64,9 @@ class FifaIE(InfoExtractor):
|
|||
preplay_parameters = self._download_json(
|
||||
f'{preconnect_link}/videoPlayerData/{video_id}', video_id, 'Downloading Preplay Parameters')['preplayParameters']
|
||||
|
||||
cid = preplay_parameters['contentId']
|
||||
content_data = self._download_json(
|
||||
f'https://content.uplynk.com/preplay/{cid}/multiple.json', video_id, 'Downloading Content Data', query={
|
||||
'v': preplay_parameters['preplayAPIVersion'],
|
||||
'tc': preplay_parameters['tokenCheckAlgorithmVersion'],
|
||||
'rn': preplay_parameters['randomNumber'],
|
||||
'exp': preplay_parameters['tokenExpirationDate'],
|
||||
'ct': preplay_parameters['contentType'],
|
||||
'cid': cid,
|
||||
'mbtracks': preplay_parameters['tracksAssetNumber'],
|
||||
'ad': preplay_parameters['adConfiguration'],
|
||||
'ad.preroll': int(preplay_parameters['adPreroll']),
|
||||
'ad.cmsid': preplay_parameters['adCMSSourceId'],
|
||||
'ad.vid': preplay_parameters['adSourceVideoID'],
|
||||
'sig': preplay_parameters['signature'],
|
||||
})
|
||||
'https://content.uplynk.com/preplay/{contentId}/multiple.json?{queryStr}&sig={signature}'.format(**preplay_parameters),
|
||||
video_id, 'Downloading Content Data')
|
||||
|
||||
formats, subtitles = self._extract_m3u8_formats_and_subtitles(content_data['playURL'], video_id)
|
||||
|
||||
|
|
|
@ -1,31 +1,51 @@
|
|||
from .common import InfoExtractor
|
||||
from .uplynk import UplynkPreplayIE
|
||||
from ..utils import HEADRequest, float_or_none, make_archive_id, smuggle_url
|
||||
|
||||
|
||||
class FoxSportsIE(InfoExtractor):
|
||||
_VALID_URL = r'https?://(?:www\.)?foxsports\.com/(?:[^/]+/)*video/(?P<id>\d+)'
|
||||
|
||||
_TEST = {
|
||||
'url': 'http://www.foxsports.com/tennessee/video/432609859715',
|
||||
'md5': 'b49050e955bebe32c301972e4012ac17',
|
||||
_VALID_URL = r'https?://(?:www\.)?foxsports\.com/watch/(?P<id>[\w-]+)'
|
||||
_TESTS = [{
|
||||
'url': 'https://www.foxsports.com/watch/play-612168c6700004b',
|
||||
'info_dict': {
|
||||
'id': '432609859715',
|
||||
'id': 'b72f5bd8658140baa5791bb676433733',
|
||||
'ext': 'mp4',
|
||||
'title': 'Courtney Lee on going up 2-0 in series vs. Blazers',
|
||||
'description': 'Courtney Lee talks about Memphis being focused.',
|
||||
# TODO: fix timestamp
|
||||
'upload_date': '19700101', # '20150423',
|
||||
# 'timestamp': 1429761109,
|
||||
'uploader': 'NEWA-FNG-FOXSPORTS',
|
||||
'display_id': 'play-612168c6700004b',
|
||||
'title': 'md5:e0c4ecac3a1f25295b4fae22fb5c126a',
|
||||
'description': 'md5:371bc43609708ae2b9e1a939229762af',
|
||||
'uploader_id': '06b4a36349624051a9ba52ac3a91d268',
|
||||
'upload_date': '20221205',
|
||||
'timestamp': 1670262586,
|
||||
'duration': 31.7317,
|
||||
'thumbnail': r're:^https?://.*\.jpg$',
|
||||
'extra_param_to_segment_url': str,
|
||||
},
|
||||
'params': {
|
||||
# m3u8 download
|
||||
'skip_download': True,
|
||||
'skip_download': 'm3u8',
|
||||
},
|
||||
'add_ie': ['ThePlatform'],
|
||||
}
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
video_id = self._match_id(url)
|
||||
webpage = self._download_webpage(url, video_id)
|
||||
json_ld = self._search_json_ld(webpage, video_id, expected_type='VideoObject', default={})
|
||||
data = self._download_json(
|
||||
f'https://api3.fox.com/v2.0/vodplayer/sportsclip/{video_id}',
|
||||
video_id, note='Downloading API JSON', headers={
|
||||
'x-api-key': 'cf289e299efdfa39fb6316f259d1de93',
|
||||
})
|
||||
preplay_url = self._request_webpage(
|
||||
HEADRequest(data['url']), video_id, 'Fetching preplay URL').geturl()
|
||||
|
||||
return self.url_result(
|
||||
'https://feed.theplatform.com/f/BKQ29B/foxsports-all?byId=' + video_id, 'ThePlatformFeed')
|
||||
return {
|
||||
'_type': 'url_transparent',
|
||||
'ie_key': UplynkPreplayIE.ie_key(),
|
||||
'url': smuggle_url(preplay_url, {'Origin': 'https://www.foxsports.com'}),
|
||||
'display_id': video_id,
|
||||
'title': data.get('name') or json_ld.get('title'),
|
||||
'description': data.get('description') or json_ld.get('description'),
|
||||
'duration': float_or_none(data.get('durationInSeconds')),
|
||||
'timestamp': json_ld.get('timestamp'),
|
||||
'thumbnails': json_ld.get('thumbnails'),
|
||||
'_old_archive_ids': [make_archive_id(self, video_id)],
|
||||
}
|
||||
|
|
|
@ -52,6 +52,7 @@ class FreesoundIE(InfoExtractor):
|
|||
tags_str = get_element_by_class('tags', webpage)
|
||||
tags = re.findall(r'<a[^>]+>([^<]+)', tags_str) if tags_str else None
|
||||
|
||||
audio_url = re.sub(r'^https?://freesound\.org(https?://)', r'\1', audio_url)
|
||||
audio_urls = [audio_url]
|
||||
|
||||
LQ_FORMAT = '-lq.mp3'
|
||||
|
|
|
@ -210,7 +210,7 @@ class FunimationIE(FunimationBaseIE):
|
|||
page = self._download_json(
|
||||
'https://www.funimation.com/api/showexperience/%s/' % experience_id,
|
||||
display_id, headers=headers, expected_status=403, query={
|
||||
'pinst_id': ''.join([random.choice(string.digits + string.ascii_letters) for _ in range(8)]),
|
||||
'pinst_id': ''.join(random.choices(string.digits + string.ascii_letters, k=8)),
|
||||
}, note=f'Downloading {format_name} JSON')
|
||||
sources = page.get('items') or []
|
||||
if not sources:
|
||||
|
|
|
@ -32,6 +32,7 @@ from ..utils import (
|
|||
unified_timestamp,
|
||||
unsmuggle_url,
|
||||
url_or_none,
|
||||
urljoin,
|
||||
variadic,
|
||||
xpath_attr,
|
||||
xpath_text,
|
||||
|
@ -886,20 +887,6 @@ class GenericIE(InfoExtractor):
|
|||
'thumbnail': r're:^https?://.*\.jpg$',
|
||||
},
|
||||
},
|
||||
{
|
||||
# JWPlayer config passed as variable
|
||||
'url': 'http://www.txxx.com/videos/3326530/ariele/',
|
||||
'info_dict': {
|
||||
'id': '3326530_hq',
|
||||
'ext': 'mp4',
|
||||
'title': 'ARIELE | Tube Cup',
|
||||
'uploader': 'www.txxx.com',
|
||||
'age_limit': 18,
|
||||
},
|
||||
'params': {
|
||||
'skip_download': True,
|
||||
}
|
||||
},
|
||||
{
|
||||
# Video.js embed, multiple formats
|
||||
'url': 'http://ortcam.com/solidworks-урок-6-настройка-чертежа_33f9b7351.html',
|
||||
|
@ -1570,19 +1557,6 @@ class GenericIE(InfoExtractor):
|
|||
},
|
||||
'add_ie': ['WashingtonPost'],
|
||||
},
|
||||
{
|
||||
# Mediaset embed
|
||||
'url': 'http://www.tgcom24.mediaset.it/politica/serracchiani-voglio-vivere-in-una-societa-aperta-reazioni-sproporzionate-_3071354-201702a.shtml',
|
||||
'info_dict': {
|
||||
'id': '720642',
|
||||
'ext': 'mp4',
|
||||
'title': 'Serracchiani: "Voglio vivere in una società aperta, con tutela del patto di fiducia"',
|
||||
},
|
||||
'params': {
|
||||
'skip_download': True,
|
||||
},
|
||||
'add_ie': ['Mediaset'],
|
||||
},
|
||||
{
|
||||
# JOJ.sk embeds
|
||||
'url': 'https://www.noviny.sk/slovensko/238543-slovenskom-sa-prehnala-vlna-silnych-burok',
|
||||
|
@ -1887,11 +1861,6 @@ class GenericIE(InfoExtractor):
|
|||
'title': 'I AM BIO Podcast | BIO',
|
||||
},
|
||||
'playlist_mincount': 52,
|
||||
},
|
||||
{
|
||||
# Sibnet embed (https://help.sibnet.ru/?sibnet_video_embed)
|
||||
'url': 'https://phpbb3.x-tk.ru/bbcode-video-sibnet-t24.html',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
# WimTv embed player
|
||||
'url': 'http://www.msmotor.tv/wearefmi-pt-2-2021/',
|
||||
|
@ -1908,11 +1877,13 @@ class GenericIE(InfoExtractor):
|
|||
'display_id': 'kelis-4th-of-july',
|
||||
'ext': 'mp4',
|
||||
'title': 'Kelis - 4th Of July',
|
||||
'thumbnail': 'https://kvs-demo.com/contents/videos_screenshots/0/105/preview.jpg',
|
||||
'description': 'Kelis - 4th Of July',
|
||||
'thumbnail': r're:https://(?:www\.)?kvs-demo.com/contents/videos_screenshots/0/105/preview.jpg',
|
||||
},
|
||||
'params': {
|
||||
'skip_download': True,
|
||||
},
|
||||
'expected_warnings': ['Untested major version'],
|
||||
}, {
|
||||
# KVS Player
|
||||
'url': 'https://www.kvs-demo.com/embed/105/',
|
||||
|
@ -1921,35 +1892,12 @@ class GenericIE(InfoExtractor):
|
|||
'display_id': 'kelis-4th-of-july',
|
||||
'ext': 'mp4',
|
||||
'title': 'Kelis - 4th Of July / Embed Player',
|
||||
'thumbnail': 'https://kvs-demo.com/contents/videos_screenshots/0/105/preview.jpg',
|
||||
'thumbnail': r're:https://(?:www\.)?kvs-demo.com/contents/videos_screenshots/0/105/preview.jpg',
|
||||
},
|
||||
'params': {
|
||||
'skip_download': True,
|
||||
},
|
||||
}, {
|
||||
# KVS Player
|
||||
'url': 'https://thisvid.com/videos/french-boy-pantsed/',
|
||||
'md5': '3397979512c682f6b85b3b04989df224',
|
||||
'info_dict': {
|
||||
'id': '2400174',
|
||||
'display_id': 'french-boy-pantsed',
|
||||
'ext': 'mp4',
|
||||
'title': 'French Boy Pantsed - ThisVid.com',
|
||||
'thumbnail': 'https://media.thisvid.com/contents/videos_screenshots/2400000/2400174/preview.mp4.jpg',
|
||||
}
|
||||
}, {
|
||||
# KVS Player
|
||||
'url': 'https://thisvid.com/embed/2400174/',
|
||||
'md5': '3397979512c682f6b85b3b04989df224',
|
||||
'info_dict': {
|
||||
'id': '2400174',
|
||||
'display_id': 'french-boy-pantsed',
|
||||
'ext': 'mp4',
|
||||
'title': 'French Boy Pantsed - ThisVid.com',
|
||||
'thumbnail': 'https://media.thisvid.com/contents/videos_screenshots/2400000/2400174/preview.mp4.jpg',
|
||||
}
|
||||
}, {
|
||||
# KVS Player
|
||||
'url': 'https://youix.com/video/leningrad-zoj/',
|
||||
'md5': '94f96ba95706dc3880812b27b7d8a2b8',
|
||||
'info_dict': {
|
||||
|
@ -1957,8 +1905,8 @@ class GenericIE(InfoExtractor):
|
|||
'display_id': 'leningrad-zoj',
|
||||
'ext': 'mp4',
|
||||
'title': 'Клип: Ленинград - ЗОЖ скачать, смотреть онлайн | Youix.com',
|
||||
'thumbnail': 'https://youix.com/contents/videos_screenshots/18000/18485/preview_480x320_youix_com.mp4.jpg',
|
||||
}
|
||||
'thumbnail': r're:https://youix.com/contents/videos_screenshots/18000/18485/preview(?:_480x320_youix_com.mp4)?\.jpg',
|
||||
},
|
||||
}, {
|
||||
# KVS Player
|
||||
'url': 'https://youix.com/embed/18485',
|
||||
|
@ -1968,19 +1916,20 @@ class GenericIE(InfoExtractor):
|
|||
'display_id': 'leningrad-zoj',
|
||||
'ext': 'mp4',
|
||||
'title': 'Ленинград - ЗОЖ',
|
||||
'thumbnail': 'https://youix.com/contents/videos_screenshots/18000/18485/preview_480x320_youix_com.mp4.jpg',
|
||||
}
|
||||
'thumbnail': r're:https://youix.com/contents/videos_screenshots/18000/18485/preview(?:_480x320_youix_com.mp4)?\.jpg',
|
||||
},
|
||||
}, {
|
||||
# KVS Player
|
||||
'url': 'https://bogmedia.org/videos/21217/40-nochey-40-nights-2016/',
|
||||
'md5': '94166bdb26b4cb1fb9214319a629fc51',
|
||||
'info_dict': {
|
||||
'id': '21217',
|
||||
'display_id': '40-nochey-40-nights-2016',
|
||||
'display_id': '40-nochey-2016',
|
||||
'ext': 'mp4',
|
||||
'title': '40 ночей (2016) - BogMedia.org',
|
||||
'description': 'md5:4e6d7d622636eb7948275432eb256dc3',
|
||||
'thumbnail': 'https://bogmedia.org/contents/videos_screenshots/21000/21217/preview_480p.mp4.jpg',
|
||||
}
|
||||
},
|
||||
},
|
||||
{
|
||||
# KVS Player (for sites that serve kt_player.js via non-https urls)
|
||||
|
@ -1991,8 +1940,8 @@ class GenericIE(InfoExtractor):
|
|||
'display_id': 'syren-de-mer-onlyfans-05-07-2020have-a-happy-safe-holiday5f014e68a220979bdb8cd-source',
|
||||
'ext': 'mp4',
|
||||
'title': 'Syren De Mer onlyfans_05-07-2020Have_a_happy_safe_holiday5f014e68a220979bdb8cd_source / Embed плеер',
|
||||
'thumbnail': 'http://www.camhub.world/contents/videos_screenshots/389000/389508/preview.mp4.jpg',
|
||||
}
|
||||
'thumbnail': r're:https?://www\.camhub\.world/contents/videos_screenshots/389000/389508/preview\.mp4\.jpg',
|
||||
},
|
||||
},
|
||||
{
|
||||
# Reddit-hosted video that will redirect and be processed by RedditIE
|
||||
|
@ -2195,7 +2144,52 @@ class GenericIE(InfoExtractor):
|
|||
'age_limit': 0,
|
||||
'direct': True,
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
'note': 'server returns data in brotli compression by default if `accept-encoding: *` is specified.',
|
||||
'url': 'https://www.extra.cz/cauky-lidi-70-dil-babis-predstavil-pohadky-prymulanek-nebo-andrejovy-nove-saty-ac867',
|
||||
'info_dict': {
|
||||
'id': 'cauky-lidi-70-dil-babis-predstavil-pohadky-prymulanek-nebo-andrejovy-nove-saty-ac867',
|
||||
'ext': 'mp4',
|
||||
'title': 'čauky lidi 70 finall',
|
||||
'description': 'čauky lidi 70 finall',
|
||||
'thumbnail': 'h',
|
||||
'upload_date': '20220606',
|
||||
'timestamp': 1654513791,
|
||||
'duration': 318.0,
|
||||
'direct': True,
|
||||
'age_limit': 0,
|
||||
},
|
||||
},
|
||||
{
|
||||
'note': 'JW Player embed with unicode-escape sequences in URL',
|
||||
'url': 'https://www.medici.tv/en/concerts/lahav-shani-mozart-mahler-israel-philharmonic-abu-dhabi-classics',
|
||||
'info_dict': {
|
||||
'id': 'm',
|
||||
'ext': 'mp4',
|
||||
'title': 'Lahav Shani conducts the Israel Philharmonic\'s first-ever concert in Abu Dhabi',
|
||||
'description': 'Mahler\'s ',
|
||||
'uploader': 'www.medici.tv',
|
||||
'age_limit': 0,
|
||||
'thumbnail': r're:^https?://.+\.jpg',
|
||||
},
|
||||
'params': {
|
||||
'skip_download': True,
|
||||
},
|
||||
},
|
||||
{
|
||||
'url': 'https://shooshtime.com/videos/284002/just-out-of-the-shower-joi/',
|
||||
'md5': 'e2f0a4c329f7986280b7328e24036d60',
|
||||
'info_dict': {
|
||||
'id': '284002',
|
||||
'display_id': 'just-out-of-the-shower-joi',
|
||||
'ext': 'mp4',
|
||||
'title': 'Just Out Of The Shower JOI - Shooshtime',
|
||||
'thumbnail': 'https://i.shoosh.co/contents/videos_screenshots/284000/284002/preview.mp4.jpg',
|
||||
'height': 720,
|
||||
'age_limit': 18,
|
||||
},
|
||||
},
|
||||
]
|
||||
|
||||
def report_following_redirect(self, new_url):
|
||||
|
@ -2212,6 +2206,13 @@ class GenericIE(InfoExtractor):
|
|||
|
||||
self._downloader.write_debug(f'Identified {num} {name}{format_field(note, None, "; %s")}')
|
||||
|
||||
def _fragment_query(self, url):
|
||||
if self._configuration_arg('fragment_query'):
|
||||
query_string = urllib.parse.urlparse(url).query
|
||||
if query_string:
|
||||
return {'extra_param_to_segment_url': query_string}
|
||||
return {}
|
||||
|
||||
def _extract_rss(self, url, video_id, doc):
|
||||
NS_MAP = {
|
||||
'itunes': 'http://www.itunes.com/dtds/podcast-1.0.dtd',
|
||||
|
@ -2259,43 +2260,87 @@ class GenericIE(InfoExtractor):
|
|||
'entries': entries,
|
||||
}
|
||||
|
||||
def _kvs_getrealurl(self, video_url, license_code):
|
||||
@classmethod
|
||||
def _kvs_get_real_url(cls, video_url, license_code):
|
||||
if not video_url.startswith('function/0/'):
|
||||
return video_url # not obfuscated
|
||||
|
||||
url_path, _, url_query = video_url.partition('?')
|
||||
urlparts = url_path.split('/')[2:]
|
||||
license = self._kvs_getlicensetoken(license_code)
|
||||
newmagic = urlparts[5][:32]
|
||||
parsed = urllib.parse.urlparse(video_url[len('function/0/'):])
|
||||
license = cls._kvs_get_license_token(license_code)
|
||||
urlparts = parsed.path.split('/')
|
||||
|
||||
for o in range(len(newmagic) - 1, -1, -1):
|
||||
new = ''
|
||||
l = (o + sum(int(n) for n in license[o:])) % 32
|
||||
HASH_LENGTH = 32
|
||||
hash = urlparts[3][:HASH_LENGTH]
|
||||
indices = list(range(HASH_LENGTH))
|
||||
|
||||
for i in range(0, len(newmagic)):
|
||||
if i == o:
|
||||
new += newmagic[l]
|
||||
elif i == l:
|
||||
new += newmagic[o]
|
||||
else:
|
||||
new += newmagic[i]
|
||||
newmagic = new
|
||||
# Swap indices of hash according to the destination calculated from the license token
|
||||
accum = 0
|
||||
for src in reversed(range(HASH_LENGTH)):
|
||||
accum += license[src]
|
||||
dest = (src + accum) % HASH_LENGTH
|
||||
indices[src], indices[dest] = indices[dest], indices[src]
|
||||
|
||||
urlparts[5] = newmagic + urlparts[5][32:]
|
||||
return '/'.join(urlparts) + '?' + url_query
|
||||
urlparts[3] = ''.join(hash[index] for index in indices) + urlparts[3][HASH_LENGTH:]
|
||||
return urllib.parse.urlunparse(parsed._replace(path='/'.join(urlparts)))
|
||||
|
||||
def _kvs_getlicensetoken(self, license):
|
||||
modlicense = license.replace('$', '').replace('0', '1')
|
||||
center = int(len(modlicense) / 2)
|
||||
@staticmethod
|
||||
def _kvs_get_license_token(license):
|
||||
license = license.replace('$', '')
|
||||
license_values = [int(char) for char in license]
|
||||
|
||||
modlicense = license.replace('0', '1')
|
||||
center = len(modlicense) // 2
|
||||
fronthalf = int(modlicense[:center + 1])
|
||||
backhalf = int(modlicense[center:])
|
||||
modlicense = str(4 * abs(fronthalf - backhalf))[:center + 1]
|
||||
|
||||
modlicense = str(4 * abs(fronthalf - backhalf))
|
||||
retval = ''
|
||||
for o in range(0, center + 1):
|
||||
for i in range(1, 5):
|
||||
retval += str((int(license[o + i]) + int(modlicense[o])) % 10)
|
||||
return retval
|
||||
return [
|
||||
(license_values[index + offset] + current) % 10
|
||||
for index, current in enumerate(map(int, modlicense))
|
||||
for offset in range(4)
|
||||
]
|
||||
|
||||
def _extract_kvs(self, url, webpage, video_id):
|
||||
flashvars = self._search_json(
|
||||
r'(?s:<script\b[^>]*>.*?var\s+flashvars\s*=)',
|
||||
webpage, 'flashvars', video_id, transform_source=js_to_json)
|
||||
|
||||
# extract the part after the last / as the display_id from the
|
||||
# canonical URL.
|
||||
display_id = self._search_regex(
|
||||
r'(?:<link href="https?://[^"]+/(.+?)/?" rel="canonical"\s*/?>'
|
||||
r'|<link rel="canonical" href="https?://[^"]+/(.+?)/?"\s*/?>)',
|
||||
webpage, 'display_id', fatal=False)
|
||||
title = self._html_search_regex(r'<(?:h1|title)>(?:Video: )?(.+?)</(?:h1|title)>', webpage, 'title')
|
||||
|
||||
thumbnail = flashvars['preview_url']
|
||||
if thumbnail.startswith('//'):
|
||||
protocol, _, _ = url.partition('/')
|
||||
thumbnail = protocol + thumbnail
|
||||
|
||||
url_keys = list(filter(re.compile(r'^video_(?:url|alt_url\d*)$').match, flashvars.keys()))
|
||||
formats = []
|
||||
for key in url_keys:
|
||||
if '/get_file/' not in flashvars[key]:
|
||||
continue
|
||||
format_id = flashvars.get(f'{key}_text', key)
|
||||
formats.append({
|
||||
'url': urljoin(url, self._kvs_get_real_url(flashvars[key], flashvars['license_code'])),
|
||||
'format_id': format_id,
|
||||
'ext': 'mp4',
|
||||
**(parse_resolution(format_id) or parse_resolution(flashvars[key])),
|
||||
'http_headers': {'Referer': url},
|
||||
})
|
||||
if not formats[-1].get('height'):
|
||||
formats[-1]['quality'] = 1
|
||||
|
||||
return {
|
||||
'id': flashvars['video_id'],
|
||||
'display_id': display_id,
|
||||
'title': title,
|
||||
'thumbnail': thumbnail,
|
||||
'formats': formats,
|
||||
}
|
||||
|
||||
def _real_extract(self, url):
|
||||
if url.startswith('//'):
|
||||
|
@ -2351,7 +2396,7 @@ class GenericIE(InfoExtractor):
|
|||
# It may probably better to solve this by checking Content-Type for application/octet-stream
|
||||
# after a HEAD request, but not sure if we can rely on this.
|
||||
full_response = self._request_webpage(url, video_id, headers={
|
||||
'Accept-Encoding': '*',
|
||||
'Accept-Encoding': 'identity',
|
||||
**smuggled_data.get('http_headers', {})
|
||||
})
|
||||
new_url = full_response.geturl()
|
||||
|
@ -2379,8 +2424,10 @@ class GenericIE(InfoExtractor):
|
|||
subtitles = {}
|
||||
if format_id.endswith('mpegurl'):
|
||||
formats, subtitles = self._extract_m3u8_formats_and_subtitles(url, video_id, 'mp4', headers=headers)
|
||||
info_dict.update(self._fragment_query(url))
|
||||
elif format_id.endswith('mpd') or format_id.endswith('dash+xml'):
|
||||
formats, subtitles = self._extract_mpd_formats_and_subtitles(url, video_id, headers=headers)
|
||||
info_dict.update(self._fragment_query(url))
|
||||
elif format_id == 'f4m':
|
||||
formats = self._extract_f4m_formats(url, video_id, headers=headers)
|
||||
else:
|
||||
|
@ -2393,7 +2440,7 @@ class GenericIE(InfoExtractor):
|
|||
info_dict.update({
|
||||
'formats': formats,
|
||||
'subtitles': subtitles,
|
||||
'http_headers': headers,
|
||||
'http_headers': headers or None,
|
||||
})
|
||||
return info_dict
|
||||
|
||||
|
@ -2407,6 +2454,7 @@ class GenericIE(InfoExtractor):
|
|||
if first_bytes.startswith(b'#EXTM3U'):
|
||||
self.report_detected('M3U playlist')
|
||||
info_dict['formats'], info_dict['subtitles'] = self._extract_m3u8_formats_and_subtitles(url, video_id, 'mp4')
|
||||
info_dict.update(self._fragment_query(url))
|
||||
return info_dict
|
||||
|
||||
# Maybe it's a direct link to a video?
|
||||
|
@ -2457,6 +2505,7 @@ class GenericIE(InfoExtractor):
|
|||
doc,
|
||||
mpd_base_url=full_response.geturl().rpartition('/')[0],
|
||||
mpd_url=url)
|
||||
info_dict.update(self._fragment_query(url))
|
||||
self.report_detected('DASH manifest')
|
||||
return info_dict
|
||||
elif re.match(r'^{http://ns\.adobe\.com/f4m/[12]\.0}manifest$', doc.tag):
|
||||
|
@ -2569,7 +2618,10 @@ class GenericIE(InfoExtractor):
|
|||
m3u8_id='hls', fatal=False)
|
||||
formats.extend(fmts)
|
||||
self._merge_subtitles(subs, target=subtitles)
|
||||
else:
|
||||
for fmt in formats:
|
||||
fmt.update(self._fragment_query(src))
|
||||
|
||||
if not formats:
|
||||
formats.append({
|
||||
'url': src,
|
||||
'ext': (mimetype2ext(src_type)
|
||||
|
@ -2597,6 +2649,17 @@ class GenericIE(InfoExtractor):
|
|||
self.report_detected('video.js embed')
|
||||
return [{'formats': formats, 'subtitles': subtitles}]
|
||||
|
||||
# Look for generic KVS player (before json-ld bc of some urls that break otherwise)
|
||||
found = self._search_regex((
|
||||
r'<script\b[^>]+?\bsrc\s*=\s*(["\'])https?://(?:(?!\1)[^?#])+/kt_player\.js\?v=(?P<ver>\d+(?:\.\d+)+)\1[^>]*>',
|
||||
r'kt_player\s*\(\s*(["\'])(?:(?!\1)[\w\W])+\1\s*,\s*(["\'])https?://(?:(?!\2)[^?#])+/kt_player\.swf\?v=(?P<ver>\d+(?:\.\d+)+)\2\s*,',
|
||||
), webpage, 'KVS player', group='ver', default=False)
|
||||
if found:
|
||||
self.report_detected('KVS Player')
|
||||
if found.split('.')[0] not in ('4', '5', '6'):
|
||||
self.report_warning(f'Untested major version ({found}) in player engine - download may fail.')
|
||||
return [self._extract_kvs(url, webpage, video_id)]
|
||||
|
||||
# Looking for http://schema.org/VideoObject
|
||||
json_ld = self._search_json_ld(webpage, video_id, default={})
|
||||
if json_ld.get('url') not in (url, None):
|
||||
|
@ -2639,52 +2702,6 @@ class GenericIE(InfoExtractor):
|
|||
['"]?file['"]?\s*:\s*["\'](.*?)["\']''', webpage))
|
||||
if found:
|
||||
self.report_detected('JW Player embed')
|
||||
if not found:
|
||||
# Look for generic KVS player
|
||||
found = re.search(r'<script [^>]*?src="https?://.+?/kt_player\.js\?v=(?P<ver>(?P<maj_ver>\d+)(\.\d+)+)".*?>', webpage)
|
||||
if found:
|
||||
self.report_detected('KWS Player')
|
||||
if found.group('maj_ver') not in ['4', '5']:
|
||||
self.report_warning('Untested major version (%s) in player engine--Download may fail.' % found.group('ver'))
|
||||
flashvars = re.search(r'(?ms)<script.*?>.*?var\s+flashvars\s*=\s*(\{.*?\});.*?</script>', webpage)
|
||||
flashvars = self._parse_json(flashvars.group(1), video_id, transform_source=js_to_json)
|
||||
|
||||
# extract the part after the last / as the display_id from the
|
||||
# canonical URL.
|
||||
display_id = self._search_regex(
|
||||
r'(?:<link href="https?://[^"]+/(.+?)/?" rel="canonical"\s*/?>'
|
||||
r'|<link rel="canonical" href="https?://[^"]+/(.+?)/?"\s*/?>)',
|
||||
webpage, 'display_id', fatal=False
|
||||
)
|
||||
title = self._html_search_regex(r'<(?:h1|title)>(?:Video: )?(.+?)</(?:h1|title)>', webpage, 'title')
|
||||
|
||||
thumbnail = flashvars['preview_url']
|
||||
if thumbnail.startswith('//'):
|
||||
protocol, _, _ = url.partition('/')
|
||||
thumbnail = protocol + thumbnail
|
||||
|
||||
url_keys = list(filter(re.compile(r'video_url|video_alt_url\d*').fullmatch, flashvars.keys()))
|
||||
formats = []
|
||||
for key in url_keys:
|
||||
if '/get_file/' not in flashvars[key]:
|
||||
continue
|
||||
format_id = flashvars.get(f'{key}_text', key)
|
||||
formats.append({
|
||||
'url': self._kvs_getrealurl(flashvars[key], flashvars['license_code']),
|
||||
'format_id': format_id,
|
||||
'ext': 'mp4',
|
||||
**(parse_resolution(format_id) or parse_resolution(flashvars[key]))
|
||||
})
|
||||
if not formats[-1].get('height'):
|
||||
formats[-1]['quality'] = 1
|
||||
|
||||
return [{
|
||||
'id': flashvars['video_id'],
|
||||
'display_id': display_id,
|
||||
'title': title,
|
||||
'thumbnail': thumbnail,
|
||||
'formats': formats,
|
||||
}]
|
||||
if not found:
|
||||
# Broaden the search a little bit
|
||||
found = filter_video(re.findall(r'[^A-Za-z0-9]?(?:file|source)=(http[^\'"&]*)', webpage))
|
||||
|
@ -2765,6 +2782,7 @@ class GenericIE(InfoExtractor):
|
|||
|
||||
entries = []
|
||||
for video_url in orderedSet(found):
|
||||
video_url = video_url.encode().decode('unicode-escape')
|
||||
video_url = unescapeHTML(video_url)
|
||||
video_url = video_url.replace('\\/', '/')
|
||||
video_url = urllib.parse.urljoin(url, video_url)
|
||||
|
@ -2804,8 +2822,10 @@ class GenericIE(InfoExtractor):
|
|||
return [self._extract_xspf_playlist(video_url, video_id)]
|
||||
elif ext == 'm3u8':
|
||||
entry_info_dict['formats'], entry_info_dict['subtitles'] = self._extract_m3u8_formats_and_subtitles(video_url, video_id, ext='mp4', headers=headers)
|
||||
entry_info_dict.update(self._fragment_query(video_url))
|
||||
elif ext == 'mpd':
|
||||
entry_info_dict['formats'], entry_info_dict['subtitles'] = self._extract_mpd_formats_and_subtitles(video_url, video_id, headers=headers)
|
||||
entry_info_dict.update(self._fragment_query(video_url))
|
||||
elif ext == 'f4m':
|
||||
entry_info_dict['formats'] = self._extract_f4m_formats(video_url, video_id, headers=headers)
|
||||
elif re.search(r'(?i)\.(?:ism|smil)/manifest', video_url) and video_url != url:
|
||||
|
|
|
@ -76,11 +76,11 @@ class GoPlayIE(InfoExtractor):
|
|||
}
|
||||
|
||||
api = self._download_json(
|
||||
f'https://api.viervijfzes.be/content/{video_id}',
|
||||
video_id, headers={'Authorization': self._id_token})
|
||||
f'https://api.goplay.be/web/v1/videos/long-form/{video_id}',
|
||||
video_id, headers={'Authorization': 'Bearer %s' % self._id_token})
|
||||
|
||||
formats, subs = self._extract_m3u8_formats_and_subtitles(
|
||||
api['video']['S'], video_id, ext='mp4', m3u8_id='HLS')
|
||||
api['manifestUrls']['hls'], video_id, ext='mp4', m3u8_id='HLS')
|
||||
|
||||
info_dict.update({
|
||||
'id': video_id,
|
||||
|
|
|
@ -9,15 +9,26 @@ from ..utils import (
|
|||
|
||||
|
||||
class GronkhIE(InfoExtractor):
|
||||
_VALID_URL = r'https?://(?:www\.)?gronkh\.tv/(?:watch/)?stream/(?P<id>\d+)'
|
||||
_VALID_URL = r'https?://(?:www\.)?gronkh\.tv/(?:watch/)?streams?/(?P<id>\d+)'
|
||||
|
||||
_TESTS = [{
|
||||
'url': 'https://gronkh.tv/streams/657',
|
||||
'info_dict': {
|
||||
'id': '657',
|
||||
'ext': 'mp4',
|
||||
'title': 'H.O.R.D.E. - DAS ZWEiTE ZEiTALTER 🎲 Session 1',
|
||||
'view_count': int,
|
||||
'thumbnail': 'https://01.cdn.vod.farm/preview/9e2555d3a23bf4e5c5b7c6b3b70a9d84.jpg',
|
||||
'upload_date': '20221111'
|
||||
},
|
||||
'params': {'skip_download': True}
|
||||
}, {
|
||||
'url': 'https://gronkh.tv/stream/536',
|
||||
'info_dict': {
|
||||
'id': '536',
|
||||
'ext': 'mp4',
|
||||
'title': 'GTV0536, 2021-10-01 - MARTHA IS DEAD #FREiAB1830 !FF7 !horde !archiv',
|
||||
'view_count': 19491,
|
||||
'view_count': int,
|
||||
'thumbnail': 'https://01.cdn.vod.farm/preview/6436746cce14e25f751260a692872b9b.jpg',
|
||||
'upload_date': '20211001'
|
||||
},
|
||||
|
|
|
@ -148,6 +148,12 @@ class HotStarIE(HotStarBaseIE):
|
|||
'dr': 'dynamic_range',
|
||||
}
|
||||
|
||||
_TAG_FIELDS = {
|
||||
'language': 'language',
|
||||
'acodec': 'audio_codec',
|
||||
'vcodec': 'video_codec',
|
||||
}
|
||||
|
||||
@classmethod
|
||||
def _video_url(cls, video_id, video_type=None, *, slug='ignore_me', root=None):
|
||||
assert None in (video_type, root)
|
||||
|
@ -182,24 +188,22 @@ class HotStarIE(HotStarBaseIE):
|
|||
for key, prefix in self._IGNORE_MAP.items()
|
||||
for ignore in self._configuration_arg(key)):
|
||||
continue
|
||||
tag_dict = dict((t.split(':', 1) + [None])[:2] for t in tags.split(';'))
|
||||
|
||||
format_url = url_or_none(playback_set.get('playbackUrl'))
|
||||
if not format_url:
|
||||
continue
|
||||
format_url = re.sub(r'(?<=//staragvod)(\d)', r'web\1', format_url)
|
||||
dr = re.search(r'dynamic_range:(?P<dr>[a-z]+)', playback_set.get('tagsCombination')).group('dr')
|
||||
ext = determine_ext(format_url)
|
||||
|
||||
current_formats, current_subs = [], {}
|
||||
try:
|
||||
if 'package:hls' in tags or ext == 'm3u8':
|
||||
current_formats, current_subs = self._extract_m3u8_formats_and_subtitles(
|
||||
format_url, video_id, 'mp4',
|
||||
entry_protocol='m3u8_native',
|
||||
m3u8_id=f'{dr}-hls', headers=headers)
|
||||
format_url, video_id, ext='mp4', headers=headers)
|
||||
elif 'package:dash' in tags or ext == 'mpd':
|
||||
current_formats, current_subs = self._extract_mpd_formats_and_subtitles(
|
||||
format_url, video_id, mpd_id=f'{dr}-dash', headers=headers)
|
||||
format_url, video_id, headers=headers)
|
||||
elif ext == 'f4m':
|
||||
pass # XXX: produce broken files
|
||||
else:
|
||||
|
@ -213,20 +217,32 @@ class HotStarIE(HotStarBaseIE):
|
|||
geo_restricted = True
|
||||
continue
|
||||
|
||||
if tags and 'encryption:plain' not in tags:
|
||||
if tag_dict.get('encryption') not in ('plain', None):
|
||||
for f in current_formats:
|
||||
f['has_drm'] = True
|
||||
if tags and 'language' in tags:
|
||||
lang = re.search(r'language:(?P<lang>[a-z]+)', tags).group('lang')
|
||||
for f in current_formats:
|
||||
if not f.get('langauge'):
|
||||
f['language'] = lang
|
||||
for k, v in self._TAG_FIELDS.items():
|
||||
if not f.get(k):
|
||||
f[k] = tag_dict.get(v)
|
||||
if f.get('vcodec') != 'none' and not f.get('dynamic_range'):
|
||||
f['dynamic_range'] = tag_dict.get('dynamic_range')
|
||||
if f.get('acodec') != 'none' and not f.get('audio_channels'):
|
||||
f['audio_channels'] = {
|
||||
'stereo': 2,
|
||||
'dolby51': 6,
|
||||
}.get(tag_dict.get('audio_channel'))
|
||||
f['format_note'] = join_nonempty(
|
||||
tag_dict.get('ladder'),
|
||||
tag_dict.get('audio_channel') if f.get('acodec') != 'none' else None,
|
||||
f.get('format_note'),
|
||||
delim=', ')
|
||||
|
||||
formats.extend(current_formats)
|
||||
subs = self._merge_subtitles(subs, current_subs)
|
||||
|
||||
if not formats and geo_restricted:
|
||||
self.raise_geo_restricted(countries=['IN'], metadata_available=True)
|
||||
self._remove_duplicate_formats(formats)
|
||||
for f in formats:
|
||||
f.setdefault('http_headers', {}).update(headers)
|
||||
|
||||
|
@ -235,7 +251,7 @@ class HotStarIE(HotStarBaseIE):
|
|||
'title': video_data.get('title'),
|
||||
'description': video_data.get('description'),
|
||||
'duration': int_or_none(video_data.get('duration')),
|
||||
'timestamp': int_or_none(video_data.get('broadcastDate') or video_data.get('startDate')),
|
||||
'timestamp': int_or_none(traverse_obj(video_data, 'broadcastDate', 'startDate')),
|
||||
'formats': formats,
|
||||
'subtitles': subs,
|
||||
'channel': video_data.get('channelName'),
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
import hashlib
|
||||
import random
|
||||
import re
|
||||
|
||||
from ..compat import compat_urlparse, compat_b64decode
|
||||
|
||||
|
@ -37,7 +38,7 @@ class HuyaLiveIE(InfoExtractor):
|
|||
}]
|
||||
|
||||
_RESOLUTION = {
|
||||
'蓝光4M': {
|
||||
'蓝光': {
|
||||
'width': 1920,
|
||||
'height': 1080,
|
||||
},
|
||||
|
@ -76,11 +77,15 @@ class HuyaLiveIE(InfoExtractor):
|
|||
if re_secret:
|
||||
fm, ss = self.encrypt(params, stream_info, stream_name)
|
||||
for si in stream_data.get('vMultiStreamInfo'):
|
||||
display_name, bitrate = re.fullmatch(
|
||||
r'(.+?)(?:(\d+)M)?', si.get('sDisplayName')).groups()
|
||||
rate = si.get('iBitRate')
|
||||
if rate:
|
||||
params['ratio'] = rate
|
||||
else:
|
||||
params.pop('ratio', None)
|
||||
if bitrate:
|
||||
rate = int(bitrate) * 1000
|
||||
if re_secret:
|
||||
params['wsSecret'] = hashlib.md5(
|
||||
'_'.join([fm, params['u'], stream_name, ss, params['wsTime']]))
|
||||
|
@ -90,7 +95,7 @@ class HuyaLiveIE(InfoExtractor):
|
|||
'tbr': rate,
|
||||
'url': update_url_query(f'{stream_url}/{stream_name}.{stream_info.get("sFlvUrlSuffix")}',
|
||||
query=params),
|
||||
**self._RESOLUTION.get(si.get('sDisplayName'), {}),
|
||||
**self._RESOLUTION.get(display_name, {}),
|
||||
})
|
||||
|
||||
return {
|
||||
|
|
|
@ -527,11 +527,14 @@ class IqIE(InfoExtractor):
|
|||
webpack_js_url = self._proto_relative_url(self._search_regex(
|
||||
r'<script src="((?:https?)?//stc.iqiyipic.com/_next/static/chunks/webpack-\w+\.js)"', webpage, 'webpack URL'))
|
||||
webpack_js = self._download_webpage(webpack_js_url, video_id, note='Downloading webpack JS', errnote='Unable to download webpack JS')
|
||||
webpack_map1, webpack_map2 = [self._parse_json(js_map, video_id, transform_source=js_to_json) for js_map in self._search_regex(
|
||||
r'\(({[^}]*})\[\w+\][^\)]*\)\s*\+\s*["\']\.["\']\s*\+\s*({[^}]*})\[\w+\]\+["\']\.js', webpack_js, 'JS locations', group=(1, 2))]
|
||||
for module_index in reversed(list(webpack_map2.keys())):
|
||||
webpack_map = self._search_json(
|
||||
r'["\']\s*\+\s*', webpack_js, 'JS locations', video_id,
|
||||
contains_pattern=r'{\s*(?:\d+\s*:\s*["\'][\da-f]+["\']\s*,?\s*)+}',
|
||||
end_pattern=r'\[\w+\]\+["\']\.js', transform_source=js_to_json)
|
||||
|
||||
for module_index in reversed(webpack_map):
|
||||
module_js = self._download_webpage(
|
||||
f'https://stc.iqiyipic.com/_next/static/chunks/{webpack_map1.get(module_index, module_index)}.{webpack_map2[module_index]}.js',
|
||||
f'https://stc.iqiyipic.com/_next/static/chunks/{module_index}.{webpack_map[module_index]}.js',
|
||||
video_id, note=f'Downloading #{module_index} module JS', errnote='Unable to download module JS', fatal=False) or ''
|
||||
if 'vms request' in module_js:
|
||||
self.cache.store('iq', 'player_js', module_js)
|
||||
|
@ -543,11 +546,11 @@ class IqIE(InfoExtractor):
|
|||
self._extract_vms_player_js(webpage, video_id), 'signature function')
|
||||
|
||||
def _update_bid_tags(self, webpage, video_id):
|
||||
extracted_bid_tags = self._parse_json(
|
||||
self._search_regex(
|
||||
r'arguments\[1\][^,]*,\s*function\s*\([^\)]*\)\s*{\s*"use strict";?\s*var \w=({.+}})\s*,\s*\w\s*=\s*{\s*getNewVd',
|
||||
self._extract_vms_player_js(webpage, video_id), 'video tags', default=''),
|
||||
video_id, transform_source=js_to_json, fatal=False)
|
||||
extracted_bid_tags = self._search_json(
|
||||
r'function\s*\([^)]*\)\s*\{\s*"use strict";?\s*var \w\s*=\s*',
|
||||
self._extract_vms_player_js(webpage, video_id), 'video tags', video_id,
|
||||
contains_pattern=r'{\s*\d+\s*:\s*\{\s*nbid\s*:.+}\s*}',
|
||||
end_pattern=r'\s*,\s*\w\s*=\s*\{\s*getNewVd', fatal=False, transform_source=js_to_json)
|
||||
if not extracted_bid_tags:
|
||||
return
|
||||
self._BID_TAGS = {
|
||||
|
|
|
@ -23,9 +23,19 @@ class JojIE(InfoExtractor):
|
|||
'id': 'a388ec4c-6019-4a4a-9312-b1bee194e932',
|
||||
'ext': 'mp4',
|
||||
'title': 'NOVÉ BÝVANIE',
|
||||
'thumbnail': r're:^https?://.*\.jpg$',
|
||||
'thumbnail': r're:^https?://.*?$',
|
||||
'duration': 3118,
|
||||
}
|
||||
}, {
|
||||
'url': 'https://media.joj.sk/embed/CSM0Na0l0p1',
|
||||
'info_dict': {
|
||||
'id': 'CSM0Na0l0p1',
|
||||
'ext': 'mp4',
|
||||
'height': 576,
|
||||
'title': 'Extrémne rodiny 2 - POKRAČOVANIE (2012/04/09 21:30:00)',
|
||||
'duration': 3937,
|
||||
'thumbnail': r're:^https?://.*?$',
|
||||
}
|
||||
}, {
|
||||
'url': 'https://media.joj.sk/embed/9i1cxv',
|
||||
'only_matching': True,
|
||||
|
@ -43,10 +53,10 @@ class JojIE(InfoExtractor):
|
|||
webpage = self._download_webpage(
|
||||
'https://media.joj.sk/embed/%s' % video_id, video_id)
|
||||
|
||||
title = self._search_regex(
|
||||
(r'videoTitle\s*:\s*(["\'])(?P<title>(?:(?!\1).)+)\1',
|
||||
r'<title>(?P<title>[^<]+)'), webpage, 'title',
|
||||
default=None, group='title') or self._og_search_title(webpage)
|
||||
title = (self._search_json(r'videoTitle\s*:', webpage, 'title', video_id,
|
||||
contains_pattern=r'["\'].+["\']', default=None)
|
||||
or self._html_extract_title(webpage, default=None)
|
||||
or self._og_search_title(webpage))
|
||||
|
||||
bitrates = self._parse_json(
|
||||
self._search_regex(
|
||||
|
@ -58,11 +68,13 @@ class JojIE(InfoExtractor):
|
|||
for format_url in try_get(bitrates, lambda x: x['mp4'], list) or []:
|
||||
if isinstance(format_url, compat_str):
|
||||
height = self._search_regex(
|
||||
r'(\d+)[pP]\.', format_url, 'height', default=None)
|
||||
r'(\d+)[pP]|(pal)\.', format_url, 'height', default=None)
|
||||
if height == 'pal':
|
||||
height = 576
|
||||
formats.append({
|
||||
'url': format_url,
|
||||
'format_id': format_field(height, None, '%sp'),
|
||||
'height': int(height),
|
||||
'height': int_or_none(height),
|
||||
})
|
||||
if not formats:
|
||||
playlist = self._download_xml(
|
||||
|
|
|
@ -0,0 +1,66 @@
|
|||
from .common import InfoExtractor
|
||||
from ..utils import (
|
||||
ExtractorError,
|
||||
join_nonempty,
|
||||
traverse_obj,
|
||||
unified_timestamp,
|
||||
update_url_query,
|
||||
)
|
||||
|
||||
|
||||
class Kanal2IE(InfoExtractor):
    # Matches e.g. https://kanal2.postimees.ee/pluss/video/?id=40792 --
    # the numeric "id" query parameter is the video ID.
    _VALID_URL = r'https?://kanal2\.postimees\.ee/[^?#]+\?([^#]+&)?id=(?P<id>\d+)'
    _TESTS = [{
        'note': 'Test standard url (#5575)',
        'url': 'https://kanal2.postimees.ee/pluss/video/?id=40792',
        'md5': '7ea7b16266ec1798743777df241883dd',
        'info_dict': {
            'id': '40792',
            'ext': 'mp4',
            'title': 'Aedniku aabits / Osa 53 (05.08.2016 20:00)',
            'thumbnail': r're:https?://.*\.jpg$',
            'description': 'md5:53cabf3c5d73150d594747f727431248',
            'upload_date': '20160805',
            'timestamp': 1470420000,
        },
    }]

    def _real_extract(self, url):
        """Extract a single episode via the site's playlist JSON endpoint."""
        video_id = self._match_id(url)
        # The X-Requested-With header mimics the site's own AJAX request.
        playlist = self._download_json(
            f'https://kanal2.postimees.ee/player/playlist/{video_id}',
            video_id, query={'type': 'episodes'},
            headers={'X-Requested-With': 'XMLHttpRequest'})

        return {
            'id': video_id,
            # Join show title and episode subtitle as "Title / Subtitle";
            # join_nonempty drops whichever part is missing.
            'title': join_nonempty(*traverse_obj(playlist, ('info', ('title', 'subtitle'))), delim=' / '),
            'description': traverse_obj(playlist, ('info', 'description')),
            'thumbnail': traverse_obj(playlist, ('data', 'image')),
            'formats': self.get_formats(playlist, video_id),
            # The subtitle ends with "(DD.MM.YYYY HH:MM)"; parse that with a
            # fixed +0200 offset (presumably Estonian local time -- TODO
            # confirm DST handling). With default='' the regex miss yields
            # ' +0200' alone, which unified_timestamp turns into None.
            'timestamp': unified_timestamp(self._search_regex(
                r'\((\d{2}\.\d{2}\.\d{4}\s\d{2}:\d{2})\)$',
                traverse_obj(playlist, ('info', 'subtitle')), 'timestamp', default='') + ' +0200'),
        }

    def get_formats(self, playlist, video_id):
        """Register a streaming session and return the HLS formats.

        Raises ExtractorError when the playlist has no stream path or the
        session endpoint does not return a usable session token.
        """
        path = traverse_obj(playlist, ('data', 'path'))
        if not path:
            raise ExtractorError('Path value not found in playlist JSON response')
        # A session token must be requested for the specific stream path and
        # is later appended to each m3u8 URL as the "s" query parameter.
        session = self._download_json(
            'https://sts.postimees.ee/session/register',
            video_id, note='Creating session', errnote='Error creating session',
            headers={
                'X-Original-URI': path,
                'Accept': 'application/json',
            })
        if session.get('reason') != 'OK' or not session.get('session'):
            reason = session.get('reason', 'unknown error')
            raise ExtractorError(f'Unable to obtain session: {reason}')

        formats = []
        for stream in traverse_obj(playlist, ('data', 'streams', ..., 'file')):
            formats.extend(self._extract_m3u8_formats(
                update_url_query(stream, {'s': session['session']}), video_id, 'mp4'))

        return formats
|
|
@ -0,0 +1,48 @@
|
|||
import time
|
||||
import random
|
||||
import string
|
||||
import hashlib
|
||||
import urllib.parse
|
||||
|
||||
from .common import InfoExtractor
|
||||
|
||||
|
||||
class KankaNewsIE(InfoExtractor):
    # Article URLs of the form /a/YYYY-MM-DD/<display_id>.shtml; the real
    # video ID ("omsid") is scraped from the page.
    _VALID_URL = r'https?://(?:www\.)?kankanews\.com/a/\d+\-\d+\-\d+/(?P<id>\d+)\.shtml'
    _TESTS = [{
        'url': 'https://www.kankanews.com/a/2022-11-08/00310276054.shtml?appid=1088227',
        'md5': '05e126513c74b1258d657452a6f4eef9',
        'info_dict': {
            'id': '4485057',
            'url': 'http://mediaplay.kksmg.com/2022/11/08/h264_450k_mp4_1a388ad771e0e4cc28b0da44d245054e_ncm.mp4',
            'ext': 'mp4',
            'title': '视频|第23个中国记者节,我们在进博切蛋糕',
            'thumbnail': r're:^https?://.*\.jpg*',
        }
    }]

    def _real_extract(self, url):
        """Resolve the internal video ID and fetch the signed video API JSON."""
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        # The page embeds the API video ID as: omsid = "<digits>"
        video_id = self._search_regex(r'omsid\s*=\s*"(\d+)"', webpage, 'video id')

        # NOTE: keys are inserted in alphabetical order on purpose, so that
        # urllib.parse.urlencode() below produces the sorted query string the
        # server expects when verifying the signature.
        params = {
            'nonce': ''.join(random.choices(string.ascii_lowercase + string.digits, k=8)),
            'omsid': video_id,
            'platform': 'pc',
            'timestamp': int(time.time()),
            'version': '1.0',
        }
        # Request signature: md5(md5(urlencode(params) + '&<fixed secret>')).
        params['sign'] = hashlib.md5((hashlib.md5((
            urllib.parse.urlencode(params) + '&28c8edde3d61a0411511d3b1866f0636'
        ).encode()).hexdigest()).encode()).hexdigest()

        meta = self._download_json('https://api-app.kankanews.com/kankan/pc/getvideo',
                                   video_id, query=params)['result']['video']

        return {
            'id': video_id,
            # Direct MP4 URL straight from the API response.
            'url': meta['videourl'],
            'title': self._search_regex(r'g\.title\s*=\s*"([^"]+)"', webpage, 'title'),
            'thumbnail': meta.get('titlepic'),
        }
|
|
@ -0,0 +1,127 @@
|
|||
from .common import InfoExtractor
|
||||
|
||||
from ..utils import (
|
||||
HEADRequest,
|
||||
UserNotLive,
|
||||
float_or_none,
|
||||
merge_dicts,
|
||||
str_or_none,
|
||||
traverse_obj,
|
||||
unified_timestamp,
|
||||
url_or_none,
|
||||
)
|
||||
|
||||
|
||||
class KickBaseIE(InfoExtractor):
    """Shared session/API plumbing for the kick.com extractors."""

    def _real_initialize(self):
        """Prime the session and capture the XSRF token cookie for API auth."""
        # Hitting the homepage makes kick.com set the session cookies.
        self._request_webpage(HEADRequest('https://kick.com/'), None, 'Setting up session')
        xsrf_token = self._get_cookies('https://kick.com/').get('XSRF-TOKEN')
        if not xsrf_token:
            self.write_debug('kick.com did not set XSRF-TOKEN cookie')
        # Stored on the class so all Kick extractors share one token; empty
        # when the cookie was not granted (requests are then unauthenticated).
        KickBaseIE._API_HEADERS = {
            'Authorization': f'Bearer {xsrf_token.value}',
            'X-XSRF-TOKEN': xsrf_token.value,
        } if xsrf_token else {}

    def _call_api(self, path, display_id, note='Downloading API JSON', headers=None, **kwargs):
        """Download JSON from the kick.com v1 API with auth headers applied.

        `headers` was previously a mutable default (`{}`); use None as the
        default to avoid the shared-mutable-default pitfall. Passing a dict
        still works exactly as before.
        """
        return self._download_json(
            f'https://kick.com/api/v1/{path}', display_id, note=note,
            headers=merge_dicts(headers or {}, self._API_HEADERS), **kwargs)
|
||||
|
||||
|
||||
class KickIE(KickBaseIE):
    """Extractor for live kick.com channel pages."""

    # Channel slugs only; the negative lookahead excludes the site's
    # /video/, /categories/, /search/ and /auth/ routes.
    _VALID_URL = r'https?://(?:www\.)?kick\.com/(?!(?:video|categories|search|auth)(?:[/?#]|$))(?P<id>[\w_]+)'
    _TESTS = [{
        'url': 'https://kick.com/yuppy',
        'info_dict': {
            'id': '6cde1-kickrp-joe-flemmingskick-info-heremust-knowmust-see21',
            'ext': 'mp4',
            'title': str,
            'description': str,
            'channel': 'yuppy',
            'channel_id': '33538',
            'uploader': 'Yuppy',
            'uploader_id': '33793',
            'upload_date': str,
            'live_status': 'is_live',
            'timestamp': int,
            'thumbnail': r're:^https?://.*\.jpg',
            'categories': list,
        },
        'skip': 'livestream',
    }, {
        'url': 'https://kick.com/kmack710',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        channel = self._match_id(url)
        response = self._call_api(f'channels/{channel}', channel)
        # A channel whose API response has no "livestream" object is offline.
        if not traverse_obj(response, 'livestream', expected_type=dict):
            raise UserNotLive(video_id=channel)

        return {
            # Prefer the livestream slug, then its numeric id, then the
            # channel name as a last resort.
            'id': str(traverse_obj(
                response, ('livestream', ('slug', 'id')), get_all=False, default=channel)),
            'formats': self._extract_m3u8_formats(
                response['playback_url'], channel, 'mp4', live=True),
            'title': traverse_obj(
                response, ('livestream', ('session_title', 'slug')), get_all=False, default=''),
            'description': traverse_obj(response, ('user', 'bio')),
            'channel': channel,
            # Each metadata field tries a top-level key first, then the
            # nested location, via traverse_obj's multi-path fallback.
            'channel_id': str_or_none(traverse_obj(response, 'id', ('livestream', 'channel_id'))),
            'uploader': traverse_obj(response, 'name', ('user', 'username')),
            'uploader_id': str_or_none(traverse_obj(response, 'user_id', ('user', 'id'))),
            'is_live': True,
            'timestamp': unified_timestamp(traverse_obj(response, ('livestream', 'created_at'))),
            'thumbnail': traverse_obj(
                response, ('livestream', 'thumbnail', 'url'), expected_type=url_or_none),
            'categories': traverse_obj(response, ('recent_categories', ..., 'name')),
        }
|
||||
|
||||
|
||||
class KickVODIE(KickBaseIE):
    """Extractor for kick.com VODs, addressed by UUID."""

    # VOD ids are standard 8-4-4-4-12 hex UUIDs.
    _VALID_URL = r'https?://(?:www\.)?kick\.com/video/(?P<id>[\da-f]{8}-(?:[\da-f]{4}-){3}[\da-f]{12})'
    _TESTS = [{
        'url': 'https://kick.com/video/54244b5e-050a-4df4-a013-b2433dafbe35',
        'md5': '73691206a6a49db25c5aa1588e6538fc',
        'info_dict': {
            'id': '54244b5e-050a-4df4-a013-b2433dafbe35',
            'ext': 'mp4',
            'title': 'Making 710-carBoosting. Kinda No Pixel inspired. !guilded - !links',
            'description': 'md5:a0d3546bf7955d0a8252ffe0fd6f518f',
            'channel': 'kmack710',
            'channel_id': '16278',
            'uploader': 'Kmack710',
            'uploader_id': '16412',
            'upload_date': '20221206',
            'timestamp': 1670318289,
            'duration': 40104.0,
            'thumbnail': r're:^https?://.*\.jpg',
            'categories': ['Grand Theft Auto V'],
        },
        'params': {
            'skip_download': 'm3u8',
        },
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        response = self._call_api(f'video/{video_id}', video_id)

        return {
            'id': video_id,
            'formats': self._extract_m3u8_formats(response['source'], video_id, 'mp4'),
            # Most metadata lives under the originating livestream object.
            'title': traverse_obj(
                response, ('livestream', ('session_title', 'slug')), get_all=False, default=''),
            'description': traverse_obj(response, ('livestream', 'channel', 'user', 'bio')),
            'channel': traverse_obj(response, ('livestream', 'channel', 'slug')),
            'channel_id': str_or_none(traverse_obj(response, ('livestream', 'channel', 'id'))),
            'uploader': traverse_obj(response, ('livestream', 'channel', 'user', 'username')),
            'uploader_id': str_or_none(traverse_obj(response, ('livestream', 'channel', 'user_id'))),
            'timestamp': unified_timestamp(response.get('created_at')),
            # scale=1000 converts the reported duration (presumably
            # milliseconds -- matches the test's 40104.0s) to seconds.
            'duration': float_or_none(traverse_obj(response, ('livestream', 'duration')), scale=1000),
            'thumbnail': traverse_obj(
                response, ('livestream', 'thumbnail'), expected_type=url_or_none),
            'categories': traverse_obj(response, ('livestream', 'categories', ..., 'name')),
        }
|
|
@ -2,7 +2,6 @@ import re
|
|||
|
||||
from .common import InfoExtractor
|
||||
from ..utils import (
|
||||
determine_ext,
|
||||
float_or_none,
|
||||
HEADRequest,
|
||||
int_or_none,
|
||||
|
@ -13,13 +12,13 @@ from ..utils import (
|
|||
|
||||
class LA7IE(InfoExtractor):
|
||||
IE_NAME = 'la7.it'
|
||||
_VALID_URL = r'''(?x)(https?://)?(?:
|
||||
(?:www\.)?la7\.it/([^/]+)/(?:rivedila7|video)/|
|
||||
_VALID_URL = r'''(?x)https?://(?:
|
||||
(?:www\.)?la7\.it/([^/]+)/(?:rivedila7|video|news)/|
|
||||
tg\.la7\.it/repliche-tgla7\?id=
|
||||
)(?P<id>.+)'''
|
||||
|
||||
_TESTS = [{
|
||||
# 'src' is a plain URL
|
||||
# single quality video
|
||||
'url': 'http://www.la7.it/crozza/video/inccool8-02-10-2015-163722',
|
||||
'md5': '8b613ffc0c4bf9b9e377169fc19c214c',
|
||||
'info_dict': {
|
||||
|
@ -29,6 +28,20 @@ class LA7IE(InfoExtractor):
|
|||
'description': 'Benvenuti nell\'incredibile mondo della INC. COOL. 8. dove “INC.” sta per “Incorporated” “COOL” sta per “fashion” ed Eight sta per il gesto atletico',
|
||||
'thumbnail': 're:^https?://.*',
|
||||
'upload_date': '20151002',
|
||||
'formats': 'count:4',
|
||||
},
|
||||
}, {
|
||||
# multiple quality video
|
||||
'url': 'https://www.la7.it/calcio-femminile/news/il-gol-di-lindsey-thomas-fiorentina-vs-milan-serie-a-calcio-femminile-26-11-2022-461736',
|
||||
'md5': 'd2370e78f75e8d1238cb3a0db9a2eda3',
|
||||
'info_dict': {
|
||||
'id': 'il-gol-di-lindsey-thomas-fiorentina-vs-milan-serie-a-calcio-femminile-26-11-2022-461736',
|
||||
'ext': 'mp4',
|
||||
'title': 'Il gol di Lindsey Thomas | Fiorentina vs Milan | Serie A Calcio Femminile',
|
||||
'description': 'Il gol di Lindsey Thomas | Fiorentina vs Milan | Serie A Calcio Femminile',
|
||||
'thumbnail': 're:^https?://.*',
|
||||
'upload_date': '20221126',
|
||||
'formats': 'count:8',
|
||||
},
|
||||
}, {
|
||||
'url': 'http://www.la7.it/omnibus/rivedila7/omnibus-news-02-07-2016-189077',
|
||||
|
@ -39,7 +52,7 @@ class LA7IE(InfoExtractor):
|
|||
def _generate_mp4_url(self, quality, m3u8_formats):
|
||||
for f in m3u8_formats:
|
||||
if f['vcodec'] != 'none' and quality in f['url']:
|
||||
http_url = '%s%s.mp4' % (self._HOST, quality)
|
||||
http_url = f'{self._HOST}{quality}.mp4'
|
||||
|
||||
urlh = self._request_webpage(
|
||||
HEADRequest(http_url), quality,
|
||||
|
@ -58,12 +71,13 @@ class LA7IE(InfoExtractor):
|
|||
|
||||
def _real_extract(self, url):
|
||||
video_id = self._match_id(url)
|
||||
|
||||
if not url.startswith('http'):
|
||||
url = '%s//%s' % (self.http_scheme(), url)
|
||||
|
||||
webpage = self._download_webpage(url, video_id)
|
||||
video_path = self._search_regex(r'(/content/.*?).mp4', webpage, 'video_path')
|
||||
|
||||
if re.search(r'(?i)(drmsupport\s*:\s*true)\s*', webpage):
|
||||
self.report_drm(video_id)
|
||||
|
||||
video_path = self._search_regex(
|
||||
r'(/content/[\w/,]+?)\.mp4(?:\.csmil)?/master\.m3u8', webpage, 'video_path')
|
||||
|
||||
formats = self._extract_mpd_formats(
|
||||
f'{self._HOST}/local/dash/,{video_path}.mp4.urlset/manifest.mpd',
|
||||
|
@ -90,8 +104,7 @@ class LA7IE(InfoExtractor):
|
|||
|
||||
class LA7PodcastEpisodeIE(InfoExtractor):
|
||||
IE_NAME = 'la7.it:pod:episode'
|
||||
_VALID_URL = r'''(?x)(https?://)?
|
||||
(?:www\.)?la7\.it/[^/]+/podcast/([^/]+-)?(?P<id>\d+)'''
|
||||
_VALID_URL = r'https?://(?:www\.)?la7\.it/[^/]+/podcast/([^/]+-)?(?P<id>\d+)'
|
||||
|
||||
_TESTS = [{
|
||||
'url': 'https://www.la7.it/voicetown/podcast/la-carezza-delle-memoria-di-carlo-verdone-23-03-2021-371497',
|
||||
|
@ -125,14 +138,15 @@ class LA7PodcastEpisodeIE(InfoExtractor):
|
|||
webpage, 'video_id', group='vid')
|
||||
|
||||
media_url = self._search_regex(
|
||||
(r'src:\s*([\'"])(?P<url>.+?mp3.+?)\1',
|
||||
r'data-podcast=([\'"])(?P<url>.+?mp3.+?)\1'),
|
||||
(r'src\s*:\s*([\'"])(?P<url>\S+?mp3.+?)\1',
|
||||
r'data-podcast\s*=\s*([\'"])(?P<url>\S+?mp3.+?)\1'),
|
||||
webpage, 'media_url', group='url')
|
||||
ext = determine_ext(media_url)
|
||||
formats = [{
|
||||
'url': media_url,
|
||||
'format_id': ext,
|
||||
'ext': ext,
|
||||
'format_id': 'http-mp3',
|
||||
'ext': 'mp3',
|
||||
'acodec': 'mp3',
|
||||
'vcodec': 'none',
|
||||
}]
|
||||
|
||||
title = self._html_search_regex(
|
||||
|
@ -173,7 +187,7 @@ class LA7PodcastEpisodeIE(InfoExtractor):
|
|||
# and title is the same as the show_title
|
||||
# add the date to the title
|
||||
if date and not date_alt and ppn and ppn.lower() == title.lower():
|
||||
title += ' del %s' % date
|
||||
title = f'{title} del {date}'
|
||||
return {
|
||||
'id': video_id,
|
||||
'title': title,
|
||||
|
@ -193,7 +207,7 @@ class LA7PodcastEpisodeIE(InfoExtractor):
|
|||
|
||||
class LA7PodcastIE(LA7PodcastEpisodeIE): # XXX: Do not subclass from concrete IE
|
||||
IE_NAME = 'la7.it:podcast'
|
||||
_VALID_URL = r'(https?://)?(www\.)?la7\.it/(?P<id>[^/]+)/podcast/?(?:$|[#?])'
|
||||
_VALID_URL = r'https?://(?:www\.)?la7\.it/(?P<id>[^/]+)/podcast/?(?:$|[#?])'
|
||||
|
||||
_TESTS = [{
|
||||
'url': 'https://www.la7.it/propagandalive/podcast',
|
||||
|
@ -201,7 +215,7 @@ class LA7PodcastIE(LA7PodcastEpisodeIE): # XXX: Do not subclass from concrete I
|
|||
'id': 'propagandalive',
|
||||
'title': "Propaganda Live",
|
||||
},
|
||||
'playlist_count': 10,
|
||||
'playlist_count_min': 10,
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
|
|
|
@ -75,9 +75,8 @@ class LinuxAcademyIE(InfoExtractor):
|
|||
|
||||
def _perform_login(self, username, password):
|
||||
def random_string():
|
||||
return ''.join([
|
||||
random.choice('0123456789ABCDEFGHIJKLMNOPQRSTUVXYZabcdefghijklmnopqrstuvwxyz-._~')
|
||||
for _ in range(32)])
|
||||
return ''.join(random.choices(
|
||||
'0123456789ABCDEFGHIJKLMNOPQRSTUVXYZabcdefghijklmnopqrstuvwxyz-._~', k=32))
|
||||
|
||||
webpage, urlh = self._download_webpage_handle(
|
||||
self._AUTHORIZE_URL, None, 'Downloading authorize page', query={
|
||||
|
|
|
@ -7,7 +7,6 @@ from ..utils import (
|
|||
GeoRestrictedError,
|
||||
int_or_none,
|
||||
OnDemandPagedList,
|
||||
parse_qs,
|
||||
try_get,
|
||||
urljoin,
|
||||
update_url_query,
|
||||
|
@ -16,20 +15,25 @@ from ..utils import (
|
|||
|
||||
class MediasetIE(ThePlatformBaseIE):
|
||||
_TP_TLD = 'eu'
|
||||
_VALID_URL = r'''(?x)
|
||||
_GUID_RE = r'F[0-9A-Z]{15}'
|
||||
_VALID_URL = rf'''(?x)
|
||||
(?:
|
||||
mediaset:|
|
||||
https?://
|
||||
(?:\w+\.)+mediaset\.it/
|
||||
(?:
|
||||
(?:video|on-demand|movie)/(?:[^/]+/)+[^/]+_|
|
||||
player/(?:v\d+/)?index\.html\?.*?\bprogramGuid=
|
||||
player/(?:v\d+/)?index\.html\?\S*?\bprogramGuid=
|
||||
)
|
||||
)(?P<id>[0-9A-Z]{16,})
|
||||
)(?P<id>{_GUID_RE})
|
||||
'''
|
||||
|
||||
_EMBED_REGEX = [
|
||||
rf'<iframe[^>]+src=[\'"](?P<url>(?:https?:)?//(?:\w+\.)+mediaset\.it/player/(?:v\d+/)?index\.html\?\S*?programGuid={_GUID_RE})[\'"&]'
|
||||
]
|
||||
_TESTS = [{
|
||||
# full episode
|
||||
'url': 'https://www.mediasetplay.mediaset.it/video/mrwronglezionidamore/episodio-1_F310575103000102',
|
||||
'url': 'https://mediasetinfinity.mediaset.it/video/mrwronglezionidamore/episodio-1_F310575103000102',
|
||||
'md5': 'a7e75c6384871f322adb781d3bd72c26',
|
||||
'info_dict': {
|
||||
'id': 'F310575103000102',
|
||||
|
@ -50,7 +54,7 @@ class MediasetIE(ThePlatformBaseIE):
|
|||
'chapters': [{'start_time': 0.0, 'end_time': 439.88}, {'start_time': 439.88, 'end_time': 1685.84}, {'start_time': 1685.84, 'end_time': 2682.0}],
|
||||
},
|
||||
}, {
|
||||
'url': 'https://www.mediasetplay.mediaset.it/video/matrix/puntata-del-25-maggio_F309013801000501',
|
||||
'url': 'https://mediasetinfinity.mediaset.it/video/matrix/puntata-del-25-maggio_F309013801000501',
|
||||
'md5': '1276f966ac423d16ba255ce867de073e',
|
||||
'info_dict': {
|
||||
'id': 'F309013801000501',
|
||||
|
@ -71,51 +75,8 @@ class MediasetIE(ThePlatformBaseIE):
|
|||
'chapters': [{'start_time': 0.0, 'end_time': 3409.08}, {'start_time': 3409.08, 'end_time': 6565.008}],
|
||||
},
|
||||
}, {
|
||||
'url': 'https://www.mediasetplay.mediaset.it/video/cameracafe5/episodio-69-pezzo-di-luna_F303843101017801',
|
||||
'md5': 'd1650ac9ff944f185556126a736df148',
|
||||
'info_dict': {
|
||||
'id': 'F303843101017801',
|
||||
'ext': 'mp4',
|
||||
'title': 'Episodio 69 - Pezzo di luna',
|
||||
'description': 'md5:7c32c8ec4118b72588b9412f11353f73',
|
||||
'thumbnail': r're:^https?://.*\.jpg$',
|
||||
'duration': 263.008,
|
||||
'upload_date': '20200902',
|
||||
'series': 'Camera Café 5',
|
||||
'timestamp': 1599064700,
|
||||
'uploader': 'Italia 1',
|
||||
'uploader_id': 'I1',
|
||||
'season': 'Season 5',
|
||||
'episode': 'Episode 178',
|
||||
'season_number': 5,
|
||||
'episode_number': 178,
|
||||
'chapters': [{'start_time': 0.0, 'end_time': 261.88}, {'start_time': 261.88, 'end_time': 263.008}],
|
||||
},
|
||||
}, {
|
||||
'url': 'https://www.mediasetplay.mediaset.it/video/cameracafe5/episodio-51-tu-chi-sei_F303843107000601',
|
||||
'md5': '567e9ad375b7a27a0e370650f572a1e3',
|
||||
'info_dict': {
|
||||
'id': 'F303843107000601',
|
||||
'ext': 'mp4',
|
||||
'title': 'Episodio 51 - Tu chi sei?',
|
||||
'description': 'md5:42ef006e56824cc31787a547590923f4',
|
||||
'thumbnail': r're:^https?://.*\.jpg$',
|
||||
'duration': 367.021,
|
||||
'upload_date': '20200902',
|
||||
'series': 'Camera Café 5',
|
||||
'timestamp': 1599069817,
|
||||
'uploader': 'Italia 1',
|
||||
'uploader_id': 'I1',
|
||||
'season': 'Season 5',
|
||||
'episode': 'Episode 6',
|
||||
'season_number': 5,
|
||||
'episode_number': 6,
|
||||
'chapters': [{'start_time': 0.0, 'end_time': 358.68}, {'start_time': 358.68, 'end_time': 367.021}],
|
||||
},
|
||||
}, {
|
||||
# movie
|
||||
'url': 'https://www.mediasetplay.mediaset.it/movie/selvaggi/selvaggi_F006474501000101',
|
||||
'md5': '720440187a2ae26af8148eb9e6b901ed',
|
||||
# DRM
|
||||
'url': 'https://mediasetinfinity.mediaset.it/movie/selvaggi/selvaggi_F006474501000101',
|
||||
'info_dict': {
|
||||
'id': 'F006474501000101',
|
||||
'ext': 'mp4',
|
||||
|
@ -129,70 +90,69 @@ class MediasetIE(ThePlatformBaseIE):
|
|||
'uploader_id': 'B6',
|
||||
'chapters': [{'start_time': 0.0, 'end_time': 1938.56}, {'start_time': 1938.56, 'end_time': 5233.01}],
|
||||
},
|
||||
'params': {
|
||||
'ignore_no_formats_error': True,
|
||||
},
|
||||
'expected_warnings': [
|
||||
'None of the available releases match the specified AssetType, ProtectionScheme, and/or Format preferences',
|
||||
'Content behind paywall and DRM',
|
||||
],
|
||||
'skip': True,
|
||||
}, {
|
||||
# clip
|
||||
'url': 'https://www.mediasetplay.mediaset.it/video/gogglebox/un-grande-classico-della-commedia-sexy_FAFU000000661680',
|
||||
# old domain
|
||||
'url': 'https://www.mediasetplay.mediaset.it/video/mrwronglezionidamore/episodio-1_F310575103000102',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
# iframe simple
|
||||
# iframe
|
||||
'url': 'https://static3.mediasetplay.mediaset.it/player/index.html?appKey=5ad3966b1de1c4000d5cec48&programGuid=FAFU000000665924&id=665924',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
# iframe twitter (from http://www.wittytv.it/se-prima-mi-fidavo-zero/)
|
||||
'url': 'https://static3.mediasetplay.mediaset.it/player/index.html?appKey=5ad3966b1de1c4000d5cec48&programGuid=FAFU000000665104&id=665104',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
# embedUrl (from https://www.wittytv.it/amici/est-ce-que-tu-maimes-gabriele-5-dicembre-copia/)
|
||||
'url': 'https://static3.mediasetplay.mediaset.it/player/v2/index.html?partnerId=wittytv&configId=&programGuid=FD00000000153323&autoplay=true&purl=http://www.wittytv.it/amici/est-ce-que-tu-maimes-gabriele-5-dicembre-copia/',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'mediaset:FAFU000000665924',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'https://www.mediasetplay.mediaset.it/video/mediasethaacuoreilfuturo/palmieri-alicudi-lisola-dei-tre-bambini-felici--un-decreto-per-alicudi-e-tutte-le-microscuole_FD00000000102295',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'https://www.mediasetplay.mediaset.it/video/cherryseason/anticipazioni-degli-episodi-del-23-ottobre_F306837101005C02',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'https://www.mediasetplay.mediaset.it/video/tg5/ambiente-onda-umana-per-salvare-il-pianeta_F309453601079D01',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'https://www.mediasetplay.mediaset.it/video/grandefratellovip/benedetta-una-doccia-gelata_F309344401044C135',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'https://www.mediasetplay.mediaset.it/movie/herculeslaleggendahainizio/hercules-la-leggenda-ha-inizio_F305927501000102',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'https://mediasetinfinity.mediaset.it/video/braveandbeautiful/episodio-113_F310948005000402',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'https://static3.mediasetplay.mediaset.it/player/v2/index.html?partnerId=wittytv&configId=&programGuid=FD00000000153323',
|
||||
'only_matching': True,
|
||||
}]
|
||||
|
||||
def _extract_from_webpage(self, url, webpage):
|
||||
def _program_guid(qs):
|
||||
return qs.get('programGuid', [None])[0]
|
||||
|
||||
for mobj in re.finditer(
|
||||
r'<iframe\b[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?//(?:www\.)?video\.mediaset\.it/player/playerIFrame(?:Twitter)?\.shtml.*?)\1',
|
||||
webpage):
|
||||
embed_url = mobj.group('url')
|
||||
embed_qs = parse_qs(embed_url)
|
||||
program_guid = _program_guid(embed_qs)
|
||||
if program_guid:
|
||||
yield self.url_result(embed_url)
|
||||
continue
|
||||
|
||||
video_id = embed_qs.get('id', [None])[0]
|
||||
if not video_id:
|
||||
continue
|
||||
urlh = self._request_webpage(embed_url, video_id, note='Following embed URL redirect')
|
||||
embed_url = urlh.geturl()
|
||||
program_guid = _program_guid(parse_qs(embed_url))
|
||||
if program_guid:
|
||||
yield self.url_result(embed_url)
|
||||
_WEBPAGE_TESTS = [{
|
||||
# Mediaset embed
|
||||
'url': 'http://www.tgcom24.mediaset.it/politica/serracchiani-voglio-vivere-in-una-societa-aperta-reazioni-sproporzionate-_3071354-201702a.shtml',
|
||||
'info_dict': {
|
||||
'id': 'FD00000000004929',
|
||||
'ext': 'mp4',
|
||||
'title': 'Serracchiani: "Voglio vivere in una società aperta, con tutela del patto di fiducia"',
|
||||
'duration': 67.013,
|
||||
'thumbnail': r're:^https?://.*\.jpg$',
|
||||
'uploader': 'Mediaset Play',
|
||||
'uploader_id': 'QY',
|
||||
'upload_date': '20201005',
|
||||
'timestamp': 1601866168,
|
||||
'chapters': [],
|
||||
},
|
||||
'params': {
|
||||
'skip_download': True,
|
||||
}
|
||||
}, {
|
||||
# WittyTV embed
|
||||
'url': 'https://www.wittytv.it/mauriziocostanzoshow/ultima-puntata-venerdi-25-novembre/',
|
||||
'info_dict': {
|
||||
'id': 'F312172801000801',
|
||||
'ext': 'mp4',
|
||||
'title': 'Ultima puntata - Venerdì 25 novembre',
|
||||
'description': 'Una serata all\'insegna della musica e del buonumore ma non priva di spunti di riflessione',
|
||||
'duration': 6203.01,
|
||||
'thumbnail': r're:^https?://.*\.jpg$',
|
||||
'uploader': 'Canale 5',
|
||||
'uploader_id': 'C5',
|
||||
'upload_date': '20221126',
|
||||
'timestamp': 1669428689,
|
||||
'chapters': list,
|
||||
'series': 'Maurizio Costanzo Show',
|
||||
'season': 'Season 12',
|
||||
'season_number': 12,
|
||||
'episode': 'Episode 8',
|
||||
'episode_number': 8,
|
||||
},
|
||||
'params': {
|
||||
'skip_download': True,
|
||||
}
|
||||
}]
|
||||
|
||||
def _parse_smil_formats(self, smil, smil_url, video_id, namespace=None, f4m_params=None, transform_rtmp_url=None):
|
||||
for video in smil.findall(self._xpath_ns('.//video', namespace)):
|
||||
|
@ -217,7 +177,7 @@ class MediasetIE(ThePlatformBaseIE):
|
|||
|
||||
def _real_extract(self, url):
|
||||
guid = self._match_id(url)
|
||||
tp_path = 'PR1GhC/media/guid/2702976343/' + guid
|
||||
tp_path = f'PR1GhC/media/guid/2702976343/{guid}'
|
||||
info = self._extract_theplatform_metadata(tp_path, guid)
|
||||
|
||||
formats = []
|
||||
|
@ -225,15 +185,17 @@ class MediasetIE(ThePlatformBaseIE):
|
|||
first_e = geo_e = None
|
||||
asset_type = 'geoNo:HD,browser,geoIT|geoNo:HD,geoIT|geoNo:SD,browser,geoIT|geoNo:SD,geoIT|geoNo|HD|SD'
|
||||
# TODO: fixup ISM+none manifest URLs
|
||||
for f in ('MPEG4', 'M3U'):
|
||||
for f in ('MPEG4', 'MPEG-DASH', 'M3U'):
|
||||
try:
|
||||
tp_formats, tp_subtitles = self._extract_theplatform_smil(
|
||||
update_url_query('http://link.theplatform.%s/s/%s' % (self._TP_TLD, tp_path), {
|
||||
update_url_query(f'http://link.theplatform.{self._TP_TLD}/s/{tp_path}', {
|
||||
'mbr': 'true',
|
||||
'formats': f,
|
||||
'assetTypes': asset_type,
|
||||
}), guid, 'Downloading %s SMIL data' % (f.split('+')[0]))
|
||||
}), guid, f'Downloading {f.split("+")[0]} SMIL data')
|
||||
except ExtractorError as e:
|
||||
if e.orig_msg == 'None of the available releases match the specified AssetType, ProtectionScheme, and/or Format preferences':
|
||||
e.orig_msg = 'This video is DRM protected'
|
||||
if not geo_e and isinstance(e, GeoRestrictedError):
|
||||
geo_e = e
|
||||
if not first_e:
|
||||
|
@ -248,7 +210,7 @@ class MediasetIE(ThePlatformBaseIE):
|
|||
raise geo_e or first_e
|
||||
|
||||
feed_data = self._download_json(
|
||||
'https://feed.entertainment.tv.theplatform.eu/f/PR1GhC/mediaset-prod-all-programs-v2/guid/-/' + guid,
|
||||
f'https://feed.entertainment.tv.theplatform.eu/f/PR1GhC/mediaset-prod-all-programs-v2/guid/-/{guid}',
|
||||
guid, fatal=False)
|
||||
if feed_data:
|
||||
publish_info = feed_data.get('mediasetprogram$publishInfo') or {}
|
||||
|
@ -299,23 +261,23 @@ class MediasetShowIE(MediasetIE): # XXX: Do not subclass from concrete IE
|
|||
'''
|
||||
_TESTS = [{
|
||||
# TV Show webpage (general webpage)
|
||||
'url': 'https://www.mediasetplay.mediaset.it/programmi-tv/leiene/leiene_SE000000000061',
|
||||
'url': 'https://mediasetinfinity.mediaset.it/programmi-tv/leiene/leiene_SE000000000061',
|
||||
'info_dict': {
|
||||
'id': '000000000061',
|
||||
'title': 'Le Iene',
|
||||
'title': 'Le Iene 2022/2023',
|
||||
},
|
||||
'playlist_mincount': 7,
|
||||
'playlist_mincount': 6,
|
||||
}, {
|
||||
# TV Show webpage (specific season)
|
||||
'url': 'https://www.mediasetplay.mediaset.it/programmi-tv/leiene/leiene_SE000000000061,ST000000002763',
|
||||
'url': 'https://mediasetinfinity.mediaset.it/programmi-tv/leiene/leiene_SE000000000061,ST000000002763',
|
||||
'info_dict': {
|
||||
'id': '000000002763',
|
||||
'title': 'Le Iene',
|
||||
'title': 'Le Iene 2021/2022',
|
||||
},
|
||||
'playlist_mincount': 7,
|
||||
}, {
|
||||
# TV Show specific playlist (with multiple pages)
|
||||
'url': 'https://www.mediasetplay.mediaset.it/programmi-tv/leiene/iservizi_SE000000000061,ST000000002763,sb100013375',
|
||||
'url': 'https://mediasetinfinity.mediaset.it/programmi-tv/leiene/iservizi_SE000000000061,ST000000002763,sb100013375',
|
||||
'info_dict': {
|
||||
'id': '100013375',
|
||||
'title': 'I servizi',
|
||||
|
@ -340,10 +302,9 @@ class MediasetShowIE(MediasetIE): # XXX: Do not subclass from concrete IE
|
|||
playlist_id, st, sb = self._match_valid_url(url).group('id', 'st', 'sb')
|
||||
if not sb:
|
||||
page = self._download_webpage(url, st or playlist_id)
|
||||
entries = [self.url_result(urljoin('https://www.mediasetplay.mediaset.it', url))
|
||||
entries = [self.url_result(urljoin('https://mediasetinfinity.mediaset.it', url))
|
||||
for url in re.findall(r'href="([^<>=]+SE\d{12},ST\d{12},sb\d{9})">[^<]+<', page)]
|
||||
title = (self._html_search_regex(r'(?s)<h1[^>]*>(.+?)</h1>', page, 'title', default=None)
|
||||
or self._og_search_title(page))
|
||||
title = self._html_extract_title(page).split('|')[0].strip()
|
||||
return self.playlist_result(entries, st or playlist_id, title)
|
||||
|
||||
entries = OnDemandPagedList(
|
||||
|
|
|
@ -0,0 +1,155 @@
|
|||
import re
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..utils import clean_html, get_element_html_by_class
|
||||
|
||||
|
||||
class MediaStreamIE(InfoExtractor):
|
||||
_VALID_URL = r'https?://mdstrm.com/(?:embed|live-stream)/(?P<id>\w+)'
|
||||
|
||||
_TESTS = [{
|
||||
'url': 'https://mdstrm.com/embed/6318e3f1d1d316083ae48831',
|
||||
'md5': '97b4f2634b8e8612cc574dfcd504df05',
|
||||
'info_dict': {
|
||||
'id': '6318e3f1d1d316083ae48831',
|
||||
'title': 'Video: Así fue el despido de Thomas Tuchel del Chelsea',
|
||||
'description': 'md5:358ce1e1396010d50a1ece1be3633c95',
|
||||
'thumbnail': r're:^https?://[^?#]+6318e3f1d1d316083ae48831',
|
||||
'ext': 'mp4',
|
||||
},
|
||||
}]
|
||||
|
||||
_WEBPAGE_TESTS = [{
|
||||
'url': 'https://www.multimedios.com/video/costa-rica-tv-en-vivo/v2616',
|
||||
'info_dict': {
|
||||
'id': '5a7b1e63a8da282c34d65445',
|
||||
'title': 're:mmtv-costarica',
|
||||
'description': 'mmtv-costarica',
|
||||
'thumbnail': 're:^https?://[^?#]+5a7b1e63a8da282c34d65445',
|
||||
'ext': 'mp4',
|
||||
'live_status': 'is_live',
|
||||
},
|
||||
'params': {
|
||||
'skip_download': 'Livestream'
|
||||
},
|
||||
}, {
|
||||
'url': 'https://www.multimedios.com/television/clases-de-llaves-y-castigos-quien-sabe-mas',
|
||||
'md5': 'de31f0b1ecc321fb35bf22d58734ea40',
|
||||
'info_dict': {
|
||||
'id': '63731bab8ec9b308a2c9ed28',
|
||||
'title': 'Clases de llaves y castigos ¿Quién sabe más?',
|
||||
'description': 'md5:1b49aa1ee5a4b32fbd66104b2d629e9d',
|
||||
'thumbnail': 're:^https?://[^?#]+63731bab8ec9b308a2c9ed28',
|
||||
'ext': 'mp4',
|
||||
},
|
||||
}, {
|
||||
'url': 'https://www.americatv.com.pe/videos/esto-es-guerra/facundo-gonzalez-sufrio-fuerte-golpe-durante-competencia-frente-hugo-garcia-eeg-noticia-139120',
|
||||
'info_dict': {
|
||||
'id': '63756df1c638b008a5659dec',
|
||||
'title': 'Facundo González sufrió fuerte golpe durante competencia frente a Hugo García en EEG',
|
||||
'description': 'md5:9490c034264afd756eef7b2c3adee69e',
|
||||
'thumbnail': 're:^https?://[^?#]+63756df1c638b008a5659dec',
|
||||
'ext': 'mp4',
|
||||
},
|
||||
}, {
|
||||
'url': 'https://www.americatv.com.pe/videos/al-fondo-hay-sitio/nuevas-lomas-town-bernardo-mata-se-enfrento-sujeto-luchar-amor-macarena-noticia-139083',
|
||||
'info_dict': {
|
||||
'id': '637307669609130f74cd3a6e',
|
||||
'title': 'Las Nuevas Lomas Town: Bernardo De La Mata se enfrentó a sujeto para luchar por el amor de Macarena',
|
||||
'description': 'md5:60d71772f1e1496923539ae58aa17124',
|
||||
'thumbnail': 're:^https?://[^?#]+637307669609130f74cd3a6e',
|
||||
'ext': 'mp4',
|
||||
},
|
||||
}]
|
||||
|
||||
@classmethod
def _extract_embed_urls(cls, url, webpage):
    """Yield mdstrm.com embed URLs found in *webpage*.

    Recognizes three embed styles:
      * inline <script> calls to ``playerMdStream.mdstreamVideo('<id>')``
      * plain ``<iframe src="https://mdstrm.com/...">`` embeds
      * <div>/<ps-mediastream> elements carrying a
        ``MediaStreamVideoPlayer`` class and a ``data-video-id``
        attribute (optionally marked live via ``data-video-type="live"``)
    """
    # Inline player bootstrap call; dots are escaped so '.' cannot
    # match arbitrary characters (was previously unescaped)
    for mobj in re.finditer(r'<script[^>]+>[^>]*playerMdStream\.mdstreamVideo\(\s*[\'"](?P<video_id>\w+)', webpage):
        yield f'https://mdstrm.com/embed/{mobj.group("video_id")}'

    # Direct iframe embeds. '[^>]+' (not '[^>]', which allowed exactly
    # one character) so any attributes may appear before 'src'
    yield from re.findall(
        r'<iframe[^>]+src\s*=\s*"(https://mdstrm\.com/[\w-]+/\w+)', webpage)

    for mobj in re.finditer(
            r'''(?x)
                <(?:div|ps-mediastream)[^>]+
                class\s*=\s*"[^"]*MediaStreamVideoPlayer[^"]*"[^>]+
                data-video-id\s*=\s*"(?P<video_id>\w+)\s*"
                (?:\s*data-video-type\s*=\s*"(?P<video_type>[^"]+))?
            ''', webpage):
        # 'live' players use the live-stream endpoint; everything
        # else is a plain embed
        video_type = 'live-stream' if mobj.group('video_type') == 'live' else 'embed'
        yield f'https://mdstrm.com/{video_type}/{mobj.group("video_id")}'
|
||||
|
||||
def _real_extract(self, url):
    """Extract a single mdstrm.com video or livestream.

    Downloads the embed page, reads the player configuration from
    ``window.MDSTRM.OPTIONS`` and builds formats from every source
    entry (HLS, DASH or direct URLs).
    """
    video_id = self._match_id(url)
    webpage = self._download_webpage(url, video_id)

    # The page serves this message instead of the player when the
    # client IP is outside the allowed region
    if 'Debido a tu ubicación no puedes ver el contenido' in webpage:
        self.raise_geo_restricted()

    player_config = self._search_json(r'window.MDSTRM.OPTIONS\s*=', webpage, 'metadata', video_id)

    formats = []
    subtitles = {}
    for source_type, source_url in player_config['src'].items():
        if source_type == 'hls':
            fmts, subs = self._extract_m3u8_formats_and_subtitles(source_url, video_id)
        elif source_type == 'mpd':
            fmts, subs = self._extract_mpd_formats_and_subtitles(source_url, video_id)
        else:
            # Direct (progressive) source; nothing to parse
            fmts, subs = [{'url': source_url}], {}
        formats.extend(fmts)
        self._merge_subtitles(subs, target=subtitles)

    return {
        'id': video_id,
        'title': self._og_search_title(webpage) or player_config.get('title'),
        'description': self._og_search_description(webpage),
        'formats': formats,
        'subtitles': subtitles,
        'is_live': player_config.get('type') == 'live',
        'thumbnail': self._og_search_thumbnail(webpage),
    }
|
||||
|
||||
|
||||
class WinSportsVideoIE(InfoExtractor):
    """Extractor for winsports.co video pages, which embed mdstrm.com players."""
    _VALID_URL = r'https?://www\.winsports\.co/videos/(?P<display_id>[\w-]+)-(?P<id>\d+)'

    _TESTS = [{
        'url': 'https://www.winsports.co/videos/siempre-castellanos-gran-atajada-del-portero-cardenal-para-evitar-la-caida-de-su-arco-60536',
        'info_dict': {
            'id': '62dc8357162c4b0821fcfb3c',
            'display_id': 'siempre-castellanos-gran-atajada-del-portero-cardenal-para-evitar-la-caida-de-su-arco',
            'title': '¡Siempre Castellanos! Gran atajada del portero \'cardenal\' para evitar la caída de su arco',
            'description': 'md5:eb811b2b2882bdc59431732c06b905f2',
            'thumbnail': r're:^https?://[^?#]+62dc8357162c4b0821fcfb3c',
            'ext': 'mp4',
        },
    }, {
        'url': 'https://www.winsports.co/videos/observa-aqui-los-goles-del-empate-entre-tolima-y-nacional-60548',
        'info_dict': {
            'id': '62dcb875ef12a5526790b552',
            'display_id': 'observa-aqui-los-goles-del-empate-entre-tolima-y-nacional',
            'title': 'Observa aquí los goles del empate entre Tolima y Nacional',
            'description': 'md5:b19402ba6e46558b93fd24b873eea9c9',
            'thumbnail': r're:^https?://[^?#]+62dcb875ef12a5526790b552',
            'ext': 'mp4',
        },
    }]

    def _real_extract(self, url):
        display_id, video_id = self._match_valid_url(url).group('display_id', 'id')
        webpage = self._download_webpage(url, display_id)

        # Drupal serializes its settings into a dedicated <script> tag;
        # the mediastream player id lives under mediastream_formatter,
        # keyed by the numeric page id
        drupal_settings = self._search_json(
            r'<script\s*[^>]+data-drupal-selector="drupal-settings-json">', webpage, 'drupal-setting-json', display_id)
        mediastream_id = drupal_settings['settings']['mediastream_formatter'][video_id]['mediastream_id']

        # Delegate extraction to MediaStreamIE while keeping this
        # page's id, display id and headline as the visible metadata
        return self.url_result(
            f'https://mdstrm.com/embed/{mediastream_id}', MediaStreamIE, video_id, url_transparent=True,
            display_id=display_id, video_title=clean_html(get_element_html_by_class('title-news', webpage)))
|
|
@ -1,8 +1,5 @@
|
|||
from .common import InfoExtractor
|
||||
from ..utils import (
|
||||
ExtractorError,
|
||||
traverse_obj,
|
||||
)
|
||||
from ..utils import UserNotLive, traverse_obj
|
||||
|
||||
|
||||
class MixchIE(InfoExtractor):
|
||||
|
@ -33,7 +30,7 @@ class MixchIE(InfoExtractor):
|
|||
initial_js_state = self._parse_json(self._search_regex(
|
||||
r'(?m)^\s*window\.__INITIAL_JS_STATE__\s*=\s*(\{.+?\});\s*$', webpage, 'initial JS state'), video_id)
|
||||
if not initial_js_state.get('liveInfo'):
|
||||
raise ExtractorError('Livestream has ended.', expected=True)
|
||||
raise UserNotLive(video_id=video_id)
|
||||
|
||||
return {
|
||||
'id': video_id,
|
||||
|
@ -45,7 +42,8 @@ class MixchIE(InfoExtractor):
|
|||
'uploader_id': video_id,
|
||||
'formats': [{
|
||||
'format_id': 'hls',
|
||||
'url': traverse_obj(initial_js_state, ('liveInfo', 'hls')) or 'https://d1hd0ww6piyb43.cloudfront.net/hls/torte_%s.m3u8' % video_id,
|
||||
'url': (traverse_obj(initial_js_state, ('liveInfo', 'hls'))
|
||||
or f'https://d1hd0ww6piyb43.cloudfront.net/hls/torte_{video_id}.m3u8'),
|
||||
'ext': 'mp4',
|
||||
'protocol': 'm3u8',
|
||||
}],
|
||||
|
|
|
@ -8,6 +8,7 @@ from ..utils import (
|
|||
clean_html,
|
||||
dict_get,
|
||||
int_or_none,
|
||||
join_nonempty,
|
||||
merge_dicts,
|
||||
parse_duration,
|
||||
traverse_obj,
|
||||
|
@ -72,13 +73,11 @@ class NaverBaseIE(InfoExtractor):
|
|||
|
||||
def get_subs(caption_url):
|
||||
if re.search(self._CAPTION_EXT_RE, caption_url):
|
||||
return [{
|
||||
'url': replace_ext(caption_url, 'ttml'),
|
||||
}, {
|
||||
'url': replace_ext(caption_url, 'vtt'),
|
||||
}]
|
||||
else:
|
||||
return [{'url': caption_url}]
|
||||
return [
|
||||
replace_ext(caption_url, 'ttml'),
|
||||
replace_ext(caption_url, 'vtt'),
|
||||
]
|
||||
return [caption_url]
|
||||
|
||||
automatic_captions = {}
|
||||
subtitles = {}
|
||||
|
@ -87,7 +86,13 @@ class NaverBaseIE(InfoExtractor):
|
|||
if not caption_url:
|
||||
continue
|
||||
sub_dict = automatic_captions if caption.get('type') == 'auto' else subtitles
|
||||
sub_dict.setdefault(dict_get(caption, ('locale', 'language')), []).extend(get_subs(caption_url))
|
||||
lang = caption.get('locale') or join_nonempty('language', 'country', from_dict=caption) or 'und'
|
||||
if caption.get('type') == 'fan':
|
||||
lang += '_fan%d' % next(i for i in itertools.count(1) if f'{lang}_fan{i}' not in sub_dict)
|
||||
sub_dict.setdefault(lang, []).extend({
|
||||
'url': sub_url,
|
||||
'name': join_nonempty('label', 'fanName', from_dict=caption, delim=' - '),
|
||||
} for sub_url in get_subs(caption_url))
|
||||
|
||||
user = meta.get('user', {})
|
||||
|
||||
|
@ -254,7 +259,7 @@ class NaverLiveIE(InfoExtractor):
|
|||
|
||||
class NaverNowIE(NaverBaseIE):
|
||||
IE_NAME = 'navernow'
|
||||
_VALID_URL = r'https?://now\.naver\.com/s/now\.(?P<id>[0-9]+)'
|
||||
_VALID_URL = r'https?://now\.naver\.com/s/now\.(?P<id>\w+)'
|
||||
_API_URL = 'https://apis.naver.com/now_web/oldnow_web/v4'
|
||||
_TESTS = [{
|
||||
'url': 'https://now.naver.com/s/now.4759?shareReplayId=26331132#replay=',
|
||||
|
@ -313,6 +318,9 @@ class NaverNowIE(NaverBaseIE):
|
|||
'title': '아이키의 떰즈업',
|
||||
},
|
||||
'playlist_mincount': 101,
|
||||
}, {
|
||||
'url': 'https://now.naver.com/s/now.kihyunplay?shareReplayId=30573291#replay',
|
||||
'only_matching': True,
|
||||
}]
|
||||
|
||||
def _extract_replay(self, show_id, replay_id):
|
||||
|
|
|
@ -3,29 +3,31 @@ import json
|
|||
import re
|
||||
|
||||
from .common import InfoExtractor
|
||||
from .theplatform import ThePlatformIE
|
||||
from .theplatform import ThePlatformIE, default_ns
|
||||
from .adobepass import AdobePassIE
|
||||
from ..compat import compat_urllib_parse_unquote
|
||||
from ..utils import (
|
||||
ExtractorError,
|
||||
HEADRequest,
|
||||
RegexNotFoundError,
|
||||
UserNotLive,
|
||||
clean_html,
|
||||
int_or_none,
|
||||
parse_age_limit,
|
||||
parse_duration,
|
||||
RegexNotFoundError,
|
||||
smuggle_url,
|
||||
str_or_none,
|
||||
traverse_obj,
|
||||
try_get,
|
||||
unified_strdate,
|
||||
unescapeHTML,
|
||||
unified_timestamp,
|
||||
update_url_query,
|
||||
url_basename,
|
||||
variadic,
|
||||
xpath_attr,
|
||||
)
|
||||
|
||||
|
||||
class NBCIE(ThePlatformIE): # XXX: Do not subclass from concrete IE
|
||||
_VALID_URL = r'https?(?P<permalink>://(?:www\.)?nbc\.com/(?:classic-tv/)?[^/]+/video/[^/]+/(?P<id>n?\d+))'
|
||||
_VALID_URL = r'https?(?P<permalink>://(?:www\.)?nbc\.com/(?:classic-tv/)?[^/]+/video/[^/]+/(?P<id>(?:NBCE|n)?\d+))'
|
||||
|
||||
_TESTS = [
|
||||
{
|
||||
|
@ -38,10 +40,18 @@ class NBCIE(ThePlatformIE): # XXX: Do not subclass from concrete IE
|
|||
'timestamp': 1424246400,
|
||||
'upload_date': '20150218',
|
||||
'uploader': 'NBCU-COM',
|
||||
'episode': 'Jimmy Fallon Surprises Fans at Ben & Jerry\'s',
|
||||
'episode_number': 86,
|
||||
'season': 'Season 2',
|
||||
'season_number': 2,
|
||||
'series': 'Tonight Show: Jimmy Fallon',
|
||||
'duration': 237.0,
|
||||
'chapters': 'count:1',
|
||||
'tags': 'count:4',
|
||||
'thumbnail': r're:https?://.+\.jpg',
|
||||
},
|
||||
'params': {
|
||||
# m3u8 download
|
||||
'skip_download': True,
|
||||
'skip_download': 'm3u8',
|
||||
},
|
||||
},
|
||||
{
|
||||
|
@ -55,11 +65,7 @@ class NBCIE(ThePlatformIE): # XXX: Do not subclass from concrete IE
|
|||
'upload_date': '20141206',
|
||||
'uploader': 'NBCU-COM',
|
||||
},
|
||||
'params': {
|
||||
# m3u8 download
|
||||
'skip_download': True,
|
||||
},
|
||||
'skip': 'Only works from US',
|
||||
'skip': 'page not found',
|
||||
},
|
||||
{
|
||||
# HLS streams requires the 'hdnea3' cookie
|
||||
|
@ -73,10 +79,59 @@ class NBCIE(ThePlatformIE): # XXX: Do not subclass from concrete IE
|
|||
'upload_date': '20090315',
|
||||
'uploader': 'NBCU-COM',
|
||||
},
|
||||
'params': {
|
||||
'skip_download': True,
|
||||
'skip': 'page not found',
|
||||
},
|
||||
{
|
||||
# manifest url does not have extension
|
||||
'url': 'https://www.nbc.com/the-golden-globe-awards/video/oprah-winfrey-receives-cecil-b-de-mille-award-at-the-2018-golden-globes/3646439',
|
||||
'info_dict': {
|
||||
'id': '3646439',
|
||||
'ext': 'mp4',
|
||||
'title': 'Oprah Winfrey Receives Cecil B. de Mille Award at the 2018 Golden Globes',
|
||||
'episode': 'Oprah Winfrey Receives Cecil B. de Mille Award at the 2018 Golden Globes',
|
||||
'episode_number': 1,
|
||||
'season': 'Season 75',
|
||||
'season_number': 75,
|
||||
'series': 'The Golden Globe Awards',
|
||||
'description': 'Oprah Winfrey receives the Cecil B. de Mille Award at the 75th Annual Golden Globe Awards.',
|
||||
'uploader': 'NBCU-COM',
|
||||
'upload_date': '20180107',
|
||||
'timestamp': 1515312000,
|
||||
'duration': 570.0,
|
||||
'tags': 'count:8',
|
||||
'thumbnail': r're:https?://.+\.jpg',
|
||||
'chapters': 'count:1',
|
||||
},
|
||||
'params': {
|
||||
'skip_download': 'm3u8',
|
||||
},
|
||||
},
|
||||
{
|
||||
# new video_id format
|
||||
'url': 'https://www.nbc.com/quantum-leap/video/bens-first-leap-nbcs-quantum-leap/NBCE125189978',
|
||||
'info_dict': {
|
||||
'id': 'NBCE125189978',
|
||||
'ext': 'mp4',
|
||||
'title': 'Ben\'s First Leap | NBC\'s Quantum Leap',
|
||||
'description': 'md5:a82762449b7ec4bb83291a7b355ebf8e',
|
||||
'uploader': 'NBCU-COM',
|
||||
'series': 'Quantum Leap',
|
||||
'season': 'Season 1',
|
||||
'season_number': 1,
|
||||
'episode': 'Ben\'s First Leap | NBC\'s Quantum Leap',
|
||||
'episode_number': 1,
|
||||
'duration': 170.171,
|
||||
'chapters': [],
|
||||
'timestamp': 1663956155,
|
||||
'upload_date': '20220923',
|
||||
'tags': 'count:10',
|
||||
'age_limit': 0,
|
||||
'thumbnail': r're:https?://.+\.jpg',
|
||||
},
|
||||
'expected_warnings': ['Ignoring subtitle tracks'],
|
||||
'params': {
|
||||
'skip_download': 'm3u8',
|
||||
},
|
||||
'skip': 'Only works from US',
|
||||
},
|
||||
{
|
||||
'url': 'https://www.nbc.com/classic-tv/charles-in-charge/video/charles-in-charge-pilot/n3310',
|
||||
|
@ -136,6 +191,7 @@ class NBCIE(ThePlatformIE): # XXX: Do not subclass from concrete IE
|
|||
query = {
|
||||
'mbr': 'true',
|
||||
'manifest': 'm3u',
|
||||
'switch': 'HLSServiceSecure',
|
||||
}
|
||||
video_id = video_data['mpxGuid']
|
||||
tp_path = 'NnzsPC/media/guid/%s/%s' % (video_data.get('mpxAccountId') or '2410887629', video_id)
|
||||
|
@ -599,21 +655,22 @@ class NBCStationsIE(InfoExtractor):
|
|||
|
||||
_TESTS = [{
|
||||
'url': 'https://www.nbclosangeles.com/news/local/large-structure-fire-in-downtown-la-prompts-smoke-odor-advisory/2968618/',
|
||||
'md5': '462041d91bd762ef5a38b7d85d6dc18f',
|
||||
'info_dict': {
|
||||
'id': '2968618',
|
||||
'ext': 'mp4',
|
||||
'title': 'Large Structure Fire in Downtown LA Prompts Smoke Odor Advisory',
|
||||
'description': None,
|
||||
'description': 'md5:417ed3c2d91fe9d301e6db7b0942f182',
|
||||
'timestamp': 1661135892,
|
||||
'upload_date': '20220821',
|
||||
'upload_date': '20220822',
|
||||
'uploader': 'NBC 4',
|
||||
'uploader_id': 'KNBC',
|
||||
'channel_id': 'KNBC',
|
||||
'channel': 'nbclosangeles',
|
||||
},
|
||||
'params': {
|
||||
'skip_download': 'm3u8',
|
||||
},
|
||||
}, {
|
||||
'url': 'https://www.telemundoarizona.com/responde/huracan-complica-reembolso-para-televidente-de-tucson/2247002/',
|
||||
'md5': '0917dcf7885be1023a9220630d415f67',
|
||||
'info_dict': {
|
||||
'id': '2247002',
|
||||
'ext': 'mp4',
|
||||
|
@ -622,9 +679,12 @@ class NBCStationsIE(InfoExtractor):
|
|||
'timestamp': 1660886507,
|
||||
'upload_date': '20220819',
|
||||
'uploader': 'Telemundo Arizona',
|
||||
'uploader_id': 'KTAZ',
|
||||
'channel_id': 'KTAZ',
|
||||
'channel': 'telemundoarizona',
|
||||
},
|
||||
'params': {
|
||||
'skip_download': 'm3u8',
|
||||
},
|
||||
}]
|
||||
|
||||
_RESOLUTIONS = {
|
||||
|
@ -640,51 +700,42 @@ class NBCStationsIE(InfoExtractor):
|
|||
webpage = self._download_webpage(url, video_id)
|
||||
|
||||
nbc_data = self._search_json(
|
||||
r'<script>var\s*nbc\s*=', webpage, 'NBC JSON data', video_id)
|
||||
r'<script>\s*var\s+nbc\s*=', webpage, 'NBC JSON data', video_id)
|
||||
pdk_acct = nbc_data.get('pdkAcct') or 'Yh1nAC'
|
||||
fw_ssid = traverse_obj(nbc_data, ('video', 'fwSSID'))
|
||||
fw_network_id = traverse_obj(nbc_data, ('video', 'fwNetworkID'), default='382114')
|
||||
|
||||
video_data = self._parse_json(self._html_search_regex(
|
||||
r'data-videos="([^"]*)"', webpage, 'video data', default='{}'), video_id)
|
||||
video_data = variadic(video_data)[0]
|
||||
video_data.update(self._parse_json(self._html_search_regex(
|
||||
r'data-meta="([^"]*)"', webpage, 'metadata', default='{}'), video_id))
|
||||
video_data = self._search_json(
|
||||
r'data-videos="\[', webpage, 'video data', video_id, default={}, transform_source=unescapeHTML)
|
||||
video_data.update(self._search_json(
|
||||
r'data-meta="', webpage, 'metadata', video_id, default={}, transform_source=unescapeHTML))
|
||||
if not video_data:
|
||||
raise ExtractorError('No video metadata found in webpage', expected=True)
|
||||
|
||||
formats = []
|
||||
|
||||
if video_data.get('mpx_is_livestream') == '1':
|
||||
live = True
|
||||
player_id = traverse_obj(
|
||||
video_data, 'mpx_m3upid', ('video', 'meta', 'mpx_m3upid'), 'mpx_pid',
|
||||
('video', 'meta', 'mpx_pid'), 'pid_streaming_web_medium')
|
||||
info, formats, subtitles = {}, [], {}
|
||||
is_live = int_or_none(video_data.get('mpx_is_livestream')) == 1
|
||||
query = {
|
||||
'mbr': 'true',
|
||||
'assetTypes': 'LegacyRelease',
|
||||
'formats': 'MPEG-DASH none,M3U none,MPEG-DASH none,MPEG4,MP3',
|
||||
'format': 'SMIL',
|
||||
'fwsitesection': fw_ssid,
|
||||
'fwNetworkID': fw_network_id,
|
||||
'fwNetworkID': traverse_obj(nbc_data, ('video', 'fwNetworkID'), default='382114'),
|
||||
'pprofile': 'ots_desktop_html',
|
||||
'sensitive': 'false',
|
||||
'w': '1920',
|
||||
'h': '1080',
|
||||
'rnd': '1660303',
|
||||
'mode': 'LIVE',
|
||||
'format': 'SMIL',
|
||||
'tracking': 'true',
|
||||
'formats': 'M3U+none,MPEG-DASH+none,MPEG4,MP3',
|
||||
'mode': 'LIVE' if is_live else 'on-demand',
|
||||
'vpaid': 'script',
|
||||
'schema': '2.0',
|
||||
'SDK': 'PDK+6.1.3',
|
||||
}
|
||||
info = {
|
||||
'title': f'{channel} livestream',
|
||||
'sdk': 'PDK 6.1.3',
|
||||
}
|
||||
|
||||
if is_live:
|
||||
player_id = traverse_obj(video_data, ((None, ('video', 'meta')), (
|
||||
'mpx_m3upid', 'mpx_pid', 'pid_streaming_web_medium')), get_all=False)
|
||||
info['title'] = f'{channel} livestream'
|
||||
|
||||
else:
|
||||
live = False
|
||||
player_id = traverse_obj(
|
||||
video_data, ('video', 'meta', 'pid_streaming_web_high'), 'pid_streaming_web_high',
|
||||
('video', 'meta', 'mpx_pid'), 'mpx_pid')
|
||||
player_id = traverse_obj(video_data, (
|
||||
(None, ('video', 'meta')), ('pid_streaming_web_high', 'mpx_pid')), get_all=False)
|
||||
|
||||
date_string = traverse_obj(video_data, 'date_string', 'date_gmt')
|
||||
if date_string:
|
||||
|
@ -692,63 +743,58 @@ class NBCStationsIE(InfoExtractor):
|
|||
r'datetime="([^"]+)"', date_string, 'date string', fatal=False)
|
||||
else:
|
||||
date_string = traverse_obj(
|
||||
nbc_data, ('dataLayer', 'adobe', 'prop70'), ('dataLayer', 'adobe', 'eVar70'),
|
||||
('dataLayer', 'adobe', 'eVar59'))
|
||||
nbc_data, ('dataLayer', 'adobe', ('prop70', 'eVar70', 'eVar59')), get_all=False)
|
||||
|
||||
video_url = traverse_obj(video_data, ('video', 'meta', 'mp4_url'), 'mp4_url')
|
||||
video_url = traverse_obj(video_data, ((None, ('video', 'meta')), 'mp4_url'), get_all=False)
|
||||
if video_url:
|
||||
height = url_basename(video_url).split('-')[1].split('p')[0]
|
||||
height = self._search_regex(r'\d+-(\d+)p', url_basename(video_url), 'height', default=None)
|
||||
formats.append({
|
||||
'url': video_url,
|
||||
'ext': 'mp4',
|
||||
'width': int_or_none(self._RESOLUTIONS.get(height)),
|
||||
'height': int_or_none(height),
|
||||
'format_id': f'http-{height}',
|
||||
'format_id': 'http-mp4',
|
||||
})
|
||||
|
||||
query = {
|
||||
'mbr': 'true',
|
||||
'assetTypes': 'LegacyRelease',
|
||||
'fwsitesection': fw_ssid,
|
||||
'fwNetworkID': fw_network_id,
|
||||
'format': 'redirect',
|
||||
'manifest': 'm3u',
|
||||
'Tracking': 'true',
|
||||
'Embedded': 'true',
|
||||
'formats': 'MPEG4',
|
||||
}
|
||||
info = {
|
||||
'title': video_data.get('title') or traverse_obj(
|
||||
nbc_data, ('dataLayer', 'contenttitle'), ('dataLayer', 'title'),
|
||||
('dataLayer', 'adobe', 'prop22'), ('dataLayer', 'id')),
|
||||
'description': traverse_obj(video_data, 'summary', 'excerpt', 'video_hero_text'),
|
||||
'upload_date': str_or_none(unified_strdate(date_string)),
|
||||
'timestamp': int_or_none(unified_timestamp(date_string)),
|
||||
}
|
||||
info.update({
|
||||
'title': video_data.get('title') or traverse_obj(nbc_data, (
|
||||
'dataLayer', (None, 'adobe'), ('contenttitle', 'title', 'prop22')), get_all=False),
|
||||
'description':
|
||||
traverse_obj(video_data, 'summary', 'excerpt', 'video_hero_text')
|
||||
or clean_html(traverse_obj(nbc_data, ('dataLayer', 'summary'))),
|
||||
'timestamp': unified_timestamp(date_string),
|
||||
})
|
||||
|
||||
if not player_id:
|
||||
raise ExtractorError(
|
||||
'No video player ID or livestream player ID found in webpage', expected=True)
|
||||
|
||||
headers = {'Origin': f'https://www.{channel}.com'}
|
||||
manifest, urlh = self._download_webpage_handle(
|
||||
smil = None
|
||||
if player_id and fw_ssid:
|
||||
smil = self._download_xml(
|
||||
f'https://link.theplatform.com/s/{pdk_acct}/{player_id}', video_id,
|
||||
headers=headers, query=query, note='Downloading manifest')
|
||||
if live:
|
||||
manifest_url = self._search_regex(r'<video src="([^"]*)', manifest, 'manifest URL')
|
||||
else:
|
||||
manifest_url = urlh.geturl()
|
||||
note='Downloading SMIL data', query=query, fatal=is_live)
|
||||
if smil:
|
||||
manifest_url = xpath_attr(smil, f'.//{{{default_ns}}}video', 'src', fatal=is_live)
|
||||
subtitles = self._parse_smil_subtitles(smil, default_ns)
|
||||
fmts, subs = self._extract_m3u8_formats_and_subtitles(
|
||||
manifest_url, video_id, 'mp4', m3u8_id='hls', fatal=is_live,
|
||||
live=is_live, errnote='No HLS formats found')
|
||||
formats.extend(fmts)
|
||||
self._merge_subtitles(subs, target=subtitles)
|
||||
|
||||
formats.extend(self._extract_m3u8_formats(
|
||||
manifest_url, video_id, 'mp4', headers=headers, m3u8_id='hls',
|
||||
fatal=live, live=live, errnote='No HLS formats found'))
|
||||
if not formats:
|
||||
self.raise_no_formats('No video content found in webpage', expected=True)
|
||||
elif is_live:
|
||||
try:
|
||||
self._request_webpage(
|
||||
HEADRequest(formats[0]['url']), video_id, note='Checking live status')
|
||||
except ExtractorError:
|
||||
raise UserNotLive(video_id=channel)
|
||||
|
||||
return {
|
||||
'id': str_or_none(video_id),
|
||||
'id': video_id,
|
||||
'channel': channel,
|
||||
'uploader': str_or_none(nbc_data.get('on_air_name')),
|
||||
'uploader_id': str_or_none(nbc_data.get('callLetters')),
|
||||
'channel_id': nbc_data.get('callLetters'),
|
||||
'uploader': nbc_data.get('on_air_name'),
|
||||
'formats': formats,
|
||||
'is_live': live,
|
||||
'subtitles': subtitles,
|
||||
'is_live': is_live,
|
||||
**info,
|
||||
}
|
||||
|
|
|
@ -1,4 +1,6 @@
|
|||
from .common import InfoExtractor
|
||||
import itertools
|
||||
|
||||
from .common import InfoExtractor, SearchInfoExtractor
|
||||
from .dailymotion import DailymotionIE
|
||||
from ..utils import smuggle_url, traverse_obj
|
||||
|
||||
|
@ -16,6 +18,26 @@ class NetverseBaseIE(InfoExtractor):
|
|||
f'https://api.netverse.id/medias/api/v2/{self._ENDPOINTS[endpoint]}/{slug}/{season_id}',
|
||||
display_id or slug, query=query)
|
||||
|
||||
def _get_comments(self, video_id):
    """Yield comment dicts for *video_id* from the Netverse comments API.

    Pages through /videos/comments/{video_id} until the API-reported
    last page is reached; each yielded dict follows yt-dlp's comment
    schema (id, text, author, ...).
    """
    # The API reports the total page count in every response; cache it
    # after the first successful page.
    last_page_number = None
    for i in itertools.count(1):
        # POST with an empty body; fatal=False so a failed page ends
        # pagination gracefully instead of aborting extraction.
        comment_data = self._download_json(
            f'https://api.netverse.id/mediadetails/api/v3/videos/comments/{video_id}',
            video_id, data=b'', fatal=False, query={'page': i},
            note=f'Downloading JSON comment metadata page {i}') or {}
        # Map each API comment object onto yt-dlp's comment fields.
        yield from traverse_obj(comment_data, ('response', 'comments', 'data', ..., {
            'id': '_id',
            'text': 'comment',
            'author_id': 'customer_id',
            'author': ('customer', 'name'),
            'author_thumbnail': ('customer', 'profile_picture'),
        }))

        if not last_page_number:
            last_page_number = traverse_obj(comment_data, ('response', 'comments', 'last_page'))
        # Stop once the reported last page has been fetched (or
        # immediately when the page count is unknown).
        if i >= (last_page_number or 0):
            break
|
||||
|
||||
|
||||
class NetverseIE(NetverseBaseIE):
|
||||
_VALID_URL = r'https?://(?:\w+\.)?netverse\.id/(?P<type>watch|video)/(?P<display_id>[^/?#&]+)'
|
||||
|
@ -28,7 +50,7 @@ class NetverseIE(NetverseBaseIE):
|
|||
'ext': 'mp4',
|
||||
'season': 'Season 2016',
|
||||
'description': 'md5:d41d8cd98f00b204e9800998ecf8427e',
|
||||
'thumbnail': r're:https?://s\d+\.dmcdn\.net/v/T7aV31Y0eGRWBbwkK/x1080',
|
||||
'thumbnail': r're:https?://s\d+\.dmcdn\.net/v/[^/]+/x1080',
|
||||
'episode_number': 22,
|
||||
'episode': 'Episode 22',
|
||||
'uploader_id': 'x2ir3vq',
|
||||
|
@ -51,7 +73,7 @@ class NetverseIE(NetverseBaseIE):
|
|||
'ext': 'mp4',
|
||||
'season': 'Season 2',
|
||||
'description': 'md5:8a74f70812cca267e19ee0635f0af835',
|
||||
'thumbnail': r're:https?://s\d+\.dmcdn\.net/v/Thwuy1YURicFmGu0v/x1080',
|
||||
'thumbnail': r're:https?://s\d+\.dmcdn\.net/v/[^/]+/x1080',
|
||||
'episode_number': 2,
|
||||
'episode': 'Episode 2',
|
||||
'view_count': int,
|
||||
|
@ -75,7 +97,7 @@ class NetverseIE(NetverseBaseIE):
|
|||
'title': 'Tetangga Baru',
|
||||
'season': 'Season 1',
|
||||
'description': 'md5:23fcf70e97d461d3029d25d59b2ccfb9',
|
||||
'thumbnail': r're:https?://s\d+\.dmcdn\.net/v/T3Ogm1YEnnyjVKAFF/x1080',
|
||||
'thumbnail': r're:https?://s\d+\.dmcdn\.net/v/[^/]+/x1080',
|
||||
'episode_number': 1,
|
||||
'episode': 'Episode 1',
|
||||
'timestamp': 1624538169,
|
||||
|
@ -96,7 +118,7 @@ class NetverseIE(NetverseBaseIE):
|
|||
'info_dict': {
|
||||
'id': 'x887jzz',
|
||||
'ext': 'mp4',
|
||||
'thumbnail': r're:https?://s\d+\.dmcdn\.net/v/TfuZ_1Y6PboJ5An_s/x1080',
|
||||
'thumbnail': r're:https?://s\d+\.dmcdn\.net/v/[^/]+/x1080',
|
||||
'season': 'Season 1',
|
||||
'episode_number': 1,
|
||||
'description': 'md5:d4f627b3e7a3f9acdc55f6cdd5ea41d5',
|
||||
|
@ -114,6 +136,60 @@ class NetverseIE(NetverseBaseIE):
|
|||
'upload_date': '20220225',
|
||||
},
|
||||
'skip': 'This video get Geo-blocked for some country'
|
||||
}, {
|
||||
# video with comments
|
||||
'url': 'https://netverse.id/video/episode-1-season-2016-ok-food',
|
||||
'info_dict': {
|
||||
'id': 'k6hetBPiQMljSxxvAy7',
|
||||
'ext': 'mp4',
|
||||
'thumbnail': r're:https?://s\d+\.dmcdn\.net/v/[^/]+/x1080',
|
||||
'display_id': 'episode-1-season-2016-ok-food',
|
||||
'like_count': int,
|
||||
'description': '',
|
||||
'duration': 1471,
|
||||
'age_limit': 0,
|
||||
'timestamp': 1642405848,
|
||||
'episode_number': 1,
|
||||
'season': 'Season 2016',
|
||||
'uploader_id': 'x2ir3vq',
|
||||
'title': 'Episode 1 - Season 2016 - Ok Food',
|
||||
'upload_date': '20220117',
|
||||
'tags': [],
|
||||
'view_count': int,
|
||||
'episode': 'Episode 1',
|
||||
'uploader': 'Net Prime',
|
||||
'comment_count': int,
|
||||
},
|
||||
'params':{
|
||||
'getcomments': True
|
||||
}
|
||||
}, {
|
||||
# video with multiple page comment
|
||||
'url': 'https://netverse.id/video/match-island-eps-1-fix',
|
||||
'info_dict': {
|
||||
'id': 'x8aznjc',
|
||||
'ext': 'mp4',
|
||||
'like_count': int,
|
||||
'tags': ['Match-Island', 'Pd00111'],
|
||||
'display_id': 'match-island-eps-1-fix',
|
||||
'view_count': int,
|
||||
'episode': 'Episode 1',
|
||||
'uploader': 'Net Prime',
|
||||
'duration': 4070,
|
||||
'timestamp': 1653068165,
|
||||
'description': 'md5:e9cf3b480ad18e9c33b999e3494f223f',
|
||||
'age_limit': 0,
|
||||
'title': 'Welcome To Match Island',
|
||||
'upload_date': '20220520',
|
||||
'episode_number': 1,
|
||||
'thumbnail': r're:https?://s\d+\.dmcdn\.net/v/[^/]+/x1080',
|
||||
'uploader_id': 'x2ir3vq',
|
||||
'season': 'Season 1',
|
||||
'comment_count': int,
|
||||
},
|
||||
'params':{
|
||||
'getcomments': True
|
||||
}
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
|
@ -131,6 +207,7 @@ class NetverseIE(NetverseBaseIE):
|
|||
'thumbnail': traverse_obj(videos, ('program_detail', 'thumbnail_image')),
|
||||
'description': traverse_obj(videos, ('program_detail', 'description')),
|
||||
'episode_number': videos.get('episode_order'),
|
||||
'__post_extractor': self.extract_comments(display_id),
|
||||
}
|
||||
|
||||
|
||||
|
@ -174,3 +251,31 @@ class NetversePlaylistIE(NetverseBaseIE):
|
|||
self.parse_playlist(playlist_data['response'], playlist_id),
|
||||
traverse_obj(playlist_data, ('response', 'webseries_info', 'slug')),
|
||||
traverse_obj(playlist_data, ('response', 'webseries_info', 'title')))
|
||||
|
||||
|
||||
class NetverseSearchIE(SearchInfoExtractor):
    """Search extractor for netverse.id (``netsearchN:<query>``)."""
    _SEARCH_KEY = 'netsearch'

    _TESTS = [{
        'url': 'netsearch10:tetangga',
        'info_dict': {
            'id': 'tetangga',
            'title': 'tetangga',
        },
        'playlist_count': 10,
    }]

    def _search_results(self, query):
        # Remember the first page count the API reports and keep
        # fetching pages until it is reached (or a page comes back empty)
        last_page = None
        for page_number in itertools.count(1):
            search_data = self._download_json(
                'https://api.netverse.id/search/elastic/search', query,
                query={'q': query, 'page': page_number},
                note=f'Downloading page {page_number}')

            page_videos = traverse_obj(search_data, ('response', 'data', ...))
            yield from (
                self.url_result(f'https://netverse.id/video/{video["slug"]}', NetverseIE)
                for video in page_videos)

            last_page = last_page or traverse_obj(search_data, ('response', 'lastpage'))
            if not page_videos or page_number >= (last_page or 0):
                break
|
||||
|
|
|
@ -675,8 +675,8 @@ class NiconicoSeriesIE(InfoExtractor):
|
|||
|
||||
class NiconicoHistoryIE(NiconicoPlaylistBaseIE):
|
||||
IE_NAME = 'niconico:history'
|
||||
IE_DESC = 'NicoNico user history. Requires cookies.'
|
||||
_VALID_URL = r'https?://(?:www\.|sp\.)?nicovideo\.jp/my/history'
|
||||
IE_DESC = 'NicoNico user history or likes. Requires cookies.'
|
||||
_VALID_URL = r'https?://(?:www\.|sp\.)?nicovideo\.jp/my/(?P<id>history(?:/like)?)'
|
||||
|
||||
_TESTS = [{
|
||||
'note': 'PC page, with /video',
|
||||
|
@ -694,23 +694,29 @@ class NiconicoHistoryIE(NiconicoPlaylistBaseIE):
|
|||
'note': 'mobile page, without /video',
|
||||
'url': 'https://sp.nicovideo.jp/my/history',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'note': 'PC page',
|
||||
'url': 'https://www.nicovideo.jp/my/history/like',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'note': 'Mobile page',
|
||||
'url': 'https://sp.nicovideo.jp/my/history/like',
|
||||
'only_matching': True,
|
||||
}]
|
||||
|
||||
def _call_api(self, list_id, resource, query):
|
||||
path = 'likes' if list_id == 'history/like' else 'watch/history'
|
||||
return self._download_json(
|
||||
'https://nvapi.nicovideo.jp/v1/users/me/watch/history', 'history',
|
||||
f'Downloading {resource}', query=query,
|
||||
headers=self._API_HEADERS)['data']
|
||||
f'https://nvapi.nicovideo.jp/v1/users/me/{path}', list_id,
|
||||
f'Downloading {resource}', query=query, headers=self._API_HEADERS)['data']
|
||||
|
||||
def _real_extract(self, url):
|
||||
list_id = 'history'
|
||||
list_id = self._match_id(url)
|
||||
try:
|
||||
mylist = self._call_api(list_id, 'list', {
|
||||
'pageSize': 1,
|
||||
})
|
||||
mylist = self._call_api(list_id, 'list', {'pageSize': 1})
|
||||
except ExtractorError as e:
|
||||
if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401:
|
||||
self.raise_login_required('You have to be logged in to get your watch history')
|
||||
self.raise_login_required('You have to be logged in to get your history')
|
||||
raise
|
||||
return self.playlist_result(self._entries(list_id), list_id, **self._parse_owner(mylist))
|
||||
|
||||
|
|
|
@ -0,0 +1,116 @@
|
|||
from .common import InfoExtractor
|
||||
from ..utils import (
|
||||
clean_html,
|
||||
determine_ext,
|
||||
int_or_none,
|
||||
parse_iso8601,
|
||||
traverse_obj,
|
||||
variadic,
|
||||
)
|
||||
|
||||
|
||||
class NoicePodcastIE(InfoExtractor):
|
||||
_VALID_URL = r'https?://open\.noice\.id/content/(?P<id>[a-fA-F0-9-]+)'
|
||||
_TESTS = [{
|
||||
'url': 'https://open.noice.id/content/7694bb04-ff0f-40fa-a60b-5b39f29584b2',
|
||||
'info_dict': {
|
||||
'id': '7694bb04-ff0f-40fa-a60b-5b39f29584b2',
|
||||
'ext': 'm4a',
|
||||
'season': 'Season 1',
|
||||
'description': 'md5:58d1274e6857b6fbbecf47075885380d',
|
||||
'release_date': '20221115',
|
||||
'timestamp': 1668496642,
|
||||
'season_number': 1,
|
||||
'upload_date': '20221115',
|
||||
'release_timestamp': 1668496642,
|
||||
'title': 'Eps 1. Belajar dari Wishnutama: Kreatif Bukan Followers! (bersama Wishnutama)',
|
||||
'modified_date': '20221121',
|
||||
'categories': ['Bisnis dan Keuangan'],
|
||||
'duration': 3567,
|
||||
'modified_timestamp': 1669030647,
|
||||
'thumbnail': 'https://images.noiceid.cc/catalog/content-1668496302560',
|
||||
'channel_id': '9dab1024-5b92-4265-ae1c-63da87359832',
|
||||
'like_count': int,
|
||||
'channel': 'Noice Space Talks',
|
||||
'comment_count': int,
|
||||
'dislike_count': int,
|
||||
'channel_follower_count': int,
|
||||
}
|
||||
}, {
|
||||
'url': 'https://open.noice.id/content/222134e4-99f2-456f-b8a2-b8be404bf063',
|
||||
'info_dict': {
|
||||
'id': '222134e4-99f2-456f-b8a2-b8be404bf063',
|
||||
'ext': 'm4a',
|
||||
'release_timestamp': 1653488220,
|
||||
'description': 'md5:35074f6190cef52b05dd133bb2ef460e',
|
||||
'upload_date': '20220525',
|
||||
'timestamp': 1653460637,
|
||||
'release_date': '20220525',
|
||||
'thumbnail': 'https://images.noiceid.cc/catalog/content-1653460337625',
|
||||
'title': 'Eps 1: Dijodohin Sama Anak Pak RT',
|
||||
'modified_timestamp': 1669030647,
|
||||
'season_number': 1,
|
||||
'modified_date': '20221121',
|
||||
'categories': ['Cerita dan Drama'],
|
||||
'duration': 1830,
|
||||
'season': 'Season 1',
|
||||
'channel_id': '60193f6b-d24d-4b23-913b-ceed5a731e74',
|
||||
'dislike_count': int,
|
||||
'like_count': int,
|
||||
'comment_count': int,
|
||||
'channel': 'Dear Jerome',
|
||||
'channel_follower_count': int,
|
||||
}
|
||||
}]
|
||||
|
||||
def _get_formats_and_subtitles(self, media_url, video_id):
|
||||
formats, subtitles = [], {}
|
||||
for url in variadic(media_url):
|
||||
ext = determine_ext(url)
|
||||
if ext == 'm3u8':
|
||||
fmts, subs = self._extract_m3u8_formats_and_subtitles(url, video_id)
|
||||
formats.extend(fmts)
|
||||
self._merge_subtitles(subs, target=subtitles)
|
||||
else:
|
||||
formats.append({
|
||||
'url': url,
|
||||
'ext': 'mp3',
|
||||
'vcodec': 'none',
|
||||
'acodec': 'mp3',
|
||||
})
|
||||
return formats, subtitles
|
||||
|
||||
    def _real_extract(self, url):
        """Extract a single Noice podcast episode.

        All metadata comes from the Next.js payload embedded in the page
        (``props.pageProps.contentDetails``); the media URL(s) live under
        ``rawContentUrl``/``url``.
        """
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)

        nextjs_data = self._search_nextjs_data(webpage, display_id)['props']['pageProps']['contentDetails']

        # ('rawContentUrl', 'url') is a branching path: collects whichever of
        # the two keys is present, so this may yield several URLs
        media_url_list = traverse_obj(nextjs_data, (('rawContentUrl', 'url'), ))
        formats, subtitles = self._get_formats_and_subtitles(media_url_list, display_id)

        return {
            'id': nextjs_data.get('id') or display_id,
            'title': nextjs_data.get('title') or self._html_search_meta('og:title', webpage),
            'formats': formats,
            'subtitles': subtitles,
            # Prefer the plain-text description; fall back to the HTML one,
            # then to the page's meta tags
            'description': (nextjs_data.get('description') or clean_html(nextjs_data.get('htmlDescription'))
                            or self._html_search_meta(['description', 'og:description'], webpage)),
            'thumbnail': nextjs_data.get('image') or self._html_search_meta('og:image', webpage),
            'timestamp': parse_iso8601(nextjs_data.get('createdAt')),
            'release_timestamp': parse_iso8601(nextjs_data.get('publishedAt')),
            'modified_timestamp': parse_iso8601(
                nextjs_data.get('updatedAt') or self._html_search_meta('og:updated_time', webpage)),
            'duration': int_or_none(nextjs_data.get('duration')),
            'categories': traverse_obj(nextjs_data, ('genres', ..., 'name')),
            'season': nextjs_data.get('seasonName'),
            'season_number': int_or_none(nextjs_data.get('seasonNumber')),
            'channel': traverse_obj(nextjs_data, ('catalog', 'title')),
            'channel_id': traverse_obj(nextjs_data, ('catalog', 'id'), 'catalogId'),
            # dict-as-path: maps each info-dict key to the matching counter
            # under meta.aggregations in the API response
            **traverse_obj(nextjs_data, ('meta', 'aggregations', {
                'like_count': 'likes',
                'dislike_count': 'dislikes',
                'comment_count': 'comments',
                'channel_follower_count': 'followers',
            }))
        }
|
|
@ -3,7 +3,7 @@ from ..utils import parse_duration, parse_iso8601, traverse_obj
|
|||
|
||||
|
||||
class NOSNLArticleIE(InfoExtractor):
|
||||
_VALID_URL = r'https?://nos\.nl/((?!video)(\w+/)?\w+/)\d+-(?P<display_id>[\w-]+)'
|
||||
_VALID_URL = r'https?://nos\.nl/(?P<type>video|(\w+/)?\w+)/?\d+-(?P<display_id>[\w-]+)'
|
||||
_TESTS = [
|
||||
{
|
||||
# only 1 video
|
||||
|
@ -22,13 +22,14 @@ class NOSNLArticleIE(InfoExtractor):
|
|||
'info_dict': {
|
||||
'id': '2440409',
|
||||
'title': 'Vannacht sliepen weer enkele honderden asielzoekers in Ter Apel buiten',
|
||||
'description': 'Er werd wel geprobeerd om kwetsbare migranten onderdak te bieden, zegt het COA.',
|
||||
'description': 'md5:72b1e1674d798460e79d78fa37e9f56d',
|
||||
'tags': ['aanmeldcentrum', 'Centraal Orgaan opvang asielzoekers', 'COA', 'asielzoekers', 'Ter Apel'],
|
||||
'modified_timestamp': 1660452773,
|
||||
'modified_date': '20220814',
|
||||
'upload_date': '20220813',
|
||||
'thumbnail': 'https://cdn.nos.nl/image/2022/07/18/880346/1024x576a.jpg',
|
||||
'timestamp': 1660401384,
|
||||
'categories': ['Regionaal nieuws', 'Binnenland'],
|
||||
},
|
||||
'playlist_count': 2,
|
||||
}, {
|
||||
|
@ -37,20 +38,37 @@ class NOSNLArticleIE(InfoExtractor):
|
|||
'info_dict': {
|
||||
'id': '2440789',
|
||||
'title': 'Wekdienst 16/8: Groningse acties tien jaar na zware aardbeving • Femke Bol in actie op EK atletiek ',
|
||||
'description': 'Nieuws, weer, verkeer: met dit overzicht begin je geïnformeerd aan de dag.',
|
||||
'description': 'md5:0bd277ed7a44fc15cb12a9d27d8f6641',
|
||||
'tags': ['wekdienst'],
|
||||
'modified_date': '20220816',
|
||||
'modified_timestamp': 1660625449,
|
||||
'timestamp': 1660625449,
|
||||
'upload_date': '20220816',
|
||||
'thumbnail': 'https://cdn.nos.nl/image/2022/08/16/888178/1024x576a.jpg',
|
||||
'categories': ['Binnenland', 'Buitenland'],
|
||||
},
|
||||
'playlist_count': 2,
|
||||
}, {
|
||||
# video url
|
||||
'url': 'https://nos.nl/video/2452718-xi-en-trudeau-botsen-voor-de-camera-op-g20-top-je-hebt-gelekt',
|
||||
'info_dict': {
|
||||
'id': '2452718',
|
||||
'title': 'Xi en Trudeau botsen voor de camera op G20-top: \'Je hebt gelekt\'',
|
||||
'modified_date': '20221117',
|
||||
'description': 'md5:61907dac576f75c11bf8ffffd4a3cc0f',
|
||||
'tags': ['Xi', 'Trudeau', 'G20', 'indonesié'],
|
||||
'upload_date': '20221117',
|
||||
'thumbnail': 'https://cdn.nos.nl/image/2022/11/17/916155/1024x576a.jpg',
|
||||
'modified_timestamp': 1668663388,
|
||||
'timestamp': 1668663388,
|
||||
'categories': ['Buitenland'],
|
||||
},
|
||||
'playlist_mincount': 1,
|
||||
}
|
||||
]
|
||||
|
||||
def _entries(self, nextjs_json, display_id):
|
||||
for item in nextjs_json['items']:
|
||||
for item in nextjs_json:
|
||||
if item.get('type') == 'video':
|
||||
formats, subtitle = self._extract_m3u8_formats_and_subtitles(
|
||||
traverse_obj(item, ('source', 'url')), display_id, ext='mp4')
|
||||
|
@ -77,13 +95,14 @@ class NOSNLArticleIE(InfoExtractor):
|
|||
}
|
||||
|
||||
def _real_extract(self, url):
|
||||
display_id = self._match_valid_url(url).group('display_id')
|
||||
site_type, display_id = self._match_valid_url(url).group('type', 'display_id')
|
||||
webpage = self._download_webpage(url, display_id)
|
||||
|
||||
nextjs_json = self._search_nextjs_data(webpage, display_id)['props']['pageProps']['data']
|
||||
return {
|
||||
'_type': 'playlist',
|
||||
'entries': self._entries(nextjs_json, display_id),
|
||||
'entries': self._entries(
|
||||
[nextjs_json['video']] if site_type == 'video' else nextjs_json['items'], display_id),
|
||||
'id': str(nextjs_json['id']),
|
||||
'title': nextjs_json.get('title') or self._html_search_meta(['title', 'og:title', 'twitter:title'], webpage),
|
||||
'description': (nextjs_json.get('description')
|
||||
|
@ -91,5 +110,6 @@ class NOSNLArticleIE(InfoExtractor):
|
|||
'tags': nextjs_json.get('keywords'),
|
||||
'modified_timestamp': parse_iso8601(nextjs_json.get('modifiedAt')),
|
||||
'thumbnail': nextjs_json.get('shareImageSrc') or self._html_search_meta(['og:image', 'twitter:image'], webpage),
|
||||
'timestamp': parse_iso8601(nextjs_json.get('publishedAt'))
|
||||
'timestamp': parse_iso8601(nextjs_json.get('publishedAt')),
|
||||
'categories': traverse_obj(nextjs_json, ('categories', ..., 'label')),
|
||||
}
|
||||
|
|
|
@ -11,6 +11,7 @@ from ..utils import (
|
|||
int_or_none,
|
||||
qualities,
|
||||
smuggle_url,
|
||||
traverse_obj,
|
||||
unescapeHTML,
|
||||
unified_strdate,
|
||||
unsmuggle_url,
|
||||
|
@ -153,6 +154,26 @@ class OdnoklassnikiIE(InfoExtractor):
|
|||
'title': 'Быковское крещение',
|
||||
'duration': 3038.181,
|
||||
},
|
||||
'skip': 'HTTP Error 400',
|
||||
}, {
|
||||
'note': 'subtitles',
|
||||
'url': 'https://ok.ru/video/4249587550747',
|
||||
'info_dict': {
|
||||
'id': '4249587550747',
|
||||
'ext': 'mp4',
|
||||
'title': 'Small Country An African Childhood (2020) (1080p) +subtitle',
|
||||
'uploader': 'Sunflower Movies',
|
||||
'uploader_id': '595802161179',
|
||||
'upload_date': '20220816',
|
||||
'duration': 6728,
|
||||
'age_limit': 0,
|
||||
'thumbnail': r're:^https?://i\.mycdn\.me/videoPreview\?.+',
|
||||
'like_count': int,
|
||||
'subtitles': dict,
|
||||
},
|
||||
'params': {
|
||||
'skip_download': True,
|
||||
},
|
||||
}, {
|
||||
'url': 'http://ok.ru/web-api/video/moviePlayer/20079905452',
|
||||
'only_matching': True,
|
||||
|
@ -202,6 +223,7 @@ class OdnoklassnikiIE(InfoExtractor):
|
|||
'like_count': 0,
|
||||
'duration': 10444,
|
||||
},
|
||||
'skip': 'Site no longer embeds',
|
||||
}]
|
||||
|
||||
@classmethod
|
||||
|
@ -294,6 +316,16 @@ class OdnoklassnikiIE(InfoExtractor):
|
|||
|
||||
like_count = int_or_none(metadata.get('likeCount'))
|
||||
|
||||
subtitles = {}
|
||||
for sub in traverse_obj(metadata, ('movie', 'subtitleTracks', ...), expected_type=dict):
|
||||
sub_url = sub.get('url')
|
||||
if not sub_url:
|
||||
continue
|
||||
subtitles.setdefault(sub.get('language') or 'en', []).append({
|
||||
'url': sub_url,
|
||||
'ext': 'vtt',
|
||||
})
|
||||
|
||||
info = {
|
||||
'id': video_id,
|
||||
'title': title,
|
||||
|
@ -305,6 +337,7 @@ class OdnoklassnikiIE(InfoExtractor):
|
|||
'like_count': like_count,
|
||||
'age_limit': age_limit,
|
||||
'start_time': start_time,
|
||||
'subtitles': subtitles,
|
||||
}
|
||||
|
||||
# pladform
|
||||
|
|
|
@ -0,0 +1,43 @@
|
|||
from .common import InfoExtractor
|
||||
|
||||
|
||||
class OnePlacePodcastIE(InfoExtractor):
    """Extractor for audio podcast/sermon episodes on www.oneplace.com."""
    _VALID_URL = r'https?://www\.oneplace\.com/[\w]+/[^/]+/listen/[\w-]+-(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://www.oneplace.com/ministries/a-daily-walk/listen/living-in-the-last-days-part-2-958461.html',
        'info_dict': {
            'id': '958461',
            'ext': 'mp3',
            'title': 'Living in the Last Days Part 2 | A Daily Walk with John Randall',
            'description': 'md5:fbb8f1cf21447ac54ecaa2887fc20c6e',
        }
    }, {
        'url': 'https://www.oneplace.com/ministries/ankerberg-show/listen/ep-3-relying-on-the-constant-companionship-of-the-holy-spirit-part-2-922513.html',
        'info_dict': {
            'id': '922513',
            'ext': 'mp3',
            'description': 'md5:8b810b4349aa40a5d033b4536fe428e1',
            'title': 'md5:ce10f7d8d5ddcf485ed8905ef109659d',
        }
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        return {
            'id': video_id,
            # The direct MP3 link is exposed either via an `mp3-url` attribute
            # or on the #player element's data-media-url
            'url': self._search_regex((
                r'mp3-url\s*=\s*"([^"]+)',
                r'<div[^>]+id\s*=\s*"player"[^>]+data-media-url\s*=\s*"(?P<media_url>[^"]+)',
            ), webpage, 'media url'),
            'ext': 'mp3',
            'vcodec': 'none',
            'title': self._html_search_regex((
                # NOTE(review): `[^>]` and `[^<]` carry no quantifier, so this
                # pattern allows exactly one character in those positions —
                # looks fragile; extraction falls back to the meta tags below
                r'<div[^>]class\s*=\s*"details"[^>]+>[^<]<h2[^>]+>(?P<content>[^>]+)>',
                self._meta_regex('og:title'), self._meta_regex('title'),
            ), webpage, 'title', group='content', default=None),
            'description': self._html_search_regex(
                r'<div[^>]+class="[^"]+epDesc"[^>]*>\s*(?P<desc>.+?)\s*</div>',
                webpage, 'description', default=None),
        }
|
|
@ -1,71 +1,128 @@
|
|||
import re
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..utils import (
|
||||
ExtractorError,
|
||||
get_element_by_class,
|
||||
int_or_none,
|
||||
merge_dicts,
|
||||
url_or_none,
|
||||
)
|
||||
|
||||
|
||||
class PeekVidsIE(InfoExtractor):
|
||||
class PeekVidsBaseIE(InfoExtractor):
|
||||
    def _real_extract(self, url):
        """Shared extraction for the PeekVids/PlayVids mirror sites.

        The page embeds a short internal video ID (``data-id``); the actual
        source list is then fetched from the site's ``/v-alt/<id>`` JSON
        endpoint. Metadata is merged from the page's ld+json and the
        detail block in the HTML.
        """
        domain, video_id = self._match_valid_url(url).group('domain', 'id')
        # A 429 response still carries a usable captcha page, so accept it
        webpage = self._download_webpage(url, video_id, expected_status=429)
        if '>Rate Limit Exceeded' in webpage:
            raise ExtractorError(
                f'You are suspected as a bot. Wait, or pass the captcha on the site and provide cookies. {self._login_hint()}',
                video_id=video_id, expected=True)

        title = self._html_search_regex(r'(?s)<h1\b[^>]*>(.+?)</h1>', webpage, 'title')

        # The URL slug becomes the display ID; the numeric data-id is the
        # canonical video ID used by the API
        display_id = video_id
        video_id = self._search_regex(r'(?s)<video\b[^>]+\bdata-id\s*=\s*["\']?([\w-]+)', webpage, 'short video ID')
        srcs = self._download_json(
            f'https://www.{domain}/v-alt/{video_id}', video_id,
            note='Downloading list of source files')

        formats = []
        for k, v in srcs.items():
            f_url = url_or_none(v)
            if not f_url:
                continue

            # Source keys look like `data-src720`; the digits are the height
            height = self._search_regex(r'^data-src(\d{3,})$', k, 'height', default=None)
            if not height:
                continue

            formats.append({
                'url': f_url,
                'format_id': height,
                'height': int_or_none(height),
            })

        if not formats:
            # No height-keyed entries: fall back to every URL, quality unknown
            formats = [{'url': url} for url in srcs.values()]

        info = self._search_json_ld(webpage, video_id, expected_type='VideoObject', default={})
        # The ld+json 'url' points at the page, not the media — drop it so it
        # cannot shadow our extracted formats
        info.pop('url', None)

        # may not have found the thumbnail if it was in a list in the ld+json
        info.setdefault('thumbnail', self._og_search_thumbnail(webpage))
        detail = (get_element_by_class('detail-video-block', webpage)
                  or get_element_by_class('detail-block', webpage) or '')
        # Description text precedes either the ld+json description or the
        # category/tag <ul> inside the detail block
        info['description'] = self._html_search_regex(
            rf'(?s)(.+?)(?:{re.escape(info.get("description", ""))}\s*<|<ul\b)',
            detail, 'description', default=None) or None
        # Strip a trailing ", Site" / "- Site" suffix from the title
        info['title'] = re.sub(r'\s*[,-][^,-]+$', '', info.get('title') or title) or self._generic_title(url)

        def cat_tags(name, html):
            # Values are whitespace-separated inside a labelled <li>
            l = self._html_search_regex(
                rf'(?s)<span\b[^>]*>\s*{re.escape(name)}\s*:\s*</span>(.+?)</li>',
                html, name, default='')
            return list(filter(None, re.split(r'\s+', l)))

        # ld+json values (in `info`) win over the scraped fallbacks
        return merge_dicts({
            'id': video_id,
            'display_id': display_id,
            'age_limit': 18,
            'formats': formats,
            'categories': cat_tags('Categories', detail),
            'tags': cat_tags('Tags', detail),
            'uploader': self._html_search_regex(r'[Uu]ploaded\s+by\s(.+?)"', webpage, 'uploader', default=None),
        }, info)
|
||||
|
||||
class PeekVidsIE(PeekVidsBaseIE):
|
||||
_VALID_URL = r'''(?x)
|
||||
https?://(?:www\.)?peekvids\.com/
|
||||
https?://(?:www\.)?(?P<domain>peekvids\.com)/
|
||||
(?:(?:[^/?#]+/){2}|embed/?\?(?:[^#]*&)?v=)
|
||||
(?P<id>[^/?&#]*)
|
||||
'''
|
||||
_TESTS = [{
|
||||
'url': 'https://peekvids.com/pc/dane-jones-cute-redhead-with-perfect-tits-with-mini-vamp/BSyLMbN0YCd',
|
||||
'md5': 'a00940646c428e232407e3e62f0e8ef5',
|
||||
'md5': '2ff6a357a9717dc9dc9894b51307e9a2',
|
||||
'info_dict': {
|
||||
'id': 'BSyLMbN0YCd',
|
||||
'title': ' Dane Jones - Cute redhead with perfect tits with Mini Vamp, SEXYhub',
|
||||
'id': '1262717',
|
||||
'display_id': 'BSyLMbN0YCd',
|
||||
'title': ' Dane Jones - Cute redhead with perfect tits with Mini Vamp',
|
||||
'ext': 'mp4',
|
||||
'thumbnail': r're:^https?://.*\.jpg$',
|
||||
'description': 'Watch Dane Jones - Cute redhead with perfect tits with Mini Vamp (7 min), uploaded by SEXYhub.com',
|
||||
'description': 'md5:0a61df3620de26c0af8963b1a730cd69',
|
||||
'timestamp': 1642579329,
|
||||
'upload_date': '20220119',
|
||||
'duration': 416,
|
||||
'view_count': int,
|
||||
'age_limit': 18,
|
||||
'uploader': 'SEXYhub.com',
|
||||
'categories': list,
|
||||
'tags': list,
|
||||
},
|
||||
}]
|
||||
_DOMAIN = 'www.peekvids.com'
|
||||
|
||||
    def _real_extract(self, url):
        """Extract a video via the site's ``/v-alt/`` JSON source list.

        NOTE(review): this is the pre-refactor implementation (superseded by
        ``PeekVidsBaseIE._real_extract`` in this diff); it relies on the
        class-level ``_DOMAIN`` constant.
        """
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        # The short internal ID used by the JSON endpoint differs from the URL slug
        short_video_id = self._html_search_regex(r'<video [^>]*data-id="(.+?)"', webpage, 'short video ID')
        srcs = self._download_json(
            f'https://{self._DOMAIN}/v-alt/{short_video_id}', video_id,
            note='Downloading list of source files')
        # Keys look like `data-src720`; strip the 8-char `data-src` prefix to
        # use the height digits as the format_id
        formats = [{
            'url': url,
            'ext': 'mp4',
            'format_id': name[8:],
        } for name, url in srcs.items() if len(name) > 8 and name.startswith('data-src')]
        if not formats:
            # No height-keyed entries: fall back to every URL, quality unknown
            formats = [{'url': url} for url in srcs.values()]

        info = self._search_json_ld(webpage, video_id, expected_type='VideoObject')
        info.update({
            'id': video_id,
            'age_limit': 18,
            'formats': formats,
        })
        return info
|
||||
|
||||
|
||||
class PlayVidsIE(PeekVidsIE): # XXX: Do not subclass from concrete IE
|
||||
_VALID_URL = r'https?://(?:www\.)?playvids\.com/(?:embed/|[^/]{2}/)?(?P<id>[^/?#]*)'
|
||||
class PlayVidsIE(PeekVidsBaseIE):
|
||||
_VALID_URL = r'https?://(?:www\.)?(?P<domain>playvids\.com)/(?:embed/|\w\w?/)?(?P<id>[^/?#]*)'
|
||||
_TESTS = [{
|
||||
'url': 'https://www.playvids.com/U3pBrYhsjXM/pc/dane-jones-cute-redhead-with-perfect-tits-with-mini-vamp',
|
||||
'md5': 'cd7dfd8a2e815a45402369c76e3c1825',
|
||||
'md5': '2f12e50213dd65f142175da633c4564c',
|
||||
'info_dict': {
|
||||
'id': 'U3pBrYhsjXM',
|
||||
'title': ' Dane Jones - Cute redhead with perfect tits with Mini Vamp, SEXYhub',
|
||||
'id': '1978030',
|
||||
'display_id': 'U3pBrYhsjXM',
|
||||
'title': ' Dane Jones - Cute redhead with perfect tits with Mini Vamp',
|
||||
'ext': 'mp4',
|
||||
'thumbnail': r're:^https?://.*\.jpg$',
|
||||
'description': 'Watch Dane Jones - Cute redhead with perfect tits with Mini Vamp video in HD, uploaded by SEXYhub.com',
|
||||
'description': 'md5:0a61df3620de26c0af8963b1a730cd69',
|
||||
'timestamp': 1640435839,
|
||||
'upload_date': '20211225',
|
||||
'duration': 416,
|
||||
'view_count': int,
|
||||
'age_limit': 18,
|
||||
'uploader': 'SEXYhub.com',
|
||||
'categories': list,
|
||||
'tags': list,
|
||||
},
|
||||
}, {
|
||||
'url': 'https://www.playvids.com/es/U3pBrYhsjXM/pc/dane-jones-cute-redhead-with-perfect-tits-with-mini-vamp',
|
||||
|
@ -73,5 +130,62 @@ class PlayVidsIE(PeekVidsIE): # XXX: Do not subclass from concrete IE
|
|||
}, {
|
||||
'url': 'https://www.playvids.com/embed/U3pBrYhsjXM',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'https://www.playvids.com/bKmGLe3IwjZ/sv/brazzers-800-phone-sex-madison-ivy-always-on-the-line',
|
||||
'md5': 'e783986e596cafbf46411a174ab42ba6',
|
||||
'info_dict': {
|
||||
'id': '762385',
|
||||
'display_id': 'bKmGLe3IwjZ',
|
||||
'ext': 'mp4',
|
||||
'title': 'Brazzers - 1 800 Phone Sex: Madison Ivy Always On The Line 6',
|
||||
'description': 'md5:bdcd2db2b8ad85831a491d7c8605dcef',
|
||||
'timestamp': 1516958544,
|
||||
'upload_date': '20180126',
|
||||
'thumbnail': r're:^https?://.*\.jpg$',
|
||||
'duration': 480,
|
||||
'uploader': 'Brazzers',
|
||||
'age_limit': 18,
|
||||
'view_count': int,
|
||||
'age_limit': 18,
|
||||
'categories': list,
|
||||
'tags': list,
|
||||
},
|
||||
}, {
|
||||
'url': 'https://www.playvids.com/v/47iUho33toY',
|
||||
'md5': 'b056b5049d34b648c1e86497cf4febce',
|
||||
'info_dict': {
|
||||
'id': '700621',
|
||||
'display_id': '47iUho33toY',
|
||||
'ext': 'mp4',
|
||||
'title': 'KATEE OWEN STRIPTIASE IN SEXY RED LINGERIE',
|
||||
'description': None,
|
||||
'timestamp': 1507052209,
|
||||
'upload_date': '20171003',
|
||||
'thumbnail': r're:^https?://.*\.jpg$',
|
||||
'duration': 332,
|
||||
'uploader': 'Cacerenele',
|
||||
'age_limit': 18,
|
||||
'view_count': int,
|
||||
'categories': list,
|
||||
'tags': list,
|
||||
},
|
||||
}, {
|
||||
'url': 'https://www.playvids.com/z3_7iwWCmqt/sexy-teen-filipina-striptease-beautiful-pinay-bargirl-strips-and-dances',
|
||||
'md5': 'efa09be9f031314b7b7e3bc6510cd0df',
|
||||
'info_dict': {
|
||||
'id': '1523518',
|
||||
'display_id': 'z3_7iwWCmqt',
|
||||
'ext': 'mp4',
|
||||
'title': 'SEXY TEEN FILIPINA STRIPTEASE - Beautiful Pinay Bargirl Strips and Dances',
|
||||
'description': None,
|
||||
'timestamp': 1607470323,
|
||||
'upload_date': '20201208',
|
||||
'thumbnail': r're:^https?://.*\.jpg$',
|
||||
'duration': 593,
|
||||
'uploader': 'yorours',
|
||||
'age_limit': 18,
|
||||
'view_count': int,
|
||||
'categories': list,
|
||||
'tags': list,
|
||||
},
|
||||
}]
|
||||
_DOMAIN = 'www.playvids.com'
|
||||
|
|
|
@ -1,19 +1,24 @@
|
|||
import json
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..compat import compat_str
|
||||
from ..utils import (
|
||||
determine_ext,
|
||||
float_or_none,
|
||||
int_or_none,
|
||||
try_get,
|
||||
str_or_none,
|
||||
strip_or_none,
|
||||
traverse_obj,
|
||||
unified_timestamp,
|
||||
url_or_none,
|
||||
)
|
||||
|
||||
|
||||
class PinterestBaseIE(InfoExtractor):
|
||||
_VALID_URL_BASE = r'https?://(?:[^/]+\.)?pinterest\.(?:com|fr|de|ch|jp|cl|ca|it|co\.uk|nz|ru|com\.au|at|pt|co\.kr|es|com\.mx|dk|ph|th|com\.uy|co|nl|info|kr|ie|vn|com\.vn|ec|mx|in|pe|co\.at|hu|co\.in|co\.nz|id|com\.ec|com\.py|tw|be|uk|com\.bo|com\.pe)'
|
||||
_VALID_URL_BASE = r'''(?x)
|
||||
https?://(?:[^/]+\.)?pinterest\.(?:
|
||||
com|fr|de|ch|jp|cl|ca|it|co\.uk|nz|ru|com\.au|at|pt|co\.kr|es|com\.mx|
|
||||
dk|ph|th|com\.uy|co|nl|info|kr|ie|vn|com\.vn|ec|mx|in|pe|co\.at|hu|
|
||||
co\.in|co\.nz|id|com\.ec|com\.py|tw|be|uk|com\.bo|com\.pe)'''
|
||||
|
||||
def _call_api(self, resource, video_id, options):
|
||||
return self._download_json(
|
||||
|
@ -24,14 +29,53 @@ class PinterestBaseIE(InfoExtractor):
|
|||
|
||||
def _extract_video(self, data, extract_formats=True):
|
||||
video_id = data['id']
|
||||
thumbnails = []
|
||||
images = data.get('images')
|
||||
if isinstance(images, dict):
|
||||
for thumbnail_id, thumbnail in images.items():
|
||||
if not isinstance(thumbnail, dict):
|
||||
continue
|
||||
thumbnail_url = url_or_none(thumbnail.get('url'))
|
||||
if not thumbnail_url:
|
||||
continue
|
||||
thumbnails.append({
|
||||
'url': thumbnail_url,
|
||||
'width': int_or_none(thumbnail.get('width')),
|
||||
'height': int_or_none(thumbnail.get('height')),
|
||||
})
|
||||
|
||||
title = (data.get('title') or data.get('grid_title') or video_id).strip()
|
||||
info = {
|
||||
'title': strip_or_none(traverse_obj(data, 'title', 'grid_title', default='')),
|
||||
'description': traverse_obj(data, 'seo_description', 'description'),
|
||||
'timestamp': unified_timestamp(data.get('created_at')),
|
||||
'thumbnails': thumbnails,
|
||||
'uploader': traverse_obj(data, ('closeup_attribution', 'full_name')),
|
||||
'uploader_id': str_or_none(traverse_obj(data, ('closeup_attribution', 'id'))),
|
||||
'repost_count': int_or_none(data.get('repin_count')),
|
||||
'comment_count': int_or_none(data.get('comment_count')),
|
||||
'categories': traverse_obj(data, ('pin_join', 'visual_annotation'), expected_type=list),
|
||||
'tags': traverse_obj(data, 'hashtags', expected_type=list),
|
||||
}
|
||||
|
||||
urls = []
|
||||
formats = []
|
||||
duration = None
|
||||
if extract_formats:
|
||||
for format_id, format_dict in data['videos']['video_list'].items():
|
||||
domain = data.get('domain', '')
|
||||
if domain.lower() != 'uploaded by user' and traverse_obj(data, ('embed', 'src')):
|
||||
if not info['title']:
|
||||
info['title'] = None
|
||||
return {
|
||||
'_type': 'url_transparent',
|
||||
'url': data['embed']['src'],
|
||||
**info,
|
||||
}
|
||||
|
||||
elif extract_formats:
|
||||
video_list = traverse_obj(
|
||||
data, ('videos', 'video_list'),
|
||||
('story_pin_data', 'pages', ..., 'blocks', ..., 'video', 'video_list'),
|
||||
expected_type=dict, get_all=False, default={})
|
||||
for format_id, format_dict in video_list.items():
|
||||
if not isinstance(format_dict, dict):
|
||||
continue
|
||||
format_url = url_or_none(format_dict.get('url'))
|
||||
|
@ -53,72 +97,79 @@ class PinterestBaseIE(InfoExtractor):
|
|||
'duration': duration,
|
||||
})
|
||||
|
||||
description = data.get('description') or data.get('description_html') or data.get('seo_description')
|
||||
timestamp = unified_timestamp(data.get('created_at'))
|
||||
|
||||
def _u(field):
|
||||
return try_get(data, lambda x: x['closeup_attribution'][field], compat_str)
|
||||
|
||||
uploader = _u('full_name')
|
||||
uploader_id = _u('id')
|
||||
|
||||
repost_count = int_or_none(data.get('repin_count'))
|
||||
comment_count = int_or_none(data.get('comment_count'))
|
||||
categories = try_get(data, lambda x: x['pin_join']['visual_annotation'], list)
|
||||
tags = data.get('hashtags')
|
||||
|
||||
thumbnails = []
|
||||
images = data.get('images')
|
||||
if isinstance(images, dict):
|
||||
for thumbnail_id, thumbnail in images.items():
|
||||
if not isinstance(thumbnail, dict):
|
||||
continue
|
||||
thumbnail_url = url_or_none(thumbnail.get('url'))
|
||||
if not thumbnail_url:
|
||||
continue
|
||||
thumbnails.append({
|
||||
'url': thumbnail_url,
|
||||
'width': int_or_none(thumbnail.get('width')),
|
||||
'height': int_or_none(thumbnail.get('height')),
|
||||
})
|
||||
|
||||
return {
|
||||
'id': video_id,
|
||||
'title': title,
|
||||
'description': description,
|
||||
'duration': duration,
|
||||
'timestamp': timestamp,
|
||||
'thumbnails': thumbnails,
|
||||
'uploader': uploader,
|
||||
'uploader_id': uploader_id,
|
||||
'repost_count': repost_count,
|
||||
'comment_count': comment_count,
|
||||
'categories': categories,
|
||||
'tags': tags,
|
||||
'formats': formats,
|
||||
'duration': duration,
|
||||
'webpage_url': f'https://www.pinterest.com/pin/{video_id}/',
|
||||
'extractor_key': PinterestIE.ie_key(),
|
||||
'extractor': PinterestIE.IE_NAME,
|
||||
**info,
|
||||
}
|
||||
|
||||
|
||||
class PinterestIE(PinterestBaseIE):
|
||||
_VALID_URL = r'%s/pin/(?P<id>\d+)' % PinterestBaseIE._VALID_URL_BASE
|
||||
_TESTS = [{
|
||||
# formats found in data['videos']
|
||||
'url': 'https://www.pinterest.com/pin/664281013778109217/',
|
||||
'md5': '6550c2af85d6d9f3fe3b88954d1577fc',
|
||||
'info_dict': {
|
||||
'id': '664281013778109217',
|
||||
'ext': 'mp4',
|
||||
'title': 'Origami',
|
||||
'description': 'md5:b9d90ddf7848e897882de9e73344f7dd',
|
||||
'description': 'md5:e29801cab7d741ea8c741bc50c8d00ab',
|
||||
'duration': 57.7,
|
||||
'timestamp': 1593073622,
|
||||
'upload_date': '20200625',
|
||||
'uploader': 'Love origami -I am Dafei',
|
||||
'uploader_id': '586523688879454212',
|
||||
'repost_count': 50,
|
||||
'comment_count': 0,
|
||||
'repost_count': int,
|
||||
'comment_count': int,
|
||||
'categories': list,
|
||||
'tags': list,
|
||||
'thumbnail': r're:^https?://.*\.(?:jpg|png)$',
|
||||
},
|
||||
}, {
|
||||
# formats found in data['story_pin_data']
|
||||
'url': 'https://www.pinterest.com/pin/1084663891475263837/',
|
||||
'md5': '069ac19919ab9e1e13fa60de46290b03',
|
||||
'info_dict': {
|
||||
'id': '1084663891475263837',
|
||||
'ext': 'mp4',
|
||||
'title': 'Gadget, Cool products, Amazon product, technology, Kitchen gadgets',
|
||||
'description': 'md5:d0a4b6ae996ff0c6eed83bc869598d13',
|
||||
'uploader': 'CoolCrazyGadgets',
|
||||
'uploader_id': '1084664028912989237',
|
||||
'upload_date': '20211003',
|
||||
'timestamp': 1633246654.0,
|
||||
'duration': 14.9,
|
||||
'comment_count': int,
|
||||
'repost_count': int,
|
||||
'categories': 'count:9',
|
||||
'tags': list,
|
||||
'thumbnail': r're:^https?://.*\.(?:jpg|png)$',
|
||||
},
|
||||
}, {
|
||||
# vimeo.com embed
|
||||
'url': 'https://www.pinterest.ca/pin/441282463481903715/',
|
||||
'info_dict': {
|
||||
'id': '111691128',
|
||||
'ext': 'mp4',
|
||||
'title': 'Tonite Let\'s All Make Love In London (1967)',
|
||||
'description': 'md5:8190f37b3926807809ec57ec21aa77b2',
|
||||
'uploader': 'Vimeo',
|
||||
'uploader_id': '473792960706651251',
|
||||
'upload_date': '20180120',
|
||||
'timestamp': 1516409040,
|
||||
'duration': 3404,
|
||||
'comment_count': int,
|
||||
'repost_count': int,
|
||||
'categories': 'count:9',
|
||||
'tags': [],
|
||||
'thumbnail': r're:^https?://.*\.(?:jpg|png)$',
|
||||
'uploader_url': 'https://vimeo.com/willardandrade',
|
||||
},
|
||||
'params': {
|
||||
'skip_download': 'm3u8',
|
||||
},
|
||||
}, {
|
||||
'url': 'https://co.pinterest.com/pin/824721750502199491/',
|
||||
|
|
|
@ -84,6 +84,17 @@ class PlutoTVIE(InfoExtractor):
|
|||
}, {
|
||||
'url': 'https://pluto.tv/it/on-demand/series/csi-vegas/episode/legacy-2021-1-1',
|
||||
'only_matching': True,
|
||||
},
|
||||
{
|
||||
'url': 'https://pluto.tv/en/on-demand/movies/attack-of-the-killer-tomatoes-1977-1-1-ptv1',
|
||||
'md5': '7db56369c0da626a32d505ec6eb3f89f',
|
||||
'info_dict': {
|
||||
'id': '5b190c7bb0875c36c90c29c4',
|
||||
'ext': 'mp4',
|
||||
'title': 'Attack of the Killer Tomatoes',
|
||||
'description': 'A group of scientists band together to save the world from mutated tomatoes that KILL! (1978)',
|
||||
'duration': 5700,
|
||||
}
|
||||
}
|
||||
]
|
||||
|
||||
|
@ -103,7 +114,7 @@ class PlutoTVIE(InfoExtractor):
|
|||
compat_urlparse.urljoin(first_segment_url.group(1), '0-end/master.m3u8'))
|
||||
continue
|
||||
first_segment_url = re.search(
|
||||
r'^(https?://.*/).+\-0+\.ts$', res,
|
||||
r'^(https?://.*/).+\-0+[0-1]0\.ts$', res,
|
||||
re.MULTILINE)
|
||||
if first_segment_url:
|
||||
m3u8_urls.add(
|
||||
|
|
|
@ -10,6 +10,7 @@ from ..compat import (
|
|||
compat_urlparse
|
||||
)
|
||||
from ..utils import (
|
||||
determine_ext,
|
||||
extract_attributes,
|
||||
ExtractorError,
|
||||
InAdvancePagedList,
|
||||
|
@ -17,6 +18,7 @@ from ..utils import (
|
|||
js_to_json,
|
||||
parse_iso8601,
|
||||
strip_or_none,
|
||||
traverse_obj,
|
||||
unified_timestamp,
|
||||
unescapeHTML,
|
||||
url_or_none,
|
||||
|
@ -48,28 +50,11 @@ class PolskieRadioBaseExtractor(InfoExtractor):
|
|||
yield entry
|
||||
|
||||
|
||||
class PolskieRadioIE(PolskieRadioBaseExtractor):
|
||||
_VALID_URL = r'https?://(?:www\.)?polskieradio(?:24)?\.pl/\d+/\d+/Artykul/(?P<id>[0-9]+)'
|
||||
_TESTS = [{ # Old-style single broadcast.
|
||||
'url': 'http://www.polskieradio.pl/7/5102/Artykul/1587943,Prof-Andrzej-Nowak-o-historii-nie-da-sie-myslec-beznamietnie',
|
||||
'info_dict': {
|
||||
'id': '1587943',
|
||||
'title': 'Prof. Andrzej Nowak: o historii nie da się myśleć beznamiętnie',
|
||||
'description': 'md5:12f954edbf3120c5e7075e17bf9fc5c5',
|
||||
},
|
||||
'playlist': [{
|
||||
'md5': '2984ee6ce9046d91fc233bc1a864a09a',
|
||||
'info_dict': {
|
||||
'id': '1540576',
|
||||
'ext': 'mp3',
|
||||
'title': 'md5:d4623290d4ac983bf924061c75c23a0d',
|
||||
'timestamp': 1456594200,
|
||||
'upload_date': '20160227',
|
||||
'duration': 2364,
|
||||
'thumbnail': r're:^https?://static\.prsa\.pl/images/.*\.jpg$'
|
||||
},
|
||||
}],
|
||||
}, { # New-style single broadcast.
|
||||
class PolskieRadioLegacyIE(PolskieRadioBaseExtractor):
|
||||
# legacy sites
|
||||
IE_NAME = 'polskieradio:legacy'
|
||||
_VALID_URL = r'https?://(?:www\.)?polskieradio(?:24)?\.pl/\d+/\d+/[Aa]rtykul/(?P<id>\d+)'
|
||||
_TESTS = [{
|
||||
'url': 'https://www.polskieradio.pl/8/2382/Artykul/2534482,Zagarysci-Poezja-jak-spoiwo',
|
||||
'info_dict': {
|
||||
'id': '2534482',
|
||||
|
@ -96,16 +81,6 @@ class PolskieRadioIE(PolskieRadioBaseExtractor):
|
|||
'ext': 'mp3',
|
||||
'title': 'Pogłos 29 października godz. 23:01',
|
||||
},
|
||||
}, {
|
||||
'url': 'http://polskieradio.pl/9/305/Artykul/1632955,Bardzo-popularne-slowo-remis',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'http://www.polskieradio.pl/7/5102/Artykul/1587943',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
# with mp4 video
|
||||
'url': 'http://www.polskieradio.pl/9/299/Artykul/1634903,Brexit-Leszek-Miller-swiat-sie-nie-zawali-Europa-bedzie-trwac-dalej',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'https://polskieradio24.pl/130/4503/Artykul/2621876,Narusza-nasza-suwerennosc-Publicysci-o-uzaleznieniu-funduszy-UE-od-praworzadnosci',
|
||||
'only_matching': True,
|
||||
|
@ -114,7 +89,9 @@ class PolskieRadioIE(PolskieRadioBaseExtractor):
|
|||
def _real_extract(self, url):
|
||||
playlist_id = self._match_id(url)
|
||||
|
||||
webpage = self._download_webpage(url, playlist_id)
|
||||
webpage, urlh = self._download_webpage_handle(url, playlist_id)
|
||||
if PolskieRadioIE.suitable(urlh.url):
|
||||
return self.url_result(urlh.url, PolskieRadioIE, playlist_id)
|
||||
|
||||
content = self._search_regex(
|
||||
r'(?s)<div[^>]+class="\s*this-article\s*"[^>]*>(.+?)<div[^>]+class="tags"[^>]*>',
|
||||
|
@ -153,23 +130,160 @@ class PolskieRadioIE(PolskieRadioBaseExtractor):
|
|||
return self.playlist_result(entries, playlist_id, title, description)
|
||||
|
||||
|
||||
class PolskieRadioCategoryIE(InfoExtractor):
|
||||
_VALID_URL = r'https?://(?:www\.)?polskieradio\.pl/\d+(?:,[^/]+)?/(?P<id>\d+)'
|
||||
class PolskieRadioIE(InfoExtractor):
|
||||
# new next.js sites, excluding radiokierowcow.pl
|
||||
_VALID_URL = r'https?://(?:[^/]+\.)?polskieradio(?:24)?\.pl/artykul/(?P<id>\d+)'
|
||||
_TESTS = [{
|
||||
'url': 'http://www.polskieradio.pl/7/5102,HISTORIA-ZYWA',
|
||||
'url': 'https://jedynka.polskieradio.pl/artykul/1587943',
|
||||
'info_dict': {
|
||||
'id': '1587943',
|
||||
'title': 'Prof. Andrzej Nowak: o historii nie da się myśleć beznamiętnie',
|
||||
'description': 'md5:12f954edbf3120c5e7075e17bf9fc5c5',
|
||||
},
|
||||
'playlist': [{
|
||||
'md5': '2984ee6ce9046d91fc233bc1a864a09a',
|
||||
'info_dict': {
|
||||
'id': '7a85d429-5356-4def-a347-925e4ae7406b',
|
||||
'ext': 'mp3',
|
||||
'title': 'md5:d4623290d4ac983bf924061c75c23a0d',
|
||||
},
|
||||
}],
|
||||
}, {
|
||||
'url': 'https://trojka.polskieradio.pl/artykul/1632955',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
# with mp4 video
|
||||
'url': 'https://trojka.polskieradio.pl/artykul/1634903',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'https://jedynka.polskieradio.pl/artykul/3042436,Polityka-wschodnia-ojca-i-syna-Wladyslawa-Lokietka-i-Kazimierza-Wielkiego',
|
||||
'only_matching': True,
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
playlist_id = self._match_id(url)
|
||||
|
||||
webpage = self._download_webpage(url, playlist_id)
|
||||
|
||||
article_data = traverse_obj(
|
||||
self._search_nextjs_data(webpage, playlist_id), ('props', 'pageProps', 'data', 'articleData'))
|
||||
|
||||
title = strip_or_none(article_data['title'])
|
||||
|
||||
description = strip_or_none(article_data.get('lead'))
|
||||
|
||||
entries = [{
|
||||
'url': entry['file'],
|
||||
'ext': determine_ext(entry.get('fileName')),
|
||||
'id': self._search_regex(
|
||||
r'([a-f\d]{8}-(?:[a-f\d]{4}-){3}[a-f\d]{12})', entry['file'], 'entry id'),
|
||||
'title': strip_or_none(entry.get('description')) or title,
|
||||
} for entry in article_data.get('attachments') or () if entry['fileType'] in ('Audio', )]
|
||||
|
||||
return self.playlist_result(entries, playlist_id, title, description)
|
||||
|
||||
|
||||
class PolskieRadioAuditionIE(InfoExtractor):
|
||||
# new next.js sites
|
||||
IE_NAME = 'polskieradio:audition'
|
||||
_VALID_URL = r'https?://(?:[^/]+\.)?polskieradio\.pl/audycj[ae]/(?P<id>\d+)'
|
||||
_TESTS = [{
|
||||
# articles, PR1
|
||||
'url': 'https://jedynka.polskieradio.pl/audycje/5102',
|
||||
'info_dict': {
|
||||
'id': '5102',
|
||||
'title': 'HISTORIA ŻYWA',
|
||||
'title': 'Historia żywa',
|
||||
'thumbnail': r're:https://static\.prsa\.pl/images/.+',
|
||||
},
|
||||
'playlist_mincount': 38,
|
||||
}, {
|
||||
'url': 'http://www.polskieradio.pl/7/4807',
|
||||
# episodes, PR1
|
||||
'url': 'https://jedynka.polskieradio.pl/audycje/5769',
|
||||
'info_dict': {
|
||||
'id': '4807',
|
||||
'title': 'Vademecum 1050. rocznicy Chrztu Polski'
|
||||
'id': '5769',
|
||||
'title': 'AgroFakty',
|
||||
'thumbnail': r're:https://static\.prsa\.pl/images/.+',
|
||||
},
|
||||
'playlist_mincount': 5
|
||||
'playlist_mincount': 269,
|
||||
}, {
|
||||
# both episodes and articles, PR3
|
||||
'url': 'https://trojka.polskieradio.pl/audycja/8906',
|
||||
'info_dict': {
|
||||
'id': '8906',
|
||||
'title': 'Trójka budzi',
|
||||
'thumbnail': r're:https://static\.prsa\.pl/images/.+',
|
||||
},
|
||||
'playlist_mincount': 722,
|
||||
}]
|
||||
|
||||
def _call_lp3(self, path, query, video_id, note):
|
||||
return self._download_json(
|
||||
f'https://lp3test.polskieradio.pl/{path}', video_id, note,
|
||||
query=query, headers={'x-api-key': '9bf6c5a2-a7d0-4980-9ed7-a3f7291f2a81'})
|
||||
|
||||
def _entries(self, playlist_id, has_episodes, has_articles):
|
||||
for i in itertools.count(1) if has_episodes else []:
|
||||
page = self._call_lp3(
|
||||
'AudioArticle/GetListByCategoryId', {
|
||||
'categoryId': playlist_id,
|
||||
'PageSize': 10,
|
||||
'skip': i,
|
||||
'format': 400,
|
||||
}, playlist_id, f'Downloading episode list page {i}')
|
||||
if not traverse_obj(page, 'data'):
|
||||
break
|
||||
for episode in page['data']:
|
||||
yield {
|
||||
'id': str(episode['id']),
|
||||
'url': episode['file'],
|
||||
'title': episode.get('title'),
|
||||
'duration': int_or_none(episode.get('duration')),
|
||||
'timestamp': parse_iso8601(episode.get('datePublic')),
|
||||
}
|
||||
|
||||
for i in itertools.count(1) if has_articles else []:
|
||||
page = self._call_lp3(
|
||||
'Article/GetListByCategoryId', {
|
||||
'categoryId': playlist_id,
|
||||
'PageSize': 9,
|
||||
'skip': i,
|
||||
'format': 400,
|
||||
}, playlist_id, f'Downloading article list page {i}')
|
||||
if not traverse_obj(page, 'data'):
|
||||
break
|
||||
for article in page['data']:
|
||||
yield {
|
||||
'_type': 'url_transparent',
|
||||
'ie_key': PolskieRadioIE.ie_key(),
|
||||
'id': str(article['id']),
|
||||
'url': article['url'],
|
||||
'title': article.get('shortTitle'),
|
||||
'description': traverse_obj(article, ('description', 'lead')),
|
||||
'timestamp': parse_iso8601(article.get('datePublic')),
|
||||
}
|
||||
|
||||
def _real_extract(self, url):
|
||||
playlist_id = self._match_id(url)
|
||||
|
||||
page_props = traverse_obj(
|
||||
self._search_nextjs_data(self._download_webpage(url, playlist_id), playlist_id),
|
||||
('props', 'pageProps', ('data', None)), get_all=False)
|
||||
|
||||
has_episodes = bool(traverse_obj(page_props, 'episodes', 'audios'))
|
||||
has_articles = bool(traverse_obj(page_props, 'articles'))
|
||||
|
||||
return self.playlist_result(
|
||||
self._entries(playlist_id, has_episodes, has_articles), playlist_id,
|
||||
title=traverse_obj(page_props, ('details', 'name')),
|
||||
description=traverse_obj(page_props, ('details', 'description', 'lead')),
|
||||
thumbnail=traverse_obj(page_props, ('details', 'photo')))
|
||||
|
||||
|
||||
class PolskieRadioCategoryIE(InfoExtractor):
|
||||
# legacy sites
|
||||
IE_NAME = 'polskieradio:category'
|
||||
_VALID_URL = r'https?://(?:www\.)?polskieradio\.pl/\d+(?:,[^/]+)?/(?P<id>\d+)'
|
||||
_TESTS = [{
|
||||
'url': 'http://www.polskieradio.pl/7/129,Sygnaly-dnia?ref=source',
|
||||
'only_matching': True
|
||||
}, {
|
||||
|
@ -186,9 +300,6 @@ class PolskieRadioCategoryIE(InfoExtractor):
|
|||
'title': 'Muzyka',
|
||||
},
|
||||
'playlist_mincount': 61
|
||||
}, {
|
||||
'url': 'http://www.polskieradio.pl/7,Jedynka/5102,HISTORIA-ZYWA',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'http://www.polskieradio.pl/8,Dwojka/196,Publicystyka',
|
||||
'only_matching': True,
|
||||
|
@ -196,7 +307,7 @@ class PolskieRadioCategoryIE(InfoExtractor):
|
|||
|
||||
@classmethod
|
||||
def suitable(cls, url):
|
||||
return False if PolskieRadioIE.suitable(url) else super(PolskieRadioCategoryIE, cls).suitable(url)
|
||||
return False if PolskieRadioLegacyIE.suitable(url) else super().suitable(url)
|
||||
|
||||
def _entries(self, url, page, category_id):
|
||||
content = page
|
||||
|
@ -209,7 +320,7 @@ class PolskieRadioCategoryIE(InfoExtractor):
|
|||
if not href:
|
||||
continue
|
||||
yield self.url_result(
|
||||
compat_urlparse.urljoin(url, href), PolskieRadioIE.ie_key(),
|
||||
compat_urlparse.urljoin(url, href), PolskieRadioLegacyIE,
|
||||
entry_id, entry.get('title'))
|
||||
mobj = re.search(
|
||||
r'<div[^>]+class=["\']next["\'][^>]*>\s*<a[^>]+href=(["\'])(?P<url>(?:(?!\1).)+)\1',
|
||||
|
@ -222,7 +333,9 @@ class PolskieRadioCategoryIE(InfoExtractor):
|
|||
|
||||
def _real_extract(self, url):
|
||||
category_id = self._match_id(url)
|
||||
webpage = self._download_webpage(url, category_id)
|
||||
webpage, urlh = self._download_webpage_handle(url, category_id)
|
||||
if PolskieRadioAuditionIE.suitable(urlh.url):
|
||||
return self.url_result(urlh.url, PolskieRadioAuditionIE, category_id)
|
||||
title = self._html_search_regex(
|
||||
r'<title>([^<]+) - [^<]+ - [^<]+</title>',
|
||||
webpage, 'title', fatal=False)
|
||||
|
@ -358,7 +471,7 @@ class PolskieRadioPodcastListIE(PolskieRadioPodcastBaseExtractor):
|
|||
'entries': InAdvancePagedList(
|
||||
get_page, math.ceil(data['itemCount'] / self._PAGE_SIZE), self._PAGE_SIZE),
|
||||
'id': str(data['id']),
|
||||
'title': data['title'],
|
||||
'title': data.get('title'),
|
||||
'description': data.get('description'),
|
||||
'uploader': data.get('announcer'),
|
||||
}
|
||||
|
@ -374,6 +487,10 @@ class PolskieRadioPodcastIE(PolskieRadioPodcastBaseExtractor):
|
|||
'ext': 'mp3',
|
||||
'title': 'Theresa May rezygnuje. Co dalej z brexitem?',
|
||||
'description': 'md5:e41c409a29d022b70ef0faa61dbded60',
|
||||
'episode': 'Theresa May rezygnuje. Co dalej z brexitem?',
|
||||
'duration': 2893,
|
||||
'thumbnail': 'https://static.prsa.pl/images/58649376-c8a0-4ba2-a714-78b383285f5f.jpg',
|
||||
'series': 'Raport o stanie świata',
|
||||
},
|
||||
}]
|
||||
|
||||
|
|
|
@ -1,20 +1,20 @@
|
|||
import random
|
||||
from urllib.parse import urlparse
|
||||
import urllib.parse
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..utils import (
|
||||
ExtractorError,
|
||||
int_or_none,
|
||||
float_or_none,
|
||||
int_or_none,
|
||||
traverse_obj,
|
||||
try_get,
|
||||
unescapeHTML,
|
||||
url_or_none,
|
||||
traverse_obj
|
||||
)
|
||||
|
||||
|
||||
class RedditIE(InfoExtractor):
|
||||
_VALID_URL = r'https?://(?P<subdomain>[^/]+\.)?reddit(?:media)?\.com/r/(?P<slug>[^/]+/comments/(?P<id>[^/?#&]+))'
|
||||
_VALID_URL = r'https?://(?P<subdomain>[^/]+\.)?reddit(?:media)?\.com/(?P<slug>(?:r|user)/[^/]+/comments/(?P<id>[^/?#&]+))'
|
||||
_TESTS = [{
|
||||
'url': 'https://www.reddit.com/r/videos/comments/6rrwyj/that_small_heart_attack/',
|
||||
'info_dict': {
|
||||
|
@ -32,6 +32,7 @@ class RedditIE(InfoExtractor):
|
|||
'dislike_count': int,
|
||||
'comment_count': int,
|
||||
'age_limit': 0,
|
||||
'channel_id': 'videos',
|
||||
},
|
||||
'params': {
|
||||
'skip_download': True,
|
||||
|
@ -55,6 +56,58 @@ class RedditIE(InfoExtractor):
|
|||
'dislike_count': int,
|
||||
'comment_count': int,
|
||||
'age_limit': 0,
|
||||
'channel_id': 'aww',
|
||||
},
|
||||
}, {
|
||||
# User post
|
||||
'url': 'https://www.reddit.com/user/creepyt0es/comments/nip71r/i_plan_to_make_more_stickers_and_prints_check/',
|
||||
'info_dict': {
|
||||
'id': 'zasobba6wp071',
|
||||
'ext': 'mp4',
|
||||
'display_id': 'nip71r',
|
||||
'title': 'I plan to make more stickers and prints! Check them out on my Etsy! Or get them through my Patreon. Links below.',
|
||||
'thumbnail': r're:^https?://.*\.(?:jpg|png)',
|
||||
'thumbnails': 'count:5',
|
||||
'timestamp': 1621709093,
|
||||
'upload_date': '20210522',
|
||||
'uploader': 'creepyt0es',
|
||||
'duration': 6,
|
||||
'like_count': int,
|
||||
'dislike_count': int,
|
||||
'comment_count': int,
|
||||
'age_limit': 0,
|
||||
'channel_id': 'u_creepyt0es',
|
||||
},
|
||||
'params': {
|
||||
'skip_download': True,
|
||||
},
|
||||
}, {
|
||||
# videos embedded in reddit text post
|
||||
'url': 'https://www.reddit.com/r/KamenRider/comments/wzqkxp/finale_kamen_rider_revice_episode_50_family_to/',
|
||||
'playlist_count': 2,
|
||||
'info_dict': {
|
||||
'id': 'wzqkxp',
|
||||
'title': 'md5:72d3d19402aa11eff5bd32fc96369b37',
|
||||
},
|
||||
}, {
|
||||
# crossposted reddit-hosted media
|
||||
'url': 'https://www.reddit.com/r/dumbfuckers_club/comments/zjjw82/cringe/',
|
||||
'md5': '746180895c7b75a9d6b05341f507699a',
|
||||
'info_dict': {
|
||||
'id': 'a1oneun6pa5a1',
|
||||
'ext': 'mp4',
|
||||
'display_id': 'zjjw82',
|
||||
'title': 'Cringe',
|
||||
'uploader': 'Otaku-senpai69420',
|
||||
'thumbnail': r're:^https?://.*\.(?:jpg|png)',
|
||||
'upload_date': '20221212',
|
||||
'timestamp': 1670812309,
|
||||
'duration': 16,
|
||||
'like_count': int,
|
||||
'dislike_count': int,
|
||||
'comment_count': int,
|
||||
'age_limit': 0,
|
||||
'channel_id': 'dumbfuckers_club',
|
||||
},
|
||||
}, {
|
||||
'url': 'https://www.reddit.com/r/videos/comments/6rrwyj',
|
||||
|
@ -95,17 +148,13 @@ class RedditIE(InfoExtractor):
|
|||
|
||||
self._set_cookie('.reddit.com', 'reddit_session', self._gen_session_id())
|
||||
self._set_cookie('.reddit.com', '_options', '%7B%22pref_quarantine_optin%22%3A%20true%7D')
|
||||
data = self._download_json(f'https://{subdomain}reddit.com/r/{slug}/.json', video_id, fatal=False)
|
||||
data = self._download_json(f'https://{subdomain}reddit.com/{slug}/.json', video_id, fatal=False)
|
||||
if not data:
|
||||
# Fall back to old.reddit.com in case the requested subdomain fails
|
||||
data = self._download_json(f'https://old.reddit.com/r/{slug}/.json', video_id)
|
||||
data = self._download_json(f'https://old.reddit.com/{slug}/.json', video_id)
|
||||
data = data[0]['data']['children'][0]['data']
|
||||
video_url = data['url']
|
||||
|
||||
# Avoid recursing into the same reddit URL
|
||||
if 'reddit.com/' in video_url and '/%s/' % video_id in video_url:
|
||||
raise ExtractorError('No media found', expected=True)
|
||||
|
||||
over_18 = data.get('over_18')
|
||||
if over_18 is True:
|
||||
age_limit = 18
|
||||
|
@ -142,14 +191,42 @@ class RedditIE(InfoExtractor):
|
|||
'thumbnails': thumbnails,
|
||||
'timestamp': float_or_none(data.get('created_utc')),
|
||||
'uploader': data.get('author'),
|
||||
'channel_id': data.get('subreddit'),
|
||||
'like_count': int_or_none(data.get('ups')),
|
||||
'dislike_count': int_or_none(data.get('downs')),
|
||||
'comment_count': int_or_none(data.get('num_comments')),
|
||||
'age_limit': age_limit,
|
||||
}
|
||||
|
||||
parsed_url = urllib.parse.urlparse(video_url)
|
||||
|
||||
# Check for embeds in text posts, or else raise to avoid recursing into the same reddit URL
|
||||
if 'reddit.com' in parsed_url.netloc and f'/{video_id}/' in parsed_url.path:
|
||||
entries = []
|
||||
for media in traverse_obj(data, ('media_metadata', ...), expected_type=dict):
|
||||
if not media.get('id') or media.get('e') != 'RedditVideo':
|
||||
continue
|
||||
formats = []
|
||||
if media.get('hlsUrl'):
|
||||
formats.extend(self._extract_m3u8_formats(
|
||||
unescapeHTML(media['hlsUrl']), video_id, 'mp4', m3u8_id='hls', fatal=False))
|
||||
if media.get('dashUrl'):
|
||||
formats.extend(self._extract_mpd_formats(
|
||||
unescapeHTML(media['dashUrl']), video_id, mpd_id='dash', fatal=False))
|
||||
if formats:
|
||||
entries.append({
|
||||
'id': media['id'],
|
||||
'display_id': video_id,
|
||||
'formats': formats,
|
||||
**info,
|
||||
})
|
||||
if entries:
|
||||
return self.playlist_result(entries, video_id, info.get('title'))
|
||||
raise ExtractorError('No media found', expected=True)
|
||||
|
||||
# Check if media is hosted on reddit:
|
||||
reddit_video = traverse_obj(data, (('media', 'secure_media'), 'reddit_video'), get_all=False)
|
||||
reddit_video = traverse_obj(data, (
|
||||
(None, ('crosspost_parent_list', ...)), ('secure_media', 'media'), 'reddit_video'), get_all=False)
|
||||
if reddit_video:
|
||||
playlist_urls = [
|
||||
try_get(reddit_video, lambda x: unescapeHTML(x[y]))
|
||||
|
@ -189,7 +266,6 @@ class RedditIE(InfoExtractor):
|
|||
'duration': int_or_none(reddit_video.get('duration')),
|
||||
}
|
||||
|
||||
parsed_url = urlparse(video_url)
|
||||
if parsed_url.netloc == 'v.redd.it':
|
||||
self.raise_no_formats('This video is processing', expected=True, video_id=video_id)
|
||||
return {
|
||||
|
|
|
@ -1,8 +1,5 @@
|
|||
from .common import InfoExtractor
|
||||
from ..utils import (
|
||||
int_or_none,
|
||||
remove_start,
|
||||
)
|
||||
from ..utils import extract_attributes, int_or_none, remove_start, traverse_obj
|
||||
|
||||
|
||||
class RozhlasIE(InfoExtractor):
|
||||
|
@ -45,3 +42,138 @@ class RozhlasIE(InfoExtractor):
|
|||
'duration': duration,
|
||||
'vcodec': 'none',
|
||||
}
|
||||
|
||||
|
||||
class RozhlasVltavaIE(InfoExtractor):
|
||||
_VALID_URL = r'https?://(?:\w+\.rozhlas|english\.radio)\.cz/[\w-]+-(?P<id>\d+)'
|
||||
_TESTS = [{
|
||||
'url': 'https://wave.rozhlas.cz/papej-masicko-porcujeme-a-bilancujeme-filmy-a-serialy-ktere-letos-zabily-8891337',
|
||||
'md5': 'ba2fdbc1242fc16771c7695d271ec355',
|
||||
'info_dict': {
|
||||
'id': 8891337,
|
||||
'title': 'md5:21f99739d04ab49d8c189ec711eef4ec',
|
||||
},
|
||||
'playlist_count': 1,
|
||||
'playlist': [{
|
||||
'md5': 'ba2fdbc1242fc16771c7695d271ec355',
|
||||
'info_dict': {
|
||||
'id': '10520988',
|
||||
'ext': 'mp3',
|
||||
'title': 'Papej masíčko! Porcujeme a bilancujeme filmy a seriály, které to letos zabily',
|
||||
'description': 'md5:1c6d29fb9564e1f17fc1bb83ae7da0bc',
|
||||
'duration': 1574,
|
||||
'artist': 'Aleš Stuchlý',
|
||||
'channel_id': 'radio-wave',
|
||||
},
|
||||
}]
|
||||
}, {
|
||||
'url': 'https://wave.rozhlas.cz/poslechnete-si-neklid-podcastovy-thriller-o-vine-strachu-a-vztahu-ktery-zasel-8554744',
|
||||
'info_dict': {
|
||||
'id': 8554744,
|
||||
'title': 'Poslechněte si Neklid. Podcastový thriller o vině, strachu a vztahu, který zašel příliš daleko',
|
||||
},
|
||||
'playlist_count': 5,
|
||||
'playlist': [{
|
||||
'md5': '93d4109cf8f40523699ae9c1d4600bdd',
|
||||
'info_dict': {
|
||||
'id': '9890713',
|
||||
'ext': 'mp3',
|
||||
'title': 'Neklid #1',
|
||||
'description': '1. díl: Neklid: 1. díl',
|
||||
'duration': 1025,
|
||||
'artist': 'Josef Kokta',
|
||||
'channel_id': 'radio-wave',
|
||||
'chapter': 'Neklid #1',
|
||||
'chapter_number': 1,
|
||||
},
|
||||
}, {
|
||||
'md5': 'e9763235be4a6dcf94bc8a5bac1ca126',
|
||||
'info_dict': {
|
||||
'id': '9890716',
|
||||
'ext': 'mp3',
|
||||
'title': 'Neklid #2',
|
||||
'description': '2. díl: Neklid: 2. díl',
|
||||
'duration': 768,
|
||||
'artist': 'Josef Kokta',
|
||||
'channel_id': 'radio-wave',
|
||||
'chapter': 'Neklid #2',
|
||||
'chapter_number': 2,
|
||||
},
|
||||
}, {
|
||||
'md5': '00b642ea94b78cc949ac84da09f87895',
|
||||
'info_dict': {
|
||||
'id': '9890722',
|
||||
'ext': 'mp3',
|
||||
'title': 'Neklid #3',
|
||||
'description': '3. díl: Neklid: 3. díl',
|
||||
'duration': 607,
|
||||
'artist': 'Josef Kokta',
|
||||
'channel_id': 'radio-wave',
|
||||
'chapter': 'Neklid #3',
|
||||
'chapter_number': 3,
|
||||
},
|
||||
}, {
|
||||
'md5': 'faef97b1b49da7df874740f118c19dea',
|
||||
'info_dict': {
|
||||
'id': '9890728',
|
||||
'ext': 'mp3',
|
||||
'title': 'Neklid #4',
|
||||
'description': '4. díl: Neklid: 4. díl',
|
||||
'duration': 621,
|
||||
'artist': 'Josef Kokta',
|
||||
'channel_id': 'radio-wave',
|
||||
'chapter': 'Neklid #4',
|
||||
'chapter_number': 4,
|
||||
},
|
||||
}, {
|
||||
'md5': '6e729fa39b647325b868d419c76f3efa',
|
||||
'info_dict': {
|
||||
'id': '9890734',
|
||||
'ext': 'mp3',
|
||||
'title': 'Neklid #5',
|
||||
'description': '5. díl: Neklid: 5. díl',
|
||||
'duration': 908,
|
||||
'artist': 'Josef Kokta',
|
||||
'channel_id': 'radio-wave',
|
||||
'chapter': 'Neklid #5',
|
||||
'chapter_number': 5,
|
||||
},
|
||||
}]
|
||||
}]
|
||||
|
||||
def _extract_video(self, entry):
|
||||
chapter_number = int_or_none(traverse_obj(entry, ('meta', 'ga', 'contentSerialPart')))
|
||||
return {
|
||||
'id': entry['meta']['ga']['contentId'],
|
||||
'title': traverse_obj(entry, ('meta', 'ga', 'contentName')),
|
||||
'description': entry.get('title'),
|
||||
'duration': entry.get('duration'),
|
||||
'artist': traverse_obj(entry, ('meta', 'ga', 'contentAuthor')),
|
||||
'channel_id': traverse_obj(entry, ('meta', 'ga', 'contentCreator')),
|
||||
'chapter': traverse_obj(entry, ('meta', 'ga', 'contentNameShort')) if chapter_number else None,
|
||||
'chapter_number': chapter_number,
|
||||
'formats': [{
|
||||
'url': audio_link['url'],
|
||||
'ext': audio_link.get('variant'),
|
||||
'format_id': audio_link.get('variant'),
|
||||
'abr': audio_link.get('bitrate'),
|
||||
'acodec': audio_link.get('variant'),
|
||||
'vcodec': 'none',
|
||||
} for audio_link in entry['audioLinks']],
|
||||
}
|
||||
|
||||
def _real_extract(self, url):
|
||||
video_id = self._match_id(url)
|
||||
webpage = self._download_webpage(url, video_id)
|
||||
|
||||
# FIXME: Use get_element_text_and_html_by_tag when it accepts less strict html
|
||||
data = self._parse_json(extract_attributes(self._search_regex(
|
||||
r'(<div class="mujRozhlasPlayer" data-player=\'[^\']+\'>)',
|
||||
webpage, 'player'))['data-player'], video_id)['data']
|
||||
|
||||
return {
|
||||
'_type': 'playlist',
|
||||
'id': data.get('embedId'),
|
||||
'title': traverse_obj(data, ('series', 'title')),
|
||||
'entries': map(self._extract_video, data['playlist']),
|
||||
}
|
||||
|
|
|
@ -4,11 +4,15 @@ import re
|
|||
from .common import InfoExtractor
|
||||
from ..compat import compat_HTTPError
|
||||
from ..utils import (
|
||||
ExtractorError,
|
||||
UnsupportedError,
|
||||
clean_html,
|
||||
get_element_by_class,
|
||||
int_or_none,
|
||||
parse_count,
|
||||
parse_iso8601,
|
||||
traverse_obj,
|
||||
unescapeHTML,
|
||||
ExtractorError,
|
||||
)
|
||||
|
||||
|
||||
|
@ -111,24 +115,6 @@ class RumbleEmbedIE(InfoExtractor):
|
|||
}]
|
||||
|
||||
_WEBPAGE_TESTS = [
|
||||
{
|
||||
'note': 'Rumble embed',
|
||||
'url': 'https://rumble.com/vdmum1-moose-the-dog-helps-girls-dig-a-snow-fort.html',
|
||||
'md5': '53af34098a7f92c4e51cf0bd1c33f009',
|
||||
'info_dict': {
|
||||
'id': 'vb0ofn',
|
||||
'ext': 'mp4',
|
||||
'timestamp': 1612662578,
|
||||
'uploader': 'LovingMontana',
|
||||
'channel': 'LovingMontana',
|
||||
'upload_date': '20210207',
|
||||
'title': 'Winter-loving dog helps girls dig a snow fort ',
|
||||
'channel_url': 'https://rumble.com/c/c-546523',
|
||||
'thumbnail': 'https://sp.rmbl.ws/s8/1/5/f/x/x/5fxxb.OvCc.1-small-Moose-The-Dog-Helps-Girls-D.jpg',
|
||||
'duration': 103,
|
||||
'live_status': 'not_live',
|
||||
}
|
||||
},
|
||||
{
|
||||
'note': 'Rumble JS embed',
|
||||
'url': 'https://therightscoop.com/what-does-9-plus-1-plus-1-equal-listen-to-this-audio-of-attempted-kavanaugh-assassins-call-and-youll-get-it',
|
||||
|
@ -200,7 +186,7 @@ class RumbleEmbedIE(InfoExtractor):
|
|||
'filesize': 'size',
|
||||
'width': 'w',
|
||||
'height': 'h',
|
||||
}, default={})
|
||||
}, expected_type=lambda x: int(x) or None)
|
||||
})
|
||||
|
||||
subtitles = {
|
||||
|
@ -235,6 +221,84 @@ class RumbleEmbedIE(InfoExtractor):
|
|||
}
|
||||
|
||||
|
||||
class RumbleIE(InfoExtractor):
|
||||
_VALID_URL = r'https?://(?:www\.)?rumble\.com/(?P<id>v(?!ideos)[\w.-]+)[^/]*$'
|
||||
_EMBED_REGEX = [r'<a class=video-item--a href=(?P<url>/v[\w.-]+\.html)>']
|
||||
_TESTS = [{
|
||||
'add_ie': ['RumbleEmbed'],
|
||||
'url': 'https://rumble.com/vdmum1-moose-the-dog-helps-girls-dig-a-snow-fort.html',
|
||||
'md5': '53af34098a7f92c4e51cf0bd1c33f009',
|
||||
'info_dict': {
|
||||
'id': 'vb0ofn',
|
||||
'ext': 'mp4',
|
||||
'timestamp': 1612662578,
|
||||
'uploader': 'LovingMontana',
|
||||
'channel': 'LovingMontana',
|
||||
'upload_date': '20210207',
|
||||
'title': 'Winter-loving dog helps girls dig a snow fort ',
|
||||
'description': 'Moose the dog is more than happy to help with digging out this epic snow fort. Great job, Moose!',
|
||||
'channel_url': 'https://rumble.com/c/c-546523',
|
||||
'thumbnail': r're:https://.+\.jpg',
|
||||
'duration': 103,
|
||||
'like_count': int,
|
||||
'view_count': int,
|
||||
'live_status': 'not_live',
|
||||
}
|
||||
}, {
|
||||
'url': 'http://www.rumble.com/vDMUM1?key=value',
|
||||
'only_matching': True,
|
||||
}]
|
||||
|
||||
_WEBPAGE_TESTS = [{
|
||||
'url': 'https://rumble.com/videos?page=2',
|
||||
'playlist_count': 25,
|
||||
'info_dict': {
|
||||
'id': 'videos?page=2',
|
||||
'title': 'All videos',
|
||||
'description': 'Browse videos uploaded to Rumble.com',
|
||||
'age_limit': 0,
|
||||
},
|
||||
}, {
|
||||
'url': 'https://rumble.com/live-videos',
|
||||
'playlist_mincount': 19,
|
||||
'info_dict': {
|
||||
'id': 'live-videos',
|
||||
'title': 'Live Videos',
|
||||
'description': 'Live videos on Rumble.com',
|
||||
'age_limit': 0,
|
||||
},
|
||||
}, {
|
||||
'url': 'https://rumble.com/search/video?q=rumble&sort=views',
|
||||
'playlist_count': 24,
|
||||
'info_dict': {
|
||||
'id': 'video?q=rumble&sort=views',
|
||||
'title': 'Search results for: rumble',
|
||||
'age_limit': 0,
|
||||
},
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
page_id = self._match_id(url)
|
||||
webpage = self._download_webpage(url, page_id)
|
||||
url_info = next(RumbleEmbedIE.extract_from_webpage(self._downloader, url, webpage), None)
|
||||
if not url_info:
|
||||
raise UnsupportedError(url)
|
||||
|
||||
release_ts_str = self._search_regex(
|
||||
r'(?:Livestream begins|Streamed on):\s+<time datetime="([^"]+)',
|
||||
webpage, 'release date', fatal=False, default=None)
|
||||
view_count_str = self._search_regex(r'<span class="media-heading-info">([\d,]+) Views',
|
||||
webpage, 'view count', fatal=False, default=None)
|
||||
|
||||
return self.url_result(
|
||||
url_info['url'], ie_key=url_info['ie_key'], url_transparent=True,
|
||||
view_count=parse_count(view_count_str),
|
||||
release_timestamp=parse_iso8601(release_ts_str),
|
||||
like_count=parse_count(get_element_by_class('rumbles-count', webpage)),
|
||||
description=clean_html(get_element_by_class('media-description', webpage)),
|
||||
)
|
||||
|
||||
|
||||
class RumbleChannelIE(InfoExtractor):
|
||||
_VALID_URL = r'(?P<url>https?://(?:www\.)?rumble\.com/(?:c|user)/(?P<id>[^&?#$/]+))'
|
||||
|
||||
|
|
|
@ -91,12 +91,12 @@ class RutubeBaseIE(InfoExtractor):
|
|||
class RutubeIE(RutubeBaseIE):
|
||||
IE_NAME = 'rutube'
|
||||
IE_DESC = 'Rutube videos'
|
||||
_VALID_URL = r'https?://rutube\.ru/(?:video|(?:play/)?embed)/(?P<id>[\da-z]{32})'
|
||||
_VALID_URL = r'https?://rutube\.ru/(?:video(?:/private)?|(?:play/)?embed)/(?P<id>[\da-z]{32})'
|
||||
_EMBED_REGEX = [r'<iframe[^>]+?src=(["\'])(?P<url>(?:https?:)?//rutube\.ru/(?:play/)?embed/[\da-z]{32}.*?)\1']
|
||||
|
||||
_TESTS = [{
|
||||
'url': 'http://rutube.ru/video/3eac3b4561676c17df9132a9a1e62e3e/',
|
||||
'md5': '1d24f180fac7a02f3900712e5a5764d6',
|
||||
'md5': 'e33ac625efca66aba86cbec9851f2692',
|
||||
'info_dict': {
|
||||
'id': '3eac3b4561676c17df9132a9a1e62e3e',
|
||||
'ext': 'mp4',
|
||||
|
@ -108,6 +108,10 @@ class RutubeIE(RutubeBaseIE):
|
|||
'timestamp': 1381943602,
|
||||
'upload_date': '20131016',
|
||||
'age_limit': 0,
|
||||
'view_count': int,
|
||||
'thumbnail': 'http://pic.rutubelist.ru/video/d2/a0/d2a0aec998494a396deafc7ba2c82add.jpg',
|
||||
'category': ['Новости и СМИ'],
|
||||
|
||||
},
|
||||
}, {
|
||||
'url': 'http://rutube.ru/play/embed/a10e53b86e8f349080f718582ce4c661',
|
||||
|
@ -121,6 +125,24 @@ class RutubeIE(RutubeBaseIE):
|
|||
}, {
|
||||
'url': 'https://rutube.ru/video/10b3a03fc01d5bbcc632a2f3514e8aab/?pl_type=source',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
'url': 'https://rutube.ru/video/private/884fb55f07a97ab673c7d654553e0f48/?p=x2QojCumHTS3rsKHWXN8Lg',
|
||||
'md5': 'd106225f15d625538fe22971158e896f',
|
||||
'info_dict': {
|
||||
'id': '884fb55f07a97ab673c7d654553e0f48',
|
||||
'ext': 'mp4',
|
||||
'title': 'Яцуноками, Nioh2',
|
||||
'description': 'Nioh2: финал сражения с боссом Яцуноками',
|
||||
'duration': 15,
|
||||
'uploader': 'mexus',
|
||||
'uploader_id': '24222106',
|
||||
'timestamp': 1670646232,
|
||||
'upload_date': '20221210',
|
||||
'age_limit': 0,
|
||||
'view_count': int,
|
||||
'thumbnail': 'http://pic.rutubelist.ru/video/f2/d4/f2d42b54be0a6e69c1c22539e3152156.jpg',
|
||||
'category': ['Видеоигры'],
|
||||
},
|
||||
}]
|
||||
|
||||
@classmethod
|
||||
|
@ -129,8 +151,9 @@ class RutubeIE(RutubeBaseIE):
|
|||
|
||||
def _real_extract(self, url):
|
||||
video_id = self._match_id(url)
|
||||
info = self._download_and_extract_info(video_id)
|
||||
info['formats'] = self._download_and_extract_formats(video_id)
|
||||
query = parse_qs(url)
|
||||
info = self._download_and_extract_info(video_id, query)
|
||||
info['formats'] = self._download_and_extract_formats(video_id, query)
|
||||
return info
|
||||
|
||||
|
||||
|
|
|
@ -0,0 +1,52 @@
|
|||
import urllib.parse
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..utils import traverse_obj, update_url_query
|
||||
|
||||
|
||||
class ScreencastifyIE(InfoExtractor):
|
||||
_VALID_URL = r'https?://watch\.screencastify\.com/v/(?P<id>[^/?#]+)'
|
||||
_TESTS = [{
|
||||
'url': 'https://watch.screencastify.com/v/sYVkZip3quLKhHw4Ybk8',
|
||||
'info_dict': {
|
||||
'id': 'sYVkZip3quLKhHw4Ybk8',
|
||||
'ext': 'mp4',
|
||||
'title': 'Inserting and Aligning the Case Top and Bottom',
|
||||
'description': '',
|
||||
'uploader': 'Paul Gunn',
|
||||
'extra_param_to_segment_url': str,
|
||||
},
|
||||
'params': {
|
||||
'skip_download': 'm3u8',
|
||||
},
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
video_id = self._match_id(url)
|
||||
info = self._download_json(
|
||||
f'https://umbrella.svc.screencastify.com/api/umbrellaService/watch/{video_id}', video_id)
|
||||
|
||||
query_string = traverse_obj(info, ('manifest', 'auth', 'query'))
|
||||
query = urllib.parse.parse_qs(query_string)
|
||||
formats = []
|
||||
dash_manifest_url = traverse_obj(info, ('manifest', 'url'))
|
||||
if dash_manifest_url:
|
||||
formats.extend(
|
||||
self._extract_mpd_formats(
|
||||
dash_manifest_url, video_id, mpd_id='dash', query=query, fatal=False))
|
||||
hls_manifest_url = traverse_obj(info, ('manifest', 'hlsUrl'))
|
||||
if hls_manifest_url:
|
||||
formats.extend(
|
||||
self._extract_m3u8_formats(
|
||||
hls_manifest_url, video_id, ext='mp4', m3u8_id='hls', query=query, fatal=False))
|
||||
for f in formats:
|
||||
f['url'] = update_url_query(f['url'], query)
|
||||
|
||||
return {
|
||||
'id': video_id,
|
||||
'title': info.get('title'),
|
||||
'description': info.get('description'),
|
||||
'uploader': info.get('userName'),
|
||||
'formats': formats,
|
||||
'extra_param_to_segment_url': query_string,
|
||||
}
|
|
@ -0,0 +1,17 @@
|
|||
from .common import InfoExtractor
|
||||
|
||||
|
||||
class SibnetEmbedIE(InfoExtractor):
|
||||
# Ref: https://help.sibnet.ru/?sibnet_video_embed
|
||||
_VALID_URL = False
|
||||
_EMBED_REGEX = [r'<iframe\b[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?//video\.sibnet\.ru/shell\.php\?.*?\bvideoid=\d+.*?)\1']
|
||||
_WEBPAGE_TESTS = [{
|
||||
'url': 'https://phpbb3.x-tk.ru/bbcode-video-sibnet-t24.html',
|
||||
'info_dict': {
|
||||
'id': 'shell', # FIXME?
|
||||
'ext': 'mp4',
|
||||
'age_limit': 0,
|
||||
'thumbnail': 'https://video.sibnet.ru/upload/cover/video_1887072_0.jpg',
|
||||
'title': 'КВН Москва не сразу строилась - Девушка впервые играет в Mortal Kombat',
|
||||
}
|
||||
}]
|
|
@ -1,92 +1,521 @@
|
|||
import re
|
||||
import urllib.parse
|
||||
|
||||
from .common import InfoExtractor
|
||||
from ..utils import (
|
||||
bool_or_none,
|
||||
ExtractorError,
|
||||
int_or_none,
|
||||
parse_qs,
|
||||
smuggle_url,
|
||||
try_get,
|
||||
traverse_obj,
|
||||
unified_timestamp,
|
||||
update_url_query,
|
||||
url_or_none,
|
||||
xpath_text,
|
||||
)
|
||||
|
||||
|
||||
class SlidesLiveIE(InfoExtractor):
|
||||
_VALID_URL = r'https?://slideslive\.com/(?P<id>[0-9]+)'
|
||||
_WORKING = False
|
||||
_VALID_URL = r'https?://slideslive\.com/(?:embed/(?:presentation/)?)?(?P<id>[0-9]+)'
|
||||
_TESTS = [{
|
||||
# video_service_name = YOUTUBE
|
||||
# service_name = yoda, only XML slides info
|
||||
'url': 'https://slideslive.com/38902413/gcc-ia16-backend',
|
||||
'md5': 'b29fcd6c6952d0c79c5079b0e7a07e6f',
|
||||
'info_dict': {
|
||||
'id': 'LMtgR8ba0b0',
|
||||
'id': '38902413',
|
||||
'ext': 'mp4',
|
||||
'title': 'GCC IA16 backend',
|
||||
'description': 'Watch full version of this video at https://slideslive.com/38902413.',
|
||||
'uploader': 'SlidesLive Videos - A',
|
||||
'uploader_id': 'UC62SdArr41t_-_fX40QCLRw',
|
||||
'timestamp': 1597615266,
|
||||
'upload_date': '20170925',
|
||||
}
|
||||
}, {
|
||||
# video_service_name = yoda
|
||||
'url': 'https://slideslive.com/38935785',
|
||||
'md5': '575cd7a6c0acc6e28422fe76dd4bcb1a',
|
||||
'info_dict': {
|
||||
'id': 'RMraDYN5ozA_',
|
||||
'ext': 'mp4',
|
||||
'title': 'Offline Reinforcement Learning: From Algorithms to Practical Challenges',
|
||||
'timestamp': 1648189972,
|
||||
'upload_date': '20220325',
|
||||
'thumbnail': r're:^https?://.*\.jpg',
|
||||
'thumbnails': 'count:42',
|
||||
'chapters': 'count:41',
|
||||
'duration': 1638,
|
||||
},
|
||||
'params': {
|
||||
'skip_download': 'm3u8',
|
||||
},
|
||||
}, {
|
||||
# video_service_name = youtube
|
||||
# service_name = yoda, /v7/ slides
|
||||
'url': 'https://slideslive.com/38935785',
|
||||
'info_dict': {
|
||||
'id': '38935785',
|
||||
'ext': 'mp4',
|
||||
'title': 'Offline Reinforcement Learning: From Algorithms to Practical Challenges',
|
||||
'upload_date': '20211115',
|
||||
'timestamp': 1636996003,
|
||||
'thumbnail': r're:^https?://.*\.(?:jpg|png)',
|
||||
'thumbnails': 'count:640',
|
||||
'chapters': 'count:639',
|
||||
'duration': 9832,
|
||||
},
|
||||
'params': {
|
||||
'skip_download': 'm3u8',
|
||||
},
|
||||
}, {
|
||||
# service_name = yoda, /v1/ slides
|
||||
'url': 'https://slideslive.com/38973182/how-should-a-machine-learning-researcher-think-about-ai-ethics',
|
||||
'info_dict': {
|
||||
'id': '38973182',
|
||||
'ext': 'mp4',
|
||||
'title': 'How Should a Machine Learning Researcher Think About AI Ethics?',
|
||||
'upload_date': '20220201',
|
||||
'thumbnail': r're:^https?://.*\.jpg',
|
||||
'timestamp': 1643728135,
|
||||
'thumbnails': 'count:3',
|
||||
'chapters': 'count:2',
|
||||
'duration': 5889,
|
||||
},
|
||||
'params': {
|
||||
'skip_download': 'm3u8',
|
||||
},
|
||||
}, {
|
||||
# service_name = youtube, only XML slides info
|
||||
'url': 'https://slideslive.com/38897546/special-metaprednaska-petra-ludwiga-hodnoty-pro-lepsi-spolecnost',
|
||||
'md5': '8a79b5e3d700837f40bd2afca3c8fa01',
|
||||
'info_dict': {
|
||||
'id': 'jmg02wCJD5M',
|
||||
'display_id': '38897546',
|
||||
'ext': 'mp4',
|
||||
'title': 'SPECIÁL: Meta-přednáška Petra Ludwiga - Hodnoty pro lepší společnost',
|
||||
'description': 'Watch full version of this video at https://slideslive.com/38897546.',
|
||||
'channel_url': 'https://www.youtube.com/channel/UCZWdAkNYFncuX0khyvhqnxw',
|
||||
'channel': 'SlidesLive Videos - G1',
|
||||
'channel_id': 'UCZWdAkNYFncuX0khyvhqnxw',
|
||||
'uploader_id': 'UCZWdAkNYFncuX0khyvhqnxw',
|
||||
'uploader': 'SlidesLive Videos - G1',
|
||||
'uploader_url': 'http://www.youtube.com/channel/UCZWdAkNYFncuX0khyvhqnxw',
|
||||
'live_status': 'not_live',
|
||||
'upload_date': '20160710',
|
||||
'timestamp': 1618786715,
|
||||
'duration': 6827,
|
||||
'like_count': int,
|
||||
'view_count': int,
|
||||
'comment_count': int,
|
||||
'channel_follower_count': int,
|
||||
'age_limit': 0,
|
||||
'thumbnail': r're:^https?://.*\.(?:jpg|webp)',
|
||||
'thumbnails': 'count:169',
|
||||
'playable_in_embed': True,
|
||||
'availability': 'unlisted',
|
||||
'tags': [],
|
||||
'categories': ['People & Blogs'],
|
||||
'chapters': 'count:168',
|
||||
},
|
||||
}, {
|
||||
# embed-only presentation, only XML slides info
|
||||
'url': 'https://slideslive.com/embed/presentation/38925850',
|
||||
'info_dict': {
|
||||
'id': '38925850',
|
||||
'ext': 'mp4',
|
||||
'title': 'Towards a Deep Network Architecture for Structured Smoothness',
|
||||
'thumbnail': r're:^https?://.*\.jpg',
|
||||
'thumbnails': 'count:8',
|
||||
'timestamp': 1629671508,
|
||||
'upload_date': '20210822',
|
||||
'chapters': 'count:7',
|
||||
'duration': 326,
|
||||
},
|
||||
'params': {
|
||||
'skip_download': 'm3u8',
|
||||
},
|
||||
}, {
|
||||
# embed-only presentation, only JSON slides info, /v5/ slides (.png)
|
||||
'url': 'https://slideslive.com/38979920/',
|
||||
'info_dict': {
|
||||
'id': '38979920',
|
||||
'ext': 'mp4',
|
||||
'title': 'MoReL: Multi-omics Relational Learning',
|
||||
'thumbnail': r're:^https?://.*\.(?:jpg|png)',
|
||||
'thumbnails': 'count:7',
|
||||
'timestamp': 1654714970,
|
||||
'upload_date': '20220608',
|
||||
'chapters': 'count:6',
|
||||
'duration': 171,
|
||||
},
|
||||
'params': {
|
||||
'skip_download': 'm3u8',
|
||||
},
|
||||
}, {
|
||||
# /v2/ slides (.jpg)
|
||||
'url': 'https://slideslive.com/38954074',
|
||||
'info_dict': {
|
||||
'id': '38954074',
|
||||
'ext': 'mp4',
|
||||
'title': 'Decentralized Attribution of Generative Models',
|
||||
'thumbnail': r're:^https?://.*\.jpg',
|
||||
'thumbnails': 'count:16',
|
||||
'timestamp': 1622806321,
|
||||
'upload_date': '20210604',
|
||||
'chapters': 'count:15',
|
||||
'duration': 306,
|
||||
},
|
||||
'params': {
|
||||
'skip_download': 'm3u8',
|
||||
},
|
||||
}, {
|
||||
# /v4/ slides (.png)
|
||||
'url': 'https://slideslive.com/38979570/',
|
||||
'info_dict': {
|
||||
'id': '38979570',
|
||||
'ext': 'mp4',
|
||||
'title': 'Efficient Active Search for Combinatorial Optimization Problems',
|
||||
'thumbnail': r're:^https?://.*\.(?:jpg|png)',
|
||||
'thumbnails': 'count:9',
|
||||
'timestamp': 1654714896,
|
||||
'upload_date': '20220608',
|
||||
'chapters': 'count:8',
|
||||
'duration': 295,
|
||||
},
|
||||
'params': {
|
||||
'skip_download': 'm3u8',
|
||||
},
|
||||
}, {
|
||||
# /v10/ slides
|
||||
'url': 'https://slideslive.com/embed/presentation/38979880?embed_parent_url=https%3A%2F%2Fedit.videoken.com%2F',
|
||||
'info_dict': {
|
||||
'id': '38979880',
|
||||
'ext': 'mp4',
|
||||
'title': 'The Representation Power of Neural Networks',
|
||||
'timestamp': 1654714962,
|
||||
'thumbnail': r're:^https?://.*\.(?:jpg|png)',
|
||||
'thumbnails': 'count:22',
|
||||
'upload_date': '20220608',
|
||||
'chapters': 'count:21',
|
||||
'duration': 294,
|
||||
},
|
||||
'params': {
|
||||
'skip_download': 'm3u8',
|
||||
},
|
||||
}, {
|
||||
# /v7/ slides, 2 video slides
|
||||
'url': 'https://slideslive.com/embed/presentation/38979682?embed_container_origin=https%3A%2F%2Fedit.videoken.com',
|
||||
'playlist_count': 3,
|
||||
'info_dict': {
|
||||
'id': '38979682-playlist',
|
||||
'title': 'LoRA: Low-Rank Adaptation of Large Language Models',
|
||||
},
|
||||
'playlist': [{
|
||||
'info_dict': {
|
||||
'id': '38979682',
|
||||
'ext': 'mp4',
|
||||
'title': 'LoRA: Low-Rank Adaptation of Large Language Models',
|
||||
'timestamp': 1654714920,
|
||||
'thumbnail': r're:^https?://.*\.(?:jpg|png)',
|
||||
'thumbnails': 'count:30',
|
||||
'upload_date': '20220608',
|
||||
'chapters': 'count:31',
|
||||
'duration': 272,
|
||||
},
|
||||
}, {
|
||||
'info_dict': {
|
||||
'id': '38979682-021',
|
||||
'ext': 'mp4',
|
||||
'title': 'LoRA: Low-Rank Adaptation of Large Language Models - Slide 021',
|
||||
'duration': 3,
|
||||
'timestamp': 1654714920,
|
||||
'upload_date': '20220608',
|
||||
},
|
||||
}, {
|
||||
'info_dict': {
|
||||
'id': '38979682-024',
|
||||
'ext': 'mp4',
|
||||
'title': 'LoRA: Low-Rank Adaptation of Large Language Models - Slide 024',
|
||||
'duration': 4,
|
||||
'timestamp': 1654714920,
|
||||
'upload_date': '20220608',
|
||||
},
|
||||
}],
|
||||
'params': {
|
||||
'skip_download': 'm3u8',
|
||||
},
|
||||
}, {
|
||||
# /v6/ slides, 1 video slide, edit.videoken.com embed
|
||||
'url': 'https://slideslive.com/38979481/',
|
||||
'playlist_count': 2,
|
||||
'info_dict': {
|
||||
'id': '38979481-playlist',
|
||||
'title': 'How to Train Your MAML to Excel in Few-Shot Classification',
|
||||
},
|
||||
'playlist': [{
|
||||
'info_dict': {
|
||||
'id': '38979481',
|
||||
'ext': 'mp4',
|
||||
'title': 'How to Train Your MAML to Excel in Few-Shot Classification',
|
||||
'timestamp': 1654714877,
|
||||
'thumbnail': r're:^https?://.*\.(?:jpg|png)',
|
||||
'thumbnails': 'count:43',
|
||||
'upload_date': '20220608',
|
||||
'chapters': 'count:43',
|
||||
'duration': 315,
|
||||
},
|
||||
}, {
|
||||
'info_dict': {
|
||||
'id': '38979481-013',
|
||||
'ext': 'mp4',
|
||||
'title': 'How to Train Your MAML to Excel in Few-Shot Classification - Slide 013',
|
||||
'duration': 3,
|
||||
'timestamp': 1654714877,
|
||||
'upload_date': '20220608',
|
||||
},
|
||||
}],
|
||||
'params': {
|
||||
'skip_download': 'm3u8',
|
||||
},
|
||||
}, {
|
||||
# /v3/ slides, .jpg and .png, service_name = youtube
|
||||
'url': 'https://slideslive.com/embed/38932460/',
|
||||
'info_dict': {
|
||||
'id': 'RTPdrgkyTiE',
|
||||
'display_id': '38932460',
|
||||
'ext': 'mp4',
|
||||
'title': 'Active Learning for Hierarchical Multi-Label Classification',
|
||||
'description': 'Watch full version of this video at https://slideslive.com/38932460.',
|
||||
'channel': 'SlidesLive Videos - A',
|
||||
'channel_id': 'UC62SdArr41t_-_fX40QCLRw',
|
||||
'channel_url': 'https://www.youtube.com/channel/UC62SdArr41t_-_fX40QCLRw',
|
||||
'uploader': 'SlidesLive Videos - A',
|
||||
'uploader_id': 'UC62SdArr41t_-_fX40QCLRw',
|
||||
'uploader_url': 'http://www.youtube.com/channel/UC62SdArr41t_-_fX40QCLRw',
|
||||
'upload_date': '20200903',
|
||||
'timestamp': 1602599092,
|
||||
'duration': 942,
|
||||
'age_limit': 0,
|
||||
'live_status': 'not_live',
|
||||
'playable_in_embed': True,
|
||||
'availability': 'unlisted',
|
||||
'categories': ['People & Blogs'],
|
||||
'tags': [],
|
||||
'channel_follower_count': int,
|
||||
'like_count': int,
|
||||
'view_count': int,
|
||||
'thumbnail': r're:^https?://.*\.(?:jpg|png|webp)',
|
||||
'thumbnails': 'count:21',
|
||||
'chapters': 'count:20',
|
||||
},
|
||||
'params': {
|
||||
'skip_download': 'm3u8',
|
||||
},
|
||||
}, {
|
||||
# /v3/ slides, .png only, service_name = yoda
|
||||
'url': 'https://slideslive.com/38983994',
|
||||
'info_dict': {
|
||||
'id': '38983994',
|
||||
'ext': 'mp4',
|
||||
'title': 'Zero-Shot AutoML with Pretrained Models',
|
||||
'timestamp': 1662384834,
|
||||
'upload_date': '20220905',
|
||||
'thumbnail': r're:^https?://.*\.(?:jpg|png)',
|
||||
'thumbnails': 'count:23',
|
||||
'chapters': 'count:22',
|
||||
'duration': 295,
|
||||
},
|
||||
'params': {
|
||||
'skip_download': 'm3u8',
|
||||
},
|
||||
}, {
|
||||
# service_name = yoda
|
||||
'url': 'https://slideslive.com/38903721/magic-a-scientific-resurrection-of-an-esoteric-legend',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
# video_service_name = url
|
||||
# dead link, service_name = url
|
||||
'url': 'https://slideslive.com/38922070/learning-transferable-skills-1',
|
||||
'only_matching': True,
|
||||
}, {
|
||||
# video_service_name = vimeo
|
||||
# dead link, service_name = vimeo
|
||||
'url': 'https://slideslive.com/38921896/retrospectives-a-venue-for-selfreflection-in-ml-research-3',
|
||||
'only_matching': True,
|
||||
}]
|
||||
|
||||
_WEBPAGE_TESTS = [{
|
||||
# only XML slides info
|
||||
'url': 'https://iclr.cc/virtual_2020/poster_Hklr204Fvr.html',
|
||||
'info_dict': {
|
||||
'id': '38925850',
|
||||
'ext': 'mp4',
|
||||
'title': 'Towards a Deep Network Architecture for Structured Smoothness',
|
||||
'thumbnail': r're:^https?://.*\.jpg',
|
||||
'thumbnails': 'count:8',
|
||||
'timestamp': 1629671508,
|
||||
'upload_date': '20210822',
|
||||
'chapters': 'count:7',
|
||||
'duration': 326,
|
||||
},
|
||||
'params': {
|
||||
'skip_download': 'm3u8',
|
||||
},
|
||||
}]
|
||||
|
||||
@classmethod
|
||||
def _extract_embed_urls(cls, url, webpage):
|
||||
# Reference: https://slideslive.com/embed_presentation.js
|
||||
for embed_id in re.findall(r'(?s)new\s+SlidesLiveEmbed\s*\([^)]+\bpresentationId:\s*["\'](\d+)["\']', webpage):
|
||||
url_parsed = urllib.parse.urlparse(url)
|
||||
origin = f'{url_parsed.scheme}://{url_parsed.netloc}'
|
||||
yield update_url_query(
|
||||
f'https://slideslive.com/embed/presentation/{embed_id}', {
|
||||
'embed_parent_url': url,
|
||||
'embed_container_origin': origin,
|
||||
})
|
||||
|
||||
def _download_embed_webpage_handle(self, video_id, headers):
|
||||
return self._download_webpage_handle(
|
||||
f'https://slideslive.com/embed/presentation/{video_id}', video_id,
|
||||
headers=headers, query=traverse_obj(headers, {
|
||||
'embed_parent_url': 'Referer',
|
||||
'embed_container_origin': 'Origin',
|
||||
}))
|
||||
|
||||
def _extract_custom_m3u8_info(self, m3u8_data):
|
||||
m3u8_dict = {}
|
||||
|
||||
lookup = {
|
||||
'PRESENTATION-TITLE': 'title',
|
||||
'PRESENTATION-UPDATED-AT': 'timestamp',
|
||||
'PRESENTATION-THUMBNAIL': 'thumbnail',
|
||||
'PLAYLIST-TYPE': 'playlist_type',
|
||||
'VOD-VIDEO-SERVICE-NAME': 'service_name',
|
||||
'VOD-VIDEO-ID': 'service_id',
|
||||
'VOD-VIDEO-SERVERS': 'video_servers',
|
||||
'VOD-SUBTITLES': 'subtitles',
|
||||
'VOD-SLIDES-JSON-URL': 'slides_json_url',
|
||||
'VOD-SLIDES-XML-URL': 'slides_xml_url',
|
||||
}
|
||||
|
||||
for line in m3u8_data.splitlines():
|
||||
if not line.startswith('#EXT-SL-'):
|
||||
continue
|
||||
tag, _, value = line.partition(':')
|
||||
key = lookup.get(tag.lstrip('#EXT-SL-'))
|
||||
if not key:
|
||||
continue
|
||||
m3u8_dict[key] = value
|
||||
|
||||
# Some values are stringified JSON arrays
|
||||
for key in ('video_servers', 'subtitles'):
|
||||
if key in m3u8_dict:
|
||||
m3u8_dict[key] = self._parse_json(m3u8_dict[key], None, fatal=False) or []
|
||||
|
||||
return m3u8_dict
|
||||
|
||||
def _extract_formats_and_duration(self, cdn_hostname, path, video_id, skip_duration=False):
|
||||
formats, duration = [], None
|
||||
|
||||
hls_formats = self._extract_m3u8_formats(
|
||||
f'https://{cdn_hostname}/{path}/master.m3u8',
|
||||
video_id, 'mp4', m3u8_id='hls', fatal=False, live=True)
|
||||
if hls_formats:
|
||||
if not skip_duration:
|
||||
duration = self._extract_m3u8_vod_duration(
|
||||
hls_formats[0]['url'], video_id, note='Extracting duration from HLS manifest')
|
||||
formats.extend(hls_formats)
|
||||
|
||||
dash_formats = self._extract_mpd_formats(
|
||||
f'https://{cdn_hostname}/{path}/master.mpd', video_id, mpd_id='dash', fatal=False)
|
||||
if dash_formats:
|
||||
if not duration and not skip_duration:
|
||||
duration = self._extract_mpd_vod_duration(
|
||||
f'https://{cdn_hostname}/{path}/master.mpd', video_id,
|
||||
note='Extracting duration from DASH manifest')
|
||||
formats.extend(dash_formats)
|
||||
|
||||
return formats, duration
|
||||
|
||||
def _real_extract(self, url):
|
||||
video_id = self._match_id(url)
|
||||
video_data = self._download_json(
|
||||
'https://ben.slideslive.com/player/' + video_id, video_id)
|
||||
service_name = video_data['video_service_name'].lower()
|
||||
webpage, urlh = self._download_embed_webpage_handle(
|
||||
video_id, headers=traverse_obj(parse_qs(url), {
|
||||
'Referer': ('embed_parent_url', -1),
|
||||
'Origin': ('embed_container_origin', -1)}))
|
||||
redirect_url = urlh.geturl()
|
||||
if 'domain_not_allowed' in redirect_url:
|
||||
domain = traverse_obj(parse_qs(redirect_url), ('allowed_domains[]', ...), get_all=False)
|
||||
if not domain:
|
||||
raise ExtractorError(
|
||||
'This is an embed-only presentation. Try passing --referer', expected=True)
|
||||
webpage, _ = self._download_embed_webpage_handle(video_id, headers={
|
||||
'Referer': f'https://{domain}/',
|
||||
'Origin': f'https://{domain}',
|
||||
})
|
||||
|
||||
player_token = self._search_regex(r'data-player-token="([^"]+)"', webpage, 'player token')
|
||||
player_data = self._download_webpage(
|
||||
f'https://ben.slideslive.com/player/{video_id}', video_id,
|
||||
note='Downloading player info', query={'player_token': player_token})
|
||||
player_info = self._extract_custom_m3u8_info(player_data)
|
||||
|
||||
service_name = player_info['service_name'].lower()
|
||||
assert service_name in ('url', 'yoda', 'vimeo', 'youtube')
|
||||
service_id = video_data['video_service_id']
|
||||
service_id = player_info['service_id']
|
||||
|
||||
slide_url_template = 'https://slides.slideslive.com/%s/slides/original/%s%s'
|
||||
slides, slides_info = {}, []
|
||||
|
||||
if player_info.get('slides_json_url'):
|
||||
slides = self._download_json(
|
||||
player_info['slides_json_url'], video_id, fatal=False,
|
||||
note='Downloading slides JSON', errnote=False) or {}
|
||||
slide_ext_default = '.png'
|
||||
slide_quality = traverse_obj(slides, ('slide_qualities', 0))
|
||||
if slide_quality:
|
||||
slide_ext_default = '.jpg'
|
||||
slide_url_template = f'https://cdn.slideslive.com/data/presentations/%s/slides/{slide_quality}/%s%s'
|
||||
for slide_id, slide in enumerate(traverse_obj(slides, ('slides', ...), expected_type=dict), 1):
|
||||
slides_info.append((
|
||||
slide_id, traverse_obj(slide, ('image', 'name')),
|
||||
traverse_obj(slide, ('image', 'extname'), default=slide_ext_default),
|
||||
int_or_none(slide.get('time'), scale=1000)))
|
||||
|
||||
if not slides and player_info.get('slides_xml_url'):
|
||||
slides = self._download_xml(
|
||||
player_info['slides_xml_url'], video_id, fatal=False,
|
||||
note='Downloading slides XML', errnote='Failed to download slides info')
|
||||
slide_url_template = 'https://cdn.slideslive.com/data/presentations/%s/slides/big/%s%s'
|
||||
for slide_id, slide in enumerate(slides.findall('./slide') if slides else [], 1):
|
||||
slides_info.append((
|
||||
slide_id, xpath_text(slide, './slideName', 'name'), '.jpg',
|
||||
int_or_none(xpath_text(slide, './timeSec', 'time'))))
|
||||
|
||||
chapters, thumbnails = [], []
|
||||
if url_or_none(player_info.get('thumbnail')):
|
||||
thumbnails.append({'id': 'cover', 'url': player_info['thumbnail']})
|
||||
for slide_id, slide_path, slide_ext, start_time in slides_info:
|
||||
if slide_path:
|
||||
thumbnails.append({
|
||||
'id': f'{slide_id:03d}',
|
||||
'url': slide_url_template % (video_id, slide_path, slide_ext),
|
||||
})
|
||||
chapters.append({
|
||||
'title': f'Slide {slide_id:03d}',
|
||||
'start_time': start_time,
|
||||
})
|
||||
|
||||
subtitles = {}
|
||||
for sub in try_get(video_data, lambda x: x['subtitles'], list) or []:
|
||||
if not isinstance(sub, dict):
|
||||
continue
|
||||
for sub in traverse_obj(player_info, ('subtitles', ...), expected_type=dict):
|
||||
webvtt_url = url_or_none(sub.get('webvtt_url'))
|
||||
if not webvtt_url:
|
||||
continue
|
||||
lang = sub.get('language') or 'en'
|
||||
subtitles.setdefault(lang, []).append({
|
||||
subtitles.setdefault(sub.get('language') or 'en', []).append({
|
||||
'url': webvtt_url,
|
||||
'ext': 'vtt',
|
||||
})
|
||||
|
||||
info = {
|
||||
'id': video_id,
|
||||
'thumbnail': video_data.get('thumbnail'),
|
||||
'is_live': bool_or_none(video_data.get('is_live')),
|
||||
'title': player_info.get('title') or self._html_search_meta('title', webpage, default=''),
|
||||
'timestamp': unified_timestamp(player_info.get('timestamp')),
|
||||
'is_live': player_info.get('playlist_type') != 'vod',
|
||||
'thumbnails': thumbnails,
|
||||
'chapters': chapters,
|
||||
'subtitles': subtitles,
|
||||
}
|
||||
if service_name in ('url', 'yoda'):
|
||||
info['title'] = video_data['title']
|
||||
|
||||
if service_name == 'url':
|
||||
info['url'] = service_id
|
||||
else:
|
||||
formats = []
|
||||
_MANIFEST_PATTERN = 'https://01.cdn.yoda.slideslive.com/%s/master.%s'
|
||||
# use `m3u8` entry_protocol until EXT-X-MAP is properly supported by `m3u8_native` entry_protocol
|
||||
formats.extend(self._extract_m3u8_formats(
|
||||
_MANIFEST_PATTERN % (service_id, 'm3u8'),
|
||||
service_id, 'mp4', m3u8_id='hls', fatal=False))
|
||||
formats.extend(self._extract_mpd_formats(
|
||||
_MANIFEST_PATTERN % (service_id, 'mpd'), service_id,
|
||||
mpd_id='dash', fatal=False))
|
||||
elif service_name == 'yoda':
|
||||
formats, duration = self._extract_formats_and_duration(
|
||||
player_info['video_servers'][0], service_id, video_id)
|
||||
info.update({
|
||||
'id': service_id,
|
||||
'duration': duration,
|
||||
'formats': formats,
|
||||
})
|
||||
else:
|
||||
|
@ -94,10 +523,45 @@ class SlidesLiveIE(InfoExtractor):
|
|||
'_type': 'url_transparent',
|
||||
'url': service_id,
|
||||
'ie_key': service_name.capitalize(),
|
||||
'title': video_data.get('title'),
|
||||
'display_id': video_id,
|
||||
})
|
||||
if service_name == 'vimeo':
|
||||
info['url'] = smuggle_url(
|
||||
'https://player.vimeo.com/video/' + service_id,
|
||||
f'https://player.vimeo.com/video/{service_id}',
|
||||
{'http_headers': {'Referer': url}})
|
||||
|
||||
video_slides = traverse_obj(slides, ('slides', ..., 'video', 'id'))
|
||||
if not video_slides:
|
||||
return info
|
||||
|
||||
def entries():
|
||||
yield info
|
||||
|
||||
service_data = self._download_json(
|
||||
f'https://ben.slideslive.com/player/{video_id}/slides_video_service_data',
|
||||
video_id, fatal=False, query={
|
||||
'player_token': player_token,
|
||||
'videos': ','.join(video_slides),
|
||||
}, note='Downloading video slides info', errnote='Failed to download video slides info') or {}
|
||||
|
||||
for slide_id, slide in enumerate(traverse_obj(slides, ('slides', ...)), 1):
|
||||
if not traverse_obj(slide, ('video', 'service')) == 'yoda':
|
||||
continue
|
||||
video_path = traverse_obj(slide, ('video', 'id'))
|
||||
cdn_hostname = traverse_obj(service_data, (
|
||||
video_path, 'video_servers', ...), get_all=False)
|
||||
if not cdn_hostname or not video_path:
|
||||
continue
|
||||
formats, _ = self._extract_formats_and_duration(
|
||||
cdn_hostname, video_path, video_id, skip_duration=True)
|
||||
if not formats:
|
||||
continue
|
||||
yield {
|
||||
'id': f'{video_id}-{slide_id:03d}',
|
||||
'title': f'{info["title"]} - Slide {slide_id:03d}',
|
||||
'timestamp': info['timestamp'],
|
||||
'duration': int_or_none(traverse_obj(slide, ('video', 'duration_ms')), scale=1000),
|
||||
'formats': formats,
|
||||
}
|
||||
|
||||
return self.playlist_result(entries(), f'{video_id}-playlist', info['title'])
|
||||
|
|
|
@ -782,6 +782,27 @@ class SoundcloudUserIE(SoundcloudPagedPlaylistBaseIE):
|
|||
'%s (%s)' % (user['username'], resource.capitalize()))
|
||||
|
||||
|
||||
class SoundcloudUserPermalinkIE(SoundcloudPagedPlaylistBaseIE):
|
||||
_VALID_URL = r'https?://api\.soundcloud\.com/users/(?P<id>\d+)'
|
||||
IE_NAME = 'soundcloud:user:permalink'
|
||||
_TESTS = [{
|
||||
'url': 'https://api.soundcloud.com/users/30909869',
|
||||
'info_dict': {
|
||||
'id': '30909869',
|
||||
'title': 'neilcic',
|
||||
},
|
||||
'playlist_mincount': 23,
|
||||
}]
|
||||
|
||||
def _real_extract(self, url):
|
||||
user_id = self._match_id(url)
|
||||
user = self._download_json(
|
||||
self._resolv_url(url), user_id, 'Downloading user info', headers=self._HEADERS)
|
||||
|
||||
return self._extract_playlist(
|
||||
f'{self._API_V2_BASE}stream/users/{user["id"]}', str(user['id']), user.get('username'))
|
||||
|
||||
|
||||
class SoundcloudTrackStationIE(SoundcloudPagedPlaylistBaseIE):
|
||||
_VALID_URL = r'https?://(?:(?:www|m)\.)?soundcloud\.com/stations/track/[^/]+/(?P<id>[^/?#&]+)'
|
||||
IE_NAME = 'soundcloud:trackstation'
|
||||
|
|
|
@ -177,7 +177,6 @@ class SpankBangPlaylistIE(InfoExtractor):
|
|||
def _real_extract(self, url):
|
||||
mobj = self._match_valid_url(url)
|
||||
playlist_id = mobj.group('id')
|
||||
display_id = mobj.group('display_id')
|
||||
|
||||
webpage = self._download_webpage(
|
||||
url, playlist_id, headers={'Cookie': 'country=US; mobile=on'})
|
||||
|
@ -186,11 +185,11 @@ class SpankBangPlaylistIE(InfoExtractor):
|
|||
urljoin(url, mobj.group('path')),
|
||||
ie=SpankBangIE.ie_key(), video_id=mobj.group('id'))
|
||||
for mobj in re.finditer(
|
||||
r'<a[^>]+\bhref=(["\'])(?P<path>/?[\da-z]+-(?P<id>[\da-z]+)/playlist/%s(?:(?!\1).)*)\1'
|
||||
% re.escape(display_id), webpage)]
|
||||
r'<a[^>]+\bhref=(["\'])(?P<path>/?[\da-z]+-(?P<id>[\da-z]+)/playlist/[^"\'](?:(?!\1).)*)\1',
|
||||
webpage)]
|
||||
|
||||
title = self._html_search_regex(
|
||||
r'<h1>([^<]+)\s+playlist\s*<', webpage, 'playlist title',
|
||||
r'<em>([^<]+)</em>\s+playlist\s*<', webpage, 'playlist title',
|
||||
fatal=False)
|
||||
|
||||
return self.playlist_result(entries, playlist_id, title)
|
||||
|
|
|
@ -73,6 +73,8 @@ class STVPlayerIE(InfoExtractor):
|
|||
})
|
||||
|
||||
programme = result.get('programme') or {}
|
||||
if programme.get('drmEnabled'):
|
||||
self.report_drm(video_id)
|
||||
|
||||
return {
|
||||
'_type': 'url_transparent',
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue