yt-dlp/youtube_dl/extractor/ivi.py

249 lines
9.3 KiB
Python
Raw Normal View History

2016-08-21 14:31:33 -06:00
# coding: utf-8
from __future__ import unicode_literals

import json
import re

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    int_or_none,
    qualities,
)
class IviIE(InfoExtractor):
    """Extractor for single videos on ivi.ru / ivi.tv.

    Talks to the "light" JSON-RPC API. When pycryptodome is available the
    request is signed with Blowfish-CMAC; otherwise an unsigned fallback is
    used, which the API may reject with ``NotAllowedError``.
    """
    IE_DESC = 'ivi.ru'
    IE_NAME = 'ivi'
    _VALID_URL = r'https?://(?:www\.)?ivi\.(?:ru|tv)/(?:watch/(?:[^/]+/)?|video/player\?.*?videoId=)(?P<id>\d+)'
    # Geo errors are reported explicitly by the API (NotAllowedForLocation),
    # so the generic geo-bypass machinery is disabled.
    _GEO_BYPASS = False
    _GEO_COUNTRIES = ['RU']
    # Blowfish-CMAC signing key for the "light" API -- presumably extracted
    # from an official ivi client (TODO confirm provenance).
    _LIGHT_KEY = b'\xf1\x02\x32\xb7\xbc\x5c\x7a\xe8\xf7\x96\xc1\x33\x2b\x27\xa1\x8c'
    _LIGHT_URL = 'https://api.ivi.ru/light/'

    _TESTS = [
        # Single movie
        {
            'url': 'http://www.ivi.ru/watch/53141',
            'md5': '6ff5be2254e796ed346251d117196cf4',
            'info_dict': {
                'id': '53141',
                'ext': 'mp4',
                'title': 'Иван Васильевич меняет профессию',
                'description': 'md5:b924063ea1677c8fe343d8a72ac2195f',
                'duration': 5498,
                'thumbnail': r're:^https?://.*\.jpg$',
            },
            'skip': 'Only works from Russia',
        },
        # Serial's series
        {
            'url': 'http://www.ivi.ru/watch/dvoe_iz_lartsa/9549',
            'md5': '221f56b35e3ed815fde2df71032f4b3e',
            'info_dict': {
                'id': '9549',
                'ext': 'mp4',
                'title': 'Двое из ларца - Дело Гольдберга (1 часть)',
                'series': 'Двое из ларца',
                'season': 'Сезон 1',
                'season_number': 1,
                'episode': 'Дело Гольдберга (1 часть)',
                'episode_number': 1,
                'duration': 2655,
                'thumbnail': r're:^https?://.*\.jpg$',
            },
            'skip': 'Only works from Russia',
        },
        {
            # with MP4-HD720 format
            'url': 'http://www.ivi.ru/watch/146500',
            'md5': 'd63d35cdbfa1ea61a5eafec7cc523e1e',
            'info_dict': {
                'id': '146500',
                'ext': 'mp4',
                'title': 'Кукла',
                'description': 'md5:ffca9372399976a2d260a407cc74cce6',
                'duration': 5599,
                'thumbnail': r're:^https?://.*\.jpg$',
            },
            'skip': 'Only works from Russia',
        },
        {
            'url': 'https://www.ivi.tv/watch/33560/',
            'only_matching': True,
        },
    ]

    # Sorted by quality (worst first); index is used to rank formats below.
    _KNOWN_FORMATS = (
        'MP4-low-mobile', 'MP4-mobile', 'FLV-lo', 'MP4-lo', 'FLV-hi', 'MP4-hi',
        'MP4-SHQ', 'MP4-HD720', 'MP4-HD1080')

    def _real_extract(self, url):
        video_id = self._match_id(url)

        # Request body template. 'site': 's%d' is deliberately left as a
        # printf placeholder in the *encoded* bytes; it is filled in below
        # via bytes %-formatting with a site id that depends on whether the
        # request can be signed (353) or not (183).
        data = json.dumps({
            'method': 'da.content.get',
            'params': [
                video_id, {
                    'site': 's%d',
                    'referrer': 'http://www.ivi.ru/watch/%s' % video_id,
                    'contentid': video_id
                }
            ]
        }).encode()

        try:
            # Signed request path: needs pycryptodome for Blowfish + CMAC.
            from Crypto.Cipher import Blowfish
            from Crypto.Hash import CMAC

            timestamp = self._download_json(
                self._LIGHT_URL, video_id,
                'Downloading timestamp JSON', data=json.dumps({
                    'method': 'da.timestamp.get',
                    'params': []
                }).encode())['result']

            data = data % 353  # site id for signed requests
            query = {
                'ts': timestamp,
                'sign': CMAC.new(self._LIGHT_KEY, timestamp.encode() + data, Blowfish).hexdigest(),
            }
        except ImportError:
            # Unsigned fallback; the API may reject this (NotAllowedError).
            data = data % 183
            query = {}

        video_json = self._download_json(
            self._LIGHT_URL, video_id,
            'Downloading video JSON', data=data, query=query)

        error = video_json.get('error')
        if error:
            origin = error['origin']
            if origin == 'NotAllowedForLocation':
                self.raise_geo_restricted(
                    msg=error['message'], countries=self._GEO_COUNTRIES)
            elif origin == 'NoRedisValidData':
                raise ExtractorError('Video %s does not exist' % video_id, expected=True)
            elif origin == 'NotAllowedError':
                raise ExtractorError('pycryptodome not found. Please install it.', expected=True)
            raise ExtractorError(
                'Unable to download video %s: %s' % (video_id, error['message']),
                expected=True)

        result = video_json['result']
        title = result['title']

        quality = qualities(self._KNOWN_FORMATS)

        formats = []
        for f in result.get('files', []):
            f_url = f.get('url')
            # content_format may be absent/None in the API response; default
            # to '' so the substring checks below cannot raise TypeError.
            content_format = f.get('content_format') or ''
            # Skip DRM-protected variants (MDRM, FairPlay/FPS).
            if not f_url or '-MDRM-' in content_format or '-FPS-' in content_format:
                continue
            formats.append({
                'url': f_url,
                'format_id': content_format or None,
                'quality': quality(content_format),
                'filesize': int_or_none(f.get('size_in_bytes')),
            })
        self._sort_formats(formats)

        compilation = result.get('compilation')
        episode = title if compilation else None
        title = '%s - %s' % (compilation, title) if compilation is not None else title

        thumbnails = [{
            'url': preview['url'],
            'id': preview.get('content_format'),
        } for preview in result.get('preview', []) if preview.get('url')]

        # Season/episode metadata is only present in the HTML page, not in
        # the API response, so fetch the webpage as well.
        webpage = self._download_webpage(url, video_id)

        season = self._search_regex(
            r'<li[^>]+class="season active"[^>]*><a[^>]+>([^<]+)',
            webpage, 'season', default=None)
        season_number = int_or_none(self._search_regex(
            r'<li[^>]+class="season active"[^>]*><a[^>]+data-season(?:-index)?="(\d+)"',
            webpage, 'season number', default=None))

        episode_number = int_or_none(self._search_regex(
            r'[^>]+itemprop="episode"[^>]*>\s*<meta[^>]+itemprop="episodeNumber"[^>]+content="(\d+)',
            webpage, 'episode number', default=None))

        description = self._og_search_description(webpage, default=None) or self._html_search_meta(
            'description', webpage, 'description', default=None)

        return {
            'id': video_id,
            'title': title,
            'series': compilation,
            'season': season,
            'season_number': season_number,
            'episode': episode,
            'episode_number': episode_number,
            'thumbnails': thumbnails,
            'description': description,
            'duration': int_or_none(result.get('duration')),
            'formats': formats,
        }
class IviCompilationIE(InfoExtractor):
    """Playlist extractor for ivi.ru compilations (serials) and their seasons."""
    IE_DESC = 'ivi.ru compilations'
    IE_NAME = 'ivi:compilation'
    _VALID_URL = r'https?://(?:www\.)?ivi\.ru/watch/(?!\d+)(?P<compilationid>[a-z\d_-]+)(?:/season(?P<seasonid>\d+))?$'
    _TESTS = [{
        'url': 'http://www.ivi.ru/watch/dvoe_iz_lartsa',
        'info_dict': {
            'id': 'dvoe_iz_lartsa',
            'title': 'Двое из ларца (2006 - 2008)',
        },
        'playlist_mincount': 24,
    }, {
        'url': 'http://www.ivi.ru/watch/dvoe_iz_lartsa/season1',
        'info_dict': {
            'id': 'dvoe_iz_lartsa/season1',
            'title': 'Двое из ларца (2006 - 2008) 1 сезон',
        },
        'playlist_mincount': 12,
    }]

    def _extract_entries(self, html, compilation_id):
        # Episode links look like /watch/<compilation>/<id> with a matching
        # data-id attribute (the \1 backreference enforces agreement).
        entries = []
        for serie in re.findall(
                r'<a href="/watch/%s/(\d+)"[^>]+data-id="\1"' % compilation_id, html):
            entries.append(self.url_result(
                'http://www.ivi.ru/watch/%s/%s' % (compilation_id, serie), IviIE.ie_key()))
        return entries

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        compilation_id = mobj.group('compilationid')
        season_id = mobj.group('seasonid')

        if season_id is None:  # Compilation link
            compilation_page = self._download_webpage(
                url, compilation_id, 'Downloading compilation web page')
            playlist_id = compilation_id
            playlist_title = self._html_search_meta('title', compilation_page, 'title')
            seasons = re.findall(
                r'<a href="/watch/%s/season(\d+)' % compilation_id, compilation_page)
            if seasons:
                # Walk every season page and gather its episodes.
                entries = []
                for season_id in seasons:
                    season_page = self._download_webpage(
                        'http://www.ivi.ru/watch/%s/season%s' % (compilation_id, season_id),
                        compilation_id, 'Downloading season %s web page' % season_id)
                    entries.extend(self._extract_entries(season_page, compilation_id))
            else:  # No seasons in this compilation
                entries = self._extract_entries(compilation_page, compilation_id)
        else:  # Season link
            season_page = self._download_webpage(
                url, compilation_id, 'Downloading season %s web page' % season_id)
            playlist_id = '%s/season%s' % (compilation_id, season_id)
            playlist_title = self._html_search_meta('title', season_page, 'title')
            entries = self._extract_entries(season_page, compilation_id)

        return self.playlist_result(entries, playlist_id, playlist_title)