youtube-dl/youtube_dl/extractor/lrt.py

# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    clean_html,
    merge_dicts,
)


class LRTIE(InfoExtractor):
    IE_NAME = 'lrt.lt'
    _VALID_URL = r'https?://(?:www\.)?lrt\.lt(?P<path>/mediateka/irasas/(?P<id>[0-9]+))'
    _TESTS = [{
        # m3u8 download
        'url': 'https://www.lrt.lt/mediateka/irasas/2000127261/greita-ir-gardu-sicilijos-ikvepta-klasikiniu-makaronu-su-baklazanais-vakariene',
        'md5': '85cb2bb530f31d91a9c65b479516ade4',
        'info_dict': {
            'id': '2000127261',
            'ext': 'mp4',
            'title': 'Greita ir gardu: Sicilijos įkvėpta klasikinių makaronų su baklažanais vakarienė',
            'description': 'md5:ad7d985f51b0dc1489ba2d76d7ed47fa',
            'duration': 3035,
            'timestamp': 1604079000,
            'upload_date': '20201030',
        },
    }, {
        # direct mp3 download
        'url': 'http://www.lrt.lt/mediateka/irasas/1013074524/',
        'md5': '389da8ca3cad0f51d12bed0c844f6a0a',
        'info_dict': {
            'id': '1013074524',
            'ext': 'mp3',
            'title': 'Kita tema 2016-09-05 15:05',
            'description': 'md5:1b295a8fc7219ed0d543fc228c931fb5',
            'duration': 3008,
            'view_count': int,
            'like_count': int,
        },
    }]

    def _extract_js_var(self, webpage, var_name, default):
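        # The page defines values like `main_url = "..."`; capture group 2 is the
        # quoted string's contents, and `default` is returned when nothing matches.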
        return self._search_regex(
            r'%s\s*=\s*(["\'])((?:(?!\1).)+)\1' % var_name,
            webpage, var_name.replace('_', ' '), default, group=2)

    def _real_extract(self, url):
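        # `path` is the /mediateka/irasas/<id> part of the URL and `id` the
        # numeric video id, both taken from the named groups of _VALID_URL.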
        path, video_id = re.match(self._VALID_URL, url).groups()
        webpage = self._download_webpage(url, video_id)
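
        # The watch page embeds the JS variables main_url and media_info_url;
        # the media_info service is queried with the media URL for stream metadata.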
        media_url = self._extract_js_var(webpage, 'main_url', path)
        media = self._download_json(self._extract_js_var(
            webpage, 'media_info_url',
            'https://www.lrt.lt/servisai/stream_url/vod/media_info/'),
            video_id, query={'url': media_url})
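        # The returned playlist_item is a JWPlayer configuration; parsing it
        # yields the available formats and basic metadata.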
        jw_data = self._parse_jwplayer_data(
            media['playlist_item'], video_id, base_url=url)

        json_ld_data = self._search_json_ld(webpage, video_id)

        tags = []
        for tag in media.get('tags', []):
            tag_name = tag.get('name')
            if not tag_name:
                continue
            tags.append(tag_name)
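
        # merge_dicts prefers values from earlier dicts, so the cleaned
        # description and tag names take precedence over JWPlayer and JSON-LD data.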
        clean_info = {
            'description': clean_html(media.get('content')),
            'tags': tags,
        }

        return merge_dicts(clean_info, jw_data, json_ld_data)