mirror of https://github.com/yt-dlp/yt-dlp.git
Merge commit '7151f63a5f3820a322ba8bf61eebe8d9f75d6ee5'
This commit is contained in: b3a653c245

Changed file: youtube-dl (37)
@@ -766,7 +766,7 @@ class FileDownloader(object):
 raise MaxDownloadsReached()

 filename = self.prepare_filename(info_dict)

 # Forced printings
 if self.params.get('forcetitle', False):
     print info_dict['title'].encode(preferredencoding(), 'xmlcharrefreplace')
@@ -842,7 +842,7 @@ class FileDownloader(object):
 except (ContentTooShortError, ), err:
     self.trouble(u'ERROR: content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
     return

 if success:
     try:
         self.post_process(filename, info_dict)
@@ -1183,7 +1183,7 @@ class YoutubeIE(InfoExtractor):
     '43': '360x640',
     '44': '480x854',
     '45': '720x1280',
 }
 IE_NAME = u'youtube'

 def report_lang(self):
@@ -2507,7 +2507,7 @@ class YoutubePlaylistIE(InfoExtractor):

 _VALID_URL = r'(?:https?://)?(?:\w+\.)?youtube\.com/(?:(?:course|view_play_list|my_playlists|artist|playlist)\?.*?(p|a|list)=|user/.*?/user/|p/|user/.*?#[pg]/c/)(?:PL)?([0-9A-Za-z-_]+)(?:/.*?/([0-9A-Za-z_-]+))?.*'
 _TEMPLATE_URL = 'http://www.youtube.com/%s?%s=%s&page=%s&gl=US&hl=en'
-_VIDEO_INDICATOR = r'/watch\?v=(.+?)&'
+_VIDEO_INDICATOR_TEMPLATE = r'/watch\?v=(.+?)&list=PL%s&'
 _MORE_PAGES_INDICATOR = r'(?m)>\s*Next\s*</a>'
 _youtube_ie = None
 IE_NAME = u'youtube:playlist'
@@ -2559,7 +2559,8 @@ class YoutubePlaylistIE(InfoExtractor):

 # Extract video identifiers
 ids_in_page = []
-for mobj in re.finditer(self._VIDEO_INDICATOR, page):
+video_indicator = self._VIDEO_INDICATOR_TEMPLATE % playlist_id
+for mobj in re.finditer(video_indicator, page):
     if mobj.group(1) not in ids_in_page:
         ids_in_page.append(mobj.group(1))
 video_ids.extend(ids_in_page)
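For context, a rough sketch of what this change does, using a made-up playlist ID and page fragment (neither is from the commit): the old _VIDEO_INDICATOR matched every /watch?v=...& link in the downloaded page, while the template form is first filled in with the playlist ID, so only links that carry list=PL<id> are collected.

    import re

    # Hypothetical values, for illustration only.
    playlist_id = '1234567890ABCDEF'
    page = ('<a href="/watch?v=aaaaaaaaaaa&list=PL1234567890ABCDEF&index=1">in playlist</a>'
            '<a href="/watch?v=bbbbbbbbbbb&feature=related">unrelated video</a>')

    OLD_INDICATOR = r'/watch\?v=(.+?)&'
    VIDEO_INDICATOR_TEMPLATE = r'/watch\?v=(.+?)&list=PL%s&'
    video_indicator = VIDEO_INDICATOR_TEMPLATE % playlist_id

    print([m.group(1) for m in re.finditer(OLD_INDICATOR, page)])
    # ['aaaaaaaaaaa', 'bbbbbbbbbbb']  -- picks up links outside the playlist too
    print([m.group(1) for m in re.finditer(video_indicator, page)])
    # ['aaaaaaaaaaa']                 -- only the playlist's own entries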
@@ -2570,7 +2571,11 @@ class YoutubePlaylistIE(InfoExtractor):

 playliststart = self._downloader.params.get('playliststart', 1) - 1
 playlistend = self._downloader.params.get('playlistend', -1)
-video_ids = video_ids[playliststart:playlistend]
+if playlistend == -1:
+    video_ids = video_ids[playliststart:]
+else:
+    video_ids = video_ids[playliststart:playlistend]

 for id in video_ids:
     self._youtube_ie.extract('http://www.youtube.com/watch?v=%s' % id)
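A minimal sketch of why the extra branch is needed (toy values, not from the commit): playlistend defaults to -1, and Python treats -1 as a slice end meaning "stop before the last element", so the old one-liner silently dropped the final video of any playlist downloaded without an explicit end.

    video_ids = ['a1', 'b2', 'c3', 'd4']         # toy playlist
    playliststart = 1 - 1                         # default playliststart of 1, zero-based
    playlistend = -1                              # default: no explicit end

    # Old behaviour: the last entry is lost.
    print(video_ids[playliststart:playlistend])   # ['a1', 'b2', 'c3']

    # New behaviour: -1 means "until the end of the playlist".
    if playlistend == -1:
        selected = video_ids[playliststart:]
    else:
        selected = video_ids[playliststart:playlistend]
    print(selected)                               # ['a1', 'b2', 'c3', 'd4']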
@@ -3016,14 +3021,14 @@ class BlipTVIE(InfoExtractor):
     data = json_data['Post']
 else:
     data = json_data

 upload_date = datetime.datetime.strptime(data['datestamp'], '%m-%d-%y %H:%M%p').strftime('%Y%m%d')
 video_url = data['media']['url']
 umobj = re.match(self._URL_EXT, video_url)
 if umobj is None:
     raise ValueError('Can not determine filename extension')
 ext = umobj.group(1)

 info = {
     'id': data['item_id'],
     'url': video_url,
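As a worked example of the upload_date line above, with a fabricated datestamp (the exact field layout of blip.tv's JSON is assumed here): '%m-%d-%y %H:%M%p' parses a value such as '08-15-11 02:30PM', and strftime('%Y%m%d') then keeps only the date.

    import datetime

    datestamp = '08-15-11 02:30PM'  # fabricated example value
    upload_date = datetime.datetime.strptime(datestamp, '%m-%d-%y %H:%M%p').strftime('%Y%m%d')
    print(upload_date)  # 20110815

Only the date survives the final strftime, so the unusual %H/%p pairing in the time part does not affect the result.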
@@ -3057,7 +3062,7 @@ class MyVideoIE(InfoExtractor):

 def __init__(self, downloader=None):
     InfoExtractor.__init__(self, downloader)

 def report_download_webpage(self, video_id):
     """Report webpage download."""
     self._downloader.to_screen(u'[myvideo] %s: Downloading webpage' % video_id)
@@ -3124,7 +3129,7 @@ class ComedyCentralIE(InfoExtractor):

 def report_extraction(self, episode_id):
     self._downloader.to_screen(u'[comedycentral] %s: Extracting information' % episode_id)

 def report_config_download(self, episode_id):
     self._downloader.to_screen(u'[comedycentral] %s: Downloading configuration' % episode_id)

@@ -3551,7 +3556,7 @@ class SoundcloudIE(InfoExtractor):
 mobj = re.search('track-description-value"><p>(.*?)</p>', webpage)
 if mobj:
     description = mobj.group(1)

 # upload date
 upload_date = None
 mobj = re.search("pretty-date'>on ([\w]+ [\d]+, [\d]+ \d+:\d+)</abbr></h2>", webpage)
@@ -3686,7 +3691,7 @@ class MixcloudIE(InfoExtractor):
     url_list = jsonData[fmt][bitrate]
 except TypeError: # we have no bitrate info.
     url_list = jsonData[fmt]

 return url_list

 def check_urls(self, url_list):
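A small sketch of the TypeError fallback above, with fabricated jsonData shapes (the real Mixcloud JSON layout is assumed): when a format maps to a per-bitrate dict, the bitrate key works; when it maps directly to a list of URLs, indexing a list with a string raises TypeError and the plain format entry is returned instead.

    def get_urls(jsonData, fmt, bitrate='default'):
        # Mimics the lookup above: prefer jsonData[fmt][bitrate], fall back to jsonData[fmt].
        try:
            url_list = jsonData[fmt][bitrate]
        except TypeError:  # jsonData[fmt] is already a plain URL list; no bitrate info
            url_list = jsonData[fmt]
        return url_list

    # Fabricated examples of both shapes.
    with_bitrates = {'mp3': {'default': ['http://a/1.mp3'], '64': ['http://a/1-64.mp3']}}
    without_bitrates = {'mp3': ['http://b/2.mp3']}

    print(get_urls(with_bitrates, 'mp3'))     # ['http://a/1.mp3']
    print(get_urls(without_bitrates, 'mp3'))  # ['http://b/2.mp3']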
@@ -3806,7 +3811,7 @@ class StanfordOpenClassroomIE(InfoExtractor):
 info = {
     'id': _simplify_title(course + '_' + video),
 }

 self.report_extraction(info['id'])
 baseUrl = 'http://openclassroom.stanford.edu/MainFolder/courses/' + course + '/videos/'
 xmlUrl = baseUrl + video + '.xml'
@@ -3940,7 +3945,7 @@ class MTVIE(InfoExtractor):
 self._downloader.trouble(u'ERROR: unable to extract performer')
 return
 performer = _unescapeHTML(mobj.group(1).decode('iso-8859-1'))
 video_title = performer + ' - ' + song_name

 mobj = re.search(r'<meta name="mtvn_uri" content="([^"]+)"/>', webpage)
 if mobj is None:
@@ -4182,7 +4187,7 @@ def updateSelf(downloader, filename):
 try:
     urlh = urllib.urlopen(UPDATE_URL)
     newcontent = urlh.read()

 vmatch = re.search("__version__ = '([^']+)'", newcontent)
 if vmatch is not None and vmatch.group(1) == __version__:
     downloader.to_screen(u'youtube-dl is up-to-date (' + __version__ + ')')
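A small illustration of the version check in updateSelf above; the fetched content shown here is fabricated, and only the regex and the comparison mirror the hunk.

    import re

    __version__ = '2011.08.04'  # hypothetical local version

    # Fabricated stand-in for the script downloaded from UPDATE_URL.
    newcontent = "#!/usr/bin/env python\n__version__ = '2011.08.04'\n"

    vmatch = re.search("__version__ = '([^']+)'", newcontent)
    if vmatch is not None and vmatch.group(1) == __version__:
        print('youtube-dl is up-to-date (' + __version__ + ')')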
@@ -4603,7 +4608,7 @@ def _real_main():
     parser.error(u'you must provide at least one URL')
 else:
     sys.exit()

 try:
     retcode = fd.download(all_urls)
 except MaxDownloadsReached: