mirror of https://github.com/yt-dlp/yt-dlp.git
Use except .. as everywhere (#180)
parent 96731798db
commit e08bee320e
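The change applied throughout the hunks below is purely syntactic: Python 2's "except ExceptionType, err:" spelling is replaced with "except ExceptionType as err:", which Python 2.6+ also accepts and which is the only form Python 3 allows. A minimal sketch of the two spellings (the file names here are placeholders for illustration, not taken from the patch):

    import os

    old_filename, new_filename = 'video.part', 'video.mp4'  # placeholder names, not from the patch
    try:
        os.rename(old_filename, new_filename)
    # Old, Python 2-only spelling (what this commit removes):
    #     except (IOError, OSError), err:
    # Portable spelling (what this commit introduces), valid on Python 2.6+ and Python 3:
    except (IOError, OSError) as err:
        print('unable to rename file: %s' % err)
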
@@ -247,7 +247,7 @@ class FileDownloader(object):
 if old_filename == new_filename:
 return
 os.rename(encodeFilename(old_filename), encodeFilename(new_filename))
-except (IOError, OSError), err:
+except (IOError, OSError) as err:
 self.trouble(u'ERROR: unable to rename file')

 def try_utime(self, filename, last_modified_hdr):
@@ -305,7 +305,7 @@ class FileDownloader(object):
 """Report file has already been fully downloaded."""
 try:
 self.to_screen(u'[download] %s has already been downloaded' % file_name)
-except (UnicodeEncodeError), err:
+except (UnicodeEncodeError) as err:
 self.to_screen(u'[download] The file has already been downloaded')

 def report_unable_to_resume(self):
@@ -336,7 +336,7 @@ class FileDownloader(object):

 filename = self.params['outtmpl'] % template_dict
 return filename
-except (ValueError, KeyError), err:
+except (ValueError, KeyError) as err:
 self.trouble(u'ERROR: invalid system charset or erroneous output template')
 return None

@@ -402,7 +402,7 @@ class FileDownloader(object):
 dn = os.path.dirname(encodeFilename(filename))
 if dn != '' and not os.path.exists(dn): # dn is already encoded
 os.makedirs(dn)
-except (OSError, IOError), err:
+except (OSError, IOError) as err:
 self.trouble(u'ERROR: unable to create directory ' + compat_str(err))
 return

@@ -459,19 +459,19 @@ class FileDownloader(object):
 else:
 try:
 success = self._do_download(filename, info_dict)
-except (OSError, IOError), err:
+except (OSError, IOError) as err:
 raise UnavailableVideoError
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self.trouble(u'ERROR: unable to download video data: %s' % str(err))
 return
-except (ContentTooShortError, ), err:
+except (ContentTooShortError, ) as err:
 self.trouble(u'ERROR: content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
 return

 if success:
 try:
 self.post_process(filename, info_dict)
-except (PostProcessingError), err:
+except (PostProcessingError) as err:
 self.trouble(u'ERROR: postprocessing: %s' % str(err))
 return

@@ -612,7 +612,7 @@ class FileDownloader(object):
 data = info_dict['urlhandle']
 data = urllib2.urlopen(request)
 break
-except (urllib2.HTTPError, ), err:
+except (urllib2.HTTPError, ) as err:
 if (err.code < 500 or err.code >= 600) and err.code != 416:
 # Unexpected HTTP error
 raise
@@ -622,7 +622,7 @@ class FileDownloader(object):
 # Open the connection again without the range header
 data = urllib2.urlopen(basic_request)
 content_length = data.info()['Content-Length']
-except (urllib2.HTTPError, ), err:
+except (urllib2.HTTPError, ) as err:
 if err.code < 500 or err.code >= 600:
 raise
 else:
@@ -676,12 +676,12 @@ class FileDownloader(object):
 assert stream is not None
 filename = self.undo_temp_name(tmpfilename)
 self.report_destination(filename)
-except (OSError, IOError), err:
+except (OSError, IOError) as err:
 self.trouble(u'ERROR: unable to open for writing: %s' % str(err))
 return False
 try:
 stream.write(data_block)
-except (IOError, OSError), err:
+except (IOError, OSError) as err:
 self.trouble(u'\nERROR: unable to write data: %s' % str(err))
 return False
 if not self.params.get('noresizebuffer', False):

@@ -252,7 +252,7 @@ class YoutubeIE(InfoExtractor):
 password = info[2]
 else:
 raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
-except (IOError, netrc.NetrcParseError), err:
+except (IOError, netrc.NetrcParseError) as err:
 self._downloader.to_stderr(u'WARNING: parsing .netrc: %s' % compat_str(err))
 return

@@ -261,7 +261,7 @@ class YoutubeIE(InfoExtractor):
 try:
 self.report_lang()
 urllib2.urlopen(request).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.to_stderr(u'WARNING: unable to set language: %s' % compat_str(err))
 return

@@ -284,7 +284,7 @@ class YoutubeIE(InfoExtractor):
 if re.search(r'(?i)<form[^>]* name="loginForm"', login_results) is not None:
 self._downloader.to_stderr(u'WARNING: unable to log in: bad username or password')
 return
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.to_stderr(u'WARNING: unable to log in: %s' % compat_str(err))
 return

@@ -297,7 +297,7 @@ class YoutubeIE(InfoExtractor):
 try:
 self.report_age_confirmation()
 age_results = urllib2.urlopen(request).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: unable to confirm age: %s' % compat_str(err))
 return

@@ -319,7 +319,7 @@ class YoutubeIE(InfoExtractor):
 request = urllib2.Request('http://www.youtube.com/watch?v=%s&gl=US&hl=en&has_verified=1' % video_id)
 try:
 video_webpage = urllib2.urlopen(request).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))
 return

@@ -341,7 +341,7 @@ class YoutubeIE(InfoExtractor):
 video_info = parse_qs(video_info_webpage)
 if 'token' in video_info:
 break
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: unable to download video info webpage: %s' % compat_str(err))
 return
 if 'token' not in video_info:
@@ -404,7 +404,7 @@ class YoutubeIE(InfoExtractor):
 request = urllib2.Request('http://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id)
 try:
 srt_list = urllib2.urlopen(request).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 raise Trouble(u'WARNING: unable to download video subtitles: %s' % compat_str(err))
 srt_lang_list = re.findall(r'name="([^"]*)"[^>]+lang_code="([\w\-]+)"', srt_list)
 srt_lang_list = dict((l[1], l[0]) for l in srt_lang_list)
@@ -421,7 +421,7 @@ class YoutubeIE(InfoExtractor):
 request = urllib2.Request('http://www.youtube.com/api/timedtext?lang=%s&name=%s&v=%s' % (srt_lang, srt_lang_list[srt_lang], video_id))
 try:
 srt_xml = urllib2.urlopen(request).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 raise Trouble(u'WARNING: unable to download video subtitles: %s' % compat_str(err))
 if not srt_xml:
 raise Trouble(u'WARNING: unable to download video subtitles')
@@ -543,7 +543,7 @@ class MetacafeIE(InfoExtractor):
 try:
 self.report_disclaimer()
 disclaimer = urllib2.urlopen(request).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: unable to retrieve disclaimer: %s' % compat_str(err))
 return

@@ -556,7 +556,7 @@ class MetacafeIE(InfoExtractor):
 try:
 self.report_age_confirmation()
 disclaimer = urllib2.urlopen(request).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: unable to confirm age: %s' % compat_str(err))
 return

@@ -580,7 +580,7 @@ class MetacafeIE(InfoExtractor):
 try:
 self.report_download_webpage(video_id)
 webpage = urllib2.urlopen(request).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: unable retrieve video webpage: %s' % compat_str(err))
 return

@@ -671,7 +671,7 @@ class DailymotionIE(InfoExtractor):
 try:
 self.report_download_webpage(video_id)
 webpage = urllib2.urlopen(request).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: unable retrieve video webpage: %s' % compat_str(err))
 return

@@ -767,7 +767,7 @@ class GoogleIE(InfoExtractor):
 try:
 self.report_download_webpage(video_id)
 webpage = urllib2.urlopen(request).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
 return

@@ -806,7 +806,7 @@ class GoogleIE(InfoExtractor):
 request = urllib2.Request('http://video.google.com/videosearch?q=%s+site:video.google.com&hl=en' % abs(int(video_id)))
 try:
 webpage = urllib2.urlopen(request).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
 return
 mobj = re.search(r'<img class=thumbnail-img (?:.* )?src=(http.*)>', webpage)
@@ -860,7 +860,7 @@ class PhotobucketIE(InfoExtractor):
 try:
 self.report_download_webpage(video_id)
 webpage = urllib2.urlopen(request).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
 return

@@ -928,7 +928,7 @@ class YahooIE(InfoExtractor):
 request = urllib2.Request(url)
 try:
 webpage = urllib2.urlopen(request).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
 return

@@ -952,7 +952,7 @@ class YahooIE(InfoExtractor):
 try:
 self.report_download_webpage(video_id)
 webpage = urllib2.urlopen(request).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
 return

@@ -1010,7 +1010,7 @@ class YahooIE(InfoExtractor):
 try:
 self.report_download_webpage(video_id)
 webpage = urllib2.urlopen(request).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
 return

@@ -1066,7 +1066,7 @@ class VimeoIE(InfoExtractor):
 try:
 self.report_download_webpage(video_id)
 webpage = urllib2.urlopen(request).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
 return

@@ -1172,10 +1172,10 @@ class ArteTvIE(InfoExtractor):
 try:
 self.report_download_webpage(url)
 webpage = urllib2.urlopen(request).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
 return
-except ValueError, err:
+except ValueError as err:
 self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
 return
 return webpage
@@ -1368,10 +1368,10 @@ class GenericIE(InfoExtractor):
 try:
 self.report_download_webpage(video_id)
 webpage = urllib2.urlopen(request).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
 return
-except ValueError, err:
+except ValueError as err:
 # since this is the last-resort InfoExtractor, if
 # this error is thrown, it'll be thrown here
 self._downloader.trouble(u'ERROR: Invalid URL: %s' % url)
@@ -1487,7 +1487,7 @@ class YoutubeSearchIE(InfoExtractor):
 request = urllib2.Request(result_url)
 try:
 data = urllib2.urlopen(request).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: unable to download API page: %s' % compat_str(err))
 return
 api_response = json.loads(data)['data']
@@ -1564,7 +1564,7 @@ class GoogleSearchIE(InfoExtractor):
 request = urllib2.Request(result_url)
 try:
 page = urllib2.urlopen(request).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
 return

@@ -1647,7 +1647,7 @@ class YahooSearchIE(InfoExtractor):
 request = urllib2.Request(result_url)
 try:
 page = urllib2.urlopen(request).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
 return

@@ -1717,7 +1717,7 @@ class YoutubePlaylistIE(InfoExtractor):
 request = urllib2.Request(url)
 try:
 page = urllib2.urlopen(request).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
 return

@@ -1774,7 +1774,7 @@ class YoutubeChannelIE(InfoExtractor):
 request = urllib2.Request(url)
 try:
 page = urllib2.urlopen(request).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
 return

@@ -1837,7 +1837,7 @@ class YoutubeUserIE(InfoExtractor):

 try:
 page = urllib2.urlopen(request).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
 return

@@ -1909,7 +1909,7 @@ class BlipTVUserIE(InfoExtractor):
 page = urllib2.urlopen(request).read().decode('utf-8')
 mobj = re.search(r'data-users-id="([^"]+)"', page)
 page_base = page_base % mobj.group(1)
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
 return

@@ -1929,7 +1929,7 @@ class BlipTVUserIE(InfoExtractor):

 try:
 page = urllib2.urlopen(request).read().decode('utf-8')
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: unable to download webpage: %s' % str(err))
 return

@@ -1997,7 +1997,7 @@ class DepositFilesIE(InfoExtractor):
 try:
 self.report_download_webpage(file_id)
 webpage = urllib2.urlopen(request).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: Unable to retrieve file webpage: %s' % compat_str(err))
 return

@@ -2113,7 +2113,7 @@ class FacebookIE(InfoExtractor):
 password = info[2]
 else:
 raise netrc.NetrcParseError('No authenticators for %s' % self._NETRC_MACHINE)
-except (IOError, netrc.NetrcParseError), err:
+except (IOError, netrc.NetrcParseError) as err:
 self._downloader.to_stderr(u'WARNING: parsing .netrc: %s' % compat_str(err))
 return

@@ -2133,7 +2133,7 @@ class FacebookIE(InfoExtractor):
 if re.search(r'<form(.*)name="login"(.*)</form>', login_results) is not None:
 self._downloader.to_stderr(u'WARNING: unable to log in: bad username/password, or exceded login rate limit (~3/min). Check credentials or wait.')
 return
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.to_stderr(u'WARNING: unable to log in: %s' % compat_str(err))
 return

@@ -2150,7 +2150,7 @@ class FacebookIE(InfoExtractor):
 try:
 page = urllib2.urlopen(request)
 video_webpage = page.read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))
 return

@@ -2285,13 +2285,13 @@ class BlipTVIE(InfoExtractor):
 'ext': ext,
 'urlhandle': urlh
 }
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: unable to download video info webpage: %s' % compat_str(err))
 return
 if info is None: # Regular URL
 try:
 json_code = urlh.read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: unable to read video info webpage: %s' % compat_str(err))
 return

@@ -2321,7 +2321,7 @@ class BlipTVIE(InfoExtractor):
 'description': data['description'],
 'player_url': data['embedUrl']
 }
-except (ValueError,KeyError), err:
+except (ValueError,KeyError) as err:
 self._downloader.trouble(u'ERROR: unable to parse video information: %s' % repr(err))
 return

@@ -2359,7 +2359,7 @@ class MyVideoIE(InfoExtractor):
 try:
 self.report_download_webpage(video_id)
 webpage = urllib2.urlopen(request).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
 return

@@ -2456,7 +2456,7 @@ class ComedyCentralIE(InfoExtractor):
 try:
 htmlHandle = urllib2.urlopen(req)
 html = htmlHandle.read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
 return
 if dlNewest:
@@ -2489,7 +2489,7 @@ class ComedyCentralIE(InfoExtractor):
 try:
 urlHandle = urllib2.urlopen(playerUrl_raw)
 playerUrl = urlHandle.geturl()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: unable to find out player URL: ' + compat_str(err))
 return

@@ -2498,7 +2498,7 @@ class ComedyCentralIE(InfoExtractor):
 self.report_index_download(epTitle)
 try:
 indexXml = urllib2.urlopen(indexUrl).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: unable to download episode index: ' + compat_str(err))
 return

@@ -2519,7 +2519,7 @@ class ComedyCentralIE(InfoExtractor):
 self.report_config_download(epTitle)
 try:
 configXml = urllib2.urlopen(configReq).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: unable to download webpage: %s' % compat_str(err))
 return

@@ -2602,7 +2602,7 @@ class EscapistIE(InfoExtractor):
 webPageBytes = webPage.read()
 m = re.match(r'text/html; charset="?([^"]+)"?', webPage.headers['Content-Type'])
 webPage = webPageBytes.decode(m.group(1) if m else 'utf-8')
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: unable to download webpage: ' + compat_str(err))
 return

@@ -2618,7 +2618,7 @@ class EscapistIE(InfoExtractor):
 self.report_config_download(showName)
 try:
 configJSON = urllib2.urlopen(configUrl).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: unable to download configuration: ' + compat_str(err))
 return

@@ -2627,7 +2627,7 @@ class EscapistIE(InfoExtractor):

 try:
 config = json.loads(configJSON)
-except (ValueError,), err:
+except (ValueError,) as err:
 self._downloader.trouble(u'ERROR: Invalid JSON in configuration file: ' + compat_str(err))
 return

@@ -2674,7 +2674,7 @@ class CollegeHumorIE(InfoExtractor):
 request = urllib2.Request(url)
 try:
 webpage = urllib2.urlopen(request).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))
 return

@@ -2695,7 +2695,7 @@ class CollegeHumorIE(InfoExtractor):
 xmlUrl = 'http://www.collegehumor.com/moogaloop/video:' + internal_video_id
 try:
 metaXml = urllib2.urlopen(xmlUrl).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: unable to download video info XML: %s' % compat_str(err))
 return

@@ -2740,7 +2740,7 @@ class XVideosIE(InfoExtractor):
 request = urllib2.Request(r'http://www.xvideos.com/video' + video_id)
 try:
 webpage = urllib2.urlopen(request).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))
 return

@@ -2824,7 +2824,7 @@ class SoundcloudIE(InfoExtractor):
 request = urllib2.Request('http://soundcloud.com/%s/%s' % (uploader, slug_title))
 try:
 webpage = urllib2.urlopen(request).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))
 return

@@ -2901,7 +2901,7 @@ class InfoQIE(InfoExtractor):
 request = urllib2.Request(url)
 try:
 webpage = urllib2.urlopen(request).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))
 return

@@ -2980,7 +2980,7 @@ class MixcloudIE(InfoExtractor):
 try:
 urllib2.urlopen(url)
 return url
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 url = None

 return None
@@ -3013,7 +3013,7 @@ class MixcloudIE(InfoExtractor):
 try:
 self.report_download_json(file_url)
 jsonData = urllib2.urlopen(request).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: Unable to retrieve file: %s' % compat_str(err))
 return

@@ -3092,7 +3092,7 @@ class StanfordOpenClassroomIE(InfoExtractor):
 xmlUrl = baseUrl + video + '.xml'
 try:
 metaXml = urllib2.urlopen(xmlUrl).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: unable to download video info XML: %s' % compat_str(err))
 return
 mdoc = xml.etree.ElementTree.fromstring(metaXml)
@@ -3116,7 +3116,7 @@ class StanfordOpenClassroomIE(InfoExtractor):
 self.report_download_webpage(info['id'])
 try:
 coursepage = urllib2.urlopen(url).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: unable to download course info page: ' + compat_str(err))
 return

@@ -3155,7 +3155,7 @@ class StanfordOpenClassroomIE(InfoExtractor):
 rootURL = 'http://openclassroom.stanford.edu/MainFolder/HomePage.php'
 try:
 rootpage = urllib2.urlopen(rootURL).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: unable to download course info page: ' + compat_str(err))
 return

@@ -3202,7 +3202,7 @@ class MTVIE(InfoExtractor):
 request = urllib2.Request(url)
 try:
 webpage = urllib2.urlopen(request).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % compat_str(err))
 return

@@ -3235,7 +3235,7 @@ class MTVIE(InfoExtractor):
 request = urllib2.Request(videogen_url)
 try:
 metadataXml = urllib2.urlopen(request).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: unable to download video metadata: %s' % compat_str(err))
 return

@@ -3417,7 +3417,7 @@ class XNXXIE(InfoExtractor):
 # Get webpage content
 try:
 webpage = urllib2.urlopen(url).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % err)
 return

@@ -3497,7 +3497,7 @@ class GooglePlusIE(InfoExtractor):
 request = urllib2.Request(post_url)
 try:
 webpage = urllib2.urlopen(request).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: Unable to retrieve entry webpage: %s' % compat_str(err))
 return

@@ -3539,7 +3539,7 @@ class GooglePlusIE(InfoExtractor):
 request = urllib2.Request(video_page)
 try:
 webpage = urllib2.urlopen(request).read()
-except (urllib2.URLError, httplib.HTTPException, socket.error), err:
+except (urllib2.URLError, httplib.HTTPException, socket.error) as err:
 self._downloader.trouble(u'ERROR: Unable to retrieve video webpage: %s' % compat_str(err))
 return
 self.report_extract_vid_page(video_page)

@@ -74,7 +74,7 @@ def updateSelf(downloader, filename):
 urlh.close()
 with open(exe + '.new', 'wb') as outf:
 outf.write(newcontent)
-except (IOError, OSError), err:
+except (IOError, OSError) as err:
 sys.exit('ERROR: unable to download latest version')

 try:
@@ -89,7 +89,7 @@ del "%s"
 b.close()

 os.startfile(bat)
-except (IOError, OSError), err:
+except (IOError, OSError) as err:
 sys.exit('ERROR: unable to overwrite current version')

 else:
@@ -97,13 +97,13 @@ del "%s"
 urlh = urllib2.urlopen(UPDATE_URL)
 newcontent = urlh.read()
 urlh.close()
-except (IOError, OSError), err:
+except (IOError, OSError) as err:
 sys.exit('ERROR: unable to download latest version')

 try:
 with open(filename, 'wb') as outf:
 outf.write(newcontent)
-except (IOError, OSError), err:
+except (IOError, OSError) as err:
 sys.exit('ERROR: unable to overwrite current version')

 downloader.to_screen(u'Updated youtube-dl. Restart youtube-dl to use the new version.')
@@ -386,7 +386,7 @@ def _real_main():
 jar = cookielib.MozillaCookieJar(opts.cookiefile)
 if os.path.isfile(opts.cookiefile) and os.access(opts.cookiefile, os.R_OK):
 jar.load()
-except (IOError, OSError), err:
+except (IOError, OSError) as err:
 sys.exit(u'ERROR: unable to open cookie file')
 # Set user agent
 if opts.user_agent is not None:
@@ -394,7 +394,7 @@ def _real_main():

 # Dump user agent
 if opts.dump_user_agent:
-print std_headers['User-Agent']
+print(std_headers['User-Agent'])
 sys.exit(0)

 # Batch file verification
@@ -450,7 +450,7 @@ def _real_main():
 if opts.retries is not None:
 try:
 opts.retries = int(opts.retries)
-except (TypeError, ValueError), err:
+except (TypeError, ValueError) as err:
 parser.error(u'invalid retry count specified')
 if opts.buffersize is not None:
 numeric_buffersize = FileDownloader.parse_bytes(opts.buffersize)
@@ -461,13 +461,13 @@ def _real_main():
 opts.playliststart = int(opts.playliststart)
 if opts.playliststart <= 0:
 raise ValueError(u'Playlist start must be positive')
-except (TypeError, ValueError), err:
+except (TypeError, ValueError) as err:
 parser.error(u'invalid playlist start number specified')
 try:
 opts.playlistend = int(opts.playlistend)
 if opts.playlistend != -1 and (opts.playlistend <= 0 or opts.playlistend < opts.playliststart):
 raise ValueError(u'Playlist end must be greater than playlist start')
-except (TypeError, ValueError), err:
+except (TypeError, ValueError) as err:
 parser.error(u'invalid playlist end number specified')
 if opts.extractaudio:
 if opts.audioformat not in ['best', 'aac', 'mp3', 'vorbis', 'm4a', 'wav']:
@@ -559,7 +559,7 @@ def _real_main():
 if opts.cookiefile is not None:
 try:
 jar.save()
-except (IOError, OSError), err:
+except (IOError, OSError) as err:
 sys.exit(u'ERROR: unable to save cookie jar')

 sys.exit(retcode)

@@ -177,7 +177,7 @@ def sanitize_open(filename, open_mode):
 return (sys.stdout, filename)
 stream = open(encodeFilename(filename), open_mode)
 return (stream, filename)
-except (IOError, OSError), err:
+except (IOError, OSError) as err:
 # In case of error, try to remove win32 forbidden chars
 filename = re.sub(ur'[/<>:"\|\?\*]', u'#', filename)