2012-03-24 19:07:37 -06:00
#!/usr/bin/env python
# -*- coding: utf-8 -*-
2014-03-23 18:40:09 -06:00
import calendar
2014-02-24 17:43:17 -07:00
import contextlib
2013-12-15 21:04:12 -07:00
import ctypes
2013-08-28 04:57:10 -06:00
import datetime
import email . utils
2013-05-13 01:20:08 -06:00
import errno
2014-03-18 07:27:42 -06:00
import getpass
2012-03-24 19:07:37 -06:00
import gzip
2014-01-20 03:36:47 -07:00
import itertools
2012-11-27 16:09:17 -07:00
import io
2012-12-20 05:13:24 -07:00
import json
2012-03-24 19:07:37 -06:00
import locale
2013-11-24 19:12:26 -07:00
import math
2012-03-24 19:07:37 -06:00
import os
2013-10-12 05:49:27 -06:00
import pipes
2013-08-28 04:57:10 -06:00
import platform
2012-03-24 19:07:37 -06:00
import re
2013-11-23 22:37:14 -07:00
import ssl
2013-08-28 04:57:10 -06:00
import socket
2014-02-15 08:24:43 -07:00
import struct
2013-12-09 10:29:07 -07:00
import subprocess
2012-03-24 19:07:37 -06:00
import sys
2013-01-03 07:39:55 -07:00
import traceback
2014-03-10 10:31:32 -06:00
import xml . etree . ElementTree
2012-03-24 19:07:37 -06:00
import zlib
2012-11-27 15:54:09 -07:00
try :
2012-11-27 18:04:46 -07:00
import urllib . request as compat_urllib_request
2012-11-27 15:54:09 -07:00
except ImportError : # Python 2
2012-11-27 18:04:46 -07:00
import urllib2 as compat_urllib_request
2012-11-27 15:54:09 -07:00
try :
2012-11-27 18:04:46 -07:00
import urllib . error as compat_urllib_error
2012-11-27 15:54:09 -07:00
except ImportError : # Python 2
2012-11-27 18:04:46 -07:00
import urllib2 as compat_urllib_error
2012-11-27 15:54:09 -07:00
try :
2012-11-27 18:04:46 -07:00
import urllib . parse as compat_urllib_parse
2012-11-27 15:54:09 -07:00
except ImportError : # Python 2
2012-11-27 18:04:46 -07:00
import urllib as compat_urllib_parse
2012-11-27 15:54:09 -07:00
2012-11-27 20:51:27 -07:00
try :
from urllib . parse import urlparse as compat_urllib_parse_urlparse
except ImportError : # Python 2
from urlparse import urlparse as compat_urllib_parse_urlparse
2013-07-12 06:53:28 -06:00
try :
import urllib . parse as compat_urlparse
except ImportError : # Python 2
import urlparse as compat_urlparse
2012-11-27 15:54:09 -07:00
try :
2012-11-27 18:04:46 -07:00
import http . cookiejar as compat_cookiejar
2012-11-27 15:54:09 -07:00
except ImportError : # Python 2
2012-11-27 18:04:46 -07:00
import cookielib as compat_cookiejar
2012-11-27 15:54:09 -07:00
2012-11-27 16:02:55 -07:00
try :
2012-11-27 18:04:46 -07:00
import html . entities as compat_html_entities
2012-11-27 16:17:12 -07:00
except ImportError : # Python 2
2012-11-27 18:04:46 -07:00
import htmlentitydefs as compat_html_entities
2012-11-27 16:02:55 -07:00
2012-11-27 16:06:28 -07:00
try :
2012-11-27 18:04:46 -07:00
import html . parser as compat_html_parser
2012-11-27 16:17:12 -07:00
except ImportError : # Python 2
2012-11-27 18:04:46 -07:00
import HTMLParser as compat_html_parser
2012-11-27 16:06:28 -07:00
2012-11-27 16:13:00 -07:00
try :
2012-11-27 18:04:46 -07:00
import http . client as compat_http_client
2012-11-27 16:17:12 -07:00
except ImportError : # Python 2
2012-11-27 18:04:46 -07:00
import httplib as compat_http_client
2012-11-27 16:13:00 -07:00
2013-08-27 20:25:38 -06:00
try :
2013-08-28 02:18:39 -06:00
from urllib . error import HTTPError as compat_HTTPError
2013-08-27 20:25:38 -06:00
except ImportError : # Python 2
from urllib2 import HTTPError as compat_HTTPError
2013-09-21 06:19:30 -06:00
try :
from urllib . request import urlretrieve as compat_urlretrieve
except ImportError : # Python 2
from urllib import urlretrieve as compat_urlretrieve
2012-12-16 04:29:03 -07:00
try:
    from urllib.parse import parse_qs as compat_parse_qs
except ImportError:  # Python 2
    # HACK: The following is the correct parse_qs implementation from cpython 3's stdlib.
    # Python 2's version is apparently totally broken

    def _unquote(string, encoding='utf-8', errors='replace'):
        # Percent-decode *string*: '%xx' byte sequences are collected and
        # decoded with *encoding*; malformed escapes are kept literally.
        if string == '':
            return string
        res = string.split('%')
        if len(res) == 1:
            return string
        if encoding is None:
            encoding = 'utf-8'
        if errors is None:
            errors = 'replace'
        # pct_sequence: contiguous sequence of percent-encoded bytes, decoded
        pct_sequence = b''
        string = res[0]
        for item in res[1:]:
            try:
                if not item:
                    raise ValueError
                # Py2-only: str.decode('hex') turns the two hex digits into a byte
                pct_sequence += item[:2].decode('hex')
                rest = item[2:]
                if not rest:
                    # This segment was just a single percent-encoded character.
                    # May be part of a sequence of code units, so delay decoding.
                    # (Stored in pct_sequence).
                    continue
            except ValueError:
                rest = '%' + item
            # Encountered non-percent-encoded characters. Flush the current
            # pct_sequence.
            string += pct_sequence.decode(encoding, errors) + rest
            pct_sequence = b''
        if pct_sequence:
            # Flush the final pct_sequence
            string += pct_sequence.decode(encoding, errors)
        return string

    def _parse_qsl(qs, keep_blank_values=False, strict_parsing=False,
                   encoding='utf-8', errors='replace'):
        # Split a query string into a list of (name, value) text pairs.
        qs, _coerce_result = qs, unicode
        pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
        r = []
        for name_value in pairs:
            if not name_value and not strict_parsing:
                continue
            nv = name_value.split('=', 1)
            if len(nv) != 2:
                if strict_parsing:
                    raise ValueError("bad query field: %r" % (name_value,))
                # Handle case of a control-name with no equal sign
                if keep_blank_values:
                    nv.append('')
                else:
                    continue
            if len(nv[1]) or keep_blank_values:
                name = nv[0].replace('+', ' ')
                name = _unquote(name, encoding=encoding, errors=errors)
                name = _coerce_result(name)
                value = nv[1].replace('+', ' ')
                value = _unquote(value, encoding=encoding, errors=errors)
                value = _coerce_result(value)
                r.append((name, value))
        return r

    def compat_parse_qs(qs, keep_blank_values=False, strict_parsing=False,
                        encoding='utf-8', errors='replace'):
        # Parse a query string into a dict mapping each name to a list of values.
        parsed_result = {}
        pairs = _parse_qsl(qs, keep_blank_values, strict_parsing,
                           encoding=encoding, errors=errors)
        for name, value in pairs:
            if name in parsed_result:
                parsed_result[name].append(value)
            else:
                parsed_result[name] = [value]
        return parsed_result
2012-11-27 16:13:00 -07:00
2012-11-27 16:02:55 -07:00
# compat_str / compat_chr: the text type and the codepoint->character
# function under both Python 2 and Python 3.
try:
    compat_str = unicode  # Python 2
except NameError:
    compat_str = str

try:
    compat_chr = unichr  # Python 2
except NameError:
    compat_chr = chr

# Exception type raised by xml.etree.ElementTree for malformed XML.
try:
    from xml.etree.ElementTree import ParseError as compat_xml_parse_error
except ImportError:  # Python 2.6
    from xml.parsers.expat import ExpatError as compat_xml_parse_error
2013-05-20 03:57:10 -06:00
def compat_ord(c):
    """Return the integer value of *c*, which may be a one-character
    string or already an int (indexing a Python 3 bytes object yields
    ints, while Python 2 yields one-byte strings)."""
    return c if type(c) is int else ord(c)
2013-06-06 06:35:08 -06:00
# This is not clearly defined otherwise
compiled_regex_type = type(re.compile(''))

# Default HTTP headers added to every request by YoutubeDLHandler below.
std_headers = {
    'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20100101 Firefox/10.0 (Chrome)',
    'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Encoding': 'gzip, deflate',
    'Accept-Language': 'en-us,en;q=0.5',
}
2012-12-30 10:22:36 -07:00
2012-03-24 19:07:37 -06:00
def preferredencoding():
    """Get preferred encoding.

    Returns the best encoding scheme for the system, based on
    locale.getpreferredencoding() and some further tweaks.
    """
    try:
        pref = locale.getpreferredencoding()
        # Some broken locales report an encoding that cannot actually be
        # used; verify it before trusting it.
        u'TEST'.encode(pref)
    except Exception:
        # Narrowed from a bare `except:`, which also swallowed
        # SystemExit/KeyboardInterrupt.
        pref = 'UTF-8'

    return pref
2012-03-24 19:07:37 -06:00
2012-11-27 16:46:21 -07:00
if sys.version_info >= (3, 0):
    def compat_print(s):
        """Print a text string (Python 3: stdout handles encoding)."""
        assert type(s) == type(u'')
        print(s)
else:
    def compat_print(s):
        """Print a unicode string, encoding it for the console (Python 2)."""
        print(s.encode(preferredencoding(), 'xmlcharrefreplace'))
2012-03-24 19:07:37 -06:00
2012-12-20 05:13:24 -07:00
# In Python 2.x, json.dump expects a bytestream.
# In Python 3.x, it writes to a character stream
if sys.version_info >= (3, 0):
    def write_json_file(obj, fn):
        """Serialize *obj* as JSON to the file named *fn* (UTF-8 text)."""
        with open(fn, 'w', encoding='utf-8') as f:
            json.dump(obj, f)
else:
    def write_json_file(obj, fn):
        """Serialize *obj* as JSON to the file named *fn* (binary mode)."""
        with open(fn, 'wb') as f:
            json.dump(obj, f)
2013-07-11 08:12:08 -06:00
if sys.version_info >= (2, 7):
    def find_xpath_attr(node, xpath, key, val):
        """Find the xpath xpath[@key=val] using ElementTree's own
        attribute-predicate support."""
        assert re.match(r'^[a-zA-Z]+$', key)
        assert re.match(r'^[a-zA-Z0-9@\s:._]*$', val)
        return node.find(xpath + u"[@%s='%s']" % (key, val))
else:
    def find_xpath_attr(node, xpath, key, val):
        """Python 2.6 fallback: scan all xpath matches by hand."""
        for elem in node.findall(xpath):
            if elem.attrib.get(key) == val:
                return elem
        return None
2013-10-12 13:34:04 -06:00
# On python2.6 the xml.etree.ElementTree.Element methods don't support
# the namespace parameter
def xpath_with_ns(path, ns_map):
    """Expand 'ns:tag' components of *path* into ElementTree's
    '{uri}tag' form, resolving prefixes through *ns_map*."""
    def expand(component):
        parts = component.split(':')
        if len(parts) == 1:
            return parts[0]
        ns, tag = parts
        return '{%s}%s' % (ns_map[ns], tag)

    return '/'.join(expand(c) for c in path.split('/'))
2012-03-24 19:07:37 -06:00
def htmlentity_transform(matchobj):
    """Transforms an HTML entity to a character.

    This function receives a match object and is intended to be used with
    the re.sub() function.
    """
    entity = matchobj.group(1)

    # Known non-numeric HTML entity
    if entity in compat_html_entities.name2codepoint:
        return compat_chr(compat_html_entities.name2codepoint[entity])

    # Numeric character reference. FIX: hexadecimal references may contain
    # the letters a-f, which '\d' does not match, so spell out both forms
    # instead of the old 'x?\d+'.
    mobj = re.match(u'(?u)#(x[0-9a-fA-F]+|[0-9]+)', entity)
    if mobj is not None:
        numstr = mobj.group(1)
        if numstr.startswith(u'x'):
            base = 16
            numstr = u'0%s' % numstr
        else:
            base = 10
        return compat_chr(int(numstr, base))

    # Unknown entity in name, return its literal representation
    return (u'&%s;' % entity)
2012-03-24 19:07:37 -06:00
2012-11-27 16:06:28 -07:00
# Backported bugfix: the stock HTMLParser regex mishandles some unquoted
# attribute values, so replace it for every parser subclass defined below.
compat_html_parser.locatestarttagend = re.compile(r"""<[a-zA-Z][-.a-zA-Z0-9:_]*(?:\s+(?:(?<=['"\s])[^\s/>][^\s/=>]*(?:\s*=+\s*(?:'[^']*'|"[^"]*"|(?!['"])[^>\s]*))?\s*)*)?\s*""", re.VERBOSE)  # backport bugfix
2013-09-13 14:05:29 -06:00
class BaseHTMLParser(compat_html_parser.HTMLParser):
    """Common base for the scraping parsers below; keeps the raw document
    in self.html so subclasses can re-slice it by parser position."""

    def __init__(self):
        # FIX: this was previously misspelled "__init" (name-mangled to
        # _BaseHTMLParser__init and never invoked), so self.html was never
        # initialized on construction.
        compat_html_parser.HTMLParser.__init__(self)
        self.html = None

    def loads(self, html):
        """Parse the complete document *html*."""
        self.html = html
        self.feed(html)
        self.close()
class AttrParser(BaseHTMLParser):
    """Modified HTMLParser that isolates a tag with the specified attribute"""
    def __init__(self, attribute, value):
        # attribute/value: the pair the wanted opening tag must carry
        self.attribute = attribute
        self.value = value
        self.result = None          # [tag, startpos, endpos] once found
        self.started = False        # inside the wanted element?
        self.depth = {}             # per-tag nesting counters while started
        self.watch_startpos = False # next event records the content start
        self.error_count = 0
        BaseHTMLParser.__init__(self)

    def error(self, message):
        # Best-effort recovery: skip the offending line up to 10 times
        # before giving up (unless we are already inside the target tag).
        if self.error_count > 10 or self.started:
            raise compat_html_parser.HTMLParseError(message, self.getpos())
        self.rawdata = '\n'.join(self.html.split('\n')[self.getpos()[0]:])  # skip one line
        self.error_count += 1
        self.goahead(1)

    def handle_starttag(self, tag, attrs):
        attrs = dict(attrs)
        if self.started:
            self.find_startpos(None)
        if self.attribute in attrs and attrs[self.attribute] == self.value:
            self.result = [tag]
            self.started = True
            self.watch_startpos = True
        if self.started:
            if not tag in self.depth: self.depth[tag] = 0
            self.depth[tag] += 1

    def handle_endtag(self, tag):
        if self.started:
            if tag in self.depth: self.depth[tag] -= 1
            # Matching close of the element we started in: record end position
            if self.depth[self.result[0]] == 0:
                self.started = False
                self.result.append(self.getpos())

    def find_startpos(self, x):
        """Needed to put the start position of the result (self.result[1])
        after the opening tag with the requested id"""
        if self.watch_startpos:
            self.watch_startpos = False
            self.result.append(self.getpos())
    # Any event right after the opening tag fixes the content start position.
    handle_entityref = handle_charref = handle_data = handle_comment = \
        handle_decl = handle_pi = unknown_decl = find_startpos

    def get_result(self):
        # Returns the text between start and end positions, or None if the
        # element was never found (or never closed).
        if self.result is None:
            return None
        if len(self.result) != 3:
            return None
        lines = self.html.split('\n')
        # getpos() line numbers are 1-based; slice the covered lines
        lines = lines[self.result[1][0]-1:self.result[2][0]]
        lines[0] = lines[0][self.result[1][1]:]
        if len(lines) == 1:
            lines[-1] = lines[-1][:self.result[2][1]-self.result[1][1]]
        lines[-1] = lines[-1][:self.result[2][1]]
        return '\n'.join(lines).strip()
2013-02-01 09:29:50 -07:00
# Hack for https://github.com/rg3/youtube-dl/issues/662
# Old HTMLParser versions choke on "</scr'+'ipt>" inside scripts; treat it
# as a plain end tag of that literal length.
if sys.version_info < (2, 7, 3):
    AttrParser.parse_endtag = (lambda self, i:
        i + len("</scr'+'ipt>")
        if self.rawdata[i:].startswith("</scr'+'ipt>")
        else compat_html_parser.HTMLParser.parse_endtag(self, i))
2012-04-10 16:22:51 -06:00
def get_element_by_id(id, html):
    """Return the content of the tag carrying the given ID in *html*,
    or None if no such tag exists."""
    return get_element_by_attribute("id", id, html)
def get_element_by_attribute(attribute, value, html):
    """Return the content of the tag whose *attribute* equals *value* in
    the passed HTML document, or None when nothing matches."""
    parser = AttrParser(attribute, value)
    try:
        parser.loads(html)
    except compat_html_parser.HTMLParseError:
        # Best effort: the parser's own error recovery may already have
        # captured a result before giving up.
        pass
    return parser.get_result()
2012-04-10 16:22:51 -06:00
2013-09-13 14:05:29 -06:00
class MetaParser(BaseHTMLParser):
    """
    Modified HTMLParser that isolates a meta tag with the specified name
    attribute.
    """
    def __init__(self, name):
        BaseHTMLParser.__init__(self)
        self.name = name
        self.content = None
        self.result = None

    def handle_starttag(self, tag, attrs):
        # Only <meta name="..."> tags are of interest
        if tag != 'meta':
            return
        attr_map = dict(attrs)
        if attr_map.get('name') == self.name:
            self.result = attr_map.get('content')

    def get_result(self):
        return self.result
def get_meta_content(name, html):
    """
    Return the content attribute from the meta tag with the given name
    attribute, or None if absent.
    """
    parser = MetaParser(name)
    try:
        parser.loads(html)
    except compat_html_parser.HTMLParseError:
        pass  # best effort; a partial parse may still have found the tag
    return parser.get_result()
2012-04-10 16:22:51 -06:00
def clean_html(html):
    """Clean an HTML snippet into a readable string"""
    # Flatten newlines, then turn <br> and paragraph breaks into '\n'
    html = html.replace('\n', ' ')
    html = re.sub(r'\s*<\s*br\s*/?\s*>\s*', '\n', html)
    html = re.sub(r'<\s*/\s*p\s*>\s*<\s*p[^>]*>', '\n', html)
    # Strip any remaining tags
    html = re.sub('<.*?>', '', html)
    # Replace html entities and trim
    return unescapeHTML(html).strip()
2012-04-10 16:22:51 -06:00
2012-03-24 19:07:37 -06:00
def sanitize_open(filename, open_mode):
    """Try to open the given filename, and slightly tweak it if this fails.

    Attempts to open the given filename. If this fails, it tries to change
    the filename slightly, step by step, until it's either able to open it
    or it fails and raises a final exception, like the standard open()
    function.

    It returns the tuple (stream, definitive_file_name).
    """
    try:
        if filename == u'-':
            if sys.platform == 'win32':
                import msvcrt
                msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
            return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename)
        stream = open(encodeFilename(filename), open_mode)
        return (stream, filename)
    except (IOError, OSError) as err:
        # Permission errors cannot be fixed by renaming; re-raise directly
        if err.errno in (errno.EACCES,):
            raise

        # In case of error, try to remove win32 forbidden chars.
        # FIX: os.path.join() takes the path parts as separate arguments;
        # it was previously handed a single generator object.
        alt_filename = os.path.join(*[
            re.sub(u'[/<>:"\\|\\\\?\\*]', u'#', path_part)
            for path_part in os.path.split(filename)
        ])
        if alt_filename == filename:
            raise
        else:
            # An exception here should be caught in the caller.
            # FIX: open the sanitized alt_filename, not the original
            # filename that just failed.
            stream = open(encodeFilename(alt_filename), open_mode)
            return (stream, alt_filename)
2012-03-24 19:07:37 -06:00
def timeconvert(timestr):
    """Convert RFC 2822 defined time string into system timestamp"""
    parsed = email.utils.parsedate_tz(timestr)
    if parsed is None:
        return None
    return email.utils.mktime_tz(parsed)
2012-11-26 15:58:46 -07:00
2012-12-03 07:36:24 -07:00
def sanitize_filename(s, restricted=False, is_id=False):
    """Sanitizes a string so it could be used as part of a filename.

    If restricted is set, use a stricter subset of allowed characters.
    Set is_id if this is not an arbitrary string, but an ID that should be
    kept if possible.
    """
    def replace_insane(char):
        # '?' and control characters are always dropped
        if char == '?' or ord(char) < 32 or ord(char) == 127:
            return ''
        if char == '"':
            return '' if restricted else '\''
        if char == ':':
            return '_-' if restricted else ' -'
        if char in '\\/|*<>':
            return '_'
        if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()):
            return '_'
        if restricted and ord(char) > 127:
            return '_'
        return char

    result = u''.join(replace_insane(c) for c in s)
    if not is_id:
        # Collapse runs of underscores and trim them from the ends
        while '__' in result:
            result = result.replace('__', '_')
        result = result.strip('_')
        # Common case of "Foreign band name - English song title"
        if restricted and result.startswith('-_'):
            result = result[2:]
        if not result:
            result = '_'
    return result
2012-03-24 19:07:37 -06:00
def orderedSet(iterable):
    """Return a list of the items of *iterable* with duplicates removed,
    keeping the first occurrence of each. Membership is tested with ==,
    so items need not be hashable."""
    unique = []
    for item in iterable:
        if item not in unique:
            unique.append(item)
    return unique
2012-03-24 19:07:37 -06:00
2014-03-23 18:40:09 -06:00
2012-03-24 19:07:37 -06:00
def unescapeHTML(s):
    """Replace HTML entities in *s* with the characters they stand for.
    Passes None through unchanged."""
    if s is None:
        return None
    assert type(s) == compat_str

    return re.sub(r'(?u)&(.+?);', htmlentity_transform, s)
2012-03-24 19:07:37 -06:00
2014-01-04 19:07:55 -07:00
def encodeFilename(s, for_subprocess=False):
    """
    @param s The name of the file
    """
    assert type(s) == compat_str

    # Python 3 has a Unicode API
    if sys.version_info >= (3, 0):
        return s

    if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
        # Pass u'' directly to use Unicode APIs on Windows 2000 and up
        # (Detecting Windows NT 4 is tricky because 'major >= 4' would
        # match Windows 9x series as well. Besides, NT 4 is obsolete.)
        if not for_subprocess:
            return s
        # For subprocess calls, encode with locale encoding
        # Refer to http://stackoverflow.com/a/9951851/35070
        encoding = preferredencoding()
    else:
        encoding = sys.getfilesystemencoding()
    return s.encode(encoding or 'utf-8', 'ignore')
2013-02-21 09:09:39 -07:00
def decodeOption(optval):
    """Decode a command-line option value to text; None passes through."""
    if optval is None:
        return None
    if isinstance(optval, bytes):
        optval = optval.decode(preferredencoding())

    assert isinstance(optval, compat_str)
    return optval
2013-01-01 12:27:53 -07:00
2013-05-04 04:02:18 -06:00
def formatSeconds(secs):
    """Format a duration in seconds as 'H:MM:SS', 'M:SS', or plain
    seconds when under a minute."""
    if secs > 3600:
        return '%d:%02d:%02d' % (secs // 3600, (secs % 3600) // 60, secs % 60)
    if secs > 60:
        return '%d:%02d' % (secs // 60, secs % 60)
    return '%d' % secs
2013-12-29 07:28:32 -07:00
def make_HTTPS_handler(opts_no_check_certificate, **kwargs):
    """Build an HTTPS handler, working around SSLv3 handshake issues.
    Certificate checking is disabled when opts_no_check_certificate is set."""
    if sys.version_info < (3, 2):
        # ssl.SSLContext is unavailable; use a connection subclass that
        # tries SSLv3 first and falls back to auto-negotiation (SSLv23).
        import httplib

        class HTTPSConnectionV3(httplib.HTTPSConnection):
            def __init__(self, *args, **kwargs):
                httplib.HTTPSConnection.__init__(self, *args, **kwargs)

            def connect(self):
                sock = socket.create_connection((self.host, self.port), self.timeout)
                # _tunnel_host is only set when going through a proxy
                if getattr(self, '_tunnel_host', False):
                    self.sock = sock
                    self._tunnel()
                try:
                    self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, ssl_version=ssl.PROTOCOL_SSLv3)
                except ssl.SSLError:
                    self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, ssl_version=ssl.PROTOCOL_SSLv23)

        class HTTPSHandlerV3(compat_urllib_request.HTTPSHandler):
            def https_open(self, req):
                return self.do_open(HTTPSConnectionV3, req)
        return HTTPSHandlerV3(**kwargs)
    else:
        context = ssl.SSLContext(ssl.PROTOCOL_SSLv3)
        context.verify_mode = (ssl.CERT_NONE
                               if opts_no_check_certificate
                               else ssl.CERT_REQUIRED)
        context.set_default_verify_paths()
        try:
            context.load_default_certs()
        except AttributeError:
            pass  # Python < 3.4
        return compat_urllib_request.HTTPSHandler(context=context, **kwargs)
2013-05-04 04:19:02 -06:00
2013-01-01 12:27:53 -07:00
class ExtractorError(Exception):
    """Error during info extraction."""
    def __init__(self, msg, tb=None, expected=False, cause=None):
        """ tb, if given, is the original traceback (so that it can be printed out).
        If expected is set, this is a normal error message and most likely not a bug in youtube-dl.
        """
        # Network-level failures are always "expected" (not youtube-dl bugs)
        if sys.exc_info()[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError):
            expected = True
        if not expected:
            msg = msg + u'; please report this issue on https://yt-dl.org/bug . Be sure to call youtube-dl with the --verbose flag and include its complete output. Make sure you are using the latest version; type  youtube-dl -U  to update.'
        super(ExtractorError, self).__init__(msg)

        self.traceback = tb
        self.exc_info = sys.exc_info()  # preserve original exception
        self.cause = cause

    def format_traceback(self):
        # Render the stored traceback as text, or None if none was given
        if self.traceback is None:
            return None
        return u''.join(traceback.format_tb(self.traceback))
2013-01-01 12:27:53 -07:00
2013-10-23 06:38:03 -06:00
class RegexNotFoundError(ExtractorError):
    """Error when a regex didn't match"""
    pass
2012-03-24 19:07:37 -06:00
class DownloadError(Exception):
    """Download Error exception.

    Raised by FileDownloader objects that are not configured to continue
    on errors; carries the appropriate error message.
    """
    def __init__(self, msg, exc_info=None):
        """exc_info, if given, is the original exception that caused the
        trouble (as returned by sys.exc_info())."""
        super(DownloadError, self).__init__(msg)
        self.exc_info = exc_info
2012-03-24 19:07:37 -06:00
class SameFileError(Exception):
    """Same File exception.

    Raised by FileDownloader objects when they detect that multiple files
    would have to be downloaded to the same file on disk.
    """
    pass
2012-03-24 19:07:37 -06:00
class PostProcessingError(Exception):
    """Post Processing exception.

    This exception may be raised by PostProcessor's .run() method to
    indicate an error in the postprocessing task.
    """
    def __init__(self, msg):
        # FIX: forward the message to Exception so str(exc) and printed
        # tracebacks show it (previously the base initializer was never
        # called and the exception rendered as an empty string).
        super(PostProcessingError, self).__init__(msg)
        self.msg = msg
2012-03-24 19:07:37 -06:00
class MaxDownloadsReached(Exception):
    """Raised once the --max-downloads limit has been reached."""
    pass
2012-03-24 19:07:37 -06:00
class UnavailableVideoError(Exception):
    """Unavailable Format exception.

    Raised when a video is requested in a format that is not available
    for that video.
    """
    pass
2012-03-24 19:07:37 -06:00
class ContentTooShortError(Exception):
    """Content Too Short exception.

    Raised by FileDownloader objects when a downloaded file is smaller
    than the size the server announced, indicating the connection was
    probably interrupted.
    """
    # Byte counts for diagnostics
    downloaded = None
    expected = None

    def __init__(self, downloaded, expected):
        self.downloaded = downloaded
        self.expected = expected
2012-03-24 19:07:37 -06:00
2013-08-27 15:15:01 -06:00
class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
    """Handler for HTTP requests and responses.

    This class, when installed with an OpenerDirector, automatically adds
    the standard headers to every HTTP request and handles gzipped and
    deflated responses from web servers. If compression is to be avoided in
    a particular request, the original request in the program code only has
    to include the HTTP header "Youtubedl-No-Compression", which will be
    removed before making the real request.

    Part of this code was copied from:

    http://techknack.net/python-urllib2-handlers/

    Andrew Rowls, the author of that code, agreed to release it to the
    public domain.
    """

    @staticmethod
    def deflate(data):
        # Raw-deflate first; fall back to zlib-wrapped data
        try:
            return zlib.decompress(data, -zlib.MAX_WBITS)
        except zlib.error:
            return zlib.decompress(data)

    @staticmethod
    def addinfourl_wrapper(stream, headers, url, code):
        # Older addinfourl has no code/getcode support; set it manually
        if hasattr(compat_urllib_request.addinfourl, 'getcode'):
            return compat_urllib_request.addinfourl(stream, headers, url, code)
        ret = compat_urllib_request.addinfourl(stream, headers, url)
        ret.code = code
        return ret

    def http_request(self, req):
        # Replace (not merge) any conflicting headers with our defaults
        for h, v in std_headers.items():
            if h in req.headers:
                del req.headers[h]
            req.add_header(h, v)
        if 'Youtubedl-no-compression' in req.headers:
            if 'Accept-encoding' in req.headers:
                del req.headers['Accept-encoding']
            del req.headers['Youtubedl-no-compression']
        if 'Youtubedl-user-agent' in req.headers:
            if 'User-agent' in req.headers:
                del req.headers['User-agent']
            req.headers['User-agent'] = req.headers['Youtubedl-user-agent']
            del req.headers['Youtubedl-user-agent']
        return req

    def http_response(self, req, resp):
        old_resp = resp
        # gzip
        if resp.headers.get('Content-encoding', '') == 'gzip':
            content = resp.read()
            gz = gzip.GzipFile(fileobj=io.BytesIO(content), mode='rb')
            try:
                uncompressed = io.BytesIO(gz.read())
            except IOError as original_ioerror:
                # There may be junk add the end of the file
                # See http://stackoverflow.com/q/4928560/35070 for details
                # Retry with progressively more trailing bytes stripped.
                for i in range(1, 1024):
                    try:
                        gz = gzip.GzipFile(fileobj=io.BytesIO(content[:-i]), mode='rb')
                        uncompressed = io.BytesIO(gz.read())
                    except IOError:
                        continue
                    break
                else:
                    raise original_ioerror
            resp = self.addinfourl_wrapper(uncompressed, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
        # deflate
        if resp.headers.get('Content-encoding', '') == 'deflate':
            gz = io.BytesIO(self.deflate(resp.read()))
            resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code)
            resp.msg = old_resp.msg
        return resp

    https_request = http_request
    https_response = http_response
2013-04-27 07:14:20 -06:00
2014-02-06 03:29:46 -07:00
2014-03-23 18:40:09 -06:00
def parse_iso8601(date_str):
    """Return a UNIX timestamp from the given date"""
    if date_str is None:
        return None

    # Recognize a trailing 'Z' or a '+HH:MM'/'-HHMM' style UTC offset
    tz_m = re.search(
        r'Z$| ?(?P<sign>\+|-)(?P<hours>[0-9]{2}):?(?P<minutes>[0-9]{2})$',
        date_str)
    offset = datetime.timedelta()
    if tz_m:
        date_str = date_str[:-len(tz_m.group(0))]
        if tz_m.group('sign'):
            direction = 1 if tz_m.group('sign') == '+' else -1
            offset = datetime.timedelta(
                hours=direction * int(tz_m.group('hours')),
                minutes=direction * int(tz_m.group('minutes')))
    # Subtracting the offset converts the local time to UTC
    dt = datetime.datetime.strptime(date_str, '%Y-%m-%dT%H:%M:%S') - offset
    return calendar.timegm(dt.timetuple())
2013-04-27 07:14:20 -06:00
def unified_strdate(date_str):
    """Return a string with the date in the format YYYYMMDD"""
    if date_str is None:
        return None

    upload_date = None
    # Replace commas
    date_str = date_str.replace(',', ' ')
    # %z (UTC offset) is only supported in python>=3.2
    date_str = re.sub(r' ?(\+|-)[0-9]{2}:?[0-9]{2}$', '', date_str)

    format_expressions = [
        '%d %B %Y',
        '%d %b %Y',
        '%B %d %Y',
        '%b %d %Y',
        '%Y-%m-%d',
        '%d.%m.%Y',
        '%d/%m/%Y',
        '%Y/%m/%d %H:%M:%S',
        '%Y-%m-%d %H:%M:%S',
        '%d.%m.%Y %H:%M',
        '%d.%m.%Y %H.%M',
        '%Y-%m-%dT%H:%M:%SZ',
        '%Y-%m-%dT%H:%M:%S.%fZ',
        '%Y-%m-%dT%H:%M:%S.%f0Z',
        '%Y-%m-%dT%H:%M:%S',
        '%Y-%m-%dT%H:%M:%S.%f',
        '%Y-%m-%dT%H:%M',
    ]
    # Try every expression; like the historical behavior, a later matching
    # format overrides an earlier one.
    for fmt in format_expressions:
        try:
            upload_date = datetime.datetime.strptime(date_str, fmt).strftime('%Y%m%d')
        except ValueError:
            pass
    if upload_date is None:
        # Last resort: RFC 2822 style dates
        timetuple = email.utils.parsedate_tz(date_str)
        if timetuple:
            upload_date = datetime.datetime(*timetuple[:6]).strftime('%Y%m%d')
    return upload_date
2013-07-12 13:52:59 -06:00
def determine_ext(url, default_ext=u'unknown_video'):
    """Guess the file extension from a URL; fall back to default_ext."""
    # Strip the query string, then take whatever follows the last dot
    candidate = url.partition(u'?')[0].rpartition(u'.')[2]
    if not re.match(r'^[A-Za-z0-9]+$', candidate):
        return default_ext
    return candidate
2013-07-07 17:13:55 -06:00
2013-07-20 04:48:57 -06:00
def subtitles_filename(filename, sub_lang, sub_format):
    """Build the subtitle file name for a media file: name.lang.format."""
    base = filename.rsplit('.', 1)[0]
    return u'.'.join([base, sub_lang, sub_format])
2013-04-27 06:01:55 -06:00
def date_from_str(date_str):
    """
    Return a datetime object from a string in the format YYYYMMDD or
    (now|today)[+-][0-9](day|week|month|year)(s)?"""
    today = datetime.date.today()
    if date_str in ('now', 'today'):
        return today
    match = re.match('(now|today)(?P<sign>[+-])(?P<time>\d+)(?P<unit>day|week|month|year)(s)?', date_str)
    if match is None:
        # Plain absolute date
        return datetime.datetime.strptime(date_str, "%Y%m%d").date()
    amount = int(match.group('time'))
    if match.group('sign') == '-':
        amount = -amount
    unit = match.group('unit')
    # Rough approximation: months/years are converted to whole days
    if unit == 'month':
        unit = 'day'
        amount *= 30
    elif unit == 'year':
        unit = 'day'
        amount *= 365
    return today + datetime.timedelta(**{unit + 's': amount})
2014-01-02 05:47:28 -07:00
def hyphenate_date(date_str):
    """
    Convert a date in 'YYYYMMDD' format to 'YYYY-MM-DD' format"""
    match = re.match(r'^(\d\d\d\d)(\d\d)(\d\d)$', date_str)
    if match is None:
        # Not a compact date; hand it back untouched
        return date_str
    return '-'.join(match.groups())
2013-04-27 06:01:55 -06:00
class DateRange(object):
    """Represents a time interval between two dates"""
    def __init__(self, start=None, end=None):
        """start and end must be strings in the format accepted by date"""
        # Missing bounds default to the extremes of the representable range
        if start is None:
            self.start = datetime.datetime.min.date()
        else:
            self.start = date_from_str(start)
        if end is None:
            self.end = datetime.datetime.max.date()
        else:
            self.end = date_from_str(end)
        if self.start > self.end:
            raise ValueError('Date range: "%s" , the start date must be before the end date' % self)
    @classmethod
    def day(cls, day):
        """Returns a range that only contains the given day"""
        return cls(day, day)
    def __contains__(self, date):
        """Check if the date is in the range"""
        if not isinstance(date, datetime.date):
            date = date_from_str(date)
        return self.start <= date <= self.end
    def __str__(self):
        return '%s - %s' % (self.start.isoformat(), self.end.isoformat())
2013-08-28 04:57:10 -06:00
def platform_name():
    """Returns the platform name as a compat_str"""
    name = platform.platform()
    # On some interpreters this may come back as bytes; normalize to text
    if isinstance(name, bytes):
        name = name.decode(preferredencoding())
    assert isinstance(name, compat_str)
    return name
2013-08-28 10:22:28 -06:00
2013-09-15 22:55:33 -06:00
def write_string(s, out=None):
    """Write text s to the stream out (default: sys.stderr) and flush,
    working around stream/platform encoding quirks."""
    if out is None:
        out = sys.stderr
    assert type(s) == compat_str

    needs_bytes = ('b' in getattr(out, 'mode', '') or
                   sys.version_info[0] < 3)  # Python 2 lies about mode of sys.stderr
    if needs_bytes:
        s = s.encode(preferredencoding(), 'ignore')
    try:
        out.write(s)
    except UnicodeEncodeError:
        # In Windows shells, this can fail even when the codec is just charmap!?
        # See https://wiki.python.org/moin/PrintFails#Issue
        if sys.platform == 'win32' and hasattr(out, 'encoding'):
            out.write(s.encode(out.encoding, 'ignore').decode(out.encoding))
        else:
            raise
    out.flush()
2013-08-28 06:28:55 -06:00
def bytes_to_intlist(bs):
    """Convert a byte string to a list of integer byte values."""
    if not bs:
        return []
    # Python 3 byte strings already yield ints when indexed
    if isinstance(bs[0], int):
        return list(bs)
    # Python 2: indexing yields 1-char strings, so go through ord()
    return [ord(c) for c in bs]
2013-08-28 10:22:28 -06:00
2013-08-28 07:59:07 -06:00
def intlist_to_bytes(xs):
    """Convert a list of integer byte values back into a byte string."""
    if not xs:
        return b''
    # On Python 2, bytes is str and chr() produces single-byte strings
    if isinstance(chr(0), bytes):
        return ''.join([chr(x) for x in xs])
    # Python 3: the bytes constructor accepts an iterable of ints directly
    return bytes(xs)
2013-10-02 00:41:03 -06:00
def get_cachedir(params=None):
    """Return the cache directory: params['cachedir'] if given, otherwise
    $XDG_CACHE_HOME/youtube-dl (defaulting to ~/.cache/youtube-dl)."""
    # params=None instead of a mutable {} default (best practice; behavior
    # is unchanged for all existing callers)
    cache_root = os.environ.get('XDG_CACHE_HOME',
                                os.path.expanduser('~/.cache'))
    return (params or {}).get('cachedir', os.path.join(cache_root, 'youtube-dl'))
2013-10-05 20:27:09 -06:00
# Cross-platform file locking
if sys.platform == 'win32':
    # Windows: use LockFileEx/UnlockFileEx from kernel32 via ctypes
    import ctypes.wintypes
    import msvcrt

    class OVERLAPPED(ctypes.Structure):
        # ctypes mirror of the Win32 OVERLAPPED structure that
        # LockFileEx/UnlockFileEx take a pointer to
        _fields_ = [
            ('Internal', ctypes.wintypes.LPVOID),
            ('InternalHigh', ctypes.wintypes.LPVOID),
            ('Offset', ctypes.wintypes.DWORD),
            ('OffsetHigh', ctypes.wintypes.DWORD),
            ('hEvent', ctypes.wintypes.HANDLE),
        ]

    kernel32 = ctypes.windll.kernel32
    LockFileEx = kernel32.LockFileEx
    LockFileEx.argtypes = [
        ctypes.wintypes.HANDLE,     # hFile
        ctypes.wintypes.DWORD,      # dwFlags
        ctypes.wintypes.DWORD,      # dwReserved
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockLow
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockHigh
        ctypes.POINTER(OVERLAPPED)  # Overlapped
    ]
    LockFileEx.restype = ctypes.wintypes.BOOL
    UnlockFileEx = kernel32.UnlockFileEx
    UnlockFileEx.argtypes = [
        ctypes.wintypes.HANDLE,     # hFile
        ctypes.wintypes.DWORD,      # dwReserved
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockLow
        ctypes.wintypes.DWORD,      # nNumberOfBytesToLockHigh
        ctypes.POINTER(OVERLAPPED)  # Overlapped
    ]
    UnlockFileEx.restype = ctypes.wintypes.BOOL
    # Byte range covering the whole file (low/high halves of the length)
    whole_low = 0xffffffff
    whole_high = 0x7fffffff

    def _lock_file(f, exclusive):
        # Lock the entire file; 0x2 is LOCKFILE_EXCLUSIVE_LOCK
        overlapped = OVERLAPPED()
        overlapped.Offset = 0
        overlapped.OffsetHigh = 0
        overlapped.hEvent = 0
        # Keep the pointer alive on the file object for the later unlock call
        f._lock_file_overlapped_p = ctypes.pointer(overlapped)
        handle = msvcrt.get_osfhandle(f.fileno())
        if not LockFileEx(handle, 0x2 if exclusive else 0x0, 0,
                          whole_low, whole_high, f._lock_file_overlapped_p):
            raise OSError('Locking file failed: %r' % ctypes.FormatError())

    def _unlock_file(f):
        # Must be called on a file previously locked with _lock_file
        assert f._lock_file_overlapped_p
        handle = msvcrt.get_osfhandle(f.fileno())
        if not UnlockFileEx(handle, 0,
                            whole_low, whole_high, f._lock_file_overlapped_p):
            raise OSError('Unlocking file failed: %r' % ctypes.FormatError())
else:
    # POSIX: delegate to fcntl advisory locks
    import fcntl

    def _lock_file(f, exclusive):
        fcntl.lockf(f, fcntl.LOCK_EX if exclusive else fcntl.LOCK_SH)

    def _unlock_file(f):
        fcntl.lockf(f, fcntl.LOCK_UN)
class locked_file(object):
    """Context manager wrapping a file that is locked while open:
    exclusive lock for writing/appending, shared lock for reading."""

    def __init__(self, filename, mode, encoding=None):
        assert mode in ['r', 'a', 'w']
        self.f = io.open(filename, mode, encoding=encoding)
        self.mode = mode

    def __enter__(self):
        # Readers share the lock; any writer needs exclusivity
        wants_exclusive = self.mode != 'r'
        try:
            _lock_file(self.f, wants_exclusive)
        except IOError:
            self.f.close()
            raise
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        # Always close, even if unlocking raises
        try:
            _unlock_file(self.f)
        finally:
            self.f.close()

    def __iter__(self):
        return iter(self.f)

    def write(self, *args):
        return self.f.write(*args)

    def read(self, *args):
        return self.f.read(*args)
2013-10-12 05:49:27 -06:00
def shell_quote(args):
    """Quote a list of arguments for safe display as a shell command line."""
    encoding = sys.getfilesystemencoding()
    if encoding is None:
        encoding = 'utf-8'

    def _as_text(a):
        # We may get a filename encoded with 'encodeFilename'
        return a.decode(encoding) if isinstance(a, bytes) else a

    return u' '.join(pipes.quote(_as_text(a)) for a in args)
2013-10-15 04:05:13 -06:00
2013-10-17 16:46:35 -06:00
def takewhile_inclusive(pred, seq):
    """Like itertools.takewhile, but include the latest evaluated element
    (the first element so that Not pred(e))"""
    for item in seq:
        yield item
        if not pred(item):
            # The failing element has already been yielded; stop here
            break
2013-10-15 04:05:13 -06:00
def smuggle_url(url, data):
    """Pass additional data in a URL for internal use."""
    # Encode the payload as JSON inside a query string in the fragment
    payload = {u'__youtubedl_smuggle': json.dumps(data)}
    return url + u'#' + compat_urllib_parse.urlencode(payload)
2014-01-06 21:34:14 -07:00
def unsmuggle_url(smug_url, default=None):
    """Inverse of smuggle_url: return (url, data), or (url, default) when
    no data was smuggled in."""
    if '#__youtubedl_smuggle' not in smug_url:
        return smug_url, default
    url, _, fragment = smug_url.rpartition(u'#')
    encoded = compat_parse_qs(fragment)[u'__youtubedl_smuggle'][0]
    return url, json.loads(encoded)
2013-11-24 19:12:26 -07:00
def format_bytes(bytes):
    """Format a byte count as a human-readable string, e.g. 1.00KiB."""
    if bytes is None:
        return u'N/A'
    if type(bytes) is str:
        bytes = float(bytes)
    # log(0) is undefined, so zero bytes maps straight to the 'B' bucket
    exponent = 0 if bytes == 0.0 else int(math.log(bytes, 1024.0))
    suffix = [u'B', u'KiB', u'MiB', u'GiB', u'TiB', u'PiB', u'EiB', u'ZiB', u'YiB'][exponent]
    converted = float(bytes) / float(1024 ** exponent)
    return u'%.2f%s' % (converted, suffix)
2013-12-06 05:36:36 -07:00
2013-12-09 10:29:07 -07:00
2013-12-06 05:36:36 -07:00
def str_to_int(int_str):
    """Parse an int from a string, dropping ',' and '.' group separators."""
    return int(re.sub(r'[,\.]', u'', int_str))
2013-12-09 10:29:07 -07:00
def get_term_width():
    """Return the terminal width in columns, or None if it cannot be
    determined ($COLUMNS first, then `stty size`)."""
    columns = os.environ.get('COLUMNS', None)
    if columns:
        return int(columns)
    try:
        sp = subprocess.Popen(
            ['stty', 'size'],
            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = sp.communicate()
        return int(out.split()[1])
    # Narrowed from a bare except: (which also swallowed SystemExit and
    # KeyboardInterrupt); any ordinary failure still means "unknown width"
    except Exception:
        pass
    return None
2013-12-09 11:39:41 -07:00
def month_by_name(name):
    """Return the number of a month by (locale-independently) English name"""
    ENGLISH_NAMES = [
        u'January', u'February', u'March', u'April', u'May', u'June',
        u'July', u'August', u'September', u'October', u'November', u'December']
    for number, english_name in enumerate(ENGLISH_NAMES, start=1):
        if english_name == name:
            return number
    return None
2013-12-10 13:03:53 -07:00
2014-01-20 14:11:34 -07:00
def fix_xml_ampersands(xml_str):
    """Replace all the '&' by '&amp;' in XML"""
    # Negative lookahead keeps already-valid entity references untouched
    bare_amp = r'&(?!amp;|lt;|gt;|apos;|quot;|#x[0-9a-fA-F]{,4};|#[0-9]{,4};)'
    return re.sub(bare_amp, u'&amp;', xml_str)
2013-12-15 21:04:12 -07:00
def setproctitle(title):
    """Set the process title (Linux only, via prctl; silently a no-op
    elsewhere or when libc lacks prctl)."""
    assert isinstance(title, compat_str)
    try:
        libc = ctypes.cdll.LoadLibrary("libc.so.6")
    except OSError:
        # No glibc available (non-Linux): nothing to do
        return
    encoded = title.encode('utf-8')
    buf = ctypes.create_string_buffer(len(encoded))
    buf.value = encoded
    try:
        # 15 == PR_SET_NAME
        libc.prctl(15, buf, 0, 0, 0)
    except AttributeError:
        return  # Strange libc, just skip this
2013-12-16 05:56:13 -07:00
def remove_start(s, start):
    """Return s with the prefix start removed, if present."""
    return s[len(start):] if s.startswith(start) else s
2013-12-16 20:13:36 -07:00
def url_basename(url):
    """Return the last path component of a URL (without slashes)."""
    path = compat_urlparse.urlparse(url).path
    components = path.strip(u'/').split(u'/')
    return components[-1]
2013-12-20 09:05:28 -07:00
class HEADRequest(compat_urllib_request.Request):
    """A Request that issues HTTP HEAD instead of the default GET."""
    def get_method(self):
        return "HEAD"
2013-12-25 07:18:40 -07:00
2014-04-03 08:21:21 -06:00
def int_or_none(v, scale=1, default=None):
    """Convert v to int (floor-divided by scale); default when v is None."""
    if v is None:
        return default
    return int(v) // scale
2013-12-26 05:49:44 -07:00
2014-04-03 08:21:21 -06:00
def float_or_none(v, scale=1, default=None):
    """Convert v to float (divided by scale); default when v is None."""
    if v is None:
        return default
    return float(v) / scale
2014-03-28 16:06:34 -06:00
2013-12-26 05:49:44 -07:00
def parse_duration(s):
    """Parse a duration like '1:23:45', '12:34', '45s' or '1h23m45s'
    into a number of seconds; None if s is None or unparseable."""
    if s is None:
        return None
    m = re.match(
        r'(?:(?:(?P<hours>[0-9]+)[:h])?(?P<mins>[0-9]+)[:m])?(?P<secs>[0-9]+)s?(?::[0-9]+)?$', s)
    if not m:
        return None
    duration = int(m.group('secs'))
    mins = m.group('mins')
    if mins:
        duration += 60 * int(mins)
    hours = m.group('hours')
    if hours:
        duration += 60 * 60 * int(hours)
    return duration
2014-01-03 04:52:27 -07:00
def prepend_extension(filename, ext):
    """Insert ext before the real extension: a.mp4 + 'temp' -> a.temp.mp4."""
    stem, real_ext = os.path.splitext(filename)
    return u'{0}.{1}{2}'.format(stem, ext, real_ext)
2014-01-06 22:23:41 -07:00
def check_executable(exe, args=None):
    """Checks if the given binary is installed somewhere in PATH, and returns its name.
    args can be a list of arguments for a short output (like -version)"""
    # args=None instead of a mutable [] default (best practice; behavior
    # is unchanged for all existing callers)
    try:
        subprocess.Popen([exe] + (args or []),
                         stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
    except OSError:
        return False
    return exe
2014-01-20 03:36:47 -07:00
class PagedList(object):
    # Lazily materialized list backed by a page-fetching callback.
    # pagefunc(pagenum) must return an iterable with the entries of that
    # zero-based page; pagesize is the (maximum) number of entries per page.
    def __init__(self, pagefunc, pagesize):
        self._pagefunc = pagefunc
        self._pagesize = pagesize

    def __len__(self):
        # This is only useful for tests
        return len(self.getslice())

    def getslice(self, start=0, end=None):
        """Return the entries with indices in [start, end) as a plain list,
        fetching only the pages that overlap that range."""
        res = []
        for pagenum in itertools.count(start // self._pagesize):
            firstid = pagenum * self._pagesize
            nextfirstid = pagenum * self._pagesize + self._pagesize
            if start >= nextfirstid:
                # Page lies entirely before the requested range
                continue
            page_results = list(self._pagefunc(pagenum))
            # Offset of the first wanted entry within this page (0 unless
            # this is the page containing `start`)
            startv = (
                start % self._pagesize
                if firstid <= start < nextfirstid
                else 0)
            # One past the last wanted entry within this page, or None to
            # take the whole rest of the page
            endv = (
                ((end - 1) % self._pagesize) + 1
                if (end is not None and firstid <= end <= nextfirstid)
                else None)
            if startv != 0 or endv is not None:
                page_results = page_results[startv:endv]
            res.extend(page_results)
            # A little optimization - if current page is not "full", ie. does
            # not contain page_size videos then we can assume that this page
            # is the last one - there are no more ids on further pages -
            # i.e. no need to query again.
            if len(page_results) + startv < self._pagesize:
                break
            # If we got the whole page, but the next page is not interesting,
            # break out early as well
            if end == nextfirstid:
                break
        return res
2014-02-09 09:56:10 -07:00
def uppercase_escape(s):
    """Decode \\UXXXXXXXX escape sequences embedded in s.

    The previous implementation called str.decode('unicode-escape') on the
    match, which raises AttributeError on Python 3 (text strings have no
    .decode method); going through codecs works on both Python 2 and 3.
    """
    import codecs  # local import: only needed by this helper
    unicode_escape = codecs.getdecoder('unicode_escape')
    return re.sub(
        r'\\U[0-9a-fA-F]{8}',
        lambda m: unicode_escape(m.group(0))[0], s)
2014-02-15 08:24:43 -07:00
# Probe whether struct accepts text format strings; some old interpreters
# (Python 2.6 and some 2.7 builds) insist on bytes, so wrap them when needed.
try:
    struct.pack(u'!I', 0)
except TypeError:
    # In Python 2.6 (and some 2.7 versions), struct requires a bytes argument
    def struct_pack(spec, *args):
        # Encode a text format spec to ASCII bytes before delegating
        if isinstance(spec, compat_str):
            spec = spec.encode('ascii')
        return struct.pack(spec, *args)

    def struct_unpack(spec, *args):
        # Same normalization for the unpacking direction
        if isinstance(spec, compat_str):
            spec = spec.encode('ascii')
        return struct.unpack(spec, *args)
else:
    # struct handles text specs natively: expose it directly
    struct_pack = struct.pack
    struct_unpack = struct.unpack
2014-02-24 17:43:17 -07:00
def read_batch_urls(batch_fd):
    """Read URLs from an open batch file, dropping comments, blank lines
    and a leading BOM; closes batch_fd when done."""
    def _sanitize(url):
        if not isinstance(url, compat_str):
            url = url.decode('utf-8', 'replace')
        BOM_UTF8 = u'\xef\xbb\xbf'
        if url.startswith(BOM_UTF8):
            url = url[len(BOM_UTF8):]
        url = url.strip()
        # Lines starting with these characters are comments/section markers
        if url.startswith(('#', ';', ']')):
            return False
        return url

    with contextlib.closing(batch_fd) as fd:
        return [url for url in map(_sanitize, fd) if url]
2014-03-07 07:25:33 -07:00
def urlencode_postdata(*args, **kargs):
    """URL-encode POST data and return it as ASCII bytes, as urlopen expects."""
    encoded = compat_urllib_parse.urlencode(*args, **kargs)
    return encoded.encode('ascii')
2014-03-10 10:31:32 -06:00
def parse_xml(s):
    """Parse the XML document in string s, ignoring any DOCTYPE declaration."""
    etree = xml.etree.ElementTree

    class _DoctypeIgnoringBuilder(etree.TreeBuilder):
        def doctype(self, name, pubid, system):
            pass  # Ignore doctypes

    parser = etree.XMLParser(target=_DoctypeIgnoringBuilder())
    # The parser keyword only exists from Python 2.7 on
    kwargs = {'parser': parser} if sys.version_info >= (2, 7) else {}
    return etree.XML(s.encode('utf-8'), **kwargs)
2014-03-18 07:27:42 -06:00
# getpass.getpass on Python 2 + Windows cannot handle unicode prompts;
# encode the prompt first there, pass through unchanged everywhere else.
if sys.version_info < (3, 0) and sys.platform == 'win32':
    def compat_getpass(prompt, *args, **kwargs):
        if isinstance(prompt, compat_str):
            prompt = prompt.encode(preferredencoding())
        return getpass.getpass(prompt, *args, **kwargs)
else:
    compat_getpass = getpass.getpass
2014-03-20 17:59:51 -06:00
US_RATINGS = {
' G ' : 0 ,
' PG ' : 10 ,
' PG-13 ' : 13 ,
' R ' : 16 ,
' NC ' : 18 ,
}
2014-03-24 16:21:20 -06:00
def strip_jsonp(code):
    """Strip a JSONP callback wrapper, leaving only the JSON payload."""
    wrapper = r'(?s)^[a-zA-Z_]+\s*\(\s*(.*)\);\s*?\s*$'
    return re.sub(wrapper, r'\1', code)