# encoding: utf-8
from __future__ import unicode_literals

import json
import re
import itertools

from .common import InfoExtractor
from .subtitles import SubtitlesInfoExtractor
from ..utils import (
    clean_html,
    compat_HTTPError,
    compat_urllib_parse,
    compat_urllib_request,
    compat_urlparse,
    ExtractorError,
    get_element_by_attribute,
    InAdvancePagedList,
    int_or_none,
    RegexNotFoundError,
    std_headers,
    unsmuggle_url,
    urlencode_postdata,
)


class VimeoBaseInfoExtractor(InfoExtractor):
    _NETRC_MACHINE = 'vimeo'
    _LOGIN_REQUIRED = False

    def _login(self):
        (username, password) = self._get_login_info()
        if username is None:
            if self._LOGIN_REQUIRED:
                raise ExtractorError('No login info available, needed for using %s.' % self.IE_NAME, expected=True)
            return
        self.report_login()
        login_url = 'https://vimeo.com/log_in'
        webpage = self._download_webpage(login_url, None, False)
        token = self._search_regex(r'xsrft: \'(.*?)\'', webpage, 'login token')
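        # The xsrft token is echoed back both in the POST data and as a cookie.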
        data = urlencode_postdata({
            'email': username,
            'password': password,
            'action': 'login',
            'service': 'vimeo',
            'token': token,
        })
        login_request = compat_urllib_request.Request(login_url, data)
        login_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
        login_request.add_header('Cookie', 'xsrft=%s' % token)
        self._download_webpage(login_request, None, False, 'Wrong login info')


class VimeoIE(VimeoBaseInfoExtractor, SubtitlesInfoExtractor):
    """Information extractor for vimeo.com."""

    # _VALID_URL matches Vimeo URLs
    _VALID_URL = r'''(?x)
        (?P<proto>(?:https?:)?//)?
        (?:(?:www|(?P<player>player))\.)?
        vimeo(?P<pro>pro)?\.com/
        (?!channels/[^/?#]+/?(?:$|[?#])|album/)
        (?:.*?/)?
        (?:(?:play_redirect_hls|moogaloop\.swf)\?clip_id=)?
        (?:videos?/)?
        (?P<id>[0-9]+)
        /?(?:[?&].*)?(?:[#].*)?$'''
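    # The negative lookahead above keeps channel listing and album URLs out of this
    # extractor; VimeoChannelIE and VimeoAlbumIE below handle those instead.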
    IE_NAME = 'vimeo'
    _TESTS = [
        {
            'url': 'http://vimeo.com/56015672#at=0',
            'md5': '8879b6cc097e987f02484baf890129e5',
            'info_dict': {
                'id': '56015672',
                'ext': 'mp4',
                "upload_date": "20121220",
                "description": "This is a test case for youtube-dl.\nFor more information, see github.com/rg3/youtube-dl\nTest chars: \u2605 \" ' \u5e78 / \\ \u00e4 \u21ad \U0001d550",
                "uploader_id": "user7108434",
                "uploader": "Filippo Valsorda",
                "title": "youtube-dl test video - \u2605 \" ' \u5e78 / \\ \u00e4 \u21ad \U0001d550",
                "duration": 10,
            },
        },
        {
            'url': 'http://vimeopro.com/openstreetmapus/state-of-the-map-us-2013/video/68093876',
            'md5': '3b5ca6aa22b60dfeeadf50b72e44ed82',
            'note': 'Vimeo Pro video (#1197)',
            'info_dict': {
                'id': '68093876',
                'ext': 'mp4',
                'uploader_id': 'openstreetmapus',
                'uploader': 'OpenStreetMap US',
                'title': 'Andy Allan - Putting the Carto into OpenStreetMap Cartography',
                'duration': 1595,
            },
        },
        {
            'url': 'http://player.vimeo.com/video/54469442',
            'md5': '619b811a4417aa4abe78dc653becf511',
            'note': 'Videos that embed the url in the player page',
            'info_dict': {
                'id': '54469442',
                'ext': 'mp4',
                'title': 'Kathy Sierra: Building the minimum Badass User, Business of Software 2012',
                'uploader': 'The BLN & Business of Software',
                'uploader_id': 'theblnbusinessofsoftware',
                'duration': 3610,
            },
        },
        {
            'url': 'http://vimeo.com/68375962',
            'md5': 'aaf896bdb7ddd6476df50007a0ac0ae7',
            'note': 'Video protected with password',
            'info_dict': {
                'id': '68375962',
                'ext': 'mp4',
                'title': 'youtube-dl password protected test video',
                'upload_date': '20130614',
                'uploader_id': 'user18948128',
                'uploader': 'Jaime Marquínez Ferrándiz',
                'duration': 10,
            },
            'params': {
                'videopassword': 'youtube-dl',
            },
        },
        {
            'url': 'http://vimeo.com/channels/keypeele/75629013',
            'md5': '2f86a05afe9d7abc0b9126d229bbe15d',
            'note': 'Video is freely available via original URL '
                    'and protected with password when accessed via http://vimeo.com/75629013',
            'info_dict': {
                'id': '75629013',
                'ext': 'mp4',
                'title': 'Key & Peele: Terrorist Interrogation',
                'description': 'md5:8678b246399b070816b12313e8b4eb5c',
                'uploader_id': 'atencio',
                'uploader': 'Peter Atencio',
                'duration': 187,
            },
        },
        {
            'url': 'http://vimeo.com/76979871',
            'md5': '3363dd6ffebe3784d56f4132317fd446',
            'note': 'Video with subtitles',
            'info_dict': {
                'id': '76979871',
                'ext': 'mp4',
                'title': 'The New Vimeo Player (You Know, For Videos)',
                'description': 'md5:2ec900bf97c3f389378a96aee11260ea',
                'upload_date': '20131015',
                'uploader_id': 'staff',
                'uploader': 'Vimeo Staff',
                'duration': 62,
            }
        },
    ]

    def _verify_video_password(self, url, video_id, webpage):
        password = self._downloader.params.get('videopassword', None)
        if password is None:
            raise ExtractorError('This video is protected by a password, use the --video-password option')
        token = self._search_regex(r'xsrft: \'(.*?)\'', webpage, 'login token')
        data = compat_urllib_parse.urlencode({
            'password': password,
            'token': token,
        })
        # I didn't manage to use the password with https
        if url.startswith('https'):
            pass_url = url.replace('https', 'http')
        else:
            pass_url = url
        password_request = compat_urllib_request.Request(pass_url + '/password', data)
        password_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
        password_request.add_header('Cookie', 'xsrft=%s' % token)
        self._download_webpage(password_request, video_id,
                               'Verifying the password',
                               'Wrong password')

    def _verify_player_video_password(self, url, video_id):
        password = self._downloader.params.get('videopassword', None)
        if password is None:
            raise ExtractorError('This video is protected by a password, use the --video-password option')
        data = compat_urllib_parse.urlencode({'password': password})
        pass_url = url + '/check-password'
        password_request = compat_urllib_request.Request(pass_url, data)
        password_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
        return self._download_json(
            password_request, video_id,
            'Verifying the password',
            'Wrong password')

    def _real_initialize(self):
        self._login()

    def _real_extract(self, url):
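        # URLs smuggled by other extractors may carry extra HTTP headers,
        # e.g. a Referer pointing at the page that embeds the video.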
        url, data = unsmuggle_url(url)
        headers = std_headers
        if data is not None:
            headers = headers.copy()
            headers.update(data)
        if 'Referer' not in headers:
            headers['Referer'] = url

        # Extract ID from URL
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        if mobj.group('pro') or mobj.group('player'):
            url = 'http://player.vimeo.com/video/' + video_id

        # Retrieve video webpage to extract further information
        request = compat_urllib_request.Request(url, None, headers)
        try:
            webpage = self._download_webpage(request, video_id)
        except ExtractorError as ee:
            if isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 403:
                errmsg = ee.cause.read()
                if b'Because of its privacy settings, this video cannot be played here' in errmsg:
                    raise ExtractorError(
                        'Cannot download embed-only video without embedding '
                        'URL. Please call youtube-dl with the URL of the page '
                        'that embeds this video.',
                        expected=True)
            raise

        # Now we begin extracting as much information as we can from what we
        # retrieved. First we extract the information common to all extractors,
        # and later we extract the parts that are Vimeo specific.
        self.report_extraction(video_id)

        # Extract the config JSON
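        # The config is normally referenced by a data-config-url attribute; Vimeo Pro
        # and player.vimeo.com pages embed it inline in a JS variable instead.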
        try:
            try:
                config_url = self._html_search_regex(
                    r' data-config-url="(.+?)"', webpage, 'config URL')
                config_json = self._download_webpage(config_url, video_id)
                config = json.loads(config_json)
            except RegexNotFoundError:
                # For pro videos or player.vimeo.com urls
                # We try to find out which variable the config dict is assigned to
                m_variable_name = re.search(r'(\w)\.video\.id', webpage)
                if m_variable_name is not None:
                    config_re = r'%s=({.+?});' % re.escape(m_variable_name.group(1))
                else:
                    config_re = [r' = {config:({.+?}),assets:', r'(?:[abc])=({.+?});']
                config = self._search_regex(config_re, webpage, 'info section',
                                            flags=re.DOTALL)
                config = json.loads(config)
        except Exception as e:
            if re.search('The creator of this video has not given you permission to embed it on this domain.', webpage):
                raise ExtractorError('The author has restricted the access to this video, try with the "--referer" option')

            if re.search('<form[^>]+?id="pw_form"', webpage) is not None:
                self._verify_video_password(url, video_id, webpage)
                return self._real_extract(url)
            else:
                raise ExtractorError('Unable to extract info section',
                                     cause=e)
        else:
            if config.get('view') == 4:
                config = self._verify_player_video_password(url, video_id)

        # Extract title
        video_title = config["video"]["title"]

        # Extract uploader and uploader_id
        video_uploader = config["video"]["owner"]["name"]
        video_uploader_id = config["video"]["owner"]["url"].split('/')[-1] if config["video"]["owner"]["url"] else None

        # Extract video thumbnail
        video_thumbnail = config["video"].get("thumbnail")
        if video_thumbnail is None:
            video_thumbs = config["video"].get("thumbs")
            if video_thumbs and isinstance(video_thumbs, dict):
                _, video_thumbnail = sorted((int(width if width.isdigit() else 0), t_url) for (width, t_url) in video_thumbs.items())[-1]

        # Extract video description
        video_description = None
        try:
            video_description = get_element_by_attribute("class", "description_wrapper", webpage)
            if video_description:
                video_description = clean_html(video_description)
        except AssertionError as err:
            # On some pages like (http://player.vimeo.com/video/54469442) the
            # html tags are not closed, python 2.6 cannot handle it
            if err.args[0] == 'we should not get here!':
                pass
            else:
                raise

        # Extract video duration
        video_duration = int_or_none(config["video"].get("duration"))

        # Extract upload date
        video_upload_date = None
        mobj = re.search(r'<meta itemprop="dateCreated" content="(\d{4})-(\d{2})-(\d{2})T', webpage)
        if mobj is not None:
            video_upload_date = mobj.group(1) + mobj.group(2) + mobj.group(3)

        try:
            view_count = int(self._search_regex(r'UserPlays:(\d+)', webpage, 'view count'))
            like_count = int(self._search_regex(r'UserLikes:(\d+)', webpage, 'like count'))
            comment_count = int(self._search_regex(r'UserComments:(\d+)', webpage, 'comment count'))
        except RegexNotFoundError:
            # This info is only available in vimeo.com/{id} urls
            view_count = None
            like_count = None
            comment_count = None

        # Vimeo specific: extract request signature and timestamp
        sig = config['request']['signature']
        timestamp = config['request']['timestamp']

        # Vimeo specific: extract video codec and quality information
        # First consider quality, then codecs, then take everything
        codecs = [('vp6', 'flv'), ('vp8', 'flv'), ('h264', 'mp4')]
        files = {'hd': [], 'sd': [], 'other': []}
        config_files = config["video"].get("files") or config["request"].get("files")
        for codec_name, codec_extension in codecs:
            for quality in config_files.get(codec_name, []):
                format_id = '-'.join((codec_name, quality)).lower()
                key = quality if quality in files else 'other'
                video_url = None
                if isinstance(config_files[codec_name], dict):
                    file_info = config_files[codec_name][quality]
                    video_url = file_info.get('url')
                else:
                    file_info = {}
                if video_url is None:
                    video_url = "http://player.vimeo.com/play_redirect?clip_id=%s&sig=%s&time=%s&quality=%s&codecs=%s&type=moogaloop_local&embed_location=" \
                        % (video_id, sig, timestamp, quality, codec_name.upper())
                files[key].append({
                    'ext': codec_extension,
                    'url': video_url,
                    'format_id': format_id,
                    'width': file_info.get('width'),
                    'height': file_info.get('height'),
                })
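        # Combine the per-quality lists in the order 'other', 'sd', 'hd', so the
        # higher-quality formats end up later in the list.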
        formats = []
        for key in ('other', 'sd', 'hd'):
            formats += files[key]
        if len(formats) == 0:
            raise ExtractorError('No known codec found')

        subtitles = {}
        text_tracks = config['request'].get('text_tracks')
        if text_tracks:
            for tt in text_tracks:
                subtitles[tt['lang']] = 'http://vimeo.com' + tt['url']

        video_subtitles = self.extract_subtitles(video_id, subtitles)
        if self._downloader.params.get('listsubtitles', False):
            self._list_available_subtitles(video_id, subtitles)
            return

        return {
            'id': video_id,
            'uploader': video_uploader,
            'uploader_id': video_uploader_id,
            'upload_date': video_upload_date,
            'title': video_title,
            'thumbnail': video_thumbnail,
            'description': video_description,
            'duration': video_duration,
            'formats': formats,
            'webpage_url': url,
            'view_count': view_count,
            'like_count': like_count,
            'comment_count': comment_count,
            'subtitles': video_subtitles,
        }


class VimeoChannelIE(InfoExtractor):
    IE_NAME = 'vimeo:channel'
    _VALID_URL = r'https?://vimeo\.com/channels/(?P<id>[^/?#]+)/?(?:$|[?#])'
    _MORE_PAGES_INDICATOR = r'<a.+?rel="next"'
    _TITLE_RE = r'<link rel="alternate"[^>]+?title="(.*?)"'
    _TESTS = [{
        'url': 'http://vimeo.com/channels/tributes',
        'info_dict': {
            'title': 'Vimeo Tributes',
        },
        'playlist_mincount': 25,
    }]

    def _page_url(self, base_url, pagenum):
        return '%s/videos/page:%d/' % (base_url, pagenum)

    def _extract_list_title(self, webpage):
        return self._html_search_regex(self._TITLE_RE, webpage, 'list title')

    def _extract_videos(self, list_id, base_url):
        video_ids = []
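        # Walk the paginated listing, collecting clip ids until no 'next' link is left.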
        for pagenum in itertools.count(1):
            webpage = self._download_webpage(
                self._page_url(base_url, pagenum), list_id,
                'Downloading page %s' % pagenum)
            video_ids.extend(re.findall(r'id="clip_(\d+?)"', webpage))
            if re.search(self._MORE_PAGES_INDICATOR, webpage, re.DOTALL) is None:
                break

        entries = [self.url_result('http://vimeo.com/%s' % video_id, 'Vimeo')
                   for video_id in video_ids]
        return {'_type': 'playlist',
                'id': list_id,
                'title': self._extract_list_title(webpage),
                'entries': entries,
                }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        channel_id = mobj.group('id')
        return self._extract_videos(channel_id, 'http://vimeo.com/channels/%s' % channel_id)


class VimeoUserIE(VimeoChannelIE):
    IE_NAME = 'vimeo:user'
    _VALID_URL = r'https?://vimeo\.com/(?![0-9]+(?:$|[?#/]))(?P<name>[^/]+)(?:/videos|[#?]|$)'
    _TITLE_RE = r'<a[^>]+?class="user">([^<>]+?)</a>'
    _TESTS = [{
        'url': 'http://vimeo.com/nkistudio/videos',
        'info_dict': {
            'title': 'Nki',
        },
        'playlist_mincount': 66,
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        name = mobj.group('name')
        return self._extract_videos(name, 'http://vimeo.com/%s' % name)


class VimeoAlbumIE(VimeoChannelIE):
    IE_NAME = 'vimeo:album'
    _VALID_URL = r'https?://vimeo\.com/album/(?P<id>\d+)'
    _TITLE_RE = r'<header id="page_header">\n\s*<h1>(.*?)</h1>'
    _TESTS = [{
        'url': 'http://vimeo.com/album/2632481',
        'info_dict': {
            'title': 'Staff Favorites: November 2013',
        },
        'playlist_mincount': 13,
    }]

    def _page_url(self, base_url, pagenum):
        return '%s/page:%d/' % (base_url, pagenum)

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        album_id = mobj.group('id')
        return self._extract_videos(album_id, 'http://vimeo.com/album/%s' % album_id)


class VimeoGroupsIE(VimeoAlbumIE):
    IE_NAME = 'vimeo:group'
    _VALID_URL = r'(?:https?://)?vimeo\.com/groups/(?P<name>[^/]+)'
    _TESTS = [{
        'url': 'http://vimeo.com/groups/rolexawards',
        'info_dict': {
            'title': 'Rolex Awards for Enterprise',
        },
        'playlist_mincount': 73,
    }]

    def _extract_list_title(self, webpage):
        return self._og_search_title(webpage)

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        name = mobj.group('name')
        return self._extract_videos(name, 'http://vimeo.com/groups/%s' % name)


class VimeoReviewIE(InfoExtractor):
    IE_NAME = 'vimeo:review'
    IE_DESC = 'Review pages on vimeo'
    _VALID_URL = r'https?://vimeo\.com/[^/]+/review/(?P<id>[^/]+)'
    _TESTS = [{
        'url': 'https://vimeo.com/user21297594/review/75524534/3c257a1b5d',
        'file': '75524534.mp4',
        'md5': 'c507a72f780cacc12b2248bb4006d253',
        'info_dict': {
            'title': "DICK HARDWICK 'Comedian'",
            'uploader': 'Richard Hardwick',
        }
    }, {
        'note': 'video player needs Referer',
        'url': 'http://vimeo.com/user22258446/review/91613211/13f927e053',
        'md5': '6295fdab8f4bf6a002d058b2c6dce276',
        'info_dict': {
            'id': '91613211',
            'ext': 'mp4',
            'title': 'Death by dogma versus assembling agile - Sander Hoogendoorn',
            'uploader': 'DevWeek Events',
            'duration': 2773,
            'thumbnail': 're:^https?://.*\.jpg$',
        }
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        player_url = 'https://player.vimeo.com/player/' + video_id
        return self.url_result(player_url, 'Vimeo', video_id)


class VimeoWatchLaterIE(VimeoBaseInfoExtractor, VimeoChannelIE):
    IE_NAME = 'vimeo:watchlater'
    IE_DESC = 'Vimeo watch later list, "vimeowatchlater" keyword (requires authentication)'
    _VALID_URL = r'https?://vimeo\.com/home/watchlater|:vimeowatchlater'
    _LOGIN_REQUIRED = True
    _TITLE_RE = r'href="/home/watchlater".*?>(.*?)<'
    _TESTS = [{
        'url': 'http://vimeo.com/home/watchlater',
        'only_matching': True,
    }]

    def _real_initialize(self):
        self._login()

    def _page_url(self, base_url, pagenum):
        url = '%s/page:%d/' % (base_url, pagenum)
        request = compat_urllib_request.Request(url)
        # Set this header to get a partial html page with the ids;
        # the normal page doesn't contain them.
        request.add_header('X-Requested-With', 'XMLHttpRequest')
        return request

    def _real_extract(self, url):
        return self._extract_videos('watchlater', 'https://vimeo.com/home/watchlater')


class VimeoLikesIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?vimeo\.com/user(?P<id>[0-9]+)/likes/?(?:$|[?#]|sort:)'
    IE_NAME = 'vimeo:likes'
    IE_DESC = 'Vimeo user likes'
    _TEST = {
        'url': 'https://vimeo.com/user755559/likes/',
        'playlist_mincount': 293,
        "info_dict": {
            "description": "See all the videos urza likes",
            "title": 'Videos urza likes',
        },
    }

    def _real_extract(self, url):
        user_id = self._match_id(url)
        webpage = self._download_webpage(url, user_id)
        page_count = self._int(
            self._search_regex(
                r'''(?x)<li><a\s+href="[^"]+"\s+data-page="([0-9]+)">
                    .*?</a></li>\s*<li\s+class="pagination_next">
                ''', webpage, 'page count'),
            'page count', fatal=True)
        PAGE_SIZE = 12
        title = self._html_search_regex(
            r'(?s)<h1>(.+?)</h1>', webpage, 'title', fatal=False)
        description = self._html_search_meta('description', webpage)
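        # The page count is known up front; InAdvancePagedList calls _get_page
        # lazily as playlist entries are needed.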
        def _get_page(idx):
            page_url = '%s//vimeo.com/user%s/likes/page:%d/sort:date' % (
                self.http_scheme(), user_id, idx + 1)
            webpage = self._download_webpage(
                page_url, user_id,
                note='Downloading page %d/%d' % (idx + 1, page_count))
            video_list = self._search_regex(
                r'(?s)<ol class="js-browse_list[^"]+"[^>]*>(.*?)</ol>',
                webpage, 'video content')
            paths = re.findall(
                r'<li[^>]*>\s*<a\s+href="([^"]+)"', video_list)
            for path in paths:
                yield {
                    '_type': 'url',
                    'url': compat_urlparse.urljoin(page_url, path),
                }

        pl = InAdvancePagedList(_get_page, page_count, PAGE_SIZE)

        return {
            '_type': 'playlist',
            'id': 'user%s_likes' % user_id,
            'title': title,
            'description': description,
            'entries': pl,
        }