import base64
import datetime
import itertools
import netrc
import os
import re
import socket
import time
import email.utils
import xml.etree.ElementTree
import random
import math
import operator
import hashlib
import binascii
import urllib

from .utils import *
from .extractor.common import InfoExtractor, SearchInfoExtractor

from .extractor.ard import ARDIE
from .extractor.arte import ArteTvIE
from .extractor.bliptv import BlipTVIE, BlipTVUserIE
from .extractor.comedycentral import ComedyCentralIE
from .extractor.collegehumor import CollegeHumorIE
from .extractor.dailymotion import DailymotionIE
from .extractor.depositfiles import DepositFilesIE
from .extractor.escapist import EscapistIE
from .extractor.facebook import FacebookIE
from .extractor.gametrailers import GametrailersIE
from .extractor.generic import GenericIE
from .extractor.googleplus import GooglePlusIE
from .extractor.googlesearch import GoogleSearchIE
from .extractor.infoq import InfoQIE
from .extractor.metacafe import MetacafeIE
from .extractor.mtv import MTVIE
from .extractor.myvideo import MyVideoIE
from .extractor.nba import NBAIE
from .extractor.statigram import StatigramIE
from .extractor.photobucket import PhotobucketIE
from .extractor.soundcloud import SoundcloudIE, SoundcloudSetIE
from .extractor.stanfordoc import StanfordOpenClassroomIE
from .extractor.vimeo import VimeoIE
from .extractor.xvideos import XVideosIE
from .extractor.yahoo import YahooIE, YahooSearchIE
from .extractor.youtube import YoutubeIE, YoutubePlaylistIE, YoutubeSearchIE, YoutubeUserIE, YoutubeChannelIE
from .extractor.zdf import ZDFIE


class MixcloudIE(InfoExtractor):
    """Information extractor for www.mixcloud.com"""

    _WORKING = False  # New API, but it seems good http://www.mixcloud.com/developers/documentation/
    _VALID_URL = r'^(?:https?://)?(?:www\.)?mixcloud\.com/([\w\d-]+)/([\w\d-]+)'
    IE_NAME = u'mixcloud'

    def report_download_json(self, file_id):
        """Report JSON download."""
        self.to_screen(u'Downloading json')

    def get_urls(self, jsonData, fmt, bitrate='best'):
        """Get urls from 'audio_formats' section in json"""
        file_url = None
        try:
            bitrate_list = jsonData[fmt]
            if bitrate is None or bitrate == 'best' or bitrate not in bitrate_list:
                bitrate = max(bitrate_list)  # select highest

            url_list = jsonData[fmt][bitrate]
        except TypeError:  # we have no bitrate info.
            url_list = jsonData[fmt]
        return url_list

    def check_urls(self, url_list):
        """Returns 1st active url from list"""
        for url in url_list:
            try:
                compat_urllib_request.urlopen(url)
                return url
            except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
                url = None

        return None

    def _print_formats(self, formats):
        print('Available formats:')
        for fmt in formats.keys():
            for b in formats[fmt]:
                try:
                    ext = formats[fmt][b][0]
                    print('%s\t%s\t[%s]' % (fmt, b, ext.split('.')[-1]))
                except TypeError:  # we have no bitrate info
                    ext = formats[fmt][0]
                    print('%s\t%s\t[%s]' % (fmt, '??', ext.split('.')[-1]))
                    break

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)
        # extract uploader & filename from url
        uploader = mobj.group(1).decode('utf-8')
        file_id = uploader + "-" + mobj.group(2).decode('utf-8')

        # construct API request
        file_url = 'http://www.mixcloud.com/api/1/cloudcast/' + '/'.join(url.split('/')[-3:-1]) + '.json'
        # retrieve .json file with links to files
        request = compat_urllib_request.Request(file_url)
        try:
            self.report_download_json(file_url)
            jsonData = compat_urllib_request.urlopen(request).read()
        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
            raise ExtractorError(u'Unable to retrieve file: %s' % compat_str(err))

        # parse JSON
        json_data = json.loads(jsonData)
        player_url = json_data['player_swf_url']
        formats = dict(json_data['audio_formats'])

        req_format = self._downloader.params.get('format', None)
        bitrate = None

        if self._downloader.params.get('listformats', None):
            self._print_formats(formats)
            return

        if req_format is None or req_format == 'best':
            for format_param in formats.keys():
                url_list = self.get_urls(formats, format_param)
                # check urls
                file_url = self.check_urls(url_list)
                if file_url is not None:
                    break  # got it!
        else:
            if req_format not in formats:
                raise ExtractorError(u'Format is not available')

            url_list = self.get_urls(formats, req_format)
            file_url = self.check_urls(url_list)
            format_param = req_format

        return [{
            'id': file_id.decode('utf-8'),
            'url': file_url.decode('utf-8'),
            'uploader': uploader.decode('utf-8'),
            'upload_date': None,
            'title': json_data['name'],
            'ext': file_url.split('.')[-1].decode('utf-8'),
            'format': (format_param is None and u'NA' or format_param.decode('utf-8')),
            'thumbnail': json_data['thumbnail_url'],
            'description': json_data['description'],
            'player_url': player_url.decode('utf-8'),
        }]


class YoukuIE(InfoExtractor):
    _VALID_URL = r'(?:http://)?v\.youku\.com/v_show/id_(?P<ID>[A-Za-z0-9]+)\.html'

    def _gen_sid(self):
        nowTime = int(time.time() * 1000)
        random1 = random.randint(1000, 1998)
        random2 = random.randint(1000, 9999)

        return "%d%d%d" % (nowTime, random1, random2)

    def _get_file_ID_mix_string(self, seed):
        mixed = []
        source = list("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ/\:._-1234567890")
        seed = float(seed)
        for i in range(len(source)):
            seed = (seed * 211 + 30031) % 65536
            index = math.floor(seed / 65536 * len(source))
            mixed.append(source[int(index)])
            source.remove(source[int(index)])
        #return ''.join(mixed)
        return mixed

    def _get_file_id(self, fileId, seed):
        mixed = self._get_file_ID_mix_string(seed)
        ids = fileId.split('*')
        realId = []
        for ch in ids:
            if ch:
                realId.append(mixed[int(ch)])
        return ''.join(realId)

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)
        video_id = mobj.group('ID')

        info_url = 'http://v.youku.com/player/getPlayList/VideoIDS/' + video_id

        jsondata = self._download_webpage(info_url, video_id)

        self.report_extraction(video_id)
        try:
            config = json.loads(jsondata)

            video_title = config['data'][0]['title']
            seed = config['data'][0]['seed']

            format = self._downloader.params.get('format', None)
            supported_format = list(config['data'][0]['streamfileids'].keys())

            if format is None or format == 'best':
                if 'hd2' in supported_format:
                    format = 'hd2'
                else:
                    format = 'flv'
                ext = u'flv'
            elif format == 'worst':
                format = 'mp4'
                ext = u'mp4'
            else:
                format = 'flv'
                ext = u'flv'

            fileid = config['data'][0]['streamfileids'][format]
            keys = [s['k'] for s in config['data'][0]['segs'][format]]
        except (UnicodeDecodeError, ValueError, KeyError):
            raise ExtractorError(u'Unable to extract info section')

        files_info = []
        sid = self._gen_sid()
        fileid = self._get_file_id(fileid, seed)

        #column 8,9 of fileid represent the segment number
        #fileid[7:9] should be changed
        for index, key in enumerate(keys):

            temp_fileid = '%s%02X%s' % (fileid[0:8], index, fileid[10:])
            download_url = 'http://f.youku.com/player/getFlvPath/sid/%s_%02X/st/flv/fileid/%s?k=%s' % (sid, index, temp_fileid, key)

            info = {
                'id': '%s_part%02d' % (video_id, index),
                'url': download_url,
                'uploader': None,
                'upload_date': None,
                'title': video_title,
                'ext': ext,
            }
            files_info.append(info)

        return files_info


class XNXXIE(InfoExtractor):
    """Information extractor for xnxx.com"""

    _VALID_URL = r'^(?:https?://)?video\.xnxx\.com/video([0-9]+)/(.*)'
    IE_NAME = u'xnxx'
    VIDEO_URL_RE = r'flv_url=(.*?)&'
    VIDEO_TITLE_RE = r'<title>(.*?)\s+-\s+XNXX.COM'
    VIDEO_THUMB_RE = r'url_bigthumb=(.*?)&'

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)
        video_id = mobj.group(1)

        # Get webpage content
        webpage = self._download_webpage(url, video_id)

        video_url = self._search_regex(self.VIDEO_URL_RE,
            webpage, u'video URL')
        video_url = compat_urllib_parse.unquote(video_url)

        video_title = self._html_search_regex(self.VIDEO_TITLE_RE,
            webpage, u'title')

        video_thumbnail = self._search_regex(self.VIDEO_THUMB_RE,
            webpage, u'thumbnail', fatal=False)

        return [{
            'id': video_id,
            'url': video_url,
            'uploader': None,
            'upload_date': None,
            'title': video_title,
            'ext': 'flv',
            'thumbnail': video_thumbnail,
            'description': None,
        }]


class JustinTVIE(InfoExtractor):
    """Information extractor for justin.tv and twitch.tv"""
    # TODO: One broadcast may be split into multiple videos. The key
    # 'broadcast_id' is the same for all parts, and 'broadcast_part'
    # starts at 1 and increases. Can we treat all parts as one video?

    _VALID_URL = r"""(?x)^(?:http://)?(?:www\.)?(?:twitch|justin)\.tv/
        (?:
            (?P<channelid>[^/]+)|
            (?:(?:[^/]+)/b/(?P<videoid>[^/]+))|
            (?:(?:[^/]+)/c/(?P<chapterid>[^/]+))
        )
        /?(?:\#.*)?$
        """
    _JUSTIN_PAGE_LIMIT = 100
    IE_NAME = u'justin.tv'

    def report_download_page(self, channel, offset):
        """Report attempt to download a single page of videos."""
        self.to_screen(u'%s: Downloading video information from %d to %d' %
                (channel, offset, offset + self._JUSTIN_PAGE_LIMIT))

    # Return count of items, list of *valid* items
    def _parse_page(self, url, video_id):
        webpage = self._download_webpage(url, video_id,
                                         u'Downloading video info JSON',
                                         u'unable to download video info JSON')

        response = json.loads(webpage)
        if type(response) != list:
            error_text = response.get('error', 'unknown error')
            raise ExtractorError(u'Justin.tv API: %s' % error_text)
        info = []
        for clip in response:
            video_url = clip['video_file_url']
            if video_url:
                video_extension = os.path.splitext(video_url)[1][1:]
                video_date = re.sub('-', '', clip['start_time'][:10])
                video_uploader_id = clip.get('user_id', clip.get('channel_id'))
                video_id = clip['id']
                video_title = clip.get('title', video_id)
                info.append({
                    'id': video_id,
                    'url': video_url,
                    'title': video_title,
                    'uploader': clip.get('channel_name', video_uploader_id),
                    'uploader_id': video_uploader_id,
                    'upload_date': video_date,
                    'ext': video_extension,
                })
        return (len(response), info)

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'invalid URL: %s' % url)

        api_base = 'http://api.justin.tv'
        paged = False
        if mobj.group('channelid'):
            paged = True
            video_id = mobj.group('channelid')
            api = api_base + '/channel/archives/%s.json' % video_id
        elif mobj.group('chapterid'):
            chapter_id = mobj.group('chapterid')

            webpage = self._download_webpage(url, chapter_id)
            m = re.search(r'PP\.archive_id = "([0-9]+)";', webpage)
            if not m:
                raise ExtractorError(u'Cannot find archive of a chapter')
            archive_id = m.group(1)

            api = api_base + '/broadcast/by_chapter/%s.xml' % chapter_id
            chapter_info_xml = self._download_webpage(api, chapter_id,
                                                      note=u'Downloading chapter information',
                                                      errnote=u'Chapter information download failed')
            doc = xml.etree.ElementTree.fromstring(chapter_info_xml)
            for a in doc.findall('.//archive'):
                if archive_id == a.find('./id').text:
                    break
            else:
                raise ExtractorError(u'Could not find chapter in chapter information')

            video_url = a.find('./video_file_url').text
            video_ext = video_url.rpartition('.')[2] or u'flv'

            chapter_api_url = u'https://api.twitch.tv/kraken/videos/c' + chapter_id
            chapter_info_json = self._download_webpage(chapter_api_url, u'c' + chapter_id,
                                                       note='Downloading chapter metadata',
                                                       errnote='Download of chapter metadata failed')
            chapter_info = json.loads(chapter_info_json)

            bracket_start = int(doc.find('.//bracket_start').text)
            bracket_end = int(doc.find('.//bracket_end').text)

            # TODO determine start (and probably fix up file)
            #  youtube-dl -v http://www.twitch.tv/firmbelief/c/1757457
            #video_url += u'?start=' + TODO:start_timestamp
            # bracket_start is 13290, but we want 51670615
            self._downloader.report_warning(u'Chapter detected, but we can just download the whole file. '
                                            u'Chapter starts at %s and ends at %s' % (formatSeconds(bracket_start), formatSeconds(bracket_end)))

            info = {
                'id': u'c' + chapter_id,
                'url': video_url,
                'ext': video_ext,
                'title': chapter_info['title'],
                'thumbnail': chapter_info['preview'],
                'description': chapter_info['description'],
                'uploader': chapter_info['channel']['display_name'],
                'uploader_id': chapter_info['channel']['name'],
            }
            return [info]
        else:
            video_id = mobj.group('videoid')
            api = api_base + '/broadcast/by_archive/%s.json' % video_id

        self.report_extraction(video_id)

        info = []
        offset = 0
        limit = self._JUSTIN_PAGE_LIMIT
        while True:
            if paged:
                self.report_download_page(video_id, offset)
            page_url = api + ('?offset=%d&limit=%d' % (offset, limit))
            page_count, page_info = self._parse_page(page_url, video_id)
            info.extend(page_info)
            if not paged or page_count != limit:
                break
            offset += limit
        return info


class FunnyOrDieIE(InfoExtractor):
    _VALID_URL = r'^(?:https?://)?(?:www\.)?funnyordie\.com/videos/(?P<id>[0-9a-f]+)/.*$'

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'invalid URL: %s' % url)

        video_id = mobj.group('id')
        webpage = self._download_webpage(url, video_id)

        video_url = self._html_search_regex(r'<video[^>]*>\s*<source[^>]*>\s*<source src="(?P<url>[^"]+)"',
            webpage, u'video URL', flags=re.DOTALL)

        title = self._html_search_regex((r"<h1 class='player_page_h1'.*?>(?P<title>.*?)</h1>",
            r'<title>(?P<title>[^<]+?)</title>'), webpage, 'title', flags=re.DOTALL)

        video_description = self._html_search_regex(r'<meta property="og:description" content="(?P<desc>.*?)"',
            webpage, u'description', fatal=False, flags=re.DOTALL)

        info = {
            'id': video_id,
            'url': video_url,
            'ext': 'mp4',
            'title': title,
            'description': video_description,
        }
        return [info]


class SteamIE(InfoExtractor):
    _VALID_URL = r"""http://store\.steampowered\.com/
                (agecheck/)?
                (?P<urltype>video|app)/ #If the page is only for videos or for a game
                (?P<gameID>\d+)/?
                (?P<videoID>\d*)(?P<extra>\??) #For urltype == video we sometimes get the videoID
                """
    _VIDEO_PAGE_TEMPLATE = 'http://store.steampowered.com/video/%s/'
    _AGECHECK_TEMPLATE = 'http://store.steampowered.com/agecheck/video/%s/?snr=1_agecheck_agecheck__age-gate&ageDay=1&ageMonth=January&ageYear=1970'

    @classmethod
    def suitable(cls, url):
        """Receives a URL and returns True if suitable for this IE."""
        return re.match(cls._VALID_URL, url, re.VERBOSE) is not None

    def _real_extract(self, url):
        m = re.match(self._VALID_URL, url, re.VERBOSE)
        gameID = m.group('gameID')

        videourl = self._VIDEO_PAGE_TEMPLATE % gameID
        webpage = self._download_webpage(videourl, gameID)

        if re.search('<h2>Please enter your birth date to continue:</h2>', webpage) is not None:
            videourl = self._AGECHECK_TEMPLATE % gameID
            self.report_age_confirmation()
            webpage = self._download_webpage(videourl, gameID)

        self.report_extraction(gameID)
        game_title = self._html_search_regex(r'<h2 class="pageheader">(.*?)</h2>',
                                             webpage, 'game title')

        urlRE = r"'movie_(?P<videoID>\d+)': \{\s*FILENAME: \"(?P<videoURL>[\w:/\.\?=]+)\"(,\s*MOVIE_NAME: \"(?P<videoName>[\w:/\.\?=\+-]+)\")?\s*\},"
        mweb = re.finditer(urlRE, webpage)
        namesRE = r'<span class="title">(?P<videoName>.+?)</span>'
        titles = re.finditer(namesRE, webpage)
        thumbsRE = r'<img class="movie_thumb" src="(?P<thumbnail>.+?)">'
        thumbs = re.finditer(thumbsRE, webpage)
        videos = []
        for vid, vtitle, thumb in zip(mweb, titles, thumbs):
            video_id = vid.group('videoID')
            title = vtitle.group('videoName')
            video_url = vid.group('videoURL')
            video_thumb = thumb.group('thumbnail')
            if not video_url:
                raise ExtractorError(u'Cannot find video url for %s' % video_id)
            info = {
                'id': video_id,
                'url': video_url,
                'ext': 'flv',
                'title': unescapeHTML(title),
                'thumbnail': video_thumb
            }
            videos.append(info)
        return [self.playlist_result(videos, gameID, game_title)]


class UstreamIE(InfoExtractor):
    _VALID_URL = r'https?://www\.ustream\.tv/recorded/(?P<videoID>\d+)'
    IE_NAME = u'ustream'

    def _real_extract(self, url):
        m = re.match(self._VALID_URL, url)
        video_id = m.group('videoID')

        video_url = u'http://tcdn.ustream.tv/video/%s' % video_id
        webpage = self._download_webpage(url, video_id)

        self.report_extraction(video_id)

        video_title = self._html_search_regex(r'data-title="(?P<title>.+)"',
            webpage, u'title')

        uploader = self._html_search_regex(r'data-content-type="channel".*?>(?P<uploader>.*?)</a>',
            webpage, u'uploader', fatal=False, flags=re.DOTALL)

        thumbnail = self._html_search_regex(r'<link rel="image_src" href="(?P<thumb>.*?)"',
            webpage, u'thumbnail', fatal=False)

        info = {
            'id': video_id,
            'url': video_url,
            'ext': 'flv',
            'title': video_title,
            'uploader': uploader,
            'thumbnail': thumbnail,
        }
        return info


class WorldStarHipHopIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www|m)\.worldstar(?:candy|hiphop)\.com/videos/video\.php\?v=(?P<id>.*)'
    IE_NAME = u'WorldStarHipHop'

    def _real_extract(self, url):
        m = re.match(self._VALID_URL, url)
        video_id = m.group('id')

        webpage_src = self._download_webpage(url, video_id)

        video_url = self._search_regex(r'so\.addVariable\("file","(.*?)"\)',
            webpage_src, u'video URL')

        if 'mp4' in video_url:
            ext = 'mp4'
        else:
            ext = 'flv'

        video_title = self._html_search_regex(r"<title>(.*)</title>",
            webpage_src, u'title')

        # Get the thumbnail; if there is none, extract the correct title for WSHH candy videos instead.
        thumbnail = self._html_search_regex(r'rel="image_src" href="(.*)" />',
            webpage_src, u'thumbnail', fatal=False)

        if not thumbnail:
            _title = r"""candytitles.*>(.*)</span>"""
            mobj = re.search(_title, webpage_src)
            if mobj is not None:
                video_title = mobj.group(1)

        results = [{
            'id': video_id,
            'url': video_url,
            'title': video_title,
            'thumbnail': thumbnail,
            'ext': ext,
        }]
        return results


class RBMARadioIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?rbmaradio\.com/shows/(?P<videoID>[^/]+)$'

    def _real_extract(self, url):
        m = re.match(self._VALID_URL, url)
        video_id = m.group('videoID')

        webpage = self._download_webpage(url, video_id)

        json_data = self._search_regex(r'window\.gon.*?gon\.show=(.+?);$',
            webpage, u'json data', flags=re.MULTILINE)

        try:
            data = json.loads(json_data)
        except ValueError as e:
            raise ExtractorError(u'Invalid JSON: ' + str(e))

        video_url = data['akamai_url'] + '&cbr=256'
        url_parts = compat_urllib_parse_urlparse(video_url)
        video_ext = url_parts.path.rpartition('.')[2]
        info = {
            'id': video_id,
            'url': video_url,
            'ext': video_ext,
            'title': data['title'],
            'description': data.get('teaser_text'),
            'location': data.get('country_of_origin'),
            'uploader': data.get('host', {}).get('name'),
            'uploader_id': data.get('host', {}).get('slug'),
            'thumbnail': data.get('image', {}).get('large_url_2x'),
            'duration': data.get('duration'),
        }
        return [info]


class YouPornIE(InfoExtractor):
    """Information extractor for youporn.com."""
    _VALID_URL = r'^(?:https?://)?(?:\w+\.)?youporn\.com/watch/(?P<videoid>[0-9]+)/(?P<title>[^/]+)'

    def _print_formats(self, formats):
        """Print all available formats"""
        print(u'Available formats:')
        print(u'ext\t\tformat')
        print(u'---------------------------------')
        for format in formats:
            print(u'%s\t\t%s' % (format['ext'], format['format']))

    def _specific(self, req_format, formats):
        for x in formats:
            if x["format"] == req_format:
                return x
        return None

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)
        video_id = mobj.group('videoid')

        req = compat_urllib_request.Request(url)
        req.add_header('Cookie', 'age_verified=1')
        webpage = self._download_webpage(req, video_id)

        # Get JSON parameters
        json_params = self._search_regex(r'var currentVideo = new Video\((.*)\);', webpage, u'JSON parameters')
        try:
            params = json.loads(json_params)
        except:
            raise ExtractorError(u'Invalid JSON')

        self.report_extraction(video_id)
        try:
            video_title = params['title']
            upload_date = unified_strdate(params['release_date_f'])
            video_description = params['description']
            video_uploader = params['submitted_by']
            thumbnail = params['thumbnails'][0]['image']
        except KeyError:
            raise ExtractorError('Missing JSON parameter: ' + sys.exc_info()[1])

        # Get all of the formats available
        DOWNLOAD_LIST_RE = r'(?s)<ul class="downloadList">(?P<download_list>.*?)</ul>'
        download_list_html = self._search_regex(DOWNLOAD_LIST_RE,
            webpage, u'download list').strip()

        # Get all of the links from the page
        LINK_RE = r'(?s)<a href="(?P<url>[^"]+)">'
        links = re.findall(LINK_RE, download_list_html)
        if len(links) == 0:
            raise ExtractorError(u'ERROR: no known formats available for video')

        self.to_screen(u'Links found: %d' % len(links))

        formats = []
        for link in links:

            # A link looks like this:
            # http://cdn1.download.youporn.phncdn.com/201210/31/8004515/480p_370k_8004515/YouPorn%20-%20Nubile%20Films%20The%20Pillow%20Fight.mp4?nvb=20121113051249&nva=20121114051249&ir=1200&sr=1200&hash=014b882080310e95fb6a0
            # A path looks like this:
            # /201210/31/8004515/480p_370k_8004515/YouPorn%20-%20Nubile%20Films%20The%20Pillow%20Fight.mp4
            video_url = unescapeHTML(link)
            path = compat_urllib_parse_urlparse(video_url).path
            extension = os.path.splitext(path)[1][1:]
            format = path.split('/')[4].split('_')[:2]
            size = format[0]
            bitrate = format[1]
            format = "-".join(format)
            # title = u'%s-%s-%s' % (video_title, size, bitrate)

            formats.append({
                'id': video_id,
                'url': video_url,
                'uploader': video_uploader,
                'upload_date': upload_date,
                'title': video_title,
                'ext': extension,
                'format': format,
                'thumbnail': thumbnail,
                'description': video_description
            })

        if self._downloader.params.get('listformats', None):
            self._print_formats(formats)
            return

        req_format = self._downloader.params.get('format', None)
        self.to_screen(u'Format: %s' % req_format)

        if req_format is None or req_format == 'best':
            return [formats[0]]
        elif req_format == 'worst':
            return [formats[-1]]
        elif req_format in ('-1', 'all'):
            return formats
        else:
            format = self._specific(req_format, formats)
            if format is None:
                raise ExtractorError(u'Requested format not available')
            return [format]


class PornotubeIE(InfoExtractor):
    """Information extractor for pornotube.com."""
    _VALID_URL = r'^(?:https?://)?(?:\w+\.)?pornotube\.com(/c/(?P<channel>[0-9]+))?(/m/(?P<videoid>[0-9]+))(/(?P<title>.+))$'

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)

        video_id = mobj.group('videoid')
        video_title = mobj.group('title')

        # Get webpage content
        webpage = self._download_webpage(url, video_id)

        # Get the video URL
        VIDEO_URL_RE = r'url: "(?P<url>http://video[0-9].pornotube.com/.+\.flv)",'
        video_url = self._search_regex(VIDEO_URL_RE, webpage, u'video url')
        video_url = compat_urllib_parse.unquote(video_url)

        # Get the uploaded date
        VIDEO_UPLOADED_RE = r'<div class="video_added_by">Added (?P<date>[0-9\/]+) by'
        upload_date = self._html_search_regex(VIDEO_UPLOADED_RE, webpage, u'upload date', fatal=False)
        if upload_date: upload_date = unified_strdate(upload_date)

        info = {'id': video_id,
                'url': video_url,
                'uploader': None,
                'upload_date': upload_date,
                'title': video_title,
                'ext': 'flv',
                'format': 'flv'}

        return [info]


class YouJizzIE(InfoExtractor):
    """Information extractor for youjizz.com."""
    _VALID_URL = r'^(?:https?://)?(?:\w+\.)?youjizz\.com/videos/(?P<videoid>[^.]+).html$'

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)

        video_id = mobj.group('videoid')

        # Get webpage content
        webpage = self._download_webpage(url, video_id)

        # Get the video title
        video_title = self._html_search_regex(r'<title>(?P<title>.*)</title>',
            webpage, u'title').strip()

        # Get the embed page
        result = re.search(r'https?://www.youjizz.com/videos/embed/(?P<videoid>[0-9]+)', webpage)
        if result is None:
            raise ExtractorError(u'ERROR: unable to extract embed page')

        embed_page_url = result.group(0).strip()
        video_id = result.group('videoid')

        webpage = self._download_webpage(embed_page_url, video_id)

        # Get the video URL
        video_url = self._search_regex(r'so.addVariable\("file",encodeURIComponent\("(?P<source>[^"]+)"\)\);',
            webpage, u'video URL')

        info = {'id': video_id,
                'url': video_url,
                'title': video_title,
                'ext': 'flv',
                'format': 'flv',
                'player_url': embed_page_url}

        return [info]


class EightTracksIE(InfoExtractor):
    IE_NAME = '8tracks'
    _VALID_URL = r'https?://8tracks.com/(?P<user>[^/]+)/(?P<id>[^/#]+)(?:#.*)?$'

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)
        playlist_id = mobj.group('id')

        webpage = self._download_webpage(url, playlist_id)

        json_like = self._search_regex(r"PAGE.mix = (.*?);\n", webpage, u'trax information', flags=re.DOTALL)
        data = json.loads(json_like)

        session = str(random.randint(0, 1000000000))
        mix_id = data['id']
        track_count = data['tracks_count']
        first_url = 'http://8tracks.com/sets/%s/play?player=sm&mix_id=%s&format=jsonh' % (session, mix_id)
        next_url = first_url
        res = []
        for i in itertools.count():
            api_json = self._download_webpage(next_url, playlist_id,
                note=u'Downloading song information %s/%s' % (str(i+1), track_count),
                errnote=u'Failed to download song information')
            api_data = json.loads(api_json)
            track_data = api_data[u'set']['track']
            info = {
                'id': track_data['id'],
                'url': track_data['track_file_stream_url'],
                'title': track_data['performer'] + u' - ' + track_data['name'],
                'raw_title': track_data['name'],
                'uploader_id': data['user']['login'],
                'ext': 'm4a',
            }
            res.append(info)
            if api_data['set']['at_last_track']:
                break
            next_url = 'http://8tracks.com/sets/%s/next?player=sm&mix_id=%s&format=jsonh&track_id=%s' % (session, mix_id, track_data['id'])
        return res


class KeekIE(InfoExtractor):
    _VALID_URL = r'http://(?:www\.)?keek\.com/(?:!|\w+/keeks/)(?P<videoID>\w+)'
    IE_NAME = u'keek'

    def _real_extract(self, url):
        m = re.match(self._VALID_URL, url)
        video_id = m.group('videoID')

        video_url = u'http://cdn.keek.com/keek/video/%s' % video_id
        thumbnail = u'http://cdn.keek.com/keek/thumbnail/%s/w100/h75' % video_id
        webpage = self._download_webpage(url, video_id)

        video_title = self._html_search_regex(r'<meta property="og:title" content="(?P<title>.*?)"',
            webpage, u'title')

        uploader = self._html_search_regex(r'<div class="user-name-and-bio">[\S\s]+?<h2>(?P<uploader>.+?)</h2>',
            webpage, u'uploader', fatal=False)

        info = {
            'id': video_id,
            'url': video_url,
            'ext': 'mp4',
            'title': video_title,
            'thumbnail': thumbnail,
            'uploader': uploader
        }
        return [info]


class TEDIE(InfoExtractor):
    _VALID_URL = r'''http://www\.ted\.com/
                   (
                        ((?P<type_playlist>playlists)/(?P<playlist_id>\d+)) # We have a playlist
                        |
                        ((?P<type_talk>talks)) # We have a simple talk
                   )
                   (/lang/(.*?))? # The url may contain the language
                   /(?P<name>\w+) # Here goes the name and then ".html"
                   '''

    @classmethod
    def suitable(cls, url):
        """Receives a URL and returns True if suitable for this IE."""
        return re.match(cls._VALID_URL, url, re.VERBOSE) is not None

    def _real_extract(self, url):
        m = re.match(self._VALID_URL, url, re.VERBOSE)
        if m.group('type_talk'):
            return [self._talk_info(url)]
        else:
            playlist_id = m.group('playlist_id')
            name = m.group('name')
            self.to_screen(u'Getting info of playlist %s: "%s"' % (playlist_id, name))
            return [self._playlist_videos_info(url, name, playlist_id)]

    def _playlist_videos_info(self, url, name, playlist_id=0):
        '''Returns the videos of the playlist'''
        video_RE = r'''
                     <li\ id="talk_(\d+)"([.\s]*?)data-id="(?P<video_id>\d+)"
                     ([.\s]*?)data-playlist_item_id="(\d+)"
                     ([.\s]*?)data-mediaslug="(?P<mediaSlug>.+?)"
                     '''
        video_name_RE = r'<p\ class="talk-title"><a href="(?P<talk_url>/talks/(.+).html)">(?P<fullname>.+?)</a></p>'
        webpage = self._download_webpage(url, playlist_id, 'Downloading playlist webpage')
        m_videos = re.finditer(video_RE, webpage, re.VERBOSE)
        m_names = re.finditer(video_name_RE, webpage)

        playlist_title = self._html_search_regex(r'div class="headline">\s*?<h1>\s*?<span>(.*?)</span>',
                                                 webpage, 'playlist title')

        playlist_entries = []
        for m_video, m_name in zip(m_videos, m_names):
            video_id = m_video.group('video_id')
            talk_url = 'http://www.ted.com%s' % m_name.group('talk_url')
            playlist_entries.append(self.url_result(talk_url, 'TED'))
        return self.playlist_result(playlist_entries, playlist_id = playlist_id, playlist_title = playlist_title)

    def _talk_info(self, url, video_id=0):
        """Return the video for the talk in the url"""
        m = re.match(self._VALID_URL, url, re.VERBOSE)
        video_name = m.group('name')
        webpage = self._download_webpage(url, video_id, 'Downloading \"%s\" page' % video_name)
        self.report_extraction(video_name)
        # If the url includes the language we get the title translated
        title = self._html_search_regex(r'<span id="altHeadline" >(?P<title>.*)</span>',
                                        webpage, 'title')
        json_data = self._search_regex(r'<script.*?>var talkDetails = ({.*?})</script>',
                                       webpage, 'json data')
        info = json.loads(json_data)
        desc = self._html_search_regex(r'<div class="talk-intro">.*?<p.*?>(.*?)</p>',
                                       webpage, 'description', flags = re.DOTALL)

        thumbnail = self._search_regex(r'</span>[\s.]*</div>[\s.]*<img src="(.*?)"',
                                       webpage, 'thumbnail')
        info = {
            'id': info['id'],
            'url': info['htmlStreams'][-1]['file'],
            'ext': 'mp4',
            'title': title,
            'thumbnail': thumbnail,
            'description': desc,
        }
        return info


class MySpassIE(InfoExtractor):
    _VALID_URL = r'http://www.myspass.de/.*'

    def _real_extract(self, url):
        META_DATA_URL_TEMPLATE = 'http://www.myspass.de/myspass/includes/apps/video/getvideometadataxml.php?id=%s'

        # video id is the last path element of the URL
        # usually there is a trailing slash, so also try the second but last
        url_path = compat_urllib_parse_urlparse(url).path
        url_parent_path, video_id = os.path.split(url_path)
        if not video_id:
            _, video_id = os.path.split(url_parent_path)

        # get metadata
        metadata_url = META_DATA_URL_TEMPLATE % video_id
        metadata_text = self._download_webpage(metadata_url, video_id)
        metadata = xml.etree.ElementTree.fromstring(metadata_text.encode('utf-8'))

        # extract values from metadata
        url_flv_el = metadata.find('url_flv')
        if url_flv_el is None:
            raise ExtractorError(u'Unable to extract download url')
        video_url = url_flv_el.text
        extension = os.path.splitext(video_url)[1][1:]
        title_el = metadata.find('title')
        if title_el is None:
            raise ExtractorError(u'Unable to extract title')
        title = title_el.text
        format_id_el = metadata.find('format_id')
        if format_id_el is None:
            format = extension
        else:
            format = format_id_el.text
        description_el = metadata.find('description')
        if description_el is not None:
            description = description_el.text
        else:
            description = None
        imagePreview_el = metadata.find('imagePreview')
        if imagePreview_el is not None:
            thumbnail = imagePreview_el.text
        else:
            thumbnail = None
        info = {
            'id': video_id,
            'url': video_url,
            'title': title,
            'ext': extension,
            'format': format,
            'thumbnail': thumbnail,
            'description': description
        }
        return [info]


class SpiegelIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?spiegel\.de/video/[^/]*-(?P<videoID>[0-9]+)(?:\.html)?(?:#.*)?$'

    def _real_extract(self, url):
        m = re.match(self._VALID_URL, url)
        video_id = m.group('videoID')

        webpage = self._download_webpage(url, video_id)

        video_title = self._html_search_regex(r'<div class="module-title">(.*?)</div>',
            webpage, u'title')

        xml_url = u'http://video2.spiegel.de/flash/' + video_id + u'.xml'
        xml_code = self._download_webpage(xml_url, video_id,
                    note=u'Downloading XML', errnote=u'Failed to download XML')

        idoc = xml.etree.ElementTree.fromstring(xml_code)
        last_type = idoc[-1]
        filename = last_type.findall('./filename')[0].text
        duration = float(last_type.findall('./duration')[0].text)

        video_url = 'http://video2.spiegel.de/flash/' + filename
        video_ext = filename.rpartition('.')[2]
        info = {
            'id': video_id,
            'url': video_url,
            'ext': video_ext,
            'title': video_title,
            'duration': duration,
        }
        return [info]


class LiveLeakIE(InfoExtractor):
    _VALID_URL = r'^(?:http?://)?(?:\w+\.)?liveleak\.com/view\?(?:.*?)i=(?P<video_id>[\w_]+)(?:.*)'
    IE_NAME = u'liveleak'

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)

        video_id = mobj.group('video_id')

        webpage = self._download_webpage(url, video_id)

        video_url = self._search_regex(r'file: "(.*?)",',
            webpage, u'video URL')

        video_title = self._html_search_regex(r'<meta property="og:title" content="(?P<title>.*?)"',
            webpage, u'title').replace('LiveLeak.com -', '').strip()

        video_description = self._html_search_regex(r'<meta property="og:description" content="(?P<desc>.*?)"',
            webpage, u'description', fatal=False)

        video_uploader = self._html_search_regex(r'By:.*?(\w+)</a>',
            webpage, u'uploader', fatal=False)

        info = {
            'id': video_id,
            'url': video_url,
            'ext': 'mp4',
            'title': video_title,
            'description': video_description,
            'uploader': video_uploader
        }

        return [info]


class TumblrIE(InfoExtractor):
    _VALID_URL = r'http://(?P<blog_name>.*?)\.tumblr\.com/((post)|(video))/(?P<id>\d*)/(.*?)'

    def _real_extract(self, url):
        m_url = re.match(self._VALID_URL, url)
        video_id = m_url.group('id')
        blog = m_url.group('blog_name')

        url = 'http://%s.tumblr.com/post/%s/' % (blog, video_id)
        webpage = self._download_webpage(url, video_id)

        re_video = r'src=\\x22(?P<video_url>http://%s\.tumblr\.com/video_file/%s/(.*?))\\x22 type=\\x22video/(?P<ext>.*?)\\x22' % (blog, video_id)
        video = re.search(re_video, webpage)
        if video is None:
            raise ExtractorError(u'Unable to extract video')
        video_url = video.group('video_url')
        ext = video.group('ext')

        video_thumbnail = self._search_regex(r'posters(.*?)\[\\x22(?P<thumb>.*?)\\x22',
            webpage, u'thumbnail', fatal=False)  # We pick the first poster
        if video_thumbnail: video_thumbnail = video_thumbnail.replace('\\', '')

        # The only place where you can get a title, it's not complete,
        # but searching in other places doesn't work for all videos
        video_title = self._html_search_regex(r'<title>(?P<title>.*?)</title>',
            webpage, u'title', flags=re.DOTALL)

        return [{'id': video_id,
                 'url': video_url,
                 'title': video_title,
                 'thumbnail': video_thumbnail,
                 'ext': ext
                 }]


class BandcampIE(InfoExtractor):
    _VALID_URL = r'http://.*?\.bandcamp\.com/track/(?P<title>.*)'

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        title = mobj.group('title')
        webpage = self._download_webpage(url, title)
        # We get the link to the free download page
        m_download = re.search(r'freeDownloadPage: "(.*?)"', webpage)
        if m_download is None:
            raise ExtractorError(u'No free songs found')

        download_link = m_download.group(1)
        id = re.search(r'var TralbumData = {(.*?)id: (?P<id>\d*?)$',
                       webpage, re.MULTILINE|re.DOTALL).group('id')

        download_webpage = self._download_webpage(download_link, id,
                                                  'Downloading free downloads page')
        # We get the dictionary of the track from some javascript code
        info = re.search(r'items: (.*?),$',
                         download_webpage, re.MULTILINE).group(1)
        info = json.loads(info)[0]
        # We pick mp3-320 for now, until format selection can be easily implemented.
        mp3_info = info[u'downloads'][u'mp3-320']
        # If we try to use this url it says the link has expired
        initial_url = mp3_info[u'url']
        re_url = r'(?P<server>http://(.*?)\.bandcamp\.com)/download/track\?enc=mp3-320&fsig=(?P<fsig>.*?)&id=(?P<id>.*?)&ts=(?P<ts>.*)$'
        m_url = re.match(re_url, initial_url)
        # We build the url we will use to get the final track url
        # This url is built in Bandcamp in the script download_bunde_*.js
        request_url = '%s/statdownload/track?enc=mp3-320&fsig=%s&id=%s&ts=%s&.rand=665028774616&.vrs=1' % (m_url.group('server'), m_url.group('fsig'), id, m_url.group('ts'))
        final_url_webpage = self._download_webpage(request_url, id, 'Requesting download url')
        # If we could correctly generate the .rand field the url would be
        # in the "download_url" key
        final_url = re.search(r'"retry_url":"(.*?)"', final_url_webpage).group(1)

        track_info = {'id': id,
                      'title': info[u'title'],
                      'ext': 'mp3',
                      'url': final_url,
                      'thumbnail': info[u'thumb_url'],
                      'uploader': info[u'artist']
                      }

        return [track_info]


class RedTubeIE(InfoExtractor):
    """Information Extractor for redtube"""
    _VALID_URL = r'(?:http://)?(?:www\.)?redtube\.com/(?P<id>[0-9]+)'

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)

        video_id = mobj.group('id')
        video_extension = 'mp4'
        webpage = self._download_webpage(url, video_id)

        self.report_extraction(video_id)

        video_url = self._html_search_regex(r'<source src="(.+?)" type="video/mp4">',
            webpage, u'video URL')

        video_title = self._html_search_regex('<h1 class="videoTitle slidePanelMovable">(.+?)</h1>',
            webpage, u'title')

        return [{
            'id': video_id,
            'url': video_url,
            'ext': video_extension,
            'title': video_title,
        }]


class InaIE(InfoExtractor):
    """Information Extractor for Ina.fr"""
    _VALID_URL = r'(?:http://)?(?:www\.)?ina\.fr/video/(?P<id>I[0-9]+)/.*'

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)

        video_id = mobj.group('id')
        mrss_url = 'http://player.ina.fr/notices/%s.mrss' % video_id
        video_extension = 'mp4'
        webpage = self._download_webpage(mrss_url, video_id)

        self.report_extraction(video_id)

        video_url = self._html_search_regex(r'<media:player url="(?P<mp4url>http://mp4.ina.fr/[^"]+\.mp4)',
            webpage, u'video URL')

        video_title = self._search_regex(r'<title><!\[CDATA\[(?P<titre>.*?)]]></title>',
            webpage, u'title')

        return [{
            'id': video_id,
            'url': video_url,
            'ext': video_extension,
            'title': video_title,
        }]

class HowcastIE(InfoExtractor):
    """Information Extractor for Howcast.com"""
    _VALID_URL = r'(?:https?://)?(?:www\.)?howcast\.com/videos/(?P<id>\d+)'

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)

        video_id = mobj.group('id')
        webpage_url = 'http://www.howcast.com/videos/' + video_id
        webpage = self._download_webpage(webpage_url, video_id)

        self.report_extraction(video_id)

        video_url = self._search_regex(r'\'?file\'?: "(http://mobile-media\.howcast\.com/[0-9]+\.mp4)',
            webpage, u'video URL')

        video_title = self._html_search_regex(r'<meta content=(?:"([^"]+)"|\'([^\']+)\') property=\'og:title\'',
            webpage, u'title')

        video_description = self._html_search_regex(r'<meta content=(?:"([^"]+)"|\'([^\']+)\') name=\'description\'',
            webpage, u'description', fatal=False)

        thumbnail = self._html_search_regex(r'<meta content=\'(.+?)\' property=\'og:image\'',
            webpage, u'thumbnail', fatal=False)

        return [{
            'id': video_id,
            'url': video_url,
            'ext': 'mp4',
            'title': video_title,
            'description': video_description,
            'thumbnail': thumbnail,
        }]

class VineIE(InfoExtractor):
    """Information Extractor for Vine.co"""
    _VALID_URL = r'(?:https?://)?(?:www\.)?vine\.co/v/(?P<id>\w+)'

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)

        video_id = mobj.group('id')
        webpage_url = 'https://vine.co/v/' + video_id
        webpage = self._download_webpage(webpage_url, video_id)

        self.report_extraction(video_id)

        video_url = self._html_search_regex(r'<meta property="twitter:player:stream" content="(.+?)"',
            webpage, u'video URL')

        video_title = self._html_search_regex(r'<meta property="og:title" content="(.+?)"',
            webpage, u'title')

        thumbnail = self._html_search_regex(r'<meta property="og:image" content="(.+?)(\?.*?)?"',
            webpage, u'thumbnail', fatal=False)

        uploader = self._html_search_regex(r'<div class="user">.*?<h2>(.+?)</h2>',
            webpage, u'uploader', fatal=False, flags=re.DOTALL)

        return [{
            'id': video_id,
            'url': video_url,
            'ext': 'mp4',
            'title': video_title,
            'thumbnail': thumbnail,
            'uploader': uploader,
        }]

class FlickrIE(InfoExtractor):
    """Information Extractor for Flickr videos"""
    _VALID_URL = r'(?:https?://)?(?:www\.)?flickr\.com/photos/(?P<uploader_id>[\w\-_@]+)/(?P<id>\d+).*'

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)

        video_id = mobj.group('id')
        video_uploader_id = mobj.group('uploader_id')
        webpage_url = 'http://www.flickr.com/photos/' + video_uploader_id + '/' + video_id
        webpage = self._download_webpage(webpage_url, video_id)

        secret = self._search_regex(r"photo_secret: '(\w+)'", webpage, u'secret')

        first_url = 'https://secure.flickr.com/apps/video/video_mtl_xml.gne?v=x&photo_id=' + video_id + '&secret=' + secret + '&bitrate=700&target=_self'
        first_xml = self._download_webpage(first_url, video_id, 'Downloading first data webpage')

        node_id = self._html_search_regex(r'<Item id="id">(\d+-\d+)</Item>',
            first_xml, u'node_id')

        second_url = 'https://secure.flickr.com/video_playlist.gne?node_id=' + node_id + '&tech=flash&mode=playlist&bitrate=700&secret=' + secret + '&rd=video.yahoo.com&noad=1'
        second_xml = self._download_webpage(second_url, video_id, 'Downloading second data webpage')

        self.report_extraction(video_id)

        mobj = re.search(r'<STREAM APP="(.+?)" FULLPATH="(.+?)"', second_xml)
        if mobj is None:
            raise ExtractorError(u'Unable to extract video url')
        video_url = mobj.group(1) + unescapeHTML(mobj.group(2))

        video_title = self._html_search_regex(r'<meta property="og:title" content=(?:"([^"]+)"|\'([^\']+)\')',
            webpage, u'video title')

        video_description = self._html_search_regex(r'<meta property="og:description" content=(?:"([^"]+)"|\'([^\']+)\')',
            webpage, u'description', fatal=False)

        thumbnail = self._html_search_regex(r'<meta property="og:image" content=(?:"([^"]+)"|\'([^\']+)\')',
            webpage, u'thumbnail', fatal=False)

        return [{
            'id': video_id,
            'url': video_url,
            'ext': 'mp4',
            'title': video_title,
            'description': video_description,
            'thumbnail': thumbnail,
            'uploader_id': video_uploader_id,
        }]

class TeamcocoIE(InfoExtractor):
    _VALID_URL = r'http://teamcoco\.com/video/(?P<url_title>.*)'

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)
        url_title = mobj.group('url_title')
        webpage = self._download_webpage(url, url_title)

        video_id = self._html_search_regex(r'<article class="video" data-id="(\d+?)"',
            webpage, u'video id')

        self.report_extraction(video_id)

        video_title = self._html_search_regex(r'<meta property="og:title" content="(.+?)"',
            webpage, u'title')

        thumbnail = self._html_search_regex(r'<meta property="og:image" content="(.+?)"',
            webpage, u'thumbnail', fatal=False)

        video_description = self._html_search_regex(r'<meta property="og:description" content="(.*?)"',
            webpage, u'description', fatal=False)

        data_url = 'http://teamcoco.com/cvp/2.0/%s.xml' % video_id
        data = self._download_webpage(data_url, video_id, 'Downloading data webpage')

        video_url = self._html_search_regex(r'<file type="high".*?>(.*?)</file>',
            data, u'video URL')

        return [{
            'id': video_id,
            'url': video_url,
            'ext': 'mp4',
            'title': video_title,
            'thumbnail': thumbnail,
            'description': video_description,
        }]

class XHamsterIE(InfoExtractor):
    """Information Extractor for xHamster"""
    _VALID_URL = r'(?:http://)?(?:www.)?xhamster\.com/movies/(?P<id>[0-9]+)/.*\.html'

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)

        video_id = mobj.group('id')
        mrss_url = 'http://xhamster.com/movies/%s/.html' % video_id
        webpage = self._download_webpage(mrss_url, video_id)

        mobj = re.search(r'\'srv\': \'(?P<server>[^\']*)\',\s*\'file\': \'(?P<file>[^\']+)\',', webpage)
        if mobj is None:
            raise ExtractorError(u'Unable to extract media URL')
        if len(mobj.group('server')) == 0:
            video_url = compat_urllib_parse.unquote(mobj.group('file'))
        else:
            video_url = mobj.group('server')+'/key='+mobj.group('file')
        video_extension = video_url.split('.')[-1]

        video_title = self._html_search_regex(r'<title>(?P<title>.+?) - xHamster\.com</title>',
            webpage, u'title')

        # Can't see the description anywhere in the UI
        # video_description = self._html_search_regex(r'<span>Description: </span>(?P<description>[^<]+)',
        #     webpage, u'description', fatal=False)
        # if video_description: video_description = unescapeHTML(video_description)

        mobj = re.search(r'hint=\'(?P<upload_date_Y>[0-9]{4})-(?P<upload_date_m>[0-9]{2})-(?P<upload_date_d>[0-9]{2}) [0-9]{2}:[0-9]{2}:[0-9]{2} [A-Z]{3,4}\'', webpage)
        if mobj:
            # Build an upload date in the usual YYYYMMDD form
            video_upload_date = mobj.group('upload_date_Y')+mobj.group('upload_date_m')+mobj.group('upload_date_d')
        else:
            video_upload_date = None
            self._downloader.report_warning(u'Unable to extract upload date')

        video_uploader_id = self._html_search_regex(r'<a href=\'/user/[^>]+>(?P<uploader_id>[^<]+)',
            webpage, u'uploader id', default=u'anonymous')

        video_thumbnail = self._search_regex(r'\'image\':\'(?P<thumbnail>[^\']+)\'',
            webpage, u'thumbnail', fatal=False)

        return [{
            'id': video_id,
            'url': video_url,
            'ext': video_extension,
            'title': video_title,
            # 'description': video_description,
            'upload_date': video_upload_date,
            'uploader_id': video_uploader_id,
            'thumbnail': video_thumbnail
        }]

class HypemIE(InfoExtractor):
    """Information Extractor for hypem"""
    _VALID_URL = r'(?:http://)?(?:www\.)?hypem\.com/track/([^/]+)/([^/]+)'

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)
        track_id = mobj.group(1)

        data = {'ax': 1, 'ts': time.time()}
        data_encoded = compat_urllib_parse.urlencode(data)
        complete_url = url + "?" + data_encoded
        request = compat_urllib_request.Request(complete_url)
        response, urlh = self._download_webpage_handle(request, track_id, u'Downloading webpage with the url')
        cookie = urlh.headers.get('Set-Cookie', '')

        self.report_extraction(track_id)

        html_tracks = self._html_search_regex(r'<script type="application/json" id="displayList-data">(.*?)</script>',
            response, u'tracks', flags=re.MULTILINE|re.DOTALL).strip()
        try:
            track_list = json.loads(html_tracks)
            track = track_list[u'tracks'][0]
        except ValueError:
            raise ExtractorError(u'Hypemachine contained invalid JSON.')

        key = track[u"key"]
        track_id = track[u"id"]
        artist = track[u"artist"]
        title = track[u"song"]

        serve_url = "http://hypem.com/serve/source/%s/%s" % (compat_str(track_id), compat_str(key))
        request = compat_urllib_request.Request(serve_url, "", {'Content-Type': 'application/json'})
        request.add_header('cookie', cookie)
        song_data_json = self._download_webpage(request, track_id, u'Downloading metadata')
        try:
            song_data = json.loads(song_data_json)
        except ValueError:
            raise ExtractorError(u'Hypemachine contained invalid JSON.')
        final_url = song_data[u"url"]

        return [{
            'id': track_id,
            'url': final_url,
            'ext': "mp3",
            'title': title,
            'artist': artist,
        }]

class Vbox7IE(InfoExtractor):
    """Information Extractor for Vbox7"""
    _VALID_URL = r'(?:http://)?(?:www\.)?vbox7\.com/play:([^/]+)'

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        if mobj is None:
            raise ExtractorError(u'Invalid URL: %s' % url)
        video_id = mobj.group(1)

        redirect_page, urlh = self._download_webpage_handle(url, video_id)
        new_location = self._search_regex(r'window\.location = \'(.*)\';', redirect_page, u'redirect location')
        redirect_url = urlh.geturl() + new_location
        webpage = self._download_webpage(redirect_url, video_id, u'Downloading redirect page')

        title = self._html_search_regex(r'<title>(.*)</title>',
            webpage, u'title').split('/')[0].strip()

        ext = "flv"
        info_url = "http://vbox7.com/play/magare.do"
        data = compat_urllib_parse.urlencode({'as3': '1', 'vid': video_id})
        info_request = compat_urllib_request.Request(info_url, data)
        info_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
        info_response = self._download_webpage(info_request, video_id, u'Downloading info webpage')
        if info_response is None:
            raise ExtractorError(u'Unable to extract the media url')
        (final_url, thumbnail_url) = map(lambda x: x.split('=')[1], info_response.split('&'))

        return [{
            'id': video_id,
            'url': final_url,
            'ext': ext,
            'title': title,
            'thumbnail': thumbnail_url,
        }]

def gen_extractors():
    """ Return a list with an instance of every supported extractor.
    The order does matter; the first extractor matched is the one handling the URL.
    """
    return [
        YoutubePlaylistIE(),
        YoutubeChannelIE(),
        YoutubeUserIE(),
        YoutubeSearchIE(),
        YoutubeIE(),
        MetacafeIE(),
        DailymotionIE(),
        GoogleSearchIE(),
        PhotobucketIE(),
        YahooIE(),
        YahooSearchIE(),
        DepositFilesIE(),
        FacebookIE(),
        BlipTVIE(),
        BlipTVUserIE(),
        VimeoIE(),
        MyVideoIE(),
        ComedyCentralIE(),
        EscapistIE(),
        CollegeHumorIE(),
        XVideosIE(),
        SoundcloudSetIE(),
        SoundcloudIE(),
        InfoQIE(),
        MixcloudIE(),
        StanfordOpenClassroomIE(),
        MTVIE(),
        YoukuIE(),
        XNXXIE(),
        YouJizzIE(),
        PornotubeIE(),
        YouPornIE(),
        GooglePlusIE(),
        ArteTvIE(),
        NBAIE(),
        WorldStarHipHopIE(),
        JustinTVIE(),
        FunnyOrDieIE(),
        SteamIE(),
        UstreamIE(),
        RBMARadioIE(),
        EightTracksIE(),
        KeekIE(),
        TEDIE(),
        MySpassIE(),
        SpiegelIE(),
        LiveLeakIE(),
        ARDIE(),
        ZDFIE(),
        TumblrIE(),
        BandcampIE(),
        RedTubeIE(),
        InaIE(),
        HowcastIE(),
        VineIE(),
        FlickrIE(),
        TeamcocoIE(),
        XHamsterIE(),
        HypemIE(),
        Vbox7IE(),
        GametrailersIE(),
        StatigramIE(),
        GenericIE()
    ]
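
# Illustrative sketch only (not part of the original module): how a caller
# could use gen_extractors() to dispatch a URL. The first extractor whose
# suitable() check accepts the URL wins, which is why the ordering above
# matters. This assumes the usual InfoExtractor.suitable(url) helper; adapt
# it if your InfoExtractor exposes a different matching API.
def _example_find_suitable_extractor(url):
    """Return the first extractor instance that claims the given URL, else None."""
    for ie in gen_extractors():
        if ie.suitable(url):
            return ie
    return None
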
def get_info_extractor(ie_name):
    """Returns the info extractor class with the given ie_name"""
    return globals()[ie_name+'IE']
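
# Usage note for get_info_extractor(): pass the IE name without the trailing
# "IE"; the extractor *class* (not an instance) is returned. A minimal,
# illustrative example:
#
#     ie_class = get_info_extractor(u'Bandcamp')   # -> BandcampIE
#     ie = ie_class()                               # instantiate when needed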