pytastic code 4

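# Tail of a truncated extractor (the top of its source file is missing from
# the paste): it builds mp4 formats from a 'file_api'/'file_id' quality map,
# sorts them and returns the info dict.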
                file_api = quality.get('file_api')
                file_id = quality.get('file_id')
                if not file_api or not file_id:
                    continue
                formats.append({
                    'format_id': quality_id,
                    'url': update_url_query(file_api, {'vid': file_id}),
                    'preference': preference(quality_id),
                    'ext': 'mp4',
                })
            self._sort_formats(formats)

            return {
                'id': video_id,
                'title': title,
                'description': description,
                'thumbnail': video_data.get('image'),
                'duration': int_or_none(video_data.get('length')),
                'timestamp': int_or_none(video_data.get('create_time')),
                'formats': formats,
            }
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..compat import (
    compat_parse_qs,
    compat_str,
    compat_urllib_parse_urlparse,
)
from ..utils import (
    determine_ext,
    int_or_none,
    try_get,
    qualities,
)


class SixPlayIE(InfoExtractor):
    IE_NAME = '6play'
    _VALID_URL = r'(?:6play:|https?://(?:www\.)?(?P<domain>6play\.fr|rtlplay\.be|play\.rtl\.hr|rtlmost\.hu)/.+?-c_)(?P<id>[0-9]+)'
    _TESTS = [{
        'url': 'https://www.6play.fr/minute-par-minute-p_9533/le-but-qui-a-marque-lhistoire-du-football-francais-c_12041051',
        'md5': '31fcd112637baa0c2ab92c4fcd8baf27',
        'info_dict': {
            'id': '12041051',
            'ext': 'mp4',
            'title': 'Le but qui a marqué l\'histoire du football français !',
            'description': 'md5:b59e7e841d646ef1eb42a7868eb6a851',
        },
    }, {
        'url': 'https://www.rtlplay.be/rtl-info-13h-p_8551/les-titres-du-rtlinfo-13h-c_12045869',
        'only_matching': True,
    }, {
        'url': 'https://play.rtl.hr/pj-masks-p_9455/epizoda-34-sezona-1-catboyevo-cudo-na-dva-kotaca-c_11984989',
        'only_matching': True,
    }, {
        'url': 'https://www.rtlmost.hu/megtorve-p_14167/megtorve-6-resz-c_12397787',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        domain, video_id = re.search(self._VALID_URL, url).groups()
        service, consumer_name = {
            '6play.fr': ('6play', 'm6web'),
            'rtlplay.be': ('rtlbe_rtl_play', 'rtlbe'),
            'play.rtl.hr': ('rtlhr_rtl_play', 'rtlhr'),
            'rtlmost.hu': ('rtlhu_rtl_most', 'rtlhu'),
        }.get(domain, ('6play', 'm6web'))

        data = self._download_json(
            'https://pc.middleware.6play.fr/6play/v2/platforms/m6group_web/services/%s/videos/clip_%s' % (service, video_id),
            video_id, headers={
                'x-customer-name': consumer_name
            }, query={
                'csa': 5,
                'with': 'clips',
            })

        clip_data = data['clips'][0]
        title = clip_data['title']

        urls = []
        quality_key = qualities(['lq', 'sd', 'hq', 'hd'])
        formats = []
        subtitles = {}
        assets = clip_data.get('assets') or []
        for asset in assets:
            asset_url = asset.get('full_physical_path')
            protocol = asset.get('protocol')
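            # Skip missing or duplicate URLs and DRM-protected assets, except
            # ISM paths that can be rewritten to the unprotected '_unpnp' variant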
            if (not asset_url
                    or ((protocol == 'primetime' or asset.get('type') == 'usp_hlsfp_h264')
                        and not ('_drmnp.ism/' in asset_url or '_unpnp.ism/' in asset_url))
                    or asset_url in urls):
                continue
            urls.append(asset_url)
            container = asset.get('video_container')
            ext = determine_ext(asset_url)
            if protocol == 'http_subtitle' or ext == 'vtt':
                subtitles.setdefault('fr', []).append({'url': asset_url})
                continue
            if container == 'm3u8' or ext == 'm3u8':
                if protocol == 'usp':
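                    # Token-protected URLs redirect; resolve the final location
                    # (with geo verification headers) before rewriting the path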
                    if compat_parse_qs(compat_urllib_parse_urlparse(asset_url).query).get('token', [None])[0]:
                        urlh = self._request_webpage(
                            asset_url, video_id, fatal=False,
                            headers=self.geo_verification_headers())
                        if not urlh:
                            continue
                        asset_url = urlh.geturl()
                    asset_url = asset_url.replace('_drmnp.ism/', '_unpnp.ism/')
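                    # Retry the rewritten manifest URL a few times, breaking
                    # once HLS extraction yields formats; DASH variants are
                    # probed from the matching .mpd URL alongside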
                    for i in range(3, 0, -1):
                        asset_url = asset_url.replace('_sd1/', '_sd%d/' % i)
                        m3u8_formats = self._extract_m3u8_formats(
                            asset_url, video_id, 'mp4', 'm3u8_native',
                            m3u8_id='hls', fatal=False)
                        formats.extend(m3u8_formats)
                        formats.extend(self._extract_mpd_formats(
                            asset_url.replace('.m3u8', '.mpd'),
                            video_id, mpd_id='dash', fatal=False))
                        if m3u8_formats:
                            break
                else:
                    formats.extend(self._extract_m3u8_formats(
                        asset_url, video_id, 'mp4', 'm3u8_native',
                        m3u8_id='hls', fatal=False))
            elif container == 'mp4' or ext == 'mp4':
                quality = asset.get('video_quality')
                formats.append({
                    'url': asset_url,
                    'format_id': quality,
                    'quality': quality_key(quality),
                    'ext': ext,
                })
        self._sort_formats(formats)

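        # Fetch a string field from the API payload, preferring the top-level
        # object over the clip data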
        def get(getter):
            for src in (data, clip_data):
                v = try_get(src, getter, compat_str)
                if v:
                    return v

        return {
            'id': video_id,
            'title': title,
            'description': get(lambda x: x['description']),
            'duration': int_or_none(clip_data.get('duration')),
            'series': get(lambda x: x['program']['title']),
            'formats': formats,
            'subtitles': subtitles,
        }
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor


class SkylineWebcamsIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?skylinewebcams\.com/[^/]+/webcam/(?:[^/]+/)+(?P<id>[^/]+)\.html'
    _TEST = {
        'url': 'https://www.skylinewebcams.com/it/webcam/italia/lazio/roma/scalinata-piazza-di-spagna-barcaccia.html',
        'info_dict': {
            'id': 'scalinata-piazza-di-spagna-barcaccia',
            'ext': 'mp4',
            'title': 're:^Live Webcam Scalinata di Piazza di Spagna - La Barcaccia [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
            'description': 'Roma, veduta sulla Scalinata di Piazza di Spagna e sulla Barcaccia',
            'is_live': True,
        },
        'params': {
            'skip_download': True,
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)

        webpage = self._download_webpage(url, video_id)

        stream_url = self._search_regex(
            r'(?:url|source)\s*:\s*(["\'])(?P<url>(?:https?:)?//.+?\.m3u8.*?)\1', webpage,
            'stream url', group='url')

        title = self._og_search_title(webpage)
        description = self._og_search_description(webpage)

        return {
            'id': video_id,
            'url': stream_url,
            'ext': 'mp4',
            'title': self._live_title(title),
            'description': description,
            'is_live': True,
        }
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
    parse_iso8601,
    parse_duration,
)


class SkyNewsArabiaBaseIE(InfoExtractor):
    _IMAGE_BASE_URL = 'http://www.skynewsarabia.com/web/images'

    def _call_api(self, path, value):
        return self._download_json('http://api.skynewsarabia.com/web/rest/v2/%s/%s.json' % (path, value), value)

    def _get_limelight_media_id(self, url):
        return self._search_regex(r'/media/[^/]+/([a-z0-9]{32})', url, 'limelight media id')

    def _get_image_url(self, image_path_template, width='1600', height='1200'):
        return self._IMAGE_BASE_URL + image_path_template.format(width=width, height=height)

    def _extract_video_info(self, video_data):
        video_id = compat_str(video_data['id'])
        topic = video_data.get('topicTitle')
        return {
            '_type': 'url_transparent',
            'url': 'limelight:media:%s' % self._get_limelight_media_id(video_data['videoUrl'][0]['url']),
            'id': video_id,
            'title': video_data['headline'],
            'description': video_data.get('summary'),
            'thumbnail': self._get_image_url(video_data['mediaAsset']['imageUrl']),
            'timestamp': parse_iso8601(video_data.get('date')),
            'duration': parse_duration(video_data.get('runTime')),
            'tags': video_data.get('tags', []),
            'categories': [topic] if topic else [],
            'webpage_url': 'http://www.skynewsarabia.com/web/video/%s' % video_id,
            'ie_key': 'LimelightMedia',
        }


class SkyNewsArabiaIE(SkyNewsArabiaBaseIE):
    IE_NAME = 'skynewsarabia:video'
    _VALID_URL = r'https?://(?:www\.)?skynewsarabia\.com/web/video/(?P<id>[0-9]+)'
    _TEST = {
        'url': 'http://www.skynewsarabia.com/web/video/794902/%D9%86%D8%B5%D9%81-%D9%85%D9%84%D9%8A%D9%88%D9%86-%D9%85%D8%B5%D8%A8%D8%A7%D8%AD-%D8%B4%D8%AC%D8%B1%D8%A9-%D9%83%D8%B1%D9%8A%D8%B3%D9%85%D8%A7%D8%B3',
        'info_dict': {
            'id': '794902',
            'ext': 'flv',
            'title': 'نصف مليون مصباح على شجرة كريسماس',
            'description': 'md5:22f1b27f0850eeb10c7e59b1f16eb7c6',
            'upload_date': '20151128',
            'timestamp': 1448697198,
            'duration': 2119,
        },
        'params': {
            # rtmp download
            'skip_download': True,
        },
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        video_data = self._call_api('video', video_id)
        return self._extract_video_info(video_data)


class SkyNewsArabiaArticleIE(SkyNewsArabiaBaseIE):
    IE_NAME = 'skynewsarabia:article'
    _VALID_URL = r'https?://(?:www\.)?skynewsarabia\.com/web/article/(?P<id>[0-9]+)'
    _TESTS = [{
        'url': 'http://www.skynewsarabia.com/web/article/794549/%D8%A7%D9%94%D8%AD%D8%AF%D8%A7%D8%AB-%D8%A7%D9%84%D8%B4%D8%B1%D9%82-%D8%A7%D9%84%D8%A7%D9%94%D9%88%D8%B3%D8%B7-%D8%AE%D8%B1%D9%8A%D8%B7%D8%A9-%D8%A7%D9%84%D8%A7%D9%94%D9%84%D8%B9%D8%A7%D8%A8-%D8%A7%D9%84%D8%B0%D9%83%D9%8A%D8%A9',
        'info_dict': {
            'id': '794549',
            'ext': 'flv',
            'title': 'بالفيديو.. ألعاب ذكية تحاكي واقع المنطقة',
            'description': 'md5:0c373d29919a851e080ee4edd0c5d97f',
            'upload_date': '20151126',
            'timestamp': 1448559336,
            'duration': 281.6,
        },
        'params': {
            # rtmp download
            'skip_download': True,
        },
    }, {
        'url': 'http://www.skynewsarabia.com/web/article/794844/%D8%A7%D8%B3%D8%AA%D9%87%D8%AF%D8%A7%D9%81-%D9%82%D9%88%D8%A7%D8%B1%D8%A8-%D8%A7%D9%94%D8%B3%D9%84%D8%AD%D8%A9-%D9%84%D9%85%D9%8A%D9%84%D9%8A%D8%B4%D9%8A%D8%A7%D8%AA-%D8%A7%D9%84%D8%AD%D9%88%D8%AB%D9%8A-%D9%88%D8%B5%D8%A7%D9%84%D8%AD',
        'info_dict': {
            'id': '794844',
            'title': 'إحباط تهريب أسلحة لميليشيات الحوثي وصالح بجنوب اليمن',
            'description': 'md5:5c927b8b2e805796e7f693538d96fc7e',
        },
        'playlist_mincount': 2,
    }]

    def _real_extract(self, url):
        article_id = self._match_id(url)
        article_data = self._call_api('article', article_id)
        media_asset = article_data['mediaAsset']
        if media_asset['type'] == 'VIDEO':
            topic = article_data.get('topicTitle')
            return {
                '_type': 'url_transparent',
                'url': 'limelight:media:%s' % self._get_limelight_media_id(media_asset['videoUrl'][0]['url']),
                'id': article_id,
                'title': article_data['headline'],
                'description': article_data.get('summary'),
                'thumbnail': self._get_image_url(media_asset['imageUrl']),
                'timestamp': parse_iso8601(article_data.get('date')),
                'tags': article_data.get('tags', []),
                'categories': [topic] if topic else [],
                'webpage_url': url,
                'ie_key': 'LimelightMedia',
            }
        entries = [self._extract_video_info(item) for item in article_data.get('inlineItems', []) if item['type'] == 'VIDEO']
        return self.playlist_result(entries, article_id, article_data['headline'], article_data.get('summary'))
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import (
    extract_attributes,
    smuggle_url,
    strip_or_none,
    urljoin,
)


class SkyBaseIE(InfoExtractor):
    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        video_data = extract_attributes(self._search_regex(
            r'(<div.+?class="[^"]*sdc-article-video__media-ooyala[^"]*"[^>]+>)',
            webpage, 'video data'))

        video_url = 'ooyala:%s' % video_data['data-video-id']
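        # Token-protected Ooyala videos need an embed token from the page's
        # token endpoint, smuggled through to the Ooyala extractor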
        if video_data.get('data-token-required') == 'true':
            token_fetch_options = self._parse_json(video_data.get(
                'data-token-fetch-options', '{}'), video_id, fatal=False) or {}
            token_fetch_url = token_fetch_options.get('url')
            if token_fetch_url:
                embed_token = self._download_webpage(urljoin(
                    url, token_fetch_url), video_id, fatal=False)
                if embed_token:
                    video_url = smuggle_url(
                        video_url, {'embed_token': embed_token.strip('"')})

        return {
            '_type': 'url_transparent',
            'id': video_id,
            'url': video_url,
            'title': self._og_search_title(webpage),
            'description': strip_or_none(self._og_search_description(webpage)),
            'ie_key': 'Ooyala',
        }


class SkySportsIE(SkyBaseIE):
    _VALID_URL = r'https?://(?:www\.)?skysports\.com/watch/video/(?P<id>[0-9]+)'
    _TEST = {
        'url': 'http://www.skysports.com/watch/video/10328419/bale-its-our-time-to-shine',
        'md5': '77d59166cddc8d3cb7b13e35eaf0f5ec',
        'info_dict': {
            'id': 'o3eWJnNDE6l7kfNO8BOoBlRxXRQ4ANNQ',
            'ext': 'mp4',
            'title': 'Bale: It\'s our time to shine',
            'description': 'md5:e88bda94ae15f7720c5cb467e777bb6d',
        },
        'add_ie': ['Ooyala'],
    }


class SkyNewsIE(SkyBaseIE):
    _VALID_URL = r'https?://news\.sky\.com/video/[0-9a-z-]+-(?P<id>[0-9]+)'
    _TEST = {
        'url': 'https://news.sky.com/video/russian-plane-inspected-after-deadly-fire-11712962',
        'md5': 'd6327e581473cea9976a3236ded370cd',
        'info_dict': {
            'id': '1ua21xaDE6lCtZDmbYfl8kwsKLooJbNM',
            'ext': 'mp4',
            'title': 'Russian plane inspected after deadly fire',
            'description': 'The Russian Investigative Committee has released video of the wreckage of a passenger plane which caught fire near Moscow.',
        },
        'add_ie': ['Ooyala'],
    }
from __future__ import unicode_literals

import re
import json

from .common import InfoExtractor
from ..compat import (
    compat_urlparse,
)
from ..utils import (
    ExtractorError,
    get_element_by_id,
)


class SlideshareIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?slideshare\.net/[^/]+?/(?P<title>.+?)($|\?)'

    _TEST = {
        'url': 'http://www.slideshare.net/Dataversity/keynote-presentation-managing-scale-and-complexity',
        'info_dict': {
            'id': '25665706',
            'ext': 'mp4',
            'title': 'Managing Scale and Complexity',
            'description': 'This was a keynote presentation at the NoSQL Now! 2013 Conference & Expo (http://www.nosqlnow.com). This presentation was given by Adrian Cockcroft from Netflix.',
        },
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        page_title = mobj.group('title')
        webpage = self._download_webpage(url, page_title)
        slideshare_obj = self._search_regex(
            r'\$\.extend\(.*?slideshare_object,\s*(\{.*?\})\);',
            webpage, 'slideshare object')
        info = json.loads(slideshare_obj)
        if info['slideshow']['type'] != 'video':
            raise ExtractorError('Webpage type is "%s": only video extraction is supported for Slideshare' % info['slideshow']['type'], expected=True)

        doc = info['doc']
        bucket = info['jsplayer']['video_bucket']
        ext = info['jsplayer']['video_extension']
        video_url = compat_urlparse.urljoin(bucket, doc + '-SD.' + ext)
        description = get_element_by_id('slideshow-description-paragraph', webpage) or self._html_search_regex(
            r'(?s)<p[^>]+itemprop="description"[^>]*>(.+?)</p>', webpage,
            'description', fatal=False)

        return {
            '_type': 'video',
            'id': info['slideshow']['id'],
            'title': info['slideshow']['title'],
            'ext': ext,
            'url': video_url,
            'thumbnail': info['slideshow']['pin_image_url'],
            'description': description.strip() if description else None,
        }
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import smuggle_url


class SlidesLiveIE(InfoExtractor):
    _VALID_URL = r'https?://slideslive\.com/(?P<id>[0-9]+)'
    _TESTS = [{
        # video_service_name = YOUTUBE
        'url': 'https://slideslive.com/38902413/gcc-ia16-backend',
        'md5': 'b29fcd6c6952d0c79c5079b0e7a07e6f',
        'info_dict': {
            'id': 'LMtgR8ba0b0',
            'ext': 'mp4',
            'title': 'GCC IA16 backend',
            'description': 'Watch full version of this video at https://slideslive.com/38902413.',
            'uploader': 'SlidesLive Videos - A',
            'uploader_id': 'UC62SdArr41t_-_fX40QCLRw',
            'upload_date': '20170925',
        }
    }, {
        # video_service_name = youtube
        'url': 'https://slideslive.com/38903721/magic-a-scientific-resurrection-of-an-esoteric-legend',
        'only_matching': True,
    }, {
        # video_service_name = url
        'url': 'https://slideslive.com/38922070/learning-transferable-skills-1',
        'only_matching': True,
    }, {
        # video_service_name = vimeo
        'url': 'https://slideslive.com/38921896/retrospectives-a-venue-for-selfreflection-in-ml-research-3',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        video_data = self._download_json(
            'https://ben.slideslive.com/player/' + video_id, video_id)
        service_name = video_data['video_service_name'].lower()
        assert service_name in ('url', 'vimeo', 'youtube')
        service_id = video_data['video_service_id']
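        # 'url' entries point straight at a media URL; 'youtube'/'vimeo'
        # entries are delegated to the corresponding extractor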
        info = {
            'id': video_id,
            'thumbnail': video_data.get('thumbnail'),
            'url': service_id,
        }
        if service_name == 'url':
            info['title'] = video_data['title']
        else:
            info.update({
                '_type': 'url_transparent',
                'ie_key': service_name.capitalize(),
                'title': video_data.get('title'),
            })
            if service_name == 'vimeo':
                info['url'] = smuggle_url(
                    'https://player.vimeo.com/video/' + service_id,
                    {'http_headers': {'Referer': url}})
        return info
from __future__ import unicode_literals

from .common import InfoExtractor


class SlutloadIE(InfoExtractor):
    _VALID_URL = r'https?://(?:\w+\.)?slutload\.com/(?:video/[^/]+|embed_player|watch)/(?P<id>[^/]+)'
    _TESTS = [{
        'url': 'http://www.slutload.com/video/virginie-baisee-en-cam/TD73btpBqSxc/',
        'md5': '868309628ba00fd488cf516a113fd717',
        'info_dict': {
            'id': 'TD73btpBqSxc',
            'ext': 'mp4',
            'title': 'virginie baisee en cam',
            'age_limit': 18,
            'thumbnail': r're:https?://.*?\.jpg'
        },
    }, {
        # mobile site
        'url': 'http://mobile.slutload.com/video/masturbation-solo/fviFLmc6kzJ/',
        'only_matching': True,
    }, {
        'url': 'http://www.slutload.com/embed_player/TD73btpBqSxc/',
        'only_matching': True,
    }, {
        'url': 'http://www.slutload.com/watch/TD73btpBqSxc/Virginie-Baisee-En-Cam.html',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        embed_page = self._download_webpage(
            'http://www.slutload.com/embed_player/%s' % video_id, video_id,
            'Downloading embed page', fatal=False)

        if embed_page:
            def extract(what):
                return self._html_search_regex(
                    r'data-video-%s=(["\'])(?P<url>(?:(?!\1).)+)\1' % what,
                    embed_page, 'video %s' % what, default=None, group='url')

            video_url = extract('url')
            if video_url:
                title = self._html_search_regex(
                    r'<title>([^<]+)', embed_page, 'title', default=video_id)
                return {
                    'id': video_id,
                    'url': video_url,
                    'title': title,
                    'thumbnail': extract('preview'),
                    'age_limit': 18
                }

        webpage = self._download_webpage(
            'http://www.slutload.com/video/_/%s/' % video_id, video_id)
        title = self._html_search_regex(
            r'<h1><strong>([^<]+)</strong>', webpage, 'title').strip()
        info = self._parse_html5_media_entries(url, webpage, video_id)[0]
        info.update({
            'id': video_id,
            'title': title,
            'age_limit': 18,
        })
        return info
# coding: utf-8
from __future__ import unicode_literals

import re
import json
import hashlib
import uuid

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    int_or_none,
    sanitized_Request,
    unified_strdate,
    urlencode_postdata,
    xpath_text,
)


class SmotriIE(InfoExtractor):
    IE_DESC = 'Smotri.com'
    IE_NAME = 'smotri'
    _VALID_URL = r'https?://(?:www\.)?(?:smotri\.com/video/view/\?id=|pics\.smotri\.com/(?:player|scrubber_custom8)\.swf\?file=)(?P<id>v(?P<realvideoid>[0-9]+)[a-z0-9]{4})'
    _NETRC_MACHINE = 'smotri'

    _TESTS = [
        # real video id 2610366
        {
            'url': 'http://smotri.com/video/view/?id=v261036632ab',
            'md5': '02c0dfab2102984e9c5bb585cc7cc321',
            'info_dict': {
                'id': 'v261036632ab',
                'ext': 'mp4',
                'title': 'катастрофа с камер видеонаблюдения',
                'uploader': 'rbc2008',
                'uploader_id': 'rbc08',
                'upload_date': '20131118',
                'thumbnail': 'http://frame6.loadup.ru/8b/a9/2610366.3.3.jpg',
            },
        },
        # real video id 57591
        {
            'url': 'http://smotri.com/video/view/?id=v57591cb20',
            'md5': '830266dfc21f077eac5afd1883091bcd',
            'info_dict': {
                'id': 'v57591cb20',
                'ext': 'flv',
                'title': 'test',
                'uploader': 'Support Photofile@photofile',
                'uploader_id': 'support-photofile',
                'upload_date': '20070704',
                'thumbnail': 'http://frame4.loadup.ru/03/ed/57591.2.3.jpg',
            },
        },
        # video-password, not approved by moderator
        {
            'url': 'http://smotri.com/video/view/?id=v1390466a13c',
            'md5': 'f6331cef33cad65a0815ee482a54440b',
            'info_dict': {
                'id': 'v1390466a13c',
                'ext': 'mp4',
                'title': 'TOCCA_A_NOI_-_LE_COSE_NON_VANNO_CAMBIAMOLE_ORA-1',
                'uploader': 'timoxa40',
                'uploader_id': 'timoxa40',
                'upload_date': '20100404',
                'thumbnail': 'http://frame7.loadup.ru/af/3f/1390466.3.3.jpg',
            },
            'params': {
                'videopassword': 'qwerty',
            },
            'skip': 'Video is not approved by moderator',
        },
        # video-password
        {
            'url': 'http://smotri.com/video/view/?id=v6984858774#',
            'md5': 'f11e01d13ac676370fc3b95b9bda11b0',
            'info_dict': {
                'id': 'v6984858774',
                'ext': 'mp4',
                'title': 'Дача Солженицина ПАРОЛЬ 223322',
                'uploader': 'psavari1',
                'uploader_id': 'psavari1',
                'upload_date': '20081103',
                'thumbnail': r're:^https?://.*\.jpg$',
            },
            'params': {
                'videopassword': '223322',
            },
        },
        # age limit + video-password, not approved by moderator
        {
            'url': 'http://smotri.com/video/view/?id=v15408898bcf',
            'md5': '91e909c9f0521adf5ee86fbe073aad70',
            'info_dict': {
                'id': 'v15408898bcf',
                'ext': 'flv',
                'title': 'этот ролик не покажут по ТВ',
                'uploader': 'zzxxx',
                'uploader_id': 'ueggb',
                'upload_date': '20101001',
                'thumbnail': 'http://frame3.loadup.ru/75/75/1540889.1.3.jpg',
                'age_limit': 18,
            },
            'params': {
                'videopassword': '333'
            },
            'skip': 'Video is not approved by moderator',
        },
        # age limit + video-password
        {
            'url': 'http://smotri.com/video/view/?id=v7780025814',
            'md5': 'b4599b068422559374a59300c5337d72',
            'info_dict': {
                'id': 'v7780025814',
                'ext': 'mp4',
                'title': 'Sexy Beach (пароль 123)',
                'uploader': 'вАся',
                'uploader_id': 'asya_prosto',
                'upload_date': '20081218',
                'thumbnail': r're:^https?://.*\.jpg$',
                'age_limit': 18,
            },
            'params': {
                'videopassword': '123'
            },
        },
        # swf player
        {
            'url': 'http://pics.smotri.com/scrubber_custom8.swf?file=v9188090500',
            'md5': '31099eeb4bc906712c5f40092045108d',
            'info_dict': {
                'id': 'v9188090500',
                'ext': 'mp4',
                'title': 'Shakira - Don\'t Bother',
                'uploader': 'HannahL',
                'uploader_id': 'lisaha95',
                'upload_date': '20090331',
                'thumbnail': 'http://frame8.loadup.ru/44/0b/918809.7.3.jpg',
            },
        },
    ]

    @classmethod
    def _extract_url(cls, webpage):
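        # Detect either an embedded SWF player or the download-info block
        # that third-party pages include for Smotri videos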
        mobj = re.search(
            r'<embed[^>]src=(["\'])(?P<url>http://pics\.smotri\.com/(?:player|scrubber_custom8)\.swf\?file=v.+?\1)',
            webpage)
        if mobj is not None:
            return mobj.group('url')

        mobj = re.search(
            r'''(?x)<div\s+class="video_file">http://smotri\.com/video/download/file/[^<]+</div>\s*
                    <div\s+class="video_image">[^<]+</div>\s*
                    <div\s+class="video_id">(?P<id>[^<]+)</div>''', webpage)
        if mobj is not None:
            return 'http://smotri.com/video/view/?id=%s' % mobj.group('id')

    def _search_meta(self, name, html, display_name=None):
        if display_name is None:
            display_name = name
        return self._html_search_meta(name, html, display_name)

    def _real_extract(self, url):
        video_id = self._match_id(url)

        video_form = {
            'ticket': video_id,
            'video_url': '1',
            'frame_url': '1',
            'devid': 'LoadupFlashPlayer',
            'getvideoinfo': '1',
        }

        video_password = self._downloader.params.get('videopassword')
        if video_password:
            video_form['pass'] = hashlib.md5(video_password.encode('utf-8')).hexdigest()

        video = self._download_json(
            'http://smotri.com/video/view/url/bot/',
            video_id, 'Downloading video JSON',
            data=urlencode_postdata(video_form),
            headers={'Content-Type': 'application/x-www-form-urlencoded'})

        video_url = video.get('_vidURL') or video.get('_vidURL_mp4')

        if not video_url:
            if video.get('_moderate_no'):
                raise ExtractorError(
                    'Video %s has not been approved by moderator' % video_id, expected=True)

            if video.get('error'):
                raise ExtractorError('Video %s does not exist' % video_id, expected=True)

            if video.get('_pass_protected') == 1:
                msg = ('Invalid video password' if video_password
                       else 'This video is protected by a password, use the --video-password option')
                raise ExtractorError(msg, expected=True)

        title = video['title']
        thumbnail = video.get('_imgURL')
        upload_date = unified_strdate(video.get('added'))
        uploader = video.get('userNick')
        uploader_id = video.get('userLogin')
        duration = int_or_none(video.get('duration'))

        # Video JSON does not provide enough meta data
        # We will extract some from the video web page instead
        webpage_url = 'http://smotri.com/video/view/?id=%s' % video_id
        webpage = self._download_webpage(webpage_url, video_id, 'Downloading video page')

        # Warning if video is unavailable
        warning = self._html_search_regex(
            r'<div[^>]+class="videoUnModer"[^>]*>(.+?)</div>', webpage,
            'warning message', default=None)
        if warning is not None:
            self._downloader.report_warning(
                'Video %s may not be available; smotri said: %s ' %
                (video_id, warning))

        # Adult content
        if 'EroConfirmText">' in webpage:
            self.report_age_confirmation()
            confirm_string = self._html_search_regex(
                r'<a[^>]+href="/video/view/\?id=%s&confirm=([^"]+)"' % video_id,
                webpage, 'confirm string')
            confirm_url = webpage_url + '&confirm=%s' % confirm_string
            webpage = self._download_webpage(
                confirm_url, video_id,
                'Downloading video page (age confirmed)')
            adult_content = True
        else:
            adult_content = False

        view_count = self._html_search_regex(
            r'(?s)Общее количество просмотров.*?<span class="Number">(\d+)</span>',
            webpage, 'view count', fatal=False)

        return {
            'id': video_id,
            'url': video_url,
            'title': title,
            'thumbnail': thumbnail,
            'uploader': uploader,
            'upload_date': upload_date,
            'uploader_id': uploader_id,
            'duration': duration,
            'view_count': int_or_none(view_count),
            'age_limit': 18 if adult_content else 0,
        }


class SmotriCommunityIE(InfoExtractor):
    IE_DESC = 'Smotri.com community videos'
    IE_NAME = 'smotri:community'
    _VALID_URL = r'https?://(?:www\.)?smotri\.com/community/video/(?P<id>[0-9A-Za-z_\'-]+)'
    _TEST = {
        'url': 'http://smotri.com/community/video/kommuna',
        'info_dict': {
            'id': 'kommuna',
        },
        'playlist_mincount': 4,
    }

    def _real_extract(self, url):
        community_id = self._match_id(url)

        rss = self._download_xml(
            'http://smotri.com/export/rss/video/by/community/-/%s/video.xml' % community_id,
            community_id, 'Downloading community RSS')

        entries = [
            self.url_result(video_url.text, SmotriIE.ie_key())
            for video_url in rss.findall('./channel/item/link')]

        return self.playlist_result(entries, community_id)


class SmotriUserIE(InfoExtractor):
    IE_DESC = 'Smotri.com user videos'
    IE_NAME = 'smotri:user'
    _VALID_URL = r'https?://(?:www\.)?smotri\.com/user/(?P<id>[0-9A-Za-z_\'-]+)'
    _TESTS = [{
        'url': 'http://smotri.com/user/inspector',
        'info_dict': {
            'id': 'inspector',
            'title': 'Inspector',
        },
        'playlist_mincount': 9,
    }]

    def _real_extract(self, url):
        user_id = self._match_id(url)

        rss = self._download_xml(
            'http://smotri.com/export/rss/user/video/-/%s/video.xml' % user_id,
            user_id, 'Downloading user RSS')

        entries = [self.url_result(video_url.text, 'Smotri')
                   for video_url in rss.findall('./channel/item/link')]

        description_text = xpath_text(rss, './channel/description') or ''
        user_nickname = self._search_regex(
            '^Видео режиссера (.+)$', description_text,
            'user nickname', fatal=False)

        return self.playlist_result(entries, user_id, user_nickname)


class SmotriBroadcastIE(InfoExtractor):
    IE_DESC = 'Smotri.com broadcasts'
    IE_NAME = 'smotri:broadcast'
    _VALID_URL = r'https?://(?:www\.)?(?P<url>smotri\.com/live/(?P<id>[^/]+))/?.*'
    _NETRC_MACHINE = 'smotri'

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        broadcast_id = mobj.group('id')

        broadcast_url = 'http://' + mobj.group('url')
        broadcast_page = self._download_webpage(broadcast_url, broadcast_id, 'Downloading broadcast page')

        if re.search('>Режиссер с логином <br/>"%s"<br/> <span>не существует<' % broadcast_id, broadcast_page) is not None:
            raise ExtractorError(
                'Broadcast %s does not exist' % broadcast_id, expected=True)

        # Adult content
        if re.search('EroConfirmText">', broadcast_page) is not None:

            (username, password) = self._get_login_info()
            if username is None:
                self.raise_login_required(
                    'Erotic broadcasts allowed only for registered users')

            login_form = {
                'login-hint53': '1',
                'confirm_erotic': '1',
                'login': username,
                'password': password,
            }

            request = sanitized_Request(
                broadcast_url + '/?no_redirect=1', urlencode_postdata(login_form))
            request.add_header('Content-Type', 'application/x-www-form-urlencoded')
            broadcast_page = self._download_webpage(
                request, broadcast_id, 'Logging in and confirming age')

            if '>Неверный логин или пароль<' in broadcast_page:
                raise ExtractorError(
                    'Unable to log in: bad username or password', expected=True)

            adult_content = True
        else:
            adult_content = False

        ticket = self._html_search_regex(
            (r'data-user-file=(["\'])(?P<ticket>(?!\1).+)\1',
             r"window\.broadcast_control\.addFlashVar\('file'\s*,\s*'(?P<ticket>[^']+)'\)"),
            broadcast_page, 'broadcast ticket', group='ticket')

        broadcast_url = 'http://smotri.com/broadcast/view/url/?ticket=%s' % ticket

        broadcast_password = self._downloader.params.get('videopassword')
        if broadcast_password:
            broadcast_url += '&pass=%s' % hashlib.md5(broadcast_password.encode('utf-8')).hexdigest()

        broadcast_json_page = self._download_webpage(
            broadcast_url, broadcast_id, 'Downloading broadcast JSON')

        try:
            broadcast_json = json.loads(broadcast_json_page)

            protected_broadcast = broadcast_json['_pass_protected'] == 1
            if protected_broadcast and not broadcast_password:
                raise ExtractorError(
                    'This broadcast is protected by a password, use the --video-password option',
                    expected=True)

            broadcast_offline = broadcast_json['is_play'] == 0
            if broadcast_offline:
                raise ExtractorError('Broadcast %s is offline' % broadcast_id, expected=True)

            rtmp_url = broadcast_json['_server']
            mobj = re.search(r'^rtmp://[^/]+/(?P<app>.+)/?$', rtmp_url)
            if not mobj:
                raise ExtractorError('Unexpected broadcast rtmp URL')

            broadcast_playpath = broadcast_json['_streamName']
            broadcast_app = '%s/%s' % (mobj.group('app'), broadcast_json['_vidURL'])
            broadcast_thumbnail = broadcast_json.get('_imgURL')
            broadcast_title = self._live_title(broadcast_json['title'])
            broadcast_description = broadcast_json.get('description')
            broadcaster_nick = broadcast_json.get('nick')
            broadcaster_login = broadcast_json.get('login')
            rtmp_conn = 'S:%s' % uuid.uuid4().hex
        except KeyError:
            # use .get(): the KeyError may have come from '_pass_protected' itself
            if broadcast_json.get('_pass_protected'):
                raise ExtractorError('Bad broadcast password', expected=True)
            raise ExtractorError('Unexpected broadcast JSON')

        return {
            'id': broadcast_id,
            'url': rtmp_url,
            'title': broadcast_title,
            'thumbnail': broadcast_thumbnail,
            'description': broadcast_description,
            'uploader': broadcaster_nick,
            'uploader_id': broadcaster_login,
            'age_limit': 18 if adult_content else 0,
            'ext': 'flv',
            'play_path': broadcast_playpath,
            'player_url': 'http://pics.smotri.com/broadcast_play.swf',
            'app': broadcast_app,
            'rtmp_live': True,
            'rtmp_conn': rtmp_conn,
            'is_live': True,
        }
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    parse_duration,
    parse_filesize,
    str_to_int,
)


class SnotrIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?snotr\.com/video/(?P<id>\d+)/([\w]+)'
    _TESTS = [{
        'url': 'http://www.snotr.com/video/13708/Drone_flying_through_fireworks',
        'info_dict': {
            'id': '13708',
            'ext': 'mp4',
            'title': 'Drone flying through fireworks!',
            'duration': 248,
            'filesize_approx': 40700000,
            'description': 'A drone flying through Fourth of July Fireworks',
            'thumbnail': r're:^https?://.*\.jpg$',
        },
        'expected_warnings': ['description'],
    }, {
        'url': 'http://www.snotr.com/video/530/David_Letteman_-_George_W_Bush_Top_10',
        'info_dict': {
            'id': '530',
            'ext': 'mp4',
            'title': 'David Letteman - George W. Bush Top 10',
            'duration': 126,
            'filesize_approx': 8500000,
            'description': 'The top 10 George W. Bush moments, brought to you by David Letterman!',
            'thumbnail': r're:^https?://.*\.jpg$',
        }
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')

        webpage = self._download_webpage(url, video_id)
        title = self._og_search_title(webpage)

        description = self._og_search_description(webpage)
        info_dict = self._parse_html5_media_entries(
            url, webpage, video_id, m3u8_entry_protocol='m3u8_native')[0]

        view_count = str_to_int(self._html_search_regex(
            r'<p[^>]*>\s*<strong[^>]*>Views:</strong>\s*<span[^>]*>([\d,\.]+)',
            webpage, 'view count', fatal=False))

        duration = parse_duration(self._html_search_regex(
            r'<p[^>]*>\s*<strong[^>]*>Length:</strong>\s*<span[^>]*>([\d:]+)',
            webpage, 'duration', fatal=False))

        filesize_approx = parse_filesize(self._html_search_regex(
            r'<p[^>]*>\s*<strong[^>]*>Filesize:</strong>\s*<span[^>]*>([^<]+)',
            webpage, 'filesize', fatal=False))

        info_dict.update({
            'id': video_id,
            'description': description,
            'title': title,
            'view_count': view_count,
            'duration': duration,
            'filesize_approx': filesize_approx,
        })

        return info_dict
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..compat import (
    compat_str,
    compat_urllib_parse_urlencode,
)
from ..utils import (
    ExtractorError,
    int_or_none,
    try_get,
)


class SohuIE(InfoExtractor):
    _VALID_URL = r'https?://(?P<mytv>my\.)?tv\.sohu\.com/.+?/(?(mytv)|n)(?P<id>\d+)\.shtml.*?'

    # Sohu videos give different MD5 sums on Travis CI and my machine
    _TESTS = [{
        'note': 'This video is available only in Mainland China',
        'url': 'http://tv.sohu.com/20130724/n382479172.shtml#super',
        'info_dict': {
            'id': '382479172',
            'ext': 'mp4',
            'title': 'MV:Far East Movement《The Illest》',
        },
        'skip': 'Only available in China',
    }, {
        'url': 'http://tv.sohu.com/20150305/n409385080.shtml',
        'info_dict': {
            'id': '409385080',
            'ext': 'mp4',
            'title': '《2015湖南卫视羊年元宵晚会》唐嫣《花好月圆》',
        }
    }, {
        'url': 'http://my.tv.sohu.com/us/232799889/78693464.shtml',
        'info_dict': {
            'id': '78693464',
            'ext': 'mp4',
            'title': '【爱范品】第31期:MWC见不到的奇葩手机',
        }
    }, {
        'note': 'Multipart video',
        'url': 'http://my.tv.sohu.com/pl/8384802/78910339.shtml',
        'info_dict': {
            'id': '78910339',
            'title': '【神探苍实战秘籍】第13期 战争之影 赫卡里姆',
        },
        'playlist': [{
            'info_dict': {
                'id': '78910339_part1',
                'ext': 'mp4',
                'duration': 294,
                'title': '【神探苍实战秘籍】第13期 战争之影 赫卡里姆',
            }
        }, {
            'info_dict': {
                'id': '78910339_part2',
                'ext': 'mp4',
                'duration': 300,
                'title': '【神探苍实战秘籍】第13期 战争之影 赫卡里姆',
            }
        }, {
            'info_dict': {
                'id': '78910339_part3',
                'ext': 'mp4',
                'duration': 150,
                'title': '【神探苍实战秘籍】第13期 战争之影 赫卡里姆',
            }
        }]
    }, {
        'note': 'Video with title containing dash',
        'url': 'http://my.tv.sohu.com/us/249884221/78932792.shtml',
        'info_dict': {
            'id': '78932792',
            'ext': 'mp4',
            'title': 'youtube-dl testing video',
        },
        'params': {
            'skip_download': True
        }
    }]

    def _real_extract(self, url):

        def _fetch_data(vid_id, mytv=False):
            if mytv:
                base_data_url = 'http://my.tv.sohu.com/play/videonew.do?vid='
            else:
                base_data_url = 'http://hot.vrs.sohu.com/vrs_flash.action?vid='

            return self._download_json(
                base_data_url + vid_id, video_id,
                'Downloading JSON data for %s' % vid_id,
                headers=self.geo_verification_headers())

        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        mytv = mobj.group('mytv') is not None

        webpage = self._download_webpage(url, video_id)

        title = re.sub(r' - 搜狐视频$', '', self._og_search_title(webpage))

        vid = self._html_search_regex(
            r'var vid ?= ?["\'](\d+)["\']',
            webpage, 'video path')
        vid_data = _fetch_data(vid, mytv)
        if vid_data['play'] != 1:
            if vid_data.get('status') == 12:
                raise ExtractorError(
                    '%s said: There\'s something wrong in the video.' % self.IE_NAME,
                    expected=True)
            else:
                self.raise_geo_restricted(
                    '%s said: The video is only licensed to users in Mainland China.' % self.IE_NAME)

        formats_json = {}
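        # Fetch per-quality metadata, reusing the payload already downloaded
        # for the page's own vid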
        for format_id in ('nor', 'high', 'super', 'ori', 'h2644k', 'h2654k'):
            vid_id = vid_data['data'].get('%sVid' % format_id)
            if not vid_id:
                continue
            vid_id = compat_str(vid_id)
            formats_json[format_id] = vid_data if vid == vid_id else _fetch_data(vid_id, mytv)

        part_count = vid_data['data']['totalBlocks']

        playlist = []
        for i in range(part_count):
            formats = []
            for format_id, format_data in formats_json.items():
                allot = format_data['allot']

                data = format_data['data']
                clips_url = data['clipsURL']
                su = data['su']

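                # The CDN may answer with the placeholder host below; keep
                # re-requesting (passing the returned nid back as idc) until a
                # real URL comes back, giving up after 5 retries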
                video_url = 'newflv.sohu.ccgslb.net'
                cdnId = None
                retries = 0

                while 'newflv.sohu.ccgslb.net' in video_url:
                    params = {
                        'prot': 9,
                        'file': clips_url[i],
                        'new': su[i],
                        'prod': 'flash',
                        'rb': 1,
                    }

                    if cdnId is not None:
                        params['idc'] = cdnId

                    download_note = 'Downloading %s video URL part %d of %d' % (
                        format_id, i + 1, part_count)

                    if retries > 0:
                        download_note += ' (retry #%d)' % retries
                    part_info = self._parse_json(self._download_webpage(
                        'http://%s/?%s' % (allot, compat_urllib_parse_urlencode(params)),
                        video_id, download_note), video_id)

                    video_url = part_info['url']
                    cdnId = part_info.get('nid')

                    retries += 1
                    if retries > 5:
                        raise ExtractorError('Failed to get video URL')

                formats.append({
                    'url': video_url,
                    'format_id': format_id,
                    'filesize': int_or_none(
                        try_get(data, lambda x: x['clipsBytes'][i])),
                    'width': int_or_none(data.get('width')),
                    'height': int_or_none(data.get('height')),
                    'fps': int_or_none(data.get('fps')),
                })
            self._sort_formats(formats)

            playlist.append({
                'id': '%s_part%d' % (video_id, i + 1),
                'title': title,
                'duration': vid_data['data']['clipsDuration'][i],
                'formats': formats,
            })

        if len(playlist) == 1:
            info = playlist[0]
            info['id'] = video_id
        else:
            info = {
                '_type': 'multi_video',
                'entries': playlist,
                'id': video_id,
                'title': title,
            }

        return info
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import smuggle_url


class SonyLIVIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?sonyliv\.com/details/[^/]+/(?P<id>\d+)'
    _TESTS = [{
        'url': "http://www.sonyliv.com/details/episodes/5024612095001/Ep.-1---Achaari-Cheese-Toast---Bachelor's-Delight",
        'info_dict': {
            'title': "Ep. 1 - Achaari Cheese Toast - Bachelor's Delight",
            'id': 'ref:5024612095001',
            'ext': 'mp4',
            'upload_date': '20170923',
            'description': 'md5:7f28509a148d5be9d0782b4d5106410d',
            'uploader_id': '5182475815001',
            'timestamp': 1506200547,
        },
        'params': {
            'skip_download': True,
        },
        'add_ie': ['BrightcoveNew'],
    }, {
        'url': 'http://www.sonyliv.com/details/full%20movie/4951168986001/Sei-Raat-(Bangla)',
        'only_matching': True,
    }]

    # BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/4338955589001/default_default/index.html?videoId=%s'
    BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/5182475815001/default_default/index.html?videoId=ref:%s'

    def _real_extract(self, url):
        brightcove_id = self._match_id(url)
        return self.url_result(
            smuggle_url(self.BRIGHTCOVE_URL_TEMPLATE % brightcove_id, {
                'geo_countries': ['IN'],
                'referrer': url,
            }),
            'BrightcoveNew', brightcove_id)
# coding: utf-8
from __future__ import unicode_literals

import itertools
import re

from .common import (
    InfoExtractor,
    SearchInfoExtractor
)
from ..compat import (
    compat_HTTPError,
    compat_kwargs,
    compat_str,
    compat_urlparse,
)
from ..utils import (
    error_to_compat_str,
    ExtractorError,
    float_or_none,
    HEADRequest,
    int_or_none,
    KNOWN_EXTENSIONS,
    mimetype2ext,
    str_or_none,
    try_get,
    unified_timestamp,
    update_url_query,
    url_or_none,
    urlhandle_detect_ext,
)


class SoundcloudEmbedIE(InfoExtractor):
    _VALID_URL = r'https?://(?:w|player|p)\.soundcloud\.com/player/?.*?\burl=(?P<id>.+)'
    _TEST = {
        # from https://www.soundi.fi/uutiset/ennakkokuuntelussa-timo-kaukolammen-station-to-station-to-station-julkaisua-juhlitaan-tanaan-g-livelabissa/
        'url': 'https://w.soundcloud.com/player/?visual=true&url=https%3A%2F%2Fapi.soundcloud.com%2Fplaylists%2F922213810&show_artwork=true&maxwidth=640&maxheight=960&dnt=1&secret_token=s-ziYey',
        'only_matching': True,
    }

    @staticmethod
    def _extract_urls(webpage):
        return [m.group('url') for m in re.finditer(
            r'<iframe[^>]+src=(["\'])(?P<url>(?:https?://)?(?:w\.)?soundcloud\.com/player.+?)\1',
            webpage)]

    def _real_extract(self, url):
        query = compat_urlparse.parse_qs(
            compat_urlparse.urlparse(url).query)
        api_url = query['url'][0]
        secret_token = query.get('secret_token')
        if secret_token:
            api_url = update_url_query(api_url, {'secret_token': secret_token[0]})
        return self.url_result(api_url)


class SoundcloudIE(InfoExtractor):
    """Information extractor for soundcloud.com
       To access the media, the uid of the song and a stream token
       must be extracted from the page source and the script must make
       a request to media.soundcloud.com/crossdomain.xml. Then
       the media can be grabbed by requesting from an url composed
       of the stream token and uid
     """

    _VALID_URL = r'''(?x)^(?:https?://)?
                    (?:(?:(?:www\.|m\.)?soundcloud\.com/
                            (?!stations/track)
                            (?P<uploader>[\w\d-]+)/
                            (?!(?:tracks|albums|sets(?:/.+?)?|reposts|likes|spotlight)/?(?:$|[?#]))
                            (?P<title>[\w\d-]+)/?
                            (?P<token>[^?]+?)?(?:[?].*)?$)
                       |(?:api(?:-v2)?\.soundcloud\.com/tracks/(?P<track_id>\d+)
                          (?:/?\?secret_token=(?P<secret_token>[^&]+))?)
                    )
                    '''
    IE_NAME = 'soundcloud'
    _TESTS = [
        {
            'url': 'http://soundcloud.com/ethmusic/lostin-powers-she-so-heavy',
            'md5': 'ebef0a451b909710ed1d7787dddbf0d7',
            'info_dict': {
                'id': '62986583',
                'ext': 'mp3',
                'title': 'Lostin Powers - She so Heavy (SneakPreview) Adrian Ackers Blueprint 1',
                'description': 'No Downloads untill we record the finished version this weekend, i was too pumped n i had to post it , earl is prolly gonna b hella p.o\'d',
                'uploader': 'E.T. ExTerrestrial Music',
                'uploader_id': '1571244',
                'timestamp': 1349920598,
                'upload_date': '20121011',
                'duration': 143.216,
                'license': 'all-rights-reserved',
                'view_count': int,
                'like_count': int,
                'comment_count': int,
                'repost_count': int,
            }
        },
        # geo-restricted
        {
            'url': 'https://soundcloud.com/the-concept-band/goldrushed-mastered?in=the-concept-band/sets/the-royal-concept-ep',
            'info_dict': {
                'id': '47127627',
                'ext': 'mp3',
                'title': 'Goldrushed',
                'description': 'From Stockholm Sweden\r\nPovel / Magnus / Filip / David\r\nwww.theroyalconcept.com',
                'uploader': 'The Royal Concept',
                'uploader_id': '9615865',
                'timestamp': 1337635207,
                'upload_date': '20120521',
                'duration': 227.155,
                'license': 'all-rights-reserved',
                'view_count': int,
                'like_count': int,
                'comment_count': int,
                'repost_count': int,
            },
        },
        # private link
        {
            'url': 'https://soundcloud.com/jaimemf/youtube-dl-test-video-a-y-baw/s-8Pjrp',
            'md5': 'aa0dd32bfea9b0c5ef4f02aacd080604',
            'info_dict': {
                'id': '123998367',
                'ext': 'mp3',
                'title': 'Youtube - Dl Test Video \'\' Ä↭',
                'description': 'test chars:  \"\'/\\ä↭',
                'uploader': 'jaimeMF',
                'uploader_id': '69767071',
                'timestamp': 1386604920,
                'upload_date': '20131209',
                'duration': 9.927,
                'license': 'all-rights-reserved',
                'view_count': int,
                'like_count': int,
                'comment_count': int,
                'repost_count': int,
            },
        },
        # private link (alt format)
        {
            'url': 'https://api.soundcloud.com/tracks/123998367?secret_token=s-8Pjrp',
            'md5': 'aa0dd32bfea9b0c5ef4f02aacd080604',
            'info_dict': {
                'id': '123998367',
                'ext': 'mp3',
                'title': 'Youtube - Dl Test Video \'\' Ä↭',
                'description': 'test chars:  \"\'/\\ä↭',
                'uploader': 'jaimeMF',
                'uploader_id': '69767071',
                'timestamp': 1386604920,
                'upload_date': '20131209',
                'duration': 9.927,
                'license': 'all-rights-reserved',
                'view_count': int,
                'like_count': int,
                'comment_count': int,
                'repost_count': int,
            },
        },
        # downloadable song
        {
            'url': 'https://soundcloud.com/oddsamples/bus-brakes',
            'md5': '7624f2351f8a3b2e7cd51522496e7631',
            'info_dict': {
                'id': '128590877',
                'ext': 'mp3',
                'title': 'Bus Brakes',
                'description': 'md5:0053ca6396e8d2fd7b7e1595ef12ab66',
                'uploader': 'oddsamples',
                'uploader_id': '73680509',
                'timestamp': 1389232924,
                'upload_date': '20140109',
                'duration': 17.346,
                'license': 'cc-by-sa',
                'view_count': int,
                'like_count': int,
                'comment_count': int,
                'repost_count': int,
            },
        },
        # private link, downloadable format
        {
            'url': 'https://soundcloud.com/oriuplift/uponly-238-no-talking-wav/s-AyZUd',
            'md5': '64a60b16e617d41d0bef032b7f55441e',
            'info_dict': {
                'id': '340344461',
                'ext': 'wav',
                'title': 'Uplifting Only 238 [No Talking] (incl. Alex Feed Guestmix) (Aug 31, 2017) [wav]',
                'description': 'md5:fa20ee0fca76a3d6df8c7e57f3715366',
                'uploader': 'Ori Uplift Music',
                'uploader_id': '12563093',
                'timestamp': 1504206263,
                'upload_date': '20170831',
                'duration': 7449.096,
                'license': 'all-rights-reserved',
                'view_count': int,
                'like_count': int,
                'comment_count': int,
                'repost_count': int,
            },
        },
        # no album art, use avatar pic for thumbnail
        {
            'url': 'https://soundcloud.com/garyvee/sideways-prod-mad-real',
            'md5': '59c7872bc44e5d99b7211891664760c2',
            'info_dict': {
                'id': '309699954',
                'ext': 'mp3',
                'title': 'Sideways (Prod. Mad Real)',
                'description': 'md5:d41d8cd98f00b204e9800998ecf8427e',
                'uploader': 'garyvee',
                'uploader_id': '2366352',
                'timestamp': 1488152409,
                'upload_date': '20170226',
                'duration': 207.012,
                'thumbnail': r're:https?://.*\.jpg',
                'license': 'all-rights-reserved',
                'view_count': int,
                'like_count': int,
                'comment_count': int,
                'repost_count': int,
            },
            'params': {
                'skip_download': True,
            },
        },
        {
            'url': 'https://soundcloud.com/giovannisarani/mezzo-valzer',
            'md5': 'e22aecd2bc88e0e4e432d7dcc0a1abf7',
            'info_dict': {
                'id': '583011102',
                'ext': 'mp3',
                'title': 'Mezzo Valzer',
                'description': 'md5:4138d582f81866a530317bae316e8b61',
                'uploader': 'Micronie',
                'uploader_id': '3352531',
                'timestamp': 1551394171,
                'upload_date': '20190228',
                'duration': 180.157,
                'thumbnail': r're:https?://.*\.jpg',
                'license': 'all-rights-reserved',
                'view_count': int,
                'like_count': int,
                'comment_count': int,
                'repost_count': int,
            },
        },
        {
            # with AAC HQ format available via OAuth token
            'url': 'https://soundcloud.com/wandw/the-chainsmokers-ft-daya-dont-let-me-down-ww-remix-1',
            'only_matching': True,
        },
    ]

    _API_V2_BASE = 'https://api-v2.soundcloud.com/'
    _BASE_URL = 'https://soundcloud.com/'
    _IMAGE_REPL_RE = r'-([0-9a-z]+)\.jpg'

    _ARTWORK_MAP = {
        'mini': 16,
        'tiny': 20,
        'small': 32,
        'badge': 47,
        't67x67': 67,
        'large': 100,
        't300x300': 300,
        'crop': 400,
        't500x500': 500,
        'original': 0,
    }
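    # Artwork URLs carry a size suffix matched by _IMAGE_REPL_RE; substituting
    # each _ARTWORK_MAP key enumerates every available size. As a hypothetical
    # example, "artworks-xyz-large.jpg" becomes "artworks-xyz-t500x500.jpg"
    # (500x500); "original" has unknown dimensions (0) and gets top preference.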

    def _store_client_id(self, client_id):
        self._downloader.cache.store('soundcloud', 'client_id', client_id)

    def _update_client_id(self):
        webpage = self._download_webpage('https://soundcloud.com/', None)
        for src in reversed(re.findall(r'<script[^>]+src="([^"]+)"', webpage)):
            script = self._download_webpage(src, None, fatal=False)
            if script:
                client_id = self._search_regex(
                    r'client_id\s*:\s*"([0-9a-zA-Z]{32})"',
                    script, 'client id', default=None)
                if client_id:
                    self._CLIENT_ID = client_id
                    self._store_client_id(client_id)
                    return
        raise ExtractorError('Unable to extract client id')

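    # Wrapper around the generic JSON download that injects the client_id and,
    # on HTTP 401 (expired/revoked client_id), refreshes it from the website
    # scripts and retries the request once.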
    def _download_json(self, *args, **kwargs):
        non_fatal = kwargs.get('fatal') is False
        if non_fatal:
            del kwargs['fatal']
        query = kwargs.get('query', {}).copy()
        for _ in range(2):
            query['client_id'] = self._CLIENT_ID
            kwargs['query'] = query
            try:
                return super(SoundcloudIE, self)._download_json(*args, **compat_kwargs(kwargs))
            except ExtractorError as e:
                if isinstance(e.cause, compat_HTTPError) and e.cause.code == 401:
                    self._store_client_id(None)
                    self._update_client_id()
                    continue
                elif non_fatal:
                    self._downloader.report_warning(error_to_compat_str(e))
                    return False
                raise

    def _real_initialize(self):
        self._CLIENT_ID = self._downloader.cache.load('soundcloud', 'client_id') or 'YUKXoArFcqrlQn9tfNHvvyfnDISj04zk'

    @classmethod
    def _resolv_url(cls, url):
        return SoundcloudIE._API_V2_BASE + 'resolve?url=' + url

    def _extract_info_dict(self, info, full_title=None, secret_token=None):
        track_id = compat_str(info['id'])
        title = info['title']

        format_urls = set()
        formats = []
        query = {'client_id': self._CLIENT_ID}
        if secret_token:
            query['secret_token'] = secret_token

        if info.get('downloadable') and info.get('has_downloads_left'):
            download_url = update_url_query(
                self._API_V2_BASE + 'tracks/' + track_id + '/download', query)
            redirect_url = (self._download_json(download_url, track_id, fatal=False) or {}).get('redirectUri')
            if redirect_url:
                urlh = self._request_webpage(
                    HEADRequest(redirect_url), track_id, fatal=False)
                if urlh:
                    format_url = urlh.geturl()
                    format_urls.add(format_url)
                    formats.append({
                        'format_id': 'download',
                        'ext': urlhandle_detect_ext(urlh) or 'mp3',
                        'filesize': int_or_none(urlh.headers.get('Content-Length')),
                        'url': format_url,
                        'preference': 10,
                    })

        def invalid_url(url):
            return not url or url in format_urls

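        # Helper that fills in a format dict for `stream_url` (read from the
        # enclosing loop): abr/ext are sniffed from the URL path (e.g.
        # ".128.mp3"), 30-second previews are deprioritized, and AAC HLS
        # streams use the ffmpeg-based 'm3u8' protocol instead of 'm3u8_native'.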
        def add_format(f, protocol, is_preview=False):
            mobj = re.search(r'\.(?P<abr>\d+)\.(?P<ext>[0-9a-z]{3,4})(?=[/?])', stream_url)
            if mobj:
                for k, v in mobj.groupdict().items():
                    if not f.get(k):
                        f[k] = v
            format_id_list = []
            if protocol:
                format_id_list.append(protocol)
            ext = f.get('ext')
            if ext == 'aac':
                f['abr'] = '256'
            for k in ('ext', 'abr'):
                v = f.get(k)
                if v:
                    format_id_list.append(v)
            preview = is_preview or re.search(r'/(?:preview|playlist)/0/30/', f['url'])
            if preview:
                format_id_list.append('preview')
            abr = f.get('abr')
            if abr:
                f['abr'] = int(abr)
            if protocol == 'hls':
                protocol = 'm3u8' if ext == 'aac' else 'm3u8_native'
            else:
                protocol = 'http'
            f.update({
                'format_id': '_'.join(format_id_list),
                'protocol': protocol,
                'preference': -10 if preview else None,
            })
            formats.append(f)

        # New API
        transcodings = try_get(
            info, lambda x: x['media']['transcodings'], list) or []
        for t in transcodings:
            if not isinstance(t, dict):
                continue
            format_url = url_or_none(t.get('url'))
            if not format_url:
                continue
            stream = self._download_json(
                format_url, track_id, query=query, fatal=False)
            if not isinstance(stream, dict):
                continue
            stream_url = url_or_none(stream.get('url'))
            if invalid_url(stream_url):
                continue
            format_urls.add(stream_url)
            stream_format = t.get('format') or {}
            protocol = stream_format.get('protocol')
            if protocol != 'hls' and '/hls' in format_url:
                protocol = 'hls'
            ext = None
            preset = str_or_none(t.get('preset'))
            if preset:
                ext = preset.split('_')[0]
            if ext not in KNOWN_EXTENSIONS:
                ext = mimetype2ext(stream_format.get('mime_type'))
            add_format({
                'url': stream_url,
                'ext': ext,
            }, 'http' if protocol == 'progressive' else protocol,
                t.get('snipped') or '/preview/' in format_url)

        for f in formats:
            f['vcodec'] = 'none'

        if not formats and info.get('policy') == 'BLOCK':
            self.raise_geo_restricted()
        self._sort_formats(formats)

        user = info.get('user') or {}

        thumbnails = []
        artwork_url = info.get('artwork_url')
        thumbnail = artwork_url or user.get('avatar_url')
        if isinstance(thumbnail, compat_str):
            if re.search(self._IMAGE_REPL_RE, thumbnail):
                for image_id, size in self._ARTWORK_MAP.items():
                    i = {
                        'id': image_id,
                        'url': re.sub(self._IMAGE_REPL_RE, '-%s.jpg' % image_id, thumbnail),
                    }
                    if image_id == 'tiny' and not artwork_url:
                        size = 18
                    elif image_id == 'original':
                        i['preference'] = 10
                    if size:
                        i.update({
                            'width': size,
                            'height': size,
                        })
                    thumbnails.append(i)
            else:
                thumbnails = [{'url': thumbnail}]

        def extract_count(key):
            return int_or_none(info.get('%s_count' % key))

        return {
            'id': track_id,
            'uploader': user.get('username'),
            'uploader_id': str_or_none(user.get('id')) or user.get('permalink'),
            'uploader_url': user.get('permalink_url'),
            'timestamp': unified_timestamp(info.get('created_at')),
            'title': title,
            'description': info.get('description'),
            'thumbnails': thumbnails,
            'duration': float_or_none(info.get('duration'), 1000),
            'webpage_url': info.get('permalink_url'),
            'license': info.get('license'),
            'view_count': extract_count('playback'),
            'like_count': extract_count('favoritings') or extract_count('likes'),
            'comment_count': extract_count('comment'),
            'repost_count': extract_count('reposts'),
            'genre': info.get('genre'),
            'formats': formats
        }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)

        track_id = mobj.group('track_id')

        query = {}
        if track_id:
            info_json_url = self._API_V2_BASE + 'tracks/' + track_id
            full_title = track_id
            token = mobj.group('secret_token')
            if token:
                query['secret_token'] = token
        else:
            full_title = resolve_title = '%s/%s' % mobj.group('uploader', 'title')
            token = mobj.group('token')
            if token:
                resolve_title += '/%s' % token
            info_json_url = self._resolv_url(self._BASE_URL + resolve_title)

        info = self._download_json(
            info_json_url, full_title, 'Downloading info JSON', query=query)

        return self._extract_info_dict(info, full_title, token)


class SoundcloudPlaylistBaseIE(SoundcloudIE):
    def _extract_set(self, playlist, token=None):
        playlist_id = compat_str(playlist['id'])
        tracks = playlist.get('tracks') or []
        if not all([t.get('permalink_url') for t in tracks]) and token:
            tracks = self._download_json(
                self._API_V2_BASE + 'tracks', playlist_id,
                'Downloading tracks', query={
                    'ids': ','.join([compat_str(t['id']) for t in tracks]),
                    'playlistId': playlist_id,
                    'playlistSecretToken': token,
                })
        entries = []
        for track in tracks:
            track_id = str_or_none(track.get('id'))
            url = track.get('permalink_url')
            if not url:
                if not track_id:
                    continue
                url = self._API_V2_BASE + 'tracks/' + track_id
                if token:
                    url += '?secret_token=' + token
            entries.append(self.url_result(
                url, SoundcloudIE.ie_key(), track_id))
        return self.playlist_result(
            entries, playlist_id,
            playlist.get('title'),
            playlist.get('description'))


class SoundcloudSetIE(SoundcloudPlaylistBaseIE):
    _VALID_URL = r'https?://(?:(?:www|m)\.)?soundcloud\.com/(?P<uploader>[\w\d-]+)/sets/(?P<slug_title>[\w\d-]+)(?:/(?P<token>[^?/]+))?'
    IE_NAME = 'soundcloud:set'
    _TESTS = [{
        'url': 'https://soundcloud.com/the-concept-band/sets/the-royal-concept-ep',
        'info_dict': {
            'id': '2284613',
            'title': 'The Royal Concept EP',
            'description': 'md5:71d07087c7a449e8941a70a29e34671e',
        },
        'playlist_mincount': 5,
    }, {
        'url': 'https://soundcloud.com/the-concept-band/sets/the-royal-concept-ep/token',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)

        full_title = '%s/sets/%s' % mobj.group('uploader', 'slug_title')
        token = mobj.group('token')
        if token:
            full_title += '/' + token

        info = self._download_json(self._resolv_url(
            self._BASE_URL + full_title), full_title)

        if 'errors' in info:
            msgs = (compat_str(err['error_message']) for err in info['errors'])
            raise ExtractorError('unable to download video webpage: %s' % ','.join(msgs))

        return self._extract_set(info, token)


class SoundcloudPagedPlaylistBaseIE(SoundcloudIE):
    def _extract_playlist(self, base_url, playlist_id, playlist_title):
        # Per the SoundCloud documentation, the maximum limit for a linked partitioning query is 200.
        # https://developers.soundcloud.com/blog/offset-pagination-deprecated
        COMMON_QUERY = {
            'limit': 200,
            'linked_partitioning': '1',
        }
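        # A linked-partitioning page has (roughly) the shape:
        #   {"collection": [...], "next_href": "https://api-v2.soundcloud.com/...?offset=..."}
        # so pagination simply follows next_href until it is absent.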

        query = COMMON_QUERY.copy()
        query['offset'] = 0

        next_href = base_url

        entries = []
        for i in itertools.count():
            response = self._download_json(
                next_href, playlist_id,
                'Downloading track page %s' % (i + 1), query=query)

            collection = response['collection']

            if not isinstance(collection, list):
                collection = []

            # Empty collection may be returned, in this case we proceed
            # straight to next_href

            def resolve_entry(candidates):
                for cand in candidates:
                    if not isinstance(cand, dict):
                        continue
                    permalink_url = url_or_none(cand.get('permalink_url'))
                    if not permalink_url:
                        continue
                    return self.url_result(
                        permalink_url,
                        SoundcloudIE.ie_key() if SoundcloudIE.suitable(permalink_url) else None,
                        str_or_none(cand.get('id')), cand.get('title'))

            for e in collection:
                entry = resolve_entry((e, e.get('track'), e.get('playlist')))
                if entry:
                    entries.append(entry)

            next_href = response.get('next_href')
            if not next_href:
                break

            parsed_next_href = compat_urlparse.urlparse(next_href)
            query = compat_urlparse.parse_qs(parsed_next_href.query)
            query.update(COMMON_QUERY)

        return {
            '_type': 'playlist',
            'id': playlist_id,
            'title': playlist_title,
            'entries': entries,
        }


class SoundcloudUserIE(SoundcloudPagedPlaylistBaseIE):
    _VALID_URL = r'''(?x)
                        https?://
                            (?:(?:www|m)\.)?soundcloud\.com/
                            (?P<user>[^/]+)
                            (?:/
                                (?P<rsrc>tracks|albums|sets|reposts|likes|spotlight)
                            )?
                            /?(?:[?#].*)?$
                    '''
    IE_NAME = 'soundcloud:user'
    _TESTS = [{
        'url': 'https://soundcloud.com/soft-cell-official',
        'info_dict': {
            'id': '207965082',
            'title': 'Soft Cell (All)',
        },
        'playlist_mincount': 28,
    }, {
        'url': 'https://soundcloud.com/soft-cell-official/tracks',
        'info_dict': {
            'id': '207965082',
            'title': 'Soft Cell (Tracks)',
        },
        'playlist_mincount': 27,
    }, {
        'url': 'https://soundcloud.com/soft-cell-official/albums',
        'info_dict': {
            'id': '207965082',
            'title': 'Soft Cell (Albums)',
        },
        'playlist_mincount': 1,
    }, {
        'url': 'https://soundcloud.com/jcv246/sets',
        'info_dict': {
            'id': '12982173',
            'title': 'Jordi / cv (Sets)',
        },
        'playlist_mincount': 2,
    }, {
        'url': 'https://soundcloud.com/jcv246/reposts',
        'info_dict': {
            'id': '12982173',
            'title': 'Jordi / cv (Reposts)',
        },
        'playlist_mincount': 6,
    }, {
        'url': 'https://soundcloud.com/clalberg/likes',
        'info_dict': {
            'id': '11817582',
            'title': 'clalberg (Likes)',
        },
        'playlist_mincount': 5,
    }, {
        'url': 'https://soundcloud.com/grynpyret/spotlight',
        'info_dict': {
            'id': '7098329',
            'title': 'Grynpyret (Spotlight)',
        },
        'playlist_mincount': 1,
    }]

    _BASE_URL_MAP = {
        'all': 'stream/users/%s',
        'tracks': 'users/%s/tracks',
        'albums': 'users/%s/albums',
        'sets': 'users/%s/playlists',
        'reposts': 'stream/users/%s/reposts',
        'likes': 'users/%s/likes',
        'spotlight': 'users/%s/spotlight',
    }
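    # Maps the URL path component to the api-v2 endpoint, e.g.
    # https://soundcloud.com/some-user/tracks -> users/<id>/tracks, after the
    # username has been resolved to a numeric id.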

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        uploader = mobj.group('user')

        user = self._download_json(
            self._resolv_url(self._BASE_URL + uploader),
            uploader, 'Downloading user info')

        resource = mobj.group('rsrc') or 'all'

        return self._extract_playlist(
            self._API_V2_BASE + self._BASE_URL_MAP[resource] % user['id'],
            str_or_none(user.get('id')),
            '%s (%s)' % (user['username'], resource.capitalize()))


class SoundcloudTrackStationIE(SoundcloudPagedPlaylistBaseIE):
    _VALID_URL = r'https?://(?:(?:www|m)\.)?soundcloud\.com/stations/track/[^/]+/(?P<id>[^/?#&]+)'
    IE_NAME = 'soundcloud:trackstation'
    _TESTS = [{
        'url': 'https://soundcloud.com/stations/track/officialsundial/your-text',
        'info_dict': {
            'id': '286017854',
            'title': 'Track station: your text',
        },
        'playlist_mincount': 47,
    }]

    def _real_extract(self, url):
        track_name = self._match_id(url)

        track = self._download_json(self._resolv_url(url), track_name)
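        # The resolved station id has the form "soundcloud:track-stations:<digits>";
        # extract the numeric part for use as the playlist id.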
        track_id = self._search_regex(
            r'soundcloud:track-stations:(\d+)', track['id'], 'track id')

        return self._extract_playlist(
            self._API_V2_BASE + 'stations/%s/tracks' % track['id'],
            track_id, 'Track station: %s' % track['title'])


class SoundcloudPlaylistIE(SoundcloudPlaylistBaseIE):
    _VALID_URL = r'https?://api(?:-v2)?\.soundcloud\.com/playlists/(?P<id>[0-9]+)(?:/?\?secret_token=(?P<token>[^&]+?))?$'
    IE_NAME = 'soundcloud:playlist'
    _TESTS = [{
        'url': 'https://api.soundcloud.com/playlists/4110309',
        'info_dict': {
            'id': '4110309',
            'title': 'TILT Brass - Bowery Poetry Club, August \'03 [Non-Site SCR 02]',
            'description': 're:.*?TILT Brass - Bowery Poetry Club',
        },
        'playlist_count': 6,
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        playlist_id = mobj.group('id')

        query = {}
        token = mobj.group('token')
        if token:
            query['secret_token'] = token

        data = self._download_json(
            self._API_V2_BASE + 'playlists/' + playlist_id,
            playlist_id, 'Downloading playlist', query=query)

        return self._extract_set(data, token)


class SoundcloudSearchIE(SearchInfoExtractor, SoundcloudIE):
    IE_NAME = 'soundcloud:search'
    IE_DESC = 'Soundcloud search'
    _MAX_RESULTS = float('inf')
    _TESTS = [{
        'url': 'scsearch15:post-avant jazzcore',
        'info_dict': {
            'title': 'post-avant jazzcore',
        },
        'playlist_count': 15,
    }]

    _SEARCH_KEY = 'scsearch'
    _MAX_RESULTS_PER_PAGE = 200
    _DEFAULT_RESULTS_PER_PAGE = 50

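    # Generator over a linked-partitioning collection: fetches pages of
    # `limit` items (the requested count, capped at _MAX_RESULTS_PER_PAGE)
    # and stops once that many results have been yielded or next_href runs out.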
    def _get_collection(self, endpoint, collection_id, **query):
        limit = min(
            query.get('limit', self._DEFAULT_RESULTS_PER_PAGE),
            self._MAX_RESULTS_PER_PAGE)
        query.update({
            'limit': limit,
            'linked_partitioning': 1,
            'offset': 0,
        })
        next_url = update_url_query(self._API_V2_BASE + endpoint, query)

        collected_results = 0

        for i in itertools.count(1):
            response = self._download_json(
                next_url, collection_id, 'Downloading page {0}'.format(i),
                'Unable to download API page')

            collection = response.get('collection', [])
            if not collection:
                break

            collection = list(filter(bool, collection))
            collected_results += len(collection)

            for item in collection:
                yield self.url_result(item['uri'], SoundcloudIE.ie_key())

            if not collection or collected_results >= limit:
                break

            next_url = response.get('next_href')
            if not next_url:
                break

    def _get_n_results(self, query, n):
        tracks = self._get_collection('search/tracks', query, limit=n, q=query)
        return self.playlist_result(tracks, playlist_title=query)
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor


class SoundgasmIE(InfoExtractor):
    IE_NAME = 'soundgasm'
    _VALID_URL = r'https?://(?:www\.)?soundgasm\.net/u/(?P<user>[0-9a-zA-Z_-]+)/(?P<display_id>[0-9a-zA-Z_-]+)'
    _TEST = {
        'url': 'http://soundgasm.net/u/ytdl/Piano-sample',
        'md5': '010082a2c802c5275bb00030743e75ad',
        'info_dict': {
            'id': '88abd86ea000cafe98f96321b23cc1206cbcbcc9',
            'ext': 'm4a',
            'title': 'Piano sample',
            'description': 'Royalty Free Sample Music',
            'uploader': 'ytdl',
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        display_id = mobj.group('display_id')

        webpage = self._download_webpage(url, display_id)

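        # The page embeds a jPlayer setup whose "m4a" entry points at the raw
        # audio file; the audio id is the hash-like basename of that URL.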
        audio_url = self._html_search_regex(
            r'(?s)m4a\s*:\s*(["\'])(?P<url>(?:(?!\1).)+)\1', webpage,
            'audio URL', group='url')

        title = self._search_regex(
            r'<div[^>]+\bclass=["\']jp-title[^>]+>([^<]+)',
            webpage, 'title', default=display_id)

        description = self._html_search_regex(
            (r'(?s)<div[^>]+\bclass=["\']jp-description[^>]+>(.+?)</div>',
             r'(?s)<li>Description:\s(.*?)<\/li>'),
            webpage, 'description', fatal=False)

        audio_id = self._search_regex(
            r'/([^/]+)\.m4a', audio_url, 'audio id', default=display_id)

        return {
            'id': audio_id,
            'display_id': display_id,
            'url': audio_url,
            'vcodec': 'none',
            'title': title,
            'description': description,
            'uploader': mobj.group('user'),
        }


class SoundgasmProfileIE(InfoExtractor):
    IE_NAME = 'soundgasm:profile'
    _VALID_URL = r'https?://(?:www\.)?soundgasm\.net/u/(?P<id>[^/]+)/?(?:\#.*)?$'
    _TEST = {
        'url': 'http://soundgasm.net/u/ytdl',
        'info_dict': {
            'id': 'ytdl',
        },
        'playlist_count': 1,
    }

    def _real_extract(self, url):
        profile_id = self._match_id(url)

        webpage = self._download_webpage(url, profile_id)

        entries = [
            self.url_result(audio_url, 'Soundgasm')
            for audio_url in re.findall(r'href="([^"]+/u/%s/[^"]+)' % profile_id, webpage)]

        return self.playlist_result(entries, profile_id)
# coding: utf-8
from __future__ import unicode_literals

from .mtv import MTVServicesInfoExtractor


class SouthParkIE(MTVServicesInfoExtractor):
    IE_NAME = 'southpark.cc.com'
    _VALID_URL = r'https?://(?:www\.)?(?P<url>southpark\.cc\.com/(?:clips|(?:full-)?episodes|collections)/(?P<id>.+?)(\?|#|$))'

    _FEED_URL = 'http://www.southparkstudios.com/feeds/video-player/mrss'

    _TESTS = [{
        'url': 'http://southpark.cc.com/clips/104437/bat-daded#tab=featured',
        'info_dict': {
            'id': 'a7bff6c2-ed00-11e0-aca6-0026b9414f30',
            'ext': 'mp4',
            'title': 'South Park|Bat Daded',
            'description': 'Randy disqualifies South Park by getting into a fight with Bat Dad.',
            'timestamp': 1112760000,
            'upload_date': '20050406',
        },
    }, {
        'url': 'http://southpark.cc.com/collections/7758/fan-favorites/1',
        'only_matching': True,
    }]


class SouthParkEsIE(SouthParkIE):
    IE_NAME = 'southpark.cc.com:español'
    _VALID_URL = r'https?://(?:www\.)?(?P<url>southpark\.cc\.com/episodios-en-espanol/(?P<id>.+?)(\?|#|$))'
    _LANG = 'es'

    _TESTS = [{
        'url': 'http://southpark.cc.com/episodios-en-espanol/s01e01-cartman-consigue-una-sonda-anal#source=351c1323-0b96-402d-a8b9-40d01b2e9bde&position=1&sort=!airdate',
        'info_dict': {
            'title': 'Cartman Consigue Una Sonda Anal',
            'description': 'Cartman Consigue Una Sonda Anal',
        },
        'playlist_count': 4,
        'skip': 'Geo-restricted',
    }]


class SouthParkDeIE(SouthParkIE):
    IE_NAME = 'southpark.de'
    _VALID_URL = r'https?://(?:www\.)?(?P<url>southpark\.de/(?:clips|alle-episoden|collections)/(?P<id>.+?)(\?|#|$))'
    _FEED_URL = 'http://www.southpark.de/feeds/video-player/mrss/'

    _TESTS = [{
        'url': 'http://www.southpark.de/clips/uygssh/the-government-wont-respect-my-privacy#tab=featured',
        'info_dict': {
            'id': '85487c96-b3b9-4e39-9127-ad88583d9bf2',
            'ext': 'mp4',
            'title': 'South Park|The Government Won\'t Respect My Privacy',
            'description': 'Cartman explains the benefits of "Shitter" to Stan, Kyle and Craig.',
            'timestamp': 1380160800,
            'upload_date': '20130926',
        },
    }, {
        # non-ASCII characters in initial URL
        'url': 'http://www.southpark.de/alle-episoden/s18e09-hashtag-aufwärmen',
        'info_dict': {
            'title': 'Hashtag „Aufwärmen“',
            'description': 'Kyle will mit seinem kleinen Bruder Ike Videospiele spielen. Als der nicht mehr mit ihm spielen will, hat Kyle Angst, dass er die Kids von heute nicht mehr versteht.',
        },
        'playlist_count': 3,
    }, {
        # non-ASCII characters in redirect URL
        'url': 'http://www.southpark.de/alle-episoden/s18e09',
        'info_dict': {
            'title': 'Hashtag „Aufwärmen“',
            'description': 'Kyle will mit seinem kleinen Bruder Ike Videospiele spielen. Als der nicht mehr mit ihm spielen will, hat Kyle Angst, dass er die Kids von heute nicht mehr versteht.',
        },
        'playlist_count': 3,
    }, {
        'url': 'http://www.southpark.de/collections/2476/superhero-showdown/1',
        'only_matching': True,
    }]


class SouthParkNlIE(SouthParkIE):
    IE_NAME = 'southpark.nl'
    _VALID_URL = r'https?://(?:www\.)?(?P<url>southpark\.nl/(?:clips|(?:full-)?episodes|collections)/(?P<id>.+?)(\?|#|$))'
    _FEED_URL = 'http://www.southpark.nl/feeds/video-player/mrss/'

    _TESTS = [{
        'url': 'http://www.southpark.nl/full-episodes/s18e06-freemium-isnt-free',
        'info_dict': {
            'title': 'Freemium Isn\'t Free',
            'description': 'Stan is addicted to the new Terrance and Phillip mobile game.',
        },
        'playlist_mincount': 3,
    }]


class SouthParkDkIE(SouthParkIE):
    IE_NAME = 'southparkstudios.dk'
    _VALID_URL = r'https?://(?:www\.)?(?P<url>southparkstudios\.(?:dk|nu)/(?:clips|full-episodes|collections)/(?P<id>.+?)(\?|#|$))'
    _FEED_URL = 'http://www.southparkstudios.dk/feeds/video-player/mrss/'

    _TESTS = [{
        'url': 'http://www.southparkstudios.dk/full-episodes/s18e07-grounded-vindaloop',
        'info_dict': {
            'title': 'Grounded Vindaloop',
            'description': 'Butters is convinced he\'s living in a virtual reality.',
        },
        'playlist_mincount': 3,
    }, {
        'url': 'http://www.southparkstudios.dk/collections/2476/superhero-showdown/1',
        'only_matching': True,
    }, {
        'url': 'http://www.southparkstudios.nu/collections/2476/superhero-showdown/1',
        'only_matching': True,
    }]
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    determine_ext,
    ExtractorError,
    merge_dicts,
    orderedSet,
    parse_duration,
    parse_resolution,
    str_to_int,
    url_or_none,
    urlencode_postdata,
)


class SpankBangIE(InfoExtractor):
    _VALID_URL = r'https?://(?:[^/]+\.)?spankbang\.com/(?P<id>[\da-z]+)/(?:video|play|embed)\b'
    _TESTS = [{
        'url': 'http://spankbang.com/3vvn/video/fantasy+solo',
        'md5': '1cc433e1d6aa14bc376535b8679302f7',
        'info_dict': {
            'id': '3vvn',
            'ext': 'mp4',
            'title': 'fantasy solo',
            'description': 'dillion harper masturbates on a bed',
            'thumbnail': r're:^https?://.*\.jpg$',
            'uploader': 'silly2587',
            'timestamp': 1422571989,
            'upload_date': '20150129',
            'age_limit': 18,
        }
    }, {
        # 480p only
        'url': 'http://spankbang.com/1vt0/video/solvane+gangbang',
        'only_matching': True,
    }, {
        # no uploader
        'url': 'http://spankbang.com/lklg/video/sex+with+anyone+wedding+edition+2',
        'only_matching': True,
    }, {
        # mobile page
        'url': 'http://m.spankbang.com/1o2de/video/can+t+remember+her+name',
        'only_matching': True,
    }, {
        # 4k
        'url': 'https://spankbang.com/1vwqx/video/jade+kush+solo+4k',
        'only_matching': True,
    }, {
        'url': 'https://m.spankbang.com/3vvn/play/fantasy+solo/480p/',
        'only_matching': True,
    }, {
        'url': 'https://m.spankbang.com/3vvn/play',
        'only_matching': True,
    }, {
        'url': 'https://spankbang.com/2y3td/embed/',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(
            url.replace('/%s/embed' % video_id, '/%s/video' % video_id),
            video_id, headers={'Cookie': 'country=US'})

        if re.search(r'<[^>]+\b(?:id|class)=["\']video_removed', webpage):
            raise ExtractorError(
                'Video %s is not available' % video_id, expected=True)

        formats = []

        def extract_format(format_id, format_url):
            f_url = url_or_none(format_url)
            if not f_url:
                return
            f = parse_resolution(format_id)
            ext = determine_ext(f_url)
            if format_id.startswith('m3u8') or ext == 'm3u8':
                formats.extend(self._extract_m3u8_formats(
                    f_url, video_id, 'mp4', entry_protocol='m3u8_native',
                    m3u8_id='hls', fatal=False))
            elif format_id.startswith('mpd') or ext == 'mpd':
                formats.extend(self._extract_mpd_formats(
                    f_url, video_id, mpd_id='dash', fatal=False))
            elif ext == 'mp4' or f.get('width') or f.get('height'):
                f.update({
                    'url': f_url,
                    'format_id': format_id,
                })
                formats.append(f)

        STREAM_URL_PREFIX = 'stream_url_'
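        # The player page defines JS variables such as (assumed form)
        #   stream_url_240p = 'https://...';
        # the regex below captures the quality suffix and the URL of each.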

        for mobj in re.finditer(
                r'%s(?P<id>[^\s=]+)\s*=\s*(["\'])(?P<url>(?:(?!\2).)+)\2'
                % STREAM_URL_PREFIX, webpage):
            format_id, format_url = mobj.group('id', 'url')
            extract_format(format_id, format_url)

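        # Fallback: no inline stream URLs found, so POST the page's stream key
        # to the site's JSON stream API (values may be single-element lists).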
        if not formats:
            stream_key = self._search_regex(
                r'data-streamkey\s*=\s*(["\'])(?P<value>(?:(?!\1).)+)\1',
                webpage, 'stream key', group='value')

            stream = self._download_json(
                'https://spankbang.com/api/videos/stream', video_id,
                'Downloading stream JSON', data=urlencode_postdata({
                    'id': stream_key,
                    'data': 0,
                }), headers={
                    'Referer': url,
                    'X-Requested-With': 'XMLHttpRequest',
                })

            for format_id, format_url in stream.items():
                if format_url and isinstance(format_url, list):
                    format_url = format_url[0]
                extract_format(format_id, format_url)

        self._sort_formats(formats, field_preference=('preference', 'height', 'width', 'fps', 'tbr', 'format_id'))

        info = self._search_json_ld(webpage, video_id, default={})

        title = self._html_search_regex(
            r'(?s)<h1[^>]*>(.+?)</h1>', webpage, 'title', default=None)
        description = self._search_regex(
            r'<div[^>]+\bclass=["\']bottom[^>]+>\s*<p>[^<]*</p>\s*<p>([^<]+)',
            webpage, 'description', default=None)
        thumbnail = self._og_search_thumbnail(webpage, default=None)
        uploader = self._html_search_regex(
            (r'(?s)<li[^>]+class=["\']profile[^>]+>(.+?)</a>',
             r'class="user"[^>]*><img[^>]+>([^<]+)'),
            webpage, 'uploader', default=None)
        duration = parse_duration(self._search_regex(
            r'<div[^>]+\bclass=["\']right_side[^>]+>\s*<span>([^<]+)',
            webpage, 'duration', default=None))
        view_count = str_to_int(self._search_regex(
            r'([\d,.]+)\s+plays', webpage, 'view count', default=None))

        age_limit = self._rta_search(webpage)

        return merge_dicts({
            'id': video_id,
            'title': title or video_id,
            'description': description,
            'thumbnail': thumbnail,
            'uploader': uploader,
            'duration': duration,
            'view_count': view_count,
            'formats': formats,
            'age_limit': age_limit,
        }, info)


class SpankBangPlaylistIE(InfoExtractor):
    _VALID_URL = r'https?://(?:[^/]+\.)?spankbang\.com/(?P<id>[\da-z]+)/playlist/[^/]+'
    _TEST = {
        'url': 'https://spankbang.com/ug0k/playlist/big+ass+titties',
        'info_dict': {
            'id': 'ug0k',
            'title': 'Big Ass Titties',
        },
        'playlist_mincount': 50,
    }

    def _real_extract(self, url):
        playlist_id = self._match_id(url)

        webpage = self._download_webpage(
            url, playlist_id, headers={'Cookie': 'country=US; mobile=on'})

        entries = [self.url_result(
            'https://spankbang.com/%s/video' % video_id,
            ie=SpankBangIE.ie_key(), video_id=video_id)
            for video_id in orderedSet(re.findall(
                r'<a[^>]+\bhref=["\']/?([\da-z]+)/play/', webpage))]

        title = self._html_search_regex(
            r'<h1>([^<]+)\s+playlist</h1>', webpage, 'playlist title',
            fatal=False)

        return self.playlist_result(entries, playlist_id, title)
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    float_or_none,
    int_or_none,
    merge_dicts,
    str_or_none,
    str_to_int,
    url_or_none,
)


class SpankwireIE(InfoExtractor):
    _VALID_URL = r'''(?x)
                    https?://
                        (?:www\.)?spankwire\.com/
                        (?:
                            [^/]+/video|
                            EmbedPlayer\.aspx/?\?.*?\bArticleId=
                        )
                        (?P<id>\d+)
                    '''
    _TESTS = [{
        # download URL pattern: */<height>P_<tbr>K_<video_id>.mp4
        'url': 'http://www.spankwire.com/Buckcherry-s-X-Rated-Music-Video-Crazy-Bitch/video103545/',
        'md5': '5aa0e4feef20aad82cbcae3aed7ab7cd',
        'info_dict': {
            'id': '103545',
            'ext': 'mp4',
            'title': 'Buckcherry`s X Rated Music Video Crazy Bitch',
            'description': 'Crazy Bitch X rated music video.',
            'duration': 222,
            'uploader': 'oreusz',
            'uploader_id': '124697',
            'timestamp': 1178587885,
            'upload_date': '20070508',
            'average_rating': float,
            'view_count': int,
            'comment_count': int,
            'age_limit': 18,
            'categories': list,
            'tags': list,
        },
    }, {
        # download URL pattern: */mp4_<format_id>_<video_id>.mp4
        'url': 'http://www.spankwire.com/Titcums-Compiloation-I/video1921551/',
        'md5': '09b3c20833308b736ae8902db2f8d7e6',
        'info_dict': {
            'id': '1921551',
            'ext': 'mp4',
            'title': 'Titcums Compiloation I',
            'description': 'cum on tits',
            'uploader': 'dannyh78999',
            'uploader_id': '3056053',
            'upload_date': '20150822',
            'age_limit': 18,
        },
        'params': {
            'proxy': '127.0.0.1:8118'
        },
        'skip': 'removed',
    }, {
        'url': 'https://www.spankwire.com/EmbedPlayer.aspx/?ArticleId=156156&autostart=true',
        'only_matching': True,
    }]

    @staticmethod
    def _extract_urls(webpage):
        return re.findall(
            r'<iframe[^>]+\bsrc=["\']((?:https?:)?//(?:www\.)?spankwire\.com/EmbedPlayer\.aspx/?\?.*?\bArticleId=\d+)',
            webpage)

    def _real_extract(self, url):
        video_id = self._match_id(url)

        video = self._download_json(
            'https://www.spankwire.com/api/video/%s.json' % video_id, video_id)

        title = video['title']

        formats = []
        videos = video.get('videos')
        if isinstance(videos, dict):
            for format_id, format_url in videos.items():
                video_url = url_or_none(format_url)
                if not video_url:
                    continue
                height = int_or_none(self._search_regex(
                    r'(\d+)[pP]', format_id, 'height', default=None))
                m = re.search(
                    r'/(?P<height>\d+)[pP]_(?P<tbr>\d+)[kK]', video_url)
                if m:
                    tbr = int(m.group('tbr'))
                    height = height or int(m.group('height'))
                else:
                    tbr = None
                formats.append({
                    'url': video_url,
                    'format_id': '%dp' % height if height else format_id,
                    'height': height,
                    'tbr': tbr,
                })
        m3u8_url = url_or_none(video.get('HLS'))
        if m3u8_url:
            formats.extend(self._extract_m3u8_formats(
                m3u8_url, video_id, 'mp4', entry_protocol='m3u8_native',
                m3u8_id='hls', fatal=False))
        self._sort_formats(formats, ('height', 'tbr', 'width', 'format_id'))

        view_count = str_to_int(video.get('viewed'))

        thumbnails = []
        for preference, t in enumerate(('', '2x')):
            thumbnail_url = url_or_none(video.get('poster%s' % t))
            if not thumbnail_url:
                continue
            thumbnails.append({
                'url': thumbnail_url,
                'preference': preference,
            })

        def extract_names(key):
            entries_list = video.get(key)
            if not isinstance(entries_list, list):
                return
            entries = []
            for entry in entries_list:
                name = str_or_none(entry.get('name'))
                if name:
                    entries.append(name)
            return entries

        categories = extract_names('categories')
        tags = extract_names('tags')

        uploader = None
        info = {}

        webpage = self._download_webpage(
            'https://www.spankwire.com/_/video%s/' % video_id, video_id,
            fatal=False)
        if webpage:
            info = self._search_json_ld(webpage, video_id, default={})
            thumbnail_url = None
            if 'thumbnail' in info:
                thumbnail_url = url_or_none(info['thumbnail'])
                del info['thumbnail']
            if not thumbnail_url:
                thumbnail_url = self._og_search_thumbnail(webpage)
            if thumbnail_url:
                thumbnails.append({
                    'url': thumbnail_url,
                    'preference': 10,
                })
            uploader = self._html_search_regex(
                r'(?s)by\s*<a[^>]+\bclass=["\']uploaded__by[^>]*>(.+?)</a>',
                webpage, 'uploader', fatal=False)
            if not view_count:
                view_count = str_to_int(self._search_regex(
                    r'data-views=["\']([\d,.]+)', webpage, 'view count',
                    fatal=False))

        return merge_dicts({
            'id': video_id,
            'title': title,
            'description': video.get('description'),
            'duration': int_or_none(video.get('duration')),
            'thumbnails': thumbnails,
            'uploader': uploader,
            'uploader_id': str_or_none(video.get('userId')),
            'timestamp': int_or_none(video.get('time_approved_on')),
            'average_rating': float_or_none(video.get('rating')),
            'view_count': view_count,
            'comment_count': int_or_none(video.get('comments')),
            'age_limit': 18,
            'categories': categories,
            'tags': tags,
            'formats': formats,
        }, info)
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from .nexx import (
    NexxIE,
    NexxEmbedIE,
)
from .spiegeltv import SpiegeltvIE
from ..compat import compat_urlparse
from ..utils import (
    parse_duration,
    strip_or_none,
    unified_timestamp,
)


class SpiegelIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?spiegel\.de/video/[^/]*-(?P<id>[0-9]+)(?:-embed|-iframe)?(?:\.html)?(?:#.*)?$'
    _TESTS = [{
        'url': 'http://www.spiegel.de/video/vulkan-tungurahua-in-ecuador-ist-wieder-aktiv-video-1259285.html',
        'md5': 'b57399839d055fccfeb9a0455c439868',
        'info_dict': {
            'id': '563747',
            'ext': 'mp4',
            'title': 'Vulkanausbruch in Ecuador: Der "Feuerschlund" ist wieder aktiv',
            'description': 'md5:8029d8310232196eb235d27575a8b9f4',
            'duration': 49,
            'upload_date': '20130311',
            'timestamp': 1362994320,
        },
    }, {
        'url': 'http://www.spiegel.de/video/schach-wm-videoanalyse-des-fuenften-spiels-video-1309159.html',
        'md5': '5b6c2f4add9d62912ed5fc78a1faed80',
        'info_dict': {
            'id': '580988',
            'ext': 'mp4',
            'title': 'Schach-WM in der Videoanalyse: Carlsen nutzt die Fehlgriffe des Titelverteidigers',
            'description': 'md5:c2322b65e58f385a820c10fa03b2d088',
            'duration': 983,
            'upload_date': '20131115',
            'timestamp': 1384546642,
        },
    }, {
        'url': 'http://www.spiegel.de/video/astronaut-alexander-gerst-von-der-iss-station-beantwortet-fragen-video-1519126-embed.html',
        'md5': '97b91083a672d72976faa8433430afb9',
        'info_dict': {
            'id': '601883',
            'ext': 'mp4',
            'description': 'SPIEGEL ONLINE-Nutzer durften den deutschen Astronauten Alexander Gerst über sein Leben auf der ISS-Station befragen. Hier kommen seine Antworten auf die besten sechs Fragen.',
            'title': 'Fragen an Astronaut Alexander Gerst: "Bekommen Sie die Tageszeiten mit?"',
            'upload_date': '20140904',
            'timestamp': 1409834160,
        }
    }, {
        'url': 'http://www.spiegel.de/video/astronaut-alexander-gerst-von-der-iss-station-beantwortet-fragen-video-1519126-iframe.html',
        'only_matching': True,
    }, {
        # nexx video
        'url': 'http://www.spiegel.de/video/spiegel-tv-magazin-ueber-guellekrise-in-schleswig-holstein-video-99012776.html',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        metadata_url = 'http://www.spiegel.de/video/metadata/video-%s.json' % video_id
        handle = self._request_webpage(metadata_url, video_id)

        # 302 to spiegel.tv, like http://www.spiegel.de/video/der-film-zum-wochenende-die-wahrheit-ueber-maenner-video-99003272.html
        if SpiegeltvIE.suitable(handle.geturl()):
            return self.url_result(handle.geturl(), 'Spiegeltv')

        video_data = self._parse_json(self._webpage_read_content(
            handle, metadata_url, video_id), video_id)
        title = video_data['title']
        nexx_id = video_data['nexxOmniaId']
        domain_id = video_data.get('nexxOmniaDomain') or '748'

        return {
            '_type': 'url_transparent',
            'id': video_id,
            'url': 'nexx:%s:%s' % (domain_id, nexx_id),
            'title': title,
            'description': strip_or_none(video_data.get('teaser')),
            'duration': parse_duration(video_data.get('duration')),
            'timestamp': unified_timestamp(video_data.get('datum')),
            'ie_key': NexxIE.ie_key(),
        }


class SpiegelArticleIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?spiegel\.de/(?!video/)[^?#]*?-(?P<id>[0-9]+)\.html'
    IE_NAME = 'Spiegel:Article'
    IE_DESC = 'Articles on spiegel.de'
    _TESTS = [{
        'url': 'http://www.spiegel.de/sport/sonst/badminton-wm-die-randsportart-soll-populaerer-werden-a-987092.html',
        'info_dict': {
            'id': '1516455',
            'ext': 'mp4',
            'title': 'Faszination Badminton: Nennt es bloß nicht Federball',
            'description': 're:^Patrick Kämnitz gehört.{100,}',
            'upload_date': '20140825',
        },
    }, {
        'url': 'http://www.spiegel.de/wissenschaft/weltall/astronaut-alexander-gerst-antwortet-spiegel-online-lesern-a-989876.html',
        'info_dict': {},
        'playlist_count': 6,
    }, {
        # Nexx iFrame embed
        'url': 'http://www.spiegel.de/sptv/spiegeltv/spiegel-tv-ueber-schnellste-katapult-achterbahn-der-welt-taron-a-1137884.html',
        'info_dict': {
            'id': '161464',
            'ext': 'mp4',
            'title': 'Nervenkitzel Achterbahn',
            'alt_title': 'Karussellbauer in Deutschland',
            'description': 'md5:ffe7b1cc59a01f585e0569949aef73cc',
            'release_year': 2005,
            'creator': 'SPIEGEL TV',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 2761,
            'timestamp': 1394021479,
            'upload_date': '20140305',
        },
        'params': {
            'format': 'bestvideo',
            'skip_download': True,
        },
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        # Single video on top of the page
        video_link = self._search_regex(
            r'<a href="([^"]+)" onclick="return spOpenVideo\(this,', webpage,
            'video page URL', default=None)
        if video_link:
            video_url = compat_urlparse.urljoin(
                self.http_scheme() + '//spiegel.de/', video_link)
            return self.url_result(video_url)

        # Multiple embedded videos
        embeds = re.findall(
            r'<div class="vid_holder[0-9]+.*?</div>\s*.*?url\s*=\s*"([^"]+)"',
            webpage)
        entries = [
            self.url_result(compat_urlparse.urljoin(
                self.http_scheme() + '//spiegel.de/', embed_path))
            for embed_path in embeds]
        if embeds:
            return self.playlist_result(entries)

        return self.playlist_from_matches(
            NexxEmbedIE._extract_urls(webpage), ie=NexxEmbedIE.ie_key())
from __future__ import unicode_literals

from .common import InfoExtractor
from .nexx import NexxIE


class SpiegeltvIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?spiegel\.tv/videos/(?P<id>\d+)'
    _TEST = {
        'url': 'http://www.spiegel.tv/videos/161681-flug-mh370/',
        'only_matching': True,
    }

    def _real_extract(self, url):
        return self.url_result(
            'https://api.nexx.cloud/v3/748/videos/byid/%s'
            % self._match_id(url), ie=NexxIE.ie_key())
from __future__ import unicode_literals

from .mtv import MTVServicesInfoExtractor


class BellatorIE(MTVServicesInfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?bellator\.com/[^/]+/[\da-z]{6}(?:[/?#&]|$)'
    _TESTS = [{
        'url': 'http://www.bellator.com/fight/atwr7k/bellator-158-michael-page-vs-evangelista-cyborg',
        'info_dict': {
            'title': 'Michael Page vs. Evangelista Cyborg',
            'description': 'md5:0d917fc00ffd72dd92814963fc6cbb05',
        },
        'playlist_count': 3,
    }, {
        'url': 'http://www.bellator.com/video-clips/bw6k7n/bellator-158-foundations-michael-venom-page',
        'only_matching': True,
    }]

    _FEED_URL = 'http://www.bellator.com/feeds/mrss/'
    _GEO_COUNTRIES = ['US']

    def _extract_mgid(self, webpage):
        return self._extract_triforce_mgid(webpage)


class ParamountNetworkIE(MTVServicesInfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?paramountnetwork\.com/[^/]+/[\da-z]{6}(?:[/?#&]|$)'
    _TESTS = [{
        'url': 'http://www.paramountnetwork.com/episodes/j830qm/lip-sync-battle-joel-mchale-vs-jim-rash-season-2-ep-13',
        'info_dict': {
            'id': '37ace3a8-1df6-48be-85b8-38df8229e241',
            'ext': 'mp4',
            'title': 'Lip Sync Battle|April 28, 2016|2|209|Joel McHale Vs. Jim Rash|Act 1',
            'description': 'md5:a739ca8f978a7802f67f8016d27ce114',
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }]

    _FEED_URL = 'http://www.paramountnetwork.com/feeds/mrss/'
    _GEO_COUNTRIES = ['US']

    def _extract_mgid(self, webpage):
        root_data = self._parse_json(self._search_regex(
            r'window\.__DATA__\s*=\s*({.+})',
            webpage, 'data'), None)

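        # window.__DATA__ holds a nested component tree; walk it
        # MainContainer -> VideoPlayer and read the mgid URI from the player
        # props.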
        def find_sub_data(data, data_type):
            return next(c for c in data['children'] if c.get('type') == data_type)

        c = find_sub_data(find_sub_data(root_data, 'MainContainer'), 'VideoPlayer')
        return c['props']['media']['video']['config']['uri']
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import ExtractorError


class Sport5IE(InfoExtractor):
    _VALID_URL = r'https?://(?:www|vod)?\.sport5\.co\.il/.*\b(?:Vi|docID)=(?P<id>\d+)'
    _TESTS = [
        {
            'url': 'http://vod.sport5.co.il/?Vc=147&Vi=176331&Page=1',
            'info_dict': {
                'id': 's5-Y59xx1-GUh2',
                'ext': 'mp4',
                'title': 'ולנסיה-קורדובה 0:3',
                'description': 'אלקאסר, גאייה ופגולי סידרו לקבוצה של נונו ניצחון על קורדובה ואת המקום הראשון בליגה',
                'duration': 228,
                'categories': list,
            },
            'skip': 'Blocked outside of Israel',
        }, {
            'url': 'http://www.sport5.co.il/articles.aspx?FolderID=3075&docID=176372&lang=HE',
            'info_dict': {
                'id': 's5-SiXxx1-hKh2',
                'ext': 'mp4',
                'title': 'GOALS_CELTIC_270914.mp4',
                'description': '',
                'duration': 87,
                'categories': list,
            },
            'skip': 'Blocked outside of Israel',
        }
    ]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        media_id = mobj.group('id')

        webpage = self._download_webpage(url, media_id)

        video_id = self._html_search_regex(r'clipId=([\w-]+)', webpage, 'video id')

        metadata = self._download_xml(
            'http://sport5-metadata-rr-d.nsacdn.com/vod/vod/%s/HDS/metadata.xml' % video_id,
            video_id)

        error = metadata.find('./Error')
        if error is not None:
            raise ExtractorError(
                '%s returned error: %s - %s' % (
                    self.IE_NAME,
                    error.find('./Name').text,
                    error.find('./Description').text),
                expected=True)

        title = metadata.find('./Title').text
        description = metadata.find('./Description').text
        duration = int(metadata.find('./Duration').text)

        posters_el = metadata.find('./PosterLinks')
        thumbnails = [{
            'url': thumbnail.text,
            'width': int(thumbnail.get('width')),
            'height': int(thumbnail.get('height')),
        } for thumbnail in posters_el.findall('./PosterIMG')] if posters_el is not None else []

        categories_el = metadata.find('./Categories')
        categories = [
            cat.get('name') for cat in categories_el.findall('./Category')
        ] if categories_el is not None else []

        formats = [{
            'url': fmt.text,
            'ext': 'mp4',
            'vbr': int(fmt.get('bitrate')),
            'width': int(fmt.get('width')),
            'height': int(fmt.get('height')),
        } for fmt in metadata.findall('./PlaybackLinks/FileURL')]
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnails': thumbnails,
            'duration': duration,
            'categories': categories,
            'formats': formats,
        }
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    determine_ext,
    int_or_none,
    js_to_json,
    merge_dicts,
)


class SportBoxIE(InfoExtractor):
    _VALID_URL = r'https?://(?:news\.sportbox|matchtv)\.ru/vdl/player(?:/[^/]+/|\?.*?\bn?id=)(?P<id>\d+)'
    _TESTS = [{
        'url': 'http://news.sportbox.ru/vdl/player/ci/211355',
        'info_dict': {
            'id': '109158',
            'ext': 'mp4',
            'title': 'В Новороссийске прошел детский турнир «Поле славы боевой»',
            'description': 'В Новороссийске прошел детский турнир «Поле славы боевой»',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 292,
            'view_count': int,
            'timestamp': 1426237001,
            'upload_date': '20150313',
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }, {
        'url': 'http://news.sportbox.ru/vdl/player?nid=370908&only_player=1&autostart=false&playeri=2&height=340&width=580',
        'only_matching': True,
    }, {
        'url': 'https://news.sportbox.ru/vdl/player/media/193095',
        'only_matching': True,
    }, {
        'url': 'https://news.sportbox.ru/vdl/player/media/109158',
        'only_matching': True,
    }, {
        'url': 'https://matchtv.ru/vdl/player/media/109158',
        'only_matching': True,
    }]

    @staticmethod
    def _extract_urls(webpage):
        return re.findall(
            r'<iframe[^>]+src="(https?://(?:news\.sportbox|matchtv)\.ru/vdl/player[^"]+)"',
            webpage)

    def _real_extract(self, url):
        video_id = self._match_id(url)

        webpage = self._download_webpage(url, video_id)

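        # playerOptions.sources is a JavaScript literal rather than strict
        # JSON, hence the js_to_json transform before parsing.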
        sources = self._parse_json(
            self._search_regex(
                r'(?s)playerOptions\.sources(?:WithRes)?\s*=\s*(\[.+?\])\s*;\s*\n',
                webpage, 'sources'),
            video_id, transform_source=js_to_json)

        formats = []
        for source in sources:
            src = source.get('src')
            if not src:
                continue
            if determine_ext(src) == 'm3u8':
                formats.extend(self._extract_m3u8_formats(
                    src, video_id, 'mp4', entry_protocol='m3u8_native',
                    m3u8_id='hls', fatal=False))
            else:
                formats.append({
                    'url': src,
                })
        self._sort_formats(formats)

        player = self._parse_json(
            self._search_regex(
                r'(?s)playerOptions\s*=\s*({.+?})\s*;\s*\n', webpage,
                'player options', default='{}'),
            video_id, transform_source=js_to_json)
        media_id = player['mediaId']

        info = self._search_json_ld(webpage, media_id, default={})

        view_count = int_or_none(self._search_regex(
            r'Просмотров\s*:\s*(\d+)', webpage, 'view count', default=None))

        return merge_dicts(info, {
            'id': media_id,
            'title': self._og_search_title(webpage, default=None) or media_id,
            'thumbnail': player.get('poster'),
            'duration': int_or_none(player.get('duration')),
            'view_count': view_count,
            'formats': formats,
        })
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    parse_iso8601,
    sanitized_Request,
)


class SportDeutschlandIE(InfoExtractor):
    _VALID_URL = r'https?://sportdeutschland\.tv/(?P<sport>[^/?#]+)/(?P<id>[^?#/]+)(?:$|[?#])'
    _TESTS = [{
        'url': 'https://sportdeutschland.tv/badminton/re-live-deutsche-meisterschaften-2020-halbfinals?playlistId=0',
        'info_dict': {
            'id': 're-live-deutsche-meisterschaften-2020-halbfinals',
            'ext': 'mp4',
            'title': 're:Re-live: Deutsche Meisterschaften 2020.*Halbfinals',
            'categories': ['Badminton-Deutschland'],
            'view_count': int,
            'thumbnail': r're:^https?://.*\.(?:jpg|png)$',
            'timestamp': int,
            'upload_date': '20200201',
            'description': 're:.*',  # meaningless description for THIS video
        },
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        sport_id = mobj.group('sport')

        api_url = 'https://proxy.vidibusdynamic.net/ssl/backend.sportdeutschland.tv/api/permalinks/%s/%s?access_token=true' % (
            sport_id, video_id)
        req = sanitized_Request(api_url, headers={
            'Accept': 'application/vnd.vidibus.v2.html+json',
            'Referer': url,
        })
        data = self._download_json(req, video_id)

        asset = data['asset']
        categories = [data['section']['title']]

        formats = []
        smil_url = asset['video']
        if '.smil' in smil_url:
            m3u8_url = smil_url.replace('.smil', '.m3u8')
            formats.extend(
                self._extract_m3u8_formats(m3u8_url, video_id, ext='mp4'))

            smil_doc = self._download_xml(
                smil_url, video_id, note='Downloading SMIL metadata')
            base_url_el = smil_doc.find('./head/meta')
            # ElementTree elements with no children are falsy, so test
            # against None explicitly rather than relying on truthiness.
            base_url = base_url_el.get('base') if base_url_el is not None else None
            formats.extend([{
                'format_id': 'rtmp',
                'url': base_url or n.attrib['src'],
                'play_path': n.attrib['src'],
                'ext': 'flv',
                'preference': -100,
                'format_note': 'Seems to fail at example stream',
            } for n in smil_doc.findall('./body/video')])
        else:
            formats.append({'url': smil_url})

        self._sort_formats(formats)

        return {
            'id': video_id,
            'formats': formats,
            'title': asset['title'],
            'thumbnail': asset.get('image'),
            'description': asset.get('teaser'),
            'duration': asset.get('duration'),
            'categories': categories,
            'view_count': asset.get('views'),
            'rtmp_live': asset.get('live'),
            'timestamp': parse_iso8601(asset.get('date')),
        }
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    int_or_none,
    xpath_attr,
    xpath_text,
    xpath_element,
    unescapeHTML,
    unified_timestamp,
)


class SpringboardPlatformIE(InfoExtractor):
    _VALID_URL = r'''(?x)
                    https?://
                        cms\.springboardplatform\.com/
                        (?:
                            (?:previews|embed_iframe)/(?P<index>\d+)/video/(?P<id>\d+)|
                            xml_feeds_advanced/index/(?P<index_2>\d+)/rss3/(?P<id_2>\d+)
                        )
                    '''
    _TESTS = [{
        'url': 'http://cms.springboardplatform.com/previews/159/video/981017/0/0/1',
        'md5': '5c3cb7b5c55740d482561099e920f192',
        'info_dict': {
            'id': '981017',
            'ext': 'mp4',
            'title': 'Redman "BUD like YOU" "Usher Good Kisser" REMIX',
            'description': 'Redman "BUD like YOU" "Usher Good Kisser" REMIX',
            'thumbnail': r're:^https?://.*\.jpg$',
            'timestamp': 1409132328,
            'upload_date': '20140827',
            'duration': 193,
        },
    }, {
        'url': 'http://cms.springboardplatform.com/embed_iframe/159/video/981017/rab007/rapbasement.com/1/1',
        'only_matching': True,
    }, {
        'url': 'http://cms.springboardplatform.com/embed_iframe/20/video/1731611/ki055/kidzworld.com/10',
        'only_matching': True,
    }, {
        'url': 'http://cms.springboardplatform.com/xml_feeds_advanced/index/159/rss3/981017/0/0/1/',
        'only_matching': True,
    }]

    @staticmethod
    def _extract_urls(webpage):
        return [
            mobj.group('url')
            for mobj in re.finditer(
                r'<iframe\b[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?//cms\.springboardplatform\.com/embed_iframe/\d+/video/\d+.*?)\1',
                webpage)]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id') or mobj.group('id_2')
        index = mobj.group('index') or mobj.group('index_2')

        video = self._download_xml(
            'http://cms.springboardplatform.com/xml_feeds_advanced/index/%s/rss3/%s'
            % (index, video_id), video_id)

        item = xpath_element(video, './/item', 'item', fatal=True)

        content = xpath_element(
            item, './{http://search.yahoo.com/mrss/}content', 'content',
            fatal=True)
        title = unescapeHTML(xpath_text(item, './title', 'title', fatal=True))

        video_url = content.attrib['url']

        if 'error_video.mp4' in video_url:
            raise ExtractorError(
                'Video %s no longer exists' % video_id, expected=True)

        duration = int_or_none(content.get('duration'))
        tbr = int_or_none(content.get('bitrate'))
        filesize = int_or_none(content.get('fileSize'))
        width = int_or_none(content.get('width'))
        height = int_or_none(content.get('height'))

        description = unescapeHTML(xpath_text(
            item, './description', 'description'))
        thumbnail = xpath_attr(
            item, './{http://search.yahoo.com/mrss/}thumbnail', 'url',
            'thumbnail')

        timestamp = unified_timestamp(xpath_text(
            item, './{http://cms.springboardplatform.com/namespaces.html}created',
            'timestamp'))

        formats = [{
            'url': video_url,
            'format_id': 'http',
            'tbr': tbr,
            'filesize': filesize,
            'width': width,
            'height': height,
        }]

        m3u8_format = formats[0].copy()
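        # Derive an HLS variant from the progressive URL: swap the 'cdn.'
        # host for 'hls.' and append '.m3u8', e.g. (host pattern assumed):
        #   http://cdn.springboardplatform.com/storage/x.mp4
        #     -> http://hls.springboardplatform.com/storage/x.mp4.m3u8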
        m3u8_format.update({
            'url': re.sub(r'(https?://)cdn\.', r'\1hls.', video_url) + '.m3u8',
            'ext': 'mp4',
            'format_id': 'hls',
            'protocol': 'm3u8_native',
        })
        formats.append(m3u8_format)

        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'timestamp': timestamp,
            'duration': duration,
            'formats': formats,
        }
# coding: utf-8
from __future__ import unicode_literals

from .adobepass import AdobePassIE
from ..utils import (
    extract_attributes,
    update_url_query,
    smuggle_url,
)


class SproutIE(AdobePassIE):
    _VALID_URL = r'https?://(?:www\.)?sproutonline\.com/watch/(?P<id>[^/?#]+)'
    _TEST = {
        'url': 'http://www.sproutonline.com/watch/cowboy-adventure',
        'md5': '74bf14128578d1e040c3ebc82088f45f',
        'info_dict': {
            'id': '9dexnwtmh8_X',
            'ext': 'mp4',
            'title': 'A Cowboy Adventure',
            'description': 'Ruff-Ruff, Tweet and Dave get to be cowboys for the day at Six Cow Corral.',
            'timestamp': 1437758640,
            'upload_date': '20150724',
            'uploader': 'NBCU-SPROUT-NEW',
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        video_component = self._search_regex(
            r'(?s)(<div[^>]+data-component="video"[^>]*?>)',
            webpage, 'video component', default=None)
        if video_component:
            options = self._parse_json(extract_attributes(
                video_component)['data-options'], video_id)
            theplatform_url = options['video']
            query = {
                'mbr': 'true',
                'manifest': 'm3u',
            }
            if options.get('protected'):
                query['auth'] = self._extract_mvpd_auth(url, options['pid'], 'sprout', 'sprout')
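            # force_smil_url makes the ThePlatform extractor treat this URL
            # as a direct SMIL manifest instead of resolving it again.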
            theplatform_url = smuggle_url(update_url_query(
                theplatform_url, query), {'force_smil_url': True})
        else:
            iframe = self._search_regex(
                r'(<iframe[^>]+id="sproutVideoIframe"[^>]*?>)',
                webpage, 'iframe')
            theplatform_url = extract_attributes(iframe)['src']

        return self.url_result(theplatform_url, 'ThePlatform')
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..compat import compat_urllib_parse_urlparse
from ..utils import (
    ExtractorError,
    parse_iso8601,
    qualities,
)


class SRGSSRIE(InfoExtractor):
    _VALID_URL = r'(?:https?://tp\.srgssr\.ch/p(?:/[^/]+)+\?urn=urn|srgssr):(?P<bu>srf|rts|rsi|rtr|swi):(?:[^:]+:)?(?P<type>video|audio):(?P<id>[0-9a-f\-]{36}|\d+)'
    _GEO_BYPASS = False
    _GEO_COUNTRIES = ['CH']

    _ERRORS = {
        'AGERATING12': 'To protect children under the age of 12, this video is only available between 8 p.m. and 6 a.m.',
        'AGERATING18': 'To protect children under the age of 18, this video is only available between 11 p.m. and 5 a.m.',
        # 'ENDDATE': 'For legal reasons, this video was only available for a specified period of time.',
        'GEOBLOCK': 'For legal reasons, this video is only available in Switzerland.',
        'LEGAL': 'The video cannot be transmitted for legal reasons.',
        'STARTDATE': 'This video is not yet available. Please try again later.',
    }

    def _get_tokenized_src(self, url, video_id, format_id):
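        # Akamai token flow, sketched (the authparams content shown is an
        # assumption based on the usual Akamai form):
        #   GET http://tp.srgssr.ch/akahd/token?acl=/<seg1>/<seg2>/*
        #   -> {"token": {"authparams": "exp=...~acl=...~hmac=..."}}
        # The returned query string is appended to the stream URL.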
        sp = compat_urllib_parse_urlparse(url).path.split('/')
        token = self._download_json(
            'http://tp.srgssr.ch/akahd/token?acl=/%s/%s/*' % (sp[1], sp[2]),
            video_id, 'Downloading %s token' % format_id, fatal=False) or {}
        auth_params = token.get('token', {}).get('authparams')
        if auth_params:
            url += '?' + auth_params
        return url

    def get_media_data(self, bu, media_type, media_id):
        media_data = self._download_json(
            'http://il.srgssr.ch/integrationlayer/1.0/ue/%s/%s/play/%s.json' % (bu, media_type, media_id),
            media_id)[media_type.capitalize()]

        if media_data.get('block') and media_data['block'] in self._ERRORS:
            message = self._ERRORS[media_data['block']]
            if media_data['block'] == 'GEOBLOCK':
                self.raise_geo_restricted(
                    msg=message, countries=self._GEO_COUNTRIES)
            raise ExtractorError(
                '%s said: %s' % (self.IE_NAME, message), expected=True)

        return media_data

    def _real_extract(self, url):
        bu, media_type, media_id = re.match(self._VALID_URL, url).groups()

        media_data = self.get_media_data(bu, media_type, media_id)

        metadata = media_data['AssetMetadatas']['AssetMetadata'][0]
        title = metadata['title']
        description = metadata.get('description')
        created_date = media_data.get('createdDate') or metadata.get('createdDate')
        timestamp = parse_iso8601(created_date)

        thumbnails = [{
            'id': image.get('id'),
            'url': image['url'],
        } for image in media_data.get('Image', {}).get('ImageRepresentations', {}).get('ImageRepresentation', [])]

        preference = qualities(['LQ', 'MQ', 'SD', 'HQ', 'HD'])
        formats = []
        for source in media_data.get('Playlists', {}).get('Playlist', []) + media_data.get('Downloads', {}).get('Download', []):
            protocol = source.get('@protocol')
            for asset in source['url']:
                asset_url = asset['text']
                quality = asset['@quality']
                format_id = '%s-%s' % (protocol, quality)
                if protocol.startswith('HTTP-HDS') or protocol.startswith('HTTP-HLS'):
                    asset_url = self._get_tokenized_src(asset_url, media_id, format_id)
                    if protocol.startswith('HTTP-HDS'):
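                        # Akamai HDS endpoints expect an 'hdcore' query
                        # parameter, appended below with '?' or '&' as needed.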
                        formats.extend(self._extract_f4m_formats(
                            asset_url + ('?' if '?' not in asset_url else '&') + 'hdcore=3.4.0',
                            media_id, f4m_id=format_id, fatal=False))
                    elif protocol.startswith('HTTP-HLS'):
                        formats.extend(self._extract_m3u8_formats(
                            asset_url, media_id, 'mp4', 'm3u8_native',
                            m3u8_id=format_id, fatal=False))
                else:
                    formats.append({
                        'format_id': format_id,
                        'url': asset_url,
                        'preference': preference(quality),
                        'ext': 'flv' if protocol == 'RTMP' else None,
                    })
        self._sort_formats(formats)

        return {
            'id': media_id,
            'title': title,
            'description': description,
            'timestamp': timestamp,
            'thumbnails': thumbnails,
            'formats': formats,
        }


class SRGSSRPlayIE(InfoExtractor):
    IE_DESC = 'srf.ch, rts.ch, rsi.ch, rtr.ch and swissinfo.ch play sites'
    _VALID_URL = r'''(?x)
                    https?://
                        (?:(?:www|play)\.)?
                        (?P<bu>srf|rts|rsi|rtr|swissinfo)\.ch/play/(?:tv|radio)/
                        (?:
                            [^/]+/(?P<type>video|audio)/[^?]+|
                            popup(?P<type_2>video|audio)player
                        )
                        \?.*?\b(?:id=|urn=urn:[^:]+:video:)(?P<id>[0-9a-f\-]{36}|\d+)
                    '''

    _TESTS = [{
        'url': 'http://www.srf.ch/play/tv/10vor10/video/snowden-beantragt-asyl-in-russland?id=28e1a57d-5b76-4399-8ab3-9097f071e6c5',
        'md5': 'da6b5b3ac9fa4761a942331cef20fcb3',
        'info_dict': {
            'id': '28e1a57d-5b76-4399-8ab3-9097f071e6c5',
            'ext': 'mp4',
            'upload_date': '20130701',
            'title': 'Snowden beantragt Asyl in Russland',
            'timestamp': 1372713995,
        }
    }, {
        # No Speichern (Save) button
        'url': 'http://www.srf.ch/play/tv/top-gear/video/jaguar-xk120-shadow-und-tornado-dampflokomotive?id=677f5829-e473-4823-ac83-a1087fe97faa',
        'md5': '0a274ce38fda48c53c01890651985bc6',
        'info_dict': {
            'id': '677f5829-e473-4823-ac83-a1087fe97faa',
            'ext': 'flv',
            'upload_date': '20130710',
            'title': 'Jaguar XK120, Shadow und Tornado-Dampflokomotive',
            'description': 'md5:88604432b60d5a38787f152dec89cd56',
            'timestamp': 1373493600,
        },
    }, {
        'url': 'http://www.rtr.ch/play/radio/actualitad/audio/saira-tujetsch-tuttina-cuntinuar-cun-sedrun-muster-turissem?id=63cb0778-27f8-49af-9284-8c7a8c6d15fc',
        'info_dict': {
            'id': '63cb0778-27f8-49af-9284-8c7a8c6d15fc',
            'ext': 'mp3',
            'upload_date': '20151013',
            'title': 'Saira: Tujetsch - tuttina cuntinuar cun Sedrun Mustér Turissem',
            'timestamp': 1444750398,
        },
        'params': {
            # rtmp download
            'skip_download': True,
        },
    }, {
        'url': 'http://www.rts.ch/play/tv/-/video/le-19h30?id=6348260',
        'md5': '67a2a9ae4e8e62a68d0e9820cc9782df',
        'info_dict': {
            'id': '6348260',
            'display_id': '6348260',
            'ext': 'mp4',
            'duration': 1796,
            'title': 'Le 19h30',
            'description': '',
            'uploader': '19h30',
            'upload_date': '20141201',
            'timestamp': 1417458600,
            'thumbnail': r're:^https?://.*\.image',
            'view_count': int,
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        }
    }, {
        'url': 'https://www.srf.ch/play/tv/popupvideoplayer?id=c4dba0ca-e75b-43b2-a34f-f708a4932e01',
        'only_matching': True,
    }, {
        'url': 'https://www.srf.ch/play/tv/10vor10/video/snowden-beantragt-asyl-in-russland?urn=urn:srf:video:28e1a57d-5b76-4399-8ab3-9097f071e6c5',
        'only_matching': True,
    }, {
        'url': 'https://www.rts.ch/play/tv/19h30/video/le-19h30?urn=urn:rts:video:6348260',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        bu = mobj.group('bu')
        media_type = mobj.group('type') or mobj.group('type_2')
        media_id = mobj.group('id')
        # other info can be extracted from url + '&layout=json'
        return self.url_result('srgssr:%s:%s:%s' % (bu[:3], media_type, media_id), 'SRGSSR')
# coding: utf-8
from __future__ import unicode_literals

from .ard import ARDMediathekBaseIE
from ..utils import (
    ExtractorError,
    get_element_by_attribute,
)


class SRMediathekIE(ARDMediathekBaseIE):
    IE_NAME = 'sr:mediathek'
    IE_DESC = 'Saarländischer Rundfunk'
    _VALID_URL = r'https?://sr-mediathek(?:\.sr-online)?\.de/index\.php\?.*?&id=(?P<id>[0-9]+)'

    _TESTS = [{
        'url': 'http://sr-mediathek.sr-online.de/index.php?seite=7&id=28455',
        'info_dict': {
            'id': '28455',
            'ext': 'mp4',
            'title': 'sportarena (26.10.2014)',
            'description': 'Ringen: KSV Köllerbach gegen Aachen-Walheim; Frauen-Fußball: 1. FC Saarbrücken gegen Sindelfingen; Motorsport: Rallye in Losheim; dazu: Interview mit Timo Bernhard; Turnen: TG Saar; Reitsport: Deutscher Voltigier-Pokal; Badminton: Interview mit Michael Fuchs ',
            'thumbnail': r're:^https?://.*\.jpg$',
        },
        'skip': 'no longer available',
    }, {
        'url': 'http://sr-mediathek.sr-online.de/index.php?seite=7&id=37682',
        'info_dict': {
            'id': '37682',
            'ext': 'mp4',
            'title': 'Love, Cakes and Rock\'n\'Roll',
            'description': 'md5:18bf9763631c7d326c22603681e1123d',
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
    }, {
        'url': 'http://sr-mediathek.de/index.php?seite=7&id=7480',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        if '>Der gew&uuml;nschte Beitrag ist leider nicht mehr verf&uuml;gbar.<' in webpage:
            raise ExtractorError('Video %s is no longer available' % video_id, expected=True)

        media_collection_url = self._search_regex(
            r'data-mediacollection-ardplayer="([^"]+)"', webpage, 'media collection url')
        info = self._extract_media_info(media_collection_url, webpage, video_id)
        info.update({
            'id': video_id,
            'title': get_element_by_attribute('class', 'ardplayer-title', webpage),
            'description': self._og_search_description(webpage),
            'thumbnail': self._og_search_thumbnail(webpage),
        })
        return info
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    orderedSet,
    unescapeHTML,
)


class StanfordOpenClassroomIE(InfoExtractor):
    IE_NAME = 'stanfordoc'
    IE_DESC = 'Stanford Open ClassRoom'
    _VALID_URL = r'https?://openclassroom\.stanford\.edu(?P<path>/?|(/MainFolder/(?:HomePage|CoursePage|VideoPage)\.php([?]course=(?P<course>[^&]+)(&video=(?P<video>[^&]+))?(&.*)?)?))$'
    _TEST = {
        'url': 'http://openclassroom.stanford.edu/MainFolder/VideoPage.php?course=PracticalUnix&video=intro-environment&speed=100',
        'md5': '544a9468546059d4e80d76265b0443b8',
        'info_dict': {
            'id': 'PracticalUnix_intro-environment',
            'ext': 'mp4',
            'title': 'Intro Environment',
        }
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)

        if mobj.group('course') and mobj.group('video'):  # A specific video
            course = mobj.group('course')
            video = mobj.group('video')
            info = {
                'id': course + '_' + video,
                'uploader': None,
                'upload_date': None,
            }

            baseUrl = 'http://openclassroom.stanford.edu/MainFolder/courses/' + course + '/videos/'
            xmlUrl = baseUrl + video + '.xml'
            mdoc = self._download_xml(xmlUrl, info['id'])
            try:
                info['title'] = mdoc.findall('./title')[0].text
                info['url'] = baseUrl + mdoc.findall('./videoFile')[0].text
            except IndexError:
                raise ExtractorError('Invalid metadata XML file')
            return info
        elif mobj.group('course'):  # A course page
            course = mobj.group('course')
            info = {
                'id': course,
                '_type': 'playlist',
                'uploader': None,
                'upload_date': None,
            }

            coursepage = self._download_webpage(
                url, info['id'],
                note='Downloading course info page',
                errnote='Unable to download course info page')

            info['title'] = self._html_search_regex(
                r'<h1>([^<]+)</h1>', coursepage, 'title', default=info['id'])

            info['description'] = self._html_search_regex(
                r'(?s)<description>([^<]+)</description>',
                coursepage, 'description', fatal=False)

            links = orderedSet(re.findall(r'<a href="(VideoPage\.php\?[^"]+)">', coursepage))
            info['entries'] = [self.url_result(
                'http://openclassroom.stanford.edu/MainFolder/%s' % unescapeHTML(l)
            ) for l in links]
            return info
        else:  # Root page
            info = {
                'id': 'Stanford OpenClassroom',
                '_type': 'playlist',
                'uploader': None,
                'upload_date': None,
            }
            info['title'] = info['id']

            rootURL = 'http://openclassroom.stanford.edu/MainFolder/HomePage.php'
            rootpage = self._download_webpage(rootURL, info['id'],
                                              errnote='Unable to download course info page')

            links = orderedSet(re.findall(r'<a href="(CoursePage\.php\?[^"]+)">', rootpage))
            info['entries'] = [self.url_result(
                'http://openclassroom.stanford.edu/MainFolder/%s' % unescapeHTML(l)
            ) for l in links]
            return info
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    extract_attributes,
    ExtractorError,
    get_element_by_class,
    js_to_json,
)


class SteamIE(InfoExtractor):
    _VALID_URL = r"""(?x)
        https?://store\.steampowered\.com/
            (agecheck/)?
            (?P<urltype>video|app)/ # If the page is only for videos or for a game
            (?P<gameID>\d+)/?
            (?P<videoID>\d*)(?P<extra>\??) # For urltype == video we sometimes get the videoID
        |
        https?://(?:www\.)?steamcommunity\.com/sharedfiles/filedetails/\?id=(?P<fileID>[0-9]+)
    """
    _VIDEO_PAGE_TEMPLATE = 'http://store.steampowered.com/video/%s/'
    _AGECHECK_TEMPLATE = 'http://store.steampowered.com/agecheck/video/%s/?snr=1_agecheck_agecheck__age-gate&ageDay=1&ageMonth=January&ageYear=1970'
    _TESTS = [{
        'url': 'http://store.steampowered.com/video/105600/',
        'playlist': [
            {
                'md5': '6a294ee0c4b1f47f5bb76a65e31e3592',
                'info_dict': {
                    'id': '2040428',
                    'ext': 'mp4',
                    'title': 'Terraria 1.3 Trailer',
                    'playlist_index': 1,
                }
            },
            {
                'md5': '911672b20064ca3263fa89650ba5a7aa',
                'info_dict': {
                    'id': '2029566',
                    'ext': 'mp4',
                    'title': 'Terraria 1.2 Trailer',
                    'playlist_index': 2,
                }
            }
        ],
        'info_dict': {
            'id': '105600',
            'title': 'Terraria',
        },
        'params': {
            'playlistend': 2,
        }
    }, {
        'url': 'http://steamcommunity.com/sharedfiles/filedetails/?id=242472205',
        'info_dict': {
            'id': 'X8kpJBlzD2E',
            'ext': 'mp4',
            'upload_date': '20140617',
            'title': 'FRONTIERS - Trapping',
            'description': 'md5:bf6f7f773def614054089e5769c12a6e',
            'uploader': 'AAD Productions',
            'uploader_id': 'AtomicAgeDogGames',
        }
    }]

    def _real_extract(self, url):
        m = re.match(self._VALID_URL, url)
        fileID = m.group('fileID')
        if fileID:
            videourl = url
            playlist_id = fileID
        else:
            gameID = m.group('gameID')
            playlist_id = gameID
            videourl = self._VIDEO_PAGE_TEMPLATE % playlist_id

        self._set_cookie('steampowered.com', 'mature_content', '1')

        webpage = self._download_webpage(videourl, playlist_id)

        if re.search('<h2>Please enter your birth date to continue:</h2>', webpage) is not None:
            videourl = self._AGECHECK_TEMPLATE % playlist_id
            self.report_age_confirmation()
            webpage = self._download_webpage(videourl, playlist_id)

        flash_vars = self._parse_json(self._search_regex(
            r'(?s)rgMovieFlashvars\s*=\s*({.+?});', webpage,
            'flash vars'), playlist_id, js_to_json)
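        # rgMovieFlashvars maps 'movie_<id>' keys to per-movie dicts; the
        # lookups below imply a shape like (assumed, not verified):
        #   {'movie_2040428': {'MOVIE_NAME': 'Terraria+1.3+Trailer',
        #                      'FILENAME': 'http://.../movie.flv'}}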

        playlist_title = None
        entries = []
        if fileID:
            playlist_title = get_element_by_class('workshopItemTitle', webpage)
            for movie in flash_vars.values():
                if not movie:
                    continue
                youtube_id = movie.get('YOUTUBE_VIDEO_ID')
                if not youtube_id:
                    continue
                entries.append({
                    '_type': 'url',
                    'url': youtube_id,
                    'ie_key': 'Youtube',
                })
        else:
            playlist_title = get_element_by_class('apphub_AppName', webpage)
            for movie_id, movie in flash_vars.items():
                if not movie:
                    continue
                video_id = self._search_regex(r'movie_(\d+)', movie_id, 'video id', fatal=False)
                title = movie.get('MOVIE_NAME')
                if not title or not video_id:
                    continue
                entry = {
                    'id': video_id,
                    'title': title.replace('+', ' '),
                }
                formats = []
                flv_url = movie.get('FILENAME')
                if flv_url:
                    formats.append({
                        'format_id': 'flv',
                        'url': flv_url,
                    })
                highlight_element = self._search_regex(
                    r'(<div[^>]+id="highlight_movie_%s"[^>]+>)' % video_id,
                    webpage, 'highlight element', fatal=False)
                if highlight_element:
                    highlight_attribs = extract_attributes(highlight_element)
                    if highlight_attribs:
                        entry['thumbnail'] = highlight_attribs.get('data-poster')
                        for quality in ('', '-hd'):
                            for ext in ('webm', 'mp4'):
                                video_url = highlight_attribs.get('data-%s%s-source' % (ext, quality))
                                if video_url:
                                    formats.append({
                                        'format_id': ext + quality,
                                        'url': video_url,
                                    })
                if not formats:
                    continue
                entry['formats'] = formats
                entries.append(entry)
        if not entries:
            raise ExtractorError('Could not find any videos')

        return self.playlist_result(entries, playlist_id, playlist_title)
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    determine_ext,
    int_or_none,
    js_to_json,
    unescapeHTML,
)


class StitcherIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?stitcher\.com/podcast/(?:[^/]+/)+e/(?:(?P<display_id>[^/#?&]+?)-)?(?P<id>\d+)(?:[/#?&]|$)'
    _TESTS = [{
        'url': 'http://www.stitcher.com/podcast/the-talking-machines/e/40789481?autoplay=true',
        'md5': '391dd4e021e6edeb7b8e68fbf2e9e940',
        'info_dict': {
            'id': '40789481',
            'ext': 'mp3',
            'title': 'Machine Learning Mastery and Cancer Clusters',
            'description': 'md5:55163197a44e915a14a1ac3a1de0f2d3',
            'duration': 1604,
            'thumbnail': r're:^https?://.*\.jpg',
        },
    }, {
        'url': 'http://www.stitcher.com/podcast/panoply/vulture-tv/e/the-rare-hourlong-comedy-plus-40846275?autoplay=true',
        'info_dict': {
            'id': '40846275',
            'display_id': 'the-rare-hourlong-comedy-plus',
            'ext': 'mp3',
            'title': "The CW's 'Crazy Ex-Girlfriend'",
            'description': 'md5:04f1e2f98eb3f5cbb094cea0f9e19b17',
            'duration': 2235,
            'thumbnail': r're:^https?://.*\.jpg',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # escaped title
        'url': 'http://www.stitcher.com/podcast/marketplace-on-stitcher/e/40910226?autoplay=true',
        'only_matching': True,
    }, {
        'url': 'http://www.stitcher.com/podcast/panoply/getting-in/e/episode-2a-how-many-extracurriculars-should-i-have-40876278?autoplay=true',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        audio_id = mobj.group('id')
        display_id = mobj.group('display_id') or audio_id

        webpage = self._download_webpage(url, display_id)

        episode = self._parse_json(
            js_to_json(self._search_regex(
                r'(?s)var\s+stitcher(?:Config)?\s*=\s*({.+?});\n', webpage, 'episode config')),
            display_id)['config']['episode']
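        # The page embeds "var stitcher(Config) = {...}"; the episode record
        # is expected (from the lookups below) to carry 'title',
        # 'episodeURL', 'duration' and 'episodeImage'.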

        title = unescapeHTML(episode['title'])
        formats = [{
            'url': episode[episode_key],
            'ext': determine_ext(episode[episode_key]) or 'mp3',
            'vcodec': 'none',
        } for episode_key in ('episodeURL',) if episode.get(episode_key)]
        description = self._search_regex(
            r'Episode Info:\s*</span>([^<]+)<', webpage, 'description', fatal=False)
        duration = int_or_none(episode.get('duration'))
        thumbnail = episode.get('episodeImage')

        return {
            'id': audio_id,
            'display_id': display_id,
            'title': title,
            'description': description,
            'duration': duration,
            'thumbnail': thumbnail,
            'formats': formats,
        }
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    float_or_none,
    int_or_none,
)


class StreamableIE(InfoExtractor):
    _VALID_URL = r'https?://streamable\.com/(?:[es]/)?(?P<id>\w+)'
    _TESTS = [
        {
            'url': 'https://streamable.com/dnd1',
            'md5': '3e3bc5ca088b48c2d436529b64397fef',
            'info_dict': {
                'id': 'dnd1',
                'ext': 'mp4',
                'title': 'Mikel Oiarzabal scores to make it 0-3 for La Real against Espanyol',
                'thumbnail': r're:https?://.*\.jpg$',
                'uploader': 'teabaker',
                'timestamp': 1454964157.35115,
                'upload_date': '20160208',
                'duration': 61.516,
                'view_count': int,
            }
        },
        # older video without bitrate, width/height, etc. info
        {
            'url': 'https://streamable.com/moo',
            'md5': '2cf6923639b87fba3279ad0df3a64e73',
            'info_dict': {
                'id': 'moo',
                'ext': 'mp4',
                'title': '"Please don\'t eat me!"',
                'thumbnail': r're:https?://.*\.jpg$',
                'timestamp': 1426115495,
                'upload_date': '20150311',
                'duration': 12,
                'view_count': int,
            }
        },
        {
            'url': 'https://streamable.com/e/dnd1',
            'only_matching': True,
        },
        {
            'url': 'https://streamable.com/s/okkqk/drxjds',
            'only_matching': True,
        }
    ]

    @staticmethod
    def _extract_url(webpage):
        mobj = re.search(
            r'<iframe[^>]+src=(?P<q1>[\'"])(?P<src>(?:https?:)?//streamable\.com/(?:(?!\1).+))(?P=q1)',
            webpage)
        if mobj:
            return mobj.group('src')

    def _real_extract(self, url):
        video_id = self._match_id(url)

        # Note: using the ajax API, since the public Streamable API sometimes
        # fails to return video info such as the title and doesn't include
        # the video duration
        video = self._download_json(
            'https://ajax.streamable.com/videos/%s' % video_id, video_id)

        # Status codes:
        # 0 The video is being uploaded
        # 1 The video is being processed
        # 2 The video has at least one file ready
        # 3 The video is unavailable due to an error
        status = video.get('status')
        if status != 2:
            raise ExtractorError(
                'This video is currently unavailable. It may still be uploading or processing.',
                expected=True)

        title = video.get('reddit_title') or video['title']

        formats = []
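        # video['files'] maps variant names to per-file dicts, e.g. (shape
        # assumed from the fields read below):
        #   {'mp4': {'url': '//cdn.../video.mp4', 'width': 1280,
        #            'height': 720, 'size': 1048576, 'framerate': 30,
        #            'bitrate': 2000000}}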
        for key, info in video['files'].items():
            if not info.get('url'):
                continue
            formats.append({
                'format_id': key,
                'url': self._proto_relative_url(info['url']),
                'width': int_or_none(info.get('width')),
                'height': int_or_none(info.get('height')),
                'filesize': int_or_none(info.get('size')),
                'fps': int_or_none(info.get('framerate')),
                'vbr': float_or_none(info.get('bitrate'), 1000)
            })
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'description': video.get('description'),
            'thumbnail': self._proto_relative_url(video.get('thumbnail_url')),
            'uploader': video.get('owner', {}).get('user_name'),
            'timestamp': float_or_none(video.get('date_added')),
            'duration': float_or_none(video.get('duration')),
            'view_count': int_or_none(video.get('plays')),
            'formats': formats
        }
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    ExtractorError,
    urlencode_postdata,
)


class StreamcloudIE(InfoExtractor):
    IE_NAME = 'streamcloud.eu'
    _VALID_URL = r'https?://streamcloud\.eu/(?P<id>[a-zA-Z0-9_-]+)(?:/(?P<fname>[^#?]*)\.html)?'

    _TESTS = [{
        'url': 'http://streamcloud.eu/skp9j99s4bpz/youtube-dl_test_video_____________-BaW_jenozKc.mp4.html',
        'md5': '6bea4c7fa5daaacc2a946b7146286686',
        'info_dict': {
            'id': 'skp9j99s4bpz',
            'ext': 'mp4',
            'title': 'youtube-dl test video  \'/\\ ä ↭',
        },
        'skip': 'Only available from the EU'
    }, {
        'url': 'http://streamcloud.eu/ua8cmfh1nbe6/NSHIP-148--KUC-NG--H264-.mp4.html',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        url = 'http://streamcloud.eu/%s' % video_id

        orig_webpage = self._download_webpage(url, video_id)

        if '>File Not Found<' in orig_webpage:
            raise ExtractorError(
                'Video %s does not exist' % video_id, expected=True)

        fields = re.findall(r'''(?x)<input\s+
            type="(?:hidden|submit)"\s+
            name="([^"]+)"\s+
            (?:id="[^"]+"\s+)?
            value="([^"]*)"
            ''', orig_webpage)

        self._sleep(6, video_id)
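        # The pause apparently mimics a countdown the site enforces before it
        # accepts the hidden form fields POSTed back below.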

        webpage = self._download_webpage(
            url, video_id, data=urlencode_postdata(fields), headers={
                b'Content-Type': b'application/x-www-form-urlencoded',
            })

        try:
            title = self._html_search_regex(
                r'<h1[^>]*>([^<]+)<', webpage, 'title')
            video_url = self._search_regex(
                r'file:\s*"([^"]+)"', webpage, 'video URL')
        except ExtractorError:
            message = self._html_search_regex(
                r'(?s)<div[^>]+class=(["\']).*?msgboxinfo.*?\1[^>]*>(?P<message>.+?)</div>',
                webpage, 'message', default=None, group='message')
            if message:
                raise ExtractorError('%s said: %s' % (self.IE_NAME, message), expected=True)
            raise
        thumbnail = self._search_regex(
            r'image:\s*"([^"]+)"', webpage, 'thumbnail URL', fatal=False)

        return {
            'id': video_id,
            'title': title,
            'url': video_url,
            'thumbnail': thumbnail,
            'http_headers': {
                'Referer': url,
            },
        }
# coding: utf-8
from __future__ import unicode_literals

import hashlib
import time

from .common import InfoExtractor
from ..utils import (
    int_or_none,
    sanitized_Request,
)


def _get_api_key(api_path):
    if api_path.endswith('?'):
        api_path = api_path[:-1]

    api_key = 'fb5f58a820353bd7095de526253c14fd'
    a = '{0:}{1:}{2:}'.format(api_key, api_path, int(round(time.time() / 24 / 3600)))
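    # Worked example (hypothetical day number): for api_path '/episode/765767'
    # on day 18580 since the Unix epoch, the header value is
    #   md5('fb5f58a820353bd7095de526253c14fd' + '/episode/765767' + '18580')
    # so the derived key rotates once every 24 hours.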
    return hashlib.md5(a.encode('ascii')).hexdigest()


class StreamCZIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?stream\.cz/.+/(?P<id>[0-9]+)'
    _API_URL = 'http://www.stream.cz/API'

    _TESTS = [{
        'url': 'http://www.stream.cz/peklonataliri/765767-ecka-pro-deti',
        'md5': '934bb6a6d220d99c010783c9719960d5',
        'info_dict': {
            'id': '765767',
            'ext': 'mp4',
            'title': 'Peklo na talíři: Éčka pro děti',
            'description': 'Taška s grónskou pomazánkou a další pekelnosti ZDE',
            'thumbnail': 're:^http://im.stream.cz/episode/52961d7e19d423f8f06f0100',
            'duration': 256,
        },
    }, {
        'url': 'http://www.stream.cz/blanik/10002447-tri-roky-pro-mazanka',
        'md5': '849a88c1e1ca47d41403c2ba5e59e261',
        'info_dict': {
            'id': '10002447',
            'ext': 'mp4',
            'title': 'Kancelář Blaník: Tři roky pro Mazánka',
            'description': 'md5:3862a00ba7bf0b3e44806b544032c859',
            'thumbnail': 're:^http://im.stream.cz/episode/537f838c50c11f8d21320000',
            'duration': 368,
        },
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        api_path = '/episode/%s' % video_id

        req = sanitized_Request(self._API_URL + api_path)
        req.add_header('Api-Password', _get_api_key(api_path))
        data = self._download_json(req, video_id)

        formats = []
        for quality, video in enumerate(data['video_qualities']):
            for f in video['formats']:
                typ = f['type'].partition('/')[2]
                qlabel = video.get('quality_label')
                formats.append({
                    'format_note': '%s-%s' % (qlabel, typ) if qlabel else typ,
                    'format_id': '%s-%s' % (typ, f['quality']),
                    'url': f['source'],
                    'height': int_or_none(f['quality'].rstrip('p')),
                    'quality': quality,
                })
        self._sort_formats(formats)

        image = data.get('image')
        if image:
            thumbnail = self._proto_relative_url(
                image.replace('{width}', '1240').replace('{height}', '697'),
                scheme='http:',
            )
        else:
            thumbnail = None

        stream = data.get('_embedded', {}).get('stream:show', {}).get('name')
        if stream:
            title = '%s: %s' % (stream, data['name'])
        else:
            title = data['name']

        subtitles = {}
        srt_url = data.get('subtitles_srt')
        if srt_url:
            subtitles['cs'] = [{
                'ext': 'srt',
                'url': srt_url,
            }]

        return {
            'id': video_id,
            'title': title,
            'thumbnail': thumbnail,
            'formats': formats,
            'description': data.get('web_site_text'),
            'duration': int_or_none(data.get('duration')),
            'view_count': int_or_none(data.get('views')),
            'subtitles': subtitles,
        }
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..compat import compat_str
from ..utils import unified_strdate


class StreetVoiceIE(InfoExtractor):
    _VALID_URL = r'https?://(?:.+?\.)?streetvoice\.com/[^/]+/songs/(?P<id>[0-9]+)'
    _TESTS = [{
        'url': 'http://streetvoice.com/skippylu/songs/94440/',
        'md5': '15974627fc01a29e492c98593c2fd472',
        'info_dict': {
            'id': '94440',
            'ext': 'mp3',
            'title': '輸',
            'description': 'Crispy脆樂團 - 輸',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 260,
            'upload_date': '20091018',
            'uploader': 'Crispy脆樂團',
            'uploader_id': '627810',
        }
    }, {
        'url': 'http://tw.streetvoice.com/skippylu/songs/94440/',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        song_id = self._match_id(url)

        song = self._download_json(
            'https://streetvoice.com/api/v1/public/song/%s/' % song_id, song_id, data=b'')
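        # data=b'' turns this into a POST with an empty body; the endpoint
        # apparently serves the song metadata only in response to POST.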

        title = song['name']
        author = song['user']['nickname']

        return {
            'id': song_id,
            'url': song['file'],
            'title': title,
            'description': '%s - %s' % (author, title),
            'thumbnail': self._proto_relative_url(song.get('image'), 'http:'),
            'duration': song.get('length'),
            'upload_date': unified_strdate(song.get('created_at')),
            'uploader': author,
            'uploader_id': compat_str(song['user']['id']),
        }
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import int_or_none


class StretchInternetIE(InfoExtractor):
    _VALID_URL = r'https?://portal\.stretchinternet\.com/[^/]+/(?:portal|full)\.htm\?.*?\beventId=(?P<id>\d+)'
    _TEST = {
        'url': 'https://portal.stretchinternet.com/umary/portal.htm?eventId=573272&streamType=video',
        'info_dict': {
            'id': '573272',
            'ext': 'mp4',
            'title': 'University of Mary Wrestling vs. Upper Iowa',
            'timestamp': 1575668361,
            'upload_date': '20191206',
        }
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)

        event = self._download_json(
            'https://api.stretchinternet.com/trinity/event/tcg/' + video_id,
            video_id)[0]
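        # The event endpoint returns a one-element list; the event dict is
        # expected to carry 'title', 'dateCreated' (in ms) and 'media'.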

        return {
            'id': video_id,
            'title': event['title'],
            'timestamp': int_or_none(event.get('dateCreated'), 1000),
            'url': 'https://' + event['media'][0]['url'],
        }
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
    float_or_none,
    int_or_none,
)


class STVPlayerIE(InfoExtractor):
    IE_NAME = 'stv:player'
    _VALID_URL = r'https?://player\.stv\.tv/(?P<type>episode|video)/(?P<id>[a-z0-9]{4})'
    _TEST = {
        'url': 'https://player.stv.tv/video/4gwd/emmerdale/60-seconds-on-set-with-laura-norton/',
        'md5': '5adf9439c31d554f8be0707c7abe7e0a',
        'info_dict': {
            'id': '5333973339001',
            'ext': 'mp4',
            'upload_date': '20170301',
            'title': '60 seconds on set with Laura Norton',
            'description': "How many questions can Laura - a.k.a Kerry Wyatt - answer in 60 seconds? Let\'s find out!",
            'timestamp': 1488388054,
            'uploader_id': '1486976045',
        },
        'skip': 'this resource is unavailable outside of the UK',
    }
    BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/1486976045/default_default/index.html?videoId=%s'
    _PTYPE_MAP = {
        'episode': 'episodes',
        'video': 'shortform',
    }

    def _real_extract(self, url):
        ptype, video_id = re.match(self._VALID_URL, url).groups()
        resp = self._download_json(
            'https://player.api.stv.tv/v1/%s/%s' % (self._PTYPE_MAP[ptype], video_id),
            video_id)

        result = resp['results']
        video = result['video']
        video_id = compat_str(video['id'])

        subtitles = {}
        _subtitles = result.get('_subtitles') or {}
        for ext, sub_url in _subtitles.items():
            subtitles.setdefault('en', []).append({
                'ext': 'vtt' if ext == 'webvtt' else ext,
                'url': sub_url,
            })

        programme = result.get('programme') or {}

        return {
            '_type': 'url_transparent',
            'id': video_id,
            'url': self.BRIGHTCOVE_URL_TEMPLATE % video_id,
            'description': result.get('summary'),
            'duration': float_or_none(video.get('length'), 1000),
            'subtitles': subtitles,
            'view_count': int_or_none(result.get('views')),
            'series': programme.get('name') or programme.get('shortName'),
            'ie_key': 'BrightcoveNew',
        }
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    parse_duration,
    int_or_none,
    qualities,
    determine_ext,
)


class SunPornoIE(InfoExtractor):
    _VALID_URL = r'https?://(?:(?:www\.)?sunporno\.com/videos|embeds\.sunporno\.com/embed)/(?P<id>\d+)'
    _TESTS = [{
        'url': 'http://www.sunporno.com/videos/807778/',
        'md5': '507887e29033502f29dba69affeebfc9',
        'info_dict': {
            'id': '807778',
            'ext': 'mp4',
            'title': 'md5:0a400058e8105d39e35c35e7c5184164',
            'description': 'md5:a31241990e1bd3a64e72ae99afb325fb',
            'thumbnail': r're:^https?://.*\.jpg$',
            'duration': 302,
            'age_limit': 18,
        }
    }, {
        'url': 'http://embeds.sunporno.com/embed/807778',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        webpage = self._download_webpage(
            'http://www.sunporno.com/videos/%s' % video_id, video_id)

        title = self._html_search_regex(
            r'<title>([^<]+)</title>', webpage, 'title')
        description = self._html_search_meta(
            'description', webpage, 'description')
        thumbnail = self._html_search_regex(
            r'poster="([^"]+)"', webpage, 'thumbnail', fatal=False)

        duration = parse_duration(self._search_regex(
            (r'itemprop="duration"[^>]*>\s*(\d+:\d+)\s*<',
             r'>Duration:\s*<span[^>]+>\s*(\d+:\d+)\s*<'),
            webpage, 'duration', fatal=False))

        view_count = int_or_none(self._html_search_regex(
            r'class="views">(?:<noscript>)?\s*(\d+)\s*<',
            webpage, 'view count', fatal=False))
        comment_count = int_or_none(self._html_search_regex(
            r'(\d+)</b> Comments?',
            webpage, 'comment count', fatal=False, default=None))

        formats = []
        quality = qualities(['mp4', 'flv'])
        for video_url in re.findall(r'<(?:source|video) src="([^"]+)"', webpage):
            video_ext = determine_ext(video_url)
            formats.append({
                'url': video_url,
                'format_id': video_ext,
                'quality': quality(video_ext),
            })
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'duration': duration,
            'view_count': view_count,
            'comment_count': comment_count,
            'formats': formats,
            'age_limit': 18,
        }
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import (
    determine_ext,
    int_or_none,
    str_or_none,
)


class SverigesRadioBaseIE(InfoExtractor):
    _BASE_URL = 'https://sverigesradio.se/sida/playerajax/'
    _QUALITIES = ['low', 'medium', 'high']
    _EXT_TO_CODEC_MAP = {
        'mp3': 'mp3',
        'm4a': 'aac',
    }
    _CODING_FORMAT_TO_ABR_MAP = {
        5: 128,
        11: 192,
        12: 32,
        13: 96,
    }

    def _real_extract(self, url):
        audio_id = self._match_id(url)
        query = {
            'id': audio_id,
            'type': self._AUDIO_TYPE,
        }

        item = self._download_json(
            self._BASE_URL + 'audiometadata', audio_id,
            'Downloading audio JSON metadata', query=query)['items'][0]
        title = item['subtitle']

        query['format'] = 'iis'
        urls = []
        formats = []
        for quality in self._QUALITIES:
            query['quality'] = quality
            audio_url_data = self._download_json(
                self._BASE_URL + 'getaudiourl', audio_id,
                'Downloading %s format JSON metadata' % quality,
                fatal=False, query=query) or {}
            audio_url = audio_url_data.get('audioUrl')
            if not audio_url or audio_url in urls:
                continue
            urls.append(audio_url)
            ext = determine_ext(audio_url)
            coding_format = audio_url_data.get('codingFormat')
            abr = int_or_none(self._search_regex(
                r'_a(\d+)\.m4a', audio_url, 'audio bitrate',
                default=None)) or self._CODING_FORMAT_TO_ABR_MAP.get(coding_format)
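            # e.g. '..._a96.m4a' -> abr 96; otherwise fall back to the
            # codingFormat table above (bitrate values assumed, in kbit/s).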
            formats.append({
                'abr': abr,
                'acodec': self._EXT_TO_CODEC_MAP.get(ext),
                'ext': ext,
                'format_id': str_or_none(coding_format),
                'vcodec': 'none',
                'url': audio_url,
            })
        self._sort_formats(formats)

        return {
            'id': audio_id,
            'title': title,
            'formats': formats,
            'series': item.get('title'),
            'duration': int_or_none(item.get('duration')),
            'thumbnail': item.get('displayimageurl'),
            'description': item.get('description'),
        }


class SverigesRadioPublicationIE(SverigesRadioBaseIE):
    IE_NAME = 'sverigesradio:publication'
    _VALID_URL = r'https?://(?:www\.)?sverigesradio\.se/sida/(?:artikel|gruppsida)\.aspx\?.*?\bartikel=(?P<id>[0-9]+)'
    _TESTS = [{
        'url': 'https://sverigesradio.se/sida/artikel.aspx?programid=83&artikel=7038546',
        'md5': '6a4917e1923fccb080e5a206a5afa542',
        'info_dict': {
            'id': '7038546',
            'ext': 'm4a',
            'duration': 132,
            'series': 'Nyheter (Ekot)',
            'title': 'Esa Teittinen: Sanningen har inte kommit fram',
            'description': 'md5:daf7ce66a8f0a53d5465a5984d3839df',
            'thumbnail': r're:^https?://.*\.jpg',
        },
    }, {
        'url': 'https://sverigesradio.se/sida/gruppsida.aspx?programid=3304&grupp=6247&artikel=7146887',
        'only_matching': True,
    }]
    _AUDIO_TYPE = 'publication'


class SverigesRadioEpisodeIE(SverigesRadioBaseIE):
    IE_NAME = 'sverigesradio:episode'
    _VALID_URL = r'https?://(?:www\.)?sverigesradio\.se/(?:sida/)?avsnitt/(?P<id>[0-9]+)'
    _TEST = {
        'url': 'https://sverigesradio.se/avsnitt/1140922?programid=1300',
        'md5': '20dc4d8db24228f846be390b0c59a07c',
        'info_dict': {
            'id': '1140922',
            'ext': 'mp3',
            'duration': 3307,
            'series': 'Konflikt',
            'title': 'Metoo och valen',
            'description': 'md5:fcb5c1f667f00badcc702b196f10a27e',
            'thumbnail': r're:^https?://.*\.jpg',
        }
    }
    _AUDIO_TYPE = 'episode'
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
    determine_ext,
    dict_get,
    int_or_none,
    str_or_none,
    strip_or_none,
    try_get,
)


class SVTBaseIE(InfoExtractor):
    _GEO_COUNTRIES = ['SE']

    def _extract_video(self, video_info, video_id):
        is_live = dict_get(video_info, ('live', 'simulcast'), default=False)
        m3u8_protocol = 'm3u8' if is_live else 'm3u8_native'
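        # entry_protocol 'm3u8' hands HLS to ffmpeg, which copes better with
        # live streams; 'm3u8_native' uses the built-in segment downloader
        # for VOD.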
        formats = []
        for vr in video_info['videoReferences']:
            player_type = vr.get('playerType') or vr.get('format')
            vurl = vr['url']
            ext = determine_ext(vurl)
            if ext == 'm3u8':
                formats.extend(self._extract_m3u8_formats(
                    vurl, video_id,
                    ext='mp4', entry_protocol=m3u8_protocol,
                    m3u8_id=player_type, fatal=False))
            elif ext == 'f4m':
                formats.extend(self._extract_f4m_formats(
                    vurl + '?hdcore=3.3.0', video_id,
                    f4m_id=player_type, fatal=False))
            elif ext == 'mpd':
                if player_type == 'dashhbbtv':
                    formats.extend(self._extract_mpd_formats(
                        vurl, video_id, mpd_id=player_type, fatal=False))
            else:
                formats.append({
                    'format_id': player_type,
                    'url': vurl,
                })
        if not formats and video_info.get('rights', {}).get('geoBlockedSweden'):
            self.raise_geo_restricted(
                'This video is only available in Sweden',
                countries=self._GEO_COUNTRIES)
        self._sort_formats(formats)

        subtitles = {}
        subtitle_references = dict_get(video_info, ('subtitles', 'subtitleReferences'))
        if isinstance(subtitle_references, list):
            for sr in subtitle_references:
                subtitle_url = sr.get('url')
                subtitle_lang = sr.get('language', 'sv')
                if subtitle_url:
                    if determine_ext(subtitle_url) == 'm3u8':
                        # TODO(yan12125): handle WebVTT in m3u8 manifests
                        continue

                    subtitles.setdefault(subtitle_lang, []).append({'url': subtitle_url})

        title = video_info.get('title')

        series = video_info.get('programTitle')
        season_number = int_or_none(video_info.get('season'))
        episode = video_info.get('episodeTitle')
        episode_number = int_or_none(video_info.get('episodeNumber'))

        duration = int_or_none(dict_get(video_info, ('materialLength', 'contentDuration')))
        age_limit = None
        adult = dict_get(
            video_info, ('inappropriateForChildren', 'blockedForChildren'),
            skip_false_values=False)
        if adult is not None:
            age_limit = 18 if adult else 0

        return {
            'id': video_id,
            'title': title,
            'formats': formats,
            'subtitles': subtitles,
            'duration': duration,
            'age_limit': age_limit,
            'series': series,
            'season_number': season_number,
            'episode': episode,
            'episode_number': episode_number,
            'is_live': is_live,
        }


class SVTIE(SVTBaseIE):
    _VALID_URL = r'https?://(?:www\.)?svt\.se/wd\?(?:.*?&)?widgetId=(?P<widget_id>\d+)&.*?\barticleId=(?P<id>\d+)'
    _TEST = {
        'url': 'http://www.svt.se/wd?widgetId=23991&sectionId=541&articleId=2900353&type=embed&contextSectionId=123&autostart=false',
        'md5': '33e9a5d8f646523ce0868ecfb0eed77d',
        'info_dict': {
            'id': '2900353',
            'ext': 'mp4',
            'title': 'Stjärnorna skojar till det - under SVT-intervjun',
            'duration': 27,
            'age_limit': 0,
        },
    }

    @staticmethod
    def _extract_url(webpage):
        mobj = re.search(
            r'(?:<iframe src|href)="(?P<url>%s[^"]*)"' % SVTIE._VALID_URL, webpage)
        if mobj:
            return mobj.group('url')

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        widget_id = mobj.group('widget_id')
        article_id = mobj.group('id')

        info = self._download_json(
            'http://www.svt.se/wd?widgetId=%s&articleId=%s&format=json&type=embed&output=json' % (widget_id, article_id),
            article_id)

        info_dict = self._extract_video(info['video'], article_id)
        info_dict['title'] = info['context']['title']
        return info_dict


class SVTPlayBaseIE(SVTBaseIE):
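    # Matches the root['__svtplay'] = {...}; assignment (any number of leading
    # underscores) embedded in SVT Play pages; the payload is the 'json' group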
    _SVTPLAY_RE = r'root\s*\[\s*(["\'])_*svtplay\1\s*\]\s*=\s*(?P<json>{.+?})\s*;\s*\n'


class SVTPlayIE(SVTPlayBaseIE):
    IE_DESC = 'SVT Play and Öppet arkiv'
    _VALID_URL = r'''(?x)
                    (?:
                        svt:(?P<svt_id>[^/?#&]+)|
                        https?://(?:www\.)?(?:svtplay|oppetarkiv)\.se/(?:video|klipp|kanaler)/(?P<id>[^/?#&]+)
                    )
                    '''
    _TESTS = [{
        'url': 'http://www.svtplay.se/video/5996901/flygplan-till-haile-selassie/flygplan-till-haile-selassie-2',
        'md5': '2b6704fe4a28801e1a098bbf3c5ac611',
        'info_dict': {
            'id': '5996901',
            'ext': 'mp4',
            'title': 'Flygplan till Haile Selassie',
            'duration': 3527,
            'thumbnail': r're:^https?://.*[\.-]jpg$',
            'age_limit': 0,
            'subtitles': {
                'sv': [{
                    'ext': 'wsrt',
                }]
            },
        },
    }, {
        # geo restricted to Sweden
        'url': 'http://www.oppetarkiv.se/video/5219710/trollflojten',
        'only_matching': True,
    }, {
        'url': 'http://www.svtplay.se/klipp/9023742/stopptid-om-bjorn-borg',
        'only_matching': True,
    }, {
        'url': 'https://www.svtplay.se/kanaler/svt1',
        'only_matching': True,
    }, {
        'url': 'svt:1376446-003A',
        'only_matching': True,
    }, {
        'url': 'svt:14278044',
        'only_matching': True,
    }]

    def _adjust_title(self, info):
        if info['is_live']:
            info['title'] = self._live_title(info['title'])

    def _extract_by_video_id(self, video_id, webpage=None):
        data = self._download_json(
            'https://api.svt.se/videoplayer-api/video/%s' % video_id,
            video_id, headers=self.geo_verification_headers())
        info_dict = self._extract_video(data, video_id)
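        # Title fallback chain: episode/series from the API, then og:title with
        # the trailing '| site' suffix stripped, then the bare video id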
        if not info_dict.get('title'):
            title = dict_get(info_dict, ('episode', 'series'))
            if not title and webpage:
                title = re.sub(
                    r'\s*\|\s*.+?$', '', self._og_search_title(webpage))
            if not title:
                title = video_id
            info_dict['title'] = title
        self._adjust_title(info_dict)
        return info_dict

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id, svt_id = mobj.group('id', 'svt_id')

        if svt_id:
            return self._extract_by_video_id(svt_id)

        webpage = self._download_webpage(url, video_id)

        data = self._parse_json(
            self._search_regex(
                self._SVTPLAY_RE, webpage, 'embedded data', default='{}',
                group='json'),
            video_id, fatal=False)

        thumbnail = self._og_search_thumbnail(webpage)

        if data:
            video_info = try_get(
                data, lambda x: x['context']['dispatcher']['stores']['VideoTitlePageStore']['data']['video'],
                dict)
            if video_info:
                info_dict = self._extract_video(video_info, video_id)
                info_dict.update({
                    'title': data['context']['dispatcher']['stores']['MetaStore']['title'],
                    'thumbnail': thumbnail,
                })
                self._adjust_title(info_dict)
                return info_dict

            svt_id = try_get(
                data, lambda x: x['statistics']['dataLake']['content']['id'],
                compat_str)
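        # If the embedded store data is missing or lacks an id, fall back to
        # scraping the id straight from the page markup below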

        if not svt_id:
            svt_id = self._search_regex(
                (r'<video[^>]+data-video-id=["\']([\da-zA-Z-]+)',
                 r'["\']videoSvtId["\']\s*:\s*["\']([\da-zA-Z-]+)',
                 r'"content"\s*:\s*{.*?"id"\s*:\s*"([\da-zA-Z-]+)"',
                 r'["\']svtId["\']\s*:\s*["\']([\da-zA-Z-]+)'),
                webpage, 'video id')

        return self._extract_by_video_id(svt_id, webpage)


class SVTSeriesIE(SVTPlayBaseIE):
    _VALID_URL = r'https?://(?:www\.)?svtplay\.se/(?P<id>[^/?&#]+)(?:.+?\btab=(?P<season_slug>[^&#]+))?'
    _TESTS = [{
        'url': 'https://www.svtplay.se/rederiet',
        'info_dict': {
            'id': '14445680',
            'title': 'Rederiet',
            'description': 'md5:d9fdfff17f5d8f73468176ecd2836039',
        },
        'playlist_mincount': 318,
    }, {
        'url': 'https://www.svtplay.se/rederiet?tab=season-2-14445680',
        'info_dict': {
            'id': 'season-2-14445680',
            'title': 'Rederiet - Säsong 2',
            'description': 'md5:d9fdfff17f5d8f73468176ecd2836039',
        },
        'playlist_mincount': 12,
    }]

    @classmethod
    def suitable(cls, url):
        return False if SVTIE.suitable(url) or SVTPlayIE.suitable(url) else super(SVTSeriesIE, cls).suitable(url)

    def _real_extract(self, url):
        series_slug, season_id = re.match(self._VALID_URL, url).groups()

        series = self._download_json(
            'https://api.svt.se/contento/graphql', series_slug,
            'Downloading series page', query={
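                # GraphQL query: resolve the series slug to its seasons and the
                # videoSvtId of every episode, plus series metadata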
                'query': '''{
  listablesBySlug(slugs: ["%s"]) {
    associatedContent(include: [productionPeriod, season]) {
      items {
        item {
          ... on Episode {
            videoSvtId
          }
        }
      }
      id
      name
    }
    id
    longDescription
    name
    shortDescription
  }
}''' % series_slug,
            })['data']['listablesBySlug'][0]

        season_name = None

        entries = []
        for season in series['associatedContent']:
            if not isinstance(season, dict):
                continue
            if season_id:
                if season.get('id') != season_id:
                    continue
                season_name = season.get('name')
            items = season.get('items')
            if not isinstance(items, list):
                continue
            for item in items:
                video = item.get('item') or {}
                content_id = video.get('videoSvtId')
                if not content_id or not isinstance(content_id, compat_str):
                    continue
                entries.append(self.url_result(
                    'svt:' + content_id, SVTPlayIE.ie_key(), content_id))

        title = series.get('name')
        season_name = season_name or season_id

        if title and season_name:
            title = '%s - %s' % (title, season_name)
        elif season_id:
            title = season_id

        return self.playlist_result(
            entries, season_id or series.get('id'), title,
            dict_get(series, ('longDescription', 'shortDescription')))


class SVTPageIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?svt\.se/(?P<path>(?:[^/]+/)*(?P<id>[^/?&#]+))'
    _TESTS = [{
        'url': 'https://www.svt.se/sport/ishockey/bakom-masken-lehners-kamp-mot-mental-ohalsa',
        'info_dict': {
            'id': '25298267',
            'title': 'Bakom masken – Lehners kamp mot mental ohälsa',
        },
        'playlist_count': 4,
    }, {
        'url': 'https://www.svt.se/nyheter/utrikes/svenska-andrea-ar-en-mil-fran-branderna-i-kalifornien',
        'info_dict': {
            'id': '24243746',
            'title': 'Svenska Andrea redo att fly sitt hem i Kalifornien',
        },
        'playlist_count': 2,
    }, {
        # only programTitle
        'url': 'http://www.svt.se/sport/ishockey/jagr-tacklar-giroux-under-intervjun',
        'info_dict': {
            'id': '8439V2K',
            'ext': 'mp4',
            'title': 'Stjärnorna skojar till det - under SVT-intervjun',
            'duration': 27,
            'age_limit': 0,
        },
    }, {
        'url': 'https://www.svt.se/nyheter/lokalt/vast/svt-testar-tar-nagon-upp-skrapet-1',
        'only_matching': True,
    }, {
        'url': 'https://www.svt.se/vader/manadskronikor/maj2018',
        'only_matching': True,
    }]

    @classmethod
    def suitable(cls, url):
        return False if SVTIE.suitable(url) else super(SVTPageIE, cls).suitable(url)

    def _real_extract(self, url):
        path, display_id = re.match(self._VALID_URL, url).groups()

        article = self._download_json(
            'https://api.svt.se/nss-api/page/' + path, display_id,
            query={'q': 'articles'})['articles']['content'][0]

        entries = []

        def _process_content(content):
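            # Article pages embed clips/episodes; the playable id lives under
            # the content's image object as svtId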
            if content.get('_type') in ('VIDEOCLIP', 'VIDEOEPISODE'):
                video_id = compat_str(content['image']['svtId'])
                entries.append(self.url_result(
                    'svt:' + video_id, SVTPlayIE.ie_key(), video_id))

        for media in article.get('media', []):
            _process_content(media)

        for obj in article.get('structuredBody', []):
            _process_content(obj.get('content') or {})

        return self.playlist_result(
            entries, str_or_none(article.get('id')),
            strip_or_none(article.get('title')))
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..utils import (
    parse_duration,
    int_or_none,
    determine_protocol,
)


class SWRMediathekIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?swrmediathek\.de/(?:content/)?player\.htm\?show=(?P<id>[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})'

    _TESTS = [{
        'url': 'http://swrmediathek.de/player.htm?show=849790d0-dab8-11e3-a953-0026b975f2e6',
        'md5': '8c5f6f0172753368547ca8413a7768ac',
        'info_dict': {
            'id': '849790d0-dab8-11e3-a953-0026b975f2e6',
            'ext': 'mp4',
            'title': 'SWR odysso',
            'description': 'md5:2012e31baad36162e97ce9eb3f157b8a',
            'thumbnail': r're:^http:.*\.jpg$',
            'duration': 2602,
            'upload_date': '20140515',
            'uploader': 'SWR Fernsehen',
            'uploader_id': '990030',
        },
    }, {
        'url': 'http://swrmediathek.de/player.htm?show=0e1a8510-ddf2-11e3-9be3-0026b975f2e6',
        'md5': 'b10ab854f912eecc5a6b55cd6fc1f545',
        'info_dict': {
            'id': '0e1a8510-ddf2-11e3-9be3-0026b975f2e6',
            'ext': 'mp4',
            'title': 'Nachtcafé - Alltagsdroge Alkohol - zwischen Sektempfang und Komasaufen',
            'description': 'md5:e0a3adc17e47db2c23aab9ebc36dbee2',
            'thumbnail': r're:http://.*\.jpg',
            'duration': 5305,
            'upload_date': '20140516',
            'uploader': 'SWR Fernsehen',
            'uploader_id': '990030',
        },
        'skip': 'redirect to http://swrmediathek.de/index.htm?hinweis=swrlink',
    }, {
        'url': 'http://swrmediathek.de/player.htm?show=bba23e10-cb93-11e3-bf7f-0026b975f2e6',
        'md5': '4382e4ef2c9d7ce6852535fa867a0dd3',
        'info_dict': {
            'id': 'bba23e10-cb93-11e3-bf7f-0026b975f2e6',
            'ext': 'mp3',
            'title': 'Saša Stanišic: Vor dem Fest',
            'description': 'md5:5b792387dc3fbb171eb709060654e8c9',
            'thumbnail': r're:http://.*\.jpg',
            'duration': 3366,
            'upload_date': '20140520',
            'uploader': 'SWR 2',
            'uploader_id': '284670',
        },
        'skip': 'redirect to http://swrmediathek.de/index.htm?hinweis=swrlink',
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        video = self._download_json(
            'http://swrmediathek.de/AjaxEntry?ekey=%s' % video_id,
            video_id, 'Downloading video JSON')

        attr = video['attr']
        title = attr['entry_title']
        media_type = attr.get('entry_etype')

        formats = []
        for entry in video.get('sub', []):
            if entry.get('name') != 'entry_media':
                continue

            entry_attr = entry.get('attr', {})
            f_url = entry_attr.get('val2')
            if not f_url:
                continue
            codec = entry_attr.get('val0')
            if codec == 'm3u8':
                formats.extend(self._extract_m3u8_formats(
                    f_url, video_id, 'mp4', 'm3u8_native',
                    m3u8_id='hls', fatal=False))
            elif codec == 'f4m':
                formats.extend(self._extract_f4m_formats(
                    f_url + '?hdcore=3.7.0', video_id,
                    f4m_id='hds', fatal=False))
            else:
                formats.append({
                    'format_id': determine_protocol({'url': f_url}),
                    'url': f_url,
                    'quality': int_or_none(entry_attr.get('val1')),
                    'vcodec': codec if media_type == 'Video' else 'none',
                    'acodec': codec if media_type == 'Audio' else None,
                })
        self._sort_formats(formats)

        upload_date = None
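        # entry_pdatet appears to end in a four-digit time (HHMM); stripping it
        # leaves the YYYYMMDD upload date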
        entry_pdatet = attr.get('entry_pdatet')
        if entry_pdatet:
            upload_date = entry_pdatet[:-4]

        return {
            'id': video_id,
            'title': title,
            'description': attr.get('entry_descl'),
            'thumbnail': attr.get('entry_image_16_9'),
            'duration': parse_duration(attr.get('entry_durat')),
            'upload_date': upload_date,
            'uploader': attr.get('channel_title'),
            'uploader_id': attr.get('channel_idkey'),
            'formats': formats,
        }
from __future__ import unicode_literals

from .adobepass import AdobePassIE
from ..utils import (
    update_url_query,
    smuggle_url,
)


class SyfyIE(AdobePassIE):
    _VALID_URL = r'https?://(?:www\.)?syfy\.com/(?:[^/]+/)?videos/(?P<id>[^/?#]+)'
    _TESTS = [{
        'url': 'http://www.syfy.com/theinternetruinedmylife/videos/the-internet-ruined-my-life-season-1-trailer',
        'info_dict': {
            'id': '2968097',
            'ext': 'mp4',
            'title': 'The Internet Ruined My Life: Season 1 Trailer',
            'description': 'One tweet, one post, one click, can destroy everything.',
            'uploader': 'NBCU-MPAT',
            'upload_date': '20170113',
            'timestamp': 1484345640,
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
        'add_ie': ['ThePlatform'],
    }]

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
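        # Video metadata lives in Drupal.settings under syfy.syfy_mpx; take the
        # first (and only expected) entry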
        syfy_mpx = list(self._parse_json(self._search_regex(
            r'jQuery\.extend\(Drupal\.settings\s*,\s*({.+?})\);', webpage, 'drupal settings'),
            display_id)['syfy']['syfy_mpx'].values())[0]
        video_id = syfy_mpx['mpxGUID']
        title = syfy_mpx['episodeTitle']
        query = {
            'mbr': 'true',
            'manifest': 'm3u',
        }
        if syfy_mpx.get('entitlement') == 'auth':
            resource = self._get_mvpd_resource(
                'syfy', title, video_id,
                syfy_mpx.get('mpxRating', 'TV-14'))
            query['auth'] = self._extract_mvpd_auth(
                url, video_id, 'syfy', resource)

        return {
            '_type': 'url_transparent',
            'ie_key': 'ThePlatform',
            'url': smuggle_url(update_url_query(
                self._proto_relative_url(syfy_mpx['releaseURL']), query),
                {'force_smil_url': True}),
            'title': title,
            'id': video_id,
            'display_id': display_id,
        }
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor


class SztvHuIE(InfoExtractor):
    _VALID_URL = r'https?://(?:(?:www\.)?sztv\.hu|www\.tvszombathely\.hu)/(?:[^/]+)/.+-(?P<id>[0-9]+)'
    _TEST = {
        'url': 'http://sztv.hu/hirek/cserkeszek-nepszerusitettek-a-kornyezettudatos-eletmodot-a-savaria-teren-20130909',
        'md5': 'a6df607b11fb07d0e9f2ad94613375cb',
        'info_dict': {
            'id': '20130909',
            'ext': 'mp4',
            'title': 'Cserkészek népszerűsítették a környezettudatos életmódot a Savaria téren',
            'description': 'A zöld nap játékos ismeretterjesztő programjait a Magyar Cserkész Szövetség szervezte, akik az ország nyolc városában adják át tudásukat az érdeklődőknek. A PET...',
        },
    }

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
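        # The player config looks like file: "mp4:path"; the three-dot regex
        # skips the protocol prefix before the colon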
        video_file = self._search_regex(
            r'file: "...:(.*?)",', webpage, 'video file')
        title = self._html_search_regex(
            r'<meta name="title" content="([^"]*?) - [^-]*? - [^-]*?"',
            webpage, 'video title')
        description = self._html_search_regex(
            r'<meta name="description" content="([^"]*)"/>',
            webpage, 'video description', fatal=False)
        thumbnail = self._og_search_thumbnail(webpage)

        video_url = 'http://media.sztv.hu/vod/' + video_file

        return {
            'id': video_id,
            'url': video_url,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
        }
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    determine_ext,
    js_to_json,
    parse_iso8601,
    parse_filesize,
)


class TagesschauPlayerIE(InfoExtractor):
    IE_NAME = 'tagesschau:player'
    _VALID_URL = r'https?://(?:www\.)?tagesschau\.de/multimedia/(?P<kind>audio|video)/(?P=kind)-(?P<id>\d+)~player(?:_[^/?#&]+)?\.html'

    _TESTS = [{
        'url': 'http://www.tagesschau.de/multimedia/video/video-179517~player.html',
        'md5': '8d09548d5c15debad38bee3a4d15ca21',
        'info_dict': {
            'id': '179517',
            'ext': 'mp4',
            'title': 'Marie Kristin Boese, ARD Berlin, über den zukünftigen Kurs der AfD',
            'thumbnail': r're:^https?:.*\.jpg$',
            'formats': 'mincount:6',
        },
    }, {
        'url': 'https://www.tagesschau.de/multimedia/audio/audio-29417~player.html',
        'md5': '76e6eec6ebd40740671cf0a2c88617e5',
        'info_dict': {
            'id': '29417',
            'ext': 'mp3',
            'title': 'Trabi - Bye, bye Rennpappe',
            'thumbnail': r're:^https?:.*\.jpg$',
            'formats': 'mincount:2',
        },
    }, {
        'url': 'http://www.tagesschau.de/multimedia/audio/audio-29417~player_autoplay-true.html',
        'only_matching': True,
    }]

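    # Known quality labels mapped to approximate dimensions and a relative
    # quality rank; 'xs' and 'xxl' dimensions are unknown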
    _FORMATS = {
        'xs': {'quality': 0},
        's': {'width': 320, 'height': 180, 'quality': 1},
        'm': {'width': 512, 'height': 288, 'quality': 2},
        'l': {'width': 960, 'height': 540, 'quality': 3},
        'xl': {'width': 1280, 'height': 720, 'quality': 4},
        'xxl': {'quality': 5},
    }

    def _extract_via_api(self, kind, video_id):
        info = self._download_json(
            'https://www.tagesschau.de/api/multimedia/{0}/{0}-{1}.json'.format(kind, video_id),
            video_id)
        title = info['headline']
        formats = []
        for media in info['mediadata']:
            for format_id, format_url in media.items():
                if determine_ext(format_url) == 'm3u8':
                    formats.extend(self._extract_m3u8_formats(
                        format_url, video_id, 'mp4',
                        entry_protocol='m3u8_native', m3u8_id='hls'))
                else:
                    formats.append({
                        'url': format_url,
                        'format_id': format_id,
                        'vcodec': 'none' if kind == 'audio' else None,
                    })
        self._sort_formats(formats)
        timestamp = parse_iso8601(info.get('date'))
        return {
            'id': video_id,
            'title': title,
            'timestamp': timestamp,
            'formats': formats,
        }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')

        # kind = mobj.group('kind').lower()
        # if kind == 'video':
        #     return self._extract_via_api(kind, video_id)

        # The JSON API does not provide some audio formats (e.g. ogg), so
        # extract audio via the webpage instead

        webpage = self._download_webpage(url, video_id)

        title = self._og_search_title(webpage).strip()
        formats = []

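        # Scan the page for inline player source objects of the form
        # {src: "http...", type: "...", ...} and parse them as JSON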
        for media_json in re.findall(r'({src\s*:\s*["\']http[^}]+type\s*:[^}]+})', webpage):
            media = self._parse_json(js_to_json(media_json), video_id, fatal=False)
            if not media:
                continue
            src = media.get('src')
            if not src:
                continue
            quality = media.get('quality')
            kind = media.get('type', '').split('/')[0]
            ext = determine_ext(src)
            f = {
                'url': src,
                'format_id': '%s_%s' % (quality, ext) if quality else ext,
                'ext': ext,
                'vcodec': 'none' if kind == 'audio' else None,
            }
            f.update(self._FORMATS.get(quality, {}))
            formats.append(f)

        self._sort_formats(formats)

        thumbnail = self._og_search_thumbnail(webpage)

        return {
            'id': video_id,
            'title': title,
            'thumbnail': thumbnail,
            'formats': formats,
        }


class TagesschauIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?tagesschau\.de/(?P<path>[^/]+/(?:[^/]+/)*?(?P<id>[^/#?]+?(?:-?[0-9]+)?))(?:~_?[^/#?]+?)?\.html'

    _TESTS = [{
        'url': 'http://www.tagesschau.de/multimedia/video/video-102143.html',
        'md5': 'f7c27a0eff3bfe8c7727e65f8fe1b1e6',
        'info_dict': {
            'id': 'video-102143',
            'ext': 'mp4',
            'title': 'Regierungsumbildung in Athen: Neue Minister in Griechenland vereidigt',
            'description': '18.07.2015 20:10 Uhr',
            'thumbnail': r're:^https?:.*\.jpg$',
        },
    }, {
        'url': 'http://www.tagesschau.de/multimedia/sendung/ts-5727.html',
        'md5': '3c54c1f6243d279b706bde660ceec633',
        'info_dict': {
            'id': 'ts-5727',
            'ext': 'mp4',
            'title': 'Sendung: tagesschau \t04.12.2014 20:00 Uhr',
            'description': 'md5:695c01bfd98b7e313c501386327aea59',
            'thumbnail': r're:^https?:.*\.jpg$',
        },
    }, {
        # exclusive audio
        'url': 'http://www.tagesschau.de/multimedia/audio/audio-29417.html',
        'md5': '76e6eec6ebd40740671cf0a2c88617e5',
        'info_dict': {
            'id': 'audio-29417',
            'ext': 'mp3',
            'title': 'Trabi - Bye, bye Rennpappe',
            'description': 'md5:8687dda862cbbe2cfb2df09b56341317',
            'thumbnail': r're:^https?:.*\.jpg$',
        },
    }, {
        # audio in article
        'url': 'http://www.tagesschau.de/inland/bnd-303.html',
        'md5': 'e0916c623e85fc1d2b26b78f299d3958',
        'info_dict': {
            'id': 'bnd-303',
            'ext': 'mp3',
            'title': 'Viele Baustellen für neuen BND-Chef',
            'description': 'md5:1e69a54be3e1255b2b07cdbce5bcd8b4',
            'thumbnail': r're:^https?:.*\.jpg$',
        },
    }, {
        'url': 'http://www.tagesschau.de/inland/afd-parteitag-135.html',
        'info_dict': {
            'id': 'afd-parteitag-135',
            'title': 'Möchtegern-Underdog mit Machtanspruch',
        },
        'playlist_count': 2,
    }, {
        'url': 'http://www.tagesschau.de/multimedia/sendung/tsg-3771.html',
        'only_matching': True,
    }, {
        'url': 'http://www.tagesschau.de/multimedia/sendung/tt-3827.html',
        'only_matching': True,
    }, {
        'url': 'http://www.tagesschau.de/multimedia/sendung/nm-3475.html',
        'only_matching': True,
    }, {
        'url': 'http://www.tagesschau.de/multimedia/sendung/weltspiegel-3167.html',
        'only_matching': True,
    }, {
        'url': 'http://www.tagesschau.de/multimedia/tsvorzwanzig-959.html',
        'only_matching': True,
    }, {
        'url': 'http://www.tagesschau.de/multimedia/sendung/bab/bab-3299~_bab-sendung-209.html',
        'only_matching': True,
    }, {
        'url': 'http://www.tagesschau.de/multimedia/video/video-102303~_bab-sendung-211.html',
        'only_matching': True,
    }, {
        'url': 'http://www.tagesschau.de/100sekunden/index.html',
        'only_matching': True,
    }, {
        # playlist article with collapsing sections
        'url': 'http://www.tagesschau.de/wirtschaft/faq-freihandelszone-eu-usa-101.html',
        'only_matching': True,
    }]

    @classmethod
    def suitable(cls, url):
        return False if TagesschauPlayerIE.suitable(url) else super(TagesschauIE, cls).suitable(url)

    def _extract_formats(self, download_text, media_kind):
        links = re.finditer(
            r'<div class="button" title="(?P<title>[^"]*)"><a href="(?P<url>[^"]+)">(?P<name>.+?)</a></div>',
            download_text)
        formats = []
        for l in links:
            link_url = l.group('url')
            if not link_url:
                continue
            format_id = self._search_regex(
                r'.*/[^/.]+\.([^/]+)\.[^/.]+$', link_url, 'format ID',
                default=determine_ext(link_url))
            format = {
                'format_id': format_id,
                'url': l.group('url'),
                'format_name': l.group('name'),
            }
            title = l.group('title')
            if title:
                if media_kind.lower() == 'video':
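                    # The button's title attribute packs codec, resolution,
                    # bitrates and file size separated by &#10; (encoded newlines)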
                    m = re.match(
                        r'''(?x)
                            Video:\s*(?P<vcodec>[a-zA-Z0-9/._-]+)\s*&\#10;
                            (?P<width>[0-9]+)x(?P<height>[0-9]+)px&\#10;
                            (?P<vbr>[0-9]+)kbps&\#10;
                            Audio:\s*(?P<abr>[0-9]+)kbps,\s*(?P<audio_desc>[A-Za-z\.0-9]+)&\#10;
                            Gr&ouml;&szlig;e:\s*(?P<filesize_approx>[0-9.,]+\s+[a-zA-Z]*B)''',
                        title)
                    if m:
                        format.update({
                            'format_note': m.group('audio_desc'),
                            'vcodec': m.group('vcodec'),
                            'width': int(m.group('width')),
                            'height': int(m.group('height')),
                            'abr': int(m.group('abr')),
                            'vbr': int(m.group('vbr')),
                            'filesize_approx': parse_filesize(m.group('filesize_approx')),
                        })
                else:
                    m = re.match(
                        r'(?P<format>.+?)-Format\s*:\s*(?P<abr>\d+)kbps\s*,\s*(?P<note>.+)',
                        title)
                    if m:
                        format.update({
                            'format_note': '%s, %s' % (m.group('format'), m.group('note')),
                            'vcodec': 'none',
                            'abr': int(m.group('abr')),
                        })
            formats.append(format)
        self._sort_formats(formats)
        return formats

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id') or mobj.group('path')
        display_id = video_id.lstrip('-')

        webpage = self._download_webpage(url, display_id)

        title = self._html_search_regex(
            r'<span[^>]*class="headline"[^>]*>(.+?)</span>',
            webpage, 'title', default=None) or self._og_search_title(webpage)

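        # Download links sit in a 'Wir bieten dieses Video/Audio in folgenden
        # Formaten zum Download an' block; 'kind' tells video from audio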
        DOWNLOAD_REGEX = r'(?s)<p>Wir bieten dieses (?P<kind>Video|Audio) in folgenden Formaten zum Download an:</p>\s*<div class="controls">(?P<links>.*?)</div>\s*<p>'

        webpage_type = self._og_search_property('type', webpage, default=None)
        if webpage_type == 'website':  # Article
            entries = []
            for num, (entry_title, media_kind, download_text) in enumerate(re.findall(
                    r'(?s)<p[^>]+class="infotext"[^>]*>\s*(?:<a[^>]+>)?\s*<strong>(.+?)</strong>.*?</p>.*?%s' % DOWNLOAD_REGEX,
                    webpage), 1):
                entries.append({
                    'id': '%s-%d' % (display_id, num),
                    'title': entry_title,
                    'formats': self._extract_formats(download_text, media_kind),
                })
            if len(entries) > 1:
                return self.playlist_result(entries, display_id, title)
            formats = entries[0]['formats']
        else:  # Assume single video
            download_text = self._search_regex(
                DOWNLOAD_REGEX, webpage, 'download links', group='links')
            media_kind = self._search_regex(
                DOWNLOAD_REGEX, webpage, 'media kind', default='Video', group='kind')
            formats = self._extract_formats(download_text, media_kind)
        thumbnail = self._og_search_thumbnail(webpage)
        description = self._html_search_regex(
            r'(?s)<p class="teasertext">(.*?)</p>',
            webpage, 'description', default=None)

        self._sort_formats(formats)

        return {
            'id': display_id,
            'title': title,
            'thumbnail': thumbnail,
            'formats': formats,
            'description': description,
        }
# coding: utf-8
from __future__ import unicode_literals

import json

from .common import InfoExtractor
from ..utils import (
    js_to_json,
    qualities,
)


class TassIE(InfoExtractor):
    _VALID_URL = r'https?://(?:tass\.ru|itar-tass\.com)/[^/]+/(?P<id>\d+)'
    _TESTS = [
        {
            'url': 'http://tass.ru/obschestvo/1586870',
            'md5': '3b4cdd011bc59174596b6145cda474a4',
            'info_dict': {
                'id': '1586870',
                'ext': 'mp4',
                'title': 'Посетителям московского зоопарка показали красную панду',
                'description': 'Приехавшую из Дублина Зейну можно увидеть в павильоне "Кошки тропиков"',
                'thumbnail': r're:^https?://.*\.jpg$',
            },
        },
        {
            'url': 'http://itar-tass.com/obschestvo/1600009',
            'only_matching': True,
        },
    ]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        webpage = self._download_webpage(url, video_id)

        sources = json.loads(js_to_json(self._search_regex(
            r'(?s)sources\s*:\s*(\[.+?\])', webpage, 'sources')))

        quality = qualities(['sd', 'hd'])

        formats = []
        for source in sources:
            video_url = source.get('file')
            if not video_url or not video_url.startswith('http') or not video_url.endswith('.mp4'):
                continue
            label = source.get('label')
            formats.append({
                'url': video_url,
                'format_id': label,
                'quality': quality(label),
            })
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': self._og_search_title(webpage),
            'description': self._og_search_description(webpage),
            'thumbnail': self._og_search_thumbnail(webpage),
            'formats': formats,
        }
from __future__ import unicode_literals

from .common import InfoExtractor
from .ooyala import OoyalaIE


class TastyTradeIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?tastytrade\.com/tt/shows/[^/]+/episodes/(?P<id>[^/?#&]+)'

    _TESTS = [{
        'url': 'https://www.tastytrade.com/tt/shows/market-measures/episodes/correlation-in-short-volatility-06-28-2017',
        'info_dict': {
            'id': 'F3bnlzbToeI6pLEfRyrlfooIILUjz4nM',
            'ext': 'mp4',
            'title': 'A History of Teaming',
            'description': 'md5:2a9033db8da81f2edffa4c99888140b3',
            'duration': 422.255,
        },
        'params': {
            'skip_download': True,
        },
        'add_ie': ['Ooyala'],
    }, {
        'url': 'https://www.tastytrade.com/tt/shows/daily-dose/episodes/daily-dose-06-30-2017',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)

        ooyala_code = self._search_regex(
            r'data-media-id=(["\'])(?P<code>(?:(?!\1).)+)\1',
            webpage, 'ooyala code', group='code')

        info = self._search_json_ld(webpage, display_id, fatal=False)
        info.update({
            '_type': 'url_transparent',
            'ie_key': OoyalaIE.ie_key(),
            'url': 'ooyala:%s' % ooyala_code,
            'display_id': display_id,
        })
        return info
# coding: utf-8
from __future__ import unicode_literals

import re

from .turner import TurnerBaseIE
from ..compat import (
    compat_urllib_parse_urlparse,
    compat_parse_qs,
)
from ..utils import (
    float_or_none,
    int_or_none,
    strip_or_none,
)


class TBSIE(TurnerBaseIE):
    _VALID_URL = r'https?://(?:www\.)?(?P<site>tbs|tntdrama)\.com(?P<path>/(?:movies|shows/[^/]+/(?:clips|season-\d+/episode-\d+))/(?P<id>[^/?#]+))'
    _TESTS = [{
        'url': 'http://www.tntdrama.com/shows/the-alienist/clips/monster',
        'info_dict': {
            'id': '8d384cde33b89f3a43ce5329de42903ed5099887',
            'ext': 'mp4',
            'title': 'Monster',
            'description': 'Get a first look at the theatrical trailer for TNT’s highly anticipated new psychological thriller The Alienist, which premieres January 22 on TNT.',
            'timestamp': 1508175329,
            'upload_date': '20171016',
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        }
    }, {
        'url': 'http://www.tbs.com/shows/search-party/season-1/episode-1/explicit-the-mysterious-disappearance-of-the-girl-no-one-knew',
        'only_matching': True,
    }, {
        'url': 'http://www.tntdrama.com/movies/star-wars-a-new-hope',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        site, path, display_id = re.match(self._VALID_URL, url).groups()
        webpage = self._download_webpage(url, display_id)
        drupal_settings = self._parse_json(self._search_regex(
            r'<script[^>]+?data-drupal-selector="drupal-settings-json"[^>]*?>({.+?})</script>',
            webpage, 'drupal setting'), display_id)
        video_data = next(v for v in drupal_settings['turner_playlist'] if v.get('url') == path)

        media_id = video_data['mediaID']
        title = video_data['title']
        tokenizer_query = compat_parse_qs(compat_urllib_parse_urlparse(
            drupal_settings['ngtv_token_url']).query)

        info = self._extract_ngtv_info(
            media_id, tokenizer_query, {
                'url': url,
                'site_name': site[:3].upper(),
                'auth_required': video_data.get('authRequired') == '1',
            })

        thumbnails = []
        for image_id, image in video_data.get('images', {}).items():
            image_url = image.get('url')
            if not image_url or image.get('type') != 'video':
                continue
            i = {
                'id': image_id,
                'url': image_url,
            }
            mobj = re.search(r'(\d+)x(\d+)', image_url)
            if mobj:
                i.update({
                    'width': int(mobj.group(1)),
                    'height': int(mobj.group(2)),
                })
            thumbnails.append(i)

        info.update({
            'id': media_id,
            'title': title,
            'description': strip_or_none(video_data.get('descriptionNoTags') or video_data.get('shortDescriptionNoTags')),
            'duration': float_or_none(video_data.get('duration')) or info.get('duration'),
            'timestamp': int_or_none(video_data.get('created')),
            'season_number': int_or_none(video_data.get('season')),
            'episode_number': int_or_none(video_data.get('episode')),
            'thumbnails': thumbnails,
        })
        return info
from __future__ import unicode_literals

from .common import InfoExtractor


class TDSLifewayIE(InfoExtractor):
    _VALID_URL = r'https?://tds\.lifeway\.com/v1/trainingdeliverysystem/courses/(?P<id>\d+)/index\.html'

    _TEST = {
        # From http://www.ministrygrid.com/training-viewer/-/training/t4g-2014-conference/the-gospel-by-numbers-4/the-gospel-by-numbers
        'url': 'http://tds.lifeway.com/v1/trainingdeliverysystem/courses/3453494717001/index.html?externalRegistration=AssetId%7C34F466F1-78F3-4619-B2AB-A8EFFA55E9E9%21InstanceId%7C0%21UserId%7Caaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa&grouping=http%3A%2F%2Flifeway.com%2Fvideo%2F3453494717001&activity_id=http%3A%2F%2Flifeway.com%2Fvideo%2F3453494717001&content_endpoint=http%3A%2F%2Ftds.lifeway.com%2Fv1%2Ftrainingdeliverysystem%2FScormEngineInterface%2FTCAPI%2Fcontent%2F&actor=%7B%22name%22%3A%5B%22Guest%20Guest%22%5D%2C%22account%22%3A%5B%7B%22accountServiceHomePage%22%3A%22http%3A%2F%2Fscorm.lifeway.com%2F%22%2C%22accountName%22%3A%22aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa%22%7D%5D%2C%22objectType%22%3A%22Agent%22%7D&content_token=462a50b2-b6f9-4970-99b1-930882c499fb&registration=93d6ec8e-7f7b-4ed3-bbc8-a857913c0b2a&externalConfiguration=access%7CFREE%21adLength%7C-1%21assignOrgId%7C4AE36F78-299A-425D-91EF-E14A899B725F%21assignOrgParentId%7C%21courseId%7C%21isAnonymous%7Cfalse%21previewAsset%7Cfalse%21previewLength%7C-1%21previewMode%7Cfalse%21royalty%7CFREE%21sessionId%7C671422F9-8E79-48D4-9C2C-4EE6111EA1CD%21trackId%7C&auth=Basic%20OjhmZjk5MDBmLTBlYTMtNDJhYS04YjFlLWE4MWQ3NGNkOGRjYw%3D%3D&endpoint=http%3A%2F%2Ftds.lifeway.com%2Fv1%2Ftrainingdeliverysystem%2FScormEngineInterface%2FTCAPI%2F',
        'info_dict': {
            'id': '3453494717001',
            'ext': 'mp4',
            'title': 'The Gospel by Numbers',
            'thumbnail': r're:^https?://.*\.jpg',
            'upload_date': '20140410',
            'description': 'Coming soon from T4G 2014!',
            'uploader_id': '2034960640001',
            'timestamp': 1397145591,
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
        'add_ie': ['BrightcoveNew'],
    }

    BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/2034960640001/default_default/index.html?videoId=%s'

    def _real_extract(self, url):
        brightcove_id = self._match_id(url)
        return self.url_result(self.BRIGHTCOVE_URL_TEMPLATE % brightcove_id, 'BrightcoveNew', brightcove_id)
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from .wistia import WistiaIE
from ..utils import (
    clean_html,
    ExtractorError,
    int_or_none,
    get_element_by_class,
    strip_or_none,
    urlencode_postdata,
    urljoin,
)


class TeachableBaseIE(InfoExtractor):
    _NETRC_MACHINE = 'teachable'
    _URL_PREFIX = 'teachable:'

    _SITES = {
        # Only notable ones here
        'v1.upskillcourses.com': 'upskill',
        'gns3.teachable.com': 'gns3',
        'academyhacker.com': 'academyhacker',
        'stackskills.com': 'stackskills',
        'market.saleshacker.com': 'saleshacker',
        'learnability.org': 'learnability',
        'edurila.com': 'edurila',
        'courses.workitdaily.com': 'workitdaily',
    }

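    # Substituted into the subclasses' _VALID_URL: the 'teachable:' prefix form
    # and an alternation of the known site domains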
    _VALID_URL_SUB_TUPLE = (_URL_PREFIX, '|'.join(re.escape(site) for site in _SITES.keys()))

    def _real_initialize(self):
        self._logged_in = False

    def _login(self, site):
        if self._logged_in:
            return

        username, password = self._get_login_info(
            netrc_machine=self._SITES.get(site, site))
        if username is None:
            return

        login_page, urlh = self._download_webpage_handle(
            'https://%s/sign_in' % site, None,
            'Downloading %s login page' % site)

        def is_logged(webpage):
            return any(re.search(p, webpage) for p in (
                r'class=["\']user-signout',
                r'<a[^>]+\bhref=["\']/sign_out',
                r'Log\s+[Oo]ut\s*<'))

        if is_logged(login_page):
            self._logged_in = True
            return

        login_url = urlh.geturl()

        login_form = self._hidden_inputs(login_page)

        login_form.update({
            'user[email]': username,
            'user[password]': password,
        })

        post_url = self._search_regex(
            r'<form[^>]+action=(["\'])(?P<url>(?:(?!\1).)+)\1', login_page,
            'post url', default=login_url, group='url')

        if not post_url.startswith('http'):
            post_url = urljoin(login_url, post_url)

        response = self._download_webpage(
            post_url, None, 'Logging in to %s' % site,
            data=urlencode_postdata(login_form),
            headers={
                'Content-Type': 'application/x-www-form-urlencoded',
                'Referer': login_url,
            })

        if '>I accept the new Privacy Policy<' in response:
            raise ExtractorError(
                'Unable to login: %s asks you to accept new Privacy Policy. '
                'Go to https://%s/ and accept.' % (site, site), expected=True)

        # Successful login
        if is_logged(response):
            self._logged_in = True
            return

        message = get_element_by_class('alert', response)
        if message is not None:
            raise ExtractorError(
                'Unable to login: %s' % clean_html(message), expected=True)

        raise ExtractorError('Unable to log in')


class TeachableIE(TeachableBaseIE):
    _VALID_URL = r'''(?x)
                    (?:
                        %shttps?://(?P<site_t>[^/]+)|
                        https?://(?:www\.)?(?P<site>%s)
                    )
                    /courses/[^/]+/lectures/(?P<id>\d+)
                    ''' % TeachableBaseIE._VALID_URL_SUB_TUPLE

    _TESTS = [{
        'url': 'https://gns3.teachable.com/courses/gns3-certified-associate/lectures/6842364',
        'info_dict': {
            'id': 'untlgzk1v7',
            'ext': 'bin',
            'title': 'Overview',
            'description': 'md5:071463ff08b86c208811130ea1c2464c',
            'duration': 736.4,
            'timestamp': 1542315762,
            'upload_date': '20181115',
            'chapter': 'Welcome',
            'chapter_number': 1,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'http://v1.upskillcourses.com/courses/119763/lectures/1747100',
        'only_matching': True,
    }, {
        'url': 'https://gns3.teachable.com/courses/423415/lectures/6885939',
        'only_matching': True,
    }, {
        'url': 'teachable:https://v1.upskillcourses.com/courses/essential-web-developer-course/lectures/1747100',
        'only_matching': True,
    }]

    @staticmethod
    def _is_teachable(webpage):
        return 'teachableTracker.linker:autoLink' in webpage and re.search(
            r'<link[^>]+href=["\']https?://process\.fs\.teachablecdn\.com',
            webpage)

    @staticmethod
    def _extract_url(webpage, source_url):
        if not TeachableIE._is_teachable(webpage):
            return
        if re.match(r'https?://[^/]+/(?:courses|p)', source_url):
            return '%s%s' % (TeachableBaseIE._URL_PREFIX, source_url)

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        site = mobj.group('site') or mobj.group('site_t')
        video_id = mobj.group('id')

        self._login(site)

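        # Strip the 'teachable:' prefix (added when handing off embedded pages)
        # before fetching the actual page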
        prefixed = url.startswith(self._URL_PREFIX)
        if prefixed:
            url = url[len(self._URL_PREFIX):]

        webpage = self._download_webpage(url, video_id)

        wistia_urls = WistiaIE._extract_urls(webpage)
        if not wistia_urls:
            if any(re.search(p, webpage) for p in (
                    r'class=["\']lecture-contents-locked',
                    r'>\s*Lecture contents locked',
                    r'id=["\']lecture-locked',
                    # https://academy.tailoredtutors.co.uk/courses/108779/lectures/1955313
                    r'class=["\'](?:inner-)?lesson-locked',
                    r'>LESSON LOCKED<')):
                self.raise_login_required('Lecture contents locked')
            raise ExtractorError('Unable to find video URL')

        title = self._og_search_title(webpage, default=None)

        chapter = None
        chapter_number = None
        section_item = self._search_regex(
            r'(?s)(?P<li><li[^>]+\bdata-lecture-id=["\']%s[^>]+>.+?</li>)' % video_id,
            webpage, 'section item', default=None, group='li')
        if section_item:
            chapter_number = int_or_none(self._search_regex(
                r'data-ss-position=["\'](\d+)', section_item, 'section id',
                default=None))
            if chapter_number is not None:
                sections = []
                for s in re.findall(
                        r'(?s)<div[^>]+\bclass=["\']section-title[^>]+>(.+?)</div>', webpage):
                    section = strip_or_none(clean_html(s))
                    if not section:
                        sections = []
                        break
                    sections.append(section)
                if chapter_number <= len(sections):
                    chapter = sections[chapter_number - 1]

        entries = [{
            '_type': 'url_transparent',
            'url': wistia_url,
            'ie_key': WistiaIE.ie_key(),
            'title': title,
            'chapter': chapter,
            'chapter_number': chapter_number,
        } for wistia_url in wistia_urls]

        return self.playlist_result(entries, video_id, title)


class TeachableCourseIE(TeachableBaseIE):
    _VALID_URL = r'''(?x)
                        (?:
                            %shttps?://(?P<site_t>[^/]+)|
                            https?://(?:www\.)?(?P<site>%s)
                        )
                        /(?:courses|p)/(?:enrolled/)?(?P<id>[^/?#&]+)
                    ''' % TeachableBaseIE._VALID_URL_SUB_TUPLE
    _TESTS = [{
        'url': 'http://v1.upskillcourses.com/courses/essential-web-developer-course/',
        'info_dict': {
            'id': 'essential-web-developer-course',
            'title': 'The Essential Web Developer Course (Free)',
        },
        'playlist_count': 192,
    }, {
        'url': 'http://v1.upskillcourses.com/courses/119763/',
        'only_matching': True,
    }, {
        'url': 'http://v1.upskillcourses.com/courses/enrolled/119763',
        'only_matching': True,
    }, {
        'url': 'https://gns3.teachable.com/courses/enrolled/423415',
        'only_matching': True,
    }, {
        'url': 'teachable:https://learn.vrdev.school/p/gear-vr-developer-mini',
        'only_matching': True,
    }, {
        'url': 'teachable:https://filmsimplified.com/p/davinci-resolve-15-crash-course',
        'only_matching': True,
    }]

    @classmethod
    def suitable(cls, url):
        return False if TeachableIE.suitable(url) else super(
            TeachableCourseIE, cls).suitable(url)

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        site = mobj.group('site') or mobj.group('site_t')
        course_id = mobj.group('id')

        self._login(site)

        prefixed = url.startswith(self._URL_PREFIX)
        if prefixed:
            prefix = self._URL_PREFIX
            url = url[len(prefix):]

        webpage = self._download_webpage(url, course_id)

        url_base = 'https://%s/' % site

        entries = []

        for mobj in re.finditer(
                r'(?s)(?P<li><li[^>]+class=(["\'])(?:(?!\2).)*?section-item[^>]+>.+?</li>)',
                webpage):
            li = mobj.group('li')
            if 'fa-youtube-play' not in li:
                continue
            lecture_url = self._search_regex(
                r'<a[^>]+href=(["\'])(?P<url>(?:(?!\1).)+)\1', li,
                'lecture url', default=None, group='url')
            if not lecture_url:
                continue
            lecture_id = self._search_regex(
                r'/lectures/(\d+)', lecture_url, 'lecture id', default=None)
            title = self._html_search_regex(
                r'<span[^>]+class=["\']lecture-name[^>]+>([^<]+)', li,
                'title', default=None)
            entry_url = urljoin(url_base, lecture_url)
            if prefixed:
                entry_url = self._URL_PREFIX + entry_url
            entries.append(
                self.url_result(
                    entry_url,
                    ie=TeachableIE.ie_key(), video_id=lecture_id,
                    video_title=clean_html(title)))

        course_title = self._html_search_regex(
            (r'(?s)<img[^>]+class=["\']course-image[^>]+>\s*<h\d>(.+?)</h',
             r'(?s)<h\d[^>]+class=["\']course-title[^>]+>(.+?)</h'),
            webpage, 'course title', fatal=False)

        return self.playlist_result(entries, course_id, course_title)
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    determine_ext,
    ExtractorError,
    qualities,
)


class TeacherTubeIE(InfoExtractor):
    IE_NAME = 'teachertube'
    IE_DESC = 'teachertube.com videos'

    _VALID_URL = r'https?://(?:www\.)?teachertube\.com/(viewVideo\.php\?video_id=|music\.php\?music_id=|video/(?:[\da-z-]+-)?|audio/)(?P<id>\d+)'

    _TESTS = [{
        # flowplayer
        'url': 'http://www.teachertube.com/viewVideo.php?video_id=339997',
        'md5': 'f9434ef992fd65936d72999951ee254c',
        'info_dict': {
            'id': '339997',
            'ext': 'mp4',
            'title': 'Measures of dispersion from a frequency table',
            'description': 'Measures of dispersion from a frequency table',
            'thumbnail': r're:https?://.*\.(?:jpg|png)',
        },
    }, {
        # jwplayer
        'url': 'http://www.teachertube.com/music.php?music_id=8805',
        'md5': '01e8352006c65757caf7b961f6050e21',
        'info_dict': {
            'id': '8805',
            'ext': 'mp3',
            'title': 'PER ASPERA AD ASTRA',
            'description': 'RADIJSKA EMISIJA ZRAKOPLOVNE TEHNI?KE ?KOLE P',
        },
    }, {
        # unavailable video
        'url': 'http://www.teachertube.com/video/intro-video-schleicher-297790',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)

        error = self._search_regex(
            r'<div\b[^>]+\bclass=["\']msgBox error[^>]+>([^<]+)', webpage,
            'error', default=None)
        if error:
            raise ExtractorError('%s said: %s' % (self.IE_NAME, error), expected=True)

        title = self._html_search_meta('title', webpage, 'title', fatal=True)
        TITLE_SUFFIX = ' - TeacherTube'
        if title.endswith(TITLE_SUFFIX):
            title = title[:-len(TITLE_SUFFIX)].strip()

        description = self._html_search_meta('description', webpage, 'description')
        if description:
            description = description.strip()

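        # Rank candidate media URLs by container: mp4 over flv over mp3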
        quality = qualities(['mp3', 'flv', 'mp4'])

        media_urls = re.findall(r'data-contenturl="([^"]+)"', webpage)
        media_urls.extend(re.findall(r'var\s+filePath\s*=\s*"([^"]+)"', webpage))
        media_urls.extend(re.findall(r'\'file\'\s*:\s*["\']([^"\']+)["\'],', webpage))

        formats = [
            {
                'url': media_url,
                'quality': quality(determine_ext(media_url))
            } for media_url in set(media_urls)
        ]

        self._sort_formats(formats)

        thumbnail = self._og_search_thumbnail(
            webpage, default=None) or self._html_search_meta(
            'thumbnail', webpage)

        return {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnail': thumbnail,
            'formats': formats,
        }


class TeacherTubeUserIE(InfoExtractor):
    IE_NAME = 'teachertube:user:collection'
    IE_DESC = 'teachertube.com user and collection videos'

    _VALID_URL = r'https?://(?:www\.)?teachertube\.com/(user/profile|collection)/(?P<user>[0-9a-zA-Z]+)/?'

    _MEDIA_RE = r'''(?sx)
        class="?sidebar_thumb_time"?>[0-9:]+</div>
        \s*
        <a\s+href="(https?://(?:www\.)?teachertube\.com/(?:video|audio)/[^"]+)"
    '''
    _TEST = {
        'url': 'http://www.teachertube.com/user/profile/rbhagwati2',
        'info_dict': {
            'id': 'rbhagwati2'
        },
        'playlist_mincount': 179,
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        user_id = mobj.group('user')

        urls = []
        webpage = self._download_webpage(url, user_id)
        urls.extend(re.findall(self._MEDIA_RE, webpage))

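        # Fetch each additional AJAX pager page; the last pager link is dropped,
        # apparently a duplicate of one already collected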
        pages = re.findall(r'/ajax-user/user-videos/%s\?page=([0-9]+)' % user_id, webpage)[:-1]
        for p in pages:
            more = 'http://www.teachertube.com/ajax-user/user-videos/%s?page=%s' % (user_id, p)
            webpage = self._download_webpage(more, user_id, 'Downloading page %s/%s' % (p, len(pages)))
            video_urls = re.findall(self._MEDIA_RE, webpage)
            urls.extend(video_urls)

        entries = [self.url_result(vurl, 'TeacherTube') for vurl in urls]
        return self.playlist_result(entries, user_id)
from __future__ import unicode_literals

from .common import InfoExtractor


class TeachingChannelIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?teachingchannel\.org/videos?/(?P<id>[^/?&#]+)'

    _TEST = {
        'url': 'https://www.teachingchannel.org/videos/teacher-teaming-evolution',
        'info_dict': {
            'id': '3swwlzkT',
            'ext': 'mp4',
            'title': 'A History of Teaming',
            'description': 'md5:2a9033db8da81f2edffa4c99888140b3',
            'duration': 422,
            'upload_date': '20170316',
            'timestamp': 1489691297,
        },
        'params': {
            'skip_download': True,
        },
        'add_ie': ['JWPlatform'],
    }

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        mid = self._search_regex(
            r'(?:data-mid=["\']|id=["\']jw-video-player-)([a-zA-Z0-9]{8})',
            webpage, 'media id')

        return self.url_result('jwplatform:' + mid, 'JWPlatform', mid)
# coding: utf-8
from __future__ import unicode_literals

import json

from .turner import TurnerBaseIE
from ..utils import (
    determine_ext,
    ExtractorError,
    int_or_none,
    mimetype2ext,
    parse_duration,
    parse_iso8601,
    qualities,
)


class TeamcocoIE(TurnerBaseIE):
    _VALID_URL = r'https?://(?:\w+\.)?teamcoco\.com/(?P<id>([^/]+/)*[^/?#]+)'
    _TESTS = [
        {
            'url': 'http://teamcoco.com/video/mary-kay-remote',
            'md5': '55d532f81992f5c92046ad02fec34d7d',
            'info_dict': {
                'id': '80187',
                'ext': 'mp4',
                'title': 'Conan Becomes A Mary Kay Beauty Consultant',
                'description': 'Mary Kay is perhaps the most trusted name in female beauty, so of course Conan is a natural choice to sell their products.',
                'duration': 495.0,
                'upload_date': '20140402',
                'timestamp': 1396407600,
            }
        }, {
            'url': 'http://teamcoco.com/video/louis-ck-interview-george-w-bush',
            'md5': 'cde9ba0fa3506f5f017ce11ead928f9a',
            'info_dict': {
                'id': '19705',
                'ext': 'mp4',
                'description': 'Louis C.K. got starstruck by George W. Bush, so what? Part one.',
                'title': 'Louis C.K. Interview Pt. 1 11/3/11',
                'duration': 288,
                'upload_date': '20111104',
                'timestamp': 1320405840,
            }
        }, {
            'url': 'http://teamcoco.com/video/timothy-olyphant-drinking-whiskey',
            'info_dict': {
                'id': '88748',
                'ext': 'mp4',
                'title': 'Timothy Olyphant Raises A Toast To “Justified”',
                'description': 'md5:15501f23f020e793aeca761205e42c24',
                'upload_date': '20150415',
                'timestamp': 1429088400,
            },
            'params': {
                'skip_download': True,  # m3u8 downloads
            }
        }, {
            'url': 'http://teamcoco.com/video/full-episode-mon-6-1-joel-mchale-jake-tapper-and-musical-guest-courtney-barnett?playlist=x;eyJ0eXBlIjoidGFnIiwiaWQiOjl9',
            'info_dict': {
                'id': '89341',
                'ext': 'mp4',
                'title': 'Full Episode - Mon. 6/1 - Joel McHale, Jake Tapper, And Musical Guest Courtney Barnett',
                'description': 'Guests: Joel McHale, Jake Tapper, And Musical Guest Courtney Barnett',
            },
            'params': {
                'skip_download': True,  # m3u8 downloads
            },
            'skip': 'This video is no longer available.',
        }, {
            'url': 'http://teamcoco.com/video/the-conan-audiencey-awards-for-04/25/18',
            'only_matching': True,
        }, {
            'url': 'http://teamcoco.com/italy/conan-jordan-schlansky-hit-the-streets-of-florence',
            'only_matching': True,
        }, {
            'url': 'http://teamcoco.com/haiti/conan-s-haitian-history-lesson',
            'only_matching': True,
        }, {
            'url': 'http://teamcoco.com/israel/conan-hits-the-streets-beaches-of-tel-aviv',
            'only_matching': True,
        }, {
            'url': 'https://conan25.teamcoco.com/video/ice-cube-kevin-hart-conan-share-lyft',
            'only_matching': True,
        }
    ]
    _RECORD_TEMPL = '''id
        title
        teaser
        publishOn
        thumb {
          preview
        }
        tags {
          name
        }
        duration
        turnerMediaId
        turnerMediaAuthToken'''

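    # POST a GraphQL query to teamcoco.com and unwrap the find<ObjectType>
    # payload from the response.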
    def _graphql_call(self, query_template, object_type, object_id):
        find_object = 'find' + object_type
        return self._download_json(
            'https://teamcoco.com/graphql', object_id, data=json.dumps({
                'query': query_template % (find_object, object_id)
            }).encode(), headers={
                'Content-Type': 'application/json',
            })['data'][find_object]

    def _real_extract(self, url):
        display_id = self._match_id(url)

        response = self._graphql_call('''{
  %%s(slug: "%%s") {
    ... on RecordSlug {
      record {
        %s
      }
    }
    ... on PageSlug {
      child {
        id
      }
    }
    ... on NotFoundSlug {
      status
    }
  }
}''' % self._RECORD_TEMPL, 'Slug', display_id)
        if response.get('status'):
            raise ExtractorError('This video is no longer available.', expected=True)

        child = response.get('child')
        if child:
            record = self._graphql_call('''{
  %%s(id: "%%s") {
    ... on Video {
      %s
    }
  }
}''' % self._RECORD_TEMPL, 'Record', child['id'])
        else:
            record = response['record']
        video_id = record['id']

        info = {
            'id': video_id,
            'display_id': display_id,
            'title': record['title'],
            'thumbnail': record.get('thumb', {}).get('preview'),
            'description': record.get('teaser'),
            'duration': parse_duration(record.get('duration')),
            'timestamp': parse_iso8601(record.get('publishOn')),
        }

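        # Turner-hosted videos carry a media ID and auth token and go through
        # the NGTV pipeline (geo bypass set up for the US); everything else
        # falls back to the site's own "truman" source listing.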
        media_id = record.get('turnerMediaId')
        if media_id:
            self._initialize_geo_bypass({
                'countries': ['US'],
            })
            info.update(self._extract_ngtv_info(media_id, {
                'accessToken': record['turnerMediaAuthToken'],
                'accessTokenType': 'jws',
            }))
        else:
            video_sources = self._download_json(
                'https://teamcoco.com/_truman/d/' + video_id,
                video_id)['meta']['src']
            if isinstance(video_sources, dict):
                video_sources = video_sources.values()

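            # Build formats from the source list, ranking each entry by the
            # quality its label implies.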
            formats = []
            get_quality = qualities(['low', 'sd', 'hd', 'uhd'])
            for src in video_sources:
                if not isinstance(src, dict):
                    continue
                src_url = src.get('src')
                if not src_url:
                    continue
                format_id = src.get('label')
                ext = determine_ext(src_url, mimetype2ext(src.get('type')))
                if format_id == 'hls' or ext == 'm3u8':
                    # compat_urllib_parse.urljoin does not work here
                    if src_url.startswith('/'):
                        src_url = 'http://ht.cdn.turner.com/tbs/big/teamcoco' + src_url
                    formats.extend(self._extract_m3u8_formats(
                        src_url, video_id, 'mp4', m3u8_id=format_id, fatal=False))
                else:
                    if src_url.startswith('/mp4:protected/'):
                        # TODO Correct extraction for these files
                        continue
                    tbr = int_or_none(self._search_regex(
                        r'(\d+)k\.mp4', src_url, 'tbr', default=None))

                    formats.append({
                        'url': src_url,
                        'ext': ext,
                        'tbr': tbr,
                        'format_id': format_id,
                        'quality': get_quality(format_id),
                    })
            self._sort_formats(formats)
            info['formats'] = formats

        return info
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    clean_html,
    determine_ext,
    ExtractorError,
    float_or_none,
    get_element_by_class,
    get_element_by_id,
    parse_duration,
    remove_end,
    urlencode_postdata,
    urljoin,
)


class TeamTreeHouseIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?teamtreehouse\.com/library/(?P<id>[^/]+)'
    _TESTS = [{
        # Course
        'url': 'https://teamtreehouse.com/library/introduction-to-user-authentication-in-php',
        'info_dict': {
            'id': 'introduction-to-user-authentication-in-php',
            'title': 'Introduction to User Authentication in PHP',
            'description': 'md5:405d7b4287a159b27ddf30ca72b5b053',
        },
        'playlist_mincount': 24,
    }, {
        # WorkShop
        'url': 'https://teamtreehouse.com/library/deploying-a-react-app',
        'info_dict': {
            'id': 'deploying-a-react-app',
            'title': 'Deploying a React App',
            'description': 'md5:10a82e3ddff18c14ac13581c9b8e5921',
        },
        'playlist_mincount': 4,
    }, {
        # Video
        'url': 'https://teamtreehouse.com/library/application-overview-2',
        'info_dict': {
            'id': 'application-overview-2',
            'ext': 'mp4',
            'title': 'Application Overview',
            'description': 'md5:4b0a234385c27140a4378de5f1e15127',
        },
        'expected_warnings': ['This is just a preview'],
    }]
    _NETRC_MACHINE = 'teamtreehouse'

    def _real_initialize(self):
        email, password = self._get_login_info()
        if email is None:
            return

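        # Submit the sign-in form along with its hidden inputs; a rendered
        # error-message element indicates the login failed.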
        signin_page = self._download_webpage(
            'https://teamtreehouse.com/signin',
            None, 'Downloading signin page')
        data = self._form_hidden_inputs('new_user_session', signin_page)
        data.update({
            'user_session[email]': email,
            'user_session[password]': password,
        })
        error_message = get_element_by_class('error-message', self._download_webpage(
            'https://teamtreehouse.com/person_session',
            None, 'Logging in', data=urlencode_postdata(data)))
        if error_message:
            raise ExtractorError(clean_html(error_message), expected=True)

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        title = self._html_search_meta(['og:title', 'twitter:title'], webpage)
        description = self._html_search_meta(
            ['description', 'og:description', 'twitter:description'], webpage)
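        # An HTML5 media element means this is a single video page; otherwise
        # the page is a course or workshop and is treated as a playlist.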
        entries = self._parse_html5_media_entries(url, webpage, display_id)
        if entries:
            info = entries[0]

            for subtitles in info.get('subtitles', {}).values():
                for subtitle in subtitles:
                    subtitle['ext'] = determine_ext(subtitle['url'], 'srt')

            is_preview = 'data-preview="true"' in webpage
            if is_preview:
                self.report_warning(
                    'This is just a preview. You need to be signed in with a Basic account to download the entire video.', display_id)
                duration = 30
            else:
                duration = float_or_none(self._search_regex(
                    r'data-duration="(\d+)"', webpage, 'duration',
                    default=None), 1000)
                if not duration:
                    duration = parse_duration(get_element_by_id(
                        'video-duration', webpage))

            info.update({
                'id': display_id,
                'title': title,
                'description': description,
                'duration': duration,
            })
            return info
        else:
            def extract_urls(html, extract_info=None):
                for path in re.findall(r'<a[^>]+href="([^"]+)"', html):
                    page_url = urljoin(url, path)
                    entry = {
                        '_type': 'url_transparent',
                        'id': self._match_id(page_url),
                        'url': page_url,
                        'ie_key': self.ie_key(),
                    }
                    if extract_info:
                        entry.update(extract_info)
                    entries.append(entry)

            workshop_videos = self._search_regex(
                r'(?s)<ul[^>]+id="workshop-videos"[^>]*>(.+?)</ul>',
                webpage, 'workshop videos', default=None)
            if workshop_videos:
                extract_urls(workshop_videos)
            else:
                stages_path = self._search_regex(
                    r'(?s)<div[^>]+id="syllabus-stages"[^>]+data-url="([^"]+)"',
                    webpage, 'stages path')
                if stages_path:
                    stages_page = self._download_webpage(
                        urljoin(url, stages_path), display_id, 'Downloading stages page')
                    for chapter_number, (chapter, steps_list) in enumerate(re.findall(r'(?s)<h2[^>]*>\s*(.+?)\s*</h2>.+?<ul[^>]*>(.+?)</ul>', stages_page), 1):
                        extract_urls(steps_list, {
                            'chapter': chapter,
                            'chapter_number': chapter_number,
                        })
                    title = remove_end(title, ' Course')

            return self.playlist_result(
                entries, display_id, title, description)
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..utils import (
    get_element_by_attribute,
    clean_html,
)


class TechTalksIE(InfoExtractor):
    _VALID_URL = r'https?://techtalks\.tv/talks/(?:[^/]+/)?(?P<id>\d+)'

    _TESTS = [{
        'url': 'http://techtalks.tv/talks/learning-topic-models-going-beyond-svd/57758/',
        'info_dict': {
            'id': '57758',
            'title': 'Learning Topic Models --- Going beyond SVD',
        },
        'playlist': [
            {
                'info_dict': {
                    'id': '57758',
                    'ext': 'flv',
                    'title': 'Learning Topic Models --- Going beyond SVD',
                },
            },
            {
                'info_dict': {
                    'id': '57758-slides',
                    'ext': 'flv',
                    'title': 'Learning Topic Models --- Going beyond SVD',
                },
            },
        ],
        'params': {
            # rtmp download
            'skip_download': True,
        },
    }, {
        'url': 'http://techtalks.tv/talks/57758',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        talk_id = mobj.group('id')
        webpage = self._download_webpage(url, talk_id)
        rtmp_url = self._search_regex(
            r'netConnectionUrl: \'(.*?)\'', webpage, 'rtmp url')
        play_path = self._search_regex(
            r'href=\'(.*?)\' [^>]*id="flowplayer_presenter"',
            webpage, 'presenter play path')
        title = clean_html(get_element_by_attribute('class', 'title', webpage))
        video_info = {
            'id': talk_id,
            'title': title,
            'url': rtmp_url,
            'play_path': play_path,
            'ext': 'flv',
        }
        m_slides = re.search(r'<a class="slides" href=\'(.*?)\'', webpage)
        if m_slides is None:
            return video_info
        else:
            return {
                '_type': 'playlist',
                'id': talk_id,
                'title': title,
                'entries': [
                    video_info,
                    # The slides video
                    {
                        'id': talk_id + '-slides',
                        'title': title,
                        'url': rtmp_url,
                        'play_path': m_slides.group(1),
                        'ext': 'flv',
                    },
                ],
            }
from __future__ import unicode_literals

import json
import re

from .common import InfoExtractor

from ..compat import (
    compat_str,
    compat_urlparse
)
from ..utils import (
    extract_attributes,
    float_or_none,
    int_or_none,
    try_get,
    url_or_none,
)


class TEDIE(InfoExtractor):
    IE_NAME = 'ted'
    _VALID_URL = r'''(?x)
        (?P<proto>https?://)
        (?P<type>www|embed(?:-ssl)?)(?P<urlmain>\.ted\.com/
        (
            (?P<type_playlist>playlists(?:/(?P<playlist_id>\d+))?) # We have a playlist
            |
            ((?P<type_talk>talks)) # We have a simple talk
            |
            (?P<type_watch>watch)/[^/]+/[^/]+
        )
        (/lang/(.*?))? # The url may contain the language
        /(?P<name>[\w-]+) # Here goes the name and then ".html"
        .*)$
        '''
    _TESTS = [{
        'url': 'http://www.ted.com/talks/dan_dennett_on_our_consciousness.html',
        'md5': 'b0ce2b05ca215042124fbc9e3886493a',
        'info_dict': {
            'id': '102',
            'ext': 'mp4',
            'title': 'The illusion of consciousness',
            'description': ('Philosopher Dan Dennett makes a compelling '
                            'argument that not only don\'t we understand our own '
                            'consciousness, but that half the time our brains are '
                            'actively fooling us.'),
            'uploader': 'Dan Dennett',
            'width': 853,
            'duration': 1308,
            'view_count': int,
            'comment_count': int,
            'tags': list,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # missing HTTP bitrates
        'url': 'https://www.ted.com/talks/vishal_sikka_the_beauty_and_power_of_algorithms',
        'info_dict': {
            'id': '6069',
            'ext': 'mp4',
            'title': 'The beauty and power of algorithms',
            'thumbnail': r're:^https?://.+\.jpg',
            'description': 'md5:734e352710fb00d840ab87ae31aaf688',
            'uploader': 'Vishal Sikka',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'http://www.ted.com/talks/gabby_giffords_and_mark_kelly_be_passionate_be_courageous_be_your_best',
        'md5': 'e6b9617c01a7970ceac8bb2c92c346c0',
        'info_dict': {
            'id': '1972',
            'ext': 'mp4',
            'title': 'Be passionate. Be courageous. Be your best.',
            'uploader': 'Gabby Giffords and Mark Kelly',
            'description': 'md5:5174aed4d0f16021b704120360f72b92',
            'duration': 1128,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'http://www.ted.com/playlists/who_are_the_hackers',
        'info_dict': {
            'id': '10',
            'title': 'Who are the hackers?',
            'description': 'md5:49a0dbe8fb76d81a0e64b4a80af7f15a'
        },
        'playlist_mincount': 6,
    }, {
        # contains a youtube video
        'url': 'https://www.ted.com/talks/douglas_adams_parrots_the_universe_and_everything',
        'add_ie': ['Youtube'],
        'info_dict': {
            'id': '_ZG8HBuDjgc',
            'ext': 'webm',
            'title': 'Douglas Adams: Parrots the Universe and Everything',
            'description': 'md5:01ad1e199c49ac640cb1196c0e9016af',
            'uploader': 'University of California Television (UCTV)',
            'uploader_id': 'UCtelevision',
            'upload_date': '20080522',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # no nativeDownloads
        'url': 'https://www.ted.com/talks/tom_thum_the_orchestra_in_my_mouth',
        'info_dict': {
            'id': '1792',
            'ext': 'mp4',
            'title': 'The orchestra in my mouth',
            'description': 'md5:5d1d78650e2f8dfcbb8ebee2951ac29a',
            'uploader': 'Tom Thum',
            'view_count': int,
            'comment_count': int,
            'tags': list,
        },
        'params': {
            'skip_download': True,
        },
    }]

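    # Fixed dimensions for TED's three native download qualities.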
    _NATIVE_FORMATS = {
        'low': {'width': 320, 'height': 180},
        'medium': {'width': 512, 'height': 288},
        'high': {'width': 854, 'height': 480},
    }

    def _extract_info(self, webpage):
        info_json = self._search_regex(
            r'(?s)q\(\s*"\w+.init"\s*,\s*({.+?})\)\s*</script>',
            webpage, 'info json')
        return json.loads(info_json)

    def _real_extract(self, url):
        m = re.match(self._VALID_URL, url, re.VERBOSE)
        if m.group('type').startswith('embed'):
            desktop_url = m.group('proto') + 'www' + m.group('urlmain')
            return self.url_result(desktop_url, 'TED')
        name = m.group('name')
        if m.group('type_talk'):
            return self._talk_info(url, name)
        elif m.group('type_watch'):
            return self._watch_info(url, name)
        else:
            return self._playlist_videos_info(url, name)

    def _playlist_videos_info(self, url, name):
        '''Returns the videos of the playlist'''

        webpage = self._download_webpage(url, name,
                                         'Downloading playlist webpage')

        playlist_entries = []
        for entry in re.findall(r'(?s)<[^>]+data-ga-context=["\']playlist["\'][^>]*>', webpage):
            attrs = extract_attributes(entry)
            entry_url = compat_urlparse.urljoin(url, attrs['href'])
            playlist_entries.append(self.url_result(entry_url, self.ie_key()))

        final_url = self._og_search_url(webpage, fatal=False)
        playlist_id = (
            re.match(self._VALID_URL, final_url).group('playlist_id')
            if final_url else None)

        return self.playlist_result(
            playlist_entries, playlist_id=playlist_id,
            playlist_title=self._og_search_title(webpage, fatal=False),
            playlist_description=self._og_search_description(webpage))

    def _talk_info(self, url, video_name):
        webpage = self._download_webpage(url, video_name)

        info = self._extract_info(webpage)

        data = try_get(info, lambda x: x['__INITIAL_DATA__'], dict) or info
        talk_info = data['talks'][0]

        title = talk_info['title'].strip()

        downloads = talk_info.get('downloads') or {}
        native_downloads = downloads.get('nativeDownloads') or talk_info.get('nativeDownloads') or {}

        formats = [{
            'url': format_url,
            'format_id': format_id,
        } for (format_id, format_url) in native_downloads.items() if format_url is not None]

        subtitled_downloads = downloads.get('subtitledDownloads') or {}
        for lang, subtitled_download in subtitled_downloads.items():
            for q in self._NATIVE_FORMATS:
                q_url = subtitled_download.get(q)
                if not q_url:
                    continue
                formats.append({
                    'url': q_url,
                    'format_id': '%s-%s' % (q, lang),
                    'language': lang,
                })

        if formats:
            for f in formats:
                finfo = self._NATIVE_FORMATS.get(f['format_id'].split('-')[0])
                if finfo:
                    f.update(finfo)

        player_talk = talk_info['player_talks'][0]

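        # Talks hosted on an external service (e.g. YouTube) are delegated to
        # the matching extractor via the embed code or URI.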
        external = player_talk.get('external')
        if isinstance(external, dict):
            service = external.get('service')
            if isinstance(service, compat_str):
                ext_url = None
                if service.lower() == 'youtube':
                    ext_url = external.get('code')

                return self.url_result(ext_url or external['uri'])

        resources_ = player_talk.get('resources') or talk_info.get('resources')

        http_url = None
        for format_id, resources in resources_.items():
            if format_id == 'hls':
                if not isinstance(resources, dict):
                    continue
                stream_url = url_or_none(resources.get('stream'))
                if not stream_url:
                    continue
                formats.extend(self._extract_m3u8_formats(
                    stream_url, video_name, 'mp4', m3u8_id=format_id,
                    fatal=False))
            else:
                if not isinstance(resources, list):
                    continue
                if format_id == 'h264':
                    for resource in resources:
                        h264_url = resource.get('file')
                        if not h264_url:
                            continue
                        bitrate = int_or_none(resource.get('bitrate'))
                        formats.append({
                            'url': h264_url,
                            'format_id': '%s-%sk' % (format_id, bitrate),
                            'tbr': bitrate,
                        })
                        if re.search(r'\d+k', h264_url):
                            http_url = h264_url
                elif format_id == 'rtmp':
                    streamer = talk_info.get('streamer')
                    if not streamer:
                        continue
                    for resource in resources:
                        formats.append({
                            'format_id': '%s-%s' % (format_id, resource.get('name')),
                            'url': streamer,
                            'play_path': resource['file'],
                            'ext': 'flv',
                            'width': int_or_none(resource.get('width')),
                            'height': int_or_none(resource.get('height')),
                            'tbr': int_or_none(resource.get('bitrate')),
                        })

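        # When a progressive HTTP URL is known, synthesize HTTP variants of
        # the HLS video formats by swapping that URL's bitrate token for each
        # rendition's bitrate, keeping only candidates that actually resolve.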
        m3u8_formats = list(filter(
            lambda f: f.get('protocol') == 'm3u8' and f.get('vcodec') != 'none',
            formats))
        if http_url:
            for m3u8_format in m3u8_formats:
                bitrate = self._search_regex(r'(\d+k)', m3u8_format['url'], 'bitrate', default=None)
                if not bitrate:
                    continue
                bitrate_url = re.sub(r'\d+k', bitrate, http_url)
                if not self._is_valid_url(
                        bitrate_url, video_name, '%s bitrate' % bitrate):
                    continue
                f = m3u8_format.copy()
                f.update({
                    'url': bitrate_url,
                    'format_id': m3u8_format['format_id'].replace('hls', 'http'),
                    'protocol': 'http',
                })
                if f.get('acodec') == 'none':
                    del f['acodec']
                formats.append(f)

        audio_download = talk_info.get('audioDownload')
        if audio_download:
            formats.append({
                'url': audio_download,
                'format_id': 'audio',
                'vcodec': 'none',
            })

        self._sort_formats(formats)

        video_id = compat_str(talk_info['id'])

        return {
            'id': video_id,
            'title': title,
            'uploader': player_talk.get('speaker') or talk_info.get('speaker'),
            'thumbnail': player_talk.get('thumb') or talk_info.get('thumb'),
            'description': self._og_search_description(webpage),
            'subtitles': self._get_subtitles(video_id, talk_info),
            'formats': formats,
            'duration': float_or_none(talk_info.get('duration')),
            'view_count': int_or_none(data.get('viewed_count')),
            'comment_count': int_or_none(
                try_get(data, lambda x: x['comments']['count'])),
            'tags': try_get(talk_info, lambda x: x['tags'], list),
        }

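    # Subtitle languages come from the downloads metadata; each language is
    # offered in TED's own 'ted' format and as SRT.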
    def _get_subtitles(self, video_id, talk_info):
        sub_lang_list = {}
        for language in try_get(
                talk_info,
                (lambda x: x['downloads']['languages'],
                 lambda x: x['languages']), list):
            lang_code = language.get('languageCode') or language.get('ianaCode')
            if not lang_code:
                continue
            sub_lang_list[lang_code] = [
                {
                    'url': 'http://www.ted.com/talks/subtitles/id/%s/lang/%s/format/%s' % (video_id, lang_code, ext),
                    'ext': ext,
                }
                for ext in ['ted', 'srt']
            ]
        return sub_lang_list

    def _watch_info(self, url, name):
        webpage = self._download_webpage(url, name)

        config_json = self._html_search_regex(
            r'"pages\.jwplayer"\s*,\s*({.+?})\s*\)\s*</script>',
            webpage, 'config', default=None)
        if not config_json:
            embed_url = self._search_regex(
                r"<iframe[^>]+class='pages-video-embed__video__object'[^>]+src='([^']+)'", webpage, 'embed url')
            return self.url_result(self._proto_relative_url(embed_url))
        config = json.loads(config_json)['config']
        video_url = config['video']['url']
        thumbnail = config.get('image', {}).get('url')

        title = self._html_search_regex(
            r"(?s)<h1(?:\s+class='[^']+')?>(.+?)</h1>", webpage, 'title')
        description = self._html_search_regex(
            [
                r'(?s)<h4 class="[^"]+" id="h3--about-this-talk">.*?</h4>(.*?)</div>',
                r'(?s)<p><strong>About this talk:</strong>\s+(.*?)</p>',
            ],
            webpage, 'description', fatal=False)

        return {
            'id': name,
            'url': video_url,
            'title': title,
            'thumbnail': thumbnail,
            'description': description,
        }
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from .youtube import YoutubeIE
from ..utils import (
    js_to_json,
    qualities,
    determine_ext,
)


class Tele13IE(InfoExtractor):
    _VALID_URL = r'^https?://(?:www\.)?t13\.cl/videos(?:/[^/]+)+/(?P<id>[\w-]+)'
    _TESTS = [
        {
            'url': 'http://www.t13.cl/videos/actualidad/el-circulo-de-hierro-de-michelle-bachelet-en-su-regreso-a-la-moneda',
            'md5': '4cb1fa38adcad8fea88487a078831755',
            'info_dict': {
                'id': 'el-circulo-de-hierro-de-michelle-bachelet-en-su-regreso-a-la-moneda',
                'ext': 'mp4',
                'title': 'El círculo de hierro de Michelle Bachelet en su regreso a La Moneda',
            },
            'params': {
                # HTTP Error 404: Not Found
                'skip_download': True,
            },
        },
        {
            'url': 'http://www.t13.cl/videos/mundo/tendencias/video-captan-misteriosa-bola-fuego-cielos-bangkok',
            'md5': '867adf6a3b3fef932c68a71d70b70946',
            'info_dict': {
                'id': 'rOoKv2OMpOw',
                'ext': 'mp4',
                'title': 'Shooting star seen on 7-Sep-2015',
                'description': 'md5:7292ff2a34b2f673da77da222ae77e1e',
                'uploader': 'Porjai Jaturongkhakun',
                'upload_date': '20150906',
                'uploader_id': 'UCnLY_3ezwNcDSC_Wc6suZxw',
            },
            'add_ie': ['Youtube'],
        }
    ]

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)

        setup_js = self._search_regex(
            r"(?s)jwplayer\('player-vivo'\).setup\((\{.*?\})\)",
            webpage, 'setup code')
        sources = self._parse_json(self._search_regex(
            r'sources\s*:\s*(\[[^\]]+\])', setup_js, 'sources'),
            display_id, js_to_json)

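        # Rank plain formats by their quality label; HLS sources are expanded
        # into individual formats and YouTube embeds are delegated outright.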
        preference = qualities(['Móvil', 'SD', 'HD'])
        formats = []
        urls = []
        for f in sources:
            format_url = f['file']
            if format_url and format_url not in urls:
                ext = determine_ext(format_url)
                if ext == 'm3u8':
                    formats.extend(self._extract_m3u8_formats(
                        format_url, display_id, 'mp4', 'm3u8_native',
                        m3u8_id='hls', fatal=False))
                elif YoutubeIE.suitable(format_url):
                    return self.url_result(format_url, 'Youtube')
                else:
                    formats.append({
                        'url': format_url,
                        'format_id': f.get('label'),
                        'preference': preference(f.get('label')),
                        'ext': ext,
                    })
                urls.append(format_url)
        self._sort_formats(formats)

        return {
            'id': display_id,
            'title': self._search_regex(
                r'title\s*:\s*"([^"]+)"', setup_js, 'title'),
            'description': self._html_search_meta(
                'description', webpage, 'description'),
            'thumbnail': self._search_regex(
                r'image\s*:\s*"([^"]+)"', setup_js, 'thumbnail', default=None),
            'formats': formats,
        }
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from .jwplatform import JWPlatformIE
from .nexx import NexxIE
from ..compat import compat_urlparse
from ..utils import (
    NO_DEFAULT,
    smuggle_url,
)


class Tele5IE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?tele5\.de/(?:[^/]+/)*(?P<id>[^/?#&]+)'
    _GEO_COUNTRIES = ['DE']
    _TESTS = [{
        'url': 'https://www.tele5.de/mediathek/filme-online/videos?vid=1549416',
        'info_dict': {
            'id': '1549416',
            'ext': 'mp4',
            'upload_date': '20180814',
            'timestamp': 1534290623,
            'title': 'Pandorum',
        },
        'params': {
            'skip_download': True,
        },
    }, {
        # jwplatform, nexx unavailable
        'url': 'https://www.tele5.de/filme/ghoul-das-geheimnis-des-friedhofmonsters/',
        'info_dict': {
            'id': 'WJuiOlUp',
            'ext': 'mp4',
            'upload_date': '20200603',
            'timestamp': 1591214400,
            'title': 'Ghoul - Das Geheimnis des Friedhofmonsters',
            'description': 'md5:42002af1d887ff3d5b2b3ca1f8137d97',
        },
        'params': {
            'skip_download': True,
        },
        'add_ie': [JWPlatformIE.ie_key()],
    }, {
        'url': 'https://www.tele5.de/kalkofes-mattscheibe/video-clips/politik-und-gesellschaft?ve_id=1551191',
        'only_matching': True,
    }, {
        'url': 'https://www.tele5.de/video-clip/?ve_id=1609440',
        'only_matching': True,
    }, {
        'url': 'https://www.tele5.de/filme/schlefaz-dragon-crusaders/',
        'only_matching': True,
    }, {
        'url': 'https://www.tele5.de/filme/making-of/avengers-endgame/',
        'only_matching': True,
    }, {
        'url': 'https://www.tele5.de/star-trek/raumschiff-voyager/ganze-folge/das-vinculum/',
        'only_matching': True,
    }, {
        'url': 'https://www.tele5.de/anders-ist-sevda/',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        qs = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
        video_id = (qs.get('vid') or qs.get('ve_id') or [None])[0]

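        # Nexx IDs are purely numeric (6+ digits); JW Platform IDs are
        # 8-character alphanumeric strings.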
        NEXX_ID_RE = r'\d{6,}'
        JWPLATFORM_ID_RE = r'[a-zA-Z0-9]{8}'

        def nexx_result(nexx_id):
            return self.url_result(
                'https://api.nexx.cloud/v3/759/videos/byid/%s' % nexx_id,
                ie=NexxIE.ie_key(), video_id=nexx_id)

        nexx_id = jwplatform_id = None

        if video_id:
            if re.match(NEXX_ID_RE, video_id):
                return nexx_result(video_id)
            elif re.match(JWPLATFORM_ID_RE, video_id):
                jwplatform_id = video_id

        if not nexx_id:
            display_id = self._match_id(url)
            webpage = self._download_webpage(url, display_id)

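            # Try the video-player element's data-id, a player_<id> element
            # id, or any bare data-id attribute, in that order.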
            def extract_id(pattern, name, default=NO_DEFAULT):
                return self._html_search_regex(
                    (r'id\s*=\s*["\']video-player["\'][^>]+data-id\s*=\s*["\'](%s)' % pattern,
                     r'\s+id\s*=\s*["\']player_(%s)' % pattern,
                     r'\bdata-id\s*=\s*["\'](%s)' % pattern), webpage, name,
                    default=default)

            nexx_id = extract_id(NEXX_ID_RE, 'nexx id', default=None)
            if nexx_id:
                return nexx_result(nexx_id)

            if not jwplatform_id:
                jwplatform_id = extract_id(JWPLATFORM_ID_RE, 'jwplatform id')

        return self.url_result(
            smuggle_url(
                'jwplatform:%s' % jwplatform_id,
                {'geo_countries': self._GEO_COUNTRIES}),
            ie=JWPlatformIE.ie_key(), video_id=jwplatform_id)
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor


class TeleBruxellesIE(InfoExtractor):
    _VALID_URL = r'https?://(?:www\.)?(?:telebruxelles|bx1)\.be/(?:[^/]+/)*(?P<id>[^/#?]+)'
    _TESTS = [{
        'url': 'http://bx1.be/news/que-risque-lauteur-dune-fausse-alerte-a-la-bombe/',
        'md5': 'a2a67a5b1c3e8c9d33109b902f474fd9',
        'info_dict': {
            'id': '158856',
            'display_id': 'que-risque-lauteur-dune-fausse-alerte-a-la-bombe',
            'ext': 'mp4',
            'title': 'Que risque l’auteur d’une fausse alerte à la bombe ?',
            'description': 'md5:3cf8df235d44ebc5426373050840e466',
        },
    }, {
        'url': 'http://bx1.be/sport/futsal-schaerbeek-sincline-5-3-a-thulin/',
        'md5': 'dfe07ecc9c153ceba8582ac912687675',
        'info_dict': {
            'id': '158433',
            'display_id': 'futsal-schaerbeek-sincline-5-3-a-thulin',
            'ext': 'mp4',
            'title': 'Futsal : Schaerbeek s’incline 5-3 à Thulin',
            'description': 'md5:fd013f1488d5e2dceb9cebe39e2d569b',
        },
    }, {
        'url': 'http://bx1.be/emission/bxenf1-gastronomie/',
        'only_matching': True,
    }, {
        'url': 'https://bx1.be/berchem-sainte-agathe/personnel-carrefour-de-berchem-sainte-agathe-inquiet/',
        'only_matching': True,
    }, {
        'url': 'https://bx1.be/dernier-jt/',
        'only_matching': True,
    }, {
        # live stream
        'url': 'https://bx1.be/lives/direct-tv/',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)

        article_id = self._html_search_regex(
            r'<article[^>]+\bid=["\']post-(\d+)', webpage, 'article ID', default=None)
        title = self._html_search_regex(
            r'<h1[^>]*>(.+?)</h1>', webpage, 'title',
            default=None) or self._og_search_title(webpage)
        description = self._og_search_description(webpage, default=None)

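        # The player JS assembles the file URL by string concatenation
        # ("vod/mp4:" + name + ".mp4"); the quote-plus-quote glue is stripped
        # out below before handing the URL to the Wowza helper.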
        rtmp_url = self._html_search_regex(
            r'file["\']?\s*:\s*"(r(?:tm|mt)ps?://[^/]+/(?:vod/mp4:"\s*\+\s*"[^"]+"\s*\+\s*"\.mp4|stream/live))"',
            webpage, 'RTMP url')
        # Yes, they have a typo in scheme name for live stream URLs (e.g.
        # https://bx1.be/lives/direct-tv/)
        rtmp_url = re.sub(r'^rmtp', 'rtmp', rtmp_url)
        rtmp_url = re.sub(r'"\s*\+\s*"', '', rtmp_url)
        formats = self._extract_wowza_formats(rtmp_url, article_id or display_id)
        self._sort_formats(formats)

        is_live = 'stream/live' in rtmp_url

        return {
            'id': article_id or display_id,
            'display_id': display_id,
            'title': self._live_title(title) if is_live else title,
            'description': description,
            'formats': formats,
            'is_live': is_live,
        }
# coding: utf-8
from __future__ import unicode_literals

import json
import re

from .common import InfoExtractor
from .ooyala import OoyalaIE
from ..utils import (
    clean_html,
    determine_ext,
    int_or_none,
    str_or_none,
    try_get,
    urljoin,
)


class TelecincoIE(InfoExtractor):
    IE_DESC = 'telecinco.es, cuatro.com and mediaset.es'
    _VALID_URL = r'https?://(?:www\.)?(?:telecinco\.es|cuatro\.com|mediaset\.es)/(?:[^/]+/)+(?P<id>.+?)\.html'

    _TESTS = [{
        'url': 'http://www.telecinco.es/robinfood/temporada-01/t01xp14/Bacalao-cocochas-pil-pil_0_1876350223.html',
        'info_dict': {
            'id': '1876350223',
            'title': 'Bacalao con kokotxas al pil-pil',
            'description': 'md5:716caf5601e25c3c5ab6605b1ae71529',
        },
        'playlist': [{
            'md5': 'adb28c37238b675dad0f042292f209a7',
            'info_dict': {
                'id': 'JEA5ijCnF6p5W08A1rNKn7',
                'ext': 'mp4',
                'title': 'Con Martín Berasategui, hacer un bacalao al pil-pil es fácil y divertido',
                'duration': 662,
            },
        }]
    }, {
        'url': 'http://www.cuatro.com/deportes/futbol/barcelona/Leo_Messi-Champions-Roma_2_2052780128.html',
        'md5': '9468140ebc300fbb8b9d65dc6e5c4b43',
        'info_dict': {
            'id': 'jn24Od1zGLG4XUZcnUnZB6',
            'ext': 'mp4',
            'title': '¿Quién es este ex futbolista con el que hablan Leo Messi y Luis Suárez?',
            'description': 'md5:a62ecb5f1934fc787107d7b9a2262805',
            'duration': 79,
        },
    }, {
        'url': 'http://www.mediaset.es/12meses/campanas/doylacara/conlatratanohaytrato/Ayudame-dar-cara-trata-trato_2_1986630220.html',
        'md5': 'ae2dc6b7b50b2392076a51c0f70e01f6',
        'info_dict': {
            'id': 'aywerkD2Sv1vGNqq9b85Q2',
            'ext': 'mp4',
            'title': '#DOYLACARA. Con la trata no hay trato',
            'description': 'md5:2771356ff7bfad9179c5f5cd954f1477',
            'duration': 50,
        },
    }, {
        # video in opening's content
        'url': 'https://www.telecinco.es/vivalavida/fiorella-sobrina-edmundo-arrocet-entrevista_18_2907195140.html',
        'info_dict': {
            'id': '2907195140',
            'title': 'La surrealista entrevista a la sobrina de Edmundo Arrocet: "No puedes venir aquí y tomarnos por tontos"',
            'description': 'md5:73f340a7320143d37ab895375b2bf13a',
        },
        'playlist': [{
            'md5': 'adb28c37238b675dad0f042292f209a7',
            'info_dict': {
                'id': 'TpI2EttSDAReWpJ1o0NVh2',
                'ext': 'mp4',
                'title': 'La surrealista entrevista a la sobrina de Edmundo Arrocet: "No puedes venir aquí y tomarnos por tontos"',
                'duration': 1015,
            },
        }],
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'http://www.telecinco.es/informativos/nacional/Pablo_Iglesias-Informativos_Telecinco-entrevista-Pedro_Piqueras_2_1945155182.html',
        'only_matching': True,
    }, {
        'url': 'http://www.telecinco.es/espanasinirmaslejos/Espana-gran-destino-turistico_2_1240605043.html',
        'only_matching': True,
    }, {
        # ooyala video
        'url': 'http://www.cuatro.com/chesterinlove/a-carta/chester-chester_in_love-chester_edu_2_2331030022.html',
        'only_matching': True,
    }]

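    # Parse an embedded content object; items whose CMS is Ooyala (cf. the
    # "ooyala video" test above) are delegated to the Ooyala extractor.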
    def _parse_content(self, content, url):
        video_id = content['dataMediaId']
        if content.get('dataCmsId') ==