-rwxr-xr-x  youtube_dl/YoutubeDL.py | 6
-rwxr-xr-x  youtube_dl/__main__.py | 2
-rw-r--r--  youtube_dl/compat.py | 14
-rw-r--r--  youtube_dl/downloader/http.py | 4
-rw-r--r--  youtube_dl/extractor/aenetworks.py | 2
-rw-r--r--  youtube_dl/extractor/bbc.py | 2
-rw-r--r--  youtube_dl/extractor/ccc.py | 2
-rw-r--r--  youtube_dl/extractor/ceskatelevize.py | 8
-rw-r--r--  youtube_dl/extractor/cnn.py | 12
-rw-r--r--  youtube_dl/extractor/collegerama.py | 6
-rw-r--r--  youtube_dl/extractor/comedycentral.py | 2
-rw-r--r--  youtube_dl/extractor/common.py | 8
-rw-r--r--  youtube_dl/extractor/crunchyroll.py | 74
-rw-r--r--  youtube_dl/extractor/drbonanza.py | 10
-rw-r--r--  youtube_dl/extractor/eighttracks.py | 108
-rw-r--r--  youtube_dl/extractor/ellentv.py | 2
-rw-r--r--  youtube_dl/extractor/everyonesmixtape.py | 12
-rw-r--r--  youtube_dl/extractor/exfm.py | 2
-rw-r--r--  youtube_dl/extractor/fc2.py | 2
-rw-r--r--  youtube_dl/extractor/franceinter.py | 2
-rw-r--r--  youtube_dl/extractor/freevideo.py | 4
-rw-r--r--  youtube_dl/extractor/hentaistigma.py | 4
-rw-r--r--  youtube_dl/extractor/kankan.py | 2
-rw-r--r--  youtube_dl/extractor/liveleak.py | 2
-rw-r--r--  youtube_dl/extractor/mofosex.py | 2
-rw-r--r--  youtube_dl/extractor/myspass.py | 4
-rw-r--r--  youtube_dl/extractor/nerdcubed.py | 4
-rw-r--r--  youtube_dl/extractor/pornhub.py | 8
-rw-r--r--  youtube_dl/extractor/pornovoisines.py | 2
-rw-r--r--  youtube_dl/extractor/radiobremen.py | 12
-rw-r--r--  youtube_dl/extractor/radiofrance.py | 6
-rw-r--r--  youtube_dl/extractor/rbmaradio.py | 10
-rw-r--r--  youtube_dl/extractor/reverbnation.py | 12
-rw-r--r--  youtube_dl/extractor/ringtv.py | 14
-rw-r--r--  youtube_dl/extractor/rte.py | 2
-rw-r--r--  youtube_dl/extractor/rtl2.py | 2
-rw-r--r--  youtube_dl/extractor/screenwavemedia.py | 2
-rw-r--r--  youtube_dl/extractor/senateisvp.py | 62
-rw-r--r--  youtube_dl/extractor/slutload.py | 4
-rw-r--r--  youtube_dl/extractor/snotr.py | 2
-rw-r--r--  youtube_dl/extractor/soundcloud.py | 2
-rw-r--r--  youtube_dl/extractor/steam.py | 16
-rw-r--r--  youtube_dl/extractor/tenplay.py | 8
-rw-r--r--  youtube_dl/extractor/thesixtyone.py | 32
-rw-r--r--  youtube_dl/extractor/traileraddict.py | 8
-rw-r--r--  youtube_dl/extractor/tudou.py | 2
-rw-r--r--  youtube_dl/extractor/vbox7.py | 2
-rw-r--r--  youtube_dl/extractor/videopremium.py | 10
-rw-r--r--  youtube_dl/extractor/vimeo.py | 18
-rw-r--r--  youtube_dl/extractor/vine.py | 6
-rw-r--r--  youtube_dl/extractor/worldstarhiphop.py | 14
-rw-r--r--  youtube_dl/extractor/youjizz.py | 4
-rw-r--r--  youtube_dl/extractor/youku.py | 2
-rw-r--r--  youtube_dl/options.py | 2
-rw-r--r--  youtube_dl/postprocessor/execafterdownload.py | 2
-rw-r--r--  youtube_dl/postprocessor/metadatafromtitle.py | 2
-rw-r--r--  youtube_dl/postprocessor/xattrpp.py | 10
-rw-r--r--  youtube_dl/update.py | 8
-rw-r--r--  youtube_dl/utils.py | 16
59 files changed, 302 insertions, 302 deletions
diff --git a/youtube_dl/YoutubeDL.py b/youtube_dl/YoutubeDL.py
index d34f77a6d..f4324039c 100755
--- a/youtube_dl/YoutubeDL.py
+++ b/youtube_dl/YoutubeDL.py
@@ -782,7 +782,7 @@ class YoutubeDL(object):
entries = ie_entries[playliststart:playlistend]
n_entries = len(entries)
self.to_screen(
- "[%s] playlist %s: Collected %d video ids (downloading %d of them)" %
+ '[%s] playlist %s: Collected %d video ids (downloading %d of them)' %
(ie_result['extractor'], playlist, n_all_entries, n_entries))
elif isinstance(ie_entries, PagedList):
if playlistitems:
@@ -796,7 +796,7 @@ class YoutubeDL(object):
playliststart, playlistend)
n_entries = len(entries)
self.to_screen(
- "[%s] playlist %s: Downloading %d videos" %
+ '[%s] playlist %s: Downloading %d videos' %
(ie_result['extractor'], playlist, n_entries))
else: # iterable
if playlistitems:
@@ -807,7 +807,7 @@ class YoutubeDL(object):
ie_entries, playliststart, playlistend))
n_entries = len(entries)
self.to_screen(
- "[%s] playlist %s: Downloading %d videos" %
+ '[%s] playlist %s: Downloading %d videos' %
(ie_result['extractor'], playlist, n_entries))
if self.params.get('playlistreverse', False):
diff --git a/youtube_dl/__main__.py b/youtube_dl/__main__.py
index 42a0f8c6f..138f5fbec 100755
--- a/youtube_dl/__main__.py
+++ b/youtube_dl/__main__.py
@@ -7,7 +7,7 @@ from __future__ import unicode_literals
import sys
-if __package__ is None and not hasattr(sys, "frozen"):
+if __package__ is None and not hasattr(sys, 'frozen'):
# direct call of __main__.py
import os.path
path = os.path.realpath(os.path.abspath(__file__))
diff --git a/youtube_dl/compat.py b/youtube_dl/compat.py
index 8ab688001..b497da696 100644
--- a/youtube_dl/compat.py
+++ b/youtube_dl/compat.py
@@ -181,20 +181,20 @@ except ImportError: # Python < 3.4
# parameter := attribute "=" value
url = req.get_full_url()
- scheme, data = url.split(":", 1)
- mediatype, data = data.split(",", 1)
+ scheme, data = url.split(':', 1)
+ mediatype, data = data.split(',', 1)
# even base64 encoded data URLs might be quoted so unquote in any case:
data = compat_urllib_parse_unquote_to_bytes(data)
- if mediatype.endswith(";base64"):
+ if mediatype.endswith(';base64'):
data = binascii.a2b_base64(data)
mediatype = mediatype[:-7]
if not mediatype:
- mediatype = "text/plain;charset=US-ASCII"
+ mediatype = 'text/plain;charset=US-ASCII'
headers = email.message_from_string(
- "Content-type: %s\nContent-length: %d\n" % (mediatype, len(data)))
+ 'Content-type: %s\nContent-length: %d\n' % (mediatype, len(data)))
return compat_urllib_response.addinfourl(io.BytesIO(data), headers, url)
@@ -268,7 +268,7 @@ except ImportError: # Python 2
nv = name_value.split('=', 1)
if len(nv) != 2:
if strict_parsing:
- raise ValueError("bad query field: %r" % (name_value,))
+ raise ValueError('bad query field: %r' % (name_value,))
# Handle case of a control-name with no equal sign
if keep_blank_values:
nv.append('')
@@ -466,7 +466,7 @@ if sys.version_info < (2, 7):
if err is not None:
raise err
else:
- raise socket.error("getaddrinfo returns an empty list")
+ raise socket.error('getaddrinfo returns an empty list')
else:
compat_socket_create_connection = socket.create_connection
diff --git a/youtube_dl/downloader/http.py b/youtube_dl/downloader/http.py
index 7089983ce..f8b69d186 100644
--- a/youtube_dl/downloader/http.py
+++ b/youtube_dl/downloader/http.py
@@ -140,8 +140,8 @@ class HttpFD(FileDownloader):
if data_len is not None:
data_len = int(data_len) + resume_len
- min_data_len = self.params.get("min_filesize")
- max_data_len = self.params.get("max_filesize")
+ min_data_len = self.params.get('min_filesize')
+ max_data_len = self.params.get('max_filesize')
if min_data_len is not None and data_len < min_data_len:
self.to_screen('\r[download] File is smaller than min-filesize (%s bytes < %s bytes). Aborting.' % (data_len, min_data_len))
return False
diff --git a/youtube_dl/extractor/aenetworks.py b/youtube_dl/extractor/aenetworks.py
index 43d7b0523..6018ae79a 100644
--- a/youtube_dl/extractor/aenetworks.py
+++ b/youtube_dl/extractor/aenetworks.py
@@ -28,7 +28,7 @@ class AENetworksIE(InfoExtractor):
'info_dict': {
'id': 'eg47EERs_JsZ',
'ext': 'mp4',
- 'title': "Winter Is Coming",
+ 'title': 'Winter Is Coming',
'description': 'md5:641f424b7a19d8e24f26dea22cf59d74',
},
'params': {
diff --git a/youtube_dl/extractor/bbc.py b/youtube_dl/extractor/bbc.py
index 6ddee686c..9d0dfb961 100644
--- a/youtube_dl/extractor/bbc.py
+++ b/youtube_dl/extractor/bbc.py
@@ -86,7 +86,7 @@ class BBCCoUkIE(InfoExtractor):
'id': 'b00yng1d',
'ext': 'flv',
'title': 'The Voice UK: Series 3: Blind Auditions 5',
- 'description': "Emma Willis and Marvin Humes present the fifth set of blind auditions in the singing competition, as the coaches continue to build their teams based on voice alone.",
+ 'description': 'Emma Willis and Marvin Humes present the fifth set of blind auditions in the singing competition, as the coaches continue to build their teams based on voice alone.',
'duration': 5100,
},
'params': {
diff --git a/youtube_dl/extractor/ccc.py b/youtube_dl/extractor/ccc.py
index e94b1e35b..dda2c0959 100644
--- a/youtube_dl/extractor/ccc.py
+++ b/youtube_dl/extractor/ccc.py
@@ -45,7 +45,7 @@ class CCCIE(InfoExtractor):
title = self._html_search_regex(
r'(?s)<h1>(.*?)</h1>', webpage, 'title')
description = self._html_search_regex(
- r"(?s)<h3>About</h3>(.+?)<h3>",
+ r'(?s)<h3>About</h3>(.+?)<h3>',
webpage, 'description', fatal=False)
upload_date = unified_strdate(self._html_search_regex(
r"(?s)<span[^>]+class='[^']*fa-calendar-o'[^>]*>(.+?)</span>",
diff --git a/youtube_dl/extractor/ceskatelevize.py b/youtube_dl/extractor/ceskatelevize.py
index 6f7b2a70d..b27b4e670 100644
--- a/youtube_dl/extractor/ceskatelevize.py
+++ b/youtube_dl/extractor/ceskatelevize.py
@@ -177,16 +177,16 @@ class CeskaTelevizeIE(InfoExtractor):
for divider in [1000, 60, 60, 100]:
components.append(msec % divider)
msec //= divider
- return "{3:02}:{2:02}:{1:02},{0:03}".format(*components)
+ return '{3:02}:{2:02}:{1:02},{0:03}'.format(*components)
def _fix_subtitle(subtitle):
for line in subtitle.splitlines():
- m = re.match(r"^\s*([0-9]+);\s*([0-9]+)\s+([0-9]+)\s*$", line)
+ m = re.match(r'^\s*([0-9]+);\s*([0-9]+)\s+([0-9]+)\s*$', line)
if m:
yield m.group(1)
start, stop = (_msectotimecode(int(t)) for t in m.groups()[1:])
- yield "{0} --> {1}".format(start, stop)
+ yield '{0} --> {1}'.format(start, stop)
else:
yield line
- return "\r\n".join(_fix_subtitle(subtitles))
+ return '\r\n'.join(_fix_subtitle(subtitles))
diff --git a/youtube_dl/extractor/cnn.py b/youtube_dl/extractor/cnn.py
index 3b1bd4033..53489a14e 100644
--- a/youtube_dl/extractor/cnn.py
+++ b/youtube_dl/extractor/cnn.py
@@ -26,14 +26,14 @@ class CNNIE(InfoExtractor):
'upload_date': '20130609',
},
}, {
- "url": "http://edition.cnn.com/video/?/video/us/2013/08/21/sot-student-gives-epic-speech.georgia-institute-of-technology&utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+rss%2Fcnn_topstories+%28RSS%3A+Top+Stories%29",
- "md5": "b5cc60c60a3477d185af8f19a2a26f4e",
- "info_dict": {
+ 'url': 'http://edition.cnn.com/video/?/video/us/2013/08/21/sot-student-gives-epic-speech.georgia-institute-of-technology&utm_source=feedburner&utm_medium=feed&utm_campaign=Feed%3A+rss%2Fcnn_topstories+%28RSS%3A+Top+Stories%29',
+ 'md5': 'b5cc60c60a3477d185af8f19a2a26f4e',
+ 'info_dict': {
'id': 'us/2013/08/21/sot-student-gives-epic-speech.georgia-institute-of-technology',
'ext': 'mp4',
- "title": "Student's epic speech stuns new freshmen",
- "description": "A Georgia Tech student welcomes the incoming freshmen with an epic speech backed by music from \"2001: A Space Odyssey.\"",
- "upload_date": "20130821",
+ 'title': "Student's epic speech stuns new freshmen",
+ 'description': "A Georgia Tech student welcomes the incoming freshmen with an epic speech backed by music from \"2001: A Space Odyssey.\"",
+ 'upload_date': '20130821',
}
}, {
'url': 'http://www.cnn.com/video/data/2.0/video/living/2014/12/22/growing-america-nashville-salemtown-board-episode-1.hln.html',
diff --git a/youtube_dl/extractor/collegerama.py b/youtube_dl/extractor/collegerama.py
index 40667a0f1..f9e84193d 100644
--- a/youtube_dl/extractor/collegerama.py
+++ b/youtube_dl/extractor/collegerama.py
@@ -46,9 +46,9 @@ class CollegeRamaIE(InfoExtractor):
video_id = self._match_id(url)
player_options_request = {
- "getPlayerOptionsRequest": {
- "ResourceId": video_id,
- "QueryString": "",
+ 'getPlayerOptionsRequest': {
+ 'ResourceId': video_id,
+ 'QueryString': '',
}
}
diff --git a/youtube_dl/extractor/comedycentral.py b/youtube_dl/extractor/comedycentral.py
index 055c9eec5..5b1b99675 100644
--- a/youtube_dl/extractor/comedycentral.py
+++ b/youtube_dl/extractor/comedycentral.py
@@ -195,7 +195,7 @@ class ComedyCentralShowsIE(MTVServicesInfoExtractor):
if len(altMovieParams) == 0:
raise ExtractorError('unable to find Flash URL in webpage ' + url)
else:
- mMovieParams = [("http://media.mtvnservices.com/" + altMovieParams[0], altMovieParams[0])]
+ mMovieParams = [('http://media.mtvnservices.com/' + altMovieParams[0], altMovieParams[0])]
uri = mMovieParams[0][1]
# Correct cc.com in uri
diff --git a/youtube_dl/extractor/common.py b/youtube_dl/extractor/common.py
index 144d8c6b6..f411ea763 100644
--- a/youtube_dl/extractor/common.py
+++ b/youtube_dl/extractor/common.py
@@ -1497,7 +1497,7 @@ class InfoExtractor(object):
def _live_title(self, name):
""" Generate the title for a live video """
now = datetime.datetime.now()
- now_str = now.strftime("%Y-%m-%d %H:%M")
+ now_str = now.strftime('%Y-%m-%d %H:%M')
return name + ' ' + now_str
def _int(self, v, name, fatal=False, **kwargs):
@@ -1570,7 +1570,7 @@ class InfoExtractor(object):
return {}
def _get_subtitles(self, *args, **kwargs):
- raise NotImplementedError("This method must be implemented by subclasses")
+ raise NotImplementedError('This method must be implemented by subclasses')
@staticmethod
def _merge_subtitle_items(subtitle_list1, subtitle_list2):
@@ -1596,7 +1596,7 @@ class InfoExtractor(object):
return {}
def _get_automatic_captions(self, *args, **kwargs):
- raise NotImplementedError("This method must be implemented by subclasses")
+ raise NotImplementedError('This method must be implemented by subclasses')
class SearchInfoExtractor(InfoExtractor):
@@ -1636,7 +1636,7 @@ class SearchInfoExtractor(InfoExtractor):
def _get_n_results(self, query, n):
"""Get a specified number of results for a query"""
- raise NotImplementedError("This method must be implemented by subclasses")
+ raise NotImplementedError('This method must be implemented by subclasses')
@property
def SEARCH_KEY(self):
diff --git a/youtube_dl/extractor/crunchyroll.py b/youtube_dl/extractor/crunchyroll.py
index 785594df8..c7032ffa2 100644
--- a/youtube_dl/extractor/crunchyroll.py
+++ b/youtube_dl/extractor/crunchyroll.py
@@ -180,40 +180,40 @@ class CrunchyrollIE(CrunchyrollBaseIE):
return assvalue
output = '[Script Info]\n'
- output += 'Title: %s\n' % sub_root.attrib["title"]
+ output += 'Title: %s\n' % sub_root.attrib['title']
output += 'ScriptType: v4.00+\n'
- output += 'WrapStyle: %s\n' % sub_root.attrib["wrap_style"]
- output += 'PlayResX: %s\n' % sub_root.attrib["play_res_x"]
- output += 'PlayResY: %s\n' % sub_root.attrib["play_res_y"]
+ output += 'WrapStyle: %s\n' % sub_root.attrib['wrap_style']
+ output += 'PlayResX: %s\n' % sub_root.attrib['play_res_x']
+ output += 'PlayResY: %s\n' % sub_root.attrib['play_res_y']
output += """ScaledBorderAndShadow: yes
[V4+ Styles]
Format: Name, Fontname, Fontsize, PrimaryColour, SecondaryColour, OutlineColour, BackColour, Bold, Italic, Underline, StrikeOut, ScaleX, ScaleY, Spacing, Angle, BorderStyle, Outline, Shadow, Alignment, MarginL, MarginR, MarginV, Encoding
"""
for style in sub_root.findall('./styles/style'):
- output += 'Style: ' + style.attrib["name"]
- output += ',' + style.attrib["font_name"]
- output += ',' + style.attrib["font_size"]
- output += ',' + style.attrib["primary_colour"]
- output += ',' + style.attrib["secondary_colour"]
- output += ',' + style.attrib["outline_colour"]
- output += ',' + style.attrib["back_colour"]
- output += ',' + ass_bool(style.attrib["bold"])
- output += ',' + ass_bool(style.attrib["italic"])
- output += ',' + ass_bool(style.attrib["underline"])
- output += ',' + ass_bool(style.attrib["strikeout"])
- output += ',' + style.attrib["scale_x"]
- output += ',' + style.attrib["scale_y"]
- output += ',' + style.attrib["spacing"]
- output += ',' + style.attrib["angle"]
- output += ',' + style.attrib["border_style"]
- output += ',' + style.attrib["outline"]
- output += ',' + style.attrib["shadow"]
- output += ',' + style.attrib["alignment"]
- output += ',' + style.attrib["margin_l"]
- output += ',' + style.attrib["margin_r"]
- output += ',' + style.attrib["margin_v"]
- output += ',' + style.attrib["encoding"]
+ output += 'Style: ' + style.attrib['name']
+ output += ',' + style.attrib['font_name']
+ output += ',' + style.attrib['font_size']
+ output += ',' + style.attrib['primary_colour']
+ output += ',' + style.attrib['secondary_colour']
+ output += ',' + style.attrib['outline_colour']
+ output += ',' + style.attrib['back_colour']
+ output += ',' + ass_bool(style.attrib['bold'])
+ output += ',' + ass_bool(style.attrib['italic'])
+ output += ',' + ass_bool(style.attrib['underline'])
+ output += ',' + ass_bool(style.attrib['strikeout'])
+ output += ',' + style.attrib['scale_x']
+ output += ',' + style.attrib['scale_y']
+ output += ',' + style.attrib['spacing']
+ output += ',' + style.attrib['angle']
+ output += ',' + style.attrib['border_style']
+ output += ',' + style.attrib['outline']
+ output += ',' + style.attrib['shadow']
+ output += ',' + style.attrib['alignment']
+ output += ',' + style.attrib['margin_l']
+ output += ',' + style.attrib['margin_r']
+ output += ',' + style.attrib['margin_v']
+ output += ',' + style.attrib['encoding']
output += '\n'
output += """
@@ -222,15 +222,15 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
"""
for event in sub_root.findall('./events/event'):
output += 'Dialogue: 0'
- output += ',' + event.attrib["start"]
- output += ',' + event.attrib["end"]
- output += ',' + event.attrib["style"]
- output += ',' + event.attrib["name"]
- output += ',' + event.attrib["margin_l"]
- output += ',' + event.attrib["margin_r"]
- output += ',' + event.attrib["margin_v"]
- output += ',' + event.attrib["effect"]
- output += ',' + event.attrib["text"]
+ output += ',' + event.attrib['start']
+ output += ',' + event.attrib['end']
+ output += ',' + event.attrib['style']
+ output += ',' + event.attrib['name']
+ output += ',' + event.attrib['margin_l']
+ output += ',' + event.attrib['margin_r']
+ output += ',' + event.attrib['margin_v']
+ output += ',' + event.attrib['effect']
+ output += ',' + event.attrib['text']
output += '\n'
return output
@@ -376,7 +376,7 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
class CrunchyrollShowPlaylistIE(CrunchyrollBaseIE):
- IE_NAME = "crunchyroll:playlist"
+ IE_NAME = 'crunchyroll:playlist'
_VALID_URL = r'https?://(?:(?P<prefix>www|m)\.)?(?P<url>crunchyroll\.com/(?!(?:news|anime-news|library|forum|launchcalendar|lineup|store|comics|freetrial|login))(?P<id>[\w\-]+))/?(?:\?|$)'
_TESTS = [{
diff --git a/youtube_dl/extractor/drbonanza.py b/youtube_dl/extractor/drbonanza.py
index 8b98b013a..01271f8f0 100644
--- a/youtube_dl/extractor/drbonanza.py
+++ b/youtube_dl/extractor/drbonanza.py
@@ -87,7 +87,7 @@ class DRBonanzaIE(InfoExtractor):
formats = []
for file in info['Files']:
- if info['Type'] == "Video":
+ if info['Type'] == 'Video':
if file['Type'] in video_types:
format = parse_filename_info(file['Location'])
format.update({
@@ -101,10 +101,10 @@ class DRBonanzaIE(InfoExtractor):
if '/bonanza/' in rtmp_url:
format['play_path'] = rtmp_url.split('/bonanza/')[1]
formats.append(format)
- elif file['Type'] == "Thumb":
+ elif file['Type'] == 'Thumb':
thumbnail = file['Location']
- elif info['Type'] == "Audio":
- if file['Type'] == "Audio":
+ elif info['Type'] == 'Audio':
+ if file['Type'] == 'Audio':
format = parse_filename_info(file['Location'])
format.update({
'url': file['Location'],
@@ -112,7 +112,7 @@ class DRBonanzaIE(InfoExtractor):
'vcodec': 'none',
})
formats.append(format)
- elif file['Type'] == "Thumb":
+ elif file['Type'] == 'Thumb':
thumbnail = file['Location']
description = '%s\n%s\n%s\n' % (
diff --git a/youtube_dl/extractor/eighttracks.py b/youtube_dl/extractor/eighttracks.py
index 0b61ea0ba..9a44f89f3 100644
--- a/youtube_dl/extractor/eighttracks.py
+++ b/youtube_dl/extractor/eighttracks.py
@@ -17,85 +17,85 @@ class EightTracksIE(InfoExtractor):
IE_NAME = '8tracks'
_VALID_URL = r'https?://8tracks\.com/(?P<user>[^/]+)/(?P<id>[^/#]+)(?:#.*)?$'
_TEST = {
- "name": "EightTracks",
- "url": "http://8tracks.com/ytdl/youtube-dl-test-tracks-a",
- "info_dict": {
+ 'name': 'EightTracks',
+ 'url': 'http://8tracks.com/ytdl/youtube-dl-test-tracks-a',
+ 'info_dict': {
'id': '1336550',
'display_id': 'youtube-dl-test-tracks-a',
- "description": "test chars: \"'/\\ä↭",
- "title": "youtube-dl test tracks \"'/\\ä↭<>",
+ 'description': "test chars: \"'/\\ä↭",
+ 'title': "youtube-dl test tracks \"'/\\ä↭<>",
},
- "playlist": [
+ 'playlist': [
{
- "md5": "96ce57f24389fc8734ce47f4c1abcc55",
- "info_dict": {
- "id": "11885610",
- "ext": "m4a",
- "title": "youtue-dl project<>\"' - youtube-dl test track 1 \"'/\\\u00e4\u21ad",
- "uploader_id": "ytdl"
+ 'md5': '96ce57f24389fc8734ce47f4c1abcc55',
+ 'info_dict': {
+ 'id': '11885610',
+ 'ext': 'm4a',
+ 'title': "youtue-dl project<>\"' - youtube-dl test track 1 \"'/\\\u00e4\u21ad",
+ 'uploader_id': 'ytdl'
}
},
{
- "md5": "4ab26f05c1f7291ea460a3920be8021f",
- "info_dict": {
- "id": "11885608",
- "ext": "m4a",
- "title": "youtube-dl project - youtube-dl test track 2 \"'/\\\u00e4\u21ad",
- "uploader_id": "ytdl"
+ 'md5': '4ab26f05c1f7291ea460a3920be8021f',
+ 'info_dict': {
+ 'id': '11885608',
+ 'ext': 'm4a',
+ 'title': "youtube-dl project - youtube-dl test track 2 \"'/\\\u00e4\u21ad",
+ 'uploader_id': 'ytdl'
}
},
{
- "md5": "d30b5b5f74217410f4689605c35d1fd7",
- "info_dict": {
- "id": "11885679",
- "ext": "m4a",
- "title": "youtube-dl project as well - youtube-dl test track 3 \"'/\\\u00e4\u21ad",
- "uploader_id": "ytdl"
+ 'md5': 'd30b5b5f74217410f4689605c35d1fd7',
+ 'info_dict': {
+ 'id': '11885679',
+ 'ext': 'm4a',
+ 'title': "youtube-dl project as well - youtube-dl test track 3 \"'/\\\u00e4\u21ad",
+ 'uploader_id': 'ytdl'
}
},
{
- "md5": "4eb0a669317cd725f6bbd336a29f923a",
- "info_dict": {
- "id": "11885680",
- "ext": "m4a",
- "title": "youtube-dl project as well - youtube-dl test track 4 \"'/\\\u00e4\u21ad",
- "uploader_id": "ytdl"
+ 'md5': '4eb0a669317cd725f6bbd336a29f923a',
+ 'info_dict': {
+ 'id': '11885680',
+ 'ext': 'm4a',
+ 'title': "youtube-dl project as well - youtube-dl test track 4 \"'/\\\u00e4\u21ad",
+ 'uploader_id': 'ytdl'
}
},
{
- "md5": "1893e872e263a2705558d1d319ad19e8",
- "info_dict": {
- "id": "11885682",
- "ext": "m4a",
- "title": "PH - youtube-dl test track 5 \"'/\\\u00e4\u21ad",
- "uploader_id": "ytdl"
+ 'md5': '1893e872e263a2705558d1d319ad19e8',
+ 'info_dict': {
+ 'id': '11885682',
+ 'ext': 'm4a',
+ 'title': "PH - youtube-dl test track 5 \"'/\\\u00e4\u21ad",
+ 'uploader_id': 'ytdl'
}
},
{
- "md5": "b673c46f47a216ab1741ae8836af5899",
- "info_dict": {
- "id": "11885683",
- "ext": "m4a",
- "title": "PH - youtube-dl test track 6 \"'/\\\u00e4\u21ad",
- "uploader_id": "ytdl"
+ 'md5': 'b673c46f47a216ab1741ae8836af5899',
+ 'info_dict': {
+ 'id': '11885683',
+ 'ext': 'm4a',
+ 'title': "PH - youtube-dl test track 6 \"'/\\\u00e4\u21ad",
+ 'uploader_id': 'ytdl'
}
},
{
- "md5": "1d74534e95df54986da7f5abf7d842b7",
- "info_dict": {
- "id": "11885684",
- "ext": "m4a",
- "title": "phihag - youtube-dl test track 7 \"'/\\\u00e4\u21ad",
- "uploader_id": "ytdl"
+ 'md5': '1d74534e95df54986da7f5abf7d842b7',
+ 'info_dict': {
+ 'id': '11885684',
+ 'ext': 'm4a',
+ 'title': "phihag - youtube-dl test track 7 \"'/\\\u00e4\u21ad",
+ 'uploader_id': 'ytdl'
}
},
{
- "md5": "f081f47af8f6ae782ed131d38b9cd1c0",
- "info_dict": {
- "id": "11885685",
- "ext": "m4a",
- "title": "phihag - youtube-dl test track 8 \"'/\\\u00e4\u21ad",
- "uploader_id": "ytdl"
+ 'md5': 'f081f47af8f6ae782ed131d38b9cd1c0',
+ 'info_dict': {
+ 'id': '11885685',
+ 'ext': 'm4a',
+ 'title': "phihag - youtube-dl test track 8 \"'/\\\u00e4\u21ad",
+ 'uploader_id': 'ytdl'
}
}
]
diff --git a/youtube_dl/extractor/ellentv.py b/youtube_dl/extractor/ellentv.py
index 476cce2d0..4c8190d68 100644
--- a/youtube_dl/extractor/ellentv.py
+++ b/youtube_dl/extractor/ellentv.py
@@ -72,7 +72,7 @@ class EllenTVClipsIE(InfoExtractor):
def _extract_playlist(self, webpage):
json_string = self._search_regex(r'playerView.addClips\(\[\{(.*?)\}\]\);', webpage, 'json')
try:
- return json.loads("[{" + json_string + "}]")
+ return json.loads('[{' + json_string + '}]')
except ValueError as ve:
raise ExtractorError('Failed to download JSON', cause=ve)
diff --git a/youtube_dl/extractor/everyonesmixtape.py b/youtube_dl/extractor/everyonesmixtape.py
index 493d38af8..84a9b750e 100644
--- a/youtube_dl/extractor/everyonesmixtape.py
+++ b/youtube_dl/extractor/everyonesmixtape.py
@@ -14,14 +14,14 @@ class EveryonesMixtapeIE(InfoExtractor):
_TESTS = [{
'url': 'http://everyonesmixtape.com/#/mix/m7m0jJAbMQi/5',
- "info_dict": {
+ 'info_dict': {
'id': '5bfseWNmlds',
'ext': 'mp4',
- "title": "Passion Pit - \"Sleepyhead\" (Official Music Video)",
- "uploader": "FKR.TV",
- "uploader_id": "frenchkissrecords",
- "description": "Music video for \"Sleepyhead\" from Passion Pit's debut EP Chunk Of Change.\nBuy on iTunes: https://itunes.apple.com/us/album/chunk-of-change-ep/id300087641\n\nDirected by The Wilderness.\n\nhttp://www.passionpitmusic.com\nhttp://www.frenchkissrecords.com",
- "upload_date": "20081015"
+ 'title': "Passion Pit - \"Sleepyhead\" (Official Music Video)",
+ 'uploader': 'FKR.TV',
+ 'uploader_id': 'frenchkissrecords',
+ 'description': "Music video for \"Sleepyhead\" from Passion Pit's debut EP Chunk Of Change.\nBuy on iTunes: https://itunes.apple.com/us/album/chunk-of-change-ep/id300087641\n\nDirected by The Wilderness.\n\nhttp://www.passionpitmusic.com\nhttp://www.frenchkissrecords.com",
+ 'upload_date': '20081015'
},
'params': {
'skip_download': True, # This is simply YouTube
diff --git a/youtube_dl/extractor/exfm.py b/youtube_dl/extractor/exfm.py
index 4de02aee9..0c0fe6d65 100644
--- a/youtube_dl/extractor/exfm.py
+++ b/youtube_dl/extractor/exfm.py
@@ -41,7 +41,7 @@ class ExfmIE(InfoExtractor):
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
song_id = mobj.group('id')
- info_url = "http://ex.fm/api/v3/song/%s" % song_id
+ info_url = 'http://ex.fm/api/v3/song/%s' % song_id
info = self._download_json(info_url, song_id)['song']
song_url = info['url']
if re.match(self._SOUNDCLOUD_URL, song_url) is not None:
diff --git a/youtube_dl/extractor/fc2.py b/youtube_dl/extractor/fc2.py
index 4c81271d3..9580f5c0c 100644
--- a/youtube_dl/extractor/fc2.py
+++ b/youtube_dl/extractor/fc2.py
@@ -87,7 +87,7 @@ class FC2IE(InfoExtractor):
mimi = hashlib.md5((video_id + '_gGddgPfeaf_gzyr').encode('utf-8')).hexdigest()
info_url = (
- "http://video.fc2.com/ginfo.php?mimi={1:s}&href={2:s}&v={0:s}&fversion=WIN%2011%2C6%2C602%2C180&from=2&otag=0&upid={0:s}&tk=null&".
+ 'http://video.fc2.com/ginfo.php?mimi={1:s}&href={2:s}&v={0:s}&fversion=WIN%2011%2C6%2C602%2C180&from=2&otag=0&upid={0:s}&tk=null&'.
format(video_id, mimi, compat_urllib_request.quote(refer, safe=b'').replace('.', '%2E')))
info_webpage = self._download_webpage(
diff --git a/youtube_dl/extractor/franceinter.py b/youtube_dl/extractor/franceinter.py
index fdc51f44f..0388ba00c 100644
--- a/youtube_dl/extractor/franceinter.py
+++ b/youtube_dl/extractor/franceinter.py
@@ -10,7 +10,7 @@ class FranceInterIE(InfoExtractor):
_TEST = {
'url': 'http://www.franceinter.fr/player/reecouter?play=793962',
'md5': '4764932e466e6f6c79c317d2e74f6884',
- "info_dict": {
+ 'info_dict': {
'id': '793962',
'ext': 'mp3',
'title': 'L’Histoire dans les jeux vidéo',
diff --git a/youtube_dl/extractor/freevideo.py b/youtube_dl/extractor/freevideo.py
index f755e3c4a..c7bec027b 100644
--- a/youtube_dl/extractor/freevideo.py
+++ b/youtube_dl/extractor/freevideo.py
@@ -12,8 +12,8 @@ class FreeVideoIE(InfoExtractor):
'info_dict': {
'id': 'vysukany-zadecek-22033',
'ext': 'mp4',
- "title": "vysukany-zadecek-22033",
- "age_limit": 18,
+ 'title': 'vysukany-zadecek-22033',
+ 'age_limit': 18,
},
'skip': 'Blocked outside .cz',
}
diff --git a/youtube_dl/extractor/hentaistigma.py b/youtube_dl/extractor/hentaistigma.py
index f5aa73d18..86a93de4d 100644
--- a/youtube_dl/extractor/hentaistigma.py
+++ b/youtube_dl/extractor/hentaistigma.py
@@ -11,8 +11,8 @@ class HentaiStigmaIE(InfoExtractor):
'info_dict': {
'id': 'inyouchuu-etsu-bonus',
'ext': 'mp4',
- "title": "Inyouchuu Etsu Bonus",
- "age_limit": 18,
+ 'title': 'Inyouchuu Etsu Bonus',
+ 'age_limit': 18,
}
}
diff --git a/youtube_dl/extractor/kankan.py b/youtube_dl/extractor/kankan.py
index 364dc878e..a677ff447 100644
--- a/youtube_dl/extractor/kankan.py
+++ b/youtube_dl/extractor/kankan.py
@@ -28,7 +28,7 @@ class KankanIE(InfoExtractor):
title = self._search_regex(r'(?:G_TITLE=|G_MOVIE_TITLE = )[\'"](.+?)[\'"]', webpage, 'video title')
surls = re.search(r'surls:\[\'.+?\'\]|lurl:\'.+?\.flv\'', webpage).group(0)
- gcids = re.findall(r"http://.+?/.+?/(.+?)/", surls)
+ gcids = re.findall(r'http://.+?/.+?/(.+?)/', surls)
gcid = gcids[-1]
info_url = 'http://p2s.cl.kankan.com/getCdnresource_flv?gcid=%s' % gcid
diff --git a/youtube_dl/extractor/liveleak.py b/youtube_dl/extractor/liveleak.py
index 857edfde2..4684994e1 100644
--- a/youtube_dl/extractor/liveleak.py
+++ b/youtube_dl/extractor/liveleak.py
@@ -47,7 +47,7 @@ class LiveLeakIE(InfoExtractor):
'info_dict': {
'id': '801_1409392012',
'ext': 'mp4',
- 'description': "Happened on 27.7.2014. \r\nAt 0:53 you can see people still swimming at near beach.",
+ 'description': 'Happened on 27.7.2014. \r\nAt 0:53 you can see people still swimming at near beach.',
'uploader': 'bony333',
'title': 'Crazy Hungarian tourist films close call waterspout in Croatia'
}
diff --git a/youtube_dl/extractor/mofosex.py b/youtube_dl/extractor/mofosex.py
index f8226cbb2..e47c80119 100644
--- a/youtube_dl/extractor/mofosex.py
+++ b/youtube_dl/extractor/mofosex.py
@@ -38,7 +38,7 @@ class MofosexIE(InfoExtractor):
path = compat_urllib_parse_urlparse(video_url).path
extension = os.path.splitext(path)[1][1:]
format = path.split('/')[5].split('_')[:2]
- format = "-".join(format)
+ format = '-'.join(format)
age_limit = self._rta_search(webpage)
diff --git a/youtube_dl/extractor/myspass.py b/youtube_dl/extractor/myspass.py
index 4557a2b13..f936b92bb 100644
--- a/youtube_dl/extractor/myspass.py
+++ b/youtube_dl/extractor/myspass.py
@@ -18,8 +18,8 @@ class MySpassIE(InfoExtractor):
'info_dict': {
'id': '11741',
'ext': 'mp4',
- "description": "Wer kann in die Fu\u00dfstapfen von Wolfgang Kubicki treten und die Mehrheit der Zuschauer hinter sich versammeln? Wird vielleicht sogar die Absolute Mehrheit geknackt und der Jackpot von 200.000 Euro mit nach Hause genommen?",
- "title": "Absolute Mehrheit vom 17.02.2013 - Die Highlights, Teil 2",
+ 'description': 'Wer kann in die Fu\u00dfstapfen von Wolfgang Kubicki treten und die Mehrheit der Zuschauer hinter sich versammeln? Wird vielleicht sogar die Absolute Mehrheit geknackt und der Jackpot von 200.000 Euro mit nach Hause genommen?',
+ 'title': 'Absolute Mehrheit vom 17.02.2013 - Die Highlights, Teil 2',
},
}
diff --git a/youtube_dl/extractor/nerdcubed.py b/youtube_dl/extractor/nerdcubed.py
index dff78e486..9feccc672 100644
--- a/youtube_dl/extractor/nerdcubed.py
+++ b/youtube_dl/extractor/nerdcubed.py
@@ -18,14 +18,14 @@ class NerdCubedFeedIE(InfoExtractor):
}
def _real_extract(self, url):
- feed = self._download_json(url, url, "Downloading NerdCubed JSON feed")
+ feed = self._download_json(url, url, 'Downloading NerdCubed JSON feed')
entries = [{
'_type': 'url',
'title': feed_entry['title'],
'uploader': feed_entry['source']['name'] if feed_entry['source'] else None,
'upload_date': datetime.datetime.strptime(feed_entry['date'], '%Y-%m-%d').strftime('%Y%m%d'),
- 'url': "http://www.youtube.com/watch?v=" + feed_entry['youtube_id'],
+ 'url': 'http://www.youtube.com/watch?v=' + feed_entry['youtube_id'],
} for feed_entry in feed]
return {
diff --git a/youtube_dl/extractor/pornhub.py b/youtube_dl/extractor/pornhub.py
index 08275687d..91e574dc2 100644
--- a/youtube_dl/extractor/pornhub.py
+++ b/youtube_dl/extractor/pornhub.py
@@ -27,9 +27,9 @@ class PornHubIE(InfoExtractor):
'info_dict': {
'id': '648719015',
'ext': 'mp4',
- "uploader": "Babes",
- "title": "Seductive Indian beauty strips down and fingers her pink pussy",
- "age_limit": 18
+ 'uploader': 'Babes',
+ 'title': 'Seductive Indian beauty strips down and fingers her pink pussy',
+ 'age_limit': 18
}
}, {
'url': 'http://www.pornhub.com/view_video.php?viewkey=ph557bbb6676d2d',
@@ -95,7 +95,7 @@ class PornHubIE(InfoExtractor):
path = compat_urllib_parse_urlparse(video_url).path
extension = os.path.splitext(path)[1][1:]
format = path.split('/')[5].split('_')[:2]
- format = "-".join(format)
+ format = '-'.join(format)
m = re.match(r'^(?P<height>[0-9]+)[pP]-(?P<tbr>[0-9]+)[kK]$', format)
if m is None:
diff --git a/youtube_dl/extractor/pornovoisines.py b/youtube_dl/extractor/pornovoisines.py
index eba4dfbb3..1a53fd71c 100644
--- a/youtube_dl/extractor/pornovoisines.py
+++ b/youtube_dl/extractor/pornovoisines.py
@@ -56,7 +56,7 @@ class PornoVoisinesIE(InfoExtractor):
r'<h1>(.+?)</h1>', webpage, 'title', flags=re.DOTALL)
description = self._html_search_regex(
r'<article id="descriptif">(.+?)</article>',
- webpage, "description", fatal=False, flags=re.DOTALL)
+ webpage, 'description', fatal=False, flags=re.DOTALL)
thumbnail = self._search_regex(
r'<div id="mediaspace%s">\s*<img src="/?([^"]+)"' % video_id,
diff --git a/youtube_dl/extractor/radiobremen.py b/youtube_dl/extractor/radiobremen.py
index 0d706312e..0cbb15f08 100644
--- a/youtube_dl/extractor/radiobremen.py
+++ b/youtube_dl/extractor/radiobremen.py
@@ -28,16 +28,16 @@ class RadioBremenIE(InfoExtractor):
def _real_extract(self, url):
video_id = self._match_id(url)
- meta_url = "http://www.radiobremen.de/apps/php/mediathek/metadaten.php?id=%s" % video_id
+ meta_url = 'http://www.radiobremen.de/apps/php/mediathek/metadaten.php?id=%s' % video_id
meta_doc = self._download_webpage(
meta_url, video_id, 'Downloading metadata')
title = self._html_search_regex(
- r"<h1.*>(?P<title>.+)</h1>", meta_doc, "title")
+ r'<h1.*>(?P<title>.+)</h1>', meta_doc, 'title')
description = self._html_search_regex(
- r"<p>(?P<description>.*)</p>", meta_doc, "description", fatal=False)
+ r'<p>(?P<description>.*)</p>', meta_doc, 'description', fatal=False)
duration = parse_duration(self._html_search_regex(
- r"L&auml;nge:</td>\s+<td>(?P<duration>[0-9]+:[0-9]+)</td>",
- meta_doc, "duration", fatal=False))
+ r'L&auml;nge:</td>\s+<td>(?P<duration>[0-9]+:[0-9]+)</td>',
+ meta_doc, 'duration', fatal=False))
page_doc = self._download_webpage(
url, video_id, 'Downloading video information')
@@ -51,7 +51,7 @@ class RadioBremenIE(InfoExtractor):
formats = [{
'url': video_url,
'ext': 'mp4',
- 'width': int(mobj.group("width")),
+ 'width': int(mobj.group('width')),
}]
return {
'id': video_id,
diff --git a/youtube_dl/extractor/radiofrance.py b/youtube_dl/extractor/radiofrance.py
index 09352ed82..a8afc0014 100644
--- a/youtube_dl/extractor/radiofrance.py
+++ b/youtube_dl/extractor/radiofrance.py
@@ -16,9 +16,9 @@ class RadioFranceIE(InfoExtractor):
'info_dict': {
'id': 'one-one',
'ext': 'ogg',
- "title": "One to one",
- "description": "Plutôt que d'imaginer la radio de demain comme technologie ou comme création de contenu, je veux montrer que quelles que soient ses évolutions, j'ai l'intime conviction que la radio continuera d'être un grand média de proximité pour les auditeurs.",
- "uploader": "Thomas Hercouët",
+ 'title': 'One to one',
+ 'description': "Plutôt que d'imaginer la radio de demain comme technologie ou comme création de contenu, je veux montrer que quelles que soient ses évolutions, j'ai l'intime conviction que la radio continuera d'être un grand média de proximité pour les auditeurs.",
+ 'uploader': 'Thomas Hercouët',
},
}
diff --git a/youtube_dl/extractor/rbmaradio.py b/youtube_dl/extractor/rbmaradio.py
index 0f8f3ebde..7932af6ef 100644
--- a/youtube_dl/extractor/rbmaradio.py
+++ b/youtube_dl/extractor/rbmaradio.py
@@ -18,11 +18,11 @@ class RBMARadioIE(InfoExtractor):
'info_dict': {
'id': 'ford-lopatin-live-at-primavera-sound-2011',
'ext': 'mp3',
- "uploader_id": "ford-lopatin",
- "location": "Spain",
- "description": "Joel Ford and Daniel ’Oneohtrix Point Never’ Lopatin fly their midified pop extravaganza to Spain. Live at Primavera Sound 2011.",
- "uploader": "Ford & Lopatin",
- "title": "Live at Primavera Sound 2011",
+ 'uploader_id': 'ford-lopatin',
+ 'location': 'Spain',
+ 'description': 'Joel Ford and Daniel ’Oneohtrix Point Never’ Lopatin fly their midified pop extravaganza to Spain. Live at Primavera Sound 2011.',
+ 'uploader': 'Ford & Lopatin',
+ 'title': 'Live at Primavera Sound 2011',
},
}
diff --git a/youtube_dl/extractor/reverbnation.py b/youtube_dl/extractor/reverbnation.py
index ec7e7df7b..3c6725aeb 100644
--- a/youtube_dl/extractor/reverbnation.py
+++ b/youtube_dl/extractor/reverbnation.py
@@ -12,12 +12,12 @@ class ReverbNationIE(InfoExtractor):
'url': 'http://www.reverbnation.com/alkilados/song/16965047-mona-lisa',
'md5': '3da12ebca28c67c111a7f8b262d3f7a7',
'info_dict': {
- "id": "16965047",
- "ext": "mp3",
- "title": "MONA LISA",
- "uploader": "ALKILADOS",
- "uploader_id": "216429",
- "thumbnail": "re:^https://gp1\.wac\.edgecastcdn\.net/.*?\.jpg$"
+ 'id': '16965047',
+ 'ext': 'mp3',
+ 'title': 'MONA LISA',
+ 'uploader': 'ALKILADOS',
+ 'uploader_id': '216429',
+ 'thumbnail': 're:^https://gp1\.wac\.edgecastcdn\.net/.*?\.jpg$'
},
}]
diff --git a/youtube_dl/extractor/ringtv.py b/youtube_dl/extractor/ringtv.py
index efa4afeb6..508758075 100644
--- a/youtube_dl/extractor/ringtv.py
+++ b/youtube_dl/extractor/ringtv.py
@@ -8,13 +8,13 @@ from .common import InfoExtractor
class RingTVIE(InfoExtractor):
_VALID_URL = r'http://(?:www\.)?ringtv\.craveonline\.com/(?P<type>news|videos/video)/(?P<id>[^/?#]+)'
_TEST = {
- "url": "http://ringtv.craveonline.com/news/310833-luis-collazo-says-victor-ortiz-better-not-quit-on-jan-30",
- "md5": "d25945f5df41cdca2d2587165ac28720",
- "info_dict": {
+ 'url': 'http://ringtv.craveonline.com/news/310833-luis-collazo-says-victor-ortiz-better-not-quit-on-jan-30',
+ 'md5': 'd25945f5df41cdca2d2587165ac28720',
+ 'info_dict': {
'id': '857645',
'ext': 'mp4',
- "title": 'Video: Luis Collazo says Victor Ortiz "better not quit on Jan. 30" - Ring TV',
- "description": 'Luis Collazo is excited about his Jan. 30 showdown with fellow former welterweight titleholder Victor Ortiz at Barclays Center in his hometown of Brooklyn. The SuperBowl week fight headlines a Golden Boy Live! card on Fox Sports 1.',
+ 'title': 'Video: Luis Collazo says Victor Ortiz "better not quit on Jan. 30" - Ring TV',
+ 'description': 'Luis Collazo is excited about his Jan. 30 showdown with fellow former welterweight titleholder Victor Ortiz at Barclays Center in his hometown of Brooklyn. The SuperBowl week fight headlines a Golden Boy Live! card on Fox Sports 1.',
}
}
@@ -32,8 +32,8 @@ class RingTVIE(InfoExtractor):
description = self._html_search_regex(
r'addthis:description="([^"]+)"',
webpage, 'description', fatal=False)
- final_url = "http://ringtv.craveonline.springboardplatform.com/storage/ringtv.craveonline.com/conversion/%s.mp4" % video_id
- thumbnail_url = "http://ringtv.craveonline.springboardplatform.com/storage/ringtv.craveonline.com/snapshots/%s.jpg" % video_id
+ final_url = 'http://ringtv.craveonline.springboardplatform.com/storage/ringtv.craveonline.com/conversion/%s.mp4' % video_id
+ thumbnail_url = 'http://ringtv.craveonline.springboardplatform.com/storage/ringtv.craveonline.com/snapshots/%s.jpg' % video_id
return {
'id': video_id,
diff --git a/youtube_dl/extractor/rte.py b/youtube_dl/extractor/rte.py
index 73c9788be..042bc8dab 100644
--- a/youtube_dl/extractor/rte.py
+++ b/youtube_dl/extractor/rte.py
@@ -43,7 +43,7 @@ class RteIE(InfoExtractor):
r'<meta name="thumbnail" content="uri:irus:(.*?)" />', webpage, 'thumbnail')
thumbnail = 'http://img.rasset.ie/' + thumbnail_id + '.jpg'
- feeds_url = self._html_search_meta("feeds-prefix", webpage, 'feeds url') + video_id
+ feeds_url = self._html_search_meta('feeds-prefix', webpage, 'feeds url') + video_id
json_string = self._download_json(feeds_url, video_id)
# f4m_url = server + relative_url
diff --git a/youtube_dl/extractor/rtl2.py b/youtube_dl/extractor/rtl2.py
index 25f7faf76..de004671d 100644
--- a/youtube_dl/extractor/rtl2.py
+++ b/youtube_dl/extractor/rtl2.py
@@ -63,7 +63,7 @@ class RTL2IE(InfoExtractor):
download_url = video_info['streamurl']
download_url = download_url.replace('\\', '')
stream_url = 'mp4:' + self._html_search_regex(r'ondemand/(.*)', download_url, 'stream URL')
- rtmp_conn = ["S:connect", "O:1", "NS:pageUrl:" + url, "NB:fpad:0", "NN:videoFunction:1", "O:0"]
+ rtmp_conn = ['S:connect', 'O:1', 'NS:pageUrl:' + url, 'NB:fpad:0', 'NN:videoFunction:1', 'O:0']
formats = [{
'url': download_url,
diff --git a/youtube_dl/extractor/screenwavemedia.py b/youtube_dl/extractor/screenwavemedia.py
index e5d62a139..2cf210e0d 100644
--- a/youtube_dl/extractor/screenwavemedia.py
+++ b/youtube_dl/extractor/screenwavemedia.py
@@ -40,7 +40,7 @@ class ScreenwaveMediaIE(InfoExtractor):
re.sub(
r'(?s)/\*.*?\*/', '',
self._search_regex(
- r"sources\s*:\s*(\[[^\]]+?\])", playerconfig,
+ r'sources\s*:\s*(\[[^\]]+?\])', playerconfig,
'sources',
).replace(
"' + thisObj.options.videoserver + '",
diff --git a/youtube_dl/extractor/senateisvp.py b/youtube_dl/extractor/senateisvp.py
index 990ea0fa8..4d3b58522 100644
--- a/youtube_dl/extractor/senateisvp.py
+++ b/youtube_dl/extractor/senateisvp.py
@@ -15,37 +15,37 @@ from ..compat import (
class SenateISVPIE(InfoExtractor):
_COMM_MAP = [
- ["ag", "76440", "http://ag-f.akamaihd.net"],
- ["aging", "76442", "http://aging-f.akamaihd.net"],
- ["approps", "76441", "http://approps-f.akamaihd.net"],
- ["armed", "76445", "http://armed-f.akamaihd.net"],
- ["banking", "76446", "http://banking-f.akamaihd.net"],
- ["budget", "76447", "http://budget-f.akamaihd.net"],
- ["cecc", "76486", "http://srs-f.akamaihd.net"],
- ["commerce", "80177", "http://commerce1-f.akamaihd.net"],
- ["csce", "75229", "http://srs-f.akamaihd.net"],
- ["dpc", "76590", "http://dpc-f.akamaihd.net"],
- ["energy", "76448", "http://energy-f.akamaihd.net"],
- ["epw", "76478", "http://epw-f.akamaihd.net"],
- ["ethics", "76449", "http://ethics-f.akamaihd.net"],
- ["finance", "76450", "http://finance-f.akamaihd.net"],
- ["foreign", "76451", "http://foreign-f.akamaihd.net"],
- ["govtaff", "76453", "http://govtaff-f.akamaihd.net"],
- ["help", "76452", "http://help-f.akamaihd.net"],
- ["indian", "76455", "http://indian-f.akamaihd.net"],
- ["intel", "76456", "http://intel-f.akamaihd.net"],
- ["intlnarc", "76457", "http://intlnarc-f.akamaihd.net"],
- ["jccic", "85180", "http://jccic-f.akamaihd.net"],
- ["jec", "76458", "http://jec-f.akamaihd.net"],
- ["judiciary", "76459", "http://judiciary-f.akamaihd.net"],
- ["rpc", "76591", "http://rpc-f.akamaihd.net"],
- ["rules", "76460", "http://rules-f.akamaihd.net"],
- ["saa", "76489", "http://srs-f.akamaihd.net"],
- ["smbiz", "76461", "http://smbiz-f.akamaihd.net"],
- ["srs", "75229", "http://srs-f.akamaihd.net"],
- ["uscc", "76487", "http://srs-f.akamaihd.net"],
- ["vetaff", "76462", "http://vetaff-f.akamaihd.net"],
- ["arch", "", "http://ussenate-f.akamaihd.net/"]
+ ['ag', '76440', 'http://ag-f.akamaihd.net'],
+ ['aging', '76442', 'http://aging-f.akamaihd.net'],
+ ['approps', '76441', 'http://approps-f.akamaihd.net'],
+ ['armed', '76445', 'http://armed-f.akamaihd.net'],
+ ['banking', '76446', 'http://banking-f.akamaihd.net'],
+ ['budget', '76447', 'http://budget-f.akamaihd.net'],
+ ['cecc', '76486', 'http://srs-f.akamaihd.net'],
+ ['commerce', '80177', 'http://commerce1-f.akamaihd.net'],
+ ['csce', '75229', 'http://srs-f.akamaihd.net'],
+ ['dpc', '76590', 'http://dpc-f.akamaihd.net'],
+ ['energy', '76448', 'http://energy-f.akamaihd.net'],
+ ['epw', '76478', 'http://epw-f.akamaihd.net'],
+ ['ethics', '76449', 'http://ethics-f.akamaihd.net'],
+ ['finance', '76450', 'http://finance-f.akamaihd.net'],
+ ['foreign', '76451', 'http://foreign-f.akamaihd.net'],
+ ['govtaff', '76453', 'http://govtaff-f.akamaihd.net'],
+ ['help', '76452', 'http://help-f.akamaihd.net'],
+ ['indian', '76455', 'http://indian-f.akamaihd.net'],
+ ['intel', '76456', 'http://intel-f.akamaihd.net'],
+ ['intlnarc', '76457', 'http://intlnarc-f.akamaihd.net'],
+ ['jccic', '85180', 'http://jccic-f.akamaihd.net'],
+ ['jec', '76458', 'http://jec-f.akamaihd.net'],
+ ['judiciary', '76459', 'http://judiciary-f.akamaihd.net'],
+ ['rpc', '76591', 'http://rpc-f.akamaihd.net'],
+ ['rules', '76460', 'http://rules-f.akamaihd.net'],
+ ['saa', '76489', 'http://srs-f.akamaihd.net'],
+ ['smbiz', '76461', 'http://smbiz-f.akamaihd.net'],
+ ['srs', '75229', 'http://srs-f.akamaihd.net'],
+ ['uscc', '76487', 'http://srs-f.akamaihd.net'],
+ ['vetaff', '76462', 'http://vetaff-f.akamaihd.net'],
+ ['arch', '', 'http://ussenate-f.akamaihd.net/']
]
_IE_NAME = 'senate.gov'
_VALID_URL = r'http://www\.senate\.gov/isvp/?\?(?P<qs>.+)'
diff --git a/youtube_dl/extractor/slutload.py b/youtube_dl/extractor/slutload.py
index 3df71304d..7efb29f65 100644
--- a/youtube_dl/extractor/slutload.py
+++ b/youtube_dl/extractor/slutload.py
@@ -13,8 +13,8 @@ class SlutloadIE(InfoExtractor):
'info_dict': {
'id': 'TD73btpBqSxc',
'ext': 'mp4',
- "title": "virginie baisee en cam",
- "age_limit": 18,
+ 'title': 'virginie baisee en cam',
+ 'age_limit': 18,
'thumbnail': 're:https?://.*?\.jpg'
}
}
diff --git a/youtube_dl/extractor/snotr.py b/youtube_dl/extractor/snotr.py
index da3b05a8d..0d1ab07f8 100644
--- a/youtube_dl/extractor/snotr.py
+++ b/youtube_dl/extractor/snotr.py
@@ -43,7 +43,7 @@ class SnotrIE(InfoExtractor):
title = self._og_search_title(webpage)
description = self._og_search_description(webpage)
- video_url = "http://cdn.videos.snotr.com/%s.flv" % video_id
+ video_url = 'http://cdn.videos.snotr.com/%s.flv' % video_id
view_count = str_to_int(self._html_search_regex(
r'<p>\n<strong>Views:</strong>\n([\d,\.]+)</p>',
diff --git a/youtube_dl/extractor/soundcloud.py b/youtube_dl/extractor/soundcloud.py
index b2d5487ca..1efb2b980 100644
--- a/youtube_dl/extractor/soundcloud.py
+++ b/youtube_dl/extractor/soundcloud.py
@@ -222,7 +222,7 @@ class SoundcloudIE(InfoExtractor):
full_title = track_id
token = mobj.group('secret_token')
if token:
- info_json_url += "&secret_token=" + token
+ info_json_url += '&secret_token=' + token
elif mobj.group('player'):
query = compat_urlparse.parse_qs(compat_urlparse.urlparse(url).query)
real_url = query['url'][0]
diff --git a/youtube_dl/extractor/steam.py b/youtube_dl/extractor/steam.py
index 183dcb03c..1a831ef6d 100644
--- a/youtube_dl/extractor/steam.py
+++ b/youtube_dl/extractor/steam.py
@@ -22,23 +22,23 @@ class SteamIE(InfoExtractor):
_VIDEO_PAGE_TEMPLATE = 'http://store.steampowered.com/video/%s/'
_AGECHECK_TEMPLATE = 'http://store.steampowered.com/agecheck/video/%s/?snr=1_agecheck_agecheck__age-gate&ageDay=1&ageMonth=January&ageYear=1970'
_TESTS = [{
- "url": "http://store.steampowered.com/video/105600/",
- "playlist": [
+ 'url': 'http://store.steampowered.com/video/105600/',
+ 'playlist': [
{
- "md5": "f870007cee7065d7c76b88f0a45ecc07",
- "info_dict": {
+ 'md5': 'f870007cee7065d7c76b88f0a45ecc07',
+ 'info_dict': {
'id': '81300',
'ext': 'flv',
- "title": "Terraria 1.1 Trailer",
+ 'title': 'Terraria 1.1 Trailer',
'playlist_index': 1,
}
},
{
- "md5": "61aaf31a5c5c3041afb58fb83cbb5751",
- "info_dict": {
+ 'md5': '61aaf31a5c5c3041afb58fb83cbb5751',
+ 'info_dict': {
'id': '80859',
'ext': 'flv',
- "title": "Terraria Trailer",
+ 'title': 'Terraria Trailer',
'playlist_index': 2,
}
}
diff --git a/youtube_dl/extractor/tenplay.py b/youtube_dl/extractor/tenplay.py
index f6694149b..02a31a609 100644
--- a/youtube_dl/extractor/tenplay.py
+++ b/youtube_dl/extractor/tenplay.py
@@ -27,10 +27,10 @@ class TenPlayIE(InfoExtractor):
}
_video_fields = [
- "id", "name", "shortDescription", "longDescription", "creationDate",
- "publishedDate", "lastModifiedDate", "customFields", "videoStillURL",
- "thumbnailURL", "referenceId", "length", "playsTotal",
- "playsTrailingWeek", "renditions", "captioning", "startDate", "endDate"]
+ 'id', 'name', 'shortDescription', 'longDescription', 'creationDate',
+ 'publishedDate', 'lastModifiedDate', 'customFields', 'videoStillURL',
+ 'thumbnailURL', 'referenceId', 'length', 'playsTotal',
+ 'playsTrailingWeek', 'renditions', 'captioning', 'startDate', 'endDate']
def _real_extract(self, url):
webpage = self._download_webpage(url, url)
diff --git a/youtube_dl/extractor/thesixtyone.py b/youtube_dl/extractor/thesixtyone.py
index 5d09eb9a8..d8b1fd281 100644
--- a/youtube_dl/extractor/thesixtyone.py
+++ b/youtube_dl/extractor/thesixtyone.py
@@ -48,22 +48,22 @@ class TheSixtyOneIE(InfoExtractor):
]
_DECODE_MAP = {
- "x": "a",
- "m": "b",
- "w": "c",
- "q": "d",
- "n": "e",
- "p": "f",
- "a": "0",
- "h": "1",
- "e": "2",
- "u": "3",
- "s": "4",
- "i": "5",
- "o": "6",
- "y": "7",
- "r": "8",
- "c": "9"
+ 'x': 'a',
+ 'm': 'b',
+ 'w': 'c',
+ 'q': 'd',
+ 'n': 'e',
+ 'p': 'f',
+ 'a': '0',
+ 'h': '1',
+ 'e': '2',
+ 'u': '3',
+ 's': '4',
+ 'i': '5',
+ 'o': '6',
+ 'y': '7',
+ 'r': '8',
+ 'c': '9'
}
def _real_extract(self, url):
diff --git a/youtube_dl/extractor/traileraddict.py b/youtube_dl/extractor/traileraddict.py
index 1c53a3fd0..0e01b15fc 100644
--- a/youtube_dl/extractor/traileraddict.py
+++ b/youtube_dl/extractor/traileraddict.py
@@ -38,12 +38,12 @@ class TrailerAddictIE(InfoExtractor):
# Presence of (no)watchplus function indicates HD quality is available
if re.search(r'function (no)?watchplus()', webpage):
- fvar = "fvarhd"
+ fvar = 'fvarhd'
else:
- fvar = "fvar"
+ fvar = 'fvar'
- info_url = "http://www.traileraddict.com/%s.php?tid=%s" % (fvar, str(video_id))
- info_webpage = self._download_webpage(info_url, video_id, "Downloading the info webpage")
+ info_url = 'http://www.traileraddict.com/%s.php?tid=%s' % (fvar, str(video_id))
+ info_webpage = self._download_webpage(info_url, video_id, 'Downloading the info webpage')
final_url = self._search_regex(r'&fileurl=(.+)',
info_webpage, 'Download url').replace('%3F', '?')
diff --git a/youtube_dl/extractor/tudou.py b/youtube_dl/extractor/tudou.py
index da3cd76f7..f56b66d06 100644
--- a/youtube_dl/extractor/tudou.py
+++ b/youtube_dl/extractor/tudou.py
@@ -49,7 +49,7 @@ class TudouIE(InfoExtractor):
info_url = 'http://v2.tudou.com/f?id=' + compat_str(video_id)
if quality:
info_url += '&hd' + quality
- xml_data = self._download_xml(info_url, video_id, "Opening the info XML page")
+ xml_data = self._download_xml(info_url, video_id, 'Opening the info XML page')
final_url = xml_data.text
return final_url
diff --git a/youtube_dl/extractor/vbox7.py b/youtube_dl/extractor/vbox7.py
index 1e740fbe6..3794bcded 100644
--- a/youtube_dl/extractor/vbox7.py
+++ b/youtube_dl/extractor/vbox7.py
@@ -47,7 +47,7 @@ class Vbox7IE(InfoExtractor):
title = self._html_search_regex(r'<title>(.*)</title>',
webpage, 'title').split('/')[0].strip()
- info_url = "http://vbox7.com/play/magare.do"
+ info_url = 'http://vbox7.com/play/magare.do'
data = compat_urllib_parse.urlencode({'as3': '1', 'vid': video_id})
info_request = sanitized_Request(info_url, data)
info_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
diff --git a/youtube_dl/extractor/videopremium.py b/youtube_dl/extractor/videopremium.py
index 3176e3b9d..5de8273c3 100644
--- a/youtube_dl/extractor/videopremium.py
+++ b/youtube_dl/extractor/videopremium.py
@@ -26,7 +26,7 @@ class VideoPremiumIE(InfoExtractor):
webpage_url = 'http://videopremium.tv/' + video_id
webpage = self._download_webpage(webpage_url, video_id)
- if re.match(r"^<html><head><script[^>]*>window.location\s*=", webpage):
+ if re.match(r'^<html><head><script[^>]*>window.location\s*=', webpage):
# Download again, we need a cookie
webpage = self._download_webpage(
webpage_url, video_id,
@@ -37,10 +37,10 @@ class VideoPremiumIE(InfoExtractor):
return {
'id': video_id,
- 'url': "rtmp://e%d.md.iplay.md/play" % random.randint(1, 16),
- 'play_path': "mp4:%s.f4v" % video_id,
- 'page_url': "http://videopremium.tv/" + video_id,
- 'player_url': "http://videopremium.tv/uplayer/uppod.swf",
+ 'url': 'rtmp://e%d.md.iplay.md/play' % random.randint(1, 16),
+ 'play_path': 'mp4:%s.f4v' % video_id,
+ 'page_url': 'http://videopremium.tv/' + video_id,
+ 'player_url': 'http://videopremium.tv/uplayer/uppod.swf',
'ext': 'f4v',
'title': video_title,
}
diff --git a/youtube_dl/extractor/vimeo.py b/youtube_dl/extractor/vimeo.py
index c7df6b0c5..3049dffb6 100644
--- a/youtube_dl/extractor/vimeo.py
+++ b/youtube_dl/extractor/vimeo.py
@@ -368,16 +368,16 @@ class VimeoIE(VimeoBaseInfoExtractor):
{'force_feature_id': True}), 'Vimeo')
# Extract title
- video_title = config["video"]["title"]
+ video_title = config['video']['title']
# Extract uploader and uploader_id
- video_uploader = config["video"]["owner"]["name"]
- video_uploader_id = config["video"]["owner"]["url"].split('/')[-1] if config["video"]["owner"]["url"] else None
+ video_uploader = config['video']['owner']['name']
+ video_uploader_id = config['video']['owner']['url'].split('/')[-1] if config['video']['owner']['url'] else None
# Extract video thumbnail
- video_thumbnail = config["video"].get("thumbnail")
+ video_thumbnail = config['video'].get('thumbnail')
if video_thumbnail is None:
- video_thumbs = config["video"].get("thumbs")
+ video_thumbs = config['video'].get('thumbs')
if video_thumbs and isinstance(video_thumbs, dict):
_, video_thumbnail = sorted((int(width if width.isdigit() else 0), t_url) for (width, t_url) in video_thumbs.items())[-1]
@@ -401,7 +401,7 @@ class VimeoIE(VimeoBaseInfoExtractor):
self._downloader.report_warning('Cannot find video description')
# Extract video duration
- video_duration = int_or_none(config["video"].get("duration"))
+ video_duration = int_or_none(config['video'].get('duration'))
# Extract upload date
video_upload_date = None
@@ -703,10 +703,10 @@ class VimeoLikesIE(InfoExtractor):
_TEST = {
'url': 'https://vimeo.com/user755559/likes/',
'playlist_mincount': 293,
- "info_dict": {
+ 'info_dict': {
'id': 'user755559_likes',
- "description": "See all the videos urza likes",
- "title": 'Videos urza likes',
+ 'description': 'See all the videos urza likes',
+ 'title': 'Videos urza likes',
},
}
diff --git a/youtube_dl/extractor/vine.py b/youtube_dl/extractor/vine.py
index cb2a4b0b5..a6a6cc479 100644
--- a/youtube_dl/extractor/vine.py
+++ b/youtube_dl/extractor/vine.py
@@ -119,7 +119,7 @@ class VineIE(InfoExtractor):
class VineUserIE(InfoExtractor):
IE_NAME = 'vine:user'
_VALID_URL = r'(?:https?://)?vine\.co/(?P<u>u/)?(?P<user>[^/]+)/?(\?.*)?$'
- _VINE_BASE_URL = "https://vine.co/"
+ _VINE_BASE_URL = 'https://vine.co/'
_TESTS = [
{
'url': 'https://vine.co/Visa',
@@ -139,7 +139,7 @@ class VineUserIE(InfoExtractor):
user = mobj.group('user')
u = mobj.group('u')
- profile_url = "%sapi/users/profiles/%s%s" % (
+ profile_url = '%sapi/users/profiles/%s%s' % (
self._VINE_BASE_URL, 'vanity/' if not u else '', user)
profile_data = self._download_json(
profile_url, user, note='Downloading user profile data')
@@ -147,7 +147,7 @@ class VineUserIE(InfoExtractor):
user_id = profile_data['data']['userId']
timeline_data = []
for pagenum in itertools.count(1):
- timeline_url = "%sapi/timelines/users/%s?page=%s&size=100" % (
+ timeline_url = '%sapi/timelines/users/%s?page=%s&size=100' % (
self._VINE_BASE_URL, user_id, pagenum)
timeline_page = self._download_json(
timeline_url, user, note='Downloading page %d' % pagenum)
diff --git a/youtube_dl/extractor/worldstarhiphop.py b/youtube_dl/extractor/worldstarhiphop.py
index a3ea26feb..09415b589 100644
--- a/youtube_dl/extractor/worldstarhiphop.py
+++ b/youtube_dl/extractor/worldstarhiphop.py
@@ -8,12 +8,12 @@ from .common import InfoExtractor
class WorldStarHipHopIE(InfoExtractor):
_VALID_URL = r'https?://(?:www|m)\.worldstar(?:candy|hiphop)\.com/(?:videos|android)/video\.php\?v=(?P<id>.*)'
_TESTS = [{
- "url": "http://www.worldstarhiphop.com/videos/video.php?v=wshh6a7q1ny0G34ZwuIO",
- "md5": "9d04de741161603bf7071bbf4e883186",
- "info_dict": {
- "id": "wshh6a7q1ny0G34ZwuIO",
- "ext": "mp4",
- "title": "KO Of The Week: MMA Fighter Gets Knocked Out By Swift Head Kick!"
+ 'url': 'http://www.worldstarhiphop.com/videos/video.php?v=wshh6a7q1ny0G34ZwuIO',
+ 'md5': '9d04de741161603bf7071bbf4e883186',
+ 'info_dict': {
+ 'id': 'wshh6a7q1ny0G34ZwuIO',
+ 'ext': 'mp4',
+ 'title': 'KO Of The Week: MMA Fighter Gets Knocked Out By Swift Head Kick!'
}
}, {
'url': 'http://m.worldstarhiphop.com/android/video.php?v=wshh6a7q1ny0G34ZwuIO',
@@ -21,7 +21,7 @@ class WorldStarHipHopIE(InfoExtractor):
'info_dict': {
'id': 'wshh6a7q1ny0G34ZwuIO',
'ext': 'mp4',
- "title": "KO Of The Week: MMA Fighter Gets Knocked Out By Swift Head Kick!"
+ 'title': 'KO Of The Week: MMA Fighter Gets Knocked Out By Swift Head Kick!'
}
}]
diff --git a/youtube_dl/extractor/youjizz.py b/youtube_dl/extractor/youjizz.py
index c642075dc..4150b28da 100644
--- a/youtube_dl/extractor/youjizz.py
+++ b/youtube_dl/extractor/youjizz.py
@@ -16,8 +16,8 @@ class YouJizzIE(InfoExtractor):
'info_dict': {
'id': '2189178',
'ext': 'flv',
- "title": "Zeichentrick 1",
- "age_limit": 18,
+ 'title': 'Zeichentrick 1',
+ 'age_limit': 18,
}
}
diff --git a/youtube_dl/extractor/youku.py b/youtube_dl/extractor/youku.py
index 5c1f84a09..900eb2aba 100644
--- a/youtube_dl/extractor/youku.py
+++ b/youtube_dl/extractor/youku.py
@@ -217,7 +217,7 @@ class YoukuIE(InfoExtractor):
video_password = self._downloader.params.get('videopassword')
# request basic data
- basic_data_url = "http://play.youku.com/play/get.json?vid=%s&ct=12" % video_id
+ basic_data_url = 'http://play.youku.com/play/get.json?vid=%s&ct=12' % video_id
if video_password:
basic_data_url += '&pwd=%s' % video_password
diff --git a/youtube_dl/options.py b/youtube_dl/options.py
index 2137dfb3f..3afa8bb6f 100644
--- a/youtube_dl/options.py
+++ b/youtube_dl/options.py
@@ -85,7 +85,7 @@ def parseOpts(overrideArguments=None):
if option.takes_value():
opts.append(' %s' % option.metavar)
- return "".join(opts)
+ return ''.join(opts)
def _comma_separated_values_options_callback(option, opt_str, value, parser):
setattr(parser.values, option.dest, value.split(','))
diff --git a/youtube_dl/postprocessor/execafterdownload.py b/youtube_dl/postprocessor/execafterdownload.py
index 13794b7ba..74f66d669 100644
--- a/youtube_dl/postprocessor/execafterdownload.py
+++ b/youtube_dl/postprocessor/execafterdownload.py
@@ -19,7 +19,7 @@ class ExecAfterDownloadPP(PostProcessor):
cmd = cmd.replace('{}', shlex_quote(information['filepath']))
- self._downloader.to_screen("[exec] Executing command: %s" % cmd)
+ self._downloader.to_screen('[exec] Executing command: %s' % cmd)
retCode = subprocess.call(cmd, shell=True)
if retCode != 0:
raise PostProcessingError(
diff --git a/youtube_dl/postprocessor/metadatafromtitle.py b/youtube_dl/postprocessor/metadatafromtitle.py
index a56077f20..42377fa0f 100644
--- a/youtube_dl/postprocessor/metadatafromtitle.py
+++ b/youtube_dl/postprocessor/metadatafromtitle.py
@@ -24,7 +24,7 @@ class MetadataFromTitlePP(PostProcessor):
'(?P<title>.+)\ \-\ (?P<artist>.+)'
"""
lastpos = 0
- regex = ""
+ regex = ''
# replace %(..)s with regex group and escape other string parts
for match in re.finditer(r'%\((\w+)\)s', fmt):
regex += re.escape(fmt[lastpos:match.start()])
diff --git a/youtube_dl/postprocessor/xattrpp.py b/youtube_dl/postprocessor/xattrpp.py
index 7d88e1308..480d48d05 100644
--- a/youtube_dl/postprocessor/xattrpp.py
+++ b/youtube_dl/postprocessor/xattrpp.py
@@ -80,15 +80,15 @@ class XAttrMetadataPP(PostProcessor):
assert ':' not in key
assert os.path.exists(path)
- ads_fn = path + ":" + key
+ ads_fn = path + ':' + key
try:
- with open(ads_fn, "wb") as f:
+ with open(ads_fn, 'wb') as f:
f.write(value)
except EnvironmentError as e:
raise XAttrMetadataError(e.errno, e.strerror)
else:
- user_has_setfattr = check_executable("setfattr", ['--version'])
- user_has_xattr = check_executable("xattr", ['-h'])
+ user_has_setfattr = check_executable('setfattr', ['--version'])
+ user_has_xattr = check_executable('xattr', ['-h'])
if user_has_setfattr or user_has_xattr:
@@ -150,7 +150,7 @@ class XAttrMetadataPP(PostProcessor):
value = info.get(infoname)
if value:
- if infoname == "upload_date":
+ if infoname == 'upload_date':
value = hyphenate_date(value)
byte_value = value.encode('utf-8')
diff --git a/youtube_dl/update.py b/youtube_dl/update.py
index e4a1aaa64..676ebe1c4 100644
--- a/youtube_dl/update.py
+++ b/youtube_dl/update.py
@@ -31,12 +31,12 @@ def rsa_verify(message, signature, key):
def update_self(to_screen, verbose, opener):
"""Update the program file with the latest version from the repository"""
- UPDATE_URL = "https://rg3.github.io/youtube-dl/update/"
+ UPDATE_URL = 'https://rg3.github.io/youtube-dl/update/'
VERSION_URL = UPDATE_URL + 'LATEST_VERSION'
JSON_URL = UPDATE_URL + 'versions.json'
UPDATES_RSA_KEY = (0x9d60ee4d8f805312fdb15a62f87b95bd66177b91df176765d13514a0f1754bcd2057295c5b6f1d35daa6742c3ffc9a82d3e118861c207995a8031e151d863c9927e304576bc80692bc8e094896fcf11b66f3e29e04e3a71e9a11558558acea1840aec37fc396fb6b65dc81a1c4144e03bd1c011de62e3f1357b327d08426fe93, 65537)
- if not isinstance(globals().get('__loader__'), zipimporter) and not hasattr(sys, "frozen"):
+ if not isinstance(globals().get('__loader__'), zipimporter) and not hasattr(sys, 'frozen'):
to_screen('It looks like you installed youtube-dl with a package manager, pip, setup.py or a tarball. Please use that to update.')
return
@@ -85,7 +85,7 @@ def update_self(to_screen, verbose, opener):
filename = sys.argv[0]
# Py2EXE: Filename could be different
- if hasattr(sys, "frozen") and not os.path.isfile(filename):
+ if hasattr(sys, 'frozen') and not os.path.isfile(filename):
if os.path.isfile(filename + '.exe'):
filename += '.exe'
@@ -94,7 +94,7 @@ def update_self(to_screen, verbose, opener):
return
# Py2EXE
- if hasattr(sys, "frozen"):
+ if hasattr(sys, 'frozen'):
exe = os.path.abspath(filename)
directory = os.path.dirname(exe)
if not os.access(directory, os.W_OK):
diff --git a/youtube_dl/utils.py b/youtube_dl/utils.py
index 3e4219b17..672ce05ea 100644
--- a/youtube_dl/utils.py
+++ b/youtube_dl/utils.py
@@ -248,7 +248,7 @@ def xpath_attr(node, xpath, key, name=None, fatal=False, default=NO_DEFAULT):
def get_element_by_id(id, html):
"""Return the content of the tag with the specified ID in the passed HTML document"""
- return get_element_by_attribute("id", id, html)
+ return get_element_by_attribute('id', id, html)
def get_element_by_attribute(attribute, value, html):
@@ -994,7 +994,7 @@ def date_from_str(date_str):
unit += 's'
delta = datetime.timedelta(**{unit: time})
return today + delta
- return datetime.datetime.strptime(date_str, "%Y%m%d").date()
+ return datetime.datetime.strptime(date_str, '%Y%m%d').date()
def hyphenate_date(date_str):
@@ -1074,22 +1074,22 @@ def _windows_write_string(s, out):
GetStdHandle = ctypes.WINFUNCTYPE(
ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD)(
- (b"GetStdHandle", ctypes.windll.kernel32))
+ (b'GetStdHandle', ctypes.windll.kernel32))
h = GetStdHandle(WIN_OUTPUT_IDS[fileno])
WriteConsoleW = ctypes.WINFUNCTYPE(
ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE, ctypes.wintypes.LPWSTR,
ctypes.wintypes.DWORD, ctypes.POINTER(ctypes.wintypes.DWORD),
- ctypes.wintypes.LPVOID)((b"WriteConsoleW", ctypes.windll.kernel32))
+ ctypes.wintypes.LPVOID)((b'WriteConsoleW', ctypes.windll.kernel32))
written = ctypes.wintypes.DWORD(0)
- GetFileType = ctypes.WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)((b"GetFileType", ctypes.windll.kernel32))
+ GetFileType = ctypes.WINFUNCTYPE(ctypes.wintypes.DWORD, ctypes.wintypes.DWORD)((b'GetFileType', ctypes.windll.kernel32))
FILE_TYPE_CHAR = 0x0002
FILE_TYPE_REMOTE = 0x8000
GetConsoleMode = ctypes.WINFUNCTYPE(
ctypes.wintypes.BOOL, ctypes.wintypes.HANDLE,
ctypes.POINTER(ctypes.wintypes.DWORD))(
- (b"GetConsoleMode", ctypes.windll.kernel32))
+ (b'GetConsoleMode', ctypes.windll.kernel32))
INVALID_HANDLE_VALUE = ctypes.wintypes.DWORD(-1).value
def not_a_console(handle):
@@ -1387,7 +1387,7 @@ def fix_xml_ampersands(xml_str):
def setproctitle(title):
assert isinstance(title, compat_str)
try:
- libc = ctypes.cdll.LoadLibrary("libc.so.6")
+ libc = ctypes.cdll.LoadLibrary('libc.so.6')
except OSError:
return
title_bytes = title.encode('utf-8')
@@ -1427,7 +1427,7 @@ def url_basename(url):
class HEADRequest(compat_urllib_request.Request):
def get_method(self):
- return "HEAD"
+ return 'HEAD'
def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1):