author     Sergey M․ <[email protected]>  2016-03-26 01:46:57 +0600
committer  Sergey M․ <[email protected]>  2016-03-26 01:46:57 +0600
commit     15707c7e024f1f29e7abd8ddaa362196ef2d4af6 (patch)
tree       25f149d9df1cf58a171ecf765dd3cd5d0a20f87b /youtube_dl
parent     2156f16ca7babde4c5fa813dbe4e7ac1a2f758d1 (diff)
[compat] Add compat_urllib_parse_urlencode and eliminate encode_dict
encode_dict functionality has been improved and moved directly into compat_urllib_parse_urlencode.
All occurrences of compat_urllib_parse.urlencode throughout the codebase have been replaced by compat_urllib_parse_urlencode.
Closes #8974
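As a minimal sketch of the call-site migration this commit performs (the query dict below is invented for illustration and is not taken from the diff):

    # before this commit: callers had to byte-encode unicode values themselves
    from youtube_dl.compat import compat_urllib_parse
    from youtube_dl.utils import encode_dict
    data = compat_urllib_parse.urlencode(encode_dict({'title': u'm\xfcnchen'})).encode('utf-8')

    # after this commit: the compat wrapper handles unicode values on Python 2 as well
    from youtube_dl.compat import compat_urllib_parse_urlencode
    data = compat_urllib_parse_urlencode({'title': u'm\xfcnchen'}).encode('utf-8')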
Diffstat (limited to 'youtube_dl')
-rw-r--r--  youtube_dl/compat.py | 26
-rw-r--r--  youtube_dl/extractor/addanime.py | 4
-rw-r--r--  youtube_dl/extractor/animeondemand.py | 3
-rw-r--r--  youtube_dl/extractor/atresplayer.py | 4
-rw-r--r--  youtube_dl/extractor/bambuser.py | 4
-rw-r--r--  youtube_dl/extractor/camdemy.py | 4
-rw-r--r--  youtube_dl/extractor/ceskatelevize.py | 4
-rw-r--r--  youtube_dl/extractor/cloudy.py | 4
-rw-r--r--  youtube_dl/extractor/comedycentral.py | 4
-rw-r--r--  youtube_dl/extractor/common.py | 4
-rw-r--r--  youtube_dl/extractor/condenast.py | 4
-rw-r--r--  youtube_dl/extractor/crunchyroll.py | 8
-rw-r--r--  youtube_dl/extractor/daum.py | 6
-rw-r--r--  youtube_dl/extractor/dcn.py | 8
-rw-r--r--  youtube_dl/extractor/dramafever.py | 4
-rw-r--r--  youtube_dl/extractor/eroprofile.py | 4
-rw-r--r--  youtube_dl/extractor/fc2.py | 5
-rw-r--r--  youtube_dl/extractor/fivemin.py | 4
-rw-r--r--  youtube_dl/extractor/flickr.py | 4
-rw-r--r--  youtube_dl/extractor/funimation.py | 5
-rw-r--r--  youtube_dl/extractor/gdcvault.py | 4
-rw-r--r--  youtube_dl/extractor/hotnewhiphop.py | 4
-rw-r--r--  youtube_dl/extractor/hypem.py | 4
-rw-r--r--  youtube_dl/extractor/internetvideoarchive.py | 4
-rw-r--r--  youtube_dl/extractor/iqiyi.py | 8
-rw-r--r--  youtube_dl/extractor/ivideon.py | 4
-rw-r--r--  youtube_dl/extractor/kaltura.py | 4
-rw-r--r--  youtube_dl/extractor/laola1tv.py | 6
-rw-r--r--  youtube_dl/extractor/leeco.py | 8
-rw-r--r--  youtube_dl/extractor/lynda.py | 6
-rw-r--r--  youtube_dl/extractor/matchtv.py | 4
-rw-r--r--  youtube_dl/extractor/metacafe.py | 4
-rw-r--r--  youtube_dl/extractor/minhateca.py | 4
-rw-r--r--  youtube_dl/extractor/mitele.py | 5
-rw-r--r--  youtube_dl/extractor/moevideo.py | 4
-rw-r--r--  youtube_dl/extractor/moniker.py | 4
-rw-r--r--  youtube_dl/extractor/mooshare.py | 4
-rw-r--r--  youtube_dl/extractor/mtv.py | 4
-rw-r--r--  youtube_dl/extractor/muzu.py | 8
-rw-r--r--  youtube_dl/extractor/myvideo.py | 4
-rw-r--r--  youtube_dl/extractor/naver.py | 6
-rw-r--r--  youtube_dl/extractor/nba.py | 4
-rw-r--r--  youtube_dl/extractor/neteasemusic.py | 4
-rw-r--r--  youtube_dl/extractor/nextmovie.py | 4
-rw-r--r--  youtube_dl/extractor/nfb.py | 4
-rw-r--r--  youtube_dl/extractor/nhl.py | 6
-rw-r--r--  youtube_dl/extractor/nick.py | 4
-rw-r--r--  youtube_dl/extractor/niconico.py | 7
-rw-r--r--  youtube_dl/extractor/noco.py | 4
-rw-r--r--  youtube_dl/extractor/novamov.py | 3
-rw-r--r--  youtube_dl/extractor/npr.py | 4
-rw-r--r--  youtube_dl/extractor/ooyala.py | 4
-rw-r--r--  youtube_dl/extractor/patreon.py | 2
-rw-r--r--  youtube_dl/extractor/played.py | 4
-rw-r--r--  youtube_dl/extractor/playtvak.py | 4
-rw-r--r--  youtube_dl/extractor/pluralsight.py | 4
-rw-r--r--  youtube_dl/extractor/porn91.py | 4
-rw-r--r--  youtube_dl/extractor/primesharetv.py | 4
-rw-r--r--  youtube_dl/extractor/promptfile.py | 4
-rw-r--r--  youtube_dl/extractor/prosiebensat1.py | 10
-rw-r--r--  youtube_dl/extractor/shahid.py | 4
-rw-r--r--  youtube_dl/extractor/shared.py | 4
-rw-r--r--  youtube_dl/extractor/sharesix.py | 4
-rw-r--r--  youtube_dl/extractor/sina.py | 4
-rw-r--r--  youtube_dl/extractor/smotri.py | 6
-rw-r--r--  youtube_dl/extractor/sohu.py | 4
-rw-r--r--  youtube_dl/extractor/soundcloud.py | 12
-rw-r--r--  youtube_dl/extractor/streamcloud.py | 4
-rw-r--r--  youtube_dl/extractor/telecinco.py | 4
-rw-r--r--  youtube_dl/extractor/tubitv.py | 4
-rw-r--r--  youtube_dl/extractor/twitch.py | 9
-rw-r--r--  youtube_dl/extractor/udemy.py | 6
-rw-r--r--  youtube_dl/extractor/vbox7.py | 4
-rw-r--r--  youtube_dl/extractor/viddler.py | 4
-rw-r--r--  youtube_dl/extractor/vimeo.py | 13
-rw-r--r--  youtube_dl/extractor/vk.py | 4
-rw-r--r--  youtube_dl/extractor/vlive.py | 4
-rw-r--r--  youtube_dl/extractor/vodlocker.py | 4
-rw-r--r--  youtube_dl/extractor/xfileshare.py | 5
-rw-r--r--  youtube_dl/extractor/yahoo.py | 3
-rw-r--r--  youtube_dl/extractor/yandexmusic.py | 4
-rw-r--r--  youtube_dl/extractor/youku.py | 4
-rw-r--r--  youtube_dl/extractor/youtube.py | 23
-rw-r--r--  youtube_dl/utils.py | 14
84 files changed, 229 insertions, 222 deletions
diff --git a/youtube_dl/compat.py b/youtube_dl/compat.py
index dbb91a6ef..76b6b0e38 100644
--- a/youtube_dl/compat.py
+++ b/youtube_dl/compat.py
@@ -170,6 +170,31 @@ except ImportError: # Python 2
return compat_urllib_parse_unquote(string, encoding, errors)
try:
+ from urllib.parse import urlencode as compat_urllib_parse_urlencode
+except ImportError: # Python 2
+ # Python 2 will choke in urlencode on mixture of byte and unicode strings.
+ # Possible solutions are to either port it from python 3 with all
+ # the friends or manually ensure input query contains only byte strings.
+ # We will stick with latter thus recursively encoding the whole query.
+ def compat_urllib_parse_urlencode(query, doseq=0, encoding='utf-8'):
+ def encode_elem(e):
+ if isinstance(e, dict):
+ e = encode_dict(e)
+ elif isinstance(e, (list, tuple,)):
+ e = encode_list(e)
+ elif isinstance(e, compat_str):
+ e = e.encode(encoding)
+ return e
+
+ def encode_dict(d):
+ return dict((encode_elem(k), encode_elem(v)) for k, v in d.items())
+
+ def encode_list(l):
+ return [encode_elem(e) for e in l]
+
+ return compat_urllib_parse.urlencode(encode_elem(query), doseq=doseq)
+
+try:
from urllib.request import DataHandler as compat_urllib_request_DataHandler
except ImportError: # Python < 3.4
# Ported from CPython 98774:1733b3bd46db, Lib/urllib/request.py
@@ -588,6 +613,7 @@ __all__ = [
'compat_urllib_parse_unquote',
'compat_urllib_parse_unquote_plus',
'compat_urllib_parse_unquote_to_bytes',
+ 'compat_urllib_parse_urlencode',
'compat_urllib_parse_urlparse',
'compat_urllib_request',
'compat_urllib_request_DataHandler',
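The point of the recursive encoding added above, in a short invented example: on Python 2 the stock urlencode raises UnicodeEncodeError as soon as a non-ASCII unicode value appears in the query, whereas the wrapper UTF-8-encodes everything first, so both interpreters produce the same percent-encoded string.

    from youtube_dl.compat import compat_urllib_parse_urlencode

    # a mixture of byte and unicode strings, as extractors commonly build
    query = {'vid': b'12345', 'title': u'm\xfcnchen'}
    print(compat_urllib_parse_urlencode(query))
    # prints vid=12345&title=m%C3%BCnchen on Python 2 and 3 (pair order may vary)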
diff --git a/youtube_dl/extractor/addanime.py b/youtube_dl/extractor/addanime.py
index fb1cc02e1..55a9322a7 100644
--- a/youtube_dl/extractor/addanime.py
+++ b/youtube_dl/extractor/addanime.py
@@ -6,7 +6,7 @@ from .common import InfoExtractor
from ..compat import (
compat_HTTPError,
compat_str,
- compat_urllib_parse,
+ compat_urllib_parse_urlencode,
compat_urllib_parse_urlparse,
)
from ..utils import (
@@ -60,7 +60,7 @@ class AddAnimeIE(InfoExtractor):
confirm_url = (
parsed_url.scheme + '://' + parsed_url.netloc +
action + '?' +
- compat_urllib_parse.urlencode({
+ compat_urllib_parse_urlencode({
'jschl_vc': vc, 'jschl_answer': compat_str(av_val)}))
self._download_webpage(
confirm_url, video_id,
diff --git a/youtube_dl/extractor/animeondemand.py b/youtube_dl/extractor/animeondemand.py
index 2cede55a7..9b01e38f5 100644
--- a/youtube_dl/extractor/animeondemand.py
+++ b/youtube_dl/extractor/animeondemand.py
@@ -9,7 +9,6 @@ from ..compat import (
)
from ..utils import (
determine_ext,
- encode_dict,
extract_attributes,
ExtractorError,
sanitized_Request,
@@ -71,7 +70,7 @@ class AnimeOnDemandIE(InfoExtractor):
post_url = compat_urlparse.urljoin(self._LOGIN_URL, post_url)
request = sanitized_Request(
- post_url, urlencode_postdata(encode_dict(login_form)))
+ post_url, urlencode_postdata(login_form))
request.add_header('Referer', self._LOGIN_URL)
response = self._download_webpage(
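The same simplification recurs in funimation.py, novamov.py and a few other extractors below: form dicts are now passed straight to urlencode_postdata without the encode_dict wrapper. The corresponding utils.py hunk is not included in this section; a plausible sketch, assuming urlencode_postdata now simply delegates to the new compat wrapper, would be:

    # youtube_dl/utils.py (sketch only, not the verbatim diff)
    from .compat import compat_urllib_parse_urlencode

    def urlencode_postdata(*args, **kargs):
        return compat_urllib_parse_urlencode(*args, **kargs).encode('ascii')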
diff --git a/youtube_dl/extractor/atresplayer.py b/youtube_dl/extractor/atresplayer.py
index b8f9ae005..f9568cb5b 100644
--- a/youtube_dl/extractor/atresplayer.py
+++ b/youtube_dl/extractor/atresplayer.py
@@ -8,7 +8,7 @@ import re
from .common import InfoExtractor
from ..compat import (
compat_str,
- compat_urllib_parse,
+ compat_urllib_parse_urlencode,
)
from ..utils import (
int_or_none,
@@ -86,7 +86,7 @@ class AtresPlayerIE(InfoExtractor):
}
request = sanitized_Request(
- self._LOGIN_URL, compat_urllib_parse.urlencode(login_form).encode('utf-8'))
+ self._LOGIN_URL, compat_urllib_parse_urlencode(login_form).encode('utf-8'))
request.add_header('Content-Type', 'application/x-www-form-urlencoded')
response = self._download_webpage(
request, None, 'Logging in as %s' % username)
diff --git a/youtube_dl/extractor/bambuser.py b/youtube_dl/extractor/bambuser.py
index da986e063..1a2eef48d 100644
--- a/youtube_dl/extractor/bambuser.py
+++ b/youtube_dl/extractor/bambuser.py
@@ -5,7 +5,7 @@ import itertools
from .common import InfoExtractor
from ..compat import (
- compat_urllib_parse,
+ compat_urllib_parse_urlencode,
compat_str,
)
from ..utils import (
@@ -58,7 +58,7 @@ class BambuserIE(InfoExtractor):
}
request = sanitized_Request(
- self._LOGIN_URL, compat_urllib_parse.urlencode(login_form).encode('utf-8'))
+ self._LOGIN_URL, compat_urllib_parse_urlencode(login_form).encode('utf-8'))
request.add_header('Referer', self._LOGIN_URL)
response = self._download_webpage(
request, None, 'Logging in as %s' % username)
diff --git a/youtube_dl/extractor/camdemy.py b/youtube_dl/extractor/camdemy.py
index dd4d96cec..6ffbeabd3 100644
--- a/youtube_dl/extractor/camdemy.py
+++ b/youtube_dl/extractor/camdemy.py
@@ -6,7 +6,7 @@ import re
from .common import InfoExtractor
from ..compat import (
- compat_urllib_parse,
+ compat_urllib_parse_urlencode,
compat_urlparse,
)
from ..utils import (
@@ -139,7 +139,7 @@ class CamdemyFolderIE(InfoExtractor):
parsed_url = list(compat_urlparse.urlparse(url))
query = dict(compat_urlparse.parse_qsl(parsed_url[4]))
query.update({'displayMode': 'list'})
- parsed_url[4] = compat_urllib_parse.urlencode(query)
+ parsed_url[4] = compat_urllib_parse_urlencode(query)
final_url = compat_urlparse.urlunparse(parsed_url)
page = self._download_webpage(final_url, folder_id)
diff --git a/youtube_dl/extractor/ceskatelevize.py b/youtube_dl/extractor/ceskatelevize.py
index b355111cb..d93108df5 100644
--- a/youtube_dl/extractor/ceskatelevize.py
+++ b/youtube_dl/extractor/ceskatelevize.py
@@ -5,8 +5,8 @@ import re
from .common import InfoExtractor
from ..compat import (
- compat_urllib_parse,
compat_urllib_parse_unquote,
+ compat_urllib_parse_urlencode,
compat_urllib_parse_urlparse,
)
from ..utils import (
@@ -102,7 +102,7 @@ class CeskaTelevizeIE(InfoExtractor):
req = sanitized_Request(
'http://www.ceskatelevize.cz/ivysilani/ajax/get-client-playlist',
- data=compat_urllib_parse.urlencode(data))
+ data=compat_urllib_parse_urlencode(data))
req.add_header('Content-type', 'application/x-www-form-urlencoded')
req.add_header('x-addr', '127.0.0.1')
diff --git a/youtube_dl/extractor/cloudy.py b/youtube_dl/extractor/cloudy.py
index 0fa720ee8..9e267e6c0 100644
--- a/youtube_dl/extractor/cloudy.py
+++ b/youtube_dl/extractor/cloudy.py
@@ -6,7 +6,7 @@ import re
from .common import InfoExtractor
from ..compat import (
compat_parse_qs,
- compat_urllib_parse,
+ compat_urllib_parse_urlencode,
compat_HTTPError,
)
from ..utils import (
@@ -64,7 +64,7 @@ class CloudyIE(InfoExtractor):
'errorUrl': error_url,
})
- data_url = self._API_URL % (video_host, compat_urllib_parse.urlencode(form))
+ data_url = self._API_URL % (video_host, compat_urllib_parse_urlencode(form))
player_data = self._download_webpage(
data_url, video_id, 'Downloading player data')
data = compat_parse_qs(player_data)
diff --git a/youtube_dl/extractor/comedycentral.py b/youtube_dl/extractor/comedycentral.py
index 5b1b99675..0c59102e0 100644
--- a/youtube_dl/extractor/comedycentral.py
+++ b/youtube_dl/extractor/comedycentral.py
@@ -5,7 +5,7 @@ import re
from .mtv import MTVServicesInfoExtractor
from ..compat import (
compat_str,
- compat_urllib_parse,
+ compat_urllib_parse_urlencode,
)
from ..utils import (
ExtractorError,
@@ -201,7 +201,7 @@ class ComedyCentralShowsIE(MTVServicesInfoExtractor):
# Correct cc.com in uri
uri = re.sub(r'(episode:[^.]+)(\.cc)?\.com', r'\1.com', uri)
- index_url = 'http://%s.cc.com/feeds/mrss?%s' % (show_name, compat_urllib_parse.urlencode({'uri': uri}))
+ index_url = 'http://%s.cc.com/feeds/mrss?%s' % (show_name, compat_urllib_parse_urlencode({'uri': uri}))
idoc = self._download_xml(
index_url, epTitle,
'Downloading show index', 'Unable to download episode index')
diff --git a/youtube_dl/extractor/common.py b/youtube_dl/extractor/common.py
index 770105a5b..b412fd030 100644
--- a/youtube_dl/extractor/common.py
+++ b/youtube_dl/extractor/common.py
@@ -21,7 +21,7 @@ from ..compat import (
compat_os_name,
compat_str,
compat_urllib_error,
- compat_urllib_parse,
+ compat_urllib_parse_urlencode,
compat_urlparse,
)
from ..utils import (
@@ -1300,7 +1300,7 @@ class InfoExtractor(object):
'plugin': 'flowplayer-3.2.0.1',
}
f4m_url += '&' if '?' in f4m_url else '?'
- f4m_url += compat_urllib_parse.urlencode(f4m_params)
+ f4m_url += compat_urllib_parse_urlencode(f4m_params)
formats.extend(self._extract_f4m_formats(f4m_url, video_id, f4m_id='hds', fatal=False))
continue
diff --git a/youtube_dl/extractor/condenast.py b/youtube_dl/extractor/condenast.py
index 054978ff2..e8f2b5a07 100644
--- a/youtube_dl/extractor/condenast.py
+++ b/youtube_dl/extractor/condenast.py
@@ -5,7 +5,7 @@ import re
from .common import InfoExtractor
from ..compat import (
- compat_urllib_parse,
+ compat_urllib_parse_urlencode,
compat_urllib_parse_urlparse,
compat_urlparse,
)
@@ -97,7 +97,7 @@ class CondeNastIE(InfoExtractor):
video_id = self._search_regex(r'videoId: [\'"](.+?)[\'"]', params, 'video id')
player_id = self._search_regex(r'playerId: [\'"](.+?)[\'"]', params, 'player id')
target = self._search_regex(r'target: [\'"](.+?)[\'"]', params, 'target')
- data = compat_urllib_parse.urlencode({'videoId': video_id,
+ data = compat_urllib_parse_urlencode({'videoId': video_id,
'playerId': player_id,
'target': target,
})
diff --git a/youtube_dl/extractor/crunchyroll.py b/youtube_dl/extractor/crunchyroll.py
index 85fa7a725..7746f1be3 100644
--- a/youtube_dl/extractor/crunchyroll.py
+++ b/youtube_dl/extractor/crunchyroll.py
@@ -11,8 +11,8 @@ from math import pow, sqrt, floor
from .common import InfoExtractor
from ..compat import (
compat_etree_fromstring,
- compat_urllib_parse,
compat_urllib_parse_unquote,
+ compat_urllib_parse_urlencode,
compat_urllib_request,
compat_urlparse,
)
@@ -78,7 +78,7 @@ class CrunchyrollBaseIE(InfoExtractor):
# See https://github.com/rg3/youtube-dl/issues/7202.
qs['skip_wall'] = ['1']
return compat_urlparse.urlunparse(
- parsed_url._replace(query=compat_urllib_parse.urlencode(qs, True)))
+ parsed_url._replace(query=compat_urllib_parse_urlencode(qs, True)))
class CrunchyrollIE(CrunchyrollBaseIE):
@@ -308,7 +308,7 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
playerdata_url = compat_urllib_parse_unquote(self._html_search_regex(r'"config_url":"([^"]+)', webpage, 'playerdata_url'))
playerdata_req = sanitized_Request(playerdata_url)
- playerdata_req.data = compat_urllib_parse.urlencode({'current_page': webpage_url})
+ playerdata_req.data = compat_urllib_parse_urlencode({'current_page': webpage_url})
playerdata_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
playerdata = self._download_webpage(playerdata_req, video_id, note='Downloading media info')
@@ -322,7 +322,7 @@ Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text
streamdata_req = sanitized_Request(
'http://www.crunchyroll.com/xml/?req=RpcApiVideoPlayer_GetStandardConfig&media_id=%s&video_format=%s&video_quality=%s'
% (stream_id, stream_format, stream_quality),
- compat_urllib_parse.urlencode({'current_page': url}).encode('utf-8'))
+ compat_urllib_parse_urlencode({'current_page': url}).encode('utf-8'))
streamdata_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
streamdata = self._download_xml(
streamdata_req, video_id,
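Several call sites, such as crunchyroll.py above and playtvak.py further down, pass doseq=True together with parse_qs-style dicts whose values are lists; the encode_list branch of the new wrapper covers exactly that case. A small invented example:

    from youtube_dl.compat import compat_urllib_parse_urlencode

    qs = {'skip_wall': ['1'], 'media': [u'm\xfcnchen']}  # list values, as returned by parse_qs
    print(compat_urllib_parse_urlencode(qs, True))
    # prints skip_wall=1&media=m%C3%BCnchen on Python 2 and 3 (pair order may vary)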
diff --git a/youtube_dl/extractor/daum.py b/youtube_dl/extractor/daum.py
index c84c51058..86024a745 100644
--- a/youtube_dl/extractor/daum.py
+++ b/youtube_dl/extractor/daum.py
@@ -8,8 +8,8 @@ import itertools
from .common import InfoExtractor
from ..compat import (
compat_parse_qs,
- compat_urllib_parse,
compat_urllib_parse_unquote,
+ compat_urllib_parse_urlencode,
compat_urlparse,
)
from ..utils import (
@@ -70,7 +70,7 @@ class DaumIE(InfoExtractor):
def _real_extract(self, url):
video_id = compat_urllib_parse_unquote(self._match_id(url))
- query = compat_urllib_parse.urlencode({'vid': video_id})
+ query = compat_urllib_parse_urlencode({'vid': video_id})
movie_data = self._download_json(
'http://videofarm.daum.net/controller/api/closed/v1_2/IntegratedMovieData.json?' + query,
video_id, 'Downloading video formats info')
@@ -86,7 +86,7 @@ class DaumIE(InfoExtractor):
formats = []
for format_el in movie_data['output_list']['output_list']:
profile = format_el['profile']
- format_query = compat_urllib_parse.urlencode({
+ format_query = compat_urllib_parse_urlencode({
'vid': video_id,
'profile': profile,
})
diff --git a/youtube_dl/extractor/dcn.py b/youtube_dl/extractor/dcn.py
index 15a1c40f7..982ed94ea 100644
--- a/youtube_dl/extractor/dcn.py
+++ b/youtube_dl/extractor/dcn.py
@@ -6,7 +6,7 @@ import base64
from .common import InfoExtractor
from ..compat import (
- compat_urllib_parse,
+ compat_urllib_parse_urlencode,
compat_str,
)
from ..utils import (
@@ -106,7 +106,7 @@ class DCNVideoIE(DCNBaseIE):
webpage = self._download_webpage(
'http://admin.mangomolo.com/analytics/index.php/customers/embed/video?' +
- compat_urllib_parse.urlencode({
+ compat_urllib_parse_urlencode({
'id': video_data['id'],
'user_id': video_data['user_id'],
'signature': video_data['signature'],
@@ -133,7 +133,7 @@ class DCNLiveIE(DCNBaseIE):
webpage = self._download_webpage(
'http://admin.mangomolo.com/analytics/index.php/customers/embed/index?' +
- compat_urllib_parse.urlencode({
+ compat_urllib_parse_urlencode({
'id': base64.b64encode(channel_data['user_id'].encode()).decode(),
'channelid': base64.b64encode(channel_data['id'].encode()).decode(),
'signature': channel_data['signature'],
@@ -174,7 +174,7 @@ class DCNSeasonIE(InfoExtractor):
data['show_id'] = show_id
request = sanitized_Request(
'http://admin.mangomolo.com/analytics/index.php/plus/show',
- compat_urllib_parse.urlencode(data),
+ compat_urllib_parse_urlencode(data),
{
'Origin': 'http://www.dcndigital.ae',
'Content-Type': 'application/x-www-form-urlencoded'
diff --git a/youtube_dl/extractor/dramafever.py b/youtube_dl/extractor/dramafever.py
index d35e88881..2101acaaf 100644
--- a/youtube_dl/extractor/dramafever.py
+++ b/youtube_dl/extractor/dramafever.py
@@ -6,7 +6,7 @@ import itertools
from .amp import AMPIE
from ..compat import (
compat_HTTPError,
- compat_urllib_parse,
+ compat_urllib_parse_urlencode,
compat_urlparse,
)
from ..utils import (
@@ -50,7 +50,7 @@ class DramaFeverBaseIE(AMPIE):
}
request = sanitized_Request(
- self._LOGIN_URL, compat_urllib_parse.urlencode(login_form).encode('utf-8'))
+ self._LOGIN_URL, compat_urllib_parse_urlencode(login_form).encode('utf-8'))
response = self._download_webpage(
request, None, 'Logging in as %s' % username)
diff --git a/youtube_dl/extractor/eroprofile.py b/youtube_dl/extractor/eroprofile.py
index 7fcd0151d..297f8a6f5 100644
--- a/youtube_dl/extractor/eroprofile.py
+++ b/youtube_dl/extractor/eroprofile.py
@@ -3,7 +3,7 @@ from __future__ import unicode_literals
import re
from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
from ..utils import (
ExtractorError,
unescapeHTML
@@ -43,7 +43,7 @@ class EroProfileIE(InfoExtractor):
if username is None:
return
- query = compat_urllib_parse.urlencode({
+ query = compat_urllib_parse_urlencode({
'username': username,
'password': password,
'url': 'http://www.eroprofile.com/',
diff --git a/youtube_dl/extractor/fc2.py b/youtube_dl/extractor/fc2.py
index 508684d2e..cacf61973 100644
--- a/youtube_dl/extractor/fc2.py
+++ b/youtube_dl/extractor/fc2.py
@@ -5,12 +5,11 @@ import hashlib
from .common import InfoExtractor
from ..compat import (
- compat_urllib_parse,
+ compat_urllib_parse_urlencode,
compat_urllib_request,
compat_urlparse,
)
from ..utils import (
- encode_dict,
ExtractorError,
sanitized_Request,
)
@@ -57,7 +56,7 @@ class FC2IE(InfoExtractor):
'Submit': ' Login ',
}
- login_data = compat_urllib_parse.urlencode(encode_dict(login_form_strs)).encode('utf-8')
+ login_data = compat_urllib_parse_urlencode(login_form_strs).encode('utf-8')
request = sanitized_Request(
'https://secure.id.fc2.com/index.php?mode=login&switch_language=en', login_data)
diff --git a/youtube_dl/extractor/fivemin.py b/youtube_dl/extractor/fivemin.py
index 67d50a386..6b8345416 100644
--- a/youtube_dl/extractor/fivemin.py
+++ b/youtube_dl/extractor/fivemin.py
@@ -4,8 +4,8 @@ import re
from .common import InfoExtractor
from ..compat import (
- compat_urllib_parse,
compat_parse_qs,
+ compat_urllib_parse_urlencode,
compat_urllib_parse_urlparse,
compat_urlparse,
)
@@ -109,7 +109,7 @@ class FiveMinIE(InfoExtractor):
response = self._download_json(
'https://syn.5min.com/handlers/SenseHandler.ashx?' +
- compat_urllib_parse.urlencode({
+ compat_urllib_parse_urlencode({
'func': 'GetResults',
'playlist': video_id,
'sid': sid,
diff --git a/youtube_dl/extractor/flickr.py b/youtube_dl/extractor/flickr.py
index 18f439df9..0a3de1498 100644
--- a/youtube_dl/extractor/flickr.py
+++ b/youtube_dl/extractor/flickr.py
@@ -1,7 +1,7 @@
from __future__ import unicode_literals
from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
from ..utils import (
ExtractorError,
int_or_none,
@@ -42,7 +42,7 @@ class FlickrIE(InfoExtractor):
}
if secret:
query['secret'] = secret
- data = self._download_json(self._API_BASE_URL + compat_urllib_parse.urlencode(query), video_id, note)
+ data = self._download_json(self._API_BASE_URL + compat_urllib_parse_urlencode(query), video_id, note)
if data['stat'] != 'ok':
raise ExtractorError(data['message'])
return data
diff --git a/youtube_dl/extractor/funimation.py b/youtube_dl/extractor/funimation.py
index 0f37ed786..1eb528f31 100644
--- a/youtube_dl/extractor/funimation.py
+++ b/youtube_dl/extractor/funimation.py
@@ -5,7 +5,6 @@ from .common import InfoExtractor
from ..utils import (
clean_html,
determine_ext,
- encode_dict,
int_or_none,
sanitized_Request,
ExtractorError,
@@ -54,10 +53,10 @@ class FunimationIE(InfoExtractor):
(username, password) = self._get_login_info()
if username is None:
return
- data = urlencode_postdata(encode_dict({
+ data = urlencode_postdata({
'email_field': username,
'password_field': password,
- }))
+ })
login_request = sanitized_Request('http://www.funimation.com/login', data, headers={
'User-Agent': 'Mozilla/5.0 (Windows NT 5.2; WOW64; rv:42.0) Gecko/20100101 Firefox/42.0',
'Content-Type': 'application/x-www-form-urlencoded'
diff --git a/youtube_dl/extractor/gdcvault.py b/youtube_dl/extractor/gdcvault.py
index 3befd3e7b..cc8fa45d2 100644
--- a/youtube_dl/extractor/gdcvault.py
+++ b/youtube_dl/extractor/gdcvault.py
@@ -3,7 +3,7 @@ from __future__ import unicode_literals
import re
from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
from ..utils import (
remove_end,
HEADRequest,
@@ -123,7 +123,7 @@ class GDCVaultIE(InfoExtractor):
'password': password,
}
- request = sanitized_Request(login_url, compat_urllib_parse.urlencode(login_form))
+ request = sanitized_Request(login_url, compat_urllib_parse_urlencode(login_form))
request.add_header('Content-Type', 'application/x-www-form-urlencoded')
self._download_webpage(request, display_id, 'Logging in')
start_page = self._download_webpage(webpage_url, display_id, 'Getting authenticated video page')
diff --git a/youtube_dl/extractor/hotnewhiphop.py b/youtube_dl/extractor/hotnewhiphop.py
index efc3e8429..152d2a98a 100644
--- a/youtube_dl/extractor/hotnewhiphop.py
+++ b/youtube_dl/extractor/hotnewhiphop.py
@@ -3,7 +3,7 @@ from __future__ import unicode_literals
import base64
from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
from ..utils import (
ExtractorError,
HEADRequest,
@@ -35,7 +35,7 @@ class HotNewHipHopIE(InfoExtractor):
r'"contentUrl" content="(.*?)"', webpage, 'content URL')
return self.url_result(video_url, ie='Youtube')
- reqdata = compat_urllib_parse.urlencode([
+ reqdata = compat_urllib_parse_urlencode([
('mediaType', 's'),
('mediaId', video_id),
])
diff --git a/youtube_dl/extractor/hypem.py b/youtube_dl/extractor/hypem.py
index e0ab31802..f7c913054 100644
--- a/youtube_dl/extractor/hypem.py
+++ b/youtube_dl/extractor/hypem.py
@@ -4,7 +4,7 @@ import json
import time
from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
from ..utils import (
ExtractorError,
sanitized_Request,
@@ -28,7 +28,7 @@ class HypemIE(InfoExtractor):
track_id = self._match_id(url)
data = {'ax': 1, 'ts': time.time()}
- request = sanitized_Request(url + '?' + compat_urllib_parse.urlencode(data))
+ request = sanitized_Request(url + '?' + compat_urllib_parse_urlencode(data))
response, urlh = self._download_webpage_handle(
request, track_id, 'Downloading webpage with the url')
diff --git a/youtube_dl/extractor/internetvideoarchive.py b/youtube_dl/extractor/internetvideoarchive.py
index 483cc6f9e..e60145b3d 100644
--- a/youtube_dl/extractor/internetvideoarchive.py
+++ b/youtube_dl/extractor/internetvideoarchive.py
@@ -5,7 +5,7 @@ import re
from .common import InfoExtractor
from ..compat import (
compat_urlparse,
- compat_urllib_parse,
+ compat_urllib_parse_urlencode,
)
from ..utils import (
xpath_with_ns,
@@ -38,7 +38,7 @@ class InternetVideoArchiveIE(InfoExtractor):
# Other player ids return m3u8 urls
cleaned_dic['playerid'] = '247'
cleaned_dic['videokbrate'] = '100000'
- return compat_urllib_parse.urlencode(cleaned_dic)
+ return compat_urllib_parse_urlencode(cleaned_dic)
def _real_extract(self, url):
query = compat_urlparse.urlparse(url).query
diff --git a/youtube_dl/extractor/iqiyi.py b/youtube_dl/extractor/iqiyi.py
index ffcea30ad..9e8c9432a 100644
--- a/youtube_dl/extractor/iqiyi.py
+++ b/youtube_dl/extractor/iqiyi.py
@@ -14,7 +14,7 @@ from .common import InfoExtractor
from ..compat import (
compat_parse_qs,
compat_str,
- compat_urllib_parse,
+ compat_urllib_parse_urlencode,
compat_urllib_parse_urlparse,
)
from ..utils import (
@@ -322,7 +322,7 @@ class IqiyiIE(InfoExtractor):
'bird_t': timestamp,
}
validation_result = self._download_json(
- 'http://kylin.iqiyi.com/validate?' + compat_urllib_parse.urlencode(validation_params), None,
+ 'http://kylin.iqiyi.com/validate?' + compat_urllib_parse_urlencode(validation_params), None,
note='Validate credentials', errnote='Unable to validate credentials')
MSG_MAP = {
@@ -456,7 +456,7 @@ class IqiyiIE(InfoExtractor):
'QY00001': auth_result['data']['u'],
})
api_video_url += '?' if '?' not in api_video_url else '&'
- api_video_url += compat_urllib_parse.urlencode(param)
+ api_video_url += compat_urllib_parse_urlencode(param)
js = self._download_json(
api_video_url, video_id,
note='Download video info of segment %d for format %s' % (segment_index + 1, format_id))
@@ -494,7 +494,7 @@ class IqiyiIE(InfoExtractor):
}
api_url = 'http://cache.video.qiyi.com/vms' + '?' + \
- compat_urllib_parse.urlencode(param)
+ compat_urllib_parse_urlencode(param)
raw_data = self._download_json(api_url, video_id)
return raw_data
diff --git a/youtube_dl/extractor/ivideon.py b/youtube_dl/extractor/ivideon.py
index 617dc8c07..3ca824f79 100644
--- a/youtube_dl/extractor/ivideon.py
+++ b/youtube_dl/extractor/ivideon.py
@@ -5,7 +5,7 @@ import re
from .common import InfoExtractor
from ..compat import (
- compat_urllib_parse,
+ compat_urllib_parse_urlencode,
compat_urlparse,
)
from ..utils import qualities
@@ -62,7 +62,7 @@ class IvideonIE(InfoExtractor):
quality = qualities(self._QUALITIES)
formats = [{
- 'url': 'https://streaming.ivideon.com/flv/live?%s' % compat_urllib_parse.urlencode({
+ 'url': 'https://streaming.ivideon.com/flv/live?%s' % compat_urllib_parse_urlencode({
'server': server_id,
'camera': camera_id,
'sessionId': 'demo',
diff --git a/youtube_dl/extractor/kaltura.py b/youtube_dl/extractor/kaltura.py
index 44d7c84a1..a65697ff5 100644
--- a/youtube_dl/extractor/kaltura.py
+++ b/youtube_dl/extractor/kaltura.py
@@ -6,7 +6,7 @@ import base64
from .common import InfoExtractor
from ..compat import (
- compat_urllib_parse,
+ compat_urllib_parse_urlencode,
compat_urlparse,
compat_parse_qs,
)
@@ -71,7 +71,7 @@ class KalturaIE(InfoExtractor):
for k, v in a.items():
params['%d:%s' % (i, k)] = v
- query = compat_urllib_parse.urlencode(params)
+ query = compat_urllib_parse_urlencode(params)
url = self._API_BASE + query
data = self._download_json(url, video_id, *args, **kwargs)
diff --git a/youtube_dl/extractor/laola1tv.py b/youtube_dl/extractor/laola1tv.py
index 41d80bc12..d9dc067d2 100644
--- a/youtube_dl/extractor/laola1tv.py
+++ b/youtube_dl/extractor/laola1tv.py
@@ -5,7 +5,7 @@ import re
from .common import InfoExtractor
from ..compat import (
- compat_urllib_parse,
+ compat_urllib_parse_urlencode,
compat_urlparse,
)
from ..utils import (
@@ -90,7 +90,7 @@ class Laola1TvIE(InfoExtractor):
hd_doc = self._download_xml(
'http://www.laola1.tv/server/hd_video.php?%s'
- % compat_urllib_parse.urlencode({
+ % compat_urllib_parse_urlencode({
'play': video_id,
'partner': partner_id,
'portal': portal,
@@ -108,7 +108,7 @@ class Laola1TvIE(InfoExtractor):
req = sanitized_Request(
'https://club.laola1.tv/sp/laola1/api/v3/user/session/premium/player/stream-access?%s' %
- compat_urllib_parse.urlencode({
+ compat_urllib_parse_urlencode({
'videoId': video_id,
'target': VS_TARGETS.get(kind, '2'),
'label': _v('label'),
diff --git a/youtube_dl/extractor/leeco.py b/youtube_dl/extractor/leeco.py
index 462b752dd..375fdaed1 100644
--- a/youtube_dl/extractor/leeco.py
+++ b/youtube_dl/extractor/leeco.py
@@ -11,7 +11,7 @@ from .common import InfoExtractor
from ..compat import (
compat_ord,
compat_str,
- compat_urllib_parse,
+ compat_urllib_parse_urlencode,
)
from ..utils import (
determine_ext,
@@ -122,7 +122,7 @@ class LeIE(InfoExtractor):
'domain': 'www.le.com'
}
play_json_req = sanitized_Request(
- 'http://api.le.com/mms/out/video/playJson?' + compat_urllib_parse.urlencode(params)
+ 'http://api.le.com/mms/out/video/playJson?' + compat_urllib_parse_urlencode(params)
)
cn_verification_proxy = self._downloader.params.get('cn_verification_proxy')
if cn_verification_proxy:
@@ -151,7 +151,7 @@ class LeIE(InfoExtractor):
for format_id in formats:
if format_id in dispatch:
media_url = playurl['domain'][0] + dispatch[format_id][0]
- media_url += '&' + compat_urllib_parse.urlencode({
+ media_url += '&' + compat_urllib_parse_urlencode({
'm3v': 1,
'format': 1,
'expect': 3,
@@ -305,7 +305,7 @@ class LetvCloudIE(InfoExtractor):
}
self.sign_data(data)
return self._download_json(
- 'http://api.letvcloud.com/gpc.php?' + compat_urllib_parse.urlencode(data),
+ 'http://api.letvcloud.com/gpc.php?' + compat_urllib_parse_urlencode(data),
media_id, 'Downloading playJson data for type %s' % cf)
play_json = get_play_json(cf, time.time())
diff --git a/youtube_dl/extractor/lynda.py b/youtube_dl/extractor/lynda.py
index d4e1ae99d..df50cb655 100644
--- a/youtube_dl/extractor/lynda.py
+++ b/youtube_dl/extractor/lynda.py
@@ -6,7 +6,7 @@ import json
from .common import InfoExtractor
from ..compat import (
compat_str,
- compat_urllib_parse,
+ compat_urllib_parse_urlencode,
)
from ..utils import (
ExtractorError,
@@ -36,7 +36,7 @@ class LyndaBaseIE(InfoExtractor):
'stayPut': 'false'
}
request = sanitized_Request(
- self._LOGIN_URL, compat_urllib_parse.urlencode(login_form).encode('utf-8'))
+ self._LOGIN_URL, compat_urllib_parse_urlencode(login_form).encode('utf-8'))
login_page = self._download_webpage(
request, None, 'Logging in as %s' % username)
@@ -65,7 +65,7 @@ class LyndaBaseIE(InfoExtractor):
'stayPut': 'false',
}
request = sanitized_Request(
- self._LOGIN_URL, compat_urllib_parse.urlencode(confirm_form).encode('utf-8'))
+ self._LOGIN_URL, compat_urllib_parse_urlencode(confirm_form).encode('utf-8'))
login_page = self._download_webpage(
request, None,
'Confirming log in and log out from another device')
diff --git a/youtube_dl/extractor/matchtv.py b/youtube_dl/extractor/matchtv.py
index 28e0dfe63..e33bfde3b 100644
--- a/youtube_dl/extractor/matchtv.py
+++ b/youtube_dl/extractor/matchtv.py
@@ -4,7 +4,7 @@ from __future__ import unicode_literals
import random
from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
from ..utils import (
sanitized_Request,
xpath_text,
@@ -29,7 +29,7 @@ class MatchTVIE(InfoExtractor):
def _real_extract(self, url):
video_id = 'matchtv-live'
request = sanitized_Request(
- 'http://player.matchtv.ntvplus.tv/player/smil?%s' % compat_urllib_parse.urlencode({
+ 'http://player.matchtv.ntvplus.tv/player/smil?%s' % compat_urllib_parse_urlencode({
'ts': '',
'quality': 'SD',
'contentId': '561d2c0df7159b37178b4567',
diff --git a/youtube_dl/extractor/metacafe.py b/youtube_dl/extractor/metacafe.py
index c31e8798a..0e4865446 100644
--- a/youtube_dl/extractor/metacafe.py
+++ b/youtube_dl/extractor/metacafe.py
@@ -5,8 +5,8 @@ import re
from .common import InfoExtractor
from ..compat import (
compat_parse_qs,
- compat_urllib_parse,
compat_urllib_parse_unquote,
+ compat_urllib_parse_urlencode,
)
from ..utils import (
determine_ext,
@@ -117,7 +117,7 @@ class MetacafeIE(InfoExtractor):
'filters': '0',
'submit': "Continue - I'm over 18",
}
- request = sanitized_Request(self._FILTER_POST, compat_urllib_parse.urlencode(disclaimer_form))
+ request = sanitized_Request(self._FILTER_POST, compat_urllib_parse_urlencode(disclaimer_form))
request.add_header('Content-Type', 'application/x-www-form-urlencoded')
self.report_age_confirmation()
self._download_webpage(request, None, False, 'Unable to confirm age')
diff --git a/youtube_dl/extractor/minhateca.py b/youtube_dl/extractor/minhateca.py
index e46b23a6f..6ec53c303 100644
--- a/youtube_dl/extractor/minhateca.py
+++ b/youtube_dl/extractor/minhateca.py
@@ -2,7 +2,7 @@
from __future__ import unicode_literals
from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
from ..utils import (
int_or_none,
parse_duration,
@@ -39,7 +39,7 @@ class MinhatecaIE(InfoExtractor):
]
req = sanitized_Request(
'http://minhateca.com.br/action/License/Download',
- data=compat_urllib_parse.urlencode(token_data))
+ data=compat_urllib_parse_urlencode(token_data))
req.add_header('Content-Type', 'application/x-www-form-urlencoded')
data = self._download_json(
req, video_id, note='Downloading metadata')
diff --git a/youtube_dl/extractor/mitele.py b/youtube_dl/extractor/mitele.py
index 9e584860a..76ced7928 100644
--- a/youtube_dl/extractor/mitele.py
+++ b/youtube_dl/extractor/mitele.py
@@ -2,11 +2,10 @@ from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import (
- compat_urllib_parse,
+ compat_urllib_parse_urlencode,
compat_urlparse,
)
from ..utils import (
- encode_dict,
get_element_by_attribute,
int_or_none,
)
@@ -60,7 +59,7 @@ class MiTeleIE(InfoExtractor):
'sta': '0',
}
media = self._download_json(
- '%s/?%s' % (gat, compat_urllib_parse.urlencode(encode_dict(token_data))),
+ '%s/?%s' % (gat, compat_urllib_parse_urlencode(token_data)),
display_id, 'Downloading %s JSON' % location['loc'])
file_ = media.get('file')
if not file_:
diff --git a/youtube_dl/extractor/moevideo.py b/youtube_dl/extractor/moevideo.py
index d930b9634..89cdd4600 100644
--- a/youtube_dl/extractor/moevideo.py
+++ b/youtube_dl/extractor/moevideo.py
@@ -5,7 +5,7 @@ import json
import re
from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
from ..utils import (
ExtractorError,
int_or_none,
@@ -77,7 +77,7 @@ class MoeVideoIE(InfoExtractor):
],
]
r_json = json.dumps(r)
- post = compat_urllib_parse.urlencode({'r': r_json})
+ post = compat_urllib_parse_urlencode({'r': r_json})
req = sanitized_Request(self._API_URL, post)
req.add_header('Content-type', 'application/x-www-form-urlencoded')
diff --git a/youtube_dl/extractor/moniker.py b/youtube_dl/extractor/moniker.py
index f6bf94f2f..c5ce693f1 100644
--- a/youtube_dl/extractor/moniker.py
+++ b/youtube_dl/extractor/moniker.py
@@ -5,7 +5,7 @@ import os.path
import re
from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
from ..utils import (
ExtractorError,
remove_start,
@@ -88,7 +88,7 @@ class MonikerIE(InfoExtractor):
fields = re.findall(r'type="hidden" name="(.+?)"\s* value="?(.+?)">', orig_webpage)
data = dict(fields)
- post = compat_urllib_parse.urlencode(data)
+ post = compat_urllib_parse_urlencode(data)
headers = {
b'Content-Type': b'application/x-www-form-urlencoded',
}
diff --git a/youtube_dl/extractor/mooshare.py b/youtube_dl/extractor/mooshare.py
index f010f52d5..ee3947f43 100644
--- a/youtube_dl/extractor/mooshare.py
+++ b/youtube_dl/extractor/mooshare.py
@@ -3,7 +3,7 @@ from __future__ import unicode_literals
import re
from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
from ..utils import (
ExtractorError,
sanitized_Request,
@@ -58,7 +58,7 @@ class MooshareIE(InfoExtractor):
}
request = sanitized_Request(
- 'http://mooshare.biz/%s' % video_id, compat_urllib_parse.urlencode(download_form))
+ 'http://mooshare.biz/%s' % video_id, compat_urllib_parse_urlencode(download_form))
request.add_header('Content-Type', 'application/x-www-form-urlencoded')
self._sleep(5, video_id)
diff --git a/youtube_dl/extractor/mtv.py b/youtube_dl/extractor/mtv.py
index 824bbcb4e..640ee3d93 100644
--- a/youtube_dl/extractor/mtv.py
+++ b/youtube_dl/extractor/mtv.py
@@ -4,7 +4,7 @@ import re
from .common import InfoExtractor
from ..compat import (
- compat_urllib_parse,
+ compat_urllib_parse_urlencode,
compat_str,
)
from ..utils import (
@@ -171,7 +171,7 @@ class MTVServicesInfoExtractor(InfoExtractor):
data = {'uri': uri}
if self._LANG:
data['lang'] = self._LANG
- return compat_urllib_parse.urlencode(data)
+ return compat_urllib_parse_urlencode(data)
def _get_videos_info(self, uri):
video_id = self._id_from_uri(uri)
diff --git a/youtube_dl/extractor/muzu.py b/youtube_dl/extractor/muzu.py
index 1e9cf8de9..cbc800481 100644
--- a/youtube_dl/extractor/muzu.py
+++ b/youtube_dl/extractor/muzu.py
@@ -1,9 +1,7 @@
from __future__ import unicode_literals
from .common import InfoExtractor
-from ..compat import (
- compat_urllib_parse,
-)
+from ..compat import compat_urllib_parse_urlencode
class MuzuTVIE(InfoExtractor):
@@ -25,7 +23,7 @@ class MuzuTVIE(InfoExtractor):
def _real_extract(self, url):
video_id = self._match_id(url)
- info_data = compat_urllib_parse.urlencode({
+ info_data = compat_urllib_parse_urlencode({
'format': 'json',
'url': url,
})
@@ -41,7 +39,7 @@ class MuzuTVIE(InfoExtractor):
if video_info.get('v%s' % quality):
break
- data = compat_urllib_parse.urlencode({
+ data = compat_urllib_parse_urlencode({
'ai': video_id,
# Even if each time you watch a video the hash changes,
# it seems to work for different videos, and it will work
diff --git a/youtube_dl/extractor/myvideo.py b/youtube_dl/extractor/myvideo.py
index c83a1eab5..6d447a493 100644
--- a/youtube_dl/extractor/myvideo.py
+++ b/youtube_dl/extractor/myvideo.py
@@ -9,8 +9,8 @@ import json
from .common import InfoExtractor
from ..compat import (
compat_ord,
- compat_urllib_parse,
compat_urllib_parse_unquote,
+ compat_urllib_parse_urlencode,
)
from ..utils import (
ExtractorError,
@@ -112,7 +112,7 @@ class MyVideoIE(InfoExtractor):
encxml = compat_urllib_parse_unquote(b)
if not params.get('domain'):
params['domain'] = 'www.myvideo.de'
- xmldata_url = '%s?%s' % (encxml, compat_urllib_parse.urlencode(params))
+ xmldata_url = '%s?%s' % (encxml, compat_urllib_parse_urlencode(params))
if 'flash_playertype=MTV' in xmldata_url:
self._downloader.report_warning('avoiding MTV player')
xmldata_url = (
diff --git a/youtube_dl/extractor/naver.py b/youtube_dl/extractor/naver.py
index 1f5fc2145..6d6f69b44 100644
--- a/youtube_dl/extractor/naver.py
+++ b/youtube_dl/extractor/naver.py
@@ -5,7 +5,7 @@ import re
from .common import InfoExtractor
from ..compat import (
- compat_urllib_parse,
+ compat_urllib_parse_urlencode,
compat_urlparse,
)
from ..utils import (
@@ -53,8 +53,8 @@ class NaverIE(InfoExtractor):
raise ExtractorError('couldn\'t extract vid and key')
vid = m_id.group(1)
key = m_id.group(2)
- query = compat_urllib_parse.urlencode({'vid': vid, 'inKey': key, })
- query_urls = compat_urllib_parse.urlencode({
+ query = compat_urllib_parse_urlencode({'vid': vid, 'inKey': key, })
+ query_urls = compat_urllib_parse_urlencode({
'masterVid': vid,
'protocol': 'p2p',
'inKey': key,
diff --git a/youtube_dl/extractor/nba.py b/youtube_dl/extractor/nba.py
index 3e2b3e599..d896b0d04 100644
--- a/youtube_dl/extractor/nba.py
+++ b/youtube_dl/extractor/nba.py
@@ -6,7 +6,7 @@ import re
from .common import InfoExtractor
from ..compat import (
- compat_urllib_parse,
+ compat_urllib_parse_urlencode,
compat_urlparse,
)
from ..utils import (
@@ -97,7 +97,7 @@ class NBAIE(InfoExtractor):
_PAGE_SIZE = 30
def _fetch_page(self, team, video_id, page):
- search_url = 'http://searchapp2.nba.com/nba-search/query.jsp?' + compat_urllib_parse.urlencode({
+ search_url = 'http://searchapp2.nba.com/nba-search/query.jsp?' + compat_urllib_parse_urlencode({
'type': 'teamvideo',
'start': page * self._PAGE_SIZE + 1,
'npp': (page + 1) * self._PAGE_SIZE + 1,
diff --git a/youtube_dl/extractor/neteasemusic.py b/youtube_dl/extractor/neteasemusic.py
index 7830616f8..0d36474fa 100644
--- a/youtube_dl/extractor/neteasemusic.py
+++ b/youtube_dl/extractor/neteasemusic.py
@@ -8,7 +8,7 @@ import re
from .common import InfoExtractor
from ..compat import (
- compat_urllib_parse,
+ compat_urllib_parse_urlencode,
compat_str,
compat_itertools_count,
)
@@ -153,7 +153,7 @@ class NetEaseMusicIE(NetEaseMusicBaseIE):
'ids': '[%s]' % song_id
}
info = self.query_api(
- 'song/detail?' + compat_urllib_parse.urlencode(params),
+ 'song/detail?' + compat_urllib_parse_urlencode(params),
song_id, 'Downloading song info')['songs'][0]
formats = self.extract_formats(info)
diff --git a/youtube_dl/extractor/nextmovie.py b/youtube_dl/extractor/nextmovie.py
index 657ae77a0..9ccd7d774 100644
--- a/youtube_dl/extractor/nextmovie.py
+++ b/youtube_dl/extractor/nextmovie.py
@@ -2,7 +2,7 @@
from __future__ import unicode_literals
from .mtv import MTVServicesInfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
class NextMovieIE(MTVServicesInfoExtractor):
@@ -20,7 +20,7 @@ class NextMovieIE(MTVServicesInfoExtractor):
}]
def _get_feed_query(self, uri):
- return compat_urllib_parse.urlencode({
+ return compat_urllib_parse_urlencode({
'feed': '1505',
'mgid': uri,
})
diff --git a/youtube_dl/extractor/nfb.py b/youtube_dl/extractor/nfb.py
index 5bd15f7a7..ba1eefafc 100644
--- a/youtube_dl/extractor/nfb.py
+++ b/youtube_dl/extractor/nfb.py
@@ -1,7 +1,7 @@
from __future__ import unicode_literals
from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
from ..utils import sanitized_Request
@@ -40,7 +40,7 @@ class NFBIE(InfoExtractor):
request = sanitized_Request(
'https://www.nfb.ca/film/%s/player_config' % video_id,
- compat_urllib_parse.urlencode({'getConfig': 'true'}).encode('ascii'))
+ compat_urllib_parse_urlencode({'getConfig': 'true'}).encode('ascii'))
request.add_header('Content-Type', 'application/x-www-form-urlencoded')
request.add_header('X-NFB-Referer', 'http://www.nfb.ca/medias/flash/NFBVideoPlayer.swf')
diff --git a/youtube_dl/extractor/nhl.py b/youtube_dl/extractor/nhl.py
index 8d5ce46ad..c1dea8b6c 100644
--- a/youtube_dl/extractor/nhl.py
+++ b/youtube_dl/extractor/nhl.py
@@ -7,7 +7,7 @@ import os
from .common import InfoExtractor
from ..compat import (
compat_urlparse,
- compat_urllib_parse,
+ compat_urllib_parse_urlencode,
compat_urllib_parse_urlparse
)
from ..utils import (
@@ -38,7 +38,7 @@ class NHLBaseInfoExtractor(InfoExtractor):
parsed_url = compat_urllib_parse_urlparse(initial_video_url)
filename, ext = os.path.splitext(parsed_url.path)
path = '%s_sd%s' % (filename, ext)
- data = compat_urllib_parse.urlencode({
+ data = compat_urllib_parse_urlencode({
'type': 'fvod',
'path': compat_urlparse.urlunparse(parsed_url[:2] + (path,) + parsed_url[3:])
})
@@ -211,7 +211,7 @@ class NHLVideocenterIE(NHLBaseInfoExtractor):
r'tab0"[^>]*?>(.*?)</td>',
webpage, 'playlist title', flags=re.DOTALL).lower().capitalize()
- data = compat_urllib_parse.urlencode({
+ data = compat_urllib_parse_urlencode({
'cid': cat_id,
# This is the default value
'count': 12,
diff --git a/youtube_dl/extractor/nick.py b/youtube_dl/extractor/nick.py
index b62819ae5..ce065f2b0 100644
--- a/youtube_dl/extractor/nick.py
+++ b/youtube_dl/extractor/nick.py
@@ -2,7 +2,7 @@
from __future__ import unicode_literals
from .mtv import MTVServicesInfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
class NickIE(MTVServicesInfoExtractor):
@@ -54,7 +54,7 @@ class NickIE(MTVServicesInfoExtractor):
}]
def _get_feed_query(self, uri):
- return compat_urllib_parse.urlencode({
+ return compat_urllib_parse_urlencode({
'feed': 'nick_arc_player_prime',
'mgid': uri,
})
diff --git a/youtube_dl/extractor/niconico.py b/youtube_dl/extractor/niconico.py
index 586e52a4a..688f0a124 100644
--- a/youtube_dl/extractor/niconico.py
+++ b/youtube_dl/extractor/niconico.py
@@ -7,11 +7,10 @@ import datetime
from .common import InfoExtractor
from ..compat import (
- compat_urllib_parse,
+ compat_urllib_parse_urlencode,
compat_urlparse,
)
from ..utils import (
- encode_dict,
ExtractorError,
int_or_none,
parse_duration,
@@ -101,7 +100,7 @@ class NiconicoIE(InfoExtractor):
'mail': username,
'password': password,
}
- login_data = compat_urllib_parse.urlencode(encode_dict(login_form_strs)).encode('utf-8')
+ login_data = compat_urllib_parse_urlencode(login_form_strs).encode('utf-8')
request = sanitized_Request(
'https://secure.nicovideo.jp/secure/login', login_data)
login_results = self._download_webpage(
@@ -141,7 +140,7 @@ class NiconicoIE(InfoExtractor):
r'\'thumbPlayKey\'\s*:\s*\'(.*?)\'', ext_player_info, 'thumbPlayKey')
# Get flv info
- flv_info_data = compat_urllib_parse.urlencode({
+ flv_info_data = compat_urllib_parse_urlencode({
'k': thumb_play_key,
'v': video_id
})
diff --git a/youtube_dl/extractor/noco.py b/youtube_dl/extractor/noco.py
index ec7317a2f..8f4b69a6f 100644
--- a/youtube_dl/extractor/noco.py
+++ b/youtube_dl/extractor/noco.py
@@ -8,7 +8,7 @@ import hashlib
from .common import InfoExtractor
from ..compat import (
compat_str,
- compat_urllib_parse,
+ compat_urllib_parse_urlencode,
compat_urlparse,
)
from ..utils import (
@@ -75,7 +75,7 @@ class NocoIE(InfoExtractor):
'username': username,
'password': password,
}
- request = sanitized_Request(self._LOGIN_URL, compat_urllib_parse.urlencode(login_form))
+ request = sanitized_Request(self._LOGIN_URL, compat_urllib_parse_urlencode(login_form))
request.add_header('Content-Type', 'application/x-www-form-urlencoded; charset=UTF-8')
login = self._download_json(request, None, 'Logging in as %s' % username)
diff --git a/youtube_dl/extractor/novamov.py b/youtube_dl/extractor/novamov.py
index d68c1ad79..a131f7dbd 100644
--- a/youtube_dl/extractor/novamov.py
+++ b/youtube_dl/extractor/novamov.py
@@ -7,7 +7,6 @@ from ..compat import compat_urlparse
from ..utils import (
ExtractorError,
NO_DEFAULT,
- encode_dict,
sanitized_Request,
urlencode_postdata,
)
@@ -73,7 +72,7 @@ class NovaMovIE(InfoExtractor):
if not post_url.startswith('http'):
post_url = compat_urlparse.urljoin(url, post_url)
request = sanitized_Request(
- post_url, urlencode_postdata(encode_dict(fields)))
+ post_url, urlencode_postdata(fields))
request.add_header('Content-Type', 'application/x-www-form-urlencoded')
request.add_header('Referer', post_url)
webpage = self._download_webpage(
diff --git a/youtube_dl/extractor/npr.py b/youtube_dl/extractor/npr.py
index a3f0abb4e..1777aa10b 100644
--- a/youtube_dl/extractor/npr.py
+++ b/youtube_dl/extractor/npr.py
@@ -1,7 +1,7 @@
from __future__ import unicode_literals
from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
from ..utils import (
int_or_none,
qualities,
@@ -38,7 +38,7 @@ class NprIE(InfoExtractor):
playlist_id = self._match_id(url)
config = self._download_json(
- 'http://api.npr.org/query?%s' % compat_urllib_parse.urlencode({
+ 'http://api.npr.org/query?%s' % compat_urllib_parse_urlencode({
'id': playlist_id,
'fields': 'titles,audio,show',
'format': 'json',
diff --git a/youtube_dl/extractor/ooyala.py b/youtube_dl/extractor/ooyala.py
index 20b984288..16f040191 100644
--- a/youtube_dl/extractor/ooyala.py
+++ b/youtube_dl/extractor/ooyala.py
@@ -9,7 +9,7 @@ from ..utils import (
ExtractorError,
unsmuggle_url,
)
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
class OoyalaBaseIE(InfoExtractor):
@@ -35,7 +35,7 @@ class OoyalaBaseIE(InfoExtractor):
for supported_format in ('mp4', 'm3u8', 'hds', 'rtmp'):
auth_data = self._download_json(
self._AUTHORIZATION_URL_TEMPLATE % (pcode, embed_code) +
- compat_urllib_parse.urlencode({
+ compat_urllib_parse_urlencode({
'domain': domain,
'supportedFormats': supported_format
}),
diff --git a/youtube_dl/extractor/patreon.py b/youtube_dl/extractor/patreon.py
index ec8876c28..229750665 100644
--- a/youtube_dl/extractor/patreon.py
+++ b/youtube_dl/extractor/patreon.py
@@ -65,7 +65,7 @@ class PatreonIE(InfoExtractor):
request = sanitized_Request(
'https://www.patreon.com/processLogin',
- compat_urllib_parse.urlencode(login_form).encode('utf-8')
+ compat_urllib_parse_urlencode(login_form).encode('utf-8')
)
login_page = self._download_webpage(request, None, note='Logging in as %s' % username)
diff --git a/youtube_dl/extractor/played.py b/youtube_dl/extractor/played.py
index 2856af96f..63065622b 100644
--- a/youtube_dl/extractor/played.py
+++ b/youtube_dl/extractor/played.py
@@ -5,7 +5,7 @@ import re
import os.path
from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
from ..utils import (
ExtractorError,
sanitized_Request,
@@ -40,7 +40,7 @@ class PlayedIE(InfoExtractor):
self._sleep(2, video_id)
- post = compat_urllib_parse.urlencode(data)
+ post = compat_urllib_parse_urlencode(data)
headers = {
b'Content-Type': b'application/x-www-form-urlencoded',
}
diff --git a/youtube_dl/extractor/playtvak.py b/youtube_dl/extractor/playtvak.py
index e360404f7..1e8096a25 100644
--- a/youtube_dl/extractor/playtvak.py
+++ b/youtube_dl/extractor/playtvak.py
@@ -4,7 +4,7 @@ from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import (
compat_urlparse,
- compat_urllib_parse,
+ compat_urllib_parse_urlencode,
)
from ..utils import (
ExtractorError,
@@ -106,7 +106,7 @@ class PlaytvakIE(InfoExtractor):
})
info_url = compat_urlparse.urlunparse(
- parsed_url._replace(query=compat_urllib_parse.urlencode(qs, True)))
+ parsed_url._replace(query=compat_urllib_parse_urlencode(qs, True)))
json_info = self._download_json(
info_url, video_id,
diff --git a/youtube_dl/extractor/pluralsight.py b/youtube_dl/extractor/pluralsight.py
index 12e1c2862..575775f09 100644
--- a/youtube_dl/extractor/pluralsight.py
+++ b/youtube_dl/extractor/pluralsight.py
@@ -8,7 +8,7 @@ import collections
from .common import InfoExtractor
from ..compat import (
compat_str,
- compat_urllib_parse,
+ compat_urllib_parse_urlencode,
compat_urlparse,
)
from ..utils import (
@@ -76,7 +76,7 @@ class PluralsightIE(PluralsightBaseIE):
post_url = compat_urlparse.urljoin(self._LOGIN_URL, post_url)
request = sanitized_Request(
- post_url, compat_urllib_parse.urlencode(login_form).encode('utf-8'))
+ post_url, compat_urllib_parse_urlencode(login_form).encode('utf-8'))
request.add_header('Content-Type', 'application/x-www-form-urlencoded')
response = self._download_webpage(
diff --git a/youtube_dl/extractor/porn91.py b/youtube_dl/extractor/porn91.py
index 63ce87ee3..9894f3262 100644
--- a/youtube_dl/extractor/porn91.py
+++ b/youtube_dl/extractor/porn91.py
@@ -2,8 +2,8 @@
from __future__ import unicode_literals
from ..compat import (
- compat_urllib_parse,
compat_urllib_parse_unquote,
+ compat_urllib_parse_urlencode,
)
from .common import InfoExtractor
from ..utils import (
@@ -50,7 +50,7 @@ class Porn91IE(InfoExtractor):
r'so.addVariable\(\'seccode\',\'([^\']+)\'', webpage, 'sec code')
max_vid = self._search_regex(
r'so.addVariable\(\'max_vid\',\'(\d+)\'', webpage, 'max vid')
- url_params = compat_urllib_parse.urlencode({
+ url_params = compat_urllib_parse_urlencode({
'VID': file_id,
'mp4': '1',
'seccode': sec_code,
diff --git a/youtube_dl/extractor/primesharetv.py b/youtube_dl/extractor/primesharetv.py
index 85aae9576..188f08826 100644
--- a/youtube_dl/extractor/primesharetv.py
+++ b/youtube_dl/extractor/primesharetv.py
@@ -1,7 +1,7 @@
from __future__ import unicode_literals
from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
from ..utils import (
ExtractorError,
sanitized_Request,
@@ -42,7 +42,7 @@ class PrimeShareTVIE(InfoExtractor):
self._sleep(wait_time, video_id)
req = sanitized_Request(
- url, compat_urllib_parse.urlencode(fields), headers)
+ url, compat_urllib_parse_urlencode(fields), headers)
video_page = self._download_webpage(
req, video_id, 'Downloading video page')
diff --git a/youtube_dl/extractor/promptfile.py b/youtube_dl/extractor/promptfile.py
index d5357283a..67312016c 100644
--- a/youtube_dl/extractor/promptfile.py
+++ b/youtube_dl/extractor/promptfile.py
@@ -4,7 +4,7 @@ from __future__ import unicode_literals
import re
from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
from ..utils import (
determine_ext,
ExtractorError,
@@ -34,7 +34,7 @@ class PromptFileIE(InfoExtractor):
expected=True)
fields = self._hidden_inputs(webpage)
- post = compat_urllib_parse.urlencode(fields)
+ post = compat_urllib_parse_urlencode(fields)
req = sanitized_Request(url, post)
req.add_header('Content-type', 'application/x-www-form-urlencoded')
webpage = self._download_webpage(
diff --git a/youtube_dl/extractor/prosiebensat1.py b/youtube_dl/extractor/prosiebensat1.py
index 670e6950f..07d49d489 100644
--- a/youtube_dl/extractor/prosiebensat1.py
+++ b/youtube_dl/extractor/prosiebensat1.py
@@ -5,9 +5,7 @@ import re
from hashlib import sha1
from .common import InfoExtractor
-from ..compat import (
- compat_urllib_parse,
-)
+from ..compat import compat_urllib_parse_urlencode
from ..utils import (
ExtractorError,
determine_ext,
@@ -235,7 +233,7 @@ class ProSiebenSat1IE(InfoExtractor):
client_name = 'kolibri-2.0.19-splec4'
client_location = url
- videos_api_url = 'http://vas.sim-technik.de/vas/live/v2/videos?%s' % compat_urllib_parse.urlencode({
+ videos_api_url = 'http://vas.sim-technik.de/vas/live/v2/videos?%s' % compat_urllib_parse_urlencode({
'access_token': access_token,
'client_location': client_location,
'client_name': client_name,
@@ -256,7 +254,7 @@ class ProSiebenSat1IE(InfoExtractor):
client_id = g[:2] + sha1(''.join([clip_id, g, access_token, client_location, g, client_name])
.encode('utf-8')).hexdigest()
- sources_api_url = 'http://vas.sim-technik.de/vas/live/v2/videos/%s/sources?%s' % (clip_id, compat_urllib_parse.urlencode({
+ sources_api_url = 'http://vas.sim-technik.de/vas/live/v2/videos/%s/sources?%s' % (clip_id, compat_urllib_parse_urlencode({
'access_token': access_token,
'client_id': client_id,
'client_location': client_location,
@@ -270,7 +268,7 @@ class ProSiebenSat1IE(InfoExtractor):
client_location, source_ids_str, g, client_name])
.encode('utf-8')).hexdigest()
- url_api_url = 'http://vas.sim-technik.de/vas/live/v2/videos/%s/sources/url?%s' % (clip_id, compat_urllib_parse.urlencode({
+ url_api_url = 'http://vas.sim-technik.de/vas/live/v2/videos/%s/sources/url?%s' % (clip_id, compat_urllib_parse_urlencode({
'access_token': access_token,
'client_id': client_id,
'client_location': client_location,
diff --git a/youtube_dl/extractor/shahid.py b/youtube_dl/extractor/shahid.py
index 1178b7a27..b4433a689 100644
--- a/youtube_dl/extractor/shahid.py
+++ b/youtube_dl/extractor/shahid.py
@@ -2,7 +2,7 @@
from __future__ import unicode_literals
from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
from ..utils import (
ExtractorError,
int_or_none,
@@ -81,7 +81,7 @@ class ShahidIE(InfoExtractor):
video = self._download_json(
'%s/%s/%s?%s' % (
api_vars['url'], api_vars['playerType'], api_vars['id'],
- compat_urllib_parse.urlencode({
+ compat_urllib_parse_urlencode({
'apiKey': 'sh@hid0nlin3',
'hash': 'b2wMCTHpSmyxGqQjJFOycRmLSex+BpTK/ooxy6vHaqs=',
})),
diff --git a/youtube_dl/extractor/shared.py b/youtube_dl/extractor/shared.py
index 96fe0b90d..e66441997 100644
--- a/youtube_dl/extractor/shared.py
+++ b/youtube_dl/extractor/shared.py
@@ -3,7 +3,7 @@ from __future__ import unicode_literals
import base64
from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
from ..utils import (
ExtractorError,
int_or_none,
@@ -45,7 +45,7 @@ class SharedIE(InfoExtractor):
download_form = self._hidden_inputs(webpage)
request = sanitized_Request(
- url, compat_urllib_parse.urlencode(download_form))
+ url, compat_urllib_parse_urlencode(download_form))
request.add_header('Content-Type', 'application/x-www-form-urlencoded')
video_page = self._download_webpage(
diff --git a/youtube_dl/extractor/sharesix.py b/youtube_dl/extractor/sharesix.py
index f1ea9bdb2..61dc1c235 100644
--- a/youtube_dl/extractor/sharesix.py
+++ b/youtube_dl/extractor/sharesix.py
@@ -4,7 +4,7 @@ from __future__ import unicode_literals
import re
from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
from ..utils import (
parse_duration,
sanitized_Request,
@@ -47,7 +47,7 @@ class ShareSixIE(InfoExtractor):
fields = {
'method_free': 'Free'
}
- post = compat_urllib_parse.urlencode(fields)
+ post = compat_urllib_parse_urlencode(fields)
req = sanitized_Request(url, post)
req.add_header('Content-type', 'application/x-www-form-urlencoded')
diff --git a/youtube_dl/extractor/sina.py b/youtube_dl/extractor/sina.py
index b2258a0f6..d03f1b1d4 100644
--- a/youtube_dl/extractor/sina.py
+++ b/youtube_dl/extractor/sina.py
@@ -4,7 +4,7 @@ from __future__ import unicode_literals
import re
from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
from ..utils import sanitized_Request
@@ -39,7 +39,7 @@ class SinaIE(InfoExtractor):
]
def _extract_video(self, video_id):
- data = compat_urllib_parse.urlencode({'vid': video_id})
+ data = compat_urllib_parse_urlencode({'vid': video_id})
url_doc = self._download_xml('http://v.iask.com/v_play.php?%s' % data,
video_id, 'Downloading video url')
image_page = self._download_webpage(
diff --git a/youtube_dl/extractor/smotri.py b/youtube_dl/extractor/smotri.py
index 015ef75f3..b4c6d5bbf 100644
--- a/youtube_dl/extractor/smotri.py
+++ b/youtube_dl/extractor/smotri.py
@@ -7,7 +7,7 @@ import hashlib
import uuid
from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
from ..utils import (
ExtractorError,
int_or_none,
@@ -175,7 +175,7 @@ class SmotriIE(InfoExtractor):
video_form['pass'] = hashlib.md5(video_password.encode('utf-8')).hexdigest()
request = sanitized_Request(
- 'http://smotri.com/video/view/url/bot/', compat_urllib_parse.urlencode(video_form))
+ 'http://smotri.com/video/view/url/bot/', compat_urllib_parse_urlencode(video_form))
request.add_header('Content-Type', 'application/x-www-form-urlencoded')
video = self._download_json(request, video_id, 'Downloading video JSON')
@@ -338,7 +338,7 @@ class SmotriBroadcastIE(InfoExtractor):
}
request = sanitized_Request(
- broadcast_url + '/?no_redirect=1', compat_urllib_parse.urlencode(login_form))
+ broadcast_url + '/?no_redirect=1', compat_urllib_parse_urlencode(login_form))
request.add_header('Content-Type', 'application/x-www-form-urlencoded')
broadcast_page = self._download_webpage(
request, broadcast_id, 'Logging in and confirming age')
diff --git a/youtube_dl/extractor/sohu.py b/youtube_dl/extractor/sohu.py
index ea8fc258d..49e5d09ae 100644
--- a/youtube_dl/extractor/sohu.py
+++ b/youtube_dl/extractor/sohu.py
@@ -6,7 +6,7 @@ import re
from .common import InfoExtractor
from ..compat import (
compat_str,
- compat_urllib_parse,
+ compat_urllib_parse_urlencode,
)
from ..utils import (
ExtractorError,
@@ -170,7 +170,7 @@ class SohuIE(InfoExtractor):
if retries > 0:
download_note += ' (retry #%d)' % retries
part_info = self._parse_json(self._download_webpage(
- 'http://%s/?%s' % (allot, compat_urllib_parse.urlencode(params)),
+ 'http://%s/?%s' % (allot, compat_urllib_parse_urlencode(params)),
video_id, download_note), video_id)
video_url = part_info['url']
diff --git a/youtube_dl/extractor/soundcloud.py b/youtube_dl/extractor/soundcloud.py
index 1efb2b980..2bca8fa3a 100644
--- a/youtube_dl/extractor/soundcloud.py
+++ b/youtube_dl/extractor/soundcloud.py
@@ -11,10 +11,9 @@ from .common import (
from ..compat import (
compat_str,
compat_urlparse,
- compat_urllib_parse,
+ compat_urllib_parse_urlencode,
)
from ..utils import (
- encode_dict,
ExtractorError,
int_or_none,
unified_strdate,
@@ -393,7 +392,7 @@ class SoundcloudUserIE(SoundcloudIE):
query = COMMON_QUERY.copy()
query['offset'] = 0
- next_href = base_url + '?' + compat_urllib_parse.urlencode(query)
+ next_href = base_url + '?' + compat_urllib_parse_urlencode(query)
entries = []
for i in itertools.count():
@@ -424,7 +423,7 @@ class SoundcloudUserIE(SoundcloudIE):
qs = compat_urlparse.parse_qs(parsed_next_href.query)
qs.update(COMMON_QUERY)
next_href = compat_urlparse.urlunparse(
- parsed_next_href._replace(query=compat_urllib_parse.urlencode(qs, True)))
+ parsed_next_href._replace(query=compat_urllib_parse_urlencode(qs, True)))
return {
'_type': 'playlist',
@@ -460,7 +459,7 @@ class SoundcloudPlaylistIE(SoundcloudIE):
if token:
data_dict['secret_token'] = token
- data = compat_urllib_parse.urlencode(data_dict)
+ data = compat_urllib_parse_urlencode(data_dict)
data = self._download_json(
base_url + data, playlist_id, 'Downloading playlist')
@@ -500,7 +499,7 @@ class SoundcloudSearchIE(SearchInfoExtractor, SoundcloudIE):
query['client_id'] = self._CLIENT_ID
query['linked_partitioning'] = '1'
query['offset'] = 0
- data = compat_urllib_parse.urlencode(encode_dict(query))
+ data = compat_urllib_parse_urlencode(query)
next_url = '{0}{1}?{2}'.format(self._API_V2_BASE, endpoint, data)
collected_results = 0
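
Note the positional True kept in the SoundcloudUserIE hunk: it maps to urlencode's doseq flag. compat_parse_qs returns a dict of lists, and doseq makes each list element its own key=value pair instead of stringifying the list. A small sketch (query string invented):

    from youtube_dl.compat import compat_parse_qs, compat_urllib_parse_urlencode

    qs = compat_parse_qs('offset=0&tag=a&tag=b')   # {'offset': ['0'], 'tag': ['a', 'b']}
    # With doseq, each list element becomes a separate pair.
    print(compat_urllib_parse_urlencode(qs, True))  # e.g. offset=0&tag=a&tag=b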
diff --git a/youtube_dl/extractor/streamcloud.py b/youtube_dl/extractor/streamcloud.py
index 77841b946..b17779e4b 100644
--- a/youtube_dl/extractor/streamcloud.py
+++ b/youtube_dl/extractor/streamcloud.py
@@ -4,7 +4,7 @@ from __future__ import unicode_literals
import re
from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
from ..utils import sanitized_Request
@@ -35,7 +35,7 @@ class StreamcloudIE(InfoExtractor):
(?:id="[^"]+"\s+)?
value="([^"]*)"
''', orig_webpage)
- post = compat_urllib_parse.urlencode(fields)
+ post = compat_urllib_parse_urlencode(fields)
self._sleep(12, video_id)
headers = {
diff --git a/youtube_dl/extractor/telecinco.py b/youtube_dl/extractor/telecinco.py
index 2c8e9b941..d6b2560f8 100644
--- a/youtube_dl/extractor/telecinco.py
+++ b/youtube_dl/extractor/telecinco.py
@@ -5,8 +5,8 @@ import json
from .common import InfoExtractor
from ..compat import (
- compat_urllib_parse,
compat_urllib_parse_unquote,
+ compat_urllib_parse_urlencode,
compat_urlparse,
)
from ..utils import (
@@ -74,7 +74,7 @@ class TelecincoIE(InfoExtractor):
info_el = self._download_xml(info_url, episode).find('./video/info')
video_link = info_el.find('videoUrl/link').text
- token_query = compat_urllib_parse.urlencode({'id': video_link})
+ token_query = compat_urllib_parse_urlencode({'id': video_link})
token_info = self._download_json(
embed_data['flashvars']['ov_tk'] + '?' + token_query,
episode,
diff --git a/youtube_dl/extractor/tubitv.py b/youtube_dl/extractor/tubitv.py
index 6d78b5dfe..50ed15163 100644
--- a/youtube_dl/extractor/tubitv.py
+++ b/youtube_dl/extractor/tubitv.py
@@ -5,7 +5,7 @@ import codecs
import re
from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
from ..utils import (
ExtractorError,
int_or_none,
@@ -41,7 +41,7 @@ class TubiTvIE(InfoExtractor):
'username': username,
'password': password,
}
- payload = compat_urllib_parse.urlencode(form_data).encode('utf-8')
+ payload = compat_urllib_parse_urlencode(form_data).encode('utf-8')
request = sanitized_Request(self._LOGIN_URL, payload)
request.add_header('Content-Type', 'application/x-www-form-urlencoded')
login_page = self._download_webpage(
diff --git a/youtube_dl/extractor/twitch.py b/youtube_dl/extractor/twitch.py
index d4169ec6d..c92dcc7b9 100644
--- a/youtube_dl/extractor/twitch.py
+++ b/youtube_dl/extractor/twitch.py
@@ -9,12 +9,11 @@ from .common import InfoExtractor
from ..compat import (
compat_parse_qs,
compat_str,
- compat_urllib_parse,
+ compat_urllib_parse_urlencode,
compat_urllib_parse_urlparse,
compat_urlparse,
)
from ..utils import (
- encode_dict,
ExtractorError,
int_or_none,
orderedSet,
@@ -82,7 +81,7 @@ class TwitchBaseIE(InfoExtractor):
post_url = compat_urlparse.urljoin(redirect_url, post_url)
request = sanitized_Request(
- post_url, compat_urllib_parse.urlencode(encode_dict(login_form)).encode('utf-8'))
+ post_url, compat_urllib_parse_urlencode(login_form).encode('utf-8'))
request.add_header('Referer', redirect_url)
response = self._download_webpage(
request, None, 'Logging in as %s' % username)
@@ -250,7 +249,7 @@ class TwitchVodIE(TwitchItemBaseIE):
formats = self._extract_m3u8_formats(
'%s/vod/%s?%s' % (
self._USHER_BASE, item_id,
- compat_urllib_parse.urlencode({
+ compat_urllib_parse_urlencode({
'allow_source': 'true',
'allow_audio_only': 'true',
'allow_spectre': 'true',
@@ -442,7 +441,7 @@ class TwitchStreamIE(TwitchBaseIE):
}
formats = self._extract_m3u8_formats(
'%s/api/channel/hls/%s.m3u8?%s'
- % (self._USHER_BASE, channel_id, compat_urllib_parse.urlencode(query)),
+ % (self._USHER_BASE, channel_id, compat_urllib_parse_urlencode(query)),
channel_id, 'mp4')
self._prefer_source(formats)
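
The Twitch login change illustrates the POST variant of the same migration: the form dict goes straight into the new helper and the resulting string is encoded to bytes for the request body. Roughly (URL and form fields are placeholders):

    from youtube_dl.compat import compat_urllib_parse_urlencode
    from youtube_dl.utils import sanitized_Request

    login_form = {'username': 'user', 'password': 'pass'}  # placeholder credentials
    request = sanitized_Request(
        'https://example.invalid/login',
        compat_urllib_parse_urlencode(login_form).encode('utf-8'))
    request.add_header('Content-Type', 'application/x-www-form-urlencoded')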
diff --git a/youtube_dl/extractor/udemy.py b/youtube_dl/extractor/udemy.py
index a9046b865..6adfb2cee 100644
--- a/youtube_dl/extractor/udemy.py
+++ b/youtube_dl/extractor/udemy.py
@@ -3,7 +3,7 @@ from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import (
compat_HTTPError,
- compat_urllib_parse,
+ compat_urllib_parse_urlencode,
compat_urllib_request,
compat_urlparse,
)
@@ -71,7 +71,7 @@ class UdemyIE(InfoExtractor):
def _download_lecture(self, course_id, lecture_id):
return self._download_json(
'https://www.udemy.com/api-2.0/users/me/subscribed-courses/%s/lectures/%s?%s' % (
- course_id, lecture_id, compat_urllib_parse.urlencode({
+ course_id, lecture_id, compat_urllib_parse_urlencode({
'video_only': '',
'auto_play': '',
'fields[lecture]': 'title,description,asset',
@@ -139,7 +139,7 @@ class UdemyIE(InfoExtractor):
})
request = sanitized_Request(
- self._LOGIN_URL, compat_urllib_parse.urlencode(login_form).encode('utf-8'))
+ self._LOGIN_URL, compat_urllib_parse_urlencode(login_form).encode('utf-8'))
request.add_header('Referer', self._ORIGIN_URL)
request.add_header('Origin', self._ORIGIN_URL)
diff --git a/youtube_dl/extractor/vbox7.py b/youtube_dl/extractor/vbox7.py
index b755dda90..77bb200e9 100644
--- a/youtube_dl/extractor/vbox7.py
+++ b/youtube_dl/extractor/vbox7.py
@@ -3,7 +3,7 @@ from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import (
- compat_urllib_parse,
+ compat_urllib_parse_urlencode,
compat_urlparse,
)
from ..utils import (
@@ -48,7 +48,7 @@ class Vbox7IE(InfoExtractor):
webpage, 'title').split('/')[0].strip()
info_url = 'http://vbox7.com/play/magare.do'
- data = compat_urllib_parse.urlencode({'as3': '1', 'vid': video_id})
+ data = compat_urllib_parse_urlencode({'as3': '1', 'vid': video_id})
info_request = sanitized_Request(info_url, data)
info_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
info_response = self._download_webpage(info_request, video_id, 'Downloading info webpage')
diff --git a/youtube_dl/extractor/viddler.py b/youtube_dl/extractor/viddler.py
index 6bfbd4d85..8d92aee87 100644
--- a/youtube_dl/extractor/viddler.py
+++ b/youtube_dl/extractor/viddler.py
@@ -2,7 +2,7 @@ from __future__ import unicode_literals
from .common import InfoExtractor
from ..compat import (
- compat_urllib_parse,
+ compat_urllib_parse_urlencode,
compat_urlparse,
)
from ..utils import (
@@ -93,7 +93,7 @@ class ViddlerIE(InfoExtractor):
headers = {'Referer': 'http://static.cdn-ec.viddler.com/js/arpeggio/v2/embed.html'}
request = sanitized_Request(
'http://api.viddler.com/api/v2/viddler.videos.getPlaybackDetails.json?%s'
- % compat_urllib_parse.urlencode(query), None, headers)
+ % compat_urllib_parse_urlencode(query), None, headers)
data = self._download_json(request, video_id)['video']
formats = []
diff --git a/youtube_dl/extractor/vimeo.py b/youtube_dl/extractor/vimeo.py
index 71c30d2cd..707a5735a 100644
--- a/youtube_dl/extractor/vimeo.py
+++ b/youtube_dl/extractor/vimeo.py
@@ -12,7 +12,6 @@ from ..compat import (
)
from ..utils import (
determine_ext,
- encode_dict,
ExtractorError,
InAdvancePagedList,
int_or_none,
@@ -42,13 +41,13 @@ class VimeoBaseInfoExtractor(InfoExtractor):
self.report_login()
webpage = self._download_webpage(self._LOGIN_URL, None, False)
token, vuid = self._extract_xsrft_and_vuid(webpage)
- data = urlencode_postdata(encode_dict({
+ data = urlencode_postdata({
'action': 'login',
'email': username,
'password': password,
'service': 'vimeo',
'token': token,
- }))
+ })
login_request = sanitized_Request(self._LOGIN_URL, data)
login_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
login_request.add_header('Referer', self._LOGIN_URL)
@@ -255,10 +254,10 @@ class VimeoIE(VimeoBaseInfoExtractor):
if password is None:
raise ExtractorError('This video is protected by a password, use the --video-password option', expected=True)
token, vuid = self._extract_xsrft_and_vuid(webpage)
- data = urlencode_postdata(encode_dict({
+ data = urlencode_postdata({
'password': password,
'token': token,
- }))
+ })
if url.startswith('http://'):
# vimeo only supports https now, but the user can give an http url
url = url.replace('http://', 'https://')
@@ -274,7 +273,7 @@ class VimeoIE(VimeoBaseInfoExtractor):
password = self._downloader.params.get('videopassword')
if password is None:
raise ExtractorError('This video is protected by a password, use the --video-password option')
- data = urlencode_postdata(encode_dict({'password': password}))
+ data = urlencode_postdata({'password': password})
pass_url = url + '/check-password'
password_request = sanitized_Request(pass_url, data)
password_request.add_header('Content-Type', 'application/x-www-form-urlencoded')
@@ -575,7 +574,7 @@ class VimeoChannelIE(VimeoBaseInfoExtractor):
token, vuid = self._extract_xsrft_and_vuid(webpage)
fields['token'] = token
fields['password'] = password
- post = urlencode_postdata(encode_dict(fields))
+ post = urlencode_postdata(fields)
password_path = self._search_regex(
r'action="([^"]+)"', login_form, 'password URL')
password_url = compat_urlparse.urljoin(page_url, password_path)
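
In the Vimeo hunks the dict is passed straight to urlencode_postdata(), which (see the utils.py hunk at the end of this diff) now calls compat_urllib_parse_urlencode itself and returns ASCII bytes, so the encode_dict() wrapper is dropped. A sketch of the simplified call (field values are placeholders):

    from youtube_dl.utils import urlencode_postdata

    # Placeholder form fields; real values come from the login page.
    data = urlencode_postdata({
        'action': 'login',
        'email': 'user@example.invalid',
        'password': 'hunter2',
    })
    # data is a bytes object ready to be used as a POST body.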
diff --git a/youtube_dl/extractor/vk.py b/youtube_dl/extractor/vk.py
index d560a4b5e..458099a4a 100644
--- a/youtube_dl/extractor/vk.py
+++ b/youtube_dl/extractor/vk.py
@@ -7,7 +7,7 @@ import json
from .common import InfoExtractor
from ..compat import (
compat_str,
- compat_urllib_parse,
+ compat_urllib_parse_urlencode,
)
from ..utils import (
ExtractorError,
@@ -204,7 +204,7 @@ class VKIE(InfoExtractor):
request = sanitized_Request(
'https://login.vk.com/?act=login',
- compat_urllib_parse.urlencode(login_form).encode('utf-8'))
+ compat_urllib_parse_urlencode(login_form).encode('utf-8'))
login_page = self._download_webpage(
request, None, note='Logging in as %s' % username)
diff --git a/youtube_dl/extractor/vlive.py b/youtube_dl/extractor/vlive.py
index bd5545173..baf39bb2c 100644
--- a/youtube_dl/extractor/vlive.py
+++ b/youtube_dl/extractor/vlive.py
@@ -7,7 +7,7 @@ from ..utils import (
float_or_none,
int_or_none,
)
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
class VLiveIE(InfoExtractor):
@@ -43,7 +43,7 @@ class VLiveIE(InfoExtractor):
playinfo = self._download_json(
'http://global.apis.naver.com/rmcnmv/rmcnmv/vod_play_videoInfo.json?%s'
- % compat_urllib_parse.urlencode({
+ % compat_urllib_parse_urlencode({
'videoId': long_video_id,
'key': key,
'ptc': 'http',
diff --git a/youtube_dl/extractor/vodlocker.py b/youtube_dl/extractor/vodlocker.py
index a97995a6d..f1abca4d9 100644
--- a/youtube_dl/extractor/vodlocker.py
+++ b/youtube_dl/extractor/vodlocker.py
@@ -2,7 +2,7 @@
from __future__ import unicode_literals
from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
from ..utils import (
ExtractorError,
NO_DEFAULT,
@@ -38,7 +38,7 @@ class VodlockerIE(InfoExtractor):
if fields['op'] == 'download1':
self._sleep(3, video_id) # they do detect when requests happen too fast!
- post = compat_urllib_parse.urlencode(fields)
+ post = compat_urllib_parse_urlencode(fields)
req = sanitized_Request(url, post)
req.add_header('Content-type', 'application/x-www-form-urlencoded')
webpage = self._download_webpage(
diff --git a/youtube_dl/extractor/xfileshare.py b/youtube_dl/extractor/xfileshare.py
index 94abdb4f3..4e35e1f44 100644
--- a/youtube_dl/extractor/xfileshare.py
+++ b/youtube_dl/extractor/xfileshare.py
@@ -4,10 +4,9 @@ from __future__ import unicode_literals
import re
from .common import InfoExtractor
-from ..compat import compat_urllib_parse
+from ..compat import compat_urllib_parse_urlencode
from ..utils import (
ExtractorError,
- encode_dict,
int_or_none,
sanitized_Request,
)
@@ -109,7 +108,7 @@ class XFileShareIE(InfoExtractor):
if countdown:
self._sleep(countdown, video_id)
- post = compat_urllib_parse.urlencode(encode_dict(fields))
+ post = compat_urllib_parse_urlencode(fields)
req = sanitized_Request(url, post)
req.add_header('Content-type', 'application/x-www-form-urlencoded')
diff --git a/youtube_dl/extractor/yahoo.py b/youtube_dl/extractor/yahoo.py
index 4c6142927..b2d8f4b48 100644
--- a/youtube_dl/extractor/yahoo.py
+++ b/youtube_dl/extractor/yahoo.py
@@ -8,6 +8,7 @@ import re
from .common import InfoExtractor, SearchInfoExtractor
from ..compat import (
compat_urllib_parse,
+ compat_urllib_parse_urlencode,
compat_urlparse,
)
from ..utils import (
@@ -303,7 +304,7 @@ class YahooIE(InfoExtractor):
region = self._search_regex(
r'\\?"region\\?"\s*:\s*\\?"([^"]+?)\\?"',
webpage, 'region', fatal=False, default='US')
- data = compat_urllib_parse.urlencode({
+ data = compat_urllib_parse_urlencode({
'protocol': 'http',
'region': region,
})
diff --git a/youtube_dl/extractor/yandexmusic.py b/youtube_dl/extractor/yandexmusic.py
index e699e663f..158f3ea68 100644
--- a/youtube_dl/extractor/yandexmusic.py
+++ b/youtube_dl/extractor/yandexmusic.py
@@ -7,7 +7,7 @@ import hashlib
from .common import InfoExtractor
from ..compat import (
compat_str,
- compat_urllib_parse,
+ compat_urllib_parse_urlencode,
)
from ..utils import (
ExtractorError,
@@ -170,7 +170,7 @@ class YandexMusicPlaylistIE(YandexMusicPlaylistBaseIE):
missing_track_ids = set(map(compat_str, track_ids)) - set(present_track_ids)
request = sanitized_Request(
'https://music.yandex.ru/handlers/track-entries.jsx',
- compat_urllib_parse.urlencode({
+ compat_urllib_parse_urlencode({
'entries': ','.join(missing_track_ids),
'lang': mu.get('settings', {}).get('lang', 'en'),
'external-domain': 'music.yandex.ru',
diff --git a/youtube_dl/extractor/youku.py b/youtube_dl/extractor/youku.py
index 900eb2aba..fd7eb5a6d 100644
--- a/youtube_dl/extractor/youku.py
+++ b/youtube_dl/extractor/youku.py
@@ -8,7 +8,7 @@ import time
from .common import InfoExtractor
from ..compat import (
- compat_urllib_parse,
+ compat_urllib_parse_urlencode,
compat_ord,
)
from ..utils import (
@@ -138,7 +138,7 @@ class YoukuIE(InfoExtractor):
'_00' + \
'/st/' + self.parse_ext_l(format) + \
'/fileid/' + get_fileid(format, n) + '?' + \
- compat_urllib_parse.urlencode(param)
+ compat_urllib_parse_urlencode(param)
video_urls.append(video_url)
video_urls_dict[format] = video_urls
diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py
index 96fa3b5aa..83b5840f7 100644
--- a/youtube_dl/extractor/youtube.py
+++ b/youtube_dl/extractor/youtube.py
@@ -17,16 +17,15 @@ from ..swfinterp import SWFInterpreter
from ..compat import (
compat_chr,
compat_parse_qs,
- compat_urllib_parse,
compat_urllib_parse_unquote,
compat_urllib_parse_unquote_plus,
+ compat_urllib_parse_urlencode,
compat_urllib_parse_urlparse,
compat_urlparse,
compat_str,
)
from ..utils import (
clean_html,
- encode_dict,
error_to_compat_str,
ExtractorError,
float_or_none,
@@ -116,7 +115,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
'hl': 'en_US',
}
- login_data = compat_urllib_parse.urlencode(encode_dict(login_form_strs)).encode('ascii')
+ login_data = compat_urllib_parse_urlencode(login_form_strs).encode('ascii')
req = sanitized_Request(self._LOGIN_URL, login_data)
login_results = self._download_webpage(
@@ -149,7 +148,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
'TrustDevice': 'on',
})
- tfa_data = compat_urllib_parse.urlencode(encode_dict(tfa_form_strs)).encode('ascii')
+ tfa_data = compat_urllib_parse_urlencode(tfa_form_strs).encode('ascii')
tfa_req = sanitized_Request(self._TWOFACTOR_URL, tfa_data)
tfa_results = self._download_webpage(
@@ -1007,7 +1006,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
continue
sub_formats = []
for ext in self._SUBTITLE_FORMATS:
- params = compat_urllib_parse.urlencode({
+ params = compat_urllib_parse_urlencode({
'lang': lang,
'v': video_id,
'fmt': ext,
@@ -1056,7 +1055,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
if caption_url:
timestamp = args['timestamp']
# We get the available subtitles
- list_params = compat_urllib_parse.urlencode({
+ list_params = compat_urllib_parse_urlencode({
'type': 'list',
'tlangs': 1,
'asrs': 1,
@@ -1075,7 +1074,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
sub_lang = lang_node.attrib['lang_code']
sub_formats = []
for ext in self._SUBTITLE_FORMATS:
- params = compat_urllib_parse.urlencode({
+ params = compat_urllib_parse_urlencode({
'lang': original_lang,
'tlang': sub_lang,
'fmt': ext,
@@ -1094,7 +1093,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
caption_tracks = args['caption_tracks']
caption_translation_languages = args['caption_translation_languages']
caption_url = compat_parse_qs(caption_tracks.split(',')[0])['u'][0]
- parsed_caption_url = compat_urlparse.urlparse(caption_url)
+ parsed_caption_url = compat_urllib_parse_urlparse(caption_url)
caption_qs = compat_parse_qs(parsed_caption_url.query)
sub_lang_list = {}
@@ -1110,7 +1109,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
'fmt': [ext],
})
sub_url = compat_urlparse.urlunparse(parsed_caption_url._replace(
- query=compat_urllib_parse.urlencode(caption_qs, True)))
+ query=compat_urllib_parse_urlencode(caption_qs, True)))
sub_formats.append({
'url': sub_url,
'ext': ext,
@@ -1140,7 +1139,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
'cpn': [cpn],
})
playback_url = compat_urlparse.urlunparse(
- parsed_playback_url._replace(query=compat_urllib_parse.urlencode(qs, True)))
+ parsed_playback_url._replace(query=compat_urllib_parse_urlencode(qs, True)))
self._download_webpage(
playback_url, video_id, 'Marking watched',
@@ -1225,7 +1224,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
# this can be viewed without login into Youtube
url = proto + '://www.youtube.com/embed/%s' % video_id
embed_webpage = self._download_webpage(url, video_id, 'Downloading embed webpage')
- data = compat_urllib_parse.urlencode({
+ data = compat_urllib_parse_urlencode({
'video_id': video_id,
'eurl': 'https://youtube.googleapis.com/v/' + video_id,
'sts': self._search_regex(
@@ -2085,7 +2084,7 @@ class YoutubeSearchIE(SearchInfoExtractor, YoutubePlaylistIE):
'spf': 'navigate',
}
url_query.update(self._EXTRA_QUERY_ARGS)
- result_url = 'https://www.youtube.com/results?' + compat_urllib_parse.urlencode(url_query)
+ result_url = 'https://www.youtube.com/results?' + compat_urllib_parse_urlencode(url_query)
data = self._download_json(
result_url, video_id='query "%s"' % query,
note='Downloading page %s' % pagenum,
diff --git a/youtube_dl/utils.py b/youtube_dl/utils.py
index b6e1dc809..eacd81bf9 100644
--- a/youtube_dl/utils.py
+++ b/youtube_dl/utils.py
@@ -47,6 +47,7 @@ from .compat import (
compat_str,
compat_urllib_error,
compat_urllib_parse,
+ compat_urllib_parse_urlencode,
compat_urllib_parse_urlparse,
compat_urllib_request,
compat_urlparse,
@@ -1315,7 +1316,7 @@ def shell_quote(args):
def smuggle_url(url, data):
""" Pass additional data in a URL for internal use. """
- sdata = compat_urllib_parse.urlencode(
+ sdata = compat_urllib_parse_urlencode(
{'__youtubedl_smuggle': json.dumps(data)})
return url + '#' + sdata
@@ -1789,22 +1790,15 @@ def read_batch_urls(batch_fd):
def urlencode_postdata(*args, **kargs):
- return compat_urllib_parse.urlencode(*args, **kargs).encode('ascii')
+ return compat_urllib_parse_urlencode(*args, **kargs).encode('ascii')
def update_url_query(url, query):
parsed_url = compat_urlparse.urlparse(url)
qs = compat_parse_qs(parsed_url.query)
qs.update(query)
- qs = encode_dict(qs)
return compat_urlparse.urlunparse(parsed_url._replace(
- query=compat_urllib_parse.urlencode(qs, True)))
-
-
-def encode_dict(d, encoding='utf-8'):
- def encode(v):
- return v.encode(encoding) if isinstance(v, compat_basestring) else v
- return dict((encode(k), encode(v)) for k, v in d.items())
+ query=compat_urllib_parse_urlencode(qs, True)))
def dict_get(d, key_or_keys, default=None, skip_false_values=True):
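
For reference, the removed encode_dict() shown above simply utf-8 encoded every key and value so that Python 2's urlencode would not choke on unicode; the compat shim this diff switches to has to absorb that responsibility. A rough sketch of the idea, not the actual compat.py code, assuming utf-8 as the default encoding:

    import sys

    if sys.version_info >= (3, 0):
        from urllib.parse import urlencode as compat_urllib_parse_urlencode
    else:
        import urllib

        def compat_urllib_parse_urlencode(query, doseq=0, encoding='utf-8'):
            # Python 2's urlencode cannot handle unicode values, so encode
            # every string (including those nested in lists/tuples) first.
            def enc(v):
                if isinstance(v, unicode):  # 'unicode' exists on Python 2 only
                    return v.encode(encoding)
                if isinstance(v, (list, tuple)):
                    return [enc(e) for e in v]
                return v
            if hasattr(query, 'items'):
                query = [(enc(k), enc(v)) for k, v in query.items()]
            else:
                query = [(enc(k), enc(v)) for k, v in query]
            return urllib.urlencode(query, doseq)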