author | Jaime Marquínez Ferrándiz <[email protected]> | 2015-08-02 15:19:57 +0200
---|---|---
committer | Jaime Marquínez Ferrándiz <[email protected]> | 2015-08-02 15:19:57 +0200
commit | 25a4c5a9ed59eca0241922363e83e61172527658 (patch) |
tree | 12a73fd719f84441bfcf43a5c2f8c5463d8e679f | /youtube_dl/extractor/dailymotion.py
parent | 5c45bbe57bd791debfd64052ab030298a7c6b718 (diff) |
download | youtube-dl-25a4c5a9ed59eca0241922363e83e61172527658.tar.gz, youtube-dl-25a4c5a9ed59eca0241922363e83e61172527658.zip |
[dailymotion:playlist] Use an iterator for the entries
So that using '--playlist-end' only downloads the required pages (reported in #2175).
Diffstat (limited to 'youtube_dl/extractor/dailymotion.py')
-rw-r--r-- | youtube_dl/extractor/dailymotion.py | 10 |
1 file changed, 5 insertions(+), 5 deletions(-)
```diff
diff --git a/youtube_dl/extractor/dailymotion.py b/youtube_dl/extractor/dailymotion.py
index 85d945509..2d90b2224 100644
--- a/youtube_dl/extractor/dailymotion.py
+++ b/youtube_dl/extractor/dailymotion.py
@@ -15,7 +15,6 @@ from ..utils import (
     ExtractorError,
     determine_ext,
     int_or_none,
-    orderedSet,
     parse_iso8601,
     str_to_int,
     unescapeHTML,
@@ -278,7 +277,7 @@ class DailymotionPlaylistIE(DailymotionBaseInfoExtractor):
     }]

     def _extract_entries(self, id):
-        video_ids = []
+        video_ids = set()
         processed_urls = set()
         for pagenum in itertools.count(1):
             page_url = self._PAGE_TEMPLATE % (id, pagenum)
@@ -291,12 +290,13 @@ class DailymotionPlaylistIE(DailymotionBaseInfoExtractor):

             processed_urls.add(urlh.geturl())

-            video_ids.extend(re.findall(r'data-xid="(.+?)"', webpage))
+            for video_id in re.findall(r'data-xid="(.+?)"', webpage):
+                if video_id not in video_ids:
+                    yield self.url_result('http://www.dailymotion.com/video/%s' % video_id, 'Dailymotion')
+                    video_ids.add(video_id)

             if re.search(self._MORE_PAGES_INDICATOR, webpage) is None:
                 break
-        return [self.url_result('http://www.dailymotion.com/video/%s' % video_id, 'Dailymotion')
-                for video_id in orderedSet(video_ids)]

     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
```
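The patch turns `_extract_entries` into a generator, so playlist entries are produced page by page and iteration can stop as soon as the caller has enough of them. Below is a minimal, self-contained sketch of that pattern, not youtube-dl's actual code: `fake_download_page` and both `extract_entries_*` helpers are hypothetical stand-ins that only illustrate why a lazy iterator lets an option like `--playlist-end` avoid downloading later pages.

```python
import itertools


def fake_download_page(pagenum):
    """Hypothetical stand-in for the network request; prints so the effect
    of lazy evaluation is visible. Each "page" carries three video ids."""
    print('downloading page %d' % pagenum)
    return ['page%d-video%d' % (pagenum, i) for i in range(1, 4)]


def extract_entries_eager(last_page=5):
    # Old shape: every page is fetched up front, even if the caller only
    # wants the first couple of entries.
    video_ids = []
    for pagenum in range(1, last_page + 1):
        video_ids.extend(fake_download_page(pagenum))
    return ['http://www.dailymotion.com/video/%s' % vid for vid in video_ids]


def extract_entries_lazy(last_page=5):
    # New shape: a generator yields each entry as soon as its page has been
    # parsed, so iteration can stop before later pages are ever requested.
    seen = set()
    for pagenum in itertools.count(1):
        for vid in fake_download_page(pagenum):
            if vid not in seen:
                yield 'http://www.dailymotion.com/video/%s' % vid
                seen.add(vid)
        if pagenum >= last_page:  # stand-in for the _MORE_PAGES_INDICATOR check
            break


if __name__ == '__main__':
    # Eager version: downloads all five pages although only two entries are kept.
    print(extract_entries_eager()[:2])
    # Lazy version: islice() stops after page 1, which is what lets an
    # early cutoff such as '--playlist-end 2' skip the remaining pages.
    print(list(itertools.islice(extract_entries_lazy(), 2)))
```

Running the sketch, the eager helper prints "downloading page" five times before returning, while the lazy helper stops after page 1 because `islice` never asks the generator for a third entry.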