# -*- coding: utf-8 -*-
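"""Subdivx.com provider for subliminal_patch: searches and downloads
Spanish-language subtitles from www.subdivx.com."""
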
import io
import logging
import os
import re
import time
import zipfile
from urllib.parse import urljoin

import rarfile
from subzero.language import Language
from requests import Session

from subliminal import __short_version__
from subliminal.exceptions import ServiceUnavailable
from subliminal.providers import ParserBeautifulSoup
from subliminal.subtitle import SUBTITLE_EXTENSIONS, fix_line_ending, guess_matches
from subliminal.video import Episode, Movie
from subliminal_patch.exceptions import APIThrottled
from subliminal_patch.score import get_scores
from subliminal_patch.subtitle import Subtitle
from subliminal_patch.providers import Provider
from guessit import guessit


# (pattern, replacement) pairs applied in order by _clean_title() to
# normalize scraped titles before matching
CLEAN_TITLE_RES = [
    (r"subt[ií]tulos de", ""),
    (r"´|`", "'"),
    (r" {2,}", " "),
]

logger = logging.getLogger(__name__)


class SubdivxSubtitle(Subtitle):
    provider_name = "subdivx"
    hash_verifiable = False

    def __init__(self, language, video, page_link, title, description, uploader):
        super(SubdivxSubtitle, self).__init__(
            language, hearing_impaired=False, page_link=page_link
        )
        self.video = video
        self.title = title
        self.description = description
        self.uploader = uploader
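        # surface the page title (plus the description, when present) as the
        # release info shown to the user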
        self.release_info = self.title
        if self.description and self.description.strip():
            self.release_info += " | " + self.description

    @property
    def id(self):
        return self.page_link

    def get_matches(self, video):
        matches = set()

        # episode
        if isinstance(video, Episode):
            # already matched in search query
            matches.update(["title", "series", "season", "episode", "year"])

        # movie
        elif isinstance(video, Movie):
            # already matched in search query
            matches.update(["title", "year"])

        # No bespoke string comparisons are needed here: guessit can extract
        # matchable properties straight from the free-form description.
        matches |= guess_matches(
            video,
            guessit(
                self.description,
                {"type": "episode" if isinstance(video, Episode) else "movie"},
            ),
        )

        return matches


class SubdivxSubtitlesProvider(Provider):
    provider_name = "subdivx"
    hash_verifiable = False
    languages = {Language.fromalpha2(lang) for lang in ["es"]}
    subtitle_class = SubdivxSubtitle

    server_url = "https://www.subdivx.com/"
    multi_result_throttle = 2
    language_list = list(languages)

    def __init__(self):
        self.session = None

    def initialize(self):
        self.session = Session()
        self.session.headers["User-Agent"] = f"Subliminal/{__short_version__}"

    def terminate(self):
        self.session.close()

    def query(self, video, languages):
        if isinstance(video, Episode):
            query = f"{video.series} S{video.season:02}E{video.episode:02}"
        else:
            # Subdivx has trouble finding foreign movies when the year is
            # appended to the query. A proper solution would be to filter
            # results by year in self._parse_subtitles_page.
            query = video.title

        params = {
            "q": query,  # search string
            "accion": 5,  # action search
            "oxdown": 1,  # order by downloads descending
            "pg": 1,  # page 1
        }

        logger.debug(f"Searching subtitles: {query}")
        subtitles = []
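        # only Spanish is supported, so a single language object covers every result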
        language = self.language_list[0]
        search_link = self.server_url + "index.php"
        while True:
            response = self.session.get(search_link, params=params, timeout=20)
            self._check_response(response)

            try:
                page_subtitles = self._parse_subtitles_page(video, response, language)
            except Exception as e:
                logger.error(f"Error parsing subtitles list: {e}")
                break

            subtitles += page_subtitles

            if len(page_subtitles) < 100:
                break  # a full page holds 100 results; fewer means this was the last page

            params["pg"] += 1  # search next page
            time.sleep(self.multi_result_throttle)

        return subtitles

    def list_subtitles(self, video, languages):
        return self.query(video, languages)

    def download_subtitle(self, subtitle):
        if isinstance(subtitle, SubdivxSubtitle):
            # download the subtitle
            logger.info("Downloading subtitle %r", subtitle)

            # get download link
            download_link = self._get_download_link(subtitle)

            # download zip / rar file with the subtitle
            response = self.session.get(
                download_link,  # _get_download_link() already returns an absolute URL
                headers={"Referer": subtitle.page_link},
                timeout=30,
            )
            self._check_response(response)

            # open the compressed archive
            archive = self._get_archive(response.content)

            # extract the subtitle
            subtitle_content = self._get_subtitle_from_archive(archive, subtitle)
            subtitle.content = fix_line_ending(subtitle_content)

    def _parse_subtitles_page(self, video, response, language):
        subtitles = []

        page_soup = ParserBeautifulSoup(
            response.content.decode("utf-8", "ignore"), ["lxml", "html.parser"]
        )
        title_soups = page_soup.find_all("div", {"id": "menu_detalle_buscador"})
        body_soups = page_soup.find_all("div", {"id": "buscador_detalle"})
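        # the page renders one title div and one detail div per result; the two
        # lists are assumed parallel, in the same order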

        for title_soup, body_soup in zip(title_soups, body_soups):

            # title
            title = self._clean_title(title_soup.find("a").text)

            # filter by year
            if video.year and str(video.year) not in title:
                continue

            page_link = title_soup.find("a")["href"]

            # description
            description = body_soup.find("div", {"id": "buscador_detalle_sub"}).text
            description = description.replace(",", " ").lower()

            # uploader
            uploader = body_soup.find("a", {"class": "link1"}).text

            subtitle = self.subtitle_class(
                language, video, page_link, title, description, uploader
            )

            logger.debug("Found subtitle %r", subtitle)
            subtitles.append(subtitle)

        return subtitles

    def _get_download_link(self, subtitle):
        response = self.session.get(subtitle.page_link, timeout=20)
        self._check_response(response)
        try:
            page_soup = ParserBeautifulSoup(
                response.content.decode("utf-8", "ignore"), ["lxml", "html.parser"]
            )
            links_soup = page_soup.find_all("a", {"class": "detalle_link"})
            for link_soup in links_soup:
                if link_soup["href"].startswith("bajar"):
                    return self.server_url + link_soup["href"]
            links_soup = page_soup.find_all("a", {"class": "link1"})
            for link_soup in links_soup:
                if "bajar.php" in link_soup["href"]:
                    return link_soup["href"]
        except Exception as e:
            raise APIThrottled(f"Error parsing download link: {e}")

        raise APIThrottled("Download link not found")

    @staticmethod
    def _clean_title(title):
        """
        Normalize apostrophes and spaces to avoid matching problems
        (e.g. Subtitulos de  Carlito´s  Way -> Carlito's Way)
        """
        for og, new in CLEAN_TITLE_RES:
            title = re.sub(og, new, title, flags=re.IGNORECASE)

        return title

    @staticmethod
    def _check_response(response):
        if response.status_code != 200:
            raise ServiceUnavailable(f"Bad status code: {response.status_code}")

    @staticmethod
    def _get_archive(content):
        # open the archive
        archive_stream = io.BytesIO(content)
        if rarfile.is_rarfile(archive_stream):
            logger.debug("Identified rar archive")
            archive = rarfile.RarFile(archive_stream)
        elif zipfile.is_zipfile(archive_stream):
            logger.debug("Identified zip archive")
            archive = zipfile.ZipFile(archive_stream)
        else:
            raise APIThrottled("Unsupported compressed format")

        return archive

    @staticmethod
    def _get_subtitle_from_archive(archive, subtitle):
        _valid_names = []
        for name in archive.namelist():
            # discard hidden files
            # discard non-subtitle files
            if not os.path.split(name)[-1].startswith(".") and name.lower().endswith(
                SUBTITLE_EXTENSIONS
            ):
                _valid_names.append(name)

        # archive with only 1 subtitle
        if len(_valid_names) == 1:
            logger.debug(
                f"returning from archive: {_valid_names[0]} (single subtitle file)"
            )
            return archive.read(_valid_names[0])

        # in archives with more than 1 subtitle (season pack) we try to guess the best subtitle file
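        # get_scores() yields subliminal's per-property match weights for this
        # video type (episode or movie)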
        _scores = get_scores(subtitle.video)
        _max_score = 0
        _max_name = ""
        for name in _valid_names:
            _guess = guessit(name)
            if "season" not in _guess:
                _guess["season"] = -1
            if "episode" not in _guess:
                _guess["episode"] = -1

            if isinstance(subtitle.video, Episode):
                logger.debug("guessing %s" % name)
                logger.debug(
                    f"subtitle S{_guess['season']}E{_guess['episode']} video "
                    f"S{subtitle.video.season}E{subtitle.video.episode}"
                )

                if (
                    subtitle.video.episode != _guess["episode"]
                    or subtitle.video.season != _guess["season"]
                ):
                    logger.debug("subtitle does not match video, skipping")
                    continue

            matches = guess_matches(subtitle.video, _guess)
            _score = sum(_scores.get(match, 0) for match in matches)
            logger.debug(f"srt matches: {matches}, score {_score}")
            if _score > _max_score:
                _max_score = _score
                _max_name = name
                logger.debug(f"new max: {name} {_score}")

        if _max_score > 0:
            logger.debug(f"returning from archive: {_max_name} scored {_max_score}")
            return archive.read(_max_name)

        raise APIThrottled("Can not find the subtitle in the compressed file")