# coding=utf-8
import logging
from random import randint
import re
import time
import urllib.parse
from babelfish import language_converters
from bs4.element import NavigableString
from bs4.element import Tag
from guessit import guessit
from requests import Session
from requests.exceptions import JSONDecodeError
from subliminal.providers import ParserBeautifulSoup
from subliminal.score import get_equivalent_release_groups
from subliminal.utils import sanitize
from subliminal.utils import sanitize_release_group
from subliminal.video import Episode
from subliminal.video import Movie
from subliminal_patch.exceptions import APIThrottled
from subliminal_patch.providers import Provider
from subliminal_patch.providers.mixins import ProviderSubtitleArchiveMixin
from subliminal_patch.subtitle import Subtitle
from subliminal_patch.utils import fix_inconsistent_naming
from subliminal_patch.utils import sanitize
from subzero.language import Language
from .utils import FIRST_THOUSAND_OR_SO_USER_AGENTS as AGENT_LIST
from .utils import get_archive_from_bytes
from .utils import get_subtitle_from_archive
from .utils import update_matches

logger = logging.getLogger(__name__)

language_converters.register('supersubtitles = subliminal_patch.converters.supersubtitles:SuperSubtitlesConverter')


def fix_tv_naming(title):
"""Fix TV show titles with inconsistent naming using dictionary, but do not sanitize them.
:param str title: original title.
:return: new title.
:rtype: str
"""
return fix_inconsistent_naming(title, {"DC's Legends of Tomorrow": "Legends of Tomorrow",
"Star Trek: The Next Generation": "Star Trek TNG",
"Loki (aka. Marvel\'s Loki)": "Loki",
"Marvel's": "",
                                           }, True)


def fix_movie_naming(title):
return fix_inconsistent_naming(title, {
    }, True)


class SuperSubtitlesSubtitle(Subtitle):
"""SuperSubtitles Subtitle."""
provider_name = 'supersubtitles'
def __init__(self, language, page_link, subtitle_id, series, season, episode, version,
releases, year, imdb_id, uploader, asked_for_episode=None, asked_for_release_group=None):
super(SuperSubtitlesSubtitle, self).__init__(language, page_link=page_link)
self.subtitle_id = subtitle_id
self.series = series
self.season = season
self.episode = episode
self.version = version
self.releases = releases or []
self.year = year
self.uploader = uploader
if year:
self.year = int(year)
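        # Combine the display name with every known release to form release_info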
self.release_info = "\n".join([self.__get_name(), *self.releases])
self.page_link = page_link
self.asked_for_release_group = asked_for_release_group
self.asked_for_episode = asked_for_episode
self.imdb_id = imdb_id
self.is_pack = True
self.matches = set()
def numeric_id(self):
return self.subtitle_id
@property
def id(self):
return str(self.subtitle_id)
def __get_name(self):
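        # Build a human-readable name like "Series (Year) SxxExx" for release_info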
ep_addon = f"S{self.season:02}E{self.episode:02}" if self.episode else ""
        year_str = f" ({self.year})" if self.year else ""
        return f"{self.series}{year_str} {ep_addon}".strip()
def get_matches(self, video):
matches = set()
update_matches(matches, video, self.releases)
# episode
if isinstance(video, Episode):
# series
if video.series and sanitize(self.series) == sanitize(video.series):
matches.add('series')
# imdb_id
if video.series_imdb_id and self.imdb_id and str(self.imdb_id) == str(video.series_imdb_id):
matches.add('series_imdb_id')
matches.add('series')
matches.add('year')
# year
if 'year' not in matches and 'series' in matches and video.original_series and self.year is None:
matches.add('year')
# movie
elif isinstance(video, Movie):
# title
if video.title and (sanitize(self.series) in (
sanitize(name) for name in [video.title] + video.alternative_titles)):
matches.add('title')
# imdb_id
if video.imdb_id and self.imdb_id == video.imdb_id:
matches.add('imdb_id')
matches.add('title')
matches.add('year')
# year
if video.year and self.year == video.year:
matches.add('year')
# release_group
if video.release_group and self.releases:
video_release_groups = get_equivalent_release_groups(sanitize_release_group(video.release_group))
for release in self.releases:
                if any(r in sanitize_release_group(release) for r in video_release_groups):
                    matches.add('release_group')
                    if video.resolution and video.resolution in release.lower():
                        matches.add('resolution')
                    if video.source and video.source.lower() in release.lower():
                        matches.add('source')
                # We don't have to continue in case it is a perfect match
                if all(m in matches for m in ['release_group', 'resolution', 'source']):
                    break
self.matches = matches
        return matches


class SuperSubtitlesProvider(Provider, ProviderSubtitleArchiveMixin):
"""SuperSubtitles Provider."""
languages = {Language('hun', 'HU')} | {Language(lang) for lang in [
'hun', 'eng'
]}
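    # Only Hungarian ('Magyar') and English ('Angol') results are mapped to languages; see get_language() below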
video_types = (Episode, Movie)
# https://www.feliratok.eu/?search=&soriSorszam=&nyelv=&sorozatnev=The+Flash+%282014%29&sid=3212&complexsearch=true&knyelv=0&evad=4&epizod1=1&cimke=0&minoseg=0&rlsr=0&tab=all
server_url = 'https://www.feliratok.eu/'
hearing_impaired_verifiable = False
multi_result_throttle = 2 # seconds
def __init__(self):
self.session = None
def initialize(self):
self.session = Session()
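        # Use a random user agent for this session and send the site's index page as the Referer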
self.session.headers = {
'User-Agent': AGENT_LIST[randint(0, len(AGENT_LIST) - 1)],
'Referer': 'https://www.feliratok.eu/index.php'
}
def terminate(self):
self.session.close()
@staticmethod
def get_language(text):
if text == 'Magyar':
return Language.fromsupersubtitles('hu')
if text == 'Angol':
return Language.fromsupersubtitles('en')
return None
    def find_imdb_id(self, sub_id):
        """Find the IMDb id (e.g. 'tt2357547') of the title on the subtitle's detail page, or None if not found."""
# TODO: add memoization to this method logic
url = self.server_url + "index.php?tipus=adatlap&azon=a_" + str(sub_id)
# url = https://www.feliratok.eu/index.php?tipus=adatlap&azon=a_1518600916
logger.info('Get IMDB id from URL %s', url)
r = self.session.get(url, timeout=10).content
soup = ParserBeautifulSoup(r, ['lxml'])
links = soup.find_all("a")
for value in links:
if "imdb.com" in str(value):
# <a alt="iMDB" href="http://www.imdb.com/title/tt2357547/" target="_blank"><img alt="iMDB"
# src="img/adatlap/imdb.png"/></a>
imdb_id = re.search(r'(?<=www\.imdb\.com/title/).*(?=/")', str(value))
imdb_id = imdb_id.group() if imdb_id else ''
logger.debug("IMDB ID found: %s", imdb_id)
return imdb_id
return None
def find_id(self, series, year, original_title):
"""
We need to find the id of the series at the following url:
https://www.feliratok.eu/index.php?term=SERIESNAME&nyelv=0&action=autoname
Where SERIESNAME is a searchable string.
The result will be something like this:
[{"name":"DC\u2019s Legends of Tomorrow (2016)","ID":"3725"},{"name":"Miles from Tomorrowland (2015)",
"ID":"3789"},{"name":"No Tomorrow (2016)","ID":"4179"}]
"""
# Search for exact name
url = self.server_url + "index.php?term=" + series + "&nyelv=0&action=autoname"
# url = self.server_url + "index.php?term=" + "fla"+ "&nyelv=0&action=autoname"
logger.info('Get series id from URL %s', url)
r = self.session.get(url, timeout=10)
# r is something like this:
# [{"name":"DC\u2019s Legends of Tomorrow (2016)","ID":"3725"},{"name":"Miles from Tomorrowland (2015)",
# "ID":"3789"},{"name":"No Tomorrow (2016)","ID":"4179"}]
results = r.json()
# check all of the results:
for result in results:
try:
# "name":"Miles from Tomorrowland (2015)","ID":"3789"
result_year = re.search(r"(?<=\()\d\d\d\d(?=\))", result['name'])
result_year = result_year.group() if result_year else ''
            except (KeyError, IndexError):
result_year = ""
try:
# "name":"Miles from Tomorrowland (2015)","ID":"3789"
result_title = re.search(r".*(?=\(\d\d\d\d\))", result['name'])
result_title = result_title.group() if result_title else ''
result_id = result['ID']
            except (KeyError, IndexError):
continue
for title in (result_title, fix_tv_naming(result_title)):
                title = title.strip().replace("’", "").replace("& ", "").replace(" ", ".")
if not title:
continue
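            # Build a fake "<title>.s01e01.<year>" release name so guessit parses the candidate the same way as the video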
guessable = title.strip() + ".s01e01." + result_year
guess = guessit(guessable, {'type': "episode"})
sanitized_original_title = sanitize(original_title.replace('& ', ''))
guess_title = sanitize(guess['title'])
                if sanitized_original_title == guess_title and year and guess.get('year') and \
                        year == guess.get('year'):
                    # Return the found id
                    return result_id
                elif sanitized_original_title == guess_title and not year:
                    # Return the found id
                    return result_id
return None
def query(self, series, languages, video=None):
year = video.year
subtitle = None
if isinstance(video, Episode):
series = video.series
season = video.season
episode = video.episode
# seriesa = series.replace(' ', '+')
# Get ID of series with original name
series_id = self.find_id(series, year, series)
if not series_id:
                # If not found, try without the ' character
modified_series = urllib.parse.quote_plus(series.replace('\'', ''))
series_id = self.find_id(modified_series, year, series)
if not series_id and modified_series:
                    # If still not found, try with the longest word in the series title
modified_series = modified_series.split('+')
modified_series = max(modified_series, key=len)
series_id = self.find_id(modified_series, year, series)
if not series_id:
return None
subtitle = self.retrieve_series_subtitles(series_id, season, episode, video, languages)
if isinstance(video, Movie):
title = urllib.parse.quote_plus(series)
# https://www.feliratok.eu/index.php?search=The+Hitman%27s+BodyGuard&soriSorszam=&nyelv=&tab=film
url = self.server_url + "index.php?search=" + title + "&soriSorszam=&nyelv=&tab=film"
subtitle = self.process_subs(languages, video, url)
return subtitle
def retrieve_series_subtitles(self, series_id, season, episode, video, languages):
"""
Retrieve subtitles for a given episode
:param series_id: the ID of the series returned by @find_id.
:param season: the season number
:param episode: the episode number
:param video: video details
:param languages: languages to search for
:return: list of subtitles for the given episode
"""
if isinstance(video, Movie):
return None
subtitles = []
logger.info('Getting the list of subtitles for %s', video)
# First, try using every param that we got
episode_subs, season_subs = self.get_subtitle_list(series_id, season, episode, video)
if episode_subs:
sub_list = episode_subs
else:
'''
Sometimes the site is a bit buggy when you are searching for an episode sub that is only present in a
season pack, so we have to make a separate call for that without supplying the episode number
'''
_, sub_list = self.get_subtitle_list(series_id, season, None, video)
series_imdb_id = None
        # Convert the list of subtitles to the proper format
for sub in sub_list.values():
'''
Since it is not possible to narrow down the languages in the request, we need to filter out the
inappropriate elements
'''
if sub['language'] in languages:
link = self.server_url + '/index.php?action=letolt&felirat=' + str(sub['id'])
                # For episodes we open the series page, so every subtitle shares the same imdb_id
if series_imdb_id is None:
series_imdb_id = self.find_imdb_id(sub['id'])
# Let's create a SuperSubtitlesSubtitle instance from the data that we got and add it to the list
subtitles.append(SuperSubtitlesSubtitle(sub['language'], link, sub['id'], sub['name'], sub['season'],
sub['episode'], ', '.join(sub['releases']), sub['releases'],
video.year, series_imdb_id, sub['uploader'], video.episode,
asked_for_release_group=video.release_group))
return subtitles
def get_subtitle_list(self, series_id, season, episode, video):
"""
We can retrieve the list of subtitles for a given show via the following url:
https://www.feliratok.eu/index.php?action=xbmc&sid=SERIES_ID&ev=SEASON&rtol=EPISODE
SERIES_ID is the ID of the show returned by the @find_id method. It is a mandatory parameter.
        SEASON is the season number. Optional parameter.
        EPISODE is the episode number. Optional parameter (using this param can cause problems).
        NOTE: multiple records are returned for the same subtitle when it is compatible with multiple releases
"""
# Construct the url
url = self.server_url + "index.php?action=xbmc&sid=" + str(series_id) + "&ev=" + str(season)
# Use the 'rtol' param in case we have a valid episode number
if episode:
url += "&rtol=" + str(episode)
try:
results = self.session.get(url, timeout=10).json()
except JSONDecodeError:
# provider returned improper JSON
results = None
'''
In order to work, the result should be a JSON like this:
{
"10": {
"language":"Angol",
"nev":"The Flash (Season 5) (1080p)",
"baselink":"http://www.feliratok.eu/index.php",
"fnev":"The.Flash.S05.HDTV.WEB.720p.1080p.ENG.zip",
"felirat":"1560706755",
"evad":"5",
"ep":"-1",
"feltolto":"J1GG4",
"pontos_talalat":"111",
"evadpakk":"1"
}, ...
}
'''
subtitle_list = {}
season_pack_list = {}
        # Check the results. If a list or a NoneType is returned, ignore it:
if results and not isinstance(results, list):
for result in results.values():
'''
                Multiple records come back for the same subtitle when it is compatible with multiple releases,
                so we have to group them manually
'''
sub_id = int(result['felirat'])
# 'Nev' is something like this:
# Marvel's The Falcon and the Winter Soldier - 1x05 (WEB.2160p-KOGi)
# or
# Loki (Season 1) (DSNP.WEB-DL.720p-TOMMY)
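                # From the examples above: group(1) -> "Loki", group(3) -> "DSNP.WEB-DL.720p-TOMMY"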
search_name = re.search(r'^(.*)\s(?:-\s\d+x\d+|(\(Season\s\d+\)))?\s\((.*)\)$', result['nev'])
name = search_name.group(1) if search_name else ''
release = search_name.group(3) if search_name else ''
                # 'evadpakk' == 0 means a single-episode subtitle; otherwise it is a season pack
target = subtitle_list if not int(result['evadpakk']) else season_pack_list
# Check that this sub_id is not already in the list
                if sub_id not in target:
target[sub_id] = {
'id': sub_id,
'name': name,
'language': self.get_language(result['language']),
'season': int(result['evad']),
                        'episode': int(result['ep']) if not int(result['evadpakk']) else int(video.episode),
'uploader': result['feltolto'],
'releases': [release],
'fname': result['fnev']
}
else:
target[sub_id]['releases'].append(release)
else:
logger.debug("Invalid results: %s", results)
return subtitle_list, season_pack_list
def process_subs(self, languages, video, url):
if isinstance(video, Episode):
return None
subtitles = []
logger.info('URL for subtitles %s', url)
r = self.session.get(url, timeout=10).content
soup = ParserBeautifulSoup(r, ['lxml'])
tables = soup.find_all("table")
tables = tables[0].find_all("tr")
i = 0
for table in tables:
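            # Only rows carrying the "vilagit" class are subtitle entries; the first two table rows are skipped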
if "vilagit" in str(table) and i > 1:
asked_for_episode = None
sub_season = None
sub_episode = None
sub_english = table.findAll("div", {"class": "eredeti"})
sub_english_name = re.search(r'(?<=<div class="eredeti">).*?(?=</div>)', str(sub_english))
sub_english_name = sub_english_name.group() if sub_english_name else ''
sub_english_name = sub_english_name.split(' (')[0]
                sub_english_name = sub_english_name.replace('&amp;', '&')
sub_version = 'n/a'
if len(str(sub_english).split('(')) > 1:
                    sub_version = str(sub_english).split('(')[-1].split(')')[0]
# <small>Angol</small>
lang = table.find("small")
sub_language = re.search(r"(?<=<small>).*(?=</small>)", str(lang))
sub_language = sub_language.group() if sub_language else ''
sub_language = self.get_language(sub_language)
# <a href="/index.php?action=letolt&fnev=DCs Legends of Tomorrow - 03x11 - Here I Go Again.SVA.
# English.C.orig.Addic7ed.com.srt&felirat=1519162191">
                link = str(table.findAll("a")[-1]).replace("amp;", "")
sub_downloadlink = re.search(r'(?<=href="/).*(?=">)', link)
sub_downloadlink = sub_downloadlink.group() if sub_downloadlink else ''
sub_downloadlink = self.server_url + sub_downloadlink
sub_id = re.search(r"(?<=felirat=).*(?=\">)", link)
sub_id = sub_id.group() if sub_id else ''
sub_year = video.year
sub_releases = [s.strip() for s in sub_version.split(',')]
uploader = ''
for item in table.contents[7].contents:
if isinstance(item, Tag):
                        uploader = item.text.strip('\r\n\t')
elif isinstance(item, NavigableString):
                        uploader = item.strip('\r\n\t')
sub_imdb_id = self.find_imdb_id(sub_id)
subtitle = SuperSubtitlesSubtitle(sub_language, sub_downloadlink, sub_id, sub_english_name.strip(),
sub_season, sub_episode, sub_version, sub_releases, sub_year,
sub_imdb_id, uploader, asked_for_episode,
asked_for_release_group=video.release_group)
if subtitle.language in languages:
subtitles.append(subtitle)
i = i + 1
return subtitles
def list_subtitles(self, video, languages):
titles = []
if isinstance(video, Episode):
titles = [video.series] + video.alternative_series
elif isinstance(video, Movie):
titles = [video.title] + video.alternative_titles
subtitles = []
for title in titles:
subs = self.query(title, languages, video=video)
if subs:
for item in subs:
if isinstance(video, Episode):
fixed_title = fix_tv_naming(item.series)
else:
fixed_title = fix_movie_naming(item.series)
# Check for the original and the fixed titles too
if any(x in (fixed_title.strip(), item.series) for x in titles):
subtitles.append(item)
time.sleep(self.multi_result_throttle)
return subtitles
def download_subtitle(self, subtitle):
r = self.session.get(subtitle.page_link, timeout=10)
r.raise_for_status()
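        # The download endpoint returns an archive; extract the subtitle for the requested episode from it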
archive = get_archive_from_bytes(r.content)
if archive is None:
raise APIThrottled(f"Invalid archive from {subtitle.page_link}")
subtitle.content = get_subtitle_from_archive(archive, episode=subtitle.episode or None)