1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
|
# -*- coding: utf-8 -*-
import io
import logging
from random import randint
from zipfile import ZipFile, is_zipfile
from rarfile import RarFile, is_rarfile
from requests import Session
import chardet
from bs4 import UnicodeDammit
from subzero.language import Language
from subliminal_patch.providers import Provider
from subliminal_patch.providers.mixins import ProviderSubtitleArchiveMixin
from subliminal_patch.subtitle import Subtitle
from subliminal.exceptions import ProviderError
from subliminal.providers import ParserBeautifulSoup
from subliminal.subtitle import sanitize
from subliminal.video import Movie
from .utils import FIRST_THOUSAND_OR_SO_USER_AGENTS as AGENT_LIST
logger = logging.getLogger(__name__)
class SubtitriIdSubtitle(Subtitle):
    """subtitri.id.lv Subtitle.

    Carries the metadata scraped from a subtitle detail page (title, year,
    IMDb id) plus the direct archive download link used by the provider.
    """
    provider_name = 'subtitriid'

    def __init__(self, language, page_link, download_link, title, year, imdb_id):
        super(SubtitriIdSubtitle, self).__init__(language, page_link=page_link)
        self.download_link = download_link
        self.title = title
        self.year = year
        self.imdb_id = imdb_id
        # populated by get_matches(); None until matching has been attempted
        self.matches = None

    @property
    def id(self):
        # the download link is unique per subtitle and serves as the id
        return self.download_link

    def get_matches(self, video):
        """Return the set of properties of *video* matched by this subtitle.

        Only Movie videos are matched (title, year, imdb_id); any other
        video type yields an empty set.
        """
        matches = set()
        if isinstance(video, Movie):
            # title
            if video.title and sanitize(self.title) == sanitize(video.title):
                matches.add('title')
            # year
            if video.year and self.year == video.year:
                matches.add('year')
            # imdb id
            if video.imdb_id and self.imdb_id == video.imdb_id:
                matches.add('imdb_id')
        self.matches = matches
        return matches

    def guess_encoding(self):
        """Guess encoding using chardet.

        Overrides the default subtitle guess_encoding method to not include
        language-specific encodings guessing; chardet detection seems to
        yield better results for this provider.

        :return: the guessed encoding.
        :rtype: str
        """
        # return the cached guess if we already detected it once
        if self._guessed_encoding:
            return self._guessed_encoding
        logger.info('Guessing encoding for language %s', self.language)
        # guess/detect encoding using chardet
        encoding = chardet.detect(self.content)['encoding']
        logger.info('Chardet found encoding %s', encoding)
        if not encoding:
            # fallback on bs4's UnicodeDammit detection
            logger.info('Falling back to bs4 detection')
            a = UnicodeDammit(self.content)
            logger.info("bs4 detected encoding: %s", a.original_encoding)
            if a.original_encoding:
                self._guessed_encoding = a.original_encoding
                return a.original_encoding
            # fix: the message must be %-formatted into the exception;
            # passing (fmt, self) as two args never interpolated %s
            raise ValueError(u"Couldn't guess the proper encoding for %s" % self)
        self._guessed_encoding = encoding
        return encoding
class SubtitriIdProvider(Provider, ProviderSubtitleArchiveMixin):
    """subtitri.id.lv Provider.

    Scrapes the site's search results, follows each result to its detail
    page for metadata, and downloads subtitle archives (zip/rar) or raw
    subtitle files.
    """
    subtitle_class = SubtitriIdSubtitle
    languages = {Language('lva', 'LV')} | {Language.fromalpha2(l) for l in ['lv']}
    server_url = 'http://subtitri.id.lv'
    search_url = server_url + '/search/'

    def __init__(self):
        self.session = None

    def initialize(self):
        self.session = Session()
        # random User-Agent to avoid trivial scraper blocking
        self.session.headers['User-Agent'] = AGENT_LIST[randint(0, len(AGENT_LIST) - 1)]
        self.session.headers['Referer'] = self.server_url

    def terminate(self):
        self.session.close()

    def query(self, title):
        """Search the site for *title* and return a list of subtitles.

        Each search hit requires a second request to its detail page to
        collect the year, IMDb id and download link.
        """
        subtitles = []

        r = self.session.get(self.search_url, params={'q': title}, timeout=10)
        r.raise_for_status()

        if not r.content:
            logger.debug('No data returned from provider')
            return []

        soup = ParserBeautifulSoup(r.content.decode('utf-8', 'ignore'), ['lxml', 'html.parser'])

        # loop over subtitle cells
        rows = soup.select('.eBlock')
        for row in rows:
            result_anchor_el = row.select_one('.eTitle > a')

            # page link
            page_link = result_anchor_el.get('href')

            # fetch/parse additional info from the detail page
            r = self.session.get(page_link, timeout=10)
            r.raise_for_status()
            soup = ParserBeautifulSoup(r.content.decode('utf-8', 'ignore'), ['lxml', 'html.parser'])

            # title: the last entry of the "Original / Localized" list
            movie_titles_string = soup.select_one('.main-header').text.strip()
            movie_titles_list = movie_titles_string.split(' / ')
            title = movie_titles_list[-1]

            # year: convert to int so it compares equal to video.year in
            # get_matches (video.year is an int; a str would never match)
            year_text = soup.select_one('#film-page-year').text.strip()
            try:
                year = int(year_text)
            except ValueError:
                year = None

            # imdb id, taken from the trailing path segment of the link
            imdb_link = soup.select_one('#actors-page > a').get('href')
            imdb_id = imdb_link.split('/')[-2]

            # download link
            href = soup.select_one('.hvr').get('href')
            download_link = self.server_url + href

            # create/add the subtitle
            subtitle = self.subtitle_class(Language.fromalpha2('lv'), page_link, download_link, title, year, imdb_id)
            logger.debug('subtitri.id.lv: Found subtitle %r', subtitle)
            subtitles.append(subtitle)

        return subtitles

    def list_subtitles(self, video, languages):
        """Query once per known title of *video*; only Movies are supported."""
        subtitles = []
        if isinstance(video, Movie):
            titles = [video.title] + video.alternative_titles
            for title in titles:
                subtitles += [s for s in self.query(title) if s.language in languages]
        return subtitles

    def download_subtitle(self, subtitle):
        """Download *subtitle*'s content, unpacking a zip/rar archive if needed.

        :raises ProviderError: when the payload is neither a known archive
            nor a valid bare subtitle file.
        """
        if isinstance(subtitle, SubtitriIdSubtitle):
            # download the subtitle
            r = self.session.get(subtitle.download_link, timeout=10)
            r.raise_for_status()

            # open the archive
            archive_stream = io.BytesIO(r.content)
            if is_rarfile(archive_stream):
                archive = RarFile(archive_stream)
            elif is_zipfile(archive_stream):
                archive = ZipFile(archive_stream)
            else:
                # not an archive: treat the payload as a bare subtitle file
                subtitle.content = r.content
                if subtitle.is_valid():
                    return
                subtitle.content = None
                raise ProviderError('Unidentified archive type')

            subtitle.content = self.get_subtitle_from_archive(subtitle, archive)
|