-rw-r--r--  README.md | 8
-rw-r--r--  bazarr.py | 2
-rw-r--r--  bazarr/check_update.py | 2
-rw-r--r--  bazarr/embedded_subs_reader.py | 21
-rw-r--r--  bazarr/get_languages.py | 44
-rw-r--r--  bazarr/get_movies.py | 21
-rw-r--r--  bazarr/get_subtitle.py | 87
-rw-r--r--  bazarr/list_subtitles.py | 39
-rw-r--r--  bazarr/logger.py | 1
-rw-r--r--  bazarr/main.py | 12
-rw-r--r--  libs/apprise/Apprise.py | 4
-rw-r--r--  libs/apprise/AppriseAsset.py | 35
-rw-r--r--  libs/apprise/AppriseAttachment.py | 2
-rw-r--r--  libs/apprise/AppriseConfig.py | 35
-rw-r--r--  libs/apprise/__init__.py | 6
-rw-r--r--  libs/apprise/attachment/AttachBase.py | 63
-rw-r--r--  libs/apprise/attachment/AttachHTTP.py | 28
-rw-r--r--  libs/apprise/cli.py | 4
-rw-r--r--  libs/apprise/config/ConfigBase.py | 114
-rw-r--r--  libs/apprise/config/ConfigMemory.py | 82
-rw-r--r--  libs/apprise/i18n/apprise.pot | 31
-rw-r--r--  libs/apprise/plugins/NotifyD7Networks.py | 4
-rw-r--r--  libs/apprise/plugins/NotifyDiscord.py | 14
-rw-r--r--  libs/apprise/plugins/NotifyEmail.py | 47
-rw-r--r--  libs/apprise/plugins/NotifyEnigma2.py | 352
-rw-r--r--  libs/apprise/plugins/NotifyGitter.py | 4
-rw-r--r--  libs/apprise/plugins/NotifyJoin.py | 26
-rw-r--r--  libs/apprise/plugins/NotifyKavenegar.py | 377
-rw-r--r--  libs/apprise/plugins/NotifyMSG91.py | 2
-rw-r--r--  libs/apprise/plugins/NotifyMailgun.py | 2
-rw-r--r--  libs/apprise/plugins/NotifyMatrix.py | 125
-rw-r--r--  libs/apprise/plugins/NotifyMessageBird.py | 2
-rw-r--r--  libs/apprise/plugins/NotifyNexmo.py | 22
-rw-r--r--  libs/apprise/plugins/NotifyNextcloud.py | 294
-rw-r--r--  libs/apprise/plugins/NotifyPushBullet.py | 15
-rw-r--r--  libs/apprise/plugins/NotifyPushSafer.py | 832
-rw-r--r--  libs/apprise/plugins/NotifyPushed.py | 2
-rw-r--r--  libs/apprise/plugins/NotifyPushover.py | 214
-rw-r--r--  libs/apprise/plugins/NotifySNS.py | 2
-rw-r--r--  libs/apprise/plugins/NotifySinch.py | 476
-rw-r--r--  libs/apprise/plugins/NotifySlack.py | 56
-rw-r--r--  libs/apprise/plugins/NotifyTelegram.py | 62
-rw-r--r--  libs/apprise/plugins/NotifyTwilio.py | 6
-rw-r--r--  libs/apprise/plugins/NotifyXMPP/SleekXmppAdapter.py | 208
-rw-r--r--  libs/apprise/plugins/NotifyXMPP/__init__.py (renamed from libs/apprise/plugins/NotifyXMPP.py) | 128
-rw-r--r--  libs/apprise/plugins/__init__.py | 15
-rw-r--r--  libs/knowit/__init__.py | 27
-rw-r--r--  libs/knowit/__main__.py | 151
-rw-r--r--  libs/knowit/api.py | 132
-rw-r--r--  libs/knowit/config.py | 59
-rw-r--r--  libs/knowit/core.py | 36
-rw-r--r--  libs/knowit/defaults.yml | 628
-rw-r--r--  libs/knowit/properties/__init__.py | 27
-rw-r--r--  libs/knowit/properties/audio/__init__.py | 8
-rw-r--r--  libs/knowit/properties/audio/bitratemode.py | 10
-rw-r--r--  libs/knowit/properties/audio/channels.py | 26
-rw-r--r--  libs/knowit/properties/audio/codec.py | 24
-rw-r--r--  libs/knowit/properties/audio/compression.py | 10
-rw-r--r--  libs/knowit/properties/audio/profile.py | 10
-rw-r--r--  libs/knowit/properties/basic.py | 27
-rw-r--r--  libs/knowit/properties/duration.py | 38
-rw-r--r--  libs/knowit/properties/language.py | 28
-rw-r--r--  libs/knowit/properties/quantity.py | 27
-rw-r--r--  libs/knowit/properties/subtitle/__init__.py | 4
-rw-r--r--  libs/knowit/properties/subtitle/format.py | 18
-rw-r--r--  libs/knowit/properties/video/__init__.py | 10
-rw-r--r--  libs/knowit/properties/video/codec.py | 16
-rw-r--r--  libs/knowit/properties/video/encoder.py | 10
-rw-r--r--  libs/knowit/properties/video/profile.py | 41
-rw-r--r--  libs/knowit/properties/video/ratio.py | 35
-rw-r--r--  libs/knowit/properties/video/scantype.py | 10
-rw-r--r--  libs/knowit/properties/yesno.py | 25
-rw-r--r--  libs/knowit/property.py | 137
-rw-r--r--  libs/knowit/provider.py | 135
-rw-r--r--  libs/knowit/providers/__init__.py | 7
-rw-r--r--  libs/knowit/providers/enzyme.py | 153
-rw-r--r--  libs/knowit/providers/ffmpeg.py | 276
-rw-r--r--  libs/knowit/providers/mediainfo.py | 335
-rw-r--r--  libs/knowit/rule.py | 17
-rw-r--r--  libs/knowit/rules/__init__.py | 11
-rw-r--r--  libs/knowit/rules/audio/__init__.py | 7
-rw-r--r--  libs/knowit/rules/audio/atmos.py | 33
-rw-r--r--  libs/knowit/rules/audio/channels.py | 57
-rw-r--r--  libs/knowit/rules/audio/codec.py | 13
-rw-r--r--  libs/knowit/rules/audio/dtshd.py | 32
-rw-r--r--  libs/knowit/rules/language.py | 33
-rw-r--r--  libs/knowit/rules/subtitle/__init__.py | 5
-rw-r--r--  libs/knowit/rules/subtitle/closedcaption.py | 18
-rw-r--r--  libs/knowit/rules/subtitle/hearingimpaired.py | 18
-rw-r--r--  libs/knowit/rules/video/__init__.py | 4
-rw-r--r--  libs/knowit/rules/video/resolution.py | 75
-rw-r--r--  libs/knowit/serializer.py | 155
-rw-r--r--  libs/knowit/units.py | 24
-rw-r--r--  libs/knowit/utils.py | 95
-rw-r--r--  libs/pymediainfo/AUTHORS | 3
-rw-r--r--  libs/pymediainfo/LICENSE | 24
-rw-r--r--  libs/pymediainfo/README.rst | 27
-rw-r--r--  libs/pymediainfo/__init__.py | 320
-rw-r--r--  libs/pyprobe/__init__.py | 2
-rw-r--r--  libs/pyprobe/baseparser.py | 41
-rw-r--r--  libs/pyprobe/ffprobeparsers.py | 216
-rw-r--r--  libs/pyprobe/pyprobe.py | 226
-rw-r--r--  libs/pysubs2/exceptions.py | 3
-rw-r--r--  libs/pysubs2/ssastyle.py | 1
-rw-r--r--  libs/pysubs2/subrip.py | 7
-rw-r--r--  libs/pysubs2/substation.py | 9
-rw-r--r--  libs/subliminal_patch/providers/bsplayer.py | 2
-rw-r--r--  libs/subliminal_patch/providers/legendasdivx.py | 2
-rw-r--r--  libs/subliminal_patch/providers/opensubtitles.py | 1
-rw-r--r--  libs/subliminal_patch/providers/regielive.py | 182
-rw-r--r--  libs/subliminal_patch/providers/subdivx.py | 124
-rw-r--r--  libs/subliminal_patch/providers/subs4free.py | 3
-rw-r--r--  libs/subliminal_patch/providers/subs4series.py | 4
-rw-r--r--  libs/subliminal_patch/providers/subssabbz.py | 38
-rw-r--r--  libs/subliminal_patch/providers/subsunacs.py | 60
-rw-r--r--  libs/subliminal_patch/providers/subz.py | 302
-rw-r--r--  libs/subliminal_patch/providers/wizdom.py | 210
-rw-r--r--  libs/subliminal_patch/providers/yavkanet.py | 179
-rw-r--r--  libs/subliminal_patch/providers/zimuku.py | 358
-rw-r--r--  libs/subliminal_patch/subtitle.py | 15
-rw-r--r--  libs/version.txt | 2
-rw-r--r--  libs2/osdefs.h (renamed from libs/osdefs.h) | 0
-rw-r--r--  libs2/winreparse.h (renamed from libs/winreparse.h) | 0
-rw-r--r--  views/episodes.tpl | 23
-rw-r--r--  views/menu.tpl | 6
-rw-r--r--  views/movie.tpl | 27
-rw-r--r--  views/providers.tpl | 87
127 files changed, 7707 insertions(+), 1802 deletions(-)
diff --git a/README.md b/README.md
index 18064e4c7..378ac78c7 100644
--- a/README.md
+++ b/README.md
@@ -3,12 +3,9 @@ Bazarr is a companion application to Sonarr and Radarr. It manages and downloads
Be aware that Bazarr doesn't scan disk to detect series and movies: It only takes care of the series and movies that are indexed in Sonarr and Radarr.
-## Support on Beerpay
+## Support on Paypal
At the request of some, here is a way to demonstrate your appreciation for the efforts made in the development of Bazarr:
-[![Beerpay](https://beerpay.io/morpheus65535/bazarr/badge.svg?style=beer-square)](https://beerpay.io/morpheus65535/bazarr)
-
-You can also make a wish but keep in mind that we do not commit to make it happen:
-[![Beerpay](https://beerpay.io/morpheus65535/bazarr/make-wish.svg?style=flat-square)](https://beerpay.io/morpheus65535/bazarr?focus=wish)
+[![Donate](https://img.shields.io/badge/Donate-PayPal-green.svg)](https://www.paypal.com/cgi-bin/webscr?cmd=_s-xclick&hosted_button_id=XHHRWXT9YB7WE&source=url)
# Status
[![GitHub issues](https://img.shields.io/github/issues/morpheus65535/bazarr.svg?style=flat-square)](https://github.com/morpheus65535/bazarr/issues)
@@ -69,6 +66,7 @@ If you need something that is not already part of Bazarr, feel free to create a
* Supersubtitles
* Titlovi
* TVSubtitles
+* Wizdom
* XSubs
* Zimuku
diff --git a/bazarr.py b/bazarr.py
index 8f1b7da03..581689f76 100644
--- a/bazarr.py
+++ b/bazarr.py
@@ -14,7 +14,7 @@ from libs.six import PY3
def check_python_version():
python_version = platform.python_version_tuple()
minimum_py2_tuple = (2, 7, 13)
- minimum_py3_tuple = (3, 6, 0)
+ minimum_py3_tuple = (3, 7, 0)
minimum_py2_str = ".".join(str(i) for i in minimum_py2_tuple)
minimum_py3_str = ".".join(str(i) for i in minimum_py3_tuple)
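
The hunk above only raises the Python 3 floor from 3.6.0 to 3.7.0; the surrounding function builds the version strings from these tuples. As a rough, hypothetical illustration of such a gate (using sys.version_info rather than the string tuple the original parses):

    import sys

    MINIMUM_PY2 = (2, 7, 13)
    MINIMUM_PY3 = (3, 7, 0)   # floor raised from (3, 6, 0) by this change

    def check_python_version():
        # sys.version_info already yields integers, so tuples compare directly
        minimum = MINIMUM_PY3 if sys.version_info[0] == 3 else MINIMUM_PY2
        if sys.version_info[:3] < minimum:
            raise SystemExit("Please upgrade Python to at least "
                             + ".".join(str(i) for i in minimum))

    if __name__ == "__main__":
        check_python_version()
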
diff --git a/bazarr/check_update.py b/bazarr/check_update.py
index 965973828..4938724a2 100644
--- a/bazarr/check_update.py
+++ b/bazarr/check_update.py
@@ -60,7 +60,7 @@ def check_and_apply_update():
if releases is None:
notifications.write(msg='Could not get releases from GitHub.',
queue='check_update', type='warning')
- logging.warn('BAZARR Could not get releases from GitHub.')
+ logging.warning('BAZARR Could not get releases from GitHub.')
return
else:
release = releases[0]
diff --git a/bazarr/embedded_subs_reader.py b/bazarr/embedded_subs_reader.py
index 3cc5c98b4..8a2f12e86 100644
--- a/bazarr/embedded_subs_reader.py
+++ b/bazarr/embedded_subs_reader.py
@@ -3,11 +3,9 @@ import enzyme
from enzyme.exceptions import MalformedMKVError
import logging
import os
-import subprocess
-import locale
+from knowit import api
from utils import get_binary
-from pyprobe.pyprobe import VideoFileParser
class NotMKVAndNoFFprobe(Exception):
pass
@@ -23,11 +21,18 @@ class EmbeddedSubsReader:
subtitles_list = []
if self.ffprobe:
- parser = VideoFileParser(ffprobe=self.ffprobe, includeMissing=True, rawMode=False)
- data = parser.parseFfprobe(file)
-
- for detected_language in data['subtitles']:
- subtitles_list.append([detected_language['language'], detected_language['forced'], detected_language["codec"]])
+ api.initialize({'provider': 'ffmpeg', 'ffmpeg': self.ffprobe})
+ data = api.know(file)
+
+ if 'subtitle' in data:
+ for detected_language in data['subtitle']:
+ if 'language' in detected_language:
+ language = detected_language['language'].alpha3
+ forced = detected_language['forced'] if 'forced' in detected_language else None
+ codec = detected_language['format'] if 'format' in detected_language else None
+ subtitles_list.append([language, forced, codec])
+ else:
+ continue
else:
if os.path.splitext(file)[1] == '.mkv':
with open(file, 'rb') as f:
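
For reference, a standalone sketch of the knowit call pattern this file switches to. The api.initialize()/api.know() calls, the 'ffmpeg' provider key and the 'subtitle', 'language', 'forced' and 'format' track keys all appear in the hunk above; the function name and paths are placeholders.

    from knowit import api

    def list_embedded_subtitles(video_path, ffprobe_path):
        """Return [alpha3 language, forced flag, codec] for each embedded subtitle track."""
        api.initialize({'provider': 'ffmpeg', 'ffmpeg': ffprobe_path})
        data = api.know(video_path)

        subtitles = []
        for track in data.get('subtitle', []):
            if 'language' not in track:
                continue  # skip tracks knowit could not identify
            subtitles.append([
                track['language'].alpha3,   # babelfish Language object
                track.get('forced'),        # None when the key is absent
                track.get('format'),        # subtitle codec reported by knowit
            ])
        return subtitles

    # Example (paths are placeholders):
    # print(list_embedded_subtitles('/media/movie.mkv', '/usr/bin/ffprobe'))
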
diff --git a/bazarr/get_languages.py b/bazarr/get_languages.py
index 5e61e789e..4e8250640 100644
--- a/bazarr/get_languages.py
+++ b/bazarr/get_languages.py
@@ -3,6 +3,7 @@
from __future__ import absolute_import
import os
import pycountry
+import ast
from get_args import args
from subzero.language import Language
@@ -76,5 +77,48 @@ def get_language_set():
return language_set
+def clean_desired_languages():
+ from list_subtitles import list_missing_subtitles, list_missing_subtitles_movies
+ enabled_languages = []
+ enabled_languages_temp = database.execute("SELECT code2 FROM table_settings_languages WHERE enabled=1")
+ for language in enabled_languages_temp:
+ enabled_languages.append(language['code2'])
+
+ series_languages = database.execute("SELECT sonarrSeriesId, languages FROM table_shows")
+ movies_languages = database.execute("SELECT radarrId, languages FROM table_movies")
+
+ for item in series_languages:
+ if item['languages'] != 'None':
+ try:
+ languages_list = ast.literal_eval(item['languages'])
+ except:
+ pass
+ else:
+ cleaned_languages_list = []
+ for language in languages_list:
+ if language in enabled_languages:
+ cleaned_languages_list.append(language)
+ if cleaned_languages_list != languages_list:
+ database.execute("UPDATE table_shows SET languages=? WHERE sonarrSeriesId=?",
+ (str(cleaned_languages_list), item['sonarrSeriesId']))
+ list_missing_subtitles(no=item['sonarrSeriesId'])
+
+ for item in movies_languages:
+ if item['languages'] != 'None':
+ try:
+ languages_list = ast.literal_eval(item['languages'])
+ except:
+ pass
+ else:
+ cleaned_languages_list = []
+ for language in languages_list:
+ if language in enabled_languages:
+ cleaned_languages_list.append(language)
+ if cleaned_languages_list != languages_list:
+ database.execute("UPDATE table_movies SET languages=? WHERE radarrId=?",
+ (str(cleaned_languages_list), item['radarrId']))
+ list_missing_subtitles_movies(no=item['radarrId'])
+
+
if __name__ == '__main__':
load_language_in_db()
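
The core of clean_desired_languages() is the per-row filtering step: the stored languages column is a Python-literal string, parsed with ast.literal_eval and intersected with the currently enabled codes. A minimal sketch of that step in isolation (function name and return shape are illustrative; the database plumbing is omitted):

    import ast

    def clean_language_list(stored_value, enabled_languages):
        """Return (cleaned_list, changed) for one stored languages column value."""
        if stored_value == 'None':
            return None, False
        try:
            languages = ast.literal_eval(stored_value)   # e.g. "['en', 'fr']"
        except (ValueError, SyntaxError):
            return None, False
        cleaned = [code for code in languages if code in enabled_languages]
        # `changed` tells the caller to update the row and re-list missing subtitles
        return cleaned, cleaned != languages

    # Example: clean_language_list("['en', 'fr']", {'en'}) -> (['en'], True)
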
diff --git a/bazarr/get_movies.py b/bazarr/get_movies.py
index 8a8837d4f..36dd63c39 100644
--- a/bazarr/get_movies.py
+++ b/bazarr/get_movies.py
@@ -25,6 +25,8 @@ def update_all_movies():
def update_movies():
logging.debug('BAZARR Starting movie sync from Radarr.')
apikey_radarr = settings.radarr.apikey
+
+ radarr_version = get_radarr_version()
movie_default_enabled = settings.general.getboolean('movie_default_enabled')
movie_default_language = settings.general.movie_default_language
movie_default_hi = settings.general.movie_default_hi
@@ -130,6 +132,8 @@ def update_movies():
videoCodec = None
audioCodec = None
+ audio_language = profile_id_to_language(movie['qualityProfileId'], audio_profiles)
+
# Add movies in radarr to current movies list
current_movies_radarr.append(six.text_type(movie['tmdbId']))
@@ -140,7 +144,7 @@ def update_movies():
'tmdbId': six.text_type(movie["tmdbId"]),
'poster': poster,
'fanart': fanart,
- 'audio_language': profile_id_to_language(movie['qualityProfileId'], audio_profiles),
+ 'audio_language': audio_language,
'sceneName': sceneName,
'monitored': six.text_type(bool(movie['monitored'])),
'year': six.text_type(movie['year']),
@@ -165,7 +169,7 @@ def update_movies():
'overview': overview,
'poster': poster,
'fanart': fanart,
- 'audio_language': profile_id_to_language(movie['qualityProfileId'], audio_profiles),
+ 'audio_language': audio_language,
'sceneName': sceneName,
'monitored': six.text_type(bool(movie['monitored'])),
'sortTitle': movie['sortTitle'],
@@ -189,7 +193,7 @@ def update_movies():
'overview': overview,
'poster': poster,
'fanart': fanart,
- 'audio_language': profile_id_to_language(movie['qualityProfileId'], audio_profiles),
+ 'audio_language': audio_language,
'sceneName': sceneName,
'monitored': six.text_type(bool(movie['monitored'])),
'sortTitle': movie['sortTitle'],
@@ -227,8 +231,8 @@ def update_movies():
for updated_movie in movies_to_update_list:
query = dict_converter.convert(updated_movie)
- database.execute('''UPDATE table_movies SET ''' + query.keys_update + ''' WHERE radarrId = ?''',
- query.values + (updated_movie['radarrId'],))
+ database.execute('''UPDATE table_movies SET ''' + query.keys_update + ''' WHERE tmdbId = ?''',
+ query.values + (updated_movie['tmdbId'],))
altered_movies.append([updated_movie['tmdbId'],
updated_movie['path'],
updated_movie['radarrId'],
@@ -275,8 +279,11 @@ def get_profile_list():
radarr_version = get_radarr_version()
profiles_list = []
# Get profiles data from radarr
+ if radarr_version.startswith('0'):
+ url_radarr_api_movies = url_radarr() + "/api/profile?apikey=" + apikey_radarr
+ else:
+ url_radarr_api_movies = url_radarr() + "/api/v3/qualityprofile?apikey=" + apikey_radarr
- url_radarr_api_movies = url_radarr() + "/api/profile?apikey=" + apikey_radarr
try:
profiles_json = requests.get(url_radarr_api_movies, timeout=60, verify=False)
except requests.exceptions.ConnectionError as errc:
@@ -290,7 +297,7 @@ def get_profile_list():
if radarr_version.startswith('0'):
for profile in profiles_json.json():
profiles_list.append([profile['id'], profile['language'].capitalize()])
- elif radarr_version.startswith('2'):
+ else:
for profile in profiles_json.json():
profiles_list.append([profile['id'], profile['language']['name'].capitalize()])
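
The two hunks above make the profile lookup version-aware: Radarr 0.x keeps the legacy /api/profile endpoint with a plain-string language field, while newer versions use /api/v3/qualityprofile where language is an object. A hedged sketch of that branch on its own (base_url and api_key stand in for url_radarr() and the configured key; error handling omitted):

    import requests

    def fetch_quality_profiles(base_url, api_key, radarr_version):
        """Return [id, language name] pairs from Radarr for either API generation."""
        if radarr_version.startswith('0'):
            url = base_url + "/api/profile?apikey=" + api_key
        else:
            url = base_url + "/api/v3/qualityprofile?apikey=" + api_key

        profiles = []
        for profile in requests.get(url, timeout=60, verify=False).json():
            if radarr_version.startswith('0'):
                # legacy API: language is a plain string
                profiles.append([profile['id'], profile['language'].capitalize()])
            else:
                # v3 API: language is an object with a 'name' key
                profiles.append([profile['id'], profile['language']['name'].capitalize()])
        return profiles
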
diff --git a/bazarr/get_subtitle.py b/bazarr/get_subtitle.py
index 93aa0f6e7..232d62858 100644
--- a/bazarr/get_subtitle.py
+++ b/bazarr/get_subtitle.py
@@ -34,7 +34,7 @@ from notifier import send_notifications, send_notifications_movie
from get_providers import get_providers, get_providers_auth, provider_throttle, provider_pool
from get_args import args
from queueconfig import notifications
-from pyprobe.pyprobe import VideoFileParser
+from knowit import api
from database import database, dict_mapper
from analytics import track_event
@@ -42,6 +42,7 @@ import six
from six.moves import range
from functools import reduce
from locale import getpreferredencoding
+import chardet
def get_video(path, title, sceneName, use_scenename, providers=None, media_type="movie"):
@@ -346,10 +347,20 @@ def manual_search(path, language, hi, forced, providers, providers_auth, sceneNa
not_matched = scores - matches
s.score = score
- releases = ['n/a']
+ releases = []
if hasattr(s, 'release_info'):
if s.release_info is not None:
- releases = s.release_info.split(',')
+ for s_item in s.release_info.split(','):
+ if s_item.strip():
+ releases.append(s_item)
+
+ if len(releases) == 0:
+ releases = ['n/a']
+
+ if s.uploader and s.uploader.strip():
+ s_uploader = s.uploader.strip()
+ else:
+ s_uploader = 'n/a'
subtitles_list.append(
dict(score=round((score / max_score * 100), 2),
@@ -359,7 +370,7 @@ def manual_search(path, language, hi, forced, providers, providers_auth, sceneNa
provider=s.provider_name,
subtitle=codecs.encode(pickle.dumps(s.make_picklable()), "base64").decode(),
url=s.page_link, matches=list(matches), dont_matches=list(not_matched),
- release_info=releases))
+ release_info=releases, uploader=s_uploader))
final_subtitles = sorted(subtitles_list, key=lambda x: (x['orig_score'], x['score_without_hash']),
reverse=True)
@@ -470,6 +481,10 @@ def manual_upload_subtitle(path, language, forced, title, scene_name, media_type
chmod = int(settings.general.chmod, 8) if not sys.platform.startswith(
'win') and settings.general.getboolean('chmod_enabled') else None
+ dest_directory = get_target_folder(path)
+ fake_video_path = None
+ if dest_directory:
+ fake_video_path = os.path.join(dest_directory, os.path.split(path)[1])
_, ext = os.path.splitext(subtitle.filename)
language = alpha3_from_alpha2(language)
@@ -482,7 +497,7 @@ def manual_upload_subtitle(path, language, forced, title, scene_name, media_type
if forced:
lang_obj = Language.rebuild(lang_obj, forced=True)
- subtitle_path = get_subtitle_path(video_path=force_unicode(path),
+ subtitle_path = get_subtitle_path(video_path=force_unicode(fake_video_path if fake_video_path else path),
language=None if single else lang_obj,
extension=ext,
forced_tag=forced)
@@ -492,7 +507,34 @@ def manual_upload_subtitle(path, language, forced, title, scene_name, media_type
if os.path.exists(subtitle_path):
os.remove(subtitle_path)
- subtitle.save(subtitle_path)
+ if settings.general.utf8_encode:
+ try:
+ os.remove(subtitle_path + ".tmp")
+ except:
+ pass
+
+ subtitle.save(subtitle_path + ".tmp")
+
+ with open(subtitle_path + ".tmp", 'rb') as fr:
+ text = fr.read()
+
+ try:
+ guess = chardet.detect(text)
+ text = text.decode(guess["encoding"])
+ text = text.encode('utf-8')
+ except UnicodeError:
+ logging.exception("BAZARR subtitles file doesn't seems to be text based. Skipping this file: " +
+ subtitle_path)
+ else:
+ with open(subtitle_path, 'wb') as fw:
+ fw.write(text)
+ finally:
+ try:
+ os.remove(subtitle_path + ".tmp")
+ except:
+ pass
+ else:
+ subtitle.save(subtitle_path)
if chmod:
os.chmod(subtitle_path, chmod)
@@ -868,7 +910,7 @@ def refine_from_db(path, video):
"WHERE table_episodes.path = ?", (path_replace_reverse(path),), only_one=True)
if data:
- video.series, year, country = series_re.match(data['seriesTitle']).groups()
+ video.series = data['seriesTitle']
video.season = int(data['season'])
video.episode = int(data['episode'])
video.title = data['episodeTitle']
@@ -915,37 +957,30 @@ def refine_from_ffprobe(path, video):
else:
logging.debug('BAZARR FFprobe used is %s', exe)
- parser = VideoFileParser(ffprobe=exe, includeMissing=True, rawMode=False)
- data = parser.parseFfprobe(path)
+ api.initialize({'provider': 'ffmpeg', 'ffmpeg': exe})
+ data = api.know(path)
logging.debug('FFprobe found: %s', data)
- if 'videos' not in data:
+ if 'video' not in data:
logging.debug('BAZARR FFprobe was unable to find video tracks in the file!')
else:
- if 'resolution' in data['videos'][0]:
+ if 'resolution' in data['video'][0]:
if not video.resolution:
- if data['videos'][0]['resolution'][0] >= 3200:
- video.resolution = "2160p"
- elif data['videos'][0]['resolution'][0] >= 1800:
- video.resolution = "1080p"
- elif data['videos'][0]['resolution'][0] >= 1200:
- video.resolution = "720p"
- elif data['videos'][0]['resolution'][0] >= 0:
- video.resolution = "480p"
- if 'codec' in data['videos'][0]:
+ video.resolution = data['video'][0]['resolution']
+ if 'codec' in data['video'][0]:
if not video.video_codec:
- video.video_codec = data['videos'][0]['codec']
- if 'framerate' in data['videos'][0]:
+ video.video_codec = data['video'][0]['codec']
+ if 'frame_rate' in data['video'][0]:
if not video.fps:
- video.fps = data['videos'][0]['framerate']
+ video.fps = data['video'][0]['frame_rate']
- if 'audios' not in data:
+ if 'audio' not in data:
logging.debug('BAZARR FFprobe was unable to find audio tracks in the file!')
else:
- if 'codec' in data['audios'][0]:
+ if 'codec' in data['audio'][0]:
if not video.audio_codec:
- video.audio_codec = data['audios'][0]['codec'].upper()
+ video.audio_codec = data['audio'][0]['codec']
def upgrade_subtitles():
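
When utf8_encode is enabled, the upload path above saves to a .tmp file, sniffs the encoding with chardet, and rewrites the final subtitle as UTF-8. A compact sketch of just the detect-and-reencode step (file and settings handling omitted; slightly more defensive about a missing encoding than the hunk):

    import chardet

    def reencode_to_utf8(raw_bytes):
        """Return UTF-8 bytes for a subtitle payload, or None if it is not text."""
        guess = chardet.detect(raw_bytes)
        try:
            text = raw_bytes.decode(guess["encoding"])
        except (UnicodeError, LookupError, TypeError):
            # not text-based, or chardet returned no usable encoding
            return None
        return text.encode('utf-8')

    # Example:
    # with open('upload.srt', 'rb') as f:
    #     converted = reencode_to_utf8(f.read())
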
diff --git a/bazarr/list_subtitles.py b/bazarr/list_subtitles.py
index d0ab262af..c1a8794e5 100644
--- a/bazarr/list_subtitles.py
+++ b/bazarr/list_subtitles.py
@@ -13,9 +13,7 @@ import operator
from subliminal import core
from subliminal_patch import search_external_subtitles
from subzero.language import Language
-from bs4 import UnicodeDammit
import six
-from binaryornot.check import is_binary
from get_args import args
from database import database
@@ -27,6 +25,7 @@ from helper import path_replace, path_replace_movie, path_replace_reverse, \
from queueconfig import notifications
from embedded_subs_reader import embedded_subs_reader
import six
+import chardet
gc.enable()
@@ -367,25 +366,31 @@ def guess_external_subtitles(dest_folder, subtitles):
subtitle_path = os.path.join(dest_folder, subtitle)
if os.path.exists(subtitle_path) and os.path.splitext(subtitle_path)[1] in core.SUBTITLE_EXTENSIONS:
logging.debug("BAZARR falling back to file content analysis to detect language.")
- if is_binary(subtitle_path):
- logging.debug("BAZARR subtitles file doesn't seems to be text based. Skipping this file: " +
+ detected_language = None
+
+ # to improve performance, skip detection of files larger that 5M
+ if os.path.getsize(subtitle_path) > 5*1024*1024:
+ logging.debug("BAZARR subtitles file is too large to be text based. Skipping this file: " +
subtitle_path)
continue
- detected_language = None
-
- if six.PY3:
- with open(subtitle_path, 'r', errors='ignore') as f:
- text = f.read()
- else:
- with open(subtitle_path, 'r') as f:
- text = f.read()
-
+
+ with open(subtitle_path, 'rb') as f:
+ text = f.read()
+
try:
- encoding = UnicodeDammit(text)
- if six.PY2:
- text = text.decode(encoding.original_encoding)
+ # to improve performance, use only the first 32K to detect encoding
+ guess = chardet.detect(text[:32768])
+ logging.debug('BAZARR detected encoding %r', guess)
+ if guess["confidence"] < 0.6:
+ raise UnicodeError
+ if guess["confidence"] < 0.8 or guess["encoding"] == "ascii":
+ guess["encoding"] = "utf-8"
+ text = text.decode(guess["encoding"])
detected_language = guess_language(text)
- except Exception as e:
+ except UnicodeError:
+ logging.exception("BAZARR subtitles file doesn't seems to be text based. Skipping this file: " +
+ subtitle_path)
+ except:
logging.exception('BAZARR Error trying to detect language for this subtitles file: ' +
subtitle_path + ' You should try to delete this subtitles file manually and ask '
'Bazarr to download it again.')
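
The fallback language detection above now works on raw bytes with a few guard rails: files over 5 MB are skipped, only the first 32 KB is fed to chardet, guesses below 0.6 confidence are treated as non-text, and low-confidence or plain-ascii guesses are decoded as UTF-8. A self-contained sketch of those thresholds (guess_language is passed in to stand for the detector Bazarr already uses):

    import os
    import chardet

    MAX_SIZE = 5 * 1024 * 1024   # skip files larger than 5 MB
    SAMPLE = 32768               # only the first 32 KB feeds encoding detection

    def detect_subtitle_language(subtitle_path, guess_language):
        """Decode a subtitle file and guess its language, or return None."""
        if os.path.getsize(subtitle_path) > MAX_SIZE:
            return None
        with open(subtitle_path, 'rb') as f:
            raw = f.read()
        guess = chardet.detect(raw[:SAMPLE])
        if guess["confidence"] < 0.6:
            return None                      # too unsure: treat as non-text
        if guess["confidence"] < 0.8 or guess["encoding"] == "ascii":
            guess["encoding"] = "utf-8"      # prefer UTF-8 over shaky guesses
        try:
            text = raw.decode(guess["encoding"])
        except UnicodeError:
            return None
        return guess_language(text)
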
diff --git a/bazarr/logger.py b/bazarr/logger.py
index 79075e18f..6100258e0 100644
--- a/bazarr/logger.py
+++ b/bazarr/logger.py
@@ -95,6 +95,7 @@ def configure_logging(debug=False):
logging.getLogger("subliminal_patch").setLevel(logging.CRITICAL)
logging.getLogger("subzero").setLevel(logging.ERROR)
+ logging.getLogger("knowit").setLevel(logging.CRITICAL)
logging.getLogger("enzyme").setLevel(logging.CRITICAL)
logging.getLogger("guessit").setLevel(logging.WARNING)
logging.getLogger("rebulk").setLevel(logging.WARNING)
diff --git a/bazarr/main.py b/bazarr/main.py
index 8825154ac..f96b1e810 100644
--- a/bazarr/main.py
+++ b/bazarr/main.py
@@ -1,6 +1,6 @@
# coding=utf-8
-bazarr_version = '0.8.4.2'
+bazarr_version = '0.8.4.3'
import os
os.environ["SZ_USER_AGENT"] = "Bazarr/1"
@@ -47,7 +47,7 @@ from beaker.middleware import SessionMiddleware
from cork import Cork
from bottle import route, template, static_file, request, redirect, response, HTTPError, app, hook, abort
from datetime import timedelta, datetime
-from get_languages import load_language_in_db, language_from_alpha3, language_from_alpha2, alpha2_from_alpha3
+from get_languages import load_language_in_db, language_from_alpha3, language_from_alpha2, alpha2_from_alpha3, clean_desired_languages
from get_providers import get_providers, get_providers_auth, list_throttled_providers
from get_series import *
@@ -57,7 +57,7 @@ from get_movies import *
from list_subtitles import store_subtitles, store_subtitles_movie, series_scan_subtitles, movies_scan_subtitles, \
list_missing_subtitles, list_missing_subtitles_movies
from get_subtitle import download_subtitle, series_download_subtitles, movies_download_subtitles, \
- manual_search, manual_download_subtitle, manual_upload_subtitle
+ manual_search, manual_download_subtitle, manual_upload_subtitle, wanted_search_missing_subtitles
from utils import history_log, history_log_movie, get_sonarr_version, get_radarr_version
from helper import path_replace_reverse, path_replace_reverse_movie
from scheduler import Scheduler
@@ -1625,6 +1625,8 @@ def save_settings():
database.execute("UPDATE table_settings_notifier SET enabled=?, url=? WHERE name=?",
(enabled,notifier_url,notifier['name']))
+ clean_desired_languages()
+
scheduler.update_configurable_tasks()
logging.info('BAZARR Settings saved succesfully.')
@@ -2005,7 +2007,7 @@ def perform_manual_upload_subtitle_movie():
forced=forced,
title=title,
scene_name=sceneName,
- media_type='series',
+ media_type='movie',
subtitle=upload)
if result is not None:
@@ -2223,6 +2225,8 @@ def api_help():
# Mute DeprecationWarning
warnings.simplefilter("ignore", DeprecationWarning)
+# Mute Insecure HTTPS requests made to Sonarr and Radarr
+warnings.filterwarnings('ignore', message='Unverified HTTPS request')
if six.PY3:
warnings.simplefilter("ignore", BrokenPipeError)
server = CherryPyWSGIServer((str(settings.general.ip), (int(args.port) if args.port else int(settings.general.port))), app)
diff --git a/libs/apprise/Apprise.py b/libs/apprise/Apprise.py
index 31bd2888e..bb9504663 100644
--- a/libs/apprise/Apprise.py
+++ b/libs/apprise/Apprise.py
@@ -323,6 +323,10 @@ class Apprise(object):
# bad attachments
return False
+ # Allow Asset default value
+ body_format = self.asset.body_format \
+ if body_format is None else body_format
+
# Iterate over our loaded plugins
for server in self.find(tag):
if status is None:
diff --git a/libs/apprise/AppriseAsset.py b/libs/apprise/AppriseAsset.py
index 61bd75f33..9ad834fb6 100644
--- a/libs/apprise/AppriseAsset.py
+++ b/libs/apprise/AppriseAsset.py
@@ -86,23 +86,32 @@ class AppriseAsset(object):
'apprise-{TYPE}-{XY}{EXTENSION}',
))
- def __init__(self, theme='default', image_path_mask=None,
- image_url_mask=None, default_extension=None):
+ # This value can also be set on calls to Apprise.notify(). This allows
+ # you to let Apprise upfront the type of data being passed in. This
+ # must be of type NotifyFormat. Possible values could be:
+ # - NotifyFormat.TEXT
+ # - NotifyFormat.MARKDOWN
+ # - NotifyFormat.HTML
+ # - None
+ #
+ # If no format is specified (hence None), then no special pre-formating
+ # actions will take place during a notificaton. This has been and always
+ # will be the default.
+ body_format = None
+
+ def __init__(self, **kwargs):
"""
Asset Initialization
"""
- if theme:
- self.theme = theme
-
- if image_path_mask is not None:
- self.image_path_mask = image_path_mask
-
- if image_url_mask is not None:
- self.image_url_mask = image_url_mask
-
- if default_extension is not None:
- self.default_extension = default_extension
+ # Assign default arguments if specified
+ for key, value in kwargs.items():
+ if not hasattr(AppriseAsset, key):
+ raise AttributeError(
+ 'AppriseAsset init(): '
+ 'An invalid key {} was specified.'.format(key))
+
+ setattr(self, key, value)
def color(self, notify_type, color_type=None):
"""
diff --git a/libs/apprise/AppriseAttachment.py b/libs/apprise/AppriseAttachment.py
index 1a79f82f3..a8f27e179 100644
--- a/libs/apprise/AppriseAttachment.py
+++ b/libs/apprise/AppriseAttachment.py
@@ -102,7 +102,7 @@ class AppriseAttachment(object):
# Initialize our default cache value
cache = cache if cache is not None else self.cache
- if isinstance(asset, AppriseAsset):
+ if asset is None:
# prepare default asset
asset = self.asset
diff --git a/libs/apprise/AppriseConfig.py b/libs/apprise/AppriseConfig.py
index 95070012a..902dfa6dd 100644
--- a/libs/apprise/AppriseConfig.py
+++ b/libs/apprise/AppriseConfig.py
@@ -115,7 +115,7 @@ class AppriseConfig(object):
# Initialize our default cache value
cache = cache if cache is not None else self.cache
- if isinstance(asset, AppriseAsset):
+ if asset is None:
# prepare default asset
asset = self.asset
@@ -165,6 +165,39 @@ class AppriseConfig(object):
# Return our status
return return_status
+ def add_config(self, content, asset=None, tag=None, format=None):
+ """
+ Adds one configuration file in it's raw format. Content gets loaded as
+ a memory based object and only exists for the life of this
+ AppriseConfig object it was loaded into.
+
+ If you know the format ('yaml' or 'text') you can specify
+ it for slightly less overhead during this call. Otherwise the
+ configuration is auto-detected.
+ """
+
+ if asset is None:
+ # prepare default asset
+ asset = self.asset
+
+ if not isinstance(content, six.string_types):
+ logger.warning(
+ "An invalid configuration (type={}) was specified.".format(
+ type(content)))
+ return False
+
+ logger.debug("Loading raw configuration: {}".format(content))
+
+ # Create ourselves a ConfigMemory Object to store our configuration
+ instance = config.ConfigMemory(
+ content=content, format=format, asset=asset, tag=tag)
+
+ # Add our initialized plugin to our server listings
+ self.configs.append(instance)
+
+ # Return our status
+ return True
+
def servers(self, tag=MATCH_ALL_TAG, *args, **kwargs):
"""
Returns all of our servers dynamically build based on parsed
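
add_config() keeps raw configuration text entirely in memory (backed by the new ConfigMemory class further down). A hedged usage sketch with a tiny TEXT-format config; the json:// URL is a placeholder, and the format could also be forced with format='text' or format='yaml':

    from apprise import Apprise, AppriseConfig

    config = AppriseConfig()
    # raw TEXT-format configuration, auto-detected and held only in memory
    config.add_config(
        "# tag=url pairs, one per line\n"
        "devops=json://localhost:8080/notify\n")

    apobj = Apprise()
    apobj.add(config)
    apobj.notify(body='deploy complete', tag='devops')
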
diff --git a/libs/apprise/__init__.py b/libs/apprise/__init__.py
index 61498215b..63da23f8c 100644
--- a/libs/apprise/__init__.py
+++ b/libs/apprise/__init__.py
@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
#
-# Copyright (C) 2019 Chris Caron <[email protected]>
+# Copyright (C) 2020 Chris Caron <[email protected]>
# All rights reserved.
#
# This code is licensed under the MIT License.
@@ -24,10 +24,10 @@
# THE SOFTWARE.
__title__ = 'apprise'
-__version__ = '0.8.2'
+__version__ = '0.8.5'
__author__ = 'Chris Caron'
__license__ = 'MIT'
-__copywrite__ = 'Copyright (C) 2019 Chris Caron <[email protected]>'
+__copywrite__ = 'Copyright (C) 2020 Chris Caron <[email protected]>'
__email__ = '[email protected]'
__status__ = 'Production'
diff --git a/libs/apprise/attachment/AttachBase.py b/libs/apprise/attachment/AttachBase.py
index 3fdbbf585..1fde66f4b 100644
--- a/libs/apprise/attachment/AttachBase.py
+++ b/libs/apprise/attachment/AttachBase.py
@@ -28,6 +28,7 @@ import time
import mimetypes
from ..URLBase import URLBase
from ..utils import parse_bool
+from ..AppriseLocale import gettext_lazy as _
class AttachBase(URLBase):
@@ -61,7 +62,35 @@ class AttachBase(URLBase):
# 5 MB = 5242880 bytes
max_file_size = 5242880
- def __init__(self, name=None, mimetype=None, cache=True, **kwargs):
+ # Here is where we define all of the arguments we accept on the url
+ # such as: schema://whatever/?overflow=upstream&format=text
+ # These act the same way as tokens except they are optional and/or
+ # have default values set if mandatory. This rule must be followed
+ template_args = {
+ 'cache': {
+ 'name': _('Cache Age'),
+ 'type': 'int',
+ # We default to (600) which means we cache for 10 minutes
+ 'default': 600,
+ },
+ 'mime': {
+ 'name': _('Forced Mime Type'),
+ 'type': 'string',
+ },
+ 'name': {
+ 'name': _('Forced File Name'),
+ 'type': 'string',
+ },
+ 'verify': {
+ 'name': _('Verify SSL'),
+ # SSL Certificate Authority Verification
+ 'type': 'bool',
+ # Provide a default
+ 'default': True,
+ },
+ }
+
+ def __init__(self, name=None, mimetype=None, cache=None, **kwargs):
"""
Initialize some general logging and common server arguments that will
keep things consistent when working with the configurations that
@@ -109,19 +138,27 @@ class AttachBase(URLBase):
# Absolute path to attachment
self.download_path = None
- # Set our cache flag; it can be True or a (positive) integer
- try:
- self.cache = cache if isinstance(cache, bool) else int(cache)
+ # Set our cache flag; it can be True, False, None, or a (positive)
+ # integer... nothing else
+ if cache is not None:
+ try:
+ self.cache = cache if isinstance(cache, bool) else int(cache)
+
+ except (TypeError, ValueError):
+ err = 'An invalid cache value ({}) was specified.'.format(
+ cache)
+ self.logger.warning(err)
+ raise TypeError(err)
+
+ # Some simple error checking
if self.cache < 0:
err = 'A negative cache value ({}) was specified.'.format(
cache)
self.logger.warning(err)
raise TypeError(err)
- except (ValueError, TypeError):
- err = 'An invalid cache value ({}) was specified.'.format(cache)
- self.logger.warning(err)
- raise TypeError(err)
+ else:
+ self.cache = None
# Validate mimetype if specified
if self._mimetype:
@@ -211,12 +248,16 @@ class AttachBase(URLBase):
Simply returns true if the object has downloaded and stored the
attachment AND the attachment has not expired.
"""
+
+ cache = self.template_args['cache']['default'] \
+ if self.cache is None else self.cache
+
if self.download_path and os.path.isfile(self.download_path) \
- and self.cache:
+ and cache:
# We have enough reason to look further into our cached content
# and verify it has not expired.
- if self.cache is True:
+ if cache is True:
# return our fixed content as is; we will always cache it
return True
@@ -224,7 +265,7 @@ class AttachBase(URLBase):
# content again.
try:
age_in_sec = time.time() - os.stat(self.download_path).st_mtime
- if age_in_sec <= self.cache:
+ if age_in_sec <= cache:
return True
except (OSError, IOError):
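
On the attachment side, cache can now be left unset and exists() falls back to the 600-second default declared in template_args; a positive integer bounds the age of a re-used download and cache=no disables re-use. A brief, hedged usage sketch (both URLs are placeholders):

    from apprise import Apprise, AppriseAttachment

    attachments = AppriseAttachment()
    # keep the downloaded copy for up to five minutes before re-fetching
    attachments.add('https://example.com/report.pdf?cache=300')

    apobj = Apprise()
    apobj.add('json://localhost:8080/notify')
    apobj.notify(body='nightly report attached', attach=attachments)
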
diff --git a/libs/apprise/attachment/AttachHTTP.py b/libs/apprise/attachment/AttachHTTP.py
index f5986fbb8..046babddb 100644
--- a/libs/apprise/attachment/AttachHTTP.py
+++ b/libs/apprise/attachment/AttachHTTP.py
@@ -78,6 +78,11 @@ class AttachHTTP(AttachBase):
# Where our content is written to upon a call to download.
self._temp_file = None
+ # Our Query String Dictionary; we use this to track arguments
+ # specified that aren't otherwise part of this class
+ self.qsd = {k: v for k, v in kwargs.get('qsd', {}).items()
+ if k not in self.template_args}
+
return
def download(self, **kwargs):
@@ -122,6 +127,7 @@ class AttachHTTP(AttachBase):
url,
headers=headers,
auth=auth,
+ params=self.qsd,
verify=self.verify_certificate,
timeout=self.connection_timeout_sec,
stream=True) as r:
@@ -252,18 +258,21 @@ class AttachHTTP(AttachBase):
Returns the URL built dynamically based on specified arguments.
"""
- # Prepare our cache value
- if isinstance(self.cache, bool) or not self.cache:
- cache = 'yes' if self.cache else 'no'
- else:
- cache = int(self.cache)
-
# Define any arguments set
args = {
'verify': 'yes' if self.verify_certificate else 'no',
- 'cache': cache,
}
+ # Prepare our cache value
+ if self.cache is not None:
+ if isinstance(self.cache, bool) or not self.cache:
+ cache = 'yes' if self.cache else 'no'
+ else:
+ cache = int(self.cache)
+
+ # Set our cache value
+ args['cache'] = cache
+
if self._mimetype:
# A format was enforced
args['mime'] = self._mimetype
@@ -275,6 +284,9 @@ class AttachHTTP(AttachBase):
# Append our headers into our args
args.update({'+{}'.format(k): v for k, v in self.headers.items()})
+ # Apply any remaining entries to our URL
+ args.update(self.qsd)
+
# Determine Authentication
auth = ''
if self.user and self.password:
@@ -290,7 +302,7 @@ class AttachHTTP(AttachBase):
default_port = 443 if self.secure else 80
- return '{schema}://{auth}{hostname}{port}{fullpath}/?{args}'.format(
+ return '{schema}://{auth}{hostname}{port}{fullpath}?{args}'.format(
schema=self.secure_protocol if self.secure else self.protocol,
auth=auth,
hostname=self.quote(self.host, safe=''),
diff --git a/libs/apprise/cli.py b/libs/apprise/cli.py
index 57e964a72..654e597b0 100644
--- a/libs/apprise/cli.py
+++ b/libs/apprise/cli.py
@@ -118,7 +118,9 @@ def print_version_msg():
help='Perform a trial run but only prints the notification '
'services to-be triggered to stdout. Notifications are never '
'sent using this mode.')
[email protected]('--verbose', '-v', count=True)
[email protected]('--verbose', '-v', count=True,
+ help='Makes the operation more talkative. Use multiple v to '
+ 'increase the verbosity. I.e.: -vvvv')
@click.option('--version', '-V', is_flag=True,
help='Display the apprise version and exit.')
@click.argument('urls', nargs=-1,
diff --git a/libs/apprise/config/ConfigBase.py b/libs/apprise/config/ConfigBase.py
index 539d4c494..8cd40813d 100644
--- a/libs/apprise/config/ConfigBase.py
+++ b/libs/apprise/config/ConfigBase.py
@@ -92,7 +92,8 @@ class ConfigBase(URLBase):
# Store the encoding
self.encoding = kwargs.get('encoding')
- if 'format' in kwargs:
+ if 'format' in kwargs \
+ and isinstance(kwargs['format'], six.string_types):
# Store the enforced config format
self.config_format = kwargs.get('format').lower()
@@ -250,6 +251,109 @@ class ConfigBase(URLBase):
return results
@staticmethod
+ def detect_config_format(content, **kwargs):
+ """
+ Takes the specified content and attempts to detect the format type
+
+ The function returns the actual format type if detected, otherwise
+ it returns None
+ """
+
+ # Detect Format Logic:
+ # - A pound/hashtag (#) is alawys a comment character so we skip over
+ # lines matched here.
+ # - Detection begins on the first non-comment and non blank line
+ # matched.
+ # - If we find a string followed by a colon, we know we're dealing
+ # with a YAML file.
+ # - If we find a string that starts with a URL, or our tag
+ # definitions (accepting commas) followed by an equal sign we know
+ # we're dealing with a TEXT format.
+
+ # Define what a valid line should look like
+ valid_line_re = re.compile(
+ r'^\s*(?P<line>([;#]+(?P<comment>.*))|'
+ r'(?P<text>((?P<tag>[ \t,a-z0-9_-]+)=)?[a-z0-9]+://.*)|'
+ r'((?P<yaml>[a-z0-9]+):.*))?$', re.I)
+
+ try:
+ # split our content up to read line by line
+ content = re.split(r'\r*\n', content)
+
+ except TypeError:
+ # content was not expected string type
+ ConfigBase.logger.error('Invalid apprise config specified')
+ return None
+
+ # By default set our return value to None since we don't know
+ # what the format is yet
+ config_format = None
+
+ # iterate over each line of the file to attempt to detect it
+ # stop the moment a the type has been determined
+ for line, entry in enumerate(content, start=1):
+
+ result = valid_line_re.match(entry)
+ if not result:
+ # Invalid syntax
+ ConfigBase.logger.error(
+ 'Undetectable apprise configuration found '
+ 'based on line {}.'.format(line))
+ # Take an early exit
+ return None
+
+ # Attempt to detect configuration
+ if result.group('yaml'):
+ config_format = ConfigFormat.YAML
+ ConfigBase.logger.debug(
+ 'Detected YAML configuration '
+ 'based on line {}.'.format(line))
+ break
+
+ elif result.group('text'):
+ config_format = ConfigFormat.TEXT
+ ConfigBase.logger.debug(
+ 'Detected TEXT configuration '
+ 'based on line {}.'.format(line))
+ break
+
+ # If we reach here, we have a comment entry
+ # Adjust default format to TEXT
+ config_format = ConfigFormat.TEXT
+
+ return config_format
+
+ @staticmethod
+ def config_parse(content, asset=None, config_format=None, **kwargs):
+ """
+ Takes the specified config content and loads it based on the specified
+ config_format. If a format isn't specified, then it is auto detected.
+
+ """
+
+ if config_format is None:
+ # Detect the format
+ config_format = ConfigBase.detect_config_format(content)
+
+ if not config_format:
+ # We couldn't detect configuration
+ ConfigBase.logger.error('Could not detect configuration')
+ return list()
+
+ if config_format not in CONFIG_FORMATS:
+ # Invalid configuration type specified
+ ConfigBase.logger.error(
+ 'An invalid configuration format ({}) was specified'.format(
+ config_format))
+ return list()
+
+ # Dynamically load our parse_ function based on our config format
+ fn = getattr(ConfigBase, 'config_parse_{}'.format(config_format))
+
+ # Execute our config parse function which always returns a list
+ return fn(content=content, asset=asset)
+
+ @staticmethod
def config_parse_text(content, asset=None):
"""
Parse the specified content as though it were a simple text file only
@@ -270,9 +374,6 @@ class ConfigBase(URLBase):
<URL>
"""
- # For logging, track the line number
- line = 0
-
response = list()
# Define what a valid line should look like
@@ -290,10 +391,7 @@ class ConfigBase(URLBase):
ConfigBase.logger.error('Invalid apprise text data specified')
return list()
- for entry in content:
- # Increment our line count
- line += 1
-
+ for line, entry in enumerate(content, start=1):
result = valid_line_re.match(entry)
if not result:
# Invalid syntax
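
detect_config_format() only looks at the first meaningful line: a 'key:' prefix means YAML, a URL or 'tag=url' pair means TEXT, and comment-only content defaults to TEXT; config_parse() then dispatches to the matching parser and returns a list of instantiated plugins (an empty list when detection fails). A short demonstration, with a placeholder json:// URL:

    from apprise.config.ConfigBase import ConfigBase

    text_cfg = "home=json://localhost:8080/notify\n"
    yaml_cfg = "urls:\n  - json://localhost:8080/notify\n"

    print(ConfigBase.detect_config_format(text_cfg))   # -> text
    print(ConfigBase.detect_config_format(yaml_cfg))   # -> yaml

    # auto-detects the format, then hands off to config_parse_text()/config_parse_yaml()
    servers = ConfigBase.config_parse(text_cfg)
    print(len(servers))
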
diff --git a/libs/apprise/config/ConfigMemory.py b/libs/apprise/config/ConfigMemory.py
new file mode 100644
index 000000000..c8d49a141
--- /dev/null
+++ b/libs/apprise/config/ConfigMemory.py
@@ -0,0 +1,82 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2020 Chris Caron <[email protected]>
+# All rights reserved.
+#
+# This code is licensed under the MIT License.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files(the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions :
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+
+from .ConfigBase import ConfigBase
+from ..AppriseLocale import gettext_lazy as _
+
+
+class ConfigMemory(ConfigBase):
+ """
+ For information that was loaded from memory and does not
+ persist anywhere.
+ """
+
+ # The default descriptive name associated with the service
+ service_name = _('Memory')
+
+ # The default protocol
+ protocol = 'memory'
+
+ def __init__(self, content, **kwargs):
+ """
+ Initialize Memory Object
+
+ Memory objects just store the raw configuration in memory. There is
+ no external reference point. It's always considered cached.
+ """
+ super(ConfigMemory, self).__init__(**kwargs)
+
+ # Store our raw config into memory
+ self.content = content
+
+ if self.config_format is None:
+ # Detect our format if possible
+ self.config_format = \
+ ConfigMemory.detect_config_format(self.content)
+
+ return
+
+ def url(self, privacy=False, *args, **kwargs):
+ """
+ Returns the URL built dynamically based on specified arguments.
+ """
+
+ return 'memory://'
+
+ def read(self, **kwargs):
+ """
+ Simply return content stored into memory
+ """
+
+ return self.content
+
+ @staticmethod
+ def parse_url(url):
+ """
+ Memory objects have no parseable URL
+
+ """
+ # These URLs can not be parsed
+ return None
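
ConfigMemory just holds the raw text: url() always reports memory://, read() returns the content unchanged, and the format is auto-detected when not forced. It is normally created for you by AppriseConfig.add_config(); a direct, illustrative use (placeholder json:// URL):

    from apprise.config.ConfigMemory import ConfigMemory

    cfg = ConfigMemory(content="alerts=json://localhost:8080/notify")

    print(cfg.url())            # -> memory://
    print(cfg.config_format)    # auto-detected: text
    print(cfg.read())           # returns the raw content unchanged
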
diff --git a/libs/apprise/i18n/apprise.pot b/libs/apprise/i18n/apprise.pot
index b5a624225..ea3fdfad1 100644
--- a/libs/apprise/i18n/apprise.pot
+++ b/libs/apprise/i18n/apprise.pot
@@ -1,21 +1,21 @@
# Translations template for apprise.
-# Copyright (C) 2019 Chris Caron
+# Copyright (C) 2020 Chris Caron
# This file is distributed under the same license as the apprise project.
-# FIRST AUTHOR <EMAIL@ADDRESS>, 2019.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2020.
#
#, fuzzy
msgid ""
msgstr ""
-"Project-Id-Version: apprise 0.8.2\n"
+"Project-Id-Version: apprise 0.8.5\n"
"Report-Msgid-Bugs-To: [email protected]\n"
-"POT-Creation-Date: 2019-11-25 18:50-0500\n"
+"POT-Creation-Date: 2020-03-30 16:00-0400\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <[email protected]>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 2.7.0\n"
+"Generated-By: Babel 2.8.0\n"
msgid "API Key"
msgstr ""
@@ -71,6 +71,9 @@ msgstr ""
msgid "Bot Token"
msgstr ""
+msgid "Cache Age"
+msgstr ""
+
msgid "Cache Results"
msgstr ""
@@ -95,6 +98,9 @@ msgstr ""
msgid "Device ID"
msgstr ""
+msgid "Device Name"
+msgstr ""
+
msgid "Display Footer"
msgstr ""
@@ -128,6 +134,12 @@ msgstr ""
msgid "Footer Logo"
msgstr ""
+msgid "Forced File Name"
+msgstr ""
+
+msgid "Forced Mime Type"
+msgstr ""
+
msgid "From Email"
msgstr ""
@@ -164,6 +176,9 @@ msgstr ""
msgid "Log to STDERR"
msgstr ""
+msgid "Memory"
+msgstr ""
+
msgid "Message Hook"
msgstr ""
@@ -203,6 +218,9 @@ msgstr ""
msgid "Priority"
msgstr ""
+msgid "Private Key"
+msgstr ""
+
msgid "Project ID"
msgstr ""
@@ -365,6 +383,9 @@ msgstr ""
msgid "Version"
msgstr ""
+msgid "Vibration"
+msgstr ""
+
msgid "Web Based"
msgstr ""
diff --git a/libs/apprise/plugins/NotifyD7Networks.py b/libs/apprise/plugins/NotifyD7Networks.py
index d784f1cda..e982a38c1 100644
--- a/libs/apprise/plugins/NotifyD7Networks.py
+++ b/libs/apprise/plugins/NotifyD7Networks.py
@@ -86,7 +86,7 @@ class NotifyD7Networks(NotifyBase):
# The services URL
service_url = 'https://d7networks.com/'
- # All pushover requests are secure
+ # All notification requests are secure
secure_protocol = 'd7sms'
# Allow 300 requests per minute.
@@ -94,7 +94,7 @@ class NotifyD7Networks(NotifyBase):
request_rate_per_sec = 0.20
# A URL that takes you to the setup/help of the specific protocol
- setup_url = 'https://github.com/caronc/apprise/wiki/Notify_twilio'
+ setup_url = 'https://github.com/caronc/apprise/wiki/Notify_d7networks'
# D7 Networks batch notification URL
notify_batch_url = 'http://rest-api.d7networks.com/secure/sendbatch'
diff --git a/libs/apprise/plugins/NotifyDiscord.py b/libs/apprise/plugins/NotifyDiscord.py
index af6bafd49..254d9285e 100644
--- a/libs/apprise/plugins/NotifyDiscord.py
+++ b/libs/apprise/plugins/NotifyDiscord.py
@@ -51,6 +51,7 @@ from ..common import NotifyType
from ..utils import parse_bool
from ..utils import validate_regex
from ..AppriseLocale import gettext_lazy as _
+from ..attachment.AttachBase import AttachBase
class NotifyDiscord(NotifyBase):
@@ -312,6 +313,19 @@ class NotifyDiscord(NotifyBase):
# Always call throttle before any remote server i/o is made
self.throttle()
+ # Perform some simple error checking
+ if isinstance(attach, AttachBase):
+ if not attach:
+ # We could not access the attachment
+ self.logger.error(
+ 'Could not access attachment {}.'.format(
+ attach.url(privacy=True)))
+ return False
+
+ self.logger.debug(
+ 'Posting Discord attachment {}'.format(
+ attach.url(privacy=True)))
+
# Our attachment path (if specified)
files = None
try:
diff --git a/libs/apprise/plugins/NotifyEmail.py b/libs/apprise/plugins/NotifyEmail.py
index d903ca554..de686c8b3 100644
--- a/libs/apprise/plugins/NotifyEmail.py
+++ b/libs/apprise/plugins/NotifyEmail.py
@@ -269,6 +269,14 @@ class NotifyEmail(NotifyBase):
# Define object templates
templates = (
+ '{schema}://{host}',
+ '{schema}://{host}:{port}',
+ '{schema}://{host}/{targets}',
+ '{schema}://{host}:{port}/{targets}',
+ '{schema}://{user}@{host}',
+ '{schema}://{user}@{host}:{port}',
+ '{schema}://{user}@{host}/{targets}',
+ '{schema}://{user}@{host}:{port}/{targets}',
'{schema}://{user}:{password}@{host}',
'{schema}://{user}:{password}@{host}:{port}',
'{schema}://{user}:{password}@{host}/{targets}',
@@ -280,13 +288,11 @@ class NotifyEmail(NotifyBase):
'user': {
'name': _('User Name'),
'type': 'string',
- 'required': True,
},
'password': {
'name': _('Password'),
'type': 'string',
'private': True,
- 'required': True,
},
'host': {
'name': _('Domain'),
@@ -388,7 +394,7 @@ class NotifyEmail(NotifyBase):
self.from_name = from_name
self.from_addr = from_addr
- if not self.from_addr:
+ if self.user and not self.from_addr:
# detect our email address
self.from_addr = '{}@{}'.format(
re.split(r'[\s@]+', self.user)[0],
@@ -446,6 +452,10 @@ class NotifyEmail(NotifyBase):
# Apply any defaults based on certain known configurations
self.NotifyEmailDefaults()
+ # if there is still no smtp_host then we fall back to the hostname
+ if not self.smtp_host:
+ self.smtp_host = self.host
+
return
def NotifyEmailDefaults(self):
@@ -454,10 +464,11 @@ class NotifyEmail(NotifyBase):
it was provided.
"""
- if self.smtp_host:
+ if self.smtp_host or not self.user:
# SMTP Server was explicitly specified, therefore it is assumed
# the caller knows what he's doing and is intentionally
- # over-riding any smarts to be applied
+ # over-riding any smarts to be applied. We also can not apply
+ # any default if there was no user specified.
return
# detect our email address using our user/host combo
@@ -573,21 +584,22 @@ class NotifyEmail(NotifyBase):
# First attach our body to our content as the first element
base.attach(content)
- attach_error = False
-
# Now store our attachments
for attachment in attach:
if not attachment:
# We could not load the attachment; take an early
# exit since this isn't what the end user wanted
- self.logger.warning(
- 'The specified attachment could not be referenced:'
- ' {}.'.format(attachment.url(privacy=True)))
+ # We could not access the attachment
+ self.logger.error(
+ 'Could not access attachment {}.'.format(
+ attachment.url(privacy=True)))
+
+ return False
- # Mark our failure
- attach_error = True
- break
+ self.logger.debug(
+ 'Preparing Email attachment {}'.format(
+ attachment.url(privacy=True)))
with open(attachment.path, "rb") as abody:
app = MIMEApplication(
@@ -600,11 +612,6 @@ class NotifyEmail(NotifyBase):
base.attach(app)
- if attach_error:
- # Mark our error and quit early
- has_error = True
- break
-
# bind the socket variable to the current namespace
socket = None
@@ -687,7 +694,7 @@ class NotifyEmail(NotifyBase):
args['bcc'] = ','.join(self.bcc)
# pull email suffix from username (if present)
- user = self.user.split('@')[0]
+ user = None if not self.user else self.user.split('@')[0]
# Determine Authentication
auth = ''
@@ -697,7 +704,7 @@ class NotifyEmail(NotifyBase):
password=self.pprint(
self.password, privacy, mode=PrivacyMode.Secret, safe=''),
)
- else:
+ elif user:
# user url
auth = '{user}@'.format(
user=NotifyEmail.quote(user, safe=''),
diff --git a/libs/apprise/plugins/NotifyEnigma2.py b/libs/apprise/plugins/NotifyEnigma2.py
new file mode 100644
index 000000000..3397f6532
--- /dev/null
+++ b/libs/apprise/plugins/NotifyEnigma2.py
@@ -0,0 +1,352 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2019 Chris Caron <[email protected]>
+# All rights reserved.
+#
+# This code is licensed under the MIT License.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files(the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions :
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+
+# Sources
+# - https://dreambox.de/en/
+# - https://dream.reichholf.net/wiki/Hauptseite
+# - https://dream.reichholf.net/wiki/Enigma2:WebInterface#Message
+# - https://github.com/E2OpenPlugins/e2openplugin-OpenWebif
+# - https://github.com/E2OpenPlugins/e2openplugin-OpenWebif/wiki/\
+# OpenWebif-API-documentation#message
+
+import six
+import requests
+from json import loads
+
+from .NotifyBase import NotifyBase
+from ..URLBase import PrivacyMode
+from ..common import NotifyType
+from ..AppriseLocale import gettext_lazy as _
+
+
+class Enigma2MessageType(object):
+ # Defines the Enigma2 notification types Apprise can map to
+ INFO = 1
+ WARNING = 2
+ ERROR = 3
+
+
+# If a mapping fails, the default of Enigma2MessageType.INFO is used
+MESSAGE_MAPPING = {
+ NotifyType.INFO: Enigma2MessageType.INFO,
+ NotifyType.SUCCESS: Enigma2MessageType.INFO,
+ NotifyType.WARNING: Enigma2MessageType.WARNING,
+ NotifyType.FAILURE: Enigma2MessageType.ERROR,
+}
+
+
+class NotifyEnigma2(NotifyBase):
+ """
+ A wrapper for Enigma2 Notifications
+ """
+
+ # The default descriptive name associated with the Notification
+ service_name = 'Enigma2'
+
+ # The services URL
+ service_url = 'https://dreambox.de/'
+
+ # The default protocol
+ protocol = 'enigma2'
+
+ # The default secure protocol
+ secure_protocol = 'enigma2s'
+
+ # A URL that takes you to the setup/help of the specific protocol
+ setup_url = 'https://github.com/caronc/apprise/wiki/Notify_enigma2'
+
+ # Enigma2 does not support a title
+ title_maxlen = 0
+
+ # The maximum allowable characters allowed in the body per message
+ body_maxlen = 1000
+
+ # Throttle a wee-bit to avoid thrashing
+ request_rate_per_sec = 0.5
+
+ # Define object templates
+ templates = (
+ '{schema}://{host}',
+ '{schema}://{host}:{port}',
+ '{schema}://{user}@{host}',
+ '{schema}://{user}@{host}:{port}',
+ '{schema}://{user}:{password}@{host}',
+ '{schema}://{user}:{password}@{host}:{port}',
+ '{schema}://{host}/{fullpath}',
+ '{schema}://{host}:{port}/{fullpath}',
+ '{schema}://{user}@{host}/{fullpath}',
+ '{schema}://{user}@{host}:{port}/{fullpath}',
+ '{schema}://{user}:{password}@{host}/{fullpath}',
+ '{schema}://{user}:{password}@{host}:{port}/{fullpath}',
+ )
+
+ # Define our template tokens
+ template_tokens = dict(NotifyBase.template_tokens, **{
+ 'host': {
+ 'name': _('Hostname'),
+ 'type': 'string',
+ 'required': True,
+ },
+ 'port': {
+ 'name': _('Port'),
+ 'type': 'int',
+ 'min': 1,
+ 'max': 65535,
+ },
+ 'user': {
+ 'name': _('Username'),
+ 'type': 'string',
+ },
+ 'password': {
+ 'name': _('Password'),
+ 'type': 'string',
+ 'private': True,
+ },
+ 'fullpath': {
+ 'name': _('Path'),
+ 'type': 'string',
+ },
+ })
+
+ template_args = dict(NotifyBase.template_args, **{
+ 'timeout': {
+ 'name': _('Server Timeout'),
+ 'type': 'int',
+ # The number of seconds to display the message for
+ 'default': 13,
+ # -1 means infinit
+ 'min': -1,
+ },
+ })
+
+ # Define any kwargs we're using
+ template_kwargs = {
+ 'headers': {
+ 'name': _('HTTP Header'),
+ 'prefix': '+',
+ },
+ }
+
+ def __init__(self, timeout=None, headers=None, **kwargs):
+ """
+ Initialize Enigma2 Object
+
+ headers can be a dictionary of key/value pairs that you want to
+ additionally include as part of the server headers to post with
+ """
+ super(NotifyEnigma2, self).__init__(**kwargs)
+
+ try:
+ self.timeout = int(timeout)
+ if self.timeout < self.template_args['timeout']['min']:
+ # Bulletproof; can't go lower then min value
+ self.timeout = self.template_args['timeout']['min']
+
+ except (ValueError, TypeError):
+ # Use default timeout
+ self.timeout = self.template_args['timeout']['default']
+
+ self.fullpath = kwargs.get('fullpath')
+ if not isinstance(self.fullpath, six.string_types):
+ self.fullpath = '/'
+
+ self.headers = {}
+ if headers:
+ # Store our extra headers
+ self.headers.update(headers)
+
+ return
+
+ def url(self, privacy=False, *args, **kwargs):
+ """
+ Returns the URL built dynamically based on specified arguments.
+ """
+
+ # Define any arguments set
+ args = {
+ 'format': self.notify_format,
+ 'overflow': self.overflow_mode,
+ 'verify': 'yes' if self.verify_certificate else 'no',
+ 'timeout': str(self.timeout),
+ }
+
+ # Append our headers into our args
+ args.update({'+{}'.format(k): v for k, v in self.headers.items()})
+
+ # Determine Authentication
+ auth = ''
+ if self.user and self.password:
+ auth = '{user}:{password}@'.format(
+ user=NotifyEnigma2.quote(self.user, safe=''),
+ password=self.pprint(
+ self.password, privacy, mode=PrivacyMode.Secret, safe=''),
+ )
+ elif self.user:
+ auth = '{user}@'.format(
+ user=NotifyEnigma2.quote(self.user, safe=''),
+ )
+
+ default_port = 443 if self.secure else 80
+
+ return '{schema}://{auth}{hostname}{port}{fullpath}?{args}'.format(
+ schema=self.secure_protocol if self.secure else self.protocol,
+ auth=auth,
+ hostname=NotifyEnigma2.quote(self.host, safe=''),
+ port='' if self.port is None or self.port == default_port
+ else ':{}'.format(self.port),
+ fullpath=NotifyEnigma2.quote(self.fullpath, safe='/'),
+ args=NotifyEnigma2.urlencode(args),
+ )
+
+ def send(self, body, title='', notify_type=NotifyType.INFO, **kwargs):
+ """
+ Perform Enigma2 Notification
+ """
+
+ # prepare Enigma2 Object
+ headers = {
+ 'User-Agent': self.app_id,
+ }
+
+ params = {
+ 'text': body,
+ 'type': MESSAGE_MAPPING.get(
+ notify_type, Enigma2MessageType.INFO),
+ 'timeout': self.timeout,
+ }
+
+ # Apply any/all header over-rides defined
+ headers.update(self.headers)
+
+ auth = None
+ if self.user:
+ auth = (self.user, self.password)
+
+ # Set our schema
+ schema = 'https' if self.secure else 'http'
+
+ url = '%s://%s' % (schema, self.host)
+ if isinstance(self.port, int):
+ url += ':%d' % self.port
+
+ # Prepare our message URL
+ url += self.fullpath.rstrip('/') + '/api/message'
+
+ self.logger.debug('Enigma2 GET URL: %s (cert_verify=%r)' % (
+ url, self.verify_certificate,
+ ))
+ self.logger.debug('Enigma2 Parameters: %s' % str(params))
+
+ # Always call throttle before any remote server i/o is made
+ self.throttle()
+
+ try:
+ r = requests.get(
+ url,
+ params=params,
+ headers=headers,
+ auth=auth,
+ verify=self.verify_certificate,
+ )
+
+ if r.status_code != requests.codes.ok:
+ # We had a problem
+ status_str = \
+ NotifyEnigma2.http_response_code_lookup(r.status_code)
+
+ self.logger.warning(
+ 'Failed to send Enigma2 notification: '
+ '{}{}error={}.'.format(
+ status_str,
+ ', ' if status_str else '',
+ r.status_code))
+
+ self.logger.debug('Response Details:\r\n{}'.format(r.content))
+
+ # Return; we're done
+ return False
+
+ # We were able to post our message; now lets evaluate the response
+ try:
+ # Acquire our result
+ result = loads(r.content).get('result', False)
+
+ except (AttributeError, TypeError, ValueError):
+ # ValueError = r.content is Unparsable
+ # TypeError = r.content is None
+ # AttributeError = r is None
+
+ # We could not parse JSON response.
+ result = False
+
+ if not result:
+ self.logger.warning(
+ 'Failed to send Enigma2 notification: '
+ 'There was no server acknowledgement.')
+ self.logger.debug('Response Details:\r\n{}'.format(r.content))
+ # Return; we're done
+ return False
+
+ self.logger.info('Sent Enigma2 notification.')
+
+ except requests.RequestException as e:
+ self.logger.warning(
+ 'A Connection error occurred sending Enigma2 '
+ 'notification to %s.' % self.host)
+ self.logger.debug('Socket Exception: %s' % str(e))
+
+ # Return; we're done
+ return False
+
+ return True
+
+ @staticmethod
+ def parse_url(url):
+ """
+ Parses the URL and returns enough arguments that can allow
+ us to substantiate this object.
+
+ """
+ results = NotifyBase.parse_url(url)
+
+ if not results:
+ # We're done early as we couldn't load the results
+ return results
+
+ # Add our headers, which the user can potentially over-ride if they
+ # wish, to our returned result set
+ results['headers'] = results['qsd-']
+ results['headers'].update(results['qsd+'])
+
+ # Tidy our header entries by unquoting them
+ results['headers'] = {
+ NotifyEnigma2.unquote(x): NotifyEnigma2.unquote(y)
+ for x, y in results['headers'].items()}
+
+ # Save timeout value (if specified)
+ if 'timeout' in results['qsd'] and len(results['qsd']['timeout']):
+ results['timeout'] = results['qsd']['timeout']
+
+ return results
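+
+
+# Rough sketch of the request that send() above ends up making for a URL
+# such as enigma2://user:[email protected]/ (host and credentials are
+# illustrative placeholders):
+#
+#   import requests
+#   r = requests.get(
+#       'http://receiver.local/api/message',
+#       params={'text': 'hello',
+#               'type': Enigma2MessageType.INFO,  # via MESSAGE_MAPPING
+#               'timeout': 13},
+#       auth=('user', 'pass'),
+#   )
+#   # the receiver acknowledges success with {"result": true}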
diff --git a/libs/apprise/plugins/NotifyGitter.py b/libs/apprise/plugins/NotifyGitter.py
index 84a2322c6..83e13fc76 100644
--- a/libs/apprise/plugins/NotifyGitter.py
+++ b/libs/apprise/plugins/NotifyGitter.py
@@ -71,7 +71,7 @@ class NotifyGitter(NotifyBase):
# The services URL
service_url = 'https://gitter.im/'
- # All pushover requests are secure
+ # All notification requests are secure
secure_protocol = 'gitter'
# A URL that takes you to the setup/help of the specific protocol
@@ -102,7 +102,7 @@ class NotifyGitter(NotifyBase):
# Define object templates
templates = (
- '{schema}://{token}:{targets}/',
+ '{schema}://{token}/{targets}/',
)
# Define our template tokens
diff --git a/libs/apprise/plugins/NotifyJoin.py b/libs/apprise/plugins/NotifyJoin.py
index 76011d984..278ddaef8 100644
--- a/libs/apprise/plugins/NotifyJoin.py
+++ b/libs/apprise/plugins/NotifyJoin.py
@@ -130,6 +130,11 @@ class NotifyJoin(NotifyBase):
'regex': (r'^[a-z0-9]{32}$', 'i'),
'map_to': 'targets',
},
+ 'device_name': {
+ 'name': _('Device Name'),
+ 'type': 'string',
+ 'map_to': 'targets',
+ },
'group': {
'name': _('Group'),
'type': 'choice:string',
@@ -210,18 +215,7 @@ class NotifyJoin(NotifyBase):
'group.{}'.format(group_re.group('name').lower()))
continue
- elif IS_DEVICE_RE.match(target):
- self.targets.append(target)
- continue
-
- self.logger.warning(
- 'Ignoring invalid Join device/group "{}"'.format(target)
- )
-
- if not self.targets:
- msg = 'No Join targets to notify.'
- self.logger.warning(msg)
- raise TypeError(msg)
+ self.targets.append(target)
return
@@ -247,12 +241,18 @@ class NotifyJoin(NotifyBase):
url_args = {
'apikey': self.apikey,
- 'deviceId': target,
'priority': str(self.priority),
'title': title,
'text': body,
}
+ if IS_GROUP_RE.match(target) or IS_DEVICE_RE.match(target):
+ url_args['deviceId'] = target
+
+ else:
+ # Support Device Names
+ url_args['deviceNames'] = target
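+ # e.g. (illustrative values) a 32-character device id or 'group.home'
+ # is passed along as deviceId above, while a free-form name such as
+ # 'Kitchen Tablet' is forwarded via deviceNames instead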
+
# prepare our image for display if configured to do so
image_url = None if not self.include_image \
else self.image_url(notify_type)
diff --git a/libs/apprise/plugins/NotifyKavenegar.py b/libs/apprise/plugins/NotifyKavenegar.py
new file mode 100644
index 000000000..bf9b75252
--- /dev/null
+++ b/libs/apprise/plugins/NotifyKavenegar.py
@@ -0,0 +1,377 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2020 Chris Caron <[email protected]>
+# All rights reserved.
+#
+# This code is licensed under the MIT License.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files(the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions :
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+
+# To use this service you will need a Kavenegar account from their website
+# at https://kavenegar.com/
+#
+# After you've established your account you can get your API Key from your
+# account profile: https://panel.kavenegar.com/client/setting/account
+#
+# This provider does not accept +1 (for example) as a country code. You need
+# to specify 001 instead.
+#
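+# A minimal usage sketch (the API key and phone number below are
+# placeholders; see the templates defined further down for the exact
+# URL syntax):
+#
+#   import apprise
+#   apobj = apprise.Apprise()
+#   # note the 001 country code prefix rather than +1
+#   apobj.add('kavenegar://abcd1234/0015551234567')
+#   apobj.notify(body='Test message')
+#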
+import re
+import requests
+from json import loads
+
+from .NotifyBase import NotifyBase
+from ..common import NotifyType
+from ..utils import parse_list
+from ..utils import validate_regex
+from ..AppriseLocale import gettext_lazy as _
+
+# Extend HTTP Error Messages
+# Based on https://kavenegar.com/rest.html
+KAVENEGAR_HTTP_ERROR_MAP = {
+ 200: 'The request was approved',
+ 400: 'Parameters are incomplete',
+ 401: 'Account has been disabled',
+ 402: 'The operation failed',
+ 403: 'The API Key is invalid',
+ 404: 'The method is unknown',
+ 405: 'The GET/POST request is wrong',
+ 406: 'Invalid mandatory parameters sent',
+ 407: 'You cannot access the information you want',
+ 409: 'The server is unable to respond',
+ 411: 'The recipient is invalid',
+ 412: 'The sender is invalid',
+ 413: 'Message empty or message length exceeded',
+ 414: 'The number of recipients is more than 200',
+ 415: 'The start index is larger than the total',
+ 416: 'The source IP of the service does not match the settings',
+ 417: 'The submission date is incorrect, '
+ 'either expired or not in the correct format',
+ 418: 'Your account credit is insufficient',
+ 422: 'Data cannot be processed due to invalid characters',
+ 501: 'SMS can only be sent to the account holder number',
+}
+
+# Some Phone Number Detection
+IS_PHONE_NO = re.compile(r'^\+?(?P<phone>[0-9\s)(+-]+)\s*$')
+
+
+class NotifyKavenegar(NotifyBase):
+ """
+ A wrapper for Kavenegar Notifications
+ """
+
+ # The default descriptive name associated with the Notification
+ service_name = 'Kavenegar'
+
+ # The services URL
+ service_url = 'https://kavenegar.com/'
+
+ # All notification requests are secure
+ secure_protocol = 'kavenegar'
+
+ # Allow 300 requests per minute.
+ # 60/300 = 0.2
+ request_rate_per_sec = 0.20
+
+ # A URL that takes you to the setup/help of the specific protocol
+ setup_url = 'https://github.com/caronc/apprise/wiki/Notify_kavenegar'
+
+ # Kavenegar single notification URL
+ notify_url = 'http://api.kavenegar.com/v1/{apikey}/sms/send.json'
+
+ # The maximum length of the body
+ body_maxlen = 160
+
+ # A title can not be used for SMS Messages. Setting this to zero will
+ # cause any title (if defined) to get placed into the message body.
+ title_maxlen = 0
+
+ # Define object templates
+ templates = (
+ '{schema}://{apikey}/{targets}',
+ '{schema}://{source}@{apikey}/{targets}',
+ )
+
+ # Define our template tokens
+ template_tokens = dict(NotifyBase.template_tokens, **{
+ 'apikey': {
+ 'name': _('API Key'),
+ 'type': 'string',
+ 'required': True,
+ 'private': True,
+ 'regex': (r'^[a-z0-9]+$', 'i'),
+ },
+ 'source': {
+ 'name': _('Source Phone No'),
+ 'type': 'string',
+ 'prefix': '+',
+ 'regex': (r'^[0-9\s)(+-]+$', 'i'),
+ },
+ 'target_phone': {
+ 'name': _('Target Phone No'),
+ 'type': 'string',
+ 'prefix': '+',
+ 'regex': (r'^[0-9\s)(+-]+$', 'i'),
+ 'map_to': 'targets',
+ },
+ 'targets': {
+ 'name': _('Targets'),
+ 'type': 'list:string',
+ 'required': True,
+ },
+ })
+
+ # Define our template arguments
+ template_args = dict(NotifyBase.template_args, **{
+ 'to': {
+ 'alias_of': 'targets',
+ },
+ 'from': {
+ 'alias_of': 'source',
+ },
+ })
+
+ def __init__(self, apikey, source=None, targets=None, **kwargs):
+ """
+ Initialize Kavenegar Object
+ """
+ super(NotifyKavenegar, self).__init__(**kwargs)
+
+ # API Key (associated with project)
+ self.apikey = validate_regex(
+ apikey, *self.template_tokens['apikey']['regex'])
+ if not self.apikey:
+ msg = 'An invalid Kavenegar API Key ' \
+ '({}) was specified.'.format(apikey)
+ self.logger.warning(msg)
+ raise TypeError(msg)
+
+ self.source = None
+ if source is not None:
+ result = IS_PHONE_NO.match(source)
+ if not result:
+ msg = 'The Kavenegar source specified ({}) is invalid.'\
+ .format(source)
+ self.logger.warning(msg)
+ raise TypeError(msg)
+
+ # Further check our phone # for its digit count
+ result = ''.join(re.findall(r'\d+', result.group('phone')))
+ if len(result) < 11 or len(result) > 14:
+ msg = 'The Kavenegar source # specified ({}) is invalid.'\
+ .format(source)
+ self.logger.warning(msg)
+ raise TypeError(msg)
+
+ # Store our source
+ self.source = result
+
+ # Parse our targets
+ self.targets = list()
+
+ for target in parse_list(targets):
+ # Validate targets and drop bad ones:
+ result = IS_PHONE_NO.match(target)
+ if result:
+ # Further check our phone # for its digit count; if it falls
+ # outside the 11-14 digit range, assume it's a poorly specified
+ # phone no and emit a warning
+ result = ''.join(re.findall(r'\d+', result.group('phone')))
+ if len(result) < 11 or len(result) > 14:
+ self.logger.warning(
+ 'Dropped invalid phone # '
+ '({}) specified.'.format(target),
+ )
+ continue
+
+ # store valid phone number
+ self.targets.append(result)
+ continue
+
+ self.logger.warning(
+ 'Dropped invalid phone # ({}) specified.'.format(target))
+
+ if len(self.targets) == 0:
+ msg = 'There are no valid targets identified to notify.'
+ self.logger.warning(msg)
+ raise TypeError(msg)
+
+ return
+
+ def send(self, body, title='', notify_type=NotifyType.INFO, **kwargs):
+ """
+ Sends SMS Message
+ """
+
+ # error tracking (used for function return)
+ has_error = False
+
+ # Prepare our headers
+ headers = {
+ 'User-Agent': self.app_id,
+ 'Accept': 'application/json',
+ }
+
+ # Our URL
+ url = self.notify_url.format(apikey=self.apikey)
+
+ # use the list directly
+ targets = list(self.targets)
+
+ while len(targets):
+ # Get our target(s) to notify
+ target = targets.pop(0)
+
+ # Prepare our payload
+ payload = {
+ 'receptor': target,
+ 'message': body,
+ }
+
+ if self.source:
+ # Only set source if specified
+ payload['sender'] = self.source
+
+ # Some Debug Logging
+ self.logger.debug(
+ 'Kavenegar POST URL: {} (cert_verify={})'.format(
+ url, self.verify_certificate))
+ self.logger.debug('Kavenegar Payload: {}' .format(payload))
+
+ # Always call throttle before any remote server i/o is made
+ self.throttle()
+ try:
+ r = requests.post(
+ url,
+ params=payload,
+ headers=headers,
+ verify=self.verify_certificate,
+ )
+
+ if r.status_code not in (
+ requests.codes.created, requests.codes.ok):
+ # We had a problem
+ status_str = \
+ NotifyBase.http_response_code_lookup(
+ r.status_code, KAVENEGAR_HTTP_ERROR_MAP)
+
+ try:
+ # Update our status response if we can
+ json_response = loads(r.content)
+ status_str = json_response.get('message', status_str)
+
+ except (AttributeError, TypeError, ValueError):
+ # ValueError = r.content is Unparsable
+ # TypeError = r.content is None
+ # AttributeError = r is None
+
+ # We could not parse JSON response.
+ # We will just use the status we already have.
+ pass
+
+ self.logger.warning(
+ 'Failed to send Kavenegar SMS notification to {}: '
+ '{}{}error={}.'.format(
+ target,
+ status_str,
+ ', ' if status_str else '',
+ r.status_code))
+
+ self.logger.debug(
+ 'Response Details:\r\n{}'.format(r.content))
+
+ # Mark our failure
+ has_error = True
+ continue
+
+ # If we reach here; the message was sent
+ self.logger.info(
+ 'Sent Kavenegar SMS notification to {}.'.format(target))
+
+ self.logger.debug(
+ 'Response Details:\r\n{}'.format(r.content))
+
+ except requests.RequestException as e:
+ self.logger.warning(
+ 'A Connection error occurred sending Kavenegar:%s ' % (
+ ', '.join(self.targets)) + 'notification.'
+ )
+ self.logger.debug('Socket Exception: %s' % str(e))
+ # Mark our failure
+ has_error = True
+ continue
+
+ return not has_error
+
+ def url(self, privacy=False, *args, **kwargs):
+ """
+ Returns the URL built dynamically based on specified arguments.
+ """
+
+ # Define any arguments set
+ args = {
+ 'format': self.notify_format,
+ 'overflow': self.overflow_mode,
+ 'verify': 'yes' if self.verify_certificate else 'no',
+ }
+
+ return '{schema}://{source}{apikey}/{targets}?{args}'.format(
+ schema=self.secure_protocol,
+ source='' if not self.source else '{}@'.format(self.source),
+ apikey=self.pprint(self.apikey, privacy, safe=''),
+ targets='/'.join(
+ [NotifyKavenegar.quote(x, safe='') for x in self.targets]),
+ args=NotifyKavenegar.urlencode(args))
+
+ @staticmethod
+ def parse_url(url):
+ """
+ Parses the URL and returns enough arguments that can allow
+ us to substantiate this object.
+
+ """
+ results = NotifyBase.parse_url(url, verify_host=False)
+
+ if not results:
+ # We're done early as we couldn't load the results
+ return results
+
+ # Store the source if specified
+ if results.get('user', None):
+ results['source'] = results['user']
+
+ # Get our entries; split_path() looks after unquoting content for us
+ # by default
+ results['targets'] = NotifyKavenegar.split_path(results['fullpath'])
+
+ # The hostname is our authentication key
+ results['apikey'] = NotifyKavenegar.unquote(results['host'])
+
+ # Support the 'to' variable so that we can support targets this way too
+ # The 'to' makes it easier to use yaml configuration
+ if 'to' in results['qsd'] and len(results['qsd']['to']):
+ results['targets'] += \
+ NotifyKavenegar.parse_list(results['qsd']['to'])
+
+ if 'from' in results['qsd'] and len(results['qsd']['from']):
+ results['source'] = \
+ NotifyKavenegar.unquote(results['qsd']['from'])
+
+ return results
diff --git a/libs/apprise/plugins/NotifyMSG91.py b/libs/apprise/plugins/NotifyMSG91.py
index 1425b8a76..17676bf74 100644
--- a/libs/apprise/plugins/NotifyMSG91.py
+++ b/libs/apprise/plugins/NotifyMSG91.py
@@ -98,7 +98,7 @@ class NotifyMSG91(NotifyBase):
notify_url = 'https://world.msg91.com/api/sendhttp.php'
# The maximum length of the body
- body_maxlen = 140
+ body_maxlen = 160
# A title can not be used for SMS Messages. Setting this to zero will
# cause any title (if defined) to get placed into the message body.
diff --git a/libs/apprise/plugins/NotifyMailgun.py b/libs/apprise/plugins/NotifyMailgun.py
index 6e2a3b282..7dfd1248d 100644
--- a/libs/apprise/plugins/NotifyMailgun.py
+++ b/libs/apprise/plugins/NotifyMailgun.py
@@ -101,7 +101,7 @@ class NotifyMailgun(NotifyBase):
# The services URL
service_url = 'https://www.mailgun.com/'
- # All pushover requests are secure
+ # All notification requests are secure
secure_protocol = 'mailgun'
# Mailgun advertises they allow 300 requests per minute.
diff --git a/libs/apprise/plugins/NotifyMatrix.py b/libs/apprise/plugins/NotifyMatrix.py
index 97ab127cf..13e7fbd30 100644
--- a/libs/apprise/plugins/NotifyMatrix.py
+++ b/libs/apprise/plugins/NotifyMatrix.py
@@ -41,6 +41,7 @@ from ..common import NotifyImageSize
from ..common import NotifyFormat
from ..utils import parse_bool
from ..utils import parse_list
+from ..utils import validate_regex
from ..AppriseLocale import gettext_lazy as _
# Define default path
@@ -74,12 +75,16 @@ class MatrixWebhookMode(object):
# Support the slack webhook plugin
SLACK = "slack"
+ # Support the t2bot webhook plugin
+ T2BOT = "t2bot"
+
# webhook modes are placed ito this list for validation purposes
MATRIX_WEBHOOK_MODES = (
MatrixWebhookMode.DISABLED,
MatrixWebhookMode.MATRIX,
MatrixWebhookMode.SLACK,
+ MatrixWebhookMode.T2BOT,
)
@@ -122,6 +127,11 @@ class NotifyMatrix(NotifyBase):
# Define object templates
templates = (
+ # Targets are ignored when using t2bot mode; only a token is required
+ '{schema}://{token}',
+ '{schema}://{user}@{token}',
+
+ # All other non-t2bot setups require targets
'{schema}://{user}:{password}@{host}/{targets}',
'{schema}://{user}:{password}@{host}:{port}/{targets}',
'{schema}://{token}:{password}@{host}/{targets}',
@@ -199,8 +209,7 @@ class NotifyMatrix(NotifyBase):
},
})
- def __init__(self, targets=None, mode=None, include_image=False,
- **kwargs):
+ def __init__(self, targets=None, mode=None, include_image=False, **kwargs):
"""
Initialize Matrix Object
"""
@@ -233,6 +242,16 @@ class NotifyMatrix(NotifyBase):
self.logger.warning(msg)
raise TypeError(msg)
+ if self.mode == MatrixWebhookMode.T2BOT:
+ # t2bot configuration requires that a webhook id is specified
+ self.access_token = validate_regex(
+ self.host, r'^[a-z0-9]{64}$', 'i')
+ if not self.access_token:
+ msg = 'An invalid T2Bot/Matrix Webhook ID ' \
+ '({}) was specified.'.format(self.host)
+ self.logger.warning(msg)
+ raise TypeError(msg)
+
def send(self, body, title='', notify_type=NotifyType.INFO, **kwargs):
"""
Perform Matrix Notification
@@ -257,20 +276,30 @@ class NotifyMatrix(NotifyBase):
'Content-Type': 'application/json',
}
- # Acquire our access token from our URL
- access_token = self.password if self.password else self.user
+ if self.mode != MatrixWebhookMode.T2BOT:
+ # Acquire our access token from our URL
+ access_token = self.password if self.password else self.user
- default_port = 443 if self.secure else 80
+ default_port = 443 if self.secure else 80
- # Prepare our URL
- url = '{schema}://{hostname}:{port}/{webhook_path}/{token}'.format(
- schema='https' if self.secure else 'http',
- hostname=self.host,
- port='' if self.port is None
- or self.port == default_port else self.port,
- webhook_path=MATRIX_V1_WEBHOOK_PATH,
- token=access_token,
- )
+ # Prepare our URL
+ url = '{schema}://{hostname}:{port}/{webhook_path}/{token}'.format(
+ schema='https' if self.secure else 'http',
+ hostname=self.host,
+ port='' if self.port is None
+ or self.port == default_port else self.port,
+ webhook_path=MATRIX_V1_WEBHOOK_PATH,
+ token=access_token,
+ )
+
+ else:
+ #
+ # t2bot Setup
+ #
+
+ # Prepare our URL
+ url = 'https://webhooks.t2bot.io/api/v1/matrix/hook/' \
+ '{token}'.format(token=self.access_token)
# Retrieve our payload
payload = getattr(self, '_{}_webhook_payload'.format(self.mode))(
@@ -381,7 +410,7 @@ class NotifyMatrix(NotifyBase):
payload = {
'displayName':
- self.user if self.user else self.matrix_default_user,
+ self.user if self.user else self.app_id,
'format': 'html',
}
@@ -399,6 +428,27 @@ class NotifyMatrix(NotifyBase):
return payload
+ def _t2bot_webhook_payload(self, body, title='',
+ notify_type=NotifyType.INFO, **kwargs):
+ """
+ Format the payload for a T2Bot Matrix based messages
+
+ """
+
+ # Retrieve our payload
+ payload = self._matrix_webhook_payload(
+ body=body, title=title, notify_type=notify_type, **kwargs)
+
+ # Acquire our image url if we're configured to do so
+ image_url = None if not self.include_image else \
+ self.image_url(notify_type)
+
+ if image_url:
+ # t2bot can take an avatarUrl Entry
+ payload['avatarUrl'] = image_url
+
+ return payload
+
def _send_server_notification(self, body, title='',
notify_type=NotifyType.INFO, **kwargs):
"""
@@ -867,6 +917,9 @@ class NotifyMatrix(NotifyBase):
))
self.logger.debug('Matrix Payload: %s' % str(payload))
+ # Initialize our response object
+ r = None
+
try:
r = fn(
url,
@@ -948,7 +1001,8 @@ class NotifyMatrix(NotifyBase):
"""
Ensure we relinquish our token
"""
- self._logout()
+ if self.mode != MatrixWebhookMode.T2BOT:
+ self._logout()
def url(self, privacy=False, *args, **kwargs):
"""
@@ -997,12 +1051,14 @@ class NotifyMatrix(NotifyBase):
us to substantiate this object.
"""
- results = NotifyBase.parse_url(url)
-
+ results = NotifyBase.parse_url(url, verify_host=False)
if not results:
# We're done early as we couldn't load the results
return results
+ if not results.get('host'):
+ return None
+
# Get our rooms
results['targets'] = NotifyMatrix.split_path(results['fullpath'])
@@ -1040,4 +1096,37 @@ class NotifyMatrix(NotifyBase):
results['mode'] = results['qsd'].get(
'mode', results['qsd'].get('webhook'))
+ # t2bot detection... look for just a hostname, and/or just a user/host
+ # if we match this; we can go ahead and set the mode (but only if
+ # it was otherwise not set)
+ if results['mode'] is None \
+ and not results['password'] \
+ and not results['targets']:
+
+ # Default mode to t2bot
+ results['mode'] = MatrixWebhookMode.T2BOT
+
return results
+
+ @staticmethod
+ def parse_native_url(url):
+ """
+ Support https://webhooks.t2bot.io/api/v1/matrix/hook/WEBHOOK_TOKEN/
+ """
+
+ result = re.match(
+ r'^https?://webhooks\.t2bot\.io/api/v1/matrix/hook/'
+ r'(?P<webhook_token>[A-Z0-9_-]+)/?'
+ r'(?P<args>\?.+)?$', url, re.I)
+
+ if result:
+ mode = 'mode={}'.format(MatrixWebhookMode.T2BOT)
+
+ return NotifyMatrix.parse_url(
+ '{schema}://{webhook_token}/{args}'.format(
+ schema=NotifyMatrix.secure_protocol,
+ webhook_token=result.group('webhook_token'),
+ args='?{}'.format(mode) if not result.group('args')
+ else '{}&{}'.format(result.group('args'), mode)))
+
+ return None
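+
+ # Example: with an illustrative placeholder token, this method would turn
+ # https://webhooks.t2bot.io/api/v1/matrix/hook/abc123 into the equivalent
+ # '{schema}://abc123/?mode=t2bot' Apprise URL, where {schema} is
+ # NotifyMatrix.secure_protocol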
diff --git a/libs/apprise/plugins/NotifyMessageBird.py b/libs/apprise/plugins/NotifyMessageBird.py
index b593bc214..78ac9d58a 100644
--- a/libs/apprise/plugins/NotifyMessageBird.py
+++ b/libs/apprise/plugins/NotifyMessageBird.py
@@ -63,7 +63,7 @@ class NotifyMessageBird(NotifyBase):
notify_url = 'https://rest.messagebird.com/messages'
# The maximum length of the body
- body_maxlen = 140
+ body_maxlen = 160
# A title can not be used for SMS Messages. Setting this to zero will
# cause any title (if defined) to get placed into the message body.
diff --git a/libs/apprise/plugins/NotifyNexmo.py b/libs/apprise/plugins/NotifyNexmo.py
index db19c759d..5fd662ad7 100644
--- a/libs/apprise/plugins/NotifyNexmo.py
+++ b/libs/apprise/plugins/NotifyNexmo.py
@@ -64,21 +64,12 @@ class NotifyNexmo(NotifyBase):
notify_url = 'https://rest.nexmo.com/sms/json'
# The maximum length of the body
- body_maxlen = 140
+ body_maxlen = 160
# A title can not be used for SMS Messages. Setting this to zero will
# cause any title (if defined) to get placed into the message body.
title_maxlen = 0
- # Default Time To Live
- # By default Nexmo attempt delivery for 72 hours, however the maximum
- # effective value depends on the operator and is typically 24 - 48 hours.
- # We recommend this value should be kept at its default or at least 30
- # minutes.
- default_ttl = 900000
- ttl_max = 604800000
- ttl_min = 20000
-
# Define object templates
templates = (
'{schema}://{apikey}:{secret}@{from_phone}',
@@ -135,6 +126,12 @@ class NotifyNexmo(NotifyBase):
'secret': {
'alias_of': 'secret',
},
+
+ # Default Time To Live
+ # By default Nexmo attempts delivery for 72 hours; however, the maximum
+ # effective value depends on the operator and is typically 24 - 48
+ # hours. We recommend this value should be kept at its default or at
+ # least 30 minutes.
'ttl': {
'name': _('ttl'),
'type': 'int',
@@ -170,7 +167,7 @@ class NotifyNexmo(NotifyBase):
raise TypeError(msg)
# Set our Time to Live Flag
- self.ttl = self.default_ttl
+ self.ttl = self.template_args['ttl']['default']
try:
self.ttl = int(ttl)
@@ -178,7 +175,8 @@ class NotifyNexmo(NotifyBase):
# Do nothing
pass
- if self.ttl < self.ttl_min or self.ttl > self.ttl_max:
+ if self.ttl < self.template_args['ttl']['min'] or \
+ self.ttl > self.template_args['ttl']['max']:
msg = 'The Nexmo TTL specified ({}) is out of range.'\
.format(self.ttl)
self.logger.warning(msg)
diff --git a/libs/apprise/plugins/NotifyNextcloud.py b/libs/apprise/plugins/NotifyNextcloud.py
new file mode 100644
index 000000000..33211f64a
--- /dev/null
+++ b/libs/apprise/plugins/NotifyNextcloud.py
@@ -0,0 +1,294 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2019 Chris Caron <[email protected]>
+# All rights reserved.
+#
+# This code is licensed under the MIT License.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files(the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions :
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ # THE SOFTWARE.
+
+import requests
+
+from .NotifyBase import NotifyBase
+from ..URLBase import PrivacyMode
+from ..common import NotifyType
+from ..utils import parse_list
+from ..AppriseLocale import gettext_lazy as _
+
+
+class NotifyNextcloud(NotifyBase):
+ """
+ A wrapper for Nextcloud Notifications
+ """
+
+ # The default descriptive name associated with the Notification
+ service_name = 'Nextcloud'
+
+ # The services URL
+ service_url = 'https://nextcloud.com/'
+
+ # Insecure protocol (for those self hosted requests)
+ protocol = 'ncloud'
+
+ # The default secure protocol
+ secure_protocol = 'nclouds'
+
+ # A URL that takes you to the setup/help of the specific protocol
+ setup_url = 'https://github.com/caronc/apprise/wiki/Notify_nextcloud'
+
+ # Nextcloud URL
+ notify_url = '{schema}://{host}/ocs/v2.php/apps/admin_notifications/' \
+ 'api/v1/notifications/{target}'
+
+ # The maximum allowable characters allowed in the title per message
+ title_maxlen = 255
+
+ # Defines the maximum allowable characters per message.
+ body_maxlen = 4000
+
+ # Define object templates
+ templates = (
+ '{schema}://{user}:{password}@{host}/{targets}',
+ '{schema}://{user}:{password}@{host}:{port}/{targets}',
+ )
+
+ # Define our template tokens
+ template_tokens = dict(NotifyBase.template_tokens, **{
+ 'host': {
+ 'name': _('Hostname'),
+ 'type': 'string',
+ 'required': True,
+ },
+ 'port': {
+ 'name': _('Port'),
+ 'type': 'int',
+ 'min': 1,
+ 'max': 65535,
+ },
+ 'user': {
+ 'name': _('Username'),
+ 'type': 'string',
+ },
+ 'password': {
+ 'name': _('Password'),
+ 'type': 'string',
+ 'private': True,
+ },
+ 'target_user': {
+ 'name': _('Target User'),
+ 'type': 'string',
+ 'map_to': 'targets',
+ },
+ 'targets': {
+ 'name': _('Targets'),
+ 'type': 'list:string',
+ 'required': True,
+ },
+ })
+
+ # Define any kwargs we're using
+ template_kwargs = {
+ 'headers': {
+ 'name': _('HTTP Header'),
+ 'prefix': '+',
+ },
+ }
+
+ def __init__(self, targets=None, headers=None, **kwargs):
+ """
+ Initialize Nextcloud Object
+ """
+ super(NotifyNextcloud, self).__init__(**kwargs)
+
+ self.targets = parse_list(targets)
+ if len(self.targets) == 0:
+ msg = 'At least one Nextcloud target user must be specified.'
+ self.logger.warning(msg)
+ raise TypeError(msg)
+
+ self.headers = {}
+ if headers:
+ # Store our extra headers
+ self.headers.update(headers)
+
+ return
+
+ def send(self, body, title='', notify_type=NotifyType.INFO, **kwargs):
+ """
+ Perform Nextcloud Notification
+ """
+
+ # Prepare our Header
+ headers = {
+ 'User-Agent': self.app_id,
+ 'OCS-APIREQUEST': 'true',
+ }
+
+ # Apply any/all header over-rides defined
+ headers.update(self.headers)
+
+ # error tracking (used for function return)
+ has_error = False
+
+ # Create a copy of the targets list
+ targets = list(self.targets)
+ while len(targets):
+ target = targets.pop(0)
+
+ # Prepare our Payload
+ payload = {
+ 'shortMessage': title if title else self.app_desc,
+ }
+ if body:
+ # Only store the longMessage if a body was defined; nextcloud
+ # doesn't take kindly to empty longMessage entries.
+ payload['longMessage'] = body
+
+ auth = None
+ if self.user:
+ auth = (self.user, self.password)
+
+ notify_url = self.notify_url.format(
+ schema='https' if self.secure else 'http',
+ host=self.host if not isinstance(self.port, int)
+ else '{}:{}'.format(self.host, self.port),
+ target=target,
+ )
+
+ self.logger.debug('Nextcloud POST URL: %s (cert_verify=%r)' % (
+ notify_url, self.verify_certificate,
+ ))
+ self.logger.debug('Nextcloud Payload: %s' % str(payload))
+
+ # Always call throttle before any remote server i/o is made
+ self.throttle()
+
+ try:
+ r = requests.post(
+ notify_url,
+ data=payload,
+ headers=headers,
+ auth=auth,
+ verify=self.verify_certificate,
+ )
+ if r.status_code != requests.codes.ok:
+ # We had a problem
+ status_str = \
+ NotifyNextcloud.http_response_code_lookup(
+ r.status_code)
+
+ self.logger.warning(
+ 'Failed to send Nextcloud notification: '
+ '{}{}error={}.'.format(
+ status_str,
+ ', ' if status_str else '',
+ r.status_code))
+
+ self.logger.debug(
+ 'Response Details:\r\n{}'.format(r.content))
+ # track our failure
+ has_error = True
+ continue
+
+ else:
+ self.logger.info('Sent Nextcloud notification.')
+
+ except requests.RequestException as e:
+ self.logger.warning(
+ 'A Connection error occurred sending Nextcloud '
+ 'notification.',
+ )
+ self.logger.debug('Socket Exception: %s' % str(e))
+
+ # track our failure
+ has_error = True
+ continue
+
+ return not has_error
+
+ def url(self, privacy=False, *args, **kwargs):
+ """
+ Returns the URL built dynamically based on specified arguments.
+ """
+
+ # Define any arguments set
+ args = {
+ 'format': self.notify_format,
+ 'overflow': self.overflow_mode,
+ 'verify': 'yes' if self.verify_certificate else 'no',
+ }
+
+ # Append our headers into our args
+ args.update({'+{}'.format(k): v for k, v in self.headers.items()})
+
+ # Determine Authentication
+ auth = ''
+ if self.user and self.password:
+ auth = '{user}:{password}@'.format(
+ user=NotifyNextcloud.quote(self.user, safe=''),
+ password=self.pprint(
+ self.password, privacy, mode=PrivacyMode.Secret, safe=''),
+ )
+ elif self.user:
+ auth = '{user}@'.format(
+ user=NotifyNextcloud.quote(self.user, safe=''),
+ )
+
+ default_port = 443 if self.secure else 80
+
+ return '{schema}://{auth}{hostname}{port}/{targets}?{args}' \
+ .format(
+ schema=self.secure_protocol
+ if self.secure else self.protocol,
+ auth=auth,
+ hostname=NotifyNextcloud.quote(self.host, safe=''),
+ port='' if self.port is None or self.port == default_port
+ else ':{}'.format(self.port),
+ targets='/'.join([NotifyNextcloud.quote(x)
+ for x in self.targets]),
+ args=NotifyNextcloud.urlencode(args),
+ )
+
+ @staticmethod
+ def parse_url(url):
+ """
+ Parses the URL and returns enough arguments that can allow
+ us to substantiate this object.
+
+ """
+
+ results = NotifyBase.parse_url(url)
+ if not results:
+ # We're done early as we couldn't load the results
+ return results
+
+ # Fetch our targets
+ results['targets'] = \
+ NotifyNextcloud.split_path(results['fullpath'])
+
+ # The 'to' makes it easier to use yaml configuration
+ if 'to' in results['qsd'] and len(results['qsd']['to']):
+ results['targets'] += \
+ NotifyNextcloud.parse_list(results['qsd']['to'])
+
+ # Add our headers, which the user can potentially over-ride if they
+ # wish, to our returned result set
+ results['headers'] = results['qsd-']
+ results['headers'].update(results['qsd+'])
+
+ return results
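+
+
+# Rough sketch of what send() above posts for a URL such as
+# ncloud://admin:[email protected]/someuser (all values are illustrative
+# placeholders):
+#
+#   import requests
+#   requests.post(
+#       'http://cloud.local/ocs/v2.php/apps/admin_notifications/'
+#       'api/v1/notifications/someuser',
+#       data={'shortMessage': 'title', 'longMessage': 'body'},
+#       headers={'OCS-APIREQUEST': 'true'},
+#       auth=('admin', 'pass'),
+#   )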
diff --git a/libs/apprise/plugins/NotifyPushBullet.py b/libs/apprise/plugins/NotifyPushBullet.py
index af239c40c..4a3dd8494 100644
--- a/libs/apprise/plugins/NotifyPushBullet.py
+++ b/libs/apprise/plugins/NotifyPushBullet.py
@@ -147,6 +147,19 @@ class NotifyPushBullet(NotifyBase):
# We need to upload our payload first so that we can source it
# in remaining messages
for attachment in attach:
+
+ # Perform some simple error checking
+ if not attachment:
+ # We could not access the attachment
+ self.logger.error(
+ 'Could not access attachment {}.'.format(
+ attachment.url(privacy=True)))
+ return False
+
+ self.logger.debug(
+ 'Preparing PushBullet attachment {}'.format(
+ attachment.url(privacy=True)))
+
# prepare payload
payload = {
'file_name': attachment.name,
@@ -253,7 +266,7 @@ class NotifyPushBullet(NotifyBase):
continue
self.logger.info(
- 'Sent PushBullet attachment (%s) to "%s".' % (
+ 'Sent PushBullet attachment ({}) to "{}".'.format(
attach_payload['file_name'], recipient))
return not has_error
diff --git a/libs/apprise/plugins/NotifyPushSafer.py b/libs/apprise/plugins/NotifyPushSafer.py
new file mode 100644
index 000000000..8e056087e
--- /dev/null
+++ b/libs/apprise/plugins/NotifyPushSafer.py
@@ -0,0 +1,832 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2019 Chris Caron <[email protected]>
+# All rights reserved.
+#
+# This code is licensed under the MIT License.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files(the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions :
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+
+# We use io because it allows us to test the open() call
+import io
+import base64
+import requests
+from json import loads
+
+from .NotifyBase import NotifyBase
+from ..common import NotifyType
+from ..utils import parse_list
+from ..utils import validate_regex
+from ..AppriseLocale import gettext_lazy as _
+
+
+class PushSaferSound(object):
+ """
+ Defines all of the supported PushSafer sounds
+ """
+ # Silent
+ SILENT = 0
+ # Ahem (IM)
+ AHEM = 1
+ # Applause (Mail)
+ APPLAUSE = 2
+ # Arrow (Reminder)
+ ARROW = 3
+ # Baby (SMS)
+ BABY = 4
+ # Bell (Alarm)
+ BELL = 5
+ # Bicycle (Alarm2)
+ BICYCLE = 6
+ # Boing (Alarm3)
+ BOING = 7
+ # Buzzer (Alarm4)
+ BUZZER = 8
+ # Camera (Alarm5)
+ CAMERA = 9
+ # Car Horn (Alarm6)
+ CAR_HORN = 10
+ # Cash Register (Alarm7)
+ CASH_REGISTER = 11
+ # Chime (Alarm8)
+ CHIME = 12
+ # Creaky Door (Alarm9)
+ CREAKY_DOOR = 13
+ # Cuckoo Clock (Alarm10)
+ CUCKOO_CLOCK = 14
+ # Disconnect (Call)
+ DISCONNECT = 15
+ # Dog (Call2)
+ DOG = 16
+ # Doorbell (Call3)
+ DOORBELL = 17
+ # Fanfare (Call4)
+ FANFARE = 18
+ # Gun Shot (Call5)
+ GUN_SHOT = 19
+ # Honk (Call6)
+ HONK = 20
+ # Jaw Harp (Call7)
+ JAW_HARP = 21
+ # Morse (Call8)
+ MORSE = 22
+ # Electricity (Call9)
+ ELECTRICITY = 23
+ # Radio Tuner (Call10)
+ RADIO_TURNER = 24
+ # Sirens
+ SIRENS = 25
+ # Military Trumpets
+ MILITARY_TRUMPETS = 26
+ # Ufo
+ UFO = 27
+ # Whah Whah Whah
+ LONG_WHAH = 28
+ # Man Saying Goodbye
+ GOODBYE = 29
+ # Man Saying Hello
+ HELLO = 30
+ # Man Saying No
+ NO = 31
+ # Man Saying Ok
+ OKAY = 32
+ # Man Saying Ooohhhweee
+ OOOHHHWEEE = 33
+ # Man Saying Warning
+ WARNING = 34
+ # Man Saying Welcome
+ WELCOME = 35
+ # Man Saying Yeah
+ YEAH = 36
+ # Man Saying Yes
+ YES = 37
+ # Beep short
+ BEEP1 = 38
+ # Weeeee short
+ WEEE = 39
+ # Cut in and out short
+ CUTINOUT = 40
+ # Finger flicking glass short
+ FLICK_GLASS = 41
+ # Wa Wa Waaaa short
+ SHORT_WHAH = 42
+ # Laser short
+ LASER = 43
+ # Wind Chime short
+ WIND_CHIME = 44
+ # Echo short
+ ECHO = 45
+ # Zipper short
+ ZIPPER = 46
+ # HiHat short
+ HIHAT = 47
+ # Beep 2 short
+ BEEP2 = 48
+ # Beep 3 short
+ BEEP3 = 49
+ # Beep 4 short
+ BEEP4 = 50
+ # The Alarm is armed
+ ALARM_ARMED = 51
+ # The Alarm is disarmed
+ ALARM_DISARMED = 52
+ # The Backup is ready
+ BACKUP_READY = 53
+ # The Door is closed
+ DOOR_CLOSED = 54
+ # The Door is opened
+ DOOR_OPENED = 55
+ # The Window is closed
+ WINDOW_CLOSED = 56
+ # The Window is open
+ WINDOW_OPEN = 57
+ # The Light is on
+ LIGHT_ON = 58
+ # The Light is off
+ LIGHT_OFF = 59
+ # The Doorbell rings
+ DOORBELL_RANG = 60
+
+
+PUSHSAFER_SOUND_MAP = {
+ # Device Default,
+ 'silent': PushSaferSound.SILENT,
+ 'ahem': PushSaferSound.AHEM,
+ 'applause': PushSaferSound.APPLAUSE,
+ 'arrow': PushSaferSound.ARROW,
+ 'baby': PushSaferSound.BABY,
+ 'bell': PushSaferSound.BELL,
+ 'bicycle': PushSaferSound.BICYCLE,
+ 'bike': PushSaferSound.BICYCLE,
+ 'boing': PushSaferSound.BOING,
+ 'buzzer': PushSaferSound.BUZZER,
+ 'camera': PushSaferSound.CAMERA,
+ 'carhorn': PushSaferSound.CAR_HORN,
+ 'horn': PushSaferSound.CAR_HORN,
+ 'cashregister': PushSaferSound.CASH_REGISTER,
+ 'chime': PushSaferSound.CHIME,
+ 'creakydoor': PushSaferSound.CREAKY_DOOR,
+ 'cuckooclock': PushSaferSound.CUCKOO_CLOCK,
+ 'cuckoo': PushSaferSound.CUCKOO_CLOCK,
+ 'disconnect': PushSaferSound.DISCONNECT,
+ 'dog': PushSaferSound.DOG,
+ 'doorbell': PushSaferSound.DOORBELL,
+ 'fanfare': PushSaferSound.FANFARE,
+ 'gunshot': PushSaferSound.GUN_SHOT,
+ 'honk': PushSaferSound.HONK,
+ 'jawharp': PushSaferSound.JAW_HARP,
+ 'morse': PushSaferSound.MORSE,
+ 'electric': PushSaferSound.ELECTRICITY,
+ 'radiotuner': PushSaferSound.RADIO_TURNER,
+ 'sirens': PushSaferSound.SIRENS,
+ 'militarytrumpets': PushSaferSound.MILITARY_TRUMPETS,
+ 'military': PushSaferSound.MILITARY_TRUMPETS,
+ 'trumpets': PushSaferSound.MILITARY_TRUMPETS,
+ 'ufo': PushSaferSound.UFO,
+ 'whahwhah': PushSaferSound.LONG_WHAH,
+ 'whah': PushSaferSound.SHORT_WHAH,
+ 'goodbye': PushSaferSound.GOODBYE,
+ 'hello': PushSaferSound.HELLO,
+ 'no': PushSaferSound.NO,
+ 'okay': PushSaferSound.OKAY,
+ 'ok': PushSaferSound.OKAY,
+ 'ooohhhweee': PushSaferSound.OOOHHHWEEE,
+ 'warn': PushSaferSound.WARNING,
+ 'warning': PushSaferSound.WARNING,
+ 'welcome': PushSaferSound.WELCOME,
+ 'yeah': PushSaferSound.YEAH,
+ 'yes': PushSaferSound.YES,
+ 'beep': PushSaferSound.BEEP1,
+ 'beep1': PushSaferSound.BEEP1,
+ 'weee': PushSaferSound.WEEE,
+ 'wee': PushSaferSound.WEEE,
+ 'cutinout': PushSaferSound.CUTINOUT,
+ 'flickglass': PushSaferSound.FLICK_GLASS,
+ 'laser': PushSaferSound.LASER,
+ 'windchime': PushSaferSound.WIND_CHIME,
+ 'echo': PushSaferSound.ECHO,
+ 'zipper': PushSaferSound.ZIPPER,
+ 'hihat': PushSaferSound.HIHAT,
+ 'beep2': PushSaferSound.BEEP2,
+ 'beep3': PushSaferSound.BEEP3,
+ 'beep4': PushSaferSound.BEEP4,
+ 'alarmarmed': PushSaferSound.ALARM_ARMED,
+ 'armed': PushSaferSound.ALARM_ARMED,
+ 'alarmdisarmed': PushSaferSound.ALARM_DISARMED,
+ 'disarmed': PushSaferSound.ALARM_DISARMED,
+ 'backupready': PushSaferSound.BACKUP_READY,
+ 'dooropen': PushSaferSound.DOOR_OPENED,
+ 'dopen': PushSaferSound.DOOR_OPENED,
+ 'doorclosed': PushSaferSound.DOOR_CLOSED,
+ 'dclosed': PushSaferSound.DOOR_CLOSED,
+ 'windowopen': PushSaferSound.WINDOW_OPEN,
+ 'wopen': PushSaferSound.WINDOW_OPEN,
+ 'windowclosed': PushSaferSound.WINDOW_CLOSED,
+ 'wclosed': PushSaferSound.WINDOW_CLOSED,
+ 'lighton': PushSaferSound.LIGHT_ON,
+ 'lon': PushSaferSound.LIGHT_ON,
+ 'lightoff': PushSaferSound.LIGHT_OFF,
+ 'loff': PushSaferSound.LIGHT_OFF,
+ 'doorbellrang': PushSaferSound.DOORBELL_RANG,
+}
+
+
+# Priorities
+class PushSaferPriority(object):
+ LOW = -2
+ MODERATE = -1
+ NORMAL = 0
+ HIGH = 1
+ EMERGENCY = 2
+
+
+PUSHSAFER_PRIORITIES = (
+ PushSaferPriority.LOW,
+ PushSaferPriority.MODERATE,
+ PushSaferPriority.NORMAL,
+ PushSaferPriority.HIGH,
+ PushSaferPriority.EMERGENCY,
+)
+
+PUSHSAFER_PRIORITY_MAP = {
+ # short for 'low'
+ 'low': PushSaferPriority.LOW,
+ # short for 'medium'
+ 'medium': PushSaferPriority.MODERATE,
+ # short for 'normal'
+ 'normal': PushSaferPriority.NORMAL,
+ # short for 'high'
+ 'high': PushSaferPriority.HIGH,
+ # short for 'emergency'
+ 'emergency': PushSaferPriority.EMERGENCY,
+}
+
+ # Identify the priority you want to designate as the fallback
+DEFAULT_PRIORITY = "normal"
+
+
+# Vibrations
+class PushSaferVibration(object):
+ """
+ Defines the acceptable vibration settings for notification
+ """
+ # x1
+ LOW = 1
+ # x2
+ NORMAL = 2
+ # x3
+ HIGH = 3
+
+
+# Identify all of the vibrations in one place
+PUSHSAFER_VIBRATIONS = (
+ PushSaferVibration.LOW,
+ PushSaferVibration.NORMAL,
+ PushSaferVibration.HIGH,
+)
+
+# At this time, the following pictures can be attached to each notification
+# at one time. When more are supported, just add their argument below
+PICTURE_PARAMETER = (
+ 'p',
+ 'p2',
+ 'p3',
+)
+
+
+# Flag used as a placeholder to sending to all devices
+PUSHSAFER_SEND_TO_ALL = 'a'
+
+
+class NotifyPushSafer(NotifyBase):
+ """
+ A wrapper for PushSafer Notifications
+ """
+
+ # The default descriptive name associated with the Notification
+ service_name = 'Pushsafer'
+
+ # The services URL
+ service_url = 'https://www.pushsafer.com/'
+
+ # The default insecure protocol
+ protocol = 'psafer'
+
+ # The default secure protocol
+ secure_protocol = 'psafers'
+
+ # Number of requests to allow per second
+ request_rate_per_sec = 1.2
+
+ # The icon ID of 25 looks like a megaphone
+ default_pushsafer_icon = 25
+
+ # A URL that takes you to the setup/help of the specific protocol
+ setup_url = 'https://github.com/caronc/apprise/wiki/Notify_pushsafer'
+
+ # Defines the hostname to post content to; since this service supports
+ # both insecure and secure methods, we set the {schema} just before we
+ # post the message upstream.
+ notify_url = '{schema}://www.pushsafer.com/api'
+
+ # Define object templates
+ templates = (
+ '{schema}://{privatekey}',
+ '{schema}://{privatekey}/{targets}',
+ )
+
+ # Define our template tokens
+ template_tokens = dict(NotifyBase.template_tokens, **{
+ 'privatekey': {
+ 'name': _('Private Key'),
+ 'type': 'string',
+ 'private': True,
+ 'required': True,
+ },
+ 'target_device': {
+ 'name': _('Target Device'),
+ 'type': 'string',
+ 'map_to': 'targets',
+ },
+ 'target_email': {
+ 'name': _('Target Email'),
+ 'type': 'string',
+ 'map_to': 'targets',
+ },
+ 'targets': {
+ 'name': _('Targets'),
+ 'type': 'list:string',
+ },
+ })
+
+ # Define our template arguments
+ template_args = dict(NotifyBase.template_args, **{
+ 'priority': {
+ 'name': _('Priority'),
+ 'type': 'choice:int',
+ 'values': PUSHSAFER_PRIORITIES,
+ },
+ 'sound': {
+ 'name': _('Sound'),
+ 'type': 'choice:string',
+ 'values': PUSHSAFER_SOUND_MAP,
+ },
+ 'vibration': {
+ 'name': _('Vibration'),
+ 'type': 'choice:int',
+ 'values': PUSHSAFER_VIBRATIONS,
+ },
+ 'to': {
+ 'alias_of': 'targets',
+ },
+ })
+
+ def __init__(self, privatekey, targets=None, priority=None, sound=None,
+ vibration=None, **kwargs):
+ """
+ Initialize PushSafer Object
+ """
+ super(NotifyPushSafer, self).__init__(**kwargs)
+
+ #
+ # Priority
+ #
+ try:
+ # Acquire our priority if we can:
+ # - We accept both the integer form as well as a string
+ # representation
+ self.priority = int(priority)
+
+ except TypeError:
+ # NoneType means use Default; this is an okay exception
+ self.priority = None
+
+ except ValueError:
+ # Input is a string; attempt to get the lookup from our
+ # priority mapping
+ priority = priority.lower().strip()
+
+ # This little bit of black magic allows us to match against
+ # low, lo, l (for low);
+ # normal, norma, norm, nor, no, n (for normal)
+ # ... etc
+ match = next((key for key in PUSHSAFER_PRIORITY_MAP.keys()
+ if key.startswith(priority)), None) \
+ if priority else None
+
+ # Now test to see if we got a match
+ if not match:
+ msg = 'An invalid PushSafer priority ' \
+ '({}) was specified.'.format(priority)
+ self.logger.warning(msg)
+ raise TypeError(msg)
+
+ # store our successfully looked up priority
+ self.priority = PUSHSAFER_PRIORITY_MAP[match]
+
+ if self.priority is not None and \
+ self.priority not in PUSHSAFER_PRIORITY_MAP.values():
+ msg = 'An invalid PushSafer priority ' \
+ '({}) was specified.'.format(priority)
+ self.logger.warning(msg)
+ raise TypeError(msg)
+
+ #
+ # Sound
+ #
+ try:
+ # Acquire our sound if we can:
+ # - We accept both the integer form as well as a string
+ # representation
+ self.sound = int(sound)
+
+ except TypeError:
+ # NoneType means use Default; this is an okay exception
+ self.sound = None
+
+ except ValueError:
+ # Input is a string; attempt to get the lookup from our
+ # sound mapping
+ sound = sound.lower().strip()
+
+ # This little bit of black magic allows us to match against
+ # multiple versions of the same string
+ # ... etc
+ match = next((key for key in PUSHSAFER_SOUND_MAP.keys()
+ if key.startswith(sound)), None) \
+ if sound else None
+
+ # Now test to see if we got a match
+ if not match:
+ msg = 'An invalid PushSafer sound ' \
+ '({}) was specified.'.format(sound)
+ self.logger.warning(msg)
+ raise TypeError(msg)
+
+ # store our successfully looked up sound
+ self.sound = PUSHSAFER_SOUND_MAP[match]
+
+ if self.sound is not None and \
+ self.sound not in PUSHSAFER_SOUND_MAP.values():
+ msg = 'An invalid PushSafer sound ' \
+ '({}) was specified.'.format(sound)
+ self.logger.warning(msg)
+ raise TypeError(msg)
+
+ #
+ # Vibration
+ #
+ try:
+ # Use defined integer as is if defined, no further error checking
+ # is performed
+ self.vibration = int(vibration)
+
+ except TypeError:
+ # NoneType means use Default; this is an okay exception
+ self.vibration = None
+
+ except ValueError:
+ msg = 'An invalid PushSafer vibration ' \
+ '({}) was specified.'.format(vibration)
+ self.logger.warning(msg)
+ raise TypeError(msg)
+
+ if self.vibration and self.vibration not in PUSHSAFER_VIBRATIONS:
+ msg = 'An invalid PushSafer vibration ' \
+ '({}) was specified.'.format(vibration)
+ self.logger.warning(msg)
+ raise TypeError(msg)
+
+ #
+ # Private Key (associated with project)
+ #
+ self.privatekey = validate_regex(privatekey)
+ if not self.privatekey:
+ msg = 'An invalid PushSafer Private Key ' \
+ '({}) was specified.'.format(privatekey)
+ self.logger.warning(msg)
+ raise TypeError(msg)
+
+ self.targets = parse_list(targets)
+ if len(self.targets) == 0:
+ self.targets = (PUSHSAFER_SEND_TO_ALL, )
+
+ return
+
+ def send(self, body, title='', notify_type=NotifyType.INFO, attach=None,
+ **kwargs):
+ """
+ Perform PushSafer Notification
+ """
+
+ # error tracking (used for function return)
+ has_error = False
+
+ # Initialize our list of attachments
+ attachments = []
+
+ if attach:
+ # We need to upload our payload first so that we can source it
+ # in remaining messages
+ for attachment in attach:
+ # prepare payload
+ if not attachment:
+ # We could not access the attachment
+ self.logger.error(
+ 'Could not access attachment {}.'.format(
+ attachment.url(privacy=True)))
+ return False
+
+ if not attachment.mimetype.startswith('image/'):
+ # Attachment not supported; continue peacefully
+ self.logger.debug(
+ 'Ignoring unsupported PushSafer attachment {}.'.format(
+ attachment.url(privacy=True)))
+ continue
+
+ self.logger.debug(
+ 'Posting PushSafer attachment {}'.format(
+ attachment.url(privacy=True)))
+
+ try:
+ with io.open(attachment.path, 'rb') as f:
+ # Output must be in a DataURL format (that's what
+ # PushSafer calls it):
+ attachment = (
+ attachment.name,
+ 'data:{};base64,{}'.format(
+ attachment.mimetype,
+ base64.b64encode(f.read())))
+
+ except (OSError, IOError) as e:
+ self.logger.warning(
+ 'An I/O error occurred while reading {}.'.format(
+ attachment.name if attachment else 'attachment'))
+ self.logger.debug('I/O Exception: %s' % str(e))
+ return False
+
+ # Save our pre-prepared payload for attachment posting
+ attachments.append(attachment)
+
+ # Create a copy of the targets list
+ targets = list(self.targets)
+ while len(targets):
+ recipient = targets.pop(0)
+
+ # prepare payload
+ payload = {
+ 't': title,
+ 'm': body,
+ # Our default icon to use
+ 'i': self.default_pushsafer_icon,
+ # Notification Color
+ 'c': self.color(notify_type),
+ # Target Recipient
+ 'd': recipient,
+ }
+
+ if self.sound is not None:
+ # Only apply sound setting if it was specified
+ payload['s'] = str(self.sound)
+
+ if self.vibration is not None:
+ # Only apply vibration setting
+ payload['v'] = str(self.vibration)
+
+ if not attachments:
+ okay, response = self._send(payload)
+ if not okay:
+ has_error = True
+ continue
+
+ self.logger.info(
+ 'Sent PushSafer notification to "%s".' % (recipient))
+
+ else:
+ # Create a copy of our payload object
+ _payload = payload.copy()
+
+ for idx in range(
+ 0, len(attachments), len(PICTURE_PARAMETER)):
+ # Send our attachments to our same user (already prepared
+ # as our payload object)
+ for c, attachment in enumerate(
+ attachments[idx:idx + len(PICTURE_PARAMETER)]):
+
+ # Get our attachment information
+ filename, dataurl = attachment
+ _payload.update({PICTURE_PARAMETER[c]: dataurl})
+
+ self.logger.debug(
+ 'Added attachment (%s) to "%s".' % (
+ filename, recipient))
+
+ okay, response = self._send(_payload)
+ if not okay:
+ has_error = True
+ continue
+
+ self.logger.info(
+ 'Sent PushSafer attachment (%s) to "%s".' % (
+ filename, recipient))
+
+ # Exceeding the maximum attachments per message shouldn't cause
+ # all of the text to repeat on future iterations
+ _payload = payload.copy()
+ _payload['t'] = ''
+ _payload['m'] = '...'
+
+ return not has_error
+
+ def _send(self, payload, **kwargs):
+ """
+ Wrapper to the requests (post) object
+ """
+
+ headers = {
+ 'User-Agent': self.app_id,
+ }
+
+ # Prepare the notification URL to post to
+ notify_url = self.notify_url.format(
+ schema='https' if self.secure else 'http'
+ )
+
+ # Store the payload key
+ payload['k'] = self.privatekey
+
+ self.logger.debug('PushSafer POST URL: %s (cert_verify=%r)' % (
+ notify_url, self.verify_certificate,
+ ))
+ self.logger.debug('PushSafer Payload: %s' % str(payload))
+
+ # Always call throttle before any remote server i/o is made
+ self.throttle()
+
+ # Default response type
+ response = None
+
+ # Initialize our Pushsafer expected responses
+ _code = None
+ _str = 'Unknown'
+
+ try:
+ # Open our attachment path if required:
+ r = requests.post(
+ notify_url,
+ data=payload,
+ headers=headers,
+ verify=self.verify_certificate,
+ )
+
+ try:
+ response = loads(r.content)
+ _code = response.get('status')
+ _str = response.get('success', _str) \
+ if _code == 1 else response.get('error', _str)
+
+ except (AttributeError, TypeError, ValueError):
+ # ValueError = r.content is Unparsable
+ # TypeError = r.content is None
+ # AttributeError = r is None
+
+ # Fall back to the existing unparsed value
+ response = r.content
+
+ if r.status_code not in (
+ requests.codes.ok, requests.codes.no_content):
+ # We had a problem
+ status_str = \
+ NotifyPushSafer.http_response_code_lookup(
+ r.status_code)
+
+ self.logger.warning(
+ 'Failed to deliver payload to PushSafer: '
+ '{}{}error={}.'.format(
+ status_str,
+ ', ' if status_str else '',
+ r.status_code))
+
+ self.logger.debug(
+ 'Response Details:\r\n{}'.format(r.content))
+
+ return False, response
+
+ elif _code != 1:
+ # It's a bit backwards, but:
+ # 1 is returned if we succeed
+ # 0 is returned if we fail
+ self.logger.warning(
+ 'Failed to deliver payload to PushSafer;'
+ ' error={}.'.format(_str))
+
+ self.logger.debug(
+ 'Response Details:\r\n{}'.format(r.content))
+
+ return False, response
+
+ # otherwise we were successful
+ return True, response
+
+ except requests.RequestException as e:
+ self.logger.warning(
+ 'A Connection error occurred communicating with PushSafer.')
+ self.logger.debug('Socket Exception: %s' % str(e))
+
+ return False, response
+
+ def url(self, privacy=False, *args, **kwargs):
+ """
+ Returns the URL built dynamically based on specified arguments.
+ """
+
+ # Define any arguments set
+ args = {
+ 'format': self.notify_format,
+ 'overflow': self.overflow_mode,
+ 'verify': 'yes' if self.verify_certificate else 'no',
+ }
+
+ if self.priority is not None:
+ # Store our priority; but only if it was specified
+ args['priority'] = \
+ next((key for key, value in PUSHSAFER_PRIORITY_MAP.items()
+ if value == self.priority),
+ DEFAULT_PRIORITY) # pragma: no cover
+
+ if self.sound is not None:
+ # Store our sound; but only if it was specified
+ args['sound'] = \
+ next((key for key, value in PUSHSAFER_SOUND_MAP.items()
+ if value == self.sound), '') # pragma: no cover
+
+ if self.vibration is not None:
+ # Store our vibration; but only if it was specified
+ args['vibration'] = str(self.vibration)
+
+ targets = '/'.join([NotifyPushSafer.quote(x) for x in self.targets])
+ if targets == PUSHSAFER_SEND_TO_ALL:
+ # keyword is reserved for internal usage only; it's safe to remove
+ # it from the recipients list
+ targets = ''
+
+ return '{schema}://{privatekey}/{targets}?{args}'.format(
+ schema=self.secure_protocol if self.secure else self.protocol,
+ privatekey=self.pprint(self.privatekey, privacy, safe=''),
+ targets=targets,
+ args=NotifyPushSafer.urlencode(args))
+
+ @staticmethod
+ def parse_url(url):
+ """
+ Parses the URL and returns enough arguments that can allow
+ us to substantiate this object.
+
+ """
+ results = NotifyBase.parse_url(url)
+ if not results:
+ # We're done early as we couldn't load the results
+ return results
+
+ # Fetch our targets
+ results['targets'] = \
+ NotifyPushSafer.split_path(results['fullpath'])
+
+ # The 'to' makes it easier to use yaml configuration
+ if 'to' in results['qsd'] and len(results['qsd']['to']):
+ results['targets'] += \
+ NotifyPushSafer.parse_list(results['qsd']['to'])
+
+ # Setup the token; we store it in Private Key for global
+ # plugin consistency with naming conventions
+ results['privatekey'] = NotifyPushSafer.unquote(results['host'])
+
+ if 'priority' in results['qsd'] and len(results['qsd']['priority']):
+ results['priority'] = \
+ NotifyPushSafer.unquote(results['qsd']['priority'])
+
+ if 'sound' in results['qsd'] and len(results['qsd']['sound']):
+ results['sound'] = \
+ NotifyPushSafer.unquote(results['qsd']['sound'])
+
+ if 'vibration' in results['qsd'] and len(results['qsd']['vibration']):
+ results['vibration'] = \
+ NotifyPushSafer.unquote(results['qsd']['vibration'])
+
+ return results
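For reference, a minimal sketch (not part of the patch) of how the parse_url() handling above maps a PushSafer URL onto constructor arguments; the 'psafer' schema, the private key and the target names are illustrative placeholders.

    from apprise.plugins.NotifyPushSafer import NotifyPushSafer

    results = NotifyPushSafer.parse_url(
        'psafer://myprivatekey/device1/device2?priority=1&sound=25&vibration=2')

    # Per the handling above:
    #   results['privatekey'] -> 'myprivatekey'
    #   results['targets']    -> ['device1', 'device2']
    #   results['priority'], results['sound'] and results['vibration']
    #   hold the unquoted query-string values.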
diff --git a/libs/apprise/plugins/NotifyPushed.py b/libs/apprise/plugins/NotifyPushed.py
index 35e390d70..d9428393d 100644
--- a/libs/apprise/plugins/NotifyPushed.py
+++ b/libs/apprise/plugins/NotifyPushed.py
@@ -68,7 +68,7 @@ class NotifyPushed(NotifyBase):
title_maxlen = 0
# The maximum allowable characters allowed in the body per message
- body_maxlen = 140
+ body_maxlen = 160
# Define object templates
templates = (
diff --git a/libs/apprise/plugins/NotifyPushover.py b/libs/apprise/plugins/NotifyPushover.py
index 58fb63cb6..48bcb786f 100644
--- a/libs/apprise/plugins/NotifyPushover.py
+++ b/libs/apprise/plugins/NotifyPushover.py
@@ -32,6 +32,7 @@ from ..common import NotifyType
from ..utils import parse_list
from ..utils import validate_regex
from ..AppriseLocale import gettext_lazy as _
+from ..attachment.AttachBase import AttachBase
# Flag used as a placeholder to sending to all devices
PUSHOVER_SEND_TO_ALL = 'ALL_DEVICES'
@@ -140,6 +141,14 @@ class NotifyPushover(NotifyBase):
# Default Pushover sound
default_pushover_sound = PushoverSound.PUSHOVER
+ # 2.5MB is the maximum supported image filesize as per documentation
+ # here: https://pushover.net/api#attachments (Dec 26th, 2019)
+ attach_max_size_bytes = 2621440
+
+ # The regular expression of the current attachment supported mime types
+ # At this time it is only images
+ attach_supported_mime_type = r'^image/.*'
+
# Define object templates
templates = (
'{schema}://{user_key}@{token}',
@@ -281,17 +290,12 @@ class NotifyPushover(NotifyBase):
raise TypeError(msg)
return
- def send(self, body, title='', notify_type=NotifyType.INFO, **kwargs):
+ def send(self, body, title='', notify_type=NotifyType.INFO, attach=None,
+ **kwargs):
"""
Perform Pushover Notification
"""
- headers = {
- 'User-Agent': self.app_id,
- 'Content-Type': 'application/x-www-form-urlencoded'
- }
- auth = (self.token, '')
-
# error tracking (used for function return)
has_error = False
@@ -314,7 +318,7 @@ class NotifyPushover(NotifyBase):
'token': self.token,
'user': self.user_key,
'priority': str(self.priority),
- 'title': title,
+ 'title': title if title else self.app_desc,
'message': body,
'device': device,
'sound': self.sound,
@@ -323,59 +327,161 @@ class NotifyPushover(NotifyBase):
if self.priority == PushoverPriority.EMERGENCY:
payload.update({'retry': self.retry, 'expire': self.expire})
- self.logger.debug('Pushover POST URL: %s (cert_verify=%r)' % (
- self.notify_url, self.verify_certificate,
- ))
- self.logger.debug('Pushover Payload: %s' % str(payload))
-
- # Always call throttle before any remote server i/o is made
- self.throttle()
-
- try:
- r = requests.post(
- self.notify_url,
- data=payload,
- headers=headers,
- auth=auth,
- verify=self.verify_certificate,
- )
- if r.status_code != requests.codes.ok:
- # We had a problem
- status_str = \
- NotifyPushover.http_response_code_lookup(
- r.status_code, PUSHOVER_HTTP_ERROR_MAP)
-
- self.logger.warning(
- 'Failed to send Pushover notification to {}: '
- '{}{}error={}.'.format(
- device,
- status_str,
- ', ' if status_str else '',
- r.status_code))
-
- self.logger.debug(
- 'Response Details:\r\n{}'.format(r.content))
-
+ if attach:
+ # Create a copy of our payload
+ _payload = payload.copy()
+
+ # Send with attachments
+ for attachment in attach:
+ # Simple send
+ if not self._send(_payload, attachment):
+ # Mark our failure
+ has_error = True
+ # clean exit from our attachment loop
+ break
+
+ # To handle multiple attachments, clean up our message
+ _payload['title'] = '...'
+ _payload['message'] = attachment.name
+ # No need to alarm for each consecutive attachment uploaded
+ # afterwards
+ _payload['sound'] = PushoverSound.NONE
+
+ else:
+ # Simple send
+ if not self._send(payload):
# Mark our failure
has_error = True
- continue
- else:
- self.logger.info(
- 'Sent Pushover notification to %s.' % device)
+ return not has_error
+
+ def _send(self, payload, attach=None):
+ """
+ Wrapper around the requests (post) call
+ """
- except requests.RequestException as e:
+ if isinstance(attach, AttachBase):
+ # Perform some simple error checking
+ if not attach:
+ # We could not access the attachment
+ self.logger.error(
+ 'Could not access attachment {}.'.format(
+ attach.url(privacy=True)))
+ return False
+
+ # Perform some basic checks as we want to gracefully skip
+ # over unsupported mime types.
+ if not re.match(
+ self.attach_supported_mime_type,
+ attach.mimetype,
+ re.I):
+ # No problem; we just don't support this attachment
+ # type; gracefully move along
+ self.logger.debug(
+ 'Ignored unsupported Pushover attachment ({}): {}'
+ .format(
+ attach.mimetype,
+ attach.url(privacy=True)))
+
+ return True
+
+ # If we get here, we're dealing with a supported image.
+ # Verify that the filesize is okay though.
+ file_size = len(attach)
+ if not (file_size > 0
+ and file_size <= self.attach_max_size_bytes):
+
+ # File size is no good
self.logger.warning(
- 'A Connection error occured sending Pushover:%s ' % (
- device) + 'notification.'
- )
- self.logger.debug('Socket Exception: %s' % str(e))
+ 'Pushover attachment size ({}B) exceeds limit: {}'
+ .format(file_size, attach.url(privacy=True)))
- # Mark our failure
- has_error = True
- continue
+ return False
- return not has_error
+ self.logger.debug(
+ 'Posting Pushover attachment {}'.format(
+ attach.url(privacy=True)))
+
+ # Default Header
+ headers = {
+ 'User-Agent': self.app_id,
+ }
+
+ # Authentication
+ auth = (self.token, '')
+
+ # Some default values for our request object which we'll update
+ # depending on what our payload is
+ files = None
+
+ self.logger.debug('Pushover POST URL: %s (cert_verify=%r)' % (
+ self.notify_url, self.verify_certificate,
+ ))
+ self.logger.debug('Pushover Payload: %s' % str(payload))
+
+ # Always call throttle before any remote server i/o is made
+ self.throttle()
+
+ try:
+ # Open our attachment path if required:
+ if attach:
+ files = {'attachment': (attach.name, open(attach.path, 'rb'))}
+
+ r = requests.post(
+ self.notify_url,
+ data=payload,
+ headers=headers,
+ files=files,
+ auth=auth,
+ verify=self.verify_certificate,
+ )
+
+ if r.status_code != requests.codes.ok:
+ # We had a problem
+ status_str = \
+ NotifyPushover.http_response_code_lookup(
+ r.status_code, PUSHOVER_HTTP_ERROR_MAP)
+
+ self.logger.warning(
+ 'Failed to send Pushover notification to {}: '
+ '{}{}error={}.'.format(
+ payload['device'],
+ status_str,
+ ', ' if status_str else '',
+ r.status_code))
+
+ self.logger.debug(
+ 'Response Details:\r\n{}'.format(r.content))
+
+ return False
+
+ else:
+ self.logger.info(
+ 'Sent Pushover notification to %s.' % payload['device'])
+
+ except requests.RequestException as e:
+ self.logger.warning(
+ 'A Connection error occurred sending Pushover:%s ' % (
+ payload['device']) + 'notification.'
+ )
+ self.logger.debug('Socket Exception: %s' % str(e))
+
+ return False
+
+ except (OSError, IOError) as e:
+ self.logger.warning(
+ 'An I/O error occurred while reading {}.'.format(
+ attach.name if attach else 'attachment'))
+ self.logger.debug('I/O Exception: %s' % str(e))
+ return False
+
+ finally:
+ # Close our file (if it's open) stored in the second element
+ # of our files tuple (index 1)
+ if files:
+ files['attachment'][1].close()
+
+ return True
def url(self, privacy=False, *args, **kwargs):
"""
diff --git a/libs/apprise/plugins/NotifySNS.py b/libs/apprise/plugins/NotifySNS.py
index a547558c5..6045c136e 100644
--- a/libs/apprise/plugins/NotifySNS.py
+++ b/libs/apprise/plugins/NotifySNS.py
@@ -89,7 +89,7 @@ class NotifySNS(NotifyBase):
# The maximum length of the body
# Source: https://docs.aws.amazon.com/sns/latest/api/API_Publish.html
- body_maxlen = 140
+ body_maxlen = 160
# A title can not be used for SMS Messages. Setting this to zero will
# cause any title (if defined) to get placed into the message body.
diff --git a/libs/apprise/plugins/NotifySinch.py b/libs/apprise/plugins/NotifySinch.py
new file mode 100644
index 000000000..454cdbf73
--- /dev/null
+++ b/libs/apprise/plugins/NotifySinch.py
@@ -0,0 +1,476 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright (C) 2019 Chris Caron <[email protected]>
+# All rights reserved.
+#
+# This code is licensed under the MIT License.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files(the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions :
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+# THE SOFTWARE.
+
+# To use this service you will need a Sinch account from which you can get your
+# API_TOKEN and SERVICE_PLAN_ID right from your console/dashboard at:
+# https://dashboard.sinch.com/sms/overview
+#
+# You will also need to send the SMS from a phone number or an account ID.
+
+# This is identified as the source (or where the SMS message will originate
+# from). Activated phone numbers can be found on your dashboard here:
+# - https://dashboard.sinch.com/numbers/your-numbers/numbers
+#
+import re
+import six
+import requests
+import json
+
+from .NotifyBase import NotifyBase
+from ..URLBase import PrivacyMode
+from ..common import NotifyType
+from ..utils import parse_list
+from ..utils import validate_regex
+from ..AppriseLocale import gettext_lazy as _
+
+
+# Some Phone Number Detection
+IS_PHONE_NO = re.compile(r'^\+?(?P<phone>[0-9\s)(+-]+)\s*$')
+
+
+class SinchRegion(object):
+ """
+ Defines the Sinch Server Regions
+ """
+ USA = 'us'
+ EUROPE = 'eu'
+
+
+# Used for verification purposes
+SINCH_REGIONS = (SinchRegion.USA, SinchRegion.EUROPE)
+
+
+class NotifySinch(NotifyBase):
+ """
+ A wrapper for Sinch Notifications
+ """
+
+ # The default descriptive name associated with the Notification
+ service_name = 'Sinch'
+
+ # The services URL
+ service_url = 'https://sinch.com/'
+
+ # All notification requests are secure
+ secure_protocol = 'sinch'
+
+ # Allow 300 requests per minute.
+ # 60/300 = 0.2
+ request_rate_per_sec = 0.20
+
+ # the number of seconds undelivered messages should linger for
+ # in the Sinch queue
+ validity_period = 14400
+
+ # A URL that takes you to the setup/help of the specific protocol
+ setup_url = 'https://github.com/caronc/apprise/wiki/Notify_sinch'
+
+ # Sinch uses the http protocol with JSON requests
+ # - the 'spi' gets substituted with the Service Plan ID
+ # provided as part of the Apprise URL.
+ notify_url = 'https://{region}.sms.api.sinch.com/xms/v1/{spi}/batches'
+
+ # The maximum length of the body
+ body_maxlen = 160
+
+ # A title can not be used for SMS Messages. Setting this to zero will
+ # cause any title (if defined) to get placed into the message body.
+ title_maxlen = 0
+
+ # Define object templates
+ templates = (
+ '{schema}://{service_plan_id}:{api_token}@{from_phone}',
+ '{schema}://{service_plan_id}:{api_token}@{from_phone}/{targets}',
+ )
+
+ # Define our template tokens
+ template_tokens = dict(NotifyBase.template_tokens, **{
+ 'service_plan_id': {
+ 'name': _('Account SID'),
+ 'type': 'string',
+ 'private': True,
+ 'required': True,
+ 'regex': (r'^[a-f0-9]+$', 'i'),
+ },
+ 'api_token': {
+ 'name': _('Auth Token'),
+ 'type': 'string',
+ 'private': True,
+ 'required': True,
+ 'regex': (r'^[a-f0-9]+$', 'i'),
+ },
+ 'from_phone': {
+ 'name': _('From Phone No'),
+ 'type': 'string',
+ 'required': True,
+ 'regex': (r'^\+?[0-9\s)(+-]+$', 'i'),
+ 'map_to': 'source',
+ },
+ 'target_phone': {
+ 'name': _('Target Phone No'),
+ 'type': 'string',
+ 'prefix': '+',
+ 'regex': (r'^[0-9\s)(+-]+$', 'i'),
+ 'map_to': 'targets',
+ },
+ 'short_code': {
+ 'name': _('Target Short Code'),
+ 'type': 'string',
+ 'regex': (r'^[0-9]{5,6}$', 'i'),
+ 'map_to': 'targets',
+ },
+ 'targets': {
+ 'name': _('Targets'),
+ 'type': 'list:string',
+ },
+ })
+
+ # Define our template arguments
+ template_args = dict(NotifyBase.template_args, **{
+ 'to': {
+ 'alias_of': 'targets',
+ },
+ 'from': {
+ 'alias_of': 'from_phone',
+ },
+ 'spi': {
+ 'alias_of': 'service_plan_id',
+ },
+ 'region': {
+ 'name': _('Region'),
+ 'type': 'string',
+ 'regex': (r'^[a-z]{2}$', 'i'),
+ 'default': SinchRegion.USA,
+ },
+ 'token': {
+ 'alias_of': 'api_token',
+ },
+ })
+
+ def __init__(self, service_plan_id, api_token, source, targets=None,
+ region=None, **kwargs):
+ """
+ Initialize Sinch Object
+ """
+ super(NotifySinch, self).__init__(**kwargs)
+
+ # The Account SID associated with the account
+ self.service_plan_id = validate_regex(
+ service_plan_id, *self.template_tokens['service_plan_id']['regex'])
+ if not self.service_plan_id:
+ msg = 'An invalid Sinch Account SID ' \
+ '({}) was specified.'.format(service_plan_id)
+ self.logger.warning(msg)
+ raise TypeError(msg)
+
+ # The Authentication Token associated with the account
+ self.api_token = validate_regex(
+ api_token, *self.template_tokens['api_token']['regex'])
+ if not self.api_token:
+ msg = 'An invalid Sinch Authentication Token ' \
+ '({}) was specified.'.format(api_token)
+ self.logger.warning(msg)
+ raise TypeError(msg)
+
+ # The Source Phone # and/or short-code
+ self.source = source
+
+ if not IS_PHONE_NO.match(self.source):
+ msg = 'The Account (From) Phone # or Short-code specified ' \
+ '({}) is invalid.'.format(source)
+ self.logger.warning(msg)
+ raise TypeError(msg)
+
+ # Setup our region
+ self.region = self.template_args['region']['default'] \
+ if not isinstance(region, six.string_types) else region.lower()
+ if self.region and self.region not in SINCH_REGIONS:
+ msg = 'The region specified ({}) is invalid.'.format(region)
+ self.logger.warning(msg)
+ raise TypeError(msg)
+
+ # Tidy source
+ self.source = re.sub(r'[^\d]+', '', self.source)
+
+ if len(self.source) < 11 or len(self.source) > 14:
+ # A short code is a special 5 or 6 digit telephone number
+ # that's shorter than a full phone number.
+ if len(self.source) not in (5, 6):
+ msg = 'The Account (From) Phone # specified ' \
+ '({}) is invalid.'.format(source)
+ self.logger.warning(msg)
+ raise TypeError(msg)
+
+ # else... it is a short code, so we're okay
+
+ else:
+ # We're dealing with a phone number; so we need to
+ # prefix it with a plus symbol
+ self.source = '+{}'.format(self.source)
+
+ # Parse our targets
+ self.targets = list()
+
+ for target in parse_list(targets):
+ # Validate targets and drop bad ones:
+ result = IS_PHONE_NO.match(target)
+ if result:
+ # Further check our phone # for its digit count;
+ # if it isn't between 11 and 14 digits, assume it's
+ # a poorly specified phone no and emit a warning
+ result = ''.join(re.findall(r'\d+', result.group('phone')))
+ if len(result) < 11 or len(result) > 14:
+ self.logger.warning(
+ 'Dropped invalid phone # '
+ '({}) specified.'.format(target),
+ )
+ continue
+
+ # store valid phone number
+ self.targets.append('+{}'.format(result))
+ continue
+
+ self.logger.warning(
+ 'Dropped invalid phone # '
+ '({}) specified.'.format(target),
+ )
+
+ if not self.targets:
+ if len(self.source) in (5, 6):
+ # raise a warning since we're a short-code. We need
+ # a number to message
+ msg = 'There are no valid Sinch targets to notify.'
+ self.logger.warning(msg)
+ raise TypeError(msg)
+
+ return
+
+ def send(self, body, title='', notify_type=NotifyType.INFO, **kwargs):
+ """
+ Perform Sinch Notification
+ """
+
+ # error tracking (used for function return)
+ has_error = False
+
+ # Prepare our headers
+ headers = {
+ 'User-Agent': self.app_id,
+ 'Authorization': 'Bearer {}'.format(self.api_token),
+ 'Content-Type': 'application/json',
+ }
+
+ # Prepare our payload
+ payload = {
+ 'body': body,
+ 'from': self.source,
+
+ # The To gets populated in the loop below
+ 'to': None,
+ }
+
+ # Prepare our Sinch URL (spi = Service Plan ID)
+ url = self.notify_url.format(
+ region=self.region, spi=self.service_plan_id)
+
+ # Create a copy of the targets list
+ targets = list(self.targets)
+
+ if len(targets) == 0:
+ # No targets specified; use our own phone no
+ targets.append(self.source)
+
+ while len(targets):
+ # Get our target to notify
+ target = targets.pop(0)
+
+ # Prepare our user
+ payload['to'] = [target]
+
+ # Some Debug Logging
+ self.logger.debug('Sinch POST URL: {} (cert_verify={})'.format(
+ url, self.verify_certificate))
+ self.logger.debug('Sinch Payload: {}' .format(payload))
+
+ # Always call throttle before any remote server i/o is made
+ self.throttle()
+ try:
+ r = requests.post(
+ url,
+ data=json.dumps(payload),
+ headers=headers,
+ verify=self.verify_certificate,
+ )
+
+ # The response might look like:
+ # {
+ # "id": "CJloRJOe3MtDITqx",
+ # "to": ["15551112222"],
+ # "from": "15553334444",
+ # "canceled": false,
+ # "body": "This is a test message from your Sinch account",
+ # "type": "mt_text",
+ # "created_at": "2020-01-14T01:05:20.694Z",
+ # "modified_at": "2020-01-14T01:05:20.694Z",
+ # "delivery_report": "none",
+ # "expire_at": "2020-01-17T01:05:20.694Z",
+ # "flash_message": false
+ # }
+ if r.status_code not in (
+ requests.codes.created, requests.codes.ok):
+ # We had a problem
+ status_str = \
+ NotifyBase.http_response_code_lookup(r.status_code)
+
+ # set up our status code to use
+ status_code = r.status_code
+
+ try:
+ # Update our status response if we can
+ json_response = json.loads(r.content)
+ status_code = json_response.get('code', status_code)
+ status_str = json_response.get('message', status_str)
+
+ except (AttributeError, TypeError, ValueError):
+ # ValueError = r.content is Unparsable
+ # TypeError = r.content is None
+ # AttributeError = r is None
+
+ # We could not parse JSON response.
+ # We will just use the status we already have.
+ pass
+
+ self.logger.warning(
+ 'Failed to send Sinch notification to {}: '
+ '{}{}error={}.'.format(
+ target,
+ status_str,
+ ', ' if status_str else '',
+ status_code))
+
+ self.logger.debug(
+ 'Response Details:\r\n{}'.format(r.content))
+
+ # Mark our failure
+ has_error = True
+ continue
+
+ else:
+ self.logger.info(
+ 'Sent Sinch notification to {}.'.format(target))
+
+ except requests.RequestException as e:
+ self.logger.warning(
+ 'A Connection error occurred sending Sinch:%s ' % (
+ target) + 'notification.'
+ )
+ self.logger.debug('Socket Exception: %s' % str(e))
+
+ # Mark our failure
+ has_error = True
+ continue
+
+ return not has_error
+
+ def url(self, privacy=False, *args, **kwargs):
+ """
+ Returns the URL built dynamically based on specified arguments.
+ """
+
+ # Define any arguments set
+ args = {
+ 'format': self.notify_format,
+ 'overflow': self.overflow_mode,
+ 'verify': 'yes' if self.verify_certificate else 'no',
+ 'region': self.region,
+ }
+
+ return '{schema}://{spi}:{token}@{source}/{targets}/?{args}'.format(
+ schema=self.secure_protocol,
+ spi=self.pprint(
+ self.service_plan_id, privacy, mode=PrivacyMode.Tail, safe=''),
+ token=self.pprint(self.api_token, privacy, safe=''),
+ source=NotifySinch.quote(self.source, safe=''),
+ targets='/'.join(
+ [NotifySinch.quote(x, safe='') for x in self.targets]),
+ args=NotifySinch.urlencode(args))
+
+ @staticmethod
+ def parse_url(url):
+ """
+ Parses the URL and returns enough arguments to re-instantiate
+ this object.
+
+ """
+ results = NotifyBase.parse_url(url, verify_host=False)
+ if not results:
+ # We're done early as we couldn't load the results
+ return results
+
+ # Get our entries; split_path() looks after unquoting content for us
+ # by default
+ results['targets'] = NotifySinch.split_path(results['fullpath'])
+
+ # The hostname is our source number
+ results['source'] = NotifySinch.unquote(results['host'])
+
+ # Get our service_plan_id and api_token from the user/pass config
+ results['service_plan_id'] = NotifySinch.unquote(results['user'])
+ results['api_token'] = NotifySinch.unquote(results['password'])
+
+ # Auth Token
+ if 'token' in results['qsd'] and len(results['qsd']['token']):
+ # Extract the API token from an argument
+ results['api_token'] = \
+ NotifySinch.unquote(results['qsd']['token'])
+
+ # Service Plan ID
+ if 'spi' in results['qsd'] and len(results['qsd']['spi']):
+ # Extract the service plan id from an argument
+ results['service_plan_id'] = \
+ NotifySinch.unquote(results['qsd']['spi'])
+
+ # Support the 'from' and 'source' variables so that we can specify
+ # the source phone number this way too.
+ # The 'from' makes it easier to use yaml configuration
+ if 'from' in results['qsd'] and len(results['qsd']['from']):
+ results['source'] = \
+ NotifySinch.unquote(results['qsd']['from'])
+ if 'source' in results['qsd'] and len(results['qsd']['source']):
+ results['source'] = \
+ NotifySinch.unquote(results['qsd']['source'])
+
+ # Allow one to define a region
+ if 'region' in results['qsd'] and len(results['qsd']['region']):
+ results['region'] = \
+ NotifySinch.unquote(results['qsd']['region'])
+
+ # Support the 'to' variable so that we can support targets this way too
+ # The 'to' makes it easier to use yaml configuration
+ if 'to' in results['qsd'] and len(results['qsd']['to']):
+ results['targets'] += \
+ NotifySinch.parse_list(results['qsd']['to'])
+
+ return results
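A short usage sketch for the new Sinch plugin (not part of the patch); the service plan id, API token and phone numbers are placeholders chosen to satisfy the validation rules above.

    import apprise

    apobj = apprise.Apprise()
    # sinch://{service_plan_id}:{api_token}@{from_phone}/{target_phone}
    apobj.add('sinch://abc123:def456@15553334444/15551112222?region=us')
    apobj.notify(body='This is a test message from your Sinch account')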
diff --git a/libs/apprise/plugins/NotifySlack.py b/libs/apprise/plugins/NotifySlack.py
index e16885e60..d4e4f6112 100644
--- a/libs/apprise/plugins/NotifySlack.py
+++ b/libs/apprise/plugins/NotifySlack.py
@@ -176,7 +176,7 @@ class NotifySlack(NotifyBase):
'type': 'string',
'private': True,
'required': True,
- 'regex': (r'^[A-Z0-9]{9}$', 'i'),
+ 'regex': (r'^[A-Z0-9]+$', 'i'),
},
# Token required as part of the Webhook request
# /........./BBBBBBBBB/........................
@@ -185,7 +185,7 @@ class NotifySlack(NotifyBase):
'type': 'string',
'private': True,
'required': True,
- 'regex': (r'^[A-Z0-9]{9}$', 'i'),
+ 'regex': (r'^[A-Z0-9]+$', 'i'),
},
# Token required as part of the Webhook request
# /........./........./CCCCCCCCCCCCCCCCCCCCCCCC
@@ -194,7 +194,7 @@ class NotifySlack(NotifyBase):
'type': 'string',
'private': True,
'required': True,
- 'regex': (r'^[A-Za-z0-9]{24}$', 'i'),
+ 'regex': (r'^[A-Za-z0-9]+$', 'i'),
},
'target_encoded_id': {
'name': _('Target Encoded ID'),
@@ -435,8 +435,18 @@ class NotifySlack(NotifyBase):
if attach and self.mode is SlackMode.BOT and attach_channel_list:
# Send our attachments (can only be done in bot mode)
for attachment in attach:
- self.logger.info(
- 'Posting Slack Attachment {}'.format(attachment.name))
+
+ # Perform some simple error checking
+ if not attachment:
+ # We could not access the attachment
+ self.logger.error(
+ 'Could not access attachment {}.'.format(
+ attachment.url(privacy=True)))
+ return False
+
+ self.logger.debug(
+ 'Posting Slack attachment {}'.format(
+ attachment.url(privacy=True)))
# Prepare API Upload Payload
_payload = {
@@ -515,25 +525,29 @@ class NotifySlack(NotifyBase):
'Response Details:\r\n{}'.format(r.content))
return False
- try:
- response = loads(r.content)
+ elif attach:
+ # Attachment posts return a JSON string
+ try:
+ response = loads(r.content)
- except (AttributeError, TypeError, ValueError):
- # ValueError = r.content is Unparsable
- # TypeError = r.content is None
- # AttributeError = r is None
- pass
+ except (AttributeError, TypeError, ValueError):
+ # ValueError = r.content is Unparsable
+ # TypeError = r.content is None
+ # AttributeError = r is None
+ pass
- if not (response and response.get('ok', True)):
- # Bare minimum requirements not met
- self.logger.warning(
- 'Failed to send {}to Slack: error={}.'.format(
- attach.name if attach else '',
- r.status_code))
+ if not (response and response.get('ok', True)):
+ # Bare minimum requirements not met
+ self.logger.warning(
+ 'Failed to send {}to Slack: error={}.'.format(
+ attach.name if attach else '',
+ r.status_code))
- self.logger.debug(
- 'Response Details:\r\n{}'.format(r.content))
- return False
+ self.logger.debug(
+ 'Response Details:\r\n{}'.format(r.content))
+ return False
+ else:
+ response = r.content
# Message Post Response looks like this:
# {
diff --git a/libs/apprise/plugins/NotifyTelegram.py b/libs/apprise/plugins/NotifyTelegram.py
index 11bfe3e78..0b6a2343f 100644
--- a/libs/apprise/plugins/NotifyTelegram.py
+++ b/libs/apprise/plugins/NotifyTelegram.py
@@ -267,15 +267,22 @@ class NotifyTelegram(NotifyBase):
path = None
if isinstance(attach, AttachBase):
+ if not attach:
+ # We could not access the attachment
+ self.logger.error(
+ 'Could not access attachment {}.'.format(
+ attach.url(privacy=True)))
+ return False
+
+ self.logger.debug(
+ 'Posting Telegram attachment {}'.format(
+ attach.url(privacy=True)))
+
# Store our path to our file
path = attach.path
file_name = attach.name
mimetype = attach.mimetype
- if not path:
- # Could not load attachment
- return False
-
# Process our attachment
function_name, key = \
next(((x['function_name'], x['key']) for x in self.mime_lookup
@@ -470,6 +477,9 @@ class NotifyTelegram(NotifyBase):
# Return our detected userid
return _id
+ self.logger.warning(
+ 'Failed to detect a Telegram user; '
+ 'try sending your bot a message first.')
return 0
def send(self, body, title='', notify_type=NotifyType.INFO, attach=None,
@@ -498,8 +508,12 @@ class NotifyTelegram(NotifyBase):
if self.notify_format == NotifyFormat.MARKDOWN:
payload['parse_mode'] = 'MARKDOWN'
- else:
- # Either TEXT or HTML; if TEXT we'll make it HTML
+ payload['text'] = '{}{}'.format(
+ '{}\r\n'.format(title) if title else '',
+ body,
+ )
+
+ elif self.notify_format == NotifyFormat.HTML:
payload['parse_mode'] = 'HTML'
# HTML Spaces (&nbsp;) and tabs (&emsp;) aren't supported
@@ -517,31 +531,23 @@ class NotifyTelegram(NotifyBase):
# Tabs become 3 spaces
title = re.sub('&emsp;?', ' ', title, re.I)
- # HTML
- title = NotifyTelegram.escape_html(title, whitespace=False)
-
- # HTML
- body = NotifyTelegram.escape_html(body, whitespace=False)
-
- if title and self.notify_format == NotifyFormat.TEXT:
- # Text HTML Formatting
- payload['text'] = '<b>%s</b>\r\n%s' % (
- title,
+ payload['text'] = '{}{}'.format(
+ '<b>{}</b>\r\n'.format(title) if title else '',
body,
)
- elif title:
- # Already HTML; trust developer has wrapped
- # the title appropriately
- payload['text'] = '%s\r\n%s' % (
- title,
+ else: # TEXT
+ payload['parse_mode'] = 'HTML'
+
+ # Escape content
+ title = NotifyTelegram.escape_html(title, whitespace=False)
+ body = NotifyTelegram.escape_html(body, whitespace=False)
+
+ payload['text'] = '{}{}'.format(
+ '<b>{}</b>\r\n'.format(title) if title else '',
body,
)
- else:
- # Assign the body
- payload['text'] = body
-
# Create a copy of the chat_ids list
targets = list(self.targets)
while len(targets):
@@ -639,10 +645,10 @@ class NotifyTelegram(NotifyBase):
if attach:
# Send our attachments now (if specified and if it exists)
for attachment in attach:
- sent_attachment = self.send_media(
- payload['chat_id'], notify_type, attach=attachment)
+ if not self.send_media(
+ payload['chat_id'], notify_type,
+ attach=attachment):
- if not sent_attachment:
# We failed; don't continue
has_error = True
break
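The reworked Telegram title/body handling above can be summarised as follows. This is a simplified restatement for illustration only: it omits the &nbsp;/&emsp; substitution, takes the plugin's escape_html helper as a parameter, and assumes the NotifyFormat values are the lowercase strings 'markdown', 'html' and 'text'.

    def telegram_text(title, body, notify_format, escape_html):
        """Return (parse_mode, text) following the logic above."""
        if notify_format == 'markdown':
            # Markdown: title and body are passed through untouched
            return 'MARKDOWN', '{}{}'.format(
                '{}\r\n'.format(title) if title else '', body)

        if notify_format != 'html':
            # Plain TEXT is escaped first, then treated as HTML
            title = escape_html(title, whitespace=False)
            body = escape_html(body, whitespace=False)

        return 'HTML', '{}{}'.format(
            '<b>{}</b>\r\n'.format(title) if title else '', body)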
diff --git a/libs/apprise/plugins/NotifyTwilio.py b/libs/apprise/plugins/NotifyTwilio.py
index ec78e46ea..db0223a8a 100644
--- a/libs/apprise/plugins/NotifyTwilio.py
+++ b/libs/apprise/plugins/NotifyTwilio.py
@@ -23,7 +23,7 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
-# To use this service you will need a Twillio account to which you can get your
+# To use this service you will need a Twilio account to which you can get your
# AUTH_TOKEN and ACCOUNT SID right from your console/dashboard at:
# https://www.twilio.com/console
#
@@ -67,7 +67,7 @@ class NotifyTwilio(NotifyBase):
# The services URL
service_url = 'https://www.twilio.com/'
- # All pushover requests are secure
+ # All notification requests are secure
secure_protocol = 'twilio'
# Allow 300 requests per minute.
@@ -86,7 +86,7 @@ class NotifyTwilio(NotifyBase):
'{sid}/Messages.json'
# The maximum length of the body
- body_maxlen = 140
+ body_maxlen = 160
# A title can not be used for SMS Messages. Setting this to zero will
# cause any title (if defined) to get placed into the message body.
diff --git a/libs/apprise/plugins/NotifyXMPP/SleekXmppAdapter.py b/libs/apprise/plugins/NotifyXMPP/SleekXmppAdapter.py
new file mode 100644
index 000000000..a28e9ce54
--- /dev/null
+++ b/libs/apprise/plugins/NotifyXMPP/SleekXmppAdapter.py
@@ -0,0 +1,208 @@
+# -*- coding: utf-8 -*-
+
+import ssl
+from os.path import isfile
+import logging
+
+
+# Default our global support flag
+SLEEKXMPP_SUPPORT_AVAILABLE = False
+
+try:
+ # Import sleekxmpp if available
+ import sleekxmpp
+
+ SLEEKXMPP_SUPPORT_AVAILABLE = True
+
+except ImportError:
+ # No problem; we simply can't support this plugin because the
+ # sleekxmpp package is not installed.
+ pass
+
+
+class SleekXmppAdapter(object):
+ """
+ Wrapper to sleekxmpp
+
+ """
+
+ # Reference to XMPP client.
+ xmpp = None
+
+ # Whether everything succeeded
+ success = False
+
+ # The default protocol
+ protocol = 'xmpp'
+
+ # The default secure protocol
+ secure_protocol = 'xmpps'
+
+ # The default XMPP port
+ default_unsecure_port = 5222
+
+ # The default XMPP secure port
+ default_secure_port = 5223
+
+ # Taken from https://golang.org/src/crypto/x509/root_linux.go
+ CA_CERTIFICATE_FILE_LOCATIONS = [
+ # Debian/Ubuntu/Gentoo etc.
+ "/etc/ssl/certs/ca-certificates.crt",
+ # Fedora/RHEL 6
+ "/etc/pki/tls/certs/ca-bundle.crt",
+ # OpenSUSE
+ "/etc/ssl/ca-bundle.pem",
+ # OpenELEC
+ "/etc/pki/tls/cacert.pem",
+ # CentOS/RHEL 7
+ "/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem",
+ ]
+
+ # This entry is a bit hacky, but it allows us to unit-test this library
+ # in an environment that simply doesn't have the sleekxmpp package
+ # available to us.
+ #
+ # If anyone is seeing this and knows a better way of testing this
+ # outside of what is defined in test/test_xmpp_plugin.py, please
+ # let me know! :)
+ _enabled = SLEEKXMPP_SUPPORT_AVAILABLE
+
+ def __init__(self, host=None, port=None, secure=False,
+ verify_certificate=True, xep=None, jid=None, password=None,
+ body=None, targets=None, before_message=None, logger=None):
+ """
+ Initialize our SleekXmppAdapter object
+ """
+
+ self.host = host
+ self.port = port
+ self.secure = secure
+ self.verify_certificate = verify_certificate
+
+ self.xep = xep
+ self.jid = jid
+ self.password = password
+
+ self.body = body
+ self.targets = targets
+ self.before_message = before_message
+
+ self.logger = logger or logging.getLogger(__name__)
+
+ # Use the Apprise log handlers for configuring the sleekxmpp logger.
+ apprise_logger = logging.getLogger('apprise')
+ sleek_logger = logging.getLogger('sleekxmpp')
+ for handler in apprise_logger.handlers:
+ sleek_logger.addHandler(handler)
+ sleek_logger.setLevel(apprise_logger.level)
+
+ if not self.load():
+ raise ValueError("Invalid XMPP Configuration")
+
+ def load(self):
+
+ # Prepare our object
+ self.xmpp = sleekxmpp.ClientXMPP(self.jid, self.password)
+
+ # Register our session
+ self.xmpp.add_event_handler("session_start", self.session_start)
+
+ for xep in self.xep:
+ # Load xep entries
+ try:
+ self.xmpp.register_plugin('xep_{0:04d}'.format(xep))
+
+ except sleekxmpp.plugins.base.PluginNotFound:
+ self.logger.warning(
+ 'Could not register plugin {}'.format(
+ 'xep_{0:04d}'.format(xep)))
+ return False
+
+ if self.secure:
+ # Don't even try to use the outdated ssl.PROTOCOL_SSLx
+ self.xmpp.ssl_version = ssl.PROTOCOL_TLSv1
+
+ # If the python version supports it, use highest TLS version
+ # automatically
+ if hasattr(ssl, "PROTOCOL_TLS"):
+ # Use the best version of TLS available to us
+ self.xmpp.ssl_version = ssl.PROTOCOL_TLS
+
+ self.xmpp.ca_certs = None
+ if self.verify_certificate:
+ # Set the ca_certs variable for certificate verification
+ self.xmpp.ca_certs = next(
+ (cert for cert in self.CA_CERTIFICATE_FILE_LOCATIONS
+ if isfile(cert)), None)
+
+ if self.xmpp.ca_certs is None:
+ self.logger.warning(
+ 'XMPP secure communication can not be verified; '
+ 'no local CA certificate file')
+ return False
+
+ # We're good
+ return True
+
+ def process(self):
+ """
+ Thread that handles the server/client i/o
+
+ """
+
+ # Establish a connection to the XMPP server.
+ # To speed up sending messages, don't use the "reattempt" feature;
+ # it adds a nasty delay even before connecting to the XMPP server.
+ if not self.xmpp.connect((self.host, self.port),
+ use_ssl=self.secure, reattempt=False):
+
+ default_port = self.default_secure_port \
+ if self.secure else self.default_unsecure_port
+
+ default_schema = self.secure_protocol \
+ if self.secure else self.protocol
+
+ # Log connection issue
+ self.logger.warning(
+ 'Failed to authenticate {jid} with: {schema}://{host}{port}'
+ .format(
+ jid=self.jid,
+ schema=default_schema,
+ host=self.host,
+ port='' if not self.port or self.port == default_port
+ else ':{}'.format(self.port),
+ ))
+ return False
+
+ # Process XMPP communication.
+ self.xmpp.process(block=True)
+
+ return self.success
+
+ def session_start(self, *args, **kwargs):
+ """
+ Session Manager
+ """
+
+ targets = list(self.targets)
+ if not targets:
+ # We always default to notifying ourselves
+ targets.append(self.jid)
+
+ while len(targets) > 0:
+
+ # Get next target (via JID)
+ target = targets.pop(0)
+
+ # Invoke "before_message" event hook.
+ self.before_message()
+
+ # The message we wish to send, and the JID that will receive it.
+ self.xmpp.send_message(mto=target, mbody=self.body, mtype='chat')
+
+ # Using wait=True ensures that the send queue will be
+ # emptied before ending the session.
+ self.xmpp.disconnect(wait=True)
+
+ # Toggle our success flag
+ self.success = True
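A hedged sketch of driving the new adapter directly (it requires the sleekxmpp package to be installed; the host, JID, password and target are placeholders, and the XEP list is arbitrary):

    from apprise.plugins.NotifyXMPP.SleekXmppAdapter import SleekXmppAdapter

    adapter = SleekXmppAdapter(
        host='xmpp.example.com', port=5222, secure=False,
        verify_certificate=False, xep=[30, 199],
        jid='user@example.com', password='secret',
        body='hello from apprise', targets=['friend@example.com'],
        before_message=lambda: None)

    # connect(), deliver to each target in session_start(), then disconnect()
    if adapter.process():
        print('message delivered')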
diff --git a/libs/apprise/plugins/NotifyXMPP.py b/libs/apprise/plugins/NotifyXMPP/__init__.py
index 82623cb45..a1cd0073a 100644
--- a/libs/apprise/plugins/NotifyXMPP.py
+++ b/libs/apprise/plugins/NotifyXMPP/__init__.py
@@ -24,46 +24,17 @@
# THE SOFTWARE.
import re
-import ssl
-from os.path import isfile
-from .NotifyBase import NotifyBase
-from ..URLBase import PrivacyMode
-from ..common import NotifyType
-from ..utils import parse_list
-from ..AppriseLocale import gettext_lazy as _
+from ..NotifyBase import NotifyBase
+from ...URLBase import PrivacyMode
+from ...common import NotifyType
+from ...utils import parse_list
+from ...AppriseLocale import gettext_lazy as _
+from .SleekXmppAdapter import SleekXmppAdapter
# xep string parser
XEP_PARSE_RE = re.compile('^[^1-9]*(?P<xep>[1-9][0-9]{0,3})$')
-# Default our global support flag
-NOTIFY_XMPP_SUPPORT_ENABLED = False
-
-# Taken from https://golang.org/src/crypto/x509/root_linux.go
-CA_CERTIFICATE_FILE_LOCATIONS = [
- # Debian/Ubuntu/Gentoo etc.
- "/etc/ssl/certs/ca-certificates.crt",
- # Fedora/RHEL 6
- "/etc/pki/tls/certs/ca-bundle.crt",
- # OpenSUSE
- "/etc/ssl/ca-bundle.pem",
- # OpenELEC
- "/etc/pki/tls/cacert.pem",
- # CentOS/RHEL 7
- "/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem",
-]
-
-try:
- # Import sleekxmpp if available
- import sleekxmpp
-
- NOTIFY_XMPP_SUPPORT_ENABLED = True
-
-except ImportError:
- # No problem; we just simply can't support this plugin because we're
- # either using Linux, or simply do not have sleekxmpp installed.
- pass
-
class NotifyXMPP(NotifyBase):
"""
@@ -82,6 +53,9 @@ class NotifyXMPP(NotifyBase):
# A URL that takes you to the setup/help of the specific protocol
setup_url = 'https://github.com/caronc/apprise/wiki/Notify_xmpp'
+ # Lower throttle rate for XMPP
+ request_rate_per_sec = 0.5
+
# The default XMPP port
default_unsecure_port = 5222
@@ -98,7 +72,7 @@ class NotifyXMPP(NotifyBase):
# If anyone is seeing this had knows a better way of testing this
# outside of what is defined in test/test_xmpp_plugin.py, please
# let me know! :)
- _enabled = NOTIFY_XMPP_SUPPORT_ENABLED
+ _enabled = SleekXmppAdapter._enabled
# Define object templates
templates = (
@@ -231,10 +205,11 @@ class NotifyXMPP(NotifyBase):
result = XEP_PARSE_RE.match(xep)
if result is not None:
self.xep.append(int(result.group('xep')))
+ self.logger.debug('Loaded XMPP {}'.format(xep))
else:
self.logger.warning(
- "Could not load XMPP xep {}".format(xep))
+ "Could not load XMPP {}".format(xep))
# By default we send ourselves a message
if targets:
@@ -267,34 +242,7 @@ class NotifyXMPP(NotifyBase):
jid = self.host
password = self.password if self.password else self.user
- # Prepare our object
- xmpp = sleekxmpp.ClientXMPP(jid, password)
-
- for xep in self.xep:
- # Load xep entries
- xmpp.register_plugin('xep_{0:04d}'.format(xep))
-
- if self.secure:
- xmpp.ssl_version = ssl.PROTOCOL_TLSv1
- # If the python version supports it, use highest TLS version
- # automatically
- if hasattr(ssl, "PROTOCOL_TLS"):
- # Use the best version of TLS available to us
- xmpp.ssl_version = ssl.PROTOCOL_TLS
-
- xmpp.ca_certs = None
- if self.verify_certificate:
- # Set the ca_certs variable for certificate verification
- xmpp.ca_certs = next(
- (cert for cert in CA_CERTIFICATE_FILE_LOCATIONS
- if isfile(cert)), None)
-
- if xmpp.ca_certs is None:
- self.logger.warning(
- 'XMPP Secure comunication can not be verified; '
- 'no CA certificate found')
-
- # Acquire our port number
+ # Compute port number
if not self.port:
port = self.default_secure_port \
if self.secure else self.default_unsecure_port
@@ -302,48 +250,22 @@ class NotifyXMPP(NotifyBase):
else:
port = self.port
- # Establish our connection
- if not xmpp.connect((self.host, port)):
- return False
-
- xmpp.send_presence()
-
try:
- xmpp.get_roster()
-
- except sleekxmpp.exceptions.IqError as e:
- self.logger.warning('There was an error getting the XMPP roster.')
- self.logger.debug(e.iq['error']['condition'])
- xmpp.disconnect()
+ # Communicate with XMPP.
+ xmpp_adapter = SleekXmppAdapter(
+ host=self.host, port=port, secure=self.secure,
+ verify_certificate=self.verify_certificate, xep=self.xep,
+ jid=jid, password=password, body=body, targets=self.targets,
+ before_message=self.throttle, logger=self.logger)
+
+ except ValueError:
+ # We failed
return False
- except sleekxmpp.exceptions.IqTimeout:
- self.logger.warning('XMPP Server is taking too long to respond.')
- xmpp.disconnect()
- return False
-
- targets = list(self.targets)
- if not targets:
- # We always default to notifying ourselves
- targets.append(jid)
-
- while len(targets) > 0:
-
- # Get next target (via JID)
- target = targets.pop(0)
-
- # Always call throttle before any remote server i/o is made
- self.throttle()
-
- # The message we wish to send, and the JID that
- # will receive it.
- xmpp.send_message(mto=target, mbody=body, mtype='chat')
-
- # Using wait=True ensures that the send queue will be
- # emptied before ending the session.
- xmpp.disconnect(wait=True)
+ # Initialize XMPP machinery and begin processing the XML stream.
+ outcome = xmpp_adapter.process()
- return True
+ return outcome
def url(self, privacy=False, *args, **kwargs):
"""
diff --git a/libs/apprise/plugins/__init__.py b/libs/apprise/plugins/__init__.py
index f8728a9da..fd41cb7fd 100644
--- a/libs/apprise/plugins/__init__.py
+++ b/libs/apprise/plugins/__init__.py
@@ -34,6 +34,7 @@ from os.path import abspath
# Used for testing
from . import NotifyEmail as NotifyEmailBase
from .NotifyGrowl import gntp
+from .NotifyXMPP import SleekXmppAdapter
# NotifyBase object is passed in as a module not class
from . import NotifyBase
@@ -63,6 +64,9 @@ __all__ = [
# gntp (used for NotifyGrowl Testing)
'gntp',
+
+ # sleekxmpp access points (used for NotifyXMPP Testing)
+ 'SleekXmppAdapter',
]
# we mirror our base purely for the ability to reset everything; this
@@ -217,9 +221,16 @@ def _sanitize_token(tokens, default_delimiter):
and 'default' not in tokens[key] \
and 'values' in tokens[key] \
and len(tokens[key]['values']) == 1:
+
# If there is only one choice; then make it the default
- tokens[key]['default'] = \
- tokens[key]['values'][0]
+ # - support dictionaries too
+ tokens[key]['default'] = tokens[key]['values'][0] \
+ if not isinstance(tokens[key]['values'], dict) \
+ else next(iter(tokens[key]['values']))
+
+ if 'values' in tokens[key] and isinstance(tokens[key]['values'], dict):
+ # Convert values into a list if it was defined as a dictionary
+ tokens[key]['values'] = [k for k in tokens[key]['values'].keys()]
if 'regex' in tokens[key]:
# Verify that we are a tuple; convert strings to tuples
diff --git a/libs/knowit/__init__.py b/libs/knowit/__init__.py
new file mode 100644
index 000000000..b753f1ded
--- /dev/null
+++ b/libs/knowit/__init__.py
@@ -0,0 +1,27 @@
+# -*- coding: utf-8 -*-
+"""Know your media files better."""
+from __future__ import unicode_literals
+
+__title__ = 'knowit'
+__version__ = '0.3.0-dev'
+__short_version__ = '.'.join(__version__.split('.')[:2])
+__author__ = 'Rato AQ2'
+__license__ = 'MIT'
+__copyright__ = 'Copyright 2016-2017, Rato AQ2'
+__url__ = 'https://github.com/ratoaq2/knowit'
+
+#: Video extensions
+VIDEO_EXTENSIONS = ('.3g2', '.3gp', '.3gp2', '.3gpp', '.60d', '.ajp', '.asf', '.asx', '.avchd', '.avi', '.bik',
+ '.bix', '.box', '.cam', '.dat', '.divx', '.dmf', '.dv', '.dvr-ms', '.evo', '.flc', '.fli',
+ '.flic', '.flv', '.flx', '.gvi', '.gvp', '.h264', '.m1v', '.m2p', '.m2ts', '.m2v', '.m4e',
+ '.m4v', '.mjp', '.mjpeg', '.mjpg', '.mk3d', '.mkv', '.moov', '.mov', '.movhd', '.movie', '.movx',
+ '.mp4', '.mpe', '.mpeg', '.mpg', '.mpv', '.mpv2', '.mxf', '.nsv', '.nut', '.ogg', '.ogm', '.ogv',
+ '.omf', '.ps', '.qt', '.ram', '.rm', '.rmvb', '.swf', '.ts', '.vfw', '.vid', '.video', '.viv',
+ '.vivo', '.vob', '.vro', '.webm', '.wm', '.wmv', '.wmx', '.wrap', '.wvx', '.wx', '.x264', '.xvid')
+
+try:
+ from collections import OrderedDict
+except ImportError: # pragma: no cover
+ from ordereddict import OrderedDict
+
+from .api import KnowitException, know
diff --git a/libs/knowit/__main__.py b/libs/knowit/__main__.py
new file mode 100644
index 000000000..3b55af872
--- /dev/null
+++ b/libs/knowit/__main__.py
@@ -0,0 +1,151 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+import json
+import logging
+import sys
+from argparse import ArgumentParser
+
+from six import PY2
+import yaml
+
+from . import (
+ __url__,
+ __version__,
+ api,
+)
+from .provider import ProviderError
+from .serializer import (
+ get_json_encoder,
+ get_yaml_dumper,
+)
+from .utils import recurse_paths
+
+logging.basicConfig(stream=sys.stdout, format='%(message)s')
+logging.getLogger('CONSOLE').setLevel(logging.INFO)
+logging.getLogger('knowit').setLevel(logging.ERROR)
+
+console = logging.getLogger('CONSOLE')
+logger = logging.getLogger('knowit')
+
+
+def build_argument_parser():
+ """Build the argument parser.
+
+ :return: the argument parser
+ :rtype: ArgumentParser
+ """
+ opts = ArgumentParser()
+ opts.add_argument(dest='videopath', help='Path to the video to introspect', nargs='*')
+
+ provider_opts = opts.add_argument_group('Providers')
+ provider_opts.add_argument('-p', '--provider', dest='provider',
+ help='The provider to be used: mediainfo, ffmpeg or enzyme.')
+
+ output_opts = opts.add_argument_group('Output')
+ output_opts.add_argument('--debug', action='store_true', dest='debug',
+ help='Print useful information for debugging knowit and for reporting bugs.')
+ output_opts.add_argument('--report', action='store_true', dest='report',
+ help='Parse media and report all non-detected values')
+ output_opts.add_argument('-y', '--yaml', action='store_true', dest='yaml',
+ help='Display output in yaml format')
+ output_opts.add_argument('-N', '--no-units', action='store_true', dest='no_units',
+ help='Display output without units')
+ output_opts.add_argument('-P', '--profile', dest='profile',
+ help='Display values according to specified profile: code, default, human, technical')
+
+ conf_opts = opts.add_argument_group('Configuration')
+ conf_opts.add_argument('--mediainfo', dest='mediainfo',
+ help='The location to search for MediaInfo binaries')
+ conf_opts.add_argument('--ffmpeg', dest='ffmpeg',
+ help='The location to search for FFmpeg (ffprobe) binaries')
+
+ information_opts = opts.add_argument_group('Information')
+ information_opts.add_argument('--version', dest='version', action='store_true',
+ help='Display knowit version.')
+
+ return opts
+
+
+def knowit(video_path, options, context):
+ """Extract video metadata."""
+ context['path'] = video_path
+ if not options.report:
+ console.info('For: %s', video_path)
+ else:
+ console.info('Parsing: %s', video_path)
+ info = api.know(video_path, context)
+ if not options.report:
+ console.info('Knowit %s found: ', __version__)
+ console.info(dump(info, options, context))
+
+ return info
+
+
+def dump(info, options, context):
+ """Convert info to string using json or yaml format."""
+ if options.yaml:
+ data = {info['path']: info} if 'path' in info else info
+ result = yaml.dump(data, Dumper=get_yaml_dumper(context),
+ default_flow_style=False, allow_unicode=True)
+ if PY2:
+ result = result.decode('utf-8')
+
+ else:
+ result = json.dumps(info, cls=get_json_encoder(context), indent=4, ensure_ascii=False)
+
+ return result
+
+
+def main(args=None):
+ """Execute main function for entry point."""
+ argument_parser = build_argument_parser()
+ args = args or sys.argv[1:]
+ options = argument_parser.parse_args(args)
+
+ if options.debug:
+ logger.setLevel(logging.DEBUG)
+ logging.getLogger('enzyme').setLevel(logging.INFO)
+ else:
+ logger.setLevel(logging.WARNING)
+
+ paths = recurse_paths(options.videopath)
+
+ if paths:
+ report = {}
+ for i, videopath in enumerate(paths):
+ try:
+ context = dict(vars(options))
+ if options.report:
+ context['report'] = report
+ else:
+ del context['report']
+ knowit(videopath, options, context)
+ except ProviderError:
+ logger.exception('Error when processing video')
+ except OSError:
+ logger.exception('OS error when processing video')
+ except UnicodeError:
+ logger.exception('Character encoding error when processing video')
+ except api.KnowitException as e:
+ logger.error(e)
+ if options.report and i % 20 == 19 and report:
+ console.info('Unknown values so far:')
+ console.info(dump(report, options, vars(options)))
+
+ if options.report:
+ if report:
+ console.info('Knowit %s found unknown values:', __version__)
+ console.info(dump(report, options, vars(options)))
+ console.info('Please report them at %s', __url__)
+ else:
+ console.info('Knowit %s knows everything. :-)', __version__)
+
+ elif options.version:
+ console.info(api.debug_info())
+ else:
+ argument_parser.print_help()
+
+
+if __name__ == '__main__':
+ main(sys.argv[1:])
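Example invocation of the CLI defined above; the video path is a placeholder.

    # Equivalent to: python -m knowit --yaml --profile human video.mkv
    from knowit.__main__ import main

    main(['--yaml', '--profile', 'human', 'video.mkv'])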
diff --git a/libs/knowit/api.py b/libs/knowit/api.py
new file mode 100644
index 000000000..fd7ab79a1
--- /dev/null
+++ b/libs/knowit/api.py
@@ -0,0 +1,132 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+import traceback
+
+from . import OrderedDict, __version__
+from .config import Config
+from .providers import (
+ EnzymeProvider,
+ FFmpegProvider,
+# MediaInfoProvider,
+)
+
+_provider_map = OrderedDict([
+# ('mediainfo', MediaInfoProvider),
+ ('ffmpeg', FFmpegProvider),
+ ('enzyme', EnzymeProvider)
+])
+
+provider_names = _provider_map.keys()
+
+available_providers = OrderedDict([])
+
+
+class KnowitException(Exception):
+ """Exception raised when knowit fails to perform media info extraction because of an internal error."""
+
+
+def initialize(context=None):
+ """Initialize knowit."""
+ if not available_providers:
+ context = context or {}
+ config = Config.build(context.get('config'))
+ for name, provider_cls in _provider_map.items():
+ available_providers[name] = provider_cls(config, context.get(name) or config.general.get(name))
+
+
+def know(video_path, context=None):
+ """Return a dict containing the video metadata.
+
+ :param video_path: path to the video file to analyze
+ :type video_path: string
+ :param context: parsing options (e.g. provider, profile)
+ :type context: dict
+ :return: the detected video metadata
+ :rtype: dict
+ """
+ try:
+ # handle path-like objects
+ video_path = video_path.__fspath__()
+ except AttributeError:
+ pass
+
+ try:
+ context = context or {}
+ context.setdefault('profile', 'default')
+ initialize(context)
+
+ for name, provider in available_providers.items():
+ if name != (context.get('provider') or name):
+ continue
+
+ if provider.accepts(video_path):
+ result = provider.describe(video_path, context)
+ if result:
+ return result
+
+ return {}
+ except Exception:
+ raise KnowitException(debug_info(context=context, exc_info=True))
+
+
+def dependencies(context=None):
+ """Return all dependencies detected by knowit."""
+ deps = OrderedDict([])
+ try:
+ initialize(context)
+ for name, provider_cls in _provider_map.items():
+ if name in available_providers:
+ deps[name] = available_providers[name].version
+ else:
+ deps[name] = {}
+ except Exception:
+ pass
+
+ return deps
+
+
+def _centered(value):
+ value = value[-52:]
+ return '| {msg:^53} |'.format(msg=value)
+
+
+def debug_info(context=None, exc_info=False):
+ lines = [
+ '+-------------------------------------------------------+',
+ _centered('KnowIt {0}'.format(__version__)),
+ '+-------------------------------------------------------+'
+ ]
+
+ first = True
+ for key, info in dependencies(context).items():
+ if not first:
+ lines.append(_centered(''))
+ first = False
+
+ for k, v in info.items():
+ lines.append(_centered(k))
+ lines.append(_centered(v))
+
+ if context:
+ debug_data = context.pop('debug_data', None)
+
+ lines.append('+-------------------------------------------------------+')
+ for k, v in context.items():
+ if v:
+ lines.append(_centered('{}: {}'.format(k, v)))
+
+ if debug_data:
+ lines.append('+-------------------------------------------------------+')
+ lines.append(debug_data())
+
+ if exc_info:
+ lines.append('+-------------------------------------------------------+')
+ lines.append(traceback.format_exc())
+
+ lines.append('+-------------------------------------------------------+')
+ lines.append(_centered('Please report any bug or feature request at'))
+ lines.append(_centered('https://github.com/ratoaq2/knowit/issues.'))
+ lines.append('+-------------------------------------------------------+')
+
+ return '\n'.join(lines)
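The same metadata is available programmatically through know(); a minimal sketch, assuming the file path is a placeholder and an ffprobe binary is available for the ffmpeg provider:

    from knowit import know

    info = know('video.mkv', context={'provider': 'ffmpeg'})
    print(info)   # dict of detected properties, or {} if nothing was extracted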
diff --git a/libs/knowit/config.py b/libs/knowit/config.py
new file mode 100644
index 000000000..04e8713e2
--- /dev/null
+++ b/libs/knowit/config.py
@@ -0,0 +1,59 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from collections import namedtuple
+from logging import NullHandler, getLogger
+
+from pkg_resources import resource_stream
+from six import text_type
+import yaml
+
+from .serializer import get_yaml_loader
+
+logger = getLogger(__name__)
+logger.addHandler(NullHandler())
+
+_valid_aliases = ('code', 'default', 'human', 'technical')
+_Value = namedtuple('_Value', _valid_aliases)
+
+
+class Config(object):
+ """Application config class."""
+
+ @classmethod
+ def build(cls, path=None):
+ """Build config instance."""
+ loader = get_yaml_loader()
+ with resource_stream('knowit', 'defaults.yml') as stream:
+ cfgs = [yaml.load(stream, Loader=loader)]
+
+ if path:
+ with open(path, 'r') as stream:
+ cfgs.append(yaml.load(stream, Loader=loader))
+
+ profiles_data = {}
+ for cfg in cfgs:
+ if 'profiles' in cfg:
+ profiles_data.update(cfg['profiles'])
+
+ knowledge_data = {}
+ for cfg in cfgs:
+ if 'knowledge' in cfg:
+ knowledge_data.update(cfg['knowledge'])
+
+ data = {'general': {}}
+ for class_name, data_map in knowledge_data.items():
+ data.setdefault(class_name, {})
+ for code, detection_values in data_map.items():
+ alias_map = (profiles_data.get(class_name) or {}).get(code) or {}
+ alias_map.setdefault('code', code)
+ alias_map.setdefault('default', alias_map['code'])
+ alias_map.setdefault('human', alias_map['default'])
+ alias_map.setdefault('technical', alias_map['human'])
+ value = _Value(**{k: v for k, v in alias_map.items() if k in _valid_aliases})
+ for detection_value in detection_values:
+ data[class_name][text_type(detection_value)] = value
+
+ config = Config()
+ config.__dict__ = data
+ return config
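Illustrative lookup against the object Config.build() returns, using detection values and profile aliases taken from defaults.yml further below:

    from knowit.config import Config

    config = Config.build()

    value = config.VideoCodec['AVC1']   # one of the H264 detection values
    print(value.code)        # H264
    print(value.default)     # H.264
    print(value.human)       # Advanced Video Coding (H.264)
    print(value.technical)   # MPEG-4 Part 10 - Advanced Video Coding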
diff --git a/libs/knowit/core.py b/libs/knowit/core.py
new file mode 100644
index 000000000..c567d2ccf
--- /dev/null
+++ b/libs/knowit/core.py
@@ -0,0 +1,36 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from logging import NullHandler, getLogger
+
+from six import text_type
+
+logger = getLogger(__name__)
+logger.addHandler(NullHandler())
+
+
+class Reportable(object):
+ """Reportable abstract class."""
+
+ def __init__(self, name, description=None, reportable=True):
+ """Constructor."""
+ self.name = name
+ self._description = description
+ self.reportable = reportable
+
+ @property
+ def description(self):
+ """Rule description."""
+ return self._description or self.name
+
+ def report(self, value, context):
+ """Report unknown value."""
+ if not value or not self.reportable:
+ return
+
+ value = text_type(value)
+ if 'report' in context:
+ report_map = context['report'].setdefault(self.description, {})
+ if value not in report_map:
+ report_map[value] = context['path']
+ logger.info('Invalid %s: %r', self.description, value)
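A short sketch of how Reportable.report() above aggregates unknown values when a 'report' dict is present in the context; the property name and path are hypothetical.

    from knowit.core import Reportable

    context = {'report': {}, 'path': '/videos/sample.mkv'}
    prop = Reportable('codec', description='video codec')
    prop.report('XYZ1', context)

    # context['report'] == {'video codec': {'XYZ1': '/videos/sample.mkv'}}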
diff --git a/libs/knowit/defaults.yml b/libs/knowit/defaults.yml
new file mode 100644
index 000000000..234f93426
--- /dev/null
+++ b/libs/knowit/defaults.yml
@@ -0,0 +1,628 @@
+knowledge:
+ VideoCodec:
+ # https://en.wikipedia.org/wiki/MPEG-1#Part_2:_Video
+ MPEG1:
+ - MPEG-1V
+ - MPEG1VIDEO
+ # https://en.wikipedia.org/wiki/H.262/MPEG-2_Part_2
+ MPEG2:
+ - MPEG2
+ - MPEG-2V
+ # https://wiki.multimedia.cx/index.php/Microsoft_MPEG-4
+ MSMPEG4V1:
+ - MP41
+ - MPG4
+ MSMPEG4V2:
+ - MP42
+ - MSMPEG4V2
+ MSMPEG4V3:
+ - MP43
+ - AP41
+ - COL1
+ WMV1:
+ - WMV1
+ - WMV7
+ WMV2:
+ - WMV2
+ - WMV8
+ # MPEG-4:
+ # https://wiki.multimedia.cx/index.php/ISO_MPEG-4
+ # https://en.wikipedia.org/wiki/MPEG-4_Part_2
+ MPEG4:
+ - 3IV2
+ - BLZ0
+ - DIGI
+ - DXGM
+ - EM4A
+ - EPHV
+ - FMP4
+ - FVFW
+ - HDX4
+ - M4CC
+ - M4S2
+ - MP4S
+ - MP4V
+ - MVXM
+ - RMP4
+ - SEDG
+ - SMP4
+ - UMP4
+ - WV1F
+ - MPEG-4V
+ - ASP # V_MPEG-4/ISO/ASP
+ - MPEG4
+ DIVX:
+ - DIV1
+ - DIVX
+ - DX50
+ XVID:
+ - XVID
+ - XVIX
+ # VC-1:
+ # https://wiki.multimedia.cx/index.php/VC-1
+ # https://en.wikipedia.org/wiki/VC-1
+ VC1:
+ - WMV3
+ - WMV9
+ - WMVA
+ - WMVC1
+ - WMVP
+ - WVP2
+ - WMVR
+ - VC-1
+ - VC1
+ # H.263:
+ # https://wiki.multimedia.cx/index.php/H.263
+ # https://en.wikipedia.org/wiki/Sorenson_Media#Sorenson_Spark
+ H263:
+ - D263
+ - H263
+ - L263
+ - M263
+ - S263
+ - T263
+ - U263
+ - X263
+ # https://wiki.multimedia.cx/index.php/H.264
+ H264:
+ - AVC
+ - AVC1
+ - DAVC
+ - H264
+ - X264
+ - VSSH
+ # https://wiki.multimedia.cx/index.php/H.265
+ H265:
+ - HEVC
+ - H265
+ - X265
+ # https://wiki.multimedia.cx/index.php/On2_VP6 and https://en.wikipedia.org/wiki/VP6
+ VP6:
+ - VP60
+ - VP61
+ - VP62
+ # https://wiki.multimedia.cx/index.php/On2_VP7
+ VP7:
+ - VP70
+ - VP71
+ - VP72
+ # https://en.wikipedia.org/wiki/VP8
+ VP8:
+ - VP8
+ # https://en.wikipedia.org/wiki/VP9
+ # https://wiki.multimedia.cx/index.php/VP9
+ VP9:
+ - VP9
+ - VP90
+ CJPG:
+ - CJPG
+ QUICKTIME:
+ - QUICKTIME
+ __ignored__:
+ - MJPEG
+ - PNG
+
+ VideoEncoder:
+ DIVX:
+ - DIVX
+ X264:
+ - X264
+ X265:
+ - X265
+ XVID:
+ - XVID
+ VIMEO:
+ - VIMEO ENCODER
+
+ VideoProfile:
+ ADVANCED:
+ - ADVANCED
+ ADVANCEDSIMPLE:
+ - ADVANCED SIMPLE
+ - ADVANCED SIMPLE PROFILE
+ SIMPLE:
+ - SIMPLE
+ BASELINE:
+ - BASELINE
+ - CONSTRAINED BASELINE
+ MAIN:
+ - MAIN
+ MAIN10:
+ - MAIN 10
+ HIGH:
+ - HIGH
+
+ VideoProfileLevel:
+ L1:
+ - L1
+ - L1.0
+ L11:
+ - L1.1
+ L13:
+ - L1.3
+ L2:
+ - L2
+ L21:
+ - L2.1
+ L22:
+ - L2.2
+ L3:
+ - L3
+ - L3.0
+ L31:
+ - L3.1
+ L32:
+ - L3.2
+ L4:
+ - L4
+ - L4.0
+ L41:
+ - L4.1
+ L42:
+ - L4.2
+ L5:
+ - L5
+ - L5.0
+ L51:
+ - L5.1
+ LOW:
+ - LOW
+ MAIN:
+ - MAIN
+ HIGH:
+ - HIGH
+ H14:
+ - HIGH 1440
+
+ VideoProfileTier:
+ MAIN:
+ - MAIN
+ HIGH:
+ - HIGH
+
+ ScanType:
+ PROGRESSIVE:
+ - PROGRESSIVE
+ INTERLACED:
+ - INTERLACED
+ - MBAFF
+ - TT
+ - BB
+ - TB
+ - BT
+
+ BitRateMode:
+ VBR:
+ - VBR
+ CBR:
+ - CBR
+
+ AudioCompression:
+ LOSSY:
+ - LOSSY
+ LOSSLESS:
+ - LOSSLESS
+
+ AudioProfile:
+ CORE:
+ - CORE
+ HRA:
+ - HRA
+ - DTS-HD HRA
+ MA:
+ - MA
+ - DTS-HD MA
+ MAIN:
+ - MAIN
+ LC:
+ - LC
+ HEAAC:
+ - HE-AAC
+ HEAACV2:
+ - HE-AACV2
+ # https://www.lifewire.com/dts-96-24-1846848
+ 96/24:
+ - 96/24
+ - DTS 96/24
+ # https://www.lifewire.com/what-is-dts-es-1846890
+ ESDISCRETE:
+ - ES DISCRETE
+ - DTS-ES
+ ESMATRIX:
+ - ES MATRIX
+ LAYER2:
+ - LAYER 2
+ LAYER3:
+ - LAYER 3
+ PRO:
+ - PRO
+ __ignored__:
+ - DOLBY DIGITAL
+ - DTS
+
+ # References:
+ # - https://ffmpeg.org/general.html#Audio-Codecs
+ AudioCodec:
+ AC3:
+ - AC3
+ - BSID9
+ - BSID10
+ - 2000
+ EAC3:
+ - EAC3
+ - AC3+
+ TRUEHD:
+ - TRUEHD
+ ATMOS:
+ - ATMOS
+ DTS:
+ - DTS
+ # DTS-HD used for DTS-HD High Resolution Audio and DTS-HD Master Audio
+ DTSHD:
+ - DTS-HD
+ AAC:
+ - AAC
+ FLAC:
+ - FLAC
+ PCM:
+ - PCM
+ - PCM_S16LE
+ # https://en.wikipedia.org/wiki/MPEG-1_Audio_Layer_II
+ MP2:
+ - MP2
+ - MPA1L2
+ - MPEG/L2
+ # https://en.wikipedia.org/wiki/MP3
+ MP3:
+ - MP3
+ - MPA1L3
+ - MPA2L3
+ - MPEG/L3
+ - 50
+ - 55
+ VORBIS:
+ - VORBIS
+ OPUS:
+ - OPUS
+ # https://wiki.multimedia.cx/index.php?title=Windows_Media_Audio_9
+ WMA1:
+ - 160
+ WMA2:
+ - 161
+ - WMAV2
+ WMAPRO:
+ - 162
+ - WMAPRO
+ # https://answers.microsoft.com/en-us/windows/forum/windows_vista-pictures/how-to-access-codec-voxware-rt29-metasound-75/a6dbea68-ca5c-e011-8dfc-68b599b31bf5
+ RT29:
+ - 75
+
+ SubtitleFormat:
+ PGS:
+ - PGS
+ - 144
+ - HDMV_PGS_SUBTITLE
+ VOBSUB:
+ - VOBSUB
+ - E0
+ - DVD_SUBTITLE
+ SUBRIP:
+ - SUBRIP
+ - UTF8
+ - SRT
+ # https://en.wikipedia.org/wiki/SubStation_Alpha
+ SSA:
+ - SSA
+ ASS:
+ - ASS
+ # https://en.wikipedia.org/wiki/MPEG-4_Part_17
+ TX3G:
+ - TX3G
+ DVBSUB:
+ - 6
+ MOVTEXT:
+ - MOV_TEXT
+
+profiles:
+ VideoCodec:
+ MPEG1:
+ default: MPEG-1
+ human: MPEG-1 Video
+ technical: MPEG-1 Part 2
+ MPEG2:
+ default: MPEG-2
+ human: MPEG-2 Video
+ technical: MPEG-2 Part 2
+ aka: H.262
+ MSMPEG4V1:
+ default: Microsoft MPEG-4 v1
+ human: Microsoft MPEG-4 version 1
+ technical: MPEG-4 Part 2 Microsoft variant version 1
+ MSMPEG4V2:
+ default: Microsoft MPEG-4 v2
+ human: Microsoft MPEG-4 version 2
+ technical: MPEG-4 Part 2 Microsoft variant version 2
+ MSMPEG4V3:
+ default: Microsoft MPEG-4 v3
+ human: Microsoft MPEG-4 version 3
+ technical: MPEG-4 Part 2 Microsoft variant version 3
+ WMV1:
+ default: WMV 7
+ human: Windows Media Video 7
+ technical: Microsoft Windows Media Video v1/v7
+ WMV2:
+ default: WMV 8
+ human: Windows Media Video 8
+ technical: Microsoft Windows Media Video v2/v8
+ MPEG4:
+ default: MPEG-4
+ human: MPEG-4 Visual
+ technical: MPEG-4 Part 2
+ DIVX:
+ default: DivX
+ human: MPEG-4 Visual (DivX)
+ technical: MPEG-4 Part 2 (DivX)
+ XVID:
+ default: Xvid
+ human: MPEG-4 Visual (Xvid)
+ technical: MPEG-4 Part 2 (Xvid)
+ VC1:
+ default: VC-1
+ human: Windows Media Video 9
+ technical: Microsoft SMPTE 421M
+ H263:
+ default: H.263
+ H264:
+ default: H.264
+ human: Advanced Video Coding (H.264)
+ technical: MPEG-4 Part 10 - Advanced Video Coding
+ aka: AVC
+ H265:
+ default: H.265
+ human: High Efficiency Video Coding (H.265)
+ technical: MPEG-H Part 2 - High Efficiency Video Coding
+ aka: HEVC
+ VP6:
+ human: On2 VP6
+ technical: On2 TrueMotion VP6
+ VP7:
+ human: On2 VP7
+ technical: On2 TrueMotion VP7
+ VP8:
+ technical: Google VP8
+ VP9:
+ technical: Google VP9
+ CJPG:
+ default: WebCam JPEG
+ QUICKTIME:
+ default: QuickTime
+
+ VideoEncoder:
+ DIVX:
+ default: DivX
+ X264:
+ default: x264
+ X265:
+ default: x265
+ XVID:
+ default: Xvid
+ VIMEO:
+ default: Vimeo
+
+ VideoProfile:
+ ADVANCED:
+ default: Advanced
+ ADVANCEDSIMPLE:
+ default: Advanced Simple
+ SIMPLE:
+ default: Simple
+ BASELINE:
+ default: Baseline
+ MAIN:
+ default: Main
+ MAIN10:
+ default: Main 10
+ HIGH:
+ default: High
+
+ VideoProfileLevel:
+ L1:
+ default: '1'
+ technical: Level 1
+ L11:
+ default: '1.1'
+ technical: Level 1.1
+ L13:
+ default: '1.3'
+ technical: Level 1.3
+ L2:
+ default: '2'
+ technical: Level 2
+ L21:
+ default: '2.1'
+ technical: Level 2.1
+ L22:
+ default: '2.2'
+ technical: Level 2.2
+ L3:
+ default: '3'
+ technical: Level 3
+ L31:
+ default: '3.1'
+ technical: Level 3.1
+ L32:
+ default: '3.2'
+ technical: Level 3.2
+ L4:
+ default: '4'
+ technical: Level 4
+ L41:
+ default: '4.1'
+ technical: Level 4.1
+ L42:
+ default: '4.2'
+ technical: Level 4.2
+ L5:
+ default: '5'
+ technical: Level 5
+ L51:
+ default: '5.1'
+ technical: Level 5.1
+ LOW:
+ default: Low
+ MAIN:
+ default: Main
+ HIGH:
+ default: High
+ H14:
+ default: High 1440
+
+ VideoProfileTier:
+ MAIN:
+ default: Main
+ HIGH:
+ default: High
+
+ ScanType:
+ PROGRESSIVE:
+ default: Progressive
+ human: Progressive scanning
+ INTERLACED:
+ default: Interlaced
+ human: Interlaced video
+
+ BitRateMode:
+ VBR:
+ default: Variable
+ human: Variable bitrate
+ CBR:
+ default: Constant
+ human: Constant bitrate
+
+ AudioCompression:
+ LOSSY:
+ default: Lossy
+ human: Lossy compression
+ LOSSLESS:
+ default: Lossless
+ human: Lossless compression
+
+ AudioProfile:
+ HRA:
+ default: High Resolution Audio
+ MA:
+ default: Master Audio
+ MAIN:
+ default: Main
+ technical: Main Profile
+ LC:
+ default: Low Complexity
+ HEAAC:
+ default: High Efficiency
+ HEAACV2:
+ default: High Efficiency v2
+ human: High Efficiency version 2
+ 96/24:
+ default: 96/24
+ human: 96 kHz 24 bits
+ technical: 96 kHz 24 bits Upscaled
+ ESDISCRETE:
+ default: Extended Surround
+ human: Extended Surround Discrete
+ ESMATRIX:
+ default: Extended Surround
+ human: Extended Surround Matrix
+ LAYER2:
+ default: Layer 2
+ LAYER3:
+ default: Layer 3
+ PRO:
+ default: Pro
+ technical: Professional
+
+ AudioCodec:
+ AC3:
+ default: AC-3
+ human: Dolby Digital
+ EAC3:
+ default: E-AC-3
+ human: Dolby Digital Plus
+ technical: Enhanced AC-3
+ TRUEHD:
+ default: TrueHD
+ human: Dolby TrueHD
+ ATMOS:
+ default: Atmos
+ human: Dolby Atmos
+ DTS:
+ DTSHD:
+ default: DTS-HD
+ AAC:
+ human: Advanced Audio Coding
+ FLAC:
+ human: Free Lossless Audio Codec
+ PCM:
+ human: Pulse-code Modulation
+ MP2:
+ human: MPEG Audio Layer 2
+ technical: MPEG-1/MPEG-2 Audio Layer 2
+ MP3:
+ human: MPEG Audio Layer 3
+ technical: MPEG-1/MPEG-2 Audio Layer 3
+ VORBIS:
+ default: Vorbis
+ OPUS:
+ default: Opus
+ WMA1:
+ default: WMA
+ human: Windows Media Audio 1
+ WMA2:
+ default: WMA 2
+ human: Windows Media Audio 2
+ WMAPRO:
+ default: WMA Pro
+ human: Windows Media Audio Pro
+ RT29:
+ default: RT29 MetaSound
+ human: Voxware RT29 MetaSound
+
+ SubtitleFormat:
+ PGS:
+ human: Presentation Graphic Stream
+ VOBSUB:
+ default: VobSub
+ SUBRIP:
+ default: SubRip
+ SSA:
+ human: SubStation Alpha
+ ASS:
+ human: Advanced SubStation Alpha
+ TX3G:
+ human: MPEG-4 Timed Text
+ technical: MPEG-4 Part 17
+ DVBSUB:
+ default: DVBSub
+ human: DVB Subtitle
+ technical: Digital Video Broadcasting Subtitles
+ MOVTEXT:
+ default: MOV Text
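The two top-level sections work as a pair: knowledge maps each canonical key to the raw identifiers that providers may report, and profiles maps the same key to the label emitted for each output profile (default, human, technical, plus an optional aka). A minimal sketch of the lookup chain for one value, config-loading details omitted:

    raw = 'V_MPEG4/ISO/AVC'          # codec id as stored for a Matroska H.264 track
    # 1. VideoCodec._extract_key(raw) keeps the last '/' segment  -> 'AVC'
    # 2. knowledge lists 'AVC' under H264                         -> canonical key H264
    # 3. profiles -> VideoCodec -> H264 selects the label:
    #      default:   'H.264'
    #      human:     'Advanced Video Coding (H.264)'
    #      technical: 'MPEG-4 Part 10 - Advanced Video Coding'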
diff --git a/libs/knowit/properties/__init__.py b/libs/knowit/properties/__init__.py
new file mode 100644
index 000000000..f871bc47f
--- /dev/null
+++ b/libs/knowit/properties/__init__.py
@@ -0,0 +1,27 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from .audio import (
+ AudioChannels,
+ AudioCodec,
+ AudioCompression,
+ AudioProfile,
+ BitRateMode,
+)
+from .basic import Basic
+from .duration import Duration
+from .language import Language
+from .quantity import Quantity
+from .subtitle import (
+ SubtitleFormat,
+)
+from .video import (
+ Ratio,
+ ScanType,
+ VideoCodec,
+ VideoEncoder,
+ VideoProfile,
+ VideoProfileLevel,
+ VideoProfileTier,
+)
+from .yesno import YesNo
diff --git a/libs/knowit/properties/audio/__init__.py b/libs/knowit/properties/audio/__init__.py
new file mode 100644
index 000000000..c7a1198f2
--- /dev/null
+++ b/libs/knowit/properties/audio/__init__.py
@@ -0,0 +1,8 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from .bitratemode import BitRateMode
+from .channels import AudioChannels
+from .codec import AudioCodec
+from .compression import AudioCompression
+from .profile import AudioProfile
diff --git a/libs/knowit/properties/audio/bitratemode.py b/libs/knowit/properties/audio/bitratemode.py
new file mode 100644
index 000000000..82fb9e68f
--- /dev/null
+++ b/libs/knowit/properties/audio/bitratemode.py
@@ -0,0 +1,10 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from ...property import Configurable
+
+
+class BitRateMode(Configurable):
+ """Bit Rate mode property."""
+
+ pass
diff --git a/libs/knowit/properties/audio/channels.py b/libs/knowit/properties/audio/channels.py
new file mode 100644
index 000000000..597a46bc5
--- /dev/null
+++ b/libs/knowit/properties/audio/channels.py
@@ -0,0 +1,26 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from six import text_type
+
+from ...property import Property
+
+
+class AudioChannels(Property):
+ """Audio Channels property."""
+
+ ignored = {
+ 'object based', # Dolby Atmos
+ }
+
+ def handle(self, value, context):
+ """Handle audio channels."""
+ if isinstance(value, int):
+ return value
+
+ v = text_type(value).lower()
+ if v not in self.ignored:
+ try:
+ return int(v)
+ except ValueError:
+ self.report(value, context)
diff --git a/libs/knowit/properties/audio/codec.py b/libs/knowit/properties/audio/codec.py
new file mode 100644
index 000000000..9107de4e7
--- /dev/null
+++ b/libs/knowit/properties/audio/codec.py
@@ -0,0 +1,24 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from six import text_type
+
+from ...property import Configurable
+
+
+class AudioCodec(Configurable):
+ """Audio codec property."""
+
+ @classmethod
+ def _extract_key(cls, value):
+ key = text_type(value).upper()
+ if key.startswith('A_'):
+ key = key[2:]
+
+ # only the first part of the word. E.g.: 'AAC LC' => 'AAC'
+ return key.split(' ')[0]
+
+ @classmethod
+ def _extract_fallback_key(cls, value, key):
+ if '/' in key:
+ return key.split('/')[0]
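A few hypothetical inputs to illustrate the extraction above; the classmethods can be exercised directly:

    AudioCodec._extract_key('A_EAC3')       # -> 'EAC3'  ('A_' prefix stripped)
    AudioCodec._extract_key('AAC LC SBR')   # -> 'AAC'   (only the first word is kept)
    # the fallback fires only when the full key is not in the mapping:
    AudioCodec._extract_fallback_key('FOO/BAR', 'FOO/BAR')   # -> 'FOO' (hypothetical codec id)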
diff --git a/libs/knowit/properties/audio/compression.py b/libs/knowit/properties/audio/compression.py
new file mode 100644
index 000000000..4842b80e9
--- /dev/null
+++ b/libs/knowit/properties/audio/compression.py
@@ -0,0 +1,10 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from ...property import Configurable
+
+
+class AudioCompression(Configurable):
+ """Audio Compression property."""
+
+ pass
diff --git a/libs/knowit/properties/audio/profile.py b/libs/knowit/properties/audio/profile.py
new file mode 100644
index 000000000..05a39c98e
--- /dev/null
+++ b/libs/knowit/properties/audio/profile.py
@@ -0,0 +1,10 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from ...property import Configurable
+
+
+class AudioProfile(Configurable):
+ """Audio profile property."""
+
+ pass
diff --git a/libs/knowit/properties/basic.py b/libs/knowit/properties/basic.py
new file mode 100644
index 000000000..46176cdd4
--- /dev/null
+++ b/libs/knowit/properties/basic.py
@@ -0,0 +1,27 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from six import text_type
+
+from ..property import Property
+
+
+class Basic(Property):
+ """Basic property to handle int, float and other basic types."""
+
+ def __init__(self, name, data_type, allow_fallback=False, **kwargs):
+ """Init method."""
+ super(Basic, self).__init__(name, **kwargs)
+ self.data_type = data_type
+ self.allow_fallback = allow_fallback
+
+ def handle(self, value, context):
+ """Handle value."""
+ if isinstance(value, self.data_type):
+ return value
+
+ try:
+ return self.data_type(text_type(value))
+ except ValueError:
+ if not self.allow_fallback:
+ self.report(value, context)
diff --git a/libs/knowit/properties/duration.py b/libs/knowit/properties/duration.py
new file mode 100644
index 000000000..f902356c2
--- /dev/null
+++ b/libs/knowit/properties/duration.py
@@ -0,0 +1,38 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+import re
+from datetime import timedelta
+
+from six import text_type
+
+from ..property import Property
+
+
+class Duration(Property):
+ """Duration property."""
+
+ duration_re = re.compile(r'(?P<hours>\d{1,2}):'
+ r'(?P<minutes>\d{1,2}):'
+ r'(?P<seconds>\d{1,2})(?:\.'
+ r'(?P<millis>\d{3})'
+ r'(?P<micro>\d{3})?\d*)?')
+
+ def handle(self, value, context):
+ """Return duration as timedelta."""
+ if isinstance(value, timedelta):
+ return value
+ elif isinstance(value, int):
+ return timedelta(milliseconds=value)
+ try:
+ return timedelta(milliseconds=int(float(value)))
+ except ValueError:
+ pass
+
+ try:
+ h, m, s, ms, mc = self.duration_re.match(text_type(value)).groups('0')
+ return timedelta(hours=int(h), minutes=int(m), seconds=int(s), milliseconds=int(ms), microseconds=int(mc))
+ except (AttributeError, ValueError): # AttributeError when duration_re does not match
+ pass
+
+ self.report(value, context)
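A minimal sketch of the three shapes the handler accepts: integer milliseconds, numeric strings, and the H:MM:SS.millis form matched by duration_re:

    d = Duration('duration')
    d.handle(90061007, {})             # int is taken as milliseconds
    d.handle('90061007.000000', {})    # numeric string, same result
    d.handle('0:02:35.080000', {})     # -> timedelta(minutes=2, seconds=35, milliseconds=80)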
diff --git a/libs/knowit/properties/language.py b/libs/knowit/properties/language.py
new file mode 100644
index 000000000..b203c816c
--- /dev/null
+++ b/libs/knowit/properties/language.py
@@ -0,0 +1,28 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+import babelfish
+
+from ..property import Property
+
+
+class Language(Property):
+ """Language property."""
+
+ def handle(self, value, context):
+ """Handle languages."""
+ try:
+ if len(value) == 3:
+ return babelfish.Language.fromalpha3b(value)
+
+ return babelfish.Language.fromietf(value)
+ except (babelfish.Error, ValueError):
+ pass
+
+ try:
+ return babelfish.Language.fromname(value)
+ except babelfish.Error:
+ pass
+
+ self.report(value, context)
+ return babelfish.Language('und')
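A minimal usage sketch: 3-letter codes go through ISO 639-2/B, everything else is tried as an IETF tag, then as a plain language name, and unknown values fall back to the undetermined language:

    lang = Language('language')
    lang.handle('eng', {})      # -> babelfish.Language('eng')
    lang.handle('pt-BR', {})    # IETF tag -> Language('por', country='BR')
    lang.handle('French', {})   # name lookup -> Language('fra')
    lang.handle('???', {})      # unknown: reported and returned as Language('und')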
diff --git a/libs/knowit/properties/quantity.py b/libs/knowit/properties/quantity.py
new file mode 100644
index 000000000..487dc275d
--- /dev/null
+++ b/libs/knowit/properties/quantity.py
@@ -0,0 +1,27 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from six import text_type
+
+from ..property import Property
+
+
+class Quantity(Property):
+ """Quantity is a property with unit."""
+
+ def __init__(self, name, unit, data_type=int, **kwargs):
+ """Init method."""
+ super(Quantity, self).__init__(name, **kwargs)
+ self.unit = unit
+ self.data_type = data_type
+
+ def handle(self, value, context):
+ """Handle value with unit."""
+ if not isinstance(value, self.data_type):
+ try:
+ value = self.data_type(text_type(value))
+ except ValueError:
+ self.report(value, context)
+ return
+
+ return value if context.get('no_units') else value * self.unit
diff --git a/libs/knowit/properties/subtitle/__init__.py b/libs/knowit/properties/subtitle/__init__.py
new file mode 100644
index 000000000..b791152fb
--- /dev/null
+++ b/libs/knowit/properties/subtitle/__init__.py
@@ -0,0 +1,4 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from .format import SubtitleFormat
diff --git a/libs/knowit/properties/subtitle/format.py b/libs/knowit/properties/subtitle/format.py
new file mode 100644
index 000000000..7d57348ca
--- /dev/null
+++ b/libs/knowit/properties/subtitle/format.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from six import text_type
+
+from ...property import Configurable
+
+
+class SubtitleFormat(Configurable):
+ """Subtitle Format property."""
+
+ @classmethod
+ def _extract_key(cls, value):
+ key = text_type(value).upper()
+ if key.startswith('S_'):
+ key = key[2:]
+
+ return key.split('/')[-1]
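Hypothetical codec ids to illustrate the extraction: the 'S_' prefix is dropped and only the last '/' segment is kept, which then resolves through defaults.yml:

    SubtitleFormat._extract_key('S_TEXT/UTF8')   # -> 'UTF8' (SubRip in defaults.yml)
    SubtitleFormat._extract_key('S_HDMV/PGS')    # -> 'PGS'
    SubtitleFormat._extract_key('subrip')        # -> 'SUBRIP'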
diff --git a/libs/knowit/properties/video/__init__.py b/libs/knowit/properties/video/__init__.py
new file mode 100644
index 000000000..e823b39d6
--- /dev/null
+++ b/libs/knowit/properties/video/__init__.py
@@ -0,0 +1,10 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from .codec import VideoCodec
+from .encoder import VideoEncoder
+from .profile import VideoProfile
+from .profile import VideoProfileLevel
+from .profile import VideoProfileTier
+from .ratio import Ratio
+from .scantype import ScanType
diff --git a/libs/knowit/properties/video/codec.py b/libs/knowit/properties/video/codec.py
new file mode 100644
index 000000000..d1a873cd5
--- /dev/null
+++ b/libs/knowit/properties/video/codec.py
@@ -0,0 +1,16 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from ...property import Configurable
+
+
+class VideoCodec(Configurable):
+ """Video Codec handler."""
+
+ @classmethod
+ def _extract_key(cls, value):
+ key = value.upper().split('/')[-1]
+ if key.startswith('V_'):
+ key = key[2:]
+
+ return key.split(' ')[-1]
diff --git a/libs/knowit/properties/video/encoder.py b/libs/knowit/properties/video/encoder.py
new file mode 100644
index 000000000..b2c925b69
--- /dev/null
+++ b/libs/knowit/properties/video/encoder.py
@@ -0,0 +1,10 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from ...property import Configurable
+
+
+class VideoEncoder(Configurable):
+ """Video Encoder property."""
+
+ pass
diff --git a/libs/knowit/properties/video/profile.py b/libs/knowit/properties/video/profile.py
new file mode 100644
index 000000000..2459d40d0
--- /dev/null
+++ b/libs/knowit/properties/video/profile.py
@@ -0,0 +1,41 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from six import text_type
+
+from ...property import Configurable
+
+
+class VideoProfile(Configurable):
+ """Video Profile property."""
+
+ @classmethod
+ def _extract_key(cls, value):
+ return value.upper().split('@')[0]
+
+
+class VideoProfileLevel(Configurable):
+ """Video Profile Level property."""
+
+ @classmethod
+ def _extract_key(cls, value):
+ values = text_type(value).upper().split('@')
+ if len(values) > 1:
+ value = values[1]
+ return value
+
+ # There's no level, so don't warn or report it
+ return False
+
+
+class VideoProfileTier(Configurable):
+ """Video Profile Tier property."""
+
+ @classmethod
+ def _extract_key(cls, value):
+ values = value.upper().split('@')
+ if len(values) > 2:
+ return values[2]
+
+ # There's no tier, so don't warn or report it
+ return False
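These three classes slice the same raw string, reported by MediaInfo as profile@level@tier; a hypothetical HEVC value shows the indexes used above:

    VideoProfile._extract_key('Main 10@L5.1@High')       # -> 'MAIN 10'
    VideoProfileLevel._extract_key('Main 10@L5.1@High')  # -> 'L5.1'
    VideoProfileTier._extract_key('Main 10@L5.1@High')   # -> 'HIGH'
    VideoProfileLevel._extract_key('High')               # -> False (no level, nothing reported)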
diff --git a/libs/knowit/properties/video/ratio.py b/libs/knowit/properties/video/ratio.py
new file mode 100644
index 000000000..149183bd2
--- /dev/null
+++ b/libs/knowit/properties/video/ratio.py
@@ -0,0 +1,35 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+import re
+
+from six import text_type
+
+from ...property import Property
+
+
+class Ratio(Property):
+ """Ratio property."""
+
+ def __init__(self, name, unit=None, **kwargs):
+ """Constructor."""
+ super(Ratio, self).__init__(name, **kwargs)
+ self.unit = unit
+
+ ratio_re = re.compile(r'(?P<width>\d+)[:/](?P<height>\d+)')
+
+ def handle(self, value, context):
+ """Handle ratio."""
+ match = self.ratio_re.match(text_type(value))
+ if match:
+ width, height = match.groups()
+ if (width, height) == ('0', '1'): # identity
+ return 1.
+
+ result = round(float(width) / float(height), 3)
+ if self.unit:
+ result *= self.unit
+
+ return result
+
+ self.report(value, context)
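A minimal sketch, assuming the pint registry exposed by knowit.units; ratios are rounded to three decimals and optionally carry a unit:

    from knowit.properties import Ratio
    from knowit.units import units

    Ratio('display_aspect_ratio').handle('16:9', {})              # -> 1.778
    Ratio('display_aspect_ratio').handle('0:1', {})               # identity -> 1.0
    Ratio('frame_rate', unit=units.FPS).handle('24000/1001', {})  # -> 23.976 fps (pint quantity)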
diff --git a/libs/knowit/properties/video/scantype.py b/libs/knowit/properties/video/scantype.py
new file mode 100644
index 000000000..e744ff7ad
--- /dev/null
+++ b/libs/knowit/properties/video/scantype.py
@@ -0,0 +1,10 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from ...property import Configurable
+
+
+class ScanType(Configurable):
+ """Scan Type property."""
+
+ pass
diff --git a/libs/knowit/properties/yesno.py b/libs/knowit/properties/yesno.py
new file mode 100644
index 000000000..28edce59b
--- /dev/null
+++ b/libs/knowit/properties/yesno.py
@@ -0,0 +1,25 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from six import text_type
+
+from ..property import Property
+
+
+class YesNo(Property):
+ """Yes or No handler."""
+
+ mapping = ('yes', 'true', '1')
+
+ def __init__(self, name, yes=True, no=False, hide_value=None, **kwargs):
+ """Init method."""
+ super(YesNo, self).__init__(name, **kwargs)
+ self.yes = yes
+ self.no = no
+ self.hide_value = hide_value
+
+ def handle(self, value, context):
+ """Handle boolean values."""
+ v = text_type(value).lower()
+ result = self.yes if v in self.mapping else self.no
+ return result if result != self.hide_value else None
diff --git a/libs/knowit/property.py b/libs/knowit/property.py
new file mode 100644
index 000000000..475ea403b
--- /dev/null
+++ b/libs/knowit/property.py
@@ -0,0 +1,137 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from logging import NullHandler, getLogger
+from six import PY3, binary_type, string_types, text_type
+
+from .core import Reportable
+
+logger = getLogger(__name__)
+logger.addHandler(NullHandler())
+
+_visible_chars_table = dict.fromkeys(range(32))
+
+
+def _is_unknown(value):
+ return isinstance(value, text_type) and (not value or value.lower() == 'unknown')
+
+
+class Property(Reportable):
+ """Property class."""
+
+ def __init__(self, name, default=None, private=False, description=None, delimiter=' / ', **kwargs):
+ """Init method."""
+ super(Property, self).__init__(name, description, **kwargs)
+ self.default = default
+ self.private = private
+ # Used to detect duplicated values. e.g.: en / en or High@L4.0 / High@L4.0 or Progressive / Progressive
+ self.delimiter = delimiter
+
+ def extract_value(self, track, context):
+ """Extract the property value from a given track."""
+ names = self.name.split('.')
+ value = track.get(names[0], {}).get(names[1]) if len(names) == 2 else track.get(self.name)
+ if value is None:
+ if self.default is None:
+ return
+
+ value = self.default
+
+ if isinstance(value, string_types):
+ if isinstance(value, binary_type):
+ value = text_type(value)
+ else:
+ value = value.translate(_visible_chars_table).strip()
+ if _is_unknown(value):
+ return
+ value = self._deduplicate(value)
+
+ result = self.handle(value, context)
+ if result is not None and not _is_unknown(result):
+ return result
+
+ @classmethod
+ def _deduplicate(cls, value):
+ values = value.split(' / ')
+ if len(values) == 2 and values[0] == values[1]:
+ return values[0]
+ return value
+
+ def handle(self, value, context):
+ """Return the value without any modification."""
+ return value
+
+
+class Configurable(Property):
+ """Configurable property where values are in a config mapping."""
+
+ def __init__(self, config, *args, **kwargs):
+ """Init method."""
+ super(Configurable, self).__init__(*args, **kwargs)
+ self.mapping = getattr(config, self.__class__.__name__)
+
+ @classmethod
+ def _extract_key(cls, value):
+ return text_type(value).upper()
+
+ @classmethod
+ def _extract_fallback_key(cls, value, key):
+ pass
+
+ def _lookup(self, key, context):
+ result = self.mapping.get(key)
+ if result is not None:
+ result = getattr(result, context.get('profile') or 'default')
+ return result if result != '__ignored__' else False
+
+ def handle(self, value, context):
+ """Return Variable or Constant."""
+ key = self._extract_key(value)
+ if key is False:
+ return
+
+ result = self._lookup(key, context)
+ if result is False:
+ return
+
+ while not result and key:
+ key = self._extract_fallback_key(value, key)
+ result = self._lookup(key, context)
+ if result is False:
+ return
+
+ if not result:
+ self.report(value, context)
+
+ return result
+
+
+class MultiValue(Property):
+ """Property with multiple values."""
+
+ def __init__(self, prop=None, delimiter='/', single=False, handler=None, name=None, **kwargs):
+ """Init method."""
+ super(MultiValue, self).__init__(prop.name if prop else name, **kwargs)
+ self.prop = prop
+ self.delimiter = delimiter
+ self.single = single
+ self.handler = handler
+
+ def handle(self, value, context):
+ """Handle properties with multiple values."""
+ values = (self._split(value[0], self.delimiter)
+ if len(value) == 1 else value) if isinstance(value, list) else self._split(value, self.delimiter)
+ call = self.handler or self.prop.handle
+ if len(values) > 1 and not self.single:
+ return [call(item, context) if not _is_unknown(item) else None for item in values]
+
+ return call(values[0], context)
+
+ @classmethod
+ def _split(cls, value, delimiter='/'):
+ if value is None:
+ return
+
+ v = text_type(value)
+ result = map(text_type.strip, v.split(delimiter))
+ return list(result) if PY3 else result
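A few hypothetical calls to show the helpers above: dotted names walk one level into the track dict, duplicated 'x / x' strings collapse to a single value, and MultiValue splits on its delimiter:

    Property('tags.title').extract_value({'tags': {'title': 'Sintel'}}, {})  # -> 'Sintel'
    Property._deduplicate('Progressive / Progressive')                       # -> 'Progressive'
    MultiValue._split('AC-3 / DTS')                                          # -> ['AC-3', 'DTS']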
diff --git a/libs/knowit/provider.py b/libs/knowit/provider.py
new file mode 100644
index 000000000..cb58c0180
--- /dev/null
+++ b/libs/knowit/provider.py
@@ -0,0 +1,135 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+import os
+from logging import NullHandler, getLogger
+
+from . import OrderedDict
+from .properties import Quantity
+from .units import units
+
+logger = getLogger(__name__)
+logger.addHandler(NullHandler())
+
+
+size_property = Quantity('size', units.byte, description='media size')
+
+
+class Provider(object):
+ """Base class for all providers."""
+
+ min_fps = 10
+ max_fps = 200
+
+ def __init__(self, config, mapping, rules=None):
+ """Init method."""
+ self.config = config
+ self.mapping = mapping
+ self.rules = rules or {}
+
+ def accepts(self, target):
+ """Whether or not the video is supported by this provider."""
+ raise NotImplementedError
+
+ def describe(self, target, context):
+ """Read video metadata information."""
+ raise NotImplementedError
+
+ def _describe_tracks(self, video_path, general_track, video_tracks, audio_tracks, subtitle_tracks, context):
+ logger.debug('Handling general track')
+ props = self._describe_track(general_track, 'general', context)
+
+ if 'path' not in props:
+ props['path'] = video_path
+ if 'container' not in props:
+ props['container'] = os.path.splitext(video_path)[1][1:]
+ if 'size' not in props and os.path.isfile(video_path):
+ props['size'] = size_property.handle(os.path.getsize(video_path), context)
+
+ for track_type, tracks in (('video', video_tracks),
+ ('audio', audio_tracks),
+ ('subtitle', subtitle_tracks)):
+ results = []
+ for track in tracks or []:
+ logger.debug('Handling %s track', track_type)
+ t = self._validate_track(track_type, self._describe_track(track, track_type, context))
+ if t:
+ results.append(t)
+
+ if results:
+ props[track_type] = results
+
+ return props
+
+ @classmethod
+ def _validate_track(cls, track_type, track):
+ if track_type != 'video' or 'frame_rate' not in track:
+ return track
+
+ frame_rate = track['frame_rate']
+ try:
+ frame_rate = frame_rate.magnitude
+ except AttributeError:
+ pass
+
+ if cls.min_fps < frame_rate < cls.max_fps:
+ return track
+
+ def _describe_track(self, track, track_type, context):
+ """Describe track to a dict.
+
+ :param track:
+ :param track_type:
+ :rtype: dict
+ """
+ props = OrderedDict()
+ pv_props = {}
+ for name, prop in self.mapping[track_type].items():
+ if not prop:
+ # placeholder to be populated by rules. It keeps the order
+ props[name] = None
+ continue
+
+ value = prop.extract_value(track, context)
+ if value is not None:
+ if not prop.private:
+ which = props
+ else:
+ which = pv_props
+ which[name] = value
+
+ for name, rule in self.rules.get(track_type, {}).items():
+ if props.get(name) is not None and not rule.override:
+ logger.debug('Skipping rule %s since property is already present: %r', name, props[name])
+ continue
+
+ value = rule.execute(props, pv_props, context)
+ if value is not None:
+ props[name] = value
+ elif name in props and not rule.override:
+ del props[name]
+
+ return props
+
+ @property
+ def version(self):
+ """Return provider version information."""
+ raise NotImplementedError
+
+
+class ProviderError(Exception):
+ """Base class for provider exceptions."""
+
+ pass
+
+
+class MalformedFileError(ProviderError):
+ """Malformed File error."""
+
+ pass
+
+
+class UnsupportedFileFormatError(ProviderError):
+ """Unsupported File Format error."""
+
+ pass
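A sketch of the frame-rate sanity check with hypothetical track dicts; video tracks outside the 10-200 fps window are silently dropped by _describe_tracks:

    Provider._validate_track('video', {'frame_rate': 23.976})   # kept: 10 < fps < 200
    Provider._validate_track('video', {'frame_rate': 1000.0})   # -> None, track discarded
    Provider._validate_track('audio', {'codec': 'AAC'})         # non-video tracks pass through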
diff --git a/libs/knowit/providers/__init__.py b/libs/knowit/providers/__init__.py
new file mode 100644
index 000000000..0d87e98ed
--- /dev/null
+++ b/libs/knowit/providers/__init__.py
@@ -0,0 +1,7 @@
+# -*- coding: utf-8 -*-
+"""Provider package."""
+from __future__ import unicode_literals
+
+from .enzyme import EnzymeProvider
+from .ffmpeg import FFmpegProvider
+# from .mediainfo import MediaInfoProvider
diff --git a/libs/knowit/providers/enzyme.py b/libs/knowit/providers/enzyme.py
new file mode 100644
index 000000000..dd9c29417
--- /dev/null
+++ b/libs/knowit/providers/enzyme.py
@@ -0,0 +1,153 @@
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import, unicode_literals
+
+import json
+import logging
+from collections import defaultdict
+from logging import NullHandler, getLogger
+import enzyme
+
+from .. import OrderedDict
+from ..properties import (
+ AudioCodec,
+ Basic,
+ Duration,
+ Language,
+ Quantity,
+ VideoCodec,
+ YesNo,
+)
+from ..property import Property
+from ..provider import (
+ MalformedFileError,
+ Provider,
+)
+from ..rules import (
+ AudioChannelsRule,
+ ClosedCaptionRule,
+ HearingImpairedRule,
+ LanguageRule,
+ ResolutionRule,
+)
+from ..serializer import get_json_encoder
+from ..units import units
+from ..utils import todict
+
+logger = getLogger(__name__)
+logger.addHandler(NullHandler())
+
+
+class EnzymeProvider(Provider):
+ """Enzyme Provider."""
+
+ def __init__(self, config, *args, **kwargs):
+ """Init method."""
+ super(EnzymeProvider, self).__init__(config, {
+ 'general': OrderedDict([
+ ('title', Property('title', description='media title')),
+ ('duration', Duration('duration', description='media duration')),
+ ]),
+ 'video': OrderedDict([
+ ('id', Basic('number', int, description='video track number')),
+ ('name', Property('name', description='video track name')),
+ ('language', Language('language', description='video language')),
+ ('width', Quantity('width', units.pixel)),
+ ('height', Quantity('height', units.pixel)),
+ ('scan_type', YesNo('interlaced', yes='Interlaced', no='Progressive', default='Progressive',
+ description='video scan type')),
+ ('resolution', None), # populated with ResolutionRule
+ # ('bit_depth', Property('bit_depth', Integer('video bit depth'))),
+ ('codec', VideoCodec(config, 'codec_id', description='video codec')),
+ ('forced', YesNo('forced', hide_value=False, description='video track forced')),
+ ('default', YesNo('default', hide_value=False, description='video track default')),
+ ('enabled', YesNo('enabled', hide_value=True, description='video track enabled')),
+ ]),
+ 'audio': OrderedDict([
+ ('id', Basic('number', int, description='audio track number')),
+ ('name', Property('name', description='audio track name')),
+ ('language', Language('language', description='audio language')),
+ ('codec', AudioCodec(config, 'codec_id', description='audio codec')),
+ ('channels_count', Basic('channels', int, description='audio channels count')),
+ ('channels', None), # populated with AudioChannelsRule
+ ('forced', YesNo('forced', hide_value=False, description='audio track forced')),
+ ('default', YesNo('default', hide_value=False, description='audio track default')),
+ ('enabled', YesNo('enabled', hide_value=True, description='audio track enabled')),
+ ]),
+ 'subtitle': OrderedDict([
+ ('id', Basic('number', int, description='subtitle track number')),
+ ('name', Property('name', description='subtitle track name')),
+ ('language', Language('language', description='subtitle language')),
+ ('hearing_impaired', None), # populated with HearingImpairedRule
+ ('closed_caption', None), # populated with ClosedCaptionRule
+ ('forced', YesNo('forced', hide_value=False, description='subtitle track forced')),
+ ('default', YesNo('default', hide_value=False, description='subtitle track default')),
+ ('enabled', YesNo('enabled', hide_value=True, description='subtitle track enabled')),
+ ]),
+ }, {
+ 'video': OrderedDict([
+ ('language', LanguageRule('video language')),
+ ('resolution', ResolutionRule('video resolution')),
+ ]),
+ 'audio': OrderedDict([
+ ('language', LanguageRule('audio language')),
+ ('channels', AudioChannelsRule('audio channels')),
+ ]),
+ 'subtitle': OrderedDict([
+ ('language', LanguageRule('subtitle language')),
+ ('hearing_impaired', HearingImpairedRule('subtitle hearing impaired')),
+ ('closed_caption', ClosedCaptionRule('closed caption')),
+ ])
+ })
+
+ def accepts(self, video_path):
+ """Accept only MKV files."""
+ return video_path.lower().endswith('.mkv')
+
+ @classmethod
+ def extract_info(cls, video_path):
+ """Extract info from the video."""
+ with open(video_path, 'rb') as f:
+ return todict(enzyme.MKV(f))
+
+ def describe(self, video_path, context):
+ """Return video metadata."""
+ try:
+ data = defaultdict(dict)
+ ff = self.extract_info(video_path)
+
+ def debug_data():
+ """Debug data."""
+ return json.dumps(ff, cls=get_json_encoder(context), indent=4, ensure_ascii=False)
+ context['debug_data'] = debug_data
+
+ if logger.isEnabledFor(logging.DEBUG):
+ logger.debug('Video %r scanned using enzyme %r has raw data:\n%s',
+ video_path, enzyme.__version__, debug_data())
+
+ data.update(ff)
+ if 'info' in data and data['info'] is None:
+ return {}
+ except enzyme.MalformedMKVError: # pragma: no cover
+ raise MalformedFileError
+
+ if logger.level == logging.DEBUG:
+ logger.debug('Video %r scanned using enzyme %r has raw data:\n%s',
+ video_path, enzyme.__version__, json.dumps(data))
+
+ result = self._describe_tracks(video_path, data.get('info', {}), data.get('video_tracks'),
+ data.get('audio_tracks'), data.get('subtitle_tracks'), context)
+
+ if not result:
+ raise MalformedFileError
+
+ result['provider'] = {
+ 'name': 'enzyme',
+ 'version': self.version
+ }
+
+ return result
+
+ @property
+ def version(self):
+ """Return enzyme version information."""
+ return {'enzyme': enzyme.__version__}
diff --git a/libs/knowit/providers/ffmpeg.py b/libs/knowit/providers/ffmpeg.py
new file mode 100644
index 000000000..c849bc43d
--- /dev/null
+++ b/libs/knowit/providers/ffmpeg.py
@@ -0,0 +1,276 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+import json
+import logging
+import re
+from logging import NullHandler, getLogger
+from subprocess import check_output
+
+from six import ensure_text
+
+from .. import (
+ OrderedDict,
+ VIDEO_EXTENSIONS,
+)
+from ..properties import (
+ AudioChannels,
+ AudioCodec,
+ AudioProfile,
+ Basic,
+ Duration,
+ Language,
+ Quantity,
+ Ratio,
+ ScanType,
+ SubtitleFormat,
+ VideoCodec,
+ VideoProfile,
+ VideoProfileLevel,
+ YesNo,
+)
+from ..property import (
+ Property,
+)
+from ..provider import (
+ MalformedFileError,
+ Provider,
+)
+from ..rules import (
+ AudioChannelsRule,
+ AudioCodecRule,
+ ClosedCaptionRule,
+ HearingImpairedRule,
+ LanguageRule,
+ ResolutionRule,
+)
+from ..serializer import get_json_encoder
+from ..units import units
+from ..utils import (
+ define_candidate,
+ detect_os,
+)
+
+logger = getLogger(__name__)
+logger.addHandler(NullHandler())
+
+
+WARN_MSG = r'''
+=========================================================================================
+FFmpeg (ffprobe) not found on your system or could not be loaded.
+Visit https://ffmpeg.org/download.html to download it.
+If you still have problems, please check if the downloaded version matches your system.
+To load FFmpeg (ffprobe) from a specific location, please define the location as follows:
+ knowit --ffmpeg /usr/local/ffmpeg/bin <video_path>
+ knowit --ffmpeg /usr/local/ffmpeg/bin/ffprobe <video_path>
+ knowit --ffmpeg "C:\Program Files\FFmpeg" <video_path>
+ knowit --ffmpeg C:\Software\ffprobe.exe <video_path>
+=========================================================================================
+'''
+
+
+class FFmpegExecutor(object):
+ """Executor that knows how to execute media info: using ctypes or cli."""
+
+ version_re = re.compile(r'\bversion\s+(?P<version>\d+(?:\.\d+)+)\b')
+ locations = {
+ 'unix': ('/usr/local/ffmpeg/lib', '/usr/local/ffmpeg/bin', '__PATH__'),
+ 'windows': ('__PATH__', ),
+ 'macos': ('__PATH__', ),
+ }
+
+ def __init__(self, location, version):
+ """Constructor."""
+ self.location = location
+ self.version = version
+
+ def extract_info(self, filename):
+ """Extract media info."""
+ json_dump = self._execute(filename)
+ return json.loads(json_dump)
+
+ def _execute(self, filename):
+ raise NotImplementedError
+
+ @classmethod
+ def _get_version(cls, output):
+ match = cls.version_re.search(output)
+ if match:
+ version = tuple([int(v) for v in match.groupdict()['version'].split('.')])
+ return version
+
+ @classmethod
+ def get_executor_instance(cls, suggested_path=None):
+ """Return executor instance."""
+ os_family = detect_os()
+ logger.debug('Detected os: %s', os_family)
+ for exec_cls in (FFmpegCliExecutor, ):
+ executor = exec_cls.create(os_family, suggested_path)
+ if executor:
+ return executor
+
+
+class FFmpegCliExecutor(FFmpegExecutor):
+ """Executor that uses FFmpeg (ffprobe) cli."""
+
+ names = {
+ 'unix': ('ffprobe', ),
+ 'windows': ('ffprobe.exe', ),
+ 'macos': ('ffprobe', ),
+ }
+
+ def _execute(self, filename):
+ return ensure_text(check_output([self.location, '-v', 'quiet', '-print_format', 'json',
+ '-show_format', '-show_streams', '-sexagesimal', filename]))
+
+ @classmethod
+ def create(cls, os_family=None, suggested_path=None):
+ """Create the executor instance."""
+ for candidate in define_candidate(cls.locations, cls.names, os_family, suggested_path):
+ try:
+ output = ensure_text(check_output([candidate, '-version']))
+ version = cls._get_version(output)
+ if version:
+ logger.debug('FFmpeg cli detected: %s v%s', candidate, '.'.join(map(str, version)))
+ return FFmpegCliExecutor(candidate, version)
+ except OSError:
+ pass
+
+
+class FFmpegProvider(Provider):
+ """FFmpeg provider."""
+
+ def __init__(self, config, suggested_path=None):
+ """Init method."""
+ super(FFmpegProvider, self).__init__(config, {
+ 'general': OrderedDict([
+ ('title', Property('tags.title', description='media title')),
+ ('path', Property('filename', description='media path')),
+ ('duration', Duration('duration', description='media duration')),
+ ('size', Quantity('size', units.byte, description='media size')),
+ ('bit_rate', Quantity('bit_rate', units.bps, description='media bit rate')),
+ ]),
+ 'video': OrderedDict([
+ ('id', Basic('index', int, allow_fallback=True, description='video track number')),
+ ('name', Property('tags.title', description='video track name')),
+ ('language', Language('tags.language', description='video language')),
+ ('duration', Duration('duration', description='video duration')),
+ ('width', Quantity('width', units.pixel)),
+ ('height', Quantity('height', units.pixel)),
+ ('scan_type', ScanType(config, 'field_order', default='Progressive', description='video scan type')),
+ ('aspect_ratio', Ratio('display_aspect_ratio', description='display aspect ratio')),
+ ('pixel_aspect_ratio', Ratio('sample_aspect_ratio', description='pixel aspect ratio')),
+ ('resolution', None), # populated with ResolutionRule
+ ('frame_rate', Ratio('r_frame_rate', unit=units.FPS, description='video frame rate')),
+ # frame_rate_mode
+ ('bit_rate', Quantity('bit_rate', units.bps, description='video bit rate')),
+ ('bit_depth', Quantity('bits_per_raw_sample', units.bit, description='video bit depth')),
+ ('codec', VideoCodec(config, 'codec_name', description='video codec')),
+ ('profile', VideoProfile(config, 'profile', description='video codec profile')),
+ ('profile_level', VideoProfileLevel(config, 'level', description='video codec profile level')),
+ # ('profile_tier', VideoProfileTier(config, 'codec_profile', description='video codec profile tier')),
+ ('forced', YesNo('disposition.forced', hide_value=False, description='video track forced')),
+ ('default', YesNo('disposition.default', hide_value=False, description='video track default')),
+ ]),
+ 'audio': OrderedDict([
+ ('id', Basic('index', int, allow_fallback=True, description='audio track number')),
+ ('name', Property('tags.title', description='audio track name')),
+ ('language', Language('tags.language', description='audio language')),
+ ('duration', Duration('duration', description='audio duration')),
+ ('codec', AudioCodec(config, 'codec_name', description='audio codec')),
+ ('_codec', AudioCodec(config, 'profile', description='audio codec', private=True, reportable=False)),
+ ('profile', AudioProfile(config, 'profile', description='audio codec profile')),
+ ('channels_count', AudioChannels('channels', description='audio channels count')),
+ ('channels', None), # populated with AudioChannelsRule
+ ('bit_depth', Quantity('bits_per_raw_sample', units.bit, description='audio bit depth')),
+ ('bit_rate', Quantity('bit_rate', units.bps, description='audio bit rate')),
+ ('sampling_rate', Quantity('sample_rate', units.Hz, description='audio sampling rate')),
+ ('forced', YesNo('disposition.forced', hide_value=False, description='audio track forced')),
+ ('default', YesNo('disposition.default', hide_value=False, description='audio track default')),
+ ]),
+ 'subtitle': OrderedDict([
+ ('id', Basic('index', int, allow_fallback=True, description='subtitle track number')),
+ ('name', Property('tags.title', description='subtitle track name')),
+ ('language', Language('tags.language', description='subtitle language')),
+ ('hearing_impaired', YesNo('disposition.hearing_impaired',
+ hide_value=False, description='subtitle hearing impaired')),
+ ('closed_caption', None), # populated with ClosedCaptionRule
+ ('format', SubtitleFormat(config, 'codec_name', description='subtitle format')),
+ ('forced', YesNo('disposition.forced', hide_value=False, description='subtitle track forced')),
+ ('default', YesNo('disposition.default', hide_value=False, description='subtitle track default')),
+ ]),
+ }, {
+ 'video': OrderedDict([
+ ('language', LanguageRule('video language')),
+ ('resolution', ResolutionRule('video resolution')),
+ ]),
+ 'audio': OrderedDict([
+ ('language', LanguageRule('audio language')),
+ ('channels', AudioChannelsRule('audio channels')),
+ ('codec', AudioCodecRule('audio codec', override=True)),
+ ]),
+ 'subtitle': OrderedDict([
+ ('language', LanguageRule('subtitle language')),
+ ('hearing_impaired', HearingImpairedRule('subtitle hearing impaired')),
+ ('closed_caption', ClosedCaptionRule('closed caption'))
+ ])
+ })
+ self.executor = FFmpegExecutor.get_executor_instance(suggested_path)
+
+ def accepts(self, video_path):
+ """Accept any video when FFprobe is available."""
+ if self.executor is None:
+ logger.warning(WARN_MSG)
+ self.executor = False
+
+ return self.executor and video_path.lower().endswith(VIDEO_EXTENSIONS)
+
+ def describe(self, video_path, context):
+ """Return video metadata."""
+ data = self.executor.extract_info(video_path)
+
+ def debug_data():
+ """Debug data."""
+ return json.dumps(data, cls=get_json_encoder(context), indent=4, ensure_ascii=False)
+
+ context['debug_data'] = debug_data
+
+ if logger.isEnabledFor(logging.DEBUG):
+ logger.debug('Video %r scanned using ffmpeg %r has raw data:\n%s',
+ video_path, self.executor.location, debug_data())
+
+ general_track = data.get('format') or {}
+ if 'tags' in general_track:
+ general_track['tags'] = {k.lower(): v for k, v in general_track['tags'].items()}
+
+ video_tracks = []
+ audio_tracks = []
+ subtitle_tracks = []
+ for track in data.get('streams') or []:
+ track_type = track.get('codec_type')
+ if track_type == 'video':
+ video_tracks.append(track)
+ elif track_type == 'audio':
+ audio_tracks.append(track)
+ elif track_type == 'subtitle':
+ subtitle_tracks.append(track)
+
+ result = self._describe_tracks(video_path, general_track, video_tracks, audio_tracks, subtitle_tracks, context)
+ if not result:
+ raise MalformedFileError
+
+ result['provider'] = {
+ 'name': 'ffmpeg',
+ 'version': self.version
+ }
+
+ return result
+
+ @property
+ def version(self):
+ """Return ffmpeg version information."""
+ if not self.executor:
+ return {}
+
+ return {self.executor.location: 'v{}'.format('.'.join(map(str, self.executor.version)))}
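A hedged usage sketch; the path is hypothetical and Config.build is assumed to load defaults.yml as in knowit/config.py. Under the hood FFmpegCliExecutor runs: ffprobe -v quiet -print_format json -show_format -show_streams -sexagesimal <video_path>.

    from knowit.config import Config
    from knowit.providers import FFmpegProvider

    provider = FFmpegProvider(Config.build(), suggested_path=None)  # probes PATH for ffprobe
    if provider.accepts('/media/sample.mkv'):                       # hypothetical file
        info = provider.describe('/media/sample.mkv', {})
        print(info['video'][0]['codec'], info['provider']['version'])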
diff --git a/libs/knowit/providers/mediainfo.py b/libs/knowit/providers/mediainfo.py
new file mode 100644
index 000000000..519fe862a
--- /dev/null
+++ b/libs/knowit/providers/mediainfo.py
@@ -0,0 +1,335 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+import re
+from ctypes import c_void_p, c_wchar_p
+from logging import DEBUG, NullHandler, getLogger
+from subprocess import CalledProcessError, check_output
+from xml.dom import minidom
+from xml.etree import ElementTree
+
+from pymediainfo import MediaInfo
+from pymediainfo import __version__ as pymediainfo_version
+from six import ensure_text
+
+from .. import (
+ OrderedDict,
+ VIDEO_EXTENSIONS,
+)
+from ..properties import (
+ AudioChannels,
+ AudioCodec,
+ AudioCompression,
+ AudioProfile,
+ Basic,
+ BitRateMode,
+ Duration,
+ Language,
+ Quantity,
+ ScanType,
+ SubtitleFormat,
+ VideoCodec,
+ VideoEncoder,
+ VideoProfile,
+ VideoProfileLevel,
+ VideoProfileTier,
+ YesNo,
+)
+from ..property import (
+ MultiValue,
+ Property,
+)
+from ..provider import (
+ MalformedFileError,
+ Provider,
+)
+from ..rules import (
+ AtmosRule,
+ AudioChannelsRule,
+ ClosedCaptionRule,
+ DtsHdRule,
+ HearingImpairedRule,
+ LanguageRule,
+ ResolutionRule,
+)
+from ..units import units
+from ..utils import (
+ define_candidate,
+ detect_os,
+)
+
+logger = getLogger(__name__)
+logger.addHandler(NullHandler())
+
+
+WARN_MSG = r'''
+=========================================================================================
+MediaInfo not found on your system or could not be loaded.
+Visit https://mediaarea.net/ to download it.
+If you still have problems, please check if the downloaded version matches your system.
+To load MediaInfo from a specific location, please define the location as follows:
+ knowit --mediainfo /usr/local/mediainfo/lib <video_path>
+ knowit --mediainfo /usr/local/mediainfo/bin <video_path>
+ knowit --mediainfo "C:\Program Files\MediaInfo" <video_path>
+ knowit --mediainfo C:\Software\MediaInfo.dll <video_path>
+ knowit --mediainfo C:\Software\MediaInfo.exe <video_path>
+ knowit --mediainfo /opt/mediainfo/libmediainfo.so <video_path>
+ knowit --mediainfo /opt/mediainfo/libmediainfo.dylib <video_path>
+=========================================================================================
+'''
+
+
+class MediaInfoExecutor(object):
+ """Media info executable knows how to execute media info: using ctypes or cli."""
+
+ version_re = re.compile(r'\bv(?P<version>\d+(?:\.\d+)+)\b')
+
+ locations = {
+ 'unix': ('/usr/local/mediainfo/lib', '/usr/local/mediainfo/bin', '__PATH__'),
+ 'windows': ('__PATH__', ),
+ 'macos': ('__PATH__', ),
+ }
+
+ def __init__(self, location, version):
+ """Constructor."""
+ self.location = location
+ self.version = version
+
+ def extract_info(self, filename):
+ """Extract media info."""
+ return self._execute(filename)
+
+ def _execute(self, filename):
+ raise NotImplementedError
+
+ @classmethod
+ def _get_version(cls, output):
+ match = cls.version_re.search(output)
+ if match:
+ version = tuple([int(v) for v in match.groupdict()['version'].split('.')])
+ return version
+
+ @classmethod
+ def get_executor_instance(cls, suggested_path=None):
+ """Return the executor instance."""
+ os_family = detect_os()
+ logger.debug('Detected os: %s', os_family)
+ for exec_cls in (MediaInfoCTypesExecutor, MediaInfoCliExecutor):
+ executor = exec_cls.create(os_family, suggested_path)
+ if executor:
+ return executor
+
+
+class MediaInfoCliExecutor(MediaInfoExecutor):
+ """Media info using cli."""
+
+ names = {
+ 'unix': ('mediainfo', ),
+ 'windows': ('MediaInfo.exe', ),
+ 'macos': ('mediainfo', ),
+ }
+
+ def _execute(self, filename):
+ output_type = 'OLDXML' if self.version >= (17, 10) else 'XML'
+ return MediaInfo(ensure_text(check_output([self.location, '--Output=' + output_type, '--Full', filename])))
+
+ @classmethod
+ def create(cls, os_family=None, suggested_path=None):
+ """Create the executor instance."""
+ for candidate in define_candidate(cls.locations, cls.names, os_family, suggested_path):
+ try:
+ output = ensure_text(check_output([candidate, '--version']))
+ version = cls._get_version(output)
+ if version:
+ logger.debug('MediaInfo cli detected: %s', candidate)
+ return MediaInfoCliExecutor(candidate, version)
+ except CalledProcessError as e:
+ # old mediainfo returns non-zero exit code for mediainfo --version
+ version = cls._get_version(ensure_text(e.output))
+ if version:
+ logger.debug('MediaInfo cli detected: %s', candidate)
+ return MediaInfoCliExecutor(candidate, version)
+ except OSError:
+ pass
+
+
+class MediaInfoCTypesExecutor(MediaInfoExecutor):
+ """Media info ctypes."""
+
+ names = {
+ 'unix': ('libmediainfo.so.0', ),
+ 'windows': ('MediaInfo.dll', ),
+ 'macos': ('libmediainfo.0.dylib', 'libmediainfo.dylib'),
+ }
+
+ def _execute(self, filename):
+ # Create a MediaInfo handle
+ return MediaInfo.parse(filename, library_file=self.location)
+
+ @classmethod
+ def create(cls, os_family=None, suggested_path=None):
+ """Create the executor instance."""
+ for candidate in define_candidate(cls.locations, cls.names, os_family, suggested_path):
+ if MediaInfo.can_parse(candidate):
+ lib = MediaInfo._get_library(candidate)
+ lib.MediaInfo_Option.argtypes = [c_void_p, c_wchar_p, c_wchar_p]
+ lib.MediaInfo_Option.restype = c_wchar_p
+ version = MediaInfoExecutor._get_version(lib.MediaInfo_Option(None, "Info_Version", ""))
+
+ logger.debug('MediaInfo library detected: %s (v%s)', candidate, '.'.join(map(str, version)))
+ return MediaInfoCTypesExecutor(candidate, version)
+
+
+class MediaInfoProvider(Provider):
+ """Media Info provider."""
+
+ executor = None
+
+ def __init__(self, config, suggested_path):
+ """Init method."""
+ super(MediaInfoProvider, self).__init__(config, {
+ 'general': OrderedDict([
+ ('title', Property('title', description='media title')),
+ ('path', Property('complete_name', description='media path')),
+ ('duration', Duration('duration', description='media duration')),
+ ('size', Quantity('file_size', units.byte, description='media size')),
+ ('bit_rate', Quantity('overall_bit_rate', units.bps, description='media bit rate')),
+ ]),
+ 'video': OrderedDict([
+ ('id', Basic('track_id', int, allow_fallback=True, description='video track number')),
+ ('name', Property('name', description='video track name')),
+ ('language', Language('language', description='video language')),
+ ('duration', Duration('duration', description='video duration')),
+ ('size', Quantity('stream_size', units.byte, description='video stream size')),
+ ('width', Quantity('width', units.pixel)),
+ ('height', Quantity('height', units.pixel)),
+ ('scan_type', ScanType(config, 'scan_type', default='Progressive', description='video scan type')),
+ ('aspect_ratio', Basic('display_aspect_ratio', float, description='display aspect ratio')),
+ ('pixel_aspect_ratio', Basic('pixel_aspect_ratio', float, description='pixel aspect ratio')),
+ ('resolution', None), # populated with ResolutionRule
+ ('frame_rate', Quantity('frame_rate', units.FPS, float, description='video frame rate')),
+ # frame_rate_mode
+ ('bit_rate', Quantity('bit_rate', units.bps, description='video bit rate')),
+ ('bit_depth', Quantity('bit_depth', units.bit, description='video bit depth')),
+ ('codec', VideoCodec(config, 'codec', description='video codec')),
+ ('profile', VideoProfile(config, 'codec_profile', description='video codec profile')),
+ ('profile_level', VideoProfileLevel(config, 'codec_profile', description='video codec profile level')),
+ ('profile_tier', VideoProfileTier(config, 'codec_profile', description='video codec profile tier')),
+ ('encoder', VideoEncoder(config, 'encoded_library_name', description='video encoder')),
+ ('media_type', Property('internet_media_type', description='video media type')),
+ ('forced', YesNo('forced', hide_value=False, description='video track forced')),
+ ('default', YesNo('default', hide_value=False, description='video track default')),
+ ]),
+ 'audio': OrderedDict([
+ ('id', Basic('track_id', int, allow_fallback=True, description='audio track number')),
+ ('name', Property('title', description='audio track name')),
+ ('language', Language('language', description='audio language')),
+ ('duration', Duration('duration', description='audio duration')),
+ ('size', Quantity('stream_size', units.byte, description='audio stream size')),
+ ('codec', MultiValue(AudioCodec(config, 'codec', description='audio codec'))),
+ ('profile', MultiValue(AudioProfile(config, 'format_profile', description='audio codec profile'),
+ delimiter=' / ')),
+ ('channels_count', MultiValue(AudioChannels('channel_s', description='audio channels count'))),
+ ('channel_positions', MultiValue(name='other_channel_positions', handler=(lambda x, *args: x),
+ delimiter=' / ', private=True, description='audio channels position')),
+ ('channels', None), # populated with AudioChannelsRule
+ ('bit_depth', Quantity('bit_depth', units.bit, description='audio bit depth')),
+ ('bit_rate', MultiValue(Quantity('bit_rate', units.bps, description='audio bit rate'))),
+ ('bit_rate_mode', MultiValue(BitRateMode(config, 'bit_rate_mode', description='audio bit rate mode'))),
+ ('sampling_rate', MultiValue(Quantity('sampling_rate', units.Hz, description='audio sampling rate'))),
+ ('compression', MultiValue(AudioCompression(config, 'compression_mode',
+ description='audio compression'))),
+ ('forced', YesNo('forced', hide_value=False, description='audio track forced')),
+ ('default', YesNo('default', hide_value=False, description='audio track default')),
+ ]),
+ 'subtitle': OrderedDict([
+ ('id', Basic('track_id', int, allow_fallback=True, description='subtitle track number')),
+ ('name', Property('title', description='subtitle track name')),
+ ('language', Language('language', description='subtitle language')),
+ ('hearing_impaired', None), # populated with HearingImpairedRule
+ ('_closed_caption', Property('captionservicename', private=True)),
+ ('closed_caption', None), # populated with ClosedCaptionRule
+ ('format', SubtitleFormat(config, 'codec_id', description='subtitle format')),
+ ('forced', YesNo('forced', hide_value=False, description='subtitle track forced')),
+ ('default', YesNo('default', hide_value=False, description='subtitle track default')),
+ ]),
+ }, {
+ 'video': OrderedDict([
+ ('language', LanguageRule('video language')),
+ ('resolution', ResolutionRule('video resolution')),
+ ]),
+ 'audio': OrderedDict([
+ ('language', LanguageRule('audio language')),
+ ('channels', AudioChannelsRule('audio channels')),
+ ('_atmosrule', AtmosRule('atmos rule')),
+ ('_dtshdrule', DtsHdRule('dts-hd rule')),
+ ]),
+ 'subtitle': OrderedDict([
+ ('language', LanguageRule('subtitle language')),
+ ('hearing_impaired', HearingImpairedRule('subtitle hearing impaired')),
+ ('closed_caption', ClosedCaptionRule('closed caption')),
+ ])
+ })
+ self.executor = MediaInfoExecutor.get_executor_instance(suggested_path)
+
+ def accepts(self, video_path):
+ """Accept any video when MediaInfo is available."""
+ if self.executor is None:
+ logger.warning(WARN_MSG)
+ self.executor = False
+
+ return self.executor and video_path.lower().endswith(VIDEO_EXTENSIONS)
+
+ def describe(self, video_path, context):
+ """Return video metadata."""
+ media_info = self.executor.extract_info(video_path)
+
+ def debug_data():
+ """Debug data."""
+ xml = ensure_text(ElementTree.tostring(media_info.xml_dom)).replace('\r', '').replace('\n', '')
+ return ensure_text(minidom.parseString(xml).toprettyxml(indent=' ', newl='\n', encoding='utf-8'))
+
+ context['debug_data'] = debug_data
+
+ if logger.isEnabledFor(DEBUG):
+ logger.debug('Video %r scanned using mediainfo %r has raw data:\n%s',
+ video_path, self.executor.location, debug_data())
+
+ data = media_info.to_data()
+ result = {}
+ if data.get('tracks'):
+ general_tracks = []
+ video_tracks = []
+ audio_tracks = []
+ subtitle_tracks = []
+ for track in data.get('tracks'):
+ track_type = track.get('track_type')
+ if track_type == 'General':
+ general_tracks.append(track)
+ elif track_type == 'Video':
+ video_tracks.append(track)
+ elif track_type == 'Audio':
+ audio_tracks.append(track)
+ elif track_type == 'Text':
+ subtitle_tracks.append(track)
+
+ result = self._describe_tracks(video_path, general_tracks[0] if general_tracks else {},
+ video_tracks, audio_tracks, subtitle_tracks, context)
+ if not result:
+ raise MalformedFileError
+
+ result['provider'] = {
+ 'name': 'mediainfo',
+ 'version': self.version
+ }
+
+ return result
+
+ @property
+ def version(self):
+ """Return mediainfo version information."""
+ versions = [('pymediainfo', pymediainfo_version)]
+ if self.executor:
+ versions.append((self.executor.location, 'v{}'.format('.'.join(map(str, self.executor.version)))))
+
+ return OrderedDict(versions)
diff --git a/libs/knowit/rule.py b/libs/knowit/rule.py
new file mode 100644
index 000000000..6d0764955
--- /dev/null
+++ b/libs/knowit/rule.py
@@ -0,0 +1,17 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from .core import Reportable
+
+
+class Rule(Reportable):
+ """Rule abstract class."""
+
+ def __init__(self, name, override=False, **kwargs):
+ """Constructor."""
+ super(Rule, self).__init__(name, **kwargs)
+ self.override = override
+
+ def execute(self, props, pv_props, context):
+ """How to execute a rule."""
+ raise NotImplementedError
diff --git a/libs/knowit/rules/__init__.py b/libs/knowit/rules/__init__.py
new file mode 100644
index 000000000..533706258
--- /dev/null
+++ b/libs/knowit/rules/__init__.py
@@ -0,0 +1,11 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from .audio import AtmosRule
+from .audio import AudioChannelsRule
+from .audio import AudioCodecRule
+from .audio import DtsHdRule
+from .language import LanguageRule
+from .subtitle import ClosedCaptionRule
+from .subtitle import HearingImpairedRule
+from .video import ResolutionRule
diff --git a/libs/knowit/rules/audio/__init__.py b/libs/knowit/rules/audio/__init__.py
new file mode 100644
index 000000000..d8a947047
--- /dev/null
+++ b/libs/knowit/rules/audio/__init__.py
@@ -0,0 +1,7 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from .atmos import AtmosRule
+from .channels import AudioChannelsRule
+from .codec import AudioCodecRule
+from .dtshd import DtsHdRule
diff --git a/libs/knowit/rules/audio/atmos.py b/libs/knowit/rules/audio/atmos.py
new file mode 100644
index 000000000..3e429d866
--- /dev/null
+++ b/libs/knowit/rules/audio/atmos.py
@@ -0,0 +1,33 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from ...rule import Rule
+
+
+class AtmosRule(Rule):
+ """Atmos rule."""
+
+ @classmethod
+ def _redefine(cls, props, name, index):
+ actual = props.get(name)
+ if isinstance(actual, list):
+ value = actual[index]
+ if value is None:
+ del props[name]
+ else:
+ props[name] = value
+
+ def execute(self, props, pv_props, context):
+ """Execute the rule against properties."""
+ codecs = props.get('codec') or []
+ # TODO: handle this properly
+ if 'atmos' in {codec.lower() for codec in codecs if codec}:
+ index = None
+ for i, codec in enumerate(codecs):
+ if codec and 'atmos' in codec.lower():
+ index = i
+ break
+
+ if index is not None:
+ for name in ('channels_count', 'sampling_rate'):
+ self._redefine(props, name, index)
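A small sketch of what AtmosRule does to pymediainfo-style list values; the property values below are invented for illustration:

    from knowit.rules.audio import AtmosRule  # assumes libs/ is on sys.path

    # One track whose per-stream values were collected into lists by the provider.
    props = {
        'codec': ['Atmos', 'TrueHD'],       # an exact 'atmos' entry triggers the rule
        'channels_count': [8, 6],
        'sampling_rate': [48000, 48000],
    }
    AtmosRule('atmos rule').execute(props, {}, {})
    # channels_count and sampling_rate collapse to the Atmos stream's values:
    assert props['channels_count'] == 8 and props['sampling_rate'] == 48000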
diff --git a/libs/knowit/rules/audio/channels.py b/libs/knowit/rules/audio/channels.py
new file mode 100644
index 000000000..50975d5b2
--- /dev/null
+++ b/libs/knowit/rules/audio/channels.py
@@ -0,0 +1,57 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from logging import NullHandler, getLogger
+from six import text_type
+
+from ...rule import Rule
+
+logger = getLogger(__name__)
+logger.addHandler(NullHandler())
+
+
+class AudioChannelsRule(Rule):
+ """Audio Channel rule."""
+
+ mapping = {
+ 1: '1.0',
+ 2: '2.0',
+ 6: '5.1',
+ 8: '7.1',
+ }
+
+ def execute(self, props, pv_props, context):
+ """Execute the rule against properties."""
+ count = props.get('channels_count')
+ if count is None:
+ return
+
+ channels = self.mapping.get(count) if isinstance(count, int) else None
+ positions = pv_props.get('channel_positions') or []
+ positions = positions if isinstance(positions, list) else [positions]
+ candidate = 0
+ for position in positions:
+ if not position:
+ continue
+
+ c = 0
+ for i in position.split('/'):
+ try:
+ c += float(i)
+ except ValueError:
+ logger.debug('Invalid %s: %s', self.description, i)
+ pass
+
+ c_count = int(c) + int(round((c - int(c)) * 10))
+ if c_count == count:
+ return text_type(c)
+
+ candidate = max(candidate, c)
+
+ if channels:
+ return channels
+
+ if candidate:
+ return text_type(candidate)
+
+ self.report(positions, context)
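A worked example of the channel arithmetic above (the ChannelPositions string is a typical mediainfo value, used here purely for illustration): three front, two side and one LFE channel resolve to '5.1'.

    from knowit.rules.audio import AudioChannelsRule

    rule = AudioChannelsRule('audio channels')
    # 3 + 2 + 0.1 adds up to the reported 6 channels, so '5.1' is returned.
    result = rule.execute({'channels_count': 6}, {'channel_positions': '3/2/0.1'}, {})
    assert result == '5.1'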
diff --git a/libs/knowit/rules/audio/codec.py b/libs/knowit/rules/audio/codec.py
new file mode 100644
index 000000000..5690e220b
--- /dev/null
+++ b/libs/knowit/rules/audio/codec.py
@@ -0,0 +1,13 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from ...rule import Rule
+
+
+class AudioCodecRule(Rule):
+ """Audio Codec rule."""
+
+ def execute(self, props, pv_props, context):
+ """Execute the rule against properties."""
+ if '_codec' in pv_props:
+ return pv_props.get('_codec')
diff --git a/libs/knowit/rules/audio/dtshd.py b/libs/knowit/rules/audio/dtshd.py
new file mode 100644
index 000000000..d44cdf138
--- /dev/null
+++ b/libs/knowit/rules/audio/dtshd.py
@@ -0,0 +1,32 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from ...rule import Rule
+
+
+class DtsHdRule(Rule):
+ """DTS-HD rule."""
+
+ @classmethod
+ def _redefine(cls, props, name, index):
+ actual = props.get(name)
+ if isinstance(actual, list):
+ value = actual[index]
+ if value is None:
+ del props[name]
+ else:
+ props[name] = value
+
+ def execute(self, props, pv_props, context):
+ """Execute the rule against properties."""
+ if props.get('codec') == 'DTS-HD':
+ index = None
+ for i, profile in enumerate(props.get('profile', [])):
+ if profile and profile.upper() != 'CORE':
+ index = i
+ break
+
+ if index is not None:
+ for name in ('profile', 'channels_count', 'bit_rate',
+ 'bit_rate_mode', 'sampling_rate', 'compression'):
+ self._redefine(props, name, index)
diff --git a/libs/knowit/rules/language.py b/libs/knowit/rules/language.py
new file mode 100644
index 000000000..8a51ccf05
--- /dev/null
+++ b/libs/knowit/rules/language.py
@@ -0,0 +1,33 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+import re
+from logging import NullHandler, getLogger
+
+import babelfish
+
+from ..rule import Rule
+
+logger = getLogger(__name__)
+logger.addHandler(NullHandler())
+
+
+class LanguageRule(Rule):
+ """Language rules."""
+
+ name_re = re.compile(r'(?P<name>\w+)\b', re.IGNORECASE)
+
+ def execute(self, props, pv_props, context):
+ """Language detection using name."""
+ if 'language' in props:
+ return
+
+ if 'name' in props:
+ name = props.get('name', '')
+ match = self.name_re.match(name)
+ if match:
+ try:
+ return babelfish.Language.fromname(match.group('name'))
+ except babelfish.Error:
+ pass
+ logger.info('Invalid %s: %r', self.description, name)
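A quick sketch of the name-based fallback (the track name is invented):

    import babelfish
    from knowit.rules.language import LanguageRule

    rule = LanguageRule('subtitle language')
    # No 'language' property, so the first word of the track name is tried.
    language = rule.execute({'name': 'English (SDH)'}, {}, {})
    assert language == babelfish.Language('eng')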
diff --git a/libs/knowit/rules/subtitle/__init__.py b/libs/knowit/rules/subtitle/__init__.py
new file mode 100644
index 000000000..eff71d670
--- /dev/null
+++ b/libs/knowit/rules/subtitle/__init__.py
@@ -0,0 +1,5 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from .closedcaption import ClosedCaptionRule
+from .hearingimpaired import HearingImpairedRule
diff --git a/libs/knowit/rules/subtitle/closedcaption.py b/libs/knowit/rules/subtitle/closedcaption.py
new file mode 100644
index 000000000..14be06fdd
--- /dev/null
+++ b/libs/knowit/rules/subtitle/closedcaption.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+import re
+
+from ...rule import Rule
+
+
+class ClosedCaptionRule(Rule):
+ """Closed caption rule."""
+
+ cc_re = re.compile(r'(\bcc\d\b)', re.IGNORECASE)
+
+ def execute(self, props, pv_props, context):
+ """Execute closed caption rule."""
+ for name in (pv_props.get('_closed_caption'), props.get('name')):
+ if name and self.cc_re.search(name):
+ return True
diff --git a/libs/knowit/rules/subtitle/hearingimpaired.py b/libs/knowit/rules/subtitle/hearingimpaired.py
new file mode 100644
index 000000000..54c4d5679
--- /dev/null
+++ b/libs/knowit/rules/subtitle/hearingimpaired.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+import re
+
+from ...rule import Rule
+
+
+class HearingImpairedRule(Rule):
+ """Hearing Impaired rule."""
+
+ hi_re = re.compile(r'(\bsdh\b)', re.IGNORECASE)
+
+ def execute(self, props, pv_props, context):
+ """Hearing Impaired."""
+ name = props.get('name')
+ if name and self.hi_re.search(name):
+ return True
diff --git a/libs/knowit/rules/video/__init__.py b/libs/knowit/rules/video/__init__.py
new file mode 100644
index 000000000..77c0b406f
--- /dev/null
+++ b/libs/knowit/rules/video/__init__.py
@@ -0,0 +1,4 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from .resolution import ResolutionRule
diff --git a/libs/knowit/rules/video/resolution.py b/libs/knowit/rules/video/resolution.py
new file mode 100644
index 000000000..bcdd594ed
--- /dev/null
+++ b/libs/knowit/rules/video/resolution.py
@@ -0,0 +1,75 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+from ...rule import Rule
+
+
+class ResolutionRule(Rule):
+ """Resolution rule."""
+
+ standard_resolutions = (
+ 480,
+ 720,
+ 1080,
+ 2160,
+ 4320,
+ )
+ uncommon_resolutions = (
+ 240,
+ 288,
+ 360,
+ 576,
+ )
+    resolutions = sorted(standard_resolutions + uncommon_resolutions)
+ square = 4. / 3
+ wide = 16. / 9
+
+ def execute(self, props, pv_props, context):
+ """Return the resolution for the video.
+
+ The resolution is based on a widescreen TV (16:9)
+        1920x800 will be considered 1080p since the TV will use 1920x1080 with horizontal black bars (letterbox)
+        1426x1080 is considered 1080p since the TV will use 1920x1080 with vertical black bars (pillarbox)
+
+ The calculation considers the display aspect ratio and the pixel aspect ratio (not only width and height).
+ The upper resolution is selected if there's no perfect match with the following list of resolutions:
+ 240, 288, 360, 480, 576, 720, 1080, 2160, 4320
+        If no interlaced information is available, the resolution will be considered progressive.
+ """
+ width = props.get('width')
+ height = props.get('height')
+ if not width or not height:
+ return
+
+ try:
+ width = width.magnitude
+ height = height.magnitude
+ except AttributeError:
+ pass
+
+ dar = props.get('aspect_ratio', float(width) / height)
+ par = props.get('pixel_aspect_ratio', 1)
+ scan_type = props.get('scan_type', 'p')[0].lower()
+
+ # selected DAR must be between 4:3 and 16:9
+ selected_dar = max(min(dar, self.wide), self.square)
+
+ # mod-16
+ stretched_width = int(round(width * par / 16)) * 16
+
+ # mod-8
+ calculated_height = int(round(stretched_width / selected_dar / 8)) * 8
+
+ selected_resolution = None
+ for r in reversed(self.resolutions):
+ if r < calculated_height:
+ break
+
+ selected_resolution = r
+
+ if selected_resolution:
+ return '{0}{1}'.format(selected_resolution, scan_type)
+
+ msg = '{width}x{height} - scan_type: {scan_type}, aspect_ratio: {dar}, pixel_aspect_ratio: {par}'.format(
+ width=width, height=height, scan_type=scan_type, dar=dar, par=par)
+ self.report(msg, context)
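Working through the docstring's 1920x800 case against this code: DAR 2.4 is clamped to 16:9, the mod-16 width stays 1920, the mod-8 height becomes round(1920 / (16/9) / 8) * 8 = 1080, and the track reports as 1080p.

    from knowit.rules.video import ResolutionRule

    rule = ResolutionRule('video resolution')
    props = {'width': 1920, 'height': 800}  # cropped 2.40:1 encode, progressive by default
    assert rule.execute(props, {}, {}) == '1080p'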
diff --git a/libs/knowit/serializer.py b/libs/knowit/serializer.py
new file mode 100644
index 000000000..a799df768
--- /dev/null
+++ b/libs/knowit/serializer.py
@@ -0,0 +1,155 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+import json
+from collections import OrderedDict
+from datetime import timedelta
+
+import babelfish
+from six import text_type
+import yaml
+
+from .units import units
+
+
+def format_property(context, o):
+ """Convert properties to string."""
+ if isinstance(o, timedelta):
+ return format_duration(o, context['profile'])
+
+ if isinstance(o, babelfish.language.Language):
+ return format_language(o, context['profile'])
+
+ if hasattr(o, 'units'):
+ return format_quantity(o, context['profile'])
+
+ return text_type(o)
+
+
+def get_json_encoder(context):
+ """Return json encoder that handles all needed object types."""
+ class StringEncoder(json.JSONEncoder):
+ """String json encoder."""
+
+ def default(self, o):
+ return format_property(context, o)
+
+ return StringEncoder
+
+
+def get_yaml_dumper(context):
+ """Return yaml dumper that handles all needed object types."""
+ class CustomDumper(yaml.SafeDumper):
+ """Custom YAML Dumper."""
+
+ def default_representer(self, data):
+ """Convert data to string."""
+ if isinstance(data, int):
+ return self.represent_int(data)
+ if isinstance(data, float):
+ return self.represent_float(data)
+ return self.represent_str(str(data))
+
+ def ordered_dict_representer(self, data):
+ """Representer for OrderedDict."""
+ return self.represent_mapping('tag:yaml.org,2002:map', data.items())
+
+ def default_language_representer(self, data):
+ """Convert language to string."""
+ return self.represent_str(format_language(data, context['profile']))
+
+ def default_quantity_representer(self, data):
+ """Convert quantity to string."""
+ return self.default_representer(format_quantity(data, context['profile']))
+
+ def default_duration_representer(self, data):
+ """Convert quantity to string."""
+ return self.default_representer(format_duration(data, context['profile']))
+
+ CustomDumper.add_representer(OrderedDict, CustomDumper.ordered_dict_representer)
+ CustomDumper.add_representer(babelfish.Language, CustomDumper.default_language_representer)
+ CustomDumper.add_representer(timedelta, CustomDumper.default_duration_representer)
+ CustomDumper.add_representer(units.Quantity, CustomDumper.default_quantity_representer)
+
+ return CustomDumper
+
+
+def get_yaml_loader(constructors=None):
+ """Return a yaml loader that handles sequences as python lists."""
+ constructors = constructors or {}
+
+ class CustomLoader(yaml.Loader):
+ """Custom YAML Loader."""
+
+ pass
+
+ CustomLoader.add_constructor('tag:yaml.org,2002:seq', CustomLoader.construct_python_tuple)
+ for tag, constructor in constructors.items():
+ CustomLoader.add_constructor(tag, constructor)
+
+ return CustomLoader
+
+
+def format_duration(duration, profile='default'):
+ if profile == 'technical':
+ return str(duration)
+
+ seconds = duration.total_seconds()
+ if profile == 'code':
+ return duration.total_seconds()
+
+ hours = int(seconds // 3600)
+ seconds = seconds - (hours * 3600)
+ minutes = int(seconds // 60)
+ seconds = int(seconds - (minutes * 60))
+ if profile == 'human':
+ if hours > 0:
+ return '{0} hours {1:02d} minutes {2:02d} seconds'.format(hours, minutes, seconds)
+ if minutes > 0:
+ return '{0} minutes {1:02d} seconds'.format(minutes, seconds)
+
+ return '{0} seconds'.format(seconds)
+
+ return '{0}:{1:02d}:{2:02d}'.format(hours, minutes, seconds)
+
+
+def format_language(language, profile='default'):
+ if profile in ('default', 'human'):
+ return str(language.name)
+
+ return str(language)
+
+
+def format_quantity(quantity, profile='default'):
+ """Human friendly format."""
+ if profile == 'code':
+ return quantity.magnitude
+
+ unit = quantity.units
+ if unit != 'bit':
+ technical = profile == 'technical'
+ if unit == 'hertz':
+ return _format_quantity(quantity.magnitude, unit='Hz', binary=technical, precision=3 if technical else 1)
+
+ root_unit = quantity.to_root_units().units
+ if root_unit == 'bit':
+ return _format_quantity(quantity.magnitude, binary=technical, precision=3 if technical else 2)
+ if root_unit == 'bit / second':
+ return _format_quantity(quantity.magnitude, unit='bps', binary=technical, precision=3 if technical else 1)
+
+ return str(quantity)
+
+
+def _format_quantity(num, unit='B', binary=False, precision=2):
+ fmt_pattern = '{value:3.%sf} {prefix}{affix}{unit}' % precision
+ factor = 1024. if binary else 1000.
+ binary_affix = 'i' if binary else ''
+ for prefix in ('', 'K', 'M', 'G', 'T', 'P', 'E', 'Z'):
+ if abs(num) < factor:
+ return fmt_pattern.format(value=num, prefix=prefix, affix=binary_affix, unit=unit)
+ num /= factor
+
+ return fmt_pattern.format(value=num, prefix='Y', affix=binary_affix, unit=unit)
+
+
+YAMLLoader = get_yaml_loader()
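Two quick, self-contained examples of the helpers above (_format_quantity is an internal helper, imported here only to show the formatting):

    from datetime import timedelta
    from knowit.serializer import format_duration, _format_quantity

    d = timedelta(hours=1, minutes=23, seconds=45)
    assert format_duration(d) == '1:23:45'  # default profile
    assert format_duration(d, 'human') == '1 hours 23 minutes 45 seconds'

    # Binary (IEC) prefixes are produced when binary=True, as the 'technical' profile requests.
    assert _format_quantity(1048576, binary=True) == '1.00 MiB'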
diff --git a/libs/knowit/units.py b/libs/knowit/units.py
new file mode 100644
index 000000000..2397a60bc
--- /dev/null
+++ b/libs/knowit/units.py
@@ -0,0 +1,24 @@
+# -*- coding: utf-8 -*-
+
+
+def _build_unit_registry():
+ try:
+ from pint import UnitRegistry
+
+ registry = UnitRegistry()
+ registry.define('FPS = 1 * hertz')
+ except ImportError:
+ class NoUnitRegistry:
+
+ def __init__(self):
+ pass
+
+ def __getattr__(self, item):
+ return 1
+
+ registry = NoUnitRegistry()
+
+ return registry
+
+
+units = _build_unit_registry()
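The registry degrades gracefully when pint is missing; a small sketch (the attribute names are pint's usual ones):

    from knowit.units import units

    frame_rate = 25 * units.FPS    # a pint Quantity when pint is installed, plain 25 otherwise
    sample_rate = 48000 * units.Hz
    print(frame_rate, sample_rate)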
diff --git a/libs/knowit/utils.py b/libs/knowit/utils.py
new file mode 100644
index 000000000..c65d54943
--- /dev/null
+++ b/libs/knowit/utils.py
@@ -0,0 +1,95 @@
+# -*- coding: utf-8 -*-
+from __future__ import unicode_literals
+
+import os
+import sys
+from collections import OrderedDict
+
+from six import PY2, string_types, text_type
+
+from . import VIDEO_EXTENSIONS
+
+
+def recurse_paths(paths):
+ """Return a file system encoded list of videofiles.
+
+ :param paths:
+ :type paths: string or list
+ :return:
+ :rtype: list
+ """
+ enc_paths = []
+
+ if isinstance(paths, (string_types, text_type)):
+ paths = [p.strip() for p in paths.split(',')] if ',' in paths else paths.split()
+
+ encoding = sys.getfilesystemencoding()
+ for path in paths:
+ if os.path.isfile(path):
+ enc_paths.append(path.decode(encoding) if PY2 else path)
+ if os.path.isdir(path):
+ for root, directories, filenames in os.walk(path):
+ for filename in filenames:
+ if os.path.splitext(filename)[1] in VIDEO_EXTENSIONS:
+ if PY2 and os.name == 'nt':
+ fullpath = os.path.join(root, filename.decode(encoding))
+ else:
+ fullpath = os.path.join(root, filename).decode(encoding)
+ enc_paths.append(fullpath)
+
+    # Let's remove any duplicates since mediainfo is rather slow.
+ seen = set()
+ seen_add = seen.add
+ return [f for f in enc_paths if not (f in seen or seen_add(f))]
+
+
+def todict(obj, classkey=None):
+ """Transform an object to dict."""
+ if isinstance(obj, string_types):
+ return obj
+ elif isinstance(obj, dict):
+ data = {}
+ for (k, v) in obj.items():
+ data[k] = todict(v, classkey)
+ return data
+ elif hasattr(obj, '_ast'):
+ return todict(obj._ast())
+ elif hasattr(obj, '__iter__'):
+ return [todict(v, classkey) for v in obj]
+ elif hasattr(obj, '__dict__'):
+ values = [(key, todict(value, classkey))
+ for key, value in obj.__dict__.items() if not callable(value) and not key.startswith('_')]
+ data = OrderedDict([(k, v) for k, v in values if v is not None])
+ if classkey is not None and hasattr(obj, '__class__'):
+ data[classkey] = obj.__class__.__name__
+ return data
+ return obj
+
+
+def detect_os():
+ """Detect os family: windows, macos or unix."""
+ if os.name in ('nt', 'dos', 'os2', 'ce'):
+ return 'windows'
+ if sys.platform == 'darwin':
+ return 'macos'
+
+ return 'unix'
+
+
+def define_candidate(locations, names, os_family=None, suggested_path=None):
+ """Generate candidate list for the given parameters."""
+ os_family = os_family or detect_os()
+ for location in (suggested_path, ) + locations[os_family]:
+ if not location:
+ continue
+
+ if location == '__PATH__':
+ for name in names[os_family]:
+ yield name
+ elif os.path.isfile(location):
+ yield location
+ elif os.path.isdir(location):
+ for name in names[os_family]:
+ cmd = os.path.join(location, name)
+ if os.path.isfile(cmd):
+ yield cmd
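A small sketch of how the candidate generator is meant to be fed; the locations/names mappings below are invented, the real ones live next to each executor:

    from knowit.utils import define_candidate

    locations = {'unix': ('/opt/mediainfo', '__PATH__'), 'windows': (), 'macos': ()}
    names = {'unix': ('mediainfo',), 'windows': ('MediaInfo.exe',), 'macos': ('mediainfo',)}

    # '__PATH__' yields bare command names to be resolved via $PATH; real directories
    # are only yielded when the binary actually exists there.
    for candidate in define_candidate(locations, names, os_family='unix'):
        print(candidate)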
diff --git a/libs/pymediainfo/AUTHORS b/libs/pymediainfo/AUTHORS
deleted file mode 100644
index d3b460d4d..000000000
--- a/libs/pymediainfo/AUTHORS
+++ /dev/null
@@ -1,3 +0,0 @@
-Patrick Altman <[email protected]> (author)
-cjlucas https://github.com/cjlucas
-Louis Sautier <[email protected]> (maintainer since 2016)
diff --git a/libs/pymediainfo/LICENSE b/libs/pymediainfo/LICENSE
deleted file mode 100644
index 1b517762e..000000000
--- a/libs/pymediainfo/LICENSE
+++ /dev/null
@@ -1,24 +0,0 @@
-The MIT License
-
-Copyright (c) 2010-2014, Patrick Altman <[email protected]>
-Copyright (c) 2016, Louis Sautier <[email protected]>
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
-
-http://www.opensource.org/licenses/mit-license.php
diff --git a/libs/pymediainfo/README.rst b/libs/pymediainfo/README.rst
deleted file mode 100644
index bced11fba..000000000
--- a/libs/pymediainfo/README.rst
+++ /dev/null
@@ -1,27 +0,0 @@
-pymediainfo
------------
-
-.. image:: https://img.shields.io/pypi/v/pymediainfo.svg
- :target: https://pypi.org/project/pymediainfo
-
-.. image:: https://img.shields.io/pypi/pyversions/pymediainfo.svg
- :target: https://pypi.org/project/pymediainfo
-
-.. image:: https://repology.org/badge/tiny-repos/python:pymediainfo.svg
- :target: https://repology.org/metapackage/python:pymediainfo
-
-.. image:: https://img.shields.io/pypi/implementation/pymediainfo.svg
- :target: https://pypi.org/project/pymediainfo
-
-.. image:: https://api.travis-ci.org/sbraz/pymediainfo.svg?branch=master
- :target: https://travis-ci.org/sbraz/pymediainfo
-
-.. image:: https://ci.appveyor.com/api/projects/status/g15a2daem1oub57n/branch/master?svg=true
- :target: https://ci.appveyor.com/project/sbraz/pymediainfo
-
-
-This small package is a wrapper around the MediaInfo library.
-
-It works on Linux, Mac OS X and Windows and is tested with Python 2.7, 3.4, 3.5, 3.6, 3.7, PyPy and PyPy3.
-
-See https://pymediainfo.readthedocs.io/ for more information.
diff --git a/libs/pymediainfo/__init__.py b/libs/pymediainfo/__init__.py
deleted file mode 100644
index c3b9875ed..000000000
--- a/libs/pymediainfo/__init__.py
+++ /dev/null
@@ -1,320 +0,0 @@
-# vim: set fileencoding=utf-8 :
-import os
-import re
-import locale
-import json
-import ctypes
-import sys
-from pkg_resources import get_distribution, DistributionNotFound
-import xml.etree.ElementTree as ET
-
-try:
- import pathlib
-except ImportError:
- pathlib = None
-
-if sys.version_info < (3,):
- import urlparse
-else:
- import urllib.parse as urlparse
-
-try:
- __version__ = get_distribution("pymediainfo").version
-except DistributionNotFound:
- pass
-
-class Track(object):
- """
- An object associated with a media file track.
-
- Each :class:`Track` attribute corresponds to attributes parsed from MediaInfo's output.
- All attributes are lower case. Attributes that are present several times such as Duration
- yield a second attribute starting with `other_` which is a list of all alternative attribute values.
-
- When a non-existing attribute is accessed, `None` is returned.
-
- Example:
-
- >>> t = mi.tracks[0]
- >>> t
- <Track track_id='None', track_type='General'>
- >>> t.duration
- 3000
- >>> t.to_data()["other_duration"]
- ['3 s 0 ms', '3 s 0 ms', '3 s 0 ms',
- '00:00:03.000', '00:00:03.000']
- >>> type(t.non_existing)
- NoneType
-
- All available attributes can be obtained by calling :func:`to_data`.
- """
- def __eq__(self, other):
- return self.__dict__ == other.__dict__
- def __getattribute__(self, name):
- try:
- return object.__getattribute__(self, name)
- except:
- pass
- return None
- def __getstate__(self):
- return self.__dict__
- def __setstate__(self, state):
- self.__dict__ = state
- def __init__(self, xml_dom_fragment):
- self.track_type = xml_dom_fragment.attrib['type']
- for el in xml_dom_fragment:
- node_name = el.tag.lower().strip().strip('_')
- if node_name == 'id':
- node_name = 'track_id'
- node_value = el.text
- other_node_name = "other_%s" % node_name
- if getattr(self, node_name) is None:
- setattr(self, node_name, node_value)
- else:
- if getattr(self, other_node_name) is None:
- setattr(self, other_node_name, [node_value, ])
- else:
- getattr(self, other_node_name).append(node_value)
-
- for o in [d for d in self.__dict__.keys() if d.startswith('other_')]:
- try:
- primary = o.replace('other_', '')
- setattr(self, primary, int(getattr(self, primary)))
- except:
- for v in getattr(self, o):
- try:
- current = getattr(self, primary)
- setattr(self, primary, int(v))
- getattr(self, o).append(current)
- break
- except:
- pass
- def __repr__(self):
- return("<Track track_id='{}', track_type='{}'>".format(self.track_id, self.track_type))
- def to_data(self):
- """
- Returns a dict representation of the track attributes.
-
- Example:
-
- >>> sorted(track.to_data().keys())[:3]
- ['codec', 'codec_extensions_usually_used', 'codec_url']
- >>> t.to_data()["file_size"]
- 5988
-
-
- :rtype: dict
- """
- data = {}
- for k, v in self.__dict__.items():
- if k != 'xml_dom_fragment':
- data[k] = v
- return data
-
-
-class MediaInfo(object):
- """
- An object containing information about a media file.
-
-
- :class:`MediaInfo` objects can be created by directly calling code from
- libmediainfo (in this case, the library must be present on the system):
-
- >>> pymediainfo.MediaInfo.parse("/path/to/file.mp4")
-
- Alternatively, objects may be created from MediaInfo's XML output.
- Such output can be obtained using the ``XML`` output format on versions older than v17.10
- and the ``OLDXML`` format on newer versions.
-
- Using such an XML file, we can create a :class:`MediaInfo` object:
-
- >>> with open("output.xml") as f:
- ... mi = pymediainfo.MediaInfo(f.read())
-
- :param str xml: XML output obtained from MediaInfo.
- :param str encoding_errors: option to pass to :func:`str.encode`'s `errors`
- parameter before parsing `xml`.
- :raises xml.etree.ElementTree.ParseError: if passed invalid XML.
- :var tracks: A list of :py:class:`Track` objects which the media file contains.
- For instance:
-
- >>> mi = pymediainfo.MediaInfo.parse("/path/to/file.mp4")
- >>> for t in mi.tracks:
- ... print(t)
- <Track track_id='None', track_type='General'>
- <Track track_id='1', track_type='Text'>
- """
- def __eq__(self, other):
- return self.tracks == other.tracks
- def __init__(self, xml, encoding_errors="strict"):
- xml_dom = ET.fromstring(xml.encode("utf-8", encoding_errors))
- self.tracks = []
- # This is the case for libmediainfo < 18.03
- # https://github.com/sbraz/pymediainfo/issues/57
- # https://github.com/MediaArea/MediaInfoLib/commit/575a9a32e6960ea34adb3bc982c64edfa06e95eb
- if xml_dom.tag == "File":
- xpath = "track"
- else:
- xpath = "File/track"
- for xml_track in xml_dom.iterfind(xpath):
- self.tracks.append(Track(xml_track))
- @staticmethod
- def _get_library(library_file=None):
- os_is_nt = os.name in ("nt", "dos", "os2", "ce")
- if os_is_nt:
- lib_type = ctypes.WinDLL
- else:
- lib_type = ctypes.CDLL
- if library_file is None:
- if os_is_nt:
- library_names = ("MediaInfo.dll",)
- elif sys.platform == "darwin":
- library_names = ("libmediainfo.0.dylib", "libmediainfo.dylib")
- else:
- library_names = ("libmediainfo.so.0",)
- script_dir = os.path.dirname(__file__)
- # Look for the library file in the script folder
- for library in library_names:
- lib_path = os.path.join(script_dir, library)
- if os.path.isfile(lib_path):
- # If we find it, don't try any other filename
- library_names = (lib_path,)
- break
- else:
- library_names = (library_file,)
- for i, library in enumerate(library_names, start=1):
- try:
- lib = lib_type(library)
- # Define arguments and return types
- lib.MediaInfo_Inform.restype = ctypes.c_wchar_p
- lib.MediaInfo_New.argtypes = []
- lib.MediaInfo_New.restype = ctypes.c_void_p
- lib.MediaInfo_Option.argtypes = [ctypes.c_void_p, ctypes.c_wchar_p, ctypes.c_wchar_p]
- lib.MediaInfo_Option.restype = ctypes.c_wchar_p
- lib.MediaInfo_Inform.argtypes = [ctypes.c_void_p, ctypes.c_size_t]
- lib.MediaInfo_Inform.restype = ctypes.c_wchar_p
- lib.MediaInfo_Open.argtypes = [ctypes.c_void_p, ctypes.c_wchar_p]
- lib.MediaInfo_Open.restype = ctypes.c_size_t
- lib.MediaInfo_Delete.argtypes = [ctypes.c_void_p]
- lib.MediaInfo_Delete.restype = None
- lib.MediaInfo_Close.argtypes = [ctypes.c_void_p]
- lib.MediaInfo_Close.restype = None
- return lib
- except OSError:
- # If we've tried all possible filenames
- if i == len(library_names):
- raise
- @classmethod
- def can_parse(cls, library_file=None):
- """
- Checks whether media files can be analyzed using libmediainfo.
-
- :rtype: bool
- """
- try:
- cls._get_library(library_file)
- return True
- except:
- return False
- @classmethod
- def parse(cls, filename, library_file=None, cover_data=False,
- encoding_errors="strict", parse_speed=0.5, text=False,
- full=True, legacy_stream_display=False):
- """
- Analyze a media file using libmediainfo.
- If libmediainfo is located in a non-standard location, the `library_file` parameter can be used:
-
- >>> pymediainfo.MediaInfo.parse("tests/data/sample.mkv",
- ... library_file="/path/to/libmediainfo.dylib")
-
- :param filename: path to the media file which will be analyzed.
- A URL can also be used if libmediainfo was compiled
- with CURL support.
- :param str library_file: path to the libmediainfo library, this should only be used if the library cannot be auto-detected.
- :param bool cover_data: whether to retrieve cover data as base64.
- :param str encoding_errors: option to pass to :func:`str.encode`'s `errors`
- parameter before parsing MediaInfo's XML output.
- :param float parse_speed: passed to the library as `ParseSpeed`,
- this option takes values between 0 and 1.
- A higher value will yield more precise results in some cases
- but will also increase parsing time.
- :param bool text: if ``True``, MediaInfo's text output will be returned instead
- of a :class:`MediaInfo` object.
- :param bool full: display additional tags, including computer-readable values
- for sizes and durations.
- :param bool legacy_stream_display: display additional information about streams.
- :type filename: str or pathlib.Path
- :rtype: str if `text` is ``True``.
- :rtype: :class:`MediaInfo` otherwise.
- :raises FileNotFoundError: if passed a non-existent file
- (Python ≥ 3.3), does not work on Windows.
- :raises IOError: if passed a non-existent file (Python < 3.3),
- does not work on Windows.
- :raises RuntimeError: if parsing fails, this should not
- happen unless libmediainfo itself fails.
- """
- lib = cls._get_library(library_file)
- if pathlib is not None and isinstance(filename, pathlib.PurePath):
- filename = str(filename)
- url = False
- else:
- url = urlparse.urlparse(filename)
- # Try to open the file (if it's not a URL)
- # Doesn't work on Windows because paths are URLs
- if not (url and url.scheme):
- # Test whether the file is readable
- with open(filename, "rb"):
- pass
- # Obtain the library version
- lib_version = lib.MediaInfo_Option(None, "Info_Version", "")
- lib_version = tuple(int(_) for _ in re.search("^MediaInfoLib - v(\\S+)", lib_version).group(1).split("."))
- # The XML option was renamed starting with version 17.10
- if lib_version >= (17, 10):
- xml_option = "OLDXML"
- else:
- xml_option = "XML"
- # Cover_Data is not extracted by default since version 18.03
- # See https://github.com/MediaArea/MediaInfoLib/commit/d8fd88a1c282d1c09388c55ee0b46029e7330690
- if cover_data and lib_version >= (18, 3):
- lib.MediaInfo_Option(None, "Cover_Data", "base64")
- # Create a MediaInfo handle
- handle = lib.MediaInfo_New()
- lib.MediaInfo_Option(handle, "CharSet", "UTF-8")
- # Fix for https://github.com/sbraz/pymediainfo/issues/22
- # Python 2 does not change LC_CTYPE
- # at startup: https://bugs.python.org/issue6203
- if (sys.version_info < (3,) and os.name == "posix"
- and locale.getlocale() == (None, None)):
- locale.setlocale(locale.LC_CTYPE, locale.getdefaultlocale())
- lib.MediaInfo_Option(None, "Inform", "" if text else xml_option)
- lib.MediaInfo_Option(None, "Complete", "1" if full else "")
- lib.MediaInfo_Option(None, "ParseSpeed", str(parse_speed))
- lib.MediaInfo_Option(None, "LegacyStreamDisplay", "1" if legacy_stream_display else "")
- if lib.MediaInfo_Open(handle, filename) == 0:
- raise RuntimeError("An eror occured while opening {}"
- " with libmediainfo".format(filename))
- output = lib.MediaInfo_Inform(handle, 0)
- # Delete the handle
- lib.MediaInfo_Close(handle)
- lib.MediaInfo_Delete(handle)
- if text:
- return output
- else:
- return cls(output, encoding_errors)
- def to_data(self):
- """
- Returns a dict representation of the object's :py:class:`Tracks <Track>`.
-
- :rtype: dict
- """
- data = {'tracks': []}
- for track in self.tracks:
- data['tracks'].append(track.to_data())
- return data
- def to_json(self):
- """
- Returns a JSON representation of the object's :py:class:`Tracks <Track>`.
-
- :rtype: str
- """
- return json.dumps(self.to_data())
diff --git a/libs/pyprobe/__init__.py b/libs/pyprobe/__init__.py
deleted file mode 100644
index 14e25f36e..000000000
--- a/libs/pyprobe/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-
-from .pyprobe import VideoFileParser
diff --git a/libs/pyprobe/baseparser.py b/libs/pyprobe/baseparser.py
deleted file mode 100644
index 83240cdc4..000000000
--- a/libs/pyprobe/baseparser.py
+++ /dev/null
@@ -1,41 +0,0 @@
-
-class BaseParser:
- @classmethod
- def parse(cls, data, rawMode, includeMissing):
- """Core of the parser classes
-
- Collects all methods prefixed with "value_" and builds a dict of
- their return values. Parser classes will inherit from this class.
- All methods that begin with "value_" in a parser class will be given
- the same `data` argument and are expected to pull their corresponding
- value from the collection.
-
- These methods return a tuple - their raw value and formatted value.
- The raw value is a string or tuple of string and the formatted value
- be of type string, int, float, or tuple.
-
- If no data is found in a method, the raw value is expected to be None,
- and for the formatted value, strings will be "null", ints will be 0,
- floats will be 0.0.
-
- Args:
- data (dict): Raw video data
- rawMode (bool): Returns raw values instead of formatted values
- includeMissing (bool): If value is missing, return "empty" value
-
- Returns:
- dict<str, dict<str, var>>: Parsed data from class methods, may not have every value.
-
- """
- parsers = [getattr(cls, p) for p in dir(cls) if p.startswith("value_")]
- info = {}
- for parser in parsers:
- parsed_raw, parsed_formatted = parser(data)
- if parsed_raw == None and not includeMissing:
- continue
- name = parser.__name__[6:]
- if rawMode:
- info[name] = parsed_raw
- else:
- info[name] = parsed_formatted
- return info
diff --git a/libs/pyprobe/ffprobeparsers.py b/libs/pyprobe/ffprobeparsers.py
deleted file mode 100644
index b3627816c..000000000
--- a/libs/pyprobe/ffprobeparsers.py
+++ /dev/null
@@ -1,216 +0,0 @@
-from __future__ import absolute_import
-from os import path
-
-from .baseparser import BaseParser
-
-
-class StreamParser(BaseParser):
- @staticmethod
- def value_codec(data):
- """Returns a string"""
- info = data.get("codec_name", None)
- return info, (info or "null")
-
- @staticmethod
- def value_format(data):
- """Returns a string"""
- info = data.get("format_name", None)
- return info, (info or "null")
-
- @staticmethod
- def value_bit_rate(data):
- """Returns an int"""
- info = data.get("bit_rate", None)
- try:
- return info, int(float(info))
- except (ValueError, TypeError):
- return info, 0
-
-
-class VideoStreamParser(BaseParser):
- @staticmethod
- def value_codec(data):
- return StreamParser.value_codec(data)
-
- @staticmethod
- def value_format(data):
- return StreamParser.value_format(data)
-
- @staticmethod
- def value_bit_rate(data):
- return StreamParser.value_bit_rate(data)
-
- @staticmethod
- def value_resolution(data):
- """Returns a tuple (width, height)"""
- width = data.get("width", None)
- height = data.get("height", None)
- if width is None and height is None:
- return None, (0, 0)
- try:
- return (width, height), (int(float(width)), int(float(height)))
- except (ValueError, TypeError):
- return (width, height), (0, 0)
-
- @staticmethod
- def average_framerate(data):
- """Returns an int"""
- frames = data.get("nb_frames", None)
- duration = data.get("duration", None)
- try:
- return float(frames) / float(duration)
- except (ValueError, TypeError, ZeroDivisionError):
- return 0.0
-
- @classmethod
- def value_framerate(cls, data):
- """Returns a float"""
- input_str = data.get("avg_frame_rate", None)
- try:
- num, den = input_str.split("/")
- return input_str, round(float(num) / float(den), 3)
- except (ValueError, ZeroDivisionError, AttributeError):
- info = cls.average_framerate(data)
- return input_str, info
-
- @staticmethod
- def value_aspect_ratio(data):
- """Returns a string"""
- info = data.get("display_aspect_ratio", None)
- return info, (info or "null")
-
- @staticmethod
- def value_pixel_format(data):
- """Returns a string"""
- info = data.get("pix_fmt", None)
- return info, (info or "null")
-
-
-class AudioStreamParser(StreamParser):
- @staticmethod
- def value_sample_rate(data):
- """Returns an int - audio sample rate in Hz"""
- info = data.get("sample_rate", None)
- try:
- return info, int(float(info))
- except (ValueError, TypeError):
- return info, 0
-
- @staticmethod
- def value_channel_count(data):
- """Returns an int"""
- info = data.get("channels", None)
- try:
- return info, int(float(info))
- except (ValueError, TypeError):
- return info, 0
-
- @staticmethod
- def value_channel_layout(data):
- """Returns a string"""
- info = data.get("channel_layout", None)
- return info, (info or "null")
-
-
-class SubtitleStreamParser(BaseParser):
- @staticmethod
- def value_codec(data):
- return StreamParser.value_codec(data)
-
- @staticmethod
- def value_language(data):
- """Returns a string """
- tags = data.get("tags", None)
- if tags:
- info = tags.get("language", None) or tags.get("LANGUAGE", None)
- return info, (info or "null")
- return None, "null"
-
- @staticmethod
- def value_forced(data):
- """Returns a bool """
- disposition = data.get("disposition", None)
- if disposition:
- info = disposition.get("forced", None)
- return bool(info), (bool(info) or False)
- return None, "null"
-
-
-class ChapterParser(BaseParser):
- @staticmethod
- def value_start(data):
- """Returns an int"""
- info = data.get("start_time", None)
- try:
- return info, float(data.get("start_time"))
- except (ValueError, TypeError):
- return info, 0
-
- @classmethod
- def value_end(cls, data):
- """Returns a float"""
- info = data.get("end_time", None)
- try:
- return info, float(info)
- except (ValueError, TypeError):
- return info, 0
-
- @staticmethod
- def value_title(data):
- """Returns a string"""
- info = data.get("tags", {}).get("title", None)
- return info, (info or "null")
-
- @staticmethod
- def fillEmptyTitles(chapters):
- """Add text in place of empty titles
- If a chapter doesn't have a title, this will add a basic
- string in the form "Chapter `index+1`"
-
- Args:
- chapters(list<dict>): The list of parsed chapters
-
- """
- index = 0
- for chapter in chapters:
- if not chapter["title"]:
- chapter["title"] = "Chapter " + str(index)
- index += 1
-
-
-class RootParser(BaseParser):
- @staticmethod
- def value_duration(data):
- """Returns an int"""
- info = data.get("duration", None)
- try:
- return info, float(info)
- except (ValueError, TypeError):
- return info, 0.0
-
- @staticmethod
- def value_size(data):
- """Returns an int"""
- info = data.get("size", None)
- if info is None:
- file_path = data.get("filename", "")
- if path.isfile(file_path):
- info = str(path.getsize(file_path))
- try:
- return info, int(float(info))
- except (ValueError, TypeError):
- return info, 0
-
- @classmethod
- def value_bit_rate(cls, data):
- """Returns an int"""
- info = data.get("bit_rate", None)
- if info is None:
- _, size = cls.value_size(data)
- _, duration = cls.value_duration(data)
- if size and duration:
- info = size / (duration / 60 * 0.0075) / 1000
- try:
- return info, int(float(info))
- except (ValueError, TypeError):
- return info, 0
diff --git a/libs/pyprobe/pyprobe.py b/libs/pyprobe/pyprobe.py
deleted file mode 100644
index b280ce551..000000000
--- a/libs/pyprobe/pyprobe.py
+++ /dev/null
@@ -1,226 +0,0 @@
-from __future__ import absolute_import
-from six import PY3
-import json
-import subprocess
-from os import path
-from sys import getfilesystemencoding
-
-from . import ffprobeparsers
-
-
-class VideoFileParser:
- def __init__(
- self,
- ffprobe="ffprobe",
- includeMissing=True,
- rawMode=False,
- ):
- self._ffprobe = ffprobe
- self._includeMissing = includeMissing
- self._rawMode = rawMode
-
- ########################################
- # Main Method
-
- def parseFfprobe(self, inputFile):
- """Takes an input file and returns the parsed data using ffprobe.
-
- Args:
- inputFile (str): Video file path
-
- Returns:
- dict<str, dict<str, var>>: Parsed video info
-
- Raises:
- FileNotFoundError: The input video file or input executable was not found
- IOError: Execution failed
-
- """
- if not path.isfile(inputFile):
- raise FileNotFoundError(inputFile + " not found")
- self._checkExecutable(self._ffprobe)
- fdict = self._executeFfprobe(inputFile)
- return self._parseFfprobe(fdict, inputFile)
-
- ########################################
- # ffprobe Parsing
-
- def _executeFfprobe(self, inputFile):
- """Executes ffprobe program on input file to get raw info
-
- fdict = dict<str, fdict> or dict<str, str>
-
- Args:
- inputFile (str): Video file path
-
- Returns:
- fdict: Parsed data
-
- """
- commandArgs = [
- "-v",
- "quiet",
- "-hide_banner",
- "-show_error",
- "-show_format",
- "-show_streams",
- "-show_programs",
- "-show_chapters",
- "-show_private_data",
- "-print_format",
- "json",
- ]
- outputJson = self._executeParser(self._ffprobe, commandArgs, inputFile)
-
- try:
- data = json.loads(outputJson)
- except json.JSONDecodeError:
- raise IOError("Could not decode ffprobe output for file " + inputFile)
- return data
-
- def _parseFfprobe(self, fOutput, inputFile):
- """Parse all data from fOutput to organized format
-
- fdict = dict<str, fdict> or dict<str, str>
-
- Args:
- fOutput (fdict): Stream data from ffprobe
- inputFile (str): Video file path
-
- Returns:
- dict<str, dict<str, str>>: Parsed video data
-
- """
- videoInfo = {}
- videoInfo["path"] = path.abspath(inputFile)
- videoInfo.update(
- ffprobeparsers.RootParser.parse(
- fOutput["format"], self._rawMode, self._includeMissing
- )
- )
- videoInfo.update(self._parseFfprobeStreams(fOutput))
- videoInfo.update(self._parseFfprobeChapters(fOutput))
- if not self._rawMode:
- ffprobeparsers.ChapterParser.fillEmptyTitles(videoInfo["chapters"])
- return videoInfo
-
- def _parseFfprobeStreams(self, fOutput):
- """Parses video, audio, and subtitle streams
-
- fdict = dict<str, fdict> or dict<str, str>
-
- Args:
- streams_data (fdict): Stream data from ffprobe
-
- Returns:
- dict<str, dict<str, var>>: Parsed streams - video, audio, and subtitle
-
- """
- parsedInfo = {"videos": [], "audios": [], "subtitles": []}
- for stream in fOutput["streams"]:
- streamType = stream["codec_type"]
- data = None
- if streamType == "video":
- data = ffprobeparsers.VideoStreamParser.parse(
- stream, self._rawMode, self._includeMissing
- )
- parsedInfo["videos"].append(data)
- elif streamType == "audio":
- data = ffprobeparsers.AudioStreamParser.parse(
- stream, self._rawMode, self._includeMissing
- )
- parsedInfo["audios"].append(data)
- elif streamType == "subtitle":
- data = ffprobeparsers.SubtitleStreamParser.parse(
- stream, self._rawMode, self._includeMissing
- )
- parsedInfo["subtitles"].append(data)
- return parsedInfo
-
- def _parseFfprobeChapters(self, fOutput):
- """Parses chapters
-
- fdict = dict<str, fdict> or dict<str, str>
-
- Args:
- chapters_data (fdict): Stream data from ffprobe
-
- Returns:
- dict<str, dict<str, var>>: Parsed chapters
-
- """
- parsedInfo = {"chapters": []}
- if fOutput["chapters"] is None:
- return parsedInfo
- for chapter in fOutput["chapters"]:
- parsedInfo["chapters"].append(
- ffprobeparsers.ChapterParser.parse(
- chapter, self._rawMode, self._includeMissing
- )
- )
- return parsedInfo
-
- ########################################
- # Misc Methods
-
- @staticmethod
- def _executeParser(parser, commandArgs, inputFile):
- """Executes parser on the input file
-
- Args:
- parser (str): Executable location or command
- commandArgs (list of strings): Extra command arguments
- inputFile (str): the input file location
-
- Raises:
- IOError: ffprobe execution failed
-
- """
- if PY3:
- command = [parser] + commandArgs + [inputFile]
- completedProcess = subprocess.run(
- command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, encoding="utf-8"
- )
- if completedProcess.returncode:
- raise IOError(
- "Error occurred during execution - " + completedProcess.stderr
- )
- return completedProcess.stdout
- else:
- command = [parser] + commandArgs + [inputFile.encode(getfilesystemencoding())]
- try:
- completedProcess = subprocess.check_output(
- command, stderr=subprocess.STDOUT
- )
- except subprocess.CalledProcessError as e:
- raise IOError(
- "Error occurred during execution - " + e.output
- )
- return completedProcess
-
- @staticmethod
- def _checkExecutable(executable):
- """Checks if target is executable
-
- Args:
- executable (str): Executable location, can be file or command
-
- Raises:
- FileNotFoundError: Executable was not found
-
- """
- try:
- subprocess.check_output(
- [executable, "--help"],
- stderr=subprocess.STDOUT
- )
- except OSError:
- raise FileNotFoundError(executable + " not found")
-
-
-class FileNotFoundError(Exception):
- pass
-
-
-class IOError(Exception):
- pass
diff --git a/libs/pysubs2/exceptions.py b/libs/pysubs2/exceptions.py
index e0c9312fb..b9d528524 100644
--- a/libs/pysubs2/exceptions.py
+++ b/libs/pysubs2/exceptions.py
@@ -12,3 +12,6 @@ class UnknownFormatIdentifierError(Pysubs2Error):
class FormatAutodetectionError(Pysubs2Error):
"""Subtitle format is ambiguous or unknown."""
+
+class ContentNotUsable(Pysubs2Error):
+ """Current content not usable for specified format"""
diff --git a/libs/pysubs2/ssastyle.py b/libs/pysubs2/ssastyle.py
index eb59b74b5..b7b4a5ef3 100644
--- a/libs/pysubs2/ssastyle.py
+++ b/libs/pysubs2/ssastyle.py
@@ -41,6 +41,7 @@ class SSAStyle(object):
self.italic = False #: Italic
self.underline = False #: Underline (ASS only)
self.strikeout = False #: Strikeout (ASS only)
+        self.drawing = False #: Drawing (ASS only, see http://docs.aegisub.org/3.1/ASS_Tags/#drawing-tags)
self.scalex = 100.0 #: Horizontal scaling (ASS only)
self.scaley = 100.0 #: Vertical scaling (ASS only)
self.spacing = 0.0 #: Letter spacing (ASS only)
diff --git a/libs/pysubs2/subrip.py b/libs/pysubs2/subrip.py
index 70cb96fe5..56055b650 100644
--- a/libs/pysubs2/subrip.py
+++ b/libs/pysubs2/subrip.py
@@ -5,6 +5,7 @@ from .formatbase import FormatBase
from .ssaevent import SSAEvent
from .ssastyle import SSAStyle
from .substation import parse_tags
+from .exceptions import ContentNotUsable
from .time import ms_to_times, make_time, TIMESTAMP, timestamp_to_ms
#: Largest timestamp allowed in SubRip, ie. 99:59:59,999.
@@ -81,6 +82,7 @@ class SubripFormat(FormatBase):
if sty.italic: fragment = "<i>%s</i>" % fragment
if sty.underline: fragment = "<u>%s</u>" % fragment
if sty.strikeout: fragment = "<s>%s</s>" % fragment
+ if sty.drawing: raise ContentNotUsable
body.append(fragment)
return re.sub("\n+", "\n", "".join(body).strip())
@@ -90,7 +92,10 @@ class SubripFormat(FormatBase):
for i, line in enumerate(visible_lines, 1):
start = ms_to_timestamp(line.start)
end = ms_to_timestamp(line.end)
- text = prepare_text(line.text, subs.styles.get(line.style, SSAStyle.DEFAULT_STYLE))
+ try:
+ text = prepare_text(line.text, subs.styles.get(line.style, SSAStyle.DEFAULT_STYLE))
+ except ContentNotUsable:
+ continue
print("%d" % i, file=fp) # Python 2.7 compat
print(start, "-->", end, file=fp)
diff --git a/libs/pysubs2/substation.py b/libs/pysubs2/substation.py
index 8563f8a0d..274075a44 100644
--- a/libs/pysubs2/substation.py
+++ b/libs/pysubs2/substation.py
@@ -110,7 +110,7 @@ def parse_tags(text, style=SSAStyle.DEFAULT_STYLE, styles={}):
def apply_overrides(all_overrides):
s = style.copy()
- for tag in re.findall(r"\\[ibus][10]|\\r[a-zA-Z_0-9 ]*", all_overrides):
+ for tag in re.findall(r"\\[ibusp][0-9]|\\r[a-zA-Z_0-9 ]*", all_overrides):
if tag == r"\r":
s = style.copy() # reset to original line style
elif tag.startswith(r"\r"):
@@ -122,6 +122,13 @@ def parse_tags(text, style=SSAStyle.DEFAULT_STYLE, styles={}):
elif "b" in tag: s.bold = "1" in tag
elif "u" in tag: s.underline = "1" in tag
elif "s" in tag: s.strikeout = "1" in tag
+ elif "p" in tag:
+ try:
+ scale = int(tag[2:])
+ except (ValueError, IndexError):
+ continue
+
+ s.drawing = scale > 0
return s
overrides = SSAEvent.OVERRIDE_SEQUENCE.findall(text)
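Net effect of the two pysubs2 changes above, sketched with the usual SSAFile/SSAEvent API (the event text is invented): events that switch into ASS drawing mode no longer leak vector commands into SubRip output.

    from pysubs2 import SSAFile, SSAEvent, make_time

    subs = SSAFile()
    subs.append(SSAEvent(start=0, end=make_time(s=2), text="Hello"))
    # {\p1} switches the line into drawing mode, so the SubRip writer drops it.
    subs.append(SSAEvent(start=0, end=make_time(s=2), text=r"{\p1}m 0 0 l 100 0 100 100{\p0}"))

    print(subs.to_string("srt"))  # only the "Hello" cue survives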
diff --git a/libs/subliminal_patch/providers/bsplayer.py b/libs/subliminal_patch/providers/bsplayer.py
index a3bd78182..5bf44ddb9 100644
--- a/libs/subliminal_patch/providers/bsplayer.py
+++ b/libs/subliminal_patch/providers/bsplayer.py
@@ -234,7 +234,7 @@ class BSPlayerProvider(Provider):
def get_sub_domain(self):
# s1-9, s101-109
- SUB_DOMAINS = ['s1', 's2', 's3', 's4', 's5', 's6', 's7', 's8', 's9',
+ SUB_DOMAINS = ['s1', 's2', 's3', 's4', 's5', 's6', 's7', 's8',
's101', 's102', 's103', 's104', 's105', 's106', 's107', 's108', 's109']
API_URL_TEMPLATE = "http://{sub_domain}.api.bsplayer-subtitles.com/v1.php"
sub_domains_end = len(SUB_DOMAINS) - 1
diff --git a/libs/subliminal_patch/providers/legendasdivx.py b/libs/subliminal_patch/providers/legendasdivx.py
index aa9f2a5f0..6247792af 100644
--- a/libs/subliminal_patch/providers/legendasdivx.py
+++ b/libs/subliminal_patch/providers/legendasdivx.py
@@ -118,7 +118,7 @@ class LegendasdivxProvider(Provider):
SEARCH_THROTTLE = 8
site = 'https://www.legendasdivx.pt'
headers = {
- 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:72.0) Gecko/20100101 Firefox/72.0',
+ 'User-Agent': os.environ.get("SZ_USER_AGENT", "Sub-Zero/2"),
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
'Origin': 'https://www.legendasdivx.pt',
'Referer': 'https://www.legendasdivx.pt',
diff --git a/libs/subliminal_patch/providers/opensubtitles.py b/libs/subliminal_patch/providers/opensubtitles.py
index 34007ed4e..012fe6c13 100644
--- a/libs/subliminal_patch/providers/opensubtitles.py
+++ b/libs/subliminal_patch/providers/opensubtitles.py
@@ -327,6 +327,7 @@ class OpenSubtitlesProvider(ProviderRetryMixin, _OpenSubtitlesProvider):
hash, movie_name, movie_release_name, movie_year, movie_imdb_id,
series_season, series_episode, query_parameters, filename, encoding,
movie_fps, skip_wrong_fps=self.skip_wrong_fps)
+ subtitle.uploader = _subtitle_item['UserNickName'] if _subtitle_item['UserNickName'] else 'anonymous'
logger.debug('Found subtitle %r by %s', subtitle, matched_by)
subtitles.append(subtitle)
diff --git a/libs/subliminal_patch/providers/regielive.py b/libs/subliminal_patch/providers/regielive.py
new file mode 100644
index 000000000..65cbfc93d
--- /dev/null
+++ b/libs/subliminal_patch/providers/regielive.py
@@ -0,0 +1,182 @@
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import
+import logging
+import io
+import os
+
+from requests import Session
+from guessit import guessit
+from subliminal_patch.providers import Provider
+from subliminal_patch.exceptions import APIThrottled
+from subliminal_patch.subtitle import Subtitle
+from subliminal.subtitle import SUBTITLE_EXTENSIONS, fix_line_ending
+from subliminal.subtitle import guess_matches
+from subliminal.video import Episode, Movie
+from subzero.language import Language
+
+import zipfile
+
+logger = logging.getLogger(__name__)
+
+
+class RegieLiveSubtitle(Subtitle):
+ """RegieLive Subtitle."""
+ provider_name = 'regielive'
+ hash_verifiable = False
+
+ def __init__(self, filename, video, link, rating, language):
+ super(RegieLiveSubtitle, self).__init__(language)
+ self.filename = filename
+ self.page_link = link
+ self.video = video
+ self.rating = rating
+ self.language = language
+
+ @property
+ def id(self):
+ return self.page_link
+
+ @property
+ def release_info(self):
+ return self.filename
+
+ def get_matches(self, video):
+ matches = set()
+ matches |= guess_matches(video, guessit(self.filename))
+
+ subtitle_filename = self.filename
+
+ # episode
+ if isinstance(video, Episode):
+ # already matched in search query
+ matches.update(['title', 'series', 'season', 'episode', 'year'])
+
+ # movie
+ elif isinstance(video, Movie):
+ # already matched in search query
+ matches.update(['title', 'year'])
+
+ # release_group
+ if video.release_group and video.release_group.lower() in subtitle_filename:
+ matches.add('release_group')
+
+ # resolution
+ if video.resolution and video.resolution.lower() in subtitle_filename:
+ matches.add('resolution')
+
+ # format
+ formats = []
+ if video.format:
+ formats = [video.format.lower()]
+ if formats[0] == "web-dl":
+ formats.append("webdl")
+ formats.append("webrip")
+ formats.append("web ")
+ for frmt in formats:
+ if frmt.lower() in subtitle_filename:
+ matches.add('format')
+ break
+
+ # video_codec
+        if video.video_codec:
+            video_codecs = [video.video_codec.lower()]
+            if video_codecs[0] == "h264":
+                video_codecs.append("x264")
+            elif video_codecs[0] == "h265":
+                video_codecs.append("x265")
+            for vc in video_codecs:
+                if vc.lower() in subtitle_filename:
+                    matches.add('video_codec')
+                    break
+
+ return matches
+
+
+class RegieLiveProvider(Provider):
+ """RegieLive Provider."""
+ languages = {Language(l) for l in ['ron']}
+ language = list(languages)[0]
+ SEARCH_THROTTLE = 8
+
+ def __init__(self):
+ self.initialize()
+
+ def initialize(self):
+ self.session = Session()
+ self.url = 'http://api.regielive.ro/kodi/cauta.php'
+ self.api = 'API-KODI-KINGUL'
+ self.headers = {'RL-API': self.api}
+
+ def terminate(self):
+ self.session.close()
+
+ def query(self, video, language):
+ payload = {}
+        if isinstance(video, Episode):
+ payload['nume'] = video.series
+ payload['sezon'] = video.season
+ payload['episod'] = video.episode
+ elif isinstance(video, Movie):
+ payload['nume'] = video.title
+ payload['an'] = video.year
+ response = self.session.post(self.url, data=payload, headers=self.headers)
+ logger.info(response.json())
+ response_json = response.json()['rezultate']
+ subtitles = []
+        if 'eroare' not in response_json:
+ for film in response_json:
+ for sub in response_json[film]['subtitrari']:
+ logger.debug(sub)
+ subtitles.append(
+ RegieLiveSubtitle(sub['titlu'], video, sub['url'], sub['rating'], language)
+ )
+
+ # {'titlu': 'Chernobyl.S01E04.The.Happiness.of.All.Mankind.720p.AMZN.WEB-DL.DDP5.1.H.264-NTb', 'url': 'https://subtitrari.regielive.ro/descarca-33336-418567.zip', 'rating': {'nota': 4.89, 'voturi': 48}}
+ # subtitle def __init__(self, language, filename, subtype, video, link):
+ return subtitles
+
+ def list_subtitles(self, video, languages):
+ return self.query(video, self.language)
+
+ def download_subtitle(self, subtitle):
+ session = Session()
+ _addheaders = {
+ 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:72.0) Gecko/20100101 Firefox/72.0',
+ 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
+ 'Origin': 'https://subtitrari.regielive.ro',
+ 'Accept-Language' : 'en-US,en;q=0.5',
+ 'Referer': 'https://subtitrari.regielive.ro',
+ 'Pragma': 'no-cache',
+ 'Cache-Control': 'no-cache'
+ }
+ session.headers.update(_addheaders)
+ res = session.get('https://subtitrari.regielive.ro')
+ cookies = res.cookies
+ _zipped = session.get(subtitle.page_link, cookies=cookies)
+ if _zipped:
+ if _zipped.text == '500':
+ raise ValueError('Error 500 on server')
+ archive = zipfile.ZipFile(io.BytesIO(_zipped.content))
+ subtitle_content = self._get_subtitle_from_archive(archive)
+ subtitle.content = fix_line_ending(subtitle_content)
+
+ return subtitle
+        raise ValueError('Problems connecting to the server')
+
+ def _get_subtitle_from_archive(self, archive):
+ # some files have a non subtitle with .txt extension
+ _tmp = list(SUBTITLE_EXTENSIONS)
+ _tmp.remove('.txt')
+ _subtitle_extensions = tuple(_tmp)
+
+ for name in archive.namelist():
+ # discard hidden files
+ if os.path.split(name)[-1].startswith('.'):
+ continue
+
+ # discard non-subtitle files
+ if not name.lower().endswith(_subtitle_extensions):
+ continue
+
+ return archive.read(name)
+
+        raise APIThrottled('Cannot find the subtitle in the compressed file')
diff --git a/libs/subliminal_patch/providers/subdivx.py b/libs/subliminal_patch/providers/subdivx.py
index 69202ec88..8d04ad0fc 100644
--- a/libs/subliminal_patch/providers/subdivx.py
+++ b/libs/subliminal_patch/providers/subdivx.py
@@ -17,11 +17,15 @@ else:
from subliminal import __short_version__
from subliminal.exceptions import ServiceUnavailable
-from subliminal.providers import ParserBeautifulSoup, Provider
-from subliminal.subtitle import SUBTITLE_EXTENSIONS, Subtitle, fix_line_ending,guess_matches
+from subliminal.providers import ParserBeautifulSoup
+from subliminal.subtitle import SUBTITLE_EXTENSIONS, fix_line_ending,guess_matches
from subliminal.video import Episode, Movie
from subliminal_patch.exceptions import APIThrottled
from six.moves import range
+from subliminal_patch.score import get_scores
+from subliminal_patch.subtitle import Subtitle
+from subliminal_patch.providers import Provider
+from guessit import guessit
logger = logging.getLogger(__name__)
@@ -30,20 +34,20 @@ class SubdivxSubtitle(Subtitle):
provider_name = 'subdivx'
hash_verifiable = False
- def __init__(self, language, page_link, description, title):
- super(SubdivxSubtitle, self).__init__(language, hearing_impaired=False,
- page_link=page_link)
- self.description = description.lower()
+ def __init__(self, language, video, page_link, title, description, uploader):
+ super(SubdivxSubtitle, self).__init__(language, hearing_impaired=False, page_link=page_link)
+ self.video = video
self.title = title
+ self.description = description
+ self.uploader = uploader
+ self.release_info = self.title
+ if self.description and self.description.strip():
+ self.release_info += ' | ' + self.description
@property
def id(self):
return self.page_link
- @property
- def release_info(self):
- return self.description
-
def get_matches(self, video):
matches = set()
@@ -112,15 +116,17 @@ class SubdivxSubtitlesProvider(Provider):
def terminate(self):
self.session.close()
- def query(self, keyword, season=None, episode=None, year=None):
- query = keyword
- if season and episode:
- query += ' S{season:02d}E{episode:02d}'.format(season=season, episode=episode)
- elif year:
- query += ' {:4d}'.format(year)
+ def query(self, video, languages):
+
+ if isinstance(video, Episode):
+ query = "{} S{:02d}E{:02d}".format(video.series, video.season, video.episode)
+ else:
+ query = video.title
+ if video.year:
+ query += ' {:4d}'.format(video.year)
params = {
- 'buscar': query, # search string
+ 'q': query, # search string
'accion': 5, # action search
'oxdown': 1, # order by downloads descending
'pg': 1 # page 1
@@ -131,44 +137,27 @@ class SubdivxSubtitlesProvider(Provider):
language = self.language_list[0]
search_link = self.server_url + 'index.php'
while True:
- response = self.session.get(search_link, params=params, timeout=10)
+ response = self.session.get(search_link, params=params, timeout=20)
self._check_response(response)
try:
- page_subtitles = self._parse_subtitles_page(response, language)
+ page_subtitles = self._parse_subtitles_page(video, response, language)
except Exception as e:
logger.error('Error parsing subtitles list: ' + str(e))
break
subtitles += page_subtitles
- if len(page_subtitles) >= 20:
- params['pg'] += 1 # search next page
- time.sleep(self.multi_result_throttle)
- else:
- break
+ if len(page_subtitles) < 20:
+ break # this is the last page
+
+ params['pg'] += 1 # search next page
+ time.sleep(self.multi_result_throttle)
return subtitles
def list_subtitles(self, video, languages):
- if isinstance(video, Episode):
- titles = [video.series] + video.alternative_series
- elif isinstance(video, Movie):
- titles = [video.title] + video.alternative_titles
- else:
- titles = []
-
- subtitles = []
- for title in titles:
- if isinstance(video, Episode):
- subtitles += [s for s in self.query(title, season=video.season,
- episode=video.episode, year=video.year)
- if s.language in languages]
- elif isinstance(video, Movie):
- subtitles += [s for s in self.query(title, year=video.year)
- if s.language in languages]
-
- return subtitles
+ return self.query(video, languages)
def download_subtitle(self, subtitle):
if isinstance(subtitle, SubdivxSubtitle):
@@ -186,14 +175,14 @@ class SubdivxSubtitlesProvider(Provider):
archive = self._get_archive(response.content)
# extract the subtitle
- subtitle_content = self._get_subtitle_from_archive(archive)
+ subtitle_content = self._get_subtitle_from_archive(archive, subtitle)
subtitle.content = fix_line_ending(subtitle_content)
def _check_response(self, response):
if response.status_code != 200:
raise ServiceUnavailable('Bad status code: ' + str(response.status_code))
- def _parse_subtitles_page(self, response, language):
+ def _parse_subtitles_page(self, video, response, language):
subtitles = []
page_soup = ParserBeautifulSoup(response.content.decode('iso-8859-1', 'ignore'), ['lxml', 'html.parser'])
@@ -204,13 +193,17 @@ class SubdivxSubtitlesProvider(Provider):
title_soup, body_soup = title_soups[subtitle], body_soups[subtitle]
# title
- title = title_soup.find("a").text.replace("Subtitulo de ", "")
- page_link = title_soup.find("a")["href"].replace('http://', 'https://')
+ title = title_soup.find("a").text.replace("Subtitulos de ", "")
+ page_link = title_soup.find("a")["href"]
- # body
+ # description
description = body_soup.find("div", {'id': 'buscador_detalle_sub'}).text
+ description = description.replace(",", " ").lower()
+
+ # uploader
+ uploader = body_soup.find("a", {'class': 'link1'}).text
- subtitle = self.subtitle_class(language, page_link, description, title)
+ subtitle = self.subtitle_class(language, video, page_link, title, description, uploader)
logger.debug('Found subtitle %r', subtitle)
subtitles.append(subtitle)
@@ -218,7 +211,7 @@ class SubdivxSubtitlesProvider(Provider):
return subtitles
def _get_download_link(self, subtitle):
- response = self.session.get(subtitle.page_link, timeout=10)
+ response = self.session.get(subtitle.page_link, timeout=20)
self._check_response(response)
try:
page_soup = ParserBeautifulSoup(response.content.decode('iso-8859-1', 'ignore'), ['lxml', 'html.parser'])
@@ -226,12 +219,10 @@ class SubdivxSubtitlesProvider(Provider):
for link_soup in links_soup:
if link_soup['href'].startswith('bajar'):
return self.server_url + link_soup['href']
- links_soup = page_soup.find_all ("a", {'class': 'link1'})
+ links_soup = page_soup.find_all("a", {'class': 'link1'})
for link_soup in links_soup:
if "bajar.php" in link_soup['href']:
- # not using link_soup['href'] directly because it's http://
- dl_link = urlparse(link_soup['href'])
- return self.server_url + dl_link.path + '?' + dl_link.query
+ return link_soup['href']
except Exception as e:
raise APIThrottled('Error parsing download link: ' + str(e))
@@ -251,7 +242,10 @@ class SubdivxSubtitlesProvider(Provider):
return archive
- def _get_subtitle_from_archive(self, archive):
+ def _get_subtitle_from_archive(self, archive, subtitle):
+ _max_score = 0
+ _scores = get_scores(subtitle.video)
+
for name in archive.namelist():
# discard hidden files
if os.path.split(name)[-1].startswith('.'):
@@ -261,6 +255,26 @@ class SubdivxSubtitlesProvider(Provider):
if not name.lower().endswith(SUBTITLE_EXTENSIONS):
continue
- return archive.read(name)
+ _guess = guessit(name)
+ if isinstance(subtitle.video, Episode):
+ logger.debug("guessing %s" % name)
+ logger.debug("subtitle S{}E{} video S{}E{}".format(_guess['season'], _guess['episode'], subtitle.video.season, subtitle.video.episode))
+
+ if subtitle.video.episode != _guess['episode'] or subtitle.video.season != _guess['season']:
+ logger.debug('subtitle does not match video, skipping')
+ continue
+
+ matches = set()
+ matches |= guess_matches(subtitle.video, _guess)
+ _score = sum(_scores.get(match, 0) for match in matches)
+ logger.debug('srt matches: %s, score %d' % (matches, _score))
+ if _score > _max_score:
+ _max_name = name
+ _max_score = _score
+ logger.debug("new max: {} {}".format(name, _score))
+
+ if _max_score > 0:
+ logger.debug("returning from archive: {} scored {}".format(_max_name, _max_score))
+ return archive.read(_max_name)
raise APIThrottled('Can not find the subtitle in the compressed file')
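
The reworked _get_subtitle_from_archive() above no longer returns the first subtitle-looking member; it scores every file name with guessit against the video and keeps the best one. A rough sketch of that selection, with `scores` standing in for the dict returned by subliminal_patch.score.get_scores(video):

    from guessit import guessit
    from subliminal.subtitle import guess_matches

    def best_archive_member(archive, video, scores):
        best_name, best_score = None, 0
        for name in archive.namelist():
            # Guess release properties from the member name and score the matches.
            matches = guess_matches(video, guessit(name))
            score = sum(scores.get(match, 0) for match in matches)
            if score > best_score:
                best_name, best_score = name, score
        return archive.read(best_name) if best_name else None
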
diff --git a/libs/subliminal_patch/providers/subs4free.py b/libs/subliminal_patch/providers/subs4free.py
index 9c7e92708..033ff664e 100644
--- a/libs/subliminal_patch/providers/subs4free.py
+++ b/libs/subliminal_patch/providers/subs4free.py
@@ -84,7 +84,8 @@ class Subs4FreeProvider(Provider):
def initialize(self):
self.session = Session()
- self.session.headers['User-Agent'] = os.environ.get("SZ_USER_AGENT", "Sub-Zero/2")
+ from .utils import FIRST_THOUSAND_OR_SO_USER_AGENTS as AGENT_LIST
+ self.session.headers['User-Agent'] = AGENT_LIST[random.randint(0, len(AGENT_LIST) - 1)]
def terminate(self):
self.session.close()
diff --git a/libs/subliminal_patch/providers/subs4series.py b/libs/subliminal_patch/providers/subs4series.py
index 96b756914..5b559e868 100644
--- a/libs/subliminal_patch/providers/subs4series.py
+++ b/libs/subliminal_patch/providers/subs4series.py
@@ -3,6 +3,7 @@ from __future__ import absolute_import
import io
import logging
import os
+from random import randint
import rarfile
import re
@@ -82,7 +83,8 @@ class Subs4SeriesProvider(Provider):
def initialize(self):
self.session = Session()
- self.session.headers['User-Agent'] = 'Subliminal/{}'.format(__short_version__)
+ from .utils import FIRST_THOUSAND_OR_SO_USER_AGENTS as AGENT_LIST
+ self.session.headers['User-Agent'] = AGENT_LIST[randint(0, len(AGENT_LIST) - 1)]
def terminate(self):
self.session.close()
diff --git a/libs/subliminal_patch/providers/subssabbz.py b/libs/subliminal_patch/providers/subssabbz.py
index 709029169..f2bb05450 100644
--- a/libs/subliminal_patch/providers/subssabbz.py
+++ b/libs/subliminal_patch/providers/subssabbz.py
@@ -12,7 +12,7 @@ from requests import Session
from guessit import guessit
from subliminal_patch.providers import Provider
from subliminal_patch.subtitle import Subtitle
-from subliminal_patch.utils import sanitize
+from subliminal_patch.utils import sanitize, fix_inconsistent_naming
from subliminal.exceptions import ProviderError
from subliminal.utils import sanitize_release_group
from subliminal.subtitle import guess_matches
@@ -23,6 +23,21 @@ from .utils import FIRST_THOUSAND_OR_SO_USER_AGENTS as AGENT_LIST
logger = logging.getLogger(__name__)
+def fix_tv_naming(title):
+ """Fix TV show titles with inconsistent naming using dictionary, but do not sanitize them.
+
+ :param str title: original title.
+ :return: new title.
+ :rtype: str
+
+ """
+ return fix_inconsistent_naming(title, {"Marvel's Daredevil": "Daredevil",
+ "Marvel's Luke Cage": "Luke Cage",
+ "Marvel's Iron Fist": "Iron Fist",
+ "Marvel's Jessica Jones": "Jessica Jones",
+ "DC's Legends of Tomorrow": "Legends of Tomorrow"
+ }, True)
+
class SubsSabBzSubtitle(Subtitle):
"""SubsSabBz Subtitle."""
provider_name = 'subssabbz'
@@ -34,6 +49,7 @@ class SubsSabBzSubtitle(Subtitle):
self.page_link = link
self.type = type
self.video = video
+ self.release_info = os.path.splitext(filename)[0]
@property
def id(self):
@@ -60,8 +76,6 @@ class SubsSabBzSubtitle(Subtitle):
matches.add('hash')
matches |= guess_matches(video, guessit(self.filename, {'type': self.type}))
-
- matches.add(id(self))
return matches
@@ -99,10 +113,10 @@ class SubsSabBzProvider(Provider):
}
if isEpisode:
- params['movie'] = "%s %02d %02d" % (sanitize(video.series), video.season, video.episode)
+ params['movie'] = "%s %02d %02d" % (sanitize(fix_tv_naming(video.series), {'\''}), video.season, video.episode)
else:
params['yr'] = video.year
- params['movie'] = (video.title)
+ params['movie'] = sanitize(video.title, {'\''})
if language == 'en' or language == 'eng':
params['select-language'] = 1
@@ -118,19 +132,23 @@ class SubsSabBzProvider(Provider):
logger.debug('No subtitles found')
return subtitles
- soup = BeautifulSoup(response.content, 'html.parser')
+ soup = BeautifulSoup(response.content, 'lxml')
rows = soup.findAll('tr', {'class': 'subs-row'})
- # Search on first 10 rows only
- for row in rows[:10]:
+ # Search on first 20 rows only
+ for row in rows[:20]:
a_element_wrapper = row.find('td', { 'class': 'c2field' })
if a_element_wrapper:
element = a_element_wrapper.find('a')
if element:
link = element.get('href')
+ element = row.find('a', href = re.compile(r'.*showuser=.*'))
+ uploader = element.get_text() if element else None
logger.info('Found subtitle link %r', link)
- subtitles = subtitles + self.download_archive_and_add_subtitle_files(link, language, video)
-
+ sub = self.download_archive_and_add_subtitle_files(link, language, video)
+ for s in sub:
+ s.uploader = uploader
+ subtitles = subtitles + sub
return subtitles
def list_subtitles(self, video, languages):
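
fix_tv_naming() above delegates to subliminal_patch.utils.fix_inconsistent_naming with a small title dictionary. As a hypothetical standalone equivalent (not the library helper itself), the idea is a case-insensitive mapping applied to the series title before it is sanitized into the search query:

    import re

    TITLE_FIXES = {
        "Marvel's Daredevil": "Daredevil",
        "DC's Legends of Tomorrow": "Legends of Tomorrow",
    }

    def fix_title(title):
        # Replace known inconsistent titles; everything else passes through untouched.
        for bad, good in TITLE_FIXES.items():
            title = re.sub(re.escape(bad), good, title, flags=re.IGNORECASE)
        return title

    # fix_title("Marvel's Daredevil") -> "Daredevil"
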
diff --git a/libs/subliminal_patch/providers/subsunacs.py b/libs/subliminal_patch/providers/subsunacs.py
index 5af116d99..87c97c486 100644
--- a/libs/subliminal_patch/providers/subsunacs.py
+++ b/libs/subliminal_patch/providers/subsunacs.py
@@ -12,7 +12,7 @@ from requests import Session
from guessit import guessit
from subliminal_patch.providers import Provider
from subliminal_patch.subtitle import Subtitle
-from subliminal_patch.utils import sanitize
+from subliminal_patch.utils import sanitize, fix_inconsistent_naming
from subliminal.exceptions import ProviderError
from subliminal.utils import sanitize_release_group
from subliminal.subtitle import guess_matches
@@ -23,6 +23,20 @@ from .utils import FIRST_THOUSAND_OR_SO_USER_AGENTS as AGENT_LIST
logger = logging.getLogger(__name__)
+def fix_tv_naming(title):
+ """Fix TV show titles with inconsistent naming using dictionary, but do not sanitize them.
+
+ :param str title: original title.
+ :return: new title.
+ :rtype: str
+
+ """
+ return fix_inconsistent_naming(title, {"Marvel's Daredevil": "Daredevil",
+ "Marvel's Luke Cage": "Luke Cage",
+ "Marvel's Iron Fist": "Iron Fist",
+ "DC's Legends of Tomorrow": "Legends of Tomorrow"
+ }, True)
+
class SubsUnacsSubtitle(Subtitle):
"""SubsUnacs Subtitle."""
provider_name = 'subsunacs'
@@ -34,6 +48,7 @@ class SubsUnacsSubtitle(Subtitle):
self.page_link = link
self.type = type
self.video = video
+ self.release_info = os.path.splitext(filename)[0]
@property
def id(self):
@@ -60,8 +75,6 @@ class SubsUnacsSubtitle(Subtitle):
matches.add('hash')
matches |= guess_matches(video, guessit(self.filename, {'type': self.type}))
-
- matches.add(id(self))
return matches
@@ -103,10 +116,10 @@ class SubsUnacsProvider(Provider):
'imdbcheck': 1}
if isEpisode:
- params['m'] = "%s %02d %02d" % (sanitize(video.series), video.season, video.episode)
+ params['m'] = "%s %02d %02d" % (sanitize(fix_tv_naming(video.series), {'\''}), video.season, video.episode)
else:
params['y'] = video.year
- params['m'] = (video.title)
+ params['m'] = sanitize(video.title, {'\''})
if language == 'en' or language == 'eng':
params['l'] = 1
@@ -122,17 +135,23 @@ class SubsUnacsProvider(Provider):
logger.debug('No subtitles found')
return subtitles
- soup = BeautifulSoup(response.content, 'html.parser')
- rows = soup.findAll('td', {'class': 'tdMovie'})
-
- # Search on first 10 rows only
- for row in rows[:10]:
- element = row.find('a', {'class': 'tooltip'})
- if element:
- link = element.get('href')
- logger.info('Found subtitle link %r', link)
- subtitles = subtitles + self.download_archive_and_add_subtitle_files('https://subsunacs.net' + link, language, video)
-
+ soup = BeautifulSoup(response.content, 'lxml')
+ rows = soup.findAll('tr', onmouseover=True)
+
+ # Search on first 20 rows only
+ for row in rows[:20]:
+ a_element_wrapper = row.find('td', {'class': 'tdMovie'})
+ if a_element_wrapper:
+ element = a_element_wrapper.find('a', {'class': 'tooltip'})
+ if element:
+ link = element.get('href')
+ element = row.find('a', href = re.compile(r'.*/search\.php\?t=1\&(memid|u)=.*'))
+ uploader = element.get_text() if element else None
+ logger.info('Found subtitle link %r', link)
+ sub = self.download_archive_and_add_subtitle_files('https://subsunacs.net' + link, language, video)
+ for s in sub:
+ s.uploader = uploader
+ subtitles = subtitles + sub
return subtitles
def list_subtitles(self, video, languages):
@@ -152,11 +171,16 @@ class SubsUnacsProvider(Provider):
subtitles = []
type = 'episode' if isinstance(video, Episode) else 'movie'
for file_name in archiveStream.namelist():
- if file_name.lower().endswith(('.srt', '.sub')):
+ if file_name.lower().endswith(('.srt', '.sub', '.txt')):
+ file_is_txt = file_name.lower().endswith('.txt')
+ if file_is_txt and re.search(r'subsunacs\.net|танете част|прочети|^read ?me|procheti', file_name, re.I):
+ logger.info('Ignore readme txt file %r', file_name)
+ continue
logger.info('Found subtitle file %r', file_name)
subtitle = SubsUnacsSubtitle(language, file_name, type, video, link)
subtitle.content = archiveStream.read(file_name)
- subtitles.append(subtitle)
+ if not file_is_txt or subtitle.is_valid():
+ subtitles.append(subtitle)
return subtitles
def download_archive_and_add_subtitle_files(self, link, language, video ):
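
The archive handling above now also accepts .txt members, but only when they are neither readme-style files nor invalid subtitle content. A condensed sketch of that gate; subtitle.is_valid() is the usual subliminal check that the content actually parses:

    import re

    README_RE = re.compile(r'subsunacs\.net|танете част|прочети|^read ?me|procheti', re.I)

    def keep_archive_member(file_name, subtitle):
        lower = file_name.lower()
        if lower.endswith(('.srt', '.sub')):
            return True
        if lower.endswith('.txt') and not README_RE.search(file_name):
            # .txt files are kept only when they hold a parsable subtitle.
            return subtitle.is_valid()
        return False
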
diff --git a/libs/subliminal_patch/providers/subz.py b/libs/subliminal_patch/providers/subz.py
deleted file mode 100644
index b08676d6a..000000000
--- a/libs/subliminal_patch/providers/subz.py
+++ /dev/null
@@ -1,302 +0,0 @@
-# -*- coding: utf-8 -*-
-from __future__ import absolute_import
-import io
-import json
-import logging
-import os
-
-import rarfile
-import re
-import zipfile
-
-from subzero.language import Language
-from guessit import guessit
-from requests import Session
-
-from subliminal.providers import ParserBeautifulSoup, Provider
-from subliminal import __short_version__
-from subliminal.cache import SHOW_EXPIRATION_TIME, region
-from subliminal.score import get_equivalent_release_groups
-from subliminal.subtitle import SUBTITLE_EXTENSIONS, Subtitle, fix_line_ending, guess_matches
-from subliminal.utils import sanitize, sanitize_release_group
-from subliminal.video import Episode, Movie
-
-logger = logging.getLogger(__name__)
-
-episode_re = re.compile(r'^S(\d{2})E(\d{2})$')
-
-
-class SubzSubtitle(Subtitle):
- """Subz Subtitle."""
- provider_name = 'subz'
-
- def __init__(self, language, page_link, series, season, episode, title, year, version, download_link):
- super(SubzSubtitle, self).__init__(language, page_link=page_link)
- self.series = series
- self.season = season
- self.episode = episode
- self.title = title
- self.year = year
- self.version = version
- self.download_link = download_link
- self.hearing_impaired = None
- self.encoding = 'windows-1253'
-
- @property
- def id(self):
- return self.download_link
-
- def get_matches(self, video):
- matches = set()
- video_type = None
-
- # episode
- if isinstance(video, Episode):
- video_type = 'episode'
- # series name
- if video.series and sanitize(self.series) in (
- sanitize(name) for name in [video.series] + video.alternative_series):
- matches.add('series')
- # season
- if video.season and self.season == video.season:
- matches.add('season')
- # episode
- if video.episode and self.episode == video.episode:
- matches.add('episode')
- # title of the episode
- if video.title and sanitize(self.title) == sanitize(video.title):
- matches.add('title')
- # year
- if video.original_series and self.year is None or video.year and video.year == self.year:
- matches.add('year')
- # movie
- elif isinstance(video, Movie):
- video_type = 'movie'
- # title
- if video.title and (sanitize(self.title) in (
- sanitize(name) for name in [video.title] + video.alternative_titles)):
- matches.add('title')
- # year
- if video.year and self.year == video.year:
- matches.add('year')
-
- # release_group
- if (video.release_group and self.version and
- any(r in sanitize_release_group(self.version)
- for r in get_equivalent_release_groups(sanitize_release_group(video.release_group)))):
- matches.add('release_group')
- # other properties
- matches |= guess_matches(video, guessit(self.version, {'type': video_type}), partial=True)
-
- return matches
-
-
-class SubzProvider(Provider):
- """Subz Provider."""
- languages = {Language(l) for l in ['ell']}
- server_url = 'https://subz.xyz'
- sign_in_url = '/sessions'
- sign_out_url = '/logout'
- search_url = '/typeahead/{}'
- episode_link = '/series/{show_id}/seasons/{season:d}/episodes/{episode:d}'
- movie_link = '/movies/{}'
- subtitle_class = SubzSubtitle
-
- def __init__(self):
- self.logged_in = False
- self.session = None
-
- def initialize(self):
- self.session = Session()
- self.session.headers['User-Agent'] = 'Subliminal/{}'.format(__short_version__)
-
- def terminate(self):
- self.session.close()
-
- def get_show_links(self, title, year=None, is_episode=True):
- """Get the matching show links for `title` and `year`.
-
- First search in the result of :meth:`_get_show_suggestions`.
-
- :param title: show title.
- :param year: year of the show, if any.
- :type year: int
- :param is_episode: if the search is for episode.
- :type is_episode: bool
- :return: the show links, if found.
- :rtype: list of str
-
- """
- title = sanitize(title)
- suggestions = self._get_suggestions(title, is_episode)
-
- show_links = []
- for suggestion in suggestions:
- if sanitize(suggestion['title']) == title or \
- (year and sanitize(suggestion['title']) == '{title} {year}'.format(title=title, year=year)):
- logger.debug('Getting show id')
- show_links.append(suggestion['link'].split('/')[-1])
-
- return show_links
-
- @region.cache_on_arguments(expiration_time=SHOW_EXPIRATION_TIME, should_cache_fn=lambda value: value)
- def _get_suggestions(self, title, is_episode=True):
- """Search the show or movie id from the `title`.
-
- :param str title: title of the show.
- :param is_episode: if the search is for episode.
- :type is_episode: bool
- :return: the show suggestions found.
- :rtype: list of dict
-
- """
- # make the search
- logger.info('Searching show ids with %r', title)
- r = self.session.get(self.server_url + self.search_url.format(title), timeout=10)
- r.raise_for_status()
-
- if not r.content:
- logger.debug('No data returned from provider')
- return []
-
- show_type = 'series' if is_episode else 'movie'
- parsed_suggestions = [s for s in json.loads(r.text) if 'type' in s and s['type'] == show_type]
- logger.debug('Found suggestions: %r', parsed_suggestions)
-
- return parsed_suggestions
-
- def query(self, show_id, series, season, episode, title):
- # get the season list of the show
- logger.info('Getting the subtitle list of show id %s', show_id)
- is_episode = False
- if all((show_id, season, episode)):
- is_episode = True
- page_link = self.server_url + self.episode_link.format(show_id=show_id, season=season, episode=episode)
- elif all((show_id, title)):
- page_link = self.server_url + self.movie_link.format(show_id)
- else:
- return []
-
- r = self.session.get(page_link, timeout=10)
- if r.status_code == 404:
- return []
-
- r.raise_for_status()
-
- if not r.content:
- logger.debug('No data returned from provider')
- return []
-
- soup = ParserBeautifulSoup(r.content, ['lxml', 'html.parser'])
-
- year = None
- if not is_episode:
- year = int(soup.select_one('span.year').text)
-
- subtitles = []
- # loop over episode rows
- for subs_tag in soup.select('div[id="subtitles"] tr[data-id]'):
- # read common info
- version = subs_tag.find('td', {'class': 'name'}).text
- download_link = subs_tag.find('a', {'class': 'btn-success'})['href'].strip('\'')
-
- # read the episode info
- if is_episode:
- episode_numbers = soup.select_one('#summary-wrapper > div.container.summary span.main-title-sxe').text
- season = None
- episode = None
- matches = episode_re.match(episode_numbers.strip())
- if matches:
- season = int(matches.group(1))
- episode = int(matches.group(2))
-
- series = soup.select_one('#summary-wrapper > div.summary h2 > a').string.strip()
- title = soup.select_one('#summary-wrapper > div.container.summary span.main-title').text
-
- subtitle = self.subtitle_class(Language.fromalpha2('el'), page_link, series, season, episode, title,
- year, version, download_link)
- # read the movie info
- else:
- title = str(soup.select_one('#summary-wrapper > div.summary h1').contents[0]).strip()
- subtitle = self.subtitle_class(Language.fromalpha2('el'), page_link, None, None, None, title, year,
- version, download_link)
-
- logger.debug('Found subtitle %r', subtitle)
- subtitles.append(subtitle)
-
- return subtitles
-
- def list_subtitles(self, video, languages):
- # lookup show_id
- if isinstance(video, Episode):
- titles = [video.series] + video.alternative_series
- elif isinstance(video, Movie):
- titles = [video.title] + video.alternative_titles
- else:
- titles = []
-
- show_links = None
- for title in titles:
- show_links = self.get_show_links(title, video.year, isinstance(video, Episode))
- if show_links is not None and len(show_links) > 0:
- break
-
- subtitles = []
- # query for subtitles with the show_id
- for show_links in show_links:
- if isinstance(video, Episode):
- subtitles += [s for s in self.query(show_links, video.series, video.season, video.episode, video.title)
- if s.language in languages and s.season == video.season and s.episode == video.episode]
- elif isinstance(video, Movie):
- subtitles += [s for s in self.query(show_links, None, None, None, video.title)
- if s.language in languages and s.year == video.year]
-
- return subtitles
-
- def download_subtitle(self, subtitle):
- if isinstance(subtitle, SubzSubtitle):
- # download the subtitle
- logger.info('Downloading subtitle %r', subtitle)
- r = self.session.get(subtitle.download_link, headers={'Referer': subtitle.page_link}, timeout=10)
- r.raise_for_status()
-
- if not r.content:
- logger.debug('Unable to download subtitle. No data returned from provider')
- return
-
- archive = _get_archive(r.content)
-
- subtitle_content = _get_subtitle_from_archive(archive)
- if subtitle_content:
- subtitle.content = fix_line_ending(subtitle_content)
- else:
- logger.debug('Could not extract subtitle from %r', archive)
-
-
-def _get_archive(content):
- # open the archive
- archive_stream = io.BytesIO(content)
- archive = None
- if rarfile.is_rarfile(archive_stream):
- logger.debug('Identified rar archive')
- archive = rarfile.RarFile(archive_stream)
- elif zipfile.is_zipfile(archive_stream):
- logger.debug('Identified zip archive')
- archive = zipfile.ZipFile(archive_stream)
-
- return archive
-
-
-def _get_subtitle_from_archive(archive):
- for name in archive.namelist():
- # discard hidden files
- if os.path.split(name)[-1].startswith('.'):
- continue
-
- # discard non-subtitle files
- if not name.lower().endswith(SUBTITLE_EXTENSIONS):
- continue
-
- return archive.read(name)
-
- return None
diff --git a/libs/subliminal_patch/providers/wizdom.py b/libs/subliminal_patch/providers/wizdom.py
new file mode 100644
index 000000000..44808a3e8
--- /dev/null
+++ b/libs/subliminal_patch/providers/wizdom.py
@@ -0,0 +1,210 @@
+# -*- coding: utf-8 -*-
+import io
+import logging
+import os
+import zipfile
+
+from subzero.language import Language
+from guessit import guessit
+from requests import Session
+
+from subliminal_patch.providers import Provider
+from subliminal_patch.subtitle import Subtitle
+from subliminal.subtitle import fix_line_ending
+from subliminal import __short_version__
+from subliminal.cache import SHOW_EXPIRATION_TIME, region
+from subliminal_patch.exceptions import ProviderError
+from subliminal_patch.subtitle import guess_matches
+from subliminal_patch.utils import sanitize
+from subliminal.video import Episode, Movie
+
+logger = logging.getLogger(__name__)
+
+
+class WizdomSubtitle(Subtitle):
+ """Wizdom Subtitle."""
+ provider_name = 'wizdom'
+
+ def __init__(self, language, hearing_impaired, page_link, series, season, episode, title, imdb_id, subtitle_id,
+ release):
+ super(WizdomSubtitle, self).__init__(language, hearing_impaired, page_link)
+ self.series = series
+ self.season = season
+ self.episode = episode
+ self.title = title
+ self.imdb_id = imdb_id
+ self.subtitle_id = subtitle_id
+ self.release = release
+
+ @property
+ def id(self):
+ return str(self.subtitle_id)
+
+ def get_matches(self, video):
+ matches = set()
+
+ # episode
+ if isinstance(video, Episode):
+ # series
+ if video.series and (sanitize(self.title) in (
+ sanitize(name) for name in [video.series] + video.alternative_series)):
+ matches.add('series')
+ # season
+ if video.season and self.season == video.season:
+ matches.add('season')
+ # episode
+ if video.episode and self.episode == video.episode:
+ matches.add('episode')
+ # imdb_id
+ if video.series_imdb_id and self.imdb_id == video.series_imdb_id:
+ matches.add('series_imdb_id')
+ # guess
+ matches |= guess_matches(video, guessit(self.release, {'type': 'episode'}), partial=True)
+ # movie
+ elif isinstance(video, Movie):
+ # guess
+ matches |= guess_matches(video, guessit(self.release, {'type': 'movie'}), partial=True)
+
+ # title
+ if video.title and (sanitize(self.title) in (
+ sanitize(name) for name in [video.title] + video.alternative_titles)):
+ matches.add('title')
+
+ return matches
+
+
+class WizdomProvider(Provider):
+ """Wizdom Provider."""
+ languages = {Language(l) for l in ['heb']}
+ server_url = 'wizdom.xyz'
+
+ _tmdb_api_key = 'a51ee051bcd762543373903de296e0a3'
+
+ def __init__(self):
+ self.session = None
+
+ def initialize(self):
+ self.session = Session()
+ self.session.headers['User-Agent'] = 'Subliminal/{}'.format(__short_version__)
+
+ def terminate(self):
+ self.session.close()
+
+ @region.cache_on_arguments(expiration_time=SHOW_EXPIRATION_TIME)
+ def _search_imdb_id(self, title, year, is_movie):
+ """Search the IMDB ID for the given `title` and `year`.
+
+ :param str title: title to search for.
+ :param int year: year to search for (or 0 if not relevant).
+ :param bool is_movie: If True, IMDB ID will be searched for in TMDB instead of Wizdom.
+ :return: the IMDB ID for the given title and year (or None if not found).
+ :rtype: str
+
+ """
+ # make the search
+ logger.info('Searching IMDB ID for %r%r', title, '' if not year else ' ({})'.format(year))
+ category = 'movie' if is_movie else 'tv'
+ title = title.replace('\'', '')
+ # get TMDB ID first
+ r = self.session.get('http://api.tmdb.org/3/search/{}?api_key={}&query={}{}&language=en'.format(
+ category, self._tmdb_api_key, title, '' if not year else '&year={}'.format(year)))
+ r.raise_for_status()
+ tmdb_results = r.json().get('results')
+ if tmdb_results:
+ tmdb_id = tmdb_results[0].get('id')
+ if tmdb_id:
+ # get actual IMDB ID from TMDB
+ r = self.session.get('http://api.tmdb.org/3/{}/{}{}?api_key={}&language=en'.format(
+ category, tmdb_id, '' if is_movie else '/external_ids', self._tmdb_api_key))
+ r.raise_for_status()
+ return str(r.json().get('imdb_id', '')) or None
+ return None
+
+ def query(self, title, season=None, episode=None, year=None, filename=None, imdb_id=None):
+ # search for the IMDB ID if needed.
+ is_movie = not (season and episode)
+ imdb_id = imdb_id or self._search_imdb_id(title, year, is_movie)
+ if not imdb_id:
+ return {}
+
+ # search
+ logger.debug('Using IMDB ID %r', imdb_id)
+ url = 'http://json.{}/{}.json'.format(self.server_url, imdb_id)
+ page_link = 'http://{}/#/{}/{}'.format(self.server_url, 'movies' if is_movie else 'series', imdb_id)
+
+ # get the list of subtitles
+ logger.debug('Getting the list of subtitles')
+ r = self.session.get(url)
+ r.raise_for_status()
+ try:
+ results = r.json()
+ except ValueError:
+ return {}
+
+ # filter irrelevant results
+ if not is_movie:
+ results = results.get('subs', [])
+ # there are two formats of result jsons - seasons list and seasons dict
+ if isinstance(results, list):
+ results = results[season] if len(results) >= season else {}
+ else:
+ results = results.get(str(season), {})
+ results = results.get(str(episode), [])
+ else:
+ results = results.get('subs', [])
+
+ # loop over results
+ subtitles = {}
+ for result in results:
+ language = Language('heb')
+ hearing_impaired = False
+ subtitle_id = result['id']
+ release = result['version']
+
+ # otherwise create it
+ subtitle = WizdomSubtitle(language, hearing_impaired, page_link, title, season, episode, title, imdb_id,
+ subtitle_id, release)
+ logger.debug('Found subtitle %r', subtitle)
+ subtitles[subtitle_id] = subtitle
+
+ return subtitles.values()
+
+ def list_subtitles(self, video, languages):
+ season = episode = None
+ year = video.year
+ filename = video.name
+ imdb_id = video.imdb_id
+
+ if isinstance(video, Episode):
+ titles = [video.series] + video.alternative_series
+ season = video.season
+ episode = video.episode
+ imdb_id = video.series_imdb_id
+ else:
+ titles = [video.title] + video.alternative_titles
+
+ for title in titles:
+ subtitles = [s for s in
+ self.query(title, season, episode, year, filename, imdb_id) if s.language in languages]
+ if subtitles:
+ return subtitles
+
+ return []
+
+ def download_subtitle(self, subtitle):
+ # download
+ url = 'http://zip.{}/{}.zip'.format(self.server_url, subtitle.subtitle_id)
+ r = self.session.get(url, headers={'Referer': subtitle.page_link}, timeout=10)
+ r.raise_for_status()
+
+ if len(r.content) == 0:
+ return
+
+ # open the zip
+ with zipfile.ZipFile(io.BytesIO(r.content)) as zf:
+ # remove some filenames from the namelist
+ namelist = [n for n in zf.namelist() if os.path.splitext(n)[1] in ['.srt', '.sub']]
+ if len(namelist) > 1:
+ raise ProviderError('More than one file to unzip')
+
+ subtitle.content = fix_line_ending(zf.read(namelist[0]))
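
For reference, _search_imdb_id() above resolves the title through TMDB before hitting the wizdom JSON API. A condensed sketch of that lookup, mirroring the endpoints and the API key hard-coded in the provider (movies expose imdb_id directly, series go through /external_ids):

    import requests

    TMDB_KEY = 'a51ee051bcd762543373903de296e0a3'  # same key as the provider above

    def imdb_id_from_tmdb(title, year=None, is_movie=True):
        category = 'movie' if is_movie else 'tv'
        params = {'api_key': TMDB_KEY, 'query': title.replace("'", ''), 'language': 'en'}
        if year:
            params['year'] = year
        results = requests.get('http://api.tmdb.org/3/search/' + category, params=params).json().get('results')
        if not results:
            return None
        suffix = '' if is_movie else '/external_ids'
        details = requests.get('http://api.tmdb.org/3/{}/{}{}'.format(category, results[0]['id'], suffix),
                               params={'api_key': TMDB_KEY, 'language': 'en'}).json()
        return details.get('imdb_id') or None
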
diff --git a/libs/subliminal_patch/providers/yavkanet.py b/libs/subliminal_patch/providers/yavkanet.py
new file mode 100644
index 000000000..d695245ee
--- /dev/null
+++ b/libs/subliminal_patch/providers/yavkanet.py
@@ -0,0 +1,179 @@
+# -*- coding: utf-8 -*-
+from __future__ import absolute_import
+import logging
+import re
+import io
+import os
+from random import randint
+from bs4 import BeautifulSoup
+from zipfile import ZipFile, is_zipfile
+from rarfile import RarFile, is_rarfile
+from requests import Session
+from guessit import guessit
+from subliminal_patch.providers import Provider
+from subliminal_patch.subtitle import Subtitle
+from subliminal_patch.utils import sanitize
+from subliminal.exceptions import ProviderError
+from subliminal.utils import sanitize_release_group
+from subliminal.subtitle import guess_matches
+from subliminal.video import Episode, Movie
+from subliminal.subtitle import fix_line_ending
+from subzero.language import Language
+from .utils import FIRST_THOUSAND_OR_SO_USER_AGENTS as AGENT_LIST
+
+logger = logging.getLogger(__name__)
+
+class YavkaNetSubtitle(Subtitle):
+ """YavkaNet Subtitle."""
+ provider_name = 'yavkanet'
+
+ def __init__(self, language, filename, type, video, link):
+ super(YavkaNetSubtitle, self).__init__(language)
+ self.language = language
+ self.filename = filename
+ self.page_link = link
+ self.type = type
+ self.video = video
+ self.release_info = os.path.splitext(filename)[0]
+
+ @property
+ def id(self):
+ return self.filename
+
+ def make_picklable(self):
+ self.content = None
+ return self
+
+ def get_matches(self, video):
+ matches = set()
+
+ video_filename = video.name
+ video_filename = os.path.basename(video_filename)
+ video_filename, _ = os.path.splitext(video_filename)
+ video_filename = sanitize_release_group(video_filename)
+
+ subtitle_filename = self.filename
+ subtitle_filename = os.path.basename(subtitle_filename)
+ subtitle_filename, _ = os.path.splitext(subtitle_filename)
+ subtitle_filename = sanitize_release_group(subtitle_filename)
+
+ if video_filename == subtitle_filename:
+ matches.add('hash')
+
+ matches |= guess_matches(video, guessit(self.filename, {'type': self.type}))
+ return matches
+
+
+class YavkaNetProvider(Provider):
+ """YavkaNet Provider."""
+ languages = {Language(l) for l in [
+ 'bul', 'eng', 'rus', 'spa', 'ita'
+ ]}
+
+ def initialize(self):
+ self.session = Session()
+ self.session.headers['User-Agent'] = AGENT_LIST[randint(0, len(AGENT_LIST) - 1)]
+ self.session.headers["Accept"] = "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"
+ self.session.headers["Accept-Language"] = "en-US,en;q=0.5"
+ self.session.headers["Accept-Encoding"] = "gzip, deflate, br"
+ self.session.headers["DNT"] = "1"
+ self.session.headers["Connection"] = "keep-alive"
+ self.session.headers["Upgrade-Insecure-Requests"] = "1"
+ self.session.headers["Cache-Control"] = "max-age=0"
+
+ def terminate(self):
+ self.session.close()
+
+ def query(self, language, video):
+ subtitles = []
+ isEpisode = isinstance(video, Episode)
+ params = {
+ 's': '',
+ 'y': '',
+ 'u': '',
+ 'l': 'BG',
+ 'i': ''
+ }
+
+ if isEpisode:
+ params['s'] = "%s s%02de%02d" % (sanitize(video.series, {'\''}), video.season, video.episode)
+ else:
+ params['y'] = video.year
+ params['s'] = sanitize(video.title, {'\''})
+
+ if language == 'en' or language == 'eng':
+ params['l'] = 'EN'
+ elif language == 'ru' or language == 'rus':
+ params['l'] = 'RU'
+ elif language == 'es' or language == 'spa':
+ params['l'] = 'ES'
+ elif language == 'it' or language == 'ita':
+ params['l'] = 'IT'
+
+ logger.info('Searching subtitle %r', params)
+ response = self.session.get('http://yavka.net/subtitles.php', params=params, allow_redirects=False, timeout=10, headers={
+ 'Referer': 'http://yavka.net/',
+ })
+
+ response.raise_for_status()
+
+ if response.status_code != 200:
+ logger.debug('No subtitles found')
+ return subtitles
+
+ soup = BeautifulSoup(response.content, 'lxml')
+ rows = soup.findAll('tr', {'class': 'info'})
+
+ # Search on first 20 rows only
+ for row in rows[:20]:
+ element = row.find('a', {'class': 'selector'})
+ if element:
+ link = element.get('href')
+ element = row.find('a', {'class': 'click'})
+ uploader = element.get_text() if element else None
+ logger.info('Found subtitle link %r', link)
+ sub = self.download_archive_and_add_subtitle_files('http://yavka.net/' + link, language, video)
+ for s in sub:
+ s.uploader = uploader
+ subtitles = subtitles + sub
+ return subtitles
+
+ def list_subtitles(self, video, languages):
+ return [s for l in languages for s in self.query(l, video)]
+
+ def download_subtitle(self, subtitle):
+ if subtitle.content:
+ pass
+ else:
+ seeking_subtitle_file = subtitle.filename
+ arch = self.download_archive_and_add_subtitle_files(subtitle.page_link, subtitle.language, subtitle.video)
+ for s in arch:
+ if s.filename == seeking_subtitle_file:
+ subtitle.content = s.content
+
+ def process_archive_subtitle_files(self, archiveStream, language, video, link):
+ subtitles = []
+ type = 'episode' if isinstance(video, Episode) else 'movie'
+ for file_name in archiveStream.namelist():
+ if file_name.lower().endswith(('.srt', '.sub')):
+ logger.info('Found subtitle file %r', file_name)
+ subtitle = YavkaNetSubtitle(language, file_name, type, video, link)
+ subtitle.content = archiveStream.read(file_name)
+ subtitles.append(subtitle)
+ return subtitles
+
+ def download_archive_and_add_subtitle_files(self, link, language, video):
+ logger.info('Downloading subtitle %r', link)
+ request = self.session.get(link, headers={
+ 'Referer': 'http://yavka.net/subtitles.php'
+ })
+ request.raise_for_status()
+
+ archive_stream = io.BytesIO(request.content)
+ if is_rarfile(archive_stream):
+ return self.process_archive_subtitle_files(RarFile(archive_stream), language, video, link)
+ elif is_zipfile(archive_stream):
+ return self.process_archive_subtitle_files(ZipFile(archive_stream), language, video, link)
+ else:
+ raise ValueError('Not a valid archive')
+
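
download_archive_and_add_subtitle_files() above probes the downloaded bytes and dispatches to RarFile or ZipFile. The same dispatch as a small standalone helper:

    import io
    from zipfile import ZipFile, is_zipfile
    from rarfile import RarFile, is_rarfile

    def open_archive(content):
        # Probe the in-memory stream and return the matching archive object,
        # rejecting anything that is neither rar nor zip.
        stream = io.BytesIO(content)
        if is_rarfile(stream):
            return RarFile(stream)
        if is_zipfile(stream):
            return ZipFile(stream)
        raise ValueError('Not a valid archive')
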
diff --git a/libs/subliminal_patch/providers/zimuku.py b/libs/subliminal_patch/providers/zimuku.py
index 0dad58cde..5e3b917e3 100644
--- a/libs/subliminal_patch/providers/zimuku.py
+++ b/libs/subliminal_patch/providers/zimuku.py
@@ -4,6 +4,13 @@ import io
import logging
import os
import zipfile
+import re
+import copy
+
+try:
+ from urlparse import urljoin
+except ImportError:
+ from urllib.parse import urljoin
import rarfile
from subzero.language import Language
@@ -13,7 +20,12 @@ from six import text_type
from subliminal import __short_version__
from subliminal.providers import ParserBeautifulSoup, Provider
-from subliminal.subtitle import SUBTITLE_EXTENSIONS, Subtitle, fix_line_ending, guess_matches
+from subliminal.subtitle import (
+ SUBTITLE_EXTENSIONS,
+ Subtitle,
+ fix_line_ending,
+ guess_matches,
+)
from subliminal.video import Episode, Movie
logger = logging.getLogger(__name__)
@@ -21,43 +33,50 @@ logger = logging.getLogger(__name__)
class ZimukuSubtitle(Subtitle):
"""Zimuku Subtitle."""
- provider_name = 'zimuku'
- def __init__(self, language, page_link, version, download_link):
+ provider_name = "zimuku"
+
+ def __init__(self, language, page_link, version, session):
super(ZimukuSubtitle, self).__init__(language, page_link=page_link)
self.version = version
- self.download_link = download_link
- self.hearing_impaired = None
- self.encoding = 'utf-8'
+ self.hearing_impaired = False
+ self.encoding = "utf-8"
+ self.session = session
@property
def id(self):
- return self.download_link
+ return self.version
def get_matches(self, video):
matches = set()
# episode
if isinstance(video, Episode):
+ # always make year a match
+ info = guessit(self.version, {"type": "episode"})
+ info["year"] = video.year
# other properties
- matches |= guess_matches(video, guessit(self.version, {'type': 'episode'}), partial=True)
+ matches |= guess_matches(video, info, partial=True)
# movie
elif isinstance(video, Movie):
# other properties
- matches |= guess_matches(video, guessit(self.version, {'type': 'movie'}), partial=True)
+ matches |= guess_matches(
+ video, guessit(self.version, {"type": "movie"}), partial=True
+ )
return matches
class ZimukuProvider(Provider):
"""Zimuku Provider."""
- languages = {Language(l) for l in ['zho', 'eng']}
- server_url = 'http://www.zimuku.la'
- search_url = '/search?q={}'
- download_url = 'http://www.zimuku.la/'
-
- UserAgent = 'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/6.0)'
+ languages = {Language(l) for l in ["zho", "eng"]}
+
+ server_url = "http://www.zimuku.la"
+ search_url = "/search?q={}"
+ download_url = "http://www.zimuku.la/"
+
+ UserAgent = "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/6.0)"
subtitle_class = ZimukuSubtitle
@@ -66,19 +85,52 @@ class ZimukuProvider(Provider):
def initialize(self):
self.session = Session()
- self.session.headers['User-Agent'] = 'Subliminal/{}'.format(__short_version__)
+ self.session.headers["User-Agent"] = "Subliminal/{}".format(__short_version__)
def terminate(self):
self.session.close()
+ def _parse_episode_page(self, link):
+ r = self.session.get(link)
+ bs_obj = ParserBeautifulSoup(
+ r.content.decode("utf-8", "ignore"), ["html.parser"]
+ )
+ subs_body = bs_obj.find("div", class_="subs box clearfix").find("tbody")
+ subs = []
+ for sub in subs_body.find_all("tr"):
+ a = sub.find("a")
+ name = _extract_name(a.text)
+ name = os.path.splitext(name)[
+ 0
+ ] # remove ext because it can be an archive type
+
+ language = Language("eng")
+ for img in sub.find("td", class_="tac lang").find_all("img"):
+ if (
+ "hongkong" in img.attrs["src"]
+ or "china" in img.attrs["src"]
+ or "jollyroger" in img.attrs["src"]
+ ):
+ language = Language("zho")
+ break
+ sub_page_link = urljoin(self.server_url, a.attrs["href"])
+ backup_session = copy.deepcopy(self.session)
+ backup_session.headers["Referer"] = link
+
+ subs.append(
+ self.subtitle_class(language, sub_page_link, name, backup_session)
+ )
+
+ return subs
+
def query(self, keyword, season=None, episode=None, year=None):
params = keyword
- if season and episode:
- params += ' S{season:02d}E{episode:02d}'.format(season=season, episode=episode)
+ if season:
+ params += ".S{season:02d}".format(season=season)
elif year:
- params += ' {:4d}'.format(year)
+ params += " {:4d}".format(year)
- logger.debug('Searching subtitles %r', params)
+ logger.debug("Searching subtitles %r", params)
subtitles = []
search_link = self.server_url + text_type(self.search_url).format(params)
@@ -86,45 +138,33 @@ class ZimukuProvider(Provider):
r.raise_for_status()
if not r.content:
- logger.debug('No data returned from provider')
+ logger.debug("No data returned from provider")
return []
- soup = ParserBeautifulSoup(r.content.decode('utf-8', 'ignore'), ['lxml', 'html.parser'])
-
- for entity in soup.select('div.item.prel.clearfix a:nth-of-type(2)'):
- moviename = entity.text
- entity_url = self.server_url + entity['href']
- logger.debug(entity_url)
- r = self.session.get(entity_url, timeout=30)
- r.raise_for_status()
- logger.debug('looking into ' + entity_url)
-
- soup = ParserBeautifulSoup(r.content.decode('utf-8', 'ignore'), ['lxml', 'html.parser']).find("div", class_="subs box clearfix")
- # loop over subtitles cells
-
- subs = soup.tbody.find_all("tr")
- for sub in subs:
- page_link = '%s%s' % (self.server_url, sub.a.get('href').encode('utf-8'))
- version = sub.a.text.encode('utf-8') or None
- if version is None:
- version = ""
- try:
- td = sub.find("td", class_="tac lang")
- r2 = td.find_all("img")
- langs = [x.get('title').encode('utf-8') for x in r2]
- except:
- langs = '未知'
- name = '%s (%s)' % (version, ",".join(langs))
-
- if ('English' in langs) and not(('简体中文' in langs) or ('繁體中文' in langs)):
- language = Language('eng')
- else:
- language = Language('zho')
- # read the item
- subtitle = self.subtitle_class(language, page_link, version, page_link.replace("detail","dld"))
-
- logger.debug('Found subtitle %r', subtitle)
- subtitles.append(subtitle)
+ soup = ParserBeautifulSoup(
+ r.content.decode("utf-8", "ignore"), ["lxml", "html.parser"]
+ )
+
+ # non-shooter result page
+ if soup.find("div", {"class": "item"}):
+ logger.debug("enter a non-shooter page")
+ for item in soup.find_all("div", {"class": "item"}):
+ title_a = item.find("p", class_="tt clearfix").find("a")
+ if season:
+ title = title_a.text
+ season_cn1 = re.search("第(.*)季", title)
+ if not season_cn1:
+ season_cn1 = "一"
+ else:
+ season_cn1 = season_cn1.group(1).strip()
+ season_cn2 = num_to_cn(str(season))
+ if season_cn1 != season_cn2:
+ continue
+ episode_link = self.server_url + title_a.attrs["href"]
+ new_subs = self._parse_episode_page(episode_link)
+ subtitles += new_subs
+
+ # NOTE: shooter result pages are ignored due to the existence of assrt provider
return subtitles
@@ -140,70 +180,174 @@ class ZimukuProvider(Provider):
# query for subtitles with the show_id
for title in titles:
if isinstance(video, Episode):
- subtitles += [s for s in self.query(title, season=video.season, episode=video.episode,
- year=video.year)
- if s.language in languages]
+ subtitles += [
+ s
+ for s in self.query(
+ title,
+ season=video.season,
+ episode=video.episode,
+ year=video.year,
+ )
+ if s.language in languages
+ ]
elif isinstance(video, Movie):
- subtitles += [s for s in self.query(title, year=video.year)
- if s.language in languages]
+ subtitles += [
+ s
+ for s in self.query(title, year=video.year)
+ if s.language in languages
+ ]
return subtitles
def download_subtitle(self, subtitle):
- if isinstance(subtitle, ZimukuSubtitle):
- # download the subtitle
- logger.info('Downloading subtitle %r', subtitle)
- r = self.session.get(subtitle.download_link, headers={'Referer': subtitle.page_link},
- timeout=30)
- r.raise_for_status()
-
- if not r.content:
- logger.debug('Unable to download subtitle. No data returned from provider')
- return
-
- soup = ParserBeautifulSoup(r.content.decode('utf-8', 'ignore'), ['lxml', 'html.parser'])
- links = soup.find("div", {"class":"clearfix"}).find_all('a')
- # TODO: add settings for choice
-
- for down_link in links:
- url = down_link.get('href').encode('utf-8')
- url = self.server_url + url
- r = self.session.get(url, headers={'Referer': subtitle.download_link},
- timeout=30)
- r.raise_for_status()
+ def _get_archive_download_link(session, sub_page_link):
+ r = session.get(sub_page_link)
+ bs_obj = ParserBeautifulSoup(
+ r.content.decode("utf-8", "ignore"), ["html.parser"]
+ )
+ down_page_link = bs_obj.find("a", {"id": "down1"}).attrs["href"]
+ down_page_link = urljoin(sub_page_link, down_page_link)
+ r = session.get(down_page_link)
+ bs_obj = ParserBeautifulSoup(
+ r.content.decode("utf-8", "ignore"), ["html.parser"]
+ )
+ download_link = bs_obj.find("a", {"rel": "nofollow"})
+ download_link = download_link.attrs["href"]
+ download_link = urljoin(sub_page_link, download_link)
+ return download_link
+
+ # download the subtitle
+ logger.info("Downloading subtitle %r", subtitle)
+ self.session = subtitle.session
+ download_link = _get_archive_download_link(self.session, subtitle.page_link)
+ r = self.session.get(download_link, timeout=30)
+ r.raise_for_status()
+ filename = r.headers["Content-Disposition"]
- if len(r.content) > 1024:
+ if not r.content:
+ logger.debug("Unable to download subtitle. No data returned from provider")
+ return
+
+ archive_stream = io.BytesIO(r.content)
+ archive = None
+ if rarfile.is_rarfile(archive_stream):
+ logger.debug("Identified rar archive")
+ if ".rar" not in filename:
+ logger.debug(
+ ".rar should be in the downloaded file name: {}".format(filename)
+ )
+ return
+ archive = rarfile.RarFile(archive_stream)
+ subtitle_content = _get_subtitle_from_archive(archive)
+ elif zipfile.is_zipfile(archive_stream):
+ logger.debug("Identified zip archive")
+ if ".zip" not in filename:
+ logger.debug(
+ ".zip should be in the downloaded file name: {}".format(filename)
+ )
+ return
+ archive = zipfile.ZipFile(archive_stream)
+ subtitle_content = _get_subtitle_from_archive(archive)
+ else:
+ is_sub = ""
+ for sub_ext in SUBTITLE_EXTENSIONS:
+ if sub_ext in filename:
+ is_sub = sub_ext
break
+ if not is_sub:
+ logger.debug(
+ "unknown subtitle ext int downloaded file name: {}".format(filename)
+ )
+ return
+ logger.debug("Identified {} file".format(is_sub))
+ subtitle_content = r.content
- archive_stream = io.BytesIO(r.content)
- archive = None
- if rarfile.is_rarfile(archive_stream):
- logger.debug('Identified rar archive')
- archive = rarfile.RarFile(archive_stream)
- subtitle_content = _get_subtitle_from_archive(archive)
- elif zipfile.is_zipfile(archive_stream):
- logger.debug('Identified zip archive')
- archive = zipfile.ZipFile(archive_stream)
- subtitle_content = _get_subtitle_from_archive(archive)
- else:
- subtitle_content = r.content
-
- if subtitle_content:
- subtitle.content = fix_line_ending(subtitle_content)
- else:
- logger.debug('Could not extract subtitle from %r', archive)
+ if subtitle_content:
+ subtitle.content = fix_line_ending(subtitle_content)
+ else:
+ logger.debug("Could not extract subtitle from %r", archive)
def _get_subtitle_from_archive(archive):
- for name in archive.namelist():
+ extract_subname, max_score = "", -1
+
+ for subname in archive.namelist():
# discard hidden files
- if os.path.split(name)[-1].startswith('.'):
+ if os.path.split(subname)[-1].startswith("."):
continue
# discard non-subtitle files
- if not name.lower().endswith(SUBTITLE_EXTENSIONS):
+ if not subname.lower().endswith(SUBTITLE_EXTENSIONS):
continue
- return archive.read(name)
-
- return None
+ # prefer ass/ssa subtitles with double languages or simplified chinese
+ score = ("ass" in subname or "ssa" in subname) * 1
+ if "简体" in subname or "chs" in subname or ".gb." in subname:
+ score += 2
+ if "繁体" in subname or "cht" in subname or ".big5." in subname:
+ pass
+ if "chs.eng" in subname or "chs&eng" in subname:
+ score += 2
+ if "中英" in subname or "简英" in subname or "双语" in subname or "简体&英文" in subname:
+ score += 4
+ logger.debug("subtitle {}, score: {}".format(subname, score))
+ if score > max_score:
+ max_score = score
+ extract_subname = subname
+
+ return archive.read(extract_subname) if max_score != -1 else None
+
+
+def _extract_name(name):
+ """ filter out Chinese characters from subtitle names """
+ name, suffix = os.path.splitext(name)
+ c_pattern = "[\u4e00-\u9fff]"
+ e_pattern = "[a-zA-Z]"
+ c_indices = [m.start(0) for m in re.finditer(c_pattern, name)]
+ e_indices = [m.start(0) for m in re.finditer(e_pattern, name)]
+
+ target, discard = e_indices, c_indices
+
+ if len(target) == 0:
+ return ""
+
+ first_target, last_target = target[0], target[-1]
+ first_discard = discard[0] if discard else -1
+ last_discard = discard[-1] if discard else -1
+ if last_discard < first_target:
+ new_name = name[first_target:]
+ elif last_target < first_discard:
+ new_name = name[:first_discard]
+ else:
+ # try to find the maximum continuous part
+ result, start, end = [0, 1], -1, 0
+ while end < len(name):
+ while end not in e_indices and end < len(name):
+ end += 1
+ if end == len(name):
+ break
+ start = end
+ while end not in c_indices and end < len(name):
+ end += 1
+ if end - start > result[1] - result[0]:
+ result = [start, end]
+ logger.debug(result)
+ start = end
+ end += 1
+ new_name = name[result[0] : result[1]]
+ new_name = new_name.strip() + suffix
+ return new_name
+
+
+def num_to_cn(number):
+ """ convert numbers(1-99) to Chinese """
+ assert number.isdigit() and 1 <= int(number) <= 99
+
+ trans_map = {n: c for n, c in zip(("123456789"), ("一二三四五六七八九"))}
+
+ if len(number) == 1:
+ return trans_map[number]
+ else:
+ part1 = "十" if number[0] == "1" else trans_map[number[0]] + "十"
+ part2 = trans_map[number[1]] if number[1] != "0" else ""
+ return part1 + part2
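
A few expected values for num_to_cn() above, useful as a quick sanity check, assuming the module is importable as subliminal_patch.providers.zimuku:

    from subliminal_patch.providers.zimuku import num_to_cn

    # num_to_cn converts 1-99 to Chinese numerals.
    assert num_to_cn("3") == "三"
    assert num_to_cn("10") == "十"
    assert num_to_cn("12") == "十二"
    assert num_to_cn("45") == "四十五"
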
diff --git a/libs/subliminal_patch/subtitle.py b/libs/subliminal_patch/subtitle.py
index 43e9a9716..ce89e74d3 100644
--- a/libs/subliminal_patch/subtitle.py
+++ b/libs/subliminal_patch/subtitle.py
@@ -54,6 +54,7 @@ class Subtitle(Subtitle_):
is_pack = False
asked_for_release_group = None
asked_for_episode = None
+ uploader = None # string - uploader username
pack_data = None
_guessed_encoding = None
@@ -279,6 +280,12 @@ class Subtitle(Subtitle_):
@classmethod
def pysubs2_to_unicode(cls, sub, format="srt"):
+ """
+ this is a modified version of pysubs2.SubripFormat.to_file with special handling for drawing tags in ASS
+ :param sub:
+ :param format:
+ :return:
+ """
def ms_to_timestamp(ms, mssep=","):
"""Convert ms to 'HH:MM:SS,mmm'"""
# XXX throw on overflow/underflow?
@@ -293,6 +300,9 @@ class Subtitle(Subtitle_):
fragment = fragment.replace(r"\h", u" ")
fragment = fragment.replace(r"\n", u"\n")
fragment = fragment.replace(r"\N", u"\n")
+ if sty.drawing:
+ raise pysubs2.ContentNotUsable
+
if format == "srt":
if sty.italic:
fragment = u"<i>%s</i>" % fragment
@@ -324,7 +334,10 @@ class Subtitle(Subtitle_):
for i, line in enumerate(visible_lines, 1):
start = ms_to_timestamp(line.start, mssep=mssep)
end = ms_to_timestamp(line.end, mssep=mssep)
- text = prepare_text(line.text, sub.styles.get(line.style, SSAStyle.DEFAULT_STYLE))
+ try:
+ text = prepare_text(line.text, sub.styles.get(line.style, SSAStyle.DEFAULT_STYLE))
+ except pysubs2.ContentNotUsable:
+ continue
out.append(u"%d\n" % i)
out.append(u"%s --> %s\n" % (start, end))
diff --git a/libs/version.txt b/libs/version.txt
index d4c869b982..5e35b7131 100644
--- a/libs/version.txt
+++ b/libs/version.txt
@@ -12,11 +12,11 @@ gevent-websocker=0.10.1
gitpython=2.1.9
guessit=2.1.4
guess_language-spirit=0.5.3
+knowit=0.3.0-dev
peewee=3.9.6
py-pretty=1
pycountry=18.2.23
pyga=2.6.1
-pyprobe=0.1.2 <-- modified version: do not update!!!
pysrt=1.1.1
pytz=2018.4
rarfile=3.0
diff --git a/libs/osdefs.h b/libs2/osdefs.h
index d678ca3b4..d678ca3b4 100644
--- a/libs/osdefs.h
+++ b/libs2/osdefs.h
diff --git a/libs/winreparse.h b/libs2/winreparse.h
index 66f7775dd..66f7775dd 100644
--- a/libs/winreparse.h
+++ b/libs2/winreparse.h
diff --git a/views/episodes.tpl b/views/episodes.tpl
index 7a0c829d8..1fb735894 100644
--- a/views/episodes.tpl
+++ b/views/episodes.tpl
@@ -416,11 +416,11 @@
<thead>
<tr>
<th style="text-align: left;">Score:</th>
- <th style="text-align: left;">Lang.:</th>
- <th style="text-align: left;">HI:</th>
+ <th style="text-align: left;"></th>
<th style="text-align: left;">Provider:</th>
<th style="text-align: left;">Matching:</th>
<th style="text-align: left;">Releases:</th>
+ <th style="text-align: left;">Uploader:</th>
<th></th>
</tr>
</thead>
@@ -694,14 +694,19 @@
},
{ data: null,
render: function ( data, type, row ) {
+ let lng = data.language;
if ( data.language === "pt" && is_pb === true && is_pt === false) {
- return 'pb'
- } else {
- return data.language
+ lng = 'pb'
}
+
+ let text = '<div class="ui tiny label" style="margin-bottom: 2px;">' + lng.toUpperCase() + '</div>';
+ if (data.hearing_impaired == "True") {
+ text += '<div class="ui tiny inverted label" style="background-color: #313335;">HI</div>';
+ }
+
+ return text;
}
},
- { data: 'hearing_impaired' },
{ data: null,
render: function ( data, type, row ) {
return '<a href="'+data.url+'" target="_blank">'+data.provider+'</a>';
@@ -734,6 +739,7 @@
const array_release_info = data.release_info;
let i;
let text = '<div class="ui fluid accordion"><div class="title"><i class="dropdown icon"></i>...</div><div class="content season">';
+ if (array_release_info.length <= 1) text = '<div><div class="content season">';
for (i = 0; i < array_release_info.length; i++) {
text += '<div class="ui tiny label" style="margin-bottom: 2px;">' + array_release_info[i] + '</div>';
}
@@ -741,6 +747,11 @@
return text;
}
},
+ { data: 'uploader',
+ render: function ( data, type, row ) {
+ return '<div class="ui tiny label">' + data + '</div>';
+ }
+ },
{ data: null,
render: function ( data, type, row ) {
return '<a href="#" class="ui tiny label" onclick="manual_get(this, episodePath, sceneName, hi, sonarrSeriesId, sonarrEpisodeId)" data-subtitle="'+data.subtitle+'" data-provider="'+data.provider+'" data-language="'+data.language+'"><i class="ui download icon" style="margin-right:0px" ></i></a>';
diff --git a/views/menu.tpl b/views/menu.tpl
index ed296bd53..db33bde24 100644
--- a/views/menu.tpl
+++ b/views/menu.tpl
@@ -134,7 +134,7 @@
</i>
System
</a>
- <a id="donate" class="item" href="https://beerpay.io/morpheus65535/bazarr">
+ <a id="donate" class="item" href="https://www.paypal.com/cgi-bin/webscr?cmd=_s-xclick&hosted_button_id=XHHRWXT9YB7WE&source=url">
<i class="red heart icon"></i>
Donate
</a>
@@ -203,7 +203,7 @@
</i>
System
</a>
- <a id="donate" class="item" href="https://beerpay.io/morpheus65535/bazarr">
+ <a id="donate" class="item" href="https://www.paypal.com/cgi-bin/webscr?cmd=_s-xclick&hosted_button_id=XHHRWXT9YB7WE&source=url">
<i class="red heart icon"></i>
Donate
</a>
@@ -237,7 +237,7 @@
<i class="python icon"></i>
<div class="content">
<div class="header">Python deprecation warning</div>
- <p><b><u>Bazarr won't update anymore until you upgrade Python!</u></b></p>Bazarr is now compatible with Python 3.6 and newer. You must upgrade Python as we don't support Python 2.x anymore.
+ <p><b><u>Bazarr won't update anymore until you upgrade Python!</u></b></p>Bazarr is now compatible with Python 3.7 and newer. You must upgrade Python as we don't support Python 2.x anymore.
<div class="ui bulleted list">
% if os.name == 'posix':
<div class="item">If you are running under Docker, don't worry, we'll take care of this for you. Just pull the new image.</div>
diff --git a/views/movie.tpl b/views/movie.tpl
index 7b917b7ac..0ce04eb6a 100644
--- a/views/movie.tpl
+++ b/views/movie.tpl
@@ -372,12 +372,12 @@
<thead>
<tr>
<th style="text-align: left;">Score:</th>
- <th style="text-align: left;">Lang.:</th>
- <th style="text-align: left;">HI:</th>
+ <th style="text-align: left;"></th>
<th style="text-align: left;">Provider:</th>
<th style="text-align: left;">Matching:</th>
<th style="text-align: left;">Releases:</th>
- <th></th>details
+ <th style="text-align: left;">Uploader:</th>
+ <th></th>
</tr>
</thead>
</table>
@@ -632,16 +632,21 @@
},
{ data: null,
render: function ( data, type, row ) {
+ let lng = data.language;
if ( data.language === "pt" && is_pb === true && is_pt === false) {
- return 'pb'
+ lng = 'pb'
} else if ( data.language === "pt:forced" && is_pb === true && is_pt === false) {
- return 'pb:forced'
- } else {
- return data.language
+ lng = 'pb:forced'
+ }
+
+ let text = '<div class="ui tiny label" style="margin-bottom: 2px;">' + lng.toUpperCase() + '</div>';
+ if (data.hearing_impaired == "True") {
+ text += '<div class="ui tiny inverted label" style="background-color: #313335;">HI</div>';
}
+
+ return text;
}
},
- { data: 'hearing_impaired' },
{ data: null,
render: function ( data, type, row ) {
return '<a href="'+data.url+'" target="_blank">'+data.provider+'</a>';
@@ -674,6 +679,7 @@
const array_release_info = data.release_info;
let i;
let text = '<div class="ui fluid accordion"><div class="title"><i class="dropdown icon"></i>...</div><div class="content">';
+ if (array_release_info.length <= 1) text = '<div><div class="content">';
for (i = 0; i < array_release_info.length; i++) {
text += '<div class="ui tiny label" style="margin-bottom: 2px;">' + array_release_info[i] + '</div>';
}
@@ -681,6 +687,11 @@
return text;
}
},
+ { data: 'uploader',
+ render: function ( data, type, row ) {
+ return '<div class="ui tiny label">' + data + '</div>';
+ }
+ },
{ data: null,
render: function ( data, type, row ) {
return '<a href="#" class="ui tiny label" onclick="manual_get(this, moviePath, sceneName, hi, radarrId)" data-subtitle="'+data.subtitle+'" data-provider="'+data.provider+'" data-language="'+data.language+'"><i class="ui download icon" style="margin-right:0px" ></i></a>';
diff --git a/views/providers.tpl b/views/providers.tpl
index 510ee7857..0bf3f019e 100644
--- a/views/providers.tpl
+++ b/views/providers.tpl
@@ -483,6 +483,27 @@
<div class="middle aligned row">
<div class="right aligned four wide column">
+ <label>RegieLive</label>
+ </div>
+ <div class="one wide column">
+ <div id="regielive" class="ui toggle checkbox provider">
+ <input type="checkbox">
+ <label></label>
+ </div>
+ </div>
+ <div class="collapsed column">
+ <div class="collapsed center aligned column">
+ <div class="ui basic icon" data-tooltip="Romanian Subtitles Provider." data-inverted="">
+ <i class="help circle large icon"></i>
+ </div>
+ </div>
+ </div>
+ </div>
+ <div id="regielive_option" class="ui grid container">
+
+ </div>
+ <div class="middle aligned row">
+ <div class="right aligned four wide column">
<label>Subdivx</label>
</div>
<div class="one wide column">
@@ -665,28 +686,6 @@
<div class="middle aligned row">
<div class="right aligned four wide column">
- <label>SubZ</label>
- </div>
- <div class="one wide column">
- <div id="subz" class="ui toggle checkbox provider">
- <input type="checkbox">
- <label></label>
- </div>
- </div>
- <div class="collapsed column">
- <div class="collapsed center aligned column">
- <div class="ui basic icon" data-tooltip="Greek Subtitles Provider." data-inverted="">
- <i class="help circle large icon"></i>
- </div>
- </div>
- </div>
- </div>
- <div id="subz_option" class="ui grid container">
-
- </div>
-
- <div class="middle aligned row">
- <div class="right aligned four wide column">
<label>Subtitulamos.tv</label>
</div>
<div class="one wide column">
@@ -773,6 +772,28 @@
<div class="middle aligned row">
<div class="right aligned four wide column">
+ <label>Wizdom</label>
+ </div>
+ <div class="one wide column">
+ <div id="wizdom" class="ui toggle checkbox provider">
+ <input type="checkbox">
+ <label></label>
+ </div>
+ </div>
+ <div class="collapsed column">
+ <div class="collapsed center aligned column">
+ <div class="ui basic icon" data-tooltip="Wizdom.xyz Subtitles Provider." data-inverted="">
+ <i class="help circle large icon"></i>
+ </div>
+ </div>
+ </div>
+ </div>
+ <div id="wizdom_option" class="ui grid container">
+
+ </div>
+
+ <div class="middle aligned row">
+ <div class="right aligned four wide column">
<label>XSubs</label>
</div>
<div class="one wide column">
@@ -814,6 +835,28 @@
<div class="middle aligned row">
<div class="right aligned four wide column">
+ <label>Yavka.net</label>
+ </div>
+ <div class="one wide column">
+ <div id="yavkanet" class="ui toggle checkbox provider">
+ <input type="checkbox">
+ <label></label>
+ </div>
+ </div>
+ <div class="collapsed column">
+ <div class="collapsed center aligned column">
+ <div class="ui basic icon" data-tooltip="Bulgarian (mostly) Subtitles Provider." data-inverted="">
+ <i class="help circle large icon"></i>
+ </div>
+ </div>
+ </div>
+ </div>
+ <div id="yavkanet_option" class="ui grid container">
+
+ </div>
+
+ <div class="middle aligned row">
+ <div class="right aligned four wide column">
<label>Zimuku</label>
</div>
<div class="one wide column">