Major refactor of codebase.

This commit is contained in:
2020-04-11 00:30:24 +03:00
parent fd5d05232f
commit 1022ce353c
33 changed files with 1408 additions and 963 deletions

View File

@ -0,0 +1,255 @@
import logging
import mimetypes
import os
import re
from string import Template
from threading import Lock
from urllib.parse import urljoin
import PIL.Image
import PIL.ImageOps
import requests
import youtube_dl
from django.conf import settings as srv_settings
from YtManagerApp.models import Subscription, Video
from YtManagerApp.models import VIDEO_ORDER_MAPPING
from YtManagerApp.providers.video_provider import VideoProvider
from YtManagerApp.scheduler.job import Job
from YtManagerApp.utils import first_non_null
log = logging.getLogger('DownloadManager')
class DownloadVideoJob(Job):
    """
    Downloads a video to the disk
    """
    name = "DownloadVideoJob"
    # Class-level lock shared by all download jobs: only one download may run
    # at a time (see the race-condition note in run()).
    __lock = Lock()

    def __init__(self, job_execution, video: Video, attempt: int = 1):
        """
        :param job_execution: Scheduler execution context passed to the base Job.
        :param video: The video to download.
        :param attempt: 1-based attempt counter; incremented on each re-enqueue.
        """
        super().__init__(job_execution)
        self._video = video
        self._attempt = attempt
        # Child logger so youtube-dl output is attributable to this job
        self._log_youtube_dl = self.log.getChild('youtube_dl')

    def get_description(self):
        """Human-readable description of this job (shown in the job list)."""
        ret = "Downloading video " + self._video.name
        if self._attempt > 1:
            ret += f" (attempt {self._attempt})"
        return ret

    def run(self):
        """
        Runs youtube-dl for this video; on success records the downloaded
        path and total size, on failure re-enqueues up to the user's
        'max_download_attempts' preference, then gives up ('' path).
        """
        from YtManagerApp.services import Services

        # Issue: if multiple videos are downloaded at the same time, a race condition appears in the mkdirs() call that
        # youtube-dl makes, which causes it to fail with the error 'Cannot create folder - file already exists'.
        # For now, allow a single download instance.
        self.__lock.acquire()

        try:
            user = self._video.subscription.user
            provider: VideoProvider = Services.videoProviderManager().get(self._video)

            max_attempts = user.preferences['max_download_attempts']

            youtube_dl_params, output_path = self.__build_youtube_dl_params(self._video)
            with youtube_dl.YoutubeDL(youtube_dl_params) as yt:
                ret = yt.download([provider.get_video_url(self._video)])

            self.log.info('Download finished with code %d', ret)

            if ret == 0:
                # Success: persist path first, then compute total size on disk
                self._video.downloaded_path = output_path
                self._video.save()
                self.log.info('Video %d [%s %s] downloaded successfully!', self._video.id, self._video.video_id,
                              self._video.name)

                # update size
                self._video.downloaded_size = 0
                for file in self._video.get_files():
                    self._video.downloaded_size += os.stat(file).st_size
                self._video.save()

            elif self._attempt <= max_attempts:
                # Retry with an incremented attempt counter
                self.log.warning('Re-enqueueing video (attempt %d/%d)', self._attempt, max_attempts)
                Services.videoManager().download(self._video, self._attempt + 1)

            else:
                # Out of attempts: '' marks a permanently failed download
                self.log.error('Multiple attempts to download video %d [%s %s] failed!', self._video.id,
                               self._video.video_id, self._video.name)
                self._video.downloaded_path = ''
                self._video.save()
        finally:
            self.__lock.release()

    def __build_youtube_dl_params(self, video: Video):
        """
        Builds the youtube-dl options dict and the resolved output path for
        the given video, based on the owning user's preferences.

        :return: (youtube_dl_params, output_path) tuple.
        """
        sub = video.subscription
        user = sub.user

        # resolve path
        download_path = user.preferences['download_path']

        template_dict = self.__build_template_dict(video)
        output_pattern = Template(user.preferences['download_file_pattern']).safe_substitute(template_dict)

        output_path = os.path.join(download_path, output_pattern)
        output_path = os.path.normpath(output_path)

        youtube_dl_params = {
            'logger': self._log_youtube_dl,
            'format': user.preferences['download_format'],
            'outtmpl': output_path,
            'writethumbnail': True,
            'writedescription': True,
            'writesubtitles': user.preferences['download_subtitles'],
            'writeautomaticsub': user.preferences['download_autogenerated_subtitles'],
            'allsubtitles': user.preferences['download_subtitles_all'],
            'merge_output_format': 'mp4',
            'postprocessors': [
                {
                    'key': 'FFmpegMetadata'
                },
            ]
        }

        # NOTE(review): ''.split(',') yields [''], so subtitleslangs is set
        # even when the preference is empty — confirm this is intended.
        sub_langs = user.preferences['download_subtitles_langs'].split(',')
        sub_langs = [i.strip() for i in sub_langs]
        if len(sub_langs) > 0:
            youtube_dl_params['subtitleslangs'] = sub_langs

        sub_format = user.preferences['download_subtitles_format']
        if len(sub_format) > 0:
            youtube_dl_params['subtitlesformat'] = sub_format

        return youtube_dl_params, output_path

    def __build_template_dict(self, video: Video):
        """Builds the substitution mapping for the download file-name pattern."""
        return {
            'channel': video.subscription.channel_name,
            'channel_id': video.subscription.channel_id,
            'playlist': video.subscription.name,
            'playlist_id': video.subscription.playlist_id,
            # 1-based, zero-padded playlist index (e.g. "001")
            'playlist_index': "{:03d}".format(1 + video.playlist_index),
            'title': video.name,
            'id': video.video_id,
        }

    def __get_valid_path(self, path: str):
        """
        Normalizes string, converts to lowercase, removes non-alpha characters, removes forbidden characters.
        """
        # NOTE(review): this helper is not called anywhere in this class —
        # possibly kept for use by the file-name pattern logic; verify.
        import unicodedata
        value = unicodedata.normalize('NFKD', path).encode('ascii', 'ignore').decode('ascii')
        value = re.sub('[:"*]', '', value).strip()
        value = re.sub('[?<>|]', '#', value)
        return value
class DownloadManager(object):
    """
    Service that enqueues video download jobs and decides which videos of a
    subscription should be downloaded, honoring per-subscription and global
    download limits. Also stores remote thumbnails locally.
    """

    def __init__(self):
        pass

    def download_video(self, video: Video, attempt: int = 1):
        """
        Enqueues a DownloadVideoJob for the given video.

        :param video: Video to download.
        :param attempt: 1-based attempt counter, used for retries.
        """
        from YtManagerApp.services import Services
        Services.scheduler().add_job(DownloadVideoJob, args=[video, attempt])

    def __get_subscription_config(self, sub: Subscription):
        """
        Resolves the effective download settings for a subscription, falling
        back to the owning user's preferences where not set on the subscription.

        :return: (enabled, global_limit, limit, order) tuple.
        """
        user = sub.user

        enabled = first_non_null(sub.auto_download, user.preferences['auto_download'])
        global_limit = user.preferences['download_global_limit']
        limit = first_non_null(sub.download_limit, user.preferences['download_subscription_limit'])
        order = first_non_null(sub.download_order, user.preferences['download_order'])
        order = VIDEO_ORDER_MAPPING[order]

        return enabled, global_limit, limit, order

    def process_subscription(self, sub: Subscription):
        """
        Enqueues downloads for all not-yet-downloaded, unwatched videos of the
        subscription, capped by the per-subscription and global limits.
        """
        from YtManagerApp.services import Services

        # Fix: third argument was a duplicated sub.id; log the name instead.
        log.info('Processing subscription %d [%s %s]', sub.id, sub.playlist_id, sub.name)

        enabled, global_limit, limit, order = self.__get_subscription_config(sub)
        log.info('Determined settings enabled=%s global_limit=%d limit=%d order="%s"', enabled, global_limit, limit, order)

        if enabled:
            videos_to_download = Video.objects\
                .filter(subscription=sub, downloaded_path__isnull=True, watched=False)\
                .order_by(order)

            log.info('%d download candidates.', len(videos_to_download))

            if global_limit > 0:
                global_downloaded = Video.objects.filter(subscription__user=sub.user, downloaded_path__isnull=False).count()
                allowed_count = max(global_limit - global_downloaded, 0)
                videos_to_download = videos_to_download[0:allowed_count]
                log.info('Global limit is set, can only download up to %d videos.', allowed_count)

            if limit > 0:
                sub_downloaded = Video.objects.filter(subscription=sub, downloaded_path__isnull=False).count()
                allowed_count = max(limit - sub_downloaded, 0)
                videos_to_download = videos_to_download[0:allowed_count]
                log.info('Limit is set, can only download up to %d videos.', allowed_count)

            # enqueue download
            for video in videos_to_download:
                log.info('Enqueuing video %d [%s %s] index=%d', video.id, video.video_id, video.name, video.playlist_index)
                Services.videoManager().download(video)

        log.info('Finished processing subscription %d [%s %s]', sub.id, sub.playlist_id, sub.name)

    def process_all_subscriptions(self):
        """Runs download processing for every subscription in the database."""
        for subscription in Subscription.objects.all():
            self.process_subscription(subscription)

    def fetch_thumbnail(self, url, object_type, identifier, thumb_size):
        """
        Downloads a thumbnail, fits it to the requested size, and stores it
        under MEDIA_ROOT/thumbs/<object_type>/<identifier><ext>.

        :param url: Remote thumbnail URL.
        :param object_type: Subdirectory name (e.g. 'sub' or 'video').
        :param identifier: Unique id used as the file name stem.
        :param thumb_size: (width, height) passed to PIL.ImageOps.fit.
        :return: Local media URL on success, the original URL on any failure.
        """
        log.info('Fetching thumbnail url=%s object_type=%s identifier=%s', url, object_type, identifier)

        # Make request to obtain mime type
        try:
            response = requests.get(url, stream=True)
        except requests.exceptions.RequestException as e:
            log.error('Failed to fetch thumbnail %s. Error: %s', url, e)
            return url

        # Fix: a missing/unknown Content-Type header no longer raises
        # KeyError or produces a literal 'None' file extension.
        ext = mimetypes.guess_extension(response.headers.get('Content-Type', '')) or ''

        # Build file path
        file_name = f"{identifier}{ext}"
        abs_path_dir = os.path.join(srv_settings.MEDIA_ROOT, "thumbs", object_type)
        abs_path = os.path.join(abs_path_dir, file_name)
        # Fix: write the temporary file next to the destination instead of
        # into the process working directory.
        abs_path_tmp = abs_path + '.tmp'

        # Store image
        try:
            os.makedirs(abs_path_dir, exist_ok=True)
            with open(abs_path_tmp, "wb") as f:
                for chunk in response.iter_content(chunk_size=1024):
                    if chunk:
                        f.write(chunk)

            # Resize and crop to thumbnail size
            image = PIL.Image.open(abs_path_tmp)
            image = PIL.ImageOps.fit(image, thumb_size)
            image.save(abs_path)
            image.close()

            # Delete temp file
            os.unlink(abs_path_tmp)
        except requests.exceptions.RequestException as e:
            log.error('Error while downloading stream for thumbnail %s. Error: %s', url, e)
            return url
        except OSError as e:
            log.error('Error while writing to file %s for thumbnail %s. Error: %s', abs_path, url, e)
            return url

        # Return
        media_url = urljoin(srv_settings.MEDIA_URL, f"thumbs/{object_type}/{file_name}")
        return media_url

View File

@ -1,111 +0,0 @@
from YtManagerApp.scheduler.jobs.download_video_job import DownloadVideoJob
from YtManagerApp.models import Video, Subscription, VIDEO_ORDER_MAPPING
from YtManagerApp.utils import first_non_null
from django.conf import settings as srv_settings
import logging
import requests
import mimetypes
import os
import PIL.Image
import PIL.ImageOps
from urllib.parse import urljoin
log = logging.getLogger('downloader')
def __get_subscription_config(sub: Subscription):
    """
    Resolves the effective download settings for *sub*, falling back to the
    owning user's preferences for anything not set on the subscription itself.

    :return: (enabled, global_limit, limit, order) tuple.
    """
    prefs = sub.user.preferences

    enabled = first_non_null(sub.auto_download, prefs['auto_download'])
    global_limit = prefs['download_global_limit']
    limit = first_non_null(sub.download_limit, prefs['download_subscription_limit'])
    # Translate the symbolic order preference into a queryset ordering key
    order = VIDEO_ORDER_MAPPING[first_non_null(sub.download_order, prefs['download_order'])]

    return enabled, global_limit, limit, order
def downloader_process_subscription(sub: Subscription):
    """
    Enqueues downloads for all not-yet-downloaded, unwatched videos of the
    subscription, capped by the per-subscription and global download limits.

    :param sub: Subscription to process.
    """
    # Fix: third argument was a duplicated sub.id; log the name instead.
    log.info('Processing subscription %d [%s %s]', sub.id, sub.playlist_id, sub.name)

    enabled, global_limit, limit, order = __get_subscription_config(sub)
    log.info('Determined settings enabled=%s global_limit=%d limit=%d order="%s"', enabled, global_limit, limit, order)

    if enabled:
        videos_to_download = Video.objects\
            .filter(subscription=sub, downloaded_path__isnull=True, watched=False)\
            .order_by(order)

        log.info('%d download candidates.', len(videos_to_download))

        if global_limit > 0:
            global_downloaded = Video.objects.filter(subscription__user=sub.user, downloaded_path__isnull=False).count()
            allowed_count = max(global_limit - global_downloaded, 0)
            videos_to_download = videos_to_download[0:allowed_count]
            log.info('Global limit is set, can only download up to %d videos.', allowed_count)

        if limit > 0:
            sub_downloaded = Video.objects.filter(subscription=sub, downloaded_path__isnull=False).count()
            allowed_count = max(limit - sub_downloaded, 0)
            videos_to_download = videos_to_download[0:allowed_count]
            log.info('Limit is set, can only download up to %d videos.', allowed_count)

        # enqueue download
        for video in videos_to_download:
            log.info('Enqueuing video %d [%s %s] index=%d', video.id, video.video_id, video.name, video.playlist_index)
            DownloadVideoJob.schedule(video)

    log.info('Finished processing subscription %d [%s %s]', sub.id, sub.playlist_id, sub.name)
def downloader_process_all():
    """Runs download processing for every subscription in the database."""
    for sub in Subscription.objects.all():
        downloader_process_subscription(sub)
def fetch_thumbnail(url, object_type, identifier, thumb_size):
    """
    Downloads a thumbnail, fits it to the requested size, and stores it under
    MEDIA_ROOT/thumbs/<object_type>/<identifier><ext>.

    :param url: Remote thumbnail URL.
    :param object_type: Subdirectory name (e.g. 'sub' or 'video').
    :param identifier: Unique id used as the file name stem.
    :param thumb_size: (width, height) passed to PIL.ImageOps.fit.
    :return: Local media URL on success, the original URL on any failure.
    """
    log.info('Fetching thumbnail url=%s object_type=%s identifier=%s', url, object_type, identifier)

    # Make request to obtain mime type
    try:
        response = requests.get(url, stream=True)
    except requests.exceptions.RequestException as e:
        log.error('Failed to fetch thumbnail %s. Error: %s', url, e)
        return url

    # Fix: a missing/unknown Content-Type header no longer raises KeyError or
    # produces a literal 'None' file extension.
    ext = mimetypes.guess_extension(response.headers.get('Content-Type', '')) or ''

    # Build file path
    file_name = f"{identifier}{ext}"
    abs_path_dir = os.path.join(srv_settings.MEDIA_ROOT, "thumbs", object_type)
    abs_path = os.path.join(abs_path_dir, file_name)
    # Fix: write the temporary file next to the destination instead of into
    # the process working directory.
    abs_path_tmp = abs_path + '.tmp'

    # Store image
    try:
        os.makedirs(abs_path_dir, exist_ok=True)
        with open(abs_path_tmp, "wb") as f:
            for chunk in response.iter_content(chunk_size=1024):
                if chunk:
                    f.write(chunk)

        # Resize and crop to thumbnail size
        image = PIL.Image.open(abs_path_tmp)
        image = PIL.ImageOps.fit(image, thumb_size)
        image.save(abs_path)
        image.close()

        # Delete temp file
        os.unlink(abs_path_tmp)
    except requests.exceptions.RequestException as e:
        log.error('Error while downloading stream for thumbnail %s. Error: %s', url, e)
        return url
    except OSError as e:
        log.error('Error while writing to file %s for thumbnail %s. Error: %s', abs_path, url, e)
        return url

    # Return
    media_url = urljoin(srv_settings.MEDIA_URL, f"thumbs/{object_type}/{file_name}")
    return media_url

View File

@ -0,0 +1,246 @@
import errno
import itertools
import mimetypes
import os
from threading import Lock
from typing import Optional, List, Union
from apscheduler.triggers.cron import CronTrigger
from django.conf import settings
from django.db.models import Max
from YtManagerApp.models import *
from YtManagerApp.providers.video_provider import VideoProvider, InvalidURLError
from YtManagerApp.scheduler.job import Job
from YtManagerApp.utils.algorithms import group_by
_ENABLE_UPDATE_STATS = True
class SynchronizeJob(Job):
    """
    Synchronizes one subscription (or all of them): discovers new videos,
    refreshes existing video metadata, detects deleted files, fetches missing
    thumbnails, and finally triggers download processing.
    """
    name = "SynchronizeJob"
    # Only one synchronization may run at a time.
    __lock = Lock()
    # True while any SynchronizeJob instance is running.
    running = False
    __global_sync_job = None

    def __init__(self, job_execution, subscription: Optional[Subscription] = None):
        """
        :param job_execution: Scheduler execution context passed to the base Job.
        :param subscription: Subscription to synchronize; None synchronizes all.
        """
        super().__init__(job_execution)
        self.__subscription: Optional[Subscription] = subscription
        # Videos discovered during this run; updated alongside existing ones
        self.__new_vids: List[Video] = []

    def get_description(self):
        """Human-readable description of this job (shown in the job list)."""
        if self.__subscription is not None:
            return "Running synchronization for subscription " + self.__subscription.name
        return "Running synchronization..."

    def get_subscription_list(self):
        """Returns the subscriptions this run operates on (one or all)."""
        if self.__subscription is not None:
            return [self.__subscription]
        return Subscription.objects.all()

    def get_videos_list(self, subs):
        """Returns all videos belonging to the given subscriptions."""
        return Video.objects.filter(subscription__in=subs)

    def run(self):
        """
        Executes the synchronization under the class-wide lock; progress is
        reported per subscription and per video.
        """
        from YtManagerApp.services import Services
        self.__lock.acquire(blocking=True)
        SynchronizeJob.running = True
        try:
            self.log.info(self.get_description())

            # Build list of work items
            work_subs = self.get_subscription_list()
            work_vids = self.get_videos_list(work_subs)

            self.set_total_steps(len(work_subs) + len(work_vids))

            # Remove the 'new' flag
            work_vids.update(new=False)

            # Process subscriptions
            for sub in work_subs:
                self.progress_advance(1, "Synchronizing subscription " + sub.name)
                self.check_new_videos(sub)
                self.fetch_missing_thumbnails(sub)

            # Add new videos to progress calculation
            self.set_total_steps(len(work_subs) + len(work_vids) + len(self.__new_vids))

            # Group videos by provider so each provider can batch-update
            all_videos = itertools.chain(work_vids, self.__new_vids)
            all_videos_by_provider = group_by(all_videos, lambda x: x.subscription.provider_id)

            for provider_id, videos in all_videos_by_provider.items():
                provider: VideoProvider = Services.videoProviderManager().get(provider_id)
                if _ENABLE_UPDATE_STATS:
                    provider.update_videos(videos, update_statistics=True)

                for video in videos:
                    self.progress_advance(1, "Updating video " + video.name)
                    self.check_video_deleted(video)
                    self.fetch_missing_thumbnails(video)

            # Start downloading videos
            for sub in work_subs:
                Services.downloadManager().process_subscription(sub)
        finally:
            SynchronizeJob.running = False
            self.__lock.release()

    def check_new_videos(self, sub: Subscription):
        """
        Fetches the subscription's playlist and creates Video records for
        items not yet in the database, fixing up playlist indices as needed.
        """
        from YtManagerApp.services import Services
        provider: VideoProvider = Services.videoProviderManager().get(sub)
        playlist_videos = provider.fetch_videos(sub)
        if sub.rewrite_playlist_indices:
            playlist_videos = sorted(playlist_videos, key=lambda x: x.publish_date)
        else:
            playlist_videos = sorted(playlist_videos, key=lambda x: x.playlist_index)

        for item in playlist_videos:
            results = Video.objects.filter(video_id=item.video_id, subscription=sub)
            if not results.exists():
                # Fix: removed stray trailing '"' from the log message.
                self.log.info('New video for subscription %s: %s %s', sub, item.video_id, item.name)

                # fix playlist index if necessary
                if sub.rewrite_playlist_indices or Video.objects.filter(subscription=sub,
                                                                        playlist_index=item.playlist_index).exists():
                    highest = Video.objects.filter(subscription=sub).aggregate(Max('playlist_index'))[
                        'playlist_index__max']
                    # Append after the current highest index (or start at 0)
                    item.playlist_index = 1 + (highest or -1)

                item.save()
                self.__new_vids.append(item)

    def fetch_missing_thumbnails(self, obj: Union[Subscription, Video]):
        """
        Downloads and localizes the thumbnail of a subscription or video if it
        still points at a remote (http/https) URL.
        """
        from YtManagerApp.services import Services
        if obj.thumbnail.startswith("http"):
            if isinstance(obj, Subscription):
                obj.thumbnail = Services.downloadManager().fetch_thumbnail(obj.thumbnail, 'sub', obj.playlist_id,
                                                                           settings.THUMBNAIL_SIZE_SUBSCRIPTION)
            elif isinstance(obj, Video):
                obj.thumbnail = Services.downloadManager().fetch_thumbnail(obj.thumbnail, 'video', obj.video_id,
                                                                           settings.THUMBNAIL_SIZE_VIDEO)
            obj.save()

    def check_video_deleted(self, video: Video):
        """
        Detects videos whose files were deleted from disk; cleans up leftover
        files, clears downloaded_path, and optionally marks them watched.
        """
        if video.downloaded_path is not None:
            files = []
            try:
                files = list(video.get_files())
            except OSError as e:
                # A missing directory (ENOENT) just means no files remain
                if e.errno != errno.ENOENT:
                    self.log.error("Could not access path %s. Error: %s", video.downloaded_path, e)
                    self.usr_err(f"Could not access path {video.downloaded_path}: {e}", suppress_notification=True)
                    return

            # Try to find a valid video file
            found_video = False
            for file in files:
                mime, _ = mimetypes.guess_type(file)
                if mime is not None and mime.startswith("video"):
                    found_video = True
                    # Fix: stop scanning once a video file is found.
                    break

            # Video not found, we can safely assume that the video was deleted.
            if not found_video:
                self.log.info("Video %d was deleted! [%s %s]", video.id, video.video_id, video.name)
                # Clean up
                for file in files:
                    try:
                        os.unlink(file)
                    except OSError as e:
                        self.log.error("Could not delete redundant file %s. Error: %s", file, e)
                        self.usr_err(f"Could not delete redundant file {file}: {e}", suppress_notification=True)
                video.downloaded_path = None

                # Mark watched?
                user = video.subscription.user
                if user.preferences['mark_deleted_as_watched']:
                    video.watched = True

                video.save()

    def update_video_stats(self, video: Video, yt_video):
        """
        Copies rating/view statistics from a provider video object onto the
        Video record. The likes+dislikes > 0 guard avoids division by zero.
        """
        # NOTE(review): not called within this class — providers appear to
        # update stats via update_videos(); verify before removing.
        if yt_video.n_likes is not None \
                and yt_video.n_dislikes is not None \
                and yt_video.n_likes + yt_video.n_dislikes > 0:
            video.rating = yt_video.n_likes / (yt_video.n_likes + yt_video.n_dislikes)

        video.views = yt_video.n_views
        video.save()
class SubscriptionImporterJob(Job):
    """
    Imports a list of subscription URLs, applying the given folder and
    download settings to each successfully fetched subscription.
    """

    def __init__(self, job_execution, urls: List[str],
                 parent_folder: SubscriptionFolder,
                 auto_download: bool,
                 download_limit: int,
                 download_order: str,
                 automatically_delete_watched: bool):
        """
        :param job_execution: Scheduler execution context passed to the base Job.
        :param urls: Subscription URLs to import.
        :param parent_folder: Folder the new subscriptions are placed in.
        :param auto_download: Auto-download setting applied to each subscription.
        :param download_limit: Per-subscription download limit.
        :param download_order: Download ordering preference.
        :param automatically_delete_watched: Whether watched videos are deleted.
        """
        super().__init__(job_execution)
        self._urls = urls
        self._parent_folder = parent_folder
        self._auto_download = auto_download
        self._download_limit = download_limit
        self._download_order = download_order
        self._automatically_delete_watched = automatically_delete_watched

    def get_description(self):
        """Human-readable description of this job (shown in the job list)."""
        return f"Importing {len(self._urls)} subscriptions..."

    def run(self):
        """
        Fetches each URL in turn; failures are logged and skipped so one bad
        URL does not abort the whole import.
        """
        from YtManagerApp.services import Services
        self.set_total_steps(len(self._urls))
        for url in self._urls:
            try:
                self.progress_advance(progress_msg=url)
                sub: Subscription = Services.videoProviderManager().fetch_subscription(url)
                sub.parent_folder = self._parent_folder
                sub.auto_download = self._auto_download
                sub.download_limit = self._download_limit
                sub.download_order = self._download_order
                sub.automatically_delete_watched = self._automatically_delete_watched
                sub.save()
            # Idiom: the two handlers were identical — merged into one clause.
            except (InvalidURLError, ValueError) as e:
                self.log.error("Error importing URL %s: %s", url, e)
class SubscriptionManager(object):
    """
    Service that schedules synchronization jobs and subscription imports.
    """

    def __init__(self):
        # Handle of the cron-scheduled global synchronization job, if any
        self.__global_sync_job = None

    def synchronize(self, sub: Subscription):
        """Enqueues a synchronization job for a single subscription."""
        from YtManagerApp.services import Services
        Services.scheduler().add_job(SynchronizeJob, args=[sub])

    def synchronize_all(self):
        """Enqueues a synchronization job covering all subscriptions."""
        from YtManagerApp.services import Services
        Services.scheduler().add_job(SynchronizeJob, max_instances=1, coalesce=True)

    def schedule_global_synchronize_job(self):
        """
        Creates (or reschedules) the cron-driven global synchronization job
        using the configured sync schedule.
        """
        from YtManagerApp.services import Services
        trigger = CronTrigger.from_crontab(Services.appConfig().sync_schedule)

        if self.__global_sync_job is None:
            # Fix: the job handle was assigned to a name-mangled attribute on
            # SynchronizeJob and never read, so self.__global_sync_job stayed
            # None and the reschedule branch was unreachable. Store it here.
            # (Also removed a duplicate CronTrigger computation.)
            self.__global_sync_job = Services.scheduler().add_job(SynchronizeJob, trigger, max_instances=1,
                                                                  coalesce=True)
        else:
            self.__global_sync_job.reschedule(trigger, max_instances=1, coalesce=True)

    def import_multiple(self, urls: List[str],
                        parent_folder: SubscriptionFolder,
                        auto_download: bool,
                        download_limit: int,
                        download_order: str,
                        automatically_delete_watched: bool):
        """Enqueues an import job for the given subscription URLs and settings."""
        from YtManagerApp.services import Services
        Services.scheduler().add_job(SubscriptionImporterJob, args=[urls, parent_folder, auto_download, download_limit,
                                                                    download_order, automatically_delete_watched])

View File

@ -0,0 +1,124 @@
import os
import re
from typing import Optional
from django.contrib.auth.models import User
from django.db.models import Q
from YtManagerApp.models import Subscription, Video, SubscriptionFolder
from YtManagerApp.scheduler.job import Job
class DeleteVideoJob(Job):
    """
    Deletes a video's files.
    """
    name = "DeleteVideoJob"

    def __init__(self, job_execution, video: Video):
        """
        :param job_execution: Scheduler execution context passed to the base Job.
        :param video: Video whose files will be deleted.
        """
        super().__init__(job_execution)
        self._video = video

    def get_description(self):
        """Human-readable description of this job (shown in the job list)."""
        return f"Deleting video {self._video}"

    def run(self):
        """
        Deletes every file belonging to the video and clears downloaded_path.
        Individual file failures are logged but do not abort the job.
        """
        count = 0

        try:
            for file in self._video.get_files():
                self.log.info("Deleting file %s", file)
                try:
                    os.unlink(file)
                    # Fix: count only files actually deleted — previously the
                    # counter also included files whose deletion failed.
                    count += 1
                except OSError as e:
                    self.log.error("Failed to delete file %s: Error: %s", file, e)
        except OSError as e:
            self.log.error("Failed to delete video %d [%s %s]. Error: %s", self._video.id,
                           self._video.video_id, self._video.name, e)

        self._video.downloaded_path = None
        self._video.save()

        self.log.info('Deleted video %d successfully! (%d files) [%s %s]', self._video.id, count,
                      self._video.video_id, self._video.name)
class VideoManager(object):
    """
    Service providing video queries and lifecycle operations: downloading,
    file deletion, and watched-state management.
    """

    def __init__(self):
        pass

    def get_videos(self,
                   user: User,
                   sort_order: Optional[str],
                   query: Optional[str] = None,
                   subscription_id: Optional[int] = None,
                   folder_id: Optional[int] = None,
                   only_watched: Optional[bool] = None,
                   only_downloaded: Optional[bool] = None,
                   ):
        """
        Builds a filtered, ordered queryset of the user's videos.

        :param user: Owner of the videos.
        :param sort_order: Queryset ordering key.
        :param query: Free-text search over name, description, uploader name
            and subscription name (word by word).
        :param subscription_id: Restrict to one subscription.
        :param folder_id: Restrict to all subscriptions inside a folder tree.
        :param only_watched: Filter on watched state.
        :param only_downloaded: Filter on downloaded state.
        """
        q_filters = []
        kw_filters = {
            'subscription__user': user
        }

        # Break the query into words; every word must match at least one of
        # the name, description, uploader name or subscription name.
        if query is not None:
            for word in re.findall(r'\w+', query):
                q_filters.append(Q(name__icontains=word)
                                 | Q(description__icontains=word)
                                 | Q(uploader_name__icontains=word)
                                 | Q(subscription__name__icontains=word))

        # Restrict to a single subscription
        if subscription_id is not None:
            kw_filters['subscription_id'] = subscription_id

        # Restrict to a folder: collect subscription ids from the folder tree
        if folder_id is not None:
            def visit(node):
                # Only subscription nodes contribute an id
                return node.id if isinstance(node, Subscription) else None
            kw_filters['subscription_id__in'] = SubscriptionFolder.traverse(folder_id, user, visit)

        # Watched filter
        if only_watched is not None:
            kw_filters['watched'] = only_watched

        # downloaded_path is null exactly when the video is NOT downloaded,
        # hence the negation.
        if only_downloaded is not None:
            kw_filters['downloaded_path__isnull'] = not only_downloaded

        return Video.objects.filter(*q_filters, **kw_filters).order_by(sort_order)

    def delete_files(self, video: Video):
        """Enqueues a job that deletes the video's files from disk."""
        from YtManagerApp.services import Services
        Services.scheduler().add_job(DeleteVideoJob, args=[video])

    def download(self, video: Video, attempt: int = 1):
        """Forwards the video to the download manager for downloading."""
        from YtManagerApp.services import Services
        Services.downloadManager().download_video(video, attempt)

    def mark_watched(self, video: Video):
        """
        Marks the video watched; if it is downloaded and the subscription is
        configured to auto-delete watched videos, deletes its files and
        re-synchronizes the subscription.
        """
        from YtManagerApp.services import Services
        video.watched = True
        video.save()
        if video.downloaded_path is not None:
            if Services.appConfig().for_sub(video.subscription, 'automatically_delete_watched'):
                self.delete_files(video)
            Services.subscriptionManager().synchronize(video.subscription)

    def mark_unwatched(self, video: Video):
        """Marks the video unwatched and re-synchronizes its subscription."""
        from YtManagerApp.services import Services
        video.watched = False
        video.save()
        Services.subscriptionManager().synchronize(video.subscription)

View File

@ -0,0 +1,102 @@
import logging
from typing import List, Dict, Union
from YtManagerApp.models import VideoProviderConfig, Video, Subscription
from YtManagerApp.providers.video_provider import VideoProvider, InvalidURLError
import json
log = logging.getLogger("VideoProviderManager")
class VideoProviderManager(object):
    """
    Registry of video providers. Tracks which providers are registered and
    which have been configured from database-stored settings, and routes
    subscriptions/videos/URLs to the right provider.
    """

    def __init__(self, registered_providers: List[VideoProvider]):
        """
        :param registered_providers: Providers to register at startup.
        """
        # name -> provider, for every registered provider
        self._registered_providers: Dict[str, VideoProvider] = {}
        # name -> provider, only for providers with applied configuration
        self._configured_providers: Dict[str, VideoProvider] = {}
        # stored configs whose provider has not been registered yet
        self._pending_configs: Dict[str, VideoProviderConfig] = {}
        for rp in registered_providers:
            self.register_provider(rp)
        self._load()

    def register_provider(self, provider: VideoProvider) -> None:
        """
        Registers a video provider and applies any pending stored configuration.
        :param provider: Video provider
        """
        # avoid duplicates
        if provider.name in self._registered_providers:
            # Idiom: lazy %-style args instead of eager f-strings in logging.
            log.error("Duplicate video provider %s", provider.name)
            return

        # register
        self._registered_providers[provider.name] = provider
        log.info("Registered video provider %s", provider.name)

        # load configuration (if any)
        if provider.name in self._pending_configs:
            self._configure(provider, self._pending_configs[provider.name])
            del self._pending_configs[provider.name]

    def _load(self) -> None:
        """Loads provider configurations from the database."""
        for config in VideoProviderConfig.objects.all():
            provider = self._registered_providers.get(config.provider_id)

            # provider not yet registered, keep it in the pending list
            if provider is None:
                self._pending_configs[config.provider_id] = config
                log.warning("Provider %s not registered!", config.provider_id)
                continue

            # configure
            self._configure(provider, config)

    def _configure(self, provider, config):
        """Applies a stored JSON configuration to a registered provider."""
        settings = json.loads(config.settings)
        provider.configure(settings)
        log.info("Configured video provider %s", provider.name)
        self._configured_providers[provider.name] = provider

    def get(self, item: Union[str, Subscription, Video]):
        """
        Gets provider for given item (subscription or video).
        :param item: Provider ID, or subscription, or video
        :return: Provider, or None if the item type is not recognized
        """
        if isinstance(item, str):
            return self._registered_providers[item]
        elif isinstance(item, Video):
            return self._registered_providers[item.subscription.provider_id]
        elif isinstance(item, Subscription):
            return self._registered_providers[item.provider_id]
        return None

    def validate_subscription_url(self, url: str):
        """
        Validates the given URL against all configured providers; raises
        InvalidURLError if none of them accepts it.
        :param url: URL to validate.
        """
        for provider in self._configured_providers.values():
            try:
                provider.validate_subscription_url(url)
                return
            except InvalidURLError:
                pass
        raise InvalidURLError("The given URL is not valid for any of the supported sites!")

    def fetch_subscription(self, url: str) -> Subscription:
        """
        Fetches subscription metadata for the given URL using the first
        configured provider that accepts it; raises InvalidURLError if none do.
        (Fix: docstring previously duplicated validate_subscription_url's.)
        :param url: Subscription URL.
        :return: Fetched (unsaved) Subscription.
        """
        for provider in self._configured_providers.values():
            try:
                provider.validate_subscription_url(url)
                # Found the right provider
                return provider.fetch_subscription(url)
            except InvalidURLError:
                pass
        raise InvalidURLError("The given URL is not valid for any of the supported sites!")

View File

@ -1,57 +0,0 @@
import re
from typing import Optional
from django.contrib.auth.models import User
from django.db.models import Q
from YtManagerApp.models import Subscription, Video, SubscriptionFolder
def get_videos(user: User,
               sort_order: Optional[str],
               query: Optional[str] = None,
               subscription_id: Optional[int] = None,
               folder_id: Optional[int] = None,
               only_watched: Optional[bool] = None,
               only_downloaded: Optional[bool] = None,
               ):
    """
    Builds a filtered, ordered queryset of the user's videos.

    :param user: Owner of the videos.
    :param sort_order: Queryset ordering key.
    :param query: Free-text search over name, description, uploader name and
        subscription name (word by word).
    :param subscription_id: Restrict to one subscription.
    :param folder_id: Restrict to all subscriptions inside a folder tree.
    :param only_watched: Filter on watched state.
    :param only_downloaded: Filter on downloaded state.
    """
    q_filters = []
    kw_filters = {
        'subscription__user': user
    }

    # Break the query into words; every word must match at least one of the
    # name, description, uploader name or subscription name.
    if query is not None:
        for word in re.findall(r'\w+', query):
            q_filters.append(Q(name__icontains=word)
                             | Q(description__icontains=word)
                             | Q(uploader_name__icontains=word)
                             | Q(subscription__name__icontains=word))

    # Restrict to a single subscription
    if subscription_id is not None:
        kw_filters['subscription_id'] = subscription_id

    # Restrict to a folder: collect subscription ids from the folder tree
    if folder_id is not None:
        def visit(node):
            # Only subscription nodes contribute an id
            return node.id if isinstance(node, Subscription) else None
        kw_filters['subscription_id__in'] = SubscriptionFolder.traverse(folder_id, user, visit)

    # Watched filter
    if only_watched is not None:
        kw_filters['watched'] = only_watched

    # downloaded_path is null exactly when the video is NOT downloaded,
    # hence the negation.
    if only_downloaded is not None:
        kw_filters['downloaded_path__isnull'] = not only_downloaded

    return Video.objects.filter(*q_filters, **kw_filters).order_by(sort_order)