from collections import Counter
from concurrent.futures import ThreadPoolExecutor
import csv

from imdb_utils import IMDbUtils
from bookstack import Bookstack

JACKNET_WIKI_URL = "https://wiki.jacknet.io"

# Page ID of https://wiki.jacknet.io/books/vcinema/page/csv
CSV_PAGE_ID = 11


def get_viewings_csv_attachment_id(token_id, token_secret):
    """Return the attachment ID of the viewings CSV on the VCinema CSV page, or None if it is not found."""
    attachments = Bookstack.get_attachments(JACKNET_WIKI_URL, token_id, token_secret)

    viewings_csv_file_name = "vcinema.csv"

    return next((x['id'] for x in attachments if x['uploaded_to'] == CSV_PAGE_ID and x['name'] == viewings_csv_file_name), None)


def get_vcinema_viewings(token_id, token_secret, viewings_csv=None, combine_repeat_viewings=True):
    """Fetch the viewings CSV (unless one is supplied as bytes) and parse it into a list of dicts.

    With combine_repeat_viewings, rows sharing an imdb_id are merged into a single entry whose
    'viewings' list holds one {date_watched, season, rating} dict per watch.
    """
    if viewings_csv is None:
        attachment_id = get_viewings_csv_attachment_id(token_id, token_secret)
        viewings_csv = Bookstack.get_attachment(JACKNET_WIKI_URL, token_id, token_secret, attachment_id)

    viewings_csv = viewings_csv.decode("utf-8")
    viewings_csv_rows = viewings_csv.strip().split("\n")

    viewings = list(csv.DictReader(viewings_csv_rows, quotechar='"'))

    if combine_repeat_viewings:
        # Move the per-watch fields into a nested 'viewings' list so repeat watches can be merged.
        for viewing in viewings:
            viewing['viewings'] = [
                {'date_watched': viewing['date_watched'], 'season': viewing['season'], 'rating': viewing['rating']}]
            viewing.pop('date_watched')
            viewing.pop('season')
            viewing.pop('rating')

        watch_counts = Counter([x['imdb_id'] for x in viewings])
        repeat_watches = [k for k, v in watch_counts.items() if v > 1]

        for film in repeat_watches:
            viewing_indexes = [index for index, viewing in enumerate(viewings) if viewing['imdb_id'] == film]

            # Fold every later watch into the first entry, then drop the duplicates
            # in reverse so the remaining indexes stay valid.
            first_watch = viewings[viewing_indexes[0]]

            for index in viewing_indexes[1:]:
                first_watch['viewings'].extend(viewings[index]['viewings'])

            for index in reversed(viewing_indexes[1:]):
                viewings.pop(index)

    return viewings


def add_imdb_data(imdb_id, viewings, data_fields, progressbar=None):
    """Look up a film on IMDb and copy the requested fields onto every viewing of it."""
    movie = IMDbUtils.get_movie(imdb_id)

    for viewing in viewings:
        if viewing['imdb_id'] == movie.movieID:
            for field_name in data_fields:
                if field_name in movie:
                    viewing[field_name] = movie[field_name]

    if progressbar is not None:
        progressbar.next()


def add_imdb_keywords(imdb_id, viewings, progressbar=None):
    """Look up a film's IMDb keywords and attach them to every viewing of it."""
    movie = IMDbUtils.get_movie_keywords(imdb_id)

    for viewing in viewings:
        if viewing['imdb_id'] == movie.movieID:
            if 'keywords' in movie:
                viewing['keywords'] = movie['keywords']

    if progressbar is not None:
        progressbar.next()


def add_imdb_data_to_viewings(viewings, field_names, progress_bar=None):
    """Fetch IMDb data (and keywords, if requested) for every viewing using a small thread pool."""
    with ThreadPoolExecutor(4) as executor:
        future_imdb_tasks = set()

        # Keywords come from a separate lookup, so only submit the general data fetch
        # when at least one non-keyword field was requested.
        if ('keywords' in field_names and len(field_names) > 1) or ('keywords' not in field_names and len(field_names) > 0):
            future_imdb_tasks.update(executor.submit(add_imdb_data, viewing['imdb_id'], viewings, field_names, progress_bar) for viewing in viewings)

        if 'keywords' in field_names:
            future_imdb_tasks.update(executor.submit(add_imdb_keywords, viewing['imdb_id'], viewings, progress_bar) for viewing in viewings)

        if progress_bar is not None:
            progress_bar.max = len(future_imdb_tasks)

    if progress_bar is not None:
        progress_bar.finish()


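# A minimal sketch of the progress bar interface these functions expect: any object
# with a writable `max` attribute plus `next()` and `finish()` methods works (for
# example progress.bar.Bar). This illustrative stand-in assumes nothing beyond that
# interface and simply prints a counter.
class SimpleProgressBar:
    def __init__(self, max=0):
        self.max = max
        self.count = 0

    def next(self):
        # Advance the counter and redraw the "done/total" line in place.
        self.count += 1
        print(f"\r{self.count}/{self.max}", end="", flush=True)

    def finish(self):
        # Move to a fresh line once all tasks have completed.
        print()

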
def filter_viewings(viewings, filter_field):
    """Group viewings by the value of filter_field (or by each value, for list-valued fields)."""
    viewings_filtered = {}

    for viewing in viewings:
        if filter_field in viewing:
            viewing_field = viewing[filter_field]

            # List-valued fields (e.g. keywords) index the viewing under every value.
            if isinstance(viewing_field, list):
                for field_value in viewing_field:
                    viewings_filtered.setdefault(field_value, []).append(viewing)
            else:
                viewings_filtered.setdefault(viewing_field, []).append(viewing)

    return viewings_filtered
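

# A minimal usage sketch, assuming valid Bookstack API credentials: the token values
# below are placeholders, and the chosen IMDb fields and filter are only examples.
if __name__ == "__main__":
    token_id = "YOUR_TOKEN_ID"
    token_secret = "YOUR_TOKEN_SECRET"

    # Download and parse the viewings CSV, merging repeat watches per film.
    viewings = get_vcinema_viewings(token_id, token_secret)

    # Pull the year and keywords for each film, reporting progress through the
    # SimpleProgressBar sketch above (any max/next()/finish() object would do).
    add_imdb_data_to_viewings(viewings, ['year', 'keywords'], SimpleProgressBar())

    # Group the enriched viewings by year and print a simple count per year.
    by_year = filter_viewings(viewings, 'year')
    print({year: len(films) for year, films in by_year.items()})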