# Helpers for pulling VCinema viewing records from the wiki's CSV export page,
# enriching them with IMDb data, and grouping them by a chosen field.
from bs4 import BeautifulSoup
from progress.bar import Bar

from imdb_utils import IMDbUtils
from wiki_utils import WikiUtils


def get_vcinema_viewings(token_id, token_secret):
    """Fetch the viewing records stored as CSV on the wiki and return them as a list of dicts."""
    # Page ID of /Vcinema/CSV
    page_id = 11

    wiki_base_url = "https://wiki.jacknet.io"

    html_page = WikiUtils.get_page_export_html(page_id, wiki_base_url, token_id, token_secret)

    # The CSV lives inside a <code> block on the exported page; the first row holds the headers.
    soup = BeautifulSoup(html_page, 'html.parser')
    elements = soup.find("code").text.strip().split("\n")
    headers = elements.pop(0).split(",")
    viewings = [dict(zip(headers, row.split(","))) for row in elements]

    return viewings


def add_imdb_data_to_viewings(viewings, field_name):
    """Look up each viewing on IMDb and copy the requested field onto the viewing dict in place."""
    viewing_count = len(viewings)

    with Bar('Processing', max=viewing_count) as bar:
        bar.suffix = '%(percent).1f%% - %(eta)ds'

        for viewing in viewings:
            imdb_entry = IMDbUtils.get_movie(viewing['imdb_id'])

            viewing[field_name] = imdb_entry[field_name]

            bar.next()

        bar.finish()


def filter_viewings(viewings, filter_field, remove_duplicates=True):
    """Group viewings by the value of filter_field, optionally dropping repeat viewings of the same title."""
    viewings_filtered = {}

    for viewing in viewings:
        viewing_field = viewing[filter_field]

        if viewing_field in viewings_filtered:
            # Only add the viewing if duplicates are allowed or this imdb_id isn't already in the group.
            if not remove_duplicates or not any(x['imdb_id'] == viewing['imdb_id'] for x in viewings_filtered[viewing_field]):
                viewings_filtered[viewing_field].append(viewing)
        else:
            viewings_filtered[viewing_field] = [viewing]

    return viewings_filtered
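

# Example usage (illustrative sketch only): the token values below are
# placeholders, and grouping by "year" assumes IMDbUtils.get_movie returns
# entries with a "year" field, which isn't shown in this module.
if __name__ == "__main__":
    viewings = get_vcinema_viewings("YOUR_TOKEN_ID", "YOUR_TOKEN_SECRET")
    add_imdb_data_to_viewings(viewings, "year")

    viewings_by_year = filter_viewings(viewings, "year")
    for year, entries in sorted(viewings_by_year.items()):
        print(year, len(entries))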