add update_films_by_year_page script

This commit is contained in:
Sarah 2022-02-20 21:26:38 +00:00
parent 7cc1e8d199
commit 8eb7abda1b
3 changed files with 109 additions and 0 deletions

View File

@@ -0,0 +1,55 @@
import argparse
from collections import OrderedDict
from vcinema_utils import VCinemaUtils
from wiki_utils import WikiUtils
JACKNET_WIKI_URL = "https://wiki.jacknet.io"
def build_table(fby):
    """Render a films-by-year mapping as a markdown table, newest year first.

    :param fby: dict mapping a year to a list of film dicts, each carrying
        'title' and 'imdb_id' keys.
    :return: markdown table string with one row per year; films in a year
        are separated by ``<br>`` and linked to their IMDb title pages.
    """
    page_table = "| Year | Films |\n| - | - |\n"
    # Newest years first.
    for year in sorted(fby, reverse=True):
        links = "<br>".join(
            "[{}](https://www.imdb.com/title/tt{}/)".format(film['title'], film['imdb_id'])
            for film in fby[year])
        # Leading/trailing pipes keep data rows consistent with the header
        # rows above (the original rows omitted them, yielding a malformed
        # markdown table).
        page_table += "| {} | {} |\n".format(year, links)
    return page_table
def update_films_by_year_page(token_id, token_secret):
    """Rebuild the films-by-release-year wiki page from the viewing log.

    :param token_id: BookStack API token ID.
    :param token_secret: BookStack API token secret.
    """
    print("Retrieving VCinema viewings")
    viewings = VCinemaUtils.get_vcinema_viewings(token_id, token_secret)

    print("Retrieving movie data")
    VCinemaUtils.add_imdb_data_to_viewings(viewings, 'year')

    print("Processing viewing data")
    grouped_by_year = VCinemaUtils.filter_viewings(viewings, 'year')

    print("Generating table")
    table_markdown = build_table(grouped_by_year)

    print("Updating page")
    # Page ID of https://wiki.jacknet.io/books/vcinema/page/films-by-release-year
    WikiUtils.update_page("24", table_markdown, JACKNET_WIKI_URL, token_id, token_secret)

    print("Done!")
if __name__ == '__main__':
    # Command-line entry point: takes the two BookStack API credentials
    # as positional arguments.
    arg_parser = argparse.ArgumentParser(description='Update page displaying VCinema films sorted by release year.')
    arg_parser.add_argument('token_id', help='API token ID.')
    arg_parser.add_argument('token_secret', help='API token secret.')
    parsed = arg_parser.parse_args()
    update_films_by_year_page(parsed.token_id, parsed.token_secret)

View File

@@ -0,0 +1,54 @@
from bs4 import BeautifulSoup
from progress.bar import Bar
from imdb_utils import IMDbUtils
from wiki_utils import WikiUtils
def get_vcinema_viewings(token_id, token_secret):
    """Fetch the VCinema viewing log from the wiki and parse it.

    The log lives in a ``<code>`` element on the wiki page as CSV text;
    the first line holds the column headers.

    :param token_id: wiki API token ID.
    :param token_secret: wiki API token secret.
    :return: list of dicts, one per viewing, keyed by the CSV headers.
    """
    # Page ID of /Vcinema/CSV
    page_id = 11
    wiki_base_url = "https://wiki.jacknet.io"
    exported = WikiUtils.get_page_export_html(page_id, wiki_base_url, token_id, token_secret)
    parsed = BeautifulSoup(exported, 'html.parser')
    rows = parsed.find("code").text.strip().split("\n")
    column_names = rows[0].split(",")
    return [dict(zip(column_names, line.split(","))) for line in rows[1:]]
def add_imdb_data_to_viewings(viewings, field_name):
    """Copy one IMDb attribute onto every viewing dict, mutating in place.

    :param viewings: list of viewing dicts, each with an 'imdb_id' key.
    :param field_name: IMDb attribute name to look up and store on each
        viewing under the same key.
    """
    # The Bar constructor's first argument is the message, so the previous
    # explicit `bar.message = "Processing"` was redundant; likewise the
    # context manager already calls finish() on exit.
    with Bar('Processing', max=len(viewings)) as bar:
        # Show percentage and ETA instead of the default suffix.
        bar.suffix = '%(percent).1f%% - %(eta)ds'
        for viewing in viewings:
            imdb_entry = IMDbUtils.get_movie(viewing['imdb_id'])
            viewing[field_name] = imdb_entry[field_name]
            bar.next()
def filter_viewings(viewings, pivot_field, remove_duplicates=True):
    """Group viewings into a dict keyed by the value of ``pivot_field``.

    :param viewings: list of viewing dicts; each must carry ``pivot_field``
        and, when de-duplicating, should carry an 'imdb_id' key.
    :param pivot_field: dict key to group by (e.g. 'year').
    :param remove_duplicates: when True, skip a viewing whose 'imdb_id'
        already appears in the same group.
    :return: dict mapping each pivot value to a list of viewing dicts.
    """
    viewings_filtered = {}
    for viewing in viewings:
        viewing_field = viewing[pivot_field]
        if viewing_field not in viewings_filtered:
            viewings_filtered[viewing_field] = [viewing]
            continue
        try:
            # `not remove_duplicates` short-circuits, so the imdb_id lookups
            # only run when de-duplication is requested.
            if not remove_duplicates or not any(
                    existing['imdb_id'] == viewing['imdb_id']
                    for existing in viewings_filtered[viewing_field]):
                viewings_filtered[viewing_field].append(viewing)
        except KeyError as e:
            # Best-effort (original behavior): a viewing missing 'imdb_id'
            # can't be de-duplicated — report it and move on.
            print(e)
            print(viewing)
    return viewings_filtered

View File