Compare commits

...

2 Commits

Author  SHA1        Message                                         Date
Sarah   3772e7f190  use csv file instead of reading from csv page  2022-02-20 22:00:14 +00:00
Sarah   5456a138c0  move url to utils                               2022-02-20 21:53:36 +00:00
2 changed files with 18 additions and 12 deletions

View File

@@ -6,9 +6,6 @@ from vcinema_utils import VCinemaUtils
 from wiki_utils import WikiUtils
 
-JACKNET_WIKI_URL = "https://wiki.jacknet.io"
-
-
 def build_table(films_by_year):
     films_by_year_descending = OrderedDict(sorted(films_by_year.items(), key=lambda t: t[0], reverse=True))
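
As a side note on the unchanged context line above: build_table keys its output by year in descending order. A minimal sketch of that sort, with the films_by_year sample values assumed purely for illustration:

from collections import OrderedDict

# Assumed sample input: year -> list of film titles (illustrative only)
films_by_year = {2019: ["Alien"], 2021: ["Hackers", "Sneakers"], 2020: ["Tron"]}

films_by_year_descending = OrderedDict(
    sorted(films_by_year.items(), key=lambda t: t[0], reverse=True))

print(list(films_by_year_descending))  # [2021, 2020, 2019]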

View File

@@ -1,22 +1,31 @@
-from bs4 import BeautifulSoup
 from progress.bar import Bar
 from imdb_utils import IMDbUtils
 from wiki_utils import WikiUtils
 
-def get_vcinema_viewings(token_id, token_secret):
-    # Page ID of /Vcinema/CSV
+JACKNET_WIKI_URL = "https://wiki.jacknet.io"
+
+
+def get_viewings_csv_attachment_id(token_id, token_secret):
+    attachments = WikiUtils.get_attachments(JACKNET_WIKI_URL, token_id, token_secret)
+    # Page ID of "https://wiki.jacknet.io/books/vcinema/page/csv"
     page_id = 11
-    wiki_base_url = "https://wiki.jacknet.io"
-    html_page = WikiUtils.get_page_export_html(page_id, wiki_base_url, token_id, token_secret)
-    soup = BeautifulSoup(html_page, 'html.parser')
-    elements = soup.find("code").text.strip().split("\n")
-    headers = elements.pop(0).split(",")
-    viewings = [dict(zip(headers, row.split(","))) for row in elements]
+    viewings_csv_file_name = "vcinema.csv"
+    return next((x['id'] for x in attachments if x['uploaded_to'] == page_id and x['name'] == viewings_csv_file_name), None)
+
+
+def get_vcinema_viewings(token_id, token_secret):
+    attachment_id = get_viewings_csv_attachment_id(token_id, token_secret)
+    viewings_csv = WikiUtils.get_attachment_contents(attachment_id, JACKNET_WIKI_URL, token_id, token_secret)
+    viewings_csv = viewings_csv.decode("utf-8")
+    viewings_csv_rows = viewings_csv.strip().split("\n")
+    headers = viewings_csv_rows.pop(0).split(",")
+    viewings = [dict(zip(headers, row.split(","))) for row in viewings_csv_rows]
     return viewings
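
For reference, the rewritten get_vcinema_viewings returns one dict per viewing, keyed by the CSV header row. The manual split/zip parsing is equivalent to the standard library's csv.DictReader, which additionally handles quoted fields; a minimal sketch with assumed sample attachment contents:

import csv
import io

# Assumed sample attachment contents (illustrative only)
viewings_csv = "title,year,rating\nAlien,1979,8\nHackers,1995,6\n"

# Manual parsing, as in get_vcinema_viewings above
rows = viewings_csv.strip().split("\n")
headers = rows.pop(0).split(",")
viewings = [dict(zip(headers, row.split(","))) for row in rows]

# Same result via the standard library (also copes with quoted fields)
viewings_stdlib = [dict(row) for row in csv.DictReader(io.StringIO(viewings_csv))]

assert viewings == viewings_stdlib
print(viewings[0]["title"])  # Alien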