import csv
import os
import os.path as op
from datetime import date, datetime
from itertools import chain
from subprocess import check_output

from bs4 import BeautifulSoup
from jinja2 import Template


def get_references(sessions):
    # Gather every cited BibTeX key across all sessions and write them to a
    # temporary citefile so bibtex2html only formats the entries we use.
    keys = chain.from_iterable(session["refs"] for session in sessions)
    with open("keys.txt", "w") as fh:
        fh.write("\n".join(keys))
    html_refs = check_output(
        ["bibtex2html", "-nodoc", "-nobibsource", "-noheader", "-q",
         "-o", "-", "-unicode", "-nokeys", "-a", "-citefile", "keys.txt",
         "-s", "abbrv", "sub.bib"])
    os.remove("keys.txt")

    # Reshape the bibtex2html output: the <body> becomes an <ol>, and each
    # entry's <p> becomes an <li> whose id is the BibTeX key.
    soup = BeautifulSoup(html_refs, "lxml")
    body = soup.body
    body.name = "ol"
    body.hr.decompose()
    entries = body.find_all("p")
    entries[-1].decompose()  # trailing <p> is boilerplate, not an entry
    entries = entries[:-1]
    keys = {}  # BibTeX key -> 1-based position in the reference list
    for i, entry in enumerate(entries):
        entry.name = "li"
        entry["id"] = entry.a["name"]
        entry.a.decompose()
        keys[entry["id"]] = i + 1
        # Link the entry to a local PDF when one is available.
        fname = op.join("papers", entry["id"] + ".pdf")
        if op.isfile(fname):
            link = soup.new_tag("a", href=fname)
            link.string = "[pdf]"
            entry.append(link)
    return str(body), keys


def clean(session):
    # Pretty-print the session date, split the comma-separated reference
    # keys, and attach a notes PDF when one exists for that date.
    session_date = date(*map(int, session["date"].split("-")))
    session["date"] = session_date.strftime("%a, %b. %d")
    session["refs"] = session["refs"].split(",")
    fname = op.join("notes", session_date.strftime("%m-%d.pdf"))
    if op.isfile(fname):
        session["notes"] = fname
    return session


def build(sessions, refs, keys):
    # Render the Jinja template to index.html, stamping the build time.
    with open("index.jinja", encoding="utf8") as fh:
        template = Template(fh.read())
    build_date = datetime.now().strftime("%a, %b. %d at %H:%M")
    with open("index.html", "w", encoding="utf8") as fh:
        fh.write(template.render(sessions=sessions, refs=refs, keys=keys,
                                 date=build_date))


with open("sessions.csv", newline="") as fh:
    sessions = [clean(session) for session in csv.DictReader(fh)]
refs, keys = get_references(sessions)
build(sessions, refs, keys)
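
For reference, clean() implies the shape of the input: sessions.csv needs a header row with at least a date column in ISO YYYY-MM-DD form and a refs column of comma-separated BibTeX keys. The row below is a minimal sketch with hypothetical values, not taken from the real file:

# A hypothetical sessions.csv row (column names inferred from clean()):
#
#   date,refs
#   2014-09-03,"smith2012foo,jones2013bar"
#
# which clean() turns into template-ready values:
row = {"date": "2014-09-03", "refs": "smith2012foo,jones2013bar"}
print(clean(row))
# {'date': 'Wed, Sep. 03', 'refs': ['smith2012foo', 'jones2013bar']}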