"""Parsers for dealer CDX/iTraxx index-swaption quote emails.

Each dealer (BAML, MS, NOM, GS, SG, CITI, CS, BNP, JPM) mails option runs in
its own plain-text layout.  ``parse_email`` matches the subject line against
``regex_dict`` and dispatches to the dealer-specific ``parse_*`` function,
which returns:

* ``option_stack`` -- ``{(expiry, series, version): DataFrame}`` of quotes
* ``fwd_index``    -- per-expiry forward/reference data (list of dicts)

``write_todb`` persists both into Postgres.
"""

import pandas as pd
import re
import psycopg.sql as sql
import datetime
import pickle
import traceback
from . import logger
from functools import partial, lru_cache
from psycopg2.extras import execute_values
from pathlib import Path
from quantlib.time.imm import next_date
from quantlib.time.api import Date, pydate_from_qldate
from unicodedata import normalize


def list_imm_dates(date, n=10):
    """Return the next *n* IMM roll dates strictly after *date*.

    Parameters
    ----------
    date : datetime-like
        Anchor date; the first returned date is the next IMM date after it.
    n : int, optional
        Number of dates to generate (default 10, the historical behavior).

    Returns
    -------
    list[pd.Timestamp]
    """
    d = Date.from_datetime(date)
    dates = []
    for _ in range(n):
        d = next_date(d, False)
        dates.append(pd.Timestamp(pydate_from_qldate(d)))
    return dates


def makedf(r, indextype, quote_source):
    """Build a strike-indexed quote DataFrame from parsed rows.

    Parameters
    ----------
    r : sequence of row tuples/lists (or a DataFrame, as passed by JPM)
    indextype : str
        "IG"/"EU"/"XO"/"EUFS" get 8 columns; anything else (HY) also gets
        ``price_vol``.
    quote_source : str
        Dealer tag; BAML adds a ``gamma`` column, GS (IG/HY) adds ``tail``.

    Returns
    -------
    pd.DataFrame indexed by ``strike`` with all columns coerced to numeric.
    """
    if indextype in ["IG", "EU", "XO", "EUFS"]:
        cols = [
            "strike",
            "rec_bid",
            "rec_offer",
            "delta_rec",
            "pay_bid",
            "pay_offer",
            "delta_pay",
            "vol",
        ]
    else:
        cols = [
            "strike",
            "rec_bid",
            "rec_offer",
            "delta_rec",
            "pay_bid",
            "pay_offer",
            "delta_pay",
            "vol",
            "price_vol",
        ]
    if quote_source == "BAML":
        cols.append("gamma")
    if quote_source == "GS" and indextype in ("IG", "HY"):
        cols.append("tail")
    df = pd.DataFrame.from_records(r, columns=cols)
    # Percentage-quoted columns: strip "%" and scale to fractions.  The
    # fallbacks tolerate stray "n" characters (a known dealer-email typo)
    # and, last resort, coerce unparseable cells to NaN.
    for col in ["delta_rec", "delta_pay", "vol", "price_vol", "gamma", "tail"]:
        if col in df:
            try:
                df[col] = df[col].str.rstrip("%").astype("float") / 100
            except ValueError:  # typo in one email
                try:
                    df[col] = (
                        pd.to_numeric(df[col].str.replace("n", "").str.rstrip("%"))
                        / 100
                    )
                except ValueError:
                    df[col] = (
                        pd.to_numeric(
                            df[col].str.replace("n", "").str.rstrip("%"),
                            errors="coerce",
                        )
                        / 100
                    )
    if quote_source == "GS":
        # GS prints payer deltas with a sign convention we flip below.
        for col in ["pay_bid", "pay_offer", "rec_bid", "rec_offer"]:
            df[col] = df[col].str.strip("-")
        df["delta_pay"] *= -1
    # Coerce any remaining object columns to numbers, scrubbing thousands
    # separators and dealer decorations ("n", "\\", "M", "c") if needed.
    for k in df:
        if df.dtypes[k] == "object":
            df[k] = df[k].str.replace(",", "")
            try:
                df[k] = pd.to_numeric(df[k])
            except ValueError as e:
                logger.info(e)
                logger.error("couldn't convert column")
                df[k] = pd.to_numeric(
                    df[k]
                    .str.replace("n", "")
                    .str.replace("\\", "")
                    .str.replace("M", "")
                    .str.replace("c", "")
                )
    df.set_index("strike", inplace=True)
    return df


def parse_quotedate(fh, date_received):
    """Scan *fh* for an "At:"/"Sent:" line and parse its timestamp.

    Returns a tz-aware (America/New_York) timestamp.  Raises RuntimeError
    when no date line is found or none of the known formats match.
    """
    for line in fh:
        line = line.rstrip()
        if "At:" in line or "Sent:" in line:
            for p in [
                "%m/%d/%y %H:%M:%S",
                "%b %d %Y %H:%M:%S",
                "%m/%d %H:%M:%S",
                "%B %d, %Y %I:%M %p",
            ]:
                try:
                    quotedate = pd.to_datetime(line, format=p, exact=False)
                except ValueError:
                    continue
                else:
                    if quotedate.year == 1900:  # p='%m/%d %H:%M:%S' has no year
                        quotedate = quotedate.replace(year=date_received.year)
                    quotedate = quotedate.tz_localize("America/New_York")
                    break
            else:
                # BUGFIX: was a plain string, so {line} never interpolated.
                raise RuntimeError(f"can't parse date from {line}")
            return quotedate
    else:
        raise RuntimeError("no date received in the email")


def parse_refline(line):
    """Parse a BAML ``Ref: ... Expiry: ...`` header line into a dict.

    NOTE(review): the named-group names were lost in the original source
    (``(?P\\S+)`` is invalid regex); they were reconstructed from the field
    labels and downstream ``groupdict()`` usage -- confirm against the DB
    column names.
    """
    regex = (
        r"Ref:(?P<ref>\S+)\s+(?:Fwd Px:(?P<fwdprice>\S+)\s+)?"
        r"Fwd(?: Spd)?:(?P<fwdspread>\S+)\s+Fwd Bpv:(?P<fwdbpv>\S+)"
        r"\s+Expiry:(?P<expiry>\S+)"
    )
    m = re.match(regex, line)
    try:
        d = m.groupdict()
        d["expiry"] = pd.to_datetime(d["expiry"], format="%d-%b-%y")
    except AttributeError:
        raise RuntimeError(f"can't parse refline {line}")
    return d


def parse_baml(fh, index_desc, *args):
    """Parse a BAML email body: one "Ref..." header per expiry block."""
    option_stack = {}
    fwd_index = []
    line = ""
    while True:
        if line == "":
            try:
                line = next(fh)
            except StopIteration:
                break
        if line.startswith("Ref"):
            d = parse_refline(line)
            d.update(index_desc)
            df, line = parse_baml_block(fh, index_desc["index"])
            option_stack[
                (d["expiry"], index_desc["series"], index_desc["version"])
            ] = df
            fwd_index.append(d)
        else:
            line = ""
    return option_stack, fwd_index


def parse_baml_block(fh, indextype):
    """Read one BAML quote table; return (DataFrame, line-that-ended-it)."""
    next(fh)  # skip header
    r = []
    line = ""
    for line in fh:
        line = line.strip()
        if line.startswith("Ref") or line == "":
            break
        line = re.sub("[/|]", " ", line)
        vals = re.sub(" +", " ", line).rstrip().split(" ")
        if len(vals) < 3:  # something went wrong
            line = ""
            break
        r.append(vals)
    return makedf(r, indextype, "BAML"), line


def parse_bnp_block(fh, indextype, skip_header=True):
    """Read one BNP quote table into a DataFrame."""
    if skip_header:
        next(fh)  # skip header
    r = []
    for line in fh:
        line = line.strip()
        if "\xa0" in line:
            # BNP emails sometimes contain non-breaking spaces.
            line = normalize("NFKD", line)
        if line.startswith("Ref") or line == "":
            break
        line = re.sub("[/|]", " ", line)
        vals = re.sub(" +", " ", line).rstrip().split(" ")
        if indextype == "HY":
            # If line is missing vol, add it in
            # NOTE(review): this tests the *character* count of the line;
            # ``len(vals) == 7`` may have been intended -- confirm.
            if len(line) == 7:
                vals += [""]
            vals += [""]  # BNP HY runs carry no price_vol column
        if len(vals) < 3:  # something went wrong
            line = ""
            break
        r.append(vals)
    return makedf(r, indextype, "BNP")


def parse_cs_block(fh, indextype):
    """Read one CS quote table; CS prints payer quotes before receivers."""
    next(fh)  # skip header
    r = []
    for line in fh:
        line = line.strip()
        if (
            line.startswith("Ref")
            or line == ""
            or line.startswith("* Eur")
            or line.startswith("* No Knockout")
        ):
            break
        line = re.sub("[/|]", " ", line)
        vals = re.sub(" +", " ", line).rstrip().split(" ")
        if len(vals) == 1:
            # BUGFIX: logger.info was called with a bare extra arg, which
            # breaks %-style logging; use the file's "%s" convention.
            logger.info("spurious line %s", line)
            continue
        strike, *rest = vals
        # CS quotes payer first, so we need to move things around a bit
        if indextype == "IG":
            vals = (strike, *rest[3:6], *rest[:3], rest[6])
        elif indextype == "HY":
            vals = (strike, *rest[3:6], *rest[:3], *rest[6:8])
        r.append(vals)
    return makedf(r, indextype, "CS")


def parse_ms_block(fh, indextype):
    """Read one MS quote table (pipe-separated); None for an empty block."""
    line = next(fh)  # skip header
    if line.strip() == "":  # empty block
        return None
    r = []
    for line in fh:
        line = line.rstrip()
        if line == "":
            break
        strike, payer, receiver, vol = line.split("|")
        strike = strike.strip()
        if indextype == "HY":
            strike = strike.split()[0]
        try:
            pay_bid, pay_offer, pay_delta = payer.strip().split()
            rec_bid, rec_offer, rec_delta = receiver.strip().split()
        except ValueError:
            # Some runs quote a single mid instead of bid/offer.
            try:
                pay_mid, pay_delta = payer.strip().split()
                rec_mid, rec_delta = receiver.strip().split()
                pay_bid, pay_offer = pay_mid, pay_mid
                rec_bid, rec_offer = rec_mid, rec_mid
            except ValueError:
                # BUGFIX: was a plain string, so {line} never interpolated.
                raise RuntimeError(f"Couldn't parse line: {line}")
        vals = [strike, rec_bid, rec_offer, rec_delta, pay_bid, pay_offer, pay_delta]
        vol = vol.strip()
        if indextype == "HY":
            try:
                price_vol, vol = vol.replace("[", "").replace("]", "").split()
            except ValueError:
                price_vol, vol, vol_change, be = (
                    vol.replace("[", "").replace("]", "").split()
                )
            vals += [vol, price_vol]
        else:
            if " " in vol:
                vol, vol_change, be = vol.split()
            vals += [vol]
        r.append(vals)
    return makedf(r, indextype, "MS")


def parse_nomura_block(fh, indextype):
    """Read one Nomura quote table; return (next header line or None, df)."""
    next(fh)  # skip header
    r = []
    for line in fh:
        line = line.rstrip()
        if "EXPIRY" in line or line == "":
            break
        strike, receiver, payer, vol, _ = line.split("|", 4)
        strike = strike.strip()
        pay, pay_delta = payer.strip().split()
        rec, rec_delta = receiver.strip().split()
        pay_bid, pay_offer = pay.split("/")
        rec_bid, rec_offer = rec.split("/")
        vol = vol.strip()
        vals = [
            strike,
            rec_bid,
            rec_offer,
            rec_delta,
            pay_bid,
            pay_offer,
            pay_delta,
            vol,
        ]
        if indextype == "HY":
            # we don't have price vol
            vals.append(None)
        r.append(vals)
    else:
        # File ended without another EXPIRY header.
        return None, makedf(r, indextype, "NOM")
    return line, makedf(r, indextype, "NOM")


def parse_sg_block(fh, indextype, expiration_dates):
    """Read one SG quote table; each row carries its own expiry month.

    Returns (expiry, DataFrame).
    NOTE(review): if the block is empty, ``expiry`` is never bound and this
    raises NameError -- preserved as-is; confirm whether empty SG blocks
    can occur.
    """
    r = []
    for line in fh:
        line = line.rstrip()
        if line == "":
            break
        if indextype == "IG":
            option_type, strike, price, delta, vol, expiry = line.split()
        else:
            option_type, strike, strike_spread, price, delta, vol, expiry = line.split()
        # Resolve the quoted month to the matching IMM expiration date.
        expiry_month = datetime.datetime.strptime(expiry, "%b-%y").month
        expiry = next(
            pd.Timestamp(d) for d in expiration_dates if d.month == expiry_month
        )
        if option_type == "Rec":
            rec_bid, rec_offer = price.split("/")
            pay_bid, pay_offer = None, None
            rec_delta, pay_delta = delta, None
        else:
            pay_bid, pay_offer = price.split("/")
            rec_bid, rec_offer = None, None
            rec_delta, pay_delta = None, delta
        vals = [
            strike,
            rec_bid,
            rec_offer,
            rec_delta,
            pay_bid,
            pay_offer,
            pay_delta,
            vol,
        ]
        if indextype == "HY":
            vals.append(None)
        r.append(vals)
    return expiry, makedf(r, indextype, "SG")


def parse_gs_block(fh, indextype):
    """Read one GS (US) quote table; return (DataFrame, line-that-ended-it)."""
    # skip header
    while True:
        line = next(fh)
        if line.strip().startswith("Stk"):
            break
    r = []
    for line in fh:
        line = line.rstrip()
        if line == "":
            continue
        if (
            line.startswith("Expiry")
            or line.startswith("Assumes")
            or line.startswith("© Copyright")
        ):
            break
        line = line.replace("|", " ")
        vals = line.split()
        strike = vals.pop(0)
        if indextype == "HY" and "/" not in vals[0]:
            vals.pop(0)  # pop the spread
        pay, pay_delta = vals[:2]
        pay_bid, pay_offer = pay.split("/", 1)
        rec_bid, rec_offer = vals[2].split("/", 1)
        if rec_offer.count(".") == 2:
            # rec_offer and vol got fused into one token; split them apart.
            rec_offer, vol = rec_offer[:6], rec_offer[6:]
            tail = vals[5]
        else:
            vol = vals[3]
            tail = vals[6]
        vals = [strike, rec_bid, rec_offer, None, pay_bid, pay_offer, pay_delta, vol]
        if indextype == "HY":
            vals.append(None)
        vals.append(tail)
        r.append(vals)
    return makedf(r, indextype, "GS"), line


def parse_citi_block(fh, indextype):
    """Read one Citi quote table (pipe-separated) into a DataFrame."""
    next(fh)  # skip header
    r = []
    for line in fh:
        line = line.rstrip()
        if line == "":
            break
        if indextype == "HY":
            strike, payers, receivers, vol, price_vol = line.split("|")
        else:
            strike, payers, receivers, vol = line.split("|")
        strike = strike.strip()
        try:
            pay_bid, pay_offer = payers.split("/")
        except ValueError:
            pay_bid, pay_offer, pay_delta = None, None, None
        else:
            pay_bid = pay_bid.strip()
            pay_offer = pay_offer.strip()
            pay_offer, pay_delta = pay_offer.split()
        try:
            rec_bid, rec_offer = receivers.split("/")
        except ValueError:
            # Some of the rec information was not available at a certain strike
            rec_bid, rec_offer, rec_delta = None, None, None
        else:
            rec_bid = rec_bid.strip()
            rec_offer = rec_offer.strip()
            rec_offer, rec_delta = rec_offer.split()
        vol = vol.strip()
        vol = vol.split()[0]
        if indextype == "HY":
            price_vol = price_vol.strip()
            r.append(
                [
                    strike,
                    rec_bid,
                    rec_offer,
                    rec_delta,
                    pay_bid,
                    pay_offer,
                    pay_delta,
                    vol,
                    price_vol,
                ]
            )
        else:
            r.append(
                [
                    strike,
                    rec_bid,
                    rec_offer,
                    rec_delta,
                    pay_bid,
                    pay_offer,
                    pay_delta,
                    vol,
                ]
            )
    return makedf(r, indextype, "CITI")


def parse_jpm_block(fh, indextype):
    """Read one JPM two-sided table (receiver|payer columns per row)."""
    receivers = {}
    payers = {}
    for line in fh:
        line = line.strip()
        if "**" in line or line == "":
            break
        if "|" in line:
            if line.startswith("K"):  # column-header row
                continue
            rec_strike, rec_data, pay_strike, pay_data = [
                item.strip() for item in line.split("|")
            ]
            rec_data = rec_data.replace("/", " ")
            if rec_strike != "-":
                receivers[rec_strike] = rec_data.split()[:4]
            pay_data = pay_data.replace("/", " ")
            if pay_strike != "-":
                payers[pay_strike] = pay_data.split()[:4]
    cols = ["bid", "offer", "delta", "vol"]
    pay = pd.DataFrame.from_dict(payers, orient="index", columns=cols).add_prefix(
        "pay_"
    )
    rec = pd.DataFrame.from_dict(receivers, orient="index", columns=cols).add_prefix(
        "rec_"
    )
    df = pd.concat([rec, pay], axis=1)
    # Prefer the receiver-side vol, fall back to payer-side.
    df["vol"] = df["rec_vol"].fillna(df["pay_vol"])
    df = (
        df.reset_index()
        .drop(["rec_vol", "pay_vol"], axis=1)
        .rename(
            columns={
                "index": "strike",
                "pay_vol": "vol",  # NOTE: dead entry -- pay_vol is already dropped
                "rec_delta": "delta_rec",
                "pay_delta": "delta_pay",
            }
        )
    )
    # No price vol
    if indextype == "HY":
        df["price_vol"] = None
    return makedf(df, indextype, "JPM"), line


def parse_ms(fh, index_desc, *args):
    """Parse an MS email body: one "EXPIRY ..." header per block."""
    option_stack = {}
    fwd_index = []
    for line in fh:
        line = line.rstrip()
        if "EXPIRY" in line:
            expiry = line.split(" ")[1]
            expiry = pd.to_datetime(expiry, format="%d-%b-%Y")
            block = parse_ms_block(fh, index_desc["index"])
            if index_desc["index"] == "HY":
                fwdprice = line.split(" ")[3]
                fwdspread = line.split(" ")[5]
                fwd_index.append(
                    {
                        "expiry": expiry,
                        "fwdprice": fwdprice,
                        "fwdspread": fwdspread,
                        **index_desc,
                    }
                )
            else:
                fwdspread = line.split(" ")[3]
                fwd_index.append(
                    {"expiry": expiry, "fwdspread": fwdspread, **index_desc}
                )
            if block is None or block.empty:
                logger.warning(f"MS: block is empty for {expiry} expiry")
            else:
                option_stack[
                    (expiry, index_desc["series"], index_desc["version"])
                ] = block
    return option_stack, fwd_index


def parse_nom(fh, index_desc, *args):
    """Parse a Nomura email body; blocks may chain into the next EXPIRY."""
    option_stack = {}
    fwd_index = []

    def aux(line, fh, index_desc, option_stack, fwd_index):
        expiry = line.split(" ")[0]
        expiry = pd.to_datetime(expiry, format="%d-%b-%y")
        next_line, df = parse_nomura_block(fh, index_desc["index"])
        option_stack[(expiry, index_desc["series"], index_desc["version"])] = df
        fwd_index.append({"expiry": expiry, **index_desc})
        if next_line:
            if "EXPIRY" in next_line:
                aux(next_line, fh, index_desc, option_stack, fwd_index)
            else:
                # BUGFIX: the message previously interpolated the *outer*
                # header line instead of the offending one.
                raise RuntimeError(f"Don't know what to do with {next_line}.")

    for line in fh:
        line = line.rstrip()
        if "EXPIRY" in line:
            aux(line, fh, index_desc, option_stack, fwd_index)
    return option_stack, fwd_index


def parse_sg(fh, index_desc):
    """Parse an SG email body: one "Type..." header per block."""
    option_stack = {}
    fwd_index = []
    expiration_dates = index_desc.pop("expiration_dates")
    for line in fh:
        line = line.rstrip()
        if line.startswith("Type"):
            expiry, df = parse_sg_block(fh, index_desc["index"], expiration_dates)
            option_stack[(expiry, index_desc["series"], index_desc["version"])] = df
            fwd_index.append({"expiry": expiry, **index_desc})
    return option_stack, fwd_index


def parse_gs(fh, index_desc):
    """Parse a GS (US) email body: "Expiry ddMMMyy (fwd...)" headers."""
    option_stack = {}
    fwd_index = []
    pat = re.compile(r"Expiry (\d{2}\w{3}\d{2}) \((?:([\S]+) )?([\S]+)\)")
    line = next(fh).strip()
    while True:
        if line.startswith("Expiry"):
            if m := pat.match(line):
                expiry, fwdprice, fwdspread = m.groups()
                expiry = pd.to_datetime(expiry, format="%d%b%y")
                fwd_index.append(
                    {
                        **index_desc,
                        **{
                            "fwdspread": fwdspread,
                            "fwdprice": fwdprice,
                            "expiry": expiry,
                        },
                    }
                )
                try:
                    (
                        option_stack[
                            (expiry, index_desc["series"], index_desc["version"])
                        ],
                        line,
                    ) = parse_gs_block(fh, index_desc["index"])
                except IndexError as e:
                    logger.debug(traceback.format_exc())
                    logger.error(f"Something is wrong with file {Path(fh.name).stem}")
                    raise RuntimeError(
                        f"Something is wrong with file {Path(fh.name).stem}"
                    ) from e
            else:
                logger.error("Can't parse expiry line: %s", line)
                # BUGFIX: advance past the bad line -- previously the loop
                # re-tested the same line forever (infinite loop).
                try:
                    line = next(fh).strip()
                except StopIteration:
                    break
        elif line.startswith("Assumes"):
            break
        else:
            try:
                line = next(fh).strip()
            except StopIteration:
                break
    return option_stack, fwd_index


def parse_citi(fh, index_desc):
    """Parse a Citi email body: "Exp: dd-MMM-yy ... Ref: x" headers."""
    option_stack = {}
    fwd_index = []
    pat = re.compile(r"Exp: (\d{2}-\w{3}-\d{2})[^R]*Ref:[^\d]*([\d.]+)")
    for line in fh:
        line = line.strip()
        if line.startswith("Exp"):
            if m := pat.match(line):
                expiry, ref = m.groups()
                expiry = pd.to_datetime(expiry, format="%d-%b-%y")
                fwd_index.append({"ref": ref, "expiry": expiry, **index_desc})
                option_stack[
                    (expiry, index_desc["series"], index_desc["version"])
                ] = parse_citi_block(fh, index_desc["index"])
            else:
                logger.error("Can't parse expiry line: %s", line)
    return option_stack, fwd_index


def parse_cs(fh, index_desc):
    """Parse a CS email body: "Ref: ... Expiry: ..." headers per block.

    NOTE(review): the named-group names were lost in the original source;
    reconstructed from ``groupdict()`` usage and the MS parser's key names
    (``fwdprice`` for HY, ``fwdspread`` for IG) -- confirm against the DB.
    """
    option_stack = {}
    fwd_index = []
    regex = {
        "HY": r"Ref:\s*(?P<ref>[\d.]+)\s*Fwd: (?P<fwdprice>[\d.]+)\s*Expiry: (?P<expiry>\d{2}-\w{3}-\d{2})",
        "IG": r"Ref:\s*(?P<ref>[\d.]+)\s*Fwd: (?P<fwdspread>[\d.]+)\s*Expiry: (?P<expiry>\d{2}-\w{3}-\d{2})\s*Fwd dv01:\s*(?P<fwdbpv>[\d.]*).*",
    }
    pat = re.compile(regex[index_desc["index"]])
    for line in fh:
        line = line.strip()
        if line.startswith("Ref"):
            if m := pat.match(line):
                d = m.groupdict()
                d["expiry"] = pd.to_datetime(d["expiry"], format="%d-%b-%y")
                fwd_index.append({**index_desc, **d})
                option_stack[
                    (d["expiry"], index_desc["series"], index_desc["version"])
                ] = parse_cs_block(fh, index_desc["index"])
            else:
                logger.error(
                    "Can't parse expiry line: %s for filename: %s", line, fh.name
                )
    return option_stack, fwd_index


def parse_bnp(fh, index_desc):
    """Parse a BNP email body: "Ref x - MMMyy - Fwd y" headers per block.

    NOTE(review): the named-group names were lost in the original source;
    reconstructed from ``groupdict()`` usage (``fwdspread`` is renamed to
    ``fwdprice`` for HY below) -- confirm against the DB.
    """
    option_stack = {}
    fwd_index = []
    regex = r"Ref\s+(?P<ref>[\d.]+)\s+-\s+(?P<expiry>\w{3}\d{2})\s+-\s+Fwd\s+(?P<fwdspread>[\d.]+)"
    expiration_dates = index_desc.pop("expiration_dates")
    pat = re.compile(regex)
    for line in fh:
        line = line.strip()
        if line.startswith("Ref"):
            # Some runs put the table header on the same line as the ref.
            c = line.find("Strike")
            if c != -1:
                line = line[:c].rstrip()
            if m := pat.match(line):
                d = m.groupdict()
                if index_desc["index"] == "HY":
                    d["fwdprice"] = d.pop("fwdspread")
                expiry_month = datetime.datetime.strptime(d["expiry"], "%b%y").month
                d["expiry"] = next(
                    imm for imm in expiration_dates if imm.month == expiry_month
                )
                fwd_index.append({**index_desc, **d})
                option_stack[
                    (d["expiry"], index_desc["series"], index_desc["version"])
                ] = parse_bnp_block(fh, index_desc["index"], c == -1)
            else:
                logger.error(f"Can't parse expiry line: {line} for filename: {fh.name}")
    return option_stack, fwd_index


def parse_jpm(fh, index_desc):
    """Parse a JPM email body: "** Fwd @x, Delta @y" headers per block.

    NOTE(review): the named-group names were lost in the original source;
    reconstructed from ``groupdict()`` usage -- confirm against the DB.
    """
    option_stack = {}
    fwd_index = []
    regex = (
        r"JPM (CDX|iTrx) Options: (HY|IG|MAIN|XOVER|FINSEN) "
        r"\(\w(?P<series>\d+)V(?P<version>\d+)\) "
        r"(?P<expiry>[\d]+-[\w]+-[\d]+) "
        r"\*\* Fwd @(?P<fwdref>[\d.]+), Delta @(?P<delta>[\d.]+)"
    )
    pat = re.compile(regex)
    line = next(fh).strip()
    while True:
        if "**" in line:
            if m := pat.match(line):
                d = m.groupdict()
                d["fwdprice" if index_desc["index"] == "HY" else "fwdspread"] = d.pop(
                    "fwdref"
                )
                d["expiry"] = pd.to_datetime(d["expiry"], format="%d-%b-%y")
                fwd_index.append({**index_desc, **d})
                try:
                    (
                        option_stack[(d["expiry"], d["series"], d["version"])],
                        line,
                    ) = parse_jpm_block(fh, index_desc["index"])
                except IndexError as e:
                    logger.debug(traceback.format_exc())
                    logger.error(f"Something is wrong with file {Path(fh.name).stem}")
                    raise RuntimeError(
                        f"Something is wrong with file {Path(fh.name).stem}"
                    ) from e
            else:
                logger.error(
                    "Can't parse expiry line: %s for filename: %s", line, fh.name
                )
                # BUGFIX: advance past the bad line -- previously the loop
                # re-tested the same line forever (infinite loop).
                try:
                    line = next(fh).strip()
                except StopIteration:
                    break
        elif line.startswith("Payer"):
            break
        else:
            try:
                line = next(fh).strip()
            except StopIteration:
                break
    return option_stack, fwd_index


def parse_gs_eu_block(fh, indextype):
    """Read one GS (Europe) quote table; return (DataFrame, ending line)."""
    r = []
    for line in fh:
        line = line.strip()
        if line == "":
            break
        if "%" in line:
            line = line.replace("/", " ")
            (
                strike,
                pay_bid,
                pay_offer,
                delta_pay,
                rec_bid,
                rec_offer,
                vol,
                _,
                _,
            ) = line.split()
            delta_pay = float(delta_pay.rstrip("%"))
            # Receiver delta is the complement of the payer delta.
            delta_rec = 100 + delta_pay
            r.append(
                [
                    strike,
                    rec_bid,
                    rec_offer,
                    str(delta_rec),
                    pay_bid,
                    pay_offer,
                    str(delta_pay),
                    vol,
                ]
            )
    return makedf(r, indextype, "GS"), line


def parse_gs_eu(fh, index_desc):
    """Parse a GS (Europe) email body: "MMMyy | ..." headers per block."""
    option_stack = {}
    fwd_index = []
    pat = re.compile(r"(\w{3}\d{2})")
    line = next(fh).strip()
    while True:
        if ("|" in line) and ("STK" not in line):
            if m := pat.match(line):
                expiry = m.group(1)
                expiry = pd.to_datetime(expiry, format="%b%y")
                # Quoted month -> actual IMM date.
                expiry = pd.Timestamp(
                    pydate_from_qldate(next_date(Date.from_datetime(expiry), False))
                )
                try:
                    (
                        option_stack[
                            (expiry, index_desc["series"], index_desc["version"])
                        ],
                        line,
                    ) = parse_gs_eu_block(fh, index_desc["index"])
                    fwd_index.append({**index_desc, "expiry": expiry})
                except IndexError as e:
                    logger.debug(traceback.format_exc())
                    logger.error(f"Something is wrong with file {Path(fh.name).stem}")
                    raise RuntimeError(
                        f"Something is wrong with file {Path(fh.name).stem}"
                    ) from e
            else:
                # BUGFIX: was logger.error("...:", line, "filename:", fh.name),
                # which breaks %-style logging; also advance past the bad
                # line so the loop cannot spin forever.
                logger.error("Can't parse expiry line: %s filename: %s", line, fh.name)
                try:
                    line = next(fh).strip()
                except StopIteration:
                    break
        elif line.startswith("Keywords"):
            break
        else:
            try:
                line = next(fh).strip()
            except StopIteration:
                break
    return option_stack, fwd_index


# Subject-line patterns mapped to (quote_source, parser).  Groups are, in
# order: index type, series, [version,] and usually the reference level.
regex_dict = {
    re.compile(r"(?:Fwd:){0,2}(?:BofA )?(\D{2})(\d{1,2}).*Ref[^\d]*([\d.]+)"): (
        "BAML",
        parse_baml,
    ),
    re.compile(
        r"[^$]*(?:\$|\€){1,2} MS (?:CDX|ITX) OPTIONS: (IG|HY|MAIN|XO)(\d{2})[^-]*- REF[^\d]*([\d.]+)"
    ): ("MS", parse_ms),
    re.compile(r"(?:Fwd:)?CDX (IG|HY)(\d{2}).*- REF:[^\d]*([\d.]+)"): (
        "NOM",
        parse_nom,
    ),
    re.compile(r"(?:FW: |Fwd: )?GS (IG|HY)(\d{2}) 5y.*- Ref [^\d]*([\d.]+)"): (
        "GS",
        parse_gs,
    ),
    re.compile(r"SG OPTIONS - CDX (IG|HY) S(\d{2}).* REF[^\d]*([\d.]+)"): (
        "SG",
        parse_sg,
    ),
    re.compile(r"(?:Fwd:)?Citi Options: (IG|HY|MA|XO)(\d{2}) 5Y"): ("CITI", parse_citi),
    re.compile(
        r"CS CDX (IG|HY)(\d{2})_?v?(\d)? Options -\s+(?:\d{2}/\d{2}/\d{2}\s+)?Ref = ([\d.]+)[^\d]*"
    ): ("CS", parse_cs),
    re.compile(r"(?:BNP )?CDX OPTIONS RUN: (IG|HY)(\d{2}).*"): ("BNP", parse_bnp),
    re.compile(
        r"JPM (?:CDX|iTrx) Options: (?:CDX|ITRAXX).(IG|HY|XOVER|MAIN|FINSEN) S(\d+)(?:\/\d+)? 5Y (?:V2&V1 )?\S+(?:-\S+)? \[ref ([\d.]*)\]"
    ): ("JPM", parse_jpm),
    re.compile(
        r"GS Options - iTraxx (Xover|Main|FinSen)(\d+) 5Y V(\d+) Options Run - Ref ([\d.]+)"
    ): ("GS", parse_gs_eu),
}


def get_current_version(index, series, d, conn):
    """Return the lowest index version still current on date *d*."""
    with conn.cursor() as c:
        c.execute(
            "SELECT min(version) FROM index_version "
            "WHERE index=%s and series=%s and %s <= lastdate",
            (index.upper(), series, d),
        )
        (version,) = c.fetchone()
    return version


def parse_email(email: Path, date_received: datetime.date, conn):
    """Parse one quote email file.

    Returns ``((quotedate, indextype, source), (option_stack, fwd_index))``.
    Raises RuntimeError when the subject line matches no known dealer or the
    body yields no forward data.
    """
    # Cache version lookups per (index, series, quotedate) for this email.
    get_version = lru_cache()(partial(get_current_version, conn=conn))
    with email.open("rt") as fh:
        subject = fh.readline().lstrip()
        for regex, (source, parse_fun) in regex_dict.items():
            if m := regex.match(subject):
                version = None
                if source in ["CITI", "BNP"]:
                    indextype, series = m.groups()
                elif source in ("GS", "CS") and len(m.groups()) == 4:
                    indextype, series, version, ref = m.groups()
                    ref = float(ref)
                else:
                    indextype, series, ref = m.groups()
                    ref = float(ref)
                # Normalize European index names to internal codes.
                indextype = "EU" if indextype.upper() in ("MAIN", "MA") else indextype
                indextype = "XO" if indextype.upper() == "XOVER" else indextype
                indextype = "EUFS" if indextype.upper() == "FINSEN" else indextype
                series = int(series)
                cur_pos = fh.tell()
                try:
                    quotedate = parse_quotedate(fh, date_received)
                except RuntimeError:
                    logger.warning(
                        "couldn't find received date in message: "
                        f"{email.name}, using {date_received}"
                    )
                    quotedate = pd.Timestamp(date_received).tz_localize(
                        "America/New_York"
                    )
                # Rewind: the body parser re-reads from after the subject.
                fh.seek(cur_pos)
                if version is None:
                    version = get_version(indextype, series, quotedate)
                key = (quotedate, indextype, source)
                index_desc = {
                    "quotedate": quotedate,
                    "index": indextype,
                    "series": series,
                    "version": version,
                }
                if source in ["GS", "MS", "NOM", "SG"]:
                    index_desc["ref"] = ref
                if source in ["BNP", "SG"]:
                    index_desc["expiration_dates"] = list_imm_dates(quotedate)
                option_stack, fwd_index = parse_fun(fh, index_desc)
                if fwd_index:
                    fwd_index = pd.DataFrame.from_records(fwd_index, index="quotedate")
                    fwd_index["quote_source"] = source
                else:
                    raise RuntimeError("empty email " + fh.name)
                return (key, (option_stack, fwd_index))
        else:
            raise RuntimeError(
                f"can't parse subject line: {subject} for email {email.name}"
            )


def write_todb(swaption_stack, index_data, conn):
    """Insert forward/ref rows and their quotes into Postgres.

    ``index_data`` rows go into ``swaption_ref_quotes``; for each newly
    inserted row (RETURNING ref_id) the matching slice of ``swaption_stack``
    is written to ``swaption_quotes``.  Commits at the end.
    """
    sql_str = sql.SQL(
        "INSERT INTO swaption_ref_quotes({}) VALUES({}) "
        "ON CONFLICT DO NOTHING RETURNING ref_id"
    ).format(
        sql.SQL(", ").join(sql.Identifier(c) for c in index_data.columns),
        sql.SQL(", ").join(sql.Placeholder() * len(index_data.columns)),
    )
    cols = [sql.Identifier(e) for e in (*swaption_stack.columns, "ref_id")]
    sql_quotes = sql.SQL(
        "INSERT INTO swaption_quotes({}) VALUES ({}) ON CONFLICT DO NOTHING"
    ).format(
        sql.SQL(", ").join(cols), sql.SQL(",").join([sql.Placeholder()] * len(cols))
    )
    index_data["expiry"] = pd.to_datetime(index_data.expiry)
    with conn.cursor() as c:
        for t in index_data.itertuples(index=False):
            c.execute(sql_str, t)
            try:
                (ref_id,) = next(iter(c))
            except StopIteration:
                # Conflict: ref row already present, nothing returned.
                continue
            else:
                try:
                    df = swaption_stack.loc[
                        (
                            t.quotedate,
                            t.index,
                            t.series,
                            t.version,
                            t.expiry,
                            t.quote_source,
                        ),
                    ]
                except KeyError:
                    logger.warning(
                        "missing key in swaption_stack: "
                        f"{t.quotedate}, {t.index}, {t.series}, {t.version}, {t.expiry}, {t.quote_source}"
                    )
                    continue
                except IndexError:
                    # BUGFIX: was a bare breakpoint() (debugging leftover);
                    # log and skip the row like the KeyError branch.
                    logger.error(
                        "unexpected index shape in swaption_stack: "
                        f"{t.quotedate}, {t.index}, {t.series}, {t.version}, {t.expiry}, {t.quote_source}"
                    )
                    continue
                df["ref_id"] = ref_id
                c.executemany(sql_quotes, df.itertuples(index=False))
    conn.commit()


def get_email_list(date):
    """Return the list of email file names already uploaded for *date*.

    Parameters
    ----------
    date : string
    """
    with open(".pickle", "rb") as fh:
        already_uploaded = pickle.load(fh)
    df = pd.DataFrame.from_dict(already_uploaded, orient="index")
    df.columns = ["quotedate"]
    df = df.reset_index().set_index("quotedate")
    return df.loc[date, "index"].tolist()


def pickle_drop_date(date):
    """Remove every entry for *date* from the upload-tracking pickle."""
    with open(".pickle", "rb") as fh:
        already_uploaded = pickle.load(fh)
    newdict = {k: v for k, v in already_uploaded.items() if v.date() != date}
    with open(".pickle", "wb") as fh:
        pickle.dump(newdict, fh)