library("parallel") if(.Platform$OS.type == "unix"){ root.dir <- "/home/share/CorpCDOs" }else{ root.dir <- "//WDSENTINEL/share/CorpCDOs" } source(file.path(root.dir, "code", "R", "cds_utils.R")) source(file.path(root.dir, "code", "R", "cds_functions_generic.R")) source(file.path(root.dir, "code", "R", "tranche_functions.R")) source(file.path(root.dir, "code", "R", "yieldcurve.R")) source(file.path(root.dir, "code", "R", "optimization.R")) tradedate <- Sys.Date() MarkitData <- getMarkitIRData(tradedate) L1m <- buildMarkitYC(MarkitData, dt = 1/12) L2m <- buildMarkitYC(MarkitData, dt = 1/6) L3m <- buildMarkitYC(MarkitData) L6m <- buildMarkitYC(MarkitData, dt = 1/2) setEvaluationDate(as.Date(MarkitData$effectiveasof)) ## calibrate HY21 ## calibrate the single names curves singlenames.data <- read.table(file="clipboard", sep="\t", header=T) nondefaulted <- singlenames.data[!singlenames.data$ticker %in% hy19$defaulted,] bps <- 1e-4 cdsdates <- as.Date(character(0)) for(tenor in paste0(1:5, "y")){ cdsdates <- c(cdsdates, cdsMaturity(tenor, date=tradedate)) } hy21portfolio <- c() cs <- couponSchedule(IMMDate(tradedate), cdsdates[length(cdsdates)], "Q", "FIXED", 1, tradedate, IMMDate(tradedate, "prev")) for(i in 1:nrow(nondefaulted)){ SC <- new("creditcurve", recovery=nondefaulted$recovery[i]/100, startdate=today(), issuer=as.character(nondefaulted$ticker[i])) quotes <- data.frame(maturity=cdsdates, upfront = as.numeric(nondefaulted[i,4:8])*0.01, running=rep(nondefaulted$running[i]*bps, 5)) SC@curve <- cdshazardrate(quotes, nondefaulted$recovery[i]/100) hy21portfolio <- c(hy21portfolio, SC) } issuerweights <- rep(1/length(hy21portfolio), length(hh21portfolio)) hy19$indexref <- 1.0275 hy19portfolio.tweaked <- tweakcurves(hy19portfolio, hy19) SurvProb <- SPmatrix(hy19portfolio.tweaked, hy19) ## load common parameters K <- c(0, 0.15, 0.25, 0.35, 1) Kmodified <- adjust.attachments(K, hy19$loss, hy19$factor) tranche.upf <- c(43.375, 95.125, 108.25, 116.8125) tranche.running <- c(0.05, 0.05, 0.05, 0.05) Ngrid <- 2 * nrow(nondefaulted)+1 recov <- sapply(hy19portfolio.tweaked, attr, "recovery") cs <- couponSchedule(nextIMMDate(today()), hy19$maturity,"Q", "FIXED", 0.05, 0) ##calibrate by modifying the factor distribution bottomup <- 1:3 topdown <- 2:4 n.int <- 500 n.credit <- length(hy19portfolio) errvec <- c() quadrature <- gauss.quad.prob(n.int, "normal") w <- quadrature$weights Z <- quadrature$nodes w.mod <- w defaultprob <- 1 - SurvProb p <- defaultprob rho <- 0.45 clusterExport(cl, list("root.dir", "shockprob", "issuerweights", "rho", "Z", "lossrecovdist.term", "lossrecovdist", "lossdistC", "Ngrid", "tranche.pvvec", "tranche.pv", "tranche.pl", "tranche.cl", "trancheloss", "trancherecov", "pos", "Kmodified", "cs")) ## TODO: investigate if this is the right thing w.r.t recovery parf <- function(i){ pshocked <- apply(p, 2, shockprob, rho=rho, Z=Z[i]) S <- 1 - Rstoch[i,,] dist <- lossrecovdist.term(pshocked, , issuerweights, S, Ngrid) return( tranche.pvvec(Kmodified, dist$L, dist$R, cs)) } for(l in 1:100){ Rstoch <- array(0, dim=c(n.int, n.credit, ncol(SurvProb))) for(t in 1:ncol(SurvProb)){ for(i in 1:n.credit){ Rstoch[,i,t] <- stochasticrecov(recov[i], 0, Z, w.mod, rho, defaultprob[i,t], p[i,t]) } } clusterExport(cl, list("Rstoch", "p")) result <- parSapply(cl, 1:n.int, parf) ## solve the optimization problem program <- KLfit(100*(result[bottomup,]+1), w, tranche.upf[bottomup]) err <- 0 for(i in 1:n.credit){ for(j in 1:ncol(p)){ err <- err + abs(crossprod(shockprob(p[i,j], rho, Z), program$weight) - defaultprob[i,j]) } } errvec <- c(errvec, err) ## update the new probabilities p <- MFupdate.prob(Z, program$weight, rho, defaultprob) errvec <- c(errvec, err) w.mod <- program$weight cat(err,"\n") } write.table(data.frame(Z=Z, w=w.mod), file=file.path(root.dir, "Scenarios", "Calibration", paste0("calibration-", Sys.Date(), ".csv")), col.names=T, row.names=F, sep=",") save(singlenames.data, hy19, tranche.upf, file = file.path(root.dir, "Scenarios", "Calibration", paste0("marketdata-", Sys.Date(), ".RData"))) ## calibrate the tranches using base correlation rhovec <- c() f <- function(rho, ...){ temp <- BClossdistC(SurvProb, issuerweights, recov, rho, Ngrid) bp <- 100*(1+1/(Kmodified[i]-Kmodified[i-1]) * (tranche.pv(temp$L, temp$R, cs, 0, Kmodified[i], Ngrid) - tranche.pv(oldtemp$L, oldtemp$R, cs, 0, Kmodified[i-1], Ngrid))) return( abs(tranche.upf[i-1]-bp)) } for(i in 2:length(Kmodified)){ rho <- optimize(f, interval=c(0,1), SurvProb, issuerweights, recov, Ngrid, tranche.upf, Kmodified, cs, oldtemp)$minimum oldtemp <- BClossdistC(SurvProb, issuerweights, recov, rho, Ngrid) rhovec <- c(rhovec, rho) } rhovec <- c(0, rhovec) deltas <- c() for(i in 2:5){ deltas <- c(deltas, BCtranche.delta(hy19portfolio.tweaked, hy19, 0.05, K[i-1], K[i], rhovec[i-1], rhovec[i], Ngrid)) } ## computes MFdeltas newportf <- hy19portfolio.tweaked eps <- 1e-4 for(i in 1:length(newportf)){ newportf[[i]]@curve@hazardrates <- hy19portfolio.tweaked[[i]]@curve@hazardrates * (1 + eps) } SurvProb2 <- SPmatrix(newportf, hy19) p2 <- MFupdate.prob(Z, w.mod, rho, 1-SurvProb2) dPVtranches <- MFtranche.pv(cl, cs, w.mod, rho, 1-SurvProb2, p2, issuerweights) - MFtranche.pv(cl, cs, w.mod, rho, defaultprob, p, issuerweights) dPVindex <- indexpv(newportf, hy19)-indexpv(hy19portfolio.tweaked, hy19) MFdeltas <- dPVtranches/dPVindex #global deltas PVtranches <- MFtranche.pv(cl, cs, w.mod, rho, 1-SurvProb, p, issuerweights, Kmodified) pnlindex <- 1+t(PVtranches$pv.w)%*%diff(Kmodified)-hy19$indexref plot(1-cumsum(w.mod),PVindex, main="pnl of going long the index", xlab="Market factor cumulative probability distribution", type="l") pnltranches <- sweep(1+t(PVtranches$pv.w), 2, tranche.upf/100) matplot(1-cumsum(w.mod), pnltranches, 2, tranche.upf/100, main="pnl of going long the tranches", xlab="Market factor cumulative probability distribution", type="l") global.deltas <- rep(0,4) for(i in 1:4){ global.deltas[i] <- lm(pnltranches[,i]~0+pnlindex, weights=w.mod)$coef } pnlhedged.model <- pnltranches-pnlindex%*%t(global.deltas) pnlhedged.market <- pnltranches-pnlindex%*%t(market.deltas) ## generate a bunch of plots postscript("hedged tranches global.eps") matplot(1-cumsum(w.mod), pnlhedged.model, type="l", ylab="pnl of the hedged package (global deltas)", xlab="Market factor cumulative probability distribution") legend(0.35, y=0.75, c("0-15","15-25","25-35","35-100"), col=1:4, lty=1:4) dev.off() postscript("hedged tranches market.eps") matplot(1-cumsum(w.mod), pnlhedged.market, type="l", ylab="Pnl", main="pnl of the hedged package (market deltas)", xlab="Market factor cumulative probability distribution") legend(0.15, y=1.15, c("0-15","15-25","25-35","35-100"), col=1:4, lty=1:4) dev.off() postscript("0-15 hedged.eps") matplot(1-cumsum(w.mod), cbind(pnlhedged.model[,1], pnlhedged.market[,1]), type="l", ylab="Pnl", main="Pnl of the hedged package (market vs global deltas)\n 0-15 tranche", xlab="Market factor cumulative probability distribution") dev.off() postscript("15-25 hedged.eps") matplot(1-cumsum(w.mod), cbind(pnlhedged.model[,2], pnlhedged.market[,2]), type="l", ylab="Pnl", main="Pnl of the hedged package (market vs global deltas)\n 15-25 tranche", xlab="Market factor cumulative probability distribution") dev.off() postscript("25-35 hedged.eps") matplot(1-cumsum(w.mod), cbind(pnlhedged.model[,3], pnlhedged.market[,3]), type="l", ylab="Pnl", main="Pnl of the hedged package (market vs global deltas)\n 25-35 tranche", xlab="Market factor cumulative probability distribution") dev.off() postscript("35-100 hedged.eps") matplot(1-cumsum(w.mod), cbind(pnlhedged.model[,4], pnlhedged.market[,4]), type="l", ylab="Pnl", main="Pnl of the hedged package (market vs global deltas)\n 35-100 tranche", xlab="Market factor cumulative probability distribution") dev.off() ## scenario based pricing ## generate the scenarios by finding the quantiles of the loss and recovery distributions n.scenarios <- 100 percentiles <- (seq(0, 1, length=n.scenarios+1)[-1]+ seq(0, 1, length=n.scenarios+1)[-(n.scenarios+1)])/2 l <- matrix(0, ncol(defaultprob), n.scenarios) r <- matrix(0, ncol(defaultprob), n.scenarios) MFdist <- MFlossdistrib(w.mod, rho, defaultprob, p, issuerweights, Ngrid) MFdist.orig <- MFlossdistrib(w, rho, defaultprob, defaultprob, issuerweights, Ngrid) lossdist.orig <- BClossdistC(SurvProb, issuerweights, recov, rho, 101, 500) for(i in 1:17){ Lfun <- splinefun(c(0, cumsum(MFdist$L[,i])),c(0, seq(0, 1, length=Ngrid)), "monoH.FC") Rfun <- splinefun(c(0, cumsum(MFdist$R[,i])),c(0, seq(0, 1, length=Ngrid)), "monoH.FC") for(j in 1:99){ l[i, j] <- Lfun(d1[j]) r[i, j] <- Rfun(d1[j]) } } ## joint generation of scenarios for loss and recovery distribution clusterExport(cl, list("lossrecovdist.joint.term", "lossdistribC.joint")) MFdist2 <- MFlossdistrib2(cl, w.mod, rho, defaultprob, p, issuerweights, Ngrid) ## memory never gets released by the clusters for some reasons, needs to call gc twice clusterCall(cl, gc) clusterCall(cl, gc) gc() #L+R<=1 test <- apply(MFdist2[21,,], 2, rev) test.tri <- lower.tri(test) sum(test[test.tri])#close to 1 ## D=L+R dist <- MFdist2[21,,] supportD <- outer(seq(0,1, length=Ngrid), seq(0,1, length=Ngrid), "+") ## compute the joint density of (L/D, D) ## u=x/(x+y) ## v=x+y x <- seq(0,1, length=Ngrid) %o% seq(0,1, length=Ngrid) y <- sweep(-x, 2, seq(0, 1, length=Ngrid), "+") xgrid <- round(x/0.005) ygrid <- round(y/0.005) distchv <- matrix(0, Ngrid, Ngrid) for(i in 1:Ngrid){ for(j in 1:Ngrid){ distchv[i,j] <- dist[xgrid[i,j]+1, ygrid[i,j]+1]*seq(0,1, length=Ngrid)[j] } } ## compute reinvesting distribution beta <- 1.1 f <- function(l, r)(1-l-r)/(1-r)^beta test <- outer(seq(0, 1, length=Ngrid), seq(0, 1, length=Ngrid), f) plot(density(x=test[-Ngrid*Ngrid], weights=dist[-Ngrid*Ngrid], from=0, to=5)) ## two dimensional scenarios n.scenarios <- 100 percentiles <- (seq(0, 1, length=n.scenarios+1)[-1]+ seq(0, 1, length=n.scenarios+1)[-(n.scenarios+1)])/2 l <- matrix(0, ncol(defaultprob), n.scenarios) r <- matrix(0, ncol(defaultprob), n.scenarios) MFdist <- MFlossdistrib(w.mod, rho, defaultprob, p, issuerweights, Ngrid) i <- 21 Lfun <- splinefun(c(0, cumsum(MFdist$L[,i])),c(0, seq(0, 1, length=Ngrid)), "monoH.FC") Rfun <- splinefun(c(0, cumsum(MFdist$R[,i])),c(0, seq(0, 1, length=Ngrid)), "monoH.FC") for(j in 1:99){ l[i, j] <- Lfun(d1[j]) r[i, j] <- Rfun(d1[j]) } } issuerweights <- rep(1/100, 100) rho <- 0.45 recov <- read.csv("~/code/python/recov.csv", header=F) recov <- as.numeric(recov$V1) survprob <- as.matrix(read.csv("~/code/python/survprob.csv", header=F)) gq <- gauss.quad.prob(500, "normal") Z <- gq$nodes w <- gq$weights defaultprob <- 1-survprob test <- BClossdistC(defaultprob, issuerweights, recov, rho, Z, w, 101)