diff options
Diffstat (limited to 'calibrate_tranches.R')
| -rw-r--r-- | calibrate_tranches.R | 326 |
1 files changed, 326 insertions, 0 deletions
diff --git a/calibrate_tranches.R b/calibrate_tranches.R
new file mode 100644
index 00000000..0528defe
--- /dev/null
+++ b/calibrate_tranches.R
@@ -0,0 +1,326 @@
+library("parallel")
+setwd("//WDSENTINEL/share/CorpCDOs/R")
+source("cds_utils.R")
+source("cds_functions_generic.R")
+source("index_definitions.R")
+source("tranche_functions.R")
+source("yieldcurve.R")
+source("optimization.R")
+
+cl <- makeCluster(6)
+
+MarkitData <- getMarkitIRData()
+L1m <- buildMarkitYC(MarkitData, dt = 1/12)
+L2m <- buildMarkitYC(MarkitData, dt = 1/6)
+L3m <- buildMarkitYC(MarkitData)
+L6m <- buildMarkitYC(MarkitData, dt = 1/2)
+setEvaluationDate(as.Date(MarkitData$effectiveasof))
+
+## calibrate HY17
+## calibrate the single names curves
+singlenames.data <- read.table(file="clipboard", sep="\t", header=T)
+nondefaulted <- singlenames.data[!singlenames.data$ticker %in% hy17$defaulted,]
+bps <- 1e-4
+
+hy17portfolio <- c()
+cdsdates <- as.Date(character(0))
+for(tenor in paste0(1:5, "y")){
+ cdsdates <- c(cdsdates, cdsMaturity(tenor))
+}
+
+## clusterEvalQ(cl, {setClass("abstractcurve")
+## setClass("defaultcurve", contains="abstractcurve",
+## representation(dates="Date", hazardrates="numeric"))
+## setClass("creditcurve", representation(issuer="character", startdate="Date",
+## recovery="numeric", curve="defaultcurve"))})
+## clusterExport(cl, list("nondefaulted", "cdsdates", "cdshazardrate", "today",
+## "bps", "couponSchedule", "nextIMMDate", "DiscountCurve", "L3m"))
+## test <- parSapply(cl, 1:nrow(nondefaulted), parf)
+
+## parf <- function(i){
+## SC <- new("creditcurve",
+## recovery=nondefaulted$recovery[i]/100,
+## startdate=today(),
+## issuer=as.character(nondefaulted$ticker[i]))
+## quotes <- data.frame(maturity=cdsdates, upfront = as.numeric(nondefaulted[i,5:9])*0.01,
+## running = rep(nondefaulted$running[i]*bps,5))
+## return( cdshazardrate(quotes, nondefaulted$recovery[i]/100))
+## }
+
+for(i in 1:nrow(nondefaulted)){
+ SC <- new("creditcurve",
+ recovery=nondefaulted$recovery[i]/100,
+ startdate=today(),
+ issuer=as.character(nondefaulted$ticker[i]))
+ quotes <- data.frame(maturity=cdsdates, upfront = as.numeric(nondefaulted[i,5:9])*0.01,
+ running=rep(nondefaulted$running[i]*bps, 5))
+ SC@curve <- cdshazardrate(quotes, nondefaulted$recovery[i]/100)
+ hy17portfolio <- c(hy17portfolio, SC)
+}
+
+hy17$indexref <- 1.035
+hy17portfolio.tweaked <- tweakcurves(hy17portfolio, hy17)
+
+SurvProb <- SPmatrix(hy17portfolio.tweaked, hy17)
+
+## calibrate the tranches using base correlation
+K <- c(0, 0.15, 0.25, 0.35, 1)
+Kmodified <- adjust.attachments(K, hy17$loss, hy17$factor)
+tranche.upf <- c(49.625, 94.75, 107.125, 114.875)
+tranche.running <- c(0.05, 0.05, 0.05, 0.05)
+
+
+lu <- 0.01
+recov <- sapply(hy17portfolio.tweaked, attr, "recovery")
+
+rhovec <- c()
+cs <- couponSchedule(nextIMMDate(today()), hy17$maturity,"Q", "FIXED", 0.05, 0)
+
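+## Base correlation bootstrap: each [K_{i-1}, K_i] tranche is priced as the
+## difference of two equity tranches,
+##   quote_i = ( EQ(0, K_i; rho_i)*K_i - EQ(0, K_{i-1}; rho_{i-1})*K_{i-1} ) / (K_i - K_{i-1}),
+## where EQ denotes the equity-tranche measure returned by tranche.bp, and rho_i is
+## solved for so that this matches the quoted level tranche.upf[i-1].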
+oldtemp <- BClossdistC(SurvProb, 0.3, recov, lu) # placeholder so f is defined on the first pass; its contribution is scaled by Kmodified[1], which should be 0
+for(i in 2:length(Kmodified)){
+  f <- function(rho){
+    temp <- BClossdistC(SurvProb, rho, recov, lu, 100)
+    return( abs(tranche.upf[i-1]-1/(Kmodified[i]-Kmodified[i-1])*
+      (tranche.bp(temp$L, temp$R, cs, 0, Kmodified[i])*Kmodified[i]-
+      tranche.bp(oldtemp$L, oldtemp$R, cs, 0, Kmodified[i-1])*Kmodified[i-1])) )
+  }
+  rho <- optimize(f, interval=c(0, 1))$minimum # rho is a correlation, search (0, 1)
+  oldtemp <- BClossdistC(SurvProb, rho, recov, lu)
+  rhovec <- c(rhovec, rho)
+}
+
+#calibrate by modifying the factor distribution
+bottomup <- 1:3
+topdown <- 2:4
+n.int <- 100
+n.credit <- 96
+errvec <- c()
+library(statmod) # gauss.quad.prob
+quadrature <- gauss.quad.prob(n.int, "normal")
+w <- quadrature$weights
+Z <- quadrature$nodes
+w.mod <- w
+defaultprob <- 1 - SurvProb
+p <- defaultprob
+rho <- 0.45
+
+clusterExport(cl, list("shockprob", "rho", "Z", "lossrecovdist.term",
+ "lossrecovdist", "lossdistribC", "lu",
+ "tranche.bpvec", "tranche.bp", "tranche.pl", "tranche.cl",
+ "trancheloss", "trancherecov", "pos", "Kmodified", "cs"))
+
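+## parf(i): conditional on factor node Z[i], build the loss/recovery distribution of
+## the portfolio and return the tranche values for all attachment points in Kmodified.
+## shockprob(p, rho, Z) is presumably the one-factor Gaussian-copula conditional
+## default probability, i.e. pnorm((qnorm(p) - sqrt(rho)*Z)/sqrt(1 - rho)).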
+parf <- function(i){
+ pshocked <- apply(p, 2, shockprob, rho=rho, Z=Z[i])
+ S <- 1 - Rstoch[i,,]
+ dist <- lossrecovdist.term(pshocked, 0, S, lu)
+ return( tranche.bpvec(Kmodified, dist$L, dist$R, cs))
+}
+
+for(l in 1:100){
+ Rstoch <- array(0, dim=c(n.int, n.credit, ncol(SurvProb)))
+ for(t in 1:ncol(SurvProb)){
+ for(i in 1:n.credit){
+ Rstoch[,i,t] <- stochasticrecov(recov[i], 0, Z, w.mod, rho, defaultprob[i,t], p[i,t])
+ }
+ }
+ clusterExport(cl, list("Rstoch", "p"))
+ result <- parSapply(cl, 1:n.int, parf)
+ ## solve the optimization problem
+ program <- KLfit(result[topdown,], w, tranche.upf[topdown])
+
+ err <- 0
+ for(i in 1:n.credit){
+ for(j in 1:ncol(p)){
+ err <- err + abs(crossprod(shockprob(p[i,j], rho, Z), program$weight) - defaultprob[i,j])
+ }
+ }
+
+ ## update the new probabilities
+ for(i in 1:n.credit){
+ for(j in 1:ncol(p)){
+ p[i,j] <- fit.prob(Z, program$weight, rho, defaultprob[i,j])
+ }
+ }
+
+ errvec <- c(errvec, err)
+ w.mod <- program$weight
+ cat(err,"\n")
+}
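+## Each pass: (i) rebuild the state-dependent recoveries, (ii) reprice the tranches
+## conditional on every factor node in parallel, (iii) reweight the factor
+## distribution via KLfit (presumably a minimum-KL-divergence fit subject to
+## repricing the top-down tranches), and (iv) re-imply the conditional default
+## probabilities with fit.prob so the unconditional ones are preserved; err tracks
+## the remaining mismatch and is accumulated in errvec.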
+
+## some plots
+matplot(Z, cbind(w, w.mod), type="l", ylab="probability density",
+ main="Factor distribution (Gaussian, and Market implied)")
+lossdist <- array(0, dim=c(1/lu+1, n.int))
+for(i in 1:n.int){
+ pshocked <- sapply(p[,ncol(p)], shockprob, rho=rho, Z=Z[i])
+ S <- 1 - Rstoch[i,,ncol(p)]
+ lossdist[,i] <- lossrecovdist(pshocked, 0, S, lu)$L
+}
+
+lossdist.orig <- BClossdistC(SurvProb, rho, recov, lu)
+matplot(seq(0,1,0.01), cbind(lossdist.orig$L[,ncol(p)], lossdist%*%w.mod), type="l", xlab="loss percentage",
+ ylab="probability density", main="market implied loss distribution")
+#3d surface of the loss distribution
+Rstoch <- array(0, dim=c(ncol(SurvProb), n.int, n.credit))
+for(t in 1:ncol(SurvProb)){
+ for(i in 1:n.credit){
+ Rstoch[t,,i] <- stochasticrecov(recov[i], 0, Z, w.mod, rho, defaultprob[i,t], p[i,t])
+ }
+}
+lu <- 0.01
+clusterExport(cl, list("p", "shockprob", "rho", "Z", "lossdistribC.joint", "Rstoch", "lu"))
+
+parf <- function(i){
+ pshocked <- apply(p, 2, shockprob, rho=rho, Z=Z[i])
+ return( lossdistribC.joint(pshocked[,ncol(p)], 1-Rstoch[ncol(p),i,], lu) )
+}
+
+dist <- parSapply(cl, 1:n.int, parf)
+dist <- array(dist, dim=c(1/lu+1, 1/lu+1, 100))
+distw <- array(0, dim=c(1/lu+1, 1/lu+1))
+for(i in 1:n.int){
+ distw <- distw+dist[,,i] * w.mod[i]
+}
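+## distw is the factor-weighted joint loss/recovery distribution at the final
+## horizon; it is rendered as a surface by the persp() call below.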
+
+## for(i in seq(0,360, by=10)){
+## persp(distw, theta=i)
+## Sys.sleep(0.5)
+## }
+
+persp(distw, theta=-20, phi=13, col="lightgreen",ticktype="detailed", shade=0.3)
+x <- seq(0,1,lu)
+y <- seq(0,1,lu)
+
+lossdist.bu <- lossdist
+recovdist.bu <- recovdist # recovdist is assumed to already be in the workspace
+
+prepayprob <- matrix(0,100,20)
+defaultprob <- matrix(0,100,20)
+for( t in 1:20){
+ prepayprob[,t] <- h/(h+lambda)*(1-exp(-(lambda+h)*yearfrac[t]))
+ defaultprob[,t] <- lambda/(h+lambda)*(1-exp(-(lambda+h)*yearfrac[t]))
+}
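+## Competing-risks identities used above, with constant default intensity lambda and
+## prepayment intensity h (both, like yearfrac, assumed to be in the workspace):
+##   P(default by t) = lambda/(lambda+h) * (1 - exp(-(lambda+h)*t))
+##   P(prepay  by t) =      h/(lambda+h) * (1 - exp(-(lambda+h)*t))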
+
+#LCDX15 calibration
+library(statmod)
+n.int <- 100
+n.credit <- 100
+Z <- gauss.quad.prob(n.int, "normal")$nodes
+w <- gauss.quad.prob(n.int, "normal")$weights
+rho <- 0.35
+T <- length(yearfrac)
+Recov <- rep(0.35, n.credit)
+dt <- diff(c(0, yearfrac))
+K <- c(0, 0.08, 0.15, 0.3, 1)
+lu <- 0.01
+errvec <- c()
+## df (discount factors) and tranchemarks (target tranche quotes) are assumed to be
+## in the workspace; initialise the iteration state as in the HY17 loop above
+w.bak <- w
+p.bak <- defaultprob
+p <- defaultprob
+for(l in 1:10){
+ result <- c()
+ test <- c()
+ Rstoch <- array(0,dim=c(T,n.int,n.credit))
+ for(t in 1:T){
+ for(i in 1:n.credit){
+ Rstoch[t,,i] <- stochasticrecov(Recov[i],0,Z,w.bak,rho,defaultprob[i,t], defaultprob[i,t])
+ }
+ }
+ lossdist <- c()
+ prepaydist <- c()
+ result <- c()
+ for(i in 1:n.int){
+ dpshocked <- apply(defaultprob,2,shockprob,rho=rho,Z=Z[i])
+ ppshocked <- apply(prepayprob,2,shockprob,rho=rho,Z=-Z[i])
+ L <- c()
+ R <- c()
+ for(t in 1:length(yearfrac)){
+ dist <- lossrecovdist(dpshocked[,t],ppshocked[,t],Rstoch[t,i,],lu)
+ #dist <- lossrecovdist(dpshocked[,t],ppshocked[,t],0.35,lu)
+ L <- cbind(L,dist$L)
+ R <- cbind(R,dist$R)
+ }
+ lossdist <- cbind(lossdist,L[,T])
+ prepaydist <- cbind(prepaydist,R[,T])
+ result <- rbind(result,bpvec(K,L,R,df,dt))
+ }
+
+ pomme <- KLfit(t(result), w, tranchemarks)
+ #pomme <- lmconst(tranchemarks,t(result),rbind(rep(1,100),Z),c(1,0),T)
+
+ err <-0
+ for(i in 1:dim(p.bak)[1]){
+ for(j in 1:dim(p.bak)[2]){
+ err <- err+abs(crossprod(shockprob(p[i,j],rho,Z),pomme$weight)-p.bak[i,j])
+ }
+ }
+
+ ptilde <- defaultprob
+ for(i in 1:dim(defaultprob)[1]){
+ for(j in 1:dim(defaultprob)[2]){
+ ptilde[i,j] <- fit.prob(Z,pomme$weight,rho,defaultprob[i,j])
+ }
+ }
+ p <- ptilde
+ errvec <- c(errvec,err)
+ w.bak <- pomme$weight
+ cat(err,"\n")
+}
+
+pl<-c()
+for(i in 1:100){
+ pd<-c(defaultprob[i,1],diff(defaultprob[i,]))
+ pl<-c(pl,sum(pd*df*(1-0.35)))
+}
+cl<-c()
+for(i in 1:100){
+ cl<-c(cl,sum(df*dt*(1-defaultprob[i,]-prepayprob[i,])))
+}
+
+loss <- function(t,P,R,lambda,mu){
+ lambda/(lambda+mu)*(1-exp(-(lambda+mu)*t))*(1-R/P)-mu/((lambda+mu)*P)*(1-exp(-(lambda+mu)*t))
+}
+losstest <- function(t){
+ loss(t,P,R,lambda,mu)
+}
+
+
+##tranche pricing
+library(statmod)
+n.int <- 500
+Z <- gauss.quad.prob(n.int,"normal")$nodes
+w <- gauss.quad.prob(n.int,"normal")$weights
+rho <- 0.35
+
+Lt <- c()
+Rt <- c()
+for(t in 1:21){
+ L <- c()
+ R <- c()
+ for(i in 1:n.int){
+ pZ <- shockprob(1-p[,t],rho,Z[i])
+ SZ <- shockseverity(1-p[,t],0.45,1,Z[i])
+ temp <- lossrecovdist(pZ,0,1-SZ,0.0045)
+ L <- cbind(L,temp$L)
+ R <- cbind(R,temp$R)
+ }
+ Lt <- cbind(Lt, as.matrix(L)%*%w)
+ Rt <- cbind(Rt, as.matrix(R)%*%w)
+}
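+## Lt[, t] and Rt[, t] are the factor-averaged loss and recovery distributions for
+## period t, obtained by integrating the conditional distributions against the
+## normal quadrature weights w.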
+
+hazard.rates <- runif(100)*1000
+yearfrac <- seq(0,7,0.25)
+bps <- 1e-4
+P <- exp(-outer(hazard.rates * bps, yearfrac))
+R <- rep(0.6,100)
+lu <- 0.01
+test <- lossrecovdist(1-P[,29], rep(0,100), R, 0.01)
+
+
+library(statmod)
+n.int <- 500
+Z <- gauss.quad.prob(n.int,"normal")$nodes
+w <- gauss.quad.prob(n.int,"normal")$weights
+rho <- 0.35
+
+
+cl <- makeCluster(6)
+x <- rnorm(1000000)
+clusterCall(cl, function(x) for (i in 1:100) sum(x), x)
+
+
