## todo:
## - investigate other ways to interpolate the random severities on the grid.
##   I'm thinking that at each severity that we add to the distribution, round it down
##   and keep track of the missing mass: namely if X_i=S_i w.p. p_i, then add
##   X_i=lu*floor(S_i/lu) with probability p_i and propagate
##   X_{i+1}=S_{i+1}+(S_i-lu*floor(S_i/lu)) with the right probability
## - investigate truncated distributions more (we need to compute the loss and recovery
##   distributions separately; for the 0-10% equity tranche we need the loss on the
##   0-0.1 support and the recovery on the 0.1-1 support, so it's not clear that there
##   is a big gain).
## - do the correlation adjustments when computing the deltas, since that seems to be
##   the market standard

#' @useDynLib lossdistrib, .registration=TRUE, .fixes = "C_"
NULL

#' Gauss-Hermite quadrature weights
#'
#' \code{GHquad} computes the quadrature weights for integrating against a
#' Gaussian distribution.
#'
#' If f is a function, then \eqn{\sum_{i=1}^n f(Z_i)w_i \approx
#' \frac{1}{\sqrt{2\pi}} \int_{-\infty}^\infty f(x)e^{-\frac{x^2}{2}}\,dx}.
#' @param n Integer, the number of nodes
#' @return A list with two components:
#' \item{Z}{the vector of nodes}
#' \item{w}{the corresponding weights}
#' @export
GHquad <- function(n) {
  n <- as.integer(n)
  Z <- double(n)
  w <- double(n)
  result <- .C(C_GHquad, n, Z=Z, w=w)
  result[[1]] <- NULL
  return(result)
}

#' Loss distribution of a portfolio
#'
#' \code{lossdistrib} computes the probability distribution of a sum
#' of independent Bernoulli variables with unequal probabilities
#' (also called the Poisson-binomial distribution).
#'
#' We compute the probability distribution of \eqn{S = \sum_{i=1}^n X_i}
#' where \eqn{X_i} is Bernoulli(\eqn{p_i}). This uses the simple recursive
#' algorithm of Andersen, Sidenius and Basu.
#' @param p Numeric vector, the vector of success probabilities
#' @return A numeric vector q of length \code{length(p)+1} such that
#' \eqn{q_{k+1}=\Pr(S=k)} for \eqn{k=0,\dots,n}
#' @export
lossdistrib <- function(p){
  ## basic recursive algorithm of Andersen, Sidenius and Basu
  n <- length(p)
  q <- rep(0, (n+1))
  q[1] <- 1
  for(i in 1:n) {
    q[-1] <- p[i] * q[-(n+1)] + (1 - p[i]) * q[-1]
    q[1] <- (1 - p[i]) * q[1]
  }
  return(q)
}

#' Convolution of two discrete probability distributions
#'
#' This is the probability distribution of the sum of the two random variables,
#' assuming they are independent.
#' @param dist1 Numeric vector of probabilities
#' @param dist2 Numeric vector of probabilities
#' @return Convolution of the two vectors
convolve <- function(dist1, dist2) {
  n1 <- length(dist1)
  n2 <- length(dist2)
  dist1 <- c(dist1, rep(0, n2-1))
  dist2 <- c(dist2, rep(0, n1-1))
  Re(fft(fft(dist1) * fft(dist2), inverse=TRUE))/length(dist1)
}
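## Illustration only (hypothetical helper, not exported): a minimal sketch checking
## that the Andersen-Sidenius-Basu recursion above agrees with repeatedly convolving
## the individual Bernoulli distributions via `convolve`. The helper name and the
## example probabilities are made up for this sketch.
.check_poisson_binomial <- function(p = c(0.01, 0.02, 0.05, 0.10)) {
  ## convolve the two-point distributions (1-p_i, p_i) one name at a time
  q.fft <- Reduce(convolve, lapply(p, function(pi) c(1 - pi, pi)))
  all.equal(lossdistrib(p), q.fft)
}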
#' Loss distribution of a portfolio
#'
#' \code{lossdistrib.fft} computes the probability distribution of a sum
#' of independent Bernoulli variables with unequal probabilities
#' (also called the Poisson-binomial distribution).
#'
#' We compute the probability distribution of \eqn{S = \sum_{i=1}^n X_i}
#' where \eqn{X_i} is Bernoulli(\eqn{p_i}).
#' This uses the FFT, thus complexity is of order \eqn{O(n \log(n))},
#' compared to \eqn{O(n^2)} for the recursive algorithm.
#' @param p Numeric vector, the vector of success probabilities
#' @return A numeric vector q of length \code{length(p)+1} such that
#' \eqn{q_{k+1}=\Pr(S=k)}
#' @export
lossdistrib.fft <- function(p) {
  ## not tested when length(p) is not a power of 2
  if(length(p) == 1) {
    c(1 - p, p)
  } else {
    convolve(lossdistrib.fft(p[1:(length(p)/2)]),
             lossdistrib.fft(p[-(1:(length(p)/2))]))
  }
}

#' Loss distribution of a portfolio
#'
#' \code{lossdistrib2} computes the probability distribution of a sum
#' of independent Bernoulli variables with unequal probabilities.
#'
#' We compute the probability distribution of \eqn{L = \sum_{i=1}^n w_i S_i X_i}
#' where \eqn{X_i} is Bernoulli(\eqn{p_i}). If \code{defaultflag} is TRUE, we
#' compute the distribution of \eqn{D = \sum_{i=1}^n w_i X_i} instead.
#' This is a recursive algorithm with a first-order correction for discretization.
#' Complexity is of order \eqn{O(nN)}, so linear in the number of names for a
#' given grid size.
#' @param p Numeric, vector of default probabilities
#' @param w Numeric, vector of weights
#' @param S Numeric, vector of severities
#' @param N Integer, number of ticks in the grid
#' @param defaultflag Boolean, if TRUE, we compute the default distribution
#' (instead of the loss distribution).
#' @return a Numeric vector of size \code{N} containing the loss (resp.
#' default) distribution if \code{defaultflag} is FALSE (resp. TRUE).
#' @export
lossdistrib2 <- function(p, w, S, N, defaultflag = FALSE){
  n <- length(p)
  lu <- 1/(N-1)
  q <- rep(0, N)
  q[1] <- 1
  for(i in 1:n){
    if(defaultflag){
      d <- w[i] / lu
    } else {
      d <- S[i] * w[i] / lu
    }
    d1 <- floor(d)
    d2 <- ceiling(d)
    p1 <- p[i]*(d2-d)
    p2 <- p[i] - p1
    q1 <- c(rep(0, d1), p1*q[1:(N-d1)])
    q2 <- c(rep(0, d2), p2*q[1:(N-d2)])
    q <- q1 + q2 + (1-p[i])*q
  }
  ## any mass pushed beyond the grid is lumped into the last tick
  q[length(q)] <- q[length(q)] + 1 - sum(q)
  return(q)
}

#' Loss distribution, truncated version
#'
#' \code{lossdistrib2.truncated} computes the probability distribution of a sum
#' of independent Bernoulli variables with unequal probabilities up
#' to a cutoff.
#'
#' In R this is actually slower than \code{lossdistrib2}, but the C implementation
#' is about twice as fast. For high severities, M can become bigger than the
#' cutoff, and some probability mass escapes.
#' @param p Numeric, vector of default probabilities
#' @param w Numeric, vector of weights
#' @param S Numeric, vector of severities
#' @param N Integer, number of ticks in the grid
#' @param cutoff Integer, where to stop computing the exact probabilities
#' @return a Numeric vector of size \code{cutoff} containing the (truncated)
#' loss distribution
#' @export
lossdistrib2.truncated <- function(p, w, S, N, cutoff = N){
  n <- length(p)
  lu <- 1/(N-1)
  q <- rep(0, cutoff)
  q[1] <- 1
  M <- 1
  for(i in 1:n){
    d <- S[i] * w[i] / lu
    d1 <- floor(d)
    d2 <- ceiling(d)
    p1 <- p[i]*(d2-d)
    p2 <- p[i] - p1
    q1 <- p1*q[1:min(M, cutoff-d1)]
    q2 <- p2*q[1:min(M, cutoff-d2)]
    q[1:min(M, cutoff)] <- (1-p[i])*q[1:min(M, cutoff)]
    q[(d1+1):min(M+d1, cutoff)] <- q[(d1+1):min(M+d1, cutoff)] + q1
    q[(d2+1):min(M+d2, cutoff)] <- q[(d2+1):min(M+d2, cutoff)] + q2
    M <- M+d2
  }
  return(q)
}
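## Illustration only (hypothetical helper, not exported): the first-order
## correction in lossdistrib2 splits each severity between the two neighbouring
## grid ticks, which preserves the mean exactly, so the expected loss read off
## the grid should equal sum(w * S * p) up to floating point (and up to any mass
## lumped into the last tick). Portfolio values below are made up.
.check_lossdistrib2_mean <- function(N = 101) {
  p <- c(0.01, 0.02, 0.05, 0.10)
  w <- rep(0.25, 4)
  S <- c(0.6, 0.55, 0.7, 0.4)
  lu <- 1 / (N - 1)
  q <- lossdistrib2(p, w, S, N)
  c(from.grid = sum((0:(N - 1)) * lu * q), direct = sum(w * S * p))
}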
#' Recovery distribution of a portfolio
#'
#' \code{recovdist} computes the recovery distribution of a portfolio
#' described by a vector of default probabilities and a vector of prepay
#' probabilities:
#' \eqn{R=\sum_{i=1}^n w_i X_i} where \eqn{X_i=0} w.p. \eqn{1-dp_i-pp_i},
#' \eqn{X_i=1-S_i} w.p. \eqn{dp_i}, and \eqn{X_i=1} w.p. \eqn{pp_i}.
#'
#' It is a recursive algorithm with first-order correction. For a unit of loss
#' \eqn{lu}, each non-zero value \eqn{v} is interpolated on the grid
#' as the pair of values
#' \eqn{\left\lfloor\frac{v}{lu}\right\rfloor} and
#' \eqn{\left\lceil\frac{v}{lu}\right\rceil}, so that \eqn{X_i} has
#' four non-zero values on the grid.
#' @param dp Numeric, vector of default probabilities
#' @param pp Numeric, vector of prepay probabilities
#' @param w Numeric, vector of weights
#' @param S Numeric, vector of severities
#' @param N Integer, number of ticks in the grid
#' @return a Numeric vector of size \code{N} containing the recovery distribution
#' @export
recovdist <- function(dp, pp, w, S, N){
  n <- length(dp)
  q <- rep(0, N)
  q[1] <- 1
  lu <- 1/(N-1)
  for(i in 1:n){
    d1 <- w[i]*(1-S[i])/lu
    d1l <- floor(d1)
    d1u <- ceiling(d1)
    d2 <- w[i] / lu
    d2l <- floor(d2)
    d2u <- ceiling(d2)
    dp1 <- dp[i] * (d1u-d1)
    dp2 <- dp[i] - dp1
    pp1 <- pp[i] * (d2u - d2)
    pp2 <- pp[i] - pp1
    q1 <- c(rep(0, d1l), dp1 * q[1:(N-d1l)])
    q2 <- c(rep(0, d1u), dp2 * q[1:(N-d1u)])
    q3 <- c(rep(0, d2l), pp1 * q[1:(N-d2l)])
    q4 <- c(rep(0, d2u), pp2 * q[1:(N-d2u)])
    q <- q1+q2+q3+q4+(1-dp[i]-pp[i])*q
  }
  return(q)
}

#' Joint loss-recovery distribution
#'
#' Recursive algorithm with first-order correction to compute the joint
#' probability distribution of the loss and recovery.
#'
#' For high severities the support can grow beyond the grid and some
#' probability mass escapes; the missing mass is lumped into the last cell.
#' @param p Numeric, vector of default probabilities
#' @param w Numeric, vector of weights
#' @param S Numeric, vector of severities
#' @param N Integer, number of ticks in the grid
#' @param defaultflag Logical, whether to return the loss or default distribution
#' @return q Matrix of the joint loss-recovery probability distribution;
#' colSums(q) is the recovery distribution marginal and
#' rowSums(q) is the loss distribution marginal.
#' @export
lossdist.joint <- function(p, w, S, N, defaultflag = FALSE){
  n <- length(p)
  lu <- 1/(N-1)
  q <- matrix(0, N, N)
  q[1,1] <- 1
  for(k in 1:n) {
    if(defaultflag) {
      x <- w[k] / lu
    } else {
      x <- S[k] * w[k]/lu
    }
    y <- (1-S[k]) * w[k]/lu
    i <- floor(x)
    j <- floor(y)
    weights <- c((i+1-x)*(j+1-y), (i+1-x)*(y-j), (x-i)*(y-j), (j+1-y)*(x-i))
    psplit <- p[k] * weights
    qtemp <- matrix(0, N, N)
    qtemp[(i+1):N,(j+1):N] <- qtemp[(i+1):N,(j+1):N] + psplit[1] * q[1:(N-i),1:(N-j)]
    qtemp[(i+1):N,(j+2):N] <- qtemp[(i+1):N,(j+2):N] + psplit[2] * q[1:(N-i), 1:(N-j-1)]
    qtemp[(i+2):N,(j+2):N] <- qtemp[(i+2):N,(j+2):N] + psplit[3] * q[1:(N-i-1), 1:(N-j-1)]
    qtemp[(i+2):N,(j+1):N] <- qtemp[(i+2):N,(j+1):N] + psplit[4] * q[1:(N-i-1), 1:(N-j)]
    q <- qtemp + (1-p[k])*q
  }
  q[length(q)] <- q[length(q)] + 1 - sum(q)
  return(q)
}
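## Illustration only (hypothetical helper, not exported): the marginals of the
## joint loss-recovery distribution should reproduce the one-dimensional
## recursions, rowSums() matching lossdistrib2 and colSums() matching recovdist
## with zero prepay probabilities. Portfolio values are made up and chosen so
## that no mass escapes the grid.
.check_joint_marginals <- function(N = 101) {
  p <- c(0.02, 0.04, 0.08)
  w <- rep(0.3, 3)
  S <- c(0.6, 0.5, 0.45)
  q <- lossdist.joint(p, w, S, N)
  list(loss  = all.equal(rowSums(q), lossdistrib2(p, w, S, N)),
       recov = all.equal(colSums(q), recovdist(p, rep(0, 3), w, S, N)))
}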
#' @export
lossdist.prepay.joint <- function(dp, pp, w, S, N, defaultflag = FALSE){
  ## recursive algorithm with first-order correction to compute the joint
  ## probability distribution of the loss and recovery
  ## inputs:
  ##   dp: vector of default probabilities
  ##   pp: vector of prepay probabilities
  ##   w: vector of issuer weights
  ##   S: vector of severities
  ##   N: number of ticks on the grid
  ##   defaultflag: if TRUE, computes the default distribution instead of the loss
  ## outputs:
  ##   q: matrix of the joint loss and recovery probability distribution;
  ##      colSums(q) is the recovery distribution marginal,
  ##      rowSums(q) is the loss distribution marginal
  n <- length(dp)
  lu <- 1/(N-1)
  q <- matrix(0, N, N)
  q[1,1] <- 1
  for(k in 1:n){
    y1 <- (1-S[k]) * w[k]/lu
    y2 <- w[k]/lu
    j1 <- floor(y1)
    j2 <- floor(y2)
    if(defaultflag){
      x <- y2
      i <- j2
    } else {
      x <- y2-y1
      i <- floor(x)
    }
    weights1 <- c((i+1-x)*(j1+1-y1), (i+1-x)*(y1-j1), (x-i)*(y1-j1), (j1+1-y1)*(x-i))
    dpsplit <- dp[k] * weights1
    if(defaultflag){
      weights2 <- c((i+1-x)*(j2+1-y2), (i+1-x)*(y2-j2), (x-i)*(y2-j2), (j2+1-y2)*(x-i))
      ppsplit <- pp[k] * weights2
    } else {
      ppsplit <- pp[k] * c(j2+1-y2, y2-j2)
    }
    qtemp <- matrix(0, N, N)
    qtemp[(i+1):N,(j1+1):N] <- qtemp[(i+1):N,(j1+1):N] + dpsplit[1] * q[1:(N-i),1:(N-j1)]
    qtemp[(i+1):N,(j1+2):N] <- qtemp[(i+1):N,(j1+2):N] + dpsplit[2] * q[1:(N-i), 1:(N-j1-1)]
    qtemp[(i+2):N,(j1+2):N] <- qtemp[(i+2):N,(j1+2):N] + dpsplit[3] * q[1:(N-i-1), 1:(N-j1-1)]
    qtemp[(i+2):N,(j1+1):N] <- qtemp[(i+2):N,(j1+1):N] + dpsplit[4] * q[1:(N-i-1), 1:(N-j1)]
    if(defaultflag){
      qtemp[(i+1):N,(j2+1):N] <- qtemp[(i+1):N,(j2+1):N] + ppsplit[1] * q[1:(N-i),1:(N-j2)]
      qtemp[(i+1):N,(j2+2):N] <- qtemp[(i+1):N,(j2+2):N] + ppsplit[2] * q[1:(N-i), 1:(N-j2-1)]
      qtemp[(i+2):N,(j2+2):N] <- qtemp[(i+2):N,(j2+2):N] + ppsplit[3] * q[1:(N-i-1), 1:(N-j2-1)]
      qtemp[(i+2):N,(j2+1):N] <- qtemp[(i+2):N,(j2+1):N] + ppsplit[4] * q[1:(N-i-1), 1:(N-j2)]
    } else {
      qtemp[, (j2+1):N] <- qtemp[,(j2+1):N] + ppsplit[1]*q[,1:(N-j2)]
      qtemp[, (j2+2):N] <- qtemp[,(j2+2):N] + ppsplit[2]*q[,1:(N-j2-1)]
    }
    q <- qtemp + (1-pp[k]-dp[k]) * q
  }
  q[length(q)] <- q[length(q)] + 1 - sum(q)
  return(q)
}

#' @export
lossdistC <- function(p, w, S, N, defaultflag=FALSE){
  ## C version of lossdistrib2, roughly 50 times faster
  .C(C_lossdistrib, as.double(p), as.integer(length(p)), as.double(w), as.double(S),
     as.integer(N), as.integer(N), as.logical(defaultflag), q = double(N))$q
}

#' @export
lossdistCZ <- function(p, w, S, N, defaultflag=FALSE, rho, Z){
  ## S is of size (length(p), length(Z))
  stopifnot(length(rho) == length(p), length(rho) == length(w),
            nrow(S) == length(p), ncol(S) == length(Z))
  .C(C_lossdistrib_Z, as.double(p), as.integer(length(p)), as.double(w), as.double(S),
     as.integer(N), as.logical(defaultflag), as.double(rho), as.double(Z),
     as.integer(length(Z)), q = matrix(0, N, length(Z)))$q
}

#' @export
lossdistC.truncated <- function(p, w, S, N, T=N, defaultflag=FALSE){
  ## truncated version of lossdistrib: q[i] is 0 for i >= T
  .C(C_lossdistrib, as.double(p), as.integer(length(p)), as.double(w), as.double(S),
     as.integer(N), as.integer(T), as.logical(defaultflag), q = double(N))$q
}

#' @export
exp_trunc <- function(p, w, S, N, K){
  ## computes E[(K-L)^+]
  r <- 0
  .C(C_exp_trunc, as.double(p), as.integer(length(p)), as.double(w), as.double(S),
     as.integer(N), as.double(K), res = r)$res
}

#' @export
rec.trunc <- function(p, w, S, N, K){
  ## computes E[(K-(1-R))^+] = E[(\tilde K - \bar R)^+]
  ## where R = \sum_i w_i (1-S_i) X_i, \tilde K = K - \sum_i w_i S_i
  ## and \bar R = \sum_i w_i (1-S_i) (1-X_i)
  Ktilde <- K - crossprod(w, S)
  if(Ktilde < 0){
    return( 0 )
  } else {
    return( exp_trunc(1-p, w, 1-S, N, Ktilde) )
  }
}

#' @export
recovdistC <- function(dp, pp, w, S, N){
  ## C version of recovdist
  .C(C_recovdist, as.double(dp), as.double(pp), as.integer(length(dp)), as.double(w),
     as.double(S), as.integer(N), q = double(N))$q
}

#' @export
lossdistC.prepay.joint <- function(dp, pp, w, S, N, defaultflag=FALSE){
  ## C version of lossdist.prepay.joint
  r <- .C(C_lossdistrib_joint, as.double(dp), as.double(pp), as.integer(length(dp)),
          as.double(w), as.double(S), as.integer(N), as.logical(defaultflag),
          q = matrix(0, N, N))$q
  return(r)
}
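## Illustration only (hypothetical helper, not exported): with prepayments, the
## recovery marginal of lossdist.prepay.joint should match recovdist, while the
## loss marginal should still match lossdistrib2 (prepaid names cannot default,
## so prepayment only affects the recovery side). Portfolio values are made up
## and chosen so that no mass escapes the grid.
.check_prepay_joint_marginals <- function(N = 101) {
  dp <- c(0.02, 0.04, 0.08)
  pp <- c(0.01, 0.01, 0.02)
  w <- rep(0.3, 3)
  S <- c(0.6, 0.5, 0.45)
  q <- lossdist.prepay.joint(dp, pp, w, S, N)
  list(loss  = all.equal(rowSums(q), lossdistrib2(dp, w, S, N)),
       recov = all.equal(colSums(q), recovdist(dp, pp, w, S, N)))
}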
#' @export
lossdistC.prepay.jointZ <- function(dp, pp, w, S, N, defaultflag = FALSE, rho, Z, wZ){
  ## N is the size of the grid
  ## dp is of size n.credits
  ## pp is of size n.credits
  ## w is of size n.credits
  ## S is of size n.credits by nZ
  ## rho is a vector of doubles of size n.credits
  ## Z is a vector of length nZ
  ## wZ is a vector of quadrature weights of length nZ
  r <- .C(C_lossdistrib_joint_Z, as.double(dp), as.double(pp), as.integer(length(dp)),
          as.double(w), as.double(S), as.integer(N), as.logical(defaultflag),
          as.double(rho), as.double(Z), as.double(wZ), as.integer(length(Z)),
          output = matrix(0, N, N))
  return(r$output)
}

#' @export
lossrecovdist <- function(defaultprob, prepayprob, w, S, N, defaultflag=FALSE, useC=TRUE){
  lossdistrib2 <- if(useC) lossdistC else lossdistrib2
  recovdist <- if(useC) recovdistC else recovdist
  if(missing(prepayprob)){
    L <- lossdistrib2(defaultprob, w, S, N, defaultflag)
    R <- lossdistrib2(defaultprob, w, 1-S, N)
  } else {
    L <- lossdistrib2(defaultprob + defaultflag*prepayprob, w, S, N, defaultflag)
    R <- recovdist(defaultprob, prepayprob, w, S, N)
  }
  return(list(L=L, R=R))
}

#' @export
lossrecovdist.term <- function(defaultprob, prepayprob, w, S, N, defaultflag=FALSE, useC=TRUE){
  ## computes the loss and recovery distributions over time
  L <- array(0, dim=c(N, ncol(defaultprob)))
  R <- array(0, dim=c(N, ncol(defaultprob)))
  if(missing(prepayprob)) {
    for(t in 1:ncol(defaultprob)){
      temp <- lossrecovdist(defaultprob[,t], , w, S[,t], N, defaultflag, useC)
      L[,t] <- temp$L
      R[,t] <- temp$R
    }
  } else {
    for(t in 1:ncol(defaultprob)) {
      temp <- lossrecovdist(defaultprob[,t], prepayprob[,t], w, S[,t], N, defaultflag, useC)
      L[,t] <- temp$L
      R[,t] <- temp$R
    }
  }
  return(list(L=L, R=R))
}

#' @export
lossrecovdist.joint.term <- function(defaultprob, prepayprob, w, S, N, defaultflag=FALSE, useC=TRUE){
  ## computes the joint loss and recovery distribution over time
  Q <- array(0, dim=c(ncol(defaultprob), N, N))
  lossdist.joint <- if(useC) lossdistC.jointblas else lossdist.joint
  lossdist.prepay.joint <- if(useC) lossdistC.prepay.jointblas else lossdist.prepay.joint
  if(missing(prepayprob)){
    for(t in 1:ncol(defaultprob)){
      Q[t,,] <- lossdist.joint(defaultprob[,t], w, S[,t], N, defaultflag)
    }
  } else {
    for(t in 1:ncol(defaultprob)){
      Q[t,,] <- lossdist.prepay.joint(defaultprob[,t], prepayprob[,t], w, S[,t], N, defaultflag)
    }
  }
  return(Q)
}

#' @export
dist.transform <- function(dist.joint) {
  ## compute the joint (D, R) distribution from the (L, R) distribution
  ## using D = L+R; grid index 1 corresponds to the value 0
  distDR <- array(0, dim=dim(dist.joint))
  Ngrid <- dim(dist.joint)[2]
  for(t in 1:dim(dist.joint)[1]) {
    for(i in 1:Ngrid){
      for(j in 1:Ngrid){
        index <- i + j - 1
        if(index <= Ngrid) {
          distDR[t,index,j] <- distDR[t,index,j] + dist.joint[t,i,j]
        } else {
          distDR[t,Ngrid,j] <- distDR[t,Ngrid,j] + dist.joint[t,i,j]
        }
      }
    }
    distDR[t,,] <- distDR[t,,]/sum(distDR[t,,])
  }
  return( distDR )
}

#' @export
shockprob <- function(p, rho, Z, log.p=FALSE) {
  ## computes the shocked default probability as a function of the copula factor Z
  ## vectorized with the following caveats:
  ##   p and rho are vectors of the same length n, Z is a scalar: returns a vector of length n
  ##   p and rho are scalars, Z is a vector of length n: returns a vector of length n
  if(length(p)==1) {
    if(rho==1) {
      return(Z <= qnorm(p))
    } else {
      return(pnorm((qnorm(p)-sqrt(rho)*Z)/sqrt(1-rho), log.p=log.p))
    }
  } else {
    result <- double(length(p))
    result[rho==1] <- Z <= qnorm(p[rho==1])
    result[rho<1] <- pnorm((qnorm(p[rho<1])-sqrt(rho[rho<1])*Z)/sqrt(1-rho[rho<1]), log.p=log.p)
    return( result )
  }
}

#' @export
shockseverity <- function(S, Stilde=1, Z, rho, p) {
  ## computes the shocked severity as a function of the copula factor Z
  result <- double(length(S))
  result[p==0] <- 0
  result[p!=0] <- Stilde * exp( shockprob(S[p != 0] / Stilde * p[p != 0], rho[p != 0], Z, TRUE) -
                                shockprob(p[p != 0], rho[p != 0], Z, TRUE) )
  return(result)
}

#' @export
dshockprob <- function(p, rho, Z) {
  ## derivative of shockprob with respect to p
  dnorm((qnorm(p)-sqrt(rho)*Z)/sqrt(1-rho)) * dqnorm(p) / sqrt(1-rho)
}

#' @export
dqnorm <- function(x) {
  ## derivative of qnorm
  1/dnorm(qnorm(x))
}
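## Illustration only (hypothetical helper, not exported): in the one-factor
## Gaussian copula, integrating the conditional (shocked) default probability
## over the factor recovers the unconditional probability,
## E_Z[shockprob(p, rho, Z)] = p. The same check can be run against GHquad
## nodes once the compiled code is available; stats::integrate is used here
## instead.
.check_shockprob_integrates_to_p <- function(p = 0.05, rho = 0.3) {
  implied <- integrate(function(z) shockprob(p, rho, z) * dnorm(z), -Inf, Inf)$value
  c(implied = implied, target = p)
}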
#' @export
fit.prob <- function(Z, w, rho, p0) {
  ## if the weights are not perfectly Gaussian, find the probability p such that
  ## E_w[shockprob(p, rho, Z)] = p0
  if(p0 == 0) {
    return(0)
  }
  if(rho == 1) {
    distw <- distr::DiscreteDistribution(Z, w)
    return(pnorm(distr::q(distw)(p0)))
  }
  eps <- 1e-12
  dp <- (crossprod(shockprob(p0, rho, Z), w) - p0) / crossprod(dshockprob(p0, rho, Z), w)
  p <- p0
  while(abs(dp) > eps) {
    dp <- (crossprod(shockprob(p, rho, Z), w) - p0) / crossprod(dshockprob(p, rho, Z), w)
    phi <- 1
    while ((p - phi * dp) < 0 || (p - phi * dp) > 1) {
      phi <- 0.8 * phi
    }
    p <- p - phi * dp
  }
  return(p)
}

#' @export
fit.probC <- function(Z, w, rho, p0) {
  stopifnot(length(Z) == length(w))
  r <- .C(C_fitprob, as.double(Z), as.double(w), as.integer(length(Z)), as.double(rho),
          as.double(p0), q = double(1))
  return(r$q)
}

#' @export
stochasticrecov <- function(R, Rtilde, Z, w, rho, porig, pmod) {
  ## if porig == 0 (probably a matured asset) then return the original recovery;
  ## it shouldn't matter anyway since we never default that asset
  if(porig == 0) {
    return(rep(R, length(Z)))
  } else {
    ptilde <- fit.prob(Z, w, rho, (1-R)/(1-Rtilde) * porig)
    return(abs(1 - (1-Rtilde) * exp(shockprob(ptilde, rho, Z, TRUE) - shockprob(pmod, rho, Z, TRUE))))
  }
}

#' @export
stochasticrecovC <- function(R, Rtilde, Z, w, rho, porig, pmod) {
  r <- .C(C_stochasticrecov, as.double(R), as.double(Rtilde), as.double(Z), as.double(w),
          as.integer(length(Z)), as.double(rho), as.double(porig), as.double(pmod),
          q = double(length(Z)))
  return(r$q)
}

#' @export
BClossdist <- function(defaultprob, issuerweights, recov, rho, Z, w,
                       N = length(recov)+1, defaultflag = FALSE, n.int = 500) {
  if(missing(Z)) {
    quadrature <- GHquad(n.int)
    Z <- quadrature$Z
    w <- quadrature$w
  }
  stopifnot(length(Z) == length(w), nrow(defaultprob) == length(issuerweights))
  ## do not use if the quadrature weights are not Gaussian: the results would be
  ## incorrect since shockseverity is invalid in that case (use stochasticrecov instead)
  LZ <- matrix(0, N, length(Z))
  RZ <- matrix(0, N, length(Z))
  L <- matrix(0, N, ncol(defaultprob))
  R <- matrix(0, N, ncol(defaultprob))
  for(t in 1:ncol(defaultprob)){
    for(i in 1:length(Z)){
      g.shocked <- shockprob(defaultprob[,t], rho, Z[i])
      S.shocked <- shockseverity(1-recov, 1, Z[i], rho, defaultprob[,t])
      temp <- lossrecovdist(g.shocked, , issuerweights, S.shocked, N)
      LZ[,i] <- temp$L
      RZ[,i] <- temp$R
    }
    L[,t] <- LZ %*% w
    R[,t] <- RZ %*% w
  }
  list(L=L, R=R)
}

#' @export
BClossdistC <- function(defaultprob, issuerweights, recov, rho, Z, w,
                        N = length(issuerweights)+1, defaultflag = FALSE) {
  if(is.null(dim(defaultprob))) {
    dim(defaultprob) <- c(length(defaultprob), 1)
  }
  stopifnot(length(Z) == length(w), nrow(defaultprob) == length(issuerweights),
            nrow(defaultprob) == length(recov))
  L <- matrix(0, N, ncol(defaultprob))
  R <- matrix(0, N, ncol(defaultprob))
  rho <- rep(rho, length(issuerweights))
  r <- .C(C_BCloss_recov_dist, defaultprob, as.integer(nrow(defaultprob)),
          as.integer(ncol(defaultprob)), as.double(issuerweights), as.double(recov),
          as.double(Z), as.double(w), as.integer(length(Z)), as.double(rho),
          as.integer(N), as.logical(defaultflag), L=L, R=R)
  return(list(L=r$L, R=r$R))
}
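## Illustration only (hypothetical helper, not exported): fit.prob should return
## a probability whose weighted average of shocked probabilities reproduces the
## target p0. The near-Gaussian nodes with equal weights used below are an
## assumption of this sketch (so the fitted value stays close to p0 itself),
## not part of the package.
.check_fit_prob <- function(p0 = 0.02, rho = 0.4, n = 201) {
  Z <- qnorm(ppoints(n))   # approximately Gaussian discretization
  w <- rep(1/n, n)         # equal weights
  p <- fit.prob(Z, w, rho, p0)
  c(fitted = p, implied = drop(crossprod(shockprob(p, rho, Z), w)), target = p0)
}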
#' @export
BCER <- function(defaultprob, issuerweights, recov, K, rho, Z, w,
                 N = length(issuerweights)+1, defaultflag = FALSE) {
  stopifnot(length(Z) == length(w), nrow(defaultprob) == length(issuerweights))
  rho <- rep(rho, length(issuerweights))
  ELt <- numeric(ncol(defaultprob))
  ERt <- numeric(ncol(defaultprob))
  r <- .C(C_BCloss_recov_trunc, defaultprob, as.integer(nrow(defaultprob)),
          as.integer(ncol(defaultprob)), as.double(issuerweights), as.double(recov),
          as.double(Z), as.double(w), as.integer(length(Z)), as.double(rho),
          as.integer(N), as.double(K), as.logical(defaultflag), ELt=ELt, ERt=ERt)
  return(list(ELt=r$ELt, ERt=r$ERt))
}
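## Illustration only (hypothetical helper, not exported; requires the compiled
## package, since lossrecovdist defaults to the C routines): a minimal run of
## BClossdist with explicit quadrature nodes. Because shockseverity is built so
## that E_Z[p(Z) S(Z)] = p S, the expected loss read off the returned
## distribution should be close to sum(w * p * (1 - recov)), up to quadrature
## and grid error. All inputs, including the equal-weight stand-in for GHquad
## nodes, are made up for this sketch.
.demo_BClossdist <- function(N = 101, nZ = 201) {
  p <- c(0.01, 0.02, 0.03, 0.05, 0.08)
  defaultprob <- matrix(p, length(p), 1)   # a single time horizon
  w <- rep(0.2, length(p))
  recov <- rep(0.4, length(p))
  rho <- rep(0.3, length(p))
  Z <- qnorm(ppoints(nZ))                  # simple stand-in for GHquad nodes
  wZ <- rep(1/nZ, nZ)
  out <- BClossdist(defaultprob, w, recov, rho, Z, wZ, N = N)
  lu <- 1/(N-1)
  c(from.dist = sum((0:(N-1)) * lu * out$L[, 1]),
    direct    = sum(w * p * (1 - recov)))
}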