## todo:
## - investigate other ways to interpolate the random severities on the grid.
##   I'm thinking that at each severity that we add to the distribution, round it down
##   and keep track of the missing mass: namely if X_i = S_i w.p. p_i, then add
##   X_i = lu*floor(S_i/lu) with probability p_i and propagate
##   X_{i+1} = S_{i+1} + (S_i - lu*floor(S_i/lu)) with the right probability.
## - investigate truncated distributions more (we need to compute the loss and recovery
##   distributions separately: for the 0-10 equity tranche we need the loss on the
##   0-0.1 support and the recovery on the 0.1-1 support, so it's not clear that there
##   is a big gain).
## - do the correlation adjustments when computing the deltas, since it seems to be
##   the market standard.

#' Gauss-Hermite quadrature weights
#'
#' \code{GHquad} computes the quadrature nodes and weights for integrating
#' against a Gaussian distribution.
#'
#' If \code{f} is a function, then \code{with(GHquad(100), crossprod(f(Z), w))}
#' approximates \code{E[f(Z)]} for standard normal \code{Z}.
#'
#' @param n Integer, the number of nodes
#' @return A list with two components:
#'   \item{Z}{the vector of nodes}
#'   \item{w}{the corresponding weights}
GHquad <- function(n){
  n <- as.integer(n)
  Z <- double(n)
  w <- double(n)
  result <- .C("GHquad", n, Z=Z, w=w)
  result[[1]] <- NULL
  return(result)
}

#' Loss distribution of a portfolio
#'
#' \code{lossdistrib} computes the probability distribution of a sum
#' of independent Bernoulli variables with unequal probabilities.
#'
#' This uses the basic recursive algorithm of Andersen, Sidenius and Basu.
#' We compute the probability distribution of S = \sum_{i=1}^n X_i
#' where X_i is Bernoulli(p_i).
#' @param p Numeric vector, the vector of success probabilities
#' @return A vector q such that q[k+1] = P(S = k)
lossdistrib <- function(p){
  ## basic recursive algorithm of Andersen, Sidenius and Basu
  n <- length(p)
  q <- rep(0, n+1)
  q[1] <- 1
  for(i in 1:n){
    q[-1] <- p[i]*q[-(n+1)] + (1-p[i])*q[-1]
    q[1] <- (1-p[i])*q[1]
  }
  return(q)
}

#' Loss distribution of a portfolio
#'
#' \code{lossdistrib.fft} computes the probability distribution of a sum
#' of independent Bernoulli variables with unequal probabilities.
#'
#' This uses the FFT. Complexity is of order O(nm) + O(m\log{m})
#' where m is the size of the grid and n the number of probabilities.
#' It is slower than the recursive algorithm in practice.
#' We compute the probability distribution of S = \sum_{i=1}^n X_i
#' where X_i is Bernoulli(p_i).
#' @param p Numeric vector, the vector of success probabilities
#' @return A vector q such that q[k+1] = P(S = k)
lossdistrib.fft <- function(p){
  n <- length(p)
  theta <- 2*pi*1i*(0:n)/(n+1)
  Phi <- 1 - p + p %o% exp(theta)
  v <- apply(Phi, 2, prod)
  return(1/(n+1)*Re(fft(v)))
}
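## Illustrative sanity check (a minimal sketch, wrapped in if (FALSE) so it does not
## run when this file is sourced): the ASB recursion and the FFT version should give
## the same distribution on a small portfolio, which sums to one and has mean sum(p).
if (FALSE) {
  p.ex <- c(0.01, 0.05, 0.10, 0.20)
  q.rec <- lossdistrib(p.ex)
  q.fft <- lossdistrib.fft(p.ex)
  stopifnot(max(abs(q.rec - q.fft)) < 1e-12,
            abs(sum(q.rec) - 1) < 1e-12,
            abs(crossprod(0:length(p.ex), q.rec) - sum(p.ex)) < 1e-12)
}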
#' Recursive algorithm with first-order correction
#'
#' @param p Numeric, vector of default probabilities
#' @param w Numeric, vector of weights
#' @param S Numeric, vector of severities
#' @param N Integer, number of ticks in the grid
#' @param defaultflag Boolean, if TRUE we compute the default distribution
#'   (instead of the loss distribution).
#' @return a Numeric vector of size \code{N} giving the loss (resp.
#'   default) distribution if \code{defaultflag} is FALSE (resp. TRUE).
lossdistrib2 <- function(p, w, S, N, defaultflag=FALSE){
  n <- length(p)
  lu <- 1/(N-1)
  q <- rep(0, N)
  q[1] <- 1
  for(i in 1:n){
    if(defaultflag){
      d <- w[i] / lu
    }else{
      d <- S[i] * w[i] / lu
    }
    d1 <- floor(d)
    d2 <- ceiling(d)
    p1 <- p[i]*(d2-d)
    p2 <- p[i] - p1
    q1 <- c(rep(0, d1), p1*q[1:(N-d1)])
    q2 <- c(rep(0, d2), p2*q[1:(N-d2)])
    q <- q1 + q2 + (1-p[i])*q
  }
  ## put any probability mass that escaped the grid back on the last node
  q[length(q)] <- q[length(q)] + 1 - sum(q)
  return(q)
}

lossdistrib2.truncated <- function(p, w, S, N, cutoff=N){
  ## recursive algorithm with first-order correction
  ## p      vector of default probabilities
  ## w      vector of weights
  ## S      vector of severities
  ## N      number of ticks in the grid (for best accuracy this should
  ##        be a multiple of the number of issuers)
  ## cutoff where to stop computing the exact probabilities
  ##        (useful for tranche computations)
  ## this is actually slower than lossdistrib2 in R, but in C it is
  ## twice as fast.
  ## for high severities, M can become bigger than N, and some
  ## probability mass escapes.
  n <- length(p)
  lu <- 1/(N-1)
  q <- rep(0, cutoff)
  q[1] <- 1
  M <- 1
  for(i in 1:n){
    d <- S[i] * w[i] / lu
    d1 <- floor(d)
    d2 <- ceiling(d)
    p1 <- p[i]*(d2-d)
    p2 <- p[i] - p1
    q1 <- p1*q[1:min(M, cutoff-d1)]
    q2 <- p2*q[1:min(M, cutoff-d2)]
    q[1:min(M, cutoff)] <- (1-p[i])*q[1:min(M, cutoff)]
    q[(d1+1):min(M+d1, cutoff)] <- q[(d1+1):min(M+d1, cutoff)] + q1
    q[(d2+1):min(M+d2, cutoff)] <- q[(d2+1):min(M+d2, cutoff)] + q2
    M <- M + d2
  }
  return(q)
}

recovdist <- function(dp, pp, w, S, N){
  ## computes the recovery distribution for a sum of independent variables
  ##   R = \sum_{i=1}^n w[i] X_i
  ## where X_i = 0        w.p. 1 - dp[i] - pp[i]
  ##           = 1 - S[i] w.p. dp[i]
  ##           = 1        w.p. pp[i]
  ## each non-zero value v is interpolated on the grid as the pair of values
  ## floor(v/lu) and ceiling(v/lu), so that X_i has four non-zero values
  n <- length(dp)
  q <- rep(0, N)
  q[1] <- 1
  lu <- 1/(N-1)
  for(i in 1:n){
    d1 <- w[i]*(1-S[i])/lu
    d1l <- floor(d1)
    d1u <- ceiling(d1)
    d2 <- w[i] / lu
    d2l <- floor(d2)
    d2u <- ceiling(d2)
    dp1 <- dp[i] * (d1u-d1)
    dp2 <- dp[i] - dp1
    pp1 <- pp[i] * (d2u-d2)
    pp2 <- pp[i] - pp1
    q1 <- c(rep(0, d1l), dp1 * q[1:(N-d1l)])
    q2 <- c(rep(0, d1u), dp2 * q[1:(N-d1u)])
    q3 <- c(rep(0, d2l), pp1 * q[1:(N-d2l)])
    q4 <- c(rep(0, d2u), pp2 * q[1:(N-d2u)])
    q <- q1 + q2 + q3 + q4 + (1-dp[i]-pp[i])*q
  }
  return(q)
}

lossdist.joint <- function(p, w, S, N, defaultflag=FALSE){
  ## recursive algorithm with first-order correction
  ## to compute the joint probability distribution of the loss and recovery
  ## inputs:
  ##   p: vector of default probabilities
  ##   w: vector of issuer weights
  ##   S: vector of severities
  ##   N: number of ticks on the grid
  ##   defaultflag: if TRUE computes the default distribution
  ## output:
  ##   q: matrix of joint loss/recovery probabilities
  ##      colSums(q) is the recovery distribution marginal
  ##      rowSums(q) is the loss distribution marginal
  n <- length(p)
  lu <- 1/(N-1)
  q <- matrix(0, N, N)
  q[1,1] <- 1
  for(k in 1:n){
    if(defaultflag){
      x <- w[k] / lu
    }else{
      x <- S[k] * w[k]/lu
    }
    y <- (1-S[k]) * w[k]/lu
    i <- floor(x)
    j <- floor(y)
    weights <- c((i+1-x)*(j+1-y), (i+1-x)*(y-j), (x-i)*(y-j), (j+1-y)*(x-i))
    psplit <- p[k] * weights
    qtemp <- matrix(0, N, N)
    qtemp[(i+1):N, (j+1):N] <- qtemp[(i+1):N, (j+1):N] + psplit[1] * q[1:(N-i), 1:(N-j)]
    qtemp[(i+1):N, (j+2):N] <- qtemp[(i+1):N, (j+2):N] + psplit[2] * q[1:(N-i), 1:(N-j-1)]
    qtemp[(i+2):N, (j+2):N] <- qtemp[(i+2):N, (j+2):N] + psplit[3] * q[1:(N-i-1), 1:(N-j-1)]
    qtemp[(i+2):N, (j+1):N] <- qtemp[(i+2):N, (j+1):N] + psplit[4] * q[1:(N-i-1), 1:(N-j)]
    q <- qtemp + (1-p[k])*q
  }
  ## put any probability mass that escaped the grid back on the last cell
  q[length(q)] <- q[length(q)] + 1 - sum(q)
  return(q)
}
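## Illustrative consistency check (a minimal sketch, wrapped in if (FALSE) so it does
## not run when this file is sourced): on a small homogeneous portfolio whose
## severities fall exactly on the grid, the marginals of lossdist.joint should match
## the loss and recovery distributions obtained from lossdistrib2.
if (FALSE) {
  p.ex <- rep(0.1, 5); w.ex <- rep(0.2, 5); S.ex <- rep(0.5, 5); N.ex <- 11
  q.joint <- lossdist.joint(p.ex, w.ex, S.ex, N.ex)
  L.ex <- lossdistrib2(p.ex, w.ex, S.ex, N.ex)       ## loss marginal
  R.ex <- lossdistrib2(p.ex, w.ex, 1 - S.ex, N.ex)   ## recovery marginal
  stopifnot(max(abs(rowSums(q.joint) - L.ex)) < 1e-12,
            max(abs(colSums(q.joint) - R.ex)) < 1e-12)
}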
lossdist.prepay.joint <- function(dp, pp, w, S, N, defaultflag=FALSE){
  ## recursive algorithm with first-order correction
  ## to compute the joint probability distribution of the loss and recovery
  ## inputs:
  ##   dp: vector of default probabilities
  ##   pp: vector of prepay probabilities
  ##   w: vector of issuer weights
  ##   S: vector of severities
  ##   N: number of ticks on the grid
  ##   defaultflag: if TRUE computes the default distribution
  ## output:
  ##   q: matrix of joint loss/recovery probabilities
  ##      colSums(q) is the recovery distribution marginal
  ##      rowSums(q) is the loss distribution marginal
  n <- length(dp)
  lu <- 1/(N-1)
  q <- matrix(0, N, N)
  q[1,1] <- 1
  for(k in 1:n){
    y1 <- (1-S[k]) * w[k]/lu
    y2 <- w[k]/lu
    j1 <- floor(y1)
    j2 <- floor(y2)
    if(defaultflag){
      x <- y2
      i <- j2
    }else{
      x <- y2 - y1
      i <- floor(x)
    }
    weights1 <- c((i+1-x)*(j1+1-y1), (i+1-x)*(y1-j1), (x-i)*(y1-j1), (j1+1-y1)*(x-i))
    dpsplit <- dp[k] * weights1
    if(defaultflag){
      weights2 <- c((i+1-x)*(j2+1-y2), (i+1-x)*(y2-j2), (x-i)*(y2-j2), (j2+1-y2)*(x-i))
      ppsplit <- pp[k] * weights2
    }else{
      ppsplit <- pp[k] * c(j2+1-y2, y2-j2)
    }
    qtemp <- matrix(0, N, N)
    qtemp[(i+1):N, (j1+1):N] <- qtemp[(i+1):N, (j1+1):N] + dpsplit[1] * q[1:(N-i), 1:(N-j1)]
    qtemp[(i+1):N, (j1+2):N] <- qtemp[(i+1):N, (j1+2):N] + dpsplit[2] * q[1:(N-i), 1:(N-j1-1)]
    qtemp[(i+2):N, (j1+2):N] <- qtemp[(i+2):N, (j1+2):N] + dpsplit[3] * q[1:(N-i-1), 1:(N-j1-1)]
    qtemp[(i+2):N, (j1+1):N] <- qtemp[(i+2):N, (j1+1):N] + dpsplit[4] * q[1:(N-i-1), 1:(N-j1)]
    if(defaultflag){
      qtemp[(i+1):N, (j2+1):N] <- qtemp[(i+1):N, (j2+1):N] + ppsplit[1] * q[1:(N-i), 1:(N-j2)]
      qtemp[(i+1):N, (j2+2):N] <- qtemp[(i+1):N, (j2+2):N] + ppsplit[2] * q[1:(N-i), 1:(N-j2-1)]
      qtemp[(i+2):N, (j2+2):N] <- qtemp[(i+2):N, (j2+2):N] + ppsplit[3] * q[1:(N-i-1), 1:(N-j2-1)]
      qtemp[(i+2):N, (j2+1):N] <- qtemp[(i+2):N, (j2+1):N] + ppsplit[4] * q[1:(N-i-1), 1:(N-j2)]
    }else{
      qtemp[, (j2+1):N] <- qtemp[, (j2+1):N] + ppsplit[1]*q[, 1:(N-j2)]
      qtemp[, (j2+2):N] <- qtemp[, (j2+2):N] + ppsplit[2]*q[, 1:(N-j2-1)]
    }
    q <- qtemp + (1-pp[k]-dp[k]) * q
  }
  ## put any probability mass that escaped the grid back on the last cell
  q[length(q)] <- q[length(q)] + 1 - sum(q)
  return(q)
}

lossdistC <- function(p, w, S, N, defaultflag=FALSE){
  ## C version of lossdistrib2, roughly 50 times faster
  .C("lossdistrib", as.double(p), as.integer(length(p)), as.double(w), as.double(S),
     as.integer(N), as.logical(defaultflag), q = double(N))$q
}

lossdistCblas <- function(p, w, S, N, defaultflag=FALSE){
  ## C version of lossdistrib2 using BLAS, roughly 50 times faster
  .C("lossdistrib_blas", as.double(p), as.integer(length(p)), as.double(w), as.double(S),
     as.integer(N), as.logical(defaultflag), q = double(N))$q
}

lossdistCZ <- function(p, w, S, N, defaultflag=FALSE, rho, Z){
  ## S is of size (length(p), length(Z))
  .C("lossdistrib_Z", as.double(p), as.integer(length(p)), as.double(w), as.double(S),
     as.integer(N), as.logical(defaultflag), as.double(rho), as.double(Z),
     as.integer(length(Z)), q = matrix(0, N, length(Z)))$q
}

lossdistC.truncated <- function(p, w, S, N, T=N, defaultflag=FALSE){
  ## truncated version of lossdistrib
  ## q[i] is 0 for i >= T
  .C("lossdistrib_truncated", as.double(p), as.integer(length(p)), as.double(w),
     as.double(S), as.integer(N), as.integer(T), as.logical(defaultflag),
     q = double(N))$q
}

exp.trunc <- function(p, w, S, N, K){
  ## computes E[(K-L)^+]
  r <- 0
  .C("exp_trunc", as.double(p), as.integer(length(p)), as.double(w), as.double(S),
     as.integer(N), as.double(K), res = r)$res
}
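## Reference computation (a minimal sketch, wrapped in if (FALSE) so it does not run
## when this file is sourced, and assuming the compiled library is loaded before
## calling exp.trunc): the quantity E[(K-L)^+] can be checked in pure R against the
## discretized loss distribution from lossdistrib2 on the grid seq(0, 1, length.out = N).
if (FALSE) {
  p.ex <- rep(0.02, 20); w.ex <- rep(0.05, 20); S.ex <- rep(0.6, 20)
  N.ex <- 101; K.ex <- 0.03
  grid <- seq(0, 1, length.out = N.ex)
  q.ex <- lossdistrib2(p.ex, w.ex, S.ex, N.ex)
  EK.ref <- crossprod(pmax(K.ex - grid, 0), q.ex)   ## pure-R value of E[(K-L)^+]
  ## exp.trunc(p.ex, w.ex, S.ex, N.ex, K.ex) should be close to EK.ref
}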
rec.trunc <- function(p, w, S, N, K){
  ## computes E[(K-(1-R))^+] = E[(\tilde K - \bar R)^+]
  ## where \tilde K = K - \sum_i w_i S_i and \bar R = \sum_i w_i R_i (1-X_i)
  Ktilde <- K - crossprod(w, S)
  if(Ktilde < 0){
    return( 0 )
  }else{
    return( exp.trunc(1-p, w, 1-S, N, Ktilde) )
  }
}

recovdistC <- function(dp, pp, w, S, N){
  ## C version of recovdist
  .C("recovdist", as.double(dp), as.double(pp), as.integer(length(dp)), as.double(w),
     as.double(S), as.integer(N), q = double(N))$q
}

lossdistC.joint <- function(p, w, S, N, defaultflag=FALSE){
  ## C version of lossdist.joint, roughly 20 times faster
  .C("lossdistrib_joint", as.double(p), as.integer(length(p)), as.double(w), as.double(S),
     as.integer(N), as.logical(defaultflag), q = matrix(0, N, N))$q
}

lossdistC.jointblas <- function(p, w, S, N, defaultflag=FALSE){
  ## C version of lossdist.joint using BLAS, roughly 20 times faster
  .C("lossdistrib_joint_blas", as.double(p), as.integer(length(p)), as.double(w),
     as.double(S), as.integer(N), as.logical(defaultflag), q = matrix(0, N, N))$q
}

lossdistC.jointZ <- function(dp, w, S, N, defaultflag = FALSE, rho, Z, wZ){
  ## N is the size of the grid
  ## dp is of size n.credits
  ## w is of size n.credits
  ## S is of size n.credits by nZ
  ## rho is a double
  ## Z is a vector of length nZ
  ## wZ is the vector of quadrature weights, of length nZ
  .C("lossdistrib_joint_Z", as.double(dp), as.integer(length(dp)), as.double(w),
     as.double(S), as.integer(N), as.logical(defaultflag), as.double(rho),
     as.double(Z), as.double(wZ), as.integer(length(Z)), q = matrix(0, N, N))$q
}

lossdistC.prepay.jointblas <- function(dp, pp, w, S, N, defaultflag=FALSE){
  ## C version of lossdist.prepay.joint
  r <- .C("lossdistrib_prepay_joint_blas", as.double(dp), as.double(pp),
          as.integer(length(dp)), as.double(w), as.double(S), as.integer(N),
          as.logical(defaultflag), q = matrix(0, N, N))$q
  return(r)
}

lossdistC.prepay.jointZ <- function(dp, pp, w, S, N, defaultflag = FALSE, rho, Z, wZ){
  ## N is the size of the grid
  ## dp is of size n.credits
  ## pp is of size n.credits
  ## w is of size n.credits
  ## S is of size n.credits by nZ
  ## rho is a vector of doubles of size n.credits
  ## Z is a vector of length nZ
  ## wZ is the vector of quadrature weights, of length nZ
  r <- .C("lossdistrib_prepay_joint_Z", as.double(dp), as.double(pp),
          as.integer(length(dp)), as.double(w), as.double(S), as.integer(N),
          as.logical(defaultflag), as.double(rho), as.double(Z), as.double(wZ),
          as.integer(length(Z)), output = matrix(0, N, N))
  return(r$output)
}

lossrecovdist <- function(defaultprob, prepayprob, w, S, N, defaultflag=FALSE, useC=TRUE){
  ## computes the loss and recovery distributions at a single horizon,
  ## dispatching to the C implementations when useC is TRUE
  lossdistrib2 <- if(useC) lossdistC else lossdistrib2
  recovdist <- if(useC) recovdistC else recovdist
  if(missing(prepayprob)){
    L <- lossdistrib2(defaultprob, w, S, N, defaultflag)
    R <- lossdistrib2(defaultprob, w, 1-S, N)
  }else{
    L <- lossdistrib2(defaultprob + defaultflag*prepayprob, w, S, N, defaultflag)
    R <- recovdist(defaultprob, prepayprob, w, S, N)
  }
  return(list(L=L, R=R))
}

lossrecovdist.term <- function(defaultprob, prepayprob, w, S, N, defaultflag=FALSE, useC=TRUE){
  ## computes the loss and recovery distributions over time
  L <- array(0, dim=c(N, ncol(defaultprob)))
  R <- array(0, dim=c(N, ncol(defaultprob)))
  if(missing(prepayprob)){
    for(t in 1:ncol(defaultprob)){
      temp <- lossrecovdist(defaultprob[,t], , w, S[,t], N, defaultflag, useC)
      L[,t] <- temp$L
      R[,t] <- temp$R
    }
  }else{
    for(t in 1:ncol(defaultprob)){
      temp <- lossrecovdist(defaultprob[,t], prepayprob[,t], w, S[,t], N, defaultflag, useC)
      L[,t] <- temp$L
      R[,t] <- temp$R
    }
  }
  return(list(L=L, R=R))
}
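## Small usage example (a minimal sketch, wrapped in if (FALSE) so it does not run
## when this file is sourced): with useC = FALSE the pure-R recursions are used, so
## this runs without the compiled library; both marginals should sum to one.
if (FALSE) {
  p.ex <- c(0.01, 0.02, 0.03); w.ex <- rep(1/3, 3); S.ex <- rep(0.6, 3); N.ex <- 31
  LR <- lossrecovdist(p.ex, , w.ex, S.ex, N.ex, useC = FALSE)
  stopifnot(abs(sum(LR$L) - 1) < 1e-12, abs(sum(LR$R) - 1) < 1e-12)
}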
lossrecovdist.joint.term <- function(defaultprob, prepayprob, w, S, N, defaultflag=FALSE, useC=TRUE){
  ## computes the joint loss and recovery distribution over time
  Q <- array(0, dim=c(ncol(defaultprob), N, N))
  lossdist.joint <- if(useC) lossdistC.jointblas else lossdist.joint
  lossdist.prepay.joint <- if(useC) lossdistC.prepay.jointblas else lossdist.prepay.joint
  if(missing(prepayprob)){
    for(t in 1:ncol(defaultprob)){
      Q[t,,] <- lossdist.joint(defaultprob[,t], w, S[,t], N, defaultflag)
    }
  }else{
    for(t in 1:ncol(defaultprob)){
      Q[t,,] <- lossdist.prepay.joint(defaultprob[,t], prepayprob[,t], w, S[,t], N, defaultflag)
    }
  }
  return(Q)
}

dist.transform <- function(dist.joint){
  ## compute the joint (D, R) distribution from the (L, R) distribution
  ## using D = L + R
  distDR <- array(0, dim=dim(dist.joint))
  Ngrid <- dim(dist.joint)[2]
  for(t in 1:dim(dist.joint)[1]){
    for(i in 1:Ngrid){
      for(j in 1:Ngrid){
        index <- i + j
        if(index <= Ngrid){
          distDR[t,index,j] <- distDR[t,index,j] + dist.joint[t,i,j]
        }else{
          distDR[t,Ngrid,j] <- distDR[t,Ngrid,j] + dist.joint[t,i,j]
        }
      }
    }
    distDR[t,,] <- distDR[t,,]/sum(distDR[t,,])
  }
  return( distDR )
}

shockprob <- function(p, rho, Z, log.p=FALSE){
  ## computes the shocked (conditional) default probability as a function of the
  ## copula factor Z.
  ## the function is vectorized with the following caveats:
  ##  - p and rho are vectors of the same length n, Z is a scalar: returns a vector of length n
  ##  - p and rho are scalars, Z is a vector of length n: returns a vector of length n
  if(length(p)==1){
    if(rho==1){
      return(Z <= qnorm(p))
    }else{
      return(pnorm((qnorm(p)-sqrt(rho)*Z)/sqrt(1-rho), log.p=log.p))
    }
  }else{
    result <- double(length(p))
    result[rho==1] <- Z <= qnorm(p[rho==1])
    result[rho<1] <- pnorm((qnorm(p[rho<1])-sqrt(rho[rho<1])*Z)/sqrt(1-rho[rho<1]), log.p=log.p)
    return( result )
  }
}

shockseverity <- function(S, Stilde=1, Z, rho, p){
  ## computes the shocked severity as a function of the copula factor Z
  rho <- rep_len(rho, length(S))  ## recycle so a scalar correlation can be passed
  result <- double(length(S))
  result[p==0] <- 0
  result[p!=0] <- Stilde * exp( shockprob(S[p!=0]/Stilde*p[p!=0], rho[p!=0], Z, TRUE) -
                                shockprob(p[p!=0], rho[p!=0], Z, TRUE) )
  return(result)
}

dshockprob <- function(p, rho, Z){
  ## derivative of shockprob with respect to p
  dnorm((qnorm(p)-sqrt(rho)*Z)/sqrt(1-rho))*dqnorm(p)/sqrt(1-rho)
}

dqnorm <- function(x){
  ## derivative of qnorm
  1/dnorm(qnorm(x))
}

fit.prob <- function(Z, w, rho, p0){
  ## if the weights are not perfectly Gaussian, find the probability p such that
  ##   E_w[shockprob(p, rho, Z)] = p0
  require(distr)
  if(p0==0){
    return(0)
  }
  if(rho == 1){
    distw <- DiscreteDistribution(Z, w)
    return(pnorm(q(distw)(p0)))
  }
  ## damped Newton iteration
  eps <- 1e-12
  dp <- (crossprod(shockprob(p0,rho,Z), w) - p0)/crossprod(dshockprob(p0,rho,Z), w)
  p <- p0
  while(abs(dp) > eps){
    dp <- (crossprod(shockprob(p,rho,Z), w) - p0)/crossprod(dshockprob(p,rho,Z), w)
    phi <- 1
    while((p - phi*dp) < 0 || (p - phi*dp) > 1){
      phi <- 0.8*phi
    }
    p <- p - phi*dp
  }
  return(p)
}

fit.probC <- function(Z, w, rho, p0){
  ## C version of fit.prob
  stopifnot(length(Z)==length(w))
  r <- .C("fitprob", as.double(Z), as.double(w), as.integer(length(Z)), as.double(rho),
          as.double(p0), q = double(1))
  return(r$q)
}

stochasticrecov <- function(R, Rtilde, Z, w, rho, porig, pmod){
  ## if porig == 0 (probably a matured asset) then return the original recovery;
  ## it shouldn't matter anyway since we never default that asset
  if(porig == 0){
    return(rep(R, length(Z)))
  }else{
    ptilde <- fit.prob(Z, w, rho, (1-R)/(1-Rtilde) * porig)
    return(abs(1 - (1-Rtilde) * exp(shockprob(ptilde, rho, Z, TRUE) -
                                    shockprob(pmod, rho, Z, TRUE))))
  }
}

stochasticrecovC <- function(R, Rtilde, Z, w, rho, porig, pmod){
  ## C version of stochasticrecov
  r <- .C("stochasticrecov", as.double(R), as.double(Rtilde), as.double(Z), as.double(w),
          as.integer(length(Z)), as.double(rho), as.double(porig), as.double(pmod),
          q = double(length(Z)))
  return(r$q)
}
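## Illustrative check of the conditional default probability (a minimal sketch,
## wrapped in if (FALSE) so it does not run when this file is sourced): averaging
## shockprob over a discretized standard normal factor (used here in place of GHquad,
## which needs the compiled library) should recover the unconditional probability p.
if (FALSE) {
  Z.ex <- seq(-8, 8, length.out = 4001)
  w.ex <- dnorm(Z.ex); w.ex <- w.ex / sum(w.ex)
  p.ex <- 0.05; rho.ex <- 0.3
  p.back <- crossprod(shockprob(p.ex, rho.ex, Z.ex), w.ex)
  stopifnot(abs(p.back - p.ex) < 1e-6)
}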
BClossdist <- function(defaultprob, issuerweights, recov, rho, Z, w,
                       N=length(recov)+1, defaultflag=FALSE, n.int=500){
  ## loss and recovery distributions in the one-factor copula model, obtained by
  ## integrating the conditional distributions over the factor Z
  if(missing(Z)){
    quadrature <- GHquad(n.int)
    Z <- quadrature$Z
    w <- quadrature$w
  }
  stopifnot(length(Z)==length(w), nrow(defaultprob) == length(issuerweights))
  ## do not use if the weights are not Gaussian: the results would be incorrect
  ## since shockseverity is invalid in that case (need to use stochasticrecov)
  LZ <- matrix(0, N, length(Z))
  RZ <- matrix(0, N, length(Z))
  L <- matrix(0, N, ncol(defaultprob))
  R <- matrix(0, N, ncol(defaultprob))
  for(t in 1:ncol(defaultprob)){
    for(i in 1:length(Z)){
      g.shocked <- shockprob(defaultprob[,t], rho, Z[i])
      S.shocked <- shockseverity(1-recov, 1, Z[i], rho, defaultprob[,t])
      temp <- lossrecovdist(g.shocked, , issuerweights, S.shocked, N)
      LZ[,i] <- temp$L
      RZ[,i] <- temp$R
    }
    L[,t] <- LZ %*% w
    R[,t] <- RZ %*% w
  }
  list(L=L, R=R)
}

BClossdistC <- function(defaultprob, issuerweights, recov, rho, Z, w,
                        N=length(issuerweights)+1, defaultflag=FALSE){
  ## C version of BClossdist
  stopifnot(length(Z)==length(w), nrow(defaultprob)==length(issuerweights))
  L <- matrix(0, N, ncol(defaultprob))
  R <- matrix(0, N, ncol(defaultprob))
  rho <- rep(rho, length(issuerweights))
  r <- .C("BCloss_recov_dist", defaultprob, as.integer(nrow(defaultprob)),
          as.integer(ncol(defaultprob)), as.double(issuerweights), as.double(recov),
          as.double(Z), as.double(w), as.integer(length(Z)), as.double(rho),
          as.integer(N), as.logical(defaultflag), L=L, R=R)
  return(list(L=r$L, R=r$R))
}

BCER <- function(defaultprob, issuerweights, recov, K, rho, Z, w,
                 N=length(issuerweights)+1, defaultflag=FALSE){
  ## truncated expectations per time step via the C routine BCloss_recov_trunc
  stopifnot(length(Z)==length(w), nrow(defaultprob)==length(issuerweights))
  rho <- rep(rho, length(issuerweights))
  ELt <- numeric(ncol(defaultprob))
  ERt <- numeric(ncol(defaultprob))
  r <- .C("BCloss_recov_trunc", defaultprob, as.integer(nrow(defaultprob)),
          as.integer(ncol(defaultprob)), as.double(issuerweights), as.double(recov),
          as.double(Z), as.double(w), as.integer(length(Z)), as.double(rho),
          as.integer(N), as.double(K), as.logical(defaultflag), ELt=ELt, ERt=ERt)
  return(list(ELt=r$ELt, ERt=r$ERt))
}
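## Pure-R sketch of the factor-mixing step that BClossdist performs for a single date
## (a minimal sketch, wrapped in if (FALSE) so it does not run when this file is
## sourced): conditional loss distributions from lossdistrib2 are averaged over a
## discretized Gaussian factor. Unlike BClossdist, severities are kept constant here
## (no shockseverity adjustment), and a plain grid replaces GHquad.
if (FALSE) {
  p.ex <- rep(0.05, 10); w.ex <- rep(0.1, 10); recov.ex <- rep(0.4, 10)
  rho.ex <- 0.3; N.ex <- 101
  Z.ex <- seq(-8, 8, length.out = 801)
  wZ.ex <- dnorm(Z.ex); wZ.ex <- wZ.ex / sum(wZ.ex)
  LZ.ex <- sapply(Z.ex, function(z)
    lossdistrib2(shockprob(p.ex, rep(rho.ex, length(p.ex)), z), w.ex, 1 - recov.ex, N.ex))
  L.ex <- LZ.ex %*% wZ.ex                        ## unconditional loss distribution
  EL.ex <- crossprod(seq(0, 1, length.out = N.ex), L.ex)
  ## EL.ex should be close to the portfolio expected loss sum(w.ex * p.ex * (1 - recov.ex))
}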