diff options
Diffstat (limited to 'python/tranche_functions.py')
| -rw-r--r-- | python/tranche_functions.py | 75 |
1 file changed, 69 insertions, 6 deletions
diff --git a/python/tranche_functions.py b/python/tranche_functions.py index 1a3e04e5..85071561 100644 --- a/python/tranche_functions.py +++ b/python/tranche_functions.py @@ -82,6 +82,15 @@ libloss.lossdistrib_joint_Z.argtypes = [ ndpointer('double', ndim=2, flags='F,writeable') ] +libloss.joint_default_averagerecov_distrib.restype = None +libloss.joint_default_averagerecov_distrib.argtypes = [ + ndpointer('double', ndim=1, flags='F'), + POINTER(c_int), + ndpointer('double', ndim=1, flags='F'), + POINTER(c_int), + ndpointer('double', ndim=2, flags='F,writeable') +] + def GHquad(n): Z, w = h_roots(n) return Z*np.sqrt(2), w/np.sqrt(np.pi) @@ -204,6 +213,37 @@ def lossdistrib_joint_Z(p, pp, w, S, rho, Ngrid=101, defaultflag=False, nZ=500): byref(c_int(nZ)), q) return q +def joint_default_averagerecov_distrib(p, S, Ngrid=101): + """Joint default-average recovery distribution recursive algorithm. + + This computes the joint default/average recovery distribution using a first order + correction. + + Parameters + ---------- + p : (N,) array_like + Vector of default probabilities. + S : (N,) array_like + Vector of severities. + Ngrid : integer, optional + Number of ticks on the grid, default 101.
+ + Returns + ------- + q : (N+1, Ngrid) ndarray + + Notes + ----- + np.sum(q, axis=0) is the recovery distribution marginal + np.sum(q, axis=1) is the loss (or default) distribution marginal + """ + + q = np.zeros((Ngrid, p.shape[0]+1), order='F') + assert(p.shape == S.shape) + libloss.joint_default_averagerecov_distrib(p, byref(c_int(p.shape[0])), + S, byref(c_int(Ngrid)), q) + return q.T + def adjust_attachments(K, losstodate, factor): """ computes the attachments adjusted for losses @@ -311,13 +351,36 @@ def compute_pv(q, strike): val += sum(v for k, v in q[i].items() if k < strike) * 1/Ngrid return val +def average_recov(p, R, Ngrid): + q = np.zeros((p.shape[0]+1, Ngrid)) + q[0,0] = 1 + lu = 1 / (Ngrid-1) + weights = np.empty(Ngrid) + index = np.empty(Ngrid) + grid = np.linspace(0, 1, Ngrid) + for i, prob in enumerate(p): + for j in range(i+1, 0, -1): + newrecov = ((j-1) * grid + R[i])/j + np.modf(newrecov * ( Ngrid - 1), weights, index) + q[j] *= (1-prob) + for k in range(Ngrid): + q[j,int(index[k])+1] += weights[k] * prob * q[j-1, k] + q[j,int(index[k])] += (1-weights[k]) * prob * q[j-1, k] + q[0] *= (1-prob) + return q if __name__=="__main__": + # n_issuers = 100 + # p = np.random.rand(n_issuers) + # pp = np.random.rand(n_issuers) + # w = 1/n_issuers * np.ones(n_issuers) + # S = np.random.rand(n_issuers) + # rho = 0.5 + # pomme = lossdistrib_joint_Z(p, None, w, S, rho, defaultflag=True) + # poire = lossdistrib_joint_Z(p, pp, w, S, rho, defaultflag=True) + import numpy as np n_issuers = 100 p = np.random.rand(n_issuers) - pp = np.random.rand(n_issuers) - w = 1/n_issuers * np.ones(n_issuers) - S = np.random.rand(n_issuers) - rho = 0.5 - pomme = lossdistrib_joint_Z(p, None, w, S, rho, defaultflag=True) - poire = lossdistrib_joint_Z(p, pp, w, S, rho, defaultflag=True) + R = np.random.rand(n_issuers) + Rbar = joint_default_averagerecov_distrib(p, 1-R, 1001) + Rbar_slow = average_recov(p, R, 1001) |
