SAS Documentation
SAS® Solution for Stress Testing
Reference manual - version 08.2021
Loading...
Searching...
No Matches
irmst_fft_convolution.sas
Go to the documentation of this file.
/*
 Copyright (C) 2018 SAS Institute Inc. Cary, NC, USA
*/

/**
 \file
 \anchor irmst_fft_convolution

 \brief This macro evaluates the compound distribution.
 \anchor irmst_fft_convolution_key

 \details

 This macro evaluates the compound distribution. Details about the model implemented here can be found in
 "CreditRisk+ by Fast Fourier Transform", July 2004. YieldCurve, August 2004.
 Available at SSRN: https://ssrn.com/abstract=1122844 or https://dx.doi.org/10.2139/ssrn.1122844
 The main idea is to analyze the default rates conditionally on several sectors of interest, where each sector is given a weight.


 \ingroup DynamicPortfolio

 \author SAS Institute INC.
 \date 2020
*/
25
26
/*-----------------------------------------------------------------------------
 Macro: irmst_fft_convolution

 Purpose:
   Evaluates the CreditRisk+ compound loss distribution by FFT convolution,
   following Melchiori, "CreditRisk+ by Fast Fourier Transform" (2004).

 Parameters (all keyword parameters):
   libname=           Library holding the input and output data sets.
   ValueAtRisk_Level= Confidence level for the Value-at-Risk cut (e.g. 0.95).
   ds_in=             Input data set. Must contain, per observation:
                      PD_Sector_1..N, PD_Volatility_Sector_1..N,
                      NORMLZD_EXPCTD_LOSS_Sector_1..N and Discretized_Exposure
                      (N = &sectors_cnt.).
   ds_out=            Output data set receiving the compound loss distribution,
                      its cumulative version, tail, VaR, expected loss and
                      economic capital (aggregate and, when &sectors_cnt.>1,
                      per sector).

 External requirements (NOT parameters — assumed set by the UI before the call):
   &sectors_cnt., &exposureHistogram_max., &exposureHistogram_cellCnt.,
   &exposureHistogram_width.

 Side effects:
   Creates intermediate data sets &ds_in._mean, &ds_in._sum, &ds_in._extended
   and WORK.zero_padding; turns on OPTION MPRINT inside a DATA _NULL_ step.
-----------------------------------------------------------------------------*/
%macro irmst_fft_convolution(
    libname=
    , ValueAtRisk_Level=
    , ds_in=
    , ds_out=
    );


    /****************************************************************************
    ** GLOBAL CREDITRISK+ VARIABLES
    ****************************************************************************/
    /* The following macro variables are defined through the UI.
    %let sectors_cnt =4;
    %let exposureHistogram_max =51200000;
    %let exposureHistogram_cellCnt =256;
    %let exposureHistogram_width =%eval(&exposureHistogram_max./&exposureHistogram_cellCnt.);
    %let ValueAtRisk_Level =0.95;
    */


    /* Per-sector cross-sectional means of the default probabilities and of
       their volatilities. These means parameterize the gamma mixing
       distribution below. */
    proc means mean data=&libname..&ds_in. noprint;
        var PD_Sector_1-PD_Sector_&sectors_cnt.
            PD_Volatility_Sector_1-PD_Volatility_Sector_&sectors_cnt.;
        output out=&libname..&ds_in._mean(drop=_type_ _freq_ )
            mean=PD_Mean_Sector_1-PD_Mean_Sector_&sectors_cnt. PD_Volatility_Mean_Sector_1-PD_Volatility_Mean_Sector_&sectors_cnt.;
    run;


    /* Notice that in the case of the mixing gamma distribution the shape and the scale
       parameters are equal across observations even though the original default rates and
       the corresponding standard deviations change across observations. This is correct by
       design, since the mixing distribution is a common distribution from which you can
       draw an idiosyncratic element for each observation. */
    data _NULL_;
        set &libname..&ds_in._mean(obs=1);
        option mprint;
        /* NOTE(review): defining a macro inside a DATA step is a SAS anti-pattern
           (the nested macro is recompiled on every invocation of the outer macro),
           but it is kept here because the %do loop must expand inside this step. */
        %macro temp;
            %do i_sector=1 %to &sectors_cnt.;
                /* Quoting from Melchiori, Mario R.,
                   CreditRisk+ by Fast Fourier Transform (July 2004). YieldCurve, August 2004.
                   Available at SSRN: https://ssrn.com/abstract=1122844 or https://dx.doi.org/10.2139/ssrn.1122844
                   "In the real world the mean rate of defaults, as well as the variance of defaults, changes over time.
                   ..... In order to solve this problem, CreditRisk+ introduces an alternative to the basic model,
                   using Poisson to develop the process of default, but assuming that the mean rate is stochastic with mean
                   P_A and standard deviation Sigma_{P_A}. To model this uncertainty, CreditRisk+ adopts the
                   Gamma distribution, and assumes a mean of one and a standard deviation equal to MIX_GAMMA_DIST_STD" */
                /* IMPORTANT: technically speaking
                   MIX_GAMMA_DIST_STD=SD_PD/PD
                   should be constant across all the observations for each sector.
                   The above restriction is very strong and hard to enforce. This is particularly true when new
                   synthetic positions need to be created for a dynamic analysis. A slight modification can make
                   this restriction softer. Instead of imposing
                   MIX_GAMMA_DIST_STD=SD_PD/PD
                   we impose
                   MIX_GAMMA_DIST_STD=SD_MEAN_PD/PD_MEAN.
                   This is equivalent to
                   - let PD remain PD for each individual observation
                   - change SD_PD=SD_MEAN_PD*PD/PD_MEAN.
                   This modification of the individual standard deviations naturally preserves the original value
                   of PD, and enforces a constant ratio (MIX_GAMMA_DIST_STD) across the sector. */
                MIX_GAMMA_DIST_STD_&i_sector.=PD_Volatility_Mean_Sector_&i_sector./PD_Mean_Sector_&i_sector.;

                /* Gamma mixing distribution with mean 1:
                   Mix_Gamma_Dist_Scale_Beta  = MIX_GAMMA_DIST_STD^2
                   Mix_Gamma_Dist_Shape_Alpha = MIX_GAMMA_DIST_STD^(-2)
                   Stored as macro variables local to this macro ("L" scope). */
                CALL SYMPUTX("MIX_GAMMA_DIST_SCALE_BETA_&i_sector.", (MIX_GAMMA_DIST_STD_&i_sector.)**2 ,"L");
                CALL SYMPUTX("MIX_GAMMA_DIST_SHAPE_ALPHA_&i_sector.", (MIX_GAMMA_DIST_STD_&i_sector.)**(-2),"L");
            %end;
        %mend temp;
        %temp;
    run;


    /* Aggregate the normalized expected exposure for the "buckets" used to
       discretize the continuous loss severity function. */
    proc means sum data=&libname..&ds_in. noprint;
        var NORMLZD_EXPCTD_LOSS_Sector_1-NORMLZD_EXPCTD_LOSS_Sector_&sectors_cnt.;
        class Discretized_Exposure ;
        output out=&libname..&ds_in._sum(where=(Discretized_Exposure NE .) drop=_type_ _freq_ ) sum=normlzd_expctd_loss_sum_1-normlzd_expctd_loss_sum_&sectors_cnt.;
    run;

    /* Create a zero data set to be used to pad the original data so that every
       band 0..cellCnt-1 is present. */
    data zero_padding;
        do j_band=0 to %eval(&exposureHistogram_cellCnt.-1);
            output;
        end;
    run;


    /* Full join between the original (aggregated) data and the table containing
       the zero-padding bands. */
    proc sql;
        create table &libname..&ds_in._extended as
        select main_tbl.*, zero_padding.j_band
        from &libname..&ds_in._sum(where=(Discretized_Exposure NE .)) as main_tbl full join zero_padding
        on zero_padding.j_band=main_tbl.Discretized_Exposure;
    quit;


    /* Evaluate the expected number of defaults per band. */
    data &libname..&ds_in._extended;
        set &libname..&ds_in._extended end=last;
        /* Create running sums of the number of defaults, one per sector. */
        retain %do i_sector=1 %to &sectors_cnt.;
                   cum_default_number_&i_sector. 0
               %end;
        ;
        /* The expected number of defaults is the ratio between the total amount of loss
           in a given band and the corresponding exposure amount; all measurements are
           normalized figures. For a given band j:
           normalizedExpctedLoss_sum = sum(loss/exposureHistogram_width)
           DiscretizedExposure       = ceiling(exposure/exposureHistogram_width) = j_band
           ExpectedDefaultNumber     = normalizedExpctedLoss_sum/j_band
        */
        %do i_sector=1 %to &sectors_cnt.;
            if (normlzd_expctd_loss_sum_&i_sector. NE .) and (j_band NE 0) then do;
                expctd_default_number_&i_sector. =normlzd_expctd_loss_sum_&i_sector./j_band;
            end;
            else do;
                expctd_default_number_&i_sector.=0;
            end;
            cum_default_number_&i_sector. =cum_default_number_&i_sector.+expctd_default_number_&i_sector.;
        %end;
        /* Store the total expected number of defaults across bands into
           macro variables (local scope) for use in PROC IML below. */
        if last then do;
            %do i_sector=1 %to &sectors_cnt.;
                CALL SYMPUTX("total_default_number_&i_sector.", cum_default_number_&i_sector. ,"L");
            %end;
        end;
    run;

    /* Evaluate the empirical densities for the discretized losses. */
    data &libname..&ds_in._extended;
        set &libname..&ds_in._extended;
        %do i_sector=1 %to &sectors_cnt.;
            severity_density_&i_sector.=expctd_default_number_&i_sector./&&total_default_number_&i_sector..;
        %end;
    run;


    /* Move the analysis into PROC IML to take advantage of the built-in
       Fast Fourier Transform functions (FFT/IFFT). */
    proc iml;
        use &libname..&ds_in._extended;
        /* Read data into memory. */
        read all var {
            %do i_sector=1 %to &sectors_cnt.;
                expctd_default_number_&i_sector.
                severity_density_&i_sector.
            %end;
        };
        close;


        /* Complex point-wise raise to the power: Z^q.
           A vector of complex numbers is a two-column matrix where
           Re(Z)=Z[,1] and Im(Z)=Z[,2].
           z^q = r^q*cos((theta+2*pi*k)*q) + i*r^q*sin((theta+2*pi*k)*q)
           where
           r     = sqrt(Re(z)^2+Im(z)^2)
           theta = atan2(Im(z), Re(z))   (principal branch, k=0) */
        start cplxPower(Z, q);
            R = j(nrow(Z),1);
            THETA = j(nrow(Z),1);
            C = j(nrow(Z),2);
            R[,1] =sqrt(Z[,1]##2+Z[,2]##2);
            THETA[,1] =ATAN2(Z[,2], Z[,1]);
            /* In theory it should be cos((THETA[,1]+2*pi*k)*q);
               here we pick the principal branch k=0. */
            C[,1] = (R[,1]##q)#cos(THETA[,1]#q);
            C[,2] = (R[,1]##q)#sin(THETA[,1]#q);
            return( C );
        finish;


        /* Complex point-wise multiplication: A*B.
           A vector of complex numbers is a two-column matrix where
           Re(A)=A[,1] and Im(A)=A[,2].
           If A = x + iy and B = u + iv then
           C = A*B = (x#u - y#v) + i(x#v + y#u) */
        start cplxMult(A, B);
            C = j(nrow(A),2);
            C[,1] = A[,1]#B[,1] - A[,2]#B[,2];
            C[,2] = A[,1]#B[,2] + A[,2]#B[,1];
            return( C );
        finish;

        /* Complex vector representation of unity: 1+i0. */
        unit=j(1,2);
        unit[1,1]=1;
        unit[1,2]=0;


        %do i_sector=1 %to &sectors_cnt.;
            /* FFT of the empirical density associated with the severity loss
               distribution of this sector. */
            fft_&i_sector. =fft(severity_density_&i_sector.);
            /* Temporary product needed for the characteristic function of the
               compound loss distribution when the total number of losses
               follows a mixed-gamma Poisson distribution. */
            temp =unit-(&&total_default_number_&i_sector..#&&MIX_GAMMA_DIST_SCALE_BETA_&i_sector..)#(fft_&i_sector.-unit);
            /* Characteristic function of the compound loss distribution. */
            characteristic_fuction_fft_&i_sector. =cplxPower(temp, -&&MIX_GAMMA_DIST_SHAPE_ALPHA_&i_sector..);
        %end;

        /* Compound loss distribution = inverse FFT of the characteristic function. */
        %if &sectors_cnt.=1 %then %do;
            /* Single sector: the compound distribution is the IFFT of the
               characteristic function directly. */
            compoundDistribution =ifft(characteristic_fuction_fft_1)/&exposureHistogram_cellCnt.;
        %end;
        %else %if &sectors_cnt.>1 %then %do;
            /* More than one sector: the compound distribution is the IFFT of the
               product of the per-sector characteristic functions.
               Initialize a temporary complex vector to 1. In the complex domain a
               unitary vector is a two-column matrix: the first column (real part)
               is set to 1, the second column (imaginary part) to 0.
               The row count floor(cellCnt/2+1) matches the length of IML's FFT
               output for a real series of length cellCnt. */
            characteristic_fuction_fft_prod = repeat(unit,floor(&exposureHistogram_cellCnt./2+1),1);
            %do i_sector=1 %to &sectors_cnt.;
                characteristic_fuction_fft_prod =cplxMult(characteristic_fuction_fft_prod,characteristic_fuction_fft_&i_sector.);
            %end;
            /* IFFT of the product of the characteristic functions yields the
               aggregate compound distribution. */
            compoundDistribution =ifft(characteristic_fuction_fft_prod)/&exposureHistogram_cellCnt.;
        %end;


        /* Create the output table. */
        /* Cumulative compound distribution, used to evaluate the Value-at-Risk. */
        compoundDistribution_cumsum =cusum(compoundDistribution);
        /* Indicator vector for the portion of the compound distribution beyond the VaR. */
        compoundDistribution_tailIndex =compoundDistribution_cumsum>(&ValueAtRisk_Level.);
        /* Compound distribution beyond the VaR; usable to graph the tail. */
        compoundDistribution_tail =compoundDistribution#compoundDistribution_tailIndex;
        /* Discretized loss grid. */
        CompoundLoss =(0:%eval(&exposureHistogram_cellCnt.-1))*&exposureHistogram_width.;
        /* Value-at-Risk at the &ValueAtRisk_Level. confidence level:
           first loss grid point whose cumulative probability exceeds the level. */
        VAR =CompoundLoss[loc(compoundDistribution_cumsum>(&ValueAtRisk_Level.))[1]];
        ExpectedCompoundLoss =CompoundLoss*compoundDistribution;
        EconomicCapital =VAR-ExpectedCompoundLoss;
        /* Disaggregate (per-sector) results are only needed when more than one
           sector is present. */
        %if &sectors_cnt.>1 %then %do;
            %do i_sector=1 %to &sectors_cnt.;
                compoundDistribution_&i_sector. =ifft(characteristic_fuction_fft_&i_sector.)/&exposureHistogram_cellCnt.;
                compoundDistribution_cumsum_&i_sector. =cusum(compoundDistribution_&i_sector.);
                /* Indicator for the portion of this sector's distribution beyond the VaR. */
                compoundDistribution_tailIndex_&i_sector. =compoundDistribution_cumsum_&i_sector.>(&ValueAtRisk_Level.);
                /* Sector tail distribution, for graphing. */
                compoundDistribution_tail_&i_sector. =compoundDistribution_&i_sector.#compoundDistribution_tailIndex_&i_sector.;
                VAR_&i_sector. =CompoundLoss[loc(compoundDistribution_cumsum_&i_sector.>(&ValueAtRisk_Level.))[1]];
                ExpectedCompoundLoss_&i_sector. =CompoundLoss*compoundDistribution_&i_sector.;
                EconomicCapital_&i_sector. =VAR_&i_sector. - ExpectedCompoundLoss_&i_sector. ;
            %end;
        %end;

        create &libname..&ds_out. var {
            CompoundLoss
            compoundDistribution_tail
            VAR
            ExpectedCompoundLoss
            EconomicCapital
            compoundDistribution_cumsum
            compoundDistribution
            %if &sectors_cnt.>1 %then %do;
                %do i_sector=1 %to &sectors_cnt.;
                    compoundDistribution_tailIndex_&i_sector.
                    VAR_&i_sector.
                    ExpectedCompoundLoss_&i_sector.
                    EconomicCapital_&i_sector.
                    compoundDistribution_cumsum_&i_sector.
                    compoundDistribution_&i_sector.
                %end;
            %end;
        };
        append;
        close &libname..&ds_out.;

    quit;


%mend irmst_fft_convolution; /*End macro function definition: irmst_fft_convolution*/