F-test of linear restrictions in a regression model (program)
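This Ox program reads the data file nerlove.asc (145 observations on 5 variables, all transformed to logs), estimates the unconstrained regression y=b1+b2*x2+b3*x3+b4*x4+b5*x5 by ordinary least squares, re-estimates the model with the linear restriction b3+b4+b5=1 imposed by substitution, and then computes the LM, likelihood-ratio, Wald, and F statistics for testing that restriction.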
// Ordinary Least Squares Method
#include <oxstd.h>
main()
{
decl cdf,ck,cnobs; // scalar (count, number of ...)
decl dr2,dr2_adj,ds2,dssr,dssr_r,dssr_u,dsst; // scalar (double)
decl mp,msigma,mx; // matrix
decl vb,ve,vpvalue,vse,vtvalue,vy,vyhat; // vector
decl file;
// Read file
file=fopen("nerlove.asc"); // open the data file
fscan(file,"%#m",145,5,&mx); // read a 145 x 5 data matrix
fclose(file);
mx=log(mx); // take logs of all variables
cnobs=rows(mx); // number of observations
vy=mx[][0]; // Y vector: dependent variable
mx=ones(cnobs,1)~mx[][1:2]~mx[][4]~mx[][3]; // X matrix: constant term + independent variables
ck=columns(mx); // number of parameters (including constant term)
cdf=cnobs-ck; // degrees of freedom for error
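// (here cnobs=145 and ck=5, so cdf=140)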
// Ordinary Least Squares Estimates
println("*************************************************");
println("Unconstrained Model");
println("*************************************************");
// y=b1+b2*x2+b3*x3+b4*x4+b5*x5
// Unconstrained Model
vb=invert(mx'*mx)*mx'*vy; // beta
vyhat=mx*vb; // y-hat
ve=vy-vyhat; // residual
dssr=ve'*ve; // Sum of Squared Residuals
mp=unit(cnobs)-(1/cnobs)*ones(cnobs,1)*ones(1,cnobs); // p=i-(1/n)11'
dsst=vy'*mp*vy; // sum of squares total for y: sst=y'py
dr2=1-dssr/dsst; // r-squared
dr2_adj=1-(dssr/cdf)/(dsst/(cnobs-1)); // adjusted r-squared
ds2=dssr/cdf; // s^2
msigma=ds2*invert(mx'*mx); // s^2*(x'x)^{-1}
vse=sqrt(diagonal(msigma))'; // standard error of beta_hat
vtvalue=vb./vse; // t-value for H0:beta=0
vpvalue=2*tailt(fabs(vtvalue),cdf);// p-value for H0:beta=0
println("%r",{"beta1","beta2","beta3","beta4","beta5"},
"%c",{"est. coeff.", "std. err.","t-value", "p-value"},
"%10.6",vb~vse~vtvalue~vpvalue);
println("%c",{"R^2", "R^2-adj."},
"%10.6",dr2~dr2_adj);
println("estimated variance of error:",ds2);
dssr_u=dssr; // save the unconstrained Sum of Squared Residuals
println("*************************************************");
println("Constrained Model (B3+B4+B5=1)");
println("*************************************************");
// Constrained Model
// b3+b4+b5=1
// (y-x5)=b1+b2*x2+b3*(x3-x5)+b4*(x4-x5)
// Substitution method
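// (solving the restriction for b5=1-b3-b4, substituting into the unconstrained
//  equation, and moving x5 to the left-hand side gives the equation above)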
vy=vy-mx[][4]; // new dependent variable: y-x5
mx=mx[][0:1]~(mx[][2:3]-ones(1,2).*mx[][4]); // new regressors: 1, x2, x3-x5, x4-x5
cdf=cdf+1; // one fewer free parameter, so one more error degree of freedom
//
vb=invert(mx'*mx)*mx'*vy; // beta
vyhat=mx*vb; // y-hat
ve=vy-vyhat; // residual
dssr=ve'*ve; // Sum of Squared Residuals
mp=unit(cnobs)-(1/cnobs)*ones(cnobs,1)*ones(1,cnobs); // p=i-(1/n)11'
dsst=vy'*mp*vy; // sum of squares total for y: sst=y'py
dr2=1-dssr/dsst; // r-squared
dr2_adj=1-(dssr/cdf)/(dsst/(cnobs-1)); // adjusted r-squared
ds2=dssr/cdf; // s^2
msigma=ds2*invert(mx'*mx); // s^2*(x'x)^{-1}
vse=sqrt(diagonal(msigma))'; // standard error of beta_hat
vtvalue=vb./vse; // t-value for H0:beta=0
vpvalue=2*tailt(fabs(vtvalue),cdf);// p-value for H0:beta=0
println("%r",{"beta1","beta2","beta3","beta4","beta5"},
"%c",{"est. coeff.", "std. err.","t-value", "p-value"},
"%10.6",vb~vse~vtvalue~vpvalue);
println("%c",{"R^2", "R^2-adj."},
"%10.6",dr2~dr2_adj);
println("estimated variance of error:",ds2);
//
dssr_r=dssr; // save the constrained (restricted) Sum of Squared Residuals
decl dwald,dlrt,dlm,dfstat,vstat; // test statistics
dlm=cnobs*(dssr_r-dssr_u)/dssr_r; // LM statistic: n*(ssr_r-ssr_u)/ssr_r
dlrt=cnobs*log(dssr_r/dssr_u); // likelihood ratio statistic: n*log(ssr_r/ssr_u)
dwald=cnobs*(dssr_r-dssr_u)/dssr_u; // Wald statistic: n*(ssr_r-ssr_u)/ssr_u
dfstat=(dssr_r-dssr_u)/(dssr_u/(cnobs-5)); // F statistic: 1 restriction, n-k=140 error df
vstat=dlm|dlrt|dwald|dfstat; // stack the four statistics
vpvalue=tailchi(vstat[0:2],1)|tailf(vstat[3],1,cnobs-5); // p-values: chi^2(1) for LM/LR/Wald, F(1,n-5) for F
println("*************************************************");
println("TEST H0:B3+B4+B5=1 vs H1:B3+B4+B5 =! 1");
println("*************************************************");
println("%r",{"LM test", "Likelihood ratio test", "Wald test", "F test"},
"%c",{"Statistic", "p-value"},
"%10.6",vstat~vpvalue);
}
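To run the program, save the code above as, for example, ftest.ox (any file name will do) with nerlove.asc in the same directory, and execute it with the Ox console: oxl ftest.ox. The output reports the OLS results for the unconstrained and constrained models, followed by the four test statistics and their p-values.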