最小二乗法 (Ordinary Least Squares)
プログラム (program)
// Ordinary Least Square Method
#include<oxstd.h>
// Entry point: estimates a simple OLS regression y = b0 + b1*x + e on a
// 5-observation sample, then prints fitted values, coefficient estimates
// with standard errors / t-values / p-values, R^2 statistics, the error
// variance, and the coefficient covariance matrix.
// Naming convention (Ox Hungarian style): c=count, d=double, m=matrix, v=vector.
main()
{
    decl cdf, ck, cnobs;                 // scalar counts / degrees of freedom
    decl dr2, dr2_adj, ds2, dssr, dsst;  // scalar (double) statistics
    decl mp, msigma, mx, mxxinv;         // matrices
    decl vb, ve, vpvalue, vse, vtvalue, vy, vyhat;  // (column) vectors

    // Data: independent variable plus a constant term -> X is n x k.
    mx = <0;1;2;3;4>;                    // X: independent variable
    mx = ones(5,1) ~ mx;                 // prepend constant term
    vy = <10;20;20;30;40>;               // Y: dependent variable

    cnobs = rows(mx);                    // n: number of observations
    ck    = columns(mx);                 // k: number of parameters (incl. constant)
    cdf   = cnobs - ck;                  // n - k: degrees of freedom for error

    // Ordinary Least Squares estimates.
    // (X'X)^{-1} is needed twice (coefficients and their covariance
    // matrix), so invert once and reuse instead of inverting twice.
    mxxinv = invert(mx'*mx);             // (X'X)^{-1}
    vb     = mxxinv * mx' * vy;          // beta_hat = (X'X)^{-1} X'y
    vyhat  = mx * vb;                    // fitted values y-hat
    ve     = vy - vyhat;                 // residuals e = y - y-hat
    dssr   = ve'*ve;                     // Sum of Squared Residuals

    // Total sum of squares via the centering matrix P = I - (1/n)11',
    // so SST = y'Py (deviations of y from its mean).
    mp   = unit(cnobs) - (1/cnobs)*ones(cnobs,1)*ones(1,cnobs);
    dsst = vy' * mp * vy;

    dr2     = 1 - dssr/dsst;                       // R^2
    dr2_adj = 1 - (dssr/cdf)/(dsst/(cnobs-1));     // adjusted R^2
    ds2     = dssr/cdf;                            // s^2: error variance estimate

    msigma  = ds2 * mxxinv;                        // Var(beta_hat) = s^2 (X'X)^{-1}
    vse     = sqrt(diagonal(msigma))';             // standard errors of beta_hat
    vtvalue = vb ./ vse;                           // t-values for H0: beta_j = 0
    vpvalue = 2*tailt(fabs(vtvalue), cdf);         // two-sided p-values for H0: beta_j = 0

    // Report results (same tables/labels as before).
    println("%c", {"x","y","yhat","residual"},
        "%10.6", mx[][1]~vy~vyhat~ve);
    println("%r", {"beta0","beta1"},
        "%c", {"est. coeff.", "std. err.","t-value", "p-value"},
        "%10.6", vb~vse~vtvalue~vpvalue);
    println("%c", {"R^2", "R^2-adj."},
        "%10.6", dr2~dr2_adj);
    println("estimated variance of error:", ds2);
    println("estimated covariance matrix of beta_hat:", msigma);
}
計算結果 (program output)
x y yhat residual
0.00000 10.000 10.000 -7.1054e-015
1.0000 20.000 17.000 3.0000
2.0000 20.000 24.000 -4.0000
3.0000 30.000 31.000 -1.0000
4.0000 40.000 38.000 2.0000
est. coeff. std. err. t-value p-value
beta0 10.000 2.4495 4.0825 0.026548
beta1 7.0000 1.0000 7.0000 0.0059863
R^2 R^2-adj.
0.94231 0.92308
estimated variance of error:
10.000
estimated covariance matrix of beta_hat:
6.0000 -2.0000
-2.0000 1.0000