/* TESTING FOR AND ESTIMATION IN THE PRESENCE OF AUTOCORRELATION
   -- AN APPLICATION TO THE CONSUMPTION FUNCTION */

cls;

/* LOAD DATA AND DEFINE VARIABLES */
load path = c:\gauss8.0\classes\ECON5350\data\;
load x[37,3] = macro.txt;

t = x[2:37,1];                     /* time trend */
y = x[2:37,2];                     /* income */
c = x[2:37,3];                     /* consumption */

nobs = rows(t);
constant = ones(nobs,1);
xmat = constant~t~y;
k = cols(xmat);

/* OLS ESTIMATION */
b = inv(xmat'*xmat)*(xmat'*c);
resids = c - xmat*b;

/* AUTOCORRELATION TESTS */

/* DURBIN-WATSON TEST */
err = resids[2:nobs,1];
errless1 = resids[1:nobs-1,1];
diff = err - errless1;
dwstat = (diff'*diff)/(resids'*resids);
print "Durbin-Watson Statistic = " dwstat;
print;

/* LAGRANGE MULTIPLIER TEST: regress e(t) on the regressors and e(t-1);
   LM = (T-1)*R^2 is asymptotically chi-squared(1) under no autocorrelation */
y2 = y[2:nobs,1];
t2 = t[2:nobs,1];
lmcons = constant[2:nobs,1];
lmx = lmcons~y2~t2~errless1;
alpha = inv(lmx'*lmx)*(lmx'*err);
lmresids = err - lmx*alpha;
errbar = err - meanc(err)*lmcons;
R2 = 1 - (lmresids'*lmresids)/(errbar'*errbar);
lmstat = (nobs - 1)*R2;
print "LM Statistic = " lmstat;
print;

/* EFFICIENT ESTIMATION */

/* COCHRANE-ORCUTT ESTIMATION: iterate between estimating rho from the
   residuals and re-estimating beta from the quasi-differenced data */
cless1 = c[1:nobs-1,1];
c2 = c[2:nobs,1];
xmatless1 = xmat[1:nobs-1,.];
xmat2 = xmat[2:nobs,.];

cc = 1;
i = 1;
print "   CO Intercept        CO Trend        CO Slope          CO Rho";
do while cc > 0.01;
    resids = c - xmat*b;
    errless1 = resids[1:nobs-1,1];
    err = resids[2:nobs,1];
    if i == 1;
        rhohat = 0.75;
    else;
        rhohat = inv(errless1'*errless1)*(errless1'*err);
    endif;
    cstar = c2 - rhohat*cless1;
    xstar = xmat2 - rhohat*xmatless1;
    print b[1,1]~b[2,1]~b[3,1]~rhohat;
    bold = b;
    b = inv(xstar'*xstar)*(xstar'*cstar);
    cc = (bold - b)'*(bold - b);
    i = i + 1;
endo;

/* FEASIBLE GLS ESTIMATION */
b = inv(xmat'*xmat)*(xmat'*c);
resids = c - xmat*b;
s2 = (resids'*resids)/(nobs-k);
err = resids[2:nobs,1];
errless1 = resids[1:nobs-1,1];
r = (err'*errless1)/(err'*err);

/* AR(1) error covariance structure: omega[i,j] = rho^|i-j|/(1-rho^2) */
omega = eye(nobs);
for i(1,nobs,1);
    for j(1,nobs,1);
        omega[i,j] = r^(abs(i-j))/(1-r^2);
    endfor;
endfor;

bhathat = inv(xmat'*inv(omega)*xmat)*(xmat'*inv(omega)*c);
print;
print " FGLS Intercept      FGLS Trend      FGLS Slope        FGLS Rho";
print bhathat[1,1]~bhathat[2,1]~bhathat[3,1]~r;
print;

/* MAXIMUM LIKELIHOOD ESTIMATION (Ignoring the 1st Observation) */
library cml;
#include cml.ext;

data = c~xmat;

/* Likelihood Procedure: conditional log likelihood of the nobs-1
   quasi-differenced observations */
proc lnlk(theta,data);
    local ystar, xstar, eps, epstar, beta, sigma2, rho, logl;
    beta = theta[1:3];
    sigma2 = theta[4];
    rho = theta[5];
    ystar = data[2:nobs,1] - rho*data[1:nobs-1,1];
    xstar = data[2:nobs,2:4] - rho*data[1:nobs-1,2:4];
    eps = data[.,1] - data[.,2:4]*beta;
    epstar = ystar - xstar*beta;
    logl = -0.5*((nobs-1)*(ln(2*pi) + ln(sigma2)) + (1/sigma2)*(epstar'*epstar));
    retp(logl);
endp;

/* Supplying an Analytical Gradient (1x5 row vector, ordered as theta) */
proc gradient(theta,data);
    local grad, g1, g2, g3, beta, sigma2, rho, ystar, xstar, eps, epstar;
    beta = theta[1:3];
    sigma2 = theta[4];
    rho = theta[5];
    ystar = data[2:nobs,1] - rho*data[1:nobs-1,1];
    xstar = data[2:nobs,2:4] - rho*data[1:nobs-1,2:4];
    eps = data[.,1] - data[.,2:4]*beta;
    epstar = ystar - xstar*beta;
    g1 = (1/sigma2)*(epstar'*xstar);                                  /* d logl / d beta */
    g2 = -(0.5*(nobs-1)/sigma2) + (0.5/sigma2^2)*(epstar'*epstar);    /* d logl / d sigma2 */
    g3 = (1/sigma2)*(epstar'*eps[1:nobs-1]);                          /* d logl / d rho */
    grad = g1~g2~g3;
    retp(grad);
endp;

/* Call the CML Module */
cmlset;
Cvec = zeros(1,5);
Cvec[4] = 1;
_cml_C = Cvec;                      /* linear constraint _cml_C*theta >= _cml_D ... */
_cml_D = {0.00001};                 /* ... keeps sigma2 strictly positive */
_cml_GradProc = &gradient;
_cml_GradCheckTol = 0.01;
startval = b|s2|rhohat;
{mltheta,f,g,cov,ret} = CMLPrt(CML(data,0,&lnlk,startval));
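
/* A minimal sketch of standard errors for the FGLS estimates above, assuming
   the quantities c, xmat, omega, bhathat, nobs and k defined earlier; the
   names glsresids, s2gls, varbhat and stderrs are introduced here only for
   illustration. */
glsresids = c - xmat*bhathat;                            /* residuals at the FGLS estimates */
s2gls = (glsresids'*inv(omega)*glsresids)/(nobs-k);      /* estimated innovation variance */
varbhat = s2gls*inv(xmat'*inv(omega)*xmat);              /* Var(bhathat) under AR(1) errors */
stderrs = sqrt(diag(varbhat));
print " FGLS Standard Errors (Intercept, Trend, Slope)";
print stderrs';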