' MLOGIT1.PRG
' example program for EViews LogL object
'
' multinomial logit with three choices
' with two individual specific regressors
'
' for a discussion, see Section 19.7 of Greene, William H. (1997)
' Econometric Analysis, 3rd edition, Prentice-Hall

' change path to program path
%path = @runpath + "../data/"
cd %path

' load artificial data
wfload mlogit

' declare parameter vectors
coef(3) b2
coef(3) b3
' true values are
' b2(1) -0.4  b2(2) 1    b2(3) -0.2
' b3(1) -0.4  b3(2) 0.3  b3(3) 0.5

' setup the loglikelihood
logl mlogit
mlogit.append @logl logl1
' define index for each choice
mlogit.append xb2 = b2(1)+b2(2)*x1+b2(3)*x2
mlogit.append xb3 = b3(1)+b3(2)*x1+b3(3)*x2
' define prob for each choice
mlogit.append denom = 1+exp(xb2)+exp(xb3)
mlogit.append pr1 = 1/denom
mlogit.append pr2 = exp(xb2)/denom
mlogit.append pr3 = exp(xb3)/denom
' specify likelihood
mlogit.append logl1 = (1-dd2-dd3)*log(pr1)+dd2*log(pr2)+dd3*log(pr3)

' specify analytic derivatives
for !i=2 to 3
   mlogit.append @deriv b{!i}(1) grad{!i}1 b{!i}(2) grad{!i}2 b{!i}(3) grad{!i}3
   mlogit.append grad{!i}1 = dd{!i}-pr{!i}
   mlogit.append grad{!i}2 = grad{!i}1*x1
   mlogit.append grad{!i}3 = grad{!i}1*x2
next

' get starting values from binomial logit
equation eq2.binary(d=l) dd2 c x1 x2
b2 = eq2.@coefs
equation eq3.binary(d=l) dd3 c x1 x2
b3 = eq3.@coefs

' check analytic derivatives at starting values
freeze(tab1) mlogit.checkderiv
show tab1

' do MLE and display results
mlogit.ml(showopts,m=1000,c=1e-5)
show mlogit.output
' AR1.PRG
' example program for EViews LogL object
'
' estimate AR(1) by MLE
' for a discussion, see pp. 118-119 of Hamilton, James D. (1994) Time Series Analysis,
' Princeton University Press.
'
' note that this model becomes difficult to estimate as rho approaches 1

' make up data that follows an AR(1), with rho=.85
wfcreate ar1 m 1980 1989
rndseed 123   ' set seed of random number generator
series y=0
smpl @first+1 @last
y = 0.85*y(-1) + nrnd

' create dummy variable for first obs
series d1 = 0
smpl @first @first
d1 = 1
smpl @all

' set starting values to LS (drops first obs)
equation eq1.ls y c ar(1)
coef(1) rho = c(2)
coef(1) s2 = eq1.@se^2

' set up logl object
' uses new @recode syntax: @recode(true_false, value_if_true, value_if_false)
' to handle the first observation likelihood contribution
logl ar1
ar1.append @logl logl1
ar1.append var = @recode( d1=1,s2(1)/(1-rho(1)^2),s2(1) )
ar1.append res = @recode( d1=1,y-c(1)/(1-rho(1)),y-c(1)-rho(1)*y(-1) )
ar1.append sres = res/@sqrt(var)
ar1.append logl1 = log(@dnorm(sres)) -log(var)/2

' do MLE and show results
ar1.ml(showopts,m=1000,c=1e-5)
show ar1.output

' compare with EViews AR(1) estimates which drop the first obs
show eq1.output

' full MLE using state space object
sspace ss1
ss1.append @signal y = c(1) + sv1
ss1.append @state sv1 = c(2)*sv1(-1) + [var = exp(c(3))]
' take starting values from NLS
eq1.updatecoefs
c(3) = log(eq1.@ssr/eq1.@regobs)
ss1.ml(showopts,m=1000,c=1e-5)
show ss1.output
' CLOGIT1.PRG
' example program for EViews LogL object
'
' conditional logit with 3 outcomes, and a test for IIA
'
' for a discussion, see Section 19.7.2 of Greene, William H. (1997)
' Econometric Analysis, 3rd edition, Prentice-Hall

' change path to program path
%path = @runpath+"../data/"
cd %path

' load artificial data
wfload mlogit

' full conditional logit using all three outcomes
' declare coef vectors to use in MLE
coef(2) a1 = 0.1   ' true values 0.6, -0.3
coef(2) b1 = 0.1   ' true values 0.6, 0.7

' for use in derivative expressions
series stores = d1*stores1+d2*stores2+d3*stores3
series dist = d1*dist1+d2*dist2+d3*dist3

' specify likelihood for three outcome logit
logl ll1
ll1.append @logl logl1
ll1.append xb1 = a1(1)*stores1+a1(2)*dist1
ll1.append xb2 = a1(1)*stores2+a1(2)*dist2+b1(1)*inc
ll1.append xb3 = a1(1)*stores3+a1(2)*dist3+b1(2)*inc
ll1.append denom = exp(xb1)+exp(xb2)+exp(xb3)
ll1.append logl1 = d1*xb1+d2*xb2+d3*xb3-log(denom)

' analytic derivatives
ll1.append @deriv a1(1) grada1 a1(2) grada2 b1(1) gradb2 b1(2) gradb3
ll1.append grada1 = stores-(stores1*exp(xb1)+stores2*exp(xb2)+stores3*exp(xb3))/denom
ll1.append grada2 = dist-(dist1*exp(xb1)+dist2*exp(xb2)+dist3*exp(xb3))/denom
ll1.append gradb2 = (d2-exp(xb2)/denom)*inc
ll1.append gradb3 = (d3-exp(xb3)/denom)*inc

' estimate by MLE
ll1.ml(showopts,m=1000,c=1e-5)
show ll1.output

' compute predicted probabilities
series y2hat = exp(xb2)/denom
series y3hat = exp(xb3)/denom
series y1hat = 1-y2hat-y3hat

' compute actual and predicted outcome (the one with highest predicted probability)
series y = (d2=1)*2+3*(d3=1)+1*(d2=0 and d3=0)
series yhat = 2*(y2hat>y3hat and y2hat>y1hat) + 3*(y3hat>y2hat and y3hat>y1hat) + 1*(y1hat>=y3hat and y1hat>=y2hat)

' show prediction table
group predict y yhat
show predict.freq

'-------------------------------------------------------------------------------------
' to test IIA restriction estimate model dropping choice 3 (use only outcomes 1 and 2)
'-------------------------------------------------------------------------------------

' initialize params
coef(2) a2 = 0.1
coef(1) b2 = 0.1

' likelihood for restricted model
' make sure not to use the same name for expressions as in ll1
logl ll2
ll2.append @logl logl2
ll2.append xb12 = a2(1)*stores1+a2(2)*dist1
ll2.append xb22 = a2(1)*stores2+a2(2)*dist2+b2(1)*inc
ll2.append denom2 = exp(xb12)+exp(xb22)
ll2.append logl2 = d1*xb12+d2*xb22-log(denom2)

' analytic derivatives
ll2.append @deriv a2(1) grada12 a2(2) grada22 b2(1) gradb22
ll2.append grada12 = d1*stores1+d2*stores2-(stores1*exp(xb12)+stores2*exp(xb22))/denom2
ll2.append grada22 = d1*dist1+d2*dist2-(dist1*exp(xb12)+dist2*exp(xb22))/denom2
ll2.append gradb22 = (d2-exp(xb22)/denom2)*inc

' estimate excluding choice 3 data
smpl @all if d3=0
ll2.ml(showopts,m=1000,c=1e-5)
show ll2.output

' compute Hausman statistic for IIA test
' get variance of unrestricted
sym var_1 = ll1.@coefcov
' resize to match var_2 (this trick depends on the ordering of the coefs)
sym(3,3) var_1
' get variance of restricted
sym var_2 = ll2.@coefcov
' get coefs
for !i=1 to 2
   matrix(3,1) coef_{!i}
   coef_{!i}(1,1) = a{!i}(1)
   coef_{!i}(2,1) = a{!i}(2)
   coef_{!i}(3,1) = b{!i}(1)
next

' compute test statistic
coef cdiff = coef_2 - coef_1
sym vdiff = var_2 - var_1
matrix hs = @transpose(cdiff) * @inverse(vdiff) * cdiff

' display results in table
table out
setcolwidth(out,1,20)
out(1,1) = "Hausman test for I.I.A.:"
out(2,1) = "chi-sqr(" + @str(@rows(cdiff)) + ") = "
out(2,2) = @str(hs(1))
out(2,3) = "p-value"
out(2,4) = 1-@cchisq(hs(1),@rows(cdiff))
show out
' BOXCOX1.PRG
' example program for EViews LogL object
' Box-Cox with transformation on both sides of the equation
' For a description of the likelihood function, see p. 484 of
' Greene, William H. (1997) Econometric Analysis, 3rd edition,
' Prentice-Hall. See Section 10.4 for a general discussion.

' create workfile
wfcreate boxcox a 1920 1938

' fill data
series y
series x
y.fill 73.8,70.6,59.1,55.5,56,56.4,55.8,56.2,55.7,55.8,55.7,54.9,54,53.7,54.3,55,56.1,57.2,58.9
x.fill 3.9,17,14.3,11.7,10.3,11.3,12.5,9.7,10.8,10.4,16.1,21.3,22.1,19.9,16.7,15.5,13.1,10.8,12.9

' set starting values for the lambdas
' NOTE: the estimates are very sensitive to starting values
coef(1) ylam = -0.5
coef(1) xlam = 0.1

' get coef starting values
series yt = (y^ylam(1)-1)/ylam(1)
series xt = (x^xlam(1)-1)/xlam(1)
equation eq_temp.ls yt c xt
coef(1) var = eq_temp.@se^2

' setup likelihood
logl ll1
ll1.append @logl logl
ll1.append yt = (y^ylam(1)-1)/ylam(1)
ll1.append xt = (x^xlam(1)-1)/xlam(1)
ll1.append res = yt-c(1)-c(2)*xt
ll1.append logl = log( @dnorm( res/@sqrt(var(1)) ) ) - log(var(1))/2 + (ylam(1)-1)*log(y)

' do MLE and display results
ll1.ml(showopts, m=1000, c=1e-5)
show ll1.output
' DISEQ1.PRG
' example program for EViews LogL object
' disequilibrium switching model
' Exercises 15.14-15.15 (pages 644-646) in Judge, et al. (1985),
' The Theory and Practice of Econometrics, 2nd Edition,
' John Wiley & Sons.

' create workfile
wfcreate diseq u 1 21

' load data in exercise 15.14 (Table 15.3)
' one obs extra for presample value
series u1
series v1
series y1
series w1
u1.fill(o=2) 0.41,-0.09,0.39,-0.52,1.37,-0.8,0.63,1.83,-0.85,-0.85,-1.29,-0.46,-0.79,-0.63,-0.62,-1.53,0.61,0.92,-0.26,-0.93
v1.fill(o=2) 0.46,4.78,2.76,-2.83,3.12,0.28,-0.02,2.81,2.41,0.09,-1.37,0.23,-0.45,2.64,3.83,1.23,-0.81,-1.04,3.33,1.92
y1.fill(o=2) 574.21,653.85,733.81,818.29,899.29,972.78,1057.56,1132.86,1215.8,1292.58,1373.82,1459.22,1538.38,1617.47,1692.97,1777.61,1858.21,1939.75,2010.6,2090.36
w1.fill(o=2) 1.69,1.88,1.94,1.96,2.15,2.16,2.29,2.31,2.59,2.67,2.76,2.73,2.94,3.06,3.19,3.17,3.3,3.44,3.46,3.62

' set starting value for p
smpl 1 1
series p1 = 43.21

' generate p1, d1, s1, q1 (q1 is the minimum of demand and supply)
model m1
m1.append d1 = 60-1.8*p1(-1)+0.05*y1+u1
m1.append s1 = 10+2.5*p1(-1)-3*w1+v1
m1.append q1 = (d1<s1)*d1+(d1>=s1)*s1
m1.append p1 = p1(-1)+0.04*(d1-s1)
smpl 2 21
m1.solve

' rename
rename d1_0 d1
rename s1_0 s1
rename q1_0 q1
p1 = p1_0   ' already exists; overwrite

' declare coef vectors to use in likelihood spec
coef(3) alpha   ' true values 60, -1.8, 0.05
coef(3) beta    ' true values 10, 2.5, -3
coef(1) gamma   ' true value 0.04
coef(2) sigma   ' true values 1, 2
!pi = @acos(-1)

' setup likelihood as in eqs (2.3.16) & (2.3.17) of Quandt, page 32
' assume zero correlation between demand and supply error
series dp_pos = (d(p1)>0)*d(p1)
series dp_neg = (d(p1)<=0)*(-1)*d(p1)
logl ll1
ll1.append @logl logl1
ll1.append ud = q1-alpha(1)-alpha(2)*p1(-1)-alpha(3)*y1+dp_pos/gamma(1)
ll1.append us = q1-beta(1)-beta(2)*p1(-1)-beta(3)*w1+dp_neg/gamma(1)
ll1.append logl1 = -log(2*!pi) -log(@abs(gamma(1))) -log(sigma(1)) -log(sigma(2)) -ud^2/sigma(1)^2/2 -us^2/sigma(2)^2/2

' estimate 2SLS for starting values
equation eqa.tsls q1 c p1(-1) y1 dp_pos @ c p1(-1) y1 w1
alpha = eqa.@coefs
sigma(1) = eqa.@se
show eqa.output
equation eqb.tsls q1 c p1(-1) w1 dp_neg @ c p1(-1) y1 w1
beta = eqb.@coefs
sigma(2) = eqb.@se
show eqb.output
gamma(1) = -1/eqb.c(4)

' do mle and display output
ll1.ml(showopts, m=1000, c=1e-5)
show ll1.output
' HETERO1.PRG
' estimate parametric heteroskedasticity model
'
' Example 12.14, p. 563 of Greene, William H. (1997) Econometric Analysis,
' 3rd edition, Prentice-Hall

' create workfile
wfcreate hetero1 u 1 51

' read data from Greene (1997) table 12.1, page 541
series expend
series inc
expend.fill 275,275,531,316,304,431,316,427,259,294,423,335,320,342,268,353,320,821,387,424,265,437,355,327,466,274,359,388,311,397,315,315,356,NA,339,452,428,403,345,260,427,477,433,279,447,322,412,321,417,415,500
inc.fill 0.6247,0.6183,0.8914,0.7505,0.6813,0.7873,0.664,0.8063,0.5736,0.7391,0.8818,0.6607,0.6951,0.7526,0.6489,0.6541,0.6456,1.0851,0.885,0.8604,0.67,0.8745,0.8001,0.6333,0.8442,0.7342,0.9032,0.6505,0.7478,0.7839,0.6242,0.7697,0.7624,0.7597,0.7374,0.8001,1.0022,0.838,0.7696,0.6615,0.8306,0.7847,0.7051,0.7277,0.8267,0.7812,0.7733,0.6841,0.6622,0.845,0.9096

' specify likelihood function
logl ll1
ll1.append @logl logl1
ll1.append res = expend-c(1)-c(2)*inc-c(3)*inc^2
ll1.append var = c(4)*inc^c(5)
ll1.append logl1 = log(@dnorm(res/@sqrt(var)))-log(var)/2

' set starting values to OLS
equation eq1.ls expend c inc inc^2
for !i=1 to 3
   c(!i) = eq1.c(!i)
next
c(4) = eq1.@se^2
c(5) = 1

' estimate for sample with nonmissing values for dependent variable
smpl @all if expend<>na
ll1.ml(showopts, m=1000, c=1e-5)
' should replicate Greene (1997), Table 12.11, p.563
show ll1.output
' HPROBIT1.PRG
' example program for EViews LogL object
' Estimate probit specification with multiplicative heterogeneity.
'
' Example 19.7 (p. 890) of Greene, William H. (1997) Econometric Analysis,
' 3rd edition, Prentice-Hall.

' create workfile
wfcreate hprobit u 32

' read data from Greene
series gpa
series tuce
series psi
series grade
gpa.fill 2.66,2.89,3.28,2.92,4,2.86,2.76,2.87,3.03,3.92,2.63,3.32,3.57,3.26,3.53,2.74,2.75,2.83,3.12,3.16,2.06,3.62,2.89,3.51,3.54,2.83,3.39,2.67,3.65,4,3.1,2.39
tuce.fill 20,22,24,12,21,17,17,21,25,29,20,23,23,25,26,19,25,19,23,25,22,28,14,26,24,27,17,24,21,23,21,19
psi.fill 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1
grade.fill 0,0,0,0,1,0,0,0,0,1,0,0,0,1,0,0,0,0,0,1,0,1,0,0,1,1,1,0,1,1,0,1

' arbitrary (random) starting values
rndseed 1234
coef(4) beta
rnd(beta)
coef(1) gam
rnd(gam)

' specify the likelihood
logl ll1
ll1.append @logl logl1
ll1.append index = (beta(1)+beta(2)*gpa+beta(3)*tuce+beta(4)*psi)/exp(gam(1)*psi)
ll1.append mills = @dnorm(index)*(grade-@cnorm(index))/@cnorm(index)/(1-@cnorm(index))
ll1.append logl1 = grade*log(@cnorm(index))+(1-grade)*log(1-@cnorm(index))

' specify analytic derivatives
ll1.append @deriv beta(1) grad1 beta(2) grad2 beta(3) grad3 beta(4) grad4 gam(1) grad5
ll1.append grad1 = mills/exp(gam(1)*psi)
ll1.append grad2 = grad1*gpa
ll1.append grad3 = grad1*tuce
ll1.append grad4 = grad1*psi
ll1.append grad5 = mills*psi*(-index)

' carry out MLE and display results
ll1.ml(showopts, m=1000, c=1e-5)
' does not quite match results in Table 19.4 (right block), p.890
' but eviews logl is higher
show ll1.output
' GPROBIT1.PRG
' example program for EViews LogL object
' grouped (proportions) data probit
'
' Problem 6, page 947 of Greene, William H. (1997) Econometric Analysis,
' 3rd edition, Prentice-Hall

' create workfile
wfcreate gprobit u 10

' fill data
series trucks
trucks.fill 160,250,170,365,210,206,203,305,270,340
series prate
prate.fill 11,74,8,87,62,83,48,84,71,79
prate = prate/100

' set up likelihood for probit
logl ll1
ll1.append @logl logl1
ll1.append xb = c(1)+c(2)*trucks
ll1.append logl1 = prate*log(@cnorm(xb)) + (1-prate)*log(1-@cnorm(xb))

' analytic derivatives
ll1.append @deriv c(1) grad1 c(2) grad2
ll1.append mills1 = @dnorm(xb)/@cnorm(xb)
ll1.append mills2 = -@dnorm(xb)/(1-@cnorm(xb))
ll1.append grad1 = prate*mills1+(1-prate)*mills2
ll1.append grad2 = prate*mills1*trucks+(1-prate)*mills2*trucks

' set OLS starting values
equation eq1.ls prate c trucks

' do MLE
ll1.ml(showopts, m=1000, c=1e-5)
show ll1.output

' create table for answers to problems
table(3,2) ans
setcolwidth(ans,1,30)

' problem 6(a)
setcell(ans,1,1, "answer to 6(a)","l")
scalar ans_a = ( @qnorm(.95)-ll1.c(1) )/ll1.c(2)
ans(1,2) = ans_a

' problem 6(b)
' ans_b is the expected participation rate if budget 6.5 million
setcell(ans,2,1,"expected participation rate","l")
scalar ans_b = @cnorm( ll1.c(1)+ll1.c(2)*650/2 )
ans(2,2) = ans_b

' problem 6(c)
setcell(ans,3,1,"marginal participation rate at 301","l")
scalar ans_c = ll1.c(2)*@dnorm( ll1.c(1)+ll1.c(2)*301 )
ans(3,2) = ans_c

show ans
' NLOGIT1.PRG
' example program for EViews LogL object
' nested logit with three choices and two branches:
' branch 1 (y=1) and branch 2 (y=2,3)
'
' for a discussion, see Section 19.7.4 of Greene, William H. (1997)
' Econometric Analysis, 3rd edition, Prentice-Hall

' change path to program path
%path = @runpath + "../data/"
cd %path

' load artificial data
load mlogit

' declare coef vectors to use in MLE
coef(2) a3 = 0.1   ' true values 0.6, -0.3
coef(2) b3 = 0.1   ' true values 0.6, 0.7
coef(1) c3 = 0.1   ' true value 1 (conditional logit)

' specify likelihood
logl ll3
ll3.append @logl logl3
ll3.append xb1 = a3(1)*stores1+a3(2)*dist1
ll3.append xb2 = a3(1)*stores2+a3(2)*dist2+b3(1)*inc
ll3.append xb3 = a3(1)*stores3+a3(2)*dist3+b3(2)*inc
' inclusive values for each branch
ll3.append ival1 = xb1
ll3.append ival2 = log(exp(xb2)+exp(xb3))
' (unconditional) prob for each branch
ll3.append prob1 = exp(ival1)/( exp(ival1)+exp(c3(1)*ival2) )
ll3.append prob2 = exp(c3(1)*ival2)/( exp(ival1)+exp(c3(1)*ival2) )
' conditional prob within branch 2
ll3.append prob22 = exp(xb2)/( exp(xb2)+exp(xb3) )
ll3.append prob23 = exp(xb3)/( exp(xb2)+exp(xb3) )
ll3.append logl3 = d1*log(prob1) + d2*log(prob2*prob22) + d3*log(prob2*prob23)

' estimate MLE
' check that you get the conditional logit estimates if c3(1)=1
ll3.ml(showopts)
freeze(out1) ll3.output
show out1

' test IIA by testing c3(1)=1
ll3.wald c3(1)=1
' ZPOISS1.PRG
' example program for EViews LogL object
'
' zero-altered poisson models
'
' see Section 19.9.6 in Greene, William H. (1997)
' Econometric Analysis, 3rd edition, Prentice-Hall.

' create workfile
wfcreate zpoiss u 1 1000

' generate artificial data
rndseed 123
for !i=1 to 4
   series x{!i} = 2*nrnd
next
series zstar = 0.1*x1+0.3*x2+nrnd
series z = (zstar>0)
series xbeta = exp(0.1+0.6*x3+0.4*x4)
series ystar = @rpoisson(xbeta)
series y = z*ystar

' declare params to estimate
coef(3) gam = 0.1    ' true values 0, 0.1, 0.3
coef(3) beta = 0.1   ' true values 0.1, 0.6, 0.4

' specify likelihood
' note: variance of probit not identified; only ratio to coef identified
logl ll1
ll1.append @logl logl1
' index function for the probit
ll1.append xb0 = (gam(1)+gam(2)*x1+gam(3)*x2)
' index function for the Poisson
ll1.append lambda = exp(beta(1)+beta(2)*x3+beta(3)*x4)
ll1.append prob0 = (1-@cnorm(xb0)) + @cnorm(xb0)*exp(-lambda)
ll1.append logl1 = (y=0)*log(prob0)+(y>0)*( log(@cnorm(xb0)) -lambda+y*log(lambda)-@factlog(y) )

' get starting values from probit and poisson models
equation eq1.binary(d=n) (y>0) c x1 x2
gam = eq1.@coefs
equation eq2.count y c x3 x4
beta = eq2.@coefs

' do MLE
ll1.ml(showopts, m=1000, c=1e-5)
show ll1.output

' Vuong non-nested LR test, Greene (1997), page 944
' need to get the log likelihood series from binary and Poisson models
' probit log likelihood
series xb21 = eq1.c(1)+eq1.c(2)*x1+eq1.c(3)*x2
series logl21 = (y>0)*log( @cnorm(xb21) ) + (y=0)*log( 1-@cnorm(xb21) )
' poisson log likelihood
series lam22 = exp( eq2.c(1)+eq2.c(2)*x3+eq2.c(3)*x4 )
series logl22 = -lam22+y*log(lam22)-@factlog(y)
' likelihood ratio series
series logl2 = logl21+logl22
series m = logl1-logl2

' test statistic (take model 1 if v>2, take model 2 if v<-2, else inconclusive)
table out
setcolwidth(out,1,20)
out(1,1) = "Vuong non-nested LR test"
out(2,1) = "Zero-altered model if v>2; probit-poisson if v<-2; else inconclusive"
scalar v = @sum(m)/@sqrt( @sumsq(m-@mean(m)) )
out(3,1) = "v = " + @str(v)
show out
' HECKMAN1.PRG
' example program for EViews LogL object
'
' Heckman selection model with three regressors z1, z2, z3
' (common to both selection and regression equations)
' selection equation: lfp=1 if selected; otherwise 0
' regression model: lw (dependent) c z1 z2 z3 (regressors)
'
' for a discussion, see Section 20.4 of Greene, William H. (1997)
' Econometric Analysis, 3rd edition, Prentice-Hall

' change path to program path
%path = @runpath +"../data/"
cd %path

' load artificial data
load mlogit

' get starting values from 2-step Heckman
' first step: estimate probit of the selection equation
smpl @all
equation eq1.binary(d=n) lfp c z1 z2 z3
' copy starting values to selection equation params
coef(4) b = eq1.@coefs
' true values b(1) 1  b(2) 0.2  b(3) 0.5  b(4) -0.3
' compute inverse mills ratio
eq1.fit(i) xbhat
series imills = @dnorm(xbhat)/@cnorm(xbhat)
' compute delta to estimate sig2 (variance)
series delta = imills*(imills+xbhat)

' run second step OLS, including the inverse mills ratio
smpl @all if lfp=1
equation eq2.ls lw c z1 z2 z3 imills
' true values c(1) 5  c(2) 0.8  c(3) 0.1  c(4) -1

' construct estimate of correlation rho (note: uses only estimation sample of 2nd step)
eq2.makeresid resid2
' initial estimate of the regression model variance
coef(1) sig2 = @sumsq(resid2)/eq2.@regobs+@mean(delta)*eq2.c(5)^2
' true value 4
' initial estimate of the squared correlation between the two equations
' rho2 should be constrained between 0 and 1
coef(1) rho2 = eq2.c(5)^2/sig2(1)
' true value 0.64
' end of 2 step Heckman

' set up log likelihood
!pi = @acos(-1)   ' the constant pi
logl ll1
ll1.append @logl logl1
' index for selection equation
ll1.append xb0 = b(1)+b(2)*z1+b(3)*z2+b(4)*z3
' log probability if not observed
ll1.append logp0 = log( 1-@cnorm(xb0) )
' fitted values from the regression model
ll1.append xb1 = c(1)+c(2)*z1+c(3)*z2+c(4)*z3
' standardized residual from the regression model
ll1.append sres = (lw-xb1)/@sqrt(sig2(1))
' index for observed regression model
ll1.append index = (xb0+sres*@sqrt(rho2(1)) )/@sqrt( 1-rho2(1) )
' log probability if observed
ll1.append logp1 = -log(2*!pi)/2-log(sig2(1))/2-sres^2/2+log(@cnorm(index))
' specify contribution to the likelihood (uses new @recode syntax)
ll1.append logl1 = @recode(lfp=0,logp0,logp1)

' do MLE and show results
smpl @all
ll1.ml(showopts, m=1000, c=1e-5)
show ll1.output
' WEIBULL1.PRG
' example program for EViews LogL object
'
' weibull duration model with (fixed) covariates; no censoring
'
' see Example 20.18 (pp. 990-991) in Greene, William H. (1997)
' Econometric Analysis, 3rd edition, Prentice-Hall.

' create workfile
wfcreate weibull u 1 62

' fill data
series days
series ind
days.fill 7,9,13,14,26,29,52,130,9,37,41,49,52,119,3,17,19,28,72,99,104,114,152,153,216,15,61,98,2,25,85,3,10,1,2,3,3,3,4,8,11,22,23,27,32,33,35,43,43,44,100,5,49,2,12,12,21,21,27,38,42,117
ind.fill 0.01138,0.01138,0.01138,0.01138,0.01138,0.01138,0.01138,0.01138,0.02299,0.02299,0.02299,0.02299,0.02299,0.02299,-0.03957,-0.03957,-0.03957,-0.03957,-0.03957,-0.03957,-0.03957,-0.03957,-0.03957,-0.03957,-0.03957,-0.05467,-0.05467,-0.05467,0.00535,0.00535,0.00535,0.07427,0.07427,0.0645,0.0645,0.0645,0.0645,0.0645,0.0645,0.0645,0.0645,0.0645,0.0645,0.0645,0.0645,0.0645,0.0645,0.0645,0.0645,0.0645,0.0645,-0.10443,-0.10443,-0.007,-0.007,-0.007,-0.007,-0.007,-0.007,-0.007,-0.007,-0.007

' declare params to estimate with arbitrary initial values
coef(2) beta = 0.1
coef(1) p = 0.1

' setup likelihood for weibull model
logl ll1
ll1.append @logl logl1
ll1.append loglam = -beta(1)-beta(2)*ind
ll1.append w = p(1)*(loglam+log(days))
ll1.append logl1 = w+log(p(1))-exp(w)
' resg is the generalized residuals (should have mean 1)
ll1.append resg = exp(w)

' analytic derivatives
ll1.append @deriv beta(1) grad1 beta(2) grad2 p(1) grad3
ll1.append grad1 = p(1)*(resg-1)
ll1.append grad2 = grad1*ind
ll1.append grad3 = (w+1-w*resg)/p(1)

' do MLE
ll1.ml(showopts, m=1000, c=1e-7)
show ll1.output

' conditional moment tests (Greene, example 20.19, p. 994)
!psi = @psi(1)   ' @psi is the first deriv of log gamma
group gm resg^2-2 log(resg)-!psi
group gg grad1 grad2 grad3
matrix mm = @convert(gm)
matrix dd = @convert(gg)

' compute M'i
matrix(gm.count,1) colsum
for !i=1 to gm.count
   series _tmp = gm(!i)
   colsum(!i,1) = @sum( _tmp )
next

' compute M'M - M'D(D'D)^{-1}D'M
sym dtd = @transpose(dd)*dd
matrix qdiff = @transpose(mm)*mm - @transpose(mm)*dd*@inverse(dtd)*@transpose(dd)*mm

' evaluate quadratic form at the bottom of page 994
matrix chi2_stat = @transpose(colsum)*@inverse(qdiff)*colsum

table out
setcolwidth(out, 1, 10)
out(1,1) = "Conditional moment tests for the Weibull specification"
out(2,1) = "Matches E[e^2] and E[log(e)]"
out(3,1) = "chi-sqr(" + @str(@columns(mm)) + ") = "
out(3,2) = @str(chi2_stat(1))
out(3,3) = "p-value"
out(3,4) = 1-@cchisq(chi2_stat(1),@columns(mm))
show out
' GARCH1.PRG
' example program for EViews LogL object
'
' GARCH(1,1) model with MA(1) errors and coef restrictions.
'
' This is the specification described in Section 9.1 (pp. 3014-3017)
' of Bollerslev, Tim, Robert F. Engle and Daniel B. Nelson (1994)
' "ARCH Models", in Chapter 49 of the Handbook of Econometrics,
' Volume 4, North-Holland, applied to different data.

' change path to program path
%path = @runpath +"../data/"
cd %path

' load workfile
wfload gerus
series y = 100*dlog(ger)

' set sample (1/2/82-7/9/92) as in Table 1, column 1
' 10/6/86 is obs 3945
sample s0 2750 2750
sample s1 2751 5392
smpl s1

' declare coef vectors to use in ARCH likelihood
coef(1) mu = .1
coef(1) theta = .1
coef(2) omega = .1
coef(1) alpha = .1
coef(1) beta = .1

' get starting values from MA(1)
equation eq_temp.ls y c ma(1)
mu(1) = eq_temp.c(1)
theta(1) = eq_temp.c(2)
omega(1) = eq_temp.@se^2

' set presample values of expressions in logl
smpl s0
series sig2 = omega(1)
series resma = 0

' set up ARCH likelihood
logl ll1
ll1.append @logl logl
ll1.append res = y-mu(1)
ll1.append resma = res - theta(1)*resma(-1)
ll1.append sig2 = omega(1)+omega(2)*dumw-omega(2)*(alpha(1)+beta(1))*dumw(-1) +alpha(1)*resma(-1)^2 +beta(1)*sig2(-1)
ll1.append z = resma/@sqrt(sig2)
ll1.append logl = log(@dnorm(z)) - log(sig2)/2

' estimate and display results
smpl s1
ll1.ml(showopts, m=1000, c=1e-5)
show ll1.output
' EGARCH1.PRG
' example program for EViews LogL object
' Estimate EGARCH with generalized error distribution (Nelson's specification)
'
' For discussion, see p. 668 of Hamilton, James D. (1994) Time Series Analysis,
' Princeton University Press.

' change path to program path
%path = @runpath + "../data/"
cd %path

' load workfile
wfload gerus
series y = 100*dlog(ger)

' set sample to 1/2/82-7/9/92 (10/6/86 is obs 3945)
sample s0 2748 2750
sample s1 2751 5392
smpl s1

' get starting values from Gaussian EGARCH-M
equation eq1
eq1.arch(2,2,egarch,archm=var) y c y(-1)
show eq1.output

' declare and initialize parameters
' coefs on lagged variance
coef(2) delta
delta(1) = eq1.c(8)
delta(2) = eq1.c(9)
' coefs on lagged resids
coef(2) alpha
alpha(1) = eq1.c(5)
alpha(2) = eq1.c(7)
' coef on asym term
coef(1) chi
chi(1) = eq1.c(6)/eq1.c(5)
' coefs on deterministic terms
coef(2) rho
rho(1) = 2*log(eq1.@se)
rho(2) = 0
' nu=2 is the normal distribution; nu>2 gives thin tails
coef(1) nu = 2
!pi = @acos(-1)

' set presample values of expressions in logl
smpl s0
series zeta = log(1+rho(2)*ndays)
series h = exp(rho(1))
series z = 0
series temp1 = @abs(z(-1)) + chi(1)*z(-1)

' set up EGARCH likelihood
' note that E|v{t-1}| is absorbed in overall constant rho(1)
logl ll1
ll1.append @logl logl
' zeta = unconditional mean of log(ht)
ll1.append zeta = log(1+rho(2)*ndays)
' log(lambda) p.668 last equation
ll1.append loggam1 = @gammalog(1/nu(1))
ll1.append loglam = -log(2)/nu(1) + 0.5*( loggam1 - @gammalog(3/nu(1)) )
ll1.append temp1 = @abs(z(-1)) + chi(1)*z(-1)
ll1.append log(h) = rho(1) + zeta + delta(1)*(log(h(-1))-zeta(-1)) + delta(2)*(log(h(-2))-zeta(-2)) + alpha(1)*temp1 + alpha(2)*temp1(-1)
ll1.append res = y - c(1)*h - c(2) - c(3)*y(-1)
ll1.append z = res/@sqrt(h)
ll1.append logl = log(nu(1)) - loglam - (1+1/nu(1))*log(2) - loggam1 - 0.5*@abs(z/exp(loglam))^nu(1) - 0.5*log(h)

' estimate and display output
smpl s1
ll1.ml(showopts, m=1000, c=1e-5)
show ll1.output
ARCH_T1.PRG estimates a GARCH(1,1) model assuming a t-distribution for the innovations. The log likelihood function for this model may be found in Hamilton (1994, equation 21.1.24, page 662). Note: This example is for illustration purposes only, as EViews 5 provides more convenient, built-in tools for estimating GARCH models with a Student-t distribution.
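For reference, the per-observation contribution coded in the logl below can be written out in the notation of Hamilton's equation 21.1.24. This is only a restatement of the program's logl lines, included to make the mapping from the textbook formula to the code explicit; the symbols below simply relabel the program's sig2, tdf(1), mu(1), omega(1), alpha(1) and beta(1) as \(h_t\), \(\nu\), \(\mu\), \(\omega\), \(\alpha\) and \(\beta\):

\[
\ell_t = \log\Gamma\!\Big(\tfrac{\nu+1}{2}\Big) - \log\Gamma\!\Big(\tfrac{\nu}{2}\Big)
- \tfrac{1}{2}\log\pi - \tfrac{1}{2}\log(\nu-2) - \tfrac{1}{2}\log h_t
- \tfrac{\nu+1}{2}\log\!\Big(1+\frac{(y_t-\mu)^2}{(\nu-2)\,h_t}\Big),
\qquad
h_t = \omega + \alpha\,(y_{t-1}-\mu)^2 + \beta\,h_{t-1}.
\]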
' ARCH_T1.PRG
' example program for EViews LogL object
'
' Estimate GARCH(1,1) model with t-distributed errors
'
' see page 662, equation 21.1.24, of Hamilton, James D. (1994) Time Series Analysis,
' Princeton University Press.

' change path to program path
%path = @runpath + "../data/"
cd %path

' load workfile
wfload gerus
series y = 100*dlog(ger)

' set sample to 1/2/82-7/9/92 (10/6/86 is obs 3945)
sample s0 2750 2750
sample s1 2751 5392
smpl s1

' get starting values from Gaussian ARCH
equation eq1
eq1.arch y c
show eq1.output

' declare and initialize parameters
coef(1) mu = eq1.c(1)
coef(1) omega = eq1.c(2)
coef(1) alpha = eq1.c(3)
coef(1) beta = eq1.c(4)
coef(1) tdf = 3

' set presample values of expressions in logl
smpl s0
series sig2 = omega(1)
series res = 0
!pi = @acos(-1)

' set up GARCH likelihood
logl ll1
ll1.append @logl logl
ll1.append res = y-mu(1)
ll1.append sig2 = omega(1)+alpha(1)*res(-1)^2 +beta(1)*sig2(-1)
ll1.append z = res^2/sig2/(tdf(1)-2) + 1
ll1.append logl = @gammalog((tdf(1) + 1)/2) - @gammalog(tdf(1)/2) - log(!pi)/2 - log(tdf(1) - 2)/2 - log(sig2)/2 - (tdf(1)+1)*log(z)/2

' estimate and display output
smpl s1
ll1.ml(showopts, m=1000, c=1e-5)

' estimate same model using EViews' built-in function
equation eq2
eq2.arch(tdist) y c
show eq2
show ll1.output
' BV_GARCH.PRG (3/30/2004)
' example program for EViews LogL object
'
' restricted version of
' bi-variate BEKK of Engle and Kroner (1995):
'
' y = mu + res
' res ~ N(0,H)
'
' H = omega*omega' + beta H(-1) beta' + alpha res(-1) res(-1)' alpha'
'
' where
'
' y = 2 x 1
' mu = 2 x 1
' H = 2 x 2 (symmetric)
'   H(1,1) = variance of y1 (saved as var_y1)
'   H(1,2) = cov of y1 and y2 (saved as cov_y1y2)
'   H(2,2) = variance of y2 (saved as var_y2)
' omega = 2 x 2 lower triangular
' beta = 2 x 2 diagonal
' alpha = 2 x 2 diagonal
'

' change path to program path
%path = @runpath + "../data/"
cd %path

' load workfile
wfload intl_fin.wf1

' dependent variables of both series must be continuous
smpl @all
series y1 = dlog(sp500)
series y2 = dlog(tbond)

' set sample
' the first observation of s1 needs to be one or two periods after
' the first observation of s0
sample s0 3/1/1994 8/25/2000
sample s1 3/2/1994 8/25/2000

' initialization of parameters and starting values
' change below only to change the specification of model
smpl s0

' get starting values from univariate GARCH
equation eq1.arch(m=100,c=1e-5) y1 c
equation eq2.arch(m=100,c=1e-5) y2 c

' declare coef vectors to use in bi-variate GARCH model
' see above for details
coef(2) mu
mu(1) = eq1.c(1)
mu(2) = eq2.c(1)
coef(3) omega
omega(1) = (eq1.c(2))^.5
omega(2) = 0
omega(3) = eq2.c(2)^.5
coef(2) alpha
alpha(1) = (eq1.c(3))^.5
alpha(2) = (eq2.c(3))^.5
coef(2) beta
beta(1) = (eq1.c(4))^.5
beta(2) = (eq2.c(4))^.5

' constant adjustment for log likelihood
!mlog2pi = 2*log(2*@acos(-1))

' use var-cov of sample in "s1" as starting value of variance-covariance matrix
series cov_y1y2 = @cov(y1-mu(1), y2-mu(2))
series var_y1 = @var(y1)
series var_y2 = @var(y2)
series sqres1 = (y1-mu(1))^2
series sqres2 = (y2-mu(2))^2
series res1res2 = (y1-mu(1))*(y2-mu(2))

' ...........................................................
' LOG LIKELIHOOD
' set up the likelihood
' 1) open a new blank likelihood object (L.O.) named bvgarch
' 2) specify the log likelihood model by append
' ...........................................................
logl bvgarch
bvgarch.append @logl logl
bvgarch.append sqres1 = (y1-mu(1))^2
bvgarch.append sqres2 = (y2-mu(2))^2
bvgarch.append res1res2 = (y1-mu(1))*(y2-mu(2))

' calculate the variance and covariance series
bvgarch.append var_y1 = omega(1)^2 + beta(1)^2*var_y1(-1) + alpha(1)^2*sqres1(-1)
bvgarch.append var_y2 = omega(3)^2+omega(2)^2 + beta(2)^2*var_y2(-1) + alpha(2)^2*sqres2(-1)
bvgarch.append cov_y1y2 = omega(1)*omega(2) + beta(2)*beta(1)*cov_y1y2(-1) + alpha(2)*alpha(1)*res1res2(-1)

' determinant of the variance-covariance matrix
bvgarch.append deth = var_y1*var_y2 - cov_y1y2^2

' inverse elements of the variance-covariance matrix
bvgarch.append invh1 = var_y2/deth
bvgarch.append invh3 = var_y1/deth
bvgarch.append invh2 = -cov_y1y2/deth

' log-likelihood series
bvgarch.append logl = -0.5*(!mlog2pi + (invh1*sqres1+2*invh2*res1res2+invh3*sqres2) + log(deth))

' remove some of the intermediary series
' bvgarch.append @temp invh1 invh2 invh3 sqres1 sqres2 res1res2 deth

' estimate the model
smpl s1
bvgarch.ml(showopts, m=100, c=1e-5)

' change below to display different output
show bvgarch.output
graph varcov.line var_y1 var_y2 cov_y1y2
show varcov

' LR statistic for univariate versus bivariate model
scalar lr = -2*( eq1.@logl + eq2.@logl - bvgarch.@logl )
scalar lr_pval = 1 - @cchisq(lr,1)
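The scalar recursions in BV_GARCH.PRG follow from the matrix form in the header comment once the diagonal/triangular restrictions are imposed. As a sketch (notation only, not part of the program), writing the lower-triangular omega and the diagonal alpha and beta as

\[
\Omega = \begin{pmatrix}\omega_1 & 0\\ \omega_2 & \omega_3\end{pmatrix},\qquad
A = \mathrm{diag}(\alpha_1,\alpha_2),\qquad
B = \mathrm{diag}(\beta_1,\beta_2),
\]

the BEKK recursion \(H_t = \Omega\Omega' + B H_{t-1} B' + A\,\varepsilon_{t-1}\varepsilon_{t-1}' A'\) reduces element by element to

\[
\begin{aligned}
h_{11,t} &= \omega_1^2 + \beta_1^2\,h_{11,t-1} + \alpha_1^2\,\varepsilon_{1,t-1}^2,\\
h_{22,t} &= \omega_2^2 + \omega_3^2 + \beta_2^2\,h_{22,t-1} + \alpha_2^2\,\varepsilon_{2,t-1}^2,\\
h_{12,t} &= \omega_1\omega_2 + \beta_1\beta_2\,h_{12,t-1} + \alpha_1\alpha_2\,\varepsilon_{1,t-1}\varepsilon_{2,t-1},
\end{aligned}
\]

which are exactly the var_y1, var_y2 and cov_y1y2 updates appended to the bvgarch object (with \(\omega_i\), \(\alpha_i\), \(\beta_i\) standing for omega(i), alpha(i), beta(i)). The trivariate program that follows applies the same mapping to a 3 x 3 system.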
' TV_GARCH.PRG
' example program for EViews LogL object
'
' restricted version of
' tri-variate BEKK of Engle and Kroner (1995):
'
' y = mu + res
' res ~ N(0,H)
'
' H = omega*omega' + beta H(-1) beta' + alpha res(-1) res(-1)' alpha'
'
' where,
' y = 3 x 1
' mu = 3 x 1
' H = 3 x 3 (symmetric)
'   H(1,1) = variance of y1 (saved as var_y1)
'   H(1,2) = cov of y1 and y2 (saved as cov_y1y2)
'   H(1,3) = cov of y1 and y3 (saved as cov_y1y3)
'   H(2,2) = variance of y2 (saved as var_y2)
'   H(2,3) = cov of y2 and y3 (saved as cov_y2y3)
'   H(3,3) = variance of y3 (saved as var_y3)
' omega = 3 x 3 lower triangular
' beta = 3 x 3 diagonal
' alpha = 3 x 3 diagonal
'

' change path to program path
%path = @runpath + "../data/"
cd %path

' load workfile
wfload intl_fin.wf1

' dependent variables of all series must be continuous
series y1 = dlog(sp500)
series y2 = dlog(ftse)
series y3 = dlog(nikkei)

' set sample
' the first observation of s1 needs to be one or two periods after
' the first observation of s0
sample s0 3/3/94 8/1/2000
sample s1 3/7/94 8/1/2000

' initialization of parameters and starting values
' change below only to change the specification of model
smpl s0

' get starting values from univariate GARCH
equation eq1.arch(m=100,c=1e-5) y1 c
equation eq2.arch(m=100,c=1e-5) y2 c
equation eq3.arch(m=100,c=1e-5) y3 c

' declare coef vectors to use in GARCH model
coef(3) mu
mu(1) = eq1.c(1)
mu(2) = eq2.c(1)
mu(3) = eq3.c(1)
coef(6) omega
omega(1) = (eq1.c(2))^.5
omega(2) = 0
omega(3) = 0
omega(4) = eq2.c(2)^.5
omega(5) = 0
omega(6) = eq3.c(2)^.5
coef(3) alpha
alpha(1) = (eq1.c(3))^.5
alpha(2) = (eq2.c(3))^.5
alpha(3) = (eq3.c(3))^.5
coef(3) beta
beta(1) = (eq1.c(4))^.5
beta(2) = (eq2.c(4))^.5
beta(3) = (eq3.c(4))^.5

' use sample var-cov as starting value of variance-covariance matrix
series cov_y1y2 = @cov(y1-mu(1), y2-mu(2))
series cov_y1y3 = @cov(y1-mu(1), y3-mu(3))
series cov_y2y3 = @cov(y2-mu(2), y3-mu(3))
series var_y1 = @var(y1)
series var_y2 = @var(y2)
series var_y3 = @var(y3)
series sqres1 = (y1-mu(1))^2
series sqres2 = (y2-mu(2))^2
series sqres3 = (y3-mu(3))^2
series res1res2 = (y1-mu(1))*(y2-mu(2))
series res1res3 = (y1-mu(1))*(y3-mu(3))
series res2res3 = (y3-mu(3))*(y2-mu(2))

' constant adjustment for log likelihood
!mlog2pi = 3*log(2*@acos(-1))

' ...........................................................
' LOG LIKELIHOOD
' set up the likelihood
' 1) open a new blank likelihood object named tvgarch
' 2) specify the log likelihood model by append
' ...........................................................
logl tvgarch
tvgarch.append @logl logl
' squared errors and cross errors
tvgarch.append sqres1 = (y1-mu(1))^2
tvgarch.append sqres2 = (y2-mu(2))^2
tvgarch.append sqres3 = (y3-mu(3))^2
tvgarch.append res1res2 = (y1-mu(1))*(y2-mu(2))
tvgarch.append res1res3 = (y1-mu(1))*(y3-mu(3))
tvgarch.append res2res3 = (y3-mu(3))*(y2-mu(2))

' variance and covariance series
tvgarch.append var_y1 = omega(1)^2 + beta(1)^2*var_y1(-1) + alpha(1)^2*sqres1(-1)
tvgarch.append var_y2 = omega(2)^2+omega(4)^2 + beta(2)^2*var_y2(-1) + alpha(2)^2*sqres2(-1)
tvgarch.append var_y3 = omega(3)^2+omega(5)^2+omega(6)^2 + beta(3)^2*var_y3(-1) + alpha(3)^2*sqres3(-1)
tvgarch.append cov_y1y2 = omega(1)*omega(2) + beta(2)*beta(1)*cov_y1y2(-1) + alpha(2)*alpha(1)*res1res2(-1)
tvgarch.append cov_y1y3 = omega(1)*omega(3) + beta(3)*beta(1)*cov_y1y3(-1) + alpha(3)*alpha(1)*res1res3(-1)
tvgarch.append cov_y2y3 = omega(2)*omega(3) + omega(4)*omega(5) + beta(3)*beta(2)*cov_y2y3(-1) + alpha(3)*alpha(2)*res2res3(-1)

' determinant of the variance-covariance matrix
tvgarch.append deth = var_y1*var_y2*var_y3 - var_y1*cov_y2y3^2-cov_y1y2^2*var_y3+2*cov_y1y2*cov_y2y3*cov_y1y3-cov_y1y3^2*var_y2

' calculate the elements of the inverse of var_cov (H) matrix
' numbered as vech(inv(H))
tvgarch.append invh1 = (var_y2*var_y3-cov_y2y3^2)/deth
tvgarch.append invh2 = -(cov_y1y2*var_y3-cov_y1y3*cov_y2y3)/deth
tvgarch.append invh3 = (cov_y1y2*cov_y2y3-cov_y1y3*var_y2)/deth
tvgarch.append invh4 = (var_y1*var_y3-cov_y1y3^2)/deth
tvgarch.append invh5 = -(var_y1*cov_y2y3-cov_y1y2*cov_y1y3)/deth
tvgarch.append invh6 = (var_y1*var_y2-cov_y1y2^2)/deth

' log-likelihood series
tvgarch.append logl = -0.5*(!mlog2pi + (invh1*sqres1+invh4*sqres2+invh6*sqres3 +2*invh2*res1res2 +2*invh3*res1res3+2*invh5*res2res3 ) + log(deth))

' remove some of the intermediary series
' tvgarch.append @temp invh1 invh2 invh3 invh4 invh5 invh6 sqres1 sqres2 sqres3 res1res2 res1res3 res2res3 deth

' estimate the model
smpl s1
tvgarch.ml(showopts, m=100, c=1e-5)

' change below to display different output
show tvgarch.output
graph var.line var_y1 var_y2 var_y3
graph cov.line cov_y1y2 cov_y1y3 cov_y2y3
show var
show cov

' LR statistic for univariate vs trivariate
scalar lr = -2*(eq1.@logl + eq2.@logl + eq3.@logl - tvgarch.@logl)
scalar lr_pval = 1 - @cchisq(lr,3)