/**********************************************************************************************/
/* Replication files for                                                                      */
/* Meissner, Thomas and Rostam-Afschar, Davud "Do Tax Cuts Increase Consumption?              */
/* An Experimental Test of Ricardian Equivalence" (2014)                                      */
/*                                                                                            */
/* Program file: run.do: Do-file containing Stata code to reproduce the results               */
/* in the paper's tables and figures. This program was written to run in Stata                */
/* version 11.2.                                                                              */
/* Data file: MRA2014.dta: Stata version 11.2 dataset. Details about the data are described   */
/* in the results section of the paper.                                                       */
/*                                                                                            */
/* Use of these files is permitted under the following condition:                             */
/* Anyone using these files must include a citation of the paper in his or her work.          */
/**********************************************************************************************/

/* Begin: Stata setup */
clear all
clear matrix
clear mata
version 11.2                     // interpret commands under Stata 11.2 rules
set more off
set trace off
set type double, permanently     // create numeric variables as doubles by default
set varabbrev off                // require fully spelled-out variable names
macro drop _all
scalar drop _all
cap log close                    // close any log left open by a previous run

// Replace MYPATH with the appropriate local directories before running.
global DO_PATH "MYPATH"
global PATH "MYPATH"
global MY_RESULTS_PATH "MYPATH"
global MY_FIGURES_PATH "MYPATH"
global MY_TABLES_PATH "MYPATH"
global MY_LOG_PATH "MYPATH"
global MY_OUT_PATH "MYPATH"
global MY_TEMP_PATH "MYPATH"

// Time-stamp the log with the run date (`systime' is captured but not used).
// NOTE(review): c(current_date) contains spaces, so the log filename will too — confirm acceptable.
local sysdate = c(current_date)
local systime = c(current_time)
log using "${MY_LOG_PATH}analysis_`sysdate'", smcl replace
/* End: Stata setup */

/* load dataset */
use "${MY_OUT_PATH}MRA2014.dta", clear

/* generate variables for the consumption function, see paper section 3 */
/* due Taxes = Taxes to pay - taxes paid */
// Panel structure: id = subject, r = round, t = period (1..25) within a round.
sort id r t
gen duetx=0
// Cumulative taxes paid so far within each subject-round.
bys id r: gen sumtx=sum(tx)
// Taxes still due: total burden of 3000 minus taxes already paid.
forv i=1/25 {
	replace duetx=3000-sumtx if t==`i'
}
// One-period lags (within subject-round) of cumulative and due taxes;
// in period 1 the lag is missing, so the initial values (0 / 3000) are kept.
gen fsumtx=0
bys id r: replace fsumtx=sumtx[_n-1] if sumtx[_n-1]~=.
gen fduetx=3000
bys id r: replace fduetx=duetx[_n-1] if duetx[_n-1]~=.
/* calculate optimal consumption */
// Model parameters, see paper section 3.
scalar theta = 0.0125   // risk-preference parameter (NOTE(review): confirm interpretation against paper)
scalar T = 25           // number of periods per round
scalar spread = 65      // income-shock spread entering the Gamma term below

// Build the period-specific pieces of the analytical consumption function:
//   sumsumGamma : double sum of log-cosh terms, scaled by -1/theta afterwards
//   pyfactor    : remaining periods, T - t
//   factor      : annuitization factor, 1/(T - t + 1)
gen sumsumGamma=0
gen factor=0
gen pyfactor=0
forv t = 1/`=scalar(T)' {
	forv j = 0/`=T-`t'' {
		forv i = 1/`j' {
			replace sumsumGamma = sumsumGamma + log(cosh(theta*(spread/(scalar(T) - `t' + 1 - `i')))) if t==`t'
		}
	}
	replace pyfactor = scalar(T) - `t' if t==`t'
	replace factor = 1/(scalar(T) - `t' + 1) if t==`t'
}
// "*- 1/theta" multiplies by (-1)/theta, i.e. sumsumGamma becomes -sumsumGamma/theta.
replace sumsumGamma = sumsumGamma *- 1/theta
gen py = 185            // per-period income constant used in the analytical rule
gen w_OPT = 1000        // initial wealth in period 1
gen a_OPT = w_OPT
gen Optcons_analytical = 0
// Iterate the optimal path forward: assets evolve with lagged income, taxes and
// optimal consumption. Relies on the data being sorted by id r t so that [_n-1]
// is the same subject-round's previous period.
forv i=1/25 {
	replace a_OPT = a_OPT[_n-1]+y[_n-1]-tx[_n-1]-Optcons_analytical[_n-1] if t==`i' & t~=1
	replace w_OPT = a_OPT+y if t==`i'
	replace Optcons_analytical = factor*(a_OPT+y-fduetx+pyfactor*py+sumsumGamma) if t==`i'
}

/* calculate wealth from observed consumption */
// Same recursion as above, but using the subjects' observed consumption (cons).
sort id r t
gen w = 1000
gen a = w
forv i=1/25 {
	replace a = a[_n-1]+y[_n-1]-tx[_n-1]-cons[_n-1] if t==`i' & t~=1
	replace w = a+y if t==`i'
}

/* calculate optimal consumption conditional on observed wealth */
// Analytical rule evaluated at actual assets a instead of the optimal path a_OPT.
gen OptCondcons_analytical = 0
forv i=1/25 {
	replace OptCondcons_analytical = factor*(a+y-fduetx+pyfactor*py+sumsumGamma) if t==`i'
}

/* redefine model variables */
// Deviations of observed consumption from the conditionally optimal level.
gen deviations = OptCondcons_analytical-cons
gen absdeviations = abs(OptCondcons_analytical-cons)
gen labsdeviations = log(abs(OptCondcons_analytical-cons))
gen pyfactor185 = 185*pyfactor
// "_trans" variables are the regressors of the estimated consumption function,
// each pre-multiplied by the annuitization factor; labels are LaTeX for the tables.
gen y_trans = y*factor
label variable y_trans "$\tilde{y}$ "
gen a_trans = a*factor
label variable a_trans "$\tilde{a}$ "
gen fduetx_trans = -fduetx*factor
label variable fduetx_trans "$\tilde{\mathscr{T}}$"
gen sumsumGamma_trans = sumsumGamma*factor
label variable sumsumGamma_trans "$\tilde{\Gamma}(\theta\sigma_y)$"
gen pyfactor185_trans = 185*pyfactor*factor
label variable pyfactor185_trans "$(T-t)\tilde{y}_p$"

/* generate other variables */
// Time trend and its square.
label variable t "$\text{t}$"
gen tsq=t^2
label variable tsq "$\text{t}^{2}$"
// Dummies for periods with a tax of 0 and a tax of 240.
gen tx0 = 0
replace tx0 = 1 if tx==0
label variable tx0 "$\text{d}_{0.tx}$"
gen tx240 = 0
replace tx240 = 1 if tx==240
label variable tx240 "$\text{d}_{240.tx}$"
// Interactions of the tax dummies with treatment R1 (treatment==2) and R2 (treatment==3).
gen tx0R1 = 0
replace tx0R1 = 1 if tx==0 & treatment==2
label variable tx0R1 "$\text{d}_{0.tx}\times \text{dR1}$"
gen tx240R1 = 0
replace tx240R1 = 1 if tx==240 & treatment==2
label variable tx240R1 "$\text{d}_{240.tx}\times \text{dR1}$"
gen tx0R2 = 0
replace tx0R2 = 1 if tx==0 & treatment==3
label variable tx0R2 "$\text{d}_{0.tx}\times \text{dR2}$"
gen tx240R2 = 0
replace tx240R2 = 1 if tx==240 & treatment==3
label variable tx240R2 "$\text{d}_{240.tx}\times \text{dR2}$"
// Round dummies r1-r8.
gen r1 = 0
replace r1 = 1 if r==1
label variable r1 " $\text{d}_{r.1}$"
gen r2 = 0
replace r2 = 1 if r==2
label variable r2 " $\text{d}_{r.2}$"
gen r3 = 0
replace r3 = 1 if r==3
label variable r3 " $\text{d}_{r.3}$"
gen r4 = 0
replace r4 = 1 if r==4
label variable r4 " $\text{d}_{r.4}$"
gen r5 = 0
replace r5 = 1 if r==5
label variable r5 " $\text{d}_{r.5}$"
gen r6 = 0
replace r6 = 1 if r==6
label variable r6 " $\text{d}_{r.6}$"
gen r7 = 0
replace r7 = 1 if r==7
label variable r7 " $\text{d}_{r.7}$"
gen r8 = 0
replace r8 = 1 if r==8
label variable r8 " $\text{d}_{r.8}$"

/* generate lag dummies */
// Dummies for a tax of 0 (txbl*) resp. 240 (txdl*) one to three periods earlier.
// NOTE(review): these [_n-k] lags are not restricted to the same subject-round
// (no "bys id r:"), so for the first period(s) of a round they read across the
// round boundary — confirm this is intended.
gen txbl1 = 0
replace txbl1 = 1 if tx[_n-1]==0 & tx[_n-1]~=.
label variable txbl1 "$\text{d}_{t-1,0.\text{tx}}$"
gen txbl2 = 0
replace txbl2 = 1 if tx[_n-2]==0 & tx[_n-2]~=.
label variable txbl2 "$\text{d}_{t-2,0.\text{tx}}$"
gen txbl3 = 0
replace txbl3 = 1 if tx[_n-3]==0 & tx[_n-3]~=.
label variable txbl3 "$\text{d}_{t-3,0.\text{tx}}$"
gen txdl1 = 0
replace txdl1 = 1 if tx[_n-1]==240 & tx[_n-1]~=.
label variable txdl1 "$\text{d}_{t-1,240.\text{tx}}$"
gen txdl2 = 0
replace txdl2 = 1 if tx[_n-2]==240 & tx[_n-2]~=.
label variable txdl2 "$\text{d}_{t-2,240.\text{tx}}$"
gen txdl3 = 0
replace txdl3 = 1 if tx[_n-3]==240 & tx[_n-3]~=.
label variable txdl3 "$\text{d}_{t-3,240.\text{tx}}$"

/* deviations from optimal behavior: descriptives */
// m1/m2 are filled by the loop that follows this block.
sort id r t
gen m1 = .
gen m2 = .
// Fill the two deviation measures period by period:
//   m1: absolute deviation of observed from conditionally optimal consumption
//   m2: difference in 338*(1-exp(-0.0125*c)) between the optimal and the
//       observed consumption level (labeled "utility loss" in the figures)
forv i=1/25 {
	replace m1=abs(OptCondcons_analytical-cons) if t==`i'
	replace m2=338*(1-exp(-0.0125*Optcons_analytical))-338*(1-exp(-0.0125*cons)) if t==`i'
}
// Pairwise sample indicators for the rank-sum tests:
//   R1Ctrl: 0 = control, 1 = R1;  R2Ctrl: 0 = control, 1 = R2;  R1R2: 0 = R1, 1 = R2.
gen R1Ctrl = .
replace R1Ctrl = 0 if Control==1
replace R1Ctrl = 1 if R1==1
gen R2Ctrl = .
replace R2Ctrl = 0 if Control==1
replace R2Ctrl = 1 if R2==1
gen R1R2 = .
replace R1R2 = 0 if R1==1
replace R1R2 = 1 if R2==1
// Aggregate the measures per subject-round (totalm*) and per subject (totalroundm*).
bys r id: egen totalm1 = total(m1)
bys r id: egen totalm2 = total(m2)
bys id: egen totalroundm1 = total(m1)
bys id: egen totalroundm2 = total(m2)

/* Mann-Whitney-U Tests */
/* Build a 19 x 9 results matrix MWU: a header row, then for each measure
   (totalm1, totalm2) three median rows, three mean rows (by treatment), and
   three p-value rows (pairwise rank-sum tests). Columns 1-8 are rounds;
   column 9 pools all rounds. The scalar idt tracks the current row. */
#d;
mat MWU = (1,2,3,4,5,6,7,8,9);
forv measure = 1(1)2 {;
	forv samples = 1(1)3 {;
		mat MWU = MWU \ (.,.,.,.,.,.,.,.,.);
	};
	forv treatment = 1(1)3 {;
		mat MWU = MWU \ (.,.,.,.,.,.,.,.,.);
	};
	forv treatment = 1(1)3 {;
		mat MWU = MWU \ (.,.,.,.,.,.,.,.,.);
	};
};
matrix rownames MWU = "Measure" "medianm1Ctrl" "medianm1R1" "medianm1R2" "meanm1Ctrl" "meanm1R1" "meanm1R2" "pR1-Ctrl" "pR2-Ctrl" "pR1-R2" "medianm2Ctrl" "medianm2R1" "medianm2R2" "meanm2Ctrl" "meanm2R1" "meanm2R2" "pR1-Ctrl" "pR2-Ctrl" "pR1-R2";
matrix colnames MWU = "Round1" "Round2" "Round3" "Round4" "Round5" "Round6" "Round7" "Round8" "Total";
scalar idt = 1;
foreach measure in totalm1 totalm2 {;
	/* Medians by treatment; round 9 means "all rounds pooled". */
	forv treatment = 1(1)3 {;
		scalar idt = scalar(idt)+1;
		forv r = 1(1)9 {;
			if `r'~=9 {;
				su `measure' if treatment==`treatment' & r==`r',d;
			};
			if `r'==9 {;
				su `measure' if treatment==`treatment',d;
			};
			mat MWU[scalar(idt), `r'] = r(p50);
		};
	};
	/* Means by treatment. */
	forv treatment = 1(1)3 {;
		scalar idt = scalar(idt)+1;
		forv r = 1(1)9 {;
			if `r'~=9 {;
				qui su `measure' if treatment==`treatment' & r==`r',d;
			};
			if `r'==9 {;
				qui su `measure' if treatment==`treatment',d;
			};
			mat MWU[scalar(idt), `r'] = r(mean);
		};
	};
	/* Two-sided Mann-Whitney p-values from the normal approximation of ranksum's z. */
	foreach sample in R1Ctrl R2Ctrl R1R2 {;
		scalar idt = scalar(idt)+1;
		forv r = 1(1)9 {;
			if `r'~=9 {;
				qui ranksum `measure' if r==`r', by(`sample');
			};
			if `r'==9 {;
				qui ranksum `measure', by(`sample');
			};
			mat MWU[scalar(idt), `r'] = 2 * normprob(-abs(`r(z)'));
		};
	};
};
/* Move the pooled "Total" column to the front, list, drop the header row, export. */
mat MWU1 = MWU[1...,9];
mat MWU2 = MWU[1...,1..8];
mat MWU = MWU1,MWU2;
mat li MWU;
matrix MWU = MWU[2..., 1...];
outtable using "${MY_TABLES_PATH}Mann_Whitney_U",mat(MWU) replace nobox caption("Mann-Whitney-U") format(%20.2f);
#d cr

// Figure: by-round medians (panels a, b) and means (panels c, d) of the
// aggregate deviation measures, one line per treatment (Ctrl/R1/R2).
preserve
collapse (median) totalm1 totalm2, by(r treatment)
tw (connected totalm1 r if treatment==1, msymbol(sh)) || ///
(connected totalm1 r if treatment==2, msymbol(oh)) || ///
(connected totalm1 r if treatment==3, msymbol(th)), legend(label(1 "Ctrl") ///
label(2 "R1") ///
label(3 "R2") ///
cols(5) pos(1) ring(5) region(lcolor(none))) ytitle("Median aggregate absolute deviation", margin(0 3 0 0)) xtitle("Round") xlabel(1(1)8) name(panela, replace)
tw (connected totalm2 r if treatment==1, msymbol(sh)) || ///
(connected totalm2 r if treatment==2, msymbol(oh)) || ///
(connected totalm2 r if treatment==3, msymbol(th)), legend(label(1 "Ctrl") ///
label(2 "R1") ///
label(3 "R2") ///
cols(5) pos(1) ring(5) region(lcolor(none))) ytitle("Median utility loss", margin(0 3 0 0)) xtitle("Round") xlabel(1(1)8) name(panelb, replace)
restore
preserve
collapse (mean) totalm1 totalm2, by(r treatment)
tw (connected totalm1 r if treatment==1, msymbol(sh)) || ///
(connected totalm1 r if treatment==2, msymbol(oh)) || ///
(connected totalm1 r if treatment==3, msymbol(th)), legend(label(1 "Ctrl") ///
label(2 "R1") ///
label(3 "R2") ///
cols(5) pos(1) ring(5) region(lcolor(none))) ytitle("Mean aggregate absolute deviation", margin(0 3 0 0)) xtitle("Round") xlabel(1(1)8) name(panelc, replace)
tw (connected totalm2 r if treatment==1, msymbol(sh)) || ///
(connected totalm2 r if treatment==2, msymbol(oh)) || ///
(connected totalm2 r if treatment==3, msymbol(th)), legend(label(1 "Ctrl") ///
label(2 "R1") ///
label(3 "R2") ///
cols(5) pos(1) ring(5) region(lcolor(none))) ytitle("Mean utility loss", margin(0 3 0 0)) xtitle("Round") xlabel(1(1)8) name(paneld, replace)
restore
graph combine panela panelb panelc paneld, cols(2)
graph export "${MY_FIGURES_PATH}DeviationMeasures.eps", as(eps) replace
/*
panel regression */
// Declare the panel: subject id over the running period index idt.
// NOTE(review): this expects a VARIABLE idt in the dataset; it is distinct from
// the SCALAR idt used as a row counter earlier — confirm the variable exists.
xtset id idt
#d;
/* Pooled OLS consumption function with subject-clustered standard errors. */
reg cons y_trans a_trans fduetx_trans sumsumGamma_trans pyfactor185_trans tx0 tx240 txbl1 txbl2 txbl3 txdl1 txdl2 txdl3 t tsq R1 R2 r2 r3 r4 r5 r6 r7 r8 highriskaversion mediumriskaversion female engineering otherscience other , vce(cluster id);
est sto OLS;
/* For each structural ("_trans") coefficient, test H0: beta = 1
   (t-statistic and two-sided p-value). */
scalar t_stat_y_trans = (_b[y_trans]-1)/_se[y_trans];
di t_stat_y_trans;
scalar p_value_y_trans = 2*ttail(e(df_r),abs(scalar(t_stat_y_trans)));
di p_value_y_trans;
scalar t_stat_a_trans = (_b[a_trans]-1)/_se[a_trans];
di t_stat_a_trans;
scalar p_value_a_trans = 2*ttail(e(df_r),abs(scalar(t_stat_a_trans)));
di p_value_a_trans;
scalar t_stat_fduetx_trans = (_b[fduetx_trans]-1)/_se[fduetx_trans];
di t_stat_fduetx_trans;
scalar p_value_fduetx_trans = 2*ttail(e(df_r),abs(scalar(t_stat_fduetx_trans)));
di p_value_fduetx_trans;
scalar t_stat_sumsumGamma_trans = (_b[sumsumGamma_trans]-1)/_se[sumsumGamma_trans];
di t_stat_sumsumGamma_trans;
scalar p_value_sumsumGamma_trans = 2*ttail(e(df_r),abs(scalar(t_stat_sumsumGamma_trans)));
di p_value_sumsumGamma_trans;
scalar t_stat_pyfactor185_trans = (_b[pyfactor185_trans]-1)/_se[pyfactor185_trans];
di t_stat_pyfactor185_trans;
scalar p_value_pyfactor185_trans = 2*ttail(e(df_r),abs(scalar(t_stat_pyfactor185_trans)));
di p_value_pyfactor185_trans;
/* Fixed-effects (within) estimator, same specification. */
xtreg cons y_trans a_trans fduetx_trans sumsumGamma_trans pyfactor185_trans tx0 tx240 txbl1 txbl2 txbl3 txdl1 txdl2 txdl3 t tsq R1 R2 r2 r3 r4 r5 r6 r7 r8 highriskaversion mediumriskaversion female engineering otherscience other , fe vce(cluster id) ;
est sto FE;
/* Same H0: beta = 1 tests for the FE estimates (scalars are overwritten). */
scalar t_stat_y_trans = (_b[y_trans]-1)/_se[y_trans];
di t_stat_y_trans;
scalar p_value_y_trans = 2*ttail(e(df_r),abs(scalar(t_stat_y_trans)));
di p_value_y_trans;
scalar t_stat_a_trans = (_b[a_trans]-1)/_se[a_trans];
di t_stat_a_trans;
scalar p_value_a_trans = 2*ttail(e(df_r),abs(scalar(t_stat_a_trans)));
di p_value_a_trans;
scalar t_stat_fduetx_trans = (_b[fduetx_trans]-1)/_se[fduetx_trans];
di t_stat_fduetx_trans;
scalar p_value_fduetx_trans = 2*ttail(e(df_r),abs(scalar(t_stat_fduetx_trans)));
di p_value_fduetx_trans;
scalar t_stat_sumsumGamma_trans = (_b[sumsumGamma_trans]-1)/_se[sumsumGamma_trans];
di t_stat_sumsumGamma_trans;
scalar p_value_sumsumGamma_trans = 2*ttail(e(df_r),abs(scalar(t_stat_sumsumGamma_trans)));
di p_value_sumsumGamma_trans;
scalar t_stat_pyfactor185_trans = (_b[pyfactor185_trans]-1)/_se[pyfactor185_trans];
di t_stat_pyfactor185_trans;
scalar p_value_pyfactor185_trans = 2*ttail(e(df_r),abs(scalar(t_stat_pyfactor185_trans)));
di p_value_pyfactor185_trans;
/* Export the OLS and FE columns to LaTeX (requires the estout package). */
esttab OLS FE using "${MY_TABLES_PATH}cons.tex", wide replace booktabs scalars(r2_o) ar2 star(* 0.10 ** 0.05 *** 0.01) label refcat(R1 "Treatment (base: control):" highriskaversion "Risk aversion (base: low):" female "Gender (base: male):" engineering "Subject (base: economics)" r2 "Round dummies (base: round 1):", nolabel);

/* treatment interactions */
#d;
/* Same two estimators, with the tax dummies replaced by their treatment interactions. */
reg cons y_trans a_trans fduetx_trans sumsumGamma_trans pyfactor185_trans tx0R1 tx0R2 tx240R1 tx240R2 txbl1 txbl2 txbl3 txdl1 txdl2 txdl3 t tsq R1 R2 r2 r3 r4 r5 r6 r7 r8 highriskaversion mediumriskaversion female engineering otherscience other , vce(cluster id);
est sto OLS_interactions;
xtreg cons y_trans a_trans fduetx_trans sumsumGamma_trans pyfactor185_trans tx0R1 tx0R2 tx240R1 tx240R2 txbl1 txbl2 txbl3 txdl1 txdl2 txdl3 t tsq R1 R2 r2 r3 r4 r5 r6 r7 r8 highriskaversion mediumriskaversion female engineering otherscience other , fe vce(cluster id) ;
est sto FE_interactions;
/* Wald tests: equal tax-dummy effects across treatments R1 and R2 (FE estimates). */
test tx0R1=tx0R2;
test tx240R1=tx240R2;
/* NOTE(review): refcat anchors at r1, which is not a regressor here (the model
   includes r2-r8), and the label text says "base: round 8" — confirm intended. */
esttab OLS_interactions FE_interactions using "${MY_TABLES_PATH}cons_interactions.tex", replace scalars(r2_o) ar2 star(* 0.10 ** 0.05 *** 0.01) label refcat(R1 "Treatment (base: control):" highriskaversion "Risk aversion (base: low):" female "Gender (base: male):" engineering "Subject (base: economics)" r1 "Round dummies (base: round 8):", nolabel);

/* individual estimates */
#d;
/* Subject-by-subject OLS of the consumption function; store each subject's
   coefficient, t-statistic, and two-sided p-value on the tax dummies
   (H0: coefficient = 0) in new variables. */
gen coef_0tx = .;
gen t_stat_0tx = .;
gen p_value_0tx = .;
gen coef_240tx = .;
gen t_stat_240tx = .;
gen p_value_240tx = .;
levelsof id, local(idl);
foreach l of local idl {;
	reg cons y_trans a_trans fduetx_trans sumsumGamma_trans pyfactor185_trans tx0 tx240 txbl1 txbl2 txbl3 txdl1 txdl2 txdl3 t tsq r2 r3 r4 r5 r6 r7 r8 if `l'==id ;
	replace coef_0tx = _b[tx0] if `l'==id;
	replace t_stat_0tx = _b[tx0]/_se[tx0] if `l'==id;
	replace p_value_0tx = 2*ttail(e(df_r),abs(t_stat_0tx)) if `l'==id;
	replace coef_240tx = _b[tx240] if `l'==id;
	replace t_stat_240tx = _b[tx240]/_se[tx240] if `l'==id;
	replace p_value_240tx = 2*ttail(e(df_r),abs(t_stat_240tx)) if `l'==id;
};
/* fraction of non-Ricardians */;
/* Drop the control group (treatment==1), keep one row per subject, and count
   how many subjects reject a zero tax-dummy coefficient at the 5% level. */
drop if treatment==1;
collapse p_value*, by(id);
count;
scalar N = r(N);
count if (p_value_240tx <0.05 | p_value_0tx <0.05) & p_value_0tx~=. & p_value_240tx~=.;
scalar NonRicardian = r(N);
count if p_value_240tx <0.05 & p_value_240tx~=.;
scalar NonRicardian240 = r(N);
count if p_value_0tx <0.05 & p_value_0tx~=.;
scalar NonRicardian0 = r(N);
di in red "Fraction of subjects for who a t-test rejects the hypothesis that the coefficient on the tax cut dummy is zero at 5% level " NonRicardian0/N*100;
di in red "Fraction of subjects for who a t-test rejects the hypothesis that the coefficient on the tax deduction dummy is zero at 5% level " NonRicardian240/N*100;
di in red "Fraction of subjects for who a t-test rejects the hypothesis that the coefficient on the tax cut and the tax deduction dummy, respectively, is zero at 5% level " NonRicardian/N*100;
log close;