# 4.3 Write the styled SVG map for this year, then rasterize it to PNG.
# Prepend the XML declaration and CSS stylesheet reference so the map picks
# up the per-state fill colors defined in Blank_US_Map.css.
head = '<?xml version="1.0" encoding="UTF-8" standalone="no"?>\n<?xml-stylesheet href="Blank_US_Map.css" type="text/css"?>'
svg = head + svg
# Widen the canvas to leave room for the legend/colorbar.
svg = svg.replace('width="959"', 'width="1035"')

with open("images/stateRelativeIncome" + str(year) + ".svg", "wb") as file:
    file.write(bytes(svg, 'UTF-8'))

# NOTE(review): the original reopened the .svg in append mode here
# (file = open(..., "a")) and never wrote to or closed it — a pointless
# file-handle leak, so the reopen was removed.

# Rasterize the SVG at 144 dpi with ImageMagick. shell=True with a string
# command is preserved; `year` is set by the enclosing loop (not shown here).
convert = 'convert -density 144 images/stateRelativeIncome' + str(
    year) + '.svg images/stateRelativeIncome' + str(year) + '.png'
subprocess.call(convert, shell=True)

# In[ ]:

# 4.4 Create gif with imagemagick from all per-year PNG frames
makegif = 'convert -loop 0 -delay 50x100 images/*.png usStateConvergence.gif'
subprocess.call(makegif, shell=True)

# In[ ]:

# 5. Clean up (left disabled in the original)
# os.chdir(os.getcwd())
# for files in os.listdir('.'):
#     if files.endswith('.css') or files.endswith('.svg'):
#         os.remove(files)

# In[ ]:

# 6. Export notebook to .py
runProcs.exportNb('usConvergenceMap')
# Construct series for real incomes in 1840, 1880, and 1900 by deflating the
# nominal Easterlin per-capita income figures with the historic CPI.
df_1840 = easterlin_data['Income per capita - 1840 - A [cur dollars]']/float(historic_cpi_data.loc[1840])
# NOTE(review): the 1880 income series is deflated with the 1890 CPI value —
# possibly deliberate (nearest available CPI observation), but confirm it
# should not be historic_cpi_data.loc[1880].
df_1880 = easterlin_data['Income per capita - 1880 [cur dollars]']/float(historic_cpi_data.loc[1890])
df_1900 = easterlin_data['Income per capita - 1900 [cur dollars]']/float(historic_cpi_data.loc[1900])

# Put into a DataFrame and concatenate with previous data beginning in 1929
df = pd.DataFrame({pd.to_datetime('1840'): df_1840,
                   pd.to_datetime('1880'): df_1880,
                   pd.to_datetime('1900'): df_1900}).transpose()
df = pd.concat([data_y, df]).sort_index()

# In[115]:

# Export data to csv, dropping non-contiguous states and regional aggregates.
series = df.sort_index()
dropCols = [u'AK', u'HI', u'New England', u'Mideast', u'Great Lakes', u'Plains',
            u'Southeast', u'Southwest', u'Rocky Mountain', u'Far West']
# Single drop call replaces the original one-column-at-a-time loop
# (same result; still raises KeyError if a column is missing).
series = series.drop(dropCols, axis=1)
series.to_csv('../csv/state_income_data.csv', na_rep='NaN')

# In[11]:

# Export notebook to .py
runProcs.exportNb('state_income_data')
# 4.3 Write the styled SVG frame for this year, then rasterize it to PNG.
with open("../frames/state_relative_income" + str(year.year) + ".svg", "wb") as file:
    file.write(bytes(svg, 'UTF-8'))

# NOTE(review): the original reopened the .svg in append mode here
# (file = open(..., "a")) and never wrote to or closed it — a pointless
# file-handle leak, so the reopen was removed.

# Rasterize at 144 dpi with ImageMagick; shell=True string command preserved.
# `year` is a timestamp from the enclosing loop (not shown here).
convert = 'convert -density 144 ../frames/state_relative_income' + str(
    year.year) + '.svg ../frames/state_relative_income' + str(
    year.year) + '.png'
subprocess.call(convert, shell=True)

# In[ ]:

# 4.4 Create gif with imagemagick from all per-year PNG frames
makegif = 'convert -loop 0 -delay 50x100 ../frames/*.png ../gif/us_state_convergence.gif'
subprocess.call(makegif, shell=True)

# In[ ]:

# 5. Clean up (left disabled in the original)
# os.chdir(os.getcwd())
# for files in os.listdir('.'):
#     if files.endswith('.css') or files.endswith('.svg'):
#         os.remove(files)

# In[ ]:

# 6. Export notebook to .py
runProcs.exportNb('us_convergence_map')
# Drop countries whose series contain infinities (from ratios/logs upstream):
# map +/-inf to NaN, then remove any row with a missing value.
qtyTheoryData = qtyTheoryData.replace([np.inf, -np.inf], np.nan).dropna()

# Subsample by income group and OECD membership (index lists defined earlier).
qtyTheoryDataL = qtyTheoryData.loc[indexL]
qtyTheoryDataM = qtyTheoryData.loc[indexM]
qtyTheoryDataH = qtyTheoryData.loc[indexH]
qtyTheoryDataOecd = qtyTheoryData.loc[indexOecd]

# 4.6 Export dataframes to csv — one file per subsample, suffixed by group.
for suffix, frame in (('', qtyTheoryData),
                      ('L', qtyTheoryDataL),
                      ('M', qtyTheoryDataM),
                      ('H', qtyTheoryDataH),
                      ('Oecd', qtyTheoryDataOecd)):
    frame.to_csv('qtyTheoryOpenData' + suffix + '.csv',
                 index=True, index_label='country')

# In[ ]:

# 5. Export notebook to python script
runProcs.exportNb('quantityTheoryData')
# (continuation) closing arguments of an ax.fill_between call that begins
# before this chunk — green shading for the first where-condition.
alpha=0.25, facecolor='green', interpolate=True)
# Red shading where expected inflation exceeded actual inflation.
ax.fill_between(annual_data_frame.index, actualInflation, expectedInflation, where=expectedInflation > actualInflation, alpha=0.25, facecolor='red', interpolate=True)
ax.set_ylabel('%')
# years5 is a date tick locator defined earlier in the file (not shown here).
ax.xaxis.set_major_locator(years5)
# Legend rendered as a full-width strip just above the axes.
ax.legend(['actual inflation (year ahead)', 'expected inflation (year ahead)'], bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=3, mode="expand", borderaxespad=0., prop={
    'weight': 'normal',
    'size': '15'
})
plt.grid()
# Rotate/align the date tick labels, then save the figure for the site.
fig.autofmt_xdate()
plt.savefig('../img/fig_US_Inflation_Forecast_site.png', bbox_inches='tight')

# In[17]:

# Export notebook to .py
progName = 'realRateData'
runProcs.exportNb(progName)
growth = 100*((df.iloc[-1]/df.iloc[0])**(1/(len(df.index)-1))-1) # Construct plot fig = plt.figure(figsize=(10, 6)) ax = fig.add_subplot(1,1,1) colors = ['red','blue','magenta','green'] plt.scatter(income60,growth,s=0.0001) for i, txt in enumerate(df.columns): ax.annotate(txt[-3:], (income60[i],growth[i]),fontsize=10,color = colors[np.mod(i,4)]) ax.grid() ax.set_xlabel('GDP per capita in 1960\n (thousands of 2011 $ PPP)') ax.set_ylabel('Real GDP per capita growth\nfrom 1970 to '+str(df.index[0].year)+ ' (%)') xlim = ax.get_xlim() ax.set_xlim([0,xlim[1]]) fig.tight_layout() # Save image plt.savefig('../png/fig_GDP_GDP_Growth_site.png',bbox_inches='tight') # In[ ]: # Export notebook to python script runProcs.exportNb('cross_country_income_data')
# z.extractall() # 1.3 Remove the zip file os.remove('FRB_Z1.zip') # In[16]: # 2. Import the xml data and create a legend # 2.2 parse tree = etree.parse("Z1_data.xml") root = tree.getroot() # 2.2 create a legend in csv format # legend= createLegend(root) # In[17]: # 3. Sample plot: US T-bill volume tBills = getSeries('FL313161113.A') tBills.plot(x_compat=True) # In[18]: # 4. export the notebook runProcs.exportNb('z1data')
# In[9]:

# Concatenate the historical series with the later data and sort by date.
df = pd.concat([dataY, df]).sort_index()

# In[17]:

# Bare expression: displays the 1880 cross-section, sorted, when run in a
# notebook; a no-op in the exported script.
df.loc['1880'].sort_values()

# In[10]:

# 3. Export data to csv, dropping non-contiguous states and regional
# aggregates.
# NOTE(review): the original assigned series = dataY.sort_index() and then
# immediately overwrote it with df.sort_index(); the dead assignment was
# removed.
series = df.sort_index()
dropCols = [
    u'AK', u'HI', u'New England', u'Mideast', u'Great Lakes', u'Plains',
    u'Southeast', u'Southwest', u'Rocky Mountain', u'Far West'
]
# Single drop call replaces the original one-column-at-a-time loop
# (same result; still raises KeyError if a column is missing).
series = series.drop(dropCols, axis=1)
series.to_csv('stateIncomeData.csv', na_rep='NaN')

# In[11]:

# Bare expression: shows the column count in a notebook; no effect in a
# script.
len(dataY.columns)

# In[12]:

# 4. Export notebook to .py
runProcs.exportNb('stateIncomeData')
# (continuation) closes out the inflation/interest-differentials figure
# snippet whose open() occurs before this chunk — left untouched.
nf.write('\\hspace*{-.5cm}\\includegraphics[height = 7.cm]{./png/fig_inflation_interest_differentials_open.png}\n')
nf.write('\\end{figure}')
nf.close()

# 9.5 Money growth and exchange rate depreciation: write the LaTeX
# figure-inclusion snippet. A with-block replaces the original bare
# open()/close() pair so the handle is closed even if a write fails.
with open('../tex/figure_money_differential_depreciation_open.tex', 'w') as nf:
    nf.write('\\begin{figure}[h]\n')
    nf.write('\\caption{\\label{fig:money_differential_depreciation_open} \\textbf{Money growth and depreciation for ')
    nf.write(str(len(quantity_theory_data)))
    nf.write(' countries.} High-income countries: blue circles, medium-income: green squares, and low-income: red triangles. {\\tiny Source: Quandl, World Development Indicators, World Bank}}\n')
    nf.write('\\hspace*{-.5cm}\\includegraphics[height = 7.cm]{./png/fig_money_differential_depreciation_open.png}\n')
    nf.write('\\end{figure}')

# In[10]:

# 10. Correlations among the quantity-theory variables, for the full sample
# and each subsample. Column list hoisted out of the five repeated calls.
corr_cols = ['money growth', 'inflation', 'gdp growth',
             'nominal interest rate', 'exchange rate depreciation']
print(quantity_theory_data[corr_cols].corr())
print(quantity_theory_data_H[corr_cols].corr())
print(quantity_theory_data_M[corr_cols].corr())
print(quantity_theory_data_L[corr_cols].corr())
print(quantity_theory_data_oecd[corr_cols].corr())

# In[11]:

# 11. Export notebook to python script
runProcs.exportNb('quantity_theory_figures')
# Predicted real rate: sigma times the demeaned series from cons.data minus
# 100*log(beta). sigma, beta, cons, and gc are defined earlier in the file
# (not shown here); the print is a sanity check on gc.
r_pred_A = sigma * np.array(cons.data - np.mean(cons.data)) - 100 * np.log(beta)
print(gc)

# In[14]:

# Bare expression: displays r_pred_A in a notebook; a no-op in the script.
r_pred_A

# In[15]:

# 6.3 Plot the predicted real interest rate against the actual ex ante rate.
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot(real_ex_ante_A, 'b-', lw=3)
ax.plot(real_ex_ante_A.index, r_pred_A, 'r--', lw=3)
ax.set_title('Annual ex ante real interest rate')
ax.set_xlabel('Date')
ax.set_ylabel('%')
ax.legend(['actual', 'predicted'], loc='upper right')
# interest_A.recessions()
plt.grid()

# In[16]:

# Bare expression: correlation between cons.data and the actual ex ante rate
# (displayed in a notebook; no effect in the script).
np.corrcoef(cons.data, real_ex_ante_A)

# In[17]:

# 7. Export to notebook to .py
runProcs.exportNb('inflation_forecasts')
# 6.2 Predicted real interest rate: sigma = 1 sigma = 1 beta = .98 gc = np.mean(cons.data) rPredA = sigma * np.array(cons.data - np.mean(cons.data)) - 100 * np.log(beta) print(gc) # In[14]: # 6.3 Plot the predicted real interest rate fig = plt.figure() ax = fig.add_subplot(1, 1, 1) ax.plot_date(interestA.datenumbers, realExAnteA, 'b-', lw=3) ax.plot_date(interestA.datenumbers, rPredA, 'r--', lw=3) ax.set_title('Annual ex ante real interest rate') ax.set_xlabel('Date') ax.set_ylabel('%') ax.legend(['actual', 'predicted'], loc='upper right') # interestA.recessions() plt.grid() # In[15]: np.corrcoef(cons.data, realExAnteA) # In[16]: # 7. Export to notebook to .py runProcs.exportNb('consumptionEuler')