"""Compute USD/JPY return autocorrelations at daily, weekly and monthly
frequencies from the FRED DEXJPUS exchange-rate series."""
from pandas_datareader.data import DataReader
import pandas as pd
import matplotlib.pyplot as plt
from datetime import date

series_code = 'DEXJPUS'
data_source = 'fred'
startDate = date(1979, 1, 1)

# Fetch the dollar/yen exchange rate from FRED and normalize the index.
jpy = DataReader(series_code, data_source=data_source, start=startDate)
jpy.index = pd.to_datetime(jpy.index)
# Rename the column in one step (was: copy to 'usdjpy' then `del` the original).
jpy = jpy.rename(columns={'DEXJPUS': 'usdjpy'})

# Daily simple returns and their first-order autocorrelation.
jpy['returns'] = jpy['usdjpy'].pct_change()
autocorrelation_daily = jpy['returns'].autocorr()
print("The daily autocorrelation is: " + str(autocorrelation_daily))

# Weekly frequency: last observation of each week, then weekly returns.
jpyWeekly = jpy.resample('W').last()
jpyWeekly['weekly_returns'] = jpyWeekly['usdjpy'].pct_change()
# Fixed misspelling: was `atuocorrelation_weekly`.
autocorrelation_weekly = jpyWeekly['weekly_returns'].autocorr()
print("The weekly autocorrelation is: " + str(autocorrelation_weekly))

# Monthly frequency: last observation of each month.
jpyMonthly = jpy.resample('M').last()
jpyMonthly['returns'] = jpyMonthly['usdjpy'].pct_change()
autocorrelation_monthly = jpyMonthly['returns'].autocorr()
print("The monthly autocorrelation is: " + str(autocorrelation_monthly))
# NOTE(review): this chunk begins mid-method — the class/def enclosing the two
# `self[...]` assignments below is outside the visible source, so they are left
# untouched. They appear to write into a state-space model's matrices
# (presumably a statsmodels MLEModel subclass — TODO confirm against the
# missing class definition).
self['transition'] = transition
self['state_cov', 0, 0] = structural_params[self.idx_tech_var]

if __name__ == '__main__':
    # Sample window: 1984Q1 through today.
    start = '1984-01'
    end = datetime.today().strftime('%Y-%m-%d')
    # Quarterly FRED series: hours (HOANBS), real consumption (PCECC96),
    # real private investment (GPDIC1) — resampled to quarter-start.
    labor = DataReader('HOANBS', 'fred', start=start, end=end).resample('QS').first()
    cons = DataReader('PCECC96', 'fred', start=start, end=end).resample('QS').first()
    inv = DataReader('GPDIC1', 'fred', start=start, end=end).resample('QS').first()
    pop = DataReader('CNP16OV', 'fred', start=start, end=end)
    pop = pop.resample(
        'QS').mean()  # Convert pop from monthly to quarterly observations
    # NBER recession indicator, aligned to the same quarterly grid; the first
    # observation is dropped to match the differenced series built below.
    recessions = DataReader('USRECQ', 'fred', start=start, end=end)
    recessions = recessions.resample('QS').last()['USRECQ'].iloc[1:]
    # Get in per-capita terms
    N = labor['HOANBS'] * 6e4 / pop['CNP16OV']
    C = (cons['PCECC96'] * 1e6 / pop['CNP16OV']) / 4
    I = (inv['GPDIC1'] * 1e6 / pop['CNP16OV']) / 4
    Y = C + I
    # Log, detrend
    y = np.log(Y).diff()[1:]
    c = np.log(C).diff()[1:]
    n = np.log(N).diff()[1:]
    i = np.log(I).diff()[1:]
    # Observables for the RBC model: output, hours, consumption growth rates.
    rbc_data = pd.concat((y, n, c), axis=1)
"""Download Amazon daily prices from Stooq, persist them to CSV, and draw a
few exploratory plots: raw close price, business-quarter averages, and the
series shifted forward/backward by 365 rows."""
from pandas_datareader.data import DataReader
import matplotlib.pyplot as plt
import seaborn as sns

sns.set()

# %% fetch and persist
stock = DataReader('AMZN', 'stooq')
stock.to_csv('data.csv')

# %% raw close price, drawn twice (second pass semi-transparent)
close = stock['Close']
close.plot()
close.plot(alpha=0.5)

# %% close price overlaid with its business-quarter mean
close.plot(alpha=0.7)
quarter_avg = stock.resample('BQ').mean()['Close']
quarter_avg.plot(color='green', style='--', alpha=0.7)
plt.legend(['price', 'quarter average'])

# %% shifting
fig, ax = plt.subplots(3, sharex=True)
close.plot(ax=ax[0])
close.shift(365).plot(ax=ax[1])
close.shift(-365).plot(ax=ax[2])
ax[0].legend(['input'])
ax[1].legend(['shift by 365'])
ax[2].legend(['shift by -365'])
# %% ROI
"""Weekly-return autocorrelation study for Microsoft (MSFT): point estimate,
ACF plot with a 95% confidence band, and a white-noise benchmark series."""
from pandas_datareader.data import DataReader
import pandas as pd
import matplotlib.pyplot as plt
from datetime import date

ticker = 'msft'
source = 'yahoo'
first_day = date(2012, 1, 1)
last_day = date(2019, 11, 1)

# Daily quotes collapsed to the last observation of each week.
weekly = DataReader(ticker, source, first_day, last_day).resample('W').last()
returns = weekly['Adj Close'].pct_change().dropna()

autocorrelation = returns.autocorr()
print("The autocorrelation of weekly returns is %4.2f" % (autocorrelation))

###########################################################################################
from statsmodels.graphics.tsaplots import plot_acf
from math import sqrt

# Approximate 95% band for sample autocorrelations: +/- 1.96 / sqrt(T).
nobs = len(returns)
conf = 1.96 / sqrt(nobs)
print("The approximate confidence interval is +/- %4.2f" % (conf))

# ACF out to 20 lags with the 95% confidence band drawn in.
plot_acf(returns, alpha=0.05, lags=20, title="MSFT Autocorrelation")
plt.show()

# White-noise benchmark: i.i.d. normal draws with a small positive drift.
import numpy as np
whiteNoise = np.random.normal(loc=0.0025, scale=0.05, size=1000)
# print white noise mean/std
wnMean = np.mean(whiteNoise)
"""Fetch monthly U.S. consumer indicators from FRED and run VECM lag-order
and cointegration-rank selection on the panel."""
# was missing: `datetime` is used below but was never imported (NameError).
from datetime import datetime
from dateutil.relativedelta import relativedelta
from pandas_datareader.data import DataReader
# was missing: select_order / select_coint_rank come from statsmodels' VECM module.
from statsmodels.tsa.vector_ar.vecm import select_order, select_coint_rank

# 66-year lookback window ending today.
start_data = datetime.now() - relativedelta(years=66)
today = datetime.now()

consumer_df = DataReader([
    'PCE', 'UMCSENT', 'UNRATE', 'LCEAMN01USM189S', 'TOTALSL',
    'MRTSSM44X72USS', 'HOUST'
], 'fred', start_data, today)
consumer_df = consumer_df.dropna()
consumer_df.columns = [
    'PCE', 'ConConf', 'Unempl', 'HourlyEarning', 'CCredit', 'RetSales',
    'HouseStarts'
]
# Put every series on a common monthly grid.
consumer_df = consumer_df.resample('1M').mean()
type(consumer_df)

# lag order selection (constant inside the cointegration relation, 12 seasons)
lag_order = select_order(data=consumer_df, maxlags=10, deterministic="ci",
                         seasons=12)
print(lag_order.summary())
print(lag_order)

# Cointegration rank via the Johansen trace test at the 5% level.
rank_test = select_coint_rank(consumer_df, 0, 2, method="trace", signif=0.05)
rank_test.rank
print(rank_test.summary())
print(rank_test)
"""Pull Uber daily quotes from Stooq, dump them to CSV, and plot the close
price raw, smoothed to business-month means, and shifted by +/- 100 rows."""
from pandas_datareader.data import DataReader
import matplotlib.pyplot as plt
import seaborn as sns

sns.set()

#%% Import data to dataframe
raw_uber = DataReader('UBER', 'stooq')

#%% Export imported data to csv
raw_uber.to_csv('./source_data/Uber.csv')

#%% Plot close Uber values
close_series = raw_uber['Close']
close_series.plot()

#%% Create new sample with monthly periods and calculated mean
resampled_uber = raw_uber.resample('BM').mean()

#%% Plot resampled and raw close data on one plot
close_series.plot()
resampled_uber['Close'].plot(style='--', color='green')

#%% Plot raw close values with shift on one plot
fig, ax = plt.subplots(3, sharex=True)
for axis, offset in zip(ax, (None, 100, -100)):
    shifted = close_series if offset is None else close_series.shift(offset)
    shifted.plot(ax=axis)
ax[0].legend(['Input'])
ax[1].legend(['Shift by 100 days'])
ax[2].legend(['Shift by -100 days'])