def get_all(list_for_fred):
    """Pull the full release history for each FRED series id and merge them on date.

    Parameters
    ----------
    list_for_fred : iterable of str
        FRED series identifiers to download.

    Returns
    -------
    pandas.DataFrame
        One row per 'date_for_merge' with a column per requested series.
        (The original version built this frame but never returned it.)
    """
    fred = Fred()
    merge_df = pd.DataFrame()  # accumulator for all the FRED variables
    # enumerate replaces the hand-rolled `counter` used to detect the first pull
    for counter, series_id in enumerate(list_for_fred):
        data = fred.get_series_all_releases(series_id)
        data = data.reset_index()
        # assumes reset_index yields exactly two columns here — TODO confirm
        # against the installed fredapi version's return shape
        data.columns = ['date_for_merge', series_id]
        if counter == 0:
            # First pull: nothing to merge on yet, so just seed the frame
            merge_df = data
        else:
            # Subsequent pulls: inner-merge on the shared date column
            merge_df = pd.merge(merge_df, data, on='date_for_merge')
        time.sleep(3)  # slight delay to avoid over-querying the API
    return merge_df  # fix: original built the frame but never returned it
def economic_indicator(df, indicators=('GDP', 'PAYEMS', 'DTB1YR', 'DTB6', 'DTB3',
                                       'DGS3MO', 'DGS1MO', 'CPIAUCSL', 'BOGMBASE',
                                       'TEDRATE')):
    """Left-join FRED economic indicators onto *df* by release date.

    For each series id, the full release history is pulled, de-duplicated so
    each 'realtime_start' keeps its last observation (and each 'date' its
    first), indexed by 'realtime_start', and merged onto a copy of *df* on the
    index. Gaps are forward- then backward-filled.

    Parameters
    ----------
    df : pandas.DataFrame
        Frame indexed compatibly with FRED 'realtime_start' dates — TODO
        confirm the caller's index is datetime-like.
    indicators : iterable of str, optional
        FRED series ids to merge in. Default changed from a list to a tuple
        (same contents) to avoid the mutable-default-argument pitfall.

    Returns
    -------
    pandas.DataFrame
        Copy of *df* with one extra column per indicator.
    """
    # NOTE(security): hard-coded API key checked into source — move to an
    # environment variable or config file.
    fred = Fred(api_key='cfcd953abacca55f74c93ca364dfce2d')
    df_eco = df.copy()
    for series_id in indicators:
        # Scrape the full release history for this indicator
        indicator = fred.get_series_all_releases(series_id)
        indicator.dropna(inplace=True)
        indicator.drop_duplicates(subset=['date'], keep='first', inplace=True)
        indicator.drop_duplicates(subset=['realtime_start'], keep='last', inplace=True)
        indicator.set_index('realtime_start', inplace=True)
        indicator.drop(['date'], axis=1, inplace=True)
        # Merge on the index, then rename the generic 'value' column
        df_eco = pd.merge(df_eco, indicator, how='left',
                          left_index=True, right_index=True)
        df_eco.rename(columns={'value': series_id}, inplace=True)
    # fillna(method=...) is deprecated/removed in pandas 2.x — use ffill/bfill
    df_eco.ffill(inplace=True)
    df_eco.bfill(inplace=True)
    return df_eco
# (fragment: the series-id list below is opened earlier in the file)
'DTWEXBGS', 'DTWEXEMEGS', 'VIXCLS', 'T5YIE', 'T5YIFR',
'MORTGAGE30US', 'MORTGAGE15US'
]
# Monthly date index: 244 month-starts beginning Jan 2000
dates_list = pd.date_range(start='2000-01-01', periods=244, freq='MS')
# open connection
os.chdir('../data')
macro_con = sqlite3.connect('macro.db')
# Persist each macro series into its own sqlite table, replacing any old copy
for tk in macro_dict.keys():
    if tk in ['US530']:
        continue  # skip this ticker entirely
    if tk in fin_tickers:
        # financial tickers: latest values only
        tmp = fred.get_series(tk)
        tmp.to_sql(tk, macro_con, if_exists='replace')
    else:
        # other tickers: full release history
        tmp = fred.get_series_all_releases(tk)
        tmp.to_sql(tk, macro_con, if_exists='replace')
# close connection
macro_con.close()
# create a US yieldcurve using US 30-year yield and US 5-year yield
dgs30 = get_table('DGS30', 'macro')
dgs30.columns = ['date', 'US30']
os.chdir('../data')
dgs30['date'] = pd.to_datetime(dgs30['date'])
dgs30.set_index('date', inplace=True)
# NOTE(review): result discarded — no inplace=True and no assignment, so this
# forward-fill is a no-op; likely meant dgs30['US30'] = dgs30['US30'].ffill()
dgs30['US30'].fillna(method='ffill')
dgs5 = get_table('DGS5', 'macro')
os.chdir('../data')
dgs5.columns = ['date', 'US5']
def getFRED(self):
    """Fetch the full GDP release history from FRED and print it.

    Uses the API key stored on the instance (``self.fred_api_key``).
    Returns nothing; output goes to stdout.
    """
    client = Fred(api_key=self.fred_api_key)
    gdp_releases = client.get_series_all_releases("GDP")
    print(gdp_releases)
# Attach the state names as a column alongside the existing fred_list frame
stateName = pd.DataFrame({'State Name': stateName})
fred_list = pd.concat([fred_list, stateName], axis=1)
######
# not all states have min wage data from the fred. in those cases
# you will need to skip them and then later use the federal minimum wage
# in their place
# put a continue thing in this for loop
# make dataframe from the fred and combine them
minWageData = pd.DataFrame()
for indx, name in enumerate(
        fred_list['State Abbreviation']):  # i.e. name = STTMINWGAK
    try:
        data = fred.get_series_all_releases(name)  # download the data
        # change the date to year format and remove the old columns
        data['date'] = pd.to_datetime(data['date'])
        data['Year'] = data['date'].dt.year
        # NOTE(review): passing both columns= and axis=1 is redundant —
        # columns= alone already implies axis=1
        data.drop(columns=['realtime_start'], inplace=True, axis=1)
        data.drop(columns=['date'], inplace=True, axis=1)
        # label the columns with min wage and year
        data.columns = ['Minimum Wage', 'Year']
        # label each dataframe by its state name
        state_name = fred_list['State Name'][indx]
        data["State"] = data.shape[0] * [state_name]
        # add each new dataframe to the right
        # NOTE(review): DataFrame.append was removed in pandas 2.x —
        # migrate to pd.concat([minWageData, data])
        minWageData = minWageData.append(data)
    except Exception:
        # if an error arises from a series not available in the fred,
        # use the federal min wage instead
        # (fragment: the except body continues past this chunk)