Ejemplo n.º 1
0
 def get_riskfree_rate(self,startdate,enddate,freq="M",maturity='1M'):
     """
     Return the risk-free rate series between startdate and enddate,
     resampled to month-end.

     Rates from FRED (4-week T-bill, series DTB4WK):
     http://research.stlouisfed.org/fred2/categories/116

     Parameters
     ----------
     startdate, enddate : datetime-like
         Window of rates to return.
     freq : str
         Currently unused; resampling is hard-coded to monthly ('M').
     maturity : str
         Currently unused -- the FRED series is hard-coded to the
         4-week bill.  TODO(review): honour this parameter (would also
         require keying the cache on maturity).

     The downloaded frame is cached on the class, so repeated calls
     whose window is covered by an earlier request skip the network.
     """
     rfcache = self.__class__._cacherfrate
     grabdata = False
     if rfcache is None:
         grabdata = True
     elif rfcache[0] > startdate or rfcache[1] < enddate:
         # BUGFIX: only re-download when the cached range does NOT
         # cover the requested window.  The original comparison was
         # inverted: it re-fetched when the cache was wider than the
         # request, and silently served truncated data when the cache
         # was narrower.
         grabdata = True

     if grabdata:
         dt          = DataReader('DTB4WK',"fred", startdate,enddate)
         dt.columns  = ['RFRate']
         # FRED leaves holidays as NaN; backfill from the next print.
         dt.fillna(method='backfill',inplace=True)
         rfcache     = (startdate,enddate,dt)
         self.__class__._cacherfrate= rfcache
     else:
         dt          = rfcache[2]

     # Slice to the requested window and downsample to month-end.
     dsm     = dt[startdate:enddate].resample('M')
     return dsm
     
     
     
     
     
Ejemplo n.º 2
0
 def getHistoricalQuotes(self, symbol, index, market=None):
     """
     Fetch quotes for ``symbol`` from Yahoo over the span of ``index``,
     reindexed to the index frequency and localized to ``self.tz``.

     Parameters
     ----------
     symbol : str
         Ticker to download.
     index : pd.Index
         Date index whose first/last entries bound the request and
         whose ``freq`` drives reindexing.
     market : unused
         Kept for interface compatibility.

     Returns an empty DataFrame when the download fails.
     """
     assert (isinstance(index, pd.Index))
     source = 'yahoo'
     try:
         quotes = DataReader(symbol, source, index[0], index[-1])
     except Exception:
         # Narrowed from a bare ``except:`` so KeyboardInterrupt /
         # SystemExit still propagate; download errors are logged and
         # signalled with an empty frame (best-effort contract kept).
         log.error('** Could not get {} quotes'.format(symbol))
         return pd.DataFrame()
     # BUGFIX: the original used ``or``, which is ALWAYS true (freq can
     # never equal both BDay and Day at once), so daily data was also
     # reindexed.  Reindex only for non-daily frequencies.
     if index.freq != pd.datetools.BDay() and index.freq != pd.datetools.Day():
         #NOTE reIndexDF has a column arg but here not provided
         quotes = utils.reIndexDF(quotes, delta=index.freq, reset_hour=False)
     if not quotes.index.tzinfo:
         quotes.index = quotes.index.tz_localize(self.tz)
     quotes.columns = utils.Fields.QUOTES
     return quotes
Ejemplo n.º 3
0
 def getHistoricalQuotes(self, symbol, index, market=None):
     """
     Fetch quotes for ``symbol`` from Yahoo over the span of ``index``,
     reindexed to the index frequency and localized to ``self.tz``.

     Parameters
     ----------
     symbol : str
         Ticker to download.
     index : pd.Index
         Date index whose first/last entries bound the request and
         whose ``freq`` drives reindexing.
     market : unused
         Kept for interface compatibility.

     Returns an empty DataFrame when the download fails.
     """
     assert (isinstance(index, pd.Index))
     source = 'yahoo'
     try:
         quotes = DataReader(symbol, source, index[0], index[-1])
     except Exception:
         # Narrowed from a bare ``except:`` so KeyboardInterrupt /
         # SystemExit still propagate; download errors are logged and
         # signalled with an empty frame (best-effort contract kept).
         log.error('** Could not get {} quotes'.format(symbol))
         return pd.DataFrame()
     # BUGFIX: the original used ``or``, which is ALWAYS true (freq can
     # never equal both BDay and Day at once), so daily data was also
     # reindexed.  Reindex only for non-daily frequencies.
     if index.freq != pd.datetools.BDay() and index.freq != pd.datetools.Day():
         #NOTE reIndexDF has a column arg but here not provided
         quotes = utils.reIndexDF(quotes, delta=index.freq, reset_hour=False)
     if not quotes.index.tzinfo:
         quotes.index = quotes.index.tz_localize(self.tz)
     quotes.columns = utils.Fields.QUOTES
     return quotes
Ejemplo n.º 4
0
def _ohlc(code, source='yahoo', start=None, end=None):
    """Download price data for *code* and normalize its column labels.

    Labels are lower-cased with spaces turned into underscores
    (e.g. 'Adj Close' -> 'adj_close').
    """
    frame = DataReader(code, source, start=start, end=end)
    normalized = []
    for label in frame.columns:
        normalized.append(label.replace(' ', '_').lower())
    frame.columns = normalized
    return frame
# Pivot daily per-airline rows into a wide frame: one column per airline,
# one row per tweet_date, values = negative-sentiment share.
# NOTE(review): assumes ``by_day_airline`` (defined earlier, not visible
# here) has columns 'tweet_date', 'airline', 'negative' -- confirm.
bda_negative = by_day_airline.pivot('tweet_date','airline','negative')

# Plot negative-sentiment share over time, one dashed line per airline.
airlines = ['Delta','Southwest','US Airways','United','Virgin America']
# NOTE(review): DataFrame.plot is called positionally with (x, y); passing
# the index object itself as x is unusual -- verify it renders as intended.
bda_negative.plot(bda_negative.index,airlines,linestyle='--',figsize=(12,9))
plt.ylabel('% Negative')
plt.xlabel('Date')
plt.title('% of Tweets with Negative Sentiment, by Airline')
plt.savefig('airline_sentiment.pdf', bbox_inches='tight')

# Grab adjusted-close prices for the same airlines over one week.
start = date(2015,2,17)
end = date(2015,2,24)
tickers = ['DAL','LUV','AAL','UAL','VA']
prices = DataReader(tickers,'yahoo',start,end)['Adj Close']

# Rename columns from tickers to airline names.
# NOTE(review): this relies on DataReader returning the tickers in this
# exact column order (it may sort them alphabetically) -- verify the
# name/ticker pairing before trusting downstream plots.
prices.columns = ['US Airways Price','Delta Price','Southwest Price','United Price','Virgin America Price']

# Inner-join stock prices onto the sentiment frame by date index.
joined = pd.merge(bda_negative,prices,left_index=True,right_index=True)

# Plot sentiment vs. price for United to eyeball co-movement.
united = ['United', 'United Price']
joined.plot(joined.index,united,linestyle='--',figsize=(12,9))

# use broken axis?
# with more days of data, create normalized variables, calculated as deviation from all airline average sentiment levels
Ejemplo n.º 6
0
# plt.show()
# Save the current figure into the report via the ``pic``/``rep`` helpers
# (both defined elsewhere in this file).
pic.new()
plt.savefig("C:/Users/oskar/Documents/doc_no_backup/python_crap/temp/%s.png" %(str(pic.num)))
rep.addimage("C:/Users/oskar/Documents/doc_no_backup/python_crap/temp/%s.png"%(str(pic.num)),7,4,'LEFT')
plt.close()
  
  
  
##unemployment model-----------------------------------------------------------------------------------------
# Pull actual vs. natural unemployment from FRED over [start, end].
UNEMP = DataReader("UNRATE",  "fred", start, end) #Unemplyment
N_UNEMP = DataReader("NROU",  "fred", start, end) #natural rate Unemplyment
# EQ = DataReader("SP500",  "fred", start, end) #SPX
# NOTE(review): ``EQ`` is merged in below but its download is commented out
# above -- confirm EQ is defined earlier in the file, otherwise this raises
# a NameError.
UNEMP=pd.merge(UNEMP, N_UNEMP, how='outer', left_index=True, right_index=True)
UNEMP=pd.merge(UNEMP, EQ, how='inner', left_index=True, right_index=True)
# FRED marks missing observations with '.'; convert to NaN and forward-fill.
UNEMP = UNEMP.replace('.',np.nan).fillna(method='ffill')
UNEMP.columns=['UNEMP','N_UNEMP','EQ']
# 15-period moving average of the equity series.
# NOTE(review): pd.rolling_mean was removed in pandas 0.23; modern
# equivalent is UNEMP['EQ'].rolling(15).mean() -- this script targets an
# old pandas.
UNEMP['MAS']=pd.rolling_mean(UNEMP['EQ'], 15)
# ``f`` is a converter defined elsewhere in the file -- presumably
# str -> float; verify.
UNEMP[['UNEMP','N_UNEMP','EQ']]=UNEMP[['UNEMP','N_UNEMP','EQ']].applymap(f)
# Unemployment gap: actual minus natural rate.
UNEMP['Excess']=UNEMP['UNEMP']-UNEMP['N_UNEMP']
   
# Position series, padded with zeros for the 15-period MA warm-up.
trade =list(np.zeros(15))
i=15
pos3=0
while i < len(UNEMP['EQ']):
    if UNEMP['EQ'].ix[i] >UNEMP['MAS'].ix[i] and UNEMP['Excess'].ix[i]>0.0:
        pos3=1
        trade.append(pos3)
    elif UNEMP['EQ'].ix[i] <UNEMP['MAS'].ix[i] and min(UNEMP['Excess'].ix[i-12:i])<0.0:
        pos3=0
        trade.append(pos3)
    else: