示例#1
0
 def refreshBDMPrice(self, event):
     """Fetch fresh BGN mid prices from Bloomberg and push them into the
     bond data model, then broadcast a BGN_PRICE_UPDATE message.

     The bdm lock is now held via a `with` block so it is released even if
     the assignment/astype raises (the old acquire/release pair would leak
     the lock on error and deadlock every later caller).
     """
     out = blpapiwrapper.simpleReferenceDataRequest(self.dic,
                                                    'PX_MID')['PX_MID']
     with self.bdm.lock:  # guarantees release on exception
         self.bdm.df['BGN_MID'] = out.astype(float)
     pub.sendMessage('BGN_PRICE_UPDATE', message=MessageContainer('empty'))
示例#2
0
def refresh_bond_universe():
    """Refresh the bond universe spreadsheet with Bloomberg static data.

    Function is called by:
    FlowTradingGUI > MainForm.onUpdateBondUniverse()
    """
    universe = pandas.ExcelFile(DEFPATH + "bonduniverse.xls").parse("list", index_col=0, has_index_names=True)
    # Rows without a security name are the ones still missing static data.
    missing = pandas.isnull(universe["SECURITY_NAME"])
    requested = (universe.loc[missing, "REGS"] + " Corp").to_dict()  # this works better than coupon
    broker_cols = set(["REGS", "144A", "TRADITION", "BGC", "GARBAN", "TULLETT", "GFI"])
    fields = list(set(universe.columns) - broker_cols)
    universe.loc[requested.keys(), fields] = blpapiwrapper.simpleReferenceDataRequest(requested, fields)
    print(universe.loc[requested.keys(), fields])
    universe.to_excel(DEFPATH + "bonduniverse.xls", "list")
    print("The file bonduniverse.xls has been updated.")
示例#3
0
 def updateNewTradesByISIN(self):
     """Refresh per-(Book, ISIN) positions and trade PnL from today's trades.

     THERE SHOULD NOT BE MORE THAN ONE RECORD PER BOOK AND ISIN - THE KEY
     IS BOOK-ISIN.
     """
     # Keep only records with a non-zero start-of-day position.
     self.th.positionsByISINBook = self.th.positionsByISINBook[self.th.positionsByISINBook['SOD_Pos']!=0].copy()
     self.new_trades = self.th.df[self.th.df['Date']==todayDateSTR].copy()
     self.new_trades['TradePnL'] = 0.0
     if self.bdmReady:
         self.new_trades = self.new_trades.join(self.bdm.df['MID'], on='Bond')
         # Risk-free issuers (e.g. UST / Bunds) are not in the bond data
         # model, so fetch their mids from Bloomberg BGN instead.
         riskFreeIsins = []
         for issuer in self.riskFreeIssuers:
             riskFreeIsins = riskFreeIsins + list(self.new_trades.loc[self.new_trades['Issuer']==issuer,'ISIN'])
         if len(riskFreeIsins)>0:
             riskFreePrices = blpapiwrapper.simpleReferenceDataRequest(dict(zip(riskFreeIsins, map(lambda x:x + '@BGN Corp', riskFreeIsins))), 'PX_MID')
             for (i,row) in riskFreePrices.iterrows():
                 self.new_trades.loc[self.new_trades['ISIN']==i,'MID'] = float(row['PX_MID'])#this works because bond name == isin for UST and bunds but it's not very clean
     self.positionDeltas = self.new_trades.groupby(['Book','ISIN'])[['Qty','MK']]
     reclist = []
     nkeylist = []
     for (k,grp) in self.positionDeltas:
         key = k[0] + '-' + k[1]
         if key in self.th.positionsByISINBook.index:
             # Existing key: roll today's delta onto the SOD position.
             self.th.positionsByISINBook.at[key,'Qty'] = self.th.positionsByISINBook.at[key,'SOD_Pos'] + grp['Qty'].sum()
             self.th.positionsByISINBook.at[key,'MK'] = grp['MK'].sum()
         else:
             # New key: build a fresh record from the last trade's static data.
             lr = self.new_trades.loc[self.new_trades['ISIN']==k[1]].iloc[-1]#take the last trade -> ONLY FOR STATIC DATA
             bond = lr['Bond']
             pf = self.th.positions.at[bond,'PRINCIPAL_FACTOR']
             r = self.th.positions.at[bond,'RISK_MID']
             lc = self.cntrymap.at[lr['Country'],'LongCountry']
             series = 'REGS' if k[1]==bonds.loc[bond,'REGS'] else '144A'
             rec = [bond,k[0],lr['CCY'],k[1],lr['Issuer'], lr['Country'],lc,0,series, grp['Qty'].sum(), grp['MK'].sum(), lr['Price'], pandas.np.nan, pf, r]
             reclist.append(rec)
             nkeylist.append(key)
     if reclist != []:
         reclistdf = pandas.DataFrame(data=reclist, columns=['Bond','Book','CCY','ISIN','Issuer','Country','LongCountry','SOD_Pos', 'Series','Qty', 'MK', 'EODPrice', 'PriceY', 'PRINCIPAL_FACTOR','RISK_MID'], index=nkeylist)
         self.th.positionsByISINBook = self.th.positionsByISINBook.append(reclistdf, verify_integrity=True)
     for (k,grp) in self.positionDeltas:#calculate trade pnl
         key = k[0] + '-' + k[1]
         idx = (self.new_trades['ISIN']==k[1]) & (self.new_trades['Book']==k[0])
         self.new_trades.loc[idx,'TradePnL'] = self.new_trades.loc[idx,'Qty']*(self.new_trades.loc[idx,'MID']-self.new_trades.loc[idx,'Price'])/100.
         try:
             self.th.positionsByISINBook.at[key,'TradePnL'] = self.th.positionsByISINBook.at[key,'PRINCIPAL_FACTOR'] * self.new_trades.loc[idx,'TradePnL'].sum()
         except Exception:
             # Narrowed from a bare except: still best-effort, but no longer
             # swallows KeyboardInterrupt / SystemExit.
             print('error finding a price for ' + key)
示例#4
0
 def treeRebuild(self):
     """Rebuild positions, PnL and risk tables, then redraw the risk tree.

     Refreshes EOD prices, principal factors and risk for today's traded
     bonds, rebuilds the by-bond display view and the by-(Book, ISIN)
     view, recomputes SOD / trade / total PnL converted to USD, and
     finally publishes the REDRAW_RISK_TREE message.
     """
     # Bonds traded today (unique, non-null names).
     self.traded_bonds = self.th.df[self.th.df['Date']==todayDateSTR]['Bond'].drop_duplicates().dropna().copy()
     new_bonds = list(set(self.traded_bonds) - set(self.displayPositions.index))
     self.th.positions['EODPrice'] = self.EODPrices
     self.th.positions['EODPrice'].fillna(0.0,inplace=True)
     for bond in new_bonds:
         self.th.positions.loc[bond,'EODPrice'] = self.th.df[self.th.df['Bond']==bond].iloc[-1]['Price'] # we take the last traded price
     self.EODPrices = self.th.positions['EODPrice'].copy()
     if self.bdmReady:
         self.th.positions['PRINCIPAL_FACTOR'] = self.bdm.df['PRINCIPAL_FACTOR']
         self.th.positions['RISK_MID'] = self.bdm.df['RISK_MID']
         #Following 2 lines get rid of some runtime warning - possibly a bug - http://stackoverflow.com/questions/30519487/pandas-error-invalid-value-encountered
         self.th.positions['PRINCIPAL_FACTOR'].fillna(1.0,inplace=True) # in doubt principal is 1
         self.th.positions['RISK_MID'].fillna(0.0,inplace=True) # in doubt risk is 0
     else:
         self.th.positions['PRINCIPAL_FACTOR'] = 1.0
         self.th.positions['RISK_MID'] = 0.0
         self.th.positionsByISINBook['PRINCIPAL_FACTOR'] = 1.0
         self.th.positionsByISINBook['RISK_MID'] = 0.0
     ###
     if len(new_bonds) > 0:
         # NOTE(review): this reads self.bdm.df even when bdmReady is False -
         # presumably new_bonds is only non-empty once the bdm exists; confirm,
         # otherwise this branch raises on a missing/unready bdm.
         self.th.positions.loc[new_bonds,'PRINCIPAL_FACTOR'] = self.bdm.df['PRINCIPAL_FACTOR']
         self.th.positions.loc[new_bonds,'RISK_MID'] = self.bdm.df['RISK_MID']
         spbonds = list(set(SPECIALBONDS) & set(new_bonds))
         if len(spbonds)>0:
             # Special bonds take their risk from workout-adjusted duration.
             dc = dict(zip(spbonds,map(lambda x:bonds.loc[x,'REGS']+ ' Corp',spbonds)))
             output = blpapiwrapper.simpleReferenceDataRequest(dc,['WORKOUT_OAS_MID_MOD_DUR'])
             self.th.positions.loc[spbonds,'RISK_MID'] = output['WORKOUT_OAS_MID_MOD_DUR'].astype(float)
     # USD conversion (at the '2016' ccy column rate) and derived columns.
     self.th.positions['USDQty'] = self.th.positions.apply(lambda row:row['Qty']/ccy.loc[row['CCY'],'2016'],axis=1)
     self.th.positions['EODValue'] = self.th.positions['EODPrice']*self.th.positions['USDQty']/100.*(self.th.positions['PRINCIPAL_FACTOR'])
     self.th.positions['Risk'] = -self.th.positions['USDQty']*self.th.positions['RISK_MID']/10000
     for issuer in self.riskFreeIssuers:
         self.th.positions.loc[self.th.positions['Issuer']==issuer,'Risk'] = 0.0 # UST HAVE NO CREDIT RISK
     self.displayPositions = self.th.positions.loc[list(self.displayPositions.index)+new_bonds]#SOD risk + new trades
     self.displayPositions = self.displayPositions.join(self.cntrymap['LongCountry'],on='Country')
     self.displayGroup = self.displayPositions.groupby(['Region','LongCountry','Issuer','Bond']).sum()
     self.th.positionsByISINBook['PriceT'] = self.th.positionsByISINBook['PriceT'].astype(float)#NEEDED OTHERWISE DEFAULTS TO int64
     # Today's price: bond-data-model mid where available, NaN otherwise.
     for (i,row) in self.th.positionsByISINBook.iterrows():
         try:
             self.th.positionsByISINBook.at[i,'PriceT'] = self.bdm.df.at[row['Bond'],'MID']
         except:
             self.th.positionsByISINBook.at[i,'PriceT'] = pandas.np.nan # for UST and unrecognized bonds
     # Risk-free issuers are priced off Bloomberg BGN mids by ISIN.
     riskFreeIsins = []
     for issuer in self.riskFreeIssuers:
         riskFreeIsins = riskFreeIsins + list(self.th.positionsByISINBook.loc[self.th.positionsByISINBook['Issuer']==issuer,'ISIN'])
     if len(riskFreeIsins) > 0:
         riskFreePrices = blpapiwrapper.simpleReferenceDataRequest(dict(zip(riskFreeIsins, map(lambda x:x + '@BGN Corp', riskFreeIsins))), 'PX_MID')
         for (i,row) in riskFreePrices.iterrows():
             self.th.positionsByISINBook.loc[self.th.positionsByISINBook['ISIN']==i,'PriceT'] = float(row['PX_MID'])
     # Re-source static columns from the bond data model / cached EOD prices.
     self.th.positionsByISINBook.drop(['PRINCIPAL_FACTOR','RISK_MID','EODPrice'], axis=1, inplace=True)
     self.th.positionsByISINBook = self.th.positionsByISINBook.join(self.EODPrices, on='Bond')
     self.th.positionsByISINBook = self.th.positionsByISINBook.join(self.bdm.df['PRINCIPAL_FACTOR'], on='Bond')
     self.th.positionsByISINBook = self.th.positionsByISINBook.join(self.bdm.df['RISK_MID'], on='Bond')
     for issuer in self.riskFreeIssuers:
         self.th.positionsByISINBook.loc[self.th.positionsByISINBook['Issuer']==issuer,'RISK_MID'] = 0.0 # UST HAVE NO CREDIT RISK
         self.th.positionsByISINBook.loc[self.th.positionsByISINBook['Issuer']==issuer,'PRINCIPAL_FACTOR'] = 1.0
     self.th.positionsByISINBook[['PRINCIPAL_FACTOR','RISK_MID']] = self.th.positionsByISINBook[['PRINCIPAL_FACTOR','RISK_MID']].astype(float)
     self.th.positionsByISINBook['SODPnL'] = self.th.positionsByISINBook['SOD_Pos'] *  self.th.positionsByISINBook['PRINCIPAL_FACTOR'] * (self.th.positionsByISINBook['PriceT'] - self.th.positionsByISINBook['PriceY'])/100.
     self.updateNewTradesByISIN() # at that point prices and principal factors are ready already if self.bdmReady
     self.th.positionsByISINBook['SODPnL'].fillna(0.0, inplace = True)
     self.th.positionsByISINBook['TradePnL'].fillna(0.0, inplace = True)
     self.th.positionsByISINBook['MK'].fillna(0.0, inplace = True)
     # Total PnL is SOD + trade PnL, each converted to USD per-row.
     self.th.positionsByISINBook['TotalPnL'] = self.th.positionsByISINBook['SODPnL']/self.th.positionsByISINBook.apply(lambda row:ccy.loc[row['CCY'],'2016'],axis=1) + self.th.positionsByISINBook['TradePnL']/self.th.positionsByISINBook.apply(lambda row:ccy.loc[row['CCY'],'2016'],axis=1)
     self.th.positionsByISINBook['USDQty'] = self.th.positionsByISINBook.apply(lambda row:row['Qty']/ccy.loc[row['CCY'],'2016'],axis=1)
     self.th.positionsByISINBook['EODValue'] = self.th.positionsByISINBook['EODPrice']*self.th.positionsByISINBook['USDQty']/100.*(self.th.positionsByISINBook['PRINCIPAL_FACTOR'])
     self.th.positionsByISINBook['Risk'] = -self.th.positionsByISINBook['USDQty']*self.th.positionsByISINBook['RISK_MID']/10000
     self.displayPositionsBook = self.th.positionsByISINBook
     self.displayGroupBook = self.th.positionsByISINBook.groupby(['Book','LongCountry','Issuer','Bond','Series']).sum()
     pub.sendMessage('REDRAW_RISK_TREE', message=MessageContainer('empty'))
示例#5
0
    def fillHistoricalPricesAndRating(self):
        """Fill historical prices and ratings.

        Called when the pricer menu first launches. If a cached
        bondhistoryrating.csv written today exists it is loaded; otherwise
        ratings / accrued / risk data are downloaded from Bloomberg, joined
        with the saved price/yield/spread history files, and the cache is
        rewritten. (The unused `isins` local that the old version built
        from BBGHand has been removed; the request is keyed directly off
        self.df['ISIN'].)
        """
        time_start = time.time()
        self.buildPriceHistory()
        savepath = TEMPPATH + 'bondhistoryrating.csv'
        # Columns cached in (and restored from) bondhistoryrating.csv.
        cols = [
            'SNP', 'MDY', 'FTC', 'P1D', 'P1W', 'P1M', 'Y1D', 'Y1W', 'Y1M',
            'ACCRUED', 'D2CPN', 'SAVG', 'ISP1D', 'ISP1W', 'ISP1M', 'RISK_MID',
            'PRINCIPAL_FACTOR', 'SIZE'
        ]
        # Download fresh data when the cache is missing or older than today.
        if not (os.path.exists(savepath)) or datetime.datetime.fromtimestamp(
                os.path.getmtime(
                    savepath)).date() < datetime.datetime.today().date():
            # Static reference data: ratings, accrued, coupon timing, risk.
            flds = [
                'RTG_SP', 'RTG_MOODY', 'RTG_FITCH', 'INT_ACC',
                'DAYS_TO_NEXT_COUPON', 'YRS_TO_SHORTEST_AVG_LIFE', 'RISK_MID',
                'PRINCIPAL_FACTOR', 'AMT_OUTSTANDING'
            ]
            out = blpapiwrapper.simpleReferenceDataRequest(
                pandas.Series((self.df['ISIN'] + ' Corp').values,
                              index=self.df.index).to_dict(), flds)[flds]
            # Map Bloomberg field names onto the local column names.
            for f in flds:
                self.df[bbgToBdmDic[f]] = out[f]
            self.df['RISK_MID'].fillna(0, inplace=True)
            # History columns are rebuilt below from the db*History files.
            self.df.drop([
                'P1D', 'P1W', 'P1M', 'Y1D', 'Y1W', 'Y1M', 'ISP1D', 'ISP1W',
                'ISP1M'
            ],
                         axis=1,
                         inplace=True)
            dbPriceHistory = pandas.read_csv(PHPATH + 'dbPriceHistory.csv',
                                             index_col=0)
            dbYieldHistory = pandas.read_csv(PHPATH + 'dbYieldHistory.csv',
                                             index_col=0)
            dbSpreadHistory = pandas.read_csv(PHPATH + 'dbSpreadHistory.csv',
                                              index_col=0)
            # hdt collects the history dates actually present in the files;
            # any missing date falls back to NaN columns.
            hdt = []
            if self.dtYesterday.strftime('%Y%m%d') in dbPriceHistory.columns:
                hdt.append(self.dtYesterday.strftime('%Y%m%d'))
            else:
                self.df['P1D'] = pandas.np.nan
                self.df['Y1D'] = pandas.np.nan
                self.df['ISP1D'] = pandas.np.nan
            if self.dtLastWeek.strftime('%Y%m%d') in dbPriceHistory.columns:
                hdt.append(self.dtLastWeek.strftime('%Y%m%d'))
            else:
                self.df['P1W'] = pandas.np.nan
                self.df['Y1W'] = pandas.np.nan
                self.df['ISP1W'] = pandas.np.nan
            if self.dtLastMonth.strftime('%Y%m%d') in dbPriceHistory.columns:
                hdt.append(self.dtLastMonth.strftime('%Y%m%d'))
            else:
                self.df['P1M'] = pandas.np.nan
                self.df['Y1M'] = pandas.np.nan
                self.df['ISP1M'] = pandas.np.nan
            ohdt = [
                self.dtYesterday.strftime('%Y%m%d'),
                self.dtLastWeek.strftime('%Y%m%d'),
                self.dtLastMonth.strftime('%Y%m%d')
            ]
            # Join each history table and rename its date columns into the
            # 1D / 1W / 1M buckets.
            self.df = self.df.join(dbPriceHistory[hdt], on='ISIN')
            self.df.rename(columns={
                ohdt[0]: 'P1D',
                ohdt[1]: 'P1W',
                ohdt[2]: 'P1M'
            },
                           inplace=True)
            self.df = self.df.join(dbYieldHistory[hdt], on='ISIN')
            self.df.rename(columns={
                ohdt[0]: 'Y1D',
                ohdt[1]: 'Y1W',
                ohdt[2]: 'Y1M'
            },
                           inplace=True)
            self.df = self.df.join(dbSpreadHistory[hdt], on='ISIN')
            self.df.rename(columns={
                ohdt[0]: 'ISP1D',
                ohdt[1]: 'ISP1W',
                ohdt[2]: 'ISP1M'
            },
                           inplace=True)

            # Write the cache, then normalize dtypes/formats for display.
            self.df[cols].to_csv(savepath)
            self.df['ACCRUED'] = self.df['ACCRUED'].apply(
                lambda x: '{:,.2f}'.format(float(x)))
            self.df['D2CPN'].fillna(-1, inplace=True)
            self.df['D2CPN'] = self.df['D2CPN'].astype(int)
            self.df[['RISK_MID', 'PRINCIPAL_FACTOR', 'SIZE'
                     ]] = self.df[['RISK_MID', 'PRINCIPAL_FACTOR',
                                   'SIZE']].astype(float)
            self.df[['SNP', 'MDY',
                     'FTC']] = self.df[['SNP', 'MDY', 'FTC'
                                        ]].fillna('NA')  # ,'ACCRUED','D2CPN'
            self.df[['SNP', 'MDY', 'FTC',
                     'ACCRUED']] = self.df[['SNP', 'MDY', 'FTC',
                                            'ACCRUED']].astype(str)

        # Otherwise, load and read from today's cache file.
        else:
            print('Found existing file from today')
            df = pandas.read_csv(savepath, index_col=0)
            self.df[cols] = df[cols]
            self.df[[
                'RISK_MID', 'PRINCIPAL_FACTOR', 'SIZE', 'SAVG', 'ISP1D',
                'ISP1W', 'ISP1M'
            ]] = self.df[[
                'RISK_MID', 'PRINCIPAL_FACTOR', 'SIZE', 'SAVG', 'ISP1D',
                'ISP1W', 'ISP1M'
            ]].astype(float)
            self.df[['SNP', 'MDY', 'FTC']] = self.df[['SNP', 'MDY',
                                                      'FTC']].astype(str)
            self.df['ACCRUED'].fillna(
                -1, inplace=True
            )  # HACK SO NEXT LINE DOESN'T BLOW UP - WE DON'T WANT TO PUT 0 THERE!
            self.df['ACCRUED'] = self.df['ACCRUED'].astype(float)
            self.df['ACCRUED'] = self.df['ACCRUED'].apply(
                lambda x: '{:,.2f}'.format(float(x)))
            self.df['D2CPN'].fillna(
                -1, inplace=True
            )  # HACK SO NEXT LINE DOESN'T BLOW UP - WE DON'T WANT TO PUT 0 THERE!
            self.df['D2CPN'] = self.df['D2CPN'].astype(int)

        print('History fetched in: ' + str(
            int(time.time() - time_start)) + ' seconds.')
示例#6
0
    def fillHistoricalPricesAndRating(self):
        """Fill historical prices and ratings.

        Called when the pricer menu first launches. Downloads ratings,
        accrued interest, risk data and one month of price/yield history
        from Bloomberg unless a bondhistoryrating.csv cache written today
        already exists, in which case the cache is loaded instead.
        """
        time_start = time.time()
        savepath = TEMPPATH+'bondhistoryrating.csv'
        # If today's bondhistoryrating.csv doesn't exist, download data and write file.
        if not (os.path.exists(savepath)) or datetime.datetime.fromtimestamp(
                os.path.getmtime(savepath)).date() < datetime.datetime.today().date():
            isins = self.df['ISIN'] + BBGHand + ' Corp'
            isins = list(isins.astype(str))

            # Static reference data: ratings, accrued, coupon timing, risk.
            flds = ['RTG_SP', 'RTG_MOODY', 'RTG_FITCH', 'INT_ACC', 'DAYS_TO_NEXT_COUPON','YRS_TO_SHORTEST_AVG_LIFE','RISK_MID','PRINCIPAL_FACTOR','AMT_OUTSTANDING']
            out = blpapiwrapper.simpleReferenceDataRequest(pandas.Series((self.df['ISIN'] + ' Corp').values, index=self.df.index).to_dict(),flds)[flds]
            # Map Bloomberg field names onto the local column names.
            for f in flds:
                self.df[bbgToBdmDic[f]] = out[f]
            self.df['RISK_MID'].fillna(0, inplace=True)

            # One month of price/yield history, delivered asynchronously via
            # the StreamWatcherHistory callback registered on the request.
            priceHistory = blpapiwrapper.BLPTS(isins, ['PX_LAST', 'YLD_YTM_MID'], startDate=self.dtLastMonth,endDate=self.dtToday)
            priceHistoryStream = StreamWatcherHistory(self)
            priceHistory.register(priceHistoryStream)
            priceHistory.get()
            priceHistory.closeSession()

            # Based on today's shortest-to-average life, calculate the SAVG
            # for yesterday, last week, and last month.
            self.df['SAVG'] = self.df['SAVG'].astype(float)
            self.df['SAVG1D'] = self.df['SAVG']+(self.dtToday - self.dtYesterday).days/365.0
            self.df['SAVG1W'] = self.df['SAVG']+(self.dtToday - self.dtLastWeek).days/365.0
            self.df['SAVG1M'] = self.df['SAVG']+(self.dtToday - self.dtLastMonth).days/365.0

            # Create DataFrames for swap rates of each supported currency.
            US1D = SwapHistory('USD',self.dtYesterday)
            US1W = SwapHistory('USD',self.dtLastWeek)
            US1M = SwapHistory('USD',self.dtLastMonth)
            CHF1D = SwapHistory('CHF',self.dtYesterday)
            CHF1W = SwapHistory('CHF',self.dtLastWeek)
            CHF1M = SwapHistory('CHF',self.dtLastMonth)
            EUR1D = SwapHistory('EUR',self.dtYesterday)
            EUR1W = SwapHistory('EUR',self.dtLastWeek)
            EUR1M = SwapHistory('EUR',self.dtLastMonth)
            CNY1D = SwapHistory('CNY',self.dtYesterday)
            CNY1W = SwapHistory('CNY',self.dtLastWeek)
            CNY1M = SwapHistory('CNY',self.dtLastMonth)
            # Compute the historical risk-free rate for each bond, one
            # lookback horizon at a time.
            currencyList1D = {'USD':US1D,'CHF':CHF1D,'EUR':EUR1D,'CNY':CNY1D}
            self.populateRiskFreeRates(currencyList1D,'INTSWAP1D','SAVG1D')
            currencyList1W = {'USD':US1W,'CHF':CHF1W,'EUR':EUR1W,'CNY':CNY1W}
            self.populateRiskFreeRates(currencyList1W,'INTSWAP1W','SAVG1W')
            currencyList1M = {'USD':US1M,'CHF':CHF1M,'EUR':EUR1M,'CNY':CNY1M}
            self.populateRiskFreeRates(currencyList1M,'INTSWAP1M','SAVG1M')
            # I-spread over past dates, in basis points (yield is in %).
            self.df['ISP1D'] = (self.df['Y1D']-self.df['INTSWAP1D'])*100
            self.df['ISP1W'] = (self.df['Y1W']-self.df['INTSWAP1W'])*100
            self.df['ISP1M'] = (self.df['Y1M']-self.df['INTSWAP1M'])*100

            # Write the cache, then normalize dtypes/formats for display.
            self.df[['SNP', 'MDY', 'FTC', 'P1D', 'P1W', 'P1M', 'Y1D', 'Y1W', 'Y1M', 'ACCRUED', 'D2CPN','SAVG','ISP1D','ISP1W','ISP1M','RISK_MID','PRINCIPAL_FACTOR','SIZE']].to_csv(savepath)
            self.df['ACCRUED'] = self.df['ACCRUED'].apply(lambda x: '{:,.2f}'.format(float(x)))
            self.df['D2CPN'].fillna(-1, inplace=True)
            self.df['D2CPN'] = self.df['D2CPN'].astype(int)
            self.df[['RISK_MID','PRINCIPAL_FACTOR','SIZE']] = self.df[['RISK_MID','PRINCIPAL_FACTOR','SIZE']].astype(float)
            self.df[['SNP', 'MDY', 'FTC']] = self.df[['SNP', 'MDY', 'FTC']].fillna('NA')  # ,'ACCRUED','D2CPN'
            self.df[['SNP', 'MDY', 'FTC', 'ACCRUED']] = self.df[['SNP', 'MDY', 'FTC', 'ACCRUED']].astype(str)

        # Otherwise, load and read from today's cache file.
        else:
            print 'Found existing file from today'
            df = pandas.read_csv(savepath, index_col=0)
            self.df[['SNP', 'MDY', 'FTC', 'P1D', 'P1W', 'P1M', 'Y1D', 'Y1W', 'Y1M', 'ACCRUED', 'D2CPN','SAVG','ISP1D','ISP1W','ISP1M']] = df[['SNP', 'MDY', 'FTC', 'P1D', 'P1W', 'P1M', 'Y1D', 'Y1W', 'Y1M', 'ACCRUED', 'D2CPN','SAVG','ISP1D','ISP1W','ISP1M']]
            self.df[['RISK_MID','PRINCIPAL_FACTOR','SIZE']] = df[['RISK_MID','PRINCIPAL_FACTOR','SIZE']].astype(float)
            self.df[['SNP', 'MDY', 'FTC']] = self.df[['SNP', 'MDY', 'FTC']].astype(str)
            self.df['ACCRUED'].fillna(-1,inplace=True)#HACK SO NEXT LINE DOESN'T BLOW UP - WE DON'T WANT TO PUT 0 THERE!
            self.df['ACCRUED'] = self.df['ACCRUED'].astype(float)
            self.df['ACCRUED'] = self.df['ACCRUED'].apply(lambda x: '{:,.2f}'.format(float(x)))
            self.df['D2CPN'].fillna(-1, inplace=True)#HACK SO NEXT LINE DOESN'T BLOW UP - WE DON'T WANT TO PUT 0 THERE!
            self.df['D2CPN'] = self.df['D2CPN'].astype(int)
            self.df[['SAVG','ISP1D','ISP1W','ISP1M']] = self.df[['SAVG','ISP1D','ISP1W','ISP1M']].astype(float)

        print 'History fetched in: ' + str(int(time.time() - time_start)) + ' seconds.'