Code Example #1
def spreadToBenchmarkHistory(isinBench: str, isin_list: list):
    """
    define a benchmark bond and get spread to benchmark hiistory for bonds in hthe list.
    
        input:
            isinBench: the benchmark isin
            isin_list: the benchmark isins list
            
        output:
            df: dataframe containing benchmark histories
            
    """
    
    # remove benchmark from list
    if isinBench in isin_list:
        print('benchmark isin removed from list')
        isin_list.remove(isinBench)
    else:
        print('benchmark isin not in list')
    

    # list of isins
    isin_list_corp = [i + ' Corp' for i in isin_list]
    print('here is the list: ', isin_list)
    

    # fields
    fieldList = 'blp_i_sprd_mid'

    # start and end dates to use
    dateToUseToday = datetime.date.today()
    dateToUse2yAgo = dateToUseToday + datetime.timedelta(days=-365*2)
    
    print("let's go between today and 2 years ago")
    print('today:', dateToUseToday)
    print('2y ago:', dateToUse2yAgo)



    # request from bbg returning a MultiIndex DataFrame object - for the isin list    
    bondDataHist = blp.bdh(
        tickers= isin_list_corp, flds=fieldList,
        start_date=dateToUse2yAgo, end_date=dateToUseToday,
        )

    # request from bbg returning a MultiIndex DataFrame object - for the isin benchmark  
    benchDataHist = blp.bdh(
        tickers= isinBench+ ' Corp', flds=fieldList,
        start_date=dateToUse2yAgo, end_date=dateToUseToday,
        )

    # MultiIndex dataframe for spreads
    spreadDataHist = bondDataHist.sub(benchDataHist[(isinBench+ ' Corp', 'blp_i_sprd_mid')], axis=0)
    
    # print(spreadDataHist)
    # rename columns
    spreadDataHist.columns = isin_list
    
    
    return spreadDataHist
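
A minimal usage sketch for the function above, assuming datetime and from xbbg import blp are imported at module level as in the other snippets on this page; the ISINs are placeholders.

# hypothetical ISINs -- substitute real identifiers before running
bench_isin = 'XS0000000001'
bond_isins = ['XS0000000002', 'XS0000000003', bench_isin]

spread_hist = spreadToBenchmarkHistory(bench_isin, bond_isins)  # note: removes the benchmark from bond_isins in place
print(spread_hist.tail())  # one spread column per remaining ISIN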
Code Example #2
def get_data_bdh(ticker, fields, start_date, end_date, override, legend_labels, sort):
    """
    Downloads data using blp.bph
    :param ticker: str/list, ticker(s) name(s)
    :param fields: str/list, field(s) name(s)
    :param start_date: datetime
    :param end_date: datetime
    :param override: dictionary (override_value: override_value)
    :param legend_labels: list[string], your name for fields
    :param sort: string, sort value if needed
    :return: data to plot
    """
    data = pd.DataFrame()
    min_max_avg = [pd.DataFrame(), pd.DataFrame(), pd.DataFrame()]
    final = []
    for idx, field in enumerate(fields):
        try:
            temp = blp.bdh(tickers=ticker, flds=field, start_date=start_date, end_date=end_date, **override[idx])
        except Exception:  # no valid override for this field: request without overrides
            temp = blp.bdh(tickers=ticker, flds=field, start_date=start_date, end_date=end_date)
        data[legend_labels[idx]] = temp.iloc[-1].unstack()
        min_max_avg[0][legend_labels[idx]] = temp[ticker].min().unstack()
        min_max_avg[1][legend_labels[idx]] = temp[ticker].max().unstack()
        min_max_avg[2][legend_labels[idx]] = temp[ticker].mean().unstack()

    if sort == 'a':
        data.sort_values(by=data.columns[0], axis=0, inplace=True, ascending=True)
    if sort == 'd':
        data.sort_values(by=data.columns[0], axis=0, inplace=True, ascending=False)

    # print(min_max_avg[0])
    for df in min_max_avg:
        final.append(df.reindex(data.index))
    return data, final
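
An illustrative call showing how the per-field override list is meant to line up with fields; the tickers, fields, override key, and labels below are placeholders rather than part of the original snippet.

import datetime

tickers = ['SPX Index', 'UKX Index']
fields = ['px_last', 'best_eps']
overrides = [{}, {'BEST_FPERIOD_OVERRIDE': '1BF'}]  # one dict per field; {} means no override
labels = ['Last Price', 'Fwd EPS']

data, min_max_avg = get_data_bdh(ticker=tickers, fields=fields,
                                 start_date=datetime.date(2022, 1, 1),
                                 end_date=datetime.date.today(),
                                 override=overrides, legend_labels=labels, sort='d')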
Code Example #3
File: Database.py Project: KelianF/RiskMonitor
def Updatedb(Ticker,
             Start="20000101",
             End=pd.Timestamp.today().strftime("%Y%m%d")):
    #print(len(Ticker), type(Ticker), Ticker)
    if Ticker + ".csv" in os.listdir(
            r"\\10.155.31.53\Share\Kelian\DATA\COMMO"):  # Folder Location
        #print("Got From DataBase")
        Pointer = pd.read_csv(r"\\10.155.31.53\Share\Kelian\DATA\COMMO\\" +
                              Ticker + ".csv",
                              index_col=0)
        if str(Pointer.index.max()) < End:
            Pointer2 = blp.bdh(tickers=[Ticker],
                               start_date=str(Pointer.index.max()),
                               end_date=End)
            if len(Pointer2[Pointer2.index > str(Pointer.index.max())]) != 0:
                Pointer2 = Pointer2.iloc[1:, :]
                Pointer2.index = Pointer2.index.strftime("%Y%m%d")
                Pointer2.columns = [Ticker]
                Pointer = pd.concat((Pointer, Pointer2), axis=0)
                Pointer = Pointer[~Pointer.index.duplicated()]
            Pointer.to_csv(r"\\10.155.31.53\Share\Kelian\DATA\COMMO\\" +
                           Ticker + ".csv",
                           index=True)

        return Pointer
    else:  # not cached yet: download the full history and write the CSV
        Pointer = blp.bdh(tickers=[Ticker], start_date=Start, end_date=End)
        Pointer.index = Pointer.index.strftime("%Y%m%d")
        Pointer.columns = [Ticker]
        Pointer.to_csv(r"\\10.155.31.53\Share\Kelian\DATA\COMMO\\" + Ticker +
                       ".csv",
                       index=True)
        return Pointer
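
An illustrative call; the ticker is a placeholder and the UNC share hard-coded inside the function must be reachable. Note that Start only matters on the first download: once the CSV exists, the cached history is returned and only extended forward to End.

prices = Updatedb('CO1 Comdty')  # returns the cached series, refreshed up to today if needed
print(prices.tail())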
Code Example #4
def getHistoryFromISIN_list(isin_list: list):
    ''' function to get historic data from a list of isins  '''

    # list of isins
    isin_list = [i + ' Corp' for i in isin_list]
    print('here is the list: ', isin_list)

    # fields
    fieldList = 'blp_i_sprd_mid'

    # start and end dates to use
    dateToUseToday = datetime.date.today()
    dateToUse2yAgo = dateToUseToday + datetime.timedelta(days=-365*2)
    
    print("let's go between today and 2 years ago")
    print('today:', dateToUseToday)
    print('2y ago:', dateToUse2yAgo)



    # request from bbg returning a MultiIndex DataFrame object    
    bondDataHist = blp.bdh(
        tickers= isin_list, flds=fieldList,
        start_date=dateToUse2yAgo, end_date=dateToUseToday,
        )
    

    return bondDataHist
Code Example #5
def examples():
    """
    examples of how to get basic data from bbg
    """

    # get some data for a single name
    x = blp.bdp('BDEV LN Equity', 'px_last')
    print(x)
    print('the type of x', type(x))
    print('the value of x:', x.iloc[0]['px_last'])

    # get multiple data for a single name
    y = blp.bdp('BDEV LN Equity', flds=['px_bid', 'px_ask'])
    print(y)

    # get multiple data for multiple names
    z = blp.bdp(tickers=['BDEV LN Equity', 'BARC LN Equity'],
                flds=['px_bid', 'px_ask'])
    print(z)
    print('here is the bdev ask >>>', z.loc['BDEV LN Equity', 'px_ask'])

    # get history data for a single name
    print('getting history...')
    todaydate = datetime.datetime.today()
    historicDate = todaydate - datetime.timedelta(3 * 365)
    print(todaydate, historicDate)
    x = blp.bdh('BDEV LN Equity',
                flds='px_last',
                start_date=historicDate,
                end_date='today')

    print(x.head())
    print(x.tail())
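
The history calls above return a DataFrame with (ticker, field) MultiIndex columns, which is why several snippets below call columns.droplevel(...). A small sketch of selecting from that structure, assuming from xbbg import blp; the dates are arbitrary:

hist = blp.bdh('BDEV LN Equity', flds=['px_bid', 'px_ask'],
               start_date='2022-01-01', end_date='today')
one_ticker = hist['BDEV LN Equity']              # DataFrame with px_bid / px_ask columns
one_column = hist[('BDEV LN Equity', 'px_ask')]  # a single (ticker, field) column as a Series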
Code Example #6
def getHistoryFromISIN(isin: str, fieldList: list):
    '''
    Function that takes an ISIN as a string and a list of fields, and returns a
    DataFrame with 2 years of history for that ISIN.
    '''

    # get dates to use (in the correct format)
    dateToUseToday = datetime.date.today()
    dateToUse2yAgo = dateToUseToday + datetime.timedelta(days=-365*2)
    
    print("let's go between today and 2 years ago")
    print('today:', dateToUseToday)
    print('2y ago:', dateToUse2yAgo)

    # fields that we want to get, e.g. ['px_bid', 'px_ask', 'blp_i_sprd_mid']

    # request from bbg returning a MultiIndex DataFrame object    
    bondDataHist = blp.bdh(
        tickers= isin+' Corp', flds=fieldList,
        start_date=dateToUse2yAgo, end_date=dateToUseToday,
        )


    return bondDataHist
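
An illustrative call, reusing the field names mentioned in the comment above; the ISIN is a placeholder.

hist = getHistoryFromISIN('XS0000000001', ['px_bid', 'px_ask', 'blp_i_sprd_mid'])
print(hist.tail())  # MultiIndex columns: ('<isin> Corp', field)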
Code Example #7
File: data_retrieval.py Project: jrollus/Regression
def get_relevant_data(tickers_list, date_start, date_end, data_source):
    """Get financial data from Bloomberg or Typhoon for a given set of underlying and type of data"""
    if data_source == 'Bloomberg':
        selected_data = blp.bdh(tickers=tickers_list, flds=['PX_LAST'],
                                start_date=date_start, end_date=date_end, adjust='all')

        if not selected_data.empty:
            selected_data.columns = selected_data.columns.droplevel(1)

    return selected_data
Code Example #8
def get_data_bdh(ticker, fields, start_date, end_date, override):
    """
    """
    data = blp.bdh(tickers=ticker,
                   flds=fields,
                   start_date=start_date,
                   end_date=end_date,
                   **override)
    data = data.T
    data = data.unstack()
    data = data.droplevel(0, axis=1)
    return data
Code Example #9
    def get_hack_data():

        df_IBOV = blp.bdh('BOVV11 BZ Equity', 'PX_LAST', '2017-12-29', '2021-04-30')
        df_SP   = blp.bdh('SPXI11 BZ Equity', 'PX_LAST', '2017-12-29', '2021-04-30')
        df_IMAB = blp.bdh('IMAB11 BZ Equity', 'PX_LAST', '2017-12-29', '2021-04-30')
        df_IRFM = blp.bdh('IRFM11 BZ Equity', 'PX_LAST', '2017-12-29', '2021-04-30')

        df_IBOV.index = pd.to_datetime(df_IBOV.index)
        df_SP.index = pd.to_datetime(df_SP.index)
        df_IMAB.index = pd.to_datetime(df_IMAB.index)
        df_IRFM.index = pd.to_datetime(df_IRFM.index)

        df_IBOV = df_IBOV.pct_change().dropna()
        df_SP = df_SP.pct_change().dropna()
        df_IMAB = df_IMAB.pct_change().dropna()
        df_IRFM = df_IRFM.pct_change().dropna()

        df_IMAB_indice = blp.bdh('BZRFIMAB index', 'PX_LAST', '2017-12-30', '2019-05-20')
        df_IMAB_indice.index = pd.to_datetime(df_IMAB_indice.index)
        df_IMAB_indice = df_IMAB_indice.rename(columns ={'BZRFIMAB index' : 'IMAB11 BZ Equity'})
        df_IMAB_indice = df_IMAB_indice.pct_change().dropna()
        df_IMAB = pd.concat([df_IMAB_indice/1000, df_IMAB])

        df_IRFM_indice = blp.bdh('BZRFIRFM Index', 'PX_LAST', '2017-12-30', '2019-09-23')
        df_IRFM_indice.index = pd.to_datetime(df_IRFM_indice.index)
        df_IRFM_indice = df_IRFM_indice.rename(columns ={'BZRFIRFM Index' : 'IRFM11 BZ Equity'})
        df_IRFM_indice = df_IRFM_indice.pct_change().dropna()
        df_IRFM = pd.concat([df_IRFM_indice/1000, df_IRFM])

        df = pd.concat([df_IBOV, df_SP, df_IMAB, df_IRFM], axis = 1).dropna()

        return df
Code Example #10
File: data_retrieval.py Project: jrollus/Correlation
def get_relevant_data(tickers_list, date_start, date_end, data_source):
    """Get financial data from Yahoo Finance for a given set of underlying and type of data"""
    if data_source == 'Yahoo':
        try:
            raw_data = web.DataReader(name=tickers_list, data_source='yahoo', start=date_start, end=date_end)
            selected_data = raw_data['Adj Close']
        except RemoteDataError:
            selected_data = pd.DataFrame(np.nan, index=[0], columns=tickers_list)

    elif data_source == 'Bloomberg':
        selected_data = blp.bdh(tickers=tickers_list, flds=['PX_LAST'],
                                start_date=date_start, end_date=date_end, adjust='all')

        if not selected_data.empty:
            selected_data.columns = selected_data.columns.droplevel(1)

    return selected_data
Code Example #11
    def bdh(cls,
            ticker,
            flds='PX_LAST',
            start_date='20200101',
            end_date=None,
            overrides=None):
        """
        Get multiple historical time-series data
        ~~~~~~~~~~~~~~~~
        d = BlpUtil.bdh('600570 CH Equity', flds='PX_LAST', start_date='2021-04-01')
        """
        params = {
            'tickers': ticker,
            'flds': flds,
            'start_date': start_date,
            'end_date': cls.get_date_str(end_date)
        }
        params = params if overrides is None else dict(
            list(params.items()) + list(overrides.items()))
        df = blp.bdh(**params)
        if df is not None and len(df) > 0:
            df = df[ticker]
            df = df.rename_axis('date').reset_index()
        return df
Code Example #12
def bdh(cusips, fecha):
    # single-date history: dirty mid price and mid yield-to-maturity for the given CUSIPs
    datos = blp.bdh(cusips, ['PX_DIRTY_MID', 'YLD_YTM_MID'], fecha, fecha)
    return datos
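
A minimal usage sketch; the identifier below is a placeholder CUSIP, and the single date is passed once because the function sends it as both start and end.

datos = bdh(['912828YK0 Govt'], '2023-06-30')
print(datos)  # dirty mid price and mid yield-to-maturity for that one date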
Code Example #13
    def run(self):

        def reconstruct_price_data(price_data):
            price_data = price_data.unstack().reset_index()
            price_data.columns = ["bbg_ticker", "item_name", "date", "item_value"]
            price_data["item_value"] = price_data["item_value"].astype("float")
            price_data["date"] = price_data["date"].astype("datetime64[ns]")
            return price_data

        def adjust_start_end_date_based_on_trade_data(this_trade_df, price_data):
            # halt flag dataframe
            active_df = price_data[price_data["item_name"] == "volume"].copy()
            active_df = active_df.dropna(subset=["item_value"])
            active_stock = list(active_df["bbg_ticker"].drop_duplicates())
            halt_list = [stock for stock in this_stock_list if stock not in active_stock]
            halt_df = pd.DataFrame(index=halt_list).reset_index().rename(columns={"index": "bbg_ticker"})
            halt_df["halt_flag"] = True
            logger.info("Got Halt Flag")

            # ipo or delist dataframe
            start_end_date_df = active_df.groupby(["bbg_ticker"])["date"].agg(["min", "max"])
            ipo_df = start_end_date_df[start_end_date_df["min"] != start_date].reset_index().rename(
                columns={"min": "ipo_date"}).drop(columns="max")
            delist_df = start_end_date_df[start_end_date_df["max"] != end_date].reset_index().rename(
                columns={"max": "delist_date"}).drop(columns="min")
            logger.info("Got IPO Date and Delist Date")

            # ipo return
            ipo_return_list = []
            if not ipo_df.empty:
                for ticker in list(ipo_df["bbg_ticker"].drop_duplicates()):
                    ipo_return = list(price_data[(price_data["item_name"] == "last_price") &
                                                 (price_data["bbg_ticker"] == ticker)].sort_values("date")[
                                          "item_value"].dropna())[:2]
                    ipo_return = (ipo_return[-1] / ipo_return[0] - 1) * 100
                    ipo_return_list.append(ipo_return)
            ipo_df["ipo_return"] = ipo_return_list
            logger.info("Got IPO Return")

            # get adjusted trade df
            if not halt_df.empty:
                this_trade_df = pd.merge(this_trade_df, halt_df, on=["bbg_ticker"], how="left")
                this_trade_df["halt_flag"] = this_trade_df["halt_flag"].fillna(False)
            else:
                this_trade_df["halt_flag"] = False

            if not ipo_df.empty:
                this_trade_df = pd.merge(this_trade_df, ipo_df, on=["bbg_ticker"], how="left")
            else:
                this_trade_df["ipo_date"] = pd.NaT
                this_trade_df["ipo_return"] = np.nan

            if not delist_df.empty:
                this_trade_df = pd.merge(this_trade_df, delist_df, on=["bbg_ticker"], how="left")
            else:
                this_trade_df["delist_date"] = pd.NaT

            this_trade_df["trade_start_date"] = [trade_start_date if pd.isnull(ipo_date) else ipo_date
                                                 for trade_start_date, ipo_date
                                                 in np.array(this_trade_df[["trade_start_date", "ipo_date"]])]
            this_trade_df["trade_end_date"] = [trade_end_date if pd.isnull(delist_date) else delist_date
                                               for trade_end_date, delist_date
                                               in np.array(this_trade_df[["trade_end_date", "delist_date"]])]
            return this_trade_df

        def get_beta(this_trade_df, price_data, funding_source):
            stock_beta_df = price_data[(price_data["item_name"] == "beta_adj_overridable") &
                                       (price_data["date"].isin(
                                           list(this_trade_df["trade_start_date"].drop_duplicates())))].copy()
            stock_beta_df = stock_beta_df[["bbg_ticker", "date", "item_value"]].rename(
                columns={"item_value": "stock_beta",
                         "date": "trade_start_date"})
            this_trade_df = pd.merge(this_trade_df, stock_beta_df, on=["bbg_ticker", "trade_start_date"], how="left")
            fund_beta_df = stock_beta_df[stock_beta_df["bbg_ticker"] == funding_source].rename(
                columns={"stock_beta": "fund_beta"})
            this_trade_df = pd.merge(this_trade_df, fund_beta_df.drop(columns=["bbg_ticker"]), on=["trade_start_date"],
                                     how="left")
            return this_trade_df

        def get_backtesting_returns(this_trade_df, price_data, funding_source, trade_start_date, trade_end_date):
            this_return_df = this_trade_df[(this_trade_df["trade_start_date"] == trade_start_date) &
                                           (this_trade_df["trade_end_date"] == trade_end_date) &
                                           (this_trade_df["halt_flag"] == False)].copy()
            if not this_return_df.empty:
                this_ticker_list = list(this_return_df["bbg_ticker"].drop_duplicates())
                this_price_data = price_data[(price_data["bbg_ticker"].isin(this_ticker_list + [funding_source])) &
                                             (price_data["date"] >= trade_start_date) &
                                             (price_data["date"] <= trade_end_date) &
                                             (price_data["item_name"] == "last_price")].copy()

                # calculate return [stock, funding, long_short]
                this_pivot_return_df = pd.pivot_table(this_price_data, index="date", columns="bbg_ticker",
                                                      values="item_value")
                this_pivot_return_df = this_pivot_return_df.pct_change()
                this_pivot_return_df = this_pivot_return_df.fillna(0)

                this_daily_stock_return_df = this_pivot_return_df.stack().reset_index().rename(columns={0: "daily_stock_return"})
                this_daily_fund_return_df = this_daily_stock_return_df[this_daily_stock_return_df["bbg_ticker"] == funding_source].rename(
                    columns={"daily_stock_return": "daily_fund_return"})

                this_pivot_return_df = (1 + this_pivot_return_df).cumprod() - 1
                this_stock_return_df = this_pivot_return_df.stack().reset_index().rename(columns={0: "stock_return"})
                this_fund_return_df = this_stock_return_df[this_stock_return_df["bbg_ticker"] == funding_source].rename(
                    columns={"stock_return": "fund_return"})

                this_backtest_df = this_return_df[["trade_id", "bbg_ticker"]].copy()
                this_backtest_df = pd.merge(this_backtest_df, this_daily_stock_return_df, on=["bbg_ticker"], how="left")
                this_backtest_df = pd.merge(this_backtest_df, this_daily_fund_return_df.drop(columns=["bbg_ticker"]), on=["date"], how="left")
                this_backtest_df = pd.merge(this_backtest_df, this_stock_return_df, on=["bbg_ticker", "date"], how="left")
                this_backtest_df = pd.merge(this_backtest_df, this_fund_return_df.drop(columns=["bbg_ticker"]), on=["date"], how="left")

                this_backtest_df[["stock_return", "fund_return", "daily_fund_return", "daily_stock_return"]] *= 100
                this_backtest_df["long_short_return"] = this_backtest_df["stock_return"] - this_backtest_df["fund_return"]

                # get date index
                this_backtest_df = pd.merge(this_backtest_df, this_trade_df[["trade_id", "effective_date"]],
                                            on=["trade_id"], how="left")
                this_backtest_df["date_index"] = [np.busday_count(pd.Timestamp(effect_date).date(), pd.Timestamp(date).date())
                                                  for date, effect_date in np.array(this_backtest_df[["date", "effective_date"]])]
                this_backtest_df = this_backtest_df.sort_values(["trade_id", "date_index"])

                # calculate drawdown
                this_backtest_df["roll_max_abs_return"] = this_backtest_df.groupby(["trade_id"])["stock_return"].cummax()
                this_backtest_df["roll_max_ls_return"] = this_backtest_df.groupby(["trade_id"])["long_short_return"].cummax()
                this_backtest_df["roll_abs_drawdown"] = this_backtest_df["stock_return"] - this_backtest_df["roll_max_abs_return"]
                this_backtest_df["roll_ls_drawdown"] = this_backtest_df["long_short_return"] - this_backtest_df["roll_max_ls_return"]
                this_backtest_df = this_backtest_df.drop(columns=["roll_max_abs_return", "roll_max_ls_return"])

            else:
                this_backtest_df = pd.DataFrame()
            return this_backtest_df

        trade_df_list = []
        backtest_df_list = []
        for date, start_date, end_date in np.array(self.trade_file[["effective_date",
                                                                    "trade_start_date",
                                                                    "trade_end_date"]].drop_duplicates()):

            logger.info("Updateing Effective Date: " + pd.Timestamp(date).strftime("%Y-%m-%d"))
            this_trade_df = self.trade_file[self.trade_file["effective_date"] == date].copy()
            this_stock_list = list(this_trade_df["bbg_ticker"].drop_duplicates())
            this_id_list = this_stock_list + [self.funding_source]

            # get price_data from bloomberg from xbbg
            price_data = blp.bdh(
                tickers=this_id_list, flds=["last_price", "volume", "beta_adj_overridable"],
                start_date=pd.Timestamp(start_date).strftime("%Y-%m-%d"),
                end_date=pd.Timestamp(end_date).strftime("%Y-%m-%d"),
            )
            logger.info("Got Price Data from BBG for Effective Date: " + pd.Timestamp(date).strftime("%Y-%m-%d"))

            # Reconstruct price_data
            price_data = reconstruct_price_data(price_data=price_data)
            logger.info("Reconstructed Price Data for Effective Date: " + pd.Timestamp(date).strftime("%Y-%m-%d"))

            # Adjust Start End Date based on Halt Flag, IPO Date and Delist Date
            this_trade_df = adjust_start_end_date_based_on_trade_data(this_trade_df=this_trade_df, price_data=price_data)
            logger.info("Adjusted Trade Start End Date based on Halt Flag, IPO Date and Delist Date for Effective Date: " + pd.Timestamp(date).strftime("%Y-%m-%d"))

            # get final trade dataframe with beta
            this_trade_df = get_beta(this_trade_df=this_trade_df, price_data=price_data,
                                     funding_source=self.funding_source)
            trade_df_list.append(this_trade_df)
            logger.info("Got Final Trade DataFrame with Beta for Effective Date: " + pd.Timestamp(date).strftime("%Y-%m-%d"))

            # get backtesting returns
            for trade_start_date, trade_end_date in np.array(this_trade_df[["trade_start_date",
                                                                            "trade_end_date"]].drop_duplicates()):
                this_backtest_df = get_backtesting_returns(this_trade_df=this_trade_df, price_data=price_data,
                                                           funding_source=self.funding_source,
                                                           trade_start_date=trade_start_date, trade_end_date=trade_end_date)
                if not this_backtest_df.empty:
                    backtest_df_list.append(this_backtest_df)
            logger.info("Got BackTesting Returns for Effective Date: " + pd.Timestamp(date).strftime("%Y-%m-%d"))

        trade_df = pd.concat(trade_df_list, sort=True)
        backtest_df = pd.concat(backtest_df_list, sort=True)

        trade_df.to_csv(self.output_hsci_trade_file_path, index=False)
        backtest_df.to_csv(self.output_hsci_backtest_file_path, index=False)

        logger.info('''
        Output Trade DataFrame to:
        %s
        Output BackTest DataFrame to:
        %s
        ''' % (self.output_hsci_trade_file_path, self.output_hsci_backtest_file_path))
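
A small self-contained sketch of what the reconstruct_price_data helper above does: it melts the (ticker, field) MultiIndex columns returned by blp.bdh into a long (bbg_ticker, item_name, date, item_value) table. The ticker and numbers here are toy placeholders.

import pandas as pd

cols = pd.MultiIndex.from_product([['700 HK Equity'], ['last_price', 'volume']])
toy = pd.DataFrame([[350.0, 1.0e6], [355.0, 1.2e6]],
                   index=pd.to_datetime(['2023-01-03', '2023-01-04']),
                   columns=cols)

long_toy = toy.unstack().reset_index()  # same reshape as in reconstruct_price_data
long_toy.columns = ["bbg_ticker", "item_name", "date", "item_value"]
print(long_toy)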
Code Example #14
File: data_pull-checkpoint.py Project: xbbld4x/CMAs
equity_name_list = list(
    filter(
        None,
        list(
            {k: v
             for (k, v) in cma.val_dict.items()
             if 'equity_us_name' in k}.values())))
equity_list = list(
    filter(
        None,
        list(
            {k: v
             for (k, v) in cma.val_dict.items()
             if 'equity_us_code' in k}.values())))
equity_dictionary = dict(zip(equity_list, equity_name_list))

# %%
equity_returns = blp.bdh(tickers=equity_list,
                         flds=data_return,
                         start_date=start_date_str,
                         end_date=end_date_str,
                         Per='M')

# Rename and reorder columns
equity_returns.columns = equity_returns.columns.droplevel(1)
equity_returns.columns = equity_returns.columns.map(equity_dictionary)

# Convert index to datetime
equity_returns.index = pd.to_datetime(equity_returns.index)

# Adjust dataframe for varying month end dates
equity_returns = equity_returns.resample('M', axis=0).mean()

# %% [markdown]
# ## Non-USD
Code Example #15
con.start()  # 'con' is a Bloomberg connection object created elsewhere in the original script
today = datetime.datetime.now()
s_date = '2011-01-01'
e_date = today
#read ticker list
names = ['Tickers']
tix = pd.read_csv("tickers.csv", names=names).values.tolist()
# create a global df for all prices; the index is weekly (Fridays)
global_index = pd.date_range(start=s_date, end=e_date, freq='W-FRI')
global_df = pd.DataFrame(index=global_index)

# %% bloomberg data requests
# loop over the tickers in tix
for tt in tix:
    test_temp = blp.bdh(tickers=tt,
                        flds=['last_price'],
                        start_date=s_date,
                        end_date=e_date,
                        Quote='G',
                        Per='W',
                        Fill='B',
                        Days='W')
    # get the security name (fetched but not used below) and use the ticker as the column header
    tick_name = blp.bdp(tickers=tt, flds=['Security_Name'])
    header = tt[0]  # tt is a one-element list holding the ticker string
    test_temp.columns = [header]
    #merge current df with global df
    global_df = global_df.join(test_temp, how='outer')

global_df.to_csv('prices.csv')
Code Example #16
File: VaR.py Project: KelianF/RiskMonitor
"""
Created on Thu Mar 19 14:12:16 2020

@author: HanaFI
"""

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from xbbg import blp

stats.norm.ppf(0.95)  # z-score for a one-sided 95% confidence level (~1.645)

Start = "20150101"
End = pd.to_datetime("today").strftime("%Y%m%d")
df = blp.bdh(tickers=["CO1 Comdty"], start_date=Start, end_date=End)
df.columns = ["CO"]

Ret = (np.log(df) - np.log(df.shift(1))).dropna()


def VaRCovariance(Ret, ZScore=1.65, Nominal=1000):
    '''
    Variance-Covariance Method
    Z Score: 95%: 1.65, 99%: 2.33
    Formula:
    Mean - (zscore * PortStDev) * Nominal
    '''
    var_pct = (ZScore * Ret[-252:].std()).to_frame(name="VaR %")
    var_nominal = (ZScore * Ret[-252:].std()) * Nominal  # mean daily return assumed ~0
    return var_pct, var_nominal
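
A minimal call of the function above, continuing from the Ret log-return series built earlier in this snippet; the 1.65 z-score corresponds to the 95% level quoted in the docstring.

var_pct, var_nominal = VaRCovariance(Ret, ZScore=1.65, Nominal=1000)
print(var_pct)      # one-day 95% VaR in return terms
print(var_nominal)  # the same figure scaled by the 1000 nominal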
Code Example #17
import pandas as pd
import numpy as np

from xbbg import blp, pipeline

blp.__version__

blp.bdh(
    tickers='SHCOMP Index', flds=['high', 'low', 'last_price'],
    start_date='2019-11', end_date='2020', Per='W', Fill='P', Days='A',
)

cur_dt = pd.Timestamp('today', tz='America/New_York').date()
recent = pd.bdate_range(end=cur_dt, periods=2, tz='America/New_York')
pre_dt = max(filter(lambda dd: dd < cur_dt, recent))
pre_dt.date()

blp.bdtick('QQQ US Equity', dt=pre_dt).tail(10)

# live subscription -- requires an async context (e.g. a Jupyter notebook with autoawait enabled)
async for snap in blp.live(['ESA Index', 'NQA Index']):
    print(snap)

cur_dt = pd.Timestamp('today', tz='America/New_York').date()
recent = pd.bdate_range(end=cur_dt, periods=2, tz='America/New_York')
pre_dt = max(filter(lambda dd: dd < cur_dt, recent))

fx = blp.bdib('JPY Curncy', dt=pre_dt)
jp = pd.concat([
    blp.bdib(ticker, dt=pre_dt, session='day')
    for ticker in ['7974 JP Equity', '9984 JP Equity']
], axis=1)
Code Example #18
    def init_histo(self, ticker, fields, startdate, enddate):
        self.tick = ticker
        self.histo_data = blp.bdh(tickers=ticker,
                                  flds=fields,
                                  start_date=startdate,
                                  end_date=enddate)