Example #1
File: blp.py Project: DT021/xbbg
def bds(tickers, flds, **kwargs) -> pd.DataFrame:
    """
    Bloomberg block data

    Args:
        tickers: ticker(s)
        flds: field
        **kwargs: other overrides for query

    Returns:
        pd.DataFrame: block data
    """
    logger = logs.get_logger(bds, **kwargs)

    service = conn.bbg_service(service='//blp/refdata', **kwargs)
    request = service.createRequest('ReferenceDataRequest')

    if isinstance(tickers, str):
        data_file = storage.ref_file(ticker=tickers,
                                     fld=flds,
                                     has_date=True,
                                     ext='pkl',
                                     **kwargs)
        if files.exists(data_file):
            logger.debug(f'Loading Bloomberg data from: {data_file}')
            return pd.DataFrame(pd.read_pickle(data_file))

        process.init_request(request=request,
                             tickers=tickers,
                             flds=flds,
                             **kwargs)
        logger.debug(f'Sending request to Bloomberg ...\n{request}')
        conn.send_request(request=request, **kwargs)

        res = pd.DataFrame(
            process.rec_events(func=process.process_ref, **kwargs))
        if kwargs.get('raw', False): return res
        if res.empty or any(fld not in res for fld in ['ticker', 'field']):
            return pd.DataFrame()

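        # index by ticker only (drop the field level), then standardize
        # column names via pipeline.standard_cols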
        data = (
            res.set_index(['ticker', 'field'])
            .droplevel(axis=0, level=1)
            .rename_axis(index=None)
            .pipe(pipeline.standard_cols, col_maps=kwargs.get('col_maps', None))
        )
        if data_file:
            logger.debug(f'Saving Bloomberg data to: {data_file}')
            files.create_folder(data_file, is_file=True)
            data.to_pickle(data_file)
        return data

    return pd.DataFrame(
        pd.concat(
            [bds(tickers=ticker, flds=flds, **kwargs) for ticker in tickers],
            sort=False))
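
A minimal usage sketch for this version of bds, assuming a live Bloomberg session and that the module is importable as `from xbbg import blp` (inferred from the File/Project line above); the tickers, fields, and overrides are illustrative:

from xbbg import blp  # assumed import path for the blp.py shown above

# Dividend history for a single ticker; extra kwargs become query overrides.
# Results are pickled per ticker/field, so a repeat call loads from disk.
dvd = blp.bds(
    'NVDA US Equity', 'DVD_Hist_All',
    DVD_Start_Dt='20180301', DVD_End_Dt='20181031',
)

# A list of tickers recurses into the single-ticker branch per ticker
# and concatenates the resulting frames.
wts = blp.bds(['DJI Index', 'SPX Index'], 'Indx_MWeight_Hist')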
Example #2
def bdp(tickers, flds, cache=False, **kwargs):
    """
    Get reference data and save to cache

    Args:
        tickers: tickers
        flds: fields to query
        cache: bool - use cache to store data
        **kwargs: overrides

    Returns:
        pd.DataFrame

    Examples:
        >>> bdp('IQ US Equity', 'Crncy', raw=True)
                 ticker  field value
        0  IQ US Equity  Crncy   USD
        >>> bdp('IQ US Equity', 'Crncy').reset_index()
                 ticker crncy
        0  IQ US Equity   USD
    """
    logger = logs.get_logger(bdp, level=kwargs.pop('log', logs.LOG_LEVEL))
    con, _ = create_connection()
    ovrds = assist.proc_ovrds(**kwargs)

    logger.info(
        f'loading reference data from Bloomberg:\n'
        f'{assist.info_qry(tickers=tickers, flds=flds)}'
    )
    data = con.ref(tickers=tickers, flds=flds, ovrds=ovrds)
    if not cache: return [data]

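    # pickle each result row to its own file; return only rows that were
    # not already cached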
    qry_data = []
    for r, snap in data.iterrows():
        subset = [r]
        data_file = storage.ref_file(
            ticker=snap.ticker, fld=snap.field, ext='pkl', cache=cache, **kwargs
        )
        if data_file:
            if not files.exists(data_file): qry_data.append(data.iloc[subset])
            files.create_folder(data_file, is_file=True)
            data.iloc[subset].to_pickle(data_file)

    return qry_data
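
A brief sketch of how this variant might be called, assuming a working pdblp connection behind create_connection; note that both branches return a list of DataFrames, not a bare frame (tickers and fields are illustrative):

# cache=False: the full pdblp reference result comes back as a one-element list.
[ref] = bdp('IQ US Equity', 'Crncy')

# cache=True: each row is pickled to its own file, and only rows that were
# not already on disk are returned.
fresh = bdp(['IQ US Equity', 'NVDA US Equity'], ['Crncy', 'Name'], cache=True)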
Example #3
def bdp_bds_cache(func, tickers, flds, **kwargs) -> ToQuery:
    """
    Find cached `BDP` / `BDS` queries

    Args:
        func: function name - bdp or bds
        tickers: tickers
        flds: fields
        **kwargs: other kwargs

    Returns:
        ToQuery(tickers, flds, cached_data) - tickers / fields still to be
        queried plus DataFrames already loaded from cache
    """
    cache_data = []
    log_level = kwargs.get('log', logs.LOG_LEVEL)
    logger = logs.get_logger(bdp_bds_cache, level=log_level)
    has_date = kwargs.pop('has_date', func == 'bds')
    cache = kwargs.get('cache', True)

    tickers = utils.flatten(tickers)
    flds = utils.flatten(flds)
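    # hit matrix over (ticker, field): 0 = needs query, 1 = loaded from cache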
    loaded = pd.DataFrame(data=0, index=tickers, columns=flds)

    for ticker, fld in product(tickers, flds):
        data_file = storage.ref_file(
            ticker=ticker,
            fld=fld,
            has_date=has_date,
            cache=cache,
            ext='pkl',
            **{k: v for k, v in kwargs.items() if k not in EXC_COLS})
        if not files.exists(data_file): continue
        logger.debug(f'reading from {data_file} ...')
        cache_data.append(pd.read_pickle(data_file))
        loaded.loc[ticker, fld] = 1

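    # keep only the (ticker, field) pairs that were never found on disk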
    to_qry = (
        loaded.where(loaded == 0)
        .dropna(how='all', axis=1)
        .dropna(how='all', axis=0)
    )

    return ToQuery(tickers=to_qry.index.tolist(),
                   flds=to_qry.columns.tolist(),
                   cached_data=cache_data)
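
A sketch of the intended call pattern, assuming ToQuery is a namedtuple with the tickers / flds / cached_data fields used above, and that a bdp-style query function handles the cache misses:

import pandas as pd

qry = bdp_bds_cache('bdp', ['NVDA US Equity', 'AAPL US Equity'], 'Crncy')

frames = list(qry.cached_data)
if qry.tickers and qry.flds:
    # Only the (ticker, field) pairs missing from cache go out to Bloomberg.
    frames.append(bdp(tickers=qry.tickers, flds=qry.flds))
res = pd.concat(frames, sort=False) if frames else pd.DataFrame()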
Example #4
def _bds_(
    ticker: str,
    fld: str,
    logger: logs.logging.Logger,
    use_port: bool = False,
    **kwargs,
) -> pd.DataFrame:
    """
    Get BDS data for a single ticker
    """
    if 'has_date' not in kwargs: kwargs['has_date'] = True
    data_file = storage.ref_file(ticker=ticker, fld=fld, ext='pkl', **kwargs)
    if files.exists(data_file):
        logger.debug(f'Loading Bloomberg data from: {data_file}')
        return pd.DataFrame(pd.read_pickle(data_file))

    request = process.create_request(
        service='//blp/refdata',
        request='PortfolioDataRequest' if use_port else 'ReferenceDataRequest',
        **kwargs,
    )
    process.init_request(request=request, tickers=ticker, flds=fld, **kwargs)
    logger.debug(f'Sending request to Bloomberg ...\n{request}')
    conn.send_request(request=request, **kwargs)

    res = pd.DataFrame(process.rec_events(func=process.process_ref, **kwargs))
    if kwargs.get('raw', False): return res
    if res.empty or any(fld not in res for fld in ['ticker', 'field']):
        return pd.DataFrame()

    data = (
        res.set_index(['ticker', 'field'])
        .droplevel(axis=0, level=1)
        .rename_axis(index=None)
        .pipe(pipeline.standard_cols, col_maps=kwargs.get('col_maps', None))
    )
    if data_file:
        logger.debug(f'Saving Bloomberg data to: {data_file}')
        files.create_folder(data_file, is_file=True)
        data.to_pickle(data_file)

    return data
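
One way a public wrapper might fan this private helper out across tickers and fields; bds_many is hypothetical, and the flattening / concat conventions are borrowed from the other examples here rather than from the source file:

def bds_many(tickers, flds, **kwargs) -> pd.DataFrame:
    """Hypothetical wrapper: one _bds_ call per (ticker, field) pair."""
    logger = logs.get_logger(bds_many, **kwargs)
    return pd.concat(
        [
            _bds_(ticker=ticker, fld=fld, logger=logger, **kwargs)
            for ticker in utils.flatten(tickers)
            for fld in utils.flatten(flds)
        ],
        sort=False,
    )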
Example #5
def bds(tickers, flds, cache=False, **kwargs):
    """
    Download block data from Bloomberg

    Args:
        tickers: ticker(s)
        flds: field(s)
        cache: whether to read from / write to local cache
        **kwargs: other overrides for query
          -> raw: raw output from `pdblp` library, default False

    Returns:
        pd.DataFrame: block data

    Examples:
        >>> import os
        >>>
        >>> pd.options.display.width = 120
        >>> s_dt, e_dt = '20180301', '20181031'
        >>> dvd = bds(
        ...     'NVDA US Equity', 'DVD_Hist_All',
        ...     DVD_Start_Dt=s_dt, DVD_End_Dt=e_dt, raw=True,
        ... )
        >>> dvd.loc[:, ['ticker', 'name', 'value']].head(8)
                   ticker                name         value
        0  NVDA US Equity       Declared Date    2018-08-16
        1  NVDA US Equity             Ex-Date    2018-08-29
        2  NVDA US Equity         Record Date    2018-08-30
        3  NVDA US Equity        Payable Date    2018-09-21
        4  NVDA US Equity     Dividend Amount          0.15
        5  NVDA US Equity  Dividend Frequency       Quarter
        6  NVDA US Equity       Dividend Type  Regular Cash
        7  NVDA US Equity       Declared Date    2018-05-10
        >>> dvd = bds(
        ...     'NVDA US Equity', 'DVD_Hist_All',
        ...     DVD_Start_Dt=s_dt, DVD_End_Dt=e_dt,
        ... )
        >>> dvd.reset_index().loc[:, ['ticker', 'ex_date', 'dividend_amount']]
                   ticker     ex_date  dividend_amount
        0  NVDA US Equity  2018-08-29             0.15
        1  NVDA US Equity  2018-05-23             0.15
        >>> if not os.environ.get('BBG_ROOT', ''):
        ...     os.environ['BBG_ROOT'] = f'{files.abspath(__file__, 1)}/tests/data'
        >>> idx_kw = dict(End_Dt='20181220', cache=True)
        >>> idx_wt = bds('DJI Index', 'Indx_MWeight_Hist', **idx_kw)
        >>> idx_wt.round(2).tail().reset_index(drop=True)
          index_member  percent_weight
        0         V UN            3.82
        1        VZ UN            1.63
        2       WBA UW            2.06
        3       WMT UN            2.59
        4       XOM UN            2.04
        >>> idx_wt = bds('DJI Index', 'Indx_MWeight_Hist', **idx_kw)
        >>> idx_wt.round(2).head().reset_index(drop=True)
          index_member  percent_weight
        0      AAPL UW            4.65
        1       AXP UN            2.84
        2        BA UN            9.29
        3       CAT UN            3.61
        4      CSCO UW            1.26
    """
    logger = logs.get_logger(bds, level=kwargs.pop('log', logs.LOG_LEVEL))
    has_date = kwargs.pop('has_date', True)
    con, _ = create_connection()
    ovrds = assist.proc_ovrds(**kwargs)

    logger.info(
        f'loading block data from Bloomberg:\n'
        f'{assist.info_qry(tickers=tickers, flds=flds)}'
    )
    data = con.bulkref(tickers=tickers, flds=flds, ovrds=ovrds)
    if not cache: return [data]

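    # pickle each (ticker, field) group to its own file; return only groups
    # that were not already cached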
    qry_data = []
    for (ticker, fld), grp in data.groupby(['ticker', 'field']):
        data_file = storage.ref_file(
            ticker=ticker, fld=fld, has_date=has_date, ext='pkl', cache=cache, **kwargs
        )
        if data_file:
            if not files.exists(data_file): qry_data.append(grp)
            files.create_folder(data_file, is_file=True)
            grp.reset_index(drop=True).to_pickle(data_file)

    return qry_data
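
A usage sketch mirroring the doctest above, assuming BBG_ROOT points at a writable cache directory (the path below is illustrative):

import os

os.environ.setdefault('BBG_ROOT', '/tmp/bbg_cache')

# First call downloads, pickles each (ticker, field) group, and returns
# the groups that were not previously cached.
fresh = bds('DJI Index', 'Indx_MWeight_Hist', End_Dt='20181220', cache=True)

# On a repeat call every group is already on disk, so per the code above the
# returned list is empty (the pickle files are simply refreshed in place).
again = bds('DJI Index', 'Indx_MWeight_Hist', End_Dt='20181220', cache=True)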