Example #1
0
def bdh(tickers,
        flds=None,
        start_date=None,
        end_date='today',
        adjust=None,
        **kwargs) -> pd.DataFrame:
    """
    Fetch Bloomberg historical data and pivot it to a wide frame.

    Args:
        tickers: ticker(s)
        flds: field(s) - defaults to `Last_Price` when omitted
        start_date: start date - defaults to 8 weeks before `end_date`
        end_date: end date - default today
        adjust: `all`, `dvd`, `normal`, `abn` (=abnormal), `split`, `-` or None
                exact match of above words will adjust for corresponding events
                Case 0: `-` no adjustment for dividend or split
                Case 1: `dvd` or `normal|abn` will adjust for all dividends except splits
                Case 2: `adjust` will adjust for splits and ignore all dividends
                Case 3: `all` == `dvd|split` == adjust for all
                Case 4: None == Bloomberg default OR use kwargs
        **kwargs: overrides

    Returns:
        pd.DataFrame indexed by date with (ticker, field) column levels;
        the raw long-format frame when `raw=True` is passed in kwargs
    """
    logger = logs.get_logger(bdh, **kwargs)

    # Fill in default field and resolve the date window
    if flds is None:
        flds = ['Last_Price']
    e_dt = utils.fmt_dt(end_date, fmt='%Y%m%d')
    if start_date is None:
        start_date = pd.Timestamp(e_dt) - pd.Timedelta(weeks=8)
    s_dt = utils.fmt_dt(start_date, fmt='%Y%m%d')

    # Build, populate and dispatch the Bloomberg request
    request = process.create_request(
        service='//blp/refdata',
        request='HistoricalDataRequest',
        **kwargs,
    )
    process.init_request(
        request=request,
        tickers=tickers,
        flds=flds,
        start_date=s_dt,
        end_date=e_dt,
        adjust=adjust,
        **kwargs,
    )
    logger.debug(f'Sending request to Bloomberg ...\n{request}')
    conn.send_request(request=request, **kwargs)

    # Collect streamed events into a long-format frame
    res = pd.DataFrame(process.rec_events(process.process_hist, **kwargs))
    if kwargs.get('raw', False):
        return res
    if res.empty or any(fld not in res for fld in ['ticker', 'date']):
        return pd.DataFrame()

    # Pivot long -> wide: rows are dates, columns are (ticker, field)
    wide = (
        res
        .set_index(['ticker', 'date'])
        .unstack(level=0)
        .rename_axis(index=None, columns=[None, None])
        .swaplevel(0, 1, axis=1)
    )
    # Reorder columns to match the caller-supplied ticker/field order
    return (
        wide
        .reindex(columns=utils.flatten(tickers), level=0)
        .reindex(columns=utils.flatten(flds), level=1)
    )
Example #2
0
def bdp_bds_cache(func, tickers, flds, **kwargs) -> ToQuery:
    """
    Find cached `BDP` / `BDS` queries

    Scans local pickle files for every (ticker, field) pair and splits the
    request into data already on disk and data that still needs querying.

    Args:
        func: function name - bdp or bds
        tickers: tickers
        flds: fields
        **kwargs: other kwargs

    Returns:
        ToQuery(ticker, flds, kwargs)
    """
    logger = logs.get_logger(
        bdp_bds_cache, level=kwargs.get('log', logs.LOG_LEVEL))
    has_date = kwargs.pop('has_date', func == 'bds')
    use_cache = kwargs.get('cache', True)

    tickers = utils.flatten(tickers)
    flds = utils.flatten(flds)

    # 0 = still needs querying, 1 = found on disk
    hits = pd.DataFrame(data=0, index=tickers, columns=flds)
    cached = []

    # Hoisted loop-invariant: kwargs relevant to cache-file resolution
    ref_kwargs = {k: v for k, v in kwargs.items() if k not in EXC_COLS}

    for tkr, fld in product(tickers, flds):
        data_file = storage.ref_file(
            ticker=tkr,
            fld=fld,
            has_date=has_date,
            cache=use_cache,
            ext='pkl',
            **ref_kwargs,
        )
        if not files.exists(data_file):
            continue
        logger.debug(f'reading from {data_file} ...')
        cached.append(pd.read_pickle(data_file))
        hits.loc[tkr, fld] = 1

    # Keep only the (ticker, field) combinations with no cache hit
    remaining = (
        hits.where(hits == 0)
        .dropna(how='all', axis=1)
        .dropna(how='all', axis=0)
    )

    return ToQuery(
        tickers=remaining.index.tolist(),
        flds=remaining.columns.tolist(),
        cached_data=cached,
    )