Ejemplo n.º 1
0
    def test_apply_offset(self):
        """A single signed offset shifts self.t by business days or to a weekday."""
        cases = [
            ("+1bd", datetime(2016, 8, 2)),   # next business day
            ("-Mon", datetime(2016, 7, 25)),  # previous Monday
            ("+Mon", datetime(2016, 8, 8)),   # next Monday
        ]
        for offset, expected in cases:
            self.assertEqual(date_shift(self.t, offset), expected)
Ejemplo n.º 2
0
def load_eur():
    """Return cash rate for EUR and DEM prior to the introduction of EUR.

    Stitches together, in chronological order:
      * German bank rate up to 1936-06 (Quandl),
      * hand-entered WW2-era rates for 1936-06 .. 1948-06,
      * German discount rate from a local CSV up to 1959,
      * EUR 3M interbank rate 1960 .. 1998 (FRED),
      * 3M EURIBOR from 1999 onward (Quandl).

    Returns:
        pd.Series named ``cash_rate_eur``, indexed by date.
    """
    bank_rate = quandl.get(CashFile.GER_BANKRATE.value, api_key=quandl_token)

    # WW2-era rates are only known at three change points; forward-fill them
    # onto a monthly (month-end) grid.  .ffill() replaces the deprecated
    # fillna(method="pad") with identical behavior.
    ww2_data = pd.DataFrame([4.0, 3.5, 5.0],
                            index=[
                                datetime(1936, 6, 30),
                                datetime(1940, 4, 9),
                                datetime(1948, 6, 28)
                            ])
    ww2_month = pd.date_range('1936-06-01', '1948-06-01', freq='M')
    ww2_month = pd.DataFrame(index=ww2_month)
    ww2_data = pd.concat((ww2_data, ww2_month), axis=1).ffill()

    def parser(d):
        """Parse a 'YYYY-MM' label and shift it to the business month end."""
        return date_shift(datetime.strptime(d, "%Y-%m"), "+BMonthEnd")

    filename = join(DATA_DIRECTORY, 'cash_rate', 'eur', 'BBK01.SU0112.csv')
    discount_rate = pd.read_csv(filename,
                                index_col=0,
                                skiprows=[1, 2, 3, 4],
                                usecols=[0, 1],
                                engine="python",
                                skipfooter=95,
                                parse_dates=True,
                                date_parser=parser)

    ib_rate = DataReader(CashFile.EUR_3M_IB_RATE.value, "fred", START_DATE)
    libor = quandl.get(CashFile.EUR_3M_EURIBOR.value, api_key=quandl_token)

    # The pieces cover disjoint date ranges, so summing across columns
    # effectively selects the one non-NaN value per row.
    data = (pd.concat(
        (bank_rate[:"1936-06"].ffill(), ww2_data,
         discount_rate[:"1959"].ffill(),
         to_monthend(ib_rate['1960':"1998"].ffill()),
         libor['1999':].ffill()),
        axis=1).sum(axis=1).rename("cash_rate_eur"))
    return data
Ejemplo n.º 3
0
def data_to_mongo(symbol, data):
    """Persist *data* under *symbol*, appending only rows newer than what is stored."""
    metadata = {'last_datetime': data.index[-1]}

    if not library.has_symbol(symbol):
        # First write: store everything.
        library.write(symbol, data, metadata=metadata)
        return

    # Symbol exists — append only the non-overlapping part.
    existing_up_to = library.read(symbol).metadata['last_datetime']
    tail = library.read(symbol, date_range=DateRange(existing_up_to))
    if len(tail.data) != 1:
        raise ValueError('metadata and database are not consistent.')

    save_from = date_shift(existing_up_to, data_frequency)
    library.append(symbol, data.loc[save_from:, :], metadata=metadata)
Ejemplo n.º 4
0
def load_jpy():
    """Return cash rate for JPY.

    Combines the official discount rate (local CSV, 1882-10 .. 1985-12)
    with the 3M JPY LIBOR from FRED (1986 onward).

    Returns:
        pd.Series named ``cash_rate_jpy``, indexed by date.
    """
    libor = DataReader(CashFile.JPY_3M_LIBOR.value, 'fred', START_DATE)

    def parser(d):
        """Parse a 'YYYY/MM' label and shift it to the business month end."""
        return date_shift(datetime.strptime(d, "%Y/%m"), "+BMonthEnd")

    filename = join(DATA_DIRECTORY, 'cash_rate', 'jpy', 'discount_rate.csv')
    discount_rate = pd.read_csv(filename,
                                index_col=0,
                                usecols=[0, 1],
                                parse_dates=True,
                                date_parser=parser)
    # The two pieces cover disjoint date ranges, so summing across columns
    # selects the one non-NaN value per row.  .ffill() replaces the
    # deprecated fillna(method="pad") with identical behavior.
    data = (pd.concat(
        (discount_rate["1882-10":"1985-12"].astype("float").ffill(),
         libor['1986':].ffill()),
        axis=1).sum(axis=1).rename("cash_rate_jpy"))
    return data
Ejemplo n.º 5
0
            raise ValueError('metadata and database are not consistent.')

        save_from = date_shift(existing_up_to, data_frequency)
        save_data = data.loc[save_from:, :]
        logger.debug('Append {} rows'.format(len(save_data)))
        library.append(symbol, save_data, metadata=metadata)
    else:
        logger.debug('Write {} rows'.format(len(data)))
        library.write(symbol, data, metadata=metadata)


if __name__ == '__main__':
    for ccy_pair in ccy_pairs:
        logger.info('Updating {}'.format(ccy_pair))
        last_update = get_last_update(ccy_pair)
        download_date = date_shift(last_update, '+MonthBegin')
        last_month = date_shift(datetime.today(), '-MonthEnd')

        while download_date < last_month:
            logger.info('Downloading {}-{}'.format(download_date.year,
                                                   download_date.month))
            download_url = get_download_url(ccy_pair, download_date)
            filename = download_url.split('/')[-1].replace('zip', 'csv')

            logger.debug('Try download: {}'.format(download_url))
            download_zip_from_url(download_url, download_to)

            logger.debug('Loading csv: {}'.format(filename))
            csv_filepath = join(download_to, filename)
            data = get_df_from_csv(csv_filepath)
            # remove duplicate rows, also sometimes csv contains NaNs.
Ejemplo n.º 6
0
 def test_multi_apply(self):
     """Chained offsets apply left to right: month end, then back 3 business days."""
     expected = datetime(2016, 8, 26)
     self.assertEqual(date_shift(self.t, "+MonthEnd-3bd"), expected)
Ejemplo n.º 7
0
 def test_apply_offset_without_int(self):
     """An offset without a count (no leading integer) still applies once."""
     expected = datetime(2016, 8, 31)
     self.assertEqual(date_shift(self.t, "+MonthEnd"), expected)