Esempio n. 1
0
def foreinfl(n=120, alpha=1.0, beta=0.3673):
    '''Forecast Unified Inflation 1-year ahead per https://git.io/infl
       which is a rendering of fecon235/nb/fred-inflation.ipynb.
       SUMMARY output: [Average, "infl-date", GMR, HW, BEI]
       e.g.  [2.2528, '2018-01-01', 1.5793, 3.0791, 2.1000]
       where Average is the mean of three orthogonal methods:
       GMR for geometric mean rate, HW for Holt-Winters time-series,
       and BEI for Break-even Inflation from the Treasury bond market.
       Default n denotes 120-month history, i.e. last 10 years.
    '''
    #  Holt-Winters parameters alpha and beta were optimized
    #  over the 1960-2018 dataset consisting of 697 monthly points.
    recent = tail(get(m4infl), n)     # synthetic Unified Inflation, monthly.
    latest_month = str(recent.index[-1]).replace(" 00:00:00", "")
    #      ^Most recent month for CPI, CPIc, PCE, PCEc data.
    geo_rate = gemrat(recent, yearly=12)[0]
    #          ^Geometric Mean Rate over n months.
    hw_model = foreholt(recent, 12, alpha, beta)   # Holt-Winters model.
    hw_rate = (tailvalue(hw_model) - 1) * 100
    #          ^Convert forecasted level to rate.
    #  10-year BEI Break-even Inflation from the bond market;
    #  Treasury bond data will be much more recent than m4infl:
    bei_rate = tailvalue(todf(get(m4bond10) - get(m4tips10)))
    #  Final forecast is the AVERAGE of three orthogonal methods:
    estimates = [geo_rate, hw_rate, bei_rate]
    average = sum(estimates) / len(estimates)
    return [average, latest_month, geo_rate, hw_rate, bei_rate]
Esempio n. 2
0
def test_tool_fecon236_paste_function():
    '''Test xau and foo pasted together as xaufoo dataframe.'''
    #  Column names, shape, and the two tail values must all survive paste:
    assert list(xaufoo.columns) == ['XAU', 'FOO']
    assert xaufoo.shape == (30, 2)
    for pos, expected in [(0, 1393.75), (1, 6393.75)]:
        assert tool.tailvalue(xaufoo, pos=pos) == expected
    #  Index endpoints of the pasted frame:
    assert xaufoo.index[0] == pd.Timestamp('2013-03-08 00:00:00')
    assert xaufoo.index[-1] == pd.Timestamp('2013-04-18 00:00:00')
Esempio n. 3
0
def test_bootstrap_fecon236_writefile_normdiflog_read():
    '''Create normdiflog CSV file, then read it as dataframe for testing.'''
    fname = 'tests' + sep + 'tmp-xau-normdiflog.csv'
    bs.writefile_normdiflog(xau, filename=fname)
    #  Reuse fname rather than rebuilding the path string, so the
    #  write and read sides cannot silently drift apart:
    df = bs.readcsv(fname)
    assert [col for col in df.columns] == ['Y']
    #  diflog consumes one observation, so 30 prices yield 29 returns:
    assert df.shape == (29, 1)
    assert round(tool.tailvalue(df[:'2013-04-15']), 3) == -4.804
    assert round(tool.tailvalue(df), 3) == 0.295
Esempio n. 4
0
def getinflations(inflations=ml_infl):
    '''Normalize and average all inflation measures.'''
    #  Each index is rescaled so its current value equals 1,
    #  which gives every measure equal weight in the average.
    running = getdata_fred(inflations[0])
    running = running / float(tool.tailvalue(running))
    for code in inflations[1:]:
        series = getdata_fred(code)
        running += series / float(tool.tailvalue(series))
    return running / len(inflations)
Esempio n. 5
0
def optimize_holt(dataframe, grids=50, alphas=(0.0, 1.0), betas=(0.0, 1.0)):
    '''Optimize Holt-Winters parameters alpha and beta for given data.
       The alphas and betas are boundaries of respective explored regions.
       Function interpolates "grids" from its low bound to its high bound,
       inclusive. Final output: [alpha, beta, losspc, median absolute loss]
       TIP: narrow down alphas and betas using optimize_holt iteratively.
    '''
    if grids > 49:
        #  Brute-force exploration of every grid point is COMPUTATIONALLY
        #  INTENSE due to holt(), especially on very large primary data.
        #  Tip: truncate dataframe to recent data.
        system.warn("Optimizing Holt-Winters alphabetaloss may take TIME!")
    best = op.minBrute(fun=loss_holt,
                       funarg=(dataframe, ),
                       boundpairs=[alphas, betas],
                       grids=grids)
    #  minBrute yields a numpy array; unpack its two elements:
    alpha, beta = best
    #  Score the optimal parameters on the data:
    loss = loss_holt((alpha, beta), dataframe)
    #  Express loss as a percentage of the absolute tailvalue:
    losspc = (float(loss) / abs(tailvalue(dataframe))) * 100
    #  Python round() displays more cleanly than np.round / np.around:
    return [round(alpha, 4), round(beta, 4), round(losspc, 4), loss]
Esempio n. 6
0
def getm4infleu():
    '''Normalize and average Eurozone Consumer Prices.'''
    #  FRED carries only NSA data from Eurostat,
    #  so we shall use Holt-Winters levels.
    raw = getdata_fred('CP0000EZ17M086NEST')   # covers 17 countries.
    smoothed = hw.holtlevel(raw)
    #  Rescale so the most recent value equals 1:
    return smoothed / float(tool.tailvalue(smoothed))
Esempio n. 7
0
def getdeflator(inflation=m4infl):
    '''Construct a de-inflation dataframe suitable as multiplier.'''
    #  Deflated numbers are usually pegged to some arbitrary base year
    #  (where the value is probably 100).  Here the present is set to 1,
    #  so past entries carry increasing multiplicative "returns"
    #  which will yield current dollars.
    series = getfred(inflation)
    current = tool.tailvalue(series)
    return float(current) / series
Esempio n. 8
0
def test_group_fecon236_GET_w4cotr_metals_from_QUANDL_vSlow_oLocal():
    '''Test get() which uses getqdl() in qdl module.
       Here we get the CFTC Commitment of Traders Reports
       for gold and silver expressed as our position indicator.
    >>> print(qdl.w4cotr_metals)
    w4cotr_metals
    '''
    position = get(qdl.w4cotr_metals)
    observed = tool.tailvalue(position[:'2015-07-28'])
    assert round(observed, 3) == 0.461
Esempio n. 9
0
def test_qdl_fecon236_Check_xbt_prices_vSlow_oLocal():
    '''Check on xbt prices on various dates, only Local.'''
    #  Download Bitcoin prices from Quandl; todf renames the column:
    prices = tool.todf(qdl.getqdl(qdl.d7xbtusd), 'XBT')
    #  Known closing prices at three historical cutoffs:
    checkpoints = [('2014-02-01', 815.99),
                   ('2015-02-01', 220.72),
                   ('2016-02-01', 376.86)]
    for cutoff, expected in checkpoints:
        assert abs(tool.tailvalue(prices[:cutoff]) - expected) < 0.1
    #
    #  Q: Why only Local, i.e. oLocal?
    #
    #  Without revealing one's private authtoken.p,
    #  all Travis CI machines sharing an IP address
    #  will be considered one anonymous user, thus
    #  the limited calls to Quandl will be jointly exceeded,
    #  resulting in "HTTP Error 429: Too Many Requests".
    #  And the Travis job will fail for Quandl's server policy
    #  reasons, not the validity of the code.
    return
Esempio n. 10
0
def test_bootstrap_fecon236_ROUNDTRIP():
    '''Do a roundtrip using all the functionality.'''
    csvpath = 'tests' + sep + 'tmp-xau-normdiflog.csv'
    bs.writefile_normdiflog(xau, filename=csvpath)
    #  gemrat: "RuntimeWarning: invalid value encountered in log"
    #          due to smackdown in price on 2013-04-15,
    #          i.e. expect inaccuracies as a consequence.
    _, mu, sigma, _, _, _ = gemrat(xau, yearly=256, pc=False)
    #  population array gets repetitively hammered in memory
    #  for bootstrap resamplings:
    population = bs.csv2ret(csvpath, mean=mu, sigma=sigma, yearly=256)
    prices = bs.bsret2prices(29, population, inprice=1581.75, replace=False)
    #  Crucial fact: tail price is indifferent as to the order in which
    #                returns are selected without replacement,
    #                so long as ALL the returns are selected.
    #  Horrible assertion range, but our test data is just too short,
    #  and contains statistical abnormality per smackdown on 2013-04-15.
    #  In actuality: expecting 1393.75, but got 1386.112272.
    assert 1385.00 < tool.tailvalue(prices) < 1395.00
Esempio n. 11
0
def test_holtwinters_fecon236_check_xau_DataFrame():
    '''Check xau dataframe.'''
    observed = tool.tailvalue(xau)
    assert observed == 1393.75
Esempio n. 12
0
def test_fred_fecon236_check_xau_DataFrame():
    '''Check xau dataframe.'''
    assert list(xau.columns) == ['XAU']
    observed = tool.tailvalue(xau)
    assert observed == 1393.75
Esempio n. 13
0
def test_tool_fecon236_check_foo_DataFrame():
    '''Check foo dataframe which is just xau + 5000.00 increase.'''
    assert list(foo.columns) == ['FOO']
    observed = tool.tailvalue(foo)
    assert observed == 6393.75
Esempio n. 14
0
def test_tool_fecon236_lagdf_function():
    '''Test xaufoolag dataframe created by lagdf on xaufoo with lags=3.'''
    #  Number after underscore indicates lag.
    assert list(xaufoolag.columns) == [
        'XAU_0', 'FOO_0', 'XAU_1', 'FOO_1', 'XAU_2', 'FOO_2', 'XAU_3', 'FOO_3'
    ]  # noqa
    #  lags will introduce NaN, which are then dropped,
    #  so rows are reduced from 30 to 27.
    assert xaufoolag.shape == (27, 8)
    #
    #  Making sure LAGGED VALUES are correctly placed...
    #  Each step back from the tail shifts the same (XAU, FOO) value
    #  pairs two column positions to the left:
    tailpairs = [(1393.75, 6393.75),
                 (1392.0, 6392.0),
                 (1380.0, 6380.0),
                 (1395.0, 6395.0)]
    for row in range(1, 5):
        for k, (xauval, fooval) in enumerate(tailpairs[row - 1:]):
            assert tool.tailvalue(xaufoolag, pos=2 * k, row=row) == xauval
            assert tool.tailvalue(xaufoolag, pos=2 * k + 1, row=row) == fooval

    assert xaufoolag.index[0] == pd.Timestamp('2013-03-13 00:00:00')
    assert xaufoolag.index[-1] == pd.Timestamp('2013-04-18 00:00:00')
Esempio n. 15
0
def test_gaussmix_fecon236_check_xau_DataFrame():
    '''Check xau dataframe.'''
    observed = tool.tailvalue(xau)
    assert observed == 1393.75
Esempio n. 16
0
def test_group_fecon236_GET_d7xbtusd_from_QUANDL_vSlow_oLocal():
    '''Test get() which uses getqdl() in qdl module.
       Here we get a Bitcoin price from Quandl.
    '''
    price = get(qdl.d7xbtusd)
    observed = tool.tailvalue(price[:'2018-06-14'])
    assert observed == 6315.7