Example #1
def chaikin_money_flow3(df, col_volume='Volume_BTC', n=20):
    """
    Chaikin Money Flow
    https://github.com/FreddieWitherden/ta/blob/master/ta.py
    """
    clv = (2 * df['Close'] - df['High'] - df['Low']) / (df['High'] - df['Low'])
    clv.fillna(0.0, inplace=True)  # float division by zero
    return pd.Series(
        moments.rolling_sum(clv * df[col_volume], n) /
        moments.rolling_sum(df[col_volume], n))
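`pandas.stats.moments` was deprecated in pandas 0.18 and removed in later releases, so the snippets on this page only run against old pandas. As a point of comparison, here is a minimal sketch of the same indicator on current pandas; the `chaikin_money_flow_modern` name is ours, and the `Volume_BTC` default is carried over from the original:

import numpy as np
import pandas as pd

def chaikin_money_flow_modern(df, col_volume='Volume_BTC', n=20):
    # Close Location Value; bars with High == Low yield 0/0 -> NaN, zero them out
    clv = (2 * df['Close'] - df['High'] - df['Low']) / (df['High'] - df['Low'])
    clv = clv.replace([np.inf, -np.inf], np.nan).fillna(0.0)
    vol = df[col_volume]
    # .rolling(n).sum() is the modern spelling of moments.rolling_sum(..., n)
    return (clv * vol).rolling(n).sum() / vol.rolling(n).sum()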
Example #2
def vortex(s, n=14):
    ss = s.shift(1)

    tr = s.high.combine(ss.close, max) - s.low.combine(ss.close, min)
    trn = moments.rolling_sum(tr, n)

    vmp = np.abs(s.high - ss.low)
    vmm = np.abs(s.low - ss.high)

    vip = moments.rolling_sum(vmp, n) / trn
    vin = moments.rolling_sum(vmm, n) / trn

    return DataFrame(dict(vin=vin, vip=vip))
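The attribute access above implies `s` is a DataFrame with lowercase `high`, `low`, `close` columns. A sketch of the same Vortex computation on current pandas, under that assumption:

import pandas as pd

def vortex_modern(s, n=14):
    ss = s.shift(1)
    # true range measured against the prior close
    tr = s['high'].combine(ss['close'], max) - s['low'].combine(ss['close'], min)
    trn = tr.rolling(n).sum()
    vip = (s['high'] - ss['low']).abs().rolling(n).sum() / trn
    vin = (s['low'] - ss['high']).abs().rolling(n).sum() / trn
    return pd.DataFrame({'vin': vin, 'vip': vip})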
Example #4
def StormPrecipAnalysis(StormIntervals):
    #### EROSIVITY INDEX for storms (ENGLISH UNITS)
    stormlist = []
    for storm in StormIntervals.iterrows():
        index = storm[1]['start']
        start = storm[1]['start'] - dt.timedelta(minutes=60)  ## storm start is when PT exceeds threshold; retrieve Precip x min. prior to this
        end = storm[1]['end']  ## when to end the storm?? falling limb takes too long I think
        try:
            rain_data = pd.DataFrame.from_dict({'Timu': Precip['Timu-Nuuuli2'][start:end]})
            rain_data['AccumulativeDepth mm'] = rain_data['Timu'].cumsum()  ## cumulative depth at 1 min. intervals
            rain_data['AccumulativeDepth in.'] = (rain_data['Timu'] / 25.4).cumsum()  ## cumulative depth at 1 min. intervals
            rain_data['Intensity (in./hr)'] = rain_data['Timu'] * 60  ## intensity at each minute
            rain_data['30minMax (in./hr)'] = m.rolling_sum(Precip['Timu-Nuuuli2'], window=30) / 25.4
            I30 = rain_data['30minMax (in./hr)'].max()
            duration_hours = (end - start).days * 24 + (end - start).seconds // 3600
            I = rain_data['Timu'].sum() / 25.4 / duration_hours  ## I = storm average intensity
            E = 1099 * (1 - 0.72 * math.exp(-1.27 * I))  ## E = rain kinetic energy
            EI = E * I30
            stormlist.append((index, [rain_data['Timu'].sum() / 25.4, duration_hours, I30, I, E, EI]))
        except Exception:
            print("Can't analyze Storm Precip for storm: " + str(start))
    Stormdf = pd.DataFrame.from_items(stormlist, orient='index',
                                      columns=['Total(in)', 'Duration(hrs)', 'Max30minIntensity(in/hr)',
                                               'AvgIntensity(in/hr)', 'E-RainKineticEnergy(ft-tons/acre/inch)', 'EI'])
    Stormdf = Stormdf[Stormdf['Total(in)'] > 0.0]  ## filter out storms without good Timu data
    return Stormdf
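The rolling_sum call is the heart of this function: a 30-minute moving total of 1-minute rainfall depths. A self-contained sketch of just that step on current pandas; the synthetic rainfall series is illustrative only:

import numpy as np
import pandas as pd

# Synthetic 1-minute rainfall depths in mm (made-up data)
rain_mm = pd.Series(
    np.random.default_rng(1).gamma(0.3, 0.4, size=180),
    index=pd.date_range('2014-01-01', periods=180, freq='min'))

# Largest 30-minute accumulated depth, converted to inches as in the original
I30 = (rain_mm.rolling(30).sum() / 25.4).max()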
Example #5
    def test_fperr_robustness(self):
        # TODO: remove this once python 2.5 out of picture
        if PY3:
            raise nose.SkipTest("doesn't work on python 3")

        # #2114
        data = '\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1a@\xaa\xaa\xaa\xaa\xaa\xaa\x02@8\x8e\xe38\x8e\xe3\xe8?z\t\xed%\xb4\x97\xd0?\xa2\x0c<\xdd\x9a\x1f\xb6?\x82\xbb\xfa&y\x7f\x9d?\xac\'\xa7\xc4P\xaa\x83?\x90\xdf\xde\xb0k8j?`\xea\xe9u\xf2zQ?*\xe37\x9d\x98N7?\xe2.\xf5&v\x13\x1f?\xec\xc9\xf8\x19\xa4\xb7\x04?\x90b\xf6w\x85\x9f\xeb>\xb5A\xa4\xfaXj\xd2>F\x02\xdb\xf8\xcb\x8d\xb8>.\xac<\xfb\x87^\xa0>\xe8:\xa6\xf9_\xd3\x85>\xfb?\xe2cUU\xfd?\xfc\x7fA\xed8\x8e\xe3?\xa5\xaa\xac\x91\xf6\x12\xca?n\x1cs\xb6\xf9a\xb1?\xe8%D\xf3L-\x97?5\xddZD\x11\xe7~?#>\xe7\x82\x0b\x9ad?\xd9R4Y\x0fxK?;7x;\nP2?N\xf4JO\xb8j\x18?4\xf81\x8a%G\x00?\x9a\xf5\x97\r2\xb4\xe5>\xcd\x9c\xca\xbcB\xf0\xcc>3\x13\x87(\xd7J\xb3>\x99\x19\xb4\xe0\x1e\xb9\x99>ff\xcd\x95\x14&\x81>\x88\x88\xbc\xc7p\xddf>`\x0b\xa6_\x96|N>@\xb2n\xea\x0eS4>U\x98\x938i\x19\x1b>\x8eeb\xd0\xf0\x10\x02>\xbd\xdc-k\x96\x16\xe8=(\x93\x1e\xf2\x0e\x0f\xd0=\xe0n\xd3Bii\xb5=*\xe9\x19Y\x8c\x8c\x9c=\xc6\xf0\xbb\x90]\x08\x83=]\x96\xfa\xc0|`i=>d\xfc\xd5\xfd\xeaP=R0\xfb\xc7\xa7\x8e6=\xc2\x95\xf9_\x8a\x13\x1e=\xd6c\xa6\xea\x06\r\x04=r\xda\xdd8\t\xbc\xea<\xf6\xe6\x93\xd0\xb0\xd2\xd1<\x9d\xdeok\x96\xc3\xb7<&~\xea9s\xaf\x9f<UUUUUU\x13@q\x1c\xc7q\x1c\xc7\xf9?\xf6\x12\xdaKh/\xe1?\xf2\xc3"e\xe0\xe9\xc6?\xed\xaf\x831+\x8d\xae?\xf3\x1f\xad\xcb\x1c^\x94?\x15\x1e\xdd\xbd>\xb8\x02@\xc6\xd2&\xfd\xa8\xf5\xe8?\xd9\xe1\x19\xfe\xc5\xa3\xd0?v\x82"\xa8\xb2/\xb6?\x9dX\x835\xee\x94\x9d?h\x90W\xce\x9e\xb8\x83?\x8a\xc0th~Kj?\\\x80\xf8\x9a\xa9\x87Q?%\xab\xa0\xce\x8c_7?1\xe4\x80\x13\x11*\x1f? \x98\x00\r\xb6\xc6\x04?\x80u\xabf\x9d\xb3\xeb>UNrD\xbew\xd2>\x1c\x13C[\xa8\x9f\xb8>\x12b\xd7<pj\xa0>m-\x1fQ@\xe3\x85>\xe6\x91)l\x00/m>Da\xc6\xf2\xaatS>\x05\xd7]\xee\xe3\xf09>'

        arr = np.frombuffer(data, dtype='<f8')
        if sys.byteorder != "little":
            arr = arr.byteswap().newbyteorder()

        result = mom.rolling_sum(arr, 2)
        self.assertTrue((result[1:] >= 0).all())

        result = mom.rolling_mean(arr, 2)
        self.assertTrue((result[1:] >= 0).all())

        result = mom.rolling_var(arr, 2)
        self.assertTrue((result[1:] >= 0).all())

        # #2527, ugh
        arr = np.array([0.00012456, 0.0003, 0])
        result = mom.rolling_mean(arr, 1)
        self.assertTrue(result[-1] >= 0)

        result = mom.rolling_mean(-arr, 1)
        self.assertTrue(result[-1] <= 0)
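What this test guards against is floating-point cancellation: a rolling sum that updates an accumulator by subtracting the departing value can turn a mathematically zero result into a tiny negative one, even on non-negative data. A toy illustration, independent of pandas:

# Both expressions are 0.0 on paper; IEEE-754 rounding leaves a residue,
# and the second comes out negative -- exactly how a windowed sum over
# non-negative data can dip below zero.
print(0.1 + 0.2 - 0.3)    # 5.551115123125783e-17
print(0.3 - (0.1 + 0.2))  # -5.551115123125783e-17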
Example #8
def peakFind(data, l, pos, order=1, _even=False):
    sets = re.sub('x', str(pos), 'Report: xm (m)')
    print(sets)
    iterr = 1

    while not _even:
        iterr += 1
        mean = int(l[2] * iterr)
        maxlen = []
        phases = {}
        framepeak = {}
        for name, frame in data.items():  # was iteritems() under Python 2
            frame = frame[sets]
            frameMean = mo.rolling_sum(frame**3, mean, center=True).values
            maxpeak = sig.argrelmax(frameMean, order=order)
            maxlen.append(len(maxpeak[0]))
            framepeak[name] = frame.index[maxpeak]
            phase = np.zeros(framepeak[name].shape[0] - 1)
            for i in range(phase.shape[0]):  # was xrange under Python 2
                phase[i] = framepeak[name][i + 1] - framepeak[name][i]
            phases[name] = phase
        try:
            phases = pd.DataFrame(phases)
            framePeaks = pd.DataFrame(framepeak)
            _even = True
        except ValueError:
            print(maxlen)
            print('----- All of the peaks have not been found ------')
            if iterr == 200:  # `is 200` tested identity, not equality
                _even = True
                print(' ------ max iterations reached. Check data length ------')
                sets, phases, framePeaks = [], [], []

    return [sets, phases, framePeaks]
Example #9
    def _window_time_obs(self):
        window_obs = moments.rolling_sum(self._time_obs_count > 0,
                                         self._window,
                                         min_periods=1)

        window_obs[np.isnan(window_obs)] = 0
        return window_obs.astype(int)
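The trick here is rolling-summing a boolean mask, which counts how many periods in each window contain at least one observation. The same idea on current pandas, with toy counts:

import pandas as pd

time_obs_count = pd.Series([0, 2, 0, 1, 3, 0])
# True/False is cast to 1.0/0.0, so the rolling sum is a per-window count
window_obs = (time_obs_count > 0).rolling(3, min_periods=1).sum()
window_obs = window_obs.fillna(0).astype(int)  # mirror the original's NaN handling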
Example #10
def vortex(df, n=14):
    """
    Vortex Indicator
    https://github.com/FreddieWitherden/ta/blob/master/ta.py
    """
    dfs = df.shift(1)

    tr = df['High'].combine(dfs['Close'], max) - df['Low'].combine(dfs['Close'], min)
    trn = moments.rolling_sum(tr, n)

    vmp = np.abs(df['High'] - dfs['Low'])
    vmm = np.abs(df['Low'] - dfs['High'])

    vip = moments.rolling_sum(vmp, n) / trn
    vin = moments.rolling_sum(vmm, n) / trn

    return pd.DataFrame([vin, vip]).transpose()
Example #11
def rolling_attention(iot, window):
    "callback function for ASI calculations"
    w = window
    firm = iot.columns[0]
    iot['%sday_median' % w] = rolling_median(iot[firm], w)
    iot['%sday_ASI' % w] = log((1 + iot[firm]) / (1 + iot['%sday_median' % w]))
    iot['%sday_CASI' % w] = rolling_sum(iot['%sday_ASI' % w], w)
    return iot
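The same ASI/CASI construction on current pandas. This sketch assumes the bare `log` above is NumPy's and that `iot` holds one firm's attention series in its first column, as the original implies:

import numpy as np

def rolling_attention_modern(iot, window):
    w = window
    firm = iot.columns[0]
    iot['%sday_median' % w] = iot[firm].rolling(w).median()
    iot['%sday_ASI' % w] = np.log((1 + iot[firm]) / (1 + iot['%sday_median' % w]))
    iot['%sday_CASI' % w] = iot['%sday_ASI' % w].rolling(w).sum()  # cumulative ASI
    return iot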
Example #12
    def _nobs_raw(self):
        if self._is_rolling:
            window = self._window
        else:
            # expanding case
            window = len(self._index)

        result = moments.rolling_sum(self._time_obs_count, window, min_periods=1)

        return result.astype(int)
Example #13
    def test_rolling_functions_window_non_shrinkage(self):
        # GH 7764
        s = Series(range(4))
        s_expected = Series(np.nan, index=s.index)
        df = DataFrame([[1, 5], [3, 2], [3, 9], [-1, 0]], columns=['A', 'B'])
        df_expected = DataFrame(np.nan, index=df.index, columns=df.columns)
        df_expected_panel = Panel(items=df.index,
                                  major_axis=df.columns,
                                  minor_axis=df.columns)

        functions = [
            lambda x: mom.rolling_cov(
                x, x, pairwise=False, window=10, min_periods=5),
            lambda x: mom.rolling_corr(
                x, x, pairwise=False, window=10, min_periods=5),
            lambda x: mom.rolling_max(x, window=10, min_periods=5),
            lambda x: mom.rolling_min(x, window=10, min_periods=5),
            lambda x: mom.rolling_sum(x, window=10, min_periods=5),
            lambda x: mom.rolling_mean(x, window=10, min_periods=5),
            lambda x: mom.rolling_std(x, window=10, min_periods=5),
            lambda x: mom.rolling_var(x, window=10, min_periods=5),
            lambda x: mom.rolling_skew(x, window=10, min_periods=5),
            lambda x: mom.rolling_kurt(x, window=10, min_periods=5),
            lambda x: mom.rolling_quantile(
                x, quantile=0.5, window=10, min_periods=5),
            lambda x: mom.rolling_median(x, window=10, min_periods=5),
            lambda x: mom.rolling_apply(x, func=sum, window=10, min_periods=5),
            lambda x: mom.rolling_window(
                x, win_type='boxcar', window=10, min_periods=5),
        ]
        for f in functions:
            try:
                s_result = f(s)
                assert_series_equal(s_result, s_expected)

                df_result = f(df)
                assert_frame_equal(df_result, df_expected)
            except ImportError:
                # scipy needed for rolling_window
                continue

        functions = [
            lambda x: mom.rolling_cov(
                x, x, pairwise=True, window=10, min_periods=5),
            lambda x: mom.rolling_corr(
                x, x, pairwise=True, window=10, min_periods=5),
            # rolling_corr_pairwise is deprecated, so the following line should be deleted
            # when rolling_corr_pairwise is removed.
            lambda x: mom.rolling_corr_pairwise(x, x, window=10, min_periods=5),
        ]
        for f in functions:
            df_result_panel = f(df)
            assert_panel_equal(df_result_panel, df_expected_panel)
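On current pandas the mom.rolling_* functions are methods on the .rolling() object, and the non-shrinkage property this test checks still holds: with window=10 and min_periods=5 on four points, each result keeps the input's index and stays all-NaN. A condensed sketch of the same check:

import pandas as pd

s = pd.Series(range(4), dtype=float)
r = s.rolling(window=10, min_periods=5)
for res in (r.sum(), r.mean(), r.std(), r.var(), r.median(), r.quantile(0.5)):
    # too few observations for every window, so nothing but NaN comes back
    assert res.isna().all() and res.index.equals(s.index)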
Example #15
def read_quote(quote):
    fe = read_close(quote)
    
    ratios, financials, fcf, eps, shares_o = load_csv_files(quote)

    ep, fep = parse_eps(eps, max(fe.index))

    fc = fcf.fcf
    fc.index = fcf.pDate
    fc = fc.apply(lambda x: convert(x))
    #fc_pad = pad(fc, fc[-1], max(fe.index))
    
    shares = shares_o.shares_o
    shares.index = shares_o.pDate
    shares = shares.apply(lambda x: convert(x))
    shares_pad = pad(shares, shares[-1], max(fe.index))
    #past_year_fcf = rolling_sum(fc, 4, min_periods=4)
    
    fcf_shares = (fc / shares_pad).dropna()
    fcf_growth_rate = calculate_fc_growth(fcf_shares)

    past_year_eps = rolling_sum(ep, 4, min_periods=4)
    calculate_eps_growth(past_year_eps)
    

    #py_fc_pad = pad(past_year_fc, past_year_fc[-1], max(fe.index))
    fcf_growth_rate = 0.06  # NOTE: hardcoded override of the rate computed above
    growth = fcf_growth_rate * 0.75 * 100
    mg_df = past_mg_value(past_year_eps, growth=growth)
    
    #past_2year_eps = rolling_sum(ep, 8, min_periods=8)
    #past_3year_eps = rolling_sum(ep, 12, min_periods=12)
    #past_4year_eps = rolling_sum(ep, 16, min_periods=16)
    #past_5year_eps = rolling_sum(ep, 20, min_periods=20)
    
    #past_year_eps_ewma = ewma(ep, span=3, min_periods=4)
    #past_5year_eps_ewma = ewma(ep, span=19, min_periods=20)
    #ep.tshift(1, freq='D')  # Need to adjust because earnings happen EOD. Actually you don't: the dates aren't exact.
        
    df = DataFrame({'close' : fe, 'fep': fep})
    
    #df['last_qtr_eps'] = fep
    add_series(df, 'Valuemg Sell', mg_df['Valuemg'] * 1.1, max(fe.index))
    add_series(df, 'Valuemg Buy', mg_df['Valuemg'] * 0.75, max(fe.index))
    sub = df[df['Valuemg Sell'] > -1000].copy()
    sub['mavg_50day'] = rolling_mean(sub.close, 50, min_periods=1).shift(1)
    sub['mavg_200day'] = rolling_mean(sub.close, 200, min_periods=1).shift(1)
    sub.plot()
    
    #sub['ewma_s50'] = ewma(sub.close, span=50)
    #sub['ewma_s20'] = ewma(sub.close, span=20)
    plot_2015(sub, quote)
    return sub
Example #16
def ultimate(s, n1=7, n2=14, n3=28):
    cs = s.close.shift(1)
    bp = s.close - s.low.combine(cs, min)
    tr = s.high.combine(cs, max) - s.low.combine(cs, min)

    avg1 = moments.rolling_sum(bp, n1) / moments.rolling_sum(tr, n1)
    avg2 = moments.rolling_sum(bp, n2) / moments.rolling_sum(tr, n2)
    avg3 = moments.rolling_sum(bp, n3) / moments.rolling_sum(tr, n3)

    return 100*(4*avg1 + 2*avg2 + avg3) / 7
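A sketch of the same Ultimate Oscillator on current pandas, again assuming lowercase high/low/close columns:

import pandas as pd

def ultimate_modern(s, n1=7, n2=14, n3=28):
    cs = s['close'].shift(1)
    bp = s['close'] - s['low'].combine(cs, min)                  # buying pressure
    tr = s['high'].combine(cs, max) - s['low'].combine(cs, min)  # true range
    a1, a2, a3 = (bp.rolling(n).sum() / tr.rolling(n).sum() for n in (n1, n2, n3))
    return 100 * (4 * a1 + 2 * a2 + a3) / 7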
Example #20
def cmf(s, n=20):
    clv = (2 * s.close - s.high - s.low) / (s.high - s.low)
    vol = s.volume

    return moments.rolling_sum(clv * vol, n) / moments.rolling_sum(vol, n)
Example #21
def calculate_fc_growth(fcf_shares):
    past_year_fc = rolling_sum(fcf_shares, 4, min_periods=4)
    fc_growth = pct_growth(past_year_fc)
    grow = sum(fc_growth.tail(21)[::4].tail(5) * Weights)
    print("Past 5 year FCF Growth Rate: %f" % (grow))
    return grow
Example #22
    def yearly_eps(self):
        return rolling_sum(self.eps, 4, min_periods=4)
Example #23
    def yearly_fcf(self):
        return rolling_sum(self.fcf_per_share(), 4, min_periods=4)
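Both of these helpers are trailing-four-quarter (TTM) totals. A self-contained sketch of the same idea on current pandas; the quarterly EPS values are made up:

import pandas as pd

eps = pd.Series([0.5, 0.7, 0.6, 0.8, 0.9],
                index=pd.period_range('2013Q1', periods=5, freq='Q'))
ttm_eps = eps.rolling(4, min_periods=4).sum()  # NaN until four quarters accumulate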