Example #1
def gen_cloud(self):
        high_prices = self.data_frame['High']
        close_prices = self.data_frame['Close']
        low_prices = self.data_frame['Low']
        dates = self.data_frame.index
        nine_period_high = pd.rolling_max(self.data_frame['High'],
                                          window=self.period_high)
        nine_period_low = pd.rolling_min(self.data_frame['Low'],
                                         window=self.period_high)
        self.data_frame['tenkan_sen'] = (nine_period_high +
                                         nine_period_low) / 2

        # Kijun-sen (Base Line): (26-period high + 26-period low) / 2
        period26_high = pd.rolling_max(high_prices, window=self.base_line)
        period26_low = pd.rolling_min(low_prices, window=self.base_line)
        self.data_frame['kijun_sen'] = (period26_high + period26_low) / 2

        # Senkou Span A (Leading Span A): (Conversion Line + Base Line) / 2
        self.data_frame['senkou_span_a'] = (
            (self.data_frame['tenkan_sen'] + self.data_frame['kijun_sen']) / 2
        ).shift(self.base_line)

        # Senkou Span B (Leading Span B): (52-period high + 52-period low) / 2
        period52_high = pd.rolling_max(high_prices, window=self.leading_line)
        period52_low = pd.rolling_min(low_prices, window=self.leading_line)
        self.data_frame['senkou_span_b'] = ((period52_high + period52_low) /
                                            2).shift(26)

        # The most current closing price plotted 22 time periods behind (optional)
        #self.data_frame['chikou_span'] = close_prices.shift(-22) # 22 according to investopedia
        print(self.data_frame.plot())
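Note: the pd.rolling_max / pd.rolling_min calls used throughout these examples were deprecated in pandas 0.18 and removed in later releases. A minimal sketch of the same Tenkan-sen / Kijun-sen computation with the modern .rolling() accessor, assuming a hypothetical OHLC DataFrame df with 'High' and 'Low' columns:

import pandas as pd

def ichimoku_lines(df, conversion=9, base=26):
    # Tenkan-sen (Conversion Line): midpoint of the conversion-period range
    conv_high = df['High'].rolling(window=conversion).max()
    conv_low = df['Low'].rolling(window=conversion).min()
    tenkan_sen = (conv_high + conv_low) / 2
    # Kijun-sen (Base Line): midpoint of the base-period range
    base_high = df['High'].rolling(window=base).max()
    base_low = df['Low'].rolling(window=base).min()
    kijun_sen = (base_high + base_low) / 2
    return tenkan_sen, kijun_sen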
Example #2
    def ichimoku(self, l_sym, df_high, df_low):

        df_ichimoku_tenkan_u = pd.DataFrame(columns=l_sym, index=df_high.index)
        df_ichimoku_tenkan_l = pd.DataFrame(columns=l_sym, index=df_high.index)
        df_ichimoku_kijun_u = pd.DataFrame(columns=l_sym, index=df_high.index)
        df_ichimoku_kijun_l = pd.DataFrame(columns=l_sym, index=df_high.index)
        df_ichimoku_kijun = pd.DataFrame(columns=l_sym, index=df_high.index)
        df_ichimoku_tenkan = pd.DataFrame(columns=l_sym, index=df_high.index)

        for sym in l_sym:
            try:
                df_ichimoku_tenkan_u[sym] = pd.rolling_max(df_high[sym],
                                                           min_periods=1,
                                                           window=9)
                df_ichimoku_tenkan_l[sym] = pd.rolling_min(df_low[sym],
                                                           min_periods=1,
                                                           window=9)

                df_ichimoku_kijun_u[sym] = pd.rolling_max(df_high[sym],
                                                          min_periods=1,
                                                          window=26)
                df_ichimoku_kijun_l[sym] = pd.rolling_min(df_low[sym],
                                                          min_periods=1,
                                                          window=26)

                df_ichimoku_tenkan[sym] = (df_ichimoku_tenkan_u[sym] +
                                           df_ichimoku_tenkan_l[sym]) / 2
                df_ichimoku_kijun[sym] = (df_ichimoku_kijun_u[sym] +
                                          df_ichimoku_kijun_l[sym]) / 2
            except KeyError:
                # skip symbols missing from the high/low frames
                pass

        return df_ichimoku_tenkan, df_ichimoku_kijun
Example #3
def prepare():
    warnings.filterwarnings("ignore")
    env = command_line_argument_parser()
    data, activities = load_dataset(env["filename"])
    data['chest_absacc'] = data.apply(chest_absacc,axis=1)
    data['hand_absacc'] = data.apply(hand_absacc,axis=1)
    data['ankle_absacc'] = data.apply(ankle_absacc,axis=1)

    ws =  env["WindowSize"]
    data['chest_absacc_max'] = pd.rolling_max(data['chest_absacc'], ws)
    data['hand_absacc_max'] =  pd.rolling_max(data['hand_absacc'], ws)
    data['ankle_absacc_max'] = pd.rolling_max(data['ankle_absacc'], ws)

    data['chest_absacc_min'] = pd.rolling_min(data['chest_absacc'], ws)
    data['hand_absacc_min'] =  pd.rolling_min(data['hand_absacc'], ws)
    data['ankle_absacc_min'] = pd.rolling_min(data['ankle_absacc'], ws)

    data['chest_acc_max_diff'] = data.chest_absacc_max - data.chest_absacc_min
    data['hand_acc_max_diff'] = data.hand_absacc_max - data.hand_absacc_min
    data['ankle_acc_max_diff'] = data.ankle_absacc_max - data.ankle_absacc_min

    activities = data.groupby('activity')
    feature_vector=['chest_acc_max_diff','hand_acc_max_diff','ankle_acc_max_diff']
    data.dropna(subset=['chest_acc_max_diff','hand_acc_max_diff','ankle_acc_max_diff'],inplace=True)

    labels = data['activity'].values

    env["data"], env["activities"],env['labels'],env['feature_vector']= data,activities,labels,feature_vector
    return env
Example #4
def main():

    for currency in currencies:
        logging.info('Currency: {0}'.format(currency))

        df = loadData(currency)
        # print df

        df = dropOutliers(df)
        # print df

        rl = pd.DataFrame(dtype=float)
        rl['high8'] = pd.rolling_max(df['high'], 8)
        rl['high13'] = pd.rolling_max(df['high'], 13)
        rl['high21'] = pd.rolling_max(df['high'], 21)
        rl['high34'] = pd.rolling_max(df['high'], 34)
        rl['low8'] = pd.rolling_min(df['low'], 8)
        rl['low13'] = pd.rolling_min(df['low'], 13)
        rl['low21'] = pd.rolling_min(df['low'], 21)
        rl['low34'] = pd.rolling_min(df['low'], 34)
        print(rl.tail(20))

        rl = rl.iloc[-88:-22]
        logging.info('rl length {0}'.format(len(rl)))

        rl.plot()
        # plt.show()
        plt.savefig('resistance_lines.png')

        break
Example #5
 def process_data(self, mdf):
     xdf = self.proc_func(mdf, **self.proc_args)
     if self.win == -1:
         tr= pd.concat([xdf.high - xdf.low, abs(xdf.close - xdf.close.shift(1))],
                        join='outer', axis=1).max(axis=1)
     elif self.win == 0:
         tr = pd.concat([(pd.rolling_max(xdf.high, 2) - pd.rolling_min(xdf.close, 2))*self.multiplier,
                         (pd.rolling_max(xdf.close, 2) - pd.rolling_min(xdf.low, 2))*self.multiplier,
                         xdf.high - xdf.close,
                         xdf.close - xdf.low],
                         join='outer', axis=1).max(axis=1)
     else:
         tr= pd.concat([pd.rolling_max(xdf.high, self.win) - pd.rolling_min(xdf.close, self.win),
                        pd.rolling_max(xdf.close, self.win) - pd.rolling_min(xdf.low, self.win)],
                        join='outer', axis=1).max(axis=1)
     xdf['TR'] = tr
     xdf['chanh'] = self.chan_high(xdf['high'], self.chan, **self.chan_func['high']['args'])
     xdf['chanl'] = self.chan_low(xdf['low'], self.chan, **self.chan_func['low']['args'])
     xdf['ATR'] = dh.ATR(xdf, n = self.atr_len)
     xdf['MA'] = dh.MA(xdf, n=self.atr_len, field = 'close')
     xdata = pd.concat([xdf['TR'].shift(1), xdf['MA'].shift(1), xdf['ATR'].shift(1),
                        xdf['chanh'].shift(1), xdf['chanl'].shift(1),
                        xdf['open']], axis=1, keys=['tr','ma', 'atr', 'chanh', 'chanl', 'dopen']).fillna(0)
     self.df = mdf.join(xdata, how = 'left').fillna(method='ffill')
     self.df['datetime'] = self.df.index
     self.df['cost'] = 0
     self.df['pos'] = 0
     self.df['traded_price'] = self.df['open']
Example #7
def exit_model(sts_trade,days,atr_multiplier,ma_days,days_two):
    macd = talib.MACD(np.array(sts_trade['Settle']),macd_slow,macd_fast,macd_ma)
    macd = pd.DataFrame({'macd_slow':pd.Series(macd[0],index=sts_trade.index),'macd_fast':pd.Series(macd[1],index=sts_trade.index),'macd_dist':pd.Series(macd[2],index=sts_trade.index).shift(5)})    
    macd_shift = talib.MACD(np.array(sts_trade.shift(1)['Settle']),macd_slow,macd_fast,macd_ma)
    macd_shift = pd.DataFrame({'macd_slow_prev':pd.Series(macd_shift[0],index=sts_trade.index),'macd_fast_prev':pd.Series(macd_shift[1],index=sts_trade.index),'macd_dist_prev':pd.Series(macd_shift[2],index=sts_trade.index)})
    macd_shift_one = talib.MACD(np.array(sts_trade.shift(2)['Settle']),macd_slow,macd_fast,macd_ma)
    macd_shift_one = pd.DataFrame({'macd_slow_prev_one':pd.Series(macd_shift_one[0],index=sts_trade.index),'macd_fast_prev_one':pd.Series(macd_shift_one[1],index=sts_trade.index),'macd_dist_prev_one':pd.Series(macd_shift_one[2],index=sts_trade.index)})
    macd_shift_two = talib.MACD(np.array(sts_trade.shift(3)['Settle']),macd_slow,macd_fast,macd_ma)
    macd_shift_two = pd.DataFrame({'macd_slow_prev_two':pd.Series(macd_shift_two[0],index=sts_trade.index),'macd_fast_prev_two':pd.Series(macd_shift_two[1],index=sts_trade.index),'macd_dist_prev_two':pd.Series(macd_shift_two[2],index=sts_trade.index)})
    macd_shift_three = talib.MACD(np.array(sts_trade.shift(4)['Settle']),macd_slow,macd_fast,macd_ma)
    macd_shift_three = pd.DataFrame({'macd_slow_prev_three':pd.Series(macd_shift_three[0],index=sts_trade.index),'macd_fast_prev_three':pd.Series(macd_shift_three[1],index=sts_trade.index),'macd_dist_prev_three':pd.Series(macd_shift_three[2],index=sts_trade.index)})
    macd_shift_four = talib.MACD(np.array(sts_trade.shift(5)['Settle']),macd_slow,macd_fast,macd_ma)
    macd_shift_four = pd.DataFrame({'macd_slow_prev_four':pd.Series(macd_shift_four[0],index=sts_trade.index),'macd_fast_prev_four':pd.Series(macd_shift_four[1],index=sts_trade.index),'macd_dist_prev_four':pd.Series(macd_shift_four[2],index=sts_trade.index)})
    macd_shift_five = talib.MACD(np.array(sts_trade.shift(6)['Settle']),macd_slow,macd_fast,macd_ma)
    macd_shift_five = pd.DataFrame({'macd_slow_prev_five':pd.Series(macd_shift_five[0],index=sts_trade.index),'macd_fast_prev_five':pd.Series(macd_shift_five[1],index=sts_trade.index),'macd_dist_prev_five':pd.Series(macd_shift_five[2],index=sts_trade.index)}) 
    adx_min =  pd.DataFrame(talib.MINUS_DI(np.array(sts_trade['High']),np.array(sts_trade['Low']),np.array(sts_trade['Settle']),timeperiod = adx_days),columns=['adx_min'],index=sts_trade.index)
    adx_plus =  pd.DataFrame(talib.PLUS_DI(np.array(sts_trade['High']),np.array(sts_trade['Low']),np.array(sts_trade['Settle']),timeperiod = adx_days),columns=['adx_plus'],index=sts_trade.index)    
    adx_min_shift =  pd.DataFrame(talib.MINUS_DI(np.array(sts_trade.shift(1)['High']),np.array(sts_trade.shift(1)['Low']),np.array(sts_trade.shift(1)['Settle']),timeperiod = adx_days),columns=['adx_minprev'],index=sts_trade.index)
    adx_plus_shift =  pd.DataFrame(talib.PLUS_DI(np.array(sts_trade.shift(1)['High']),np.array(sts_trade.shift(1)['Low']),np.array(sts_trade.shift(1)['Settle']),timeperiod = adx_days),columns=['adx_plusprev'],index=sts_trade.index)    
    adx_min_shift_two =  pd.DataFrame(talib.MINUS_DI(np.array(sts_trade.shift(2)['High']),np.array(sts_trade.shift(2)['Low']),np.array(sts_trade.shift(2)['Settle']),timeperiod = adx_days),columns=['adx_minprev_two'],index=sts_trade.index)
    adx_plus_shift_two =  pd.DataFrame(talib.PLUS_DI(np.array(sts_trade.shift(2)['High']),np.array(sts_trade.shift(2)['Low']),np.array(sts_trade.shift(2)['Settle']),timeperiod = adx_days),columns=['adx_plusprev_two'],index=sts_trade.index)    
    adx_min_shift_three =  pd.DataFrame(talib.MINUS_DI(np.array(sts_trade.shift(3)['High']),np.array(sts_trade.shift(3)['Low']),np.array(sts_trade.shift(3)['Settle']),timeperiod = adx_days),columns=['adx_minprev_three'],index=sts_trade.index)
    adx_plus_shift_three =  pd.DataFrame(talib.PLUS_DI(np.array(sts_trade.shift(3)['High']),np.array(sts_trade.shift(3)['Low']),np.array(sts_trade.shift(3)['Settle']),timeperiod = adx_days),columns=['adx_plusprev_three'],index=sts_trade.index)   
    adx_min_shift_four =  pd.DataFrame(talib.MINUS_DI(np.array(sts_trade.shift(4)['High']),np.array(sts_trade.shift(4)['Low']),np.array(sts_trade.shift(4)['Settle']),timeperiod = adx_days),columns=['adx_minprev_four'],index=sts_trade.index)
    adx_plus_shift_four =  pd.DataFrame(talib.PLUS_DI(np.array(sts_trade.shift(4)['High']),np.array(sts_trade.shift(4)['Low']),np.array(sts_trade.shift(4)['Settle']),timeperiod = adx_days),columns=['adx_plusprev_four'],index=sts_trade.index)   
    adx_min_shift_five =  pd.DataFrame(talib.MINUS_DI(np.array(sts_trade.shift(5)['High']),np.array(sts_trade.shift(5)['Low']),np.array(sts_trade.shift(5)['Settle']),timeperiod = adx_days),columns=['adx_minprev_five'],index=sts_trade.index)
    adx_plus_shift_five =  pd.DataFrame(talib.PLUS_DI(np.array(sts_trade.shift(5)['High']),np.array(sts_trade.shift(5)['Low']),np.array(sts_trade.shift(5)['Settle']),timeperiod = adx_days),columns=['adx_plusprev_five'],index=sts_trade.index)   
    adx_min_shift_six =  pd.DataFrame(talib.MINUS_DI(np.array(sts_trade.shift(6)['High']),np.array(sts_trade.shift(6)['Low']),np.array(sts_trade.shift(6)['Settle']),timeperiod = adx_days),columns=['adx_minprev_six'],index=sts_trade.index)
    adx_plus_shift_six =  pd.DataFrame(talib.PLUS_DI(np.array(sts_trade.shift(6)['High']),np.array(sts_trade.shift(6)['Low']),np.array(sts_trade.shift(6)['Settle']),timeperiod = adx_days),columns=['adx_plusprev_six'],index=sts_trade.index)    
    roll_max = pd.DataFrame(pd.rolling_max(sts_trade['High'],days))
    roll_max.columns = ['stop_short']
    roll_min = pd.DataFrame(pd.rolling_min(sts_trade['Low'],days))
    roll_min.columns = ['stop_long']
    atr = talib.ATR(np.array(sts_trade['High']),np.array(sts_trade['Low']),np.array(sts_trade['Settle']),timeperiod = 18)
    stop = sts_trade['Settle'] + atr*atr_multiplier
    stop_short_post = pd.DataFrame(stop)
    stop_short_post.columns = ['ssp']
    stop = sts_trade['Settle'] - atr*atr_multiplier
    stop_long_post = pd.DataFrame(stop)
    stop_long_post.columns = ['slp']
    sts_trade = pd.concat([sts_trade, adx_min_shift, adx_min, adx_plus, adx_plus_shift,
                           adx_min_shift_two, adx_plus_shift_two, adx_min_shift_three,
                           adx_plus_shift_three, adx_min_shift_four, adx_plus_shift_four,
                           adx_min_shift_five, adx_plus_shift_five, adx_min_shift_six,
                           adx_plus_shift_six, roll_max, roll_min, stop_long_post,
                           stop_short_post, macd, macd_shift, macd_shift_one,
                           macd_shift_two, macd_shift_three, macd_shift_four,
                           macd_shift_five], axis=1)
    def short(h):
        # NOTE: both branches return h['slp'], so this ADX condition currently has no effect
        if (h['adx_plusprev'] < h['adx_minprev'] and h['adx_plus'] > h['adx_min'] and h['adx_plusprev_two'] < h['adx_minprev_two'] and h['adx_plusprev_three'] < h['adx_minprev_three'] and h['adx_plusprev_four'] < h['adx_minprev_four'] and h['adx_plusprev_five'] < h['adx_minprev_five']):
            return h['slp']
        else:
            return h['slp']
    def long_(h):
        # NOTE: both branches return h['ssp'], so this ADX condition currently has no effect
        if (h['adx_plusprev'] > h['adx_minprev'] and h['adx_plus'] < h['adx_min'] and h['adx_plusprev_two'] > h['adx_minprev_two'] and h['adx_plusprev_three'] > h['adx_minprev_three'] and h['adx_plusprev_four'] > h['adx_minprev_four'] and h['adx_plusprev_five'] > h['adx_minprev_five']):
            return h['ssp']
        else:
            return h['ssp']
    stop_long_post = pd.DataFrame(pd.rolling_min(sts_trade['Low'],days_two))
    stop_long_post.columns=['stop_long_post']
    stop_short_post = pd.DataFrame(pd.rolling_max(sts_trade['High'],days_two))
    stop_short_post.columns=['stop_short_post']
    #stop_long_post = pd.DataFrame(sts_trade.apply(short,axis=1),columns=['stop_long_post'])
    #stop_short_post = pd.DataFrame(sts_trade.apply(long_,axis=1),columns=['stop_short_post'])
    sts_trade = pd.concat([sts_trade,stop_long_post,stop_short_post],axis=1)
    return sts_trade
Example #8
def willR(data, inputCols, periods):
    curname = inputCols[0]
    maxname = inputCols[1]
    minname = inputCols[2]
    # Williams %R: the numerator must use the rolling high of the high column
    values = 100 * ((pd.rolling_max(data[maxname], periods) - data[curname]) /
                    (pd.rolling_max(data[maxname], periods) -
                     pd.rolling_min(data[minname], periods))).fillna(0)
    return values
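For reference, the conventional Williams %R carries a negative sign; a sketch with the modern rolling API (the column-name parameters are illustrative assumptions):

def williams_r(df, high_col, low_col, close_col, periods):
    # %R = -100 * (highest high - close) / (highest high - lowest low)
    hh = df[high_col].rolling(periods).max()
    ll = df[low_col].rolling(periods).min()
    return -100 * (hh - df[close_col]) / (hh - ll)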
Example #9
def plot_rolling_functions(series, window_size=128):
    pd.rolling_median(series,window_size).plot(label='median')
    pd.rolling_mean(series,window_size).plot(label='mean')
    pd.rolling_std(series,window_size).plot(label='std')
    pd.rolling_skew(series,window_size).plot(label='skew')
    pd.rolling_kurt(series,window_size).plot(label='kurt')
    pd.rolling_min(series,window_size).plot(label='min')
    pd.rolling_max(series,window_size).plot(label='max')
    plt.title('Various rolling window functions, window size %s' % (window_size))
    plt.legend()
    plt.show()
Example #10
    def ichimoku(self, df_close, df_high, df_low):
        '''
        df_ichimoku_tenkan_u, df_ichimoku_tenkan_l = indicators.channel(df_close,9)
        df_ichimoku_kijun_u, df_ichimoku_kijun_l = indicators.channel(df_close,26)
        df_senkou_a
        df_senkou_b_u, df_senkou_b_l = indicators.channel(df_close,52)

        sym_list = df_close.columns
        '''
        timestamps = df_close.index
        sym_list = df_close.columns

        df_ichimoku_tenkan_u = copy.deepcopy(df_close)
        df_ichimoku_tenkan_u = df_ichimoku_tenkan_u * np.NAN

        df_ichimoku_tenkan_l = copy.deepcopy(df_close)
        df_ichimoku_tenkan_l = df_ichimoku_tenkan_l * np.NAN

        df_ichimoku_kijun_u = copy.deepcopy(df_close)
        df_ichimoku_kijun_u = df_ichimoku_kijun_u * np.NAN

        df_ichimoku_kijun_l = copy.deepcopy(df_close)
        df_ichimoku_kijun_l = df_ichimoku_kijun_l * np.NAN

        df_ichimoku_kijun = copy.deepcopy(df_close)
        df_ichimoku_kijun = df_ichimoku_kijun * np.NAN

        df_ichimoku_tenkan = copy.deepcopy(df_close)
        df_ichimoku_tenkan = df_ichimoku_tenkan * np.NAN

        for sym in sym_list:
            df_ichimoku_tenkan_u[sym] = pd.rolling_max(df_high[sym],
                                                       min_periods=1,
                                                       window=9)
            df_ichimoku_tenkan_l[sym] = pd.rolling_min(df_low[sym],
                                                       min_periods=1,
                                                       window=9)

            df_ichimoku_kijun_u[sym] = pd.rolling_max(df_high[sym],
                                                      min_periods=1,
                                                      window=26)
            df_ichimoku_kijun_l[sym] = pd.rolling_min(df_low[sym],
                                                      min_periods=1,
                                                      window=26)

            for t_stamp in timestamps:
                df_ichimoku_tenkan[sym].ix[t_stamp] = (
                    df_ichimoku_tenkan_u[sym].ix[t_stamp] +
                    df_ichimoku_tenkan_l[sym].ix[t_stamp]) / 2
                df_ichimoku_kijun[sym].ix[t_stamp] = (
                    df_ichimoku_kijun_u[sym].ix[t_stamp] +
                    df_ichimoku_kijun_l[sym].ix[t_stamp]) / 2

        return df_ichimoku_tenkan, df_ichimoku_kijun
Example #11
 def new_high(self, days=5):
     '''
     '''
     df = deepcopy(self.df)
     daima = self.daima
     df['rolling_high'] = pd.rolling_max(df.high, days)
     df['new_high'] = df.high.shift(-1) >= df.rolling_high
     df['pct'] = np.where(df['new_high'], (df.open.shift(days) - df.open) / df.open, _NO_PCT)
     df['low_pct'] = np.where(df['new_high'], (pd.rolling_min(df.low, days) - df.open) / df.open, _NO_PCT)
     df['high_pct'] = np.where(df['new_high'],(pd.rolling_max(df.high, days) - df.open) / df.open, _NO_PCT)
     func_name = sys._getframe().f_code.co_name
     self._to_result(df, days, func_name)
     self._to_normal_distribution(df, days, func_name)
Example #12
def minMaxScalerRelative(tab,wind):
	minn=pandas.rolling_min(tab,wind)
	maxx=pandas.rolling_max(tab,wind)
	minn[0:wind-1]=minn[wind]
	maxx[0:wind-1]=maxx[wind]
	norm=maxx-minn
	return (tab-minn)/norm
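A sketch of the same rolling min-max scaler with the modern API, assuming a pandas Series input; bfill() approximates the explicit fill of the leading window above with the first valid value:

def min_max_scaler_relative(s, wind):
    minn = s.rolling(wind).min().bfill()  # backfill the leading NaN window
    maxx = s.rolling(wind).max().bfill()
    return (s - minn) / (maxx - minn)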
Example #13
    def newhigh(self, days=5, rolling_days=5):
        df = deepcopy(self.df)
        daima = self.daima

        df['rolling_high'] = pd.rolling_max(df.high.shift(-1*rolling_days), rolling_days)  # highest of the rolling_days days before today
        df['rolling_low'] = pd.rolling_min(df.low.shift(-1*rolling_days), rolling_days)  # lowest of the rolling_days days before today
        df['new_high'] = df.high.shift(-1) >= df.rolling_high
        low_value = (pd.rolling_min(df.low.shift(-1*days), days) - df.open) / df.open
        high_value = (pd.rolling_max(df.high.shift(-1*days), days) - df.open) / df.open
        df['low_pct'] = np.where(df['new_high'], low_value , 9)
        df['high_pct'] = np.where(df['new_high'], high_value , 9)

        
        func_name = sys._getframe().f_code.co_name
        df.to_csv('files_tmp/looklow_%s_%s_before.csv' % (func_name, daima))  # save to a file named after the function
        self._to_result(df, days, func_name)
Example #14
def MDD(timeSeries, window):
    """
    A maximum drawdown (MDD) is the maximum loss from a peak to a trough of a portfolio, 
    before a new peak is attained. 
    Maximum Drawdown (MDD) is an indicator of downside risk over a 
    specified time period. It can be used both as a stand-alone 
    measure or as an input into other metrics such as "Return 
    over Maximum Drawdown" and Calmar Ratio. Maximum Drawdown 
    is expressed in percentage terms and computed as:
    Read more: Maximum Drawdown (MDD) Definition 
    """

    # rolling_max records the maximum value seen up to time t.

    # Calculate the max drawdown in the past window days for each day in the series.
    # Use min_periods=1 if you want to let the first 252 days data have an expanding window
    Roll_Max = pd.rolling_max(timeSeries, window, min_periods=1)

    Roll_Max = ul.fnp(Roll_Max)
    print(Roll_Max.shape)
    # How much we have lost compared to the maximum so far
    Daily_Drawdown = timeSeries / Roll_Max - 1.0
    print(Daily_Drawdown.shape)
    # Next we calculate the minimum (negative) daily drawdown in that window.
    # Again, use min_periods=1 if you want to allow the expanding window
    Max_Daily_Drawdown = pd.rolling_min(Daily_Drawdown, window, min_periods=1)

    Max_Daily_Drawdown = ul.fnp(Max_Daily_Drawdown)
    return Daily_Drawdown, Max_Daily_Drawdown
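The same drawdown computation as a minimal modern-pandas sketch, assuming timeSeries is a pandas Series (the codebase-specific ul.fnp conversions are omitted):

def max_drawdown(series, window):
    # running peak; min_periods=1 gives an expanding window over the first points
    roll_max = series.rolling(window, min_periods=1).max()
    daily_drawdown = series / roll_max - 1.0
    # worst (most negative) drawdown observed within each window
    max_daily_drawdown = daily_drawdown.rolling(window, min_periods=1).min()
    return daily_drawdown, max_daily_drawdown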
Example #15
    def BuildData(self,code):
        self.data = pd.read_csv('600218.csv',index_col='date')
        del self.data['open']
        del self.data['low'] 
        del self.data['volume']
        del self.data['amount'] 
        del self.data['high'] 

        #self.__data = pd.read_csv('000001.csv')
        d0 = pd.read_csv('000001.csv',index_col='date')

        #read sh index into data
        self.data['refc']=d0.close[self.data.index[0]:self.data.index[-1]]

        #init the margin column  
        #self.__data['margin']=np.zeros(len(self.__data))

        # IN = (close/ref(close,7) - refc/ref(refc,7)) * 100
        self.data['IN']=(self.data.close/self.data.close.shift(7)-self.data.refc/self.data.refc.shift(7))*100

        #in_apex=(IN<REF(IN,1) AND REF(IN,1)>REF(IN,2)) ? 1,0;
        self.data['IN_apex']=np.zeros(len(self.data))
        index = self.data.IN[self.data.IN.shift(1)>self.data.IN][self.data.IN.shift(2)<self.data.IN.shift(1)].index
        self.data.IN_apex[index]=1

        #IN_T1 = (MAX(IN,15)<10 AND MIN(IN,15)>0 AND SUM(IN_apex,15)>4) ? 1, 0
        self.data['IN_T1']=np.zeros(len(self.data))
        index = self.data.IN[pd.rolling_max(self.data.IN,15)<10][pd.rolling_min(self.data.IN,15)>0][pd.rolling_sum(self.data.IN_apex,15)>4].index
        self.data.IN_T1[index]=1

        #IN_T = (IN<REF(IN,1) AND REF(IN,1)>0 AND SUM(IN_T1,3)>0) ? 1, 0
        self.data['IN_T']=np.zeros(len(self.data))
        index = self.data.IN[self.data.IN<self.data.IN.shift(1)][self.data.IN.shift(1)>0][pd.rolling_sum(self.data.IN_T1,3)>0].index
        self.data.IN_T[index]=1
Example #16
def storage(code):
	try:
		a=ts.get_hist_data(code)
	except:
		print('{} is wrong'.format(code))
	else:
		if a is not None:
			a['code']=str(code)		        	            
			a.sort_index(inplace=True)
			boll(a)
			a['rsi']=rsi(a)
			kdj(a,9,3,3)
			a['macd']=pd.ewma(a.close,12)-pd.ewma(a.close,26)
			a['ma30']=pd.rolling_mean(a.close,30)
			a['ma60']=pd.rolling_mean(a.close,60)
			a['ma90']=pd.rolling_mean(a.close,90)
			a['change30']=pd.rolling_std(np.gradient(a.ma30),30)
			for t in [5,10,20,30,60,90]:
				a['max'+str(t)]=pd.rolling_max(a.close,t)
				a['min'+str(t)]=pd.rolling_min(a.close,t)
			a['macd_a']=pd.ewma(a.close,12)
			a['macd_d']=pd.ewma(a.close,26)
			a['diff5']=100*(a.shift(-5).close-a.close)/a.close
			a['diff10']=100*(a.shift(-10).close-a.close)/a.close
			a['diff5_c']=a.diff5.apply(category)
			a['diff10_c']=a.diff10.apply(category)
			a = a.dropna()
			return a
Example #17
def dropOutliers(df):
    logging.info('Dropping outliers...')
    size_start = len(df)

    # get range
    rolhigh = pd.rolling_max(df['high'], 5)
    rolmin = pd.rolling_min(df['low'], 5)
    df['range'] = rolhigh - rolmin
    df.dropna(inplace=True)
    # print df

    # get stats
    mean = df.range.mean()
    std = df.range.std()

    # drop outliers
    min_cutoff = mean - std * 2
    max_cutoff = mean + std * 2
    # logging.info('Dropping outliers between below {0:4f} and above {1:4f}'.format(min_cutoff, max_cutoff))
    df = df[df['range'] > min_cutoff]
    df = df[df['range'] < max_cutoff]
    # logging.info('Dropped {0} rows'.format(500 - len(df)))

    # get stats
    # mean = df.range.mean()
    # std = df.range.std()
    # logging.info('{0} between {1} and {2} [{3}]'.format(
    #     currency,
    #     round(mean - std, 4),
    #     round(mean + std, 4),
    #     round(mean, 4),
    # ))

    logging.info('Outliers: {0} removed'.format(size_start - len(df)))
    return df
Example #18
def STOK(df, n):
    high_n = pd.rolling_max(df['High'], n)
    low_n = pd.rolling_min(df['Low'], n)
    SOk = pd.Series(100 * (df['Close'] - low_n) / (high_n - low_n),
                    name='SO%k' + str(n))
    df = df.join(SOk)
    return df
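A sketch of the same %K line with the modern rolling accessor:

def stok_modern(df, n):
    high_n = df['High'].rolling(n).max()
    low_n = df['Low'].rolling(n).min()
    return 100 * (df['Close'] - low_n) / (high_n - low_n)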
Example #19
def CHENOW_PLUNGER(df, n, atr_n=40):
    atr = ATR(df, atr_n)
    high = pd.Series((pd.rolling_max(df['high'], n) - df['close']) / atr,
                     name='CPLUNGER_H' + str(n))
    low = pd.Series((df['close'] - pd.rolling_min(df['low'], n)) / atr,
                    name='CPLUNGER_L' + str(n))
    return pd.concat([high, low], join='outer', axis=1)
Example #20
def locate_jumps(data, z_window=10, f_window=7,
               maxima_window=100, significance=2):
    """Find the points when the B-field was stepped.

    Parameters
    ----------
    data : Series of angle data, indexed by frame or time
    z_window : Size of noise sample
        Default 10.
    f_window : Compare these points ahead and behind to detect a jump.
        Default 7.
    maxima_window : Minimum spacing of distinct jumps. Default 100.
    significance: Minimum significance of a jump; in units of sigma. Default 2.

    Returns
    -------
    array of positions where jumps occurred
    """
    # Each point's z-score in the context of the preceding ones.
    z = (data - pd.rolling_mean(data, z_window))/pd.rolling_std(data, z_window)
    # f = z_{before}^2 - z_{after}^2
    f = pd.rolling_sum(z.shift(-f_window)**2 - z**2, f_window)
    jumps = ((f == pd.rolling_max(f, maxima_window, center=True)) & \
             (f > 2*f_window*significance))
    print(jumps.value_counts())
    return jumps[jumps].index.values
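A hypothetical modern-pandas version of the same detection statistic; the module-level rolling mean/std/sum and the centered rolling max all map onto the .rolling() accessor:

def locate_jumps_modern(data, z_window=10, f_window=7,
                        maxima_window=100, significance=2):
    # z-score of each point against the trailing noise window
    z = (data - data.rolling(z_window).mean()) / data.rolling(z_window).std()
    # f = windowed sum of z_ahead^2 - z_behind^2
    f = (z.shift(-f_window) ** 2 - z ** 2).rolling(f_window).sum()
    jumps = (f == f.rolling(maxima_window, center=True).max()) & \
            (f > 2 * f_window * significance)
    return jumps[jumps].index.values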
Example #21
    def get_stock_indicator(self,start,end):
        
        data =read_hdf(self.dataFile, self.symbol,where='index>=start & index <= end')
        
        data['ma5']  = pd.rolling_mean(data['Adj Close'], window=5).bfill()
        data['ma20'] = pd.rolling_mean(data['Adj Close'], window=20).bfill()
        data['ma50'] = pd.rolling_mean(data['Adj Close'], window=50).bfill()

        data['bol_upper'] = pd.rolling_mean(data['Adj Close'], window=20).bfill() + 2* pd.rolling_std(data['Adj Close'], 20, min_periods=20).bfill()
        data['bol_lower'] = pd.rolling_mean(data['Adj Close'], window=20).bfill() - 2* pd.rolling_std(data['Adj Close'], 20, min_periods=20).bfill()      
        data['bol_bw'] = ((data['bol_upper'] - data['bol_lower'])/data['ma20'])*100
        
                
        data['exma12'] = pd.ewma(data['Adj Close'], span=12).bfill()
        
        data['exma26'] = pd.ewma(data['Adj Close'], span=26).bfill()
        
        data['dif'] = data['exma12'] - data['exma26']
        
        data['dea'] = pd.ewma(data['dif'],span=9).bfill()
        
        data['macd'] = (data['dif'] - data['dea']) * 2
        
        # it seems Baidu Baike's description of the KDJ algorithm is wrong
        data['k'] = ((data['Adj Close'] - pd.rolling_min(data['Adj Close'],window=9).bfill())/
                     (pd.rolling_max(data['Adj Close'],window=9).bfill()-pd.rolling_min(data['Adj Close'],window=9).bfill()))*100
        
        data['d'] = pd.ewma(data['k'],span=3).bfill()
        
        data['j'] = 3 * data['d'] - 2 * data['k']
        

        return data        
Example #22
def MDD(timeSeries, window):
    """
    A maximum drawdown (MDD) is the maximum loss from a peak to a trough of a portfolio, 
    before a new peak is attained. 
    Maximum Drawdown (MDD) is an indicator of downside risk over a 
    specified time period. It can be used both as a stand-alone 
    measure or as an input into other metrics such as "Return 
    over Maximum Drawdown" and Calmar Ratio. Maximum Drawdown 
    is expressed in percentage terms and computed as:
    Read more: Maximum Drawdown (MDD) Definition 
    """
    
    # rolling_max records the maximum value seen up to time t.
    
    # Calculate the max drawdown in the past window days for each day in the series.
    # Use min_periods=1 if you want to let the first 252 days data have an expanding window
    Roll_Max = pd.rolling_max(timeSeries, window, min_periods=1)
    
    Roll_Max = ul.fnp(Roll_Max)
    # print (Roll_Max.shape)
    # How much we have lost compared to the maximum so far
    Daily_Drawdown = timeSeries/Roll_Max - 1.0
    print (Daily_Drawdown.shape)
    # Next we calculate the minimum (negative) daily drawdown in that window.
    # Again, use min_periods=1 if you want to allow the expanding window
    Max_Daily_Drawdown = pd.rolling_min(Daily_Drawdown, window, min_periods=1)
    
    Max_Daily_Drawdown = ul.fnp(Max_Daily_Drawdown)
    return Daily_Drawdown, Max_Daily_Drawdown
Example #23
def calc_iter_probs(ii):
    df_iter = fdf.loc[ii].copy()
    df_iter['dip'] = mc_d['dip_samp'][ii]
    df_iter['Ddot'] = mc_d['Ddot_samp'][ii]

    # Generate EQ sample/sequence from F(M) dist.
    m_vec = np.linspace(5, mc_d['max_M'][ii], num=1000)
    fm_vec = eqs.F_char(m_vec, Mc=Mc, char_M=mc_d['char_M'][ii])
    M_samp = eqs.sample_from_pdf(m_vec, fm_vec, n_eq_samp)
    Mo_samp = eqs.calc_Mo_from_M(M_samp)
    
    # Make time series of earthquakes, including no eq years
    recur_int = eqs.calc_recurrence_interval(Mo=Mo_samp, 
                                             dip=mc_d['dip_samp'][ii],
                                             slip_rate=mc_d['Ddot_samp'][ii],
                                             L=params['L_km'],
                                             z=params['z_km'])

    cum_yrs = eqs.calc_cumulative_yrs(recur_int)
    eq_series = eqs.make_eq_time_series(M_samp, cum_yrs)
    
    # calculate probability of observing EQ in time_window
    for t in time_window:
        roll_max = pd.rolling_max(eq_series, t)
        df_iter[t] = (eqs.get_probability_above_value(roll_max, min_M_list)
                      * mc_d['dip_frac'])
        
    # calculate histograms of recurrence intervals
    rec_int_counts_df = rec_int_df.loc[ii].copy()
    for mm in np.array(min_M_list):
        ints = np.diff( np.where(eq_series >= mm) )
        rec_int_counts_df.loc[mm] = np.histogram(ints, bins=rec_int_bins)[0]
        

    return df_iter, rec_int_counts_df
Example #24
    def which_ma_up_and_gtma(self, which_ma=[], ma='', days=5):
        df = deepcopy(self.df)
        daima = self.daima

        # first all true
        df['up1'] = df.high>df.low
        df['up2'] = df.high>df.low
        df['up3'] = df.high>df.low
        df['up4'] = df.high>df.low
        
        if 'ma5' in which_ma:
            df['up1'] = df['ma5'].shift(-1) > df['ma5'].shift(-2)
        if 'ma10' in which_ma:
            df['up2'] = df['ma10'].shift(-1) > df['ma10'].shift(-2)
        if 'ma20' in which_ma:
            df['up3'] = df['ma20'].shift(-1) > df['ma20'].shift(-2)
        if 'ma40' in which_ma:
            df['up4'] = df['ma40'].shift(-1) > df['ma40'].shift(-2)
        
        df['gt_ma'] = df.low.shift(-1) > df[ma].shift(-1)

        df['all'] = df.up1 & df.up2 & df.up3 & df.up4 & df.gt_ma
        low_value = (pd.rolling_min(df.low.shift(-1*days), days) - df.open) / df.open
        high_value = (pd.rolling_max(df.high.shift(-1*days), days) - df.open) / df.open

        df['low_pct'] = np.where(df['all'], low_value , 9)
        df['high_pct'] = np.where(df['all'], high_value , 9)

        
        func_name = sys._getframe().f_code.co_name
        self._to_result(df, days, func_name)
Example #25
def UI(prices, timeperiod=14):
    """
    Description:
    The Ulcer Index, proposed by Peter Martin in 1987, is a volatility measure
    of downside risk in securities markets. First compute R, the percent change
    of each day's close relative to the n-day high; the Ulcer Index is the
    square root of the mean of the squared R values.

    Formula:
    R = (CLOSE - MAX(CLOSE, N)) / MAX(CLOSE, N) * 100
    UI = SQRT(SUMSQ(R, N) / N)

    :param prices:
    :param timeperiod:
    :return:
    """
    assert prices is not None
    _assert_greater_or_equal(len(prices), timeperiod)
    assert isinstance(timeperiod, int)
    # assert isinstance(timeperiod2, int)

    df_price = prices.copy()
    df_price = df_price.sort_index(ascending=True)
    max_close_n = None
    if pd.__version__ < '0.18.0':
        max_close_n = pd.rolling_max(df_price['close'], timeperiod)
    else:
        max_close_n = df_price['close'].rolling(timeperiod).max()

    r = (df_price['close'] - max_close_n) / max_close_n * 100

    ui = None
    if pd.__version__ < '0.18.0':
        ui = pd.rolling_sum(r ** 2, timeperiod) / timeperiod
    else:
        ui = (r ** 2).rolling(timeperiod).sum() / timeperiod
    return ui ** 0.5  # take the square root per the UI formula above
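A hypothetical usage sketch, assuming a DataFrame of closes indexed by date (all names here are illustrative):

import numpy as np
import pandas as pd

idx = pd.date_range('2020-01-01', periods=60)
prices = pd.DataFrame({'close': 100 + np.random.randn(60).cumsum()}, index=idx)
ulcer = UI(prices, timeperiod=14)  # Series of Ulcer Index values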
Example #26
def find_extrema(x, y, window=5, span_points=25):
    #df = pd.DataFrame({'x': mpl.dates.date2num(x), 'y': y})
    df = pd.DataFrame({'x': range(len(x)), 'y': y})

    span = span_points / len(df)
    lo = stats.loess('y~x', df, span=span, na_action=stats.na_exclude)
    # we have to use predict(lo) instead of lo.rx2('fitted') here, the latter
    # doesn't include NAs
    fitted = pd.Series(pandas2ri.ri2py(stats.predict(lo)), index=df.index)
    max_ = pd.rolling_max(fitted, window, center=True)
    min_ = pd.rolling_min(fitted, window, center=True)

    df['fitted'] = fitted
    df['max'] = max_
    df['min'] = min_

    delta = max_ - fitted
    highs = df[delta <= 0]
    delta = min_ - fitted
    lows = df[delta >= 0]

    #globals()['fe_df'] = df
    #globals()['x'] = x
    #globals()['y'] = y
    #globals()['lows'] = lows
    #globals()['highs'] = highs

    return fitted, lows, highs
Example #27
def stochastic_oscillator(df, periods_k, periods_d):
    close = df.midclose.values
    high = pd.rolling_max(df.midhigh, periods_k, how='max')
    low = pd.rolling_min(df.midlow, periods_k, how='min')
    k = ((close - low) / (high - low)) * 100
    d = pd.rolling_mean(k, periods_d)
    return d
Example #28
def get_kdj(code):
    stock_data = ts.get_k_data(code)
    # kdj
    low_list = pd.rolling_min(stock_data['low'], 9)
    low_list.fillna(value=pd.expanding_min(stock_data['low']), inplace=True)
    high_list = pd.rolling_max(stock_data['high'], 9)
    high_list.fillna(value=pd.expanding_max(stock_data['high']), inplace=True)
    rsv = (stock_data['close'] - low_list) / (high_list - low_list) * 100
    stock_data['kdj_k'] = pd.ewma(rsv, com=2)
    stock_data['kdj_d'] = pd.ewma(stock_data['kdj_k'], com=2)
    stock_data['kdj_j'] = 3 * stock_data['kdj_k'] - 2 * stock_data['kdj_d']
    # compare today's J value with yesterday's
    kdj_j = stock_data['kdj_j']
    yesterdayJ = kdj_j[kdj_j.size - 2]
    todayJ = kdj_j[kdj_j.size - 1]
    kdj_k = stock_data['kdj_k']
    todayK = kdj_k[kdj_k.size - 1]
    # only continue with the logic below if today's J is greater than yesterday's
    if (todayJ > yesterdayJ and todayK < float(20)):
        # compute the 5-day price percentage
        stock_data = stock_data[stock_data.date > str(dc.get_the_day_before_today(1))]
        stock_data['kdj_ok'] = 1
    else:
        stock_data = stock_data[stock_data.date > str(dc.get_the_day_before_today(1))]
        stock_data['kdj_ok'] = 0
    return stock_data
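pd.ewma and pd.expanding_min/max were removed from pandas along with the rolling_* functions; a sketch of the same RSV/K/D/J computation with the modern accessors (rolling(..., min_periods=1) reproduces the fillna-with-expanding-min trick above):

low_list = stock_data['low'].rolling(9, min_periods=1).min()
high_list = stock_data['high'].rolling(9, min_periods=1).max()
rsv = (stock_data['close'] - low_list) / (high_list - low_list) * 100
stock_data['kdj_k'] = rsv.ewm(com=2).mean()
stock_data['kdj_d'] = stock_data['kdj_k'].ewm(com=2).mean()
stock_data['kdj_j'] = 3 * stock_data['kdj_k'] - 2 * stock_data['kdj_d']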
Example #29
def boll(a):
	a['std20']=pd.rolling_std(a.ma20,20)
	a['boll_up']=a.close+2*a.std20
	a['boll_down']=a.close-2*a.std20
	a['boll_range']=(pd.rolling_max(a.boll_up,20)-pd.rolling_min(a.boll_down,20))/pd.rolling_mean(a.ma20,20)
	a['boll_range_p']=a.boll_range.pct_change()
	a['boll_range_p20']=a.boll_range.pct_change(periods=20)
Example #30
def stoch_rsi(df, period):
    rsi_value = rsi(df, period=period)

    stoch_rsi_value = (rsi_value - pd.rolling_min(rsi_value, period)) / \
                      (pd.rolling_max(rsi_value, period) - pd.rolling_min(rsi_value, period))

    return stoch_rsi_value
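StochRSI rescales the RSI into [0, 1] over its own rolling range; the same expression with the modern API, taking the RSI series as input:

def stoch_rsi_modern(rsi_value, period):
    rmin = rsi_value.rolling(period).min()
    rmax = rsi_value.rolling(period).max()
    return (rsi_value - rmin) / (rmax - rmin)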
Example #31
    def KDJ(self, P1, P2):
        temp = pd.read_csv(
            r"C:\\Dell\\internship\\CICC\\Barra\\Database\\Tech\\KDJ.csv")
        temp_c = (self.data.iloc[:, :, 2]).T
        temp_h = (self.data.iloc[:, :, 4]).T
        temp_l = (self.data.iloc[:, :, 6]).T
        date = temp.index.values[-1]
        start = self.data.items.get_loc(date) + 1
        end = len(self.data)
        for i in range(start, end):
            DCloP = temp_c.iloc[i - 40:i, :]
            DLP = temp_l.iloc[i - 40:i, :]
            DHP = temp_h.iloc[i - 40:i, :]
            lv = pd.rolling_min(DLP, 5, 1)
            hv = pd.rolling_max(DHP, 5, 1)
            rsv = 100 * (DCloP - lv) / (hv - lv)
            k1 = (rsv.iloc[len(rsv) - 2, :] *
                  (P1 - 1) + rsv.iloc[len(rsv) - 1, :]) / P1
            k2 = (rsv.iloc[len(rsv) - 3, :] *
                  (P1 - 1) + rsv.iloc[len(rsv) - 2, :]) / P1
            d = (k2 * (P2 - 1) + k1) / P2
            j = k1 * 3 - d * 2
            temp.loc[self.data.items.values[i], :] = j

        temp.to_csv(
            r"C:\\Dell\\internship\\CICC\\Barra\\Database\\Tech\\KDJ.csv")
Example #32
def STOK(df, n):
    SOk = pd.Series((df['Adj. Close'] - pd.rolling_min(df['Adj. Low'], n)) /
                    (pd.rolling_max(df['Adj. High'], n) -
                     pd.rolling_min(df['Adj. Low'], n)),
                    name='SO%k')
    df['SOk'] = SOk
    return df
Example #33
def stoch(hlc, n_fastK=14, n_fastD=3, n_slowD=3, ma_type='sma', bounded=True, smooth=1):
    ''' Stochastic Oscillator '''
    high, low, close = utils.safe_hlc(hlc)
    
    if bounded:
        hmax = pd.rolling_max(high, n_fastK) 
        lmin = pd.rolling_min(low, n_fastK)
    else:
        raise NotImplementedError()
    
    num = close - lmin
    den = hmax - lmin
    
    mafunc = ma.get_ma(ma_type)
    num_ma = mafunc(num, smooth)
    den_ma = mafunc(den, smooth)
    
    fastK = num_ma / den_ma
    fastK[np.isnan(fastK)] = 0.5
    fastD = mafunc(fastK, n_fastD)
    slowD = mafunc(fastD, n_slowD)
    
    return pd.DataFrame(dict(fastK=fastK, 
                             fastD=fastD, 
                             slowD=slowD), 
                        index=hlc.index)
Example #34
def attr(p): 
 k = p.dropna()
 k['dir'] = k.Close-k.Open
 

 k['bindar'] =(k.Close-k.Open)/(abs(k.Close-k.Open)+0.0001)+1
 k['bindar'] =  round(k.bindar/2)*2 
 k['ret'] = ((k.Close.shift(-10)-k.Close)/k.Close)*100
 k['nret'] = norm1(k.ret)
 k['highspike'] = k.High - k.Open * k.bindar/2 + (k.bindar-2)/2 * k.Close
 
 k['lowspike'] = k.Close * k.bindar/2 - (k.bindar-2)/2 * k.Open - k.Low
 k['normdir'] = norm1(k['highspike']/(k['lowspike']+0.000001))
 k['normhs'] = norm1(k['dir']/(k['highspike']+0.000001))
 k['normls'] = norm1(k['dir']/(k['lowspike']+0.000001))


 k['avg'] = pd.rolling_mean(k.Close,window =14)
 k['avgdif'] = k.avg-k.avg.shift(1)
 k['avgdif'] = norm1(k.avgdif)
 k['mp'] = (k.High+k.Low)/2
 k['im'] = 1*(k.mp-(pd.rolling_min(k.Low,window = 20)))/((pd.rolling_max(k.High,window=20))-(pd.rolling_min(k.Low,window = 20)))
 k['ft'] = 0.5*np.log((1+k.im)/(1-k.im))
 
 k['tt'] = norm1(k['tickqty']/k['tickqty'].shift(1))
 k = k.dropna()
 k = k.fillna(0)
 return k
Example #35
 def results(self, data_frame):
     try:
         data_frame[self.value] = pd.rolling_max(data_frame[self.data],
                                                 self.period)
     except KeyError:
         data_frame[self.value] = np.nan
     return data_frame
Example #36
    def kdj(self, start, stop):
        close = []
        high = []
        low = []
        for i in range(start, stop):
            close.append(self.stocks[i].close)
            high.append(self.stocks[i].high)
            low.append(self.stocks[i].low)

        lowest = pandas.rolling_min(pandas.Series(low), 9)
        highest = pandas.rolling_max(pandas.Series(high), 9)
        closedp = pandas.Series(close)

        rsv = ((closedp - lowest) / (highest - lowest)) * 100
        rsv[0] = 50

        k = pandas.ewma(rsv, com=2, adjust=False)
        k[0] = 50
        d = pandas.ewma(k, com=2, adjust=False)
        j = 3 * k - 2 * d

        for i in range(start, stop):
            self.stocks[i].k = k[i - start]
            self.stocks[i].d = d[i - start]
            self.stocks[i].j = j[i - start]
Example #37
def get_high_low(df, n):
    df.sort_values(by=['tradeDate'], ascending=True, inplace=True)
    df['newhigh'] = df['highestPrice'].shift(1)
    df['newlow'] = df['lowestPrice'].shift(1)
    df['nhigh'] = pd.rolling_max(df['newhigh'], window=n, min_periods=1)
    df['nlow'] = pd.rolling_min(df['newlow'], window=n, min_periods=1)
    return df
Example #38
def featHiLow(dData, lLookback=20, b_human=False ):
    '''
    @summary: 1 represents a high for the lookback -1 represents a low
    @param dData: Dictionary of data to use
    @param lLookback: Number of days to look in the past
    @param b_human: if true return dataframe to plot
    @return: DataFrame array containing values
    '''
    if b_human:
        for sym in dData['close']:
            x=1000/dData['close'][sym][0]
            dData['close'][sym]=dData['close'][sym]*x
        return dData['close']
    dfPrice = dData['close']
    
    #Find Max for each price for lookback
    maxes = pand.rolling_max(dfPrice, lLookback, 1)
    
    #Find Min
    mins = pand.rolling_min(dfPrice, lLookback, 1)
    
    #Find Range
    ranges = maxes - mins
    
    #Calculate (price - min) * 2 / range -1
    dfRet = (((dfPrice-mins)*2)/ranges)-1
    
    return dfRet
Example #39
def computeEnvelope(arrData, nWindow, nMinPeriods=None):
    """compute upper and lower envelope for given data"""
    arrUpperEnvelope = pd.rolling_max(pd.Series(arrData), window=nWindow,
                                      min_periods=nMinPeriods, center=True)
    arrLowerEnvelope = pd.rolling_min(pd.Series(arrData), window=nWindow,
                                     min_periods=nMinPeriods, center=True)
    return arrUpperEnvelope, arrLowerEnvelope
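The centered rolling envelope translates directly to the modern accessor; a minimal sketch:

import pandas as pd

def compute_envelope(arr_data, n_window, n_min_periods=None):
    s = pd.Series(arr_data)
    upper = s.rolling(n_window, min_periods=n_min_periods, center=True).max()
    lower = s.rolling(n_window, min_periods=n_min_periods, center=True).min()
    return upper, lower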
Example #40
def breakout(price, lookback, smooth=None):
    """
    :param price: The price or other series to use (assumed Tx1)
    :type price: pd.DataFrame

    :param lookback: Lookback in days
    :type lookback: int

    :param smooth: Smooth to apply in days. Must be less than lookback! Defaults to lookback/4
    :type smooth: int

    :returns: pd.DataFrame -- unscaled, uncapped forecast

    With thanks to nemo4242 on elitetrader.com for vectorisation

    """
    if smooth is None:
        smooth=max(int(lookback/4.0), 1)
        
    assert smooth<lookback
        
    roll_max = pd.rolling_max(price, lookback, min_periods=int(min(len(price), np.ceil(lookback/2.0))))
    roll_min = pd.rolling_min(price, lookback, min_periods=int(min(len(price), np.ceil(lookback/2.0))))
    
    roll_mean = (roll_max+roll_min)/2.0

    ## gives a nice natural scaling
    output = 40.0*((price - roll_mean) / (roll_max - roll_min))
    smoothed_output = pd.ewma(output, span=smooth, min_periods=np.ceil(smooth/2.0))

    return smoothed_output
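A sketch of the same breakout forecast with pd.rolling_max / pd.rolling_min / pd.ewma swapped for their modern method-chain equivalents:

import numpy as np

def breakout_modern(price, lookback, smooth=None):
    if smooth is None:
        smooth = max(int(lookback / 4.0), 1)
    assert smooth < lookback
    min_p = int(min(len(price), np.ceil(lookback / 2.0)))
    roll_max = price.rolling(lookback, min_periods=min_p).max()
    roll_min = price.rolling(lookback, min_periods=min_p).min()
    roll_mean = (roll_max + roll_min) / 2.0
    # the 40x factor gives the unscaled forecast a natural range
    output = 40.0 * ((price - roll_mean) / (roll_max - roll_min))
    return output.ewm(span=smooth, min_periods=int(np.ceil(smooth / 2.0))).mean()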
Example #41
def find_extrema(se, window=5, span_points=25):
        #df = pd.DataFrame({'x': mpl.dates.date2num(x), 'y': y})
        x=se.index
        y=se
        df = pd.DataFrame({'x': x, 'y': y})

        span = span_points/len(df)
        lo = stats.loess('y~x', df, span=span, na_action=stats.na_exclude)
        # we have to use predict(lo) instead of lo.rx2('fitted') here, the latter
        # doesn't include NAs
        fitted = pd.Series(pandas2ri.ri2py(stats.predict(lo)), index=df.index)
        max_ = pd.rolling_max(fitted, window, center=True)
        min_ = pd.rolling_min(fitted, window, center=True)

        df['fitted'] = fitted
        df['max'] = max_
        df['min'] = min_

        delta = max_ - fitted
        highs = df[delta<=0]
        delta = min_ - fitted
        lows = df[delta>=0]

        #globals()['fe_df'] = df
        #globals()['x'] = x
        #globals()['y'] = y
        #globals()['lows'] = lows
        #globals()['highs'] = highs

        return fitted, lows, highs
Example #42
def sto_d(df, n):  
    high_n = pd.rolling_max(df['High'], n)
    low_n = pd.rolling_min(df['Low'], n)
    SOk = pd.Series( 100 * (df['Close'] - low_n) / (high_n - low_n), name = 'SO%k')  
    SOd = pd.Series(pd.rolling_mean(SOk, 3), name = 'SO%d' + str(n))  
    df = df.join(SOd)  
    return df
Example #44
def min_max_mean(candles, window_length):
    closings = candles.midclose.values[1:] - candles.midclose.values[:-1]
    closings = np.insert(closings, 0, np.nan)
    mmm = pd.rolling_mean((pd.rolling_max(closings, window_length) -  \
                           pd.rolling_min(closings, window_length)),
                           window_length)
    return mmm
Example #45
def featDrawDown(dData, lLookback=30, b_human=False):
    '''
    @summary: Calculate Drawdown for the stock
    @param dData: Dictionary of data to use
    @param lLookback: Days to look back
    @return: DataFrame array containing values
    @param b_human: if true return dataframe to plot
    @warning: Drawdown and RunUp can depend heavily on sample period
    '''

    if b_human:
        for sym in dData['close']:
            x = 1000 / dData['close'][sym][0]
            dData['close'][sym] = dData['close'][sym] * x
        return dData['close']

    dfPrice = dData['close']

    dfMax = pand.rolling_max(dfPrice, lLookback)
    return (dfMax - dfPrice) / dfMax
Example #46
def featStochastic(dData, lLookback=14, bFast=True, lMA=3, b_human=False):
    '''
    @summary: Calculate stochastic oscillator - indicates what range of recent low-high spread we are in.
    @param dData: Dictionary of data to use
    @param bFast: If false, do slow stochastics, 3 day MA, if not use fast, no MA
    @param b_human: if true return dataframe to plot
    @return: DataFrame array containing feature values
    '''

    dfLow = dData['low']
    dfHigh = dData['high']
    dfPrice = dData['close']

    #''' Loop through stocks '''
    dfLows = pand.rolling_min(dfLow, lLookback)
    dfHighs = pand.rolling_max(dfHigh, lLookback)

    dfStoch = (dfPrice - dfLows) / (dfHighs - dfLows)

    #''' For fast we just take the stochastic value, slow we need 3 day MA '''
    if not bFast:
        dfStoch = pand.rolling_mean(dfStoch, lMA)

    if b_human:
        for sym in dData['close']:
            x = 1000 / dData['close'][sym][0]
            dData['close'][sym] = dData['close'][sym] * x
        return dData['close']

    return dfStoch
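A sketch of the same fast/slow stochastic feature with the modern rolling API (the b_human plotting branch is omitted):

def feat_stochastic_modern(dData, lLookback=14, bFast=True, lMA=3):
    dfLows = dData['low'].rolling(lLookback).min()
    dfHighs = dData['high'].rolling(lLookback).max()
    dfStoch = (dData['close'] - dfLows) / (dfHighs - dfLows)
    # slow stochastic smooths the fast value with an lMA-day moving average
    return dfStoch if bFast else dfStoch.rolling(lMA).mean()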
Example #47
def sen(data, periods=9):
    t = pd.rolling_max(
        data['Adjusted Close'], window=periods)
    t += pd.rolling_min(
        data['Adjusted Close'], window=periods)
    t /= 2.
    return t
Example #48
 def calculate(self):
     col = '52-Week-%s' % self.col
     if self.col == 'High':
         self.df[col] = pd.rolling_max(self.df['Adj High'], 365)
     elif self.col == 'Low':
         self.df[col] = pd.rolling_min(self.df['Adj Low'], 365)
     return self.df
Example #49
def rolling_functions_tests(p, d):
    # Old-fashioned rolling API
    assert_eq(pd.rolling_count(p, 3), dd.rolling_count(d, 3))
    assert_eq(pd.rolling_sum(p, 3), dd.rolling_sum(d, 3))
    assert_eq(pd.rolling_mean(p, 3), dd.rolling_mean(d, 3))
    assert_eq(pd.rolling_median(p, 3), dd.rolling_median(d, 3))
    assert_eq(pd.rolling_min(p, 3), dd.rolling_min(d, 3))
    assert_eq(pd.rolling_max(p, 3), dd.rolling_max(d, 3))
    assert_eq(pd.rolling_std(p, 3), dd.rolling_std(d, 3))
    assert_eq(pd.rolling_var(p, 3), dd.rolling_var(d, 3))
    # see note around test_rolling_dataframe for logic concerning precision
    assert_eq(pd.rolling_skew(p, 3),
              dd.rolling_skew(d, 3), check_less_precise=True)
    assert_eq(pd.rolling_kurt(p, 3),
              dd.rolling_kurt(d, 3), check_less_precise=True)
    assert_eq(pd.rolling_quantile(p, 3, 0.5), dd.rolling_quantile(d, 3, 0.5))
    assert_eq(pd.rolling_apply(p, 3, mad), dd.rolling_apply(d, 3, mad))
    with ignoring(ImportError):
        assert_eq(pd.rolling_window(p, 3, 'boxcar'),
                  dd.rolling_window(d, 3, 'boxcar'))
    # Test with edge-case window sizes
    assert_eq(pd.rolling_sum(p, 0), dd.rolling_sum(d, 0))
    assert_eq(pd.rolling_sum(p, 1), dd.rolling_sum(d, 1))
    # Test with kwargs
    assert_eq(pd.rolling_sum(p, 3, min_periods=3),
              dd.rolling_sum(d, 3, min_periods=3))
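These tests exercise the long-removed module-level rolling functions; each maps onto the method-chain form in current pandas, e.g. (a sketch; boxcar windows require scipy):

import pandas as pd

p = pd.Series(range(10), dtype=float)
p.rolling(3).count()                    # pd.rolling_count(p, 3)
p.rolling(3).mean()                     # pd.rolling_mean(p, 3)
p.rolling(3).quantile(0.5)              # pd.rolling_quantile(p, 3, 0.5)
p.rolling(3, win_type='boxcar').mean()  # pd.rolling_window(p, 3, 'boxcar')
p.rolling(3, min_periods=3).sum()       # pd.rolling_sum(p, 3, min_periods=3)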
Example #50
def updatingMoneyWave(highp, lowp, closep, nextMWPrice = False):
	if len(closep) > 10:
#		slowk, slowd = tb.STOCH(highp, lowp, closep, fastk_period=5, slowk_period=3, slowk_matype=0, slowd_period=1, slowd_matype=0)
		lowest, highest =  pd.rolling_min(lowp, 5), pd.rolling_max(highp, 5)
		stoch = 100 * (closep - lowest) / (highest - lowest)
#		if nextMWPrice:
		MWhigh = 80
		MWlow = 20
		slowk = pd.rolling_mean(stoch, 3)[-1]

		if slowk > MWhigh:
			newPrice = ((highest[-1]-lowest[-1])*(((MWhigh*3)-stoch[-1]-stoch[-2])/100)+lowest[-1])
			print('Buy below ')
			print(newPrice)
			if nextMWPrice:
				return newPrice
		elif slowk < MWlow:
			newPrice = ((highest[-1]-lowest[-1])*(((MWlow*3)-stoch[-1]-stoch[-2])/100)+lowest[-1])
			print('Buy above ')
			print(newPrice)
			if nextMWPrice:
				return newPrice
		if nextMWPrice:
			return 0
#			preStoch = ((MW*3) - slowd[-1] - slowd[-2])/100
#			newPrice = ((max(highp[-4:]) - min(lowp[-4:]))*preStoch)+min(lowp[-4:])

		return slowk
	# Throw exception?
	return (None, None)
Example #51
def amplitude_range(df):
    period_list = [5, 10, 20, 40]
    for i in period_list:
        name = 'ampr' + str(i)
        df[name] = (pd.rolling_max(df['high'], i) -
                    pd.rolling_min(df['low'], i)) / df['close'].shift(i + 1)
    return df
Example #52
def calc_iter_probs(iter):
    df_iter = fdf.loc[iter].copy()
    df_iter['dip'] = mc_d['dip_samp'][iter]
    df_iter['Ddot'] = mc_d['Ddot_samp'][iter]

    # Generate EQ sample/sequence from F(M) dist.
    m_vec = np.linspace(5, mc_d['max_M'][iter], num=1000)
    fm_vec = eqs.F(m_vec, Mc=Mc)
    M_samp = eqs.sample_from_pdf(m_vec, fm_vec, n_eq_samp)
    Mo_samp = eqs.calc_Mo_from_M(M_samp)
    
    # Make time series of earthquakes, including no eq years
    recur_int = eqs.calc_recurrence_interval(Mo=Mo_samp, 
                                             dip=mc_d['dip_samp'][iter],
                                             slip_rate=mc_d['Ddot_samp'][iter],
                                             L=params['L_km'],
                                             z=params['z_km'])

    cum_yrs = eqs.calc_cumulative_yrs(recur_int)
    eq_series = eqs.make_eq_time_series(M_samp, cum_yrs)
    
    # calculate probability of observing EQ in time_window
    for t in time_window:
        roll_max = pd.rolling_max(eq_series, t)
        df_iter[t] = (eqs.get_probability_above_value(roll_max, min_M_list)
                      * mc_d['dip_frac'])
        
    # calculate histograms of recurrence intervals
    rec_int_counts_df = rec_int_df.loc[iter].copy()
    for mm in np.array(min_M_list):
        ints = np.diff( np.where(eq_series >= mm) )
        rec_int_counts_df.loc[mm] = np.histogram(ints, bins=rec_int_bins)[0]
        

    return df_iter, rec_int_counts_df
Example #54
def rolling_functions_tests(p, d):
    # Old-fashioned rolling API
    assert_eq(pd.rolling_count(p, 3), dd.rolling_count(d, 3))
    assert_eq(pd.rolling_sum(p, 3), dd.rolling_sum(d, 3))
    assert_eq(pd.rolling_mean(p, 3), dd.rolling_mean(d, 3))
    assert_eq(pd.rolling_median(p, 3), dd.rolling_median(d, 3))
    assert_eq(pd.rolling_min(p, 3), dd.rolling_min(d, 3))
    assert_eq(pd.rolling_max(p, 3), dd.rolling_max(d, 3))
    assert_eq(pd.rolling_std(p, 3), dd.rolling_std(d, 3))
    assert_eq(pd.rolling_var(p, 3), dd.rolling_var(d, 3))
    # see note around test_rolling_dataframe for logic concerning precision
    assert_eq(pd.rolling_skew(p, 3),
              dd.rolling_skew(d, 3),
              check_less_precise=True)
    assert_eq(pd.rolling_kurt(p, 3),
              dd.rolling_kurt(d, 3),
              check_less_precise=True)
    assert_eq(pd.rolling_quantile(p, 3, 0.5), dd.rolling_quantile(d, 3, 0.5))
    assert_eq(pd.rolling_apply(p, 3, mad), dd.rolling_apply(d, 3, mad))
    assert_eq(pd.rolling_window(p, 3, win_type='boxcar'),
              dd.rolling_window(d, 3, win_type='boxcar'))
    # Test with edge-case window sizes
    assert_eq(pd.rolling_sum(p, 0), dd.rolling_sum(d, 0))
    assert_eq(pd.rolling_sum(p, 1), dd.rolling_sum(d, 1))
    # Test with kwargs
    assert_eq(pd.rolling_sum(p, 3, min_periods=3),
              dd.rolling_sum(d, 3, min_periods=3))
Example #56
    def stochastic(self, df_close, df_high, df_low, n):

        sym_list = df_close.columns
        timestamps = df_close.index
        columns = ['high','low','close','hh','ll','stoch']

        df_symstoch = pd.DataFrame(index=df_close.index,columns = columns)
        df_symstoch = df_symstoch.fillna(0.00)

        df_stoch = copy.deepcopy(df_close)
        df_stoch = df_stoch.fillna(0.0)

        for sym in sym_list:
            df_symstoch['close'] = df_close[sym]
            df_symstoch['high'] = df_high[sym]
            df_symstoch['low'] = df_low[sym]
            df_symstoch['hh'] = pd.rolling_max(df_high[sym],min_periods=1,window=n)
            df_symstoch['ll'] = pd.rolling_min(df_low[sym],min_periods=1,window=n)
            for t_stamp in range(0,len(timestamps)):
                if t_stamp >= n-1:
                    df_symstoch['stoch'].ix[t_stamp] = ((df_symstoch['close'].ix[t_stamp] - df_symstoch['ll'].ix[t_stamp])/(df_symstoch['hh'].ix[t_stamp] - df_symstoch['ll'].ix[t_stamp]))*100
            df_stoch[sym] = df_symstoch['stoch']

        df_stoch.to_csv('./debug/df_stoch.csv')
        return df_stoch
Example #57
def filter_rule(stock2read, stock2save, n):
	file_location_csv = str(stock2read) + '.csv'
	df = pd.read_csv(file_location_csv, index_col = 'Dates', parse_dates = True)
	print(df.head())
	df['Rolling_max'] = pd.rolling_max(df['Last_price'], int(n))
	df.to_csv(str(stock2save) + '.csv')
	df[['Open', 'Last_price', 'Rolling_max']].plot()
	plt.show()
Example #58
def FISHER(df, win, smooth_p = 0.7, smooth_i = 0.7):
    roll_high = pd.rolling_max(df.high, win)
    roll_low  = pd.rolling_min(df.low, win)
    price_loc = (df.close - roll_low)/(roll_high - roll_low) * 2.0 - 1
    sm_price = pd.Series(pd.ewma(price_loc, com = 1.0/smooth_p - 1, adjust = False), name = 'FISHER_P')
    fisher_ind = 0.5 * np.log((1 + sm_price)/(1 - sm_price))
    sm_fisher = pd.Series(pd.ewma(fisher_ind, com = 1.0/smooth_i - 1, adjust = False), name = 'FISHER_I')
    return pd.concat([sm_price, sm_fisher], join='outer', axis=1)
Example #59
    def rollingStats(self, selectCol = [], splitCol=None, sepCol=None, startTime=None, endTime=None, window=60, quantile=0.1, freq='10s', min_periods=5 ):
        
        df = self.dfSetup()
        
        ## Selects a list of columns to use and splits a column into single type if it contains more than one
        # eg. if a file contains multiple sensor readings 
        if (len(selectCol) > 0):
            dfSub = df[selectCol]
            
        else:
            dfSub = df
        
        if (splitCol and sepCol):
            dfSub = dfSub[dfSub[splitCol] == sepCol]
        
        ## Converts datetime column to datatime object index, then use it to create time slices
        # Time format '2015-10-17 09:00:00' May use the dfOther to use other data frames
        if (startTime and endTime):
            dfSub = dfSub[ startTime : endTime ]
        
        else:
            dfSub = dfSub
        
        if (splitCol):
            dfSub = dfSub.drop(splitCol, axis=1) # Remove columns used to split entries
        
        
        valueName = dfSub.columns.values[0]
        outList = []
        
        counts = pd.rolling_count(dfSub,window,freq=freq).rename(columns = {valueName:'rolling_counts'})
        outList.append(counts)
        
        means = pd.rolling_mean(dfSub, window, min_periods=min_periods, freq=freq).rename(columns = {valueName:'rolling_mean'})
        outList.append(means)
        
        rms = np.sqrt(pd.rolling_mean(dfSub**2, window, min_periods=min_periods, freq=freq).rename(columns = {valueName:'rolling_rms'}) )
        outList.append(rms)
        
        medians = pd.rolling_median(dfSub, window, min_periods=min_periods, freq=freq).rename(columns = {valueName:'rolling_median'})
        outList.append(medians)
        
        stds = pd.rolling_std(dfSub, window, min_periods=min_periods, freq=freq).rename(columns = {valueName:'rolling_std'})
        outList.append(stds)
        
        mins = pd.rolling_min(dfSub, window, min_periods=min_periods, freq=freq).rename(columns = {valueName:'rolling_min'})
        outList.append(mins)
        
        maxs = pd.rolling_max(dfSub, window, min_periods=min_periods, freq=freq).rename(columns = {valueName:'rolling_max'})
        outList.append(maxs)
        
        quants = pd.rolling_quantile(dfSub, window, quantile, min_periods=min_periods, freq=freq).rename(columns = {valueName:'rolling_quantile'})
        outList.append(quants)

        
        dfOut = pd.concat(outList, axis=1)

        return dfOut