Example No. 1
# Assumes the usual imports for these snippets (e.g. datetime as dt, pandas as pd)
# and the course data helper get_data.
def testPolicy(symbol='JPM', sd=dt.datetime(2008,1,1), ed=dt.datetime(2009,12,31), sv=100000):
    prices = get_data([symbol], pd.date_range(sd, ed))
    prices = prices[symbol]

    # Indicators: simple moving average and Bollinger Band %
    SMA = sma(prices)
    BBP = bbp(prices, SMA)
    SMA = prices / SMA  # reuse the name for the price-to-SMA ratio

    # Raw signals: +1 = target long, -1 = target short, 0 = no signal
    orders = prices.copy()
    orders[:] = 0

    # Long when price is well below its SMA and under the lower band;
    # short when price is well above its SMA and over the upper band
    orders[(SMA < 0.95) & (BBP < 0)] = 1
    orders[(SMA > 1.05) & (BBP > 1)] = -1

    holdings = 0.0

    trades = []
    for date in orders.index:
        trade = 0
        if orders.loc[date] == 1:
            trade = 1000 - holdings
            trades.append((date, trade))
        elif orders.loc[date] == -1:
            trade = -1000 - holdings
            trades.append((date, trade))
        elif orders.loc[date] == 0:
            trade = 0
            trades.append((date, trade))
        holdings = holdings + trade

    df_trades = pd.DataFrame(trades, columns=["Date", symbol])
    df_trades.set_index("Date", inplace=True)

    return df_trades
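Examples 1, 2, and 7 call sma() and bbp() helpers that are defined elsewhere. A minimal sketch of what they might look like, assuming a 20-day window and the usual Bollinger Band % scaling (0 at the lower band, 1 at the upper band):

import pandas as pd

def sma(prices, window=20):
    # Simple moving average of the price series over the chosen window
    return prices.rolling(window=window).mean()

def bbp(prices, sma_values, window=20):
    # Bollinger Band %: 0 at the lower band (SMA - 2 std), 1 at the upper band
    rolling_std = prices.rolling(window=window).std()
    upper_band = sma_values + 2 * rolling_std
    lower_band = sma_values - 2 * rolling_std
    return (prices - lower_band) / (upper_band - lower_band)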
Example No. 2
    def testPolicy(self, symbol = "IBM", \
        sd=dt.datetime(2009,1,1), \
        ed=dt.datetime(2010,1,1), \
        sv = 10000):

        syms = [symbol]
        dates = pd.date_range(sd, ed)
        prices = ut.get_data(syms, dates)
        prices = prices[syms]

        SMA = sma(prices)
        BBP = bbp(prices, SMA)
        SMA = prices / SMA

        Xtest = pd.concat([SMA, BBP], axis=1)
        Xtest = Xtest.values

        Y = self.learner.query(Xtest)

        trades = pd.DataFrame(0.0, index=prices.index, columns=[symbol])

        holdings = 0.0

        # Trade toward +1000 / -1000 / 0 shares based on the learner's prediction
        # (Y is treated as one row of predictions); the last day is left untraded
        for i in range(0, trades.shape[0] - 1):
            if Y[0][i] >= 0.5:
                trades.iloc[i, 0] = 1000.0 - holdings
            elif Y[0][i] <= -0.5:
                trades.iloc[i, 0] = -1000.0 - holdings
            else:
                trades.iloc[i, 0] = 0.0
            holdings += trades.iloc[i, 0]

        return trades
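A returned trades frame can be sanity-checked by cumulating the trade column and confirming the position never leaves the -1000 / 0 / +1000 share range the loops above enforce. The snippet below is illustrative only; learner stands for an already-trained instance of the class and the dates are arbitrary:

import datetime as dt

df_trades = learner.testPolicy(symbol="JPM",
                               sd=dt.datetime(2010, 1, 1),
                               ed=dt.datetime(2011, 12, 31),
                               sv=100000)
holdings = df_trades["JPM"].cumsum()   # running position in shares
assert holdings.abs().max() <= 1000    # never more than 1000 shares long or short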
Example No. 3
def testPolicy(symbol,
               sd=dt.datetime(2008, 1, 1),
               ed=dt.datetime(2009, 12, 31),
               sv=100000):
    # get volume and prices
    pricesDF = get_data([symbol], pd.date_range(sd, ed)).drop(['SPY'], axis=1)
    volumeDF = get_data([symbol], pd.date_range(sd, ed),
                        colname="Volume").drop(['SPY'], axis=1)
    closeDF = get_data([symbol], pd.date_range(sd, ed),
                       colname="Close").drop(['SPY'], axis=1)
    # rescale volume by the close-to-adjusted-close ratio so it matches the adjusted prices
    volumeDF = (volumeDF * closeDF / pricesDF)

    # calc indicators
    bbpDF = ind.bbp(pricesDF, 10)
    rsi_obvDF = ind.rsi_obv(pricesDF, volumeDF, 14)
    #trixDF = ind.trix(pricesDF, 14)

    # check conditions:
    # close when trix crosses the average
    # buy  - when bbp < 0 or rsi of obv < 30
    # sell - when bbp > 1 or rsi of obv > 70
    '''trix_cross = pd.DataFrame(0, index=trixDF.index, columns=["JPM"])
        trix_cross[trixDF >= 1] = 1
        trix_cross[1:] = trix_cross.diff()'''
    orders = pd.DataFrame(np.nan,
                          index=pricesDF.index,
                          columns=pricesDF.columns)
    orders[(bbpDF < 0) | (rsi_obvDF < 30)] = 1000
    orders[(bbpDF > 1) | (rsi_obvDF > 70)] = -1000
    #orders[(trix_cross != 0)] = 0
    orders.ffill(inplace=True)      # hold the last signalled position
    orders.fillna(0, inplace=True)  # flat before the first signal
    orders = orders.diff()          # convert target holdings into daily trades
    orders.iloc[0] = 0
    return orders
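Examples 3, 5, and 6 rely on ind.rsi_obv, an RSI computed on on-balance volume rather than price. The module itself is not shown; a rough sketch of one common formulation (rolling-mean gains and losses, with the price and volume columns assumed to line up):

import numpy as np
import pandas as pd

def rsi_obv(prices, volume, lookback=14):
    # On-balance volume: add the day's volume on up days, subtract it on down days
    direction = np.sign(prices.diff()).fillna(0)
    obv = (direction * volume.values).cumsum()

    # Standard RSI formula applied to the OBV series instead of price
    delta = obv.diff()
    gains = delta.clip(lower=0).rolling(lookback).mean()
    losses = (-delta.clip(upper=0)).rolling(lookback).mean()
    return 100.0 - 100.0 / (1.0 + gains / losses)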
Example No. 4
    def testPolicy(self, symbol="IBM", \
                   sd=dt.datetime(2009, 1, 1), \
                   ed=dt.datetime(2010, 1, 1), \
                   sv=10000):

        ## Pull extra calendar days before sd so the 14-day indicators have a warm-up window
        lookback_dates = pd.date_range(sd - dt.timedelta(days=30), ed)

        ## Get prices
        prices = ut.get_data([symbol], lookback_dates)
        prices = prices.loc[:, [symbol]]
        prices = prices.fillna(method='ffill', inplace=False)
        prices = prices.fillna(method='bfill', inplace=False)

        ## Get indicators
        priceOverSMAValues = priceOverSMA(prices)
        bbpValues = bbp(prices)
        rsiValues = rsi(prices)

        ## Discretize each indicator into decile codes (0-9) over the test window
        self.prices = prices.loc[sd:]
        self.symbol = symbol
        priceOverSMAValues = priceOverSMAValues.loc[sd:]
        priceOverSMAValues[symbol] = pd.qcut(priceOverSMAValues[symbol].values,
                                             10).codes

        bbpValues = bbpValues.loc[sd:]
        bbpValues[symbol] = pd.qcut(bbpValues[symbol].values, 10).codes

        rsiValues = rsiValues.loc[sd:]
        rsiValues[symbol] = pd.qcut(rsiValues[symbol].values, 10).codes

        ## Pack the three decile codes into a single state in [0, 999]
        states = (priceOverSMAValues * 100) + (bbpValues * 10) + rsiValues * 1

        df_trades = priceOverSMAValues.copy()
        ## Query the learner with the first state (no reward is given on day 0)
        state = states[symbol].iloc[0]
        action = self.learner.querysetstate(state)

        total_days = states.shape[0]
        total_reward = 0
        day = 0

        net_position = self.NONE
        df_trades.iloc[day, 0] = self.addNewAction(net_position, day, action)

        for day in range(1, total_days):
            net_position = action
            total_reward += self.getReward(net_position, day)
            state = states[symbol].iloc[day]
            action = self.learner.querysetstate(state)
            df_trades.iloc[day, 0] = self.addNewAction(net_position, day, action)
        df_trades = df_trades[(df_trades.T != 0).any()]  # keep only days with a trade
        return df_trades
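Examples 4 and 8 call getReward and addNewAction and use class constants such as self.NONE, none of which are shown. As an illustration only, a reward consistent with how these loops use it could scale the day's return by the direction of the position held; treating LONG/NONE/SHORT as +1/0/-1 is an assumption:

    def getReward(self, net_position, day):
        # Daily return of the traded symbol times the direction held:
        # longs are rewarded when price rises, shorts when it falls, flat gets zero
        daily_return = (self.prices[self.symbol].iloc[day] /
                        self.prices[self.symbol].iloc[day - 1]) - 1.0
        return net_position * daily_return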
Example No. 5
    def addEvidence(self,
                    symbol="IBM",
                    sd=dt.datetime(2008, 1, 1),
                    ed=dt.datetime(2009, 1, 1),
                    sv=10000):
        syms = [symbol]
        dates = pd.date_range(sd, ed)
        prices_all = ut.get_data(syms, dates)
        pricesDF = prices_all[syms]
        prices_SPY = prices_all['SPY']
        volumeDF = ut.get_data(syms, dates, colname="Volume").drop(['SPY'],
                                                                   axis=1)
        closeDF = ut.get_data(syms, dates, colname="Close").drop(['SPY'],
                                                                 axis=1)
        volumeDF = (volumeDF * closeDF / pricesDF)
        if self.verbose:
            print(pricesDF)

        # calc indicators
        bbpDF = ind.bbp(pricesDF, 10)
        rsi_obvDF = ind.rsi_obv(pricesDF, volumeDF, 14)
        self.bbpBins = self.discretizing(bbpDF[symbol].values)
        self.obvBins = self.discretizing(rsi_obvDF[symbol].values)
        bbpDF = self.getDiscretValue(self.bbpBins, bbpDF[symbol].values)
        rsi_obvDF = self.getDiscretValue(self.obvBins,
                                         rsi_obvDF[symbol].values)

        prices = pricesDF[symbol].values
        orders = np.empty(len(prices))
        min_iter = 0  # counts training passes (compared against self.min_iter)
        while True:
            old_orders = np.copy(orders)
            orders[0:15] = 0  # no position during the 14-day indicator warm-up
            s = self.getState([bbpDF[14], rsi_obvDF[14]])
            self.learner.querysetstate(s)
            hold_value = 0
            impact_factor = 1
            for i in range(15, len(bbpDF)):
                r = ((prices[i] /
                      (prices[i - 1] * impact_factor)) - 1) * hold_value
                a = self.learner.query(s, r)
                hold_value = self.translateAction(a)
                orders[i] = hold_value
                impact_factor = self.calcImpact(hold_value)
                s = self.getState([bbpDF[i], rsi_obvDF[i]])
            if np.array_equal(
                    orders, old_orders
            ) and min_iter > self.min_iter:  #converged and min iterations
                break
            min_iter += 1
        return orders
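Examples 5 and 6 also depend on the class's own discretizing, getDiscretValue, and getState helpers. A plausible sketch, assuming percentile-based bin edges and a simple base-10 packing of the two indicator bins into one state index (10 bins per indicator assumed):

    def discretizing(self, values, bins=10):
        # Bin edges at evenly spaced percentiles of the training-period values
        clean = values[~np.isnan(values)]
        return np.percentile(clean, np.linspace(0, 100, bins + 1)[1:-1])

    def getDiscretValue(self, bin_edges, values):
        # Map each raw indicator value onto its bin index (0 .. bins-1)
        return np.digitize(values, bin_edges)

    def getState(self, bin_values):
        # Pack per-indicator bin indices into one integer, e.g. [3, 7] -> 37
        state = 0
        for b in bin_values:
            state = state * 10 + int(b)
        return state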
Example No. 6
    def testPolicy(self,
                   symbol="IBM",
                   sd=dt.datetime(2009, 1, 1),
                   ed=dt.datetime(2010, 1, 1),
                   sv=10000):
        syms = [symbol]
        dates = pd.date_range(sd, ed)
        prices_all = ut.get_data(syms, dates)
        pricesDF = prices_all[syms]
        prices_SPY = prices_all['SPY']
        volumeDF = ut.get_data(syms, dates, colname="Volume").drop(['SPY'],
                                                                   axis=1)
        closeDF = ut.get_data(syms, dates, colname="Close").drop(['SPY'],
                                                                 axis=1)
        volumeDF = (volumeDF * closeDF / pricesDF)
        if self.verbose:
            print(pricesDF)

        # calc indicators
        bbpDF = ind.bbp(pricesDF, 10)
        rsi_obvDF = ind.rsi_obv(pricesDF, volumeDF, 14)

        bbpDF = self.getDiscretValue(self.bbpBins, bbpDF[symbol].values)
        rsi_obvDF = self.getDiscretValue(self.obvBins,
                                         rsi_obvDF[symbol].values)
        prices = pricesDF[symbol].values
        orders = np.empty(len(prices))

        s = self.getState([bbpDF[14], rsi_obvDF[14]])
        self.learner.querysetstate(s)
        orders[0:15] = 0  # flat during the 14-day indicator warm-up
        for i in range(15, len(bbpDF)):
            a = self.learner.querysetstate(s)
            orders[i] = self.translateAction(a)
            s = self.getState([bbpDF[i], rsi_obvDF[i]])

        orders = pd.DataFrame(orders,
                              index=pricesDF.index,
                              columns=pricesDF.columns)
        if self.verbose:
            print(type(orders))  # it better be a DataFrame!
            print(orders)
            print(prices_all)
        orders = orders.diff()  # convert target holdings into day-by-day trades
        orders.iloc[0] = 0
        return orders
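translateAction and calcImpact (Examples 5 and 6) are likewise defined elsewhere in the class. One mapping consistent with a three-action Q-learner and the ±1000-share positions used throughout these examples; the specific action numbering and the self.impact attribute are assumptions:

    def translateAction(self, action):
        # Map the Q-learner's discrete action to a target holding in shares
        return {0: -1000, 1: 0, 2: 1000}[action]

    def calcImpact(self, hold_value):
        # Price-impact factor applied to the previous close in the reward:
        # holding long raises the reference price, holding short lowers it
        if hold_value > 0:
            return 1 + self.impact
        if hold_value < 0:
            return 1 - self.impact
        return 1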
Example No. 7
    def addEvidence(self, symbol = "IBM", \
        sd=dt.datetime(2008,1,1), \
        ed=dt.datetime(2009,1,1), \
        sv = 10000):

        leaf_size = 5
        bags = 50
        N = 10
        YBUY = 0.01
        YSELL = -0.01

        syms = [symbol]
        dates = pd.date_range(sd, ed)
        prices = ut.get_data(syms, dates)
        prices = prices[syms]

        SMA = sma(prices)
        BBP = bbp(prices, SMA)
        SMA = prices / SMA

        X = pd.concat([SMA, BBP], axis=1)
        X = X[:-N]
        X = X.values

        ndayreturns = (prices.shift(-N) / prices) - 1.0  # forward N-day return

        # Drop the last N rows (NaN forward returns) so Y stays aligned with X,
        # then label +1 (buy) / -1 (sell) / 0 (hold), widening the thresholds by impact
        ndayreturns = ndayreturns.dropna()
        Y = ndayreturns.applymap(lambda x: 1.0 if x > (YBUY + self.impact) else
                                 (-1.0 if x < (YSELL - self.impact) else 0.0))
        Y = Y.values

        self.learner = bl.BagLearner(learner=rt.RTLearner,
                                     kwargs={'leaf_size': leaf_size},
                                     bags=bags,
                                     boost=False,
                                     verbose=False)

        self.learner.addEvidence(X, Y)
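The labelling rule above tags each training sample from its forward N-day return, with the buy/sell thresholds widened by the impact parameter. For instance, with N = 10, YBUY = 0.01, YSELL = -0.01 and an illustrative impact of 0.005: a 10-day return of +0.020 exceeds 0.01 + 0.005 and is labelled +1.0 (buy); -0.004 falls between the widened thresholds and is labelled 0.0 (hold); -0.030 is below -0.01 - 0.005 and is labelled -1.0 (sell).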
Example No. 8
    def addEvidence(self, symbol = "IBM", \
        sd=dt.datetime(2008,1,1), \
        ed=dt.datetime(2009,1,1), \
        sv = 10000):

        ## Initialize Q Learner
        self.learner = ql.QLearner(num_states=3000, \
                                   num_actions=3, \
                                   alpha=0.2, \
                                   gamma=0.9, \
                                   rar=0.98, \
                                   radr=0.999, \
                                   dyna=0, \
                                   verbose=False)

        ## Pull extra calendar days before sd so the 14-day indicators have a warm-up window
        lookback_dates = pd.date_range(sd - dt.timedelta(days=30), ed)

        ## Get Prices
        prices = ut.get_data([symbol], lookback_dates)
        prices = prices.loc[:, [symbol]]
        prices = prices.fillna(method='ffill', inplace=False)
        prices = prices.fillna(method='bfill', inplace=False)

        ## Get indicators
        priceOverSMAValues = priceOverSMA(prices)
        bbpValues = bbp(prices)
        rsiValues = rsi(prices)

        self.prices = prices.loc[sd:]
        self.symbol = symbol

        ## Discretize each indicator into decile codes (0-9)
        priceOverSMAValues = priceOverSMAValues.loc[sd:]
        priceOverSMAValues[symbol] = pd.qcut(priceOverSMAValues[symbol].values,
                                             10).codes
        bbpValues = bbpValues.loc[sd:]
        bbpValues[symbol] = pd.qcut(bbpValues[symbol].values, 10).codes
        rsiValues = rsiValues.loc[sd:]
        rsiValues[symbol] = pd.qcut(rsiValues[symbol].values, 10).codes

        ## Set up the state: pack the three decile codes into a single value in [0, 999]
        states = (priceOverSMAValues * 100) + (bbpValues * 10) + rsiValues * 1

        ##Update Q Table until converged
        converged = False
        prev_total_reward = 0
        reward_match_count = 0
        while not converged:
            state = states[symbol].iloc[0]
            action = self.learner.querysetstate(state)
            net_position = self.NONE
            total_days = states.shape[0]
            total_reward = 0
            for day in range(1, total_days):
                net_position = action
                reward = self.getReward(net_position, day)
                total_reward += reward
                state = states[symbol].iloc[day]
                action = self.learner.query(state, reward)
            # Converge once the total reward matches the previous pass 5 times;
            # compare against the previous pass before updating it
            if total_reward == prev_total_reward:
                reward_match_count += 1
                if reward_match_count == 5:
                    converged = True
            prev_total_reward = total_reward