Example #1
    def test_case_three(self):
        print('\nTest_Case_Three')

        test_position = Broker(5)
        midpoint = 100.

        for i in range(10):
            order_open = MarketOrder(ccy='BTC-USD',
                                     side='long',
                                     price=midpoint - i,
                                     step=i)
            test_position.add(order=order_open)

        self.assertEqual(5, test_position.long_inventory.position_count)
        self.assertEqual(0, test_position.short_inventory.position_count)
        print('Confirm we have 5 positions: %i' %
              test_position.long_inventory.position_count)

        for i in range(10):
            order_open = MarketOrder(ccy='BTC-USD',
                                     side='long',
                                     price=midpoint + i,
                                     step=i)
            test_position.remove(order=order_open)

        self.assertEqual(0, test_position.long_inventory.position_count)
        self.assertEqual(0, test_position.short_inventory.position_count)
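
These test cases are ordinary unittest methods and need to live inside a unittest.TestCase subclass to run. A minimal harness sketch is below; the class name BrokerTestCase and the import path for Broker, MarketOrder, and LimitOrder are assumptions, not part of the examples.

import unittest

# Hypothetical import -- adjust to wherever Broker, MarketOrder, and
# LimitOrder are defined in your project:
# from gym_trading.utils.broker import Broker, MarketOrder, LimitOrder


class BrokerTestCase(unittest.TestCase):
    # Paste the test_* methods from these examples into this class body.
    pass


if __name__ == '__main__':
    unittest.main()
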
Example #2
    def test_case_one(self):
        print('\nTest_Case_One')

        test_position = Broker()
        midpoint = 100.
        fee = .003

        order_open = Order(ccy='BTC-USD', side='long', price=midpoint, step=1)
        test_position.add(order=order_open)

        self.assertEqual(1, test_position.long_inventory.position_count)
        print('LONG Unrealized_pnl: %f' %
              test_position.long_inventory.get_unrealized_pnl())

        self.assertEqual(0, test_position.short_inventory.position_count)
        self.assertEqual(0.,
                         test_position.short_inventory.get_unrealized_pnl())

        order_close = Order(ccy='BTC-USD',
                            side='long',
                            price=midpoint + (midpoint * fee * 5),
                            step=100)

        test_position.remove(order=order_close)
        self.assertEqual(0, test_position.long_inventory.position_count)
        print('LONG Unrealized_pnl: %f' %
              test_position.long_inventory.get_unrealized_pnl())

        self.assertEqual(test_position.short_inventory.position_count, 0)
        self.assertEqual(test_position.short_inventory.get_unrealized_pnl(),
                         0.)
        print('LONG Realized_pnl: %f' % test_position.get_realized_pnl())
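
For reference, the closing price used above is midpoint + (midpoint * fee * 5), i.e. five fee increments above the entry. A quick sketch of that arithmetic (how the Broker nets fees out of the realized PnL is internal to the Broker class and not shown here):

midpoint = 100.
fee = .003

# Exit price used by the test: five fee increments above the entry.
exit_price = midpoint + (midpoint * fee * 5)    # 100 + 100 * 0.003 * 5 = 101.5
gross_pnl_per_unit = exit_price - midpoint      # 1.5 before transaction fees
print(exit_price, gross_pnl_per_unit)
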
Example #3
    def test_case_two(self):
        print('\nTest_Case_Two')

        test_position = Broker()
        midpoint = 100.
        fee = .003

        order_open = MarketOrder(ccy='BTC-USD',
                                 side='short',
                                 price=midpoint,
                                 step=1)
        test_position.add(order=order_open)
        self.assertEqual(1, test_position.short_inventory.position_count)
        self.assertEqual(0, test_position.long_inventory.position_count)
        self.assertEqual(
            0.,
            test_position.long_inventory.get_unrealized_pnl(price=midpoint))
        print('SHORT Unrealized_pnl: %f' %
              test_position.short_inventory.get_unrealized_pnl(price=midpoint))

        order_close = MarketOrder(ccy='BTC-USD',
                                  side='short',
                                  price=midpoint - (midpoint * fee * 15),
                                  step=100)
        test_position.remove(order=order_close)
        self.assertEqual(0, test_position.short_inventory.position_count)
        self.assertEqual(0, test_position.long_inventory.position_count)
        self.assertEqual(
            0.,
            test_position.long_inventory.get_unrealized_pnl(price=midpoint))
        print('SHORT Unrealized_pnl: %f' %
              test_position.short_inventory.get_unrealized_pnl(price=midpoint))
        print('SHORT Realized_pnl: %f' % test_position.realized_pnl)
Example #4
    def test_long_pnl(self):
        test_position = Broker()
        step = 0
        bid_price = 101.
        ask_price = 102.
        buy_volume = 100
        sell_volume = 100
        pnl = 0.

        def walk_forward(pnl, step, bid_price, ask_price, buy_volume, sell_volume,
                         down=True):
            for i in range(50):
                step += 1
                if down:
                    bid_price *= 0.99
                    ask_price *= 0.99
                else:
                    bid_price *= 1.01
                    ask_price *= 1.01

                step_pnl, is_long_order_filled, is_short_order_filled = \
                    test_position.step_limit_order_pnl(
                        bid_price=bid_price, ask_price=ask_price,
                        buy_volume=buy_volume, sell_volume=sell_volume, step=step)
                pnl += step_pnl
                if i % 10 == 0:
                    print('bid_price={:.2f} | ask_price={:.2f}'.format(bid_price,
                                                                       ask_price))
            return step, bid_price, ask_price, buy_volume, sell_volume, pnl

        test_position.add(
            order=LimitOrder(ccy='BTC-USD', side='long', price=100., step=step,
                             queue_ahead=1000))

        step, _, _, buy_volume, sell_volume, pnl = walk_forward(pnl, step, bid_price,
                                                                ask_price, buy_volume,
                                                                sell_volume, down=True)
        self.assertEqual(1, test_position.long_inventory_count)

        test_position.add(
            order=LimitOrder(ccy='BTC-USD', side='short', price=105., step=step,
                             queue_ahead=0))
        _, _, _, _, _, pnl = walk_forward(pnl, step, bid_price, ask_price, buy_volume,
                                          sell_volume, down=False)
        realized_pnl = test_position.realized_pnl

        self.assertEqual(0.05, realized_pnl,
                         "Expected Realized PnL of 0.05 and got {}".format(realized_pnl))
        self.assertEqual(0,
                         test_position.short_inventory_count +
                         test_position.long_inventory_count)
        print("PnL: {}".format(pnl))
Example #5
    def test_avg_exe(self):
        test_position = Broker()

        # perform a partial fill on the first order
        step = 0
        bid_price = 101.
        ask_price = 102.
        buy_volume = 500
        sell_volume = 500
        pnl = 0.

        test_position.add(order=LimitOrder(ccy='BTC-USD',
                                           side='long',
                                           price=bid_price,
                                           step=step,
                                           queue_ahead=0))

        print("taking first step...")
        step += 1
        pnl += test_position.step_limit_order_pnl(bid_price=bid_price,
                                                  ask_price=ask_price,
                                                  buy_volume=buy_volume,
                                                  sell_volume=sell_volume,
                                                  step=step)
        self.assertEqual(500, test_position.long_inventory.order.executed)
        self.assertEqual(0, test_position.long_inventory_count)

        # if order gets filled with a bid below the order's price, the order should NOT
        # receive any price improvement during the execution.
        bid_price = 99.
        ask_price = 100.
        test_position.add(order=LimitOrder(ccy='BTC-USD',
                                           side='long',
                                           price=bid_price,
                                           step=step,
                                           queue_ahead=0))

        print("taking second step...")
        step += 1
        pnl += test_position.step_limit_order_pnl(bid_price=bid_price,
                                                  ask_price=ask_price,
                                                  buy_volume=buy_volume,
                                                  sell_volume=sell_volume,
                                                  step=step)
        self.assertEqual(1, test_position.long_inventory_count)
        self.assertEqual(100., test_position.long_inventory.average_price)
        print("PnL: {}".format(pnl))
Example #6
class PriceJump(BaseEnvironment):
    id = 'long-short-v0'

    def __init__(self,
                 *,
                 fitting_file='LTC-USD_2019-04-07.csv.xz',
                 testing_file='LTC-USD_2019-04-08.csv.xz',
                 step_size=1,
                 max_position=5,
                 window_size=10,
                 seed=1,
                 action_repeats=10,
                 training=True,
                 format_3d=False,
                 z_score=True,
                 reward_type='trade_completion',
                 scale_rewards=True):
        super(PriceJump, self).__init__(fitting_file=fitting_file,
                                        testing_file=testing_file,
                                        step_size=step_size,
                                        max_position=max_position,
                                        window_size=window_size,
                                        seed=seed,
                                        action_repeats=action_repeats,
                                        training=training,
                                        format_3d=format_3d,
                                        z_score=z_score,
                                        reward_type=reward_type,
                                        scale_rewards=scale_rewards)

        self.actions = np.eye(3, dtype=np.float32)

        # get Broker class to keep track of PnL and orders
        self.broker = Broker(max_position=max_position,
                             transaction_fee=MARKET_ORDER_FEE)

        self.action_space = spaces.Discrete(len(self.actions))
        self.reset()  # reset to load observation.shape
        self.observation_space = spaces.Box(low=-10,
                                            high=10,
                                            shape=self.observation.shape,
                                            dtype=np.float32)

        print(
            '{} PriceJump #{} instantiated.\nself.observation_space.shape : {}'
            .format(self.sym, self._seed, self.observation_space.shape))

    def __str__(self):
        return '{} | {}-{}'.format(PriceJump.id, self.sym, self._seed)

    def map_action_to_broker(self, action: int):
        """
        Create or adjust orders per a specified action and adjust for penalties.
        :param action: (int) current step's action
        :return: (float, float) reward, pnl
        """
        reward = pnl = 0.0
        discouragement = 0.000000000001

        if action == 0:  # do nothing
            reward += discouragement

        elif action == 1:  # buy
            if self.broker.short_inventory_count > 0:
                order = MarketOrder(ccy=self.sym,
                                    side='short',
                                    price=self.midpoint,
                                    step=self.local_step_number)
                pnl += self.broker.remove(order=order)

            elif self.broker.long_inventory_count >= 0:
                order = MarketOrder(
                    ccy=self.sym,
                    side='long',
                    price=self.midpoint,
                    # price_fee_adjusted,
                    step=self.local_step_number)
                if self.broker.add(order=order) is False:
                    reward -= discouragement

            else:
                print(
                    ('gym_trading.get_reward() ' + 'Error for action #{} - ' +
                     'unable to place an order with broker').format(action))

        elif action == 2:  # sell
            if self.broker.long_inventory_count > 0:
                order = MarketOrder(ccy=self.sym,
                                    side='long',
                                    price=self.midpoint,
                                    step=self.local_step_number)
                pnl += self.broker.remove(order=order)

            elif self.broker.short_inventory_count >= 0:
                order = MarketOrder(ccy=self.sym,
                                    side='short',
                                    price=self.midpoint,
                                    step=self.local_step_number)
                if self.broker.add(order=order) is False:
                    reward -= discouragement

            else:
                print(
                    ('gym_trading.get_reward() ' + 'Error for action #{} - ' +
                     'unable to place an order with broker').format(action))

        else:
            print(('Unknown action to take in get_reward(): ' +
                   'action={} | midpoint={}').format(action, self.midpoint))

        return reward, pnl

    def _create_position_features(self):
        """
        Create an array with features related to the agent's inventory
        :return: (np.array) normalized position features
        """
        return np.array(
            (self.broker.long_inventory.position_count / self.max_position,
             self.broker.short_inventory.position_count / self.max_position,
             self.broker.realized_pnl / self.broker.reward_scale,
             self.broker.get_unrealized_pnl(self.best_bid, self.best_ask) /
             self.broker.reward_scale),
            dtype=np.float32)
Example #7
class MarketMaker(BaseEnvironment):
    id = 'market-maker-v0'

    def __init__(self, transaction_fee=LIMIT_ORDER_FEE, **kwargs):
        """
        Environment designed for automated market making.
        :param kwargs: refer to BaseEnvironment.py
        """
        super(MarketMaker, self).__init__(**kwargs)

        # environment attributes to override in sub-class
        self.actions = np.eye(17, dtype=np.float32)

        # get Broker class to keep track of PnL and orders
        self.broker = Broker(max_position=self.max_position,
                             transaction_fee=transaction_fee)

        self.action_space = spaces.Discrete(len(self.actions))
        self.reset()  # reset to load observation.shape
        self.observation_space = spaces.Box(low=-10,
                                            high=10,
                                            shape=self.observation.shape,
                                            dtype=np.float32)

        print(
            '{} MarketMaker #{} instantiated\nobservation_space: {}'.format(
                self.sym, self._seed, self.observation_space.shape),
            'reward_type = {}'.format(self.reward_type))

    def __str__(self):
        return '{} | {}-{}'.format(MarketMaker.id, self.sym, self._seed)

    def map_action_to_broker(self, action: int):
        """
        Create or adjust orders per a specified action and adjust for penalties.
        :param action: (int) current step's action
        :return: (float, float) reward, pnl
        """
        reward = pnl = 0.0
        discouragement = 0.000000000001

        if action == 0:  # do nothing
            reward += discouragement

        elif action == 1:
            reward += self._create_order_at_level(reward,
                                                  discouragement,
                                                  level=0,
                                                  side='long')
            reward += self._create_order_at_level(reward,
                                                  discouragement,
                                                  level=4,
                                                  side='short')

        elif action == 2:
            reward += self._create_order_at_level(reward,
                                                  discouragement,
                                                  level=0,
                                                  side='long')
            reward += self._create_order_at_level(reward,
                                                  discouragement,
                                                  level=9,
                                                  side='short')
        elif action == 3:
            reward += self._create_order_at_level(reward,
                                                  discouragement,
                                                  level=0,
                                                  side='long')
            reward += self._create_order_at_level(reward,
                                                  discouragement,
                                                  level=14,
                                                  side='short')

        elif action == 4:
            reward += self._create_order_at_level(reward,
                                                  discouragement,
                                                  level=4,
                                                  side='long')
            reward += self._create_order_at_level(reward,
                                                  discouragement,
                                                  level=0,
                                                  side='short')

        elif action == 5:
            reward += self._create_order_at_level(reward,
                                                  discouragement,
                                                  level=4,
                                                  side='long')
            reward += self._create_order_at_level(reward,
                                                  discouragement,
                                                  level=4,
                                                  side='short')

        elif action == 6:
            reward += self._create_order_at_level(reward,
                                                  discouragement,
                                                  level=4,
                                                  side='long')
            reward += self._create_order_at_level(reward,
                                                  discouragement,
                                                  level=9,
                                                  side='short')
        elif action == 7:
            reward += self._create_order_at_level(reward,
                                                  discouragement,
                                                  level=4,
                                                  side='long')
            reward += self._create_order_at_level(reward,
                                                  discouragement,
                                                  level=14,
                                                  side='short')

        elif action == 8:
            reward += self._create_order_at_level(reward,
                                                  discouragement,
                                                  level=9,
                                                  side='long')
            reward += self._create_order_at_level(reward,
                                                  discouragement,
                                                  level=0,
                                                  side='short')

        elif action == 9:
            reward += self._create_order_at_level(reward,
                                                  discouragement,
                                                  level=9,
                                                  side='long')
            reward += self._create_order_at_level(reward,
                                                  discouragement,
                                                  level=4,
                                                  side='short')

        elif action == 10:
            reward += self._create_order_at_level(reward,
                                                  discouragement,
                                                  level=9,
                                                  side='long')
            reward += self._create_order_at_level(reward,
                                                  discouragement,
                                                  level=9,
                                                  side='short')

        elif action == 11:
            reward += self._create_order_at_level(reward,
                                                  discouragement,
                                                  level=9,
                                                  side='long')
            reward += self._create_order_at_level(reward,
                                                  discouragement,
                                                  level=14,
                                                  side='short')

        elif action == 12:
            reward += self._create_order_at_level(reward,
                                                  discouragement,
                                                  level=14,
                                                  side='long')
            reward += self._create_order_at_level(reward,
                                                  discouragement,
                                                  level=0,
                                                  side='short')

        elif action == 13:
            reward += self._create_order_at_level(reward,
                                                  discouragement,
                                                  level=14,
                                                  side='long')
            reward += self._create_order_at_level(reward,
                                                  discouragement,
                                                  level=4,
                                                  side='short')

        elif action == 14:
            reward += self._create_order_at_level(reward,
                                                  discouragement,
                                                  level=14,
                                                  side='long')
            reward += self._create_order_at_level(reward,
                                                  discouragement,
                                                  level=9,
                                                  side='short')

        elif action == 15:
            reward += self._create_order_at_level(reward,
                                                  discouragement,
                                                  level=14,
                                                  side='long')
            reward += self._create_order_at_level(reward,
                                                  discouragement,
                                                  level=14,
                                                  side='short')
        elif action == 16:
            reward += self.broker.flatten_inventory(self.best_bid,
                                                    self.best_ask)
        else:
            print("That action does not exist! Be careful!")

        return reward, pnl

    def _create_position_features(self):
        """
        Create an array with features related to the agent's inventory
        :return: (np.array) normalized position features
        """
        return np.array(
            (self.broker.long_inventory.position_count / self.max_position,
             self.broker.short_inventory.position_count / self.max_position,
             self.broker.get_total_pnl(self.best_bid, self.best_ask) *
             self.broker.reward_scale,
             self.broker.long_inventory.get_unrealized_pnl(self.best_bid) *
             self.broker.reward_scale,
             self.broker.short_inventory.get_unrealized_pnl(self.best_ask) *
             self.broker.reward_scale,
             self.broker.get_long_order_distance_to_midpoint(
                 midpoint=self.midpoint) * self.broker.reward_scale,
             self.broker.get_short_order_distance_to_midpoint(
                 midpoint=self.midpoint) * self.broker.reward_scale,
             *self.broker.get_queues_ahead_features()),
            dtype=np.float32)

    def _create_order_at_level(self,
                               reward: float,
                               discouragement: float,
                               level=0,
                               side='long'):
        """
        Create a new order at a specified LOB level
        :param reward: (float) current step reward
        :param discouragement: (float) penalty deducted from reward for erroneous actions
        :param level: (int) level in the limit order book
        :param side: (str) direction of trade e.g., 'long' or 'short'
        :return: (float) reward with penalties added
        """
        adjustment = 1 if level > 0 else 0

        if side == 'long':
            best = self._get_book_data(MarketMaker.best_bid_index - level)
            denormalized_best = round(self.midpoint * (best + 1), 2)
            inside_best = self._get_book_data(MarketMaker.best_bid_index -
                                              level + adjustment)
            denormalized_inside_best = round(self.midpoint * (inside_best + 1),
                                             2)
            plus_one = denormalized_best + 0.01

            if denormalized_inside_best == plus_one:
                # stick to best bid
                bid_price = denormalized_best
                # since LOB is rendered as cumulative notional, deduct the prior price
                # level to derive the notional value of orders ahead in the queue
                bid_queue_ahead = self._get_book_data(
                    MarketMaker.notional_bid_index -
                    level) - self._get_book_data(
                        MarketMaker.notional_bid_index - level + adjustment)
            else:
                # insert a cent ahead to jump a queue
                bid_price = plus_one
                bid_queue_ahead = 0.

            bid_order = LimitOrder(ccy=self.sym,
                                   side='long',
                                   price=bid_price,
                                   step=self.local_step_number,
                                   queue_ahead=bid_queue_ahead)

            if self.broker.add(order=bid_order) is False:
                reward -= discouragement
            else:
                reward += discouragement

        if side == 'short':
            best = self._get_book_data(MarketMaker.best_ask_index + level)
            denormalized_best = round(self.midpoint * (best + 1), 2)
            inside_best = self._get_book_data(MarketMaker.best_ask_index +
                                              level - adjustment)
            denormalized_inside_best = round(self.midpoint * (inside_best + 1),
                                             2)
            plus_one = denormalized_best - 0.01

            if denormalized_inside_best == plus_one:
                ask_price = denormalized_best
                # since LOB is rendered as cumulative notional, deduct the prior price
                # level to derive the notional value of orders ahead in the queue
                ask_queue_ahead = self._get_book_data(
                    MarketMaker.notional_ask_index +
                    level) - self._get_book_data(
                        MarketMaker.notional_ask_index + level - adjustment)
            else:
                ask_price = plus_one
                ask_queue_ahead = 0.

            ask_order = LimitOrder(ccy=self.sym,
                                   side='short',
                                   price=ask_price,
                                   step=self.local_step_number,
                                   queue_ahead=ask_queue_ahead)

            if self.broker.add(order=ask_order) is False:
                reward -= discouragement
            else:
                reward += discouragement

        return reward
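
Actions 1 through 15 above enumerate fifteen (long level, short level) pairs drawn from the book levels {0, 4, 9, 14}, with the (0, 0) pair unused and action 16 reserved for flattening inventory. The if/elif chain can be collapsed into a lookup table; the following is an equivalent sketch, not the original implementation, and it assumes _create_order_at_level behaves exactly as defined above.

# (long level, short level) per action; derived from the elif chain above.
ACTION_TO_LEVELS = {
    1: (0, 4),    2: (0, 9),    3: (0, 14),
    4: (4, 0),    5: (4, 4),    6: (4, 9),    7: (4, 14),
    8: (9, 0),    9: (9, 4),   10: (9, 9),   11: (9, 14),
    12: (14, 0), 13: (14, 4),  14: (14, 9),  15: (14, 14),
}

# Inside map_action_to_broker(), the elif branches for actions 1-15 could be
# replaced by:
#     long_level, short_level = ACTION_TO_LEVELS[action]
#     reward += self._create_order_at_level(reward, discouragement,
#                                           level=long_level, side='long')
#     reward += self._create_order_at_level(reward, discouragement,
#                                           level=short_level, side='short')
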
Example #8
    def test_queues_ahead_features(self):
        test_position = Broker()

        # perform a partial fill on the first order
        step = 0
        bid_price = 100.
        ask_price = 200.
        buy_volume = 0
        sell_volume = 0
        pnl = 0.

        order_open_long = LimitOrder(ccy='BTC-USD',
                                     side='long',
                                     price=bid_price,
                                     step=step,
                                     queue_ahead=0)
        order_open_short = LimitOrder(ccy='BTC-USD',
                                      side='short',
                                      price=ask_price,
                                      step=step,
                                      queue_ahead=2000)
        print('opening long position = {}'.format(order_open_long))
        test_position.add(order=order_open_long)
        print('opening short position = {}'.format(order_open_short))
        test_position.add(order=order_open_short)

        print('\ntaking first step...')
        step += 1
        step_pnl, is_long_order_filled, is_short_order_filled = \
            test_position.step_limit_order_pnl(
                bid_price=bid_price, ask_price=ask_price, buy_volume=buy_volume,
                sell_volume=sell_volume, step=step)
        pnl += step_pnl

        print("#1 long_inventory.order = \n{}".format(
            test_position.long_inventory.order))
        print("#1 short_inventory.order = \n{}".format(
            test_position.short_inventory.order))
        bid_queue, ask_queue = test_position.get_queues_ahead_features()
        print("#1 get_queues_ahead_features:\nbid_queue={} || ask_queue={}".
              format(bid_queue, ask_queue))
        self.assertEqual(0., bid_queue)
        self.assertEqual(-0.67, round(ask_queue, 2))

        print('\ntaking second step...')
        buy_volume = 500
        sell_volume = 500
        step += 1
        step_pnl, is_long_order_filled, is_short_order_filled = \
            test_position.step_limit_order_pnl(
                bid_price=bid_price, ask_price=ask_price, buy_volume=buy_volume,
                sell_volume=sell_volume, step=step)
        pnl += step_pnl

        print("#2 long_inventory.order = \n{}".format(
            test_position.long_inventory.order))
        print("#2 short_inventory.order = \n{}".format(
            test_position.short_inventory.order))
        bid_queue, ask_queue = test_position.get_queues_ahead_features()
        print("#2 get_queues_ahead_features:\nbid_queue={} || ask_queue={}".
              format(bid_queue, ask_queue))
        self.assertEqual(0.5, bid_queue)
        self.assertEqual(-0.6, round(ask_queue, 2))

        print('\ntaking third step...')
        buy_volume = 500
        sell_volume = 499
        step += 1
        step_pnl, is_long_order_filled, is_short_order_filled = \
            test_position.step_limit_order_pnl(
                bid_price=bid_price, ask_price=ask_price, buy_volume=buy_volume,
                sell_volume=sell_volume, step=step)
        pnl += step_pnl

        print("#3 long_inventory.order = \n{}".format(
            test_position.long_inventory.order))
        print("#3 short_inventory.order = \n{}".format(
            test_position.short_inventory.order))
        bid_queue, ask_queue = test_position.get_queues_ahead_features()
        print("#3 get_queues_ahead_features:\nbid_queue={} || ask_queue={}".
              format(bid_queue, ask_queue))
        self.assertEqual(0.999, bid_queue)
        self.assertEqual(-0.5, round(ask_queue, 2))

        print('\ntaking fourth step...')
        buy_volume = 500
        sell_volume = 500
        step += 1
        step_pnl, is_long_order_filled, is_short_order_filled = \
            test_position.step_limit_order_pnl(
                bid_price=bid_price, ask_price=ask_price, buy_volume=buy_volume,
                sell_volume=sell_volume, step=step)
        pnl += step_pnl

        print("#4 long_inventory.order = \n{}".format(
            test_position.long_inventory.order))
        print("#4 short_inventory.order = \n{}".format(
            test_position.short_inventory.order))
        bid_queue, ask_queue = test_position.get_queues_ahead_features()
        print("#4 get_queues_ahead_features:\nbid_queue={} || ask_queue={}".
              format(bid_queue, ask_queue))
        self.assertEqual(0.0, bid_queue)
        self.assertEqual(-0.33, round(ask_queue, 2))
        print("PnL: {}".format(pnl))
Example #9
    def test_lob_queuing(self):
        test_position = Broker()

        # perform a partial fill on the first order
        step = 0
        bid_price = 102.
        ask_price = 103.
        buy_volume = 500
        sell_volume = 500
        queue_ahead = 800
        pnl = 0.

        order_open = LimitOrder(ccy='BTC-USD',
                                side='long',
                                price=bid_price,
                                step=step,
                                queue_ahead=queue_ahead)
        test_position.add(order=order_open)

        step += 1
        step_pnl, is_long_order_filled, is_short_order_filled = \
            test_position.step_limit_order_pnl(
                bid_price=bid_price, ask_price=ask_price, buy_volume=buy_volume,
                sell_volume=sell_volume, step=step)
        pnl += step_pnl

        print("#1 long_inventory.order = \n{}".format(
            test_position.long_inventory.order))
        self.assertEqual(300, test_position.long_inventory.order.queue_ahead)
        self.assertEqual(0, test_position.long_inventory.order.executed)
        self.assertEqual(0, test_position.long_inventory_count)

        step += 1
        step_pnl, is_long_order_filled, is_short_order_filled = \
            test_position.step_limit_order_pnl(
                bid_price=bid_price, ask_price=ask_price, buy_volume=buy_volume,
                sell_volume=sell_volume, step=step)
        pnl += step_pnl

        print("#2 long_inventory.order = \n{}".format(
            test_position.long_inventory.order))
        self.assertEqual(200, test_position.long_inventory.order.executed)
        self.assertEqual(0, test_position.long_inventory_count)

        # if order gets filled with a bid below the order's price, the order should NOT
        # receive any price improvement during the execution.
        bid_price = 100.
        ask_price = 102.
        order_open = LimitOrder(ccy='BTC-USD',
                                side='long',
                                price=bid_price,
                                step=step,
                                queue_ahead=queue_ahead)
        test_position.add(order=order_open)
        print("#3 long_inventory.order = \n{}".format(
            test_position.long_inventory.order))
        self.assertEqual(0, test_position.long_inventory_count)

        bid_price = 100.
        for i in range(5):
            step += 1
            step_pnl, is_long_order_filled, is_short_order_filled = \
                test_position.step_limit_order_pnl(
                    bid_price=bid_price, ask_price=ask_price, buy_volume=buy_volume,
                    sell_volume=sell_volume, step=step)
            pnl += step_pnl

        self.assertEqual(1, test_position.long_inventory_count)
        self.assertEqual(100.40,
                         round(test_position.long_inventory.average_price, 2))
        print("PnL: {}".format(pnl))
Example #10
class PriceJump(Env):

    metadata = {'render.modes': ['human']}
    id = 'long-short-v0'
    # Set to True if Bitfinex is in the dataset (e.g., include_bitfinex=True)
    features = Sim.get_feature_labels(include_system_time=False,
                                      include_bitfinex=False)
    best_bid_index = features.index('coinbase-bid-distance-0')
    best_ask_index = features.index('coinbase-ask-distance-0')
    notional_bid_index = features.index('coinbase-bid-notional-0')
    notional_ask_index = features.index('coinbase-ask-notional-0')

    buy_trade_index = features.index('coinbase-buys')
    sell_trade_index = features.index('coinbase-sells')

    target_pnl = BROKER_FEE * 10 * 5  # e.g., 5 for max_positions
    fee = BROKER_FEE

    def __init__(self,
                 *,
                 fitting_file='ETH-USD_2018-12-31.xz',
                 testing_file='ETH-USD_2019-01-01.xz',
                 step_size=1,
                 max_position=5,
                 window_size=10,
                 seed=1,
                 action_repeats=10,
                 training=True,
                 format_3d=False,
                 z_score=True):

        # properties required for instantiation
        self.action_repeats = action_repeats
        self._seed = seed
        self._random_state = np.random.RandomState(seed=self._seed)
        self.training = training
        self.step_size = step_size
        self.max_position = max_position
        self.window_size = window_size
        self.format_3d = format_3d  # e.g., [window, features, *NEW_AXIS*]

        self.action = 0
        # derive gym.env properties
        self.actions = np.eye(3)

        self.sym = testing_file[:7]  # slice the CCY from the filename

        # properties that get reset()
        self.reward = 0.0
        self.done = False
        self.local_step_number = 0
        self.midpoint = 0.0
        self.observation = None

        # get Broker class to keep track of PnL and orders
        self.broker = Broker(max_position=max_position)
        # get historical data for simulations
        self.sim = Sim(use_arctic=False)

        self.data = self._load_environment_data(fitting_file, testing_file)
        # midpoint prices used to calculate PnL
        self.prices_ = self.data['coinbase_midpoint'].values

        self.normalized_data = self.data.copy()
        self.data = self.data.values

        self.max_steps = self.data.shape[0] - self.step_size * \
                         self.action_repeats - 1

        # normalize midpoint data
        self.normalized_data['coinbase_midpoint'] = \
            np.log(self.normalized_data['coinbase_midpoint'].values)
        self.normalized_data['coinbase_midpoint'] = (
            self.normalized_data['coinbase_midpoint'] -
            self.normalized_data['coinbase_midpoint'].shift(1)).fillna(0.)

        # load indicators into the indicator manager
        self.tns = IndicatorManager()
        self.rsi = IndicatorManager()
        for window in INDICATOR_WINDOW:
            self.tns.add(('tns_{}'.format(window), TnS(window=window)))
            self.rsi.add(('rsi_{}'.format(window), RSI(window=window)))

        if z_score:
            logger.info("Pre-scaling {}-{} data...".format(
                self.sym, self._seed))
            self.normalized_data = self.normalized_data.apply(self.sim.z_score,
                                                              axis=1).values
            logger.info("...{}-{} pre-scaling complete.".format(
                self.sym, self._seed))
        else:
            self.normalized_data = self.normalized_data.values

        # rendering class
        self._render = TradingGraph(sym=self.sym)
        # graph midpoint prices
        self._render.reset_render_data(
            y_vec=self.prices_[:np.shape(self._render.x_vec)[0]])
        # buffer for appending lags
        self.data_buffer = list()

        self.action_space = spaces.Discrete(len(self.actions))
        self.reset()  # reset to load observation.shape
        self.observation_space = spaces.Box(low=-10,
                                            high=10,
                                            shape=self.observation.shape,
                                            dtype=np.float32)

        print(
            '{} PriceJump #{} instantiated.\nself.observation_space.shape : {}'
            .format(self.sym, self._seed, self.observation_space.shape))

    def __str__(self):
        return '{} | {}-{}'.format(PriceJump.id, self.sym, self._seed)

    def step(self, action: int):
        for current_step in range(self.action_repeats):

            if self.done:
                self.reset()
                return self.observation, self.reward, self.done, {}

            # reset the reward if there ARE action repeats
            if current_step == 0:
                self.reward = 0.
                step_action = action
            else:
                step_action = 0

            # Get current step's midpoint
            self.midpoint = self.prices_[self.local_step_number]
            # Pass the current step's midpoint to the broker to calculate PnL
            # and to determine whether any open orders should be filled
            buy_volume = self._get_book_data(PriceJump.buy_trade_index)
            sell_volume = self._get_book_data(PriceJump.sell_trade_index)

            self.tns.step(buys=buy_volume, sells=sell_volume)
            self.rsi.step(price=self.midpoint)

            self.broker.step(midpoint=self.midpoint)

            self.reward += self._send_to_broker_and_get_reward(
                action=step_action)

            step_observation = self._get_step_observation(action=action)
            self.data_buffer.append(step_observation)

            if len(self.data_buffer) > self.window_size:
                del self.data_buffer[0]

            self.local_step_number += self.step_size

        self.observation = self._get_observation()

        if self.local_step_number > self.max_steps:
            self.done = True
            order = Order(ccy=self.sym,
                          side=None,
                          price=self.midpoint,
                          step=self.local_step_number)
            self.reward = self.broker.flatten_inventory(order=order)

        return self.observation, self.reward, self.done, {}

    def reset(self):
        if self.training:
            self.local_step_number = self._random_state.randint(
                low=1, high=self.data.shape[0] // 4)
        else:
            self.local_step_number = 0

        msg = ' {}-{} reset. Episode pnl: {:.4f} with {} trades | First step: {}'.format(
            self.sym, self._seed,
            self.broker.get_total_pnl(midpoint=self.midpoint),
            self.broker.get_total_trade_count(), self.local_step_number)
        logger.info(msg)

        self.reward = 0.0
        self.done = False
        self.broker.reset()
        self.data_buffer.clear()
        self.rsi.reset()
        self.tns.reset()

        for step in range(self.window_size + INDICATOR_WINDOW_MAX):
            self.midpoint = self.prices_[self.local_step_number]

            step_buy_volume = self._get_book_data(PriceJump.buy_trade_index)
            step_sell_volume = self._get_book_data(PriceJump.sell_trade_index)
            self.tns.step(buys=step_buy_volume, sells=step_sell_volume)
            self.rsi.step(price=self.midpoint)

            step_observation = self._get_step_observation(action=0)
            self.data_buffer.append(step_observation)

            self.local_step_number += self.step_size
            if len(self.data_buffer) > self.window_size:
                del self.data_buffer[0]

        self.observation = self._get_observation()

        return self.observation

    def render(self, mode='human'):
        self._render.render(midpoint=self.midpoint, mode=mode)

    def close(self):
        logger.info('{}-{} is being closed.'.format(self.id, self.sym))
        self.data = None
        self.normalized_data = None
        self.prices_ = None
        self.broker = None
        self.sim = None
        self.data_buffer = None
        self.tns = None
        self.rsi = None
        return

    def seed(self, seed=1):
        self._random_state = np.random.RandomState(seed=seed)
        self._seed = seed
        logger.info('Setting seed in PriceJump.seed({})'.format(seed))
        return [seed]

    @staticmethod
    def _process_data(_next_state):
        return np.clip(_next_state.reshape((1, -1)), -10., 10.)

    # def _process_data(self, _next_state):
    #     # return self.sim.scale_state(_next_state).values.reshape((1, -1))
    #     return np.reshape(_next_state, (1, -1))

    def _send_to_broker_and_get_reward(self, action):
        reward = 0.0
        discouragement = 0.000000000001

        if action == 0:  # do nothing
            reward += discouragement

        elif action == 1:  # buy
            price_fee_adjusted = self.midpoint + (PriceJump.fee *
                                                  self.midpoint)
            if self.broker.short_inventory_count > 0:
                order = Order(ccy=self.sym,
                              side='short',
                              price=price_fee_adjusted,
                              step=self.local_step_number)
                self.broker.remove(order=order)
                reward += self.broker.get_reward(side=order.side)

            elif self.broker.long_inventory_count >= 0:
                order = Order(ccy=self.sym,
                              side='long',
                              price=price_fee_adjusted,
                              step=self.local_step_number)
                if self.broker.add(order=order) is False:
                    reward -= discouragement

            else:
                logger.info(
                    ('gym_trading.get_reward() ' + 'Error for action #{} - ' +
                     'unable to place an order with broker').format(action))

        elif action == 2:  # sell
            price_fee_adjusted = self.midpoint - (PriceJump.fee *
                                                  self.midpoint)
            if self.broker.long_inventory_count > 0:
                order = Order(ccy=self.sym,
                              side='long',
                              price=price_fee_adjusted,
                              step=self.local_step_number)
                self.broker.remove(order=order)
                reward += self.broker.get_reward(side=order.side)
            elif self.broker.short_inventory_count >= 0:
                order = Order(ccy=self.sym,
                              side='short',
                              price=price_fee_adjusted,
                              step=self.local_step_number)
                if self.broker.add(order=order) is False:
                    reward -= discouragement

            else:
                logger.info(
                    ('gym_trading.get_reward() Error for action #{} - '
                     'unable to place an order with broker').format(action))

        else:
            logger.info(
                ('Unknown action to take in get_reward(): ' +
                 'action={} | midpoint={}').format(action, self.midpoint))

        return reward

    def _create_position_features(self):
        return np.array(
            (self.broker.long_inventory.position_count / self.max_position,
             self.broker.short_inventory.position_count / self.max_position,
             self.broker.get_total_pnl(midpoint=self.midpoint) /
             PriceJump.target_pnl,
             self.broker.long_inventory.get_unrealized_pnl(self.midpoint) /
             self.broker.reward_scale,
             self.broker.short_inventory.get_unrealized_pnl(self.midpoint) /
             self.broker.reward_scale),
            dtype=np.float32)

    def _create_action_features(self, action):
        return self.actions[action]

    def _create_indicator_features(self):
        return np.array((*self.tns.get_value(), *self.rsi.get_value()),
                        dtype=np.float32)

    def _get_nbbo(self):
        best_bid = round(
            self.midpoint - self._get_book_data(PriceJump.best_bid_index), 2)
        best_ask = round(
            self.midpoint + self._get_book_data(PriceJump.best_ask_index), 2)
        return best_bid, best_ask

    def _get_book_data(self, index=0):
        return self.data[self.local_step_number][index]

    def _get_step_observation(self, action=0):
        step_position_features = self._create_position_features()
        step_action_features = self._create_action_features(action=action)
        step_indicator_features = self._create_indicator_features()
        return np.concatenate(
            (self._process_data(self.normalized_data[self.local_step_number]),
             step_indicator_features, step_position_features,
             step_action_features, np.array([self.reward])),
            axis=None)

    def _get_observation(self):
        observation = np.array(self.data_buffer, dtype=np.float32)
        # Expand the observation space from 2 to 3 dimensions.
        # This is necessary for conv nets in Baselines.
        if self.format_3d:
            observation = np.expand_dims(observation, axis=-1)
        return observation

    def _load_environment_data(self, fitting_file, testing_file):
        fitting_data_filepath = '{}/data_exports/{}'.format(
            self.sim.cwd, fitting_file)
        data_used_in_environment = '{}/data_exports/{}'.format(
            self.sim.cwd, testing_file)
        fitting_data = self.sim.import_csv(filename=fitting_data_filepath)
        fitting_data['coinbase_midpoint'] = np.log(
            fitting_data['coinbase_midpoint'].values)
        fitting_data['coinbase_midpoint'] = (
            fitting_data['coinbase_midpoint'] -
            fitting_data['coinbase_midpoint'].shift(1)).fillna(method='bfill')
        self.sim.fit_scaler(fitting_data)
        del fitting_data
        return self.sim.import_csv(filename=data_used_in_environment)
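
PriceJump implements the standard gym.Env interface (reset, step, render, seed, close), so a typical interaction loop looks like the sketch below. The constructor arguments are the defaults from __init__ above; the random policy is only a placeholder.

env = PriceJump(fitting_file='ETH-USD_2018-12-31.xz',
                testing_file='ETH-USD_2019-01-01.xz',
                training=False)
observation = env.reset()
done = False
while not done:
    action = env.action_space.sample()                # placeholder policy
    observation, reward, done, info = env.step(action)
env.close()
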
Example #11
class PriceJump(BaseEnvironment):
    id = 'long-short-v0'

    def __init__(self, transaction_fee=MARKET_ORDER_FEE, **kwargs):
        """
        Environment designed to trade price jumps using market orders
        :param kwargs: refer to BaseEnvironment.py
        """
        super(PriceJump, self).__init__(**kwargs)

        # environment attributes to override in sub-class
        self.actions = np.eye(3, dtype=np.float32)

        # get Broker class to keep track of PnL and orders
        self.broker = Broker(max_position=self.max_position,
                             transaction_fee=transaction_fee)

        self.action_space = spaces.Discrete(len(self.actions))
        self.reset()  # reset to load observation.shape
        self.observation_space = spaces.Box(low=-10,
                                            high=10,
                                            shape=self.observation.shape,
                                            dtype=np.float32)

        print(
            '{} PriceJump #{} instantiated.\nobservation_space : {}'.format(
                self.sym, self._seed, self.observation_space.shape),
            'reward_type = {}'.format(self.reward_type))

    def __str__(self):
        return '{} | {}-{}'.format(PriceJump.id, self.sym, self._seed)

    def map_action_to_broker(self, action: int):
        """
        Create or adjust orders per a specified action and adjust for penalties.
        :param action: (int) current step's action
        :return: (float, float) reward, pnl
        """
        reward = pnl = 0.0
        discouragement = 0.000000000001

        if action == 0:  # do nothing
            reward += discouragement

        elif action == 1:  # buy
            if self.broker.short_inventory_count > 0:
                order = MarketOrder(ccy=self.sym,
                                    side='short',
                                    price=self.midpoint,
                                    step=self.local_step_number)
                pnl += self.broker.remove(order=order)

            elif self.broker.long_inventory_count >= 0:
                order = MarketOrder(
                    ccy=self.sym,
                    side='long',
                    price=self.midpoint,
                    # price_fee_adjusted,
                    step=self.local_step_number)
                if self.broker.add(order=order) is False:
                    reward -= discouragement

            else:
                print((
                    'gym_trading.get_reward() Error for action #{} - unable to place '
                    'an order with broker').format(action))

        elif action == 2:  # sell
            if self.broker.long_inventory_count > 0:
                order = MarketOrder(ccy=self.sym,
                                    side='long',
                                    price=self.midpoint,
                                    step=self.local_step_number)
                pnl += self.broker.remove(order=order)

            elif self.broker.short_inventory_count >= 0:
                order = MarketOrder(ccy=self.sym,
                                    side='short',
                                    price=self.midpoint,
                                    step=self.local_step_number)
                if self.broker.add(order=order) is False:
                    reward -= discouragement

            else:
                print((
                    'gym_trading.get_reward() Error for action #{} - unable to place '
                    'an order with broker').format(action))

        else:
            print((
                'Unknown action to take in get_reward(): action={} | midpoint={}'
            ).format(action, self.midpoint))

        return reward, pnl

    def _create_position_features(self):
        """
        Create an array with features related to the agent's inventory
        :return: (np.array) normalized position features
        """
        return np.array(
            (self.broker.long_inventory.position_count / self.max_position,
             self.broker.short_inventory.position_count / self.max_position,
             self.broker.realized_pnl * self.broker.reward_scale,
             self.broker.get_unrealized_pnl(self.best_bid, self.best_ask) *
             self.broker.reward_scale),
            dtype=np.float32)
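
The class attribute id = 'long-short-v0' suggests the environment is meant to be registered with gym so it can be built via gym.make(). A minimal registration sketch follows; the entry_point module path is an assumption and should be replaced with the actual package path of PriceJump.

import gym
from gym.envs.registration import register

register(
    id='long-short-v0',
    entry_point='gym_trading.envs:PriceJump',   # hypothetical module path
    kwargs={'fitting_file': 'LTC-USD_2019-04-07.csv.xz',
            'testing_file': 'LTC-USD_2019-04-08.csv.xz'},
)

env = gym.make('long-short-v0')
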
Example #12
class PriceJump(Env):

    metadata = {'render.modes': ['human']}
    id = 'long-short-v0'
    # Set to True if Bitfinex is in the dataset (e.g., include_bitfinex=True)
    features = Sim.get_feature_labels(include_system_time=False,
                                      include_bitfinex=False)
    best_bid_index = features.index('coinbase_bid_distance_0')
    best_ask_index = features.index('coinbase_ask_distance_0')
    notional_bid_index = features.index('coinbase_bid_notional_0')
    notional_ask_index = features.index('coinbase_ask_notional_0')

    buy_trade_index = features.index('coinbase_buys')
    sell_trade_index = features.index('coinbase_sells')

    target_pnl = 0.03  # 3.0% gain per episode (i.e., day)
    fee = MARKET_ORDER_FEE

    def __init__(self,
                 *,
                 fitting_file='LTC-USD_2019-04-07.csv.xz',
                 testing_file='LTC-USD_2019-04-08.csv.xz',
                 step_size=1,
                 max_position=5,
                 window_size=10,
                 seed=1,
                 action_repeats=10,
                 training=True,
                 format_3d=False,
                 z_score=True):

        # properties required for instantiation
        self.action_repeats = action_repeats
        self._seed = seed
        self._random_state = np.random.RandomState(seed=self._seed)
        self.training = training
        self.step_size = step_size
        self.max_position = max_position
        self.window_size = window_size
        self.format_3d = format_3d  # e.g., [window, features, *NEW_AXIS*]

        self.action = 0
        # derive gym.env properties
        self.actions = np.eye(3, dtype=np.float32)

        self.sym = testing_file[:7]  # slice the CCY from the filename

        # properties that get reset()
        self.reward = 0.0
        self.done = False
        self.local_step_number = 0
        self.midpoint = 0.0
        self.observation = None

        # get Broker class to keep track of PnL and orders
        self.broker = Broker(max_position=max_position)
        # get historical data for simulations
        self.sim = Sim(use_arctic=False, z_score=z_score)

        self.prices_, self.data, self.normalized_data = self.sim.load_environment_data(
            fitting_file, testing_file)

        self.max_steps = self.data.shape[0] - self.step_size * \
            self.action_repeats - 1

        # load indicators into the indicator manager
        self.tns = IndicatorManager()
        self.rsi = IndicatorManager()
        for window in INDICATOR_WINDOW:
            self.tns.add(('tns_{}'.format(window), TnS(window=window)))
            self.rsi.add(('rsi_{}'.format(window), RSI(window=window)))

        # rendering class
        self._render = TradingGraph(sym=self.sym)

        # graph midpoint prices
        self._render.reset_render_data(
            y_vec=self.prices_[:np.shape(self._render.x_vec)[0]])

        # buffer for appending lags
        self.data_buffer = list()

        self.action_space = spaces.Discrete(len(self.actions))
        self.reset()  # reset to load observation.shape
        self.observation_space = spaces.Box(low=-10,
                                            high=10,
                                            shape=self.observation.shape,
                                            dtype=np.float32)

        print(
            '{} PriceJump #{} instantiated.\nself.observation_space.shape : {}'
            .format(self.sym, self._seed, self.observation_space.shape))

    def __str__(self):
        return '{} | {}-{}'.format(PriceJump.id, self.sym, self._seed)

    def step(self, action: int):
        for current_step in range(self.action_repeats):

            if self.done:
                self.reset()
                return self.observation, self.reward, self.done, {}

            # apply the agent's action only on the first sub-step and reset the
            # accumulated reward; remaining sub-steps repeat 'do nothing' (0)
            if current_step == 0:
                self.reward = 0.
                step_action = action
            else:
                step_action = 0

            # get the current step's midpoint price
            self.midpoint = self.prices_[self.local_step_number]
            # pull the executed buy/sell trade volumes for the current step
            buy_volume = self._get_book_data(PriceJump.buy_trade_index)
            sell_volume = self._get_book_data(PriceJump.sell_trade_index)

            self.tns.step(buys=buy_volume, sells=sell_volume)
            self.rsi.step(price=self.midpoint)

            # pass the current midpoint to the broker to mark open positions
            # to market and update unrealized PnL
            self.broker.step(midpoint=self.midpoint)

            self.reward += self._send_to_broker_and_get_reward(
                action=step_action)

            step_observation = self._get_step_observation(action=action)
            self.data_buffer.append(step_observation)

            if len(self.data_buffer) > self.window_size:
                del self.data_buffer[0]

            self.local_step_number += self.step_size

        self.observation = self._get_observation()

        if self.local_step_number > self.max_steps:
            self.done = True
            order = Order(ccy=self.sym,
                          side=None,
                          price=self.midpoint,
                          step=self.local_step_number)
            self.reward = self.broker.flatten_inventory(order=order)

        return self.observation, self.reward, self.done, {}

    def reset(self):
        if self.training:
            self.local_step_number = self._random_state.randint(
                low=1, high=self.data.shape[0] // 4)
        else:
            self.local_step_number = 0

        msg = ' {}-{} reset. Episode pnl: {:.4f} with {} trades. First step: {}'.format(
            self.sym, self._seed,
            self.broker.get_total_pnl(midpoint=self.midpoint),
            self.broker.get_total_trade_count(), self.local_step_number)
        logger.info(msg)

        self.reward = 0.0
        self.done = False
        self.broker.reset()
        self.data_buffer.clear()
        self.rsi.reset()
        self.tns.reset()

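        # warm up the indicators and pre-fill the data buffer so the first
        # observation returned by reset() already holds a full window of history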
        for step in range(self.window_size + INDICATOR_WINDOW_MAX):
            self.midpoint = self.prices_[self.local_step_number]

            step_buy_volume = self._get_book_data(PriceJump.buy_trade_index)
            step_sell_volume = self._get_book_data(PriceJump.sell_trade_index)
            self.tns.step(buys=step_buy_volume, sells=step_sell_volume)
            self.rsi.step(price=self.midpoint)

            step_observation = self._get_step_observation(action=0)
            self.data_buffer.append(step_observation)

            self.local_step_number += self.step_size
            if len(self.data_buffer) > self.window_size:
                del self.data_buffer[0]

        self.observation = self._get_observation()

        return self.observation

    def render(self, mode='human'):
        self._render.render(midpoint=self.midpoint, mode=mode)

    def close(self):
        logger.info('{}-{} is being closed.'.format(self.id, self.sym))
        self.data = None
        self.normalized_data = None
        self.prices_ = None
        self.broker = None
        self.sim = None
        self.data_buffer = None
        self.tns = None
        self.rsi = None
        return

    def seed(self, seed=1):
        self._random_state = np.random.RandomState(seed=seed)
        self._seed = seed
        logger.info('Setting seed in PriceJump.seed({})'.format(seed))
        return [seed]

    @staticmethod
    def _process_data(_next_state):
        """
        Reshape the observation and clip outliers to the range [-10, 10].
        :param _next_state: observation space
        :return: (np.array) clipped observation space
        """
        return np.clip(_next_state.reshape((1, -1)), -10., 10.)

    def _send_to_broker_and_get_reward(self, action: int):
        """
        Create or close orders according to the specified action and apply reward penalties.
        :param action: (int) current step's action
        :return: (float) reward
        """
        reward = 0.0
        discouragement = 1e-12  # tiny penalty to discourage unproductive actions

        if action == 0:  # do nothing
            reward += discouragement

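        # Note: a buy (action 1) first covers an open short position, realizing
        # its PnL as the reward; otherwise it opens a new long position. A sell
        # (action 2) mirrors this logic below.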
        elif action == 1:  # buy
            price_fee_adjusted = self.midpoint + (PriceJump.fee *
                                                  self.midpoint)
            if self.broker.short_inventory_count > 0:
                order = Order(ccy=self.sym,
                              side='short',
                              price=price_fee_adjusted,
                              step=self.local_step_number)
                self.broker.remove(order=order)
                reward += self.broker.get_reward(side=order.side) / \
                    self.broker.reward_scale  # scale realized PnL

            elif self.broker.long_inventory_count >= 0:
                order = Order(ccy=self.sym,
                              side='long',
                              price=price_fee_adjusted,
                              step=self.local_step_number)
                if self.broker.add(order=order) is False:
                    reward -= discouragement

            else:
                logger.info(
                    ('gym_trading.get_reward() Error for action #{} - '
                     'unable to place an order with broker').format(action))

        elif action == 2:  # sell
            price_fee_adjusted = self.midpoint - (PriceJump.fee *
                                                  self.midpoint)
            if self.broker.long_inventory_count > 0:
                order = Order(ccy=self.sym,
                              side='long',
                              price=price_fee_adjusted,
                              step=self.local_step_number)
                self.broker.remove(order=order)
                reward += self.broker.get_reward(side=order.side) / \
                    self.broker.reward_scale  # scale realized PnL
            elif self.broker.short_inventory_count >= 0:
                order = Order(ccy=self.sym,
                              side='short',
                              price=price_fee_adjusted,
                              step=self.local_step_number)
                if self.broker.add(order=order) is False:
                    reward -= discouragement

            else:
                logger.info(
                    ('gym_trading.get_reward() Error for action #{} - '
                     'unable to place an order with broker').format(action))

        else:
            logger.info(
                ('Unknown action to take in get_reward(): '
                 'action={} | midpoint={}').format(action, self.midpoint))

        return reward

    def _create_position_features(self):
        """
        Create an array with features related to the agent's inventory
        :return: (np.array) normalized position features
        """
        return np.array(
            (self.broker.long_inventory.position_count / self.max_position,
             self.broker.short_inventory.position_count / self.max_position,
             self.broker.get_total_pnl(midpoint=self.midpoint) /
             PriceJump.target_pnl,
             self.broker.long_inventory.get_unrealized_pnl(self.midpoint) /
             self.broker.reward_scale,
             self.broker.short_inventory.get_unrealized_pnl(self.midpoint) /
             self.broker.reward_scale),
            dtype=np.float32)

    def _create_action_features(self, action):
        """
        Create a features array for the current time step's action.
        :param action: (int) action number
        :return: (np.array) One-hot of current action
        """
        return self.actions[action]

    def _create_indicator_features(self):
        """
        Create features vector with environment indicators.
        :return: (np.array) Indicator values for current time step
        """
        return np.array((*self.tns.get_value(), *self.rsi.get_value()),
                        dtype=np.float32)

    def _get_nbbo(self):
        """
        Derive the best bid and best ask from the midpoint and the order book
        distance features.
        :return: (tuple) best bid and best ask prices
        """
        best_bid = round(
            self.midpoint - self._get_book_data(PriceJump.best_bid_index), 2)
        best_ask = round(
            self.midpoint + self._get_book_data(PriceJump.best_ask_index), 2)
        return best_bid, best_ask

    def _get_book_data(self, index=0):
        """
        Return a single order book feature for the current step.
        :param index: (int) column index of the feature to look up
        :return: (float) feature value at the current step
        """
        return self.data[self.local_step_number][index]

    def _get_step_observation(self, action=0):
        """
        Current step observation, NOT including historical data.
        :param action: (int) current step action
        :return: (np.array) Current step observation
        """
        step_position_features = self._create_position_features()
        step_action_features = self._create_action_features(action=action)
        step_indicator_features = self._create_indicator_features()
        return np.concatenate(
            (self._process_data(self.normalized_data[self.local_step_number]),
             step_indicator_features, step_position_features,
             step_action_features, np.array([self.reward])),
            axis=None)

    def _get_observation(self):
        """
        Current step observation, including historical data.

        If format_3d is True, expand the observation space from 2 to 3 dimensions.
        (Note: this is necessary for convolutional networks in Baselines.)
        :return: (np.array) Observation state for current time step
        """
        observation = np.array(self.data_buffer, dtype=np.float32)
        if self.format_3d:
            observation = np.expand_dims(observation, axis=-1)
        return observation
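
A minimal usage sketch (not part of the original example), assuming the gym_trading dependencies and the LTC-USD data files referenced by the constructor defaults are available locally; it drives PriceJump with random actions for a single episode.

env = PriceJump(fitting_file='LTC-USD_2019-04-07.csv.xz',
                testing_file='LTC-USD_2019-04-08.csv.xz',
                training=False)

observation = env.reset()
done = False
episode_reward = 0.0

while not done:
    # sample a random action: 0 = do nothing, 1 = buy, 2 = sell
    action = env.action_space.sample()
    observation, reward, done, _ = env.step(action)
    episode_reward += reward

print('Episode reward: {:.6f}'.format(episode_reward))
env.close()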