Code example #1
    def receiveMessage(self, currentTime, msg):
        super().receiveMessage(currentTime, msg)
        if msg.body['msg'] == 'ORDER_EXECUTED':
            self.handleOrderExecution(currentTime, msg)
        elif msg.body['msg'] == 'ORDER_ACCEPTED':
            self.handleOrderAcceptance(currentTime, msg)

        if currentTime > self.end_time:
            log_print(
                f'[---- {self.name} - {currentTime} ----]: current time {currentTime} is after specified end time of POV order '
                f'{self.end_time}. TRADING CONCLUDED. ')
            return

        if self.rem_quantity > 0 and \
                self.state == 'AWAITING_TRANSACTED_VOLUME' \
                and msg.body['msg'] == 'QUERY_TRANSACTED_VOLUME' \
                and self.transacted_volume[self.symbol] is not None \
                and currentTime > self.start_time:
            qty = round(self.pov * self.transacted_volume[self.symbol])
            self.cancelOrders()
            self.placeMarketOrder(self.symbol, qty, self.direction == 'BUY')
            log_print(
                f'[---- {self.name} - {currentTime} ----]: TOTAL TRANSACTED VOLUME IN THE LAST {self.look_back_period} = {self.transacted_volume[self.symbol]}'
            )
            log_print(
                f'[---- {self.name} - {currentTime} ----]: MARKET ORDER PLACED - {qty}'
            )
Code example #2
    def wakeup(self, currentTime):
        # Parent class handles discovery of exchange times and market_open wakeup call.
        super().wakeup(currentTime)

        self.state = 'INACTIVE'

        if not self.mkt_open or not self.mkt_close:
            # TradingAgent handles discovery of exchange times.
            return
        else:
            if not self.trading:
                self.trading = True

                # Time to start trading!
                log_print("{} is ready to start trading now.", self.name)

        # Steady state wakeup behavior starts here.

        # If we've been told the market has closed for the day, we will only request
        # final price information, then stop.
        if self.mkt_closed and (self.symbol in self.daily_close_price):
            # Market is closed and we already got the daily close price.
            return

        delta_time = self.random_state.exponential(scale=1.0 / self.lambda_a)
        self.setWakeup(currentTime + pd.Timedelta('{}ns'.format(int(round(delta_time)))))

        if self.mkt_closed and (not self.symbol in self.daily_close_price):
            self.getCurrentSpread(self.symbol)
            self.state = 'AWAITING_SPREAD'
            return

        self.cancelOrders()

        if type(self) == ValueAgent:
            self.getCurrentSpread(self.symbol)
            self.state = 'AWAITING_SPREAD'
        else:
            self.state = 'ACTIVE'
Code example #3
    def getBookLiquidity(book: Sequence[Tuple[int, int]],
                         within: Union[int, float]) -> int:
        """
        Helper function for the above. Checks one side of the known order book.

        Args:
            book:     one side of the known order book, as (price, shares) pairs with the best price first
            within:   proportion of the best price; levels whose distance from the best price is at most round(best * within) are counted

        Returns:
            Total share liquidity within the threshold
        """
        liq = 0
        best = book[0][0]
        threshold = round(best * within)
        for price, shares in book:
            # Is this price within "within" proportion of the best price?
            if abs(best - price) <= threshold:
                log_print(
                    f"Within {within} of {best}: {price} with {shares} shares")
                liq += shares
        return liq
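The threshold logic above counts all shares whose price lies within a fixed proportion of the best price. A minimal standalone sketch with made-up numbers (the helper name and the toy book below are illustrative, not taken from the source):

def book_liquidity(book, within):
    # Mirror of getBookLiquidity above, minus logging: count shares at every level
    # whose distance from the best price is at most round(best * within).
    liq = 0
    best = book[0][0]
    threshold = round(best * within)
    for price, shares in book:
        if abs(best - price) <= threshold:
            liq += shares
    return liq

# One side of a book as (price, shares), best price first. With within=0.01 the
# threshold is round(100000 * 0.01) = 1000, so the 98900 level (1100 away) is excluded.
bids = [(100000, 50), (99900, 80), (99500, 120), (98900, 300)]
print(book_liquidity(bids, 0.01))  # 50 + 80 + 120 = 250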
Code example #4
    def processQuerySpreadReply(self, symbol: str, price: int,
                                bids: List[Tuple[int, int]],
                                asks: List[Tuple[int, int]]) -> None:
        """
        Handle QueryLastSpreadReply messages from the ExchangeAgent.

        Args:
            symbol:  trading symbol
            price:   last trade price for the symbol
            bids:    list of bids
            asks:    list of asks

        Returns:
            None
        """
        # The spread message now also includes last price for free.
        self.processQueryLastTrade(symbol, price)

        self.known_bids[symbol] = bids
        if bids:
            best_bid, best_bid_qty = bids[0]
        else:
            best_bid = best_bid_qty = 0

        self.known_asks[symbol] = asks
        if asks:
            best_ask, best_ask_qty = asks[0]
        else:
            best_ask = best_ask_qty = 0

        log_print(
            f"Received spread of {best_bid_qty} @ {best_bid} / {best_ask_qty} @ {best_ask} for {symbol}"
        )

        self.logEvent("BID_DEPTH", bids)
        self.logEvent("ASK_DEPTH", asks)
        self.logEvent("IMBALANCE",
                      (sum(x[1] for x in bids), sum(x[1] for x in asks)))
Code example #5
 def handleOrderExecution(self, currentTime, msg):
     executed_order = msg.body['order']
     self.executed_orders.append(executed_order)
     executed_qty = sum(executed_order.quantity
                        for executed_order in self.executed_orders)
     self.rem_quantity = self.quantity - executed_qty
     log_print('[---- {} - {} ----]: LIMIT ORDER EXECUTED - {} @ {}'.format(
         self.name, currentTime, executed_order.quantity,
         executed_order.fill_price))
     log_print('[---- {} - {} ----]: EXECUTED QUANTITY: {}'.format(
         self.name, currentTime, executed_qty))
     log_print('[---- {} - {} ----]: REMAINING QUANTITY: {}'.format(
         self.name, currentTime, self.rem_quantity))
     log_print('[---- {} - {} ----]: % EXECUTED: {} \n'.format(
         self.name, currentTime,
         round((1 - self.rem_quantity / self.quantity) * 100, 2)))
Code example #6
File: core.py Project: andrewsonin/abides
    def setWakeup(self,
                  sender_id: int,
                  requested_time: Optional[pd.Timestamp] = None) -> None:
        # Called by an agent to receive a "wakeup call" from the kernel
        # at some requested future time. Defaults to the next possible
        # timestamp. Wakeup time cannot be the current time or a past time.
        # Sender is required and should be the ID of the agent making the call.
        # The agent is responsible for maintaining any required state; the
        # kernel will not supply any parameters to the wakeup() call.

        if requested_time is None:
            requested_time = self.current_time + one_ns_timedelta
        elif requested_time < self.current_time:
            raise ValueError(
                "setWakeup() called with requested time not in future",
                "currentTime:", self.current_time, "requestedTime:",
                requested_time)

        log_print(
            f"Kernel adding wakeup for agent {sender_id} at time {self.fmtTime(requested_time)}"
        )

        self.message_queue.put((requested_time, (sender_id, WakeUp())))
Code example #7
    def wakeup(self, currentTime):
        # Parent class handles discovery of exchange times and market_open wakeup call.
        super().wakeup(currentTime)

        self.state = 'INACTIVE'

        if not self.mkt_open or not self.mkt_close:
            # TradingAgent handles discovery of exchange times.
            return
        else:
            if not self.trading:
                self.trading = True

                # Time to start trading!
                log_print("{} is ready to start trading now.", self.name)

        # Steady state wakeup behavior starts here.

        # If we've been told the market has closed for the day, we will only request
        # final price information, then stop.
        if self.mkt_closed and (self.symbol in self.daily_close_price):
            # Market is closed and we already got the daily close price.
            return

        if self.wakeup_time[0] > currentTime:
            self.setWakeup(self.wakeup_time[0])

        if self.mkt_closed and (not self.symbol in self.daily_close_price):
            self.getCurrentSpread(self.symbol)
            self.state = 'AWAITING_SPREAD'
            return

        if type(self) == NoiseAgent:
            self.getCurrentSpread(self.symbol)
            self.state = 'AWAITING_SPREAD'
        else:
            self.state = 'ACTIVE'
Code example #8
    def placeOrders(self, currentTime):
        if currentTime.floor('1s') == self.execution_time_horizon[-2]:
            self.placeMarketOrder(symbol=self.symbol,
                                  quantity=self.rem_quantity,
                                  is_buy_order=self.direction == 'BUY')
        elif currentTime.floor('1s') in self.execution_time_horizon[:-2]:
            bid, _, ask, _ = self.getKnownBidAsk(self.symbol)

            if currentTime.floor('1s') == self.start_time:
                self.arrival_price = (bid + ask) / 2
                log_print("[---- {}  - {} ----]: Arrival Mid Price {}".format(
                    self.name, currentTime, self.arrival_price))

            qty = self.schedule[pd.Interval(
                currentTime.floor('1s'),
                currentTime.floor('1s') + datetime.timedelta(minutes=1))]
            price = ask if self.direction == 'BUY' else bid
            self.placeLimitOrder(symbol=self.symbol,
                                 quantity=qty,
                                 is_buy_order=self.direction == 'BUY',
                                 limit_price=price)
            log_print(
                '[---- {} - {} ----]: LIMIT ORDER PLACED - {} @ {}'.format(
                    self.name, currentTime, qty, price))
Code example #9
    def processOrderCancelled(self, order: Order) -> None:
        """
        Handle OrderCancelled messages from the ExchangeAgent.

        Args:
            order:  cancelled order

        Returns:
            None
        """
        log_print(f"Received notification of cancellation for: {order}")
        if self.log_orders:
            self.logEvent('ORDER_CANCELLED', order.to_dict())

        # Remove the cancelled order from the open orders list.  We may of course wish to have
        # additional logic here later, so agents can easily "look for" cancelled orders.  Of
        # course they can just override this method.
        order_id = order.order_id
        orders = self.orders
        if order_id in orders:
            del orders[order_id]
        else:
            log_print(
                f"Cancellation received for order not in orders list: {order}")
Code example #10
    def receiveMessage(self, currentTime, msg):
        # Allow the base Agent to do whatever it needs to.
        super().receiveMessage(currentTime, msg)

        if msg.body['msg'] == "PEER_EXCHANGE":

            # Ensure we don't somehow record the same peer twice.
            if msg.body['sender'] not in self.peers_received:
                self.peers_received[msg.body['sender']] = True
                self.peer_sum += msg.body['n']

                if len(self.peers_received) == len(self.peer_list):
                    # We just heard from the final peer.  Initiate our first sum request.
                    log_print("agent {} heard from final peer.  peers_received = {}, peer_sum = {}",
                              self.id, self.peers_received, self.peer_sum)

                    self.peer_exchange_complete = True
                    self.setWakeup(currentTime + pd.Timedelta('1ns'))

        elif msg.body['msg'] == "SUM_QUERY_RESPONSE":
            log_print("Agent {} received sum query response: {}", self.id, msg)

            # Now schedule a new query.
            self.setWakeup(currentTime + pd.Timedelta('1m'))
Code example #11
 def handleOrderExecution(self, currentTime, msg):
     executed_order = msg.body['order']
     self.executed_orders.append(executed_order)
     executed_qty = sum(executed_order.quantity
                        for executed_order in self.executed_orders)
     self.rem_quantity = self.quantity - executed_qty
     log_print(
         f'[---- {self.name} - {currentTime} ----]: LIMIT ORDER EXECUTED - {executed_order.quantity} @ {executed_order.fill_price}'
     )
     log_print(
         f'[---- {self.name} - {currentTime} ----]: EXECUTED QUANTITY: {executed_qty}'
     )
     log_print(
         f'[---- {self.name} - {currentTime} ----]: REMAINING QUANTITY (NOT EXECUTED): {self.rem_quantity}'
     )
     log_print(
         f'[---- {self.name} - {currentTime} ----]: % EXECUTED: {round((1 - self.rem_quantity / self.quantity) * 100, 2)} \n'
     )
Code example #12
    def processOrderExecuted(self, order: Order) -> None:
        """
        Handle OrderExecuted messages from the ExchangeAgent.

        Args:
            order:  executed order

        Returns:
            None
        """
        log_print(f"Received notification of execution for: {order}")
        if self.log_orders:
            self.logEvent('ORDER_EXECUTED', order.to_dict())

        # At the very least, we must update CASH and holdings at execution time.
        symbol = order.symbol
        quantity = order.quantity
        if order.is_buy_order:
            quantity = -quantity

        holdings = self.holdings
        if symbol in holdings:
            holdings[symbol] += quantity
        else:
            holdings[symbol] = quantity

        if not holdings[symbol]:
            del holdings[symbol]

        # As with everything else, CASH holdings are in CENTS.
        holdings['CASH'] -= quantity * order.fill_price

        # If this original order is now fully executed, remove it from the open orders list.
        # Otherwise, decrement by the quantity filled just now.  It is _possible_ that due
        # to timing issues, it might not be in the order list (i.e. we issued a cancellation
        # but it was executed first, or something).
        orders = self.orders
        order_id = order.order_id
        if order_id in orders:
            order_found = orders[order_id]

            if order.quantity >= order_found.quantity:
                del orders[order_id]
            else:
                order_found.quantity -= order.quantity
        else:
            log_print(
                f"Execution received for order not in orders list: {order}")

        log_print(f"After execution, agent open orders: {orders}")
        self.logEvent('HOLDINGS_UPDATED', holdings)
Code example #13
    def placeOrder(self):
        # Called when it is time for the agent to determine a limit price and place an order.
        # updateEstimates() returns the agent's current total valuation for the share it
        # is considering to trade and whether it will buy or sell that share.
        v, buy = self.updateEstimates()

        # Select a requested surplus for this trade.
        R = self.random_state.randint(self.R_min, self.R_max + 1)

        # Determine the limit price.
        p = v - R if buy else v + R

        # Either place the constructed order, or if the agent could secure (eta * R) surplus
        # immediately by taking the inside bid/ask, do that instead.
        bid, bid_vol, ask, ask_vol = self.getKnownBidAsk(self.symbol)
        if buy and ask_vol > 0:
            R_ask = v - ask
            if R_ask >= (self.eta * R):
                log_print(
                    "{} desired R = {}, but took R = {} at ask = {} due to eta",
                    self.name, R, R_ask, ask)
                p = ask
            else:
                log_print("{} demands R = {}, limit price {}", self.name, R, p)
        elif (not buy) and bid_vol > 0:
            R_bid = bid - v
            if R_bid >= (self.eta * R):
                log_print(
                    "{} desired R = {}, but took R = {} at bid = {} due to eta",
                    self.name, R, R_bid, bid)
                p = bid
            else:
                log_print("{} demands R = {}, limit price {}", self.name, R, p)

        # Place the order.
        size = 100
        self.placeLimitOrder(self.symbol, size, buy, p)
Code example #14
    def kernelStopping(self):
        # Always call parent method to be safe.
        super().kernelStopping()

        # Print end of day valuation.
        H = int(round(self.getHoldings(self.symbol), -2) / 100)
        # May request real fundamental value from oracle as part of final cleanup/stats.
        if self.symbol != 'ETF':
            rT = self.oracle.observePrice(self.symbol,
                                          self.currentTime,
                                          sigma_n=0,
                                          random_state=self.random_state)
        else:
            portfolio_rT, rT = self.oracle.observePortfolioPrice(
                self.symbol,
                self.portfolio,
                self.currentTime,
                sigma_n=0,
                random_state=self.random_state)

        # Start with surplus as private valuation of shares held.
        if H > 0:
            surplus = sum(
                [self.theta[x + self.q_max - 1] for x in range(1, H + 1)])
        elif H < 0:
            surplus = -sum(
                [self.theta[x + self.q_max - 1] for x in range(H + 1, 1)])
        else:
            surplus = 0

        log_print("surplus init: {}", surplus)

        # Add final (real) fundamental value times shares held.
        surplus += rT * H

        log_print("surplus after holdings: {}", surplus)

        # Add ending cash value and subtract starting cash value.
        surplus += self.holdings['CASH'] - self.starting_cash

        self.logEvent('FINAL_VALUATION', surplus, True)

        log_print(
            "{} final report.  Holdings {}, end cash {}, start cash {}, final fundamental {}, preferences {}, surplus {}",
            self.name, H, self.holdings['CASH'], self.starting_cash, rT,
            self.theta, surplus)
Code example #15
    def generate_schedule(self):

        schedule = {}
        bins = pd.interval_range(start=self.start_time,
                                 end=self.end_time,
                                 freq=self.freq)
        child_quantity = int(self.quantity / len(self.execution_time_horizon))
        for b in bins:
            schedule[b] = child_quantity
        log_print('[---- {} {} - Schedule ----]:'.format(
            self.name, self.currentTime))
        log_print('[---- {} {} - Total Number of Orders ----]: {}'.format(
            self.name, self.currentTime, len(schedule)))
        for t, q in schedule.items():
            log_print("From: {}, To: {}, Quantity: {}".format(
                t.left.time(), t.right.time(), q))
        return schedule
Code example #16
    def generate_schedule(self):

        if self.volume_profile_path is None:
            volume_profile = VWAPExecutionAgent.synthetic_volume_profile(
                self.start_time, self.freq)
        else:
            volume_profile = pd.read_pickle(self.volume_profile_path).to_dict()

        schedule = {}
        bins = pd.interval_range(start=self.start_time,
                                 end=self.end_time,
                                 freq=self.freq)
        for b in bins:
            schedule[b] = round(volume_profile[b.left] * self.quantity)
        log_print('[---- {} {} - Schedule ----]:'.format(
            self.name, self.currentTime))
        log_print('[---- {} {} - Total Number of Orders ----]: {}'.format(
            self.name, self.currentTime, len(schedule)))
        for t, q in schedule.items():
            log_print("From: {}, To: {}, Quantity: {}".format(
                t.left.time(), t.right.time(), q))
        return schedule
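Code examples #15 and #16 differ only in how each bin's child quantity is chosen: the TWAP version splits the parent order evenly across bins, while the VWAP version weights each bin by a volume profile keyed by the bin's start time. A small illustrative sketch with made-up times, quantities, and profile weights (the toy profile below merely stands in for whatever synthetic_volume_profile or the pickled profile would return):

import pandas as pd

start = pd.Timestamp('2020-01-01 09:30')
end = pd.Timestamp('2020-01-01 09:35')
quantity = 1000
bins = pd.interval_range(start=start, end=end, freq='1min')      # five one-minute bins

# TWAP-style schedule: equal child quantity per bin (Code example #15 divides by the
# execution time horizon; dividing by the number of bins is a simplification here).
twap = {b: int(quantity / len(bins)) for b in bins}               # 200 shares per bin

# VWAP-style schedule: child quantity proportional to a normalized volume profile.
profile = {b.left: w for b, w in zip(bins, [0.10, 0.15, 0.30, 0.25, 0.20])}
vwap = {b: round(profile[b.left] * quantity) for b in bins}       # 100, 150, 300, 250, 200

for b in bins:
    print(b.left.time(), b.right.time(), twap[b], vwap[b])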
Code example #17
    def wakeup(self, currentTime):
        # Allow the base Agent to do whatever it needs to.
        super().wakeup(currentTime)

        # This agent only needs one wakeup call at simulation start.  At this time,
        # each client agent will send a number to each agent in its peer list.
        # Each number will be sampled independently.  That is, client agent 1 will
        # send n2 to agent 2, n3 to agent 3, and so forth.

        # Once a client agent has received these initial random numbers from all
        # agents in the peer list, it will make its first request from the sum
        # service.  Afterwards, it will simply request new sums when answers are
        # delivered to previous queries.

        # At the first wakeup, initiate peer exchange.
        if not self.peer_exchange_complete:
            n = [self.random_state.randint(low=0, high=100) for i in range(len(self.peer_list))]
            log_print("agent {} peer list: {}", self.id, self.peer_list)
            log_print("agent {} numbers to exchange: {}", self.id, n)

            for idx, peer in enumerate(self.peer_list):
                self.sendMessage(peer, Message({"msg": "PEER_EXCHANGE", "sender": self.id, "n": n[idx]}))

        else:
            # For subsequent (self-induced) wakeups, place a sum query.
            n1, n2 = [self.random_state.randint(low=0, high=100) for i in range(2)]

            log_print("agent {} transmitting numbers {} and {} with peer sum {}", self.id, n1, n2, self.peer_sum)

            # Add the sum of the peer exchange values to both numbers.
            n1 += self.peer_sum
            n2 += self.peer_sum

            self.sendMessage(self.serviceAgentID, Message({"msg": "SUM_QUERY", "sender": self.id,
                                                           "n1": n1, "n2": n2}))

        return
Code example #18
    def updateEstimates(self):
        # Called by a background agent that wishes to obtain a new fundamental observation,
        # update its internal estimation parameters, and compute a new total valuation for the
        # action it is considering.

        # The agent obtains a new noisy observation of the current fundamental value
        # and uses this to update its internal estimates in a Bayesian manner.
        obs_t = self.oracle.observePrice(self.symbol,
                                         self.currentTime,
                                         sigma_n=self.sigma_n,
                                         random_state=self.random_state)

        log_print("{} observed {} at {}", self.name, obs_t, self.currentTime)

        # Flip a coin to decide if we will buy or sell a unit at this time.
        q = int(self.getHoldings(self.symbol) /
                100)  # q now represents an index to how many 100 lots are held

        if q >= self.q_max:
            buy = False
            log_print("Long holdings limit: agent will SELL")
        elif q <= -self.q_max:
            buy = True
            log_print("Short holdings limit: agent will BUY")
        else:
            buy = bool(self.random_state.randint(0, 2))
            log_print("Coin flip: agent will {}", "BUY" if buy else "SELL")

        # Update internal estimates of the current fundamental value and our error of same.

        # If this is our first estimate, treat the previous wake time as "market open".
        if self.prev_wake_time is None: self.prev_wake_time = self.mkt_open

        # First, obtain an intermediate estimate of the fundamental value by advancing
        # time from the previous wake time to the current time, performing mean
        # reversion at each time step.

        # delta must be integer time steps since last wake
        delta = (self.currentTime - self.prev_wake_time) / np.timedelta64(
            1, 'ns')

        # Update r estimate for time advancement.
        r_tprime = (1 - (1 - self.kappa)**delta) * self.r_bar
        r_tprime += ((1 - self.kappa)**delta) * self.r_t

        # Update sigma estimate for time advancement.
        sigma_tprime = ((1 - self.kappa)**(2 * delta)) * self.sigma_t
        sigma_tprime += ((1 - (1 - self.kappa)**(2 * delta)) /
                         (1 - (1 - self.kappa)**2)) * self.sigma_s

        # Apply the new observation, with "confidence" in the observation inversely proportional
        # to the observation noise, and "confidence" in the previous estimate inversely proportional
        # to the shock variance.
        self.r_t = (self.sigma_n / (self.sigma_n + sigma_tprime)) * r_tprime
        self.r_t += (sigma_tprime / (self.sigma_n + sigma_tprime)) * obs_t

        self.sigma_t = (self.sigma_n * self.sigma_t) / (self.sigma_n +
                                                        self.sigma_t)

        # Now having a best estimate of the fundamental at time t, we can make our best estimate
        # of the final fundamental (for time T) as of current time t.  Delta is now the number
        # of time steps remaining until the simulated exchange closes.
        delta = max(0, (self.mkt_close - self.currentTime) /
                    np.timedelta64(1, 'ns'))

        # IDEA: instead of letting agent "imagine time forward" to the end of the day,
        #       impose a maximum forward delta, like ten minutes or so.  This could make
        #       them think more like traders and less like long-term investors.  Add
        #       this line of code (keeping the max() line above) to try it.
        # delta = min(delta, 1000000000 * 60 * 10)

        r_T = (1 - (1 - self.kappa)**delta) * self.r_bar
        r_T += ((1 - self.kappa)**delta) * self.r_t

        # Our final fundamental estimate should be quantized to whole units of value.
        r_T = int(round(r_T))

        # Finally (for the final fundamental estimation section) remember the current
        # time as the previous wake time.
        self.prev_wake_time = self.currentTime

        log_print("{} estimates r_T = {} as of {}", self.name, r_T,
                  self.currentTime)

        # Determine the agent's total valuation.
        q += (self.q_max - 1)
        theta = self.theta[q + 1 if buy else q]
        v = r_T + theta

        log_print("{} total unit valuation is {} (theta = {})", self.name, v,
                  theta)

        # Return values needed to implement strategy and select limit price.
        return v, buy
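The estimate maintained by updateEstimates proceeds in three steps: advance the previous posterior (r_t, sigma_t) forward by delta nanoseconds under mean reversion toward r_bar, blend in the new noisy observation with weights set by the relative variances, then project the blended estimate to the market close. A standalone numeric sketch of the same arithmetic; every parameter value below is made up purely for illustration:

# Made-up parameters: long-run mean, mean-reversion rate per ns, shock variance,
# observation noise variance, previous posterior, new observation, elapsed ns.
r_bar, kappa = 100_000, 1.67e-12
sigma_s, sigma_n = 1e-4, 1e6
r_t, sigma_t = 100_050, 5e5
obs_t = 100_200
delta = 1_000_000_000

# Step 1: advance the posterior from the previous wake time to now (mean reversion).
r_tprime = (1 - (1 - kappa) ** delta) * r_bar + ((1 - kappa) ** delta) * r_t
sigma_tprime = ((1 - kappa) ** (2 * delta)) * sigma_t \
    + ((1 - (1 - kappa) ** (2 * delta)) / (1 - (1 - kappa) ** 2)) * sigma_s

# Step 2: blend in the observation, weighting by the relative variances
# (the sigma_t update mirrors the code above, which reuses the pre-advance sigma_t).
r_t = (sigma_n / (sigma_n + sigma_tprime)) * r_tprime \
    + (sigma_tprime / (sigma_n + sigma_tprime)) * obs_t
sigma_t = (sigma_n * sigma_t) / (sigma_n + sigma_t)

# Step 3: project the estimate forward to the close (here, one hour away).
delta = 3_600_000_000_000
r_T = int(round((1 - (1 - kappa) ** delta) * r_bar + ((1 - kappa) ** delta) * r_t))
print(r_t, sigma_t, r_T)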
Code example #19
    def wakeup(self, currentTime):
        # Parent class handles discovery of exchange times and market_open wakeup call.
        super().wakeup(currentTime)

        self.state = 'INACTIVE'

        if not self.mkt_open or not self.mkt_close:
            # TradingAgent handles discovery of exchange times.
            return
        else:
            if not self.trading:
                self.trading = True

                # Time to start trading!
                log_print("{} is ready to start trading now.", self.name)

        # Steady state wakeup behavior starts here.

        # If we've been told the market has closed for the day, we will only request
        # final price information, then stop.
        if self.mkt_closed and (self.symbol in self.daily_close_price):
            # Market is closed and we already got the daily close price.
            return

        # Schedule a wakeup for the next time this agent should arrive at the market
        # (following the conclusion of its current activity cycle).
        # We do this early in case some of our expected message responses don't arrive.

        # Agents should arrive according to a Poisson process.  This is equivalent to
        # each agent independently sampling its next arrival time from an exponential
        # distribution in alternate Beta formation with Beta = 1 / lambda, where lambda
        # is the mean arrival rate of the Poisson process.
        delta_time = self.random_state.exponential(scale=1.0 / self.lambda_a)
        self.setWakeup(currentTime +
                       pd.Timedelta('{}ns'.format(int(round(delta_time)))))

        # If the market has closed and we haven't obtained the daily close price yet,
        # do that before we cease activity for the day.  Don't do any other behavior
        # after market close.
        if self.mkt_closed and (not self.symbol in self.daily_close_price):
            self.getCurrentSpread(self.symbol)
            self.state = 'AWAITING_SPREAD'
            return

        # Issue cancel requests for any open orders.  Don't wait for confirmation, as presently
        # the only reason it could fail is that the order already executed.  (But requests won't
        # be generated for those, anyway, unless something strange has happened.)
        self.cancelOrders()

        # The ZI agent doesn't try to maintain a zero position, so there is no need to exit positions
        # as some "active trading" agents might.  It might exit a position based on its order logic,
        # but this will be as a natural consequence of its beliefs.

        # In order to use the "strategic threshold" parameter (eta), the ZI agent needs the current
        # spread (inside bid/ask quote).  It would not otherwise need any trade/quote information.

        # If the calling agent is a subclass, don't initiate the strategy section of wakeup(), as it
        # may want to do something different.

        if type(self) == ZeroIntelligenceAgent:
            self.getCurrentSpread(self.symbol)
            self.state = 'AWAITING_SPREAD'
        else:
            self.state = 'ACTIVE'
Code example #20
File: core.py Project: andrewsonin/abides
    def sendMessage(self,
                    sender: 'Agent',
                    recipient_id: int,
                    msg: MessageAbstractBase,
                    delay: int = 0) -> None:
        """
        Called by an Agent to send a message to another agent. The kernel
        supplies its own current_time (i.e. "now") to prevent possible
        abuse by agents. The kernel will handle computational delay penalties
        and/or network latency. The message must derive from the Message class.
        The optional delay parameter represents an agent's request for ADDITIONAL
        delay (beyond the Kernel's mandatory computation + latency delays) to represent
        parallel pipeline processing delays (that should delay the transmission of messages
        but do not make the agent "busy" and unable to respond to new messages).

        Apply the agent's current computation delay to effectively "send" the message
        at the END of the agent's current computation period when it is done "thinking".

        NOTE: sending multiple messages on a single wake will transmit all at the same
        time, at the end of computation. To avoid this, use Agent.delay() to accumulate
        a temporary delay (current cycle only) that will also stagger messages.

        The optional pipeline delay parameter DOES push the send time forward, since it
        represents "thinking" time before the message would be sent. We don't use this
        for much yet, but it could be important later.

        This means message delay (before latency) is the agent's standard computation delay
        PLUS any accumulated delay for this wake cycle PLUS any one-time requested delay
        for this specific message only.

        Args:
            sender:        sender Agent
            recipient_id:  recipient ID
            msg:           message
            delay:         delay

        Returns:
            None
        """

        sender_id = sender.id
        sentTime = self.current_time + pd.Timedelta(
            self.agent_computation_delays[sender_id] +
            self.current_agent_additional_delay + delay)

        # Apply communication delay per the agentLatencyModel, if defined, or the
        # agentLatency matrix [sender][recipient] otherwise.

        latency, noise = self.agent_latency_model.get_latency_and_noise(
            sender_id, recipient_id)
        deliverAt = sentTime + pd.Timedelta(latency + noise)
        log_print(
            f"Kernel applied latency {latency}, noise {noise}, "
            f"accumulated delay {self.current_agent_additional_delay}, one-time delay {delay} "
            f"on sendMessage from: {self.agents[sender_id].name} to {self.agents[recipient_id].name}, "
            f"scheduled for {self.fmtTime(deliverAt)}")

        # Finally drop the message in the queue with priority == delivery time.
        self.message_queue.put((deliverAt, (recipient_id, msg)))

        log_print(
            f"Sent time: {sentTime}, current time {self.current_time}, "
            f"computation delay {self.agent_computation_delays[sender_id]}\n"
            f"Message queued: {msg}")
Code example #21
    def receiveMessage(self, currentTime, msg):
        """ Processes message from exchange. Main function is to update orders in orderbook relative to mid-price.

        :param simulation current time
        :param message received by self from ExchangeAgent

        :type currentTime: pd.Timestamp
        :type msg: str

        :return:
        """

        super().receiveMessage(currentTime, msg)
        if self.last_mid is not None:
            mid = self.last_mid

        if self.last_spread is not None and self.is_adaptive:
            self._adaptive_update_window_and_tick_size()

        if msg.body['msg'] == 'QUERY_TRANSACTED_VOLUME' and self.state['AWAITING_TRANSACTED_VOLUME'] is True:
            self.updateOrderSize()
            self.state['AWAITING_TRANSACTED_VOLUME'] = False

        if not self.subscribe:
            if msg.body['msg'] == 'QUERY_SPREAD' and self.state['AWAITING_SPREAD'] is True:
                bid, _, ask, _ = self.getKnownBidAsk(self.symbol)
                if bid and ask:
                    mid = int((ask + bid) / 2)
                    self.last_mid = mid
                    if self.is_adaptive:
                        spread = int(ask - bid)
                        self._adaptive_update_spread(spread)

                    self.state['AWAITING_SPREAD'] = False
                else:
                    log_print("SPREAD MISSING at time {}", currentTime)
                    self.state['AWAITING_SPREAD'] = False  # use last mid price and spread

            if self.state['AWAITING_SPREAD'] is False and self.state['AWAITING_TRANSACTED_VOLUME'] is False:
                self.placeOrders(mid)
                self.state = self.initialiseState()
                self.setWakeup(currentTime + self.getWakeFrequency())

        else:  # subscription mode
            if msg.body['msg'] == 'MARKET_DATA' and self.state['AWAITING_MARKET_DATA'] is True:
                bid = self.known_bids[self.symbol][0][0] if self.known_bids[self.symbol] else None
                ask = self.known_asks[self.symbol][0][0] if self.known_asks[self.symbol] else None
                if bid and ask:
                    mid = int((ask + bid) / 2)
                    self.last_mid = mid
                    if self.is_adaptive:
                        spread = int(ask - bid)
                        self._adaptive_update_spread(spread)

                    self.state['AWAITING_MARKET_DATA'] = False
                else:
                    log_print("SPREAD MISSING at time {}", currentTime)
                    self.state['AWAITING_MARKET_DATA'] = False

            if self.state['AWAITING_MARKET_DATA'] is False and self.state['AWAITING_TRANSACTED_VOLUME'] is False:
                self.placeOrders(mid)
                self.state = self.initialiseState()
Code example #22
File: core.py Project: andrewsonin/abides
    def runner(self, num_simulations: int = 1) -> KernelCustomState:

        agents = self.agents
        message_queue = self.message_queue
        custom_state = self.custom_state
        agent_current_times = self.agent_current_times
        agent_computation_delays = self.agent_computation_delays
        start_time = self.start_time
        stop_time = self.stop_time

        custom_state.clear()

        self.current_agent_additional_delay = 0

        log_print(f"Kernel started: {self.name}\nSimulation started!")

        # Note that num_simulations has not yet been really used or tested
        # for anything. Instead we have been running multiple simulations
        # with coarse parallelization from a shell script.
        for sim in range(num_simulations):
            log_print(f"Starting sim {sim}")

            # Event notification for kernel init (agents should not try to
            # communicate with other agents, as order is unknown). Agents
            # should initialize any internal resources that may be needed
            # to communicate with other agents during agent.kernelStarting().
            # Kernel passes self-reference for agents to retain, so they can
            # communicate with the kernel in the future (as it does not have
            # an agentID).
            log_print("\n--- Agent.kernelInitializing() ---")
            for agent in agents:
                agent.kernelInitializing(self)

            # Event notification for kernel start (agents may set up
            # communications or references to other agents, as all agents
            # are guaranteed to exist now). Agents should obtain references
            # to other agents they require for proper operation (exchanges,
            # brokers, subscription services...). Note that we generally
            # don't (and shouldn't) permit agents to get direct references
            # to other agents (like the exchange) as they could then bypass
            # the Kernel, and therefore simulation "physics" to send messages
            # directly and instantly or to perform disallowed direct inspection
            # of the other agent's state. Agents should instead obtain the
            # agent ID of other agents, and communicate with them only via
            # the Kernel. Direct references to utility objects that are not
            # agents are acceptable (e.g. oracles).
            log_print("\n--- Agent.kernelStarting() ---")
            for agent in agents:
                agent.kernelStarting(start_time)

            # Set the kernel to its startTime.
            self.current_time = start_time
            log_print(
                "\n--- Kernel Clock started ---\n"
                f"Kernel.currentTime is now {start_time}\n"
                "\n--- Kernel Event Queue begins ---\n"
                f"Kernel will start processing messages. Queue length: {len(message_queue)}"
            )

            # Track starting wallclock time and total message count for stats at the end.
            eventQueueWallClockStart = pd.Timestamp('now')
            total_messages = 0

            # Process messages until there aren't any (at which point there never can
            # be again, because agents only "wake" in response to messages), or until
            # the kernel stop time is reached.
            while message_queue and self.current_time <= stop_time:
                # Get the next message in timestamp order (delivery time) and extract it.
                self.current_time, (agent_id, msg) = message_queue.get()

                # Periodically print the simulation time and total messages, even if muted.
                if not total_messages % 100_000:
                    print(
                        f"\n--- Simulation time: {self.fmtTime(self.current_time)}, "
                        f"messages processed: {total_messages}, "
                        f"wallclock elapsed: {pd.Timestamp('now') - eventQueueWallClockStart} ---\n"
                    )

                msg_class_name = msg.__class__.__name__
                log_print(
                    "\n--- Kernel Event Queue pop ---\n"
                    f"Kernel handling {msg_class_name} message for agent {agent_id} "
                    f"at time {self.fmtTime(self.current_time)}")

                total_messages += 1

                # In between messages, always reset the currentAgentAdditionalDelay.
                self.current_agent_additional_delay = 0

                # Test to see if the agent is already in the future. If so,
                # delay the wakeup until the agent can act again.
                agent_current_time = agent_current_times[agent_id]
                if agent_current_time > self.current_time:
                    # Push the wakeup call back into the PQ with a new time.
                    message_queue.put((agent_current_time, (agent_id, msg)))
                    log_print(
                        f"Agent in future: {msg_class_name} requeued for {self.fmtTime(agent_current_time)}"
                    )
                    continue

                # Set agent's current time to global current time for start
                # of processing.
                agent_current_times[agent_id] = self.current_time

                if isinstance(msg, Message):
                    agents[agent_id].receiveMessage(self.current_time, msg)
                    called_method_name = "receiveMessage"
                elif isinstance(msg, WakeUp):
                    agents[agent_id].wakeup(self.current_time)
                    called_method_name = "wakeup"
                else:
                    raise ValueError("Unknown message type found in queue",
                                     "currentTime:", self.current_time,
                                     "messageType:", msg_class_name)

                # Delay the agent by its computation delay plus any transient additional delay requested.
                agent_current_times[agent_id] += pd.Timedelta(
                    agent_computation_delays[agent_id] +
                    self.current_agent_additional_delay)

                log_print(
                    f"After {called_method_name} return, agent {agent_id} "
                    f"delayed from {self.fmtTime(self.current_time)} to {self.fmtTime(agent_current_times[agent_id])}"
                )

            if not message_queue:
                log_print("\n--- Kernel Event Queue empty ---")
            elif self.current_time > self.stop_time:
                log_print("\n--- Kernel Stop Time surpassed ---")

            # Record wall clock stop time and elapsed time for stats at the end.
            eventQueueWallClockStop = pd.Timestamp.now()
            event_queue_wallclock_elapsed = eventQueueWallClockStop - eventQueueWallClockStart

            # Event notification for kernel end (agents may communicate with
            # other agents, as all agents are still guaranteed to exist).
            # Agents should not destroy resources they may need to respond
            # to final communications from other agents.
            log_print("\n--- Agent.kernelStopping() ---")
            for agent in agents:
                agent.kernelStopping()

            # Event notification for kernel termination (agents should not
            # attempt communication with other agents, as order of termination
            # is unknown). Agents should clean up all used resources as the
            # simulation program may not actually terminate if num_simulations > 1.
            log_print("\n--- Agent.kernelTerminating() ---")
            for agent in agents:
                agent.kernelTerminating()

            print(
                f"Event Queue elapsed: {event_queue_wallclock_elapsed}, "
                f"messages: {total_messages}, "
                f"messages per second: {total_messages / (event_queue_wallclock_elapsed / one_s_timedelta):0.1f}"
            )
            log_print(f"Ending sim {sim}")

        # The Kernel adds a handful of custom state results for all simulations,
        # which configurations may use, print, log, or discard.
        custom_state[
            'kernel_event_queue_elapsed_wallclock'] = event_queue_wallclock_elapsed
        custom_state['kernel_slowest_agent_finish_time'] = max(
            agent_current_times)

        # Agents will request the Kernel to serialize their agent logs, usually
        # during kernelTerminating, but the Kernel must write out the summary
        # log itself.
        self.writeSummaryLog()

        # This should perhaps be elsewhere, as it is explicitly financial, but it
        # is convenient to have a quick summary of the results for now.
        print("Mean ending value by agent type:")
        for a in self.mean_result_by_agent_type:
            value = self.mean_result_by_agent_type[a]
            count = self.agent_count_by_type[a]
            print(f"{a}: {round(value / count)}")

        print("Simulation ending!")

        return custom_state
Code example #23
 def queryEtfNav(self, nav):
     self.nav = nav
     log_print("Received NAV of ETF.")
Code example #24
    def placeOrder(self):
        # Called when it is time for the agent to determine a limit price and place an order.

        # Compute the order imbalance feature.
        bid_vol = sum([v[1] for v in self.known_bids[self.symbol]])
        ask_vol = sum([v[1] for v in self.known_asks[self.symbol]])
        imba = bid_vol - ask_vol

        # A unit of stock is now 100 shares instead of one.
        imba = int(imba / 100)

        # Get our current holdings in the stock of interest.
        h = self.getHoldings(self.symbol)

        # The new state will be called s_prime.  This agent simply uses current
        # holdings (limit: one share long or short) and offer volume imbalance.
        # State: 1000s digit is 0 (short), 1 (neutral), 2 (long).  Remaining digits
        #        are 000 (-100 imba) to 200 (+100 imba).
        s_prime = ((h + 1) * 1000) + (imba + 100)

        log_print("h: {}, imba: {}, s_prime: {}", h, imba, s_prime)

        # Compute our reward from last time.  We estimate the change in the value
        # of our portfolio by marking it to market and comparing against the last
        # time we were contemplating an action.
        v = self.markToMarket(self.holdings, use_midpoint=True)
        r = v - self.v if self.v is not None else 0

        # Store our experience tuple.
        self.experience.append((self.s, self.a, s_prime, r))

        # Update our q table.
        old_q = self.qtable.q[self.s, self.a]
        old_weighted = (1 - self.qtable.alpha) * old_q

        a_prime = np.argmax(self.qtable.q[s_prime, :])
        new_q = r + (self.qtable.gamma * self.qtable.q[s_prime, a_prime])
        new_weighted = self.qtable.alpha * new_q

        self.qtable.q[self.s, self.a] = old_weighted + new_weighted

        # Decay alpha.
        self.qtable.alpha *= self.qtable.alpha_decay
        self.qtable.alpha = max(self.qtable.alpha, self.qtable.alpha_min)

        # Compute our next action.  0 = sell one, 1 == do nothing, 2 == buy one.
        if self.random_state.rand() < self.qtable.epsilon:
            # Random action, and decay epsilon.
            a = self.random_state.randint(0, 3)
            self.qtable.epsilon *= self.qtable.epsilon_decay
            self.qtable.epsilon = max(self.qtable.epsilon, self.qtable.epsilon_min)
        else:
            # Expected best action.
            a = a_prime

        # Respect holding limit.
        if a == 0 and h == -1:
            a = 1
        elif a == 2 and h == 1:
            a = 1

        # Remember s, a, and v for next time.
        self.s = s_prime
        self.a = a
        self.v = v

        # Place the order.  We probably want this to be a market order, once supported,
        # or use a "compute required price for guaranteed execution" function like the
        # impact agent, but that requires fetching quite a bit of book depth.
        if a == 0:
            self.placeLimitOrder(self.symbol, 1, False, 50000)
        elif a == 2:
            self.placeLimitOrder(self.symbol, 1, True, 200000)
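The state encoding described in the comments packs holdings and imbalance into a single integer: the thousands digit is the holdings bucket (0 short, 1 flat, 2 long) and the remaining digits are the imbalance shifted into 0..200. A tiny standalone sketch with made-up values:

def encode_state(h, imba):
    # h: holdings in units (-1 short, 0 flat, +1 long);
    # imba: bid-minus-ask volume imbalance after dividing by 100 (roughly -100..+100).
    return ((h + 1) * 1000) + (imba + 100)

print(encode_state(0, 0))     # 1100: flat holdings, balanced book
print(encode_state(1, -37))   # 2063: long one unit, ask-heavy book
print(encode_state(-1, 85))   # 185:  short one unit, bid-heavy book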
Code example #25
 def getWakeFrequency(self):
     log_print(
         f"Market Replay Agent first wake up: {self.historical_orders.first_wakeup}"
     )
     return self.historical_orders.first_wakeup - self.mkt_open
Code example #26
File: core.py Project: andrewsonin/abides
    def __init__(self,
                 *,
                 name: str,
                 random_state: np.random.RandomState,
                 start_time: pd.Timestamp,
                 stop_time: pd.Timestamp,
                 agents: Sequence['Agent'],
                 default_computation_delay: int = 1,
                 agent_latency_model: Union[int, AgentLatencyModelBase] = 1,
                 skip_log: bool = False,
                 oracle: OracleType = None,
                 log_dir: Optional[FileName] = None) -> None:
        """
        Framework kernel class that manages message queue processing.

        Args:
            name:                       Kernel name (is for human readers only)
            random_state:               Random state used by Kernel
            start_time:                 Simulation start time
            stop_time:                  Simulation stop time
            agents:                     Sequence of agents involved in simulation
            default_computation_delay:  Default computational delay
            agent_latency_model:        Single number (in nanoseconds)
                                        [OR]
                                        AgentLatencyModelBase instance for modelling agent-to-agent delays
                                        caused by network topology characteristics and random noise
            skip_log:                   Whether to skip logging
            oracle:                     Instance of Oracle
            log_dir:                    Log directory
        """

        # kernel_name is for human readers only.
        if not isinstance(name, str):
            raise TypeError("Parameter name should be of type str")
        self.name = name

        if not isinstance(random_state, np.random.RandomState):
            raise ValueError(
                "A valid, seeded np.random.RandomState object is required for the Kernel",
                name)
        self.random_state = random_state

        # A single message queue to keep everything organized by increasing
        # delivery timestamp.
        self.message_queue: PriorityQueue[Tuple[pd.Timestamp, Tuple[
            int, MessageAbstractBase]]] = PriorityQueue()

        # Timestamp at which the Kernel was created. Primarily used to
        # create a unique log directory for this run. Also used to
        # print some elapsed time and messages per second statistics.
        self.kernel_wallclock_start = pd.Timestamp('now')

        # TODO: This is financial, and so probably should not be here...
        self.mean_result_by_agent_type: Dict[str, float] = {}
        self.agent_count_by_type: Dict[str, int] = {}

        # The Kernel maintains a summary log to which agents can write
        # information that should be centralized for very fast access
        # by separate statistical summary programs. Detailed event
        # logging should go only to the agent's individual log. This
        # is for things like "final position value" and such.
        self.summary_log: List[KernelSummaryLogEntry] = []

        # agents must be a list of agents for the simulation,
        #        based on class agent.Agent
        if not all(isinstance(x, Agent) for x in agents):
            raise TypeError(
                "'agents' must be a sequence containing only instances of the class Agent"
            )
        self.agents = agents
        num_agents = len(agents)

        # The kernel start and stop time (first and last timestamp in
        # the simulation, separate from anything like exchange open/close).
        if not isinstance(start_time, pd.Timestamp) or not isinstance(
                stop_time, pd.Timestamp):
            raise TypeError(
                "'start_time' and 'stop_time' must be of type pd.Timestamp")
        if stop_time < start_time:
            raise ValueError("'stop_time' should be larger than 'start_time'")
        self.current_time = self.start_time = start_time
        self.stop_time = stop_time

        # The kernel maintains a current time for each agent to allow
        # simulation of per-agent computation delays. The agent's time
        # is pushed forward (see below) each time it awakens, and it
        # cannot receive new messages/wakeups until the global time
        # reaches the agent's time. (i.e. it cannot act again while
        # it is still "in the future")

        # This also nicely enforces agents being unable to act before
        # the simulation startTime.
        self.agent_current_times = [self.start_time] * num_agents

        # agentComputationDelays is in nanoseconds, starts with a default
        # value from config, and can be changed by any agent at any time
        # (for itself only). It represents the time penalty applied to
        # an agent each time it is awakened  (wakeup or recvMsg). The
        # penalty applies _after_ the agent acts, before it may act again.
        # TODO: this might someday change to pd.Timedelta objects.
        if not isinstance(default_computation_delay, int):
            raise TypeError("'default_computation_delay' must be of type int")
        self.agent_computation_delays = [default_computation_delay
                                         ] * num_agents

        if isinstance(agent_latency_model, int):
            self.agent_latency_model: AgentLatencyModelBase = DefaultAgentLatencyModel(
                agent_latency_model)
        elif isinstance(agent_latency_model, AgentLatencyModelBase):
            if (isinstance(agent_latency_model, AgentLatencyModel) and any(
                    len(row) != num_agents or not all(map(is_integer, row))
                    for row in agent_latency_model.latency_matrix)):
                raise ValueError(
                    "Attribute 'latency_matrix' of 'agent_latency_model' "
                    "should be a square matrix of integers of size equals to the number of agents"
                )
            self.agent_latency_model = agent_latency_model
        else:
            raise TypeError(
                "Parameter 'agent_latency_model' should be an instance of 'AgentLatencyModelBase' or None"
            )

        # The kernel maintains an accumulating additional delay parameter
        # for the current agent. This is applied to each message sent
        # and upon return from wakeup/receiveMessage, in addition to the
        # agent's standard computation delay. However, it never carries
        # over to future wakeup/receiveMessage calls. It is useful for
        # staggering of sent messages.
        self.current_agent_additional_delay = 0

        # If a log directory was not specified, use the initial wallclock.
        self.log_dir: Union[str, PathLike, Path] = log_dir or str(
            int(self.kernel_wallclock_start.timestamp()))

        # Simulation custom state in a freeform dictionary. Allows config files
        # that drive multiple simulations, or require the ability to generate
        # special logs after simulation, to obtain needed output without special
        # case code in the Kernel. Per-agent state should be handled using the
        # provided updateAgentState() method.
        self.custom_state: KernelCustomState = {}

        # Should the Kernel skip writing agent logs?
        if not isinstance(skip_log, bool):
            raise TypeError("'skip_log' must be of type bool")
        self.skip_log = skip_log

        # The data oracle for the simulation, if needed.
        self.oracle = oracle

        log_print(f"Kernel initialized: {self.name}")
Code example #27
File: EtfPrimaryAgent.py Project: andrewsonin/abides
 def queryLastTrade(self, symbol, price):
     self.nav = price
     log_print("Received daily close price or nav of {} for {}.", price,
               symbol)
Code example #28
    def receiveMessage(self, current_time, msg) -> None:
        super().receiveMessage(current_time, msg)
        if msg.body['msg'] == 'MARKET_DATA':
            self.cancelOrders()

            self.last_market_data_update = current_time
            bids = msg.body['bids']
            asks = msg.body['asks']

            bid_liq = sum(x[1] for x in bids)
            ask_liq = sum(x[1] for x in asks)

            log_print("bid, ask levels: {}", len(bids), len(asks))
            log_print("bids: {}, asks: {}", bids, asks)

            # OBI strategy.
            target = 0

            if bid_liq == 0 or ask_liq == 0:
                log_print("OBI agent inactive: zero bid or ask liquidity")
                return
            else:
                # bid_pct encapsulates both sides of the question, as a normalized expression
                # representing what fraction of total visible volume is on the buy side.
                bid_pct = bid_liq / (bid_liq + ask_liq)

                # If we are short, we need to decide if we should hold or exit.
                if self.is_short:
                    # Update trailing stop.
                    if bid_pct - self.trail_dist > self.trailing_stop:
                        log_print(
                            "Trailing stop updated: new > old ({:2f} > {:2f})",
                            bid_pct - self.trail_dist, self.trailing_stop)
                        self.trailing_stop = bid_pct - self.trail_dist
                    else:
                        log_print(
                            "Trailing stop remains: potential < old ({:2f} < {:2f})",
                            bid_pct - self.trail_dist, self.trailing_stop)

                    # Check the trailing stop.
                    if bid_pct < self.trailing_stop:
                        log_print(
                            "OBI agent exiting short position: bid_pct < trailing_stop ({:2f} < {:2f})",
                            bid_pct, self.trailing_stop)
                        target = 0
                        self.is_short = False
                        self.trailing_stop = None
                    else:
                        log_print(
                            "OBI agent holding short position: bid_pct > trailing_stop ({:2f} > {:2f})",
                            bid_pct, self.trailing_stop)
                        target = -100
                # If we are long, we need to decide if we should hold or exit.
                elif self.is_long:
                    if bid_pct + self.trail_dist < self.trailing_stop:
                        log_print(
                            "Trailing stop updated: new < old ({:2f} < {:2f})",
                            bid_pct + self.trail_dist, self.trailing_stop)
                        self.trailing_stop = bid_pct + self.trail_dist
                    else:
                        log_print(
                            "Trailing stop remains: potential > old ({:2f} > {:2f})",
                            bid_pct + self.trail_dist, self.trailing_stop)

                    # Check the trailing stop.
                    if bid_pct > self.trailing_stop:
                        log_print(
                            "OBI agent exiting long position: bid_pct > trailing_stop ({:2f} > {:2f})",
                            bid_pct, self.trailing_stop)
                        target = 0
                        self.is_long = False
                        self.trailing_stop = None
                    else:
                        log_print(
                            "OBI agent holding long position: bid_pct < trailing_stop ({:2f} < {:2f})",
                            bid_pct, self.trailing_stop)
                        target = 100
                # If we are flat, we need to decide if we should enter (long or short).
                else:
                    if bid_pct > (0.5 + self.entry_threshold):
                        log_print(
                            "OBI agent entering long position: bid_pct > long entry threshold ({:2f} > {:2f})",
                            bid_pct, 0.5 + self.entry_threshold)
                        target = 10000  # TODO: Amount?
                        self.is_long = True
                        self.trailing_stop = bid_pct + self.trail_dist
                        log_print("Initial trailing stop: {:2f}",
                                  self.trailing_stop)
                    elif bid_pct < (0.5 - self.entry_threshold):
                        log_print(
                            "OBI agent entering short position: bid_pct < short entry threshold ({:2f} < {:2f})",
                            bid_pct, 0.5 - self.entry_threshold)
                        target = -10000
                        self.is_short = True
                        self.trailing_stop = bid_pct - self.trail_dist
                        log_print("Initial trailing stop: {:2f}",
                                  self.trailing_stop)
                    else:
                        log_print(
                            "OBI agent staying flat: long_entry < bid_pct < short_entry ({:2f} < {:2f} < {:2f})",
                            0.5 - self.entry_threshold, bid_pct,
                            0.5 + self.entry_threshold)
                        target = 0

                self.plot_me.append({
                    'currentTime': self.current_time,
                    'midpoint': (asks[0][0] + bids[0][0]) / 2,
                    'bid_pct': bid_pct
                })

            # Adjust holdings to target.
            holdings = self.holdings.get(self.symbol, 0)
            delta = target - holdings
            direction = delta > 0  # True means buy, False means sell
            price = self.computeRequiredPrice(direction, abs(delta), bids,
                                              asks)

            log_print("Current holdings: {}", self.holdings)

            if delta == 0:
                log_print("No adjustments to holdings needed.")
            else:
                log_print("Adjusting holdings by {}", delta)
                self.placeLimitOrder(self.symbol, abs(delta), direction, price)
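The imbalance signal and the long-side trailing-stop ratchet in example #28 can be pulled out as two small standalone functions. This is a hedged sketch for clarity only; the function names and the sample numbers are illustrative and do not come from the agent itself.

from typing import List, Tuple

def bid_fraction(bids: List[Tuple[int, int]], asks: List[Tuple[int, int]]) -> float:
    """Fraction of visible volume resting on the bid side; books are (price, size) tuples."""
    bid_liq = sum(size for _, size in bids)
    ask_liq = sum(size for _, size in asks)
    return bid_liq / (bid_liq + ask_liq)

def ratchet_long_stop(bid_pct: float, trailing_stop: float, trail_dist: float) -> float:
    """Mirror of the long branch above: the stop only moves toward bid_pct + trail_dist."""
    return min(trailing_stop, bid_pct + trail_dist)

bids = [(10000, 300), (9999, 200)]
asks = [(10001, 100), (10002, 100)]
bid_pct = bid_fraction(bids, asks)                    # 500 / (500 + 200) ≈ 0.714
stop = ratchet_long_stop(bid_pct, trailing_stop=0.80, trail_dist=0.05)  # lowered to ≈ 0.764
exit_long = bid_pct > stop                            # False: keep holding the long position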
Code example #29
    def processOrders(self):
        def convertDate(date_str):
            try:
                # return datetime.strptime(date_str, '%Y%m%d%H%M%S.%f')

                return pd.to_datetime("2012-06-21 00:00:00") + pd.Timedelta(
                    seconds=float(date_str))
                # return pd.Timestamp("2012-06-21 00:00:00") + float(date_str) * pd.offsets.Second()
            except ValueError:
                return None  # convertDate(date_str[:-1])

        # @mem.cache
        def read_processed_orders_file(processed_orders_file):
            with open(processed_orders_file, 'rb') as handle:
                return pickle.load(handle)

        processed_orders_file = f'{self.processed_orders_folder_path}marketreplay_{self.symbol}_{self.date.date()}.pkl'
        if os.path.isfile(processed_orders_file):
            print(
                f'Processed file exists for {self.symbol} and {self.date.date()}: {processed_orders_file}'
            )
            return read_processed_orders_file(processed_orders_file)
        else:
            print(
                f'Processed file does not exist for {self.symbol} and {self.date.date()}, processing...'
            )

            # orders_df = pd.read_csv(self.orders_file_path, header=None) #, nrows=5000
            orders_df = pd.read_pickle(
                '/Users/a16643222/Documents/abides_zbg/data/marketreplay/input/LOB_df.pkl'
            )
            orders_df = orders_df[(orders_df.Time > '2021-03-22 10:30') & (
                orders_df.Time < '2021-03-22 11:00')]  # DEBUG
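            # Orders that share a timestamp get an increasing nanosecond offset
            # below, so the Timestamp values used for indexing become unique.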
            orders_df['correction'] = orders_df.groupby('Time').cumcount()
            orders_df['Time'] = orders_df['Time'] + orders_df.correction.apply(
                lambda x: pd.Timedelta(x, unit='ns'))
            # orders_df.columns = self.COLUMNS
            orders_df['Direction'] = orders_df['BUY_SELL_FLAG'].astype(
                int).replace(L3OrdersProcessor.DIRECTION)  # TODO:verify
            # orders_df['Timestamp'] = orders_df['Time'].astype(str).apply(convertDate)
            # orders_df['Size'] = orders_df['Size'].astype(int)
            # orders_df['Price'] = orders_df['Price'].astype(int)
            # orders_df['Type'] = orders_df['Type'].astype(int)
            orders_df.rename(columns={
                'Time': 'Timestamp',
                'SIZE': 'Size',
                'PRICE': 'Price',
                'RECORD_TYPE': 'Type',
                'ORDER_ID': 'Order_ID'
            },
                             inplace=True)
            orders_df = orders_df[[
                'Timestamp', 'Order_ID', 'Price', 'Direction', 'Size', 'Type'
            ]]
            orders_df = orders_df.loc[(orders_df.Timestamp >= self.start_time)
                                      & (orders_df.Timestamp < self.end_time)]
            orders_df.set_index('Timestamp', inplace=True)
            log_print(f"Number of Orders: {len(orders_df)}")
            orders_dict = {
                k: g.to_dict(orient='records')
                for k, g in orders_df.groupby(level=0)
            }
            with open(processed_orders_file, 'wb') as handle:
                pickle.dump(orders_dict,
                            handle,
                            protocol=pickle.HIGHEST_PROTOCOL)
                print(f'Processed file created at {processed_orders_file}')
            return orders_dict
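For reference, the cached object has a simple shape: a dict keyed by Timestamp whose values are lists of order records. Below is a minimal sketch of that structure and the same pickle round trip used for caching; the file name and the sample rows are made up.

# Illustrative only: the shape of the orders_dict returned above and the same
# pickle round trip used for caching.  File name and sample rows are made up.
import pickle
import pandas as pd

orders_df = pd.DataFrame({
    'Timestamp': pd.to_datetime(['2021-03-22 10:30:00', '2021-03-22 10:30:00', '2021-03-22 10:30:01']),
    'Order_ID': [1, 2, 3],
    'Price': [10000, 10001, 10000],
    'Direction': ['BUY', 'SELL', 'BUY'],
    'Size': [100, 50, 75],
    'Type': [1, 1, 1],
}).set_index('Timestamp')

# Timestamp -> list of order records; two orders share the first timestamp.
orders_dict = {k: g.to_dict(orient='records') for k, g in orders_df.groupby(level=0)}

cache_file = 'marketreplay_SAMPLE_2021-03-22.pkl'
with open(cache_file, 'wb') as handle:
    pickle.dump(orders_dict, handle, protocol=pickle.HIGHEST_PROTOCOL)
with open(cache_file, 'rb') as handle:
    assert pickle.load(handle) == orders_dict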
Code example #30
File: EtfPrimaryAgent.py  Project: andrewsonin/abides
    def receiveMessage(self, currentTime, msg):
        super().receiveMessage(currentTime, msg)

        # Unless the intent of an experiment is to examine computational issues within an Exchange,
        # it will typically have either 1 ns delay (near instant but cannot process multiple orders
        # in the same atomic time unit) or 0 ns delay (can process any number of orders, always in
        # the atomic time unit in which they are received).  This is separate from, and additional
        # to, any parallel pipeline delay imposed for order book activity.

        # Note that computation delay MUST be updated before any calls to sendMessage.
        self.setComputationDelay(self.computation_delay)

        # Is the exchange closed?  (This block only affects post-close, not pre-open.)
        if currentTime > self.prime_close:
            # Most messages after close will receive a 'PRIME_CLOSED' message in response.
            log_print("{} received {}, discarded: prime is closed.", self.name,
                      msg.body['msg'])
            self.sendMessage(msg.body['sender'],
                             Message({"msg": "PRIME_CLOSED"}))
            # Don't do any further processing on these messages!
            return

        if msg.body['msg'] == "WHEN_MKT_CLOSE":
            self.mkt_close = msg.body['data']
            log_print("Recorded market close: {}",
                      self.kernel.fmtTime(self.mkt_close))
            self.setWakeup(self.mkt_close)
            return

        elif msg.body['msg'] == 'QUERY_LAST_TRADE':
            # Call the queryLastTrade method to record the daily close as NAV.
            self.queryLastTrade(msg.body['symbol'], msg.body['data'])
            return

        self.logEvent(msg.body['msg'], msg.body['sender'])

        # Handle all message types understood by this exchange.
        if msg.body['msg'] == "WHEN_PRIME_OPEN":
            log_print("{} received WHEN_PRIME_OPEN request from agent {}",
                      self.name, msg.body['sender'])

            # The exchange is permitted to respond to requests for simple immutable data (like "what are your
            # hours?") instantly.  This does NOT include anything that queries mutable data, like equity
            # quotes or trades.
            self.setComputationDelay(0)

            self.sendMessage(
                msg.body['sender'],
                Message({
                    "msg": "WHEN_PRIME_OPEN",
                    "data": self.prime_open
                }))

        elif msg.body['msg'] == "WHEN_PRIME_CLOSE":
            log_print("{} received WHEN_PRIME_CLOSE request from agent {}",
                      self.name, msg.body['sender'])

            # The exchange is permitted to respond to requests for simple immutable data (like "what are your
            # hours?") instantly.  This does NOT include anything that queries mutable data, like equity
            # quotes or trades.
            self.setComputationDelay(0)

            self.sendMessage(
                msg.body['sender'],
                Message({
                    "msg": "WHEN_PRIME_CLOSE",
                    "data": self.prime_close
                }))

        elif msg.body['msg'] == "QUERY_NAV":
            log_print("{} received QUERY_NAV ({}) request from agent {}",
                      self.name, msg.body['sender'])

            # Return the NAV for the requested symbol.
            self.sendMessage(
                msg.body['sender'],
                Message({
                    "msg": "QUERY_NAV",
                    "nav": self.nav,
                    "prime_closed": currentTime > self.prime_close
                }))

        elif msg.body['msg'] == "BASKET_ORDER":
            order = msg.body['order']
            log_print("{} received BASKET_ORDER: {}", self.name, order)
            if order.is_buy_order:
                self.create += 1
            else:
                self.redeem += 1
            order.fill_price = self.nav
            self.sendMessage(
                msg.body['sender'],
                Message({
                    "msg": "BASKET_EXECUTED",
                    "order": order
                }))
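To close the loop on the QUERY_NAV branch above, the payloads exchanged look roughly as follows. This is a plain-dict sketch: in the real code the payloads are wrapped in Message objects and routed through the Kernel, and the sender id and NAV value here are made up.

import pandas as pd

# Request an agent would send to the primary agent (sender id is made up).
request = {"msg": "QUERY_NAV", "sender": 17}

nav = 100.25                                          # recorded earlier by queryLastTrade
prime_close = pd.Timestamp('2021-03-22 17:00:00')
current_time = pd.Timestamp('2021-03-22 11:00:00')

# Reply produced by the QUERY_NAV branch above.
response = {
    "msg": "QUERY_NAV",
    "nav": nav,
    "prime_closed": current_time > prime_close,
}
assert response["prime_closed"] is False              # still before the primary close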