Example #1
  def __init__(self, kernel_name):
    # kernel_name is for human readers only.
    self.name = kernel_name
    print ("Kernel initialized: {}".format(self.name))

    self.messages = queue.PriorityQueue()

    # currentTime is None until after kernelStarting() event completes
    # for all agents.  This is a pd.Timestamp that includes the date.
    self.currentTime = None

    # Timestamp at which the Kernel was created.  Primarily used to
    # create a unique log directory for this run.
    self.kernelWallClockStart = pd.Timestamp('now')

    # TODO: This is financial, and so probably should not be here...
    self.meanResultByAgentType = {}
    self.agentCountByType = {}
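
The PriorityQueue orders events by delivery timestamp; entries are (time, (recipient, type, message)) tuples, as runner() in Example #18 requeues them. A minimal sketch of a matching send helper, with the real kernel's latency and noise handling omitted (the name and signature are assumed, not confirmed by these snippets):

  def sendMessage(self, sender, recipient, msg, delay=0):
    # Deliver at the sender's current time plus any extra delay, both in nanoseconds.
    deliveryTime = self.agentCurrentTimes[sender] + pd.Timedelta(delay)
    self.messages.put((deliveryTime, (recipient, MessageType.MESSAGE, msg)))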
Example #2
    def observePrice(self, symbol, currentTime, sigma_n=1000):
        if currentTime >= self.mkt_close:
            r_t = self.r[symbol].loc[self.mkt_close - pd.Timedelta('1ns')]
        else:
            r_t = self.r[symbol].loc[currentTime]

        # Generate a noisy observation of fundamental value at the current time.
        if sigma_n == 0:
            obs = r_t
        else:
            # Assumes `from math import sqrt` at module level; np.sqrt would work equally well.
            obs = int(round(np.random.normal(loc=r_t, scale=sqrt(sigma_n))))

        print("Oracle: current fundamental value is {} at {}".format(
            r_t, currentTime))
        print("Oracle: giving client value observation {}".format(obs))

        # Reminder: all simulator prices are specified in integer cents.
        return obs
Example #3
def train(loader_train, loader_val, model, optimizer, epoch, loss_list=None, val_acc_list=None):
    # Use None defaults: a mutable default list would be shared across calls to train().
    if loss_list is None: loss_list = []
    if val_acc_list is None: val_acc_list = []
    model = model.to(device=device)
    while True:  # trains until the process is stopped; progress is checkpointed every save_every epochs
        tot_correct = 0.0
        tot_samples = 0.0
        tot_loss = 0.0
        for t, sample in enumerate(loader_train):
            x = sample['image'].unsqueeze(1)
            y = sample['label']
            # Move the data to the proper device (GPU or CPU)
            x = x.to(device=device, dtype=dtype)
            y = y.to(device=device, dtype=torch.float)

            scores = model(x).view(-1)
            print("Predicted scores are:", scores)
            optimizer.zero_grad()
            loss = F.binary_cross_entropy(scores, y)
            loss.backward()
            optimizer.step()
            loss_list.append(loss.item())  # append the Python float, not the loss tensor, so the autograd graph is not retained
            
            #training acc, precision, recall, etc. metrics
            num_samples = scores.size(0)
            preds = scores > 0.5
            truepos, falsepos, trueneg, falseneg = evaluate_metrics(preds, y)
            assert (truepos + falsepos + trueneg + falseneg) == num_samples
            num_correct = truepos + trueneg
            tot_correct += num_correct
            tot_samples += num_samples
            tot_loss += loss.item()

            if t % print_every == 0:
                batch_acc = float(num_correct)/num_samples
                print('Iteration %d: batch train accuracy = %.6f, loss = %.6f' % (t, batch_acc, float(loss.item())))
                
        val_acc = check_accuracy(loader_val, model)
        val_acc_list.append(val_acc)
        train_acc = float(tot_correct)/tot_samples
        if epoch % save_every == 0:
            save_model({
                'epoch': epoch,
                'state_dict': model.state_dict(),
                'optimizer' : optimizer.state_dict(),
                'loss_list' : loss_list,
                'val_acc_list': val_acc_list
                }, val_acc, exp_name)
        print ("EPOCH %d, val accuracy = %06f"%(epoch, float(val_acc)))
        print ("train accuracy = %06f, loss = %06f"%(train_acc, tot_loss))
        '''
        for name, param in model.named_parameters():
            if param.requires_grad:
                print (name, param.data)
        '''
        epoch += 1
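
check_accuracy is called above but not shown in these examples; a minimal sketch consistent with that call site (the name exists in the source, but this body is an assumed reconstruction using the same device/dtype globals as train):

def check_accuracy(loader, model):
    # Fraction of correct binary predictions over a loader, in eval mode.
    model.eval()
    num_correct, num_samples = 0, 0
    with torch.no_grad():
        for sample in loader:
            x = sample['image'].unsqueeze(1).to(device=device, dtype=dtype)
            y = sample['label'].to(device=device, dtype=torch.float)
            preds = model(x).view(-1) > 0.5
            num_correct += (preds == y.bool()).sum().item()
            num_samples += y.size(0)
    model.train()
    return float(num_correct) / num_samples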
Example #4
def save_saliency_and_image(tensor, image, savepath, only_saliency=False):
    """ Take in a saliency map tensor, and output as img array. Save if option provided.
    Batch size should not exist, ideally.
    Params:
        :image: The original (1024, 1024) image
        :tensor: (1024, 1024) saliency map
    Return:
        :img: [(-1,] (1024, 1024, 1) img array, or a list of such
    """
    assert savepath.endswith(".png")
    assert savepath.startswith("visualize_output/")

    # https://stackoverflow.com/questions/31877353/overlay-an-image-segmentation-with-numpy-and-matplotlib
    if only_saliency:
        plt.imshow(tensor, cmap='hot')
    else:
        plt.imshow(image, cmap='gray')
        plt.imshow(tensor, cmap='hot', alpha=0.7)
    plt.savefig(savepath)
    plt.clf()  # clear the figure so repeated calls do not draw on top of each other
    print("Saved image to", savepath)
Example #5
  def querySpread (self, symbol, price, bids, asks, book):
    # The spread message now also includes last price for free.
    self.queryLastTrade(symbol, price)

    self.known_bids[symbol] = bids
    self.known_asks[symbol] = asks

    if bids: best_bid, best_bid_qty = (bids[0][0], bids[0][1])
    else: best_bid, best_bid_qty = ('No bids', 0)

    if asks: best_ask, best_ask_qty = (asks[0][0], asks[0][1])
    else: best_ask, best_ask_qty = ('No asks', 0)

    print ("Received spread of {} @ {} / {} @ {} for {}".format(best_bid_qty, best_bid, best_ask_qty, best_ask, symbol))

    self.logEvent("BID_DEPTH", bids)
    self.logEvent("ASK_DEPTH", asks)
    self.logEvent("IMBALANCE", [sum([x[1] for x in bids]), sum([x[1] for x in asks])])

    self.book = book
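
Examples #10 and #19 call getKnownBidAsk, which is not shown; a minimal sketch assuming it simply reads the depth cached by querySpread above (an illustrative reconstruction, not the confirmed implementation):

  def getKnownBidAsk (self, symbol):
    # Best price and size on each side, from the cached known_bids / known_asks.
    bids = self.known_bids.get(symbol, [])
    asks = self.known_asks.get(symbol, [])
    bid, bid_vol = (bids[0][0], bids[0][1]) if bids else (None, 0)
    ask, ask_vol = (asks[0][0], asks[0][1]) if asks else (None, 0)
    return bid, bid_vol, ask, ask_vol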
Example #6
def create_saliency_overlay(model, imagepath, savepath, only_saliency=False):
    """
    Params:
        :model: pytorch model for this scope, e.g. MammogramDenseNet
        :image: (1024, 1024) numpy array
    Return: None.
    """
    assert imagepath.endswith('.npy')
    image = np.load(imagepath)
    print("Loaded image from", imagepath)

    x = image.reshape(1, 1, 1024, 1024)
    raw_gradient = get_gradient(model, x)
    saliency_tensor = raw_gradient.numpy().reshape(1024, 1024)
    saliency_tensor = normalize_between(saliency_tensor, 0, 1)

    save_saliency_and_image(saliency_tensor,
                            image,
                            savepath,
                            only_saliency=only_saliency)
Example #7
def get_gradient(model, x):
    """
    Params:
        :model: A module, already trained. we freeze weights and get input gradients
        :x: The input image tensor (-1, 1, 1024, 1024).
    Return:
        :gradient: torch.Tensor (-1, 1, 1024, 1024) saliency map.
    """
    x = torch.tensor(x)
    x.requires_grad = True  # gradient wrt image

    # Freeze params, we're not updating weights
    for p in model.parameters():
        p.requires_grad = False
    #print("x requires grad is:", x.requires_grad) True

    try:
        scores = model(x)
    except RuntimeError:
        # Device mismatch: the model's weights live on the GPU, so retry on CUDA.
        model = model.cuda()
        scores = model(x.cuda())

    print("We received score(s) ", scores)
    scores.backward()

    gradient = x.grad
    print("We got dL/dx of shape", gradient.size())

    gradient = gradient.abs_().mean(1)  # 1 = channel dim, (-1, 1024, 1024)
    print("After processing (absval + mean):", gradient.size())

    return gradient
Example #8
  def getKnownLiquidity (self, symbol, within=0.00):
    bid_liq = self.getBookLiquidity(self.known_bids[symbol], within)
    ask_liq = self.getBookLiquidity(self.known_asks[symbol], within)

    print ("Bid/ask liq: {}, {}".format(bid_liq, ask_liq))
    print ("Known bids: {}".format(self.known_bids[self.symbol]))
    print ("Known asks: {}".format(self.known_asks[self.symbol]))

    return bid_liq, ask_liq
Example #9
  def kernelStopping (self):
    # Always call parent method to be safe.
    super().kernelStopping()

    # Print end of day holdings.
    self.logEvent('FINAL_HOLDINGS', self.fmtHoldings(self.holdings))

    # Mark to market.
    # We may want a separate mark to market function (to use anytime) eventually.
    cash = self.markToMarket(self.holdings)

    self.logEvent('ENDING_CASH', cash)
    print ("Final holdings for {}: {}.  Marked to market: {}".format(self.name, self.fmtHoldings(self.holdings),
                                                                     cash), override=True)
    
    # TODO: Record final results for presentation/debugging.  This is probably bad.
    mytype = str(type(self)).split('.')[-1].split("'")[0]

    if mytype in self.kernel.meanResultByAgentType:
      self.kernel.meanResultByAgentType[mytype] += cash
      self.kernel.agentCountByType[mytype] += 1
    else:
      self.kernel.meanResultByAgentType[mytype] = cash
      self.kernel.agentCountByType[mytype] = 1
Example #10
    def placeOrder(self):
        # Called when it is time for the agent to determine a limit price and place an order.
        # updateEstimates() returns the agent's current total valuation for the share it
        # is considering to trade and whether it will buy or sell that share.
        v, buy = self.updateEstimates()

        # Select a requested surplus for this trade.
        R = np.random.randint(self.R_min, self.R_max + 1)

        # Determine the limit price.
        p = v - R if buy else v + R

        # Either place the constructed order, or if the agent could secure (eta * R) surplus
        # immediately by taking the inside bid/ask, do that instead.
        bid, bid_vol, ask, ask_vol = self.getKnownBidAsk(self.symbol)
        if buy and ask_vol > 0:
            R_ask = v - ask
            if R_ask >= (self.eta * R):
                print(
                    "{} desired R = {}, but took R = {} at ask = {} due to eta"
                    .format(self.name, R, R_ask, ask))
                p = ask
            else:
                print("{} demands R = {}, limit price {}".format(
                    self.name, R, p))
        elif (not buy) and bid_vol > 0:
            R_bid = bid - v
            if R_bid >= (self.eta * R):
                print(
                    "{} desired R = {}, but took R = {} at bid = {} due to eta"
                    .format(self.name, R, R_bid, bid))
                p = bid
            else:
                print("{} demands R = {}, limit price {}".format(
                    self.name, R, p))

        # Place the order.
        self.placeLimitOrder(self.symbol, 1, buy, p)
Example #11
def save_model(state, acc, exp_name, filename='checkpoint.pth.tar'):
    file_path = os.path.join(EXPERIMENT_DIR, exp_name)
    if not os.path.exists(file_path):
        os.makedirs(file_path)

    torch.save(state, os.path.join(file_path, filename))
    print("saved checkpoint to ", file_path)

    # Track the best validation accuracy seen so far; if this checkpoint beats it
    # (or no record exists yet), copy it to BEST_FILE and update the stats file.
    best_stats_file = os.path.join(file_path, STATS_FILE)
    best_acc = None
    if os.path.isfile(best_stats_file):
        with open(best_stats_file, 'r') as best_file:
            best_acc = float(best_file.read())

    if best_acc is None or best_acc < acc:
        shutil.copyfile(os.path.join(file_path, filename), os.path.join(file_path, BEST_FILE))
        print("best checkpoint! saved to ", BEST_FILE)
        with open(best_stats_file, 'w') as best_file:
            best_file.write("%f" % acc)
Example #12
    def __init__(self, mkt_open, mkt_close, symbols):
        # Symbols must be a dictionary of dictionaries with outer keys as symbol names and
        # inner keys: r_bar, kappa, sigma_s.
        self.mkt_open = mkt_open
        self.mkt_close = mkt_close
        self.symbols = symbols
        self.r = {}

        then = dt.datetime.now()

        for symbol in symbols:
            s = symbols[symbol]
            print(
                "MeanRevertingOracle computing fundamental value series for {}"
                .format(symbol))
            self.r[symbol] = self.generate_fundamental_value_series(
                symbol=symbol, **s)

        now = dt.datetime.now()

        print("MeanRevertingOracle initialized for symbols {}".format(symbols))
        print("MeanRevertingOracle initialization took {}".format(now - then))
Example #13
  def orderExecuted (self, order):
    print ("Received notification of execution for: {}".format(order))

    # Log this activity.
    self.logEvent('ORDER_EXECUTED', order)

    # At the very least, we must update CASH and holdings at execution time.
    qty = order.quantity if order.is_buy_order else -1 * order.quantity
    sym = order.symbol

    if sym in self.holdings:
      self.holdings[sym] += qty
    else:
      self.holdings[sym] = qty

    if self.holdings[sym] == 0: del self.holdings[sym]

    # As with everything else, CASH holdings are in CENTS.
    self.holdings['CASH'] -= (qty * order.fill_price)
    
    # If this original order is now fully executed, remove it from the open orders list.
    # Otherwise, decrement by the quantity filled just now.  It is _possible_ that due
    # to timing issues, it might not be in the order list (i.e. we issued a cancellation
    # but it was executed first, or something).
    if order.order_id in self.orders:
      o = self.orders[order.order_id]

      if order.quantity >= o.quantity: del self.orders[order.order_id]
      else: o.quantity -= order.quantity

    else:
      print ("Execution received for order not in orders list: {}".format(order))

    print ("After execution, agent open orders: {}".format(self.orders))

    # After execution, log holdings.
    #self.logEvent('HOLDINGS_UPDATED', self.fmtHoldings(self.holdings))
    self.logEvent('HOLDINGS_UPDATED', self.holdings)
Example #14
def get_pretrained_layers(model_name='densenet201', include_denseblock=True):

    # Use Densenet-201 by default

    print("Retrieving the pretrained DenseNet model:", model_name)
    old_model = get_densenet(model_name)

    #new_model = densenet.DenseNet(
    #    growth_rate=32, block_config=(6, 12, 6),
    #    num_init_features=64, drop_rate=0., num_classes=2)
    # TODO(ojwang): Increase drop rate when model is confirmed working and time to tune

    layers = []

    # The first Conv2d layer
    print("Copying features[0]: %s..." % old_model.features[0])
    first_conv = transform_filters_to_grayscale(old_model.features[0])
    first_conv.weight.requires_grad = True  # Just in case
    layers.append(('conv0', first_conv))
    # Don't freeze layers: We will keep training it, because the domain isn't the same as ImageNet

    # The initial BatchNorm
    print("Copying features[1]: %s..." % old_model.features[1])
    first_bn = deepcopy(old_model.features[1])
    #freeze_parameters(first_bn)
    layers.append(('batchnorm0', first_bn))

    # the classic ReLU
    print("Creating new ReLU for features[2]: %s..." % old_model.features[2])
    first_relu = nn.ReLU(inplace=True)
    layers.append(('relu0', first_relu))

    # The initial MaxPool
    print("Copying features[3]: %s..." % old_model.features[3])
    first_maxpool = deepcopy(old_model.features[3])
    layers.append(('maxpool0', first_maxpool))

    if include_denseblock:
        # The first dense block: 6 dense layers
        #   (each is 1x1 conv -> 3x3 conv, w/ appropriate relu and batchnorm)
        print("Copying features[4]: %s..." % type(
            old_model.features[4]))  # print only type: DB has too much text
        denseblock = deepcopy(old_model.features[4])
        #freeze_parameters(denseblock)
        layers.append(('denseblock0', denseblock))

    layers = OrderedDict(layers)

    return layers
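
transform_filters_to_grayscale is used above but not shown; a plausible sketch assuming it averages the pretrained RGB filters down to a single input channel (hypothetical reconstruction):

def transform_filters_to_grayscale(conv):
    # Collapse (out_channels, 3, k, k) pretrained weights to (out_channels, 1, k, k)
    # by averaging over the RGB input-channel dimension.
    new_conv = nn.Conv2d(1, conv.out_channels, kernel_size=conv.kernel_size,
                         stride=conv.stride, padding=conv.padding, bias=False)
    with torch.no_grad():
        new_conv.weight.copy_(conv.weight.mean(dim=1, keepdim=True))
    return new_conv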
Example #15
    def __init__(self,
                 growth_rate=32,
                 block_config=(6, 12, 18, 12),
                 bn_size=4,
                 drop_rate=0.,
                 pretrained_encoder=2,
                 simple=False,
                 debug=False):
        """
        bn_size = bottleneck size, the factor by which the first conv in a _DenseLayer
            is larger than the second conv.
        :pretrained_encoder: int in [0,1,2] designating level of pretrained layers to use.
            0 is none, 1 is just first convolutions, 2 is the first dense block.
        """

        super(MammogramDenseNet, self).__init__()

        self.debug = debug
        self.pretrained = pretrained_encoder
        self.nb_dense_blocks = len(block_config)
        num_classes = 1  # indicator score of whether it is malignant

        include_denseblock = self.pretrained == 2
        pretrained_layers = get_pretrained_layers(
            include_denseblock=include_denseblock)
        self.features = nn.Sequential(pretrained_layers)

        if self.pretrained == 0:  # Re-initialize layers; don't use pretrained weights inside
            print("self.pretrained = 0. Re-initializing weights")
            for m in self.features.modules():

                if isinstance(m, nn.Conv2d):
                    old_m = deepcopy(m)
                    nn.init.kaiming_normal_(m.weight)
                    # Conv layers have no bias when in conjunction with Batchnorm
                elif isinstance(m, nn.BatchNorm2d):
                    old_m = deepcopy(m)
                    nn.init.constant_(m.weight, 1)
                    nn.init.constant_(m.bias, 0)
                else:
                    continue

                print("old m is", old_m.weight)
                print("new m is", m.weight)

        # Display shapes for debugging
        if debug:
            print("pretrained_encoder = %d" % pretrained_encoder)
            print(
                "Output shape after the pretrained modules (batch, channels, H, W):"
            )

            test_input = torch.rand(1, 1, 1024, 1024)
            test_output = self.features(test_input)
            print(test_output.size())
            del test_input
            del test_output

        # A counter to track what input shape our final nn.Linear layer should expect
        #  Just num_channels is fine, because global avg pool at end
        num_features = 256 if self.pretrained == 2 else 64

        # Add the rest of the architecture (Dense blocks, transition layers)
        for i, num_layers in enumerate(block_config):
            if simple:
                block = _SimpleDenseBlock(num_layers=num_layers,
                                          num_input_features=num_features,
                                          growth_rate=growth_rate,
                                          drop_rate=drop_rate)
            else:
                block = _DenseBlock(num_layers=num_layers,
                                    num_input_features=num_features,
                                    bn_size=bn_size,
                                    growth_rate=growth_rate,
                                    drop_rate=drop_rate)

            # Initialize the weights of block
            for m in block.modules():
                if isinstance(m, nn.Conv2d):
                    nn.init.kaiming_normal_(m.weight)
                    # Conv layers have no bias when in conjunction with Batchnorm
                elif isinstance(m, nn.BatchNorm2d):
                    nn.init.constant_(m.weight, 1)
                    nn.init.constant_(m.bias, 0)

            block_name = 'simpledenseblock%d' % (
                i + 1) if simple else 'denseblock%d' % (i + 1)
            self.features.add_module(block_name, block)

            num_features = num_features + num_layers * growth_rate
            if debug:
                print("num features after denseblock %d:" % (i + 1),
                      num_features)

            # Add a transition layer if not the last dense block:
            #  Norm, 1x1 Conv (unless simple), activation, AvgPool
            if i != self.nb_dense_blocks - 1:
                if simple:
                    trans = _SimpleTransition(num_input_features=num_features)
                else:
                    trans = _Transition(num_input_features=num_features,
                                        num_output_features=num_features)

                transition_name = 'simpletransition%d' % (
                    i + 1) if simple else 'transition%d' % (i + 1)
                self.features.add_module(transition_name, trans)

                if debug:
                    print("num features after transition %d:" % (i + 1),
                          num_features)

        if debug: print("final num features:", num_features)

        # Put the classifier here separately
        #  will apply it manually in forward(x), after global avg pool and reshape
        self.classifier = nn.Linear(num_features, num_classes)
        nn.init.constant_(self.classifier.bias, 0)

        if debug:
            summary(self.features)
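
The classifier is applied manually in forward(), per the comment above; a sketch consistent with that design, assuming global average pooling and a sigmoid output (Example #3 trains with binary_cross_entropy on probabilities):

    def forward(self, x):
        features = self.features(x)               # (N, num_features, H, W)
        out = F.relu(features, inplace=True)
        out = F.adaptive_avg_pool2d(out, (1, 1))  # global average pool
        out = torch.flatten(out, 1)               # (N, num_features)
        out = self.classifier(out)                # (N, 1) malignancy score
        return torch.sigmoid(out)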
Example #16
  def orderAccepted (self, order):
    print ("Received notification of acceptance for: {}".format(order))

    # Log this activity.
    self.logEvent('ORDER_ACCEPTED', order)
Example #17
  def receiveMessage (self, currentTime, msg):
    super().receiveMessage(currentTime, msg)

    # Do we know the market hours?
    had_mkt_hours = self.mkt_open is not None and self.mkt_close is not None

    # Record market open or close times.
    if msg.body['msg'] == "WHEN_MKT_OPEN":
      self.mkt_open = msg.body['data']

      print ("Recorded market open: {}".format(self.kernel.fmtTime(self.mkt_open)))

    elif msg.body['msg'] == "WHEN_MKT_CLOSE":
      self.mkt_close = msg.body['data']

      print ("Recorded market close: {}".format(self.kernel.fmtTime(self.mkt_close)))

    elif msg.body['msg'] == "ORDER_EXECUTED":
      # Call the orderExecuted method, which subclasses should extend.  This parent
      # class could implement default "portfolio tracking" or "returns tracking"
      # behavior.
      order = msg.body['order']

      self.orderExecuted(order)

    elif msg.body['msg'] == "ORDER_ACCEPTED":
      # Call the orderAccepted method, which subclasses should extend.
      order = msg.body['order']

      self.orderAccepted(order)

    elif msg.body['msg'] == "ORDER_CANCELLED":
      # Call the orderCancelled method, which subclasses should extend.
      order = msg.body['order']

      self.orderCancelled(order)

    elif msg.body['msg'] == "MKT_CLOSED":
      # We've tried to ask the exchange for something after it closed.  Remember this
      # so we stop asking for things that can't happen.

      self.marketClosed()

    elif msg.body['msg'] == 'QUERY_LAST_TRADE':
      # Call the queryLastTrade method, which subclasses may extend.
      # Also note if the market is closed.
      if msg.body['mkt_closed']: self.mkt_closed = True

      self.queryLastTrade(msg.body['symbol'], msg.body['data'])

    elif msg.body['msg'] == 'QUERY_SPREAD':
      # Call the querySpread method, which subclasses may extend.
      if msg.body['mkt_closed']: self.mkt_closed = True

      self.querySpread(msg.body['symbol'], msg.body['data'], msg.body['bids'], msg.body['asks'], msg.body['book'])

    elif msg.body['msg'] == 'QUERY_ORDER_STREAM':
      # Call the queryOrderStream method, which subclasses may extend.
      if msg.body['mkt_closed']: self.mkt_closed = True

      self.queryOrderStream(msg.body['symbol'], msg.body['orders'])


    # Now do we know the market hours?
    have_mkt_hours = self.mkt_open is not None and self.mkt_close is not None

    # Once we know the market open and close times, schedule a wakeup call for market open.
    # Only do this once, when we first have both items.
    if have_mkt_hours and not had_mkt_hours:
      # Agents are asked to generate a wake offset from the market open time.  We structure
      # this as a subclass request so each agent can supply an appropriate offset relative
      # to its trading frequency.
      ns_offset = self.getWakeFrequency()

      self.setWakeup(self.mkt_open + ns_offset)
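
The Message objects throughout these examples are only ever accessed through a body dict; a minimal sketch of such a class, assuming nothing beyond what these snippets rely on:

class Message:
  def __init__(self, body=None):
    # All content lives in a free-form dict; body['msg'] holds the message type string.
    self.body = body if body is not None else {}

  def __str__(self):
    return str(self.body)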
Example #18
  def runner(self, agents=None, startTime=None, stopTime=None,
             num_simulations=1, defaultComputationDelay=1,
             defaultLatency=1, agentLatency=None, latencyNoise=None,
             seed=None, oracle=None, log_dir=None):

    # Use None defaults to avoid Python's shared-mutable-default pitfall.
    if agents is None: agents = []
    if latencyNoise is None: latencyNoise = [1.0]

    # agents must be a list of agents for the simulation,
    #        based on class agent.Agent
    self.agents = agents
    self.startTime = startTime
    self.stopTime = stopTime
    self.seed = seed
    self.oracle = oracle

    if log_dir:
      self.log_dir = log_dir
    else:
      self.log_dir = str(int(self.kernelWallClockStart.timestamp()))

    # The kernel maintains a current time for each agent to allow
    # simulation of per-agent computation delays.  The agent's time
    # is pushed forward (see below) each time it awakens, and it
    # cannot receive new messages/wakeups until the global time
    # reaches the agent's time.  (i.e. it cannot act again while
    # it is still "in the future")

    # This also nicely enforces agents being unable to act before
    # the simulation startTime.
    self.agentCurrentTimes = [self.startTime] * len(agents)

    # agentComputationDelays is in nanoseconds, starts with a default
    # value from config, and can be changed by any agent at any time
    # (for itself only).  It represents the time penalty applied to
    # an agent each time it is awakened  (wakeup or recvMsg).  The
    # penalty applies _after_ the agent acts, before it may act again.
    # TODO: this might someday change to pd.Timedelta objects.
    self.agentComputationDelays = [defaultComputationDelay] * len(agents)

    # If agentLatency is not defined, define it using the defaultLatency.
    # This matrix defines the communication delay between every pair of
    # agents.
    if agentLatency is None:
      # Build each row independently; [[x] * n] * n would alias a single row n times.
      self.agentLatency = [[defaultLatency] * len(agents) for _ in range(len(agents))]
    else:
      self.agentLatency = agentLatency

    # There is a noise model for latency, intended to be a one-sided
    # distribution with the peak at zero.  By default there is no noise
    # (100% chance to add zero ns extra delay).  Format is a list with
    # list index = ns extra delay, value = probability of this delay.
    self.latencyNoise = latencyNoise

    # The kernel maintains an accumulating additional delay parameter
    # for the current agent.  This is applied to each message sent
    # and upon return from wakeup/receiveMessage, in addition to the
    # agent's standard computation delay.  However, it never carries
    # over to future wakeup/receiveMessage calls.  It is useful for
    # staggering of sent messages.
    self.currentAgentAdditionalDelay = 0

    print ("Kernel started: {}".format(self.name))
    print ("Simulation started!")

    for sim in range(num_simulations):
      print ("Starting sim {}".format(sim))

      # Event notification for kernel init (agents should not try to
      # communicate with other agents, as order is unknown).  Agents
      # should initialize any internal resources that may be needed
      # to communicate with other agents during agent.kernelStarting().
      # Kernel passes self-reference for agents to retain, so they can
      # communicate with the kernel in the future (as it does not have
      # an agentID).
      print ("\n--- Agent.kernelInitializing() ---")
      for agent in self.agents:
        agent.kernelInitializing(self)

      # Event notification for kernel start (agents may set up
      # communications or references to other agents, as all agents
      # are guaranteed to exist now).  Agents should obtain references
      # to other agents they require for proper operation (exchanges,
      # brokers, subscription services...).
      print ("\n--- Agent.kernelStarting() ---")
      for agent in self.agents:
        agent.kernelStarting(self.startTime)

      # Set the kernel to its startTime.
      self.currentTime = self.startTime
      print ("\n--- Kernel Clock started ---")
      print ("Kernel.currentTime is now {}".format(self.currentTime))

      # Start processing the Event Queue.
      print ("\n--- Kernel Event Queue begins ---")
      print ("Kernel will start processing messages. ",
             "Queue length: {}".format(len(self.messages.queue)))

      # Track starting wall clock time and total message count for stats at the end.
      eventQueueWallClockStart = pd.Timestamp('now')
      ttl_messages = 0

      # Process messages.
      while not self.messages.empty() and self.currentTime and (self.currentTime <= self.stopTime):
        self.currentTime, event = self.messages.get()
        msg_recipient, msg_type, msg = event

        # Periodically print the simulation time and total messages, even if muted.
        if ttl_messages % 100000 == 0:
          print ("\n--- Simulation time: {}, messages processed: {}, wallclock elapsed: {} ---\n".format(
                         self.fmtTime(self.currentTime), ttl_messages, pd.Timestamp('now') - eventQueueWallClockStart),
                 override=True)

        print ("\n--- Kernel Event Queue pop ---")
        print ("Kernel handling {} message for agent {} at time {}".format(
               msg_type, msg_recipient, self.fmtTime(self.currentTime)))

        ttl_messages += 1

        # In between messages, always reset the currentAgentAdditionalDelay.
        self.currentAgentAdditionalDelay = 0

        # Dispatch message to agent.
        if msg_type == MessageType.WAKEUP:

          # Who requested this wakeup call?
          agent = msg_recipient

          # Test to see if the agent is already in the future.  If so,
          # delay the wakeup until the agent can act again.
          if self.agentCurrentTimes[agent] > self.currentTime:
            # Push the wakeup call back into the PQ with a new time.
            self.messages.put((self.agentCurrentTimes[agent],
                              (msg_recipient, msg_type, msg)))
            print ("Agent in future: wakeup requeued for {}".format(
                 self.fmtTime(self.agentCurrentTimes[agent])))
            continue
            
          # Set agent's current time to global current time for start
          # of processing.
          self.agentCurrentTimes[agent] = self.currentTime

          # Wake the agent.
          agents[agent].wakeup(self.currentTime)

          # Delay the agent by its computation delay plus any transient additional delay requested.
          self.agentCurrentTimes[agent] += pd.Timedelta(self.agentComputationDelays[agent] +
                                                        self.currentAgentAdditionalDelay)

          print ("After wakeup return, agent {} delayed from {} to {}".format(
                 agent, self.fmtTime(self.currentTime), self.fmtTime(self.agentCurrentTimes[agent])))

        elif msg_type == MessageType.MESSAGE:

          # Who is receiving this message?
          agent = msg_recipient

          # Test to see if the agent is already in the future.  If so,
          # delay the message until the agent can act again.
          if self.agentCurrentTimes[agent] > self.currentTime:
            # Push the message back into the PQ with a new time.
            self.messages.put((self.agentCurrentTimes[agent],
                              (msg_recipient, msg_type, msg)))
            print ("Agent in future: message requeued for {}".format(
                 self.fmtTime(self.agentCurrentTimes[agent])))
            #print ("TMP: delayed message was: {}".format(msg))
            continue

          # Set agent's current time to global current time for start
          # of processing.
          self.agentCurrentTimes[agent] = self.currentTime

          # Deliver the message.
          agents[agent].receiveMessage(self.currentTime, msg)

          # Delay the agent by its computation delay plus any transient additional delay requested.
          self.agentCurrentTimes[agent] += pd.Timedelta(self.agentComputationDelays[agent] +
                                                        self.currentAgentAdditionalDelay)

          print ("After receiveMessage return, agent {} delayed from {} to {}".format(
                 agent, self.fmtTime(self.currentTime), self.fmtTime(self.agentCurrentTimes[agent])))

        else:
          raise ValueError("Unknown message type found in queue",
                           "currentTime:", self.currentTime,
                           "messageType:", msg_type)

      if self.messages.empty():
        print ("\n--- Kernel Event Queue empty ---")

      if self.currentTime and (self.currentTime > self.stopTime):
        print ("\n--- Kernel Stop Time surpassed ---")

      # Record wall clock stop time and elapsed time for stats at the end.
      eventQueueWallClockStop = pd.Timestamp('now')

      eventQueueWallClockElapsed = eventQueueWallClockStop - eventQueueWallClockStart

      # Event notification for kernel end (agents may communicate with
      # other agents, as all agents are still guaranteed to exist).
      # Agents should not destroy resources they may need to respond
      # to final communications from other agents.
      print ("\n--- Agent.kernelStopping() ---")
      for agent in agents:
        agent.kernelStopping()

      # Event notification for kernel termination (agents should not
      # attempt communication with other agents, as order of termination
      # is unknown).  Agents should clean up all used resources as the
      # simulation program may not actually terminate if num_simulations > 1.
      print ("\n--- Agent.kernelTerminating() ---")
      for agent in agents:
        agent.kernelTerminating()

      print ("Event Queue elapsed: {}, messages: {}, messages per second: {:0.1f}".format(
             eventQueueWallClockElapsed, ttl_messages, 
             ttl_messages / (eventQueueWallClockElapsed / (np.timedelta64(1, 's')))),
             override=True)
      print ("Ending sim {}".format(sim))

    print ("Mean ending value by agent type:", override=True)
    for a in self.meanResultByAgentType:
      value = self.meanResultByAgentType[a]
      count = self.agentCountByType[a]
      print ("{}: {:d}".format(a, int(round(value / count))), override=True)

    print ("Simulation ending!", override=True)
Example #19
    def placeOrders(self):

        # The background agents use the DataOracle to obtain noisy observations of the
        # actual historical intraday price on a particular date.  They use this to
        # produce a realistic "background" market of agents who trade based on a belief
        # that follows history (i.e. beliefs do not change based on other agent trading
        # activity) but whose behavior does react to market conditions -- because they
        # will try to arbitrage between their beliefs and the current market state.

        # Get current value belief for relevant stock (observation is noisy).  Beliefs
        # can change even when (unknown) real historical stock price has not changed.
        # sigma_n is the variance of gaussian observation noise as a proportion of the
        # current stock price.  (e.g. if stock trades at 100, sigma_n=0.01 will
        # select from a normal(mean=100,std=1) distribution.
        value_observation = self.oracle.observePrice(self.symbol,
                                                     self.currentTime,
                                                     sigma_n=self.sigma_n)

        # TESTING: value_belief is only allowed to change at a certain rate from the prior
        # belief, to keep some kind of consistency and make "beliefs" mean something.

        if self.value_belief: self.logEvent("OLD_BELIEF", self.value_belief)
        self.logEvent("BELIEF_OBSERVATION", value_observation)

        # If there was a prior belief, update it.
        if self.value_belief:
            delta = value_observation - self.value_belief
            print("observation {}, old belief {}, delta {}".format(
                value_observation, self.value_belief, delta))
            self.value_belief = int(
                round(self.value_belief + (delta * self.learning_rate)))
        else:
            # Otherwise use the observation as the belief.
            self.value_belief = value_observation

        print("New belief {}".format(self.value_belief))
        self.logEvent("NEW_BELIEF", self.value_belief)

        if self.ARB_LAST_TRADE:
            arb_target = self.last_trade[self.symbol]
            # Also seed bid and ask, which the base limits below reference.
            bid = ask = arb_target
        else:
            bid, bid_vol, ask, ask_vol = self.getKnownBidAsk(self.symbol)

            if bid and ask:
                arb_target = int(round((bid + ask) / 2))
            else:
                # No spread yet.  Use last trade (or open price) for arb_target, bid, and ask.
                arb_target = self.last_trade[self.symbol]
                bid = arb_target
                ask = arb_target

        print("{} believes {} is worth {} at {}, arb target: {}.".format(
            self.name, self.symbol, self.dollarize(self.value_belief),
            self.kernel.fmtTime(self.currentTime), self.dollarize(arb_target)))

        # The agents now have their desired behavior.  Instead of placing bracketing limit orders, they
        # arbitrage between the last trade price and their value belief.  This means one-sided orders.
        # Note that value_belief, like all prices, is in integer CENTS.
        #
        # The agent places limit orders designed to immediately execute.  They will pay slightly more (buy)
        # than the last trade price, or accept slightly less than the last current trade price (sell).
        # Offset must be adjusted to round cents.
        offset = int(
            round(
                np.random.uniform(low=self.LOW_CUSHION, high=self.HIGH_CUSHION)
                * arb_target))
        #shares = np.random.randint(100,400)
        if self.trade_vol < 200:
            print(
                "ERROR: BackgroundAgents don't work right with less than 200 average trade volume (shares)",
                override=True)
            sys.exit()
        else:
            shares = np.random.randint(200,
                                       int(round(self.trade_vol * 2)) - 200)

        # Pick an exit offset (to take profit) if the trade goes in the agent's favor by a percentage of the
        # current price.
        exit_offset = int(round(0.01 * arb_target))

        # If the last traded price is too close to the value belief, don't trade.
        if abs(arb_target - self.value_belief) < self.TRADE_THRESHOLD:
            # No trade.
            pass
        elif self.value_belief > arb_target:
            # The agent believes the price should be higher.  Go long.

            # Place 1/2 the shares for immediate execution.  This will be at least 100.
            #mkt_shares = int(round(shares/2))
            mkt_shares = 100

            # Use base limit.
            #base_limit = self.value_belief - self.TRADE_THRESHOLD
            #base_limit = int(round(((self.value_belief - arb_target) / 2) + arb_target))
            #self.placeLimitOrder(self.symbol, mkt_shares, True, ask)
            base_limit = ask
            self.placeLimitOrder(self.symbol, mkt_shares, True, base_limit)

            rem_shares = shares - mkt_shares

            while rem_shares > 0:
                trade_shares = 100 if rem_shares >= 100 else rem_shares
                rem_shares -= trade_shares

                # Each 100 share lot's limit price is drawn from a one-sided gaussian with the peak
                # at the (possibly previous after our trade above) best ask.
                #rand = np.random.normal(0, ask * self.LIMIT_STD_CENTS)
                #TMP? Peak a little shy of the belief.
                rand = np.random.normal(0, base_limit * self.LIMIT_STD_CENTS)
                limit_price = int(round(base_limit - abs(rand)))

                self.placeLimitOrder(self.symbol, trade_shares, True,
                                     limit_price)

            # Also place a profit-taking exit, not designed to be immediately executed.
            #self.placeLimitOrder(self.symbol, shares, False, arb_target + exit_offset)

        else:
            # The agent believes the price should be lower.  Go short.

            # Place 1/2 the shares for immediate execution.  This will be at least 100.
            #mkt_shares = int(round(shares/2))
            mkt_shares = 100

            # Use base limit.
            #base_limit = self.value_belief + self.TRADE_THRESHOLD
            #base_limit = int(round(((arb_target - self.value_belief) / 2) + self.value_belief))
            #self.placeLimitOrder(self.symbol, mkt_shares, False, bid)
            base_limit = bid
            self.placeLimitOrder(self.symbol, mkt_shares, False, base_limit)

            rem_shares = shares - mkt_shares

            while rem_shares > 0:
                trade_shares = 100 if rem_shares >= 100 else rem_shares
                rem_shares -= trade_shares

                # Each 100 share lot's limit price is drawn from a one-sided gaussian with the peak
                # at the (possibly previous after our trade above) best bid.
                #rand = np.random.normal(0, bid * self.LIMIT_STD_CENTS)
                #TMP? Peak a little shy of the belief.
                rand = np.random.normal(0, base_limit * self.LIMIT_STD_CENTS)
                limit_price = int(round(base_limit + abs(rand)))

                self.placeLimitOrder(self.symbol, trade_shares, False,
                                     limit_price)
Example #20
    def kernelTerminating(self):
        super().kernelTerminating()

        # Skip order book dump if requested.
        if self.book_freq is None: return

        for symbol in self.order_books:
            book = self.order_books[symbol]

            # Log full depth quotes (price, volume) from this order book at some pre-determined frequency.
            if book.book_log:

                ### THE FAST WAY

                # This must already be sorted by time because it was a list of order book snapshots and time
                # only increases in our simulation.  BUT it can have duplicates if multiple orders happen
                # in the same nanosecond.  (This particularly happens if using nanoseconds as the discrete
                # but fine-grained unit for more theoretic studies.)
                dfLog = pd.DataFrame(book.book_log)
                dfLog.set_index('QuoteTime', inplace=True)

                if True:
                    dfLog = dfLog[~dfLog.index.duplicated(keep='last')]
                    dfLog.sort_index(inplace=True)
                    dfLog = dfLog.resample(self.book_freq).ffill()
                    dfLog.sort_index(inplace=True)

                    time_idx = pd.date_range(self.mkt_open,
                                             self.mkt_close,
                                             freq=self.book_freq,
                                             closed='right')
                    dfLog = dfLog.reindex(time_idx, method='ffill')
                    dfLog.sort_index(inplace=True)

                    dfLog = dfLog.stack()
                    dfLog.sort_index(inplace=True)

                    quotes = sorted(dfLog.index.get_level_values(1).unique())
                    min_quote = quotes[0]
                    max_quote = quotes[-1]
                    quotes = range(min_quote, max_quote + 1)

                    filledIndex = pd.MultiIndex.from_product(
                        [time_idx, quotes], names=['time', 'quote'])
                    dfLog = dfLog.reindex(filledIndex)
                    dfLog.fillna(0, inplace=True)

                    dfLog = dfLog.rename('Volume')  # rename returns a new Series; assign the result

                    df = pd.DataFrame(index=dfLog.index)
                    df['Volume'] = dfLog

                ### THE SLOW WAY

                if False:

                    # Make a MultiIndex dataframe of (Seconds, QuotePrice) -> Volume, giving the quote prices and volumes
                    # at the end of each second the market was open.
                    seconds = pd.date_range(self.mkt_open,
                                            self.mkt_close,
                                            freq=self.book_freq,
                                            closed='right')
                    quotes = dfLog.columns

                    df = pd.DataFrame(index=pd.MultiIndex.from_product(
                        [seconds, quotes], names=['time', 'quote']))
                    df['Volume'] = 0

                    df.sort_index(inplace=True)

                    logWriteStart = pd.Timestamp('now')
                    i = 0

                    for idx, row in df.iterrows():
                        if i % 1000 == 0:
                            print(
                                "Exchange writing order book log, interval {}, wallclock elapsed {}"
                                .format(idx[0],
                                        pd.Timestamp('now') - logWriteStart),
                                override=True)

                        best = dfLog.index.asof(idx[0])
                        if pd.isnull(best): continue
                        df.loc[idx, 'Volume'] = dfLog.loc[best, idx[1]]

                        i += 1

                    print("Exchange sorting order book index.", override=True)
                    df.sort_index(inplace=True)

                    # Create a filled version of the index without gaps from min to max quote price.
                    min_quote = df.index.get_level_values(1)[0]
                    max_quote = df.index.get_level_values(1)[-1]
                    quotes = range(min_quote, max_quote + 1)

                    # Create the new index and move the data over.
                    print("Exchange reindexing order book.", override=True)
                    filledIndex = pd.MultiIndex.from_product(
                        [seconds, quotes], names=['time', 'quote'])
                    df = df.reindex(filledIndex)

                    # NaNs represent that there is NO volume at this quoted price at this time, so they should become zero.
                    df.fillna(0, inplace=True)

                    print("Exchange archiving order book.", override=True)

                self.writeLog(df, filename='orderbook_{}'.format(symbol))

                print("Order book archival complete.", override=True)
Example #21
    def receiveMessage(self, currentTime, msg):
        super().receiveMessage(currentTime, msg)

        # Exchanges currently get a very fast (but not instant) computation delay of 1 ns for handling
        # all order types.  Note that computation delay MUST be updated before any calls to sendMessage.
        self.setComputationDelay(self.computation_delay)

        # We're closed.
        if currentTime > self.mkt_close:
            # Most messages after close will receive a 'MKT_CLOSED' message in response.  A few things
            # might still be processed, like requests for final trade prices or such.
            if msg.body['msg'] in ['LIMIT_ORDER', 'CANCEL_ORDER']:
                print("{} received {}: {}".format(self.name, msg.body['msg'],
                                                  msg.body['order']))
                self.sendMessage(msg.body['sender'],
                                 Message({"msg": "MKT_CLOSED"}))

                # Don't do any further processing on these messages!
                return
            elif 'QUERY' in msg.body['msg']:
                # Specifically do allow querying after market close, so agents can get the
                # final trade of the day as their "daily close" price for a symbol.
                pass
            else:
                print("{} received {}, discarded: market is closed.".format(
                    self.name, msg.body['msg']))
                self.sendMessage(msg.body['sender'],
                                 Message({"msg": "MKT_CLOSED"}))

                # Don't do any further processing on these messages!
                return

        # Log all received messages.
        if msg.body['msg'] in ['LIMIT_ORDER', 'CANCEL_ORDER']:
            self.logEvent(msg.body['msg'], msg.body['order'])
        else:
            self.logEvent(msg.body['msg'], msg.body['sender'])

        # Handle message types understood by this exchange.
        if msg.body['msg'] == "WHEN_MKT_OPEN":
            print("{} received WHEN_MKT_OPEN request from agent {}".format(
                self.name, msg.body['sender']))

            # The exchange is permitted to respond to requests for simple immutable data (like "what are your
            # hours?") instantly.  This does NOT include anything that queries mutable data, like equity
            # quotes or trades.
            self.setComputationDelay(0)

            self.sendMessage(
                msg.body['sender'],
                Message({
                    "msg": "WHEN_MKT_OPEN",
                    "data": self.mkt_open
                }))
        elif msg.body['msg'] == "WHEN_MKT_CLOSE":
            print("{} received WHEN_MKT_CLOSE request from agent {}".format(
                self.name, msg.body['sender']))

            # The exchange is permitted to respond to requests for simple immutable data (like "what are your
            # hours?") instantly.  This does NOT include anything that queries mutable data, like equity
            # quotes or trades.
            self.setComputationDelay(0)

            self.sendMessage(
                msg.body['sender'],
                Message({
                    "msg": "WHEN_MKT_CLOSE",
                    "data": self.mkt_close
                }))
        elif msg.body['msg'] == "QUERY_LAST_TRADE":
            symbol = msg.body['symbol']
            if symbol not in self.order_books:
                print(
                    "Last trade request discarded.  Unknown symbol: {}".format(
                        symbol))
            else:
                print(
                    "{} received QUERY_LAST_TRADE ({}) request from agent {}".
                    format(self.name, symbol, msg.body['sender']))

                self.sendMessage(
                    msg.body['sender'],
                    Message({
                        "msg": "QUERY_LAST_TRADE",
                        "symbol": symbol,
                        "data": self.order_books[symbol].last_trade,
                        "mkt_closed": currentTime > self.mkt_close
                    }))
        elif msg.body['msg'] == "QUERY_SPREAD":
            symbol = msg.body['symbol']
            depth = msg.body['depth']
            if symbol not in self.order_books:
                print("Bid-ask spread request discarded.  Unknown symbol: {}".
                      format(symbol))
            else:
                print("{} received QUERY_SPREAD ({}:{}) request from agent {}".
                      format(self.name, symbol, depth, msg.body['sender']))
                self.sendMessage(
                    msg.body['sender'],
                    Message({
                        "msg": "QUERY_SPREAD",
                        "symbol": symbol,
                        "depth": depth,
                        "bids": self.order_books[symbol].getInsideBids(depth),
                        "asks": self.order_books[symbol].getInsideAsks(depth),
                        "data": self.order_books[symbol].last_trade,
                        "mkt_closed": currentTime > self.mkt_close,
                        "book": self.order_books[symbol].prettyPrint(silent=True)
                    }))
        elif msg.body['msg'] == "QUERY_ORDER_STREAM":
            symbol = msg.body['symbol']
            length = msg.body['length']

            if symbol not in self.order_books:
                print("Order stream request discarded.  Unknown symbol: {}".
                      format(symbol))
            else:
                print(
                    "{} received QUERY_ORDER_STREAM ({}:{}) request from agent {}"
                    .format(self.name, symbol, length, msg.body['sender']))

                # We return indices [1:length] inclusive because the agent will want "orders leading up to the last
                # L trades", and the items under index 0 are more recent than the last trade.
                # Only respond for known symbols; order_books[symbol] would fail otherwise.
                self.sendMessage(
                    msg.body['sender'],
                    Message({
                        "msg": "QUERY_ORDER_STREAM",
                        "symbol": symbol,
                        "length": length,
                        "mkt_closed": currentTime > self.mkt_close,
                        "orders": self.order_books[symbol].history[1:length + 1]
                    }))
        elif msg.body['msg'] == "LIMIT_ORDER":
            order = msg.body['order']
            print("{} received LIMIT_ORDER: {}".format(self.name, order))
            if order.symbol not in self.order_books:
                print("Order discarded.  Unknown symbol: {}".format(
                    order.symbol))
            else:
                self.order_books[order.symbol].handleLimitOrder(
                    deepcopy(order))
        elif msg.body['msg'] == "CANCEL_ORDER":
            # Note: this is somewhat open to abuse, as in theory agents could cancel other agents' orders.
            # An agent could also become confused if they receive a (partial) execution on an order they
            # then successfully cancel, but receive the cancel confirmation first.  Things to think about
            # for later...
            order = msg.body['order']
            print("{} received CANCEL_ORDER: {}".format(self.name, order))
            if order.symbol not in self.order_books:
                print("Cancellation request discarded.  Unknown symbol: {}".
                      format(order.symbol))
            else:
                self.order_books[order.symbol].cancelOrder(deepcopy(order))
Example #22
    def wakeup(self, currentTime):
        # Parent class handles discovery of exchange times and market_open wakeup call.
        super().wakeup(currentTime)

        if not self.mkt_open or not self.mkt_close:
            # TradingAgent handles discovery of exchange times.
            return
        else:
            if not self.trading:
                self.trading = True

                # Time to start trading!
                print("{} is ready to start trading now.".format(self.name))

        # Steady state wakeup behavior starts here.

        # First, see if we have received a MKT_CLOSED message for the day.  If so,
        # there's nothing to do except clean-up.  In the future, we could also do
        # some activity that is not order based (because the exchange and simulation
        # will still be running, just not accepting orders) like final price quotes
        # or final trade information.
        if self.mkt_closed and (self.symbol in self.daily_close_price):
            # Market is closed and we already got the daily close price.
            return

        # Next, schedule our next wakeup roughly self.freq from now, jittered by up to
        # 100 offset_unit in either direction.  We do this early in case some of our
        # expected message responses don't arrive.

        offset = np.random.randint(-100, 100)
        self.setWakeup(currentTime +
                       (pd.Timedelta(self.freq) +
                        pd.Timedelta('{}{}'.format(offset, self.offset_unit))))

        # If the market is closed and we haven't obtained the daily close price yet,
        # do that before we cease activity for the day.  Don't do any other behavior
        # after market close.
        if self.mkt_closed and (self.symbol not in self.daily_close_price):
            self.getCurrentSpread(self.symbol)
            self.state = 'AWAITING_SPREAD'
            return

        # The agent's behavior has changed to cancel orders, wait for confirmation,
        # exit all positions, wait for confirmation, then enter new positions.
        # It does yield (return) in between these, so it can react to events that
        # occur in between.  This adds a few messages, but greatly improves the logical
        # flow of the simulation and solves several important problems.

        # On a true "wakeup", the agent is at one of its scheduled intervals.
        # We should first check for open orders we would like to cancel.
        # There should be no harm in issuing all the cancel orders simultaneously.

        if self.cancelOrders():
            self.state = 'AWAITING_CANCEL_CONFIRMATION'
            return

        # If we needed to cancel orders, the logic below will not execute, because
        # our ORDER_CANCELLED messages will come through receiveMessage().  If we
        # did not need to, we may as well carry on to exiting our positions.

        if self.exitPositions():
            self.state = 'AWAITING_EXIT_CONFIRMATION'
            return

        # The below logic is only reached if we neither needed to cancel orders
        # nor exit positions, in which case we may as well find out the most recent
        # trade prices and get ready to place new orders.

        self.getCurrentSpread(self.symbol)
        self.state = 'AWAITING_SPREAD'
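
The wakeup scheduling above composes two pandas Timedeltas. A standalone sketch of that arithmetic, assuming freq='5min' and offset_unit='s' for illustration (the real values come from the agent's configuration):

import numpy as np
import pandas as pd

currentTime = pd.Timestamp('2019-06-28 10:00:00')
freq, offset_unit = '5min', 's'          # assumed config values
offset = np.random.randint(-100, 100)    # jitter of up to +/- 100 offset units
wake_at = currentTime + (pd.Timedelta(freq) +
                         pd.Timedelta('{}{}'.format(offset, offset_unit)))
print(wake_at)  # e.g. 2019-06-28 10:04:37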
Example #23
    def updateEstimates(self):
        # Called by an SRG-type background agent that wishes to obtain a new fundamental observation,
        # update its internal estimation parameters, and compute a new total valuation for the
        # action it is considering.

        # The agent obtains a new noisy observation of the current fundamental value
        # and uses this to update its internal estimates in a Bayesian manner.
        obs_t = self.oracle.observePrice(self.symbol,
                                         self.currentTime,
                                         sigma_n=self.sigma_n)

        print("{} observed {} at {}".format(self.name, obs_t,
                                            self.currentTime))

        # Decide whether to buy or sell one unit at this time: forced at the holdings
        # limits, otherwise by coin flip.
        q = self.getHoldings(self.symbol)

        if q >= self.q_max:
            buy = False
            print("Long holdings limit: agent will SELL")
        elif q <= -self.q_max:
            buy = True
            print("Short holdings limit: agent will BUY")
        else:
            buy = bool(np.random.randint(0, 2))
            print("Coin flip: agent will {}".format("BUY" if buy else "SELL"))

        # Update internal estimates of the current fundamental value and our error of same.

        # If this is our first estimate, treat the previous wake time as "market open".
        if self.prev_wake_time is None:
            self.prev_wake_time = self.mkt_open

        # First, obtain an intermediate estimate of the fundamental value by advancing
        # time from the previous wake time to the current time, performing mean
        # reversion at each time step.

        # delta is the number of (one-nanosecond) time steps since the last wake.
        delta = (self.currentTime - self.prev_wake_time) / np.timedelta64(
            1, 'ns')

        # Update r estimate for time advancement.
        r_tprime = (1 - (1 - self.kappa)**delta) * self.r_bar
        r_tprime += ((1 - self.kappa)**delta) * self.r_t

        # Update sigma estimate for time advancement.
        sigma_tprime = ((1 - self.kappa)**(2 * delta)) * self.sigma_t
        sigma_tprime += ((1 - (1 - self.kappa)**(2 * delta)) /
                         (1 - (1 - self.kappa)**2)) * self.sigma_s

        # Apply the new observation, with "confidence" in the observation inversely proportional
        # to the observation noise, and "confidence" in the time-advanced estimate inversely
        # proportional to its variance.
        self.r_t = (self.sigma_n / (self.sigma_n + sigma_tprime)) * r_tprime
        self.r_t += (sigma_tprime / (self.sigma_n + sigma_tprime)) * obs_t

        # Posterior variance of the combined estimate.  Note this uses the time-advanced
        # variance sigma_tprime, not the stale pre-advancement self.sigma_t.
        self.sigma_t = (self.sigma_n * sigma_tprime) / (self.sigma_n + sigma_tprime)

        # Now having a best estimate of the fundamental at time t, we can make our best estimate
        # of the final fundamental (for time T) as of current time t.  Delta is now the number
        # of time steps remaining until the simulated exchange closes.
        delta = (self.mkt_close - self.currentTime) / np.timedelta64(1, 'ns')
        r_T = (1 - (1 - self.kappa)**delta) * self.r_bar
        # Project forward from the freshly updated posterior estimate self.r_t,
        # which already incorporates the new observation.
        r_T += ((1 - self.kappa)**delta) * self.r_t

        # Our final fundamental estimate should be quantized to whole units of value.
        r_T = int(round(r_T))

        # Finally (for the final fundamental estimation section) remember the current
        # time as the previous wake time.
        self.prev_wake_time = self.currentTime

        print("{} estimates r_T = {} as of {}".format(self.name, r_T,
                                                      self.currentTime))

        # Determine the agent's total valuation.  Shift holdings q from the range
        # [-q_max, q_max] to a non-negative index into the private value vector theta;
        # a buy values the next unit acquired, a sell the unit given up.
        q += (self.q_max - 1)
        theta = self.theta[q + 1 if buy else q]
        v = r_T + theta

        print("{} total unit valuation is {} (theta = {})".format(
            self.name, v, theta))

        # Return values needed to implement strategy and select limit price.
        return v, buy
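
For a concrete feel for the update equations above, here is a self-contained numeric sketch. All parameter values (kappa, r_bar, sigma_s, sigma_n, the prior, and the 60-second gap) are illustrative assumptions, not values from the original configuration:

kappa, r_bar = 1.67e-12, 100000      # mean-reversion rate per ns, long-run mean (cents)
r_t, sigma_t = 100500.0, 0.0         # prior estimate of the fundamental and its variance
sigma_s, sigma_n = 100000.0, 1000.0  # shock variance, observation noise variance
delta = 60 * 1e9                     # ns elapsed since the previous wake (60 seconds)
obs_t = 100400                       # new noisy observation from the oracle

# Advance the prior estimate to the current time under mean reversion.
r_tprime = (1 - (1 - kappa)**delta) * r_bar + ((1 - kappa)**delta) * r_t
sigma_tprime = (((1 - kappa)**(2 * delta)) * sigma_t +
                ((1 - (1 - kappa)**(2 * delta)) / (1 - (1 - kappa)**2)) * sigma_s)

# Fold in the observation as a precision-weighted average, as in updateEstimates().
r_post = ((sigma_n / (sigma_n + sigma_tprime)) * r_tprime +
          (sigma_tprime / (sigma_n + sigma_tprime)) * obs_t)
sigma_post = (sigma_n * sigma_tprime) / (sigma_n + sigma_tprime)
print(int(round(r_post)), sigma_post)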
Example #24
    def placeOrder(self):
        # Called when it is time for the agent to determine a limit price and place an order.
        # This method implements the HBL strategy and falls back to the ZI (superclass)
        # strategy if there is not enough information for the HBL strategy.

        # See if there is enough history for HBL.  If not, we will _exactly_ perform the
        # ZI placeOrder().  If so, we will use parts of ZI but compute our limit price
        # differently.  Note that we are not given orders more recent than the most recent
        # trade.

        if len(self.stream_history[self.symbol]) < self.L:
            # Not enough history for HBL.
            print("Insufficient history for HBL: length {}, L {}".format(
                len(self.stream_history[self.symbol]), self.L))
            super().placeOrder()
            return

        # There is enough history for HBL.

        # Use the superclass (ZI) method to obtain an observation, update internal estimate
        # parameters, decide to buy or sell, and calculate the total unit valuation, because
        # all of this logic is identical to ZI.
        v, buy = self.updateEstimates()

        # Walk through the visible order history and accumulate values needed for HBL's
        # estimation of successful transaction by limit price.
        # Tallies of visible order history keyed by limit price: successful asks (sa),
        # successful bids (sb), unsuccessful asks (ua), and unsuccessful bids (ub).
        sa = {}
        sb = {}
        ua = {}
        ub = {}

        low_p = sys.maxsize
        high_p = 0

        for h in self.stream_history[self.symbol]:
            # Entries follow increasing "transactions into the past": index zero holds
            # the orders placed since the most recent transaction.
            for order_id, order in h.items():
                p = order['limit_price']
                if p < low_p: low_p = p
                if p > high_p: high_p = p

                # For now if there are any transactions, consider the order successful.  For single
                # unit orders as used in SRG configs, this is sufficient.  For multi-unit orders,
                # we may wish to switch to a proportion of shares executed.
                if order['is_buy_order']:
                    if order['transactions']:
                        sb[p] = sb.get(p, 0) + 1
                    else:
                        ub[p] = ub.get(p, 0) + 1
                else:
                    if order['transactions']:
                        sa[p] = sa.get(p, 0) + 1
                    else:
                        ua[p] = ua.get(p, 0) + 1

        # For each limit price between the lowest and highest observed price in history,
        # compute the estimated probability of a successful transaction.  Remember the
        # price that produces the greatest expected surplus.
        best_p = None
        best_Pr = None
        best_Es = -sys.maxsize

        for p in range(low_p, high_p + 1):
            if buy:
                o = sum([sa[x] for x in sa if x <= p] +
                        [ua[x] for x in ua if x <= p])
                s = sum([sb[x] for x in sb if x <= p])
                u = sum([ub[x] for x in ub if x >= p])
            else:
                o = sum([sb[x] for x in sb if x >= p] +
                        [ub[x] for x in ub if x >= p])
                s = sum([sa[x] for x in sa if x >= p])
                u = sum([ua[x] for x in ua if x <= p])

            #print ("p {}, o {}, s {}, u {}".format(p, o, s, u))

            if o + s + u <= 0:
                Pr = 0
            else:
                Pr = (o + s) / (o + s + u)

            Es = Pr * (v - p) if buy else Pr * (p - v)

            if Es > best_Es:
                best_Es = Es
                best_Pr = Pr
                best_p = p

        # best_p should now contain the limit price that produces maximum expected surplus best_Es
        if best_Es > 0:
            print(
                "{} selects limit price {} with expected surplus {} (Pr = {:0.4f})"
                .format(self.name, best_p, int(round(best_Es)), best_Pr))

            # Place the constructed order.
            self.placeLimitOrder(self.symbol, 1, buy, best_p)
        else:
            print(
                "{} elects not to place an order (best expected surplus <= 0)".
                format(self.name))
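
To make the probability and surplus computation above concrete, here is a tiny standalone sketch of the BUY branch. The price tallies are made-up assumptions in the same {limit_price: count} form the loop builds:

sa = {100: 2}          # asks that transacted, by limit price
ua = {99: 1}           # asks that did not transact
sb = {98: 3}           # bids that transacted
ub = {97: 1, 101: 2}   # bids that did not transact

v, p = 103, 100        # unit valuation and a candidate buy limit price
o = sum(n for x, n in sa.items() if x <= p) + sum(n for x, n in ua.items() if x <= p)
s = sum(n for x, n in sb.items() if x <= p)
u = sum(n for x, n in ub.items() if x >= p)
Pr = (o + s) / (o + s + u) if o + s + u > 0 else 0
Es = Pr * (v - p)       # expected surplus of buying at p
print(Pr, Es)           # 0.75 2.25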
Example #25
    def wakeup(self, currentTime):
        # Parent class handles discovery of exchange times and market_open wakeup call.
        super().wakeup(currentTime)

        self.state = 'INACTIVE'

        if not self.mkt_open or not self.mkt_close:
            # TradingAgent handles discovery of exchange times.
            return
        else:
            if not self.trading:
                self.trading = True

                # Time to start trading!
                print("{} is ready to start trading now.".format(self.name))

        # Steady state wakeup behavior starts here.

        # If we've been told the market has closed for the day, we will only request
        # final price information, then stop.
        if self.mkt_closed and (self.symbol in self.daily_close_price):
            # Market is closed and we already got the daily close price.
            return

        # Schedule a wakeup for the next time this agent should arrive at the market
        # (following the conclusion of its current activity cycle).
        # We do this early in case some of our expected message responses don't arrive.

        # Agents should arrive according to a Poisson process.  This is equivalent to
        # each agent independently sampling its inter-arrival time from an exponential
        # distribution with scale (mean) beta = 1 / lambda, where lambda is the mean
        # arrival rate of the Poisson process.
        delta_time = np.random.exponential(scale=1.0 / self.lambda_a)
        self.setWakeup(currentTime +
                       pd.Timedelta('{}ns'.format(int(round(delta_time)))))

        # If the market has closed and we haven't obtained the daily close price yet,
        # do that before we cease activity for the day.  Don't do any other behavior
        # after market close.
        if self.mkt_closed and (self.symbol not in self.daily_close_price):
            self.getCurrentSpread(self.symbol)
            self.state = 'AWAITING_SPREAD'
            return

        # Issue cancel requests for any open orders.  Don't wait for confirmation, as presently
        # the only reason it could fail is that the order already executed.  (But requests won't
        # be generated for those, anyway, unless something strange has happened.)
        self.cancelOrders()

        # The ZI agent doesn't try to maintain a zero position, so there is no need to exit positions
        # as some "active trading" agents might.  It might exit a position based on its order logic,
        # but this will be as a natural consequence of its beliefs.

        # In order to use the SRG "strategic threshold" parameter (eta), the ZI agent needs the current
        # spread (inside bid/ask quote).  It would not otherwise need any trade/quote information.
        # If the calling agent is a subclass, don't initiate the strategy section of wakeup(), as it
        # may want to do something different.
        if type(self) == ZeroIntelligenceAgent:
            self.getCurrentSpread(self.symbol)
            self.state = 'AWAITING_SPREAD'
        else:
            self.state = 'ACTIVE'
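
A minimal sketch of the Poisson arrival scheduling above; the rate lambda_a = 1e-12 arrivals per nanosecond (a mean inter-arrival time of 1000 seconds) is an assumed value, not the one from the original configuration:

import numpy as np
import pandas as pd

lambda_a = 1e-12                       # assumed mean arrival rate, per nanosecond
currentTime = pd.Timestamp('2019-06-28 10:00:00')
delta_time = np.random.exponential(scale=1.0 / lambda_a)  # mean 1/lambda_a ns
next_wake = currentTime + pd.Timedelta('{}ns'.format(int(round(delta_time))))
print(next_wake)                       # on average about 16.7 minutes later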
Example #26
    def handleLimitOrder(self, order):
        # Matches a limit order or adds it to the order book.  Handles partial matches piecewise,
        # consuming all possible shares at the best price before moving on, without regard to
        # order size "fit" or minimizing number of transactions.  Sends one notification per
        # match.
        if order.symbol != self.symbol:
            print("{} order discarded.  Does not match OrderBook symbol: {}".
                  format(order.symbol, self.symbol))
            return

        if (order.quantity <= 0) or (int(order.quantity) != order.quantity):
            print(
                "{} order discarded.  Quantity ({}) must be a positive integer."
                .format(order.symbol, order.quantity))
            return

        # Add the order under index 0 of history: orders since the most recent trade.
        self.history[0][order.order_id] = {
            'entry_time': self.owner.currentTime,
            'quantity': order.quantity,
            'is_buy_order': order.is_buy_order,
            'limit_price': order.limit_price,
            'transactions': [],
            'cancellations': []
        }

        matching = True

        self.prettyPrint()

        executed = []

        while matching:
            matched_order = deepcopy(self.executeOrder(order))

            if matched_order:
                # Decrement quantity on new order and notify traders of execution.
                filled_order = deepcopy(order)
                filled_order.quantity = matched_order.quantity
                filled_order.fill_price = matched_order.fill_price

                order.quantity -= filled_order.quantity

                print("MATCHED: new order {} vs old order {}".format(
                    filled_order, matched_order))
                print(
                    "SENT: notifications of order execution to agents {} and {} for orders {} and {}"
                    .format(filled_order.agent_id, matched_order.agent_id,
                            filled_order.order_id, matched_order.order_id))

                self.owner.sendMessage(
                    order.agent_id,
                    Message({
                        "msg": "ORDER_EXECUTED",
                        "order": filled_order
                    }))
                self.owner.sendMessage(
                    matched_order.agent_id,
                    Message({
                        "msg": "ORDER_EXECUTED",
                        "order": matched_order
                    }))

                # Accumulate the volume and average share price of the currently executing inbound trade.
                executed.append(
                    (filled_order.quantity, filled_order.fill_price))

                if order.quantity <= 0:
                    matching = False

            else:
                # No matching order was found, so the new order enters the order book.  Notify the agent.
                self.enterOrder(deepcopy(order))

                print("ACCEPTED: new order {}".format(order))
                print(
                    "SENT: notifications of order acceptance to agent {} for order {}"
                    .format(order.agent_id, order.order_id))

                self.owner.sendMessage(
                    order.agent_id,
                    Message({
                        "msg": "ORDER_ACCEPTED",
                        "order": order
                    }))

                matching = False

        if not matching:
            # Now that we are done executing or accepting this order, log the new best bid and ask.
            if self.bids:
                self.owner.logEvent(
                    'BEST_BID',
                    "{},{},{}".format(self.symbol, self.bids[0][0].limit_price,
                                      sum([o.quantity for o in self.bids[0]])))

            if self.asks:
                self.owner.logEvent(
                    'BEST_ASK',
                    "{},{},{}".format(self.symbol, self.asks[0][0].limit_price,
                                      sum([o.quantity for o in self.asks[0]])))

            # Also log the last trade (total share quantity, average share price).
            if executed:
                trade_qty = 0
                trade_price = 0
                for q, p in executed:
                    print("Executed: {} @ {}".format(q, p))
                    trade_qty += q
                    trade_price += (p * q)

                avg_price = int(round(trade_price / trade_qty))
                print("Avg: {} @ ${:0.4f}".format(trade_qty, avg_price))
                self.owner.logEvent('LAST_TRADE',
                                    "{},${:0.4f}".format(trade_qty, avg_price))

                self.last_trade = avg_price

            # Finally, log the full depth of the order book.
            row = {'QuoteTime': self.owner.currentTime}
            for quote in self.quotes_seen:
                row[quote] = 0
            for quote, volume in self.getInsideBids():
                row[quote] = -volume
                self.quotes_seen.add(quote)
            for quote, volume in self.getInsideAsks():
                if quote in row and row[quote] != 0:
                    # A crossed book (bids and asks at the same price) should never occur.
                    print("WARNING: THIS IS A REAL PROBLEM: an order book contains "
                          "bids and asks at the same quote price!")
                row[quote] = volume
                self.quotes_seen.add(quote)
            self.book_log.append(row)

        self.prettyPrint()
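
The trade-aggregation step near the end of the method reduces the per-match (quantity, fill_price) list to a total quantity and a volume-weighted average price. A standalone sketch with assumed fills (prices in integer cents):

executed = [(10, 10012), (5, 10015)]   # assumed (quantity, fill_price) matches
trade_qty = sum(q for q, p in executed)
trade_value = sum(q * p for q, p in executed)
avg_price = int(round(trade_value / trade_qty))
print(trade_qty, avg_price)            # 15 10013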
Example #27
NUM_TRAIN = len(train_data) - NUM_VAL
NUM_TEST = len(test_data)

loader_train = DataLoader(train_data, batch_size=BATCH_SIZE, sampler=sampler.SubsetRandomSampler(range(NUM_TRAIN)))
loader_val = DataLoader(train_data, batch_size=BATCH_SIZE,
                        sampler=sampler.SubsetRandomSampler(range(NUM_TRAIN, NUM_TRAIN + NUM_VAL)))
loader_test = DataLoader(test_data, batch_size=BATCH_SIZE)
# Tiny loaders for quick overfit/sanity checks: first 100 samples to train, next 100 to validate.
# (range(101) would overlap the validation subset at index 100.)
loader_tiny_train = DataLoader(train_data, batch_size=BATCH_SIZE, sampler=sampler.SubsetRandomSampler(range(100)))
loader_tiny_val = DataLoader(train_data, batch_size=BATCH_SIZE, sampler=sampler.SubsetRandomSampler(range(100, 200)))

dtype = torch.float32
if USE_GPU and torch.cuda.is_available():  # determine whether to use GPU
    device = torch.device('cuda')
else:
    device = torch.device('cpu')
print('using device:', device)


'''
Take a loader, model, and optimizer.  Use the optimizer to update the model
based on the training data from the loader.  Runs indefinitely, saving the
best and the latest checkpoints as it trains.
'''
def train(loader_train, loader_val, model, optimizer, epoch, loss_list=None, val_acc_list=None):
    # Avoid mutable default arguments; a shared default list persists across calls.
    if loss_list is None:
        loss_list = []
    if val_acc_list is None:
        val_acc_list = []
    model = model.to(device=device)
    while True:
        tot_correct = 0.0
        tot_samples = 0.0
        tot_loss = 0.0
        for t, sample in enumerate(loader_train):
            x = sample['image'].unsqueeze(1)