Code Example #1
def exp1():
    # create dataset
    dataset = Dataset(pipe1)

    # create word2vec model
    w2v = Word2Vocab(dim=200)

    trainfeed = DataFeed(op1, batch_size=64, 
            datapoints=dataset.trainset, w2v=w2v)
    testfeed  = DataFeed(op1, batch_size=32,
            datapoints=dataset.testset, w2v=w2v)

    # instantiate model
    asreader = AttentionSumReader(hdim=200, emb_dim=200,
            embedding=w2v.load_embeddings(), 
            vocab_size=w2v.vocab_size(),
            num_layers=3)

    # training
    with tf.Session() as sess:
        # init variables
        sess.run(tf.global_variables_initializer())

        # create trainer instance
        trainer = Trainer(model=asreader,
                trainfeed=trainfeed,
                testfeed=testfeed,
                verbose=True,
                dump_cache=False)

        # let the training begin
        trainer.train()
Code Example #2
def experiment(epochs=10, checkpoint=1, train_datapoints=train_datapoints):
    model = Model(Config(), len(INPUT_VOCAB), len(OUTPUT_VOCAB))
    if Config().cuda: model = model.cuda()

    split_index = int(len(train_datapoints) * 0.85)
    train_feed = DataFeed(train_datapoints[:split_index],
                          batchop=batchop,
                          batch_size=128)
    test_feed = DataFeed(train_datapoints[split_index:],
                         batchop=batchop,
                         batch_size=120)

    trainer = Trainer(model=model,
                      loss_function=loss,
                      accuracy_function=accuracy,
                      checkpoint=checkpoint,
                      epochs=epochs,
                      feeder=Feeder(train_feed, test_feed))

    predictor = Predictor(model=model,
                          repr_function=repr_function,
                          feed=test_feed)

    for e in range(1):
        output, results = predictor.predict(
            random.choice(range(test_feed.num_batch)))
        display(HTML(results._repr_html_()))
        del output, results
        trainer.train()
Code Example #3
def eval_joint_model(model, model_params):
    # get test data for evaluation
    data, metadata = gather('10k', 0)

    # build data format
    dformat = ['contexts', 'questions', 'answers']

    accuracies = []
    with tf.Session() as sess:

        # reload model params
        sess.run(set_op(model_params))

        # build trainer
        trainer = Trainer(sess, model, batch_size=128)

        for i in range(1, 21):
            # test feed for task 'i'
            testfeed = DataFeed(dformat, data=data['test'][i])

            # evaluate
            loss, acc = trainer.evaluate(feed=testfeed)

            # note down task accuracy
            accuracies.append(acc)

    print('\n:: Evaluation on individual tasks\n::   Accuracy')
    for i, acc in enumerate(accuracies):
        print(':: \t[{}] {}'.format(i, acc))
Code Example #4
File: simulator.py  Project: davidbarkhuizen/simagora
  def __init__(self, instrument, strategies, start_date, end_date, opening_bal,time_stamp=None):
    '''
    constructs message queues
    initialises brokers and traders
    '''    
    self.instrument = instrument
    
    self.start_date = start_date
    self.end_date = end_date
    
    self.opening_bal = opening_bal
    
    self.datafeed = DataFeed(instrument)
    
    self.orderQ = MsgQ()    
    self.receiptQ = MsgQ()
    
    self.term_req_Q = MsgQ()
    self.term_notice_Q = MsgQ()
    
    self.broker = Broker(self.datafeed, self.orderQ, self.receiptQ, self.term_req_Q, self.term_notice_Q)   

    self.traders = []    
    for strategy in strategies:
      trader = Trader(self.datafeed, self.broker, self.opening_bal, self.instrument, strategy, self.start_date, self.end_date)
      self.traders.append(trader)
      
    self.time_stamp = time_stamp
Code Example #5
def experiment(eons=1000, epochs=10, checkpoint=5):
    try:
        try:
            model =  BiLSTMDecoderModel(Config(), len(INPUT_VOCAB), len(CHAR_VOCAB), len(OUTPUT_VOCAB))
            if Config().cuda:  model = model.cuda()
            model.load_state_dict(torch.load('attn_model.pth'))
            log.info('loaded the old image for the model')
        except:
            log.exception('failed to load the model')
            model =  BiLSTMDecoderModel(Config(), len(INPUT_VOCAB), len(CHAR_VOCAB), len(OUTPUT_VOCAB))
            if Config().cuda:  model = model.cuda()

        print('**** the model', model)

        train_feed, test_feed, predictor_feed = {}, {}, {}
        trainer, predictor = {}, {}

        max_size = max( sorted(   [len(i[0]) for i in classified_datapoints.values()]   )[:-1] )
        #max_size = max( sorted(   [len(i[0]) for i in classified_datapoints.values()]   ) )
        
        for label in classified_datapoints.keys():
            if len(classified_datapoints[label][0]) < 1: continue

            label_desc = '-'.join([OUTPUT_VOCAB[l] for l in [i for i, x in enumerate(label) if x == 1]] )
            print('label: {} and size: {}'.format(label, len(classified_datapoints[label][0])))
            train_feed[label]      = DataFeed(label_desc, classified_datapoints[label][0], batchop=batchop, batch_size=max(128, int(len(classified_datapoints[label][0])/600))   )
            test_feed[label]       = DataFeed(label_desc, classified_datapoints[label][1], batchop=batchop, batch_size=32)
            predictor_feed[label]  = DataFeed(label_desc, classified_datapoints[label][1], batchop=batchop, batch_size=12)
            
            turns = int(max_size/train_feed[label].size) + 1            
            trainer[label] = Trainer(name=label_desc,
                                     model=model, 
                                     loss_function=partial(loss, scale=1), accuracy_function=accuracy, f1score_function=f1score_function, 
                                     checkpoint=checkpoint, epochs=epochs,
                                     feeder = Feeder(train_feed[label], test_feed[label]))

            predictor[label] = Predictor(model=model, feed=predictor_feed[label], repr_function=repr_function)

        test_predictor_feed = DataFeed('test', test_datapoints, batchop=test_batchop, batch_size=128)
        test_predictor = Predictor(model=model, feed=test_predictor_feed, repr_function=test_repr_function)

        all_class_train_feed      = MultiplexedDataFeed('atrain',  train_feed.values(), batchop=batchop, batch_size=256)
        all_class_test_feed       = MultiplexedDataFeed('atest',   test_feed.values(), batchop=batchop, batch_size=256)
        all_class_predictor_feed  = MultiplexedDataFeed('apredict',predictor_feed.values(), batchop=batchop, batch_size=256)
        
        all_class_trainer = Trainer(name='all_class_trainer',
                                    model=model, 
                                    loss_function=partial(loss, scale=1), accuracy_function=accuracy, f1score_function=f1score_function, 
                                    checkpoint=checkpoint, epochs=epochs,
                                    feeder = Feeder(all_class_train_feed, all_class_test_feed))
        
        all_class_predictor = Predictor(model=model, feed=all_class_predictor_feed, repr_function=repr_function)

        label_trainer_triples = sorted( [(l, t, train_feed[l].size) for l, t in trainer.items()], key=lambda x: x[2] )
        log.info('trainers built {}'.format(pformat(label_trainer_triples)))

        dump = open('results/experiment_attn.csv', 'w').close()
        for e in range(eons):
            dump = open('results/experiment_attn.csv', 'a')
            dump.write('#========================after eon: {}\n'.format(e))
            dump.close()
            log.info('on {}th eon'.format(e))

            
            if e and not e % 1:
                test_results = ListTable()
                test_dump = open('results/experiment_attn_over_test_{}.csv'.format(e), 'w')
                test_dump.write('|'.join(['id', 'toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']) + '\n')
                log.info('running over test')

                for i in tqdm(range(test_predictor_feed.num_batch)):
                    log.debug('i: {}'.format(i))
                    output, results = test_predictor.predict(i)
                    test_results += results

                test_dump.write(repr(test_results))            
                test_dump.close()

            with open('results/experiment_attn.csv', 'a') as dump:
                output, results = all_class_predictor.predict(random.choice(range(all_class_predictor_feed.num_batch)))
                dump.write(repr(results))
                del output, results
                
            all_class_trainer.train()
            

            """
            for label, _, _ in reversed(label_trainer_triples):
                if not sum(label) and e and not e % 10:  #Avoid neutral classes in every epoch
                    continue
                
                label_desc = '-'.join([OUTPUT_VOCAB[l] for l in [i for i, x in enumerate(label) if x == 1]] )
                log.info('=================================== training for {} datapoints ========================================'.format(label_desc))

                with open('results/experiment_attn.csv', 'a') as dump:
                    output, results = predictor[label].predict(random.choice(range(predictor_feed[label].num_batch)))
                    dump.write(repr(results))
                    del output, results
                
                turns = int(max_size/train_feed[label].size/6) + 1
                log.info('========================  size: {} and turns: {}==========================================='.format(train_feed[label].size, turns))                
                for turn in range(turns):
                    log.info('==================================  label: {} and turn: {}/{}====================================='.format(label_desc, turn, turns))                
                    trainer[label].train()
            """
    except:
        log.exception('####################')
        torch.save(model.state_dict(), open('attn_model.pth', 'wb'))

        return locals()
Code Example #6
def train_separate(task, dataset='1k', iterations=1, batch_size=32):

    # get data for task
    data, metadata = gather(dataset, task)

    # build data format
    dformat = ['contexts', 'questions', 'answers']

    # create feeds
    trainfeed = DataFeed(dformat, data=data['train'])
    testfeed = DataFeed(dformat, data=data['test'])

    hdim = 20 if task else 50
    eval_interval = 100 if task else 10
    batch_size = 32 if task else 128

    # instantiate model
    model = MemoryNet(hdim=hdim,
                      num_hops=3,
                      memsize=metadata['clen'],
                      sentence_size=metadata['slen'],
                      qlen=metadata['qlen'],
                      vocab_size=metadata['vocab_size'],
                      num_candidates=metadata['candidates']['vocab_size'])

    # info
    print(':: <task {}> [0/2] Info'.format(task))
    print(':: \t memory size : {}, #candidates : {}'.format(
        metadata['clen'], metadata['candidates']['vocab_size']))

    with tf.Session() as sess:
        # run for multiple initializations
        i, accuracy, model_params = 0, [0.], [None]
        while accuracy[-1] < 0.95 and i < iterations:
            # init session
            sess.run(tf.global_variables_initializer())

            # create trainer
            trainer = Trainer(sess,
                              model,
                              trainfeed,
                              testfeed,
                              batch_size=batch_size)

            print('\n:: <task {}> ({}) [1/2] Pretraining'.format(task, i))
            # pretrain
            acc = trainer.fit(epochs=100000,
                              eval_interval=1,
                              mode=Trainer.PRETRAIN,
                              verbose=False,
                              lr=0.0005)

            print(':: \tAccuracy after pretraining: ', acc)

            print('\n:: <task {}> ({}) [2/2] Training'.format(task, i))
            # train
            acc = trainer.fit(epochs=1000000,
                              eval_interval=eval_interval,
                              mode=Trainer.TRAIN,
                              verbose=False,
                              lr=0.0005)

            print(':: \tAccuracy after training: ', acc)

            # next iteration
            i += 1
            # add accuracy to list
            accuracy.append(acc)
            model_params.append(sess.run(tf.trainable_variables()))
            print(acc)

        print(':: [x/x] End of training')
        print(':: Max accuracy :', max(accuracy))

        # return model and best model params
        return model, model_params[accuracy.index(max(accuracy))]
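The model/parameter pair returned above matches what eval_joint_model in Code Example #3 expects, so the two can be chained. A minimal driver sketch (assumed, not from the source project; the task id and iteration count are illustrative):

# Hypothetical driver: train on a single task, then evaluate the best
# parameters across all 20 tasks with eval_joint_model (Code Example #3).
if __name__ == '__main__':
    model, best_params = train_separate(task=1, dataset='1k', iterations=3)
    eval_joint_model(model, best_params)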
Code Example #7
def train_separate_all(dataset='1k'):

    batch_size = 64

    task_max_acc = []
    for task in range(1, 21):
        # get data for task
        data, metadata = gather(dataset, task)

        # gather info from metadata
        num_candidates = metadata['candidates']['vocab_size']
        vocab_size = metadata['vocab_size']
        memsize = metadata['clen']
        sentence_size = metadata['slen']
        qlen = metadata['qlen']

        print(':: <task {}> memory size : {}'.format(task, memsize))

        # build data format
        dformat = ['contexts', 'questions', 'answers']

        # create feeds
        trainfeed = DataFeed(dformat, data=data['train'])
        testfeed = DataFeed(dformat, data=data['test'])

        # instantiate model
        model = MemoryNet(hdim=20,
                          num_hops=3,
                          memsize=memsize,
                          sentence_size=sentence_size,
                          qlen=qlen,
                          vocab_size=vocab_size,
                          num_candidates=num_candidates)

        with tf.Session() as sess:
            # run for multiple initializations
            i, accuracy = 0, [0.]
            while accuracy[-1] < 0.95 and i < 5:
                # init session
                sess.run(tf.global_variables_initializer())

                # create trainer
                trainer = Trainer(sess,
                                  model,
                                  trainfeed,
                                  testfeed,
                                  batch_size=batch_size)

                print('\n:: <task {}> ({}) [1/2] Pretraining'.format(task, i))
                # pretrain
                acc = trainer.fit(epochs=100000,
                                  eval_interval=1,
                                  mode=Trainer.PRETRAIN,
                                  verbose=False,
                                  batch_size=64,
                                  lr=0.0005)
                print(':: \tAccuracy after pretraining: ', acc)

                print('\n:: <task {}> ({}) [2/2] Training'.format(task, i))
                # train
                acc = trainer.fit(epochs=1000000,
                                  eval_interval=10,
                                  mode=Trainer.TRAIN,
                                  verbose=False,
                                  batch_size=64,
                                  lr=0.0005)
                print(':: \tAccuracy after training: ', acc)

                # next iteration
                i += 1
                # add accuracy to list
                accuracy.append(acc)
                print(acc)

            print('Experiment Results : ')
            for i, a in enumerate(accuracy[1:]):
                print(i, a)

        task_max_acc.append(max(accuracy))

    print('____________________________________________')
    for i, acc in enumerate(task_max_acc):
        print('Task ({}) : {}'.format(i + 1, acc))
    print('____________________________________________')
Code Example #8
def experiment(VOCAB,
               LABELS,
               WORD2INDEX,
               LABEL2INDEX,
               raw_samples,
               loss_weight,
               datapoints=[[], []],
               eons=1000,
               epochs=10,
               checkpoint=5):
    try:
        try:
            model = BiLSTMDecoderModel(Config(), len(VOCAB), len(LABELS))
            if Config().cuda: model = model.cuda()
            model.load_state_dict(torch.load('{}.{}'.format(SELF_NAME, 'pth')))
            log.info('loaded the old image for the model, from {}'.format(
                '{}.{}'.format(SELF_NAME, 'pth')))
        except:
            log.exception('failed to load the model')
            model = BiLSTMDecoderModel(Config(), len(VOCAB), len(LABELS))
            if Config().cuda: model = model.cuda()
        print('**** the model', model)

        name = SELF_NAME
        _batchop = partial(batchop, LABEL2INDEX=LABEL2INDEX)
        train_feed = DataFeed(name,
                              datapoints[0],
                              batchop=_batchop,
                              vocab=WORD2INDEX,
                              batch_size=1024)
        test_feed = DataFeed(name,
                             datapoints[1],
                             batchop=_batchop,
                             vocab=WORD2INDEX,
                             batch_size=256)
        predictor_feed = DataFeed(name,
                                  datapoints[1],
                                  batchop=_batchop,
                                  vocab=WORD2INDEX,
                                  batch_size=128)

        loss_weight = Variable(torch.Tensor(loss_weight))
        if Config().cuda: loss_weight = loss_weight.cuda()
        _loss = partial(loss, loss_function=nn.NLLLoss(loss_weight))
        trainer = Trainer(name=name,
                          model=model,
                          loss_function=_loss,
                          accuracy_function=accuracy,
                          checkpoint=checkpoint,
                          epochs=epochs,
                          feeder=Feeder(train_feed, test_feed))

        predictor = Predictor(model=model,
                              feed=predictor_feed,
                              repr_function=partial(repr_function,
                                                    VOCAB=VOCAB,
                                                    LABELS=LABELS,
                                                    raw_samples=raw_samples))

        for e in range(eons):
            dump = open('results/experiment_attn.csv', 'a')
            dump.write('#========================after eon: {}\n'.format(e))
            dump.close()
            log.info('on {}th eon'.format(e))

            with open('results/experiment_attn.csv', 'a') as dump:
                results = ListTable()
                for ri in range(predictor_feed.num_batch):
                    output, _results = predictor.predict(ri)
                    results.extend(_results)
                dump.write(repr(results))
            if not trainer.train():
                raise Exception

    except:
        log.exception('####################')
        trainer.save_best_model()

        return locals()
Code Example #9
def train_separate(task, dataset='1k', iterations=1, batch_size=128):

    # get data for task
    data, metadata = gather(dataset, task)

    # build data format
    dformat = ['contexts', 'questions', 'answers']

    # create feeds
    trainfeed = DataFeed(dformat, data=data['train'])
    testfeed = DataFeed(dformat, data=data['test'])

    # instantiate model
    model = RelationNet(clen=metadata['clen'],
                        qlen=metadata['qlen'],
                        slen=metadata['slen'],
                        vocab_size=metadata['vocab_size'],
                        num_candidates=metadata['candidates']['vocab_size'])

    # info
    print(':: <task {}> [0/1] Info'.format(task))
    print(':: \t memory size : {}, #candidates : {}'.format(
        metadata['clen'], metadata['candidates']['vocab_size']))

    # create visualizer
    vis = Visualizer()
    vis.attach_scalars(model)

    with tf.Session() as sess:
        # run for multiple initializations
        i, accuracy, model_params = 0, [0.], [None]
        while accuracy[-1] < 0.95 and i < iterations:
            # init session
            sess.run(tf.global_variables_initializer())

            # add graph to visualizer
            vis.attach_graph(sess.graph)

            # create trainer
            trainer = Trainer(sess,
                              model,
                              trainfeed,
                              testfeed,
                              batch_size=batch_size)

            print('\n:: <task {}> ({}) [1/1] Training'.format(task, i))
            # train
            acc = trainer.fit(epochs=1000000,
                              eval_interval=1,
                              mode=Trainer.TRAIN,
                              verbose=True,
                              lr=0.0002)

            print(':: \tAccuracy after training: ', acc)

            # next iteration
            i += 1
            # add accuracy to list
            accuracy.append(acc)
            model_params.append(sess.run(tf.trainable_variables()))
            print(acc)

        print(':: [x/x] End of training')
        print(':: Max accuracy :', max(accuracy))

        # return model and best model params
        return model, model_params[accuracy.index(max(accuracy))]
Code Example #10
File: cli.py  Project: louis-cai/Xrypto
 def create_datafeed(self, args):
     self.datafeed = DataFeed()
     self.init_observers_and_markets(args)
Code Example #11
def experiment(eons=1000, epochs=1, checkpoint=1):
    try:
        model = BiLSTMDecoderModel(Config(), len(INPUT_VOCAB), len(CHAR_VOCAB),
                                   len(OUTPUT_VOCAB))
        if Config().cuda: model = model.cuda()

        classified_train_feed = DataFeed(classified_datapoints[0],
                                         batchop=batchop,
                                         batch_size=128)
        classified_test_feed = DataFeed(classified_datapoints[1],
                                        batchop=batchop,
                                        batch_size=128)

        classified_trainer = Trainer(model=model,
                                     loss_function=loss,
                                     accuracy_function=accuracy,
                                     checkpoint=checkpoint,
                                     epochs=epochs,
                                     feeder=Feeder(classified_train_feed,
                                                   classified_test_feed))

        classified_predictor_feed = DataFeed(classified_datapoints[1],
                                             batchop=batchop,
                                             batch_size=12)
        classified_predictor = Predictor(model=model,
                                         feed=classified_predictor_feed,
                                         repr_function=repr_function)

        non_classified_train_feed = DataFeed(non_classified_datapoints[0],
                                             batchop=batchop,
                                             batch_size=128)
        non_classified_test_feed = DataFeed(non_classified_datapoints[1],
                                            batchop=batchop,
                                            batch_size=128)
        non_classified_trainer = Trainer(model=model,
                                         loss_function=loss,
                                         accuracy_function=accuracy,
                                         checkpoint=checkpoint,
                                         epochs=epochs,
                                         feeder=Feeder(
                                             non_classified_train_feed,
                                             non_classified_test_feed))

        non_classified_predictor_feed = DataFeed(non_classified_datapoints[1],
                                                 batchop=batchop,
                                                 batch_size=12)
        non_classified_predictor = Predictor(
            model=model,
            feed=non_classified_predictor_feed,
            repr_function=repr_function)

        test_predictor_feed = DataFeed(test_datapoints,
                                       batchop=test_batchop,
                                       batch_size=128)
        test_predictor = Predictor(model=model,
                                   feed=test_predictor_feed,
                                   repr_function=test_repr_function)

        dump = open('results/experiment_attn.csv', 'w')
        for e in range(eons):
            dump.write('#========================after eon: {}\n'.format(e))
            log.info('on {}th eon'.format(e))
            output, results = classified_predictor.predict(
                random.choice(range(classified_predictor_feed.num_batch)))
            dump.write(repr(results))
            del output, results
            output, results = non_classified_predictor.predict(
                random.choice(range(non_classified_predictor_feed.num_batch)))
            dump.write(repr(results))
            del output, results

            #non_classified_trainer.train()
            #for i in range(int(non_classified_train_feed.size/classified_train_feed.size)):
            classified_trainer.train()

            if e and not e % 10:
                test_results = ListTable()
                test_dump = open(
                    'results/experiment_attn_over_test_{}.csv'.format(e), 'w')
                test_dump.write('|'.join([
                    'id', 'toxic', 'severe_toxic', 'obscene', 'threat',
                    'insult', 'identity_hate'
                ]) + '\n')
                log.info('running over test')

                for i in tqdm(range(test_predictor_feed.num_batch)):
                    log.debug('i: {}'.format(i))
                    output, results = test_predictor.predict(i)
                    test_results += results

                test_dump.write(repr(test_results))
                test_dump.close()

    except KeyboardInterrupt:
        torch.save(model.state_dict(), open('attn_model.pth', 'wb'))
        dump.close()
        return locals()
Code Example #12
 def __call__(self, dset='training'):
     return DataFeed(self.dformat, self.vdata[dset])
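A hedged usage note: this __call__ reads like part of a small feed-factory object, letting one instance hand out a DataFeed per split. A minimal sketch, assuming a surrounding class (the name FeedFactory and its constructor are placeholders, not from the source project):

# Sketch only; FeedFactory, dformat and vdata are assumed names.
class FeedFactory(object):
    def __init__(self, dformat, vdata):
        self.dformat = dformat   # e.g. ['contexts', 'questions', 'answers']
        self.vdata = vdata       # dict mapping split name -> data

    def __call__(self, dset='training'):
        return DataFeed(self.dformat, self.vdata[dset])

feeds = FeedFactory(dformat, data)   # data holds 'training'/'test' splits
trainfeed = feeds('training')
testfeed = feeds('test')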
Code Example #13
File: simulator.py  Project: davidbarkhuizen/simagora
class Simulator(object):
  '''
  simulation manager
  '''  
  def __init__(self, instrument, strategies, start_date, end_date, opening_bal,time_stamp=None):
    '''
    constructs message queues
    initialises brokers and traders
    '''    
    self.instrument = instrument
    
    self.start_date = start_date
    self.end_date = end_date
    
    self.opening_bal = opening_bal
    
    self.datafeed = DataFeed(instrument)
    
    self.orderQ = MsgQ()    
    self.receiptQ = MsgQ()
    
    self.term_req_Q = MsgQ()
    self.term_notice_Q = MsgQ()
    
    self.broker = Broker(self.datafeed, self.orderQ, self.receiptQ, self.term_req_Q, self.term_notice_Q)   

    self.traders = []    
    for strategy in strategies:
      trader = Trader(self.datafeed, self.broker, self.opening_bal, self.instrument, strategy, self.start_date, self.end_date)
      self.traders.append(trader)
      
    self.time_stamp = time_stamp
 
  def run(self):    
    '''
    simulate event series
    '''   
    current_date = date(self.start_date.year, self.start_date.month, self.start_date.day)
    
    length = self.end_date - self.start_date
    d_total = length.days
    display_int = max(1, d_total // 10)  # integer interval, avoids float modulo below
      
    while (current_date <= self.end_date):    
      
      # PROCESS TRADING DAYS
      if (self.datafeed.date_is_trading_day(current_date) == True):        

        self.broker.open_manage_and_close(current_date)                
        
        # book keeping
        for trader in self.traders:
          trader.ac.tally_individual_open_positions(current_date)
          trader.ac.record_net_end_of_day_pos(current_date)
          trader.ac.record_end_of_day_balances(current_date)            
        for trader in self.traders:
          trader.execute_strategy(current_date)          

        #self.broker.log_closed_positions()
        self.broker.log_all_positions(current_date)

      # IGNORE NON-TRADING DAYS
      else:
        pass
    
      current_date = current_date + timedelta(days=1)  
      
      elapsed = (self.end_date - current_date)
      d_elapsed = elapsed.days
      progress = (float(d_total) - float(d_elapsed)) / float(d_total) * 100.0
      if (d_elapsed % display_int == 0):
        print('%i/100' % int(progress))
      
    self.traders[0].strategy.log_self()

  def plot(self):
    '''
    analyse & report on simulation path and outcome
    '''        
    d = date(self.start_date.year, self.start_date.month, self.start_date.day)
      
    dates = []
    prices = []
   
    cash_bal = []
    margin_bal = []
    net_booked_position = []
    net_open_position = []
    
    daily_high = []
    daily_low = []
    
    mavg_band_ceiling = []
    mavg_band_floor = []
    
    trader = self.broker.traders[0]   
    ac = trader.ac
   
    df = self.datafeed
      
    pMin = None
    pMax = None
      
    while (d <= self.end_date):          
      # TRADING DAYS
      if (self.datafeed.date_is_trading_day(d) == True):        
        dates.append(d) 
          
        mavg_top = df.n_day_moving_avg(None, d, 'high', Strategy.n)
        mavg_bottom = df.n_day_moving_avg(None, d, 'low', Strategy.n)        
          
        mavg_band_ceiling.append(mavg_top)
        mavg_band_floor.append(mavg_bottom)
        
        pinfo = df.get_price_info(None, d)        
        prices.append(pinfo['close'])
        daily_high.append(pinfo['high'])
        daily_low.append(pinfo['low'])
        
        s = str(d) + ',' + str(mavg_band_ceiling[len(mavg_band_ceiling) - 1]) + ',' + str(mavg_band_floor[len(mavg_band_floor) - 1]) + ',' + str(pinfo['close'])
        logging.info(s)
        
        cash_bal.append(ac.d_cash_bal[d])
        margin_bal.append(ac.d_margin_bal[d])
        net_booked_position.append(ac.d_net_booked_position[d])
        net_open_position.append(ac.net_open_position[d])
        
        if pMin is None:
          pMin = pinfo['low']
          pMax = pinfo['high']
        else:
          if pinfo['low'] < pMin:
            pMin = pinfo['low']
          if pinfo['high'] > pMax: 
            pMax = pinfo['high']       
                
      # NON-TRADING DAYS
      else:
        pass    
      d = d + timedelta(days=1)  

    aDate = np.array(dates)
    aPrice = np.array(prices)
    
    fig = plt.figure(figsize=(20, 20))
    
    ax = fig.add_subplot(111)
   
    #ax.plot(aDate, aPrice, color='blue')        
    
    for series in [mavg_band_ceiling, mavg_band_floor]:
      y = np.array(series)
      t = np.array(dates)
      ax.plot(t, y, color='red')  
    
    for series in [daily_high, daily_low]:
      y = np.array(series)
      t = np.array(dates)
      ax.plot(t, y, color='blue')      
    
    plt.ylim([float(pMin), float(pMax)])    

    for series in [net_booked_position]:
      y = np.array(series)
      t = np.array(dates)
      ax2 = ax.twinx()   
      ax2.plot(t, y, color='green')  
    
    ax.grid(False)
    fig.autofmt_xdate(rotation=90)
    
    fname = 'plot/plot_' + self.time_stamp
    fig.savefig(fname) 
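A minimal usage sketch for the Simulator class above (assumed, not taken from the simagora project; the instrument code, strategy list, dates and opening balance are placeholders):

from datetime import date

# All argument values below are illustrative.
sim = Simulator(instrument='GOLD',
                strategies=[Strategy()],     # plot() references Strategy.n
                start_date=date(2012, 1, 1),
                end_date=date(2012, 12, 31),
                opening_bal=100000.0,
                time_stamp='2012-01-01')
sim.run()    # walk the date range, trading on trading days
sim.plot()   # saves the chart to plot/plot_<time_stamp>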
Code Example #14
import tensorflow as tf

from train.trainer import Trainer
from models.model import Model
from datafeed import DataFeed
from graph import *

if __name__ == '__main__':

    # get data for task
    data, lookup, metadata = gather()

    # build data format
    dformat = FIELDS + ['cmask']

    # create feeds
    trainfeed = DataFeed(dformat, data=data['valid'])
    testfeed = DataFeed(dformat, data=data['test'])

    # training params
    batch_size = 32

    # instantiate model
    model = Model(ASReaderGraph,
                  dformat=dformat,
                  n=1,
                  optimizer=tf.train.AdamOptimizer,
                  lr=0.001,
                  vocab_size=metadata['vocab_size'],
                  max_candidates=metadata['max_candidates'],
                  demb=384,
                  dhdim=384,