Example #1
def run_strategy(smaPeriod, tick, tick2='TRNS', plot=True):
    # Load the bar feed from MongoDB and build the price-ratio series
    plt = None
    feed = mongofeed.MongoFeed()
    feed.addBarsFromCSV(tick, mongofeed.MongoRowParser())
    ratio = du.get_ratio_for_key_with_date(tick, tick2, 'Adj Clos')

    f = du.ArbiFeed()
    f.addBarsFromCSV(ratio, '%s/%s' % (tick, tick2))
    now = datetime.datetime.now()
    start = now - datetime.timedelta(days=180)
    data = du.get_data(tick, start_date=start)
    
    # The first ticker seeds the tracked date range.
    max_date = data[-1]['date']
    min_date = data[0]['date']
    f.addBarsFromCSV(data, tick)
    
    data = du.get_data(tick2, start_date=start)
    # Widen the range to cover the second ticker's series.
    max_date = max(max_date, data[-1]['date'])
    min_date = min(min_date, data[0]['date'])
    f.addBarsFromCSV(data, tick2)
    
    print 'start: %r end: %r' % (min_date, max_date)
    metrics.start_date = min_date
    metrics.end_date = max_date
    
    # Evaluate the strategy with the feed's bars.
    myStrategy = PairStrategy(feed, smaPeriod, f, tick, tick2)
    if plot:
        plt = plotter.StrategyPlotter(myStrategy)
        plt.getInstrumentSubplot(tick).addDataSeries(tick, f.getDataSeries(tick))

    myStrategy.run()
    dd = metrics.get_all_drawdown()
    winners = metrics.get_all_win()
    losers = metrics.get_all_losses()
    avg_gain = metrics.get_all_gain()
    avg_losses = metrics.get_all_avglosses()
    returns = metrics.get_portfolio_annualized_returns()
    metrics.build_comp_report('%s-%s-pair_trading.txt' % (tick, tick2))
    print 'drawdown: \n%r' % (dd), 'Win percentage: \n%r ' % (winners), 'Loss perc.: \n%r' % (losers)
    print 'Gains: \n%r' % (avg_gain), 'Losses: \n%r' % (avg_losses)
    print 'Returns: %f' % (returns)
    if plot: plt.plot()
    
    return myStrategy.result
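
A minimal usage sketch for this entry point; the tickers and period here are hypothetical, and the module's own imports (du, mongofeed, metrics, plotter) are assumed to be in place:

result = run_strategy(20, 'KO', tick2='PEP', plot=False)
print 'final result: %r' % result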
Example #2
    def __init__(self, feed, tickers, period=9, balance=15000, limit=10):
        basestrategies.BaseTrender.__init__(self, feed, period, tickers,
                                            balance, mongofeed.MongoRowParser)
        self.cross_aboves = {}
        self.cross_belows = {}
        self.current_positions = set()
        self.pos_limit = limit
        params = {
            'timeperiod': period,
            'nbdevup': 2,
            'nbdevdn': 2,
            'matype': du.tl.MA_SMA
        }

        for ticker in self.tickers:
            print 'Initializing: %s' % (ticker)
            data = du.get_data(ticker)
            d = [v['Adj Clos'] for v in data]
            a, b, c = du.run_command('BBANDS', np.array(d), **params)

            # BBANDS returns the (upper, middle, lower) bands in that order.
            highs = dataseries.SequenceDataSeries(a.tolist())
            lows = dataseries.SequenceDataSeries(c.tolist())
            mids = dataseries.SequenceDataSeries(b.tolist())

            self.cross_aboves[ticker] = cross.CrossAbove(
                feed.getDataSeries(ticker).getAdjCloseDataSeries(), highs)
            self.cross_belows[ticker] = cross.CrossBelow(
                feed.getDataSeries(ticker).getAdjCloseDataSeries(), lows)
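
For reference, the (upper, middle, lower) bands that du.run_command('BBANDS', ...) obtains from TA-Lib can be approximated with plain NumPy. A self-contained sketch, not the project's code:

import numpy as np

def sma_bollinger(closes, period=9, ndev=2.0):
    # Returns (upper, middle, lower) arrays for SMA-based Bollinger bands.
    closes = np.asarray(closes, dtype=float)
    mids = np.convolve(closes, np.ones(period) / period, mode='valid')
    # population std over each rolling window, aligned with mids
    stds = np.array([closes[i:i + period].std() for i in range(len(mids))])
    return mids + ndev * stds, mids, mids - ndev * stds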
Example #3
    def construct_model(self):
        """get data loader"""
        input_size, input_channels, n_classes, train_data, valid_data = get_data(
            self.config.dataset, self.config.data_path, self.config.cutout_length, validation=True
        )

        if self.distributed:
            self.train_sampler = torch.utils.data.distributed.DistributedSampler(
                train_data, num_replicas=self.world_size, rank=self.rank
            )
        else:
            self.train_sampler = None

        self.train_loader = torch.utils.data.DataLoader(train_data,
                                                        batch_size=self.config.batch_size,
                                                        shuffle=(self.train_sampler is None),
                                                        num_workers=self.config.workers,
                                                        pin_memory=True,
                                                        sampler=self.train_sampler)
        self.valid_loader = torch.utils.data.DataLoader(valid_data,
                                                        batch_size=self.config.batch_size,
                                                        shuffle=False,
                                                        num_workers=self.config.workers,
                                                        pin_memory=True)
        self.sync_bn = self.config.amp_sync_bn
        self.opt_level = self.config.amp_opt_level
        print(f"sync_bn: {self.sync_bn}")

        """build model"""
        print("init model")
        self.criterion = nn.CrossEntropyLoss().to(self.device)
        self.use_aux = self.config.aux_weight > 0.
        model = AugmentStage(input_size, input_channels, self.config.init_channels,
                             n_classes, self.config.layers, self.use_aux,
                             self.config.genotype, self.config.DAG)
        if self.sync_bn:
            model = apex.parallel.convert_syncbn_model(model)
        self.model = model.to(self.device)
        print("init model end!")

        """ build optimizer """
        print("get optimizer")
        momentum = self.config.momentum
        weight_decay = self.config.weight_decay
        # LARSSGD
        # exclude_bias_and_bn = self.config.exclude_bias_and_bn
        # params = collect_params([self.model], exclude_bias_and_bn=exclude_bias_and_bn)
        # self.optimizer = LARS(params, lr=self.max_lr, momentum=momentum, weight_decay=weight_decay)
        # SGD
        self.optimizer = torch.optim.SGD(model.parameters(), lr=self.max_lr,
                                         momentum=momentum, weight_decay=weight_decay)

        self.lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(self.optimizer, self.total_epochs)

        """init amp"""
        print("amp init!")
        self.model, self.optimizer = amp.initialize(
            self.model, self.optimizer, opt_level=self.opt_level
        )
        if self.distributed:
            self.model = DDP(self.model, delay_allreduce=True)
        print("amp init end!")
Example #4
    def construct_model(self):
        """get data loader"""
        input_size, input_channels, n_classes, train_data = get_data(
            self.config.dataset,
            self.config.data_path,
            cutout_length=0,
            validation=False)

        n_train = len(train_data)
        split = n_train // 2
        indices = list(range(n_train))
        train_sampler = torch.utils.data.sampler.SubsetRandomSampler(
            indices[:split])
        valid_sampler = torch.utils.data.sampler.SubsetRandomSampler(
            indices[split:])
        self.train_loader = torch.utils.data.DataLoader(
            train_data,
            batch_size=self.config.batch_size,
            sampler=train_sampler,
            num_workers=self.config.workers,
            pin_memory=True)
        self.valid_loader = torch.utils.data.DataLoader(
            train_data,
            batch_size=self.config.batch_size,
            sampler=valid_sampler,
            num_workers=self.config.workers,
            pin_memory=True)
        """build model"""
        print("init model")
        self.criterion = nn.CrossEntropyLoss().to(self.device)
        model = SearchStageController(input_channels,
                                      self.config.init_channels,
                                      n_classes,
                                      self.config.layers,
                                      self.criterion,
                                      self.config.genotype,
                                      device_ids=self.config.gpus)
        self.model = model.to(self.device)
        print("init model end!")
        """build optimizer"""
        print("get optimizer")
        self.w_optim = torch.optim.SGD(self.model.weights(),
                                       self.config.w_lr,
                                       momentum=self.config.w_momentum,
                                       weight_decay=self.config.w_weight_decay)
        self.alpha_optim = torch.optim.Adam(
            self.model.alphas(),
            self.config.alpha_lr,
            betas=(0.5, 0.999),
            weight_decay=self.config.alpha_weight_decay)

        self.lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
            self.w_optim, self.total_epochs, eta_min=self.config.w_lr_min)
        self.architect = Architect(self.model, self.config.w_momentum,
                                   self.config.w_weight_decay)
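
The half/half split sets up the usual bilevel search: train_loader drives the weight updates while valid_loader drives the architecture (alpha) updates. A schematic of the step this class feeds, with method names following the reference DARTS implementation (they may differ in this repo):

for (trn_X, trn_y), (val_X, val_y) in zip(self.train_loader, self.valid_loader):
    # architecture step on the held-out half
    self.alpha_optim.zero_grad()
    self.architect.unrolled_backward(trn_X, trn_y, val_X, val_y, lr, self.w_optim)
    self.alpha_optim.step()

    # weight step on the training half
    self.w_optim.zero_grad()
    loss = self.model.loss(trn_X, trn_y)
    loss.backward()
    self.w_optim.step()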
Example #5
 def get_config(self):
     tp.logger.set_logger_dir(self.conf.logdir)
     vocab = voc_util.get_vocab(self.conf.vob_dict_path)
     dataset_train = get_data(self.conf.data_dir,
                              self.conf.batch,
                              vocab,
                              self.conf.POS_filter,
                              Windsize=self.conf.Windsize,
                              stride=self.conf.stride,
                              is_train=True,
                              nV=self.conf.nV,
                              nF=self.conf.nF)
     steps_per_epoch = dataset_train.size()
     dataset_test = get_data(self.conf.data_dir,
                             self.conf.batch,
                             vocab,
                             self.conf.POS_filter,
                             Windsize=self.conf.Windsize,
                             stride=self.conf.stride,
                             is_train=False,
                             nV=self.conf.nV,
                             nF=self.conf.nF)
     drop_schedule = []
     for i, epoch in enumerate(map(int, self.conf.drop_epochs.split(','))):
         drop_schedule.append(
             (epoch, self.conf.init_lr * self.conf.drop_rate**(i + 1)))
     return tp.TrainConfig(
         dataflow=dataset_train,
         callbacks=[
             tp.ModelSaver(),
             tp.InferenceRunner(
                 dataset_test,
                 [tp.ScalarStats('cost'),
                  tp.ClassificationError()]),
             tp.ScheduledHyperParamSetter('learning_rate', drop_schedule)
         ],
         model=Model(self.conf, self.Net),
         steps_per_epoch=steps_per_epoch,
         max_epoch=self.conf.max_epoch,
     )
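
The schedule multiplies init_lr by drop_rate once per listed epoch. With hypothetical values init_lr=0.1, drop_rate=0.1, drop_epochs='30,60':

init_lr, drop_rate = 0.1, 0.1
drop_epochs = '30,60'
drop_schedule = [(epoch, init_lr * drop_rate ** (i + 1))
                 for i, epoch in enumerate(map(int, drop_epochs.split(',')))]
# -> [(30, 0.01), (60, 0.001)], up to float rounding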
Example #6
    def addBarsFromCSV(self, instrument, row_parser):
        data = du.get_data(instrument)
        self.__bars.setdefault(instrument, [])
        self.__nextBarIdx.setdefault(instrument, 0)

        loaded_bars = []
        for datum in data:
            if self.__barFilter is None or self.__barFilter.includeBar(datum):
                row = row_parser.parseBar(datum)
                loaded_bars.append(row)
        self.__bars[instrument].extend(loaded_bars)
        self.__bars[instrument].sort(key=lambda bar: bar.getDateTime())
        self.registerInstrument(instrument)
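
All parseBar has to return is an object exposing getDateTime() for the sort above (plus whatever the strategy reads later). A minimal illustrative parser for documents shaped like those in these examples ('date' and 'Adj Clos' keys); the Bar class is a stand-in, not pyalgotrade's:

class SimpleBar(object):
    def __init__(self, dt, adj_close):
        self._dt = dt
        self._adj_close = adj_close

    def getDateTime(self):
        return self._dt

    def getAdjClose(self):
        return self._adj_close

class SimpleRowParser(object):
    def parseBar(self, datum):
        return SimpleBar(datum['date'], datum['Adj Clos'])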
Example #7
def run_strategy(smaPeriod, tickers, plot=True):
    # Load bars for each ticker via du.get_data
    plt = None
    f = du.ArbiFeed()
    for t in tickers:
      f.addBarsFromCSV(du.get_data(t), t)
    
    # Evaluate the strategy with the feed's bars.
    myStrategy = BuyAndHold(smaPeriod, f, tickers)
    if plot:
        plt = plotter.StrategyPlotter(myStrategy)
        for t in tickers:
            plt.getInstrumentSubplot(t).addDataSeries(t, f.getDataSeries(t))

    myStrategy.run()
    if plot: plt.plot()
    return myStrategy.result
Example #8
    def __init__(self, feed, smaPeriod, tickers, trend_indicator):
        global LOWERLIMIT
        strategy.Strategy.__init__(self, feed, 2000)
        self.tickers = tickers
        self.period = smaPeriod
        self.trend_indi = trend_indicator
        self.trend_data = {}
        self.positions = []
        self.p_tickers = set()
        #self.getBroker().setCommission(FixedCommission(9))
        self.failed = {}
        no_data = set()
        self.last_price = {}
        if LOWERLIMIT >= len(tickers):
            LOWERLIMIT = int(len(tickers) * .5)
        max_date = min_date = None
        import datetime
        now = datetime.datetime.now()
        start = now - datetime.timedelta(days=90)

        for ticker in tickers:
            data = du.get_data(ticker)
            if len(data) <= 0:
                no_data.add(ticker)
                continue
            feed.addBarsFromCSV(ticker, mongofeed.MongoRowParser())
            if max_date is None or max_date < data[-1]['date']:
                max_date = data[-1]['date']

            if min_date is None or min_date > data[0]['date']:
                min_date = data[0]['date']
            self.trend_data[ticker] = rsi.RSI(
                feed.getDataSeries(ticker).getAdjCloseDataSeries(), smaPeriod)
        print 'start: %r end: %r' % (min_date, max_date)
        print 'No Data: %r' % no_data
        metrics.start_date = min_date
        metrics.end_date = max_date
        metrics.num_transactions = 0
        self.start_month = min_date
Example #9
    def __init__(self, feed, period, tickers, balance, row_parser):
        strategy.Strategy.__init__(self, feed, balance)

        self.tickers = tickers
        self.period = period
        self.last_price = {}
        self.positions = []
        max_date = min_date = None
        for ticker in tickers:
            self.last_price.setdefault(ticker, None)
            data = du.get_data(ticker)
            feed.addBarsFromCSV(ticker, row_parser())
            if max_date is None or max_date < data[-1]['date']:
                max_date = data[-1]['date']

            if min_date is None or min_date > data[0]['date']:
                min_date = data[0]['date']
        print 'start: %r end: %r' % (min_date, max_date)
        metrics.start_date = min_date
        metrics.end_date = max_date
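
The date-range bookkeeping reduces to built-ins; an equivalent sketch (ignoring the feed registration the real constructor also performs):

ranges = [(d[0]['date'], d[-1]['date'])
          for d in (du.get_data(t) for t in tickers) if d]
min_date = min(lo for lo, _ in ranges)
max_date = max(hi for _, hi in ranges)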
Example #10
    def train(self):

        # define optimizers for all networks
        dm_optim = tf.train.AdamOptimizer(self.opt.learning_rate, beta1=self.opt.beta1) \
            .minimize(self.DM_loss, var_list=self.DM_vars)

        gm_optim = tf.train.AdamOptimizer(self.opt.learning_rate, beta1=self.opt.beta1) \
            .minimize(self.GM_loss, var_list=self.G_vars)

        dd_optim = tf.train.AdamOptimizer(self.opt.learning_rate, beta1=self.opt.beta1) \
            .minimize(self.DD_loss, var_list=self.DD_vars)

        gd_optim = tf.train.AdamOptimizer(self.opt.learning_rate, beta1=self.opt.beta1) \
            .minimize(self.GD_loss, var_list=self.G_vars)

        e_optim = tf.train.AdamOptimizer(self.opt.learning_rate, beta1=self.opt.beta1) \
            .minimize(self.E_loss, var_list=self.E_vars)

        tf.global_variables_initializer().run()

        gm_sum = tf.summary.merge([self.lmg_sum, self.img_sum_m])
        gd_sum = tf.summary.merge([self.ldg_sum, self.img_sum_d])

        dm_sum = tf.summary.merge([self.lmr_sum, self.lmf_sum, self.lmt_sum])
        dd_sum = tf.summary.merge([self.ldr_sum, self.ldf_sum, self.ldt_sum])
        e_sum = tf.summary.merge([self.le_sum])

        sample_image = np.random.uniform(-1, 1, [self.opt.batch_size] +
                                         self.z_dim).astype(np.float32)
        writer = tf.summary.FileWriter(self.opt.checkpoint_dir,
                                       self.sess.graph)

        start_time = time.time()
        epoch = 0  # counts training iterations, despite the name

        while epoch < self.opt.max_iterations:

            batch_images = get_data(self.opt.dataset, self.image_size,
                                    self.batch_size)
            batch_z = np.random.uniform(-1, 1, [self.opt.batch_size] +
                                        self.z_dim).astype(np.float32)
            if epoch % 2 == 0:
                print "Manifold Step: "
                # Update D network
                _, summary_str_dm = self.sess.run([dm_optim, dm_sum],
                                                  feed_dict={
                                                      self.real_images:
                                                      batch_images,
                                                  })
                # Update G network
                _, summary_str_gm = self.sess.run([gm_optim, gm_sum],
                                                  feed_dict={
                                                      self.real_images:
                                                      batch_images,
                                                      self.random_noise:
                                                      batch_z
                                                  })

                _, summary_str_e = self.sess.run([e_optim, e_sum],
                                                 feed_dict={
                                                     self.real_images:
                                                     batch_images,
                                                 })
                writer.add_summary(summary_str_dm, epoch)
                writer.add_summary(summary_str_gm, epoch)
                writer.add_summary(summary_str_e, epoch)
            elif epoch % 2 == 1:
                print "Diffusion Step:"
                # Update D network
                _, summary_str_dd = self.sess.run([dd_optim, dd_sum],
                                                  feed_dict={
                                                      self.real_images:
                                                      batch_images,
                                                      self.random_noise:
                                                      batch_z
                                                  })

                # Update G network
                _, summary_str_gd = self.sess.run(
                    [gd_optim, gd_sum], feed_dict={self.random_noise: batch_z})
                _, summary_str_e = self.sess.run([e_optim, e_sum],
                                                 feed_dict={
                                                     self.real_images:
                                                     batch_images,
                                                 })
                writer.add_summary(summary_str_dd, epoch)
                writer.add_summary(summary_str_gd, epoch)
                writer.add_summary(summary_str_e, epoch)
            # else:
            #
            #     print "Encoder Step: "
            #     # Update E network

            if epoch % self.opt.checkpoint_interval == 0:

                img_out = self.GD_net.eval({self.random_noise: sample_image})
                save_tensor(
                    img_out[0],
                    os.path.join(self.opt.sample_dir,
                                 str(epoch) + '.jpg'))

            if epoch % 50 == 0:
                self.saver.save(self.sess,
                                os.path.join(self.opt.checkpoint_dir,
                                             'fck.ckpt'),
                                global_step=epoch)

            time_delta = time.time() - start_time
            print("Epoch: [%2d] time: %4.4f" % (epoch, time_delta))

            epoch += 1
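
The five optimizers only stay independent because each minimize(..., var_list=...) is restricted to one subnetwork's variables. In TF1 those lists are usually collected by variable scope; a sketch with hypothetical scope names:

t_vars = tf.trainable_variables()
self.G_vars = [v for v in t_vars if v.name.startswith('generator')]
self.DM_vars = [v for v in t_vars if v.name.startswith('d_manifold')]
self.DD_vars = [v for v in t_vars if v.name.startswith('d_diffusion')]
self.E_vars = [v for v in t_vars if v.name.startswith('encoder')]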
Example #11
def main():
    logger.info("Logger is set - training start")

    # set default gpu device id
    torch.cuda.set_device(config.gpus[0])

    # set seed
    np.random.seed(config.seed)
    torch.manual_seed(config.seed)
    torch.cuda.manual_seed_all(config.seed)

    torch.backends.cudnn.benchmark = True

    # get data with meta info
    input_size, input_channels, n_classes, train_data, valid_data = get_data(
        config.dataset,
        config.data_path,
        config.cutout_length,
        validation=True)

    criterion = nn.CrossEntropyLoss().to(device)
    use_aux = config.aux_weight > 0.
    model = AugmentCellCNN(input_size, input_channels, config.init_channels,
                           n_classes, config.layers, use_aux, config.genotype)
    model = nn.DataParallel(model, device_ids=config.gpus).to(device)

    # weights optimizer
    optimizer = torch.optim.SGD(model.parameters(),
                                config.lr,
                                momentum=config.momentum,
                                weight_decay=config.weight_decay)

    train_loader = torch.utils.data.DataLoader(train_data,
                                               batch_size=config.batch_size,
                                               shuffle=True,
                                               num_workers=config.workers,
                                               pin_memory=True)
    valid_loader = torch.utils.data.DataLoader(valid_data,
                                               batch_size=config.batch_size,
                                               shuffle=False,
                                               num_workers=config.workers,
                                               pin_memory=True)
    lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer, config.epochs)

    best_top1 = 0.
    # training loop
    for epoch in range(config.epochs):
        lr_scheduler.step()  # pre-1.1 PyTorch ordering; newer releases expect step() after the epoch's optimizer updates
        drop_prob = config.drop_path_prob * epoch / config.epochs
        model.module.drop_path_prob(drop_prob)

        # training
        train(train_loader, model, optimizer, criterion, epoch)

        # validation
        cur_step = (epoch + 1) * len(train_loader)
        top1 = validate(valid_loader, model, criterion, epoch, cur_step)

        # save
        if best_top1 < top1:
            best_top1 = top1
            is_best = True
        else:
            is_best = False
        utils.save_checkpoint(model, config.path, is_best)

        print("")

    logger.info("Final best Prec@1 = {:.4%}".format(best_top1))
Example #12
    def train(self):
        """
        Training steps for Spatial GAN
        :return: 
        """

        # define optimizer for both networks
        d_optim = tf.train.AdamOptimizer(self.opt.learning_rate, beta1=self.opt.beta1) \
            .minimize(self.D_loss, var_list=self.D_vars)

        g_optim = tf.train.AdamOptimizer(self.opt.learning_rate, beta1=self.opt.beta1) \
            .minimize(self.G_loss, var_list=self.G_vars)

        tf.global_variables_initializer().run()

        g_sum = tf.summary.merge([self.lg_sum, self.img_sum])

        d_sum = tf.summary.merge([self.lr_sum, self.lf_sum, self.lt_sum])

        sample_image = np.random.uniform(-1, 1, [self.opt.batch_size] +
                                         self.z_dim).astype(np.float32)
        writer = tf.summary.FileWriter(self.opt.checkpoint_dir,
                                       self.sess.graph)

        start_time = time.time()
        epoch = 1  # counts training iterations, despite the name

        while epoch < self.opt.max_iterations:

            batch_images = get_data(self.opt.dataset, self.image_size,
                                    self.batch_size)
            batch_z = np.random.uniform(-1, 1, [self.opt.batch_size] +
                                        self.z_dim).astype(np.float32)

            if epoch % 2 != 0:
                print "Update D:"
                # Update D network
                _, summary_str = self.sess.run([d_optim, d_sum],
                                               feed_dict={
                                                   self.real_images:
                                                   batch_images,
                                                   self.random_noise: batch_z
                                               })
            else:
                print "Update G:"
                # Update G network
                _, summary_str = self.sess.run(
                    [g_optim, g_sum], feed_dict={self.random_noise: batch_z})

            if epoch % self.opt.checkpoint_interval == 0:

                writer.add_summary(summary_str, epoch)
                img_out = self.G_net.eval({self.random_noise: sample_image})
                save_tensor(
                    img_out[0],
                    os.path.join(self.opt.sample_dir,
                                 str(epoch) + '.jpg'))

            if epoch % 100 == 0:
                self.saver.save(self.sess,
                                os.path.join(self.opt.checkpoint_dir,
                                             'fck.ckpt'),
                                global_step=epoch)

            time_delta = time.time() - start_time
            print("Epoch: [%2d] time: %4.4f" % (epoch, time_delta))

            epoch += 1
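
Because updates alternate on iteration parity, D and G each see every other batch. The more common variant updates both on every batch; a sketch against the same graph:

_, d_summ = self.sess.run([d_optim, d_sum],
                          feed_dict={self.real_images: batch_images,
                                     self.random_noise: batch_z})
_, g_summ = self.sess.run([g_optim, g_sum],
                          feed_dict={self.random_noise: batch_z})
writer.add_summary(d_summ, epoch)
writer.add_summary(g_summ, epoch)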