Code example #1
  def train(self, learning_rate, step_num, init_step=None, restoring_file=None):
    print('\n%s: training...' % datetime.now())
    sys.stdout.flush()

    session = Session(self._graph, self.models_dir)
    init_step = session.init(self._network, init_step, restoring_file)
    session.start()

    last_step = init_step+step_num
    print('%s: training till: %d steps' %(datetime.now(), last_step))

    print_loss = 0
    train_loss = None
    save_loss = 0
    save_step = 0
    total_loss = 0
    feed_dict={self._lr_placeholder: learning_rate}
    for step in range(init_step+1, last_step+1):
      start_time = time.time()
      _, total_loss_batch, loss_batch = session.run(
        [self._train, self._total_loss, self._cross_entropy_losses], feed_dict=feed_dict
      )
      duration = time.time() - start_time
      assert not np.isnan(total_loss_batch), 'Model diverged with loss = NaN'
      cross_entropy_loss_value = np.mean(loss_batch)
      print_loss += cross_entropy_loss_value
      save_loss += cross_entropy_loss_value
      total_loss += total_loss_batch
      save_step += 1

      if ((step - init_step) % Trainer.PRINT_FREQUENCY == 0):
        examples_per_sec = Trainer.BATCH_SIZE / duration
        format_str = ('%s: step %d, loss = %.2f, lr = %f, '
                      '(%.1f examples/sec; %.3f sec/batch)')
        print_loss /= Trainer.PRINT_FREQUENCY
        print(format_str % (datetime.now(), step, print_loss, learning_rate,
                            examples_per_sec, float(duration)))
        print_loss = 0

      # Save the model checkpoint and summaries periodically.
      if (step == last_step or
        (Trainer.SAVE_FREQUENCY is not None and (step - init_step) % Trainer.SAVE_FREQUENCY == 0)):
        session.save(step)
        total_loss /= save_step
        train_loss = save_loss / save_step
        print('%s: train_loss = %.3f' % (datetime.now(), train_loss))
        if (self.writer):
          summary_str = session.run(self._all_summaries, feed_dict=feed_dict)
          self.writer.write_summaries(summary_str, step)
          self.writer.write_scalars({'losses/training/cross_entropy_loss': train_loss,
                                     'losses/training/total_loss': total_loss}, step)
        total_loss = 0
        save_loss = 0
        save_step = 0

    session.stop()
    return step, train_loss
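Note: these method snippets assume the usual module-level imports (from datetime import datetime; import sys, time; import numpy as np), and the Session object they use is a project-specific wrapper, not TensorFlow's tf.Session. Below is a minimal hypothetical sketch of the interface the snippets rely on (init/start/run/save/stop), written against the TF1 compatibility API; the real class in tensorflow-worklab may differ in detail.

import os
import tensorflow.compat.v1 as tf

class Session:
  """Hypothetical wrapper matching the calls made in the snippets (a sketch)."""

  def __init__(self, graph, models_dir):
    self._graph = graph
    self._models_dir = models_dir
    with graph.as_default():
      self._saver = tf.train.Saver()
    self._sess = tf.Session(graph=graph)

  def init(self, network, init_step=None, restoring_file=None):
    # network is unused in this sketch; the real wrapper may use it.
    with self._graph.as_default():
      self._sess.run(tf.global_variables_initializer())
    ckpt = restoring_file or tf.train.latest_checkpoint(self._models_dir)
    if ckpt:
      self._saver.restore(self._sess, ckpt)
      if init_step is None:
        # Checkpoint paths end with the global step, e.g. model.ckpt-1200.
        init_step = int(ckpt.split('-')[-1])
    return init_step or 0

  def start(self):
    # Start the TF1-style input queue runners.
    self._coord = tf.train.Coordinator()
    self._threads = tf.train.start_queue_runners(sess=self._sess, coord=self._coord)

  def run(self, fetches, feed_dict=None):
    return self._sess.run(fetches, feed_dict=feed_dict)

  def save(self, step):
    self._saver.save(self._sess, os.path.join(self._models_dir, 'model.ckpt'),
                     global_step=step)

  def stop(self):
    self._coord.request_stop()
    self._coord.join(self._threads)
    self._sess.close()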
Code example #2
  def test(self, step_num=None, init_step=None, restoring_file=None):
    print('%s: testing...' %datetime.now())
    sys.stdout.flush()

    session = Session(self._graph, self.models_dir)
    init_step = session.init(self._network, init_step, restoring_file)
    session.start()

    if (init_step == 0):
      print('WARNING: testing an untrained model')
    if (step_num is None):
      # np.int / np.float were removed in NumPy 1.24; use the built-ins instead.
      step_num = int(np.ceil(float(self.fold_size) / Tester.BATCH_SIZE))
    test_num = step_num * Tester.BATCH_SIZE
    print('%s: test_num=%d' %(datetime.now(), test_num))

    loss_value = 0
    prob_values = np.zeros((test_num, Reader.CLASSES_NUM), dtype=np.float32)
    label_values = np.zeros(test_num, dtype=np.int64)
    filename_values = []
    begin = 0
    start_time = time.time()

    for step in range(step_num):
      # print('%s: eval_iter=%d' % (datetime.now(), step))
      loss_batch, prob_batch, label_batch, filename_batch = session.run(
        [self._loss, self._probs, self._labels, self._filenames]
      )
      loss_value += loss_batch
      begin = step * Tester.BATCH_SIZE
      prob_values[begin:begin+Tester.BATCH_SIZE, :] = prob_batch
      label_values[begin:begin+Tester.BATCH_SIZE] = label_batch
      filename_values.extend(filename_batch)

    duration = time.time() - start_time
    print('%s: duration = %.1f sec' %(datetime.now(), float(duration)))
    sys.stdout.flush()

    loss_value /= step_num
    print('%s: test_loss = %.3f' %(datetime.now(), loss_value))

    mult_acc, bin_acc, auc, bin_sens = self.get_pred_stat(
      prob_values, label_values, filename_values
    )
    if (self.writer):
      summary_str = session.run(self._all_summaries)
      self.writer.write_summaries(summary_str, init_step)
      self.writer.write_scalars({'losses/testing/total_loss': loss_value,
                                 'accuracy/multiclass': mult_acc,
                                 'accuracy/binary': bin_acc,
                                 'stats/AUC': auc,
                                 'stats/sensitivity': bin_sens[0],
                                 'stats/specificity': bin_sens[1]}, init_step)
    session.stop()
    return init_step, loss_value
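The get_pred_stat helper is not shown in the snippet. Under the assumption (not confirmed by the source) that class 0 is the negative class and every other class counts as positive, a self-contained sketch returning the same four values could look like this:

import numpy as np
from sklearn.metrics import roc_auc_score

def get_pred_stat(prob_values, label_values, filename_values=None):
  """Hypothetical sketch; filename_values is accepted but unused here."""
  predictions = np.argmax(prob_values, axis=1)
  mult_acc = float(np.mean(predictions == label_values))

  # Collapse to a binary problem: any non-zero class is "positive".
  bin_labels = (label_values != 0).astype(np.int64)
  bin_preds = (predictions != 0).astype(np.int64)
  bin_scores = 1.0 - prob_values[:, 0]  # score for "any positive class"
  bin_acc = float(np.mean(bin_preds == bin_labels))
  auc = float(roc_auc_score(bin_labels, bin_scores))

  # Sensitivity = TP / (TP + FN); specificity = TN / (TN + FP).
  pos = bin_labels == 1
  sensitivity = float(np.mean(bin_preds[pos] == 1)) if pos.any() else 0.0
  specificity = float(np.mean(bin_preds[~pos] == 0)) if (~pos).any() else 0.0
  return mult_acc, bin_acc, auc, (sensitivity, specificity)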
Code example #3
  def train(self, learning_rate, step_num, init_step=None, restoring_file=None):
    print('%s: training...' % datetime.now())
    sys.stdout.flush()

    session = Session(self._graph, self.models_dir)
    init_step = session.init(self._network, init_step, restoring_file)
    session.start()

    last_step = init_step+step_num
    print('%s: training till: %d steps' %(datetime.now(), last_step))

    print_loss = 0
    train_loss = None
    save_loss = 0
    save_step = 0
    feed_dict={self._lr_placeholder: learning_rate}
    for step in range(init_step+1, last_step+1):
      start_time = time.time()
      _, loss_batch = session.run([self._train, self._loss],
                                  feed_dict=feed_dict)
      duration = time.time() - start_time
      assert not np.isnan(loss_batch), 'Model diverged with loss = NaN'
      print_loss += loss_batch
      save_loss += loss_batch
      save_step += 1

      if ((step - init_step) % Trainer.PRINT_FREQUENCY == 0):
        examples_per_sec = Trainer.BATCH_SIZE / duration
        format_str = ('%s: step %d, loss = %.2f, lr = %f, '
                      '(%.1f examples/sec; %.3f sec/batch)')
        print_loss /= Trainer.PRINT_FREQUENCY
        print(format_str % (datetime.now(), step, print_loss, learning_rate,
                            examples_per_sec, float(duration)))
        print_loss = 0

      # Save the model checkpoint and summaries periodically.
      if (step == last_step or
        (Trainer.SAVE_FREQUENCY is not None and (step - init_step) % Trainer.SAVE_FREQUENCY == 0)):
        session.save(step)
        train_loss = save_loss / save_step
        print('%s: train_loss = %.3f' % (datetime.now(), train_loss))
        save_loss = 0
        save_step = 0
        if (self.writer):
          summary_str = session.run(self._all_summaries, feed_dict=feed_dict)
          self.writer.write_summaries(summary_str, step)
          self.writer.write_scalars({'losses/training/total_loss': train_loss}, step)

    session.stop()
    return step, train_loss
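Both training variants log through self.writer, which is again a project helper rather than a TensorFlow class. A plausible minimal implementation of its write_summaries/write_scalars interface on top of the TF1 FileWriter (the class name here is an assumption) would be:

import tensorflow.compat.v1 as tf

class SummaryWriter:
  """Hypothetical writer exposing the interface used by the snippets."""

  def __init__(self, log_dir, graph=None):
    self._writer = tf.summary.FileWriter(log_dir, graph=graph)

  def write_summaries(self, summary_str, step):
    # summary_str is the serialized protobuf from session.run(all_summaries).
    self._writer.add_summary(summary_str, global_step=step)
    self._writer.flush()

  def write_scalars(self, scalars, step):
    # Build a Summary protobuf by hand for plain Python scalar values.
    summary = tf.Summary(value=[
        tf.Summary.Value(tag=tag, simple_value=float(value))
        for tag, value in scalars.items()])
    self._writer.add_summary(summary, global_step=step)
    self._writer.flush()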
Code example #4
File: tester.py  Project: phpmind/tensorflow-worklab
  def test(self, step_num=None, init_step=None, restoring_file=None):
    print('\n%s: testing...' %datetime.now())
    sys.stdout.flush()

    session = Session(self._graph, self.models_dir)
    init_step = session.init(self._network, init_step, restoring_file)
    session.start()

    if (init_step == 0):
      print('WARNING: testing an untrained model')
    if (step_num is None):
      # np.int / np.float were removed in NumPy 1.24; use the built-ins instead.
      step_num = int(np.ceil(float(self.fold_size) / Tester.BATCH_SIZE))
    test_num = step_num * Tester.BATCH_SIZE
    print('%s: test_num=%d' %(datetime.now(), test_num))

    loss_values = np.zeros(test_num, dtype=np.float32)
    prob_values = np.zeros((test_num, Reader.CLASSES_NUM), dtype=np.float32)
    label_values = np.zeros(test_num, dtype=np.int64)

    start_time = time.time()
    for step in range(step_num):
      # print('%s: eval_iter=%d' % (datetime.now(), step))
      loss_batch, prob_batch, label_batch = session.run(
        [self._cross_entropy_losses, self._probs, self._input['labels']]
      )
      begin = step * Tester.BATCH_SIZE
      loss_values[begin:begin+Tester.BATCH_SIZE] = loss_batch
      prob_values[begin:begin+Tester.BATCH_SIZE, :] = prob_batch
      label_values[begin:begin+Tester.BATCH_SIZE] = label_batch

    duration = time.time() - start_time
    print('%s: duration = %.1f sec' %(datetime.now(), float(duration)))
    sys.stdout.flush()

    test_loss, mult_acc = self.get_all_stat(loss_values, prob_values, label_values)
    if (self.writer):
      summary_str = session.run(self._all_summaries)
      self.writer.write_summaries(summary_str, init_step)
      self.writer.write_scalars({'losses/testing/cross_entropy_loss': test_loss,
                                 'accuracy/multiclass': mult_acc}, init_step)
    session.stop()
    return init_step, test_loss
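get_all_stat is likewise defined elsewhere in the project. Given per-example losses, class probabilities, and labels, the natural reading of its two return values is the mean loss and the multiclass accuracy; a sketch under that assumption:

import numpy as np

def get_all_stat(loss_values, prob_values, label_values):
  """Hypothetical reconstruction: mean loss and multiclass accuracy."""
  test_loss = float(np.mean(loss_values))
  predictions = np.argmax(prob_values, axis=1)
  mult_acc = float(np.mean(predictions == label_values))
  return test_loss, mult_acc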
Code example #5
File: exp.py  Project: natlang/NLStockExchange
                                         params['network'])
    data.write_adj_matrix(zip_file, buy_network)
    ndat = data.init_ndat(params['traders_spec'], params['n_days'])

    # Run sequence of trials, 1 session per trial
    trial = 1
    logger.info('Running NLSE experiments')
    while trial < params['n_trials'] + 1:
        logger.info('Running %s' % trial)
        # Initialise traders
        traders = {}
        init_verbose = False
        setup.populate_market(params['traders_spec'], traders, buy_network,
                              sell_network, init_verbose)
        ddat, tdat = session.run(trial, params['start'], params['end'],
                                 params['order_sched'], traders, n_traders,
                                 ndat, buy_network, sell_network)
        # Add trading and day data from trial to df
        # DataFrame.append was removed in pandas 2.0; concatenate instead
        # (assumes pandas is imported as pd at the top of exp.py).
        ddat_df = pd.concat([ddat_df, ddat], ignore_index=True)
        tdat_df = pd.concat([tdat_df, tdat], ignore_index=True)
        ndat_df = data.get_ndat_df(ndat, params['n_days'], buy_network)
        trial += 1

    logger.info('Experiments finished')

    # Write dataframes to csv and to zipfile
    logger.info('Writing day data to csv...')
    zip_file.writestr('ddat.csv', ddat_df.to_csv(index=False))
    logger.info('Writing trading data to csv...')
    zip_file.writestr('tdat.csv', tdat_df.to_csv(index=False))
    logger.info('Writing network data to csv...')
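Since DataFrame.append was removed in pandas 2.0 (the loop above was updated to pd.concat), an even cleaner pattern is to collect the per-trial frames in a list and concatenate once after the loop. A self-contained illustration with placeholder trial data:

import pandas as pd

frames = []
for trial in range(1, 4):
  # Placeholder row standing in for the real per-trial day data.
  frames.append(pd.DataFrame({'trial': [trial], 'profit': [0.0]}))
ddat_df = pd.concat(frames, ignore_index=True)
print(ddat_df)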
Code example #6
from session import run

if __name__ == "__main__":
    run()
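For this entry point to run in isolation, the imported module only needs to expose a run callable. A trivial placeholder (the real session.py in these projects implements the full market session, and example #5 shows a run that takes arguments) would be:

# session.py -- hypothetical placeholder so the entry point above is runnable.
def run():
  print('session started')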