Code example #1
File: network.py Project: igordmn/cointrader
    def train(self, current_portfolio, history, asks, bids):
        """
            Args:
                current_portfolio: batch_count x alt_asset_number
                history: batch_count x alt_asset_number x history_size x history_indicator_number
                asks: batch_count x alt_asset_number
                bids: batch_count x alt_asset_number

            Returns:
                best_portfolio: batch_count x (1 + alt_asset_number)
                geometric_mean_profit
        """
        tflearn.is_training(True, self.session)
        results = self.session.run(
            [
                self.train_tensor, self.best_portfolio_tensor,
                self.geometric_mean_profit
            ],
            feed_dict={
                self.current_portfolio: current_portfolio,
                self.history: normalize_history(history, self.params),
                self.asks: asks,
                self.bids: bids,
                self.batch_size: history.shape[0]
            })

        return results[1], float(results[2])
Code example #2
File: evaluator.py Project: parksanghyoun/tflearn
    def predict(self, feed_dict):
        """ predict.

        Run data through each tensor's network, and return the prediction value.

        Arguments:
            feed_dict: `dict`. Feed data dictionary, with placeholders as
                keys, and data as values.

        Returns:
            An `array`. In case of multiple tensors to predict, the array is
            a concatenation of each tensor's prediction result.

        """
        with self.graph.as_default():
            tflearn.is_training(False, self.session)
            prediction = []
            for output in self.tensors:
                o_pred = self.session.run(output, feed_dict=feed_dict).tolist()
                for i, val in enumerate(o_pred):  # Reshape pred per sample
                    if len(self.tensors) > 1:
                        if not len(prediction) > i: prediction.append([])
                        prediction[i].append(val)
                    else:
                        prediction.append(val)
            return prediction
Code example #3
File: trainer.py Project: 21hub/tflearn
def evaluate(session, op_to_evaluate, feed_dict, batch_size):
        """ evaluate.

        Evaluate an operation with provided data dict using a batch size
        to save GPU memory.

        Args:
            session: `tf.Session`. Session for running operations.
            op_to_evaluate: `tf.Op`. Operation to be evaluated.
            feed_dict: `dict`. Data dictionary to feed op_to_evaluate.
            batch_size: `int`. Batch size to be used for evaluation.

        Returns:
            `float`. op_to_evaluate mean over all batches.

        """
        tflearn.is_training(False, session)
        n_test_samples = len(get_dict_first_element(feed_dict))
        batches = make_batches(n_test_samples, batch_size)
        index_array = np.arange(n_test_samples)
        avg = 0.0
        for i, (batch_start, batch_end) in enumerate(batches):
            batch_ids = index_array[batch_start:batch_end]
            feed_batch = {}
            for key in feed_dict:
                # Make batch for multi-dimensional data
                if np.ndim(feed_dict[key]) > 0:
                    feed_batch[key] = slice_array(feed_dict[key], batch_ids)
                else:
                    feed_batch[key] = feed_dict[key]
            avg += session.run(op_to_evaluate, feed_batch) / len(batches)
        return avg
Code example #4
def test_out(sess, list_dims, list_placeholders, list_operations, X_te, opts):
    """
    Run the network on the validation set and write predictions to a file.
    INPUTS:
    - sess: (tf session) the session to run everything on
    - list_dims: (list of ints) list of dimensions
    - list_placeholders: (list of tensors) list of the placeholders for feed_dict
    - list_operations: (list of tensors) list of operations for graph access
    - X_te: (list of string pairs) list of (left, right) validation image names
    - opts: (parsed arguments)
    """
    # Let's unpack the lists
    matrix_size, num_channels = list_dims
    x, y, keep_prob = list_placeholders
    prob, pred, saver, L2_loss, CE_loss, cost, optimizer, accuracy, init = list_operations
    # Initializing what to put in.
    dataXX = np.zeros((1, matrix_size, matrix_size, num_channels),
                      dtype=np.float32)
    # Running through the images.
    f = open(opts.outtxt, 'w')
    for iter_data in range(len(X_te)):
        left_img, right_img = X_te[iter_data]
        dataXX[0, :, :, 0] = read_in_one_image(opts.path_data, left_img,
                                               matrix_size)
        tflearn.is_training(False)
        pred_left = sess.run(pred, feed_dict={x: dataXX, keep_prob: 1.0})
        dataXX[0, :, :, 0] = read_in_one_image(opts.path_data, right_img,
                                               matrix_size)
        pred_right = sess.run(pred, feed_dict={x: dataXX, keep_prob: 1.0})
        statement = str(pred_left) + '\t' + str(pred_right)
        super_print(statement, f)
    if len(X_te) == 0:
        statement = str(0.5) + '\t' + str(0.5)
        super_print(statement, f)
    f.close()
Code example #5
File: icnn.py Project: zhang9song/icnn
    def train(self):
        with self.sess.as_default():
            obs, act, rew, ob2, term2, info = self.rm.minibatch(
                size=FLAGS.bsize)
            if FLAGS.icnn_opt == 'adam':
                # f = self._opt_train_entr
                f = self._fg_entr_target
                # f = self._fg_target
            elif FLAGS.icnn_opt == 'bundle_entropy':
                f = self._fg_target
            else:
                raise RuntimeError("Unrecognized ICNN optimizer: " +
                                   FLAGS.icnn_opt)
            print('--- Optimizing for training')
            tflearn.is_training(False)
            act2 = self.opt(f, ob2)
            tflearn.is_training(True)

            _, _, loss = self._train(obs,
                                     act,
                                     rew,
                                     ob2,
                                     act2,
                                     term2,
                                     log=FLAGS.summary,
                                     global_step=self.t)
            self.sess.run(self.proj)
            return loss
Code example #6
File: basic_nets.py Project: spencer-Y/operatornet
    def train(self, n_epochs, batch_size, dataset, task, verbose=False):
        stats = defaultdict(list)
        train_data = dataset['train']
        batches_for_epoch = train_data.n_examples // batch_size
        for _ in range(n_epochs):
            is_training(True, session=self.sess)
            for _ in range(batches_for_epoch):
                batch_d, batch_l, _ = train_data.next_batch(batch_size)
                feed_dict = {self.feed_pl: batch_d, self.labels_pl: batch_l}
                self.sess.run([self.opt_step], feed_dict=feed_dict)
            epoch = self.sess.run(self.epoch.assign_add(tf.constant(1.0)))
            is_training(False, session=self.sess)

            if verbose:
                print(epoch, end=' ')
            for s in ['train', 'test', 'val']:
                feed_dict = {
                    self.feed_pl: dataset[s].feed,
                    self.labels_pl: dataset[s].labels
                }
                if task == 'regression':
                    r = self.sess.run([self.loss], feed_dict=feed_dict)
                else:
                    r = self.sess.run([self.avg_accuracy], feed_dict=feed_dict)
                stats[s].append(r)
                if verbose:
                    print(r, end=' ')
            if verbose:
                print()

        return stats
Code example #7
def train_one_iteration(sess, list_dims, list_placeholders, list_operations, X_tr, Y_tr, opts):
    """
    Basically, run one iteration of the training.
    INPUTS:
    - sess: (tf session) the session to run everything on
    - list_dim: (list of ints) list of dimensions
    - list_placeholders: (list of tensors) list of the placeholders for feed_dict
    - list_operations: (list of tensors) list of operations for graph access
    - X_tr: (list of strings) list of training sample names
    - Y_tr: (list of ints) list of labels for training samples
    - opts: (parsed arguments)
    """
    # Let's unpack the lists.
    matrix_size, num_channels = list_dims
    x, y, keep_prob = list_placeholders
    prob, pred, saver, L2_loss, CE_loss, cost, optimizer, accuracy, init = list_operations
    # Initializing what to put in.
    dataXX = np.zeros((opts.bs, matrix_size, matrix_size, num_channels), dtype=np.float32)
    dataYY = np.zeros((opts.bs, ), dtype=np.int64)
    ind_list = np.random.choice(range(len(X_tr)), opts.bs, replace=False)
    # Fill in our dataXX and dataYY for training one batch.
    for iter_data, ind in enumerate(ind_list):
        dataXX[iter_data, :, :, 0] = read_in_one_image(opts.path_data, X_tr[ind], matrix_size, data_aug=False)
        dataYY[iter_data] = Y_tr[ind]
    tflearn.is_training(True)
    _, loss_iter, acc_iter = sess.run((optimizer, cost, accuracy), feed_dict={x: dataXX, y: dataYY, keep_prob: opts.dropout})
    return (loss_iter, acc_iter)
Code example #8
def encode(X):
    if len(X.shape) < 2:
        X = X.reshape(1, -1)

    tflearn.is_training(False, autoencoder.session)
    res = autoencoder.session.run(HIDDEN_STATE, feed_dict={INPUT.name: X})
    return res
Code example #9
def train_NN(train_X,
             train_y,
             n_epochs=config.no_of_epochs,
             continue_work=False,
             n_layers=1,
             n_nodes=(1024, )):
    tf.reset_default_graph()
    model = create_model(no_of_layers=n_layers, num_of_nodes=n_nodes)

    i = 0
    iterator_batch = 0
    if continue_work:
        model, iterator_batch = load_batch(model)

    tflearn.init_graph(seed=1995, gpu_memory_fraction=1)
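    # Note: this Session closes as soon as the with-block below exits, so the
    # training flag set here does not reach the session that tflearn's DNN
    # model manages internally during model.fit.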
    with tf.Session() as sess:
        tflearn.is_training(True, sess)
    for train_batch_X, train_batch_y in pickle_lazy_loading(train_X,
                                                            train_y,
                                                            i=iterator_batch):
        print("training batch:", i)
        start_time__ = time.time()
        model.fit(train_batch_X,
                  train_batch_y,
                  n_epoch=n_epochs,
                  shuffle=True,
                  snapshot_step=100,
                  show_metric=True)
        print("batch", i, "trained in", time.time() - start_time__, "s")
        i += 1
        save_batch(model, i)

    remove_batch()

    return model
Code example #10
File: ispen.py Project: ggand0/WordAlgebra
    def train_supervised_batch(self, xbatch, ybatch, verbose=0):
        tflearn.is_training(True, self.sess)
        yt_ind = self.var_to_indicator(ybatch)
        yt_ind = np.reshape(
            yt_ind, (-1, self.config.output_num * self.config.dimension))
        #xd, yd, yp_ind = self.get_all_diff(xinput=xbatch, yinput=yt_ind, ascent=True, inf_iter=10)
        yp_ind = self.loss_augmented_soft_predict(xinput=xbatch,
                                                  yinput=yt_ind,
                                                  train=True,
                                                  ascent=True)
        yp_ind = np.reshape(
            yp_ind, (-1, self.config.output_num * self.config.dimension))
        #yt_ind = np.reshape(yd, (-1, self.config.output_num*self.config.dimension))

        feeddic = {
            self.x: xbatch,
            self.yp_ind: yp_ind,
            self.yt_ind: yt_ind,
            self.learning_rate_ph: self.config.learning_rate,
            self.margin_weight_ph: self.config.margin_weight,
            self.inf_penalty_weight_ph: self.config.inf_penalty,
            self.dropout_ph: self.config.dropout
        }

        _, o, ce, n, en_yt, en_yhat = self.sess.run(
            [
                self.train_step, self.objective, self.ce, self.num_update,
                self.total_energy_yt, self.total_energy_yp
            ],
            feed_dict=feeddic)
        if verbose > 0:
            print(self.train_iter, o, n, en_yt, en_yhat)
        return n
Code example #11
    def log_between_steps(self, step):
        tflearn.is_training(False, self._agent.session)

        # v_pv, v_log_mean, v_loss, log_mean_free, weights, tracking_error, excess_return, sharpe_ratio, information_ratio, tracking_ratio= \
        v_pv, v_log_mean, v_loss, log_mean_free, weights, tracking_error, excess_return, sharpe_ratio, information_ratio =\
            self._evaluate("training",
                           self._agent.portfolio_value,
                           self._agent.log_mean,
                           self._agent.loss,
                           self._agent.log_mean_free,
                           self._agent.portfolio_weights,
                           self._agent.tracking_error,
                           self._agent.excess_return,
                           self._agent.sharp_ratio,
                           self._agent.information_ratio,
                           # self._agent.tracking_ratio
                           )

        loss_value = self._evaluate("training",self._agent.loss)

        print('='*30)
        print('step %d' % step)
        print('-'*30)
        print('the portfolio value on training set is %s\nlog_mean is %s\n'
                     'loss_value is %.3f\nlog mean without commission fee is %.3f\ntracking error is %.3f\n'
                     'excess_return is %.3f\nsharpe_ratio is %.3f\ninformation_ratio is %.3f\n' % \
                     (v_pv, v_log_mean, v_loss, log_mean_free, tracking_error, excess_return, sharpe_ratio, information_ratio))
        # print('tracking_ratio is '+str(tracking_ratio))
        print('='*30+"\n")
Code example #12
File: spen.py Project: liangmuxin/dsbox-spen
  def train_supervised_value_batch(self, xbatch, ybatch, verbose=0):
    tflearn.is_training(True, self.sess)
    if self.config.dimension > 1:
      yt_ind = self.var_to_indicator(ybatch)
      yt_ind = np.reshape(yt_ind, (-1, self.config.output_num * self.config.dimension))
    else:
      yt_ind = ybatch

    yp_init = np.random.normal(0, 1, size=(np.shape(xbatch)[0], self.config.dimension * self.config.output_num))
    feeddic = {self.x: xbatch, self.yt_ind: yt_ind,
               self.yp_ind: yp_init,
               self.learning_rate_ph: self.config.learning_rate,
               self.inf_penalty_weight_ph: self.config.inf_penalty,
               self.dropout_ph: self.config.dropout}

    _, o, ind_ar, en_ar, g_ar, l_ar, v_ar, ld_ar = self.sess.run(
        [self.train_step, self.objective, self.ind_ar, self.en_ar,
         self.g_ar, self.l_ar, self.v_ar, self.ld_ar],
        feed_dict=feeddic)

    if verbose > 0:
      print("----------------------------------------------------------")
      if verbose > 1:
        print((v_ar[-1]))
      for i in range(int(self.config.inf_iter)):
        print((g_ar[i],  ind_ar[i], np.average(en_ar[i]), l_ar[i], np.average(v_ar[i]), ld_ar[i]))

    return o
Code example #13
def get_resnet18_score(images_iter, model_path, batch_size=100, split=10):
    tf.reset_default_graph()

    incoming = tf.placeholder(tf.float32, shape=[None, 64, 64, 3], name="input")
    logits = classifier_forward(None, incoming, name="classifier")
    probs = tf.nn.softmax(logits)
    saver = tf.train.Saver([var for var in tf.global_variables()
                            if var.name.startswith("classifier")
                            and not var.name.endswith("is_training:0")])

    preds, scores = [], []

    sess = tf.Session()

    sess.run(tf.global_variables_initializer())
    tflearn.is_training(False, sess)
    saver.restore(sess, model_path)

    for images in next_batch(images_iter, batch_size):
        pred = sess.run(probs, feed_dict={incoming: images})
        preds.append(pred)

        # print(images)

    sess.close()

    preds = np.concatenate(preds, 0)

    for i in range(split):
        part = preds[i * len(preds) // split: (i + 1) * len(preds) // split]
        kl = part * (np.log(np.maximum(part, 1e-12)) - np.log(np.expand_dims(np.mean(part, 0), 0)))
        kl = np.mean(np.sum(kl, 1))
        scores.append(np.exp(kl))

    return np.mean(scores), np.std(scores)
Code example #14
File: infogan.py Project: nmiculinic/du-fer
    def train_loop(self, summary_every=20, save_every=1000):
        step = self.get_global_step()
        with self.graph.as_default():
            tflearn.is_training(True, session=self.sess)
        X, _ = mnist.train.next_batch(self.sess.run(self.batch_size))
        fd = {
            self.X: X.reshape(-1, 28, 28, 1)
        }
        if step % summary_every == 0:
            _, summ, loss_dis = self.sess.run([self.train_d, self.dis_summ, self.loss_d], feed_dict=fd)
            self.train_writer.add_summary(summ, global_step=step)
            self.sess.run(self.train_gq)
            _, summ, loss_gen, loss_q = self.sess.run([self.train_gq, self.gen_summ, self.loss_g, self.loss_q])
            self.train_writer.add_summary(summ, global_step=step)
            self.logger.info("%4d dis %7.5f, gen %7.5f, q %7.5f", step, loss_dis, loss_gen, loss_q)
            self.train_writer.flush()
        else:
            self.sess.run(self.train_gq)
            self.sess.run(self.train_gq)
            self.sess.run(self.train_d, feed_dict=fd)

        if step % save_every == 0 and step > 0:
            self.save()

        self.sess.run(self.inc_global_step)
Code example #15
File: main.py Project: ldf921/flownext
def validate_sintel(sess, framework, dataset, path):
    tflearn.is_training(False, session=sess)
    validationImg1, validationImg2, validationFlow = zip(*dataset)
    validationSize = len(dataset)
    batchEpe = []
    c = 0
    for j in tqdm(range(0, validationSize, batchSize)):
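        # When the remaining slice is smaller than batchSize, the lists below
        # keep their previous contents, so the last full batch is scored again.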
        if j + batchSize <= validationSize:
            batchImg1 = [sintel.load(p) for p in validationImg1[j: j + batchSize]]
            batchImg2 = [sintel.load(p) for p in validationImg2[j: j + batchSize]]
            batchFlow = [sintel.load(p) for p in validationFlow[j: j + batchSize]]

        batchEpe.append(sess.run(framework.epe, framework.feed_dict(
            img1=batchImg1,
            img2=batchImg2,
            flow=batchFlow
        )))

        if (j // batchSize) % 5 == 0:
            batchPred = sess.run(framework.flow, framework.feed_dict(
                img1=batchImg1,
                img2=batchImg2,
                flow=batchFlow
            ))
            batchImg1 = batchImg1[::4]
            batchImg2 = batchImg2[::4]
            batchFlow = batchFlow[::4]
            batchPred = batchPred[::4]
            visualization.plot(batchImg1, batchImg2, batchFlow, batchPred, path, c)
            c += len(batchImg1)
    mean_epe = np.mean(batchEpe)
    return float(mean_epe)
Code example #16
File: hspen.py Project: ggand0/WordAlgebra
  def inference(self, xd, yt=None, inf_iter=None, train=True, ascent=True,
                initialization=InfInit.Random_Initialization):
    """
      ARGS:
        xd: Input Tensor
        yt: Ground Truth Output

      RETURNS:
        An array of hidden states, one per inference iteration, each of
        shape (batch_size, hidden_num)

    """
    if inf_iter is None:
      inf_iter = self.config.inf_iter
    tflearn.is_training(is_training=train, session=self.sess)
    bs = np.shape(xd)[0]

    if initialization == InfInit.Random_Initialization:
      hd = np.random.uniform(0,1.0, (bs, self.config.hidden_num))
    else:
      raise NotImplementedError("Other initialization methods are not supported.")

    i=0
    h_a = []
    while i < inf_iter:
      g = self.sess.run(self.inf_gradient, feed_dict={self.x:xd, self.h:hd, self.dropout_ph: self.config.dropout})
      #print (g), self.config.inf_rate, self.config

      if ascent:
        hd = hd + self.config.inf_rate * g
      else:
        hd = hd - self.config.inf_rate * g
      h_a.append(hd)
      i += 1

    return np.array(h_a)
Code example #17
def train(dataset, metadata_path, w2v, n_classes):
    print("Configuring Tensorflow Graph")
    with tf.Graph().as_default():

        sess, ner_model = initialize_tf_graph(metadata_path, w2v, n_classes)

        min_validation_loss = float("inf")
        avg_val_loss = 0.0
        prev_epoch = 0
        tflearn.is_training(True, session=sess)
        while dataset.train.epochs_completed < FLAGS.num_epochs:
            train_batch = dataset.train.next_batch(
                batch_size=FLAGS.batch_size,
                pad=ner_model.args["sequence_length"], one_hot=True)
            pred, loss, step, acc = ner_model.train_step(
                sess, train_batch.sentences, train_batch.ner1,
                train_batch.lengths, dataset.train.epochs_completed)

            if step % FLAGS.evaluate_every == 0:
                avg_val_loss, avg_val_acc, _ = evaluate(
                    sess=sess, dataset=dataset.validation, model=ner_model,
                    max_dev_itr=FLAGS.max_dev_itr, mode='val', step=step)

            if step % FLAGS.checkpoint_every == 0:
                min_validation_loss = maybe_save_checkpoint(
                    sess, min_validation_loss, avg_val_loss, step, ner_model)

            if dataset.train.epochs_completed != prev_epoch:
                prev_epoch = dataset.train.epochs_completed
                avg_test_loss, avg_test_acc, _ = evaluate(
                    sess=sess, dataset=dataset.test, model=ner_model,
                    max_dev_itr=0, mode='test', step=step)
                min_validation_loss = maybe_save_checkpoint(
                    sess, min_validation_loss, avg_val_loss, step, ner_model)
Code example #18
File: cnn.py Project: jgiers9872/oxnnet
 def test(self, save_dir, test_data, model_file, batch_size, avg=False):
     #with tf.get_default_graph().as_default():
         config = tf.compat.v1.ConfigProto()
         config.gpu_options.allow_growth = True
         model = self.module.Model(batch_size, False) #get_model_with_placeholders(self.module, reuse=False)
         if avg:
             variable_averages = tf.train.ExponentialMovingAverage(0.999)
             variables_to_restore = variable_averages.variables_to_restore()
             saver = tf.compat.v1.train.Saver(variables_to_restore)
         else:
             saver = tf.compat.v1.train.Saver()
         with tf.compat.v1.Session() as sess:
             sess.run(tf.compat.v1.global_variables_initializer())
             saver.restore(sess, model_file)
             tflearn.is_training(False)
             with tf.compat.v1.variable_scope("inference") as scope:
                 scope.reuse_variables()
             print(sess.run(tflearn.get_training_mode()))
             dices = {}
             if not os.path.exists(save_dir):
                 os.makedirs(save_dir)
             for tup in test_data:
                 inferer = model.build_full_inferer()
                 dice = inferer(sess, tup, save_dir, model)
                 dices[tup[0]] = dice
                 print("Median: {}, Max: {}, Min: {}"
                       .format(np.median(list(dices.values()), axis=0),
                               np.max(list(dices.values()), axis=0),
                               np.min(list(dices.values()), axis=0)))
             print(dices)
             return dices
Code example #19
File: evaluator.py Project: zt706/tflearn
    def evaluate(self, feed_dict, ops, batch_size=128):
        """ Evaluate.

        Evaluate a list of tensors over a whole dataset. It is used to compute
        a metric mean score over an entire dataset.

        Arguments:
            feed_dict: `dict`. The feed dictionary of data.
            ops: list of `Tensors`. The tensors to evaluate.
            batch_size: `int`. A batch size.

        Returns:
            The mean result per tensor over the entire dataset.

        """
        tflearn.is_training(False, self.session)
        coord = tf.train.Coordinator()
        inputs = tf.get_collection(tf.GraphKeys.INPUTS)
        # Data Preprocessing
        dprep_dict = dict()
        dprep_collection = tf.get_collection(tf.GraphKeys.DATA_PREP)
        for i in range(len(inputs)):
            # Support for custom inputs not using dprep/daug
            if len(dprep_collection) > i:
                if dprep_collection[i] is not None:
                    dprep_dict[inputs[i]] = dprep_collection[i]
        # Data Flow
        df = data_flow.FeedDictFlow(feed_dict, coord,
                                    batch_size=batch_size,
                                    dprep_dict=dprep_dict,
                                    daug_dict=None,
                                    index_array=None,
                                    num_threads=1)

        return evaluate_flow(self.session, ops, df)
Code example #20
File: infogan.py Project: nmiculinic/du-fer
    def jupyter_sample_widgets(self, rows=4, cols=6, osize=28 * 4):
        from ipywidgets import interact

        with self.graph.as_default():
            tflearn.is_training(False, session=self.sess)

        def f(**kwargs):
            c_bernulli = np.array([
                kwargs["b%d" % i] for i in range(self.n_bernulli)
            ])
            c_gauss = np.array([
                kwargs["g%d" % i] for i in range(self.n_gauss)
            ])

            pics = self.sess.run(self.g, feed_dict={
                self.c_gauss: np.tile(c_gauss, (rows * cols, 1)),
                self.c_bernulli: np.tile(c_bernulli, (rows * cols, 1)),
                self.batch_size: rows * cols
            })
            pics = pics.reshape(rows, cols, 28, 28)
            return self.__img_from_data(pics, osize)

        return interact(
            f,
            **{("b%d" % i): True for i in range(self.n_bernulli)},
            **{("g%d" % i): (-3.0, 3.0) for i in range(self.n_gauss)},
            __manual=True
        )
Code example #21
File: icnn.py Project: zhang9song/icnn
    def act(self, test=False):
        with self.sess.as_default():
            print('--- Selecting action, test={}'.format(test))
            obs = np.expand_dims(self.observation, axis=0)

            if FLAGS.icnn_opt == 'adam':
                f = self._fg_entr
                # f = self._fg
            elif FLAGS.icnn_opt == 'bundle_entropy':
                f = self._fg
            else:
                raise RuntimeError("Unrecognized ICNN optimizer: " +
                                   FLAGS.icnn_opt)

            tflearn.is_training(False)
            action = self.opt(f, obs)
            tflearn.is_training(not test)

            if not test:
                self.noise -= FLAGS.outheta*self.noise - \
                              FLAGS.ousigma*npr.randn(self.dimA)
                action += self.noise
            action = np.clip(action, -1, 1)

            self.action = np.atleast_1d(np.squeeze(action, axis=0))
            return self.action
Code example #22
    def _full_validation(self, sess):
        tflearn.is_training(True, session=sess)
        num_batches_vali = FLAGS.num_val_images // FLAGS.batch_size

        loss_list = []
        ad_loss_list = []
        accuracy_list = []

        for step_vali in range(num_batches_vali):
            _, _, loss, ad_loss, accuracy = sess.run(
                [
                    self.batch_data, self.batch_labels, self.loss,
                    self.ad_loss, self.accuracy
                ],
                feed_dict={
                    self.am_training: False,
                    self.prob_fc: 1,
                    self.prob_conv: 1
                })
            # feed_dict={self.am_training: False, self.prob_fc: FLAGS.keep_prob_fc, self.prob_conv: 1})

            loss_list.append(loss)
            accuracy_list.append(accuracy)
            ad_loss_list.append(ad_loss)

        vali_loss_value = np.mean(np.array(loss_list))
        vali_accuracy_value = np.mean(np.array(accuracy_list))
        vali_ad_loss_value = np.mean(np.array(ad_loss_list))
        return vali_loss_value, vali_accuracy_value, vali_ad_loss_value
Code example #23
def get_quality_score(sess,
                      incoming,
                      probs,
                      image_iter,
                      batch_size=100,
                      split=10):
    preds, scores = [], []

    tflearn.is_training(False, sess)

    for images in next_batch(image_iter, batch_size):
        pred = sess.run(probs, feed_dict={incoming: images})
        preds.append(pred)

    sess.close()
    preds = np.concatenate(preds, 0)

    for i in range(split):
        part = preds[i * len(preds) // split:(i + 1) * len(preds) // split]
        p = np.concatenate([part, 1.0 - part], axis=1)
        q = np.full(p.shape, 0.5, np.float64)
        s = 0.5 * (p + q)
        ent = 0.5 * (entropy(np.transpose(p), np.transpose(s), base=2) +
                     entropy(np.transpose(q), np.transpose(s), base=2))
        kl = np.mean(ent)
        scores.append(kl)

    return np.mean(scores), np.std(scores)
Code example #24
File: spen.py Project: liangmuxin/dsbox-spen
  def train_supervised_batch(self, xbatch, ybatch, verbose=0):
    tflearn.is_training(True, self.sess)

    if self.config.dimension > 1:
      yt_ind = self.var_to_indicator(ybatch)
      yt_ind = np.reshape(yt_ind, (-1, self.config.output_num*self.config.dimension))
    else:
      yt_ind = ybatch

    yp_ind = self.loss_augmented_soft_predict(xinput=xbatch, yinput=yt_ind, train=True, inf_iter=self.config.inf_iter, ascent=True, loss_aug=True)
    yp = np.argmax(yp_ind,-1)
    yp_ind = self.var_to_indicator(yp)
    #yp_ind = self.soft_predict(xinput=xbatch, train=True, ascent=True, inf_iter=self.config.inf_iter)

    yp_ind = np.reshape(yp_ind, (-1, self.config.output_num*self.config.dimension))
    if verbose > 1:
      print((yp_ind[0]))

    feeddic = {self.x: xbatch, self.yp: yp_ind, self.yt: yt_ind,
               self.learning_rate_ph: self.config.learning_rate,
               self.margin_weight_ph: self.config.margin_weight,
               self.dropout_ph: self.config.dropout}

    _, o, ce, n, en_yt, en_yhat = self.sess.run(
        [self.train_step, self.objective, self.ce, self.num_update,
         self.total_energy_yt, self.total_energy_yp], feed_dict=feeddic)
    if verbose > 0:
      print((self.train_iter, o, n, en_yt, en_yhat, np.average(ce)))
    return o
Code example #25
    def _full_validation(self, sess):
        tflearn.is_training(True)
        num_batches_vali = FLAGS.num_eval_images // FLAGS.test_batch_size

        loss_list = []
        accuracy_list = []

        start_time = time.time()

        for step_vali in range(num_batches_vali):
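            # Note: the batch count above uses FLAGS.test_batch_size while the
            # batches are generated with FLAGS.train_batch_size; the two are
            # presumably equal here.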
            vali_batch_data, vali_batch_labels = self._generate_batch(
                self.vali_data,
                self.vali_label,
                FLAGS.train_batch_size,
                step_vali,
                train=False)

            loss, accuracy = sess.run(
                [self.loss, self.accuracy],
                feed_dict={
                    self.batch_data: vali_batch_data,
                    self.batch_labels: vali_batch_labels
                })

            loss_list.append(loss)
            accuracy_list.append(accuracy)

        duration = time.time() - start_time

        vali_loss_value = np.mean(np.array(loss_list))
        vali_accuracy_value = np.mean(np.array(accuracy_list))

        return vali_loss_value, vali_accuracy_value
Code example #26
def run_task_eval(config, eval_data_loader, classifier_forward, model_dir):
    print("building graph...")

    classifier_inputs = tf.placeholder(tf.float32,
                                       shape=[None] + eval_data_loader.shape(),
                                       name="input")
    classifier_label_inputs = tf.placeholder(
        tf.int32, shape=[None, eval_data_loader.classes()], name="labels")
    classifier_logits = classifier_forward(config,
                                           classifier_inputs,
                                           name="classifier")
    classifier_variables = [
        var for var in tf.all_variables() if var.name.startswith("classifier")
        and not var.name.endswith("is_training:0")
    ]

    # global_step = tf.Variable(0, False)
    # classifier_loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=classifier_label_inputs,
    #                                                                      logits=classifier_logits))
    classifier_labels = tf.cast(
        tf.argmax(tf.nn.softmax(classifier_logits), axis=-1), tf.int32)
    classifier_accuracy = tf.reduce_mean(
        tf.cast(
            tf.equal(
                classifier_labels,
                tf.cast(tf.argmax(classifier_label_inputs, axis=1), tf.int32)),
            tf.float32))

    saver_classifier = tf.train.Saver(classifier_variables, max_to_keep=10)
    print("graph built.")

    sess = tf.Session()
    sess.run(tf.global_variables_initializer())

    print("loading classifier weights from %s..." % model_dir)
    saver_classifier.restore(sess, tf.train.latest_checkpoint(model_dir))
    print("weights loaded.")

    total_step = 0
    eval_accuracies = []
    for epoch in range(1):
        num_steps = eval_data_loader.num_steps(config.batch_size)
        bar = trange(num_steps, leave=False)
        for step in bar:
            eval_images, eval_labels = eval_data_loader.next_batch(
                config.batch_size)

            tflearn.is_training(False, sess)
            eval_accuracy = sess.run(
                classifier_accuracy,
                feed_dict={
                    classifier_inputs: eval_images,
                    classifier_label_inputs: eval_labels.astype(np.int32)
                })
            eval_accuracies.append(eval_accuracy)
            total_step += 1

    sess.close()
    return np.mean(eval_accuracies)
Code example #27
def train(dataset, metadata_path, w2v):
    print("Configuring Tensorflow Graph")
    with tf.Graph().as_default():

        sess, siamese_model = initialize_tf_graph(metadata_path, w2v)

        print('Opening the datasets')
        dataset.train.open()
        dataset.validation.open()
        dataset.test.open()

        min_validation_loss = float("inf")
        avg_val_loss = 0.0
        prev_epoch = 0
        tflearn.is_training(True, session=sess)
        while dataset.train.epochs_completed < FLAGS.num_epochs:
            train_batch = dataset.train.next_batch(batch_size=FLAGS.batch_size,
                                                   pad=0)

            sents_batch = datasets.merge_sentences(
                train_batch, 2 * siamese_model.args["sequence_length"] + 1,
                FLAGS.batch_size)

            pco, mse, loss, step = siamese_model.train_step(
                sess, sents_batch, train_batch.sim,
                dataset.train.epochs_completed)

            if step % FLAGS.evaluate_every == 0:
                avg_val_loss, avg_val_pco, _ = evaluate(
                    sess=sess,
                    dataset=dataset.validation,
                    model=siamese_model,
                    max_dev_itr=FLAGS.max_dev_itr,
                    mode='val',
                    step=step)

            if step % FLAGS.checkpoint_every == 0:
                validation_loss = maybe_save_checkpoint(
                    sess, min_validation_loss, avg_val_loss, step,
                    siamese_model)
                if validation_loss is not None:
                    min_validation_loss = validation_loss

            if dataset.train.epochs_completed != prev_epoch:
                prev_epoch = dataset.train.epochs_completed
                avg_test_loss, avg_test_pco, _ = evaluate(sess=sess,
                                                          dataset=dataset.test,
                                                          model=siamese_model,
                                                          max_dev_itr=0,
                                                          mode='test',
                                                          step=step)
                min_test_loss = maybe_save_checkpoint(sess,
                                                      min_validation_loss,
                                                      avg_val_loss, step,
                                                      siamese_model)

        dataset.train.close()
        dataset.validation.close()
        dataset.test.close()
Code example #28
def evaluate(sess,
             dataset,
             model,
             step,
             max_dev_itr=100,
             verbose=True,
             mode='val'):

    samples_path, history_path = None, None
    results_dir = model.val_results_dir if mode == 'val'\
                                        else model.test_results_dir
    samples_path = os.path.join(results_dir,
                                '{}_samples_{}.txt'.format(mode, step))
    history_path = os.path.join(results_dir, '{}_history.txt'.format(mode))

    avg_val_loss, avg_val_pco = 0.0, 0.0
    print("Running Evaluation {}:".format(mode))
    tflearn.is_training(False, session=sess)

    # This is needed to reset the local variables initialized by
    # TF for calculating streaming Pearson Correlation and MSE
    sess.run(tf.local_variables_initializer())
    all_dev_x1, all_dev_x2, all_dev_sims, all_dev_gt = [], [], [], []
    dev_itr = 0
    while (dev_itr < max_dev_itr and max_dev_itr != 0) \
                                    or mode in ['test', 'train']:
        val_batch = dataset.next_batch(FLAGS.batch_size, pad=0)

        sents_batch = datasets.merge_sentences(
            val_batch, 2 * model.args["sequence_length"] + 1, FLAGS.batch_size)

        val_loss, val_pco, val_mse, val_sim = \
            model.evaluate_step(sess, sents_batch, val_batch.sim)
        avg_val_loss += val_mse
        avg_val_pco += val_pco[0]
        all_dev_x1 += id2seq(val_batch.s1, dataset.vocab_i2w)
        all_dev_x2 += id2seq(val_batch.s2, dataset.vocab_i2w)
        all_dev_sims += val_sim.tolist()
        all_dev_gt += val_batch.sim
        dev_itr += 1

        if mode == 'test' and dataset.epochs_completed == 1: break
        if mode == 'train' and dataset.epochs_completed == 1: break

    result_set = (all_dev_x1, all_dev_x2, all_dev_sims, all_dev_gt)
    avg_loss = avg_val_loss / dev_itr
    avg_pco = avg_val_pco / dev_itr
    if verbose:
        print("{}:\t Loss: {}\tPco{}".format(mode, avg_loss, avg_pco))

    with open(samples_path, 'w') as sf, open(history_path, 'a') as hf:
        for x1, x2, sim, gt in zip(all_dev_x1, all_dev_x2, all_dev_sims,
                                   all_dev_gt):
            sf.write('{}\t{}\t{}\t{}\n'.format(x1, x2, sim, gt))
        hf.write('STEP:{}\tTIME:{}\tPCO:{}\tMSE:{}\n'.format(
            step,
            datetime.datetime.now().isoformat(), avg_pco, avg_loss))
    tflearn.is_training(True, session=sess)
    return avg_loss, avg_pco, result_set
Code example #29
    def __init__(self, env, config):
        self._sess = tf.Session()
        self._env = env

        self._dqn = DQNetwork(self._sess, env.dim_state, env.dim_action,
                              config.lr)
        #       self._doubledqn = DoubleDQNetwork(self._sess, env.dim_state, env.dim_action, config.lr)

        self._dir_mod_full = '{0}/{1}-dqn'.format(config.dir_mod,
                                                  config.run_id)
        dir_sum_full = '{0}/{1}-dqn'.format(config.dir_sum, config.run_id)
        self._dir_log_full = '{0}/{1}-{2}.log'.format(config.dir_log,
                                                      config.run_id, 'dqn')

        self._summer = Summary(self._sess)
        self._summer.add_writer(dir_sum_full, name="dqn")
        #     self._summer.add_writer(dir_sum_full, name="doubledqn")
        self._summer.add_writer(dir_sum_full + '-max', name="max")
        self._summer.add_writer(dir_sum_full + '-min', name="min")
        self._summer.add_writer(dir_sum_full + '-rnd', name="rnd")
        self._summer.add_variable(name='ep-sum-reward')
        self._summer.add_variable(name='ep-mean-power')
        self._summer.add_variable(name='ep-loss')
        self._summer.add_variable(name='ep-rrh')
        self._summer.build()

        self._f_out = open(self._dir_log_full, 'w')
        self._store_args(config, self._f_out)

        self._replay_buffer = ReplayBuffer(config.buffer_size)
        self._explorer = Explorer(config.epsilon_init, config.epsilon_final,
                                  config.epsilon_steps)

        self._saver = tf.train.Saver(max_to_keep=5)

        self._train_flag = not config.load_id
        self._max_test_episodes = config.tests

        if config.load_id:
            self._load(config.dir_mod, config.load_id)
        else:
            self._sess.run(tf.global_variables_initializer())

        tflearn.is_training(self._train_flag, session=self._sess)

        self._OBVS = config.observations
        self._BATCH = config.mini_batch
        self._GAMMA = config.gamma

        self._max_episodes = config.episodes
        self._max_ep_sts = config.epochs
        self._max_steps = config.update

        self._ep = 0
        self._st = 0
        self._save_ep = config.save_ep
        self.reset_log()
Code example #30
File: nnagent.py Project: harisindhu/PGPortfolio
 def decide_by_history(self, history, last_w):
     assert isinstance(history, np.ndarray),\
         "the history should be a numpy array, not %s" % type(history)
     assert not np.any(np.isnan(last_w))
     assert not np.any(np.isnan(history))
     tflearn.is_training(False, self.session)
     history = history[np.newaxis, :, :, :]
     return np.squeeze(self.session.run(self.__net.output, feed_dict={self.__net.input_tensor: history,
                                                                      self.__net.previous_w: last_w[np.newaxis, 1:],
                                                                      self.__net.input_num: 1}))
Code example #31
File: trainer.py Project: tflearn/tflearn
    def _train_batch(self, feed_dict):
        """ _train_batch.

        Train on a single batch.

        Arguments:
            feed_dict: `dict`. The data dictionary to feed.

        """
        tflearn.is_training(True, session=self.session)
        _, loss, _ = self.session.run([self.train, self.loss, self.summ_op],
                                      feed_dict=feed_dict)
        tflearn.is_training(False, session=self.session)
        return loss
Code example #32
    def __rolling_logging(self):
        fast_train = self.train_config["fast_train"]
        if not fast_train:
            tflearn.is_training(False, self._agent.session)

            v_pv, v_log_mean = self._evaluate("validation",
                                              self._agent.portfolio_value,
                                              self._agent.log_mean)
            t_pv, t_log_mean = self._evaluate("test", self._agent.portfolio_value, self._agent.log_mean)
            loss_value = self._evaluate("training", self._agent.loss)

            logging.info('training loss is %s\n' % loss_value)
            logging.info('the portfolio value on validation asset is %s\nlog_mean is %s\n' %
                         (v_pv, v_log_mean))
            logging.info('the portfolio value on test asset is %s\nlog_mean is %s' % (t_pv, t_log_mean))
Code example #33
File: trainer.py Project: krishperumal/tflearn
def evaluate_flow(session, ops_to_evaluate, dataflow):
        if not isinstance(ops_to_evaluate, list):
            ops_to_evaluate = [ops_to_evaluate]
        tflearn.is_training(False, session)
        dataflow.reset()
        dataflow.start()
        res = [0. for i in ops_to_evaluate]
        feed_batch = dataflow.next()
        n_batches = len(dataflow.batches)
        while feed_batch:
            r = session.run(ops_to_evaluate, feed_batch)
            for i in range(len(r)):
                res[i] += r[i] / n_batches
            feed_batch = dataflow.next()
        return res
Code example #34
File: trainer.py Project: 21hub/tflearn
def evaluate_flow(session, ops_to_evaluate, dataflow):
        if not isinstance(ops_to_evaluate, list):
            ops_to_evaluate = [ops_to_evaluate]
        tflearn.is_training(False, session)
        dataflow.reset()
        dataflow.start()
        res = [0. for i in ops_to_evaluate]
        feed_batch = dataflow.next()

        while feed_batch:
            r = session.run(ops_to_evaluate, feed_batch)
            current_batch_size = get_current_batch_size(feed_batch, dataflow)
            for i in range(len(r)):
                res[i] += r[i] * current_batch_size
            feed_batch = dataflow.next()
        res = [r / dataflow.n_samples for r in res]
        return res
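Note how this evaluate_flow differs from the one in the previous example: the earlier variant gives every batch equal weight (res[i] += r[i] / n_batches), which biases the mean when the last batch is smaller, whereas this variant weights each batch result by its size and divides by the total sample count. A small arithmetic sketch of the difference, using hypothetical per-batch means and sizes:

    # Hypothetical per-batch means with batch sizes 4, 4, 2 (last batch smaller).
    means = [0.5, 0.7, 0.9]
    sizes = [4, 4, 2]
    equal_weight = sum(m / len(means) for m in means)                      # 0.70
    size_weighted = sum(m * s for m, s in zip(means, sizes)) / sum(sizes)  # 0.66
    print(equal_weight, size_weighted)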
Code example #35
File: evaluator.py Project: MLDL/tflearn
    def predict(self, feed_dict):
        """ predict.

        Run data through the provided network and return the result value.

        Arguments:
            feed_dict: `dict`. Feed data dictionary, with placeholders as
                keys, and data as values.

        Returns:
            An `array`. In case of multiple tensors to predict, each tensor's
            prediction result is concatenated.

        """
        with self.graph.as_default():
            # Data Preprocessing
            dprep_dict = dict()
            for i in range(len(self.inputs)):
                # Support for custom inputs not using dprep/daug
                if len(self.dprep_collection) > i:
                    if self.dprep_collection[i] is not None:
                        dprep_dict[self.inputs[i]] = self.dprep_collection[i]
            # Apply pre-processing
            if len(dprep_dict) > 0:
                for k in dprep_dict:
                    feed_dict[k] = dprep_dict[k].apply(feed_dict[k])

            # Prediction for each tensor
            tflearn.is_training(False, self.session)
            prediction = []
            for output in self.tensors:
                o_pred = self.session.run(output, feed_dict=feed_dict).tolist()
                for i, val in enumerate(o_pred): # Reshape pred per sample
                    if len(self.tensors) > 1:
                        if not len(prediction) > i: prediction.append([])
                        prediction[i].append(val)
                    else:
                        prediction.append(val)
            return prediction
Code example #36
    def log_between_steps(self, step):
        fast_train = self.train_config["fast_train"]
        tflearn.is_training(False, self._agent.session)

        summary, v_pv, v_log_mean, v_loss, log_mean_free, weights = \
            self._evaluate("test", self.summary,
                           self._agent.portfolio_value,
                           self._agent.log_mean,
                           self._agent.loss,
                           self._agent.log_mean_free,
                           self._agent.portfolio_weights)
        self.test_writer.add_summary(summary, step)

        if not fast_train:
            summary, loss_value = self._evaluate("training", self.summary, self._agent.loss)
            self.train_writer.add_summary(summary, step)

        # print 'ouput is %s' % out
        logging.info('='*30)
        logging.info('step %d' % step)
        logging.info('-'*30)
        if not fast_train:
            logging.info('training loss is %s\n' % loss_value)
        logging.info('the portfolio value on test set is %s\nlog_mean is %s\n'
                     'loss_value is %.3f\nlog mean without commission fee is %.3f\n' % \
                     (v_pv, v_log_mean, v_loss, log_mean_free))
        logging.info('='*30+"\n")

        if not self.__snap_shot:
            self._agent.save_model(self.save_path)
        elif v_pv > self.best_metric:
            self.best_metric = v_pv
            logging.info("get better model at %s steps,"
                         " whose test portfolio value is %s" % (step, v_pv))
            if self.save_path:
                self._agent.save_model(self.save_path)
        self.check_abnormal(v_pv, weights)
Code example #37
    def __log_result_csv(self, index, time):
        from pgportfolio.trade import backtest
        dataframe = None
        csv_dir = './train_package/train_summary.csv'
        tflearn.is_training(False, self._agent.session)
        v_pv, v_log_mean, benefit_array, v_log_mean_free =\
            self._evaluate("test",
                           self._agent.portfolio_value,
                           self._agent.log_mean,
                           self._agent.pv_vector,
                           self._agent.log_mean_free)

        backtest = backtest.BackTest(self.config.copy(),
                                     net_dir=None,
                                     agent=self._agent)

        backtest.start_trading()
        result = Result(test_pv=[v_pv],
                        test_log_mean=[v_log_mean],
                        test_log_mean_free=[v_log_mean_free],
                        test_history=[''.join(str(e)+', ' for e in benefit_array)],
                        config=[json.dumps(self.config)],
                        net_dir=[index],
                        backtest_test_pv=[backtest.test_pv],
                        backtest_test_history=[''.join(str(e)+', ' for e in backtest.test_pc_vector)],
                        backtest_test_log_mean=[np.mean(np.log(backtest.test_pc_vector))],
                        training_time=int(time))
        new_data_frame = pd.DataFrame(result._asdict()).set_index("net_dir")
        if os.path.isfile(csv_dir):
            dataframe = pd.read_csv(csv_dir).set_index("net_dir")
            dataframe = dataframe.append(new_data_frame)
        else:
            dataframe = new_data_frame
        if int(index) > 0:
            dataframe.to_csv(csv_dir)
        return result
Code example #38
File: evaluator.py Project: MLDL/tflearn
    def evaluate(self, feed_dict, ops, batch_size=128):
        """ Evaluate.

        Evaluate a list of tensors over a whole dataset. Generally, the
        'ops' argument holds average performance metrics (such as mean
        accuracy, top-3, etc...)

        Arguments:
            feed_dict: `dict`. The feed dictionary of data.
            ops: list of `Tensors`. The tensors to evaluate.
            batch_size: `int`. A batch size.

        Returns:
            The mean result per tensor over all batches.

        """
        tflearn.is_training(False, self.session)
        coord = tf.train.Coordinator()
        inputs = tf.get_collection(tf.GraphKeys.INPUTS)
        # Data Preprocessing
        dprep_dict = dict()
        dprep_collection = tf.get_collection(tf.GraphKeys.DATA_PREP)
        for i in range(len(inputs)):
            # Support for custom inputs not using dprep/daug
            if len(dprep_collection) > i:
                if dprep_collection[i] is not None:
                    dprep_dict[inputs[i]] = dprep_collection[i]
        # Data Flow
        df = data_flow.FeedDictFlow(feed_dict, coord,
                                    batch_size=batch_size,
                                    dprep_dict=dprep_dict,
                                    daug_dict=None,
                                    index_array=None,
                                    num_threads=1)

        return evaluate_flow(self.session, ops, df)
Code example #39
File: trainer.py Project: 21hub/tflearn
    def _train(self, training_step, snapshot_epoch, snapshot_step,
               show_metric):
        """ Training process for this optimizer.

        Arguments:
            training_step: `int`. The global step.
            snapshot_epoch: `bool`. If True, snapshot network at each epoch.
            snapshot_step: `int`. If not None, snapshot network given 'step'.
            show_metric: `bool`. If True, display accuracy at every step.

        """
        self.loss_value, self.acc_value = None, None
        self.val_loss, self.val_acc = None, None
        train_summ_str, test_summ_str = None, None
        snapshot = False
        epoch = self.train_dflow.data_status.epoch

        feed_batch = self.train_dflow.next()
        tflearn.is_training(True, session=self.session)
        _, train_summ_str = self.session.run([self.train, self.summ_op],
                                             feed_batch)

        # Retrieve loss value from summary string
        sname = "- Loss/" + self.scope_name
        self.loss_value = summaries.get_value_from_summary_string(
            sname, train_summ_str)

        if show_metric and self.metric is not None:
            # Retrieve accuracy value from summary string
            sname = "- " + self.metric_summ_name + "/" + self.scope_name
            self.acc_value = summaries.get_value_from_summary_string(
                sname, train_summ_str)

        if epoch != self.train_dflow.data_status.epoch:
            if snapshot_epoch:
                snapshot = True

        # Check if step reached snapshot step
        if snapshot_step:
            if training_step % snapshot_step == 0:
                snapshot = True

        # Calculate validation
        if snapshot and self.val_feed_dict:
            tflearn.is_training(False, session=self.session)
            # Evaluation returns the mean over all batches.
            eval_ops = [self.loss]
            if show_metric and self.metric is not None:
                eval_ops.append(self.metric)
            e = evaluate_flow(self.session, eval_ops, self.test_dflow)
            self.val_loss = e[0]
            if show_metric and self.metric is not None:
                self.val_acc = e[1]

            # Set evaluation results to variables, to be summarized.
            if show_metric:
                update_val_op = [tf.assign(self.val_loss_T, self.val_loss),
                                 tf.assign(self.val_acc_T, self.val_acc)]
            else:
                update_val_op = tf.assign(self.val_loss_T, self.val_loss)
            self.session.run(update_val_op)

            # Run summary operation.
            test_summ_str = self.session.run(self.val_summary_op)

        # Write to Tensorboard
        #TODO: Delete?
        n_step = self.training_steps.eval(session=self.session)
        if n_step > 1:
            if train_summ_str:
                self.summary_writer.add_summary(
                    train_summ_str, n_step)
            if test_summ_str:
                self.summary_writer.add_summary(
                    test_summ_str, n_step)

        return snapshot
Code example #40
File: trainer.py Project: avr248/tflearn
    def _train(self, training_step, snapshot_epoch, snapshot_step,
               show_metric):
        """ Training process for this optimizer.

        Arguments:
            training_step: `int`. The global step.
            snapshot_epoch: `bool`. If True, snapshot network at each epoch.
            snapshot_step: `int`. If not None, snapshot network given 'step'.
            show_metric: `bool`. If True, display accuracy at every step.

        """
        self.loss_value, self.acc_value = None, None
        self.val_loss, self.val_acc = None, None
        train_summ_str, test_summ_str = None, None
        snapshot = False

        batch_ids = self.index_array[self.batch_start:self.batch_end]

        feed_batch = {}
        for key in self.feed_dict:
            # Make batch for multi-dimensional data
            if np.ndim(self.feed_dict[key]) > 0:
                feed_batch[key] = slice_array(self.feed_dict[key], batch_ids)
            else:
                feed_batch[key] = self.feed_dict[key]

        tflearn.is_training(True, self.session)
        self.session.run([self.train], feed_batch)

        tflearn.is_training(False, self.session)
        if self.summ_op is not None:
            train_summ_str = self.session.run(self.summ_op, feed_batch)

        # Retrieve loss value from summary string
        sname = "- Loss/" + self.scope_name
        self.loss_value = summaries.get_value_from_summary_string(
            sname, train_summ_str)

        if show_metric and self.metric is not None:
            # Retrieve accuracy value from summary string
            sname = "- " + self.metric_summ_name + "/" + self.scope_name
            self.acc_value = summaries.get_value_from_summary_string(
                sname, train_summ_str)

        # Check if data reached an epoch
        if not self.next_batch():
            if self.shuffle:
                np.random.shuffle(self.index_array)
            batches = make_batches(self.n_train_samples, self.batch_size)
            self.set_batches(batches)
            if snapshot_epoch:
                snapshot = True

        # Check if step reached snapshot step
        if snapshot_step:
            if training_step % snapshot_step == 0:
                snapshot = True

        # Calculate validation
        if snapshot and self.val_feed_dict:
            # Evaluation returns the mean over all batches.
            self.val_loss = evaluate(self.session, self.loss,
                                     self.val_feed_dict,
                                     self.batch_size)
            if show_metric and self.metric is not None:
                self.val_acc = evaluate(self.session, self.metric,
                                        self.val_feed_dict,
                                        self.batch_size)
            # Set evaluation results to variables, to be summarized.
            if show_metric:
                update_val_op = [tf.assign(self.val_loss_T, self.val_loss),
                                 tf.assign(self.val_acc_T, self.val_acc)]
            else:
                update_val_op = tf.assign(self.val_loss_T, self.val_loss)
            self.session.run(update_val_op)

            # Run summary operation.
            test_summ_str = self.session.run(self.val_summary_op,
                                             self.val_feed_dict)

        # Write to Tensorboard
        n_step = self.training_steps.eval(session=self.session)
        if n_step > 1:
            if train_summ_str:
                self.summary_writer.add_summary(
                    train_summ_str, n_step)
            if test_summ_str:
                self.summary_writer.add_summary(
                    test_summ_str, n_step)

        return snapshot
Code example #41
File: nnagent.py Project: harisindhu/PGPortfolio
 def train(self, x, y, last_w, setw):
     tflearn.is_training(True, self.__net.session)
     self.evaluate_tensors(x, y, last_w, setw, [self.__train_operation])
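Every snippet above follows the same pattern: call tflearn.is_training(True, session) before running training ops, and tflearn.is_training(False, session) before inference or evaluation, so that phase-dependent layers such as dropout and batch normalization switch behavior accordingly. A minimal, self-contained sketch of the toggle, assuming TensorFlow 1.x and tflearn (the placeholder shape and dropout layer here are illustrative only):

    import tensorflow as tf  # TensorFlow 1.x API
    import tflearn

    x = tf.placeholder(tf.float32, shape=[None, 4])
    h = tflearn.dropout(x, 0.5)  # drops units only while training mode is True

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        tflearn.is_training(True, session=sess)       # training phase
        print(sess.run(tflearn.get_training_mode()))  # True
        tflearn.is_training(False, session=sess)      # inference phase
        print(sess.run(tflearn.get_training_mode()))  # False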