def _run_epoch(self, sess, xs, labels, is_training):
        """Run one full pass over (xs, labels) and return the size-weighted mean loss.

        In training mode the data is shuffled first and the optimizer is run
        on every batch; in evaluation mode only the loss is computed.
        Summaries are written to the train or dev writer accordingly, keyed
        by self.global_step (which is advanced once per batch).
        """
        batch_losses = []
        batch_sizes = []

        if is_training:
            xs, labels = shuffle_pairs(xs, labels)

        x_batches = batches(xs, self.conf["batch_size"])
        label_batches = batches(labels, self.conf["batch_size"])
        for batch_x, batch_labels in zip(x_batches, label_batches):
            feed = {
                self.tf_x: batch_x,
                self.tf_label: batch_labels,
                self.is_training: is_training
            }
            self.global_step += 1
            if is_training:
                # Training: also run the optimizer; log to the train writer.
                _, batch_loss, summary = sess.run(
                    [self.optimizer, self.loss, self.merged_summary_op],
                    feed_dict=feed)
                writer = self.summary_writer_train
            else:
                # Evaluation: loss only; log to the dev writer.
                batch_loss, summary = sess.run(
                    [self.loss, self.merged_summary_op],
                    feed_dict=feed)
                writer = self.summary_writer_dev
            writer.add_summary(summary, self.global_step)

            batch_losses.append(batch_loss)
            batch_sizes.append(batch_x.shape[0])

        # Weight each batch's loss by its size so a short final batch
        # does not skew the epoch average.
        return np.average(batch_losses, weights=batch_sizes)
    def predict(self, dataset, save_dir):
        """Annotate every example in *dataset* with a probability and hard label.

        Restores the fitted scaler and the model checkpoint from *save_dir*,
        scales the inputs, runs batched inference, and writes into each
        example dict:
          - "p": the predicted probability (column 0 of the model output)
          - "prediction": int(p >= 0.5)

        *dataset* is mutated in place; nothing is returned.
        """
        self.scaler = pickle_load(save_dir + self.save_scaler_filename)

        config = tf.ConfigProto()
        # Don't grab all GPU memory up front.
        config.gpu_options.allow_growth = True

        with tf.Session(config=config) as sess:
            self.saver.restore(sess, save_dir + self.save_filename)

            xs = dataset2arrays(dataset, return_labels=False)
            # BUG FIX: scaler.transform returns the scaled array rather than
            # scaling in place; the original code discarded the result, so
            # the model was fed unscaled inputs.  Assign it back, restoring
            # the original shape after the (-1, 1) flatten.
            xs = np.reshape(self.scaler.transform(np.reshape(xs, (-1, 1))),
                            np.shape(xs))
            batches_xs = batches(xs, self.conf["batch_size"])
            batches_p = []

            for x in batches_xs:
                feed_dict = {
                    self.tf_x: x,
                    self.is_training: False
                }
                p_val = sess.run(self.p, feed_dict=feed_dict)
                p_val = p_val[:, 0]  # keep only the first output column
                batches_p.append(p_val)

        for example, p in zip(dataset, unbatch(batches_p)):
            example["p"] = p
            example["prediction"] = int(p >= 0.5)
# Esempio n. 3  ("Example no. 3" — scrape-artifact separator between snippets;
# 0             (stray vote count) — both commented out so the file parses
    def test_batch1(self):
        """batches() over zipped (x, y) pairs yields size-2 chunks plus the
        remainder, preserving order when shuffle=False."""
        xs = [[1], [2], [3]]
        ys = ['a', 'b', 'c']
        expected_xs = [([1], [2]), ([3], )]
        expected_ys = [('a', 'b'), ('c', )]

        # Unzip each batch of (x, y) pairs back into parallel x / y tuples.
        unzipped = [tuple(zip(*chunk))
                    for chunk in batches(list(zip(xs, ys)), batch_size=2, shuffle=False)]
        got_xs = [pair[0] for pair in unzipped]
        got_ys = [pair[1] for pair in unzipped]

        self.assertEqual(got_xs, expected_xs)
        self.assertEqual(got_ys, expected_ys)
# Esempio n. 4  ("Example no. 4" — scrape-artifact separator between snippets;
# 0             (stray vote count) — both commented out so the file parses
 def test_batch2(self):
     x = np.array([[1, 2], [3, 4], [5, 6]])
     batches_correct = [[[1, 2], [3, 4]], [[5, 6]]]
     for i, batch in enumerate(batches(x, batch_size=2, shuffle=False)):
         self.assertEqual(batches_correct[i], batch.tolist())