Example #1
import numpy as np
import tensorflow as tf

# checkpoint_file, FLAGS, gb (the batch-generator helper module) and x_test
# are assumed to be defined earlier in the evaluation script.
graph = tf.Graph()
with graph.as_default():
    sess = tf.Session()
    with sess.as_default():
        # load the saved meta graph and restore variables
        saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file))
        saver.restore(sess, checkpoint_file)

        # Get the placeholders from the graph by name
        X = graph.get_operation_by_name("X").outputs[0]
        dropout_keep_prob = graph.get_operation_by_name(
            "dropout_keep_prob").outputs[0]

        # Tensors we want to evaluate
        predictions = graph.get_operation_by_name(
            "output/predictions").outputs[0]
        # Generate batches for one epoch
        batches = gb.batch_generator(list(x_test),
                                     FLAGS.batch_size,
                                     1,
                                     shuffle=False)

        # Collect the predictions here
        all_predictions = []
        counter = 0
        for x_test_batch in batches:
            print('batch_number: ', counter, end='\r', flush=True)
            batch_predictions = sess.run(predictions, {
                X: x_test_batch,
                dropout_keep_prob: 1.0
            })
            all_predictions = np.concatenate(
                [all_predictions, batch_predictions])
            counter += 1
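
Both this example and Example #2 below import a helper module gb whose batch_generator is not shown on this page. Below is a minimal sketch of what such a generator typically does, assuming it yields NumPy batches and reshuffles the data once per epoch; the real implementation may differ.

import numpy as np

def batch_generator(data, batch_size, num_epochs, shuffle=True):
    # Sketch of gb.batch_generator as used above; the name, signature and
    # behaviour are assumptions, not taken from the gb module itself.
    data = np.array(data)
    data_size = len(data)
    num_batches_per_epoch = (data_size - 1) // batch_size + 1
    for _ in range(num_epochs):
        # Reshuffle at the start of each epoch unless shuffle is disabled.
        if shuffle:
            epoch_data = data[np.random.permutation(data_size)]
        else:
            epoch_data = data
        for batch_num in range(num_batches_per_epoch):
            start = batch_num * batch_size
            end = min(start + batch_size, data_size)
            yield epoch_data[start:end]

With shuffle=False, as in the prediction loop above, batch order follows the order of x_test, so the concatenated predictions line up with the test examples.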
Example #2
                    mlp.dropout_keep_prob: 1.0,
                }
                step, summaries, loss, accuracy = sess.run(
                    [global_step, dev_summary_op, mlp.loss, mlp.accuracy],
                    feed_dict)
                time_str = datetime.datetime.now().isoformat()
                print("Evaluation: {}: step {}, loss{:g}, acc {:g}".format(
                    time_str, step, loss, accuracy),
                      end='\r',
                      flush=True)
                if writer:
                    writer.add_summary(summaries, step)

            # Generate batches
            batches = gb.batch_generator(list(zip(x_train, y_train)),
                                         FLAGS.train_batch_size,
                                         FLAGS.num_epochs)
            dev_batches = gb.batch_generator(list(zip(x_dev, y_dev)),
                                             FLAGS.train_batch_size,
                                             FLAGS.num_epochs * 10)
            # Training loop. For each batch...

            for batch in batches:
                if len(batch) != FLAGS.train_batch_size:
                    continue
                x_batch, y_batch = zip(*batch)
                train_step(x_batch, y_batch)
                current_step = tf.train.global_step(sess, global_step) - 1
                if current_step % FLAGS.evaluate_every == 0:
                    x_dev_batch, y_dev_batch = zip(*next(dev_batches))
                    dev_step(x_dev_batch,
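
Example #1 recovers its tensors from the restored graph purely by name (X, dropout_keep_prob, output/predictions). For that lookup to work, the training graph has to assign those names explicitly when it is built. The following is a minimal TF1-style sketch of how the placeholders and the prediction op could be declared; the layer sizes and the dense/dropout layers are illustrative assumptions, not taken from the snippets above.

import tensorflow as tf

# Hypothetical sizes; the real values come from the training script.
sequence_length, hidden_units, num_classes = 100, 64, 2

# Explicit names let the evaluation script recover these placeholders via
# graph.get_operation_by_name("X") and graph.get_operation_by_name("dropout_keep_prob").
X = tf.placeholder(tf.float32, [None, sequence_length], name="X")
dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")

hidden = tf.layers.dense(X, hidden_units, activation=tf.nn.relu)
hidden = tf.nn.dropout(hidden, keep_prob=dropout_keep_prob)
logits = tf.layers.dense(hidden, num_classes)

with tf.name_scope("output"):
    # The op name becomes "output/predictions", matching the lookup in Example #1.
    predictions = tf.argmax(logits, 1, name="predictions")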