Example #1
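Trains a feed-forward network with placeholder-fed mini-batches, logging summaries every 10 epochs and evaluating on a held-out set at the end.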
def run_training(data):
    # Unpack the data set; the third element is assumed to hold the test split.
    training_data = data[0]
    labels = data[1]
    test_data = data[2]

    with tf.Graph().as_default():
        input_pl, labels_pl = network.placeholder_inputs(BATCH_SIZE)
        # Build the graph on the input placeholder so the feed_dict below takes effect.
        logits = network.feedforward(input_pl,
                                     NUM_HIDDEN1,
                                     NUM_HIDDEN2)

        loss = network.loss(logits, labels_pl)

        train_op = network.training(loss, ETA)

        eval_correct = network.evaluation(logits, labels_pl)
        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.merge_all_summaries()

        # Create a saver for writing training checkpoints.
        saver = tf.train.Saver()

        # Create a session for running Ops on the Graph.
        sess = tf.Session()

        # Run the Op to initialize the variables.
        init = tf.initialize_all_variables()
        sess.run(init)

        # Instantiate a SummaryWriter to output summaries and the Graph.
        summary_writer = tf.train.SummaryWriter(FLAGS.train_dir, sess.graph)

        for epoch in xrange(EPOCHS):
            start_time = time.time()

            # Slice the data and labels into aligned mini-batches.
            training_size = len(training_data)
            batches = [training_data[k:k + BATCH_SIZE]
                       for k in xrange(0, training_size, BATCH_SIZE)]
            label_batches = [labels[k:k + BATCH_SIZE]
                             for k in xrange(0, training_size, BATCH_SIZE)]
            for batch, label_batch in zip(batches, label_batches):
                feed_dict = fill_dict(batch,
                                      label_batch,
                                      input_pl,
                                      labels_pl)
                _, loss_value = sess.run([train_op, loss], feed_dict=feed_dict)
                duration = time.time() - start_time

                # Print progress and write a summary every 10 epochs.
                if epoch % 10 == 0:
                    print 'Epoch %d: loss = %.2f (%.3f sec)' % (epoch, loss_value, duration)

                    summary_str = sess.run(summary_op, feed_dict=feed_dict)
                    summary_writer.add_summary(summary_str, epoch)
                    summary_writer.flush()
        print 'Evaluate with the validation set...'
        validate(sess,
                 eval_correct,
                 input_pl,
                 labels_pl,
                 test_data)
Example #2
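Builds a tracking network graph (unrolled `delta` steps) at module level and prepares to copy weights from the newest Caffe snapshot.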
import glob
import os
import sys

import tensorflow as tf

sys.path.append(os.path.abspath(os.path.join(
    os.path.dirname(__file__),
    os.path.pardir,
    'tracker')))
import network

# Build the TensorFlow graph (module-level ops go into the default graph).
batchSize = 1
delta = 1
imagePlaceholder = tf.placeholder(tf.float32, shape=(batchSize * delta * 2, 227, 227, 3))
labelsPlaceholder = tf.placeholder(tf.float32, shape=(batchSize * delta, 4))
learningRate = tf.placeholder(tf.float32)
tfOutputs = network.inference(imagePlaceholder, num_unrolls=delta, train=True)
tfLossFull, tfLoss = network.loss(tfOutputs, labelsPlaceholder)
train_op = network.training(tfLossFull, learningRate)
summary = tf.summary.merge_all()
init = tf.global_variables_initializer()
saver = tf.train.Saver()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
summary_writer = tf.summary.FileWriter('logs/train/caffe_copy', sess.graph)
ops = []
with sess.as_default():
    sess.run(init)

    import caffe
    caffe.set_mode_cpu()
    # Load caffe net
    net_snapshot = sorted(glob.glob('*.caffemodel'), key=os.path.getmtime)[-1]
Example #3
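A DAgger-style imitation learning loop: train a behavioral clone on aggregated expert data, roll it out in Walker2d, and label the visited states with the expert policy.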
def main():
    args = get_parser().parse_args()
    observation_length = 17
    action_length = 6

    # Read the expert rollouts from disk.
    observations, actions = load_data(args.rollouts_file)
    print("observations shape = " + str(observations.shape))
    print("actions shape = " + str(actions.shape))

    # Make sure the stats file's directory exists.
    assert os.path.exists(os.path.dirname(os.path.abspath(args.stats_file)))

    # Load the expert.
    print("Loading and building expert policy.")
    policy_fn = load_policy.load_policy(args.expert_policy_file)
    print("Expert policy loaded and built.")

    # Assemble the network.
    opl = tf.placeholder(tf.float32,
                         shape=(None, observation_length),
                         name="observations")
    apl = tf.placeholder(tf.float32,
                         shape=(None, action_length),
                         name="actions")
    logits = network.inference(opl, observation_length, args.hidden1,
                               args.hidden2, action_length)
    errors, loss = network.loss(logits, apl)
    global_step, train_op = network.training(loss, args.learning_rate)

    with tf.Session() as sess:
        # Initialize the network.
        tf_util.initialize()
        saver = tf.train.Saver()
        saver.restore(sess, tf.train.latest_checkpoint(args.checkpoint_dir))

        env = gym.make("Walker2d-v1")
        max_steps = env.spec.timestep_limit

        avg_returns = []
        stddev_returns = []
        observations = list(observations)
        actions = list(actions)

        for iteration in range(args.num_iterations):
            obs = np.array(observations)
            acts = np.array(actions)
            assert obs.shape[0] == acts.shape[0]

            # Train the network.
            if iteration != 0:
                num_batches = int(obs.shape[0] / args.batch_size)
                for step in range(args.training_steps):
                    i = step % num_batches
                    if i == 0:
                        p = np.random.permutation(obs.shape[0])
                        obs = obs[p]
                        acts = acts[p]
                    start = int(i * args.batch_size)
                    stop = int((i + 1) * args.batch_size)
                    feed_dict = {opl: obs[start:stop], apl: acts[start:stop]}
                    _, loss_value, step_value = sess.run(
                        [train_op, loss, global_step], feed_dict=feed_dict)
                    if step % 100 == 0:
                        loss_value = sess.run(loss,
                                              feed_dict={
                                                  opl: obs,
                                                  apl: acts
                                              })
                        msg = "Iteration {}; step {}; loss = {}".format(
                            iteration, step_value, loss_value)
                        print(msg)

            # Generate new rollouts.
            rewards = []
            for i in range(args.num_rollouts):
                print("Iteration {}; rollout {}".format(iteration, i))
                obs = env.reset()
                done = False
                steps = 0
                totalr = 0
                while not done:
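                    # DAgger step: query the expert for its action on the current
                    # state so it can be aggregated into the training set.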
                    expert_action = policy_fn(obs[None, :])
                    observations.append(obs)
                    actions.append(expert_action[0])

                    action = sess.run(logits, feed_dict={opl: obs[None, :]})
                    obs, r, done, _ = env.step(action)
                    totalr += r
                    steps += 1
                    if steps >= max_steps:
                        break
                rewards.append(totalr)

            print("Iteration {}; average return {}".format(
                iteration, np.mean(rewards)))
            print("Iteration {}; stddev return {}".format(
                iteration, np.std(rewards)))
            avg_returns.append(np.mean(rewards))
            stddev_returns.append(np.std(rewards))

            with open(args.stats_file, "w") as f:
                stats = {
                    "mean_return": avg_returns,
                    "stddev_returns": stddev_returns
                }
                json.dump(stats, f, indent=4)
Example #4
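Feeds in-memory training data through a slice_input_producer/batch queue pipeline and trains until the loss is effectively zero.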
def run_training():
    """Train network for a number of epochs."""
    # Tell TensorFlow that the model will be built into the default Graph.
    with tf.Graph().as_default():
        with tf.name_scope('input'):
            # Input data, pin to CPU because rest of pipeline is CPU-only
            with tf.device('/cpu:0'):
                input_data = tf.constant(training_data)
                input_labels = tf.constant(training_labels)

            single_input, single_label = tf.train.slice_input_producer(
                [input_data, input_labels], num_epochs=FLAGS.num_epochs)
            single_label = tf.cast(single_label, tf.int32)
            inputs, labels = tf.train.batch([single_input, single_label],
                                            batch_size=FLAGS.batch_size)

        # Build a Graph that computes predictions from the inference model.
        logits = network.inference(inputs, FLAGS.hidden1, FLAGS.hidden2)

        # Add to the Graph the Ops for loss calculation.
        loss = network.loss(logits, labels)

        # Add to the Graph the Ops that calculate and apply gradients.
        train_op = network.training(loss, FLAGS.learning_rate)

        # Add the Op to compare the logits to the labels during evaluation.
        eval_correct = network.evaluation(logits, labels)

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.summary.merge_all()

        # Create a saver for writing training checkpoints.
        saver = tf.train.Saver()

        # Create the op for initializing variables.
        init_op = tf.group(tf.global_variables_initializer(),
                           tf.local_variables_initializer())
        # Create a session for running Ops on the Graph.
        sess = tf.Session()

        # Run the Op to initialize the variables.
        sess.run(init_op)

        # Instantiate a FileWriter to output summaries and the Graph.
        summary_writer = tf.summary.FileWriter(FLAGS.train_dir, sess.graph)

        # Start input enqueue threads.
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        # And then after everything is built, start the training loop.
        for ep in xrange(FLAGS.num_epochs):
            for step in xrange(FLAGS.max_steps):
                start_time = time.time()
                _, loss_value = sess.run([train_op, loss])
                duration = time.time() - start_time
                # Stop once the loss is effectively zero.
                if loss_value <= 1e-5:
                    print(
                        'Loss value: %.4f, done training for %d epochs, %d steps.'
                        % (loss_value, ep, ep * FLAGS.max_steps + step))
                    return
                # Write the summaries and print an overview fairly often.
                if step % 100 == 0:
                    # Print status to stdout.
                    print('Epoch %d: loss = %.4f (%.3f sec)' %
                          (ep, loss_value, duration))
                    # Update the events file.
                    summary_str = sess.run(summary_op)
                    summary_writer.add_summary(summary_str, step)

                # Save a checkpoint periodically (every 1000 steps and at the end).
                if (step + 1) % 1000 == 0 or (step + 1) == FLAGS.max_steps:
                    print('Saving')
                    saver.save(sess, FLAGS.train_dir, global_step=step)
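
        # Assumed cleanup (not in the original excerpt): stop the input threads
        # and close the session so the process can exit cleanly.
        coord.request_stop()
        coord.join(threads)
        sess.close()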
Example #5
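Combines a cross-entropy loss with a center loss (weighted by the `_lambda` placeholder) and writes train and test summaries to separate writers.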
# see: https://github.com/tensorflow/tensorflow/issues/2292
session_config = tf.ConfigProto()  # assumed; the excerpt does not show its creation
session_config.allow_soft_placement = True

if __name__ == "__main__":

    with tf.Graph().as_default() as g, tf.device(USE_DEVICE):
        # Build the inference graph.
        inputs, deep_features = network.inference()
        labels, logits, cross_entropy = network.loss(deep_features)
        centroid_loss, centroids, spread = network.center_loss(deep_features, labels)

        # combine the two losses
        _lambda = tf.placeholder(dtype=tf.float32)
        total_loss = cross_entropy + _lambda / 2. * centroid_loss

        learning_rate, train, global_step = network.training(total_loss)
        eval_op = network.evaluation(logits, labels)

        init = tf.initialize_all_variables()

        with tf.Session(config=session_config) as sess, \
                h5py.File(DUMP_FILE, 'a', libver='latest', swmr=True) as h5_file:
            # Merge all the summaries and write them out under SUMMARIES_DIR.
            # To see the graph, fire up TensorBoard with --logdir="./train".
            all_summary = tf.merge_all_summaries()
            train_writer = tf.train.SummaryWriter(SUMMARIES_DIR + '/summaries/train', sess.graph)
            test_writer = tf.train.SummaryWriter(SUMMARIES_DIR + '/summaries/test')

            saver = tf.train.Saver()

            if RESTORE: