Example #1
# merge all summaries into a single op
merged_summary_op = tf.summary.merge_all()

saver = tf.train.Saver(write_version=saver_pb2.SaverDef.V1)

# op to write logs to Tensorboard
logs_path = './logs'
summary_writer = tf.summary.FileWriter(logs_path,
                                       graph=tf.get_default_graph())

epochs = 50
batch_size = 128

# train over the dataset about 50 times
for epoch in range(epochs):
    for i in range(int(driving_data.num_images / batch_size)):
        xs, ys = driving_data.LoadTrainBatch(batch_size)
        train_step.run(feed_dict={
            model.x: xs,
            model.y_: ys,
            model.keep_prob: 0.8
        })
        if i % 10 == 0:
            xs, ys = driving_data.LoadValBatch(batch_size)
            loss_value = loss.eval(feed_dict={
                model.x: xs,
                model.y_: ys,
                model.keep_prob: 1.0
            })
            print("Epoch: %d, Step: %d, Loss: %g" %
                  (epoch, epoch * batch_size + i, loss_value))
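Example #1 is an excerpt: loss, train_step and the model module are defined earlier in the script. A minimal sketch of that setup, following the construction shown in Examples #4 and #6 below (the 1e-4 learning rate is taken from Example #6), would be:

import tensorflow as tf

import model  # the same model module the loop above feeds

# mean squared error between recorded and predicted steering, trained with Adam
loss = tf.reduce_mean(tf.square(tf.subtract(model.y_, model.y)))
train_step = tf.train.AdamOptimizer(1e-4).minimize(loss)
tf.summary.scalar("loss", loss)  # picked up by tf.summary.merge_all()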
Example #2
# merge all summaries into a single op
merged_summary_op = tf.summary.merge_all()

# op to write logs to Tensorboard
logs_path = './logs'
summary_writer = tf.summary.FileWriter(logs_path, graph=tf.get_default_graph())

epochs = 50
batch_size = 80

# train over the dataset about 50 times
for epoch in range(epochs):
    for i in range(int(driving_data.num_images / batch_size)):

        xs, ys, accels, brakes = driving_data.LoadTrainBatch(batch_size, False)

        train_step_accel.run(
            feed_dict={
                model_accel.x_accel: xs,
                model_accel.y_accel_: accels,
                model_accel.keep_prob_accel: 0.5,
                model_accel.keep_prob_accel_conv: 0.25
            },
            session=sess_accel)

        if i % 10 == 0:
            xs, ys, accels, brakes = driving_data.LoadValBatch(batch_size, False)
            # assumption: the validation loss mirrors the training feed,
            # with both dropout keep probabilities set to 1.0
            loss_value_accel = loss_accel.eval(
                feed_dict={
                    model_accel.x_accel: xs,
                    model_accel.y_accel_: accels,
                    model_accel.keep_prob_accel: 1.0,
                    model_accel.keep_prob_accel_conv: 1.0
                },
                session=sess_accel)
Example #3
# merge all summaries into a single op
merged_summary_op = tf.summary.merge_all()

saver = tf.train.Saver(write_version=saver_pb2.SaverDef.V1)

# op to write logs to Tensorboard
logs_path = './logs'  # all logs and summaries from above are written to this directory
summary_writer = tf.summary.FileWriter(logs_path, graph=tf.get_default_graph())

epochs = 30
batch_size = 100

# train over the dataset about 30 times
for epoch in range(epochs):
    for i in range(int(driving_data.num_images / batch_size)):
        xs, ys = driving_data.LoadTrainBatch(
            batch_size)  # LoadTrainBatch is defined in driving_data.py
        train_step.run(feed_dict={
            model.x: xs,
            model.y_: ys,
            model.keep_prob: 0.8
        })  # keep_prob = 1 - dropout rate
        if i % 10 == 0:  # print the validation loss every 10th iteration of each epoch
            xs, ys = driving_data.LoadValBatch(batch_size)
            loss_value = loss.eval(feed_dict={
                model.x: xs,
                model.y_: ys,
                model.keep_prob: 1.0
            })
            print("Epoch: %d, Step: %d, Loss: %g" %
                  (epoch, epoch * batch_size + i, loss_value))
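Example #3 builds merged_summary_op and summary_writer but the excerpt never evaluates or writes them. Following the pattern in Example #4, the missing write step inside the inner loop would look roughly like this (a sketch reusing the loop's xs, ys, epoch and i):

        # evaluate the merged summaries and write them for TensorBoard
        summary = merged_summary_op.eval(feed_dict={
            model.x: xs,
            model.y_: ys,
            model.keep_prob: 1.0
        })
        summary_writer.add_summary(
            summary, epoch * int(driving_data.num_images / batch_size) + i)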
Example #4
def main():

    args = get_arguments()

    sess = tf.InteractiveSession()
    model = ConvModel(drop_out=args.drop_out, relu=True, is_training=True)
    L2NormConst = 0.001

    train_vars = tf.trainable_variables()

    loss = tf.reduce_mean(tf.square(tf.subtract(model.y_, model.y))) + \
        tf.add_n([tf.nn.l2_loss(v) for v in train_vars]) * L2NormConst
    train_step = tf.train.AdamOptimizer(args.learning_rate).minimize(loss)
    sess.run(tf.global_variables_initializer())

    # create a summary to monitor cost tensor
    tf.summary.scalar("loss", loss)
    # merge all summaries into a single op
    merged_summary_op = tf.summary.merge_all()

    saver = tf.train.Saver(write_version=saver_pb2.SaverDef.V2)

    # op to write logs to Tensorboard
    logs_path = './logs'
    summary_writer = tf.summary.FileWriter(logs_path,
                                           graph=tf.get_default_graph())

    epochs = 30
    batch_size = 100

    num_of_parameters = np.sum([
        np.product([xi.value for xi in x.get_shape()])
        for x in tf.global_variables()
    ])
    print("Number of parameters: %d" % num_of_parameters)

    # train over the dataset about 30 times
    for epoch in range(epochs):
        for i in range(int(driving_data.num_images / batch_size)):
            xs, ys = driving_data.LoadTrainBatch(batch_size)
            train_step.run(feed_dict={
                model.x: xs,
                model.y_: ys,
                model.keep_prob: 0.3
            })
            if i % 10 == 0:
                xs, ys = driving_data.LoadValBatch(batch_size)
                loss_value = loss.eval(feed_dict={
                    model.x: xs,
                    model.y_: ys,
                    model.keep_prob: 1.0
                })
                print("Epoch: %d, Step: %d, Loss: %g" %
                      (epoch, epoch * batch_size + i, loss_value))

            # write logs at every iteration
            summary = merged_summary_op.eval(feed_dict={
                model.x: xs,
                model.y_: ys,
                model.keep_prob: 1.0
            })
            summary_writer.add_summary(
                summary, epoch * int(driving_data.num_images / batch_size) + i)

            if i % batch_size == 0:
                if not os.path.exists(LOGDIR):
                    os.makedirs(LOGDIR)
                checkpoint_path = os.path.join(LOGDIR, "model.ckpt")
                filename = saver.save(sess, checkpoint_path)
        print("Model saved in file: %s" % filename)

    print("Run the command line:\n"
          "--> tensorboard --logdir=./logs "
          "\nThen open http://0.0.0.0:6006/ into your web browser")
Example #5
if not os.path.exists(TRAIN_TENSORBOARD_LOG):
    os.makedirs(TRAIN_TENSORBOARD_LOG)
if not os.path.exists(VAL_TENSORBOARD_LOG):
    os.makedirs(VAL_TENSORBOARD_LOG)

# op to write logs to Tensorboard
train_summary_writer = tf.summary.FileWriter(TRAIN_TENSORBOARD_LOG,
                                             graph=tf.get_default_graph())
val_summary_writer = tf.summary.FileWriter(VAL_TENSORBOARD_LOG,
                                           graph=tf.get_default_graph())

batch_size = 100

for i in range(int(driving_data.num_images * 3)):
    xs_train, ys_train = driving_data.LoadTrainBatch(batch_size)
    train_step.run(feed_dict={
        model.x: xs_train,
        model.y_: ys_train,
        model.keep_prob: 0.8
    })

    if i % 10 == 0:
        xs_val, ys_val = driving_data.LoadValBatch(batch_size)
        # evaluate the summary ops every 10th iteration
        train_loss = train_summary.eval(feed_dict={
            model.x: xs_train,
            model.y_: ys_train,
            model.keep_prob: 1.0
        })
        # assumption: the validation summary mirrors the training feed,
        # using the validation batch with dropout disabled
        val_loss = val_summary.eval(feed_dict={
            model.x: xs_val,
            model.y_: ys_val,
            model.keep_prob: 1.0
        })
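The excerpt stops before anything is written to the two writers. A minimal sketch of the write-back step, assuming train_summary and val_summary are summary ops (so their eval results are serialized summaries) and the loop counter i serves as the global step, would sit in the same if-block:

        train_summary_writer.add_summary(train_loss, i)
        val_summary_writer.add_summary(val_loss, i)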
Example #6
import os
import tensorflow as tf

import driving_data
import model

LOGDIR = './save'

sess = tf.InteractiveSession()

loss = tf.reduce_mean(tf.square(tf.subtract(model.y_, model.y)))
train_step = tf.train.AdamOptimizer(1e-4).minimize(loss)
sess.run(tf.global_variables_initializer())

saver = tf.train.Saver()

# train over the dataset about 30 times
for i in range(int(driving_data.num_images * 0.3)):
    xs, ys = driving_data.LoadTrainBatch(100)
    train_step.run(feed_dict={model.x: xs, model.y_: ys, model.keep_prob: 0.8})
    if i % 10 == 0:
        xs, ys = driving_data.LoadValBatch(100)
        print("step %d, val loss %g" %
              (i,
               loss.eval(feed_dict={
                   model.x: xs,
                   model.y_: ys,
                   model.keep_prob: 1.0
               })))
    if i % 100 == 0:
        if not os.path.exists(LOGDIR):
            os.makedirs(LOGDIR)
        checkpoint_path = os.path.join(LOGDIR, "model.ckpt")
        filename = saver.save(sess, checkpoint_path)
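Once the loop above has written ./save/model.ckpt, it can be restored for inference. A minimal sketch, assuming the same model module and that model.y has shape [batch, 1]:

import tensorflow as tf

import model

sess = tf.InteractiveSession()
saver = tf.train.Saver()
saver.restore(sess, "./save/model.ckpt")

def predict_steering(image):
    # image: one preprocessed frame shaped as model.x expects
    return model.y.eval(session=sess,
                        feed_dict={model.x: [image], model.keep_prob: 1.0})[0][0]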
Example #7
# merge all summaries into a single op
merged_summary_op = tf.summary.merge_all()

saver = tf.train.Saver(write_version=saver_pb2.SaverDef.V1)

# op to write logs to Tensorboard
logs_path = './logs'
summary_writer = tf.summary.FileWriter(logs_path, graph=tf.get_default_graph())

epochs = 30
batch_size = 100

# train over the dataset about 30 times
for epoch in range(epochs):
  for i in range(int(driving_data.num_images / batch_size)):
    xs1, xs2, ys1, ys2, ys3 = driving_data.LoadTrainBatch(batch_size)
    train_step.run(feed_dict={model.x1: xs1, model.x2: xs2, model.y1_: ys1,
                              model.y2_: ys2, model.y3_: ys3, model.keep_prob: 0.8})
    if i % 10 == 0:
      xs1, xs2, ys1, ys2, ys3 = driving_data.LoadValBatch(batch_size)
      loss_value = loss.eval(feed_dict={model.x1: xs1, model.x2: xs2, model.y1_: ys1,
                                        model.y2_: ys2, model.y3_: ys3, model.keep_prob: 1.0})
      print("Epoch: %d, Step: %d, Loss: %g" %
            (epoch, epoch * int(driving_data.num_images / batch_size) + i, loss_value))

    # write logs at every iteration
    summary = merged_summary_op.eval(feed_dict={model.x1: xs1, model.x2: xs2, model.y1_: ys1,
                                                model.y2_: ys2, model.y3_: ys3, model.keep_prob: 1.0})
    summary_writer.add_summary(
        summary, epoch * int(driving_data.num_images / batch_size) + i)

    if i % batch_size == 0:
      if not os.path.exists(LOGDIR):
        os.makedirs(LOGDIR)
      checkpoint_path = os.path.join(LOGDIR, "model.ckpt")
      filename = saver.save(sess, checkpoint_path)
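Example #7 feeds two image inputs and three targets, so the loss it trains (not shown in the excerpt) must combine three output heads. A hypothetical sketch, assuming the model exposes outputs y1, y2, y3 alongside the placeholders above and weighting the heads equally:

# hypothetical three-headed loss; the actual weighting in the source may differ
loss = (tf.reduce_mean(tf.square(tf.subtract(model.y1_, model.y1))) +
        tf.reduce_mean(tf.square(tf.subtract(model.y2_, model.y2))) +
        tf.reduce_mean(tf.square(tf.subtract(model.y3_, model.y3))))
train_step = tf.train.AdamOptimizer(1e-4).minimize(loss)
tf.summary.scalar("loss", loss)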