# NOTE(review): collapsed one-line fragment of a steering-model training loop.
# It is truncated at BOTH edges: the leading "graph=tf.get_default_graph())"
# closes a tf.summary.FileWriter(...) call whose start is not in view, and the
# trailing "summary_writer.add_summary(" is cut off before its arguments.
# Visible behavior: 50 epochs x (num_images / 128) minibatches; trains with
# keep_prob 0.8, and every 10th batch evaluates a validation loss with
# keep_prob 1.0 and prints it. The printed step "epoch * batch_size + i"
# presumably should be epoch * batches_per_epoch + i — TODO confirm against
# the complete original before reusing this counter.
graph=tf.get_default_graph()) epochs = 50 batch_size = 128 # train over the dataset about 30 times for epoch in range(epochs): for i in range(int(driving_data.num_images / batch_size)): xs, ys = driving_data.LoadTrainBatch(batch_size) train_step.run(feed_dict={ model.x: xs, model.y_: ys, model.keep_prob: 0.8 }) if i % 10 == 0: xs, ys = driving_data.LoadValBatch(batch_size) loss_value = loss.eval(feed_dict={ model.x: xs, model.y_: ys, model.keep_prob: 1.0 }) print("Epoch: %d, Step: %d, Loss: %g" % (epoch, epoch * batch_size + i, loss_value)) # write logs at every iteration summary = merged_summary_op.eval(feed_dict={ model.x: xs, model.y_: ys, model.keep_prob: 1.0 }) summary_writer.add_summary(
# NOTE(review): collapsed one-line fragment training an acceleration head
# (model_accel) in its own session (sess_accel). LoadTrainBatch/LoadValBatch
# are called with a second argument False and return four values
# (xs, ys, accels, brakes); only xs and accels are fed here — ys/brakes unused
# in this fragment. Training uses keep_prob_accel 0.5 and
# keep_prob_accel_conv 0.25; validation disables both (1.0). The fragment is
# truncated: it ends at a dangling "if i % batch_size == 0:" whose body
# (presumably a checkpoint save, as in the sibling scripts) is not in view —
# do not treat this line as a complete program.
# train over the dataset about 30 times for epoch in range(epochs): for i in range(int(driving_data.num_images / batch_size)): xs, ys, accels, brakes = driving_data.LoadTrainBatch(batch_size, False) train_step_accel.run(feed_dict={ model_accel.x_accel: xs, model_accel.y_accel_: accels, model_accel.keep_prob_accel: 0.5, model_accel.keep_prob_accel_conv: 0.25 }, session=sess_accel) if i % 10 == 0: xs, ys, accels, brakes = driving_data.LoadValBatch( batch_size, False) loss_value_accel = loss_accel.eval(feed_dict={ model_accel.x_accel: xs, model_accel.y_accel_: accels, model_accel.keep_prob_accel: 1.0, model_accel.keep_prob_accel_conv: 1.0 }, session=sess_accel) print("Epoch: %d, Step: %d, Accel Loss: %g " % (epoch, epoch * batch_size + i, loss_value_accel)) if i % batch_size == 0:
def main():
    """Train the ConvModel steering network on the driving_data set.

    Builds an L2-regularized MSE loss over the model's steering output,
    runs Adam for a fixed number of epochs, writes loss summaries to
    TensorBoard under ./logs, and periodically checkpoints the session
    to LOGDIR. Hyperparameters (dropout, learning rate) come from
    get_arguments().
    """
    args = get_arguments()
    sess = tf.InteractiveSession()
    model = ConvModel(drop_out=args.drop_out, relu=True, is_training=True)

    # MSE between target (y_) and predicted (y) steering, plus L2 weight
    # decay over every trainable variable.
    L2NormConst = 0.001
    train_vars = tf.trainable_variables()
    loss = tf.reduce_mean(tf.square(tf.subtract(model.y_, model.y))) + \
        tf.add_n([tf.nn.l2_loss(v) for v in train_vars]) * L2NormConst
    train_step = tf.train.AdamOptimizer(args.learning_rate).minimize(loss)
    sess.run(tf.global_variables_initializer())

    # create a summary to monitor cost tensor
    tf.summary.scalar("loss", loss)
    # merge all summaries into a single op
    merged_summary_op = tf.summary.merge_all()
    saver = tf.train.Saver(write_version=saver_pb2.SaverDef.V2)

    # op to write logs to Tensorboard
    logs_path = './logs'
    summary_writer = tf.summary.FileWriter(logs_path,
                                           graph=tf.get_default_graph())

    epochs = 30
    batch_size = 100
    # np.prod replaces the deprecated np.product alias (same semantics).
    num_of_parameters = np.sum([
        np.prod([xi.value for xi in x.get_shape()])
        for x in tf.global_variables()
    ])
    print("Number of parameters: %d" % num_of_parameters)

    # train over the dataset about 30 times
    batches_per_epoch = driving_data.num_images // batch_size
    for epoch in range(epochs):
        for i in range(batches_per_epoch):
            xs, ys = driving_data.LoadTrainBatch(batch_size)
            train_step.run(feed_dict={
                model.x: xs,
                model.y_: ys,
                model.keep_prob: 0.3
            })
            if i % 10 == 0:
                xs, ys = driving_data.LoadValBatch(batch_size)
                loss_value = loss.eval(feed_dict={
                    model.x: xs,
                    model.y_: ys,
                    model.keep_prob: 1.0
                })
                # Fix: the step counter previously printed
                # epoch * batch_size + i, which disagreed with the global
                # step used for the summaries below; both now use the
                # same epoch * batches_per_epoch + i.
                print("Epoch: %d, Step: %d, Loss: %g" %
                      (epoch, epoch * batches_per_epoch + i, loss_value))

            # write logs at every iteration (integer global step; the
            # original passed a float epoch * num_images / batch_size + i)
            summary = merged_summary_op.eval(feed_dict={
                model.x: xs,
                model.y_: ys,
                model.keep_prob: 1.0
            })
            summary_writer.add_summary(summary,
                                       epoch * batches_per_epoch + i)

            # checkpoint every batch_size-th minibatch
            if i % batch_size == 0:
                if not os.path.exists(LOGDIR):
                    os.makedirs(LOGDIR)
                checkpoint_path = os.path.join(LOGDIR, "model.ckpt")
                filename = saver.save(sess, checkpoint_path)
                print("Model saved in file: %s" % filename)

    print("Run the command line:\n"
          "--> tensorboard --logdir=./logs "
          "\nThen open http://0.0.0.0:6006/ into your web browser")
# NOTE(review): collapsed one-line fragment that logs separate train and
# validation summaries (tf.train.SummaryWriter is the pre-1.0 API; the
# sibling scripts here use tf.summary.FileWriter). Truncated at BOTH edges:
# the leading ")" closes a train summary-writer construction outside view,
# and the line is cut off inside the final loss.eval feed_dict
# ("model.x: xs_train,"). Visible behavior: num_images * 3 steps; every
# 10th step it evaluates summary tensors for the current train batch and a
# fresh val batch (keep_prob 1.0) and writes both, then re-evaluates the
# raw train loss — presumably for printing; TODO confirm against the
# complete original.
graph=tf.get_default_graph()) val_summary_writer = tf.train.SummaryWriter(VAL_TENSORBOARD_LOG, graph=tf.get_default_graph()) batch_size = 100 for i in range(int(driving_data.num_images * 3)): xs_train, ys_train = driving_data.LoadTrainBatch(batch_size) train_step.run(feed_dict={ model.x: xs_train, model.y_: ys_train, model.keep_prob: 0.8 }) if i % 10 == 0: xs_val, ys_val = driving_data.LoadValBatch(batch_size) # write logs at every iteration train_loss = train_summary.eval(feed_dict={ model.x: xs_train, model.y_: ys_train, model.keep_prob: 1.0 }) val_loss = val_summary.eval(feed_dict={ model.x: xs_val, model.y_: ys_val, model.keep_prob: 1.0 }) train_summary_writer.add_summary(train_loss, i) val_summary_writer.add_summary(val_loss, i) train_loss = loss.eval(feed_dict={ model.x: xs_train,
# Minimal training script for the steering model: MSE loss, Adam, and a
# checkpoint to ./save every 100 steps. Assumes `model` and `driving_data`
# are already imported/defined at file level.
LOGDIR = './save'

sess = tf.InteractiveSession()

# Mean-squared error between target (y_) and predicted (y) steering.
# tf.subtract and tf.global_variables_initializer replace the pre-1.0
# aliases tf.sub / tf.initialize_all_variables (removed in TF 1.0),
# matching the API used by the other training scripts in this file.
loss = tf.reduce_mean(tf.square(tf.subtract(model.y_, model.y)))
train_step = tf.train.AdamOptimizer(1e-4).minimize(loss)
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()

# train over the dataset about 30 times
for i in range(int(driving_data.num_images * 0.3)):
    xs, ys = driving_data.LoadTrainBatch(100)
    # one optimizer step with dropout keep probability 0.8
    train_step.run(feed_dict={model.x: xs, model.y_: ys, model.keep_prob: 0.8})
    if i % 10 == 0:
        # report validation loss with dropout disabled
        xs, ys = driving_data.LoadValBatch(100)
        print("step %d, val loss %g" % (i, loss.eval(feed_dict={
            model.x: xs,
            model.y_: ys,
            model.keep_prob: 1.0
        })))
    # checkpoint every 100 steps
    if i % 100 == 0:
        if not os.path.exists(LOGDIR):
            os.makedirs(LOGDIR)
        checkpoint_path = os.path.join(LOGDIR, "model.ckpt")
        filename = saver.save(sess, checkpoint_path)
        print("Model saved in file: %s" % filename)
# One epoch's worth of minibatches (runs inside an enclosing `epoch` loop):
# train with dropout, report a validation loss every 10th batch, and build
# a TensorBoard summary each iteration.
for i in range(int(driving_data.num_images / batch_size)):
    # next training batch
    xs, ys = driving_data.LoadTrainBatch(batch_size)
    # one optimizer step, keeping 80 percent of activations (dropout)
    train_step.run(feed_dict={
        model.x: xs,
        model.y_: ys,
        model.keep_prob: 0.8
    })
    # every 10th minibatch, compute one validation loss
    if i % 10 == 0:
        # load a held-out batch and evaluate with dropout disabled
        xs, ys = driving_data.LoadValBatch(batch_size)
        loss_value = loss.eval(feed_dict={
            model.x: xs,
            model.y_: ys,
            model.keep_prob: 1.0
        })
        print("Epoch: %d, Step: %d, Loss: %g" %
              (epoch, epoch * batch_size + i, loss_value))
    # write logs at every iteration (uses whichever batch is in xs/ys)
    summary = merged_summary_op.eval(feed_dict={
        model.x: xs,
        model.y_: ys,
        model.keep_prob: 1.0
    })
# NOTE(review): collapsed one-line fragment of a training loop for a
# two-input (x1, x2) / three-output (y1_, y2_, y3_) model, using a V1-format
# Saver. The fragment starts mid-function (loss/train_step defined outside
# view) and is truncated at the end: the final print carries a trailing
# backslash continuation whose remainder (presumably the tensorboard usage
# message seen in the sibling main()) is not in view. As in the other
# fragments, the printed step "epoch * batch_size + i" disagrees with the
# summary step "epoch * num_images / batch_size + i" — TODO confirm which
# counter the complete original intends.
saver = tf.train.Saver(write_version = saver_pb2.SaverDef.V1) # op to write logs to Tensorboard logs_path = './logs' summary_writer = tf.summary.FileWriter(logs_path, graph=tf.get_default_graph()) epochs = 30 batch_size = 100 # train over the dataset about 30 times for epoch in range(epochs): for i in range(int(driving_data.num_images/batch_size)): xs1, xs2, ys1, ys2, ys3 = driving_data.LoadTrainBatch(batch_size) train_step.run(feed_dict={model.x1:xs1, model.x2:xs2 , model.y1_: ys1, model.y2_: ys2 , model.y3_: ys3, model.keep_prob: 0.8}) if i % 10 == 0: xs1, xs2, ys1, ys2, ys3 = driving_data.LoadValBatch(batch_size) loss_value = loss.eval(feed_dict={model.x1:xs1, model.x2:xs2 , model.y1_: ys1, model.y2_: ys2 , model.y3_: ys3, model.keep_prob: 1.0}) print("Epoch: %d, Step: %d, Loss: %g" % (epoch, epoch * batch_size + i, loss_value)) # write logs at every iteration summary = merged_summary_op.eval(feed_dict={model.x1:xs1, model.x2:xs2 , model.y1_: ys1, model.y2_: ys2 , model.y3_: ys3, model.keep_prob: 1.0}) summary_writer.add_summary(summary, epoch * driving_data.num_images/batch_size + i) if i % batch_size == 0: if not os.path.exists(LOGDIR): os.makedirs(LOGDIR) checkpoint_path = os.path.join(LOGDIR, "model.ckpt") filename = saver.save(sess, checkpoint_path) print("Model saved in file: %s" % filename) print("Run the command line:\n" \