def __log_files_and_configs(self):
    # Create a timestamped results directory and snapshot the source files
    # used for this run, so results can be traced back to the exact code.
    self.results_dir_path = tools.create_results_dir("train_seq")
    self.curr_dir_path = os.path.dirname(os.path.realpath(__file__))
    tools.log_file_content(self.results_dir_path,
                           [os.path.realpath(__file__),
                            os.path.join(self.curr_dir_path, "data_roller.py"),
                            os.path.join(self.curr_dir_path, "model.py"),
                            os.path.join(self.curr_dir_path, "losses.py"),
                            os.path.join(self.curr_dir_path, "train.py"),
                            os.path.join(self.curr_dir_path, "train_seq.py"),
                            os.path.join(self.curr_dir_path, "config.py")])
    tools.set_log_file(os.path.join(self.results_dir_path, "print_logs.txt"))
    config.print_configs(self.cfg)
def __log_files_and_configs(self):
    # Variant that discovers the .py files to snapshot automatically instead
    # of maintaining a hard-coded list; skips anything under results/.
    self.results_dir_path = tools.create_results_dir("train_seq")
    self.curr_dir_path = os.path.dirname(os.path.realpath(__file__))
    files_to_log = []
    for filename in glob.iglob(os.path.join(self.curr_dir_path, "**"), recursive=True):
        if "/results/" not in filename and filename.endswith(".py"):
            files_to_log.append(filename)
    tools.log_file_content(self.results_dir_path, files_to_log)
    tools.set_log_file(os.path.join(self.results_dir_path, "print_logs.txt"))
    config.print_configs(self.cfg)
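# For reference, a minimal stand-alone sketch of the snapshotting idea above,
# assuming tools.log_file_content simply copies the listed source files into
# the results directory (the helper name snapshot_sources is hypothetical):
import glob
import os
import shutil


def snapshot_sources(results_dir, src_dir):
    # Copy every .py file under src_dir into results_dir; "**" together with
    # recursive=True makes iglob descend into all subdirectories, and the
    # "/results/" check keeps previously snapshotted copies out of the log.
    for filename in glob.iglob(os.path.join(src_dir, "**"), recursive=True):
        if filename.endswith(".py") and "/results/" not in filename:
            shutil.copy(filename, results_dir)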
        )
        _fc_losses = sess.run(fc_losses, feed_dict={
            inputs: batch_data,
            fc_labels: fc_ground_truth,
            is_training: False
        })
        fc_losses_history.append(_fc_losses)
    return fc_losses_history, sum(fc_losses_history) / len(fc_losses_history)


# =================== SAVING/LOADING DATA ========================
results_dir_path = tools.create_results_dir("train_pair")
tf_saver = tf.train.Saver()
restore_model_file = None

# =================== TRAINING ========================
with tf.Session() as sess:
    if restore_model_file:
        tools.printf("Restoring model weights from %s..." % restore_model_file)
        tf_saver.restore(sess, restore_model_file)
    else:
        tools.printf("Initializing variables...")
        sess.run(tf.global_variables_initializer())

    # Visualization: dump the graph so it can be inspected in TensorBoard
    writer = tf.summary.FileWriter('graph_viz/')
    writer.add_graph(tf.get_default_graph())
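# The restore-or-initialize block above follows the standard TF 1.x checkpoint
# pattern: restore from a checkpoint when a path is given, otherwise run the
# global initializer. A minimal self-contained sketch (the variable w and the
# /tmp save path are placeholders, not part of this project):
import tensorflow as tf

w = tf.get_variable("w", shape=[2, 2])
saver = tf.train.Saver()
restore_model_file = None  # set to a checkpoint path to resume training

with tf.Session() as sess:
    if restore_model_file:
        # Saver.restore loads the saved values, so no initializer is needed
        saver.restore(sess, restore_model_file)
    else:
        sess.run(tf.global_variables_initializer())
    saver.save(sess, "/tmp/model_checkpoint")  # hypothetical save path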
lr = tf.placeholder(tf.float32, name="se3_lr", shape=[])
trainer = tf.train.AdamOptimizer(learning_rate=lr).minimize(
    total_losses, colocate_gradients_with_ops=True)

# with tf.device(tf.train.replica_device_setter(ps_tasks=1, ps_device='/job:localhost/replica:0/task:0/device:CPU:0', worker_device='/job:localhost/replica:0/task:0/device:GPU:1')):
#     val_inputs, val_lstm_init, val_initial_poses, val_is_training, val_fc_outputs, val_se3_outputs, val_lstm_states = simple_model.build_seq_model(
#         val_cfg, True)
#     val_se3_labels, val_fc_labels = simple_model.model_labels(val_cfg)
#
#     with tf.variable_scope("Val_Losses"):
#         se3_losses_val, _, _ = losses.se3_losses(val_se3_outputs, val_se3_labels, val_cfg.k_se3)
#         fc_losses_val, _, _, _, _, _ = losses.pair_train_fc_losses(val_fc_outputs, val_fc_labels, val_cfg.k_fc)
#         total_losses_val = (1 - alpha) * se3_losses_val + alpha * fc_losses_val

# =================== SAVING/LOADING DATA ========================
results_dir_path = tools.create_results_dir("train_seq")
tools.log_file_content(results_dir_path, os.path.realpath(__file__))

tf_checkpoint_saver = tf.train.Saver(max_to_keep=3)
tf_best_saver = tf.train.Saver(max_to_keep=2)
tf_restore_saver = tf.train.Saver()

# restore_model_file = None
restore_model_file = "/home/ben/School/e2e_results/train_seq_20180419-00-46-05_timesteps20_no_reverse/best_val/model_best_val_checkpoint-49"

# just for restoring pre-trained CNN weights
cnn_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, "^cnn_layer.*")
cnn_init_tf_saver = tf.train.Saver(cnn_variables)
cnn_init_model_file = None
# cnn_init_model_file = "/home/cs4li/Dev/end_to_end_visual_odometry/results/train_seq_20180414-01-33-38_simplemodel1lstmseq0f2f/model_epoch_checkpoint-199"
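# Restoring only the CNN weights works because a Saver built over a subset of
# variables reads (and writes) just those variables, ignoring the rest of the
# graph. A minimal sketch of the scoped-restore pattern; the cnn_layer and
# lstm_layer scopes and variable shapes here are illustrative placeholders:
import tensorflow as tf

with tf.variable_scope("cnn_layer"):
    conv_w = tf.get_variable("conv_w", shape=[3, 3, 1, 8])
with tf.variable_scope("lstm_layer"):
    lstm_w = tf.get_variable("lstm_w", shape=[8, 8])

# The scope argument of get_collection is a regex, so "^cnn_layer.*" matches
# only the variables created under the cnn_layer scope.
cnn_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, "^cnn_layer.*")
cnn_saver = tf.train.Saver(cnn_variables)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # cnn_saver.restore(sess, cnn_init_model_file) would then overwrite only
    # the CNN weights, leaving lstm_layer at its fresh initialization.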