# feed trough network evaluation = tf.placeholder_with_default(True, shape=()) # _, coord3d_pred, R = net.inference(data['scoremap'], data['hand_side'], evaluation) #inference = net.inference(data["image"], evaluation, train=False) gesture_pred = net.inference(data['image'], evaluation, train=False) # Start TF gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.8) sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) tf.train.start_queue_runners(sess=sess) if USE_RETRAINED: # retrained version: HandSegNet last_cpt = tf.train.latest_checkpoint(PATH_TO_HANDSEGNET_SNAPSHOTS) assert last_cpt is not None, "Could not locate snapshot to load. Did you already train the network and set the path accordingly?" load_weights_from_snapshot(sess, last_cpt, discard_list=['Adam', 'global_step', 'beta']) # retrained version: PoseNet last_cpt = tf.train.latest_checkpoint(PATH_TO_POSENET_SNAPSHOTS) assert last_cpt is not None, "Could not locate snapshot to load. Did you already train the network and set the path accordingly?" load_weights_from_snapshot(sess, last_cpt, discard_list=['Adam', 'global_step', 'beta']) last_cpt = tf.train.latest_checkpoint(PATH_TO_GESTURE_SNAPSHOTS) assert last_cpt is not None, "Could not locate snapshot to load. Did you already train the network and set the path accordingly?" load_weights_from_snapshot(sess, last_cpt, discard_list=['Adam', 'global_step', 'beta'])
# Solver
# `global_step` drives the piecewise-constant learning-rate schedule: the
# scheduler reads it to pick the current value from train_para['lr'] at the
# boundaries in train_para['lr_iter'].
global_step = tf.Variable(0, trainable=False, name="global_step")
lr_scheduler = LearningRateScheduler(values=train_para['lr'], steps=train_para['lr_iter'])
lr = lr_scheduler.get_lr(global_step)
opt = tf.train.AdamOptimizer(lr)
# FIX: the original called opt.minimize(loss) without global_step, so the step
# counter was never incremented and lr_scheduler.get_lr(global_step) returned
# the first scheduled learning rate for the entire run. Passing
# global_step=global_step makes minimize() increment it once per train step,
# which activates the schedule. (Interface unchanged: train_op still runs one
# optimization step.)
train_op = opt.minimize(loss, global_step=global_step)

# init weights
sess.run(tf.global_variables_initializer())
# Keep only the most recent checkpoint, plus one every 4 hours.
saver = tf.train.Saver(max_to_keep=1, keep_checkpoint_every_n_hours=4.0)

# Initialize PoseNet2D from the pretrained MPII CPM weights. rename_dict maps
# the checkpoint's variable scopes onto this graph's scopes; the list argument
# names checkpoint scopes to skip because they have no counterpart here.
rename_dict = {'CPM/PoseNet': 'PoseNet2D', '_CPM': ''}
load_weights_from_snapshot(sess, './weights/cpm-model-mpii',
                           ['PersonNet', 'PoseNet/Mconv', 'conv5_2_CPM'], rename_dict)

# snapshot dir
if not os.path.exists(train_para['snapshot_dir']):
    os.mkdir(train_para['snapshot_dir'])
    print('Created snapshot dir:', train_para['snapshot_dir'])

# Training loop
print('Starting to train ...')
for i in range(train_para['max_iter']):
    # One optimization step; fetch the loss for monitoring.
    _, loss_v = sess.run([train_op, loss])

    if (i % train_para['show_loss_freq']) == 0:
        print('Iteration %d\t Loss %.1e' % (i, loss_v))
        sys.stdout.flush()
# NOTE(review): this excerpt starts mid-expression. The line below is the tail
# of a loss definition whose opening call (presumably something like
# `loss = tf.reduce_mean(` over per-pixel cross-entropy) lies before this
# chunk — confirm against the full file. Left byte-identical here.
    tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=gt))

# Solver
# `global_step` is read by the learning-rate scheduler below.
global_step = tf.Variable(0, trainable=False, name="global_step")
lr_scheduler = LearningRateScheduler(values=train_para['lr'], steps=train_para['lr_iter'])
lr = lr_scheduler.get_lr(global_step)
opt = tf.train.AdamOptimizer(lr)
# NOTE(review): minimize() is called without global_step=global_step, so the
# counter is never incremented and the LR schedule appears frozen at its first
# value — verify whether the step is advanced elsewhere before relying on the
# schedule.
train_op = opt.minimize(loss)

# init weights
sess.run(tf.global_variables_initializer())
# Keep only the most recent checkpoint, plus one every 4 hours.
saver = tf.train.Saver(max_to_keep=1, keep_checkpoint_every_n_hours=4.0)

# Initialize HandSegNet from the pretrained MPII CPM person-detection weights;
# rename_dict maps checkpoint scopes onto this graph, the list names
# checkpoint scopes with no counterpart here to skip.
rename_dict = {'CPM/PersonNet': 'HandSegNet', '_CPM': ''}
load_weights_from_snapshot(sess, './weights/cpm-model-mpii', ['PoseNet', 'Mconv', 'conv6'], rename_dict)

# snapshot dir
if not os.path.exists(train_para['snapshot_dir']):
    os.mkdir(train_para['snapshot_dir'])
    print('Created snapshot dir:', train_para['snapshot_dir'])

# Training loop
for i in tqdm(range(train_para['max_iter'])):
    # One optimization step; fetch the loss for monitoring.
    _, loss_v = sess.run([train_op, loss])

    if (i % train_para['show_loss_freq']) == 0:
        print('Iteration %d\t Loss %.1e' % (i, loss_v))
        sys.stdout.flush()

    # NOTE(review): excerpt is truncated here — the body of this conditional
    # (presumably saver.save(...) into snapshot_dir) lies beyond this chunk.
    if (i % train_para['snapshot_freq']) == 0:
# Solver global_step = tf.Variable(0, trainable=False, name="global_step") lr_scheduler = LearningRateScheduler(values=train_para['lr'], steps=train_para['lr_iter']) lr = lr_scheduler.get_lr(global_step) opt = tf.train.AdamOptimizer(lr) train_op = opt.minimize(loss) # init weights sess.run(tf.global_variables_initializer()) saver = tf.train.Saver(max_to_keep=1, keep_checkpoint_every_n_hours=1.0) # use weights from previous snapshots to train the network PATH_TO_POSENET_SNAPSHOTS = './snapshots_posenet/' last_cpt = tf.train.latest_checkpoint(PATH_TO_POSENET_SNAPSHOTS) assert last_cpt is not None, "Could not locate snapshot to load. Did you already train the network and set the path accordingly?" load_weights_from_snapshot(sess, last_cpt, discard_list=['Adam', 'global_step', 'beta']) # use initial weights to train the network '''rename_dict = {'CPM/PoseNet': 'PoseNet2D', '_CPM': ''} load_weights_from_snapshot(sess, './weights/cpm-model-mpii', ['PersonNet', 'PoseNet/Mconv', 'conv5_2_CPM'], rename_dict) ''' # snapshot dir if not os.path.exists(train_para['snapshot_dir']): os.mkdir(train_para['snapshot_dir']) print('Created snapshot dir:', train_para['snapshot_dir']) # Training loop print('Starting to train ...') for i in range(train_para['max_iter']):