def describe_detections(imgs, dets):
  with tf.Graph().as_default():
    # placeholder for image
    image_pl, _ = utils.placeholder_inputs()

    # build description net
    print('Building description net graph...')
    desc_net = description.Net(image_pl, training=False)
    print('Done')

    with tf.Session() as sess:
      print('Restoring description model in {}...'.format(
          FLAGS.desc_model_dir))
      utils.restore_model(sess, FLAGS.desc_model_dir)
      print('Done')

      # capture description arguments in function
      def compute_descriptors(image, dets):
        return utils.trained_descriptors(image, dets, FLAGS.desc_patch_size,
                                         sess, image_pl, desc_net.descriptors)

      # compute descriptors
      descs = [
          compute_descriptors(img, img_dets)
          for img, img_dets in zip(imgs, dets)
      ]

  return descs
def main():
  half_patch_size = FLAGS.patch_size // 2

  with tf.Graph().as_default():
    image_pl, _ = utils.placeholder_inputs()

    print('Building graph...')
    net = models.FCN(image_pl, training=False)
    print('Done')

    with tf.Session() as sess:
      print('Restoring model in {}...'.format(FLAGS.model_dir_path))
      utils.restore_model(sess, FLAGS.model_dir_path)
      print('Done')

      print('Loading image...')
      image = utils.load_image(FLAGS.image_path)
      print('Done')

      print('Detecting pores...')
      detections = utils.detect_pores(image, image_pl, net.predictions,
                                      half_patch_size, FLAGS.prob_thr,
                                      FLAGS.inter_thr, sess)
      print('Done')

      print('Saving detections to {}...'.format(FLAGS.save_path))
      utils.save_dets_txt(detections, FLAGS.save_path)
      print('Done')
def detect_pores(imgs):
  with tf.Graph().as_default():
    # placeholder for image
    image_pl, _ = utils.placeholder_inputs()

    # build detection net
    print('Building detection net graph...')
    det_net = detection.Net(image_pl, training=False)
    print('Done')

    with tf.Session() as sess:
      print('Restoring detection model in {}...'.format(FLAGS.det_model_dir))
      utils.restore_model(sess, FLAGS.det_model_dir)
      print('Done')

      # capture detection arguments in function
      def detect_pores(image):
        return utils.detect_pores(image, image_pl, det_net.predictions,
                                  FLAGS.det_patch_size, FLAGS.det_prob_thr,
                                  FLAGS.nms_inter_thr, sess)

      # detect pores
      dets = [detect_pores(img) for img in imgs]

  return dets
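A minimal sketch of how the two helpers above might be chained, assuming FLAGS is already parsed and that utils.load_image and utils.save_dets_txt behave as in the other snippets; the file paths are placeholders, not real dataset paths.

# Hypothetical end-to-end usage: detect pores in a batch of images,
# then compute descriptors for the detections (paths are placeholders).
imgs = [utils.load_image(path) for path in ('fp1.bmp', 'fp2.bmp')]
dets = detect_pores(imgs)
descs = describe_detections(imgs, dets)
for path, img_dets in zip(('fp1.txt', 'fp2.txt'), dets):
  utils.save_dets_txt(img_dets, path)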
def minibatch_transformation(dataset):
  patches_pl, labels_pl = utils.placeholder_inputs()
  feed_dict = utils.fill_feed_dict(dataset.train, patches_pl, labels_pl, 36,
                                   augment=True)
  for patch in feed_dict[patches_pl]:
    cv2.imshow('patch', patch)
    cv2.waitKey(0)
def main():
  half_patch_size = FLAGS.patch_size // 2

  with tf.Graph().as_default():
    image_pl, _ = utils.placeholder_inputs()

    print('Building graph...')
    net = detection.Net(image_pl, training=False)
    print('Done')

    with tf.Session() as sess:
      print('Restoring model in {}...'.format(FLAGS.model_dir_path))
      utils.restore_model(sess, FLAGS.model_dir_path)
      print('Done')

      # capture arguments in closure
      def detect_pores(image):
        return utils.detect_pores(image, image_pl, net.predictions,
                                  half_patch_size, FLAGS.prob_thr,
                                  FLAGS.inter_thr, sess)

      # batch detect in dbi training
      print('Detecting pores in PolyU-HRF DBI Training images...')
      load_path = os.path.join(FLAGS.polyu_dir_path, 'DBI', 'Training')
      save_path = os.path.join(FLAGS.results_dir_path, 'DBI', 'Training')
      batch_detect(load_path, save_path, detect_pores)
      print('Done')

      # batch detect in dbi test
      print('Detecting pores in PolyU-HRF DBI Test images...')
      load_path = os.path.join(FLAGS.polyu_dir_path, 'DBI', 'Test')
      save_path = os.path.join(FLAGS.results_dir_path, 'DBI', 'Test')
      batch_detect(load_path, save_path, detect_pores)
      print('Done')

      # batch detect in dbii
      print('Detecting pores in PolyU-HRF DBII images...')
      load_path = os.path.join(FLAGS.polyu_dir_path, 'DBII')
      save_path = os.path.join(FLAGS.results_dir_path, 'DBII')
      batch_detect(load_path, save_path, detect_pores)
      print('Done')
def evaluate(train_dir):
  """Loads the model and runs evaluation."""
  target_dir = os.path.join(train_dir, "model_files")
  params = imp.load_source("params", os.path.join(target_dir, "params.py"))
  data_input = imp.load_source("input", os.path.join(target_dir, "input.py"))
  network = imp.load_source("network", os.path.join(target_dir, "network.py"))

  with tf.Graph().as_default():
    # Retrieve images and labels
    eval_data = FLAGS.eval_data == 'test'
    images, labels = data_input.inputs(eval_data=eval_data,
                                       data_dir=utils.cfg.data_dir,
                                       batch_size=params.batch_size)

    # Generate placeholders for the images and labels.
    keep_prob = utils.placeholder_inputs(params.batch_size)

    # Build a Graph that computes predictions from the inference model.
    logits = network.inference(images, keep_prob)

    # Add to the Graph the Ops for loss calculation.
    loss = network.loss(logits, labels)

    # Calculate predictions.
    top_k_op = tf.nn.in_top_k(logits, labels, 1)

    # Add the Op to compare the logits to the labels during evaluation.
    eval_correct = network.evaluation(logits, labels)

    # Build the summary operation based on the TF collection of Summaries.
    summary_op = tf.merge_all_summaries()

    # Create a saver for writing training checkpoints.
    saver = tf.train.Saver()

    # Create a session for running Ops on the Graph.
    sess = tf.Session()

    # Run the Op to initialize the variables.
    init = tf.initialize_all_variables()
    sess.run(init)

    # Start the queue runners.
    tf.train.start_queue_runners(sess=sess)

    ckpt = tf.train.get_checkpoint_state(train_dir)
    if ckpt and ckpt.model_checkpoint_path:
      saver.restore(sess, ckpt.model_checkpoint_path)
    else:
      print("No checkpoints found!")
      exit(1)

    print("Doing Evaluation with lots of data")
    utils.do_eval(sess=sess,
                  eval_correct=eval_correct,
                  keep_prob=keep_prob,
                  num_examples=params.num_examples_per_epoch_for_eval,
                  params=params,
                  name="eval")
tf.set_random_seed(flags.seed)
np.random.seed(flags.seed)

# load polyu dataset
print('Loading PolyU-HRF dataset...')
polyu_path = os.path.join(flags.polyu_dir_path, 'GroundTruth',
                          'PoreGroundTruth')
dataset = polyu.Dataset(
    os.path.join(polyu_path, 'PoreGroundTruthSampleimage'),
    os.path.join(polyu_path, 'PoreGroundTruthMarked'),
    split=(15, 5, 10),
    patch_size=flags.patch_size)
print('Loaded')

# gets placeholders for patches and labels
patches_pl, labels_pl = utils.placeholder_inputs()

with tf.Session() as sess:
  # build graph and restore model
  print('Restoring model...')
  net = models.FCN(patches_pl, training=False)
  utils.restore_model(sess, flags.model_dir_path)
  print('Done')

  # compute statistics
  f_score = None
  tdr = None
  fdr = None

  if flags.post == 'traditional':
    print('Generating proposals for test set...')
    pores, proposals = generate_proposals(sess, net.predictions,
def restore_description():
  # create network graph
  inputs, _ = utils.placeholder_inputs()
  net = description.Net(inputs)

  # save random weights and keep them
  # in program's memory for comparison
  vars_ = []
  saver = tf.train.Saver()
  with tf.Session() as sess:
    # initialize variables
    sess.run(tf.global_variables_initializer())

    # assign random values to variables
    # and save those values for comparison
    for var in sorted(tf.global_variables(), key=lambda x: x.name):
      # create random values for variable
      var_val = np.random.random(var.shape)

      # save for later comparison
      vars_.append(var_val)

      # assign it to tf var
      assign = tf.assign(var, var_val)
      sess.run(assign)

    # save initialized model
    saver.save(sess, '/tmp/description/model.ckpt', global_step=0)

  # create new session to restore saved weights
  with tf.Session() as sess:
    # make new initialization of weights
    sess.run(tf.global_variables_initializer())

    # assert weights are different
    i = 0
    for var in sorted(tf.global_variables(), key=lambda x: x.name):
      # get new var val
      var_val = sess.run(var)

      # compare with old one
      assert not np.isclose(np.sum(np.abs(var_val - vars_[i])), 0)

      i += 1

    # restore model
    utils.restore_model(sess, '/tmp/description')

    # check if weights are equal
    i = 0
    for var in sorted(tf.global_variables(), key=lambda x: x.name):
      # get new var val
      var_val = sess.run(var)

      # compare with old one: every element must match the saved value
      if not np.all(np.isclose(var_val, vars_[i])):
        print(np.isclose(var_val, vars_[i]))
        print('Failed to load variable "{}"'.format(var.name))
        return False

      i += 1

  return True
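A minimal, hypothetical way to invoke the restore self-test above as a script; the __main__ guard is not part of the original snippet.

# Hypothetical driver for the self-test above; exits non-zero on failure.
if __name__ == '__main__':
  import sys
  sys.exit(0 if restore_description() else 1)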
def run_training():
  if not os.path.exists(model_save_dir):
    os.makedirs(model_save_dir)
  use_pretrained_model = True
  model_filename = "./sports1m_finetuning_ucf101.model"

  with tf.Graph().as_default():
    global_step = tf.get_variable(
        'global_step', [],
        initializer=tf.constant_initializer(0),
        trainable=False)

    with tf.variable_scope('var_name') as var_scope:
      weights = {
          'wc1': _variable_with_weight_decay('wc1', [3, 3, 3, 3, 64], 0.005),
          'wc2': _variable_with_weight_decay('wc2', [3, 3, 3, 64, 128], 0.005),
          'wc3a': _variable_with_weight_decay('wc3a', [3, 3, 3, 128, 256], 0.005),
          'wc3b': _variable_with_weight_decay('wc3b', [3, 3, 3, 256, 256], 0.005),
          'wc4a': _variable_with_weight_decay('wc4a', [3, 3, 3, 256, 512], 0.005),
          'wc4b': _variable_with_weight_decay('wc4b', [3, 3, 3, 512, 512], 0.005),
          'wc5a': _variable_with_weight_decay('wc5a', [3, 3, 3, 512, 512], 0.005),
          'wc5b': _variable_with_weight_decay('wc5b', [3, 3, 3, 512, 512], 0.005),
          #'wd1': _variable_with_weight_decay('wd1', [8192, 4096], 0.005),
          #'wd2': _variable_with_weight_decay('wd2', [4096, 4096], 0.005),
          #'out': _variable_with_weight_decay('wout', [4096, c3d_model.NUM_CLASSES], 0.005)
      }
      biases = {
          'bc1': _variable_with_weight_decay('bc1', [64], 0.000),
          'bc2': _variable_with_weight_decay('bc2', [128], 0.000),
          'bc3a': _variable_with_weight_decay('bc3a', [256], 0.000),
          'bc3b': _variable_with_weight_decay('bc3b', [256], 0.000),
          'bc4a': _variable_with_weight_decay('bc4a', [512], 0.000),
          'bc4b': _variable_with_weight_decay('bc4b', [512], 0.000),
          'bc5a': _variable_with_weight_decay('bc5a', [512], 0.000),
          'bc5b': _variable_with_weight_decay('bc5b', [512], 0.000),
          #'bd1': _variable_with_weight_decay('bd1', [4096], 0.000),
          #'bd2': _variable_with_weight_decay('bd2', [4096], 0.000),
          #'out': _variable_with_weight_decay('bout', [c3d_model.NUM_CLASSES], 0.000),
      }
      fcn_weights = {
          'wconv6': _variable_with_weight_decay('conv6', [1, 4, 4, 512, 512], 0.005),
          'wconv7': _variable_with_weight_decay('conv7', [1, 7, 7, 512, 512], 0.005),
          'wup6': _variable_with_weight_decay('up6', [2, 1, 1, 4096, 512], 0.005),
          'wup7': _variable_with_weight_decay('up7', [2, 1, 1, 4096, 4096], 0.005),
          'wup8': _variable_with_weight_decay('up8', [2, 1, 1, fcn_model.NUM_CLASSES, 4096], 0.005),
      }
      fcn_biases = {
          'bconv6': _variable_with_weight_decay('bconv6', [512], 0.000),
          'bconv7': _variable_with_weight_decay('bconv7', [512], 0.000),
          'bup6': _variable_with_weight_decay('bup6', [4096], 0.000),
          'bup7': _variable_with_weight_decay('bup7', [4096], 0.000),
          'bup8': _variable_with_weight_decay('bup8', [fcn_model.NUM_CLASSES], 0.000),
      }

    with tf.name_scope('inputs'):
      images_placeholder, labels_placeholder, keep_pro = placeholder_inputs(
          FLAGS.batch_size)

    varlist1 = list(set(fcn_weights.values() + fcn_biases.values()))
    varlist2 = list(set(weights.values() + biases.values()))

    feature_map = c3d_model.inference_c3d(images_placeholder, keep_pro,
                                          FLAGS.batch_size, weights, biases)
    logit = fcn_model.inference_pool54(feature_map, keep_pro, FLAGS.batch_size,
                                       fcn_weights, fcn_biases)
    loss = fcn_model_loss(logit, labels_placeholder, FLAGS.batch_size)
    SGD_cdc = tf.train.GradientDescentOptimizer(1e-4).minimize(
        loss, var_list=varlist1)
    SGD_c3d = tf.train.GradientDescentOptimizer(1e-5).minimize(
        loss, var_list=varlist2)
    accuracy = tower_acc(logit, labels_placeholder, FLAGS.batch_size)
    tf.summary.scalar('accuracy', accuracy)

    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY)
    variables_averages_op = variable_averages.apply(tf.trainable_variables())
    train_op = tf.group(SGD_cdc, SGD_c3d, variables_averages_op)
    null_op = tf.no_op()

    # Create a saver for writing training checkpoints.
    saver = tf.train.Saver(weights.values() + biases.values())
    new_saver = tf.train.Saver(weights.values() + biases.values() +
                               fcn_weights.values() + fcn_biases.values())
    init = tf.global_variables_initializer()

    # Create a session for running Ops on the Graph.
    sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
    sess.run(init)
    merged = tf.summary.merge_all()

    if os.path.isfile(model_filename) and use_pretrained_model:
      print('loading pretrained_model....')
      saver.restore(sess, model_filename)
      print('complete!')

    # Create summary writer
    train_writer = tf.summary.FileWriter(
        './visual_logs/SGD_pool54_visual_logs/train', sess.graph)
    test_writer = tf.summary.FileWriter(
        './visual_logs/SGD_pool54_visual_logs/test', sess.graph)

    video_list = []
    position = -1
    for step in xrange(FLAGS.max_steps + 1):
      start_time = time.time()
      train_images, train_labels, _, _, video_list, position = input_train_data.read_clip_and_label(
          filename='annotation/train.list',
          batch_size=FLAGS.batch_size,
          start_pos=position,
          num_frames_per_clip=c3d_model.NUM_FRAMES_PER_CLIP,
          crop_size=c3d_model.CROP_SIZE,
          video_list=video_list)
      sess.run(train_op,
               feed_dict={
                   images_placeholder: train_images,
                   labels_placeholder: train_labels,
                   keep_pro: 0.5
               })
      duration = time.time() - start_time
      print('Batchnum %d: %.3f sec' % (step, duration))

      if step % 2 == 0 or (step + 1) == FLAGS.max_steps:
        print('Step %d/%d: %.3f sec' % (step, FLAGS.max_steps, duration))
        print('Training Data Eval:')
        summary, loss_train, acc = sess.run(
            [merged, loss, accuracy],
            feed_dict={
                images_placeholder: train_images,
                labels_placeholder: train_labels,
                keep_pro: 1
            })
        print('loss: %f' % np.mean(loss_train))
        print('accuracy: ' + '{:.5f}'.format(acc))
        train_writer.add_summary(summary, step)

      if step % 10 == 0 or (step + 1) == FLAGS.max_steps:
        print('Validation Data Eval:')
        val_images, val_labels, _, _, _, _ = input_train_data.read_clip_and_label(
            filename='annotation/test.list',
            batch_size=FLAGS.batch_size,
            start_pos=-1,
            num_frames_per_clip=c3d_model.NUM_FRAMES_PER_CLIP,
            crop_size=c3d_model.CROP_SIZE,
            video_list=[])
        summary, loss_val, acc = sess.run(
            [merged, loss, accuracy],
            feed_dict={
                images_placeholder: val_images,
                labels_placeholder: val_labels,
                keep_pro: 1
            })
        print('loss: %f' % np.mean(loss_val))
        print('accuracy: ' + '{:.5f}'.format(acc))
        test_writer.add_summary(summary, step)

      # Save the model checkpoint periodically.
      if step > 1 and step % 200 == 0:
        checkpoint_path = os.path.join('./models/SGD_pool54', 'model.ckpt')
        new_saver.save(sess, checkpoint_path, global_step=global_step)

  print("done")
def evaluate_last():
  """Loads the model and runs evaluation."""
  with tf.Graph().as_default():
    # Get images and labels for CIFAR-10.
    model_dir = os.path.join(FLAGS.model_dir, FLAGS.name)
    eval_data = FLAGS.eval_data == 'test'
    images, labels = data_input.inputs(eval_data=eval_data,
                                       data_dir=FLAGS.data_dir,
                                       batch_size=FLAGS.batch_size)
    # images, labels = data_input.distorted_inputs(eval_data=eval_data,
    #                                              data_dir=FLAGS.data_dir,
    #                                              batch_size=FLAGS.batch_size)

    # Generate placeholders for the images and labels.
    keep_prob = utils.placeholder_inputs(FLAGS.batch_size)

    # Build a Graph that computes predictions from the inference model.
    logits = model.inference(images, keep_prob)

    # Add to the Graph the Ops for loss calculation.
    loss = model.loss(logits, labels)

    # Calculate predictions.
    top_k_op = tf.nn.in_top_k(logits, labels, 1)

    # Add the Op to compare the logits to the labels during evaluation.
    eval_correct = model.evaluation(logits, labels)

    # Build the summary operation based on the TF collection of Summaries.
    summary_op = tf.merge_all_summaries()

    # Create a saver for writing training checkpoints.
    saver = tf.train.Saver()

    # Create a session for running Ops on the Graph.
    sess = tf.Session()

    # Restore the moving average version of the learned variables for eval.
    # variable_averages = tf.train.ExponentialMovingAverage(
    #     cifar10.MOVING_AVERAGE_DECAY)
    # variables_to_restore = variable_averages.variables_to_restore()
    # saver = tf.train.Saver(variables_to_restore)

    # Build the summary operation based on the TF collection of Summaries.
    # summary_op = tf.merge_all_summaries()
    # graph_def = tf.get_default_graph().as_graph_def()
    # summary_writer = tf.train.SummaryWriter(FLAGS.eval_dir,
    #                                         graph_def=graph_def)

    # Run the Op to initialize the variables.
    init = tf.initialize_all_variables()
    sess.run(init)

    # Start the queue runners.
    tf.train.start_queue_runners(sess=sess)

    print(model_dir)
    ckpt = tf.train.get_checkpoint_state(model_dir)
    if ckpt and ckpt.model_checkpoint_path:
      saver.restore(sess, ckpt.model_checkpoint_path)
    else:
      print("No checkpoints found!")
      exit(1)

    print("Doing Evaluation with lots of data")
    utils.do_eval(sess=sess,
                  eval_correct=eval_correct,
                  keep_prob=keep_prob,
                  num_examples=data_input.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL)
def run_training(): """Train model for a number of steps.""" # Get the sets of images and labels for training, validation, and # test on MNIST. # Tell TensorFlow that the model will be built into the default Graph. train_dir = os.path.join(FLAGS.model_dir,FLAGS.name) with tf.Graph().as_default(): global_step = tf.Variable(0, trainable=False) with tf.name_scope('Input'): image_batch, label_batch = data_input.distorted_inputs(FLAGS.data_dir, FLAGS.batch_size) # Generate placeholders for the images and labels. keep_prob = utils.placeholder_inputs(FLAGS.batch_size) # Build a Graph that computes predictions from the inference model. logits = model.inference(image_batch, keep_prob) # Add to the Graph the Ops for loss calculation. loss = model.loss(logits, label_batch) # Add to the Graph the Ops that calculate and apply gradients. train_op = model.training(loss, global_step=global_step, learning_rate=FLAGS.learning_rate) # Add the Op to compare the logits to the labels during evaluation. eval_correct = model.evaluation(logits, label_batch) # Build the summary operation based on the TF collection of Summaries. summary_op = tf.merge_all_summaries() # Create a saver for writing training checkpoints. saver = tf.train.Saver() # Create a session for running Ops on the Graph. sess = tf.Session() # Run the Op to initialize the variables. init = tf.initialize_all_variables() sess.run(init) # Start the queue runners. tf.train.start_queue_runners(sess=sess) # Instantiate a SummaryWriter to output summaries and the Graph. summary_writer = tf.train.SummaryWriter(train_dir, graph_def=sess.graph_def) # And then after everything is built, start the training loop. for step in xrange(FLAGS.max_steps): start_time = time.time() # Fill a feed dictionary with the actual set of images and labels # for this particular training step. feed_dict = utils.fill_feed_dict(keep_prob, train = True) # Run one step of the model. The return values are the activations # from the `train_op` (which is discarded) and the `loss` Op. To # inspect the values of your Ops or variables, you may include them # in the list passed to sess.run() and the value tensors will be # returned in the tuple from the call. _, loss_value = sess.run([train_op, loss], feed_dict=feed_dict) # Write the summaries and print an overview fairly often. if step % 100 == 0: # Print status to stdout. duration = time.time() - start_time examples_per_sec = FLAGS.batch_size / duration sec_per_batch = float(duration) print('Step %d: loss = %.2f ( %.3f sec (per Batch); %.1f examples/sec;)' % (step, loss_value, sec_per_batch, examples_per_sec)) # Update the events file. summary_str = sess.run(summary_op, feed_dict=feed_dict) summary_writer.add_summary(summary_str, step) # Save a checkpoint and evaluate the model periodically. if (step + 1) % 1000 == 0 or (step + 1) == FLAGS.max_steps: checkpoint_path = os.path.join(train_dir, 'model.ckpt') saver.save(sess, checkpoint_path , global_step=step) # Evaluate against the training set. if (step + 1) % 10000 == 0 or (step + 1) == FLAGS.max_steps: print('Training Data Eval:') utils.do_eval(sess, eval_correct, keep_prob, data_input.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN)
def train(dataset, log_dir):
  with tf.Graph().as_default():
    # gets placeholders for patches and labels
    patches_pl, labels_pl = utils.placeholder_inputs()

    # build train related ops
    net = models.FCN(patches_pl, FLAGS.dropout)
    net.build_loss(labels_pl)
    net.build_train(FLAGS.learning_rate)

    # builds validation inference graph
    val_net = models.FCN(patches_pl, training=False, reuse=True)

    # add summary to plot loss, f score, tdr and fdr
    f_score_pl = tf.placeholder(tf.float32, shape=())
    tdr_pl = tf.placeholder(tf.float32, shape=())
    fdr_pl = tf.placeholder(tf.float32, shape=())
    scores_summary_op = tf.summary.merge([
        tf.summary.scalar('f_score', f_score_pl),
        tf.summary.scalar('tdr', tdr_pl),
        tf.summary.scalar('fdr', fdr_pl)
    ])
    loss_summary_op = tf.summary.scalar('loss', net.loss)

    # add variable initialization to graph
    init = tf.global_variables_initializer()

    # early stopping vars
    best_f_score = 0
    faults = 0
    saver = tf.train.Saver()
    with tf.Session() as sess:
      summary_writer = tf.summary.FileWriter(log_dir, sess.graph)
      sess.run(init)

      for step in range(1, FLAGS.steps + 1):
        feed_dict = utils.fill_feed_dict(dataset.train, patches_pl, labels_pl,
                                         FLAGS.batch_size)
        _, loss_value = sess.run([net.train, net.loss], feed_dict=feed_dict)

        # write loss summary periodically
        if step % 100 == 0:
          print('Step {}: loss = {}'.format(step, loss_value))
          summary_str = sess.run(loss_summary_op, feed_dict=feed_dict)
          summary_writer.add_summary(summary_str, step)

        # evaluate the model periodically
        if step % 1000 == 0:
          print('Evaluation:')
          f_score, fdr, tdr = validate.by_patches(sess, val_net.predictions,
                                                  FLAGS.batch_size, patches_pl,
                                                  labels_pl, dataset.val)
          print('TDR = {}'.format(tdr))
          print('FDR = {}'.format(fdr))
          print('F score = {}'.format(f_score))

          # early stopping
          if f_score > best_f_score:
            best_f_score = f_score
            saver.save(sess,
                       os.path.join(log_dir, 'model.ckpt'),
                       global_step=step)
            faults = 0
          else:
            faults += 1
            if faults >= FLAGS.tolerance:
              print('Training stopped early')
              break

          # write f score, tdr and fdr to summary
          scores_summary = sess.run(scores_summary_op,
                                    feed_dict={
                                        f_score_pl: f_score,
                                        tdr_pl: tdr,
                                        fdr_pl: fdr
                                    })
          summary_writer.add_summary(scores_summary, global_step=step)

  print('Finished')
  print('best F score = {}'.format(best_f_score))
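A minimal sketch of how this train function might be driven, mirroring the PolyU-HRF ground-truth dataset loading shown in other snippets in this listing; the flag values and the 'log/detection' directory are assumptions, not part of the original code.

# Hypothetical driver: build the pore ground-truth dataset and train the FCN.
polyu_path = os.path.join(FLAGS.polyu_dir_path, 'GroundTruth',
                          'PoreGroundTruth')
dataset = polyu.Dataset(
    os.path.join(polyu_path, 'PoreGroundTruthSampleimage'),
    os.path.join(polyu_path, 'PoreGroundTruthMarked'),
    split=(15, 5, 10),
    patch_size=FLAGS.patch_size)
train(dataset, 'log/detection')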
def train(dataset, log_dir):
  with tf.Graph().as_default():
    # gets placeholders for images and labels
    images_pl, labels_pl = utils.placeholder_inputs()

    # build net graph
    net = description.Net(images_pl, FLAGS.dropout)

    # build training related ops
    net.build_loss(labels_pl, FLAGS.weight_decay)
    net.build_train(FLAGS.learning_rate)

    # builds validation graph
    val_net = description.Net(images_pl, training=False, reuse=True)

    # add summary to plot loss and rank
    eer_pl = tf.placeholder(tf.float32, shape=(), name='eer_pl')
    loss_pl = tf.placeholder(tf.float32, shape=(), name='loss_pl')
    eer_summary_op = tf.summary.scalar('eer', eer_pl)
    loss_summary_op = tf.summary.scalar('loss', loss_pl)

    # early stopping vars
    best_eer = 1
    faults = 0
    saver = tf.train.Saver()
    with tf.Session() as sess:
      # initialize summary and variables
      summary_writer = tf.summary.FileWriter(log_dir, sess.graph)
      sess.run(tf.global_variables_initializer())

      # 'compute_descriptors' function for validation
      compute_descriptors = lambda img, pts: utils.trained_descriptors(
          img,
          pts,
          patch_size=dataset.train.images_shape[1],
          session=sess,
          imgs_pl=images_pl,
          descs_op=val_net.descriptors)

      # train loop
      for step in range(1, FLAGS.steps + 1):
        # fill feed dict
        feed_dict = utils.fill_feed_dict(dataset.train, images_pl, labels_pl,
                                         FLAGS.batch_size, FLAGS.augment)

        # train step
        loss_value, _ = sess.run([net.loss, net.train], feed_dict=feed_dict)

        # write loss summary periodically
        if step % 100 == 0:
          print('Step {}: loss = {}'.format(step, loss_value))

          # summarize loss
          loss_summary = sess.run(loss_summary_op,
                                  feed_dict={loss_pl: loss_value})
          summary_writer.add_summary(loss_summary, step)

        # evaluate model periodically
        if step % 500 == 0 and dataset.val is not None:
          print('Validation:')
          eer = validate.matching.validation_eer(dataset.val,
                                                 compute_descriptors)
          print('EER = {}'.format(eer))

          # summarize eer
          eer_summary = sess.run(eer_summary_op, feed_dict={eer_pl: eer})
          summary_writer.add_summary(eer_summary, global_step=step)

          # early stopping
          if eer < best_eer:
            # update early stopping vars
            best_eer = eer
            faults = 0
            saver.save(sess,
                       os.path.join(log_dir, 'model.ckpt'),
                       global_step=step)
          else:
            faults += 1
            if faults >= FLAGS.tolerance:
              print('Training stopped early')
              break

      # if no validation set, save model when training completes
      if dataset.val is None:
        saver.save(sess, os.path.join(log_dir, 'model.ckpt'))

  print('Finished')
  print('best EER = {}'.format(best_eer))
def main():
  # parse descriptor and adjust accordingly
  compute_descriptors = None
  if FLAGS.descriptors == 'sift':
    compute_descriptors = utils.sift_descriptors
  elif FLAGS.descriptors == 'dp':
    if FLAGS.patch_size is None:
      raise TypeError('Patch size is required when using dp descriptor')

    compute_descriptors = lambda img, pts: utils.dp_descriptors(
        img, pts, FLAGS.patch_size)
  else:
    if FLAGS.model_dir_path is None:
      raise TypeError(
          'Trained model path is required when using trained descriptor')
    if FLAGS.patch_size is None:
      raise TypeError('Patch size is required when using trained descriptor')

    # create net graph and restore saved model
    from models import description

    img_pl, _ = utils.placeholder_inputs()
    net = description.Net(img_pl, training=False)
    sess = tf.Session()
    print('Restoring model in {}...'.format(FLAGS.model_dir_path))
    utils.restore_model(sess, FLAGS.model_dir_path)
    print('Done')

    compute_descriptors = lambda img, pts: utils.trained_descriptors(
        img, pts, FLAGS.patch_size, sess, img_pl, net.descriptors)

  # parse matching mode and adjust accordingly
  if FLAGS.mode == 'basic':
    match = matching.basic
  else:
    match = matching.spatial

  # make dir path be full appropriate dir path
  imgs_dir_path = None
  pts_dir_path = None
  subject_ids = None
  register_ids = None
  session_ids = None
  if FLAGS.fold == 'DBI-train':
    # adjust paths for appropriate fold
    imgs_dir_path = os.path.join(FLAGS.polyu_dir_path, 'DBI', 'Training')
    pts_dir_path = os.path.join(FLAGS.pts_dir_path, 'DBI', 'Training')

    # adjust ids for appropriate fold
    subject_ids = [
        6, 9, 11, 13, 16, 18, 34, 41, 42, 47, 62, 67, 118, 186, 187, 188, 196,
        198, 202, 207, 223, 225, 226, 228, 242, 271, 272, 278, 287, 293, 297,
        307, 311, 321, 323
    ]
    register_ids = [1, 2, 3]
    session_ids = [1, 2]
  else:
    # adjust paths for appropriate fold
    if FLAGS.fold == 'DBI-test':
      imgs_dir_path = os.path.join(FLAGS.polyu_dir_path, 'DBI', 'Test')
      pts_dir_path = os.path.join(FLAGS.pts_dir_path, 'DBI', 'Test')
    else:
      imgs_dir_path = os.path.join(FLAGS.polyu_dir_path, 'DBII')
      pts_dir_path = os.path.join(FLAGS.pts_dir_path, 'DBII')

    # adjust ids for appropriate fold
    subject_ids = [
        1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20,
        21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38,
        39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56,
        57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74,
        75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92,
        93, 94, 95, 96, 97, 98, 99, 100, 105, 106, 107, 108, 109, 110, 111,
        112, 113, 114, 115, 116, 117, 118, 119, 120, 125, 126, 127, 128, 129,
        130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143,
        144, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168
    ]
    register_ids = [1, 2, 3, 4, 5]
    session_ids = [1, 2]

  # load images, points, compute descriptors and make indices correspondences
  print('Loading images and detections, and computing descriptors...')
  all_descs, all_pts, id2index = load_dataset(imgs_dir_path, pts_dir_path,
                                              subject_ids, session_ids,
                                              register_ids,
                                              compute_descriptors)
  print('Done')

  print('Matching...')
  pos, neg = polyu_match(all_descs,
                         all_pts,
                         subject_ids,
                         register_ids,
                         id2index,
                         match,
                         thr=FLAGS.thr)
  print('Done')

  # print equal error rate
  print('EER = {}'.format(utils.eer(pos, neg)))

  # save results to file
  if FLAGS.results_path is not None:
    print('Saving results to file {}...'.format(FLAGS.results_path))

    # create directory tree, if non-existing
    dirname = os.path.dirname(FLAGS.results_path)
    dirname = os.path.abspath(dirname)
    if not os.path.exists(dirname):
      os.makedirs(dirname)

    # save comparisons
    with open(FLAGS.results_path, 'w') as f:
      # save same subject scores
      for score in pos:
        print(1, score, file=f)

      # save different subject scores
      for score in neg:
        print(0, score, file=f)

    # save invoking command string
    with open(FLAGS.results_path + '.cmd', 'w') as f:
      print(*sys.argv, file=f)

    print('Done')
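The results file written above has one "label score" pair per line, with label 1 for same-subject comparisons and 0 for different-subject comparisons. A small sketch for reading it back, assuming only that format; load_results is a hypothetical helper, not part of the original code.

# Reads a results file written above back into genuine/impostor score lists.
def load_results(path):
  pos, neg = [], []
  with open(path) as f:
    for line in f:
      label, score = line.split()
      (pos if label == '1' else neg).append(float(score))
  return pos, neg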
def evaluate_last():
  """Loads the model and runs evaluation."""
  with tf.Graph().as_default():
    # Get images and labels for CIFAR-10.
    model_dir = os.path.join(FLAGS.model_dir, FLAGS.name)
    eval_data = FLAGS.eval_data == "test"
    images, labels = data_input.inputs(eval_data=eval_data,
                                       data_dir=FLAGS.data_dir,
                                       batch_size=FLAGS.batch_size)
    # images, labels = data_input.distorted_inputs(eval_data=eval_data,
    #                                              data_dir=FLAGS.data_dir,
    #                                              batch_size=FLAGS.batch_size)

    # Generate placeholders for the images and labels.
    keep_prob = utils.placeholder_inputs(FLAGS.batch_size)

    # Build a Graph that computes predictions from the inference model.
    logits = model.inference(images, keep_prob)

    # Add to the Graph the Ops for loss calculation.
    loss = model.loss(logits, labels)

    # Calculate predictions.
    top_k_op = tf.nn.in_top_k(logits, labels, 1)

    # Add the Op to compare the logits to the labels during evaluation.
    eval_correct = model.evaluation(logits, labels)

    # Build the summary operation based on the TF collection of Summaries.
    summary_op = tf.merge_all_summaries()

    # Create a saver for writing training checkpoints.
    saver = tf.train.Saver()

    # Create a session for running Ops on the Graph.
    sess = tf.Session()

    # Restore the moving average version of the learned variables for eval.
    # variable_averages = tf.train.ExponentialMovingAverage(
    #     cifar10.MOVING_AVERAGE_DECAY)
    # variables_to_restore = variable_averages.variables_to_restore()
    # saver = tf.train.Saver(variables_to_restore)

    # Build the summary operation based on the TF collection of Summaries.
    # summary_op = tf.merge_all_summaries()
    # graph_def = tf.get_default_graph().as_graph_def()
    # summary_writer = tf.train.SummaryWriter(FLAGS.eval_dir,
    #                                         graph_def=graph_def)

    # Run the Op to initialize the variables.
    init = tf.initialize_all_variables()
    sess.run(init)

    # Start the queue runners.
    tf.train.start_queue_runners(sess=sess)

    print(model_dir)
    ckpt = tf.train.get_checkpoint_state(model_dir)
    if ckpt and ckpt.model_checkpoint_path:
      saver.restore(sess, ckpt.model_checkpoint_path)
    else:
      print("No checkpoints found!")
      exit(1)

    print("Doing Evaluation with lots of data")
    utils.do_eval(
        sess=sess,
        eval_correct=eval_correct,
        keep_prob=keep_prob,
        num_examples=data_input.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL,
    )
def run_testing():
  with tf.Graph().as_default():
    global_step = tf.get_variable('global_step', [],
                                  initializer=tf.constant_initializer(0),
                                  trainable=False)

    with tf.variable_scope('var_name') as var_scope:
      weights = {
          'wc1': _variable_with_weight_decay('wc1', [3, 3, 3, 3, 64], 0.005),
          'wc2': _variable_with_weight_decay('wc2', [3, 3, 3, 64, 128], 0.005),
          'wc3a': _variable_with_weight_decay('wc3a', [3, 3, 3, 128, 256], 0.005),
          'wc3b': _variable_with_weight_decay('wc3b', [3, 3, 3, 256, 256], 0.005),
          'wc4a': _variable_with_weight_decay('wc4a', [3, 3, 3, 256, 512], 0.005),
          'wc4b': _variable_with_weight_decay('wc4b', [3, 3, 3, 512, 512], 0.005),
          'wc5a': _variable_with_weight_decay('wc5a', [3, 3, 3, 512, 512], 0.005),
          'wc5b': _variable_with_weight_decay('wc5b', [3, 3, 3, 512, 512], 0.005),
          #'wd1': _variable_with_weight_decay('wd1', [8192, 4096], 0.005),
          #'wd2': _variable_with_weight_decay('wd2', [4096, 4096], 0.005),
          #'out': _variable_with_weight_decay('wout', [4096, c3d_model.NUM_CLASSES], 0.005)
      }
      biases = {
          'bc1': _variable_with_weight_decay('bc1', [64], 0.000),
          'bc2': _variable_with_weight_decay('bc2', [128], 0.000),
          'bc3a': _variable_with_weight_decay('bc3a', [256], 0.000),
          'bc3b': _variable_with_weight_decay('bc3b', [256], 0.000),
          'bc4a': _variable_with_weight_decay('bc4a', [512], 0.000),
          'bc4b': _variable_with_weight_decay('bc4b', [512], 0.000),
          'bc5a': _variable_with_weight_decay('bc5a', [512], 0.000),
          'bc5b': _variable_with_weight_decay('bc5b', [512], 0.000),
          #'bd1': _variable_with_weight_decay('bd1', [4096], 0.000),
          #'bd2': _variable_with_weight_decay('bd2', [4096], 0.000),
          #'out': _variable_with_weight_decay('bout', [c3d_model.NUM_CLASSES], 0.000),
      }
      fcn_weights = {
          'wconv6': _variable_with_weight_decay('conv6', [1, 4, 4, 512, 512], 0.005),
          'wup6': _variable_with_weight_decay('up6', [2, 1, 1, 4096, 512], 0.005),
          'wup7': _variable_with_weight_decay('up7', [2, 1, 1, 4096, 4096], 0.005),
          'wup8': _variable_with_weight_decay('up8', [2, 1, 1, fcn_model.NUM_CLASSES, 4096], 0.005),
      }
      fcn_biases = {
          'bconv6': _variable_with_weight_decay('bconv6', [512], 0.000),
          'bup6': _variable_with_weight_decay('bup6', [4096], 0.000),
          'bup7': _variable_with_weight_decay('bup7', [4096], 0.000),
          'bup8': _variable_with_weight_decay('bup8', [fcn_model.NUM_CLASSES], 0.000),
      }

    with tf.name_scope('inputs'):
      images_placeholder, labels_placeholder, keep_pro = placeholder_inputs(
          FLAGS.batch_size)

    feature_map = c3d_model.inference_c3d(images_placeholder, keep_pro,
                                          FLAGS.batch_size, weights, biases)
    logit = fcn_model.inference_fcn5(feature_map, keep_pro, FLAGS.batch_size,
                                     fcn_weights, fcn_biases)
    loss = fcn_model_loss(logit, labels_placeholder, FLAGS.batch_size)
    accuracy = tower_acc(logit, labels_placeholder, FLAGS.batch_size)
    predictions = tf.nn.top_k(logit, 1)

    # Create a saver for writing training checkpoints.
    new_saver = tf.train.Saver(weights.values() + biases.values() +
                               fcn_weights.values() + fcn_biases.values())
    init = tf.global_variables_initializer()

    # Create a session for running Ops on the Graph.
    sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
    sess.run(init)

    ckpt = tf.train.get_checkpoint_state(pre_model_save_dir)
    if ckpt and ckpt.model_checkpoint_path:
      print('loading checkpoint, waiting......')
      new_saver.restore(sess, ckpt.model_checkpoint_path)
      print('load complete!')

    if FLAGS.output_to_file:
      # all output will be stored in 'test.txt'
      print('outputs will be stored in test.txt')
      sys.stdout = open('test.txt', 'a', 1)

    predict_list = []
    label_list = []
    for i in xrange(3358):
      start_time = time.time()
      test_images, test_labels, _, _, _, _ = input_test_data.read_clip_and_label(
          filename='annotation/test.list',
          batch_size=1,
          start_pos=-1,
          num_frames_per_clip=c3d_model.NUM_FRAMES_PER_CLIP,
          crop_size=c3d_model.CROP_SIZE,
          video_list=[])
      acc, predict = sess.run([accuracy, predictions],
                              feed_dict={
                                  images_placeholder: test_images,
                                  labels_placeholder: test_labels,
                                  keep_pro: 1
                              })
      print('acc: {}'.format(acc))
      print('predict: {}'.format(np.reshape(predict[1], [32])))
      predict_list.append(np.reshape(predict[1], [32]))
      print('labels: {}'.format(np.reshape(test_labels, [32])))
      label_list.append(np.reshape(test_labels, [32]))

    np.save('./test/predict', predict_list)
    np.save('./test/label', label_list)
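The test loop above saves the per-clip top-1 predictions and labels as .npy files. A hedged sketch for recomputing overall accuracy from those files; it assumes only the './test/predict.npy' and './test/label.npy' paths written above and compares the flattened arrays element-wise.

# Recomputes the fraction of matching prediction/label entries saved by
# run_testing(); paths follow the np.save calls above.
import numpy as np

predict = np.load('./test/predict.npy').reshape(-1)
label = np.load('./test/label.npy').reshape(-1)
print('accuracy over saved entries: {:.5f}'.format(np.mean(predict == label)))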
tf.set_random_seed(flags.seed)
np.random.seed(flags.seed)

# load polyu dataset
print('Loading PolyU-HRF dataset...')
polyu_path = os.path.join(flags.polyu_dir_path, 'GroundTruth',
                          'PoreGroundTruth')
dataset = polyu.Dataset(
    os.path.join(polyu_path, 'PoreGroundTruthSampleimage'),
    os.path.join(polyu_path, 'PoreGroundTruthMarked'),
    split=(15, 5, 10),
    patch_size=flags.patch_size)
print('Loaded')

# gets placeholders for patches and labels
patches_pl, _ = utils.placeholder_inputs()

with tf.Session() as sess:
  # build graph and restore model
  print('Restoring model...')
  net = models.CNN(patches_pl)
  utils.restore_model(sess, flags.model_dir_path)
  print('Done')

  # compute statistics
  f_score = None
  tdr = None
  fdr = None

  print('Generating proposals for test set...')
  pores, proposals = generate_proposals(sess, net.predictions, patches_pl,
                                        dataset.test)