import os
import sys

import numpy as np
import tensorflow as tf

import detection
import utils
import validate.detection
from validate.detection import by_images

# FLAGS is assumed to be defined elsewhere in the original scripts
# (e.g., via tf.app.flags); it is referenced throughout this listing


def detect_pores(imgs):
    with tf.Graph().as_default():
        # placeholder for image
        image_pl, _ = utils.placeholder_inputs()

        # build detection net
        print('Building detection net graph...')
        det_net = detection.Net(image_pl, training=False)
        print('Done')

        with tf.Session() as sess:
            print('Restoring detection model in {}...'.format(
                FLAGS.det_model_dir))
            utils.restore_model(sess, FLAGS.det_model_dir)
            print('Done')

            # capture detection arguments in a closure; named
            # detect_in_image to avoid shadowing the enclosing function
            def detect_in_image(image):
                return utils.detect_pores(image, image_pl, det_net.predictions,
                                          FLAGS.det_patch_size,
                                          FLAGS.det_prob_thr,
                                          FLAGS.nms_inter_thr, sess)

            # detect pores in every image
            dets = [detect_in_image(img) for img in imgs]

    return dets
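

# A minimal usage sketch for the detect_pores function above; the
# cv2-based loading is an assumption, and any preprocessing the model
# expects (e.g., scaling intensities to [0, 1]) is not shown here.
def example_detect_pores(image_paths):
    import cv2
    imgs = [cv2.imread(path, cv2.IMREAD_GRAYSCALE) for path in image_paths]
    return detect_pores(imgs)
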
def main():
    half_patch_size = FLAGS.patch_size // 2

    with tf.Graph().as_default():
        image_pl, _ = utils.placeholder_inputs()

        print('Building graph...')
        net = detection.Net(image_pl, training=False)
        print('Done')

        with tf.Session() as sess:
            print('Restoring model in {}...'.format(FLAGS.model_dir_path))
            utils.restore_model(sess, FLAGS.model_dir_path)
            print('Done')

            # capture arguments in a closure
            def detect_in_image(image):
                return utils.detect_pores(image, image_pl, net.predictions,
                                          half_patch_size, FLAGS.prob_thr,
                                          FLAGS.inter_thr, sess)

            # batch detect in dbi training
            print('Detecting pores in PolyU-HRF DBI Training images...')
            load_path = os.path.join(FLAGS.polyu_dir_path, 'DBI', 'Training')
            save_path = os.path.join(FLAGS.results_dir_path, 'DBI', 'Training')
            batch_detect(load_path, save_path, detect_in_image)
            print('Done')

            # batch detect in dbi test
            print('Detecting pores in PolyU-HRF DBI Test images...')
            load_path = os.path.join(FLAGS.polyu_dir_path, 'DBI', 'Test')
            save_path = os.path.join(FLAGS.results_dir_path, 'DBI', 'Test')
            batch_detect(load_path, save_path, detect_in_image)
            print('Done')

            # batch detect in dbii
            print('Detecting pores in PolyU-HRF DBII images...')
            load_path = os.path.join(FLAGS.polyu_dir_path, 'DBII')
            save_path = os.path.join(FLAGS.results_dir_path, 'DBII')
            batch_detect(load_path, save_path, detect_in_image)
            print('Done')
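

# batch_detect is used by main above but not defined in this listing. A
# minimal sketch under assumptions: images are grayscale files readable
# by cv2, and detections are saved as one text file of integer (row, col)
# coordinates per image. The original helper may differ.
def batch_detect(load_dir, save_dir, detect_fn):
    import cv2
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)
    for name in sorted(os.listdir(load_dir)):
        img = cv2.imread(os.path.join(load_dir, name), cv2.IMREAD_GRAYSCALE)
        if img is None:
            # skip files cv2 cannot read as images
            continue
        dets = detect_fn(img)
        txt_name = os.path.splitext(name)[0] + '.txt'
        np.savetxt(os.path.join(save_dir, txt_name), dets, fmt='%d')
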
def restore_detection():
    # create network graph
    inputs, _ = utils.placeholder_inputs()
    net = detection.Net(inputs)

    # save random weights and keep them
    # in program's memory for comparison
    vars_ = []
    saver = tf.train.Saver()
    with tf.Session() as sess:
        # initialize variables
        sess.run(tf.global_variables_initializer())

        # assign random values to variables
        # and save those values for comparison
        for var in sorted(tf.global_variables(), key=lambda x: x.name):
            # create random values with the variable's static shape
            var_val = np.random.random(var.shape.as_list())

            # save for later comparison
            vars_.append(var_val)

            # load the values into the tf variable; var.load avoids
            # adding a new assign op to the graph on every iteration
            var.load(var_val, sess)

        # save initialized model
        saver.save(sess, '/tmp/detection/model.ckpt', global_step=0)
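        # this writes model.ckpt-0.* files plus a 'checkpoint' index under
        # /tmp/detection, which utils.restore_model presumably reads below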

    # create new session to restore saved weights
    with tf.Session() as sess:
        # reinitialize weights with new random values
        sess.run(tf.global_variables_initializer())

        # assert reinitialized weights differ from the saved ones
        for var, saved_val in zip(
                sorted(tf.global_variables(), key=lambda x: x.name), vars_):
            var_val = sess.run(var)
            assert not np.allclose(var_val, saved_val)

        # restore model
        utils.restore_model(sess, '/tmp/detection')

        # check that restored weights match the saved ones
        for var, saved_val in zip(
                sorted(tf.global_variables(), key=lambda x: x.name), vars_):
            var_val = sess.run(var)

            # every element must match its saved value
            if not np.allclose(var_val, saved_val):
                print(np.isclose(var_val, saved_val))
                print('Failed to load variable "{}"'.format(var.name))
                return False

    return True
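

# A hypothetical standalone runner for the save/restore test above;
# the helper name is an assumption.
def run_restore_test():
    if restore_detection():
        print('detection save/restore test passed')
    else:
        print('detection save/restore test FAILED')
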

def validate_model(dataset, flags):
    """Finds the best detection thresholds over a fold of `dataset`.

    The function name and signature here are assumptions.
    """
    # pick dataset fold
    if flags.fold == 'train':
        dataset = dataset.train
    elif flags.fold == 'val':
        dataset = dataset.val
    elif flags.fold == 'test':
        dataset = dataset.test
    else:
        raise ValueError(
            'Unrecognized "fold" argument. Must be "train", "val", or "test"')

    # gets placeholders for patches and labels
    patches_pl, labels_pl = utils.placeholder_inputs()

    # builds inference graph
    net = detection.Net(patches_pl, training=False)

    with tf.Session() as sess:
        print('Restoring model...')
        utils.restore_model(sess, flags.model_dir_path)
        print('Done')

        # find best threshold and statistics
        f_score, tdr, fdr, inter_thr, prob_thr = by_images(
            sess, net.predictions, patches_pl, dataset, flags.discard)

        # direct output according to user specification
        results_file = None
        if flags.results_path is None:
            results_file = sys.stdout
        else:
            results_file = open(flags.results_path, 'w')
def train(dataset, log_dir):
    with tf.Graph().as_default():
        # gets placeholders for patches and labels
        patches_pl, labels_pl = utils.placeholder_inputs()

        # build train related ops
        net = detection.Net(patches_pl, FLAGS.dropout)
        net.build_loss(labels_pl)
        net.build_train(FLAGS.learning_rate)

        # builds validation inference graph
        val_net = detection.Net(patches_pl, training=False, reuse=True)

        # add summary to plot loss, f score, tdr and fdr
        f_score_pl = tf.placeholder(tf.float32, shape=())
        tdr_pl = tf.placeholder(tf.float32, shape=())
        fdr_pl = tf.placeholder(tf.float32, shape=())
        scores_summary_op = tf.summary.merge([
            tf.summary.scalar('f_score', f_score_pl),
            tf.summary.scalar('tdr', tdr_pl),
            tf.summary.scalar('fdr', fdr_pl)
        ])
        loss_summary_op = tf.summary.scalar('loss', net.loss)

        # add variable initialization to graph
        init = tf.global_variables_initializer()

        # early stopping vars
        best_f_score = 0
        faults = 0
        saver = tf.train.Saver()
        with tf.Session() as sess:
            summary_writer = tf.summary.FileWriter(log_dir, sess.graph)

            sess.run(init)

            for step in range(1, FLAGS.steps + 1):
                feed_dict = utils.fill_feed_dict(dataset.train, patches_pl,
                                                 labels_pl, FLAGS.batch_size,
                                                 FLAGS.augment)

                _, loss_value = sess.run([net.train, net.loss],
                                         feed_dict=feed_dict)

                # write loss summary periodically
                if step % 100 == 0:
                    print('Step {}: loss = {}'.format(step, loss_value))
                    summary_str = sess.run(loss_summary_op,
                                           feed_dict=feed_dict)
                    summary_writer.add_summary(summary_str, step)

                # evaluate the model periodically
                if step % 1000 == 0:
                    print('Evaluation:')
                    f_score, fdr, tdr = validate.detection.by_patches(
                        sess, val_net.predictions, FLAGS.batch_size,
                        patches_pl, labels_pl, dataset.val)
                    print('TDR = {}'.format(tdr))
                    print('FDR = {}'.format(fdr))
                    print('F score = {}'.format(f_score))

                    # early stopping
                    if f_score > best_f_score:
                        best_f_score = f_score
                        saver.save(sess,
                                   os.path.join(log_dir, 'model.ckpt'),
                                   global_step=step)
                        faults = 0
                    else:
                        faults += 1
                        if faults >= FLAGS.tolerance:
                            print('Training stopped early')
                            break

                    # write f score, tdr and fdr to summary
                    scores_summary = sess.run(scores_summary_op,
                                              feed_dict={
                                                  f_score_pl: f_score,
                                                  tdr_pl: tdr,
                                                  fdr_pl: fdr
                                              })
                    summary_writer.add_summary(scores_summary,
                                               global_step=step)

    print('Finished')
    print('best F score = {}'.format(best_f_score))
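

# A hypothetical entry point tying dataset loading and training together;
# load_detection_dataset, FLAGS.dataset_dir_path, and FLAGS.log_dir_path
# are illustrative assumptions. The loader is expected to return an object
# with `train` and `val` splits, as train above requires.
def run_training():
    dataset = utils.load_detection_dataset(FLAGS.dataset_dir_path)
    train(dataset, FLAGS.log_dir_path)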