Exemplo n.º 1
0
def main():
    """Evaluate arcnet: pixelwise precision/recall over one epoch.

    Builds the eval graph, restores the latest checkpoint from
    ``args.checkpoint_dir``, streams the dataset exactly once and
    accumulates TP/FP/TN/FN pixel counts using a per-image adaptive
    threshold (just below that image's peak probability).
    """
    hparams = create_hparams()
    # Single unshuffled pass over the data in eval mode.
    colors, depths, labels, label_augs = get_dataset(args.dataset_dir,
                                                     num_readers=1,
                                                     num_preprocessing_threads=1,
                                                     hparams=hparams,
                                                     shuffle=False,
                                                     num_epochs=1,
                                                     is_training=False)
    net, end_points = model(colors,
                            depths,
                            num_classes=3,
                            num_channels=1000,
                            is_training=False,
                            global_pool=False,
                            output_stride=16,
                            spatial_squeeze=False,
                            color_scope='color_tower',
                            depth_scope='depth_tower',
                            scope='arcnet')
    # Softmax over the class axis. tf.nn.softmax subtracts the max logit
    # internally, so it cannot overflow the way the manual
    # exp(net)/sum(exp(net)) form can for large logits.
    probability_map = tf.nn.softmax(net, axis=3)
    saver = tf.train.Saver()
    session_config = tf.ConfigProto(allow_soft_placement=True,
                                    log_device_placement=False)
    session_config.gpu_options.allow_growth = True
    sess = tf.Session(config=session_config)
    saver.restore(sess, tf.train.latest_checkpoint(args.checkpoint_dir))
    print('Successfully loading model: {}.'.format(
        tf.train.latest_checkpoint(args.checkpoint_dir)))
    sess.run(tf.local_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    tp, fp, tn, fn = 0.0, 0.0, 0.0, 0.0
    try:
        while not coord.should_stop():
            sample_result, sample_label = sess.run([probability_map, label_augs])
            # Skip samples with no annotated pixels in channels 0-1.
            if np.sum(sample_label[..., 0:2]) == 0:
                continue
            # Adaptive per-image threshold just below the peak probability,
            # so only the most confident pixel(s) count as detections.
            threshold = np.max(sample_result[..., 1]) - 0.001
            sample_tp = (sample_result[..., 1] > threshold) & (sample_label[..., 1] == 1)
            sample_fp = (sample_result[..., 1] > threshold) & (sample_label[..., 0] == 1)
            sample_tn = (sample_result[..., 1] <= threshold) & (sample_label[..., 0] == 1)
            sample_fn = (sample_result[..., 1] <= threshold) & (sample_label[..., 1] == 1)
            tp += np.sum(sample_tp)
            fp += np.sum(sample_fp)
            tn += np.sum(sample_tn)
            fn += np.sum(sample_fn)
    except tf.errors.OutOfRangeError:
        print('epoch limit reached.')
    finally:
        coord.request_stop()
    coord.join(threads)
    sess.close()
    # Guard the divisions: with no predicted / no actual positives the
    # original raised ZeroDivisionError; report 0.0 instead.
    precision = tp / (tp + fp) if (tp + fp) > 0 else 0.0
    recall = tp / (tp + fn) if (tp + fn) > 0 else 0.0
    print('precision : %f' % precision)
    print('recall : %f' % recall)
Exemplo n.º 2
0
def main():
    """Evaluate full_senet2 binary classification accuracy over one epoch.

    Restores the latest checkpoint from ``args.checkpoint_dir`` and counts
    how often the sigmoid of the logit selected by the labelled angle bin
    agrees with the binary class label.
    """
    hparams = create_da_full_senet2_hparams()
    # Single unshuffled pass over the data in eval mode.
    images, class_labels, theta_labels = get_dataset(
        args.dataset_dir,
        num_readers=1,
        num_preprocessing_threads=1,
        hparams=hparams,
        shuffle=False,
        num_epochs=1,
        is_training=False)
    with slim.arg_scope(model_arg_scope()):
        net, end_points = full_senet2(inputs=images,
                                      num_classes=num_classes,
                                      is_training=False,
                                      dropout_keep_prob=1.0,
                                      reuse=tf.AUTO_REUSE,
                                      scope=hparams.scope,
                                      adapt_scope='target_adapt_layer',
                                      adapt_dims=hparams.adapt_dims,
                                      reduction_ratio=hparams.reduction_ratio)
    # One-hot over the 18 angle bins selects the logit for the labelled bin.
    theta_lebels_one_hot = tf.one_hot(theta_labels,
                                      depth=18,
                                      on_value=1.0,
                                      off_value=0.0)
    theta_acted = tf.reduce_sum(tf.multiply(net, theta_lebels_one_hot),
                                axis=1,
                                name='theta_acted')
    sig_op = slim.nn.sigmoid(theta_acted)
    # Correct when sigmoid(logit) >= 0.5 agrees with the binary class label
    # (label >= 0.1 treated as positive).
    conf = tf.equal(
        tf.to_int32(tf.greater_equal(sig_op, 0.5)),
        tf.to_int32(tf.greater_equal(tf.to_float(class_labels), 0.1)))
    saver = tf.train.Saver()
    session_config = tf.ConfigProto(allow_soft_placement=True,
                                    log_device_placement=False)
    session_config.gpu_options.allow_growth = True
    sess = tf.Session(config=session_config)
    saver.restore(sess, tf.train.latest_checkpoint(args.checkpoint_dir))
    print('Successfully loading model: {}.'.format(
        tf.train.latest_checkpoint(args.checkpoint_dir)))
    sess.run(tf.local_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    num_corrects = 0
    num_samples = 0
    try:
        while not coord.should_stop():
            num_corrects += int(sess.run(conf)[:])
            num_samples += 1
            print(num_samples)
    except tf.errors.OutOfRangeError:
        print('epoch limit reached.')
    finally:
        coord.request_stop()
    coord.join(threads)
    sess.close()
    # Guard against an empty dataset: the original divided by zero here.
    if num_samples > 0:
        print(num_corrects * 1.0 / num_samples)
    else:
        print('no samples evaluated.')
Exemplo n.º 3
0
def main():
    """Evaluate se_model, reporting per-class rates and summary metrics.

    Streams the dataset once, splitting correctness counts by class label,
    then prints per-class rates plus precision/recall/F1/accuracy.
    """
    hparams = create_domain_adapt_se_hparams()
    # Single unshuffled pass over the data in eval mode.
    images, class_labels, theta_labels = get_dataset(
        args.dataset_dir,
        num_readers=1,
        num_preprocessing_threads=1,
        hparams=hparams,
        shuffle=False,
        num_epochs=1,
        is_training=False)
    with slim.arg_scope(model_arg_scope()):
        net, end_points = se_model(inputs=images,
                                   num_classes=num_classes,
                                   is_training=False,
                                   dropout_keep_prob=1.0,
                                   reuse=tf.AUTO_REUSE,
                                   scope=hparams.scope,
                                   adapt_scope='target_adapt_layer',
                                   adapt_dims=hparams.adapt_dims,
                                   reduction_ratio=hparams.reduction_ratio)
    # One-hot over the 18 angle bins selects the logit for the labelled bin.
    theta_lebels_one_hot = tf.one_hot(theta_labels,
                                      depth=18,
                                      on_value=1.0,
                                      off_value=0.0)
    theta_acted = tf.reduce_sum(tf.multiply(net, theta_lebels_one_hot),
                                axis=1,
                                name='theta_acted')
    sig_op = slim.nn.sigmoid(theta_acted)
    # Correct when sigmoid(logit) >= 0.5 agrees with the binary class label.
    conf = tf.equal(
        tf.to_int32(tf.greater_equal(sig_op, 0.5)),
        tf.to_int32(tf.greater_equal(tf.to_float(class_labels), 0.1)))
    saver = tf.train.Saver()
    session_config = tf.ConfigProto(allow_soft_placement=True,
                                    log_device_placement=False)
    session_config.gpu_options.allow_growth = True
    sess = tf.Session(config=session_config)
    saver.restore(sess, tf.train.latest_checkpoint(args.checkpoint_dir))
    print('Successfully loading model: {}.'.format(
        tf.train.latest_checkpoint(args.checkpoint_dir)))
    sess.run(tf.local_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    num_pos, num_neg = 0.0, 0.0
    correct_pos, correct_neg = 0.0, 0.0
    try:
        while not coord.should_stop():
            con, label = sess.run([conf, class_labels])
            con = int(con[:])
            label = label[:]
            # NOTE(review): a truthy label is counted as *negative* here,
            # which looks inverted relative to the >= 0.1 positive test used
            # to build `conf` -- confirm the dataset's label convention.
            if label:
                num_neg += 1
                correct_neg += con
            else:
                num_pos += 1
                correct_pos += con
    except tf.errors.OutOfRangeError:
        print('epoch limit reached.')
    finally:
        coord.request_stop()
    coord.join(threads)
    sess.close()
    # Per-class *rates*, not raw counts (each normalized by its class size).
    # Guarded: the original raised ZeroDivisionError when a class was empty.
    tp = correct_pos / num_pos if num_pos > 0 else 0.0
    tn = correct_neg / num_neg if num_neg > 0 else 0.0
    fp = (num_pos - correct_pos) / num_pos if num_pos > 0 else 0.0
    fn = (num_neg - correct_neg) / num_neg if num_neg > 0 else 0.0
    print('num_pos: {}, num_neg: {}, correct_pos: {}, correct_neg: {}'.format(
        num_pos, num_neg, correct_pos, correct_neg))
    print('tp, tn, fp, fn: {}, {}, {}, {}'.format(tp, tn, fp, fn))
    print('precision: {}'.format(
        tp * 1.0 / (tp + fp) if (tp + fp) > 0 else 0.0))
    print('recall: {}'.format(
        tp * 1.0 / (tp + fn) if (tp + fn) > 0 else 0.0))
    # Fixed: F1 = 2*tp / (2*tp + fp + fn); the original mistakenly used tn
    # in the denominator.
    print('F1 score: {}'.format(
        2.0 * tp / (2.0 * tp + fp + fn) if (2.0 * tp + fp + fn) > 0 else 0.0))
    total = num_neg + num_pos
    print('accuracy: {}'.format(
        (correct_neg + correct_pos) / total if total > 0 else 0.0))
Exemplo n.º 4
0
def main():
    """Visually inspect grasp predictions sample by sample.

    Restores the latest checkpoint, then steps through the dataset one
    sample at a time, drawing both the predicted grasp angle and the
    ground-truth angle on each image and displaying it with OpenCV
    (blocks on a key press per sample). Also tallies running accuracy.
    """
    hparams = create_semi_supervised_domain_adapt_hparams()
    # Single unshuffled pass over the data in eval mode.
    images, class_labels, theta_labels = get_dataset(
        args.dataset_dir,
        num_readers=1,
        num_preprocessing_threads=1,
        hparams=hparams,
        shuffle=False,
        num_epochs=1,
        is_training=False)

    with slim.arg_scope(model_arg_scope()):
        net, end_points = model(inputs=images,
                                num_classes=num_classes,
                                is_training=False,
                                dropout_keep_prob=1.0,
                                reuse=tf.AUTO_REUSE,
                                scope=hparams.scope,
                                adapt_scope='target_adapt_layer',
                                adapt_dims=128)
        # Predicted angle bin. NOTE(review): argmin (not argmax) over the
        # outputs -- presumably lower score means better grasp; confirm
        # against the training loss.
        angle_index = tf.argmin(net, axis=1)
        # One-hot over the 18 angle bins selects the logit for the
        # labelled bin.
        theta_lebels_one_hot = tf.one_hot(theta_labels,
                                          depth=18,
                                          on_value=1.0,
                                          off_value=0.0)
        theta_acted = tf.reduce_sum(tf.multiply(net, theta_lebels_one_hot),
                                    axis=1,
                                    name='theta_acted')
        sig_op = slim.nn.sigmoid(theta_acted)
        # Correct when sigmoid(logit) >= 0.5 agrees with the binary class
        # label (label >= 0.1 treated as positive).
        conf = tf.equal(
            tf.to_int32(tf.greater_equal(sig_op, 0.5)),
            tf.to_int32(tf.greater_equal(tf.to_float(class_labels), 0.1)))

    saver = tf.train.Saver()
    session_config = tf.ConfigProto(allow_soft_placement=True,
                                    log_device_placement=False)
    session_config.gpu_options.allow_growth = True
    sess = tf.Session(config=session_config)
    saver.restore(sess, tf.train.latest_checkpoint(args.checkpoint_dir))
    print 'Successfully loading model: {}.'.format(
        tf.train.latest_checkpoint(args.checkpoint_dir))
    sess.run(tf.local_variables_initializer())
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    num_corrects = 0
    num_samples = 0
    try:
        while not coord.should_stop():
            img, cls, theta, angle, con, score = sess.run(
                [images, class_labels, theta_labels, angle_index, conf, net])
            # Undo the (ImageNet-style) mean subtraction so the image is
            # displayable, and drop the batch dimension.
            img = np.squeeze(img +
                             np.array([123.68, 116.779, 103.939])).astype(
                                 np.uint8)
            cls = cls[0]
            theta = theta[0]
            angle = angle[0]
            # Bin index -> degrees in [-90, 80) (10-degree bins) -> radians.
            grasp_angle = (angle * 10 - 90) * 1.0 / 180 * pi
            theta_angle = (theta * 10 - 90) * 1.0 / 180 * pi
            con = con[0]
            # Draw predicted rect, then ground-truth rect (flag 0).
            img = process_and_draw_rect(img, grasp_angle, 112, 112)
            img = process_and_draw_rect(img, theta_angle, 112, 112, 0)
            num_corrects += int(con)
            num_samples += 1
            print img.shape, cls, theta, angle, con
            print score
            # Display in BGR order for OpenCV; wait for a key press.
            cv2.imshow('img', img[..., ::-1])
            cv2.waitKey(0)
    except tf.errors.OutOfRangeError:
        print 'epoch limit reached.'
    finally:
        coord.request_stop()
    coord.join(threads)
    sess.close()
Exemplo n.º 5
0
def main():
    """Train the domain-adaptation model on paired source/target datasets.

    Builds two towers sharing weights (reuse=tf.AUTO_REUSE): one over the
    target-domain positives+negatives and one over the source-domain
    positives+negatives, combines their outputs into a joint adaptation
    loss, warm-starts from a pretrained checkpoint, and runs
    slim.learning.train.
    """
    tf.logging.set_verbosity(tf.logging.INFO)
    hparams = create_domain_adapt_hparams()
    for path in [args.train_log_dir]:
        if not tf.gfile.Exists(path):
            tf.gfile.MakeDirs(path)
    # Persist the hyperparameters next to the checkpoints for later runs.
    hparams_filename = os.path.join(args.train_log_dir, 'hparams.json')
    with tf.gfile.FastGFile(hparams_filename, 'w') as f:
        f.write(hparams.to_json())
    with tf.Graph().as_default():
        with tf.device(tf.train.replica_device_setter(args.task_id)):
            global_step = tf.train.get_or_create_global_step()

            # Target domain: concatenate positive and negative batches so a
            # single forward pass covers both.
            images_p_t, class_labels_p_t, theta_labels_p_t = get_dataset(
                os.path.join(args.target_dir, 'positive'), args.num_readers,
                args.num_preprocessing_threads, hparams)
            images_n_t, class_labels_n_t, theta_labels_n_t = get_dataset(
                os.path.join(args.target_dir, 'negative'), args.num_readers,
                args.num_preprocessing_threads, hparams)
            images_t = tf.concat([images_p_t, images_n_t], axis=0)
            class_labels_t = tf.concat([class_labels_p_t, class_labels_n_t],
                                       axis=0)
            theta_labels_t = tf.concat([theta_labels_p_t, theta_labels_n_t],
                                       axis=0)
            with slim.arg_scope(model_arg_scope()):
                net_t, end_points_t = model(
                    inputs=images_t,
                    num_classes=num_classes,
                    is_training=True,
                    dropout_keep_prob=hparams.dropout_keep_prob,
                    reuse=tf.AUTO_REUSE,
                    scope=hparams.scope,
                    adapt_scope='adapt_layer',
                    adapt_dims=128)

            # Source domain: same construction; variables are shared with
            # the target tower via reuse=tf.AUTO_REUSE and the same scope.
            images_p_s, class_labels_p_s, theta_labels_p_s = get_dataset(
                os.path.join(args.source_dir, 'positive'), args.num_readers,
                args.num_preprocessing_threads, hparams)
            images_n_s, class_labels_n_s, theta_labels_n_s = get_dataset(
                os.path.join(args.source_dir, 'negative'), args.num_readers,
                args.num_preprocessing_threads, hparams)
            images_s = tf.concat([images_p_s, images_n_s], axis=0)
            class_labels_s = tf.concat([class_labels_p_s, class_labels_n_s],
                                       axis=0)
            theta_labels_s = tf.concat([theta_labels_p_s, theta_labels_n_s],
                                       axis=0)
            with slim.arg_scope(model_arg_scope()):
                net_s, end_points_s = model(
                    inputs=images_s,
                    num_classes=num_classes,
                    is_training=True,
                    dropout_keep_prob=hparams.dropout_keep_prob,
                    reuse=tf.AUTO_REUSE,
                    scope=hparams.scope,
                    adapt_scope='adapt_layer',
                    adapt_dims=128)

            # Merge the two towers (target first) for the joint loss.
            net = tf.concat([net_t, net_s], axis=0)
            images = tf.concat([images_t, images_s], axis=0)
            class_labels = tf.concat([class_labels_t, class_labels_s], axis=0)
            theta_labels = tf.concat([theta_labels_t, theta_labels_s], axis=0)
            # Re-key each tower's shared 'adapt_layer' endpoint under a
            # domain-specific name so create_loss can tell them apart.
            end_points = {}
            end_points_t[hparams.scope +
                         '/target_adapt_layer'] = end_points_t[hparams.scope +
                                                               '/adapt_layer']
            end_points_s[hparams.scope +
                         '/source_adapt_layer'] = end_points_s[hparams.scope +
                                                               '/adapt_layer']
            end_points.update(end_points_t)
            end_points.update(end_points_s)
            loss, accuracy = create_loss(
                net,
                end_points,
                class_labels,
                theta_labels,
                scope=hparams.scope,
                source_adapt_scope='source_adapt_layer',
                target_adapt_scope='target_adapt_layer')
            # Optional staircase learning-rate decay.
            learning_rate = hparams.learning_rate
            if hparams.lr_decay_step:
                learning_rate = tf.train.exponential_decay(
                    hparams.learning_rate,
                    tf.train.get_or_create_global_step(),
                    decay_steps=hparams.lr_decay_step,
                    decay_rate=hparams.lr_decay_rate,
                    staircase=True)
            tf.summary.scalar('Learning_rate', learning_rate)
            optimizer = tf.train.GradientDescentOptimizer(learning_rate)
            train_op = slim.learning.create_train_op(loss, optimizer)
            add_summary(images,
                        end_points,
                        loss,
                        accuracy,
                        scope='domain_adapt')
            summary_op = tf.summary.merge_all()
            # Warm-start everything except the adapt layer and fc8 head.
            variable_map = restore_map(
                from_adapt_checkpoint=args.from_adapt_checkpoint,
                scope=hparams.scope,
                model_name='source_only',
                checkpoint_exclude_scopes=['adapt_layer', 'fc8'])
            init_saver = tf.train.Saver(variable_map)

            def initializer_fn(sess):
                # Restore the pretrained weights at session creation.
                init_saver.restore(
                    sess, tf.train.latest_checkpoint(args.checkpoint_dir))
                tf.logging.info('Successfully load pretrained checkpoint.')

            init_fn = initializer_fn
            session_config = tf.ConfigProto(allow_soft_placement=True,
                                            log_device_placement=False)
            session_config.gpu_options.allow_growth = True
            # NOTE(review): save_interval_secs (seconds) is passed into
            # keep_checkpoint_every_n_hours (hours) -- confirm the intended
            # units; as written, checkpoints are kept far more often than
            # the flag name suggests.
            saver = tf.train.Saver(
                keep_checkpoint_every_n_hours=args.save_interval_secs,
                max_to_keep=200)

            # NOTE(review): save_summaries_steps (a step count) feeds
            # save_summaries_secs (seconds) -- same unit-mismatch concern.
            slim.learning.train(train_op,
                                logdir=args.train_log_dir,
                                master=args.master,
                                global_step=global_step,
                                session_config=session_config,
                                init_fn=init_fn,
                                summary_op=summary_op,
                                number_of_steps=args.num_steps,
                                startup_delay_steps=15,
                                save_summaries_secs=args.save_summaries_steps,
                                saver=saver)
Exemplo n.º 6
0
def train(run_dir,
          master,
          task_id,
          num_readers,
          from_graspnet_checkpoint,
          dataset_dir,
          checkpoint_dir,
          save_summaries_steps,
          save_interval_secs,
          num_preprocessing_threads,
          num_steps,
          hparams,
          scope='graspnet'):
    """Train an AlexNet-based graspnet classifier.

    Writes hparams.json into ``run_dir``, builds the dataset pipeline and
    AlexNet tower, and runs slim.learning.train for ``num_steps`` steps.

    Args:
        run_dir: directory for checkpoints, summaries and hparams.json.
        master: address of the TensorFlow master (empty for local).
        task_id: replica task id for the device setter.
        num_readers: parallel dataset readers.
        from_graspnet_checkpoint: whether the restore map should treat the
            checkpoint as a graspnet checkpoint.
        dataset_dir: directory containing the training data.
        checkpoint_dir: pretrained checkpoint path (used by the currently
            disabled init_fn -- see NOTE below).
        save_summaries_steps: summary interval; passed into
            save_summaries_secs (see NOTE below).
        save_interval_secs: checkpoint keep interval; passed into
            keep_checkpoint_every_n_hours (see NOTE below).
        num_preprocessing_threads: parallel preprocessing threads.
        num_steps: total number of training steps.
        hparams: hyperparameter object (learning_rate, lr_decay_step, ...).
        scope: variable scope for the model tower.
    """
    for path in [run_dir]:
        if not tf.gfile.Exists(path):
            # Fixed: tf.gfile has MakeDirs, not Makedirs -- the original
            # raised AttributeError whenever run_dir did not already exist.
            tf.gfile.MakeDirs(path)
    # Persist the hyperparameters next to the checkpoints for later runs.
    hparams_filename = os.path.join(run_dir, 'hparams.json')
    with tf.gfile.FastGFile(hparams_filename, 'w') as f:
        f.write(hparams.to_json())
    with tf.Graph().as_default():
        with tf.device(tf.train.replica_device_setter(task_id)):
            global_step = slim.get_or_create_global_step()
            images, class_labels, theta_labels = get_dataset(
                dataset_dir, num_readers, num_preprocessing_threads, hparams)
            with slim.arg_scope(alexnet.alexnet_v2_arg_scope()):
                net, end_points = alexnet.alexnet_v2(inputs=images,
                                                     num_classes=num_classes,
                                                     is_training=True,
                                                     dropout_keep_prob=0.7,
                                                     scope=scope)
            loss, accuracy = create_loss(net, class_labels, theta_labels)
            # Optional staircase learning-rate decay.
            learning_rate = hparams.learning_rate
            if hparams.lr_decay_step:
                learning_rate = tf.train.exponential_decay(
                    hparams.learning_rate,
                    slim.get_or_create_global_step(),
                    decay_steps=hparams.lr_decay_step,
                    decay_rate=hparams.lr_decay_rate,
                    staircase=True)
            tf.summary.scalar('Learning_rate', learning_rate)
            optimizer = tf.train.GradientDescentOptimizer(learning_rate)
            train_op = slim.learning.create_train_op(loss, optimizer)
            add_summary(images, end_points, loss, accuracy, scope=scope)
            summary_op = tf.summary.merge_all()
            variable_map = restore_map(
                from_graspnet_checkpoint=from_graspnet_checkpoint,
                scope=scope,
                model_name=hparams.model_name,
                checkpoint_exclude_scope='fc8')
            init_saver = tf.train.Saver(variable_map)

            def initializer_fn(sess):
                # Restore pretrained weights (everything but fc8).
                init_saver.restore(sess, checkpoint_dir)
                tf.logging.info('Successfully load pretrained checkpoint.')

            init_fn = initializer_fn
            session_config = tf.ConfigProto(allow_soft_placement=True,
                                            log_device_placement=False)
            session_config.gpu_options.allow_growth = True
            # NOTE(review): save_interval_secs (seconds) feeds
            # keep_checkpoint_every_n_hours (hours) -- confirm intended
            # units before relying on checkpoint retention.
            saver = tf.train.Saver(
                keep_checkpoint_every_n_hours=save_interval_secs,
                max_to_keep=100)

            # NOTE(review): init_fn is deliberately disabled below, so
            # training starts from random weights; save_summaries_steps
            # (steps) feeds save_summaries_secs (seconds).
            slim.learning.train(
                train_op,
                logdir=run_dir,
                master=master,
                global_step=global_step,
                session_config=session_config,
                # init_fn=init_fn,
                summary_op=summary_op,
                number_of_steps=num_steps,
                startup_delay_steps=15,
                save_summaries_secs=save_summaries_steps,
                saver=saver)