Example 1
    # NOTE(review): this fragment begins inside a truncated `if` branch — the
    # condition (presumably a GPU-availability check) is outside this view;
    # confirm against the full file.
    # Restrict which CUDA devices TensorFlow can see, from the run_device flag.
    os.environ['CUDA_VISIBLE_DEVICES'] = ''.join(FLAGS.run_device)
    # Let the GPU memory allocator grow on demand instead of reserving
    # everything up front.
    config.gpu_options.allow_growth = FLAGS.allow_growth
    # Cap the fraction of total GPU memory this process may claim.
    config.gpu_options.per_process_gpu_memory_fraction = FLAGS.gpu_fraction
else:
    print('Deploying Model on CPU')

# set up step
# Create the session using the device/memory options configured above.
sess = tf.Session(config=config)

# Pin the step counter and input pipeline to `store_device`
# (defined outside this fragment).
with tf.device(store_device):
    # Global training-step counter: non-trainable int64, by TF convention.
    global_step = tf.Variable(0, trainable=False, name='global_step', dtype=tf.int64)
    # read data
    # NOTE(review): the flag is spelled `reshape_weight` but is used as the
    # image *width* here — likely a typo for "width"; verify against the flag
    # definition before renaming.
    reshape_size = [FLAGS.reshape_height, FLAGS.reshape_weight]
    # Input pipeline: dataset -> (name, image, label) batches; images are
    # left un-normalized here (normalize=False).
    name_batch, image_batch, label_batch = get_next_batch(get_dataset(
        dir=FLAGS.data_dir,
        batch_size=FLAGS.batch_size,
        num_epochs=FLAGS.epoch_num,
        reshape_size=reshape_size,
        normalize=False)
    )

# inference
# Keep variable storage on `store_device` while the forward pass runs on GPU 0.
with arg_scope([get_variable], device=store_device):
    with tf.device('/GPU:0'):
        # Build the DeepLab v3+ graph in training mode; label value 255 is
        # passed as an ignore label (presumably excluded from the loss —
        # confirm in _build_deeplab).
        outputs_to_scales_to_logits, mean_loss = deeplab_v3_plus._build_deeplab(image_batch,
                                                                           label_batch,
                                                                           ignore_labels=[255],
                                                                           FLAGS=FLAGS,
                                                                           is_training=True)

        # Per-pixel semantic logits merged across scales.
        # NOTE(review): fragment is truncated here — the rest of the `with`
        # body is outside this view.
        score_map = outputs_to_scales_to_logits['semantic']['merged_logits']
Example 2
    # NOTE(review): fragment begins inside a truncated `else` branch of a
    # device-selection `if` that is outside this view.
    print('Deploying Model on CPU')

# set up step
# Create the session with the (externally configured) `config`.
sess = tf.Session(config=config)

# Propagate the class count from flags into the model's default parameters.
default_params.num_classes = FLAGS.num_classes
# Place the step counter and input pipeline on the device named by run_device.
with tf.device(FLAGS.run_device):
    # Global training-step counter: non-trainable int64, by TF convention.
    global_step = tf.Variable(0,
                              trainable=False,
                              name='global_step',
                              dtype=tf.int64)
    # read data
    # NOTE(review): `reshape_weight` is used as the image *width* — likely a
    # typo for "width"; verify against the flag definition.
    default_params.img_shape = [FLAGS.reshape_height, FLAGS.reshape_weight]
    # Input pipeline: dataset -> (name, image, labels, bboxes) batches for
    # detection training.
    name_batch, image_batch, labels_batch, bboxes_batch = get_next_batch(
        get_dataset(dir=FLAGS.data_dir,
                    batch_size=FLAGS.batch_size,
                    num_epochs=FLAGS.epoch_num,
                    reshape_size=default_params.img_shape))

# inference
# Keep variable storage on `store_device` while the forward pass runs on CPU 0.
# NOTE(review): unlike the sibling examples, this one builds the graph on
# /CPU:0 — confirm whether that is intentional (e.g. export/eval) or a leftover.
with arg_scope([get_variable], device=store_device):
    with tf.device('/CPU:0'):
        # SSD argument scope with regularizers from the surrounding file;
        # is_training=False, so batch-norm/dropout run in inference mode.
        with arg_scope(
                ssd_arg_scope(weight_init=None,
                              weight_reg=weight_reg,
                              bias_init=tf.zeros_initializer,
                              bias_reg=bias_reg,
                              is_training=False)):
            # Build the SSD-VGG16 (300px) detection network.
            net, endpoints, prediction_gathers = ssd_vgg16(
                image_batch, scope='ssd_vgg16_300')

        # predictions of bboxes
        # NOTE(review): fragment is truncated here — the bbox-prediction code
        # that this comment introduces is outside this view.
Example 3
    # NOTE(review): fragment begins inside a truncated `else` branch of a
    # device-selection `if` that is outside this view.
    print('Deploying Model on CPU')
    # Fall back to the CPU device string when no GPU branch was taken.
    run_device = '/CPU:0'

# set up step
# Create the session with the (externally configured) `config`.
sess = tf.Session(config=config)

# Place the step counter and input pipeline on the selected device.
with tf.device(run_device):
    # Global training-step counter: non-trainable int64, by TF convention.
    global_step = tf.Variable(0,
                              trainable=False,
                              name='global_step',
                              dtype=tf.int64)
    # read data
    # NOTE(review): `reshape_weight` is used as the image *width* — likely a
    # typo for "width"; verify against the flag definition.
    reshape_size = [FLAGS.reshape_height, FLAGS.reshape_weight]
    # Input pipeline: dataset -> (name, image, label) batches.
    name_batch, image_batch, label_batch = get_next_batch(
        get_dataset(dir=FLAGS.data_dir,
                    batch_size=FLAGS.batch_size,
                    num_epochs=FLAGS.epoch_num,
                    reshape_size=reshape_size))

# inference
# Keep variable storage on `store_device` while the forward pass runs on GPU 0.
with arg_scope([get_variable], device=store_device):
    with tf.device('/GPU:0'):
        # Look up the network constructor by name and build the scoring graph
        # with regularizers defined elsewhere in the file.
        net = get_net(FLAGS.net_name)
        score_map, endpoints = net(image_batch,
                                   num_classes=FLAGS.num_classes,
                                   weight_init=None,
                                   weight_reg=weight_reg,
                                   bias_init=tf.zeros_initializer,
                                   bias_reg=bias_reg)

        # solve for mAP and loss
        # Per-pixel predicted class = argmax over the channel axis (axis=3,
        # i.e. NHWC layout as implied by the indexing here).
        # NOTE(review): fragment is truncated after this line.
        class_map = arg_max(score_map, axis=3, name='class_map')