Example #1
        def inference_to_loss():
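            # Build the forward pass for one tower: run the segmentation net on a
            # batch, derive per-pixel class predictions, and compute the mean loss
            # plus pixel accuracy and IoU metrics, with image/scalar summaries.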
            # with tf.device(FLAGS.store_device):
            name_batch, image_batch, label_batch = get_next_batch(dataset)

            score_map, endpoints = net(image_batch,
                                       num_classes=FLAGS.num_classes,
                                       weight_init=None,
                                       weight_reg=weight_reg,
                                       bias_init=tf.zeros_initializer,
                                       bias_reg=bias_reg)
            class_map = arg_max(score_map, axis=3, name='class_map')
            pixel_acc = mAP(class_map, label_batch)
            mean_loss = softmax_with_logits(score_map, label_batch)
            mean_IOU, IOUs = mIOU(class_map,
                                  label_batch,
                                  ignore_label=[0],
                                  num_classes=FLAGS.num_classes)
            with tf.name_scope('summary_input_output'):
                tf.summary.image('tower_image_batch',
                                 image_batch,
                                 max_outputs=1)
                tf.summary.image('tower_label_batch',
                                 tf.cast(paint(label_batch), tf.uint8),
                                 max_outputs=1)
                tf.summary.image('tower_predictions',
                                 tf.cast(paint(class_map), tf.uint8),
                                 max_outputs=1)
                tf.summary.image('tower_contrast',
                                 tf.cast(compare(class_map, label_batch),
                                         tf.uint8),
                                 max_outputs=1)
                tf.summary.scalar('tower_pixel_acc', pixel_acc)
                tf.summary.scalar('tower_mean_iou', mean_IOU)
                tf.summary.scalar('tower_mean_loss', mean_loss)
            with tf.name_scope('tower_ious'):
                add_iou_summary(IOUs, pascal_voc_classes)
            return mean_loss, mean_IOU, pixel_acc, endpoints
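For context, a caller would typically wrap the returned mean_loss in an optimizer and run the resulting train op in a session. A minimal sketch of such a caller, assuming a standard TF 1.x setup; the optimizer choice and learning rate below are illustrative, not taken from the original project:

    import tensorflow as tf

    mean_loss, mean_IOU, pixel_acc, endpoints = inference_to_loss()
    global_step = tf.train.get_or_create_global_step()
    # Plain momentum SGD as a placeholder; the real project may configure this differently.
    optimizer = tf.train.MomentumOptimizer(learning_rate=1e-3, momentum=0.9)
    train_op = optimizer.minimize(mean_loss, global_step=global_step)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(tf.local_variables_initializer())
        _, loss_val, miou_val = sess.run([train_op, mean_loss, mean_IOU])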
Example #2
File: inference.py  Project: Mooonside/SSD
    print('Deploying Model on CPU')

# set up step
sess = tf.Session(config=config)

default_params.num_classes = FLAGS.num_classes
with tf.device(FLAGS.run_device):
    global_step = tf.Variable(0,
                              trainable=False,
                              name='global_step',
                              dtype=tf.int64)
    # read data
    default_params.img_shape = [FLAGS.reshape_height, FLAGS.reshape_weight]
    name_batch, image_batch, labels_batch, bboxes_batch = get_next_batch(
        get_dataset(dir=FLAGS.data_dir,
                    batch_size=FLAGS.batch_size,
                    num_epochs=FLAGS.epoch_num,
                    reshape_size=default_params.img_shape))

# inference
with arg_scope([get_variable], device=store_device):
    with tf.device('/CPU:0'):
        with arg_scope(
                ssd_arg_scope(weight_init=None,
                              weight_reg=weight_reg,
                              bias_init=tf.zeros_initializer,
                              bias_reg=bias_reg,
                              is_training=False)):
            net, endpoints, prediction_gathers = ssd_vgg16(
                image_batch, scope='ssd_vgg16_300')
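To run this inference graph, the usual next step is to restore trained weights into the session created above and evaluate the prediction tensors. A minimal sketch, assuming a standard TF 1.x checkpoint; FLAGS.ckpt_path is a hypothetical flag, not necessarily defined in the original project:

    saver = tf.train.Saver()
    saver.restore(sess, FLAGS.ckpt_path)  # hypothetical flag holding the checkpoint path

    # Fetch the raw SSD outputs for one batch; decoding box offsets, score
    # thresholding, and NMS are project-specific and omitted here.
    names, gathers = sess.run([name_batch, prediction_gathers])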
Example #3
    config.gpu_options.allow_growth = FLAGS.allow_growth
    config.gpu_options.per_process_gpu_memory_fraction = FLAGS.gpu_fraction
else:
    print('Deploying Model on CPU')

# set up step
sess = tf.Session(config=config)

with tf.device(store_device):
    global_step = tf.Variable(0, trainable=False, name='global_step', dtype=tf.int64)
    # read data
    reshape_size = [FLAGS.reshape_height, FLAGS.reshape_weight]
    name_batch, image_batch, label_batch = get_next_batch(get_dataset(
        dir=FLAGS.data_dir,
        batch_size=FLAGS.batch_size,
        num_epochs=FLAGS.epoch_num,
        reshape_size=reshape_size,
        normalize=False)
    )

# inference
with arg_scope([get_variable], device=store_device):
    with tf.device('/GPU:0'):
        outputs_to_scales_to_logits, mean_loss = deeplab_v3_plus._build_deeplab(
            image_batch,
            label_batch,
            ignore_labels=[255],
            FLAGS=FLAGS,
            is_training=True)

        score_map = outputs_to_scales_to_logits['semantic']['merged_logits']
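From here, the merged logits are typically turned into per-pixel class predictions, and mean_loss is handed to an optimizer for training. A minimal sketch of that tail end, assuming a TF 1.x training loop; the optimizer and learning rate are illustrative, not from the original project:

    # Per-pixel class prediction from the merged semantic logits.
    predictions = tf.argmax(score_map, axis=3, name='predictions')

    optimizer = tf.train.AdamOptimizer(learning_rate=1e-4)
    train_op = optimizer.minimize(mean_loss, global_step=global_step)

    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())
    _, loss_val = sess.run([train_op, mean_loss])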