Example #1
def get_dataset(dataset_name,
                split_name,
                dataset_dir,
                im_batch=1,
                is_training=False,
                file_pattern=None,
                reader=None):
    """Return the decoded image, its original and resized sizes, ground-truth
    boxes and masks, the instance count and the image id for one COCO split."""
    if file_pattern is None:
        file_pattern = dataset_name + '_' + split_name + '*.tfrecord'

    tfrecords = glob.glob(dataset_dir + '/records/' + file_pattern)

    assert len(tfrecords) > 0, \
        "No tfrecord files found (did you run train.py from the code root?). " \
        "Looked for %s" % (dataset_dir + '/records/' + file_pattern)

    image, ih, iw, gt_boxes, gt_masks, num_instances, img_id = coco.read(
        tfrecords, is_training=is_training)

    image, new_ih, new_iw, gt_boxes, gt_masks = coco_preprocess.preprocess_image(
        image, gt_boxes, gt_masks, is_training)
    #visualize_input(gt_boxes, image, tf.expand_dims(gt_masks, axis=3))

    return image, ih, iw, new_ih, new_iw, gt_boxes, gt_masks, num_instances, img_id
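A minimal usage sketch for this get_dataset (not part of the original example). The libs.* module aliases are assumed from Example #5, the ./data/coco directory layout from Example #4, and coco.read is assumed to build a TF 1.x queue-based input pipeline (as the QueueRunner code in Example #6 suggests), so queue-runner threads are started before fetching tensors.

# Imports needed by get_dataset above (module paths assumed from Example #5).
import glob

import tensorflow as tf

import libs.datasets.coco as coco
import libs.preprocessings.coco_v1 as coco_preprocess

with tf.Graph().as_default():
    # The pattern above resolves to ./data/coco/records/coco_train2014*.tfrecord.
    (image, ih, iw, new_ih, new_iw, gt_boxes,
     gt_masks, num_instances, img_id) = get_dataset('coco', 'train2014', './data/coco',
                                                    im_batch=1, is_training=True)

    with tf.Session() as sess:
        sess.run(tf.group(tf.global_variables_initializer(),
                          tf.local_variables_initializer()))
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        img_val, boxes_val = sess.run([image, gt_boxes])
        print('image:', img_val.shape, 'gt_boxes:', boxes_val.shape)
        coord.request_stop()
        coord.join(threads)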
Example #2
def get_dataset(dataset_name, split_name, dataset_dir, 
        im_batch=1, is_training=False, file_pattern=None, reader=None):
    """Return the decoded image, its original size, ground-truth boxes and masks,
    the instance count and the image id for one COCO split."""
    if file_pattern is None:
        file_pattern = dataset_name + '_' + split_name + '*.tfrecord' 

    tfrecords = glob.glob(dataset_dir + '/records/' + file_pattern)
    image, ih, iw, gt_boxes, gt_masks, num_instances, img_id = coco.read(tfrecords)

    image, gt_boxes, gt_masks = coco_preprocess.preprocess_image(image, gt_boxes, gt_masks, is_training)

    return image, ih, iw, gt_boxes, gt_masks, num_instances, img_id
Example #3
def get_dataset(dataset_name,
                split_name,
                dataset_dir,
                im_batch=1,
                is_training=False,
                file_pattern=None,
                reader=None):
    """Return the decoded image, its original size, ground-truth boxes and masks,
    the instance count and the image id for one COCO split."""
    if file_pattern is None:
        file_pattern = dataset_name + '_' + split_name + '*.tfrecord'

    pattern = '../' + dataset_dir + 'records/' + file_pattern

    tfrecords = glob.glob(pattern)
    image, ih, iw, gt_boxes, gt_masks, num_instances, img_id = coco.read(
        tfrecords)

    image, gt_boxes, gt_masks = coco_preprocess.preprocess_image(
        image, gt_boxes, gt_masks, is_training)
    #visualize_input(gt_boxes, image, tf.expand_dims(gt_masks, axis=3))

    return image, ih, iw, gt_boxes, gt_masks, num_instances, img_id
Example #4
FLAGS = tf.app.flags.FLAGS

DEBUG = False

with tf.Graph().as_default():
    gpu_options = tf.GPUOptions(
        per_process_gpu_memory_fraction=0.8,
        allow_growth=True,
    )
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                          allow_soft_placement=True)) as sess:
        global_step = slim.create_global_step()

        ## data
        image, ih, iw, gt_boxes, gt_masks, num_instances, img_id = \
          coco.read('./data/coco/records/coco_train2014_00000-of-00040.tfrecord')
        with tf.control_dependencies([image, gt_boxes, gt_masks]):
            image, gt_boxes, gt_masks = coco_preprocess.preprocess_image(
                image, gt_boxes, gt_masks, is_training=True)

        ##  network
        with slim.arg_scope(resnet_v1.resnet_arg_scope(weight_decay=0.0001)):
            logits, end_points = resnet50(image, 1000, is_training=False)
        end_points['inputs'] = image

        for x in sorted(end_points.keys()):
            print(x, end_points[x].name, end_points[x].shape)

        pyramid = pyramid_network.build_pyramid('resnet50', end_points)
        # for p in pyramid:
        #   print (p, pyramid[p])
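A possible continuation of this example (not in the original source), staying inside the same Session block: initialize variables, start the queue runners that feed coco.read (mirroring the QueueRunner pattern in Example #6), and run the randomly initialized backbone once to check shapes.

        # Hypothetical continuation: run the preprocessed image through the backbone once.
        sess.run(tf.group(tf.global_variables_initializer(),
                          tf.local_variables_initializer()))
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        logits_val = sess.run(logits)
        print('logits shape:', logits_val.shape)
        coord.request_stop()
        coord.join(threads)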
Example #5
import tensorflow as tf
import tensorflow.contrib.slim as slim
from libs.logs.log import LOG
import libs.configs.config_v1 as cfg
import libs.nets.resnet_v1 as resnet_v1
import libs.datasets.dataset_factory as dataset_factory
import libs.datasets.coco as coco
import libs.preprocessings.coco_v1 as preprocess_coco
from libs.layers import ROIAlign

resnet50 = resnet_v1.resnet_v1_50
FLAGS = tf.app.flags.FLAGS

with tf.Graph().as_default():

  image, ih, iw, gt_boxes, gt_masks, num_instances, img_id = \
    coco.read('./data/coco/records/coco_trainval2014_00000-of-00048.tfrecord')
  
  image, gt_boxes, gt_masks = \
    preprocess_coco.preprocess_image(image, gt_boxes, gt_masks)

  sess = tf.Session()
  init_op = tf.group(tf.global_variables_initializer(),
                     tf.local_variables_initializer())
  # init_op = tf.initialize_all_variables()

  boxes = [[100, 100, 200, 200],
           [50, 50, 100, 100],
           [100, 100, 750, 750],
           [50, 50, 60, 60]]
Example #6
    # Excerpt from get_records(...); the enclosing signature mirrors get_dataset above.
    pattern = '/home/wanghx/deepleraning/MaskRCNN_Practise/' + dataset_dir + 'records/' + file_pattern

    tfrecords = glob.glob(pattern)
    return tfrecords


with tf.Graph().as_default():

    records = get_records(FLAGS.dataset_name,
                          FLAGS.dataset_split_name,
                          FLAGS.dataset_dir,
                          FLAGS.im_batch,
                          is_training=False)

    image, ih, iw, gt_boxes, gt_masks, num_instances, img_id = \
        coco.read(records)

    image, gt_boxes, gt_masks = \
        preprocess_coco.preprocess_image(image, gt_boxes, gt_masks)

    # using queue to input
    queue = tf.RandomShuffleQueue(capacity=12,
                                  min_after_dequeue=6,
                                  dtypes=(image.dtype, ih.dtype, iw.dtype,
                                          gt_boxes.dtype, gt_masks.dtype,
                                          num_instances.dtype, img_id.dtype))
    enqueue_op = queue.enqueue(
        (image, ih, iw, gt_boxes, gt_masks, num_instances, img_id))
    (image_, ih_, iw_, gt_boxes_, gt_masks_, num_instances_,
     img_id_) = queue.dequeue()
    qr = tf.train.QueueRunner(queue, [enqueue_op] * 4)
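A possible continuation (not in the original source) showing how this queue might be driven: register the QueueRunner, start its threads in a session, and dequeue one preprocessed example. tf.train.add_queue_runner and tf.train.start_queue_runners are standard TF 1.x queue plumbing; everything else here is an illustrative sketch.

    # Hypothetical continuation: register the runner and pull one element off the queue.
    tf.train.add_queue_runner(qr)

    with tf.Session() as sess:
        sess.run(tf.group(tf.global_variables_initializer(),
                          tf.local_variables_initializer()))
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        img_val, boxes_val, masks_val = sess.run([image_, gt_boxes_, gt_masks_])
        print(img_val.shape, boxes_val.shape, masks_val.shape)
        coord.request_stop()
        coord.join(threads)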
Example #7
FLAGS = tf.app.flags.FLAGS

DEBUG = False

with tf.Graph().as_default():
    gpu_options = tf.GPUOptions(
        per_process_gpu_memory_fraction=0.8,
        allow_growth=True,
    )
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                          allow_soft_placement=True)) as sess:
        global_step = slim.create_global_step()

        ## data
        image, ih, iw, gt_boxes, gt_masks, num_instances, img_id = \
          coco.read('/home/fortis/pycharmProjects/MaskRCNN_Practise/data/coco/records/coco_train2014_00000-of-00033.tfrecord')
        with tf.control_dependencies([image, gt_boxes, gt_masks]):
            image, gt_boxes, gt_masks = coco_preprocess.preprocess_image(
                image, gt_boxes, gt_masks, is_training=True)

        ##  network
        with slim.arg_scope(resnet_v1.resnet_arg_scope(weight_decay=0.0001)):
            logits, end_points = resnet50(image, 1000, is_training=False)
        end_points['inputs'] = image

        for x in sorted(end_points.keys()):
            print(x, end_points[x].name, end_points[x].shape)

        pyramid = pyramid_network.build_pyramid('resnet50', end_points)
        # for p in pyramid:
        #   print (p, pyramid[p])
Example #8
import tensorflow as tf
import tensorflow.contrib.slim as slim
from libs.logs.log import LOG
import libs.configs.config_v1 as cfg
import libs.nets.resnet_v1 as resnet_v1
import libs.datasets.dataset_factory as dataset_factory
import libs.datasets.coco as coco
import libs.preprocessings.coco_v1 as preprocess_coco
from libs.layers import ROIAlign

resnet50 = resnet_v1.resnet_v1_50
FLAGS = tf.app.flags.FLAGS

with tf.Graph().as_default():

    image, ih, iw, gt_boxes, gt_masks, num_instances, img_id = \
      coco.read('./data/coco/records/coco_train2014_00001-of-00033.tfrecord')

    image, gt_boxes, gt_masks = \
      preprocess_coco.preprocess_image(image, gt_boxes, gt_masks)

    sess = tf.Session()
    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())
    # init_op = tf.initialize_all_variables()

    boxes = [[100, 100, 200, 200], [50, 50, 100, 100], [100, 100, 750, 750],
             [50, 50, 60, 60]]
    # boxes = np.zeros((0, 4))
    boxes = tf.constant(boxes, tf.float32)
    feat = ROIAlign(image, boxes, False, 16, 7, 7)
    sess.run(init_op)
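A possible continuation (not in the original source): start the queue runners that feed coco.read, then evaluate the ROIAlign crops for the four hand-written boxes. The exact output layout depends on the libs.layers.ROIAlign implementation, so the shape noted in the comment is only an expectation.

    # Hypothetical continuation: evaluate the 7x7 crops for the four boxes above.
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    feat_val = sess.run(feat)
    print('ROIAlign output shape:', feat_val.shape)  # expected (num_boxes, 7, 7, channels)
    coord.request_stop()
    coord.join(threads)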