Example #1
 def inputs(self):
     return [
         tf.TensorSpec((None, SHAPE, SHAPE, CHANNELS), tf.uint8,
                       'input_img'),
         tf.TensorSpec((None, SHAPE, SHAPE, CHANNELS), tf.uint8,
                       'target_img')
     ]
Example #2
 def inputs(self):
     ret = [
         tf.TensorSpec((None, None, 3), tf.float32, 'image'),
         tf.TensorSpec((None, None, cfg.RPN.NUM_ANCHOR), tf.int32,
                       'anchor_labels'),
         tf.TensorSpec((None, None, cfg.RPN.NUM_ANCHOR, 4), tf.float32,
                       'anchor_boxes'),
         tf.TensorSpec((None, 4), tf.float32, 'gt_boxes'),
         tf.TensorSpec((None, ), tf.int64, 'gt_labels')
     ]  # all > 0
     if cfg.MODE_MASK:
         ret.append(
             tf.TensorSpec((None, None, None), tf.uint8, 'gt_masks_packed')
         )  # NR_GT x height x ceil(width/8), packed groundtruth masks
     return ret
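The comment on 'gt_masks_packed' above describes binary masks whose last axis is bit-packed, 8 pixels per byte, giving the NR_GT x height x ceil(width/8) shape. A small NumPy sketch of that layout (illustrative only, not taken from the original model; the exact packing routine in the source may differ):

 import numpy as np

 # Five hypothetical ground-truth masks of height 32 and width 45.
 masks = np.random.randint(0, 2, size=(5, 32, 45)).astype(np.uint8)

 # Packing 8 mask pixels into one byte along the width axis gives
 # shape (NR_GT, height, ceil(width / 8)) -- here (5, 32, 6).
 packed = np.packbits(masks, axis=-1)
 print(packed.shape)

 # unpackbits restores the masks, zero-padded up to a multiple of 8 in width.
 restored = np.unpackbits(packed, axis=-1)[:, :, :45]
 assert (restored == masks).all()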
Example #3
 def inputs(self):
     ret = [tf.TensorSpec((None, None, 3), tf.float32, 'image')]
     num_anchors = len(cfg.RPN.ANCHOR_RATIOS)
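     # One (labels, boxes) pair of anchor inputs is declared per FPN level;
     # the level numbering in the tensor names starts at 2.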
     for k in range(len(cfg.FPN.ANCHOR_STRIDES)):
         ret.extend([
             tf.TensorSpec((None, None, num_anchors), tf.int32,
                           'anchor_labels_lvl{}'.format(k + 2)),
             tf.TensorSpec((None, None, num_anchors, 4), tf.float32,
                           'anchor_boxes_lvl{}'.format(k + 2))
         ])
     ret.extend([
         tf.TensorSpec((None, 4), tf.float32, 'gt_boxes'),
         tf.TensorSpec((None, ), tf.int64, 'gt_labels')
     ])  # all > 0
     if cfg.MODE_MASK:
         ret.append(
             tf.TensorSpec((None, None, None), tf.uint8, 'gt_masks_packed'))
     return ret
Example #4
 def inputs(self):
     """
     Define all the inputs (with type, shape, name) that the graph will need.
     """
     return [tf.TensorSpec((None, IMAGE_SIZE, IMAGE_SIZE), tf.float32, 'input'),
             tf.TensorSpec((None,), tf.int32, 'label')]
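These snippets appear to implement the inputs() method of tensorpack's ModelDesc, where the returned TensorSpec list declares the named input tensors the graph will receive. Below is a minimal sketch of how such a definition is typically consumed, assuming that interface (inputs / build_graph / optimizer) and an illustrative IMAGE_SIZE constant; it is not code from any of the examples above:

 import tensorflow as tf
 from tensorpack import ModelDesc

 IMAGE_SIZE = 28  # assumed value, for illustration only


 class SimpleModel(ModelDesc):
     def inputs(self):
         # Each TensorSpec becomes a named input tensor of the graph.
         return [tf.TensorSpec((None, IMAGE_SIZE, IMAGE_SIZE), tf.float32, 'input'),
                 tf.TensorSpec((None,), tf.int32, 'label')]

     def build_graph(self, image, label):
         # Tensors arrive in the same order as declared in inputs().
         flat = tf.reshape(image, [-1, IMAGE_SIZE * IMAGE_SIZE])
         weights = tf.compat.v1.get_variable('w', [IMAGE_SIZE * IMAGE_SIZE, 10])
         logits = tf.matmul(flat, weights)
         cost = tf.reduce_mean(
             tf.nn.sparse_softmax_cross_entropy_with_logits(labels=label, logits=logits),
             name='total_cost')
         return cost

     def optimizer(self):
         return tf.compat.v1.train.AdamOptimizer(1e-3)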
Example #5
 def inputs(self):
     return [
         tf.TensorSpec([None, INPUT_SHAPE, INPUT_SHAPE, 3], tf.float32,
                       'input'),
         tf.TensorSpec([None], tf.int32, 'label')
     ]
Example #6
 def inputs(self):
     return [tf.TensorSpec((None, IMAGE_SIZE, IMAGE_SIZE, 2), tf.float32, 'input'),
             tf.TensorSpec((None,), tf.int32, 'label')]
Example #7
 def inputs(self):
     return [tf.TensorSpec([None, self.image_shape, self.image_shape, 3], self.image_dtype, 'input'),
             tf.TensorSpec([None], tf.int32, 'label')]
Example #8
 def inputs(self):
     return [
         tf.TensorSpec((None, 30, 30, 3), tf.float32, 'input'),
         tf.TensorSpec((None, ), tf.int32, 'label')
     ]
Example #9
 def inputs(self):
     # The inference graph only accepts a single image, which is different to the training model.
     return [tf.TensorSpec((None, ), tf.string, 'input_img_bytes')]
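When an inference graph is fed encoded image bytes like this, decoding normally happens inside the graph. A hypothetical helper (not part of the original example) showing one way to turn such a string input into an image tensor, assuming a single JPEG payload:

 import tensorflow as tf

 def decode_first_image(input_img_bytes):
     # input_img_bytes: 1-D tf.string tensor; take its first (and only) element.
     img = tf.image.decode_jpeg(input_img_bytes[0], channels=3)  # uint8, H x W x 3
     return tf.cast(img, tf.float32)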
Example #10
 def inputs(self):
     return [
         tf.TensorSpec([None, 40, 40, 3], tf.float32, 'input'),
         tf.TensorSpec([None], tf.int32, 'label')
     ]