def parse_single_example(serialized_example, params): """Parses a singel serialized TFExample string.""" if 'retinanet_parser' in dir(params): parser_params = params.retinanet_parser decoder = tf_example_decoder.TfExampleDecoder() else: parser_params = params.maskrcnn_parser decoder = tf_example_decoder.TfExampleDecoder(include_mask=True) data = decoder.decode(serialized_example) image = data['image'] source_id = data['source_id'] source_id = dataloader_utils.process_source_id(source_id) height = data['height'] width = data['width'] boxes = data['groundtruth_boxes'] boxes = box_utils.denormalize_boxes(boxes, tf.shape(image)[:2]) classes = data['groundtruth_classes'] is_crowds = data['groundtruth_is_crowd'] areas = data['groundtruth_area'] masks = data.get('groundtruth_instance_masks_png', None) image = input_utils.normalize_image(image) image, image_info = input_utils.resize_and_crop_image( image, parser_params.output_size, padded_size=input_utils.compute_padded_size( parser_params.output_size, 2 ** params.architecture.max_level), aug_scale_min=1.0, aug_scale_max=1.0) labels = { 'image_info': image_info, } groundtruths = { 'source_id': source_id, 'height': height, 'width': width, 'num_detections': tf.shape(classes), 'boxes': boxes, 'classes': classes, 'areas': areas, 'is_crowds': tf.cast(is_crowds, tf.int32), } if masks is not None: groundtruths['masks'] = masks return image, labels, groundtruths
def parse_single_example(serialized_example, params): """Parses a singel serialized TFExample string.""" decoder = tf_example_decoder.TfExampleDecoder() data = decoder.decode(serialized_example) image = data['image'] source_id = data['source_id'] source_id = dataloader_utils.process_source_id(source_id) height = data['height'] width = data['width'] boxes = data['groundtruth_boxes'] boxes = box_utils.denormalize_boxes(boxes, tf.shape(image)[:2]) classes = data['groundtruth_classes'] is_crowds = data['groundtruth_is_crowd'] areas = data['groundtruth_area'] image = input_utils.normalize_image(image) image, image_info = input_utils.resize_and_crop_image( image, params.retinanet_parser.output_size, padded_size=input_utils.compute_padded_size( params.retinanet_parser.output_size, 2**params.anchor.max_level), aug_scale_min=1.0, aug_scale_max=1.0) anchors = anchor.Anchor(params.anchor.min_level, params.anchor.max_level, params.anchor.num_scales, params.anchor.aspect_ratios, params.anchor.anchor_size, image.get_shape().as_list()[:2]) labels = { 'anchor_boxes': anchors.multilevel_boxes, 'image_info': image_info, } groundtruths = { 'source_id': source_id, 'height': height, 'width': width, 'num_detections': tf.shape(classes), 'boxes': boxes, 'classes': classes, 'areas': areas, 'is_crowds': tf.cast(is_crowds, tf.int32), } return image, labels, groundtruths
def _parse_predict_data(self, data):
  """Parses data for prediction."""
  # Gets original image and its size.
  image = data['image']
  image_shape = tf.shape(image)[0:2]

  # Normalizes image with mean and std pixel values.
  image = input_utils.normalize_image(image)

  # Resizes and crops image.
  image, image_info = input_utils.resize_and_crop_image(
      image,
      self._output_size,
      padded_size=input_utils.compute_padded_size(
          self._output_size, 2**self._max_level),
      aug_scale_min=1.0,
      aug_scale_max=1.0)
  image_height, image_width, _ = image.get_shape().as_list()

  # If bfloat16 is used, casts input image to tf.bfloat16.
  if self._use_bfloat16:
    image = tf.cast(image, dtype=tf.bfloat16)

  # Computes anchor boxes.
  input_anchor = anchor.Anchor(
      self._min_level, self._max_level, self._num_scales,
      self._aspect_ratios, self._anchor_size, (image_height, image_width))
  labels = {
      'anchor_boxes': input_anchor.multilevel_boxes,
      'image_info': image_info,
  }

  # If mode is PREDICT_WITH_GT, returns groundtruths and training targets
  # in labels.
  if self._mode == ModeKeys.PREDICT_WITH_GT:
    # Converts boxes from normalized coordinates to pixel coordinates.
    boxes = box_utils.denormalize_boxes(data['groundtruth_boxes'],
                                        image_shape)
    groundtruths = {
        'source_id': data['source_id'],
        'height': data['height'],
        'width': data['width'],
        'num_detections': tf.shape(data['groundtruth_classes']),
        'boxes': boxes,
        'classes': data['groundtruth_classes'],
        'areas': data['groundtruth_area'],
        'is_crowds': tf.cast(data['groundtruth_is_crowd'], tf.int32),
    }
    groundtruths['source_id'] = dataloader_utils.process_source_id(
        groundtruths['source_id'])
    groundtruths = dataloader_utils.pad_groundtruths_to_fixed_size(
        groundtruths, self._max_num_instances)
    labels['groundtruths'] = groundtruths

    # Computes training targets for the evaluation loss.
    classes = data['groundtruth_classes']
    image_scale = image_info[2, :]
    offset = image_info[3, :]
    boxes = input_utils.resize_and_crop_boxes(
        boxes, image_scale, (image_height, image_width), offset)
    # Filters out ground truth boxes that are all zeros.
    indices = box_utils.get_non_empty_box_indices(boxes)
    boxes = tf.gather(boxes, indices)
    classes = tf.gather(classes, indices)

    # Assigns anchors.
    anchor_labeler = anchor.AnchorLabeler(
        input_anchor, self._match_threshold, self._unmatched_threshold)
    (cls_targets, box_targets, num_positives) = anchor_labeler.label_anchors(
        boxes, tf.cast(tf.expand_dims(classes, axis=1), tf.float32))
    labels['cls_targets'] = cls_targets
    labels['box_targets'] = box_targets
    labels['num_positives'] = num_positives
  return {
      'images': image,
      'labels': labels,
  }
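# `_parse_predict_data` filters degenerate groundtruth boxes before anchor
# labeling. The helper below is a simplified sketch of what
# `box_utils.get_non_empty_box_indices` plausibly does, inferred from its
# name and the [y1, x1, y2, x2] box format used throughout; the repo's
# actual implementation may differ.
import tensorflow as tf

def get_non_empty_box_indices_sketch(boxes):
  """Returns indices of boxes whose height and width are both positive."""
  height = boxes[:, 2] - boxes[:, 0]  # y2 - y1
  width = boxes[:, 3] - boxes[:, 1]   # x2 - x1
  indices = tf.where(tf.logical_and(height > 0, width > 0))
  return tf.reshape(indices, [-1])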
def _parse_predict_data(self, data):
  """Parses data for prediction.

  Args:
    data: the decoded tensor dictionary from TfExampleDecoder.

  Returns:
    A dictionary of {'images': image, 'labels': labels} where
      images: image tensor that is preprocessed to have normalized value and
        dimension [output_size[0], output_size[1], 3]
      labels: a dictionary of tensors used for training. The following
        describes {key: value} pairs in the dictionary.
        source_id: Source image id. Default value -1 if the source id is
          empty in the groundtruth annotation.
        image_info: a 2D `Tensor` that encodes the information of the image
          and the applied preprocessing. It is in the format of
          [[original_height, original_width], [scaled_height, scaled_width],
          [y_scale, x_scale], [y_offset, x_offset]].
        anchor_boxes: ordered dictionary with keys
          [min_level, min_level+1, ..., max_level]. The values are tensor
          with shape [height_l, width_l, 4] representing anchor boxes at
          each level.
  """
  # Gets original image and its size.
  image = data['image']
  image_shape = tf.shape(image)[0:2]

  # Normalizes image with mean and std pixel values.
  image = input_utils.normalize_image(image)

  # Resizes and crops image.
  image, image_info = input_utils.resize_and_crop_image(
      image,
      self._output_size,
      padded_size=input_utils.compute_padded_size(
          self._output_size, 2**self._max_level),
      aug_scale_min=1.0,
      aug_scale_max=1.0)
  image_height, image_width, _ = image.get_shape().as_list()

  # If bfloat16 is used, casts input image to tf.bfloat16.
  if self._use_bfloat16:
    image = tf.cast(image, dtype=tf.bfloat16)

  # Computes anchor boxes.
  input_anchor = anchor.Anchor(
      self._min_level, self._max_level, self._num_scales,
      self._aspect_ratios, self._anchor_size, (image_height, image_width))
  labels = {
      'source_id': dataloader_utils.process_source_id(data['source_id']),
      'anchor_boxes': input_anchor.multilevel_boxes,
      'image_info': image_info,
  }

  if self._mode == ModeKeys.PREDICT_WITH_GT:
    # Converts boxes from normalized coordinates to pixel coordinates.
    boxes = box_utils.denormalize_boxes(data['groundtruth_boxes'],
                                        image_shape)
    groundtruths = {
        'source_id': data['source_id'],
        'height': data['height'],
        'width': data['width'],
        'num_detections': tf.shape(data['groundtruth_classes']),
        'boxes': boxes,
        'classes': data['groundtruth_classes'],
        'areas': data['groundtruth_area'],
        'is_crowds': tf.cast(data['groundtruth_is_crowd'], tf.int32),
    }
    groundtruths['source_id'] = dataloader_utils.process_source_id(
        groundtruths['source_id'])
    groundtruths = dataloader_utils.pad_groundtruths_to_fixed_size(
        groundtruths, self._max_num_instances)
    labels['groundtruths'] = groundtruths
  return {
      'images': image,
      'labels': labels,
  }
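# Because `image_info` records the original size, scaled size, scale, and
# offset in rows 0-3 (rows 2 and 3 are read as `image_info[2, :]` and
# `image_info[3, :]` elsewhere in this file), boxes predicted in the scaled
# image frame can be mapped back to original-image pixels. This is a hedged
# sketch of that inversion, assuming resize_and_crop_boxes scales boxes and
# then subtracts the offset; `rescale_boxes_to_original` and
# `predicted_boxes` are hypothetical names, not part of this codebase.
import tensorflow as tf

def rescale_boxes_to_original(predicted_boxes, image_info):
  """Maps [N, 4] boxes in [y1, x1, y2, x2] back to original-image pixels."""
  scale = image_info[2, :]   # [y_scale, x_scale] applied by the resize.
  offset = image_info[3, :]  # [y_offset, x_offset] applied by the crop.
  # Inverse of scale-then-offset: add the offset back, divide by the scale.
  boxes = predicted_boxes + tf.tile(tf.expand_dims(offset, axis=0), [1, 2])
  boxes = boxes / tf.tile(tf.expand_dims(scale, axis=0), [1, 2])
  # Clamps to the original image extent stored in image_info[0, :].
  height, width = image_info[0, 0], image_info[0, 1]
  return tf.maximum(tf.minimum(boxes, [height, width, height, width]), 0.0)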
def _parse_eval_data(self, data):
  """Parses data for evaluation.

  Args:
    data: the decoded tensor dictionary from TfExampleDecoder.

  Returns:
    image: image tensor that is preprocessed to have normalized value and
      dimension [output_size[0], output_size[1], 3]
    labels: a dictionary of tensors used for training. The following
      describes {key: value} pairs in the dictionary.
      image_info: a 2D `Tensor` that encodes the information of the image
        and the applied preprocessing. It is in the format of
        [[original_height, original_width], [scaled_height, scaled_width],
        [y_scale, x_scale], [y_offset, x_offset]].
      anchor_boxes: ordered dictionary with keys
        [min_level, min_level+1, ..., max_level]. The values are tensor with
        shape [height_l, width_l, 4] representing anchor boxes at each
        level.
      groundtruths:
        source_id: Groundtruth source id.
        height: Original image height.
        width: Original image width.
        boxes: Groundtruth bounding box annotations. The box is represented
          in [y1, x1, y2, x2] format. The coordinates are w.r.t the scaled
          image that is fed to the network. The tensor is padded with -1 to
          the fixed dimension [self._max_num_instances, 4].
        classes: Groundtruth classes annotations. The tensor is padded
          with -1 to the fixed dimension [self._max_num_instances].
        areas: Box area or mask area depending on whether mask is present.
        is_crowds: Whether the ground truth label is a crowd label.
        num_groundtruths: Number of ground truths in the image.
  """
  # Gets original image and its size.
  image = data['image']
  image_shape = tf.shape(image)[0:2]

  # Normalizes image with mean and std pixel values.
  image = input_utils.normalize_image(image)

  # Resizes and crops image.
  image, image_info = input_utils.resize_and_crop_image(
      image,
      self._output_size,
      padded_size=input_utils.compute_padded_size(
          self._output_size, 2**self._max_level),
      aug_scale_min=1.0,
      aug_scale_max=1.0)
  image_height, image_width, _ = image.get_shape().as_list()

  # Computes anchor boxes.
  input_anchor = anchor.Anchor(
      self._min_level, self._max_level, self._num_scales,
      self._aspect_ratios, self._anchor_size, (image_height, image_width))

  # If bfloat16 is used, casts input image to tf.bfloat16.
  if self._use_bfloat16:
    image = tf.cast(image, dtype=tf.bfloat16)

  # Sets up groundtruth data for evaluation.
  groundtruths = {
      'source_id': data['source_id'],
      'height': data['height'],
      'width': data['width'],
      'num_groundtruths': tf.shape(data['groundtruth_classes']),
      'boxes': box_utils.denormalize_boxes(data['groundtruth_boxes'],
                                           image_shape),
      'classes': data['groundtruth_classes'],
      'areas': data['groundtruth_area'],
      'is_crowds': tf.cast(data['groundtruth_is_crowd'], tf.int32),
  }
  # TODO(b/143766089): Add ground truth masks for segmentation metrics.
  groundtruths['source_id'] = dataloader_utils.process_source_id(
      groundtruths['source_id'])
  groundtruths = dataloader_utils.pad_groundtruths_to_fixed_size(
      groundtruths, self._max_num_instances)

  # Packs labels for model_fn outputs.
  labels = {
      'anchor_boxes': input_anchor.multilevel_boxes,
      'image_info': image_info,
      'groundtruths': groundtruths,
  }
  return image, labels
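# The docstring above notes that boxes and classes are padded with -1 to
# fixed leading dimensions so per-image groundtruths can be batched. As a
# rough illustration of what `dataloader_utils.pad_groundtruths_to_fixed_size`
# has to do for each tensor, here is a generic padding helper; it is an
# assumption based on the documented behavior, not the repo's code.
import tensorflow as tf

def pad_to_fixed_size_sketch(tensor, size, pad_value=-1):
  """Pads (or truncates) the first dimension of `tensor` to `size` rows."""
  tensor = tensor[:size]  # Truncates if there are more than `size` rows.
  pad_length = size - tf.shape(tensor)[0]
  paddings = [[0, pad_length]] + [[0, 0]] * (tensor.shape.ndims - 1)
  return tf.pad(tensor, paddings, constant_values=pad_value)

# Applied with size=self._max_num_instances, 'boxes' becomes
# [max_num_instances, 4] and 'classes' becomes [max_num_instances],
# matching the shapes the docstring describes.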
def parse_predict_data(self, data):
  """Parses data for ShapeMask prediction."""
  classes = data['groundtruth_classes']
  boxes = data['groundtruth_boxes']
  masks = data['groundtruth_instance_masks']

  # If not using category, makes all categories with id = 0.
  if not self._use_category:
    classes = tf.cast(tf.greater(classes, 0), dtype=tf.float32)

  image = self.get_normalized_image(data)

  # Converts boxes from normalized coordinates to pixel coordinates.
  image_shape = tf.shape(image)[0:2]
  boxes = box_utils.denormalize_boxes(boxes, image_shape)

  # Resizes and crops image.
  image, image_info = input_utils.resize_and_crop_image(
      image,
      self._output_size,
      self._output_size,
      aug_scale_min=1.0,
      aug_scale_max=1.0)
  image_scale = image_info[2, :]
  offset = image_info[3, :]

  # Resizes and crops boxes and masks.
  boxes = input_utils.resize_and_crop_boxes(
      boxes, image_scale, image_info[1, :], offset)
  masks = input_utils.resize_and_crop_masks(
      tf.expand_dims(masks, axis=-1), image_scale, self._output_size, offset)

  # Filters out ground truth boxes that are all zeros.
  indices = box_utils.get_non_empty_box_indices(boxes)
  boxes = tf.gather(boxes, indices)
  classes = tf.gather(classes, indices)

  # Assigns anchors.
  input_anchor = anchor.Anchor(
      self._min_level, self._max_level, self._num_scales,
      self._aspect_ratios, self._anchor_size, self._output_size)
  anchor_labeler = anchor.AnchorLabeler(
      input_anchor, self._match_threshold, self._unmatched_threshold)

  # If bfloat16 is used, casts input image to tf.bfloat16.
  if self._use_bfloat16:
    image = tf.cast(image, dtype=tf.bfloat16)

  labels = {
      'anchor_boxes': input_anchor.multilevel_boxes,
      'image_info': image_info,
  }
  if self._mode == ModeKeys.PREDICT_WITH_GT:
    # Converts boxes from normalized coordinates to pixel coordinates.
    groundtruths = {
        'source_id': data['source_id'],
        'height': data['height'],
        'width': data['width'],
        'num_detections': tf.shape(data['groundtruth_classes']),
        'boxes': box_utils.denormalize_boxes(data['groundtruth_boxes'],
                                             image_shape),
        'classes': data['groundtruth_classes'],
        # 'masks': tf.squeeze(masks, axis=-1),
        'areas': data['groundtruth_area'],
        'is_crowds': tf.cast(data['groundtruth_is_crowd'], tf.int32),
    }
    groundtruths['source_id'] = dataloader_utils.process_source_id(
        groundtruths['source_id'])
    groundtruths = dataloader_utils.pad_groundtruths_to_fixed_size(
        groundtruths, self._max_num_instances)

    # Computes training labels.
    (cls_targets, box_targets, num_positives) = anchor_labeler.label_anchors(
        boxes, tf.cast(tf.expand_dims(classes, axis=1), tf.float32))

    # Packs labels for model_fn outputs.
    labels.update({
        'cls_targets': cls_targets,
        'box_targets': box_targets,
        'num_positives': num_positives,
        'groundtruths': groundtruths,
    })
  return {
      'images': image,
      'labels': labels,
  }
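# All of these parsers construct anchors the same way, so a short usage
# sketch may help. The argument values below are illustrative
# (RetinaNet-style levels 3-7); only the positional signature and the
# ordered `multilevel_boxes` dictionary are taken from the code and
# docstrings above, and the exact per-level tensor shapes depend on
# `anchor.Anchor` itself.
input_anchor = anchor.Anchor(
    3,                # min_level
    7,                # max_level
    3,                # num_scales
    [1.0, 2.0, 0.5],  # aspect_ratios
    4.0,              # anchor_size
    (640, 640))       # height and width of the image fed to the model

# One entry per pyramid level, keyed min_level..max_level, each holding
# the anchor boxes for that level's feature map.
for level, level_boxes in input_anchor.multilevel_boxes.items():
  print(level, level_boxes.shape)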