def call(self, *args: tf.Tensor, **kwargs: tf.Tensor) -> tf.Tensor:
    """Synthesize a signal tensor from the input tensor arguments.

    Keras routes `training` and `mask` keyword arguments into `call`;
    this layer has no use for either, so both are discarded before the
    remaining kwargs are forwarded to control extraction.

    Returns:
      The signal tensor produced by `get_signal` from the controls
      computed by `get_controls`.
    """
    # Drop keras.Layer bookkeeping kwargs; absent keys are a no-op.
    kwargs.pop('training', None)
    kwargs.pop('mask', None)
    return self.get_signal(**self.get_controls(*args, **kwargs))
def parse_example(self, example: tf.Tensor):
    """Parse one example into an ``(image, label)`` pair.

    When ``self._aug`` is set, a fixed sequence of augmentations is
    applied: horizontal/vertical flips, rotation, brightness, contrast,
    hue, saturation, then a random resized crop. The image is always
    resized to 224x224 at the end, augmented or not.

    Returns:
      A tuple of (image, label) tensors.
    """
    example = super().parse_example(example)
    image = example.pop("image")
    label = example.pop("label")
    if self._aug:
        # Geometric augmentations first, in the original fixed order.
        for geometric_op in (
            tf.image.random_flip_left_right,
            tf.image.random_flip_up_down,
            random_rot,
        ):
            image = geometric_op(image)
        # Photometric jitter, then a random crop.
        image = tf.image.random_brightness(image, max_delta=0.5)
        image = tf.image.random_contrast(image, 0.5, 1.5)
        image = tf.image.random_hue(image, max_delta=0.5)
        image = tf.image.random_saturation(image, 0.5, 1.5)
        image = random_resize_crop(image)
    image = tf.image.resize(
        image, (224, 224), preserve_aspect_ratio=False, antialias=False
    )
    return image, label
def parse_example(self, example: tf.Tensor) -> Dict[str, tf.Tensor]:
    """Parse one segmentation example into image and mask tensors.

    The segmentation mask is optionally resized (nearest-neighbor, so
    class ids survive the interpolation) to ``self._shp``, then either
    one-hot encoded over ``self.nClass`` classes or given a trailing
    channel axis.

    Returns:
      The example dict with "image" and "segment_mask" entries; the raw
      content/id/type keys are removed.
    """
    example = super().parse_example(example)
    # Metadata keys are not needed downstream.
    for unused_key in ("image_id", "image_type"):
        example.pop(unused_key)
    image = example.pop("image_content")
    seg = example.pop("segment_mask")
    if self._shp is not None:
        # Add a channel axis for resize, then squeeze it back out.
        seg = tf.image.resize(
            seg[..., tf.newaxis],
            size=self._shp,
            method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,
            antialias=False)[..., 0]
    if self._one_hot_class:
        # One-hot class encoding.
        seg = tf.one_hot(seg, depth=self.nClass)
    else:
        seg = seg[..., tf.newaxis]
    example["image"] = image
    example["segment_mask"] = seg
    return example
def parse_example(self, example: tf.Tensor) -> Dict[str, tf.Tensor]:
    """Parse one classification example.

    Drops metadata keys and renames "image_content" -> "image" and
    "classes" -> "label".

    Returns:
      The example dict containing "image" and "label" entries.
    """
    example = super().parse_example(example)
    example.pop("image_id")
    example.pop("image_type")
    # Rename the payload keys to the names consumers expect.
    example["image"] = example.pop("image_content")
    example["label"] = example.pop("classes")
    return example
def parse_example(self, example: tf.Tensor) -> Dict[str, tf.Tensor]:
    """Parse one detection example, rescaling boxes with the image.

    Boxes arrive as rows of ``[y, x, h, w, class]`` in pixel units. If
    ``self._shp`` is set, the image is resized and the geometry columns
    are scaled by the per-axis resize factors. The class column is
    either one-hot encoded over ``self.nClass`` classes or kept as a
    single trailing column.

    Returns:
      The example dict with "image" and "boxes" entries; the raw
      content/id/type keys are removed.
    """
    example = super().parse_example(example)
    example.pop("image_id")
    example.pop("image_type")
    image = example.pop("image_content")
    boxes = example.pop("boxes")
    if self._shp is not None:
        old_shp = tf.cast(tf.shape(image)[:2], dtype=tf.float32)
        new_shp = tf.cast(self._shp, dtype=tf.float32)
        image = tf.image.resize(image, self._shp, self._method, antialias=True)
        # Rescale box geometry: rows 0/2 (y, h) follow the height axis,
        # rows 1/3 (x, w) follow the width axis.
        y, x, h, w = tf.unstack(boxes[:, :4], axis=1)
        coordinates = tf.stack(
            [new_shp[0] * y / old_shp[0],
             new_shp[1] * x / old_shp[1],
             new_shp[0] * h / old_shp[0],
             new_shp[1] * w / old_shp[1]],
            axis=1)
    else:
        coordinates = boxes[:, :4]
    # One-hot class encoding (or keep a single class column).
    classes = boxes[:, 4]
    if self._one_hot_class:
        classes = tf.one_hot(tf.cast(classes, dtype=tf.int32),
                             depth=self.nClass)
    else:
        classes = classes[..., tf.newaxis]
    example["image"] = image
    example["boxes"] = tf.concat([coordinates, classes], axis=1)
    return example