def test_random_translation_negative_lower(self):
   mock_offset = np.random.random((12, 1))
   with test.mock.patch.object(
       gen_stateful_random_ops, 'stateful_uniform', return_value=mock_offset):
     with self.cached_session(use_gpu=True):
       layer = image_preprocessing.RandomTranslation((-0.2, .3), .4)
       layer_2 = image_preprocessing.RandomTranslation((0.2, .3), .4)
       inp = np.random.random((12, 5, 8, 3)).astype(np.float32)
       actual_output = layer(inp, training=1)
       actual_output_2 = layer_2(inp, training=1)
       self.assertAllClose(actual_output, actual_output_2)
  def test_config_with_custom_name(self):
    layer = image_preprocessing.RandomTranslation(.5, .6, name='image_preproc')
    config = layer.get_config()
    layer_1 = image_preprocessing.RandomTranslation.from_config(config)
    self.assertEqual(layer_1.name, layer.name)
  def test_distribution(self, distribution):
    if "CentralStorage" in type(distribution).__name__:
      self.skipTest("Does not work with CentralStorageStrategy yet.")
    # TODO(b/159738418): large image input causes OOM in ubuntu multi gpu.
    np_images = np.random.random((32, 32, 32, 3)).astype(np.float32)
    image_dataset = dataset_ops.Dataset.from_tensor_slices(np_images).batch(
        16, drop_remainder=True)

    with distribution.scope():
      input_data = keras.Input(shape=(32, 32, 3), dtype=dtypes.float32)
      image_preprocessor = keras.Sequential([
          image_preprocessing.Resizing(height=256, width=256),
          image_preprocessing.RandomCrop(height=224, width=224),
          image_preprocessing.RandomTranslation(.1, .1),
          image_preprocessing.RandomRotation(.2),
          image_preprocessing.RandomFlip(),
          image_preprocessing.RandomZoom(.2, .2)
      ])
      preprocessed_image = image_preprocessor(input_data)
      flatten_layer = keras.layers.Flatten(data_format="channels_last")
      output = flatten_layer(preprocessed_image)
      cls_layer = keras.layers.Dense(units=1, activation="sigmoid")
      output = cls_layer(output)
      model = keras.Model(inputs=input_data, outputs=output)
    model.compile(loss="binary_crossentropy")
    _ = model.predict(image_dataset)
Example #4
 def test_random_translation_inference(self):
   with CustomObjectScope(
       {'RandomTranslation': image_preprocessing.RandomTranslation}):
     input_images = np.random.random((2, 5, 8, 3)).astype(np.float32)
     expected_output = input_images
     with tf_test_util.use_gpu():
       layer = image_preprocessing.RandomTranslation(.5, .5)
       actual_output = layer(input_images, training=0)
       self.assertAllClose(expected_output, actual_output)
Example #5
 def test_random_translation_left_numeric_constant(self):
   for dtype in (np.int64, np.float32):
     with tf_test_util.use_gpu():
       input_image = np.reshape(np.arange(0, 25), (1, 5, 5, 1)).astype(dtype)
        # Shifting by -.2 * 5 = -1 pixel, i.e. one pixel to the left.
       layer = image_preprocessing.RandomTranslation(
           height_factor=0., width_factor=(-.2, -.2), fill_mode='constant')
       output_image = layer(input_image)
       # pyformat: disable
       expected_output = np.asarray([
           [1, 2, 3, 4, 0],
           [6, 7, 8, 9, 0],
           [11, 12, 13, 14, 0],
           [16, 17, 18, 19, 0],
           [21, 22, 23, 24, 0]
       ]).astype(dtype)
       # pyformat: enable
       expected_output = np.reshape(expected_output, (1, 5, 5, 1))
       self.assertAllEqual(expected_output, output_image)
Example #6
 def test_random_translation_down_numeric_reflect(self):
   for dtype in (np.int64, np.float32):
     with tf_test_util.use_gpu():
       input_image = np.reshape(np.arange(0, 25), (1, 5, 5, 1)).astype(dtype)
       # Shifting by .2 * 5 = 1 pixel.
       layer = image_preprocessing.RandomTranslation(
           height_factor=(.2, .2), width_factor=0.)
       output_image = layer(input_image)
       # pyformat: disable
       expected_output = np.asarray([
           [0, 1, 2, 3, 4],
           [0, 1, 2, 3, 4],
           [5, 6, 7, 8, 9],
           [10, 11, 12, 13, 14],
           [15, 16, 17, 18, 19]
       ]).astype(dtype)
       # pyformat: enable
       expected_output = np.reshape(expected_output, (1, 5, 5, 1))
       self.assertAllEqual(expected_output, output_image)
Example #7
 def test_random_translation_asymmetric_size_numeric_reflect(self):
   for dtype in (np.int64, np.float32):
     with tf_test_util.use_gpu():
       input_image = np.reshape(np.arange(0, 16), (1, 8, 2, 1)).astype(dtype)
        # Shifting by .5 * 8 = 4 pixels.
       layer = image_preprocessing.RandomTranslation(
           height_factor=(.5, .5), width_factor=0.)
       output_image = layer(input_image)
       # pyformat: disable
       expected_output = np.asarray([
           [6, 7],
           [4, 5],
           [2, 3],
           [0, 1],
           [0, 1],
           [2, 3],
           [4, 5],
           [6, 7],
       ]).astype(dtype)
       # pyformat: enable
       expected_output = np.reshape(expected_output, (1, 8, 2, 1))
       self.assertAllEqual(expected_output, output_image)
  def test_distribution(self, distribution):
    np_images = np.random.random((1000, 32, 32, 3)).astype(np.float32)
    image_dataset = dataset_ops.Dataset.from_tensor_slices(np_images).batch(
        32, drop_remainder=True)

    with distribution.scope():
      input_data = keras.Input(shape=(32, 32, 3), dtype=dtypes.float32)
      image_preprocessor = keras.Sequential([
          image_preprocessing.Resizing(height=256, width=256),
          image_preprocessing.RandomCrop(height=224, width=224),
          image_preprocessing.RandomTranslation(.1, .1),
          image_preprocessing.RandomRotation(.2),
          image_preprocessing.RandomFlip(),
          image_preprocessing.RandomZoom(.2, .2)
      ])
      preprocessed_image = image_preprocessor(input_data)
      flatten_layer = keras.layers.Flatten(data_format="channels_last")
      output = flatten_layer(preprocessed_image)
      cls_layer = keras.layers.Dense(units=1, activation="sigmoid")
      output = cls_layer(output)
      model = keras.Model(inputs=input_data, outputs=output)
    model.compile(loss="binary_crossentropy")
    _ = model.predict(image_dataset)
Example #9
from multiprocessing.pool import ThreadPool
from tqdm import tqdm

import tensorflow as tf  # 2.4

from tensorflow.keras import layers
from tensorflow.keras.models import Sequential
from tensorflow.python.keras.layers.preprocessing import image_preprocessing

from tensorflow.keras.applications import EfficientNetB1, Xception

# Data augmentation
image_augmentation = Sequential(
    [
        image_preprocessing.RandomRotation(factor=0.1),  # random rotation
        image_preprocessing.RandomTranslation(height_factor=0.1,
                                              width_factor=0.1),  # random translation
        image_preprocessing.RandomFlip(),  # random flip
        image_preprocessing.RandomContrast(factor=0.1),  # random contrast change
        image_preprocessing.RandomZoom(height_factor=0.1,
                                       width_factor=0.1),  # random zoom
        # image_preprocessing.RandomHeight(factor=0.1),  # random height change
        # image_preprocessing.RandomWidth(factor=0.1),  # random width change
        # image_preprocessing.RandomCrop(height, width),  # random crop
        # image_preprocessing.CenterCrop(height, width),  # center crop
    ],
    name="img_augmentation",
)
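

# A minimal usage sketch, not part of the original script: it shows one way the
# `image_augmentation` pipeline above could be wired in front of one of the
# imported backbones. The 240x240 input size, the 10-class softmax head, and
# the helper name `build_augmented_model` are illustrative assumptions only.
def build_augmented_model(num_classes=10, image_size=240):
    inputs = layers.Input(shape=(image_size, image_size, 3))
    # Random augmentation layers are only active when the model runs in
    # training mode; at inference time they pass images through unchanged.
    x = image_augmentation(inputs)
    backbone = EfficientNetB1(include_top=False, weights="imagenet", pooling="avg")
    x = backbone(x)
    outputs = layers.Dense(num_classes, activation="softmax")(x)
    return tf.keras.Model(inputs, outputs, name="augmented_classifier")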


def get_time_suffix(suffix_len=15):
    """获取一个时间后缀"""