Code Example #1
# the snippet starts mid-class: the enclosing keras.Model subclass (name inferred
# from the "adaptive_augmenter" layer below) and the hyperparameters image_size,
# max_translation, max_rotation, max_zoom are defined elsewhere in the source script
class AdaptiveAugmenter(keras.Model):
    def __init__(self):
        super().__init__()

        # stores the current probability of an image being augmented
        self.probability = tf.Variable(0.0)

        # the corresponding augmentation names from the paper are shown above each layer
        # the authors show (see Figure 4) that the blitting and geometric augmentations
        # are the most helpful in the low-data regime
        self.augmenter = keras.Sequential(
            [
                layers.InputLayer(input_shape=(image_size, image_size, 3)),
                # blitting/x-flip:
                layers.RandomFlip("horizontal"),
                # blitting/integer translation:
                layers.RandomTranslation(
                    height_factor=max_translation,
                    width_factor=max_translation,
                    interpolation="nearest",
                ),
                # geometric/rotation:
                layers.RandomRotation(factor=max_rotation),
                # geometric/isotropic and anisotropic scaling:
                layers.RandomZoom(height_factor=(-max_zoom, 0.0),
                                  width_factor=(-max_zoom, 0.0)),
            ],
            name="adaptive_augmenter",
        )
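
    # For context, a sketch (not part of the original snippet) of how the stored
    # probability is typically consumed: every image is augmented, but the augmented
    # version replaces the original only with probability `self.probability`.
    def call(self, images, training=True):
        if training:
            batch_size = tf.shape(images)[0]
            augmented_images = self.augmenter(images, training=True)
            # per-image coin flips against the current augmentation probability
            augmentation_values = tf.random.uniform(
                shape=(batch_size, 1, 1, 1), minval=0.0, maxval=1.0
            )
            images = tf.where(
                augmentation_values < self.probability, augmented_images, images
            )
        return images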
Code Example #2
def get_augmenter(min_area, brightness, jitter):
    zoom_factor = 1.0 - tf.sqrt(min_area)
    return keras.Sequential([
        keras.Input(shape=(image_size, image_size, image_channels)),
        layers.Rescaling(1 / 255),
        layers.RandomFlip("horizontal"),
        layers.RandomTranslation(zoom_factor / 2, zoom_factor / 2),
        layers.RandomZoom((-zoom_factor, 0.0), (-zoom_factor, 0.0)),
        RandomColorAffine(brightness, jitter),
    ])
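
A possible usage sketch: in the source script, two augmenters of different strengths are built from this helper. The specific argument values below are illustrative assumptions, and `image_size`, `image_channels`, and the custom `RandomColorAffine` layer are assumed to be defined in the same script:

# stronger augmentations for contrastive learning, weaker ones for classification
contrastive_augmenter = get_augmenter(min_area=0.25, brightness=0.6, jitter=0.2)
classification_augmenter = get_augmenter(min_area=0.75, brightness=0.3, jitter=0.1)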
Code Example #3
def build_model_augment() -> keras.Sequential:
    # build a sequential model with a fixed (32, 32, 3) input shape; declaring the
    # input up front ensures the shape is not silently ignored on a later layer
    model = keras.Sequential()
    model.add(keras.Input(shape=(32, 32, 3)))
    # add a data augmentation stage
    data_augmentation = keras.Sequential([
        # random horizontal flip
        layers.RandomFlip("horizontal"),
        # random vertical flip
        layers.RandomFlip("vertical"),
        # random rotation of up to ±20% of a full circle (the factor is a fraction
        # of 2π, not degrees)
        layers.RandomRotation(0.2),
        # random zoom of up to ±20%
        layers.RandomZoom(0.2),
        # random contrast adjustment of up to ±10%
        layers.RandomContrast(0.1)
    ])
    model.add(data_augmentation)
    # add the first convolutional layer, with ReLU as activation function and same padding
    model.add(
        keras.layers.Conv2D(32, (3, 3),
                            activation="relu",
                            padding="same"))  # [32, 32, 32]
    # add a maxpooling layer to reduce the dimension
    model.add(keras.layers.MaxPooling2D(2, 2))  # [16, 16, 32]
    # add the second convolutional layer, with ReLU as activation function and same padding
    model.add(
        keras.layers.Conv2D(64, (3, 3), activation="relu",
                            padding="same"))  # [16, 16, 64]
    # add a maxpooling layer to reduce the dimension
    model.add(keras.layers.MaxPooling2D(2, 2))  # [8, 8, 64]
    # add the third convolutional layer, with ReLU as activation function and same padding
    model.add(
        keras.layers.Conv2D(128, (3, 3), activation="relu",
                            padding="same"))  # [8, 8, 128]
    # add a maxpooling layer to reduce the dimension
    model.add(keras.layers.MaxPooling2D(2, 2))  # [4, 4, 128]
    # add a flatten layer to make the data into a 1-dimensional array
    model.add(keras.layers.Flatten())  # [1024, ]
    # add a fully connected layer, with ReLU as activation function
    model.add(keras.layers.Dense(128, activation="relu"))
    # add a fully connected layer, the output size is the same as the number of classes and
    # softmax as activation function to do multiclass classification
    model.add(keras.layers.Dense(10, activation="softmax"))
    # compile the model, use SGD as the optimizer and categorical crossentropy as the loss function
    model.compile(optimizer=optimizers.SGD(learning_rate=1e-3, momentum=0.9),
                  loss="categorical_crossentropy",
                  metrics=["accuracy"])
    # return the model
    return model
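
A minimal usage sketch, assuming CIFAR-10-style data (the `(32, 32, 3)` input shape and 10-way softmax match that dataset); the labels must be one-hot encoded to pair with the `categorical_crossentropy` loss:

model = build_model_augment()
(x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()
# one-hot encode the integer labels for categorical_crossentropy
y_train = keras.utils.to_categorical(y_train, 10)
y_test = keras.utils.to_categorical(y_test, 10)
model.fit(x_train, y_train, epochs=10, validation_data=(x_test, y_test))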
Code Example #4
transformer_units = [
    projection_dim * 2,  # assumed first entry of the truncated list
    projection_dim,
]  # Size of the transformer layers
transformer_layers = 8
# Size of the dense layers of the final classifier
mlp_head_units = [2048, 1024]
"""
## Use data augmentation
"""

data_augmentation = keras.Sequential(
    [
        layers.Normalization(),
        layers.Resizing(image_size, image_size),
        layers.RandomFlip("horizontal"),
        layers.RandomRotation(factor=0.02),
        layers.RandomZoom(height_factor=0.2, width_factor=0.2),
    ],
    name="data_augmentation",
)
# Compute the mean and the variance of the training data for normalization.
data_augmentation.layers[0].adapt(x_train)
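
In the source script this stage is embedded in the model itself, so the adapted normalization and the random layers run on-graph; a sketch of that pattern (`input_shape` is an assumed name):

inputs = keras.Input(shape=input_shape)
# augmentation runs inside the model, and only during training
augmented = data_augmentation(inputs)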
"""
## Implement multilayer perceptron (MLP)
"""


def mlp(x, hidden_units, dropout_rate):
    for units in hidden_units:
        x = layers.Dense(units, activation=tf.nn.gelu)(x)
        x = layers.Dropout(dropout_rate)(x)
    return x
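
For illustration, a typical call builds the classification head from the snippet's `mlp_head_units` (the `representation` tensor and the dropout rate are assumed values):

features = mlp(representation, hidden_units=mlp_head_units, dropout_rate=0.5)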
Code Example #5
## Quick recipes

### Image data augmentation

Note that image data augmentation layers are only active during training (similarly to
the `Dropout` layer); the short check after the augmentation stage below demonstrates this.
"""

import tensorflow as tf  # needed for the tf.data pipeline below
from tensorflow import keras
from tensorflow.keras import layers

# Create a data augmentation stage with horizontal flipping, rotations, zooms
data_augmentation = keras.Sequential([
    layers.RandomFlip("horizontal"),
    layers.RandomRotation(0.1),
    layers.RandomZoom(0.1),
])
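
To confirm the train-only behavior noted above, the stage can be called with an explicit `training` flag (a small sketch with random inputs):

images = tf.random.uniform((4, 32, 32, 3))
# random layers are active only when training=True; at inference they pass inputs through
print(tf.reduce_all(data_augmentation(images, training=False) == images).numpy())  # True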

# Load some data
(x_train, y_train), _ = keras.datasets.cifar10.load_data()
input_shape = x_train.shape[1:]
classes = 10

# Create a tf.data pipeline of augmented images (and their labels)
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = train_dataset.batch(16).map(lambda x, y:
                                            (data_augmentation(x), y))

# Create a model and train it on the augmented image data
inputs = keras.Input(shape=input_shape)
x = layers.Rescaling(1.0 / 255)(inputs)  # Rescale inputs
# assumed completion of the truncated snippet: attach a backbone and train briefly
outputs = keras.applications.ResNet50(
    weights=None, input_shape=input_shape, classes=classes
)(x)
model = keras.Model(inputs, outputs)
model.compile(optimizer="rmsprop", loss="sparse_categorical_crossentropy")
model.fit(train_dataset, steps_per_epoch=5)
Code Example #6
print(f"Training images: {metadata.splits['train'].num_examples}")
print(f"Test images: {metadata.splits['test'].num_examples}")

"""
## Use Data Augmentation

We will rescale the data to `[0, 1]` and apply simple augmentations to it.
"""

rescale = layers.Rescaling(1.0 / 255)

data_augmentation = tf.keras.Sequential(
    [
        layers.RandomFlip("horizontal_and_vertical"),
        layers.RandomRotation(0.3),
        layers.RandomZoom(0.2),
    ]
)


def prepare(ds, shuffle=False, augment=False):
    # Rescale dataset
    ds = ds.map(lambda x, y: (rescale(x), y), num_parallel_calls=AUTOTUNE)

    if shuffle:
        ds = ds.shuffle(1024)

    # Batch dataset
    ds = ds.batch(batch_size)

    # Use data augmentation only on the training set
    if augment:
        ds = ds.map(lambda x, y: (data_augmentation(x, training=True), y),
                    num_parallel_calls=AUTOTUNE)

    # prefetch to overlap preprocessing with training (assumed tail of the
    # truncated snippet)
    return ds.prefetch(buffer_size=AUTOTUNE)
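
Typical usage on pre-split datasets (the `train_ds`/`val_ds`/`test_ds` names are assumed from the source tutorial):

train_ds = prepare(train_ds, shuffle=True, augment=True)
val_ds = prepare(val_ds)
test_ds = prepare(test_ds)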
Code Example #7
for n in range(30):
    ax = plt.subplot(5, 6, n + 1)
    plt.imshow(img_test[n].astype("uint8"))
    plt.title(class_names[np.argmax(label_test[n])])
    plt.axis("off")
"""
## Augmentation

Define image augmentation using keras preprocessing layers and apply them to the training set.
"""

# Define image augmentation model
image_augmentation = keras.Sequential([
    layers.RandomFlip(mode="horizontal"),
    layers.RandomRotation(factor=0.1),
    layers.RandomZoom(height_factor=(-0.1, 0.0)),
    layers.RandomContrast(factor=0.1),
])

# Apply the augmentations to the training images and plot a few examples
# (calling the Sequential directly applies the random layers, whose `training`
# argument defaults to True)
img_train = image_augmentation(img_train).numpy()

plt.figure(figsize=(16, 12))
for n in range(30):
    ax = plt.subplot(5, 6, n + 1)
    plt.imshow(img_train[n].astype("uint8"))
    plt.title(class_names[np.argmax(label_train[n])])
    plt.axis("off")
"""
## Define model building & training functions
Code Example #8
## Data augmentation

Unlike SimCLR, which randomly picks a single data augmentation function to apply to an
input image, we apply a set of data augmentation functions randomly to the input image.
(You can experiment with other image augmentation techniques by following
the [data augmentation tutorial](https://www.tensorflow.org/tutorials/images/data_augmentation).)
"""

data_augmentation = keras.Sequential([
    layers.RandomTranslation(height_factor=(-0.2, 0.2),
                             width_factor=(-0.2, 0.2),
                             fill_mode="nearest"),
    layers.RandomFlip(mode="horizontal"),
    layers.RandomRotation(factor=0.15, fill_mode="nearest"),
    layers.RandomZoom(height_factor=(-0.3, 0.1),
                      width_factor=(-0.3, 0.1),
                      fill_mode="nearest"),
])
"""
Display a random image
"""

image_idx = np.random.choice(range(x_data.shape[0]))
image = x_data[image_idx]
image_class = classes[y_data[image_idx][0]]
plt.figure(figsize=(3, 3))
plt.imshow(x_data[image_idx].astype("uint8"))
plt.title(image_class)
_ = plt.axis("off")
"""
Display a sample of augmented versions of the image