Example #1
def build_model_augment() -> keras.Sequential:
    # build a sequential model
    model = keras.Sequential()
    # add a data augmentation layer
    data_augmentation = keras.Sequential([
        # random horizontal flip
        layers.RandomFlip("horizontal"),
        # random vertical flip
        layers.RandomFlip("vertical"),
        # random rotation of up to ±20% of a full circle (factor 0.2)
        layers.RandomRotation(0.2),
        # random zoom of up to ±20%
        layers.RandomZoom(0.2),
        # random contrast adjustment of up to ±10%
        layers.RandomContrast(0.1)
    ])
    model.add(data_augmentation)
    # add the first convolutional layer, with ReLU as activation function and same padding
    model.add(
        keras.layers.Conv2D(32, (3, 3),
                            activation="relu",
                            padding="same",
                            input_shape=(32, 32, 3)))  # [32,32, 32]
    # add a maxpooling layer to reduce the dimension
    model.add(keras.layers.MaxPooling2D(2, 2))  # [16, 16, 32]
    # add the second convolutional layer, with ReLU as activation function and same padding
    model.add(
        keras.layers.Conv2D(64, (3, 3), activation="relu",
                            padding="same"))  # [16, 16, 64]
    # add a maxpooling layer to reduce the dimension
    model.add(keras.layers.MaxPooling2D(2, 2))  # [8, 8, 64]
    # add the third convolutional layer, with ReLU as activation function and same padding
    model.add(
        keras.layers.Conv2D(128, (3, 3), activation="relu",
                            padding="same"))  # [8, 8, 128]
    # add a maxpooling layer to reduce the dimension
    model.add(keras.layers.MaxPooling2D(2, 2))  # [4, 4, 128]
    # add a flatten layer to make the data into a 1-dimensional array
    model.add(keras.layers.Flatten())  # [1024, ]
    # add a fully connected layer, with ReLU as activation function
    model.add(keras.layers.Dense(128, activation="relu"))
    # add a fully connected layer, the output size is the same as the number of classes and
    # softmax as activation function to do multiclass classification
    model.add(keras.layers.Dense(10, activation="softmax"))
    # compile the model, use SGD as the optimizer and categorical crossentropy as loss function
    model.compile(optimizer=optimizers.SGD(learning_rate=1e-3, momentum=0.9),
                  loss="categorical_crossentropy",
                  metrics=["accuracy"])
    # return the model
    return model
Example #2
    def __init__(self):
        super().__init__()

        # stores the current probability of an image being augmented
        # (one way of applying it is sketched in the call() method below)
        self.probability = tf.Variable(0.0)

        # the corresponding augmentation names from the paper are shown above each layer
        # the authors show (see figure 4), that the blitting and geometric augmentations
        # are the most helpful in the low-data regime
        self.augmenter = keras.Sequential(
            [
                layers.InputLayer(input_shape=(image_size, image_size, 3)),
                # blitting/x-flip:
                layers.RandomFlip("horizontal"),
                # blitting/integer translation:
                layers.RandomTranslation(
                    height_factor=max_translation,
                    width_factor=max_translation,
                    interpolation="nearest",
                ),
                # geometric/rotation:
                layers.RandomRotation(factor=max_rotation),
                # geometric/isotropic and anisotropic scaling:
                layers.RandomZoom(height_factor=(-max_zoom, 0.0),
                                  width_factor=(-max_zoom, 0.0)),
            ],
            name="adaptive_augmenter",
        )
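
    # Sketch (not part of the original snippet): one common way to use
    # `self.probability` is to replace each image in a batch by its augmented
    # version with that probability, so the augmentation strength can be
    # adapted during training. Names and defaults here are illustrative.
    def call(self, images, training=True):
        if training:
            augmented_images = self.augmenter(images, training=training)
            # one uniform draw per image decides whether that image is augmented
            batch_size = tf.shape(images)[0]
            augmentation_values = tf.random.uniform(
                shape=(batch_size, 1, 1, 1), minval=0.0, maxval=1.0
            )
            augmentation_bools = tf.math.less(augmentation_values, self.probability)
            images = tf.where(augmentation_bools, augmented_images, images)
        return images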
Example #3
def get_augmenter(min_area, brightness, jitter):
    zoom_factor = 1.0 - tf.sqrt(min_area)
    return keras.Sequential([
        keras.Input(shape=(image_size, image_size, image_channels)),
        layers.Rescaling(1 / 255),
        layers.RandomFlip("horizontal"),
        layers.RandomTranslation(zoom_factor / 2, zoom_factor / 2),
        layers.RandomZoom((-zoom_factor, 0.0), (-zoom_factor, 0.0)),
        RandomColorAffine(brightness, jitter),
    ])
Example #4
def get_train_augmentation_model():
    model = keras.Sequential(
        [
            layers.Rescaling(1 / 255.0),
            layers.Resizing(INPUT_SHAPE[0] + 20, INPUT_SHAPE[0] + 20),
            layers.RandomCrop(IMAGE_SIZE, IMAGE_SIZE),
            layers.RandomFlip("horizontal"),
        ],
        name="train_data_augmentation",
    )
    return model
Example #5
def augmenter(brightness, name, scale):
    return keras.Sequential(
        [
            layers.Input(shape=input_shape),
            layers.Rescaling(1 / 255),
            layers.RandomFlip("horizontal"),
            RandomResizedCrop(scale=scale, ratio=(3 / 4, 4 / 3)),
            RandomBrightness(brightness=brightness),
        ],
        name=name,
    )
Example #6
    def build(self, hp, inputs=None):
        input_node = nest.flatten(inputs)[0]
        output_node = input_node

        # Translate
        translation_factor = utils.add_to_hp(self.translation_factor, hp)
        if translation_factor not in [0, (0, 0)]:
            height_factor, width_factor = self._get_fraction_value(
                translation_factor
            )
            output_node = layers.RandomTranslation(height_factor, width_factor)(
                output_node
            )

        # Flip
        horizontal_flip = self.horizontal_flip
        if horizontal_flip is None:
            horizontal_flip = hp.Boolean("horizontal_flip", default=True)
        vertical_flip = self.vertical_flip
        if vertical_flip is None:
            vertical_flip = hp.Boolean("vertical_flip", default=True)
        if not horizontal_flip and not vertical_flip:
            flip_mode = ""
        elif horizontal_flip and vertical_flip:
            flip_mode = "horizontal_and_vertical"
        elif horizontal_flip and not vertical_flip:
            flip_mode = "horizontal"
        elif not horizontal_flip and vertical_flip:
            flip_mode = "vertical"
        if flip_mode != "":
            output_node = layers.RandomFlip(mode=flip_mode)(output_node)

        # Rotate
        rotation_factor = utils.add_to_hp(self.rotation_factor, hp)
        if rotation_factor != 0:
            output_node = layers.RandomRotation(rotation_factor)(output_node)

        # Zoom
        zoom_factor = utils.add_to_hp(self.zoom_factor, hp)
        if zoom_factor not in [0, (0, 0)]:
            height_factor, width_factor = self._get_fraction_value(zoom_factor)
            # TODO: Add back RandomZoom when it is ready.
            # output_node = layers.RandomZoom(
            # height_factor, width_factor)(output_node)

        # Contrast
        contrast_factor = utils.add_to_hp(self.contrast_factor, hp)
        if contrast_factor not in [0, (0, 0)]:
            output_node = layers.RandomContrast(contrast_factor)(output_node)

        return output_node
Example #7
    plt.title("{}".format(format_label(label)))
    plt.axis("off")
"""
### Data augmentation

We can use the preprocessing layers APIs for image augmentation.
"""

from tensorflow.keras.models import Sequential
from tensorflow.keras import layers

img_augmentation = Sequential(
    [
        layers.RandomRotation(factor=0.15),
        layers.RandomTranslation(height_factor=0.1, width_factor=0.1),
        layers.RandomFlip(),
        layers.RandomContrast(factor=0.1),
    ],
    name="img_augmentation",
)
"""
This `Sequential` model object can be used both as a part of
the model we later build, and as a function to preprocess
data before feeding it into the model. Using it as a function makes
it easy to visualize the augmented images. Here we plot 9 augmented
versions of a given image.
"""

for image, label in ds_train.take(1):
    for i in range(9):
        ax = plt.subplot(3, 3, i + 1)
        # add a batch dimension, apply the augmentation, and show the result
        aug_img = img_augmentation(image[None, ...])
        plt.imshow(aug_img[0].numpy().astype("uint8"))
Example #8
        plt.imshow(images[i].numpy().astype("uint8"))
        plt.title(int(labels[i]))
        plt.axis("off")

"""
## Using image data augmentation

When you don't have a large image dataset, it's a good practice to artificially
introduce sample diversity by applying random yet realistic transformations to the
training images, such as random horizontal flipping or small random rotations. This
helps expose the model to different aspects of the training data while slowing down
overfitting.
"""

data_augmentation = keras.Sequential(
    [
        layers.RandomFlip("horizontal"),
        layers.RandomRotation(0.1),
    ]
)

"""
Let's visualize what the augmented samples look like by applying `data_augmentation`
repeatedly to the first image in the dataset:
"""

plt.figure(figsize=(10, 10))
for images, _ in train_ds.take(1):
    for i in range(9):
        augmented_images = data_augmentation(images)
        ax = plt.subplot(3, 3, i + 1)
        plt.imshow(augmented_images[0].numpy().astype("uint8"))
        plt.axis("off")
Example #9
# transformations (crop and horizontal flip) to them each time we are
# looping over them. This way, we "augment" our training dataset to
# contain more data.
#
# The augmentation transformations are implemented as preprocessing
# layers in Keras. There are various such layers readily available,
# see https://keras.io/guides/preprocessing_layers/ for more
# information.
#
# ### Initialization

inputs = keras.Input(shape=[256, 256, 3])
x = layers.Rescaling(scale=1. / 255)(inputs)

x = layers.RandomCrop(160, 160)(x)
x = layers.RandomFlip(mode="horizontal")(x)

x = layers.Conv2D(32, (3, 3), activation='relu')(x)
x = layers.MaxPooling2D(pool_size=(2, 2))(x)

x = layers.Conv2D(32, (3, 3), activation='relu')(x)
x = layers.MaxPooling2D(pool_size=(2, 2))(x)

x = layers.Conv2D(64, (3, 3), activation='relu')(x)
x = layers.MaxPooling2D(pool_size=(2, 2))(x)

x = layers.Flatten()(x)
x = layers.Dense(64, activation='relu')(x)
x = layers.Dropout(0.5)(x)
outputs = layers.Dense(37, activation='softmax')(x)
Example #10
    label_mode='categorical',
)

# have a look at some of the images:
import matplotlib.pyplot as plt
plt.figure(figsize=(10, 10))
for images, labels in train_ds.take(1):
    for i in range(16):
        ax = plt.subplot(4, 4, i + 1)
        plt.imshow(images[i].numpy().astype("uint8"))
        #plt.title(int(labels[i]))
        plt.title(int(np.where(labels[i] == 1)[0]))
        plt.axis("off")

data_augmentation = keras.Sequential([
    layers.RandomFlip("horizontal"),
    layers.RandomRotation(0.1),
])

# have a look at what the image augmentation is doing:
plt.figure(figsize=(10, 10))
for images, _ in train_ds.take(1):
    for i in range(9):
        augmented_images = data_augmentation(images)
        ax = plt.subplot(3, 3, i + 1)
        plt.imshow(augmented_images[0].numpy().astype("uint8"))
        plt.axis("off")

# Configure the dataset for performance
# Let's make sure to use buffered prefetching so we can yield data from disk without I/O becoming blocking:
#train_ds = train_ds.prefetch( buffer_size=32 )
def main():
    # Hyperparameters and constraints
    positional_emb = True
    conv_layers = 2
    projection_dim = 128

    num_heads = 2
    transformer_units = [
        projection_dim,
        projection_dim,
    ]
    transformer_layers = 2
    stochastic_depth_rate = 0.1

    learning_rate = 0.001
    weight_decay = 0.0001
    batch_size = 128
    num_epochs = 30
    image_size = 32

    # Load CIFAR-10 dataset
    num_classes = 10
    input_shape = (32, 32, 3)

    (x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()

    y_train = keras.utils.to_categorical(y_train, num_classes)
    y_test = keras.utils.to_categorical(y_test, num_classes)

    print(f"x_train shape: {x_train.shape} - y_train shape: {y_train.shape}")
    print(f"x_test shape: {x_test.shape} - y_test shape: {y_test.shape}")

    # The CCT tokenizer
    # The first recipe introduced by the CCT authors is the tokenizer
    # for processing the images. In a standard ViT, images are
    # organized into uniform non-overlapping patches. This eliminates
    # the boundary-level information present in between different
    # patches. This is important for a neural network to effectively
    # exploit the locality information. The figure below presents an
    # illustration of how images are organized into patches.
    # We already know that convolutions are quite good at exploiting
    # locality information. So, based on this, the authors introduced
    # an all-convolutional mini-network to produce image patches.
    class CCTTokenizer(layers.Layer):
        def __init__(self,
                     kernel_size=3,
                     stride=1,
                     padding=1,
                     pooling_kernel_size=3,
                     pooling_stride=2,
                     num_conv_layers=conv_layers,
                     num_output_channels=[64, 128],
                     positional_emb=positional_emb,
                     **kwargs):
            super(CCTTokenizer, self).__init__(**kwargs)

            # This is the tokenizer.
            self.conv_model = keras.Sequential()
            for i in range(num_conv_layers):
                self.conv_model.add(
                    layers.Conv2D(
                        num_output_channels[i],
                        kernel_size,
                        stride,
                        padding="valid",
                        use_bias=False,
                        activation="relu",
                        kernel_initializer="he_normal",
                    ))
                self.conv_model.add(layers.ZeroPadding2D(padding))
                self.conv_model.add(
                    layers.MaxPooling2D(pooling_kernel_size, pooling_stride,
                                        "same"))
            self.positional_emb = positional_emb

        def call(self, images):
            outputs = self.conv_model(images)

            # After passing the images through the mini-network the
            # spatial dimensions are flattened to form sequences.
            reshaped = tf.reshape(
                outputs, (-1, tf.shape(outputs)[1] * tf.shape(outputs)[2],
                          tf.shape(outputs)[-1]))
            return reshaped

        def positional_embedding(self, image_size):
            # Positional embeddings are optional in CCT. Here, we
            # calculate the number of sequences and initialize an
            # 'Embedding' layer to compute the positional embeddings
            # later.
            if self.positional_emb:
                dummy_inputs = tf.ones((1, image_size, image_size, 3))
                dummy_outputs = self.call(dummy_inputs)
                sequence_length = tf.shape(dummy_outputs)[1]
                projection_dim = tf.shape(dummy_outputs)[-1]

                embed_layer = layers.Embedding(input_dim=sequence_length,
                                               output_dim=projection_dim)
                return embed_layer, sequence_length
            else:
                return None
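
    # A quick shape sanity check for the tokenizer described above (a sketch,
    # not part of the original script): with the default settings, a batch of
    # 32x32x3 images becomes a sequence of 8 * 8 = 64 tokens with 128 channels.
    demo_tokenizer = CCTTokenizer()
    print(demo_tokenizer(tf.ones((1, image_size, image_size, 3))).shape)  # (1, 64, 128)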

    # Stochastic depth for regularization
    # Stochastic depth is a regularization technique that randomly
    # drops a set of layers. During inference, the layers are kept as
    # they are. It is very similar to Dropout, except that it
    # operates on a block of layers rather than on individual nodes
    # inside a layer. In CCT, stochastic depth is used just
    # before the residual blocks of a transformer encoder.
    # Referred from: github.com:rwightman/pytorch-image-models.
    class StochasticDepth(layers.Layer):
        def __init__(self, drop_prop, **kwargs):
            super(StochasticDepth, self).__init__(**kwargs)
            self.drop_prob = drop_prop

        def call(self, x, training=None):
            if training:
                keep_prob = 1 - self.drop_prob
                shape = (tf.shape(x)[0], ) + (1, ) * (len(tf.shape(x)) - 1)
                random_tensor = keep_prob + tf.random.uniform(shape, 0, 1)
                random_tensor = tf.floor(random_tensor)
                return (x / keep_prob) * random_tensor
            return x
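
    # A small sanity check for StochasticDepth (sketch, not in the original
    # script): during training each sample's residual branch is either zeroed
    # (with probability drop_prop) or scaled by 1/keep_prob, so the mean of the
    # output below stays close to 1.0.
    demo_sd = StochasticDepth(drop_prop=0.5)
    print(float(tf.reduce_mean(demo_sd(tf.ones((1000, 8)), training=True))))  # ~1.0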

    # MLP for the transformer encoder
    def mlp(x, hidden_units, dropout_rate):
        for units in hidden_units:
            x = layers.Dense(units, activation=tf.nn.gelu)(x)
            x = layers.Dropout(dropout_rate)(x)
        return x

    # Data augmentation
    # In the original paper, the authors use AutoAugment to induce
    # stronger regularization. For this example, we use standard
    # geometric augmentations like random cropping and flipping.
    # Note the rescaling layer: these preprocessing layers have a
    # pre-defined, deterministic inference behavior (checked below).
    data_augmentation = keras.Sequential(
        [
            layers.Rescaling(scale=1.0 / 255),
            layers.RandomCrop(image_size, image_size),
            layers.RandomFlip('horizontal'),
        ],
        name="data_augmentation",
    )
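
    # Quick check of the pre-defined inference behavior mentioned above (a
    # sketch, not part of the original script): with training=False the random
    # crop and flip are disabled, so repeated calls give identical outputs.
    sample_images = x_train[:2].astype("float32")
    out_a = data_augmentation(sample_images, training=False)
    out_b = data_augmentation(sample_images, training=False)
    tf.debugging.assert_near(out_a, out_b)  # deterministic at inference time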

    # The final CCT model
    # Another recipe introduced in CCT is attention pooling or sequence
    # pooling. In ViT, only the feature map corresponding to the class
    # token is pooled and is then used for the subsequent
    # classification task (or any other downstream task). In CCT,
    # outputs from the transformer encoder are weighted and then
    # passed on to the final task-specific layer (in this example, we
    # do classification).
    def create_cct_model(image_size=image_size,
                         input_shape=input_shape,
                         num_heads=num_heads,
                         projection_dim=projection_dim,
                         transformer_units=transformer_units):
        inputs = layers.Input(input_shape)

        # Augment data.
        augmented = data_augmentation(inputs)

        # Encode patches.
        cct_tokenizer = CCTTokenizer()
        encoded_patches = cct_tokenizer(augmented)

        # Apply positional embedding.
        if positional_emb:
            pos_embed, seq_length = cct_tokenizer.positional_embedding(
                image_size)
            positions = tf.range(start=0, limit=seq_length, delta=1)
            position_embeddings = pos_embed(positions)
            encoded_patches += position_embeddings

        # Calculate Stochastic Depth probabilities.
        dpr = [
            x
            for x in np.linspace(0, stochastic_depth_rate, transformer_layers)
        ]

        # Create multiple layers of the transformer block.
        for i in range(transformer_layers):
            # Layer normalization 1.
            x1 = layers.LayerNormalization(epsilon=1e-5)(encoded_patches)

            # Create a multi-head attention layer.
            attention_output = layers.MultiHeadAttention(
                num_heads=num_heads, key_dim=projection_dim, dropout=0.1)(x1,
                                                                          x1)

            # Skip connection 1.
            attention_output = StochasticDepth(dpr[i])(attention_output)
            x2 = layers.Add()([attention_output, encoded_patches])

            # Layer normalization 2.
            x3 = layers.LayerNormalization(epsilon=1e-5)(x2)

            # MLP.
            x3 = mlp(x3, hidden_units=transformer_units, dropout_rate=0.1)

            # Skip connection 2.
            x3 = StochasticDepth(dpr[i])(x3)
            encoded_patches = layers.Add()([x3, x2])

        # Apply sequence pooling.
        representation = layers.LayerNormalization(
            epsilon=1e-5)(encoded_patches)
        attention_weights = tf.nn.softmax(layers.Dense(1)(representation),
                                          axis=1)
        weighted_representation = tf.matmul(attention_weights,
                                            representation,
                                            transpose_a=True)
        weighted_representation = tf.squeeze(weighted_representation, -2)

        # Classify outputs.
        logits = layers.Dense(num_classes)(weighted_representation)

        # Create the keras model.
        model = keras.Model(inputs=inputs, outputs=logits)
        return model
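
    # Sequence pooling in isolation (a sketch with made-up shapes): a Dense(1)
    # layer scores every token, softmax over the sequence axis turns the scores
    # into weights, and the weighted sum collapses (batch, seq_len, dim) into
    # (batch, dim), replacing the class token used in a standard ViT.
    demo_tokens = tf.random.normal((2, 64, projection_dim))
    demo_weights = tf.nn.softmax(layers.Dense(1)(demo_tokens), axis=1)  # (2, 64, 1)
    demo_pooled = tf.squeeze(
        tf.matmul(demo_weights, demo_tokens, transpose_a=True), -2)  # (2, 128)
    print(demo_pooled.shape)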

    # Model training and evaluation
    def run_experiment(model):
        optimizer = tfa.optimizers.AdamW(learning_rate=0.001,
                                         weight_decay=0.0001)

        model.compile(
            optimizer=optimizer,
            loss=keras.losses.CategoricalCrossentropy(from_logits=True,
                                                      label_smoothing=0.1),
            metrics=[
                keras.metrics.CategoricalAccuracy(name="accuracy"),
                keras.metrics.TopKCategoricalAccuracy(5, name="top-5-accuracy")
            ],
        )

        checkpoint_filepath = "./tmp/checkpoint"
        checkpoint_callback = keras.callbacks.ModelCheckpoint(
            checkpoint_filepath,
            monitor="val_accuracy",
            save_best_only=True,
            save_weights_only=True,
        )

        history = model.fit(
            x=x_train,
            y=y_train,
            batch_size=batch_size,
            epochs=num_epochs,
            validation_split=0.1,
            callbacks=[checkpoint_callback],
        )

        model.load_weights(checkpoint_filepath)
        _, accuracy, top_5_accuracy = model.evaluate(x_test, y_test)
        print(f"Test accuracy: {round(accuracy * 100, 2)}%")
        print(f"Test top 5 accuracy: {round(top_5_accuracy * 100, 2)}%")

        return history

    cct_model = create_cct_model()
    history = run_experiment(cct_model)

    # Now visualize the training progress of the model.
    '''
	plt.plot(history.history["loss"], label="train_loss")
	plt.plot(history.history["val_loss"], label="val_loss")
	plt.xlabel("Epochs")
	plt.ylabel("Loss")
	plt.title("Train and Validation Losses Over Epochs", fontsize=14)
	plt.legend()
	plt.grid()
	plt.show()
	'''

    # The CCT model trained above has just 0.4 million parameters, and
    # it gets to ~78% top-1 accuracy within 30 epochs. The plot above
    # shows no signs of overfitting either. This means we can train
    # this network for longer (perhaps with a bit more regularization)
    # and may obtain even better performance. The performance can be
    # further improved with additional recipes like a cosine decay
    # learning rate schedule, or other data augmentation techniques
    # such as AutoAugment, MixUp or CutMix. With these modifications,
    # the authors report 95.1% top-1 accuracy on the CIFAR-10 dataset.
    # The authors also present a number of experiments to study how the
    # number of convolution blocks, transformer layers, etc. affect the
    # final performance of CCTs.
    # For comparison, a ViT model takes about 4.7 million parameters
    # and 100 epochs of training to reach a top-1 accuracy of 78.22% on
    # the CIFAR-10 dataset. You can refer to this notebook to learn
    # about the experimental setup.
    # The authors also demonstrate the performance of Compact
    # Convolutional Transformers on NLP tasks, where they report
    # competitive results.

    # Exit the program.
    exit(0)
print(f"Image shape: {metadata.features['image'].shape}")
print(f"Training images: {metadata.splits['train'].num_examples}")
print(f"Test images: {metadata.splits['test'].num_examples}")

"""
## Use Data Augmentation

We will rescale the data to `[0, 1]` and perform simple augmentations on our data.
"""

rescale = layers.Rescaling(1.0 / 255)

data_augmentation = tf.keras.Sequential(
    [
        layers.RandomFlip("horizontal_and_vertical"),
        layers.RandomRotation(0.3),
        layers.RandomZoom(0.2),
    ]
)


def prepare(ds, shuffle=False, augment=False):
    # Rescale dataset
    ds = ds.map(lambda x, y: (rescale(x), y), num_parallel_calls=AUTOTUNE)

    if shuffle:
        ds = ds.shuffle(1024)

    # Batch dataset
    ds = ds.batch(batch_size)

    # Apply the data augmentation only when requested (i.e. on the training set)
    if augment:
        ds = ds.map(lambda x, y: (data_augmentation(x, training=True), y),
                    num_parallel_calls=AUTOTUNE)

    # Use buffered prefetching so reading from disk does not block training
    return ds.prefetch(buffer_size=AUTOTUNE)
Example #13
def main():
    # Download the dataset
    # We use the Flickr8K dataset for this tutorial. This dataset
    # comprises over 8,000 images, each paired with five
    # different captions.
    #!wget -q https://github.com/jbrownlee/Datasets/releases/download/Flickr8k/Flickr8k_Dataset.zip
    #!wget -q https://github.com/jbrownlee/Datasets/releases/download/Flickr8k/Flickr8k_text.zip
    #!unzip -qq Flickr8k_Dataset.zip
    #!unzip -qq Flickr8k_text.zip
    #!rm Flickr8k_Dataset.zip Flickr8k_text.zip

    # Path to images.
    IMAGES_PATH = "Flicker8k_Dataset"

    # Desired image dimensions.
    IMAGE_SIZE = (299, 299)

    # Vocabulary size.
    VOCAB_SIZE = 10000

    # Fixed length allowed for any sequence.
    SEQ_LENGTH = 25

    # Dimension for the image embeddings and token embeddings.
    EMBED_DIM = 512

    # Pre-layer units in the feed-forward network.
    FF_DIM = 512

    # Other training parameters.
    BATCH_SIZE = 64
    EPOCHS = 30
    AUTOTUNE = tf.data.AUTOTUNE

    # Preparing the dataset.
    def load_captions_data(filename):
        # Load captions (text) data and map them to corresponding
        # images.
        # @param: filename, path to the text file containing caption
        #	data.
        # @return: caption_mapping, dictionary mapping image names and
        #	the corresponding captions.
        # @return: text_data, list containing all the available
        #	captions.
        with open(filename) as caption_file:
            caption_data = caption_file.readlines()
            caption_mapping = {}
            text_data = []
            images_to_skip = set()

            for line in caption_data:
                line = line.rstrip("\n")

                # Image name and captions are separated using a tab.
                img_name, caption = line.split("\t")

                # Each image is repeated five times for the five
                # different captions. Each image name has a suffix
                # '#(caption_number)'.
                img_name = img_name.split("#")[0]
                img_name = os.path.join(IMAGES_PATH, img_name.strip())

                # Remove captions that are either too short or too
                # long.
                tokens = caption.strip().split()

                if len(tokens) < 5 or len(tokens) > SEQ_LENGTH:
                    images_to_skip.add(img_name)
                    continue

                if img_name.endswith("jpg") and img_name not in images_to_skip:
                    # Add a start and end token to each caption.
                    caption = "<start> " + caption.strip() + " <end>"
                    text_data.append(caption)

                    if img_name in caption_mapping:
                        caption_mapping[img_name].append(caption)
                    else:
                        caption_mapping[img_name] = [caption]

            for img_name in images_to_skip:
                if img_name in caption_mapping:
                    del caption_mapping[img_name]

            return caption_mapping, text_data

    def train_val_split(caption_data, train_size=0.8, shuffle=True):
        # Split the captioning dataset into train and validation sets.
        # @param: caption_data (dict), dictionary containing the mapped
        #	data.
        # @param: train_size (float), fraction of all the full dataset
        #	to use as training data.
        # @param: shuffle (bool), whether to shuffle the dataset before
        #	splitting.
        # @return: training and validation datasets as two separated
        #	dicts.
        # 1) Get the list of all image names.
        all_images = list(caption_data.keys())

        # 2) Shuffle if necessary.
        if shuffle:
            np.random.shuffle(all_images)

        # 3) Split into training and validation sets.
        train_size = int(len(caption_data) * train_size)

        training_data = {
            img_name: caption_data[img_name]
            for img_name in all_images[:train_size]
        }
        validation_data = {
            img_name: caption_data[img_name]
            for img_name in all_images[train_size:]
        }

        # 4) Return the splits.
        return training_data, validation_data

    # Load the dataset.
    captions_mapping, text_data = load_captions_data("Flickr8k.token.txt")

    # Split the dataset into training and validation sets.
    train_data, valid_data = train_val_split(captions_mapping)
    print("Number of training samples: ", len(train_data))
    print("Number of validation samples: ", len(valid_data))

    # Vectorizing the text data
    # Use the TextVectorization layer to vectorize the text data, that
    # is to say, to turn the original strings into integer sequences
    # where each integer represents the index of a word in a
    # vocabulary. Use a custom string standardization scheme (in this
    # case, strip punctuation characters except < and >) and the
    # default splitting scheme (split on whitespace).
    def custom_standardization(input_string):
        lowercase = tf.strings.lower(input_string)
        return tf.strings.regex_replace(lowercase,
                                        "[%s]" % re.escape(strip_chars), "")

    strip_chars = "!\"#$%&'()*+,-./;<=>?@[\]^_`{|}~"
    strip_chars = strip_chars.replace("<", "")
    strip_chars = strip_chars.replace(">", "")

    vectorization = TextVectorization(
        max_tokens=VOCAB_SIZE,
        output_mode="int",
        output_sequence_length=SEQ_LENGTH,
        standardize=custom_standardization,
    )
    vectorization.adapt(text_data)
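
    # A quick illustration (sketch, with a made-up caption): after adapt(), the
    # layer turns a caption string into a fixed-length integer sequence,
    # zero-padded up to SEQ_LENGTH.
    demo_vector = vectorization(["<start> a dog runs across the grass <end>"])
    print(demo_vector.shape)  # (1, 25), i.e. (1, SEQ_LENGTH)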

    # Data augmentation for image data.
    image_augmentation = keras.Sequential([
        layers.RandomFlip("horizontal"),
        layers.RandomRotation(0.2),
        layers.RandomContrast(0.3),
    ])

    # Building a tf.data.Dataset pipeline for training
    # Generate pairs of images and corresponding captions using a
    # tf.data.Dataset object. The pipeline consists of two steps:
    # 1) Read the image from the disk.
    # 2) Tokenize all the five captions corresponding to the image.
    def decode_and_resize(img_path):
        img = tf.io.read_file(img_path)
        img = tf.image.decode_jpeg(img, channels=3)
        img = tf.image.resize(img, IMAGE_SIZE)
        img = tf.image.convert_image_dtype(img, tf.float32)
        return img

    def process_input(img_path, captions):
        return decode_and_resize(img_path), vectorization(captions)

    def make_dataset(images, captions):
        '''
		if split == "train":
			img_dataset = tf.data.Dataset.from_tensor_slices(images).map(
				read_train_image, num_parallel_calls=AUTOTUNE
			)
		else:
			img_dataset = tf.data.Dataset.from_tensor_slices(images).map(
				read_valid_image, num_parallel_calls=AUTOTUNE
			)

		cap_dataset = tf.data.Dataset.from_tensor_slices(captions).map(
			vectorization, num_parallel_calls=AUTOTUNE
		)

		dataset = tf.data.Dataset.zip((img_dataset, cap_dataset))
		dataset = dataset.batch(BATCH_SIZE).shuffle(256).prefetch(AUTOTUNE)
		return dataset
		'''
        dataset = tf.data.Dataset.from_tensor_slices((images, captions))
        dataset = dataset.shuffle(len(images))
        dataset = dataset.map(process_input, num_parallel_calls=AUTOTUNE)
        dataset = dataset.batch(BATCH_SIZE).prefetch(AUTOTUNE)
        return dataset

    # Pass the list of images and the list of corresponding captions.
    train_dataset = make_dataset(list(train_data.keys()),
                                 list(train_data.values()))
    valid_dataset = make_dataset(list(valid_data.keys()),
                                 list(valid_data.values()))
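
    # A quick look at the pipeline output (sketch, not part of the original
    # script): each batch pairs a float image tensor with the five tokenized
    # captions of the corresponding image.
    for demo_images, demo_captions in train_dataset.take(1):
        print(demo_images.shape)    # (BATCH_SIZE, 299, 299, 3)
        print(demo_captions.shape)  # (BATCH_SIZE, 5, SEQ_LENGTH)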

    # Building the model
    # The image captioning architecture consists of three models:
    # 1) A CNN: Used to extract the image features.
    # 2) A TransformerEncoder: The extracted image features are then
    #	passed to a Transformer based encoder that generates a new
    #	representation of the inputs.
    # 3) A TransformerDecoder: This model takes the encoder output and
    #	the text data (sequences) as inputs and tries to learn to
    #	generate the caption.
    def get_cnn_model():
        base_model = efficientnet.EfficientNetB0(
            input_shape=(*IMAGE_SIZE, 3),
            include_top=False,
            weights="imagenet",
        )

        # Freeze the feature extractor.
        base_model.trainable = False
        base_model_out = base_model.output
        base_model_out = layers.Reshape(
            (-1, base_model_out.shape[-1]))(base_model_out)
        cnn_model = keras.models.Model(base_model.input, base_model_out)
        return cnn_model

    class TransformerEncoderBlock(layers.Layer):
        def __init__(self, embed_dim, dense_dim, num_heads, **kwargs):
            super().__init__(**kwargs)
            self.embed_dim = embed_dim
            self.dense_dim = dense_dim
            self.num_heads = num_heads
            self.attention_1 = layers.MultiHeadAttention(num_heads=num_heads,
                                                         key_dim=embed_dim,
                                                         dropout=0.0)
            self.layernorm1 = layers.LayerNormalization()
            self.layernorm2 = layers.LayerNormalization()
            self.dense_1 = layers.Dense(embed_dim, activation="relu")

        def call(self, inputs, training, mask=None):
            inputs = self.layernorm1(inputs)
            inputs = self.dense_1(inputs)

            attention_output_1 = self.attention_1(
                query=inputs,
                value=inputs,
                key=inputs,
                attention_mask=None,
                training=training,
            )

            out_1 = self.layernorm2(inputs + attention_output_1)
            return out_1

    class PositionalEmbedding(layers.Layer):
        def __init__(self, sequence_length, vocab_size, embed_dim, **kwargs):
            super().__init__(**kwargs)
            self.token_embeddings = layers.Embedding(input_dim=vocab_size,
                                                     output_dim=embed_dim)
            self.position_embeddings = layers.Embedding(
                input_dim=sequence_length, output_dim=embed_dim)
            self.sequence_length = sequence_length
            self.vocab_size = vocab_size
            self.embed_dim = embed_dim
            self.embed_scale = tf.math.sqrt(tf.cast(embed_dim, tf.float32))

        def call(self, inputs):
            length = tf.shape(inputs)[-1]
            positions = tf.range(start=0, limit=length, delta=1)
            embedded_tokens = self.token_embeddings(inputs)
            embedded_tokens = embedded_tokens * self.embed_scale
            embedded_positions = self.position_embeddings(positions)
            return embedded_tokens + embedded_positions

        def compute_mask(self, inputs, mask=None):
            return tf.math.not_equal(inputs, 0)

    class TransformerDecoderBlock(layers.Layer):
        def __init__(self, embed_dim, ff_dim, num_heads, **kwargs):
            super().__init__(**kwargs)
            self.embed_dim = embed_dim
            self.ff_dim = ff_dim
            self.num_heads = num_heads
            self.attention_1 = layers.MultiHeadAttention(num_heads=num_heads,
                                                         key_dim=embed_dim,
                                                         dropout=0.1)
            self.attention_2 = layers.MultiHeadAttention(num_heads=num_heads,
                                                         key_dim=embed_dim,
                                                         dropout=0.1)
            self.ffn_layer_1 = layers.Dense(ff_dim, activation="relu")
            self.ffn_layer_2 = layers.Dense(embed_dim)

            self.layernorm_1 = layers.LayerNormalization()
            self.layernorm_2 = layers.LayerNormalization()
            self.layernorm_3 = layers.LayerNormalization()

            self.embedding = PositionalEmbedding(embed_dim=EMBED_DIM,
                                                 sequence_length=SEQ_LENGTH,
                                                 vocab_size=VOCAB_SIZE)
            self.out = layers.Dense(VOCAB_SIZE, activation="softmax")

            self.dropout_1 = layers.Dropout(0.3)
            self.dropout_2 = layers.Dropout(0.5)
            self.supports_masking = True

        def call(self, inputs, encoder_outputs, training, mask=None):
            inputs = self.embedding(inputs)
            causal_mask = self.get_causal_attention_mask(inputs)

            if mask is not None:
                padding_mask = tf.cast(mask[:, :, tf.newaxis], dtype=tf.int32)
                combined_mask = tf.cast(mask[:, tf.newaxis, :], dtype=tf.int32)
                combined_mask = tf.minimum(combined_mask, causal_mask)

            attention_output_1 = self.attention_1(
                query=inputs,
                value=inputs,
                key=inputs,
                attention_mask=combined_mask,
                training=training,
            )
            out_1 = self.layernorm_1(inputs + attention_output_1)

            attention_output_2 = self.attention_2(
                query=out_1,
                value=encoder_outputs,
                key=encoder_outputs,
                attention_mask=padding_mask,
                training=training,
            )
            out_2 = self.layernorm_2(out_1 + attention_output_2)

            ffn_out = self.ffn_layer_1(out_2)
            ffn_out = self.dropout_1(ffn_out, training=training)
            ffn_out = self.ffn_layer_2(ffn_out)

            ffn_out = self.layernorm_3(ffn_out + out_2, training=training)
            ffn_out = self.dropout_2(ffn_out, training=training)
            preds = self.out(ffn_out)
            return preds

        def get_causal_attention_mask(self, inputs):
            input_shape = tf.shape(inputs)
            batch_size, sequence_length = input_shape[0], input_shape[1]
            i = tf.range(sequence_length)[:, tf.newaxis]
            j = tf.range(sequence_length)
            mask = tf.cast(i >= j, dtype="int32")
            mask = tf.reshape(mask, (1, input_shape[1], input_shape[1]))
            mult = tf.concat(
                [
                    tf.expand_dims(batch_size, -1),
                    tf.constant([1, 1], dtype=tf.int32)
                ],
                axis=0,
            )
            return tf.tile(mask, mult)

    class ImageCaptioningModel(keras.Model):
        def __init__(self,
                     cnn_model,
                     encoder,
                     decoder,
                     num_captions_per_image=5,
                     image_aug=None):
            super().__init__()
            self.cnn_model = cnn_model
            self.encoder = encoder
            self.decoder = decoder
            self.loss_tracker = keras.metrics.Mean(name="loss")
            self.acc_tracker = keras.metrics.Mean(name="accuracy")
            self.num_captions_per_image = num_captions_per_image
            self.image_aug = image_aug

        def calculate_loss(self, y_true, y_pred, mask):
            loss = self.loss(y_true, y_pred)
            mask = tf.cast(mask, dtype=loss.dtype)
            loss *= mask
            return tf.reduce_sum(loss) / tf.reduce_sum(mask)

        def calculate_accuracy(self, y_true, y_pred, mask):
            accuracy = tf.equal(y_true, tf.argmax(y_pred, axis=2))
            accuracy = tf.math.logical_and(mask, accuracy)
            accuracy = tf.cast(accuracy, dtype=tf.float32)
            mask = tf.cast(mask, dtype=tf.float32)
            return tf.reduce_sum(accuracy) / tf.reduce_sum(mask)

        def _compute_caption_loss_and_acc(self,
                                          img_embed,
                                          batch_seq,
                                          training=True):
            encoder_out = self.encoder(img_embed, training=training)
            batch_seq_inp = batch_seq[:, :-1]
            batch_seq_true = batch_seq[:, 1:]
            mask = tf.math.not_equal(batch_seq_true, 0)
            batch_seq_pred = self.decoder(batch_seq_inp,
                                          encoder_out,
                                          training=training,
                                          mask=mask)
            loss = self.calculate_loss(batch_seq_true, batch_seq_pred, mask)
            acc = self.calculate_accuracy(batch_seq_true, batch_seq_pred, mask)
            return loss, acc

        def train_step(self, batch_data):
            batch_img, batch_seq = batch_data
            batch_loss = 0
            batch_acc = 0

            if self.image_aug:
                batch_img = self.image_aug(batch_img)

            # 1) Get image embeddings.
            img_embed = self.cnn_model(batch_img)

            # 2) Pass each of the five captions one by one to the
            # decoder along with the encoder outputs and compute the
            # loss as well as accuracy for each caption.
            for i in range(self.num_captions_per_image):
                with tf.GradientTape() as tape:
                    loss, acc = self._compute_caption_loss_and_acc(
                        img_embed, batch_seq[:, i, :], training=True)

                    # 3) Update loss and accuracy.
                    batch_loss += loss
                    batch_acc += acc

                # 4) Get the list of all the trainable weights.
                train_vars = (self.encoder.trainable_variables +
                              self.decoder.trainable_variables)

                # 5) Get the gradients.
                grads = tape.gradient(loss, train_vars)

                # 6) Update the trainable weights.
                self.optimizer.apply_gradients(zip(grads, train_vars))

            # 7) Update the trackers.
            batch_acc /= float(self.num_captions_per_image)
            self.loss_tracker.update_state(batch_loss)
            self.acc_tracker.update_state(batch_acc)

            # 8) Return the loss and accuracy values.
            return {
                "loss": self.loss_tracker.result(),
                "acc": self.acc_tracker.result()
            }

        def test_step(self, batch_data):
            batch_img, batch_seq = batch_data
            batch_loss = 0
            batch_acc = 0

            # 1) Get image embeddings.
            img_embed = self.cnn_model(batch_img)

            # 2) Pass each of the five captions one by one to the
            # decoder along with the encoder outputs and compute the
            # loss as well as accuracy for each caption.
            for i in range(self.num_captions_per_image):
                loss, acc = self._compute_caption_loss_and_acc(img_embed,
                                                               batch_seq[:,
                                                                         i, :],
                                                               training=False)

                # 3) Update loss and accuracy.
                batch_loss += loss
                batch_acc += acc

            batch_acc /= float(self.num_captions_per_image)

            # 4) Update the trackers.
            self.loss_tracker.update_state(batch_loss)
            self.acc_tracker.update_state(batch_acc)

            # 5) Return the loss and accuracy values.
            return {
                "loss": self.loss_tracker.result(),
                "acc": self.acc_tracker.result()
            }

        @property
        def metrics(self):
            # List the metrics here so the 'reset_states()' can be
            # called automatically.
            return [self.loss_tracker, self.acc_tracker]

    cnn_model = get_cnn_model()
    encoder = TransformerEncoderBlock(embed_dim=EMBED_DIM,
                                      dense_dim=FF_DIM,
                                      num_heads=1)
    decoder = TransformerDecoderBlock(embed_dim=EMBED_DIM,
                                      ff_dim=FF_DIM,
                                      num_heads=2)
    caption_model = ImageCaptioningModel(
        cnn_model=cnn_model,
        encoder=encoder,
        decoder=decoder,
        image_aug=image_augmentation,
    )

    # Model training
    # Define the loss function.
    cross_entropy = keras.losses.SparseCategoricalCrossentropy(
        from_logits=False, reduction="none")

    # Early stopping criteria.
    early_stopping = keras.callbacks.EarlyStopping(patience=3,
                                                   restore_best_weights=True)

    # Learning Rate Scheduler for the optimizer.
    class LRSchedule(keras.optimizers.schedules.LearningRateSchedule):
        def __init__(self, post_warmup_learning_rate, warmup_steps):
            super().__init__()
            self.post_warmup_learning_rate = post_warmup_learning_rate
            self.warmup_steps = warmup_steps

        def __call__(self, step):
            global_step = tf.cast(step, tf.float32)
            warmup_steps = tf.cast(self.warmup_steps, tf.float32)
            warmup_progress = global_step / warmup_steps
            warmup_learning_rate = self.post_warmup_learning_rate * warmup_progress
            return tf.cond(
                global_step < warmup_steps,
                lambda: warmup_learning_rate,
                lambda: self.post_warmup_learning_rate,
            )
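
    # A quick look at the schedule (sketch, with illustrative numbers): the
    # learning rate ramps up linearly to the target over `warmup_steps` and
    # then stays constant.
    demo_schedule = LRSchedule(post_warmup_learning_rate=1e-4, warmup_steps=10)
    print(float(demo_schedule(5)), float(demo_schedule(50)))  # ~5e-05 and ~1e-04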

    # Create a learning rate schedule.
    num_train_steps = len(train_dataset) * EPOCHS
    num_warmup_steps = num_train_steps // 15
    lr_schedule = LRSchedule(post_warmup_learning_rate=1e-4,
                             warmup_steps=num_warmup_steps)

    # Compile the model.
    caption_model.compile(optimizer=keras.optimizers.Adam(lr_schedule),
                          loss=cross_entropy)

    # Fit the model.
    caption_model.fit(
        train_dataset,
        epochs=EPOCHS,
        validation_data=valid_dataset,
        callbacks=[early_stopping],
    )

    # Check sample predictions
    '''
	vocab = vectorization.get_vocabulary()
	index_lookup = dict(zip(range(len(vocab)), vocab))
	max_decoded_sentence_length = SEQ_LENGTH - 1
	valid_images = list(valid_data.keys())

	def generate_caption():
		# Select a random image from the validation dataset.
		sample_img = np.random.choice(valid_images)

		# Read the image from the disk.
		sample_img = decode_and_resize(sample_img)
		img = sample_img.numpy().clip(0, 255).astype(np.uint8)
		plt.imshow(img)
		plt.show()

		# Pass the image to the CNN.
		img = tf.expand_dims(sample_img, 0)
		img = caption_model.cnn_model(img)

		# Pass the image features to the Transformer encoder.
		encoded_img = caption_model.encoder(img, training=False)

		# Generate the caption using the Transformer decoder.
		decoded_caption = "<start>"
		for i in range(max_decoded_sentence_length):
			tokenized_caption = vectorization([decoded_caption])[:, :-1]
			mask = tf.math.not_equal(tokenized_caption, 0)
			predictions = caption_model.decoder(
				tokenized_caption, encoded_img, training=False, mask=mask
			)
			sampled_token_index = np.argmax(predictions[0, i, :])
			sampled_token = index_lookup[sampled_token_index]
			if sampled_token == " <end>":
				break
			decoded_caption += " " + sampled_token

		decoded_caption = decoded_caption.replace("<start>", "")
		decoded_caption = decoded_caption.replace("<end>", "").strip()
		print("Predicted Caption: ", decoded_caption)

	# Check predictions for a few samples.
	generate_caption()
	generate_caption()
	generate_caption()
	'''

    # End Notes
    # Notice that the model starts to generate reasonable captions
    # after a few epochs. To keep this example easily runnable, it has
    # been trained with a few constraints, like a minimal number of
    # attention heads. To improve predictions, try changing these
    # training settings and find a good model for your use case.

    # Exit the program.
    exit(0)