def get_tokenizer(vocab_file=VOCAB_FILE, seq_len=SEQ_LEN):
    with open(vocab_file, 'r') as f:
        # splitlines() avoids a trailing empty token when the file ends with a newline.
        vocab_list = f.read().splitlines()

    vectorizer = TextVectorization(max_tokens=len(vocab_list),
                                   standardize=tf_preprocess,
                                   output_sequence_length=seq_len,
                                   vocabulary=vocab_list)
    # vectorizer.adapt(train_ds)
    print("Vocabulary Size:", vectorizer.vocabulary_size())
    return vectorizer
def get_tokenizer():
    vocab = get_vocab()
    return TextVectorization(max_tokens=None,
                             standardize=None,
                             split='whitespace',
                             output_sequence_length=cls_config['seq_len'],
                             vocabulary=vocab)
Example #3
def get_vectorize_layer(texts, vocab_size, max_seq, special_tokens=["[MASK]"]):
    """Build Text vectorization layer

    Args:
      texts (list): List of string i.e input texts
      vocab_size (int): vocab size
      max_seq (int): Maximum sequence lenght.
      special_tokens (list, optional): List of special tokens. Defaults to ['[MASK]'].

    Returns:
        layers.Layer: Return TextVectorization Keras Layer
    """
    vectorize_layer = TextVectorization(
        max_tokens=vocab_size,
        output_mode="int",
        standardize=custom_standardization,
        output_sequence_length=max_seq,
    )
    vectorize_layer.adapt(texts)

    # Insert mask token in vocabulary
    vocab = vectorize_layer.get_vocabulary()
    vocab = vocab[2:vocab_size - len(special_tokens)] + ["[mask]"]
    vectorize_layer.set_vocabulary(vocab)
    return vectorize_layer
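
# Usage sketch (illustrative, not from the original snippet): `custom_standardization` is
# assumed to keep the "[" and "]" characters so the "[mask]" token survives, as in the
# masked-language-modeling setup this helper comes from.
texts = ["The quick brown fox", "jumped over the lazy dog"]
mlm_vectorize_layer = get_vectorize_layer(texts, vocab_size=100, max_seq=8)
mask_token_id = mlm_vectorize_layer(["[mask]"]).numpy()[0][0]  # id of the inserted mask token
print("mask token id:", mask_token_id)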
# Model constants.
max_features = 20000
embedding_dim = 128
sequence_length = 500

# Now that we have our custom standardization, we can instantiate our text
# vectorization layer. We are using this layer to normalize, split, and map
# strings to integers, so we set our 'output_mode' to 'int'.
# Note that we're using the default split function,
# and the custom standardization defined above.
# We also set an explicit maximum sequence length, since the CNNs later in our
# model won't support ragged sequences.
vectorize_layer = TextVectorization(
    standardize=custom_standardization,
    max_tokens=max_features,
    output_mode="int",
    output_sequence_length=sequence_length,
)

# Now that the vocab layer has been created, call `adapt` on a text-only
# dataset to create the vocabulary. You don't have to batch, but for very large
# datasets this means you're not keeping spare copies of the dataset in memory.

# Let's make a text-only dataset (no labels):
text_ds = raw_train_ds.map(lambda x, y: x)
# Let's call `adapt`:
vectorize_layer.adapt(text_ds)
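
# Quick check (illustrative addition): vectorizing one raw review shows the lowercasing,
# HTML stripping, and integer mapping in action.
sample_review = "This movie was great!<br />Loved it."
print(vectorize_layer([sample_review]))  # int tensor of shape (1, sequence_length)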
"""
## Two options to vectorize the data

There are 2 ways we can use our text vectorization layer:
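
**Option 1** is to make the layer part of the model, so the model processes raw strings.
**Option 2** is to apply it to the text dataset up front with `map()`, producing a dataset of
integer sequences and keeping string preprocessing asynchronous on the CPU.
A minimal sketch of both, reusing the `vectorize_layer`, `raw_train_ds`, `max_features`, and
`embedding_dim` defined above (the exact model wiring is illustrative):

# Option 1: the vectorization layer is the first layer of the model.
text_input = tf.keras.Input(shape=(1,), dtype=tf.string)
x = vectorize_layer(text_input)
x = tf.keras.layers.Embedding(max_features, embedding_dim)(x)

# Option 2: vectorize inside the tf.data pipeline, then feed integer sequences to the model.
def vectorize_text(text, label):
    text = tf.expand_dims(text, -1)  # shape (batch, 1): one string per row
    return vectorize_layer(text), label

train_ds_int = raw_train_ds.map(vectorize_text)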
def custom_standardization(input_data):
    lowercase = tf.strings.lower(input_data)
    stripped_html = tf.strings.regex_replace(lowercase, '<br />', ' ')
    return tf.strings.regex_replace(stripped_html,
                                    '[%s]' % re.escape(string.punctuation), '')


# Vocabulary size and number of words in a sequence.
vocab_size = 10000
sequence_length = 100

# Use the text vectorization layer to normalize, split, and map strings to
# integers. Note that the layer uses the custom standardization defined above.
# Set maximum_sequence length as all samples are not of the same length.
vectorize_layer = TextVectorization(standardize=custom_standardization,
                                    max_tokens=vocab_size,
                                    output_mode='int',
                                    output_sequence_length=sequence_length)

# Make a text-only dataset (no labels) and call adapt to build the vocabulary.
text_ds = train_ds.map(lambda x, y: x)
vectorize_layer.adapt(text_ds)

# Create the classification model
# Use the Keras Sequential API to define the sentiment classification model. In this case it is a
# "continuous bag of words" style model:
# 1. The TextVectorization layer transforms strings into vocabulary indices. You have already
#    initialized vectorize_layer as a TextVectorization layer and built its vocabulary by calling
#    adapt on text_ds. vectorize_layer can now be used as the first layer of the end-to-end
#    classification model, feeding the transformed strings into the Embedding layer.
# 2. The Embedding layer takes the integer-encoded vocabulary and looks up the embedding vector
#    for each word index. These vectors are learned as the model trains. They add a dimension to
#    the output array; the resulting dimensions are (batch, sequence, embedding).
# 3. The GlobalAveragePooling1D layer returns a fixed-length output vector for each example by
#    averaging over the sequence dimension. This lets the model handle variable-length input in
#    the simplest possible way.
# 4. The fixed-length output vector is piped through a fully-connected (Dense) layer with 16
#    hidden units.
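
# A minimal sketch of the model described above (the embedding width of 16 is illustrative;
# `vectorize_layer` is the adapted layer from this snippet).
embedding_dim = 16
model = tf.keras.Sequential([
    vectorize_layer,
    tf.keras.layers.Embedding(vocab_size, embedding_dim, name="embedding"),
    tf.keras.layers.GlobalAveragePooling1D(),
    tf.keras.layers.Dense(16, activation="relu"),
    tf.keras.layers.Dense(1),
])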
Example #6
"""


def custom_standardization(input_string):
    lowercase = tf.strings.lower(input_string)
    return tf.strings.regex_replace(lowercase, "[%s]" % re.escape(strip_chars),
                                    "")


strip_chars = "!\"#$%&'()*+,-./:;<=>?@[\]^_`{|}~"
strip_chars = strip_chars.replace("<", "")
strip_chars = strip_chars.replace(">", "")

vectorization = TextVectorization(
    max_tokens=VOCAB_SIZE,
    output_mode="int",
    output_sequence_length=SEQ_LENGTH,
    standardize=custom_standardization,
)
vectorization.adapt(text_data)
"""
## Building a `tf.data.Dataset` pipeline for training

We will generate pairs of images and corresponding captions using a `tf.data.Dataset` object.
The pipeline consists of two steps:

1. Read the image from the disk
2. Tokenize all the five captions corresponding to the image
"""


def read_image(img_path, size=IMAGE_SIZE):
    # Body reconstructed as a sketch (the original snippet is cut off here); it mirrors
    # decode_and_resize() in Example #8 below.
    img = tf.io.read_file(img_path)
    img = tf.image.decode_jpeg(img, channels=3)
    img = tf.image.resize(img, size)
    return tf.image.convert_image_dtype(img, tf.float32)
Example #7
**Example: turning strings into sequences of integer word indices**


"""

from tensorflow.keras.layers import TextVectorization

# Example training data, of dtype `string`.
training_data = np.array([["This is the 1st sample."], ["And here's the 2nd sample."]])

# Create a TextVectorization layer instance. It can be configured to either
# return integer token indices, or a dense token representation (e.g. multi-hot
# or TF-IDF). The text standardization and text splitting algorithms are fully
# configurable.
vectorizer = TextVectorization(output_mode="int")

# Calling `adapt` on an array or dataset makes the layer generate a vocabulary
# index for the data, which can then be reused when seeing new data.
vectorizer.adapt(training_data)

# After calling adapt, the layer is able to encode any n-gram it has seen before
# in the `adapt()` data. Unknown n-grams are encoded via an "out-of-vocabulary"
# token.
integer_data = vectorizer(training_data)
print(integer_data)

"""
**Example: turning strings into sequences of one-hot encoded bigrams**
"""
Example #8
def main():
    # Download the dataset
    # Using the Flickr8K dataset for this tutorial. This dataset
    # comprises over 8,000 images, that are each paired with five
    # different captions.
    #!wget -q https://github.com/jbrownlee/Datasets/releases/download/Flickr8k/Flickr8k_Dataset.zip
    #!wget -q https://github.com/jbrownlee/Datasets/releases/download/Flickr8k/Flickr8k_text.zip
    #!unzip -qq Flickr8k_Dataset.zip
    #!unzip -qq Flickr8k_text.zip
    #!rm Flickr8k_Dataset.zip Flickr8k_text.zip

    # Path to images.
    IMAGES_PATH = "Flicker8k_Dataset"

    # Desired image dimensions.
    IMAGE_SIZE = (299, 299)

    # Vocabulary size.
    VOCAB_SIZE = 10000

    # Fixed length allowed for any sequence.
    SEQ_LENGTH = 25

    # Dimension for the image embeddings and token embeddings.
    EMBED_DIM = 512

    # Per-layer units in the feed-forward network.
    FF_DIM = 512

    # Other training parameters.
    BATCH_SIZE = 64
    EPOCHS = 30
    AUTOTUNE = tf.data.AUTOTUNE

    # Preparing the dataset.
    def load_captions_data(filename):
        # Load captions (text) data and map them to the corresponding
        # images.
        # @param: filename, path to the text file containing caption
        #	data.
        # @return: caption_mapping, dictionary mapping image names and
        #	the corresponding captions.
        # @return: text_data, list containing all the available
        #	captions.
        with open(filename) as caption_file:
            caption_data = caption_file.readlines()
            caption_mapping = {}
            text_data = []
            images_to_skip = set()

            for line in caption_data:
                line = line.rstrip("\n")

                # Image name and captions are separated by a tab.
                img_name, caption = line.split("\t")

                # Each image is repeated five times for the five
                # different captions. Each image name has a suffix
                # '#(caption_number)'.
                img_name = img_name.split("#")[0]
                img_name = os.path.join(IMAGES_PATH, img_name.strip())

                # Remove captions that are either too short or too
                # long.
                tokens = caption.strip().split()

                if len(tokens) < 5 or len(tokens) > SEQ_LENGTH:
                    images_to_skip.add(img_name)
                    continue

                if img_name.endswith("jpg") and img_name not in images_to_skip:
                    # Add a start and end token to each caption.
                    caption = "<start>" + caption.strip() + "<end>"
                    text_data.append(caption)

                    if img_name in caption_mapping:
                        caption_mapping[img_name].append(caption)
                    else:
                        caption_mapping[img_name] = [caption]

            for img_name in images_to_skip:
                if img_name in caption_mapping:
                    del caption_mapping[img_name]

            return caption_mapping, text_data

    def train_val_split(caption_data, train_size=0.8, shuffle=True):
        # Split the captioning dataset into train and validation sets.
        # @param: caption_data (dict), dictionary containing the mapped
        #	data.
        # @param: train_size (float), fraction of all the full dataset
        #	to use as training data.
        # @param: shuffle (bool), whether to shuffle the dataset before
        #	splitting.
        # @return: training and validation datasets as two separated
        #	dicts.
        # 1) Get the list of all image names.
        all_images = list(caption_data.keys())

        # 2) Shuffle if necessary.
        if shuffle:
            np.random.shuffle(all_images)

        # 3) Split into training and validation sets.
        train_size = int(len(caption_data) * train_size)

        training_data = {
            img_name: caption_data[img_name]
            for img_name in all_images[:train_size]
        }
        validation_data = {
            img_name: caption_data[img_name]
            for img_name in all_images[train_size:]
        }

        # 4) Return the splits.
        return training_data, validation_data

    # Load the dataset.
    captions_mapping, text_data = load_captions_data("Flickr8k.token.txt")

    # Split the dataset into training and validation sets.
    train_data, valid_data = train_val_split(captions_mapping)
    print("Number of training samples: ", len(train_data))
    print("Number of validation samples: ", len(valid_data))

    # Vectorizing the text data
    # Use the TextVectorization layer to vectorize the text data, that
    # is to say, to turn the original strings into integer sequences
    # where each integer represents the index of a word in a
    # vocabulary. Use a custom string standardization scheme (in this
    # case, strip punctuation characters except < and >) and the
    # default splitting scheme (split on whitespace).
    def custom_standardization(input_string):
        lowercase = tf.strings.lower(input_string)
        return tf.strings.regex_replace(lowercase,
                                        "[%s]" % re.escape(strip_chars), "")

    strip_chars = "!\"#$%&'()*+,-./;<=>?@[\]^_`{|}~"
    strip_chars = strip_chars.replace("<", "")
    strip_chars = strip_chars.replace(">", "")

    vectorization = TextVectorization(
        max_tokens=VOCAB_SIZE,
        output_mode="int",
        output_sequence_length=SEQ_LENGTH,
        standardize=custom_standardization,
    )
    vectorization.adapt(text_data)

    # Data augmentation for image data.
    image_augmentation = keras.Sequential([
        layers.RandomFlip("horizontal"),
        layers.RandomRotation(0.2),
        layers.RandomContrast(0.3),
    ])

    # Building a tf.data.Dataset pipeline for training
    # Generate pairs of images and corresponding captions using a
    # tf.data.Dataset object. The pipeline consists of two steps:
    # 1) Read the image from the disk.
    # 2) Tokenize all the five captions corresponding to the image.
    def decode_and_resize(img_path):
        img = tf.io.read_file(img_path)
        img = tf.image.decode_jpeg(img, channels=3)
        img = tf.image.resize(img, IMAGE_SIZE)
        img = tf.image.convert_image_dtype(img, tf.float32)
        return img

    def process_input(img_path, captions):
        return decode_and_resize(img_path), vectorization(captions)

    def make_dataset(images, captions):
        '''
		if split == "train":
			img_dataset = tf.data.Dataset.from_tensor_slices(images).map(
				read_train_image, num_parallel_calls=AUTOTUNE
			)
		else:
			img_dataset = tf.data.Dataset.from_tensor_slices(images).map(
				read_valid_image, num_parallel_calls=AUTOTUNE
			)

		cap_dataset = tf.data.Dataset.from_tensor_slices(captions).map(
			vectorization, num_parallel_calls=AUTOTUNE
		)

		dataset = tf.data.Dataset.zip((img_dataset, cap_dataset))
		dataset = dataset.batch(BATCH_SIZE).shuffle(256).prefetch(AUTOTUNE)
		return dataset
		'''
        dataset = tf.data.Dataset.from_tensor_slices((images, captions))
        dataset = dataset.shuffle(len(images))
        dataset = dataset.map(process_input, num_parallel_calls=AUTOTUNE)
        dataset = dataset.batch(BATCH_SIZE).prefetch(AUTOTUNE)
        return dataset

    # Pass the list of images and the list of corresponding captions.
    train_dataset = make_dataset(list(train_data.keys()),
                                 list(train_data.values()))
    valid_dataset = make_dataset(list(valid_data.keys()),
                                 list(valid_data.values()))

    # Building the model
    # The image captioning architecture consists of three models:
    # 1) A CNN: Used to extract the image features.
    # 2) A TransformerEncoder: The extracted image features are then
    #	passed to a Transformer based encoder that generates a new
    #	representation of the inputs.
    # 3) A TransformerDecoder: This model takes the encoder output and
    #	the text data (sequences) as inputs and tries to learn to
    #	generate the caption.
    def get_cnn_model():
        base_model = efficientnet.EfficientNetB0(
            input_shape=(*IMAGE_SIZE, 3),
            include_top=False,
            weights="imagenet",
        )

        # Freeze the feature extractor.
        base_model.trainable = False
        base_model_out = base_model.output
        base_model_out = layers.Reshape(
            (-1, base_model_out.shape[-1]))(base_model_out)
        cnn_model = keras.models.Model(base_model.input, base_model_out)
        return cnn_model

    class TransformerEncoderBlock(layers.Layer):
        def __init__(self, embed_dim, dense_dim, num_heads, **kwargs):
            super().__init__(**kwargs)
            self.embed_dim = embed_dim
            self.dense_dim = dense_dim
            self.num_heads = num_heads
            self.attention_1 = layers.MultiHeadAttention(num_heads=num_heads,
                                                         key_dim=embed_dim,
                                                         dropout=0.0)
            self.layernorm1 = layers.LayerNormalization()
            self.layernorm2 = layers.LayerNormalization()
            self.dense_1 = layers.Dense(embed_dim, activation="relu")

        def call(self, inputs, training, mask=None):
            inputs = self.layernorm1(inputs)
            inputs = self.dense_1(inputs)

            attention_output_1 = self.attention_1(
                query=inputs,
                value=inputs,
                key=inputs,
                attention_mask=None,
                training=training,
            )

            out_1 = self.layernorm2(inputs + attention_output_1)
            return out_1

    class PositionalEmbedding(layers.Layer):
        def __init__(self, sequence_length, vocab_size, embed_dim, **kwargs):
            super().__init__(**kwargs)
            self.token_embeddings = layers.Embedding(input_dim=vocab_size,
                                                     output_dim=embed_dim)
            self.position_embeddings = layers.Embedding(
                input_dim=sequence_length, output_dim=embed_dim)
            self.sequence_length = sequence_length
            self.vocab_size = vocab_size
            self.embed_dim = embed_dim
            self.embed_scale = tf.math.sqrt(tf.cast(embed_dim, tf.float32))

        def call(self, inputs):
            length = tf.shape(inputs)[-1]
            positions = tf.range(start=0, limit=length, delta=1)
            embedded_tokens = self.token_embeddings(inputs)
            embedded_tokens = embedded_tokens * self.embed_scale
            embedded_positions = self.position_embeddings(positions)
            return embedded_tokens + embedded_positions

        def compute_mask(self, inputs, mask=None):
            return tf.math.not_equal(inputs, 0)

    class TransformerDecoderBlock(layers.Layer):
        def __init__(self, embed_dim, ff_dim, num_heads, **kwargs):
            super().__init__(**kwargs)
            self.embed_dim = embed_dim
            self.ff_dim = ff_dim
            self.num_heads = num_heads
            self.attention_1 = layers.MultiHeadAttention(num_heads=num_heads,
                                                         key_dim=embed_dim,
                                                         dropout=0.1)
            self.attention_2 = layers.MultiHeadAttention(num_heads=num_heads,
                                                         key_dim=embed_dim,
                                                         dropout=0.1)
            self.ffn_layer_1 = layers.Dense(ff_dim, activation="relu")
            self.ffn_layer_2 = layers.Dense(embed_dim)

            self.layernorm_1 = layers.LayerNormalization()
            self.layernorm_2 = layers.LayerNormalization()
            self.layernorm_3 = layers.LayerNormalization()

            self.embedding = PositionalEmbedding(embed_dim=EMBED_DIM,
                                                 sequence_length=SEQ_LENGTH,
                                                 vocab_size=VOCAB_SIZE)
            self.out = layers.Dense(VOCAB_SIZE, activation="softmax")

            self.dropout_1 = layers.Dropout(0.3)
            self.dropout_2 = layers.Dropout(0.5)
            self.supports_masking = True

        def call(self, inputs, encoder_outputs, training, mask=None):
            inputs = self.embedding(inputs)
            causal_mask = self.get_causal_attention_mask(inputs)

            if mask is not None:
                padding_mask = tf.cast(mask[:, :, tf.newaxis], dtype=tf.int32)
                combined_mask = tf.cast(mask[:, tf.newaxis, :], dtype=tf.int32)
                combined_mask = tf.minimum(combined_mask, causal_mask)

            attention_output_1 = self.attention_1(
                query=inputs,
                value=inputs,
                key=inputs,
                attention_mask=combined_mask,
                training=training,
            )
            out_1 = self.layernorm_1(inputs + attention_output_1)

            attention_output_2 = self.attention_2(
                query=out_1,
                value=encoder_outputs,
                key=encoder_outputs,
                attention_mask=padding_mask,
                training=training,
            )
            out_2 = self.layernorm_2(out_1 + attention_output_2)

            ffn_out = self.ffn_layer_1(out_2)
            ffn_out = self.dropout_1(ffn_out, training=training)
            ffn_out = self.ffn_layer_2(ffn_out)

            ffn_out = self.layernorm_3(ffn_out + out_2, training=training)
            ffn_out = self.dropout_2(ffn_out, training=training)
            preds = self.out(ffn_out)
            return preds

        def get_causal_attention_mask(self, inputs):
            input_shape = tf.shape(inputs)
            batch_size, sequence_length = input_shape[0], input_shape[1]
            i = tf.range(sequence_length)[:, tf.newaxis]
            j = tf.range(sequence_length)
            mask = tf.cast(i >= j, dtype="int32")
            mask = tf.reshape(mask, (1, input_shape[1], input_shape[1]))
            mult = tf.concat(
                [
                    tf.expand_dims(batch_size, -1),
                    tf.constant([1, 1], dtype=tf.int32)
                ],
                axis=0,
            )
            return tf.tile(mask, mult)

    class ImageCaptioningModel(keras.Model):
        def __init__(self,
                     cnn_model,
                     encoder,
                     decoder,
                     num_captions_per_image=5,
                     image_aug=None):
            super().__init__()
            self.cnn_model = cnn_model
            self.encoder = encoder
            self.decoder = decoder
            self.loss_tracker = keras.metrics.Mean(name="loss")
            self.acc_tracker = keras.metrics.Mean(name="accuracy")
            self.num_captions_per_image = num_captions_per_image
            self.image_aug = image_aug

        def calculate_loss(self, y_true, y_pred, mask):
            loss = self.loss(y_true, y_pred)
            mask = tf.cast(mask, dtype=loss.dtype)
            loss *= mask
            return tf.reduce_sum(loss) / tf.reduce_sum(mask)

        def calculate_accuracy(self, y_true, y_pred, mask):
            accuracy = tf.equal(y_true, tf.argmax(y_pred, axis=2))
            accuracy = tf.math.logical_and(mask, accuracy)
            accuracy = tf.cast(accuracy, dtype=tf.float32)
            mask = tf.cast(mask, dtype=tf.float32)
            return tf.reduce_sum(accuracy) / tf.reduce_sum(mask)

        def _compute_caption_loss_and_acc(self,
                                          img_embed,
                                          batch_seq,
                                          training=True):
            encoder_out = self.encoder(img_embed, training=training)
            batch_seq_inp = batch_seq[:, :-1]
            batch_seq_true = batch_seq[:, 1:]
            mask = tf.math.not_equal(batch_seq_true, 0)
            batch_seq_pred = self.decoder(batch_seq_inp,
                                          encoder_out,
                                          training=training,
                                          mask=mask)
            loss = self.calculate_loss(batch_seq_true, batch_seq_pred, mask)
            acc = self.calculate_accuracy(batch_seq_true, batch_seq_pred, mask)
            return loss, acc

        def train_step(self, batch_data):
            batch_img, batch_seq = batch_data
            batch_loss = 0
            batch_acc = 0

            if self.image_aug:
                batch_img = self.image_aug(batch_img)

            # 1) Get image embeddings.
            img_embed = self.cnn_model(batch_img)

            # 2) Pass each of the five captions one by one to the
            # decoder along with the encoder outputs and compute the
            # loss as well as accuracy for each caption.
            for i in range(self.num_captions_per_image):
                with tf.GradientTape() as tape:
                    loss, acc = self._compute_caption_loss_and_acc(
                        img_embed, batch_seq[:, i, :], training=True)

                    # 3) Update loss and accuracy.
                    batch_loss += loss
                    batch_acc += acc

                # 4) Get the list of all the trainable weights.
                train_vars = (self.encoder.trainable_variables +
                              self.decoder.trainable_variables)

                # 5) Get the gradients.
                grads = tape.gradient(loss, train_vars)

                # 6) Update the trainable weights.
                self.optimizer.apply_gradients(zip(grads, train_vars))

            # 7) Update the trackers.
            batch_acc /= float(self.num_captions_per_image)
            self.loss_tracker.update_state(batch_loss)
            self.acc_tracker.update_state(batch_acc)

            # 8) Return the loss and accuracy values.
            return {
                "loss": self.loss_tracker.result(),
                "acc": self.acc_tracker.result()
            }

        def test_step(self, batch_data):
            batch_img, batch_seq = batch_data
            batch_loss = 0
            batch_acc = 0

            # 1) Get image embeddings.
            img_embed = self.cnn_model(batch_img)

            # 2) Pass each of the five captions one by one to the
            # decoder along with the encoder outputs and compute the
            # loss as well as accuracy for each caption.
            for i in range(self.num_captions_per_image):
                loss, acc = self._compute_caption_loss_and_acc(
                    img_embed, batch_seq[:, i, :], training=False)

                # 3) Update loss and accuracy.
                batch_loss += loss
                batch_acc += acc

            batch_acc /= float(self.num_captions_per_image)

            # 4) Update the trackers.
            self.loss_tracker.update_state(batch_loss)
            self.acc_tracker.update_state(batch_acc)

            # 5) Return the loss and accuracy values.
            return {
                "loss": self.loss_tracker.result(),
                "acc": self.acc_tracker.result()
            }

        @property
        def metrics(self):
            # List the metrics here so that `reset_states()` can be
            # called automatically.
            return [self.loss_tracker, self.acc_tracker]

    cnn_model = get_cnn_model()
    encoder = TransformerEncoderBlock(embed_dim=EMBED_DIM,
                                      dense_dim=FF_DIM,
                                      num_heads=1)
    decoder = TransformerDecoderBlock(embed_dim=EMBED_DIM,
                                      ff_dim=FF_DIM,
                                      num_heads=2)
    caption_model = ImageCaptioningModel(
        cnn_model=cnn_model,
        encoder=encoder,
        decoder=decoder,
        image_aug=image_augmentation,
    )

    # Model training
    # Define the loss function.
    cross_entropy = keras.losses.SparseCategoricalCrossentropy(
        from_logits=False, reduction="none")

    # Early stopping criteria.
    early_stopping = keras.callbacks.EarlyStopping(patience=3,
                                                   restore_best_weights=True)

    # Learning Rate Scheduler for the optimizer.
    class LRSchedule(keras.optimizers.schedules.LearningRateSchedule):
        def __init__(self, post_warmup_learning_rate, warmup_steps):
            super().__init__()
            self.post_warmup_learning_rate = post_warmup_learning_rate
            self.warmup_steps = warmup_steps

        def __call__(self, step):
            global_step = tf.cast(step, tf.float32)
            warmup_steps = tf.cast(self.warmup_steps, tf.float32)
            warmup_progress = global_step / warmup_steps
            warmup_learning_rate = self.post_warmup_learning_rate * warmup_progress
            return tf.cond(
                global_step < warmup_steps,
                lambda: warmup_learning_rate,
                lambda: self.post_warmup_learning_rate,
            )

    # Create a learning rate schedule.
    num_train_steps = len(train_dataset) * EPOCHS
    num_warmup_steps = num_train_steps // 15
    lr_schedule = LRSchedule(post_warmup_learning_rate=1e-4,
                             warmup_steps=num_warmup_steps)

    # Compile the model.
    caption_model.compile(optimizer=keras.optimizers.Adam(lr_schedule),
                          loss=cross_entropy)

    # Fit the model.
    caption_model.fit(
        train_dataset,
        epochs=EPOCHS,
        validation_data=valid_dataset,
        callbacks=[early_stopping],
    )

    # Check sample predictions
    '''
	vocab = vectorization.get_vocabulary()
	index_lookup = dict(zip(range(len(vocab)), vocab))
	max_decoded_sentence_length = SEQ_LENGTH - 1
	valid_images = list(valid_data.keys())

	def generate_caption():
		# Select a random image from the validation dataset.
		sample_img = np.random.choice(valid_images)

		# Read the image from the disk.
		sample_img = decode_and_resize(sample_img)
		img = sample_img.numpy().clip(0, 255).astype(np.uint8)
		plt.imshow(img)
		plt.show()

		# Pass the image to the CNN.
		img = tf.expand_dims(sample_img, 0)
		img = caption_model.cnn_model(img)

		# Pass the image features to the Transformer encoder.
		encoded_img = caption_model.encoder(img, training=False)

		# Generate the caption using the Transformer decoder.
		decoded_caption = "<start>"
		for i in range(max_decoded_sentence_length):
			tokenized_caption = vectorization([decoded_caption])[:, :-1]
			mask = tf.math.not_equal(tokenized_caption, 0)
			predictions = caption_model.decoder(
				tokenized_caption, encoded_img, training=False, mask=mask
			)
			sampled_token_index = np.argmax(predictions[0, i, :])
			sampled_token = index_lookup[sampled_token_index]
			if sampled_token == "<end>":
				break
			decoded_caption += " " + sampled_token

		decoded_caption = decoded_caption.replace("<start>", "")
		decoded_caption = decoded_caption.replace("<end>", "").strip()
		print("Predicted Caption: ", decoded_caption)

	# Check predictions for a few samples.
	generate_caption()
	generate_caption()
	generate_caption()
	'''

    # End Notes
    # Notice that the model starts to generate reasonable captions
    # after a few epochs. To keep this example easily runnable, it has
    # been trained with a few constraints, like a minimal number of
    # attention heads. To improve predictions, try changing these
    # training settings and find a good model for your use case.

    # Exit the program.
    exit(0)
Example #9
# The snippet is truncated here; the definition below is reconstructed as an assumption
# (strip all punctuation plus "¿", keeping the square brackets used by the [start]/[end]
# tokens; assumes `import string`).
strip_chars = string.punctuation + "¿"
strip_chars = strip_chars.replace("[", "")
strip_chars = strip_chars.replace("]", "")

vocab_size = 15000
sequence_length = 20
batch_size = 64


def custom_standardization(input_string):
    lowercase = tf.strings.lower(input_string)
    return tf.strings.regex_replace(lowercase, "[%s]" % re.escape(strip_chars),
                                    "")


eng_vectorization = TextVectorization(
    max_tokens=vocab_size,
    output_mode="int",
    output_sequence_length=sequence_length,
)
spa_vectorization = TextVectorization(
    max_tokens=vocab_size,
    output_mode="int",
    output_sequence_length=sequence_length + 1,
    standardize=custom_standardization,
)
train_eng_texts = [pair[0] for pair in train_pairs]
train_spa_texts = [pair[1] for pair in train_pairs]
eng_vectorization.adapt(train_eng_texts)
spa_vectorization.adapt(train_spa_texts)
"""
Next, we'll format our datasets.
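
A sketch of the usual formatting step for this setup (the "encoder_inputs"/"decoder_inputs"
names are placeholders for whatever the downstream model expects): the Spanish sequence is used
twice, shifted by one position, so the decoder learns to predict token i+1 from tokens 0..i.
Note that `spa_vectorization` above outputs `sequence_length + 1` tokens precisely so that both
slices end up `sequence_length` long.

def format_dataset(eng, spa):
    eng = eng_vectorization(eng)
    spa = spa_vectorization(spa)
    # Decoder input: tokens 0..N-1; label: tokens 1..N.
    return ({"encoder_inputs": eng, "decoder_inputs": spa[:, :-1]}, spa[:, 1:])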
val_samples = samples[-num_validation_samples:]
train_labels = labels[:-num_validation_samples]
val_labels = labels[-num_validation_samples:]
"""
## Create a vocabulary index

Let's use the `TextVectorization` to index the vocabulary found in the dataset.
Later, we'll use the same layer instance to vectorize the samples.

Our layer will only consider the top 20,000 words, and will truncate or pad sequences to be
exactly 200 tokens long.
"""

from tensorflow.keras.layers import TextVectorization

vectorizer = TextVectorization(max_tokens=20000, output_sequence_length=200)
text_ds = tf.data.Dataset.from_tensor_slices(train_samples).batch(128)
vectorizer.adapt(text_ds)
"""
You can retrieve the computed vocabulary used via `vectorizer.get_vocabulary()`. Let's
print the top 5 words:
"""

vectorizer.get_vocabulary()[:5]
"""
Let's vectorize a test sentence:
"""

output = vectorizer([["the cat sat on the mat"]])
output.numpy()[0, :6]
"""
text_ds = text_ds.shuffle(buffer_size=256)
text_ds = text_ds.batch(batch_size)


def custom_standardization(input_string):
    """ Remove html line-break tags and handle punctuation """
    lowercased = tf.strings.lower(input_string)
    stripped_html = tf.strings.regex_replace(lowercased, "<br />", " ")
    return tf.strings.regex_replace(stripped_html, f"([{string.punctuation}])",
                                    r" \1")


# Create a vectorization layer and adapt it to the text
vectorize_layer = TextVectorization(
    standardize=custom_standardization,
    max_tokens=vocab_size - 1,
    output_mode="int",
    output_sequence_length=maxlen + 1,
)
vectorize_layer.adapt(text_ds)
vocab = vectorize_layer.get_vocabulary()  # To get words back from token indices


def prepare_lm_inputs_labels(text):
    """
    Shift word sequences by 1 position so that the target for position (i) is
    word at position (i+1). The model will use all words up till position (i)
    to predict the next word.
    """
    text = tf.expand_dims(text, -1)
    tokenized_sentences = vectorize_layer(text)