Code Example #1
    def __init__(self, name="kid", **kwargs):
        super().__init__(name=name, **kwargs)

        # KID is estimated per batch and is averaged across batches
        self.kid_tracker = keras.metrics.Mean()

        # a pretrained InceptionV3 is used without its classification layer
        # transform the pixel values to the 0-255 range, then use the same
        # preprocessing as during pretraining
        self.encoder = keras.Sequential(
            [
                layers.InputLayer(input_shape=(image_size, image_size, 3)),
                layers.Rescaling(255.0),
                layers.Resizing(height=kid_image_size, width=kid_image_size),
                layers.Lambda(
                    keras.applications.inception_v3.preprocess_input),
                keras.applications.InceptionV3(
                    include_top=False,
                    input_shape=(kid_image_size, kid_image_size, 3),
                    weights="imagenet",
                ),
                layers.GlobalAveragePooling2D(),
            ],
            name="inception_encoder",
        )
Code Example #2
def build_model(num_classes, img_size=image_size[0], top_dropout=0.3):
    """Creates a classifier based on pre-trained MobileNetV2.

    Arguments:
        num_classes: Int, number of classes to use in the softmax layer.
        img_size: Int, square size of input images (default is 224).
        top_dropout: Float, dropout rate for the top dropout layer (default is 0.3).

    Returns:
        Uncompiled Keras model.
    """

    # Create input and pre-processing layers for MobileNetV2
    inputs = layers.Input(shape=(img_size, img_size, 3))
    x = layers.Rescaling(scale=1.0 / 127.5, offset=-1)(inputs)
    model = keras.applications.MobileNetV2(include_top=False,
                                           weights="imagenet",
                                           input_tensor=x)

    # Freeze the pretrained weights
    model.trainable = False

    # Rebuild top
    x = layers.GlobalAveragePooling2D(name="avg_pool")(model.output)
    x = layers.Dropout(top_dropout)(x)
    outputs = layers.Dense(num_classes, activation="softmax")(x)
    model = keras.Model(inputs, outputs)

    print("Trainable weights:", len(model.trainable_weights))
    print("Non_trainable weights:", len(model.non_trainable_weights))
    return model
Code Example #3
def build_model():
    inputs = keras.Input(shape=(IMAGE_SIZE[0], IMAGE_SIZE[1], 3))
    x = layers.Rescaling(1.0 / 255)(inputs)
    x = layers.Conv2D(16, 3, activation="relu", padding="same")(x)
    x = layers.Conv2D(16, 3, activation="relu", padding="same")(x)
    x = layers.MaxPool2D()(x)

    x = conv_block(32, x)
    x = conv_block(64, x)

    x = conv_block(128, x)
    x = layers.Dropout(0.2)(x)

    x = conv_block(256, x)
    x = layers.Dropout(0.2)(x)

    x = layers.Flatten()(x)
    x = dense_block(512, 0.7, x)
    x = dense_block(128, 0.5, x)
    x = dense_block(64, 0.3, x)

    outputs = layers.Dense(1, activation="sigmoid")(x)

    model = keras.Model(inputs=inputs, outputs=outputs)
    return model
Code Example #4
def get_preprocessing():
    model = keras.Sequential(
        [
            layers.Rescaling(1 / 255.0),
            layers.Resizing(IMAGE_SIZE, IMAGE_SIZE),
        ],
        name="preprocessing",
    )
    return model
Code Example #5
def get_test_augmentation_model():
    model = keras.Sequential(
        [
            layers.Rescaling(1 / 255.0),
            layers.Resizing(IMAGE_SIZE, IMAGE_SIZE),
        ],
        name="test_data_augmentation",
    )
    return model
Code Example #6
def get_augmenter(min_area, brightness, jitter):
    zoom_factor = 1.0 - tf.sqrt(min_area)
    return keras.Sequential([
        keras.Input(shape=(image_size, image_size, image_channels)),
        layers.Rescaling(1 / 255),
        layers.RandomFlip("horizontal"),
        layers.RandomTranslation(zoom_factor / 2, zoom_factor / 2),
        layers.RandomZoom((-zoom_factor, 0.0), (-zoom_factor, 0.0)),
        RandomColorAffine(brightness, jitter),
    ])
Code Example #7
def get_train_augmentation_model():
    model = keras.Sequential(
        [
            layers.Rescaling(1 / 255.0),
            layers.Resizing(INPUT_SHAPE[0] + 20, INPUT_SHAPE[0] + 20),
            layers.RandomCrop(IMAGE_SIZE, IMAGE_SIZE),
            layers.RandomFlip("horizontal"),
        ],
        name="train_data_augmentation",
    )
    return model
Code Example #8
def get_training_model(num_classes=5):
    inputs = layers.Input((None, None, 3))
    resnet_base = keras.applications.ResNet50V2(include_top=False,
                                                weights=None,
                                                pooling="avg")
    resnet_base.trainable = True

    x = layers.Rescaling(scale=1.0 / 127.5, offset=-1)(inputs)
    x = resnet_base(x)
    outputs = layers.Dense(num_classes, activation="softmax")(x)
    return keras.Model(inputs, outputs)
Code Example #9
File: nnclr.py  Project: strongdiamond/keras-io
def augmenter(brightness, name, scale):
    return keras.Sequential(
        [
            layers.Input(shape=input_shape),
            layers.Rescaling(1 / 255),
            layers.RandomFlip("horizontal"),
            RandomResizedCrop(scale=scale, ratio=(3 / 4, 4 / 3)),
            RandomBrightness(brightness=brightness),
        ],
        name=name,
    )
Code Example #10
File: adamatch.py  Project: strongdiamond/keras-io
def get_network(image_size=32, num_classes=10):
    n = (DEPTH - 4) / 6
    n_stages = [16, 16 * WIDTH_MULT, 32 * WIDTH_MULT, 64 * WIDTH_MULT]

    inputs = keras.Input(shape=(image_size, image_size, 3))
    x = layers.Rescaling(scale=1.0 / 255)(inputs)

    conv1 = layers.Conv2D(
        n_stages[0],
        (3, 3),
        strides=1,
        padding="same",
        kernel_initializer=INIT,
        kernel_regularizer=regularizers.l2(WEIGHT_DECAY),
        use_bias=False,
    )(x)

    ## Add wide residual blocks ##

    conv2 = block_series(
        conv1,
        n_input_plane=n_stages[0],
        n_output_plane=n_stages[1],
        count=n,
        stride=(1, 1),
    )  # Stage 1

    conv3 = block_series(
        conv2,
        n_input_plane=n_stages[1],
        n_output_plane=n_stages[2],
        count=n,
        stride=(2, 2),
    )  # Stage 2

    conv4 = block_series(
        conv3,
        n_input_plane=n_stages[2],
        n_output_plane=n_stages[3],
        count=n,
        stride=(2, 2),
    )  # Stage 3

    batch_norm = layers.BatchNormalization()(conv4)
    relu = layers.Activation("relu")(batch_norm)

    # Classifier
    trunk_outputs = layers.GlobalAveragePooling2D()(relu)
    outputs = layers.Dense(
        num_classes,
        kernel_regularizer=regularizers.l2(WEIGHT_DECAY))(trunk_outputs)

    return keras.Model(inputs, outputs)
Code Example #11
def get_training_model():
    resnet50_v2 = tf.keras.applications.ResNet50V2(
        weights=None,
        include_top=True,
        input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3),
        classes=10,
    )
    model = tf.keras.Sequential([
        layers.Input((IMAGE_SIZE, IMAGE_SIZE, 3)),
        layers.Rescaling(scale=1.0 / 127.5, offset=-1),
        resnet50_v2,
    ])
    return model
Code Example #12
def make_model(input_shape, num_classes):
    inputs = keras.Input(shape=input_shape)
    # Image augmentation block
    x = data_augmentation(inputs)

    # Entry block
    x = layers.Rescaling(1.0 / 255)(x)
    x = layers.Conv2D(32, 3, strides=2, padding="same")(x)
    x = layers.BatchNormalization()(x)
    x = layers.Activation("relu")(x)

    x = layers.Conv2D(64, 3, padding="same")(x)
    x = layers.BatchNormalization()(x)
    x = layers.Activation("relu")(x)

    previous_block_activation = x  # Set aside residual

    for size in [128, 256, 512, 728]:
        x = layers.Activation("relu")(x)
        x = layers.SeparableConv2D(size, 3, padding="same")(x)
        x = layers.BatchNormalization()(x)

        x = layers.Activation("relu")(x)
        x = layers.SeparableConv2D(size, 3, padding="same")(x)
        x = layers.BatchNormalization()(x)

        x = layers.MaxPooling2D(3, strides=2, padding="same")(x)

        # Project residual
        residual = layers.Conv2D(size, 1, strides=2, padding="same")(
            previous_block_activation
        )
        x = layers.add([x, residual])  # Add back residual
        previous_block_activation = x  # Set aside next residual

    x = layers.SeparableConv2D(1024, 3, padding="same")(x)
    x = layers.BatchNormalization()(x)
    x = layers.Activation("relu")(x)

    x = layers.GlobalAveragePooling2D()(x)
    if num_classes == 2:
        activation = "sigmoid"
        units = 1
    else:
        activation = "softmax"
        units = num_classes

    x = layers.Dropout(0.5)(x)
    outputs = layers.Dense(units, activation=activation)(x)
    return keras.Model(inputs, outputs)
Code Example #13
def get_training_model(num_classes=10):
    resnet50_v2 = tf.keras.applications.ResNet50V2(
        weights=None,
        include_top=False,
        input_shape=(CROP_TO, CROP_TO, 3),
    )
    model = tf.keras.Sequential([
        layers.Input((CROP_TO, CROP_TO, 3)),
        layers.Rescaling(scale=1.0 / 127.5, offset=-1),
        resnet50_v2,
        layers.GlobalAveragePooling2D(),
        layers.Dense(num_classes),
    ])
    return model
Code Example #14
    def create_mobilevit(num_classes=5):
        inputs = keras.Input((image_size, image_size, 3))
        x = layers.Rescaling(scale=1.0 / 255)(inputs)

        # Initial conv-stem -> MV2 block.
        x = conv_block(x, filters=16)
        x = inverted_residual_block(x,
                                    expanded_channels=16 * expansion_factor,
                                    output_channels=16)

        # Downsampling with MV2 block.
        x = inverted_residual_block(x,
                                    expanded_channels=16 * expansion_factor,
                                    output_channels=24,
                                    strides=2)
        x = inverted_residual_block(x,
                                    expanded_channels=24 * expansion_factor,
                                    output_channels=24)
        x = inverted_residual_block(x,
                                    expanded_channels=24 * expansion_factor,
                                    output_channels=24)

        # First MV2 -> MobileViT block.
        x = inverted_residual_block(x,
                                    expanded_channels=24 * expansion_factor,
                                    output_channels=48,
                                    strides=2)
        x = mobilevit_block(x, num_blocks=2, projection_dim=64)

        # Second MV2 -> MobileViT block.
        x = inverted_residual_block(x,
                                    expanded_channels=64 * expansion_factor,
                                    output_channels=64,
                                    strides=2)
        x = mobilevit_block(x, num_blocks=4, projection_dim=80)

        # Third MV2 -> MobileViT block.
        x = inverted_residual_block(x,
                                    expanded_channels=80 * expansion_factor,
                                    output_channels=80,
                                    strides=2)
        x = mobilevit_block(x, num_blocks=3, projection_dim=96)
        x = conv_block(x, filters=320, kernel_size=1, strides=1)

        # Classification head.
        x = layers.GlobalAvgPool2D()(x)
        outputs = layers.Dense(num_classes, activation="softmax")(x)

        return keras.Model(inputs, outputs)
Code Example #15
def get_model():
    backbone = tf.keras.applications.DenseNet121(
        weights=None,
        include_top=True,
        classes=2,
        input_shape=(TARGET_SIZE[0], TARGET_SIZE[1], 3),
    )
    backbone.trainable = True

    inputs = layers.Input((INP_SIZE[0], INP_SIZE[1], 3))
    x = layers.Rescaling(scale=1.0 / 255)(inputs)
    x = learnable_resizer(x)
    outputs = backbone(x)

    return tf.keras.Model(inputs, outputs)
Code Example #16
def get_encoder():
    # Input and backbone.
    inputs = layers.Input((CROP_TO, CROP_TO, 3))
    x = layers.Rescaling(scale=1.0 / 127.5, offset=-1)(inputs)
    x = resnet_cifar10_v2.stem(x)
    x = resnet_cifar10_v2.learner(x, NUM_BLOCKS)
    x = layers.GlobalAveragePooling2D(name="backbone_pool")(x)

    # Projection head.
    x = layers.Dense(PROJECT_DIM,
                     use_bias=False,
                     kernel_regularizer=regularizers.l2(WEIGHT_DECAY))(x)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU()(x)
    x = layers.Dense(PROJECT_DIM,
                     use_bias=False,
                     kernel_regularizer=regularizers.l2(WEIGHT_DECAY))(x)
    outputs = layers.BatchNormalization()(x)
    return tf.keras.Model(inputs, outputs, name="encoder")
Code Example #17
File: convmixer.py  Project: strongdiamond/keras-io
def get_conv_mixer_256_8(image_size=32,
                         filters=256,
                         depth=8,
                         kernel_size=5,
                         patch_size=2,
                         num_classes=10):
    """ConvMixer-256/8: https://openreview.net/pdf?id=TVHS5Y4dNvM.
    The hyperparameter values are taken from the paper.
    """
    inputs = keras.Input((image_size, image_size, 3))
    x = layers.Rescaling(scale=1.0 / 255)(inputs)

    # Extract patch embeddings.
    x = conv_stem(x, filters, patch_size)

    # ConvMixer blocks.
    for _ in range(depth):
        x = conv_mixer_block(x, filters, kernel_size)

    # Classification block.
    x = layers.GlobalAvgPool2D()(x)
    outputs = layers.Dense(num_classes, activation="softmax")(x)

    return keras.Model(inputs, outputs)
Code Example #18
# However, due to the small number of training images, a large network
# will easily overfit. Therefore, to make the most of our limited
# number of training examples, we'll apply random augmentation
# transformations (crop and horizontal flip) to them each time we are
# looping over them. This way, we "augment" our training dataset to
# contain more data.
#
# The augmentation transformations are implemented as preprocessing
# layers in Keras. There are various such layers readily available,
# see https://keras.io/guides/preprocessing_layers/ for more
# information.
#
# ### Initialization

inputs = keras.Input(shape=[256, 256, 3])
x = layers.Rescaling(scale=1. / 255)(inputs)

x = layers.RandomCrop(160, 160)(x)
x = layers.RandomFlip(mode="horizontal")(x)

x = layers.Conv2D(32, (3, 3), activation='relu')(x)
x = layers.MaxPooling2D(pool_size=(2, 2))(x)

x = layers.Conv2D(32, (3, 3), activation='relu')(x)
x = layers.MaxPooling2D(pool_size=(2, 2))(x)

x = layers.Conv2D(64, (3, 3), activation='relu')(x)
x = layers.MaxPooling2D(pool_size=(2, 2))(x)

x = layers.Flatten()(x)
x = layers.Dense(64, activation='relu')(x)
Code Example #19
def main():
    # Hyperparameters and constraints
    positional_emb = True
    conv_layers = 2
    projection_dim = 128

    num_heads = 2
    transformer_units = [
        projection_dim,
        projection_dim,
    ]
    transformer_layers = 2
    stochastic_depth_rate = 0.1

    learning_rate = 0.001
    weight_decay = 0.0001
    batch_size = 128
    num_epochs = 30
    image_size = 32

    # Load CIFAR-10 dataset
    num_classes = 10
    input_shape = (32, 32, 3)

    (x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()

    y_train = keras.utils.to_categorical(y_train, num_classes)
    y_test = keras.utils.to_categorical(y_test, num_classes)

    print(f"x_train shape: {x_train.shape} - y_train shape: {y_train.shape}")
    print(f"x_test shape: {x_test.shape} - y_test shape: {y_test.shape}")

    # The CCT tokenizer
    # The first recipe introduced by the CCT authors is the tokenizer
    # for processing the images. In a standard ViT, images are
    # organized into uniform non-overlapping patches. This discards the
    # boundary-level information shared between neighboring patches,
    # which a neural network needs in order to effectively exploit
    # locality.
    # We already know that convolutions are quite good at exploiting
    # locality information. So, based on this, the authors introduced
    # an all-convolutional mini-network to produce image patches.
    class CCTTokenizer(layers.Layer):
        def __init__(self,
                     kernel_size=3,
                     stride=1,
                     padding=1,
                     pooling_kernel_size=3,
                     pooling_stride=2,
                     num_conv_layers=conv_layers,
                     num_output_channels=[64, 128],
                     positional_emb=positional_emb,
                     **kwargs):
            super(CCTTokenizer, self).__init__(**kwargs)

            # This is the tokenizer.
            self.conv_model = keras.Sequential()
            for i in range(num_conv_layers):
                self.conv_model.add(
                    layers.Conv2D(
                        num_output_channels[i],
                        kernel_size,
                        stride,
                        padding="valid",
                        use_bias=False,
                        activation="relu",
                        kernel_initializer="he_normal",
                    ))
                self.conv_model.add(layers.ZeroPadding2D(padding))
                self.conv_model.add(
                    layers.MaxPooling2D(pooling_kernel_size, pooling_stride,
                                        "same"))
            self.positional_emb = positional_emb

        def call(self, images):
            outputs = self.conv_model(images)

            # After passing the images through the mini-network the
            # spatial dimensions are flattened to form sequences.
            reshaped = tf.reshape(
                outputs, (-1, tf.shape(outputs)[1] * tf.shape(outputs)[2],
                          tf.shape(outputs)[-1]))
            return reshaped

        def positional_embedding(self, image_size):
            # Positional embeddings are optional in CCT. Here, we
            # calculate the sequence length and initialize an
            # 'Embedding' layer to compute the positional embeddings
            # later.
            if self.positional_emb:
                dummy_inputs = tf.ones((1, image_size, image_size, 3))
                dummy_outputs = self.call(dummy_inputs)
                sequence_length = tf.shape(dummy_outputs)[1]
                projection_dim = tf.shape(dummy_outputs)[-1]

                embed_layer = layers.Embedding(input_dim=sequence_length,
                                               output_dim=projection_dim)
                return embed_layer, sequence_length
            else:
                return None

    # Stochastic depth for regularization
    # Stochastic depth is a regularization technique that randomly
    # drops a set of layers. During inference, the layers are kept as
    # they are. It is very similar to Dropout, except that it operates
    # on a block of layers rather than on individual nodes within a
    # layer. In CCT, stochastic depth is used just
    # before the residual blocks of a transformer encoder.
    # Referred from: github.com:rwightman/pytorch-image-models.
    class StochasticDepth(layers.Layer):
        def __init__(self, drop_prop, **kwargs):
            super(StochasticDepth, self).__init__(**kwargs)
            self.drop_prob = drop_prop

        def call(self, x, training=None):
            if training:
                keep_prob = 1 - self.drop_prob
                shape = (tf.shape(x)[0], ) + (1, ) * (len(tf.shape(x)) - 1)
                random_tensor = keep_prob + tf.random.uniform(shape, 0, 1)
                random_tensor = tf.floor(random_tensor)
                return (x / keep_prob) * random_tensor
            return x

    # MLP for the transformer encoder
    def mlp(x, hidden_units, dropout_rate):
        for units in hidden_units:
            x = layers.Dense(units, activation=tf.nn.gelu)(x)
            x = layers.Dropout(dropout_rate)(x)
        return x

    # Data augmentation
    # In the original paper, the authors use AutoAugment to induce
    # stronger regularization. For this example, we use standard
    # geometric augmentations like random cropping and flipping.
    # Note the rescaling layer. These layers have pre-defined inference
    # behavior.
    data_augmentation = keras.Sequential(
        [
            layers.Rescaling(scale=1.0 / 255),
            layers.RandomCrop(image_size, image_size),
            layers.RandomFlip('horizontal'),
        ],
        name="data_augmentation",
    )

    # The final CCT model
    # Another recipe introduced in CCT is attention pooling or sequence
    # pooling. In ViT, only the feature map corresponding to the class
    # token is pooled and is then used for the subsequent
    # classification task (or any other downstream task). In CCT,
    # outputs from the transformer encoder are weighted and then
    # passed on to the final task-specific layer (in this example, we
    # do classification).
    def create_cct_model(image_size=image_size,
                         input_shape=input_shape,
                         num_heads=num_heads,
                         projection_dim=projection_dim,
                         transformer_units=transformer_units):
        inputs = layers.Input(input_shape)

        # Augment data.
        augmented = data_augmentation(inputs)

        # Encode patches.
        cct_tokenizer = CCTTokenizer()
        encoded_patches = cct_tokenizer(augmented)

        # Apply positional embedding.
        if positional_emb:
            pos_embed, seq_length = cct_tokenizer.positional_embedding(
                image_size)
            positions = tf.range(start=0, limit=seq_length, delta=1)
            position_embeddings = pos_embed(positions)
            encoded_patches += position_embeddings

        # Calculate Stochastic Depth probabilities.
        dpr = [
            x
            for x in np.linspace(0, stochastic_depth_rate, transformer_layers)
        ]

        # Create multiple layers of the transformer block.
        for i in range(transformer_layers):
            # Layer normalization 1.
            x1 = layers.LayerNormalization(epsilon=1e-5)(encoded_patches)

            # Create a multi-head attention layer.
            attention_output = layers.MultiHeadAttention(
                num_heads=num_heads, key_dim=projection_dim, dropout=0.1)(x1,
                                                                          x1)

            # Skip connection 1.
            attention_output = StochasticDepth(dpr[i])(attention_output)
            x2 = layers.Add()([attention_output, encoded_patches])

            # Layer normalization 2.
            x3 = layers.LayerNormalization(epsilon=1e-5)(x2)

            # MLP.
            x3 = mlp(x3, hidden_units=transformer_units, dropout_rate=0.1)

            # Skip connection 2.
            x3 = StochasticDepth(dpr[i])(x3)
            encoded_patches = layers.Add()([x3, x2])

        # Apply sequence pooling.
        representation = layers.LayerNormalization(
            epsilon=1e-5)(encoded_patches)
        attention_weights = tf.nn.softmax(layers.Dense(1)(representation),
                                          axis=1)
        weighted_representation = tf.matmul(attention_weights,
                                            representation,
                                            transpose_a=True)
        weighted_representation = tf.squeeze(weighted_representation, -2)

        # Classify outputs.
        logits = layers.Dense(num_classes)(weighted_representation)

        # Create the keras model.
        model = keras.Model(inputs=inputs, outputs=logits)
        return model

    # Model training and evaluation
    def run_experiment(model):
        optimizer = tfa.optimizers.AdamW(learning_rate=0.001,
                                         weight_decay=0.0001)

        model.compile(
            optimizer=optimizer,
            loss=keras.losses.CategoricalCrossentropy(from_logits=True,
                                                      label_smoothing=0.1),
            metrics=[
                keras.metrics.CategoricalAccuracy(name="accuracy"),
                keras.metrics.TopKCategoricalAccuracy(5, name="top-5-accuracy")
            ],
        )

        checkpoint_filepath = "./tmp/checkpoint"
        checkpoint_callback = keras.callbacks.ModelCheckpoint(
            checkpoint_filepath,
            monitor="val_accuracy",
            save_best_only=True,
            save_weights_only=True,
        )

        history = model.fit(
            x=x_train,
            y=y_train,
            batch_size=batch_size,
            epochs=num_epochs,
            validation_split=0.1,
            callbacks=[checkpoint_callback],
        )

        model.load_weights(checkpoint_filepath)
        _, accuracy, top_5_accuracy = model.evaluate(x_test, y_test)
        print(f"Test accuracy: {round(accuracy * 100, 2)}%")
        print(f"Test top 5 accuracy: {round(top_5_accuracy * 100, 2)}%")

        return history

    cct_model = create_cct_model()
    history = run_experiment(cct_model)

    # Now visualize the training progress of the model.
    '''
    plt.plot(history.history["loss"], label="train_loss")
    plt.plot(history.history["val_loss"], label="val_loss")
    plt.xlabel("Epochs")
    plt.ylabel("Loss")
    plt.title("Train and Validation Losses Over Epochs", fontsize=14)
    plt.legend()
    plt.grid()
    plt.show()
    '''

    # The CCT model trained above has just 0.4 million parameters, and
    # it gets to ~78% top-1 accuracy within 30 epochs. The plot above
    # shows no signs of overfitting either. This means we can train
    # this network for longer (perhaps with a bit more regularization)
    # and may obtain even better performance. Performance can be
    # improved further with additional recipes such as a cosine decay
    # learning rate schedule and other data augmentation techniques
    # like AutoAugment, MixUp or CutMix. With these modifications, the
    # authors report 95.1% top-1 accuracy on the CIFAR-10 dataset. The
    # authors also present a number of experiments to study how the
    # number of convolution blocks, transformer layers, etc. affect the
    # final performance of CCTs.
    # For a comparison, a ViT model takes about 4.7 million parameters
    # and 100 epochs of training to reach top-1 accuracy of 78.22% on
    # the CIFAR-10 dataset. You can refer to this notebook to know
    # about the experimental setup.
    # The authors also demonstrate the performance of Compact
    # Convolutional Transformers on NLP tasks and they report
    # competitive results there.

    # Exit the program.
    exit(0)
Code Example #20
    return x


"""
## Data augmentation

In the [original paper](https://arxiv.org/abs/2104.05704), the authors use
[AutoAugment](https://arxiv.org/abs/1805.09501) to induce stronger regularization. For
this example, we will be using the standard geometric augmentations like random cropping
and flipping.
"""

# Note the rescaling layer. These layers have pre-defined inference behavior.
data_augmentation = keras.Sequential(
    [
        layers.Rescaling(scale=1.0 / 255),
        layers.RandomCrop(image_size, image_size),
        layers.RandomFlip("horizontal"),
    ],
    name="data_augmentation",
)

"""
## The final CCT model

Another recipe introduced in CCT is attention pooling or sequence pooling. In ViT, only
the feature map corresponding to the class token is pooled and is then used for the
subsequent classification task (or any other downstream task). In CCT, outputs from the
Transformer encoder are weighted and then passed on to the final task-specific layer (in
this example, we do classification).
"""
Code Example #21
File: token_learner.py  Project: ksalama/keras-io
test_ds = tf.data.Dataset.from_tensor_slices((x_test, y_test))
test_ds = test_ds.batch(BATCH_SIZE).prefetch(AUTO)
"""
## Data augmentation

The augmentation pipeline consists of:

- Rescaling
- Resizing
- Random cropping (fixed-sized or random sized)
- Random horizontal flipping
"""

data_augmentation = keras.Sequential(
    [
        layers.Rescaling(1 / 255.0),
        layers.Resizing(INPUT_SHAPE[0] + 20, INPUT_SHAPE[0] + 20),
        layers.RandomCrop(IMAGE_SIZE, IMAGE_SIZE),
        layers.RandomFlip("horizontal"),
    ],
    name="data_augmentation",
)
"""
Note that image data augmentation layers do not apply data transformations at inference time.
This means that when these layers are called with `training=False` they behave differently. Refer
[to the documentation](https://keras.io/api/layers/preprocessing_layers/image_augmentation/) for more
details.
"""
"""
## Positional embedding module
Code Example #22
    layers.RandomZoom(0.1),
])

# Load some data
(x_train, y_train), _ = keras.datasets.cifar10.load_data()
input_shape = x_train.shape[1:]
classes = 10

# Create a tf.data pipeline of augmented images (and their labels)
train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train))
train_dataset = train_dataset.batch(16).map(lambda x, y:
                                            (data_augmentation(x), y))

# Create a model and train it on the augmented image data
inputs = keras.Input(shape=input_shape)
x = layers.Rescaling(1.0 / 255)(inputs)  # Rescale inputs
outputs = keras.applications.ResNet50(  # Add the rest of the model
    weights=None,
    input_shape=input_shape,
    classes=classes)(x)
model = keras.Model(inputs, outputs)
model.compile(optimizer="rmsprop", loss="sparse_categorical_crossentropy")
model.fit(train_dataset, steps_per_epoch=5)
"""
You can see a similar setup in action in the example
[image classification from scratch](https://keras.io/examples/vision/image_classification_from_scratch/).
"""
"""
### Normalizing numerical features
"""
Code Example #23
    split=[tfds.Split.TRAIN, tfds.Split.TEST],
    with_info=True,
    as_supervised=True,
)

print(f"Image shape: {metadata.features['image'].shape}")
print(f"Training images: {metadata.splits['train'].num_examples}")
print(f"Test images: {metadata.splits['test'].num_examples}")

"""
## Use Data Augmentation

We will rescale the data to `[0, 1]` and perform simple augmentations to our data.
"""

rescale = layers.Rescaling(1.0 / 255)

data_augmentation = tf.keras.Sequential(
    [
        layers.RandomFlip("horizontal_and_vertical"),
        layers.RandomRotation(0.3),
        layers.RandomZoom(0.2),
    ]
)


def prepare(ds, shuffle=False, augment=False):
    # Rescale dataset
    ds = ds.map(lambda x, y: (rescale(x), y), num_parallel_calls=AUTOTUNE)

    if shuffle: