Example #1
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.layers.experimental.preprocessing import CenterCrop, Rescaling

def model_builder(hp):
    # Build a CNN; the optimizer's learning rate is a tunable hyperparameter
    # We expect our inputs to be RGB images of arbitrary size
    inputs = keras.Input(shape=(None, None, 3))

    # Center-crop images to 101 x 200 to fit the sample size
    x = CenterCrop(height=101, width=200)(inputs)
    # Rescale images to [0, 1]
    x = Rescaling(scale=1. / 255)(x)
    # Apply some convolution and pooling layers
    x = layers.Conv2D(filters=32, kernel_size=(3, 3), padding='same', activation='relu')(x)
    x = layers.MaxPooling2D(pool_size=(3, 3))(x)
    x = layers.Conv2D(filters=32, kernel_size=(3, 3), padding='same', activation='relu')(x)
    # Apply global average pooling to get flat feature vectors
    x = layers.GlobalAveragePooling2D()(x)
    # Add a dense layer
    x = layers.Dense(20, activation='relu')(x)
    # Add a dense classifier on top
    num_classes = 10
    outputs = layers.Dense(num_classes, activation='softmax')(x)
    model = keras.Model(inputs=inputs, outputs=outputs)
    model.summary()

    # Tune the learning rate for the optimizer:
    # choose an optimal value from 0.01, 0.001, or 0.0001
    hp_learning_rate = hp.Choice('learning_rate', values=[1e-2, 1e-3, 1e-4])
    # Compile, keeping accuracy as the reported metric.
    # The softmax output already yields probabilities, so from_logits must be False.
    model.compile(optimizer=keras.optimizers.Adam(learning_rate=hp_learning_rate),
                  loss=keras.losses.SparseCategoricalCrossentropy(from_logits=False),
                  metrics=[keras.metrics.SparseCategoricalAccuracy(name='acc')])
    return model
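The builder follows the KerasTuner hypermodel convention, so it plugs straight into a tuner. A minimal usage sketch, assuming `keras_tuner` is installed and `x_train`/`y_train` are already loaded (both names are assumptions):

import keras_tuner as kt

tuner = kt.Hyperband(model_builder,
                     objective='val_acc',  # matches the metric name 'acc' above
                     max_epochs=10)
tuner.search(x_train, y_train, epochs=10, validation_split=0.2)
best_model = tuner.get_best_models(num_models=1)[0]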
Example #2
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.layers.experimental.preprocessing import CenterCrop, Rescaling

# METRICS is a list of Keras metrics defined elsewhere in the source project
def make_model(metrics=METRICS, output_bias=None):
    if output_bias is not None:
        output_bias = tf.keras.initializers.Constant(output_bias)
    inputs = keras.Input(shape=(None, None, 3))
    targetsize = 512
    # 1D cropping to fit the sample size
    x = CenterCrop(height=1, width=targetsize)(inputs)
    # Rescale images to [0, 1] (applied to the cropped tensor x, not to inputs)
    x = Rescaling(scale=1. / 255)(x)
    # Apply some convolution layers
    x = layers.Conv2D(filters=32,
                      kernel_size=3,
                      strides=2,
                      padding='same',
                      activation='relu')(x)
    x = layers.Conv2D(filters=32,
                      kernel_size=3,
                      strides=2,
                      padding='same',
                      activation='relu')(x)
    x = layers.Conv2D(filters=32,
                      kernel_size=3,
                      strides=2,
                      padding='same',
                      activation='relu')(x)
    # Apply global average pooling to get flat feature vectors
    x = layers.GlobalAveragePooling2D()(x)
    # Add a dense layer
    x = layers.Dense(16, activation='relu')(x)
    # Add a dense classifier on top (sigmoid: independent per-class probabilities)
    num_classes = 3
    outputs = layers.Dense(num_classes,
                           activation='sigmoid',
                           bias_initializer=output_bias)(x)
    model = keras.Model(inputs=inputs, outputs=outputs)
    model.summary()
    # 'lr' is deprecated in tf.keras optimizers; use 'learning_rate'
    model.compile(optimizer=keras.optimizers.Adam(learning_rate=1e-3),
                  loss=keras.losses.BinaryCrossentropy(),
                  metrics=metrics)
    return model
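`output_bias` lets the sigmoid outputs start at each class's base rate instead of 0.5 on imbalanced data, following the log-odds initialization pattern from the TensorFlow imbalanced-data tutorial. A sketch with hypothetical per-class counts:

import numpy as np

pos = np.array([900., 80., 20.])    # hypothetical positives per class
neg = np.array([100., 920., 980.])  # hypothetical negatives per class
initial_bias = np.log(pos / neg)    # log-odds, one value per output unit
model = make_model(metrics=['accuracy'], output_bias=initial_bias)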
Example #3
    # __init__ of a dataset-wrapper class (the class definition itself is not
    # shown in this snippet)
    def __init__(self, path_cartoon_dataset, path_real_faces_dataset,
                 batch_size):
        # Image cropper
        self.cropper = CenterCrop(height=64, width=64)

        self.training_cartoon = tf.keras.preprocessing.image_dataset_from_directory(
            path_cartoon_dataset,
            image_size=(83, 83),
            batch_size=batch_size,
            label_mode=None,
            shuffle=True)
        self.training_cartoon = self.training_cartoon.map(
            self.preprocessing_image)

        self.training_face = tf.keras.preprocessing.image_dataset_from_directory(
            path_real_faces_dataset,
            image_size=(83, 83),
            batch_size=batch_size,
            label_mode=None,
            shuffle=True)
        self.training_face = self.training_face.map(self.preprocessing_image)

        self.dataset_numpy = self.join_datasets_to_numpy()
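The `preprocessing_image` method mapped over both datasets above is not shown. A plausible sketch, assuming it applies `self.cropper` and rescales pixels to [-1, 1] (a common convention for image-translation models; the rescaling choice is an assumption):

    def preprocessing_image(self, image):
        # Hypothetical: center-crop to 64x64, then rescale [0, 255] -> [-1, 1]
        image = self.cropper(image)
        return (image / 127.5) - 1.0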
print("mean: %.4f" % np.mean(normalized_data))
"""
**Example: rescaling & center-cropping images**

Both the `Rescaling` layer and the `CenterCrop` layer are stateless, so it isn't
necessary to call `adapt()` in this case.
"""

import numpy as np
from tensorflow.keras.layers.experimental.preprocessing import CenterCrop
from tensorflow.keras.layers.experimental.preprocessing import Rescaling

# Example image data, with values in the [0, 255] range
training_data = np.random.randint(0, 256,
                                  size=(64, 200, 200, 3)).astype("float32")

cropper = CenterCrop(height=150, width=150)
scaler = Rescaling(scale=1.0 / 255)

output_data = scaler(cropper(training_data))
print("shape:", output_data.shape)
print("min:", np.min(output_data))
print("max:", np.max(output_data))
"""
## Building models with the Keras Functional API

A "layer" is a simple input-output transformation (such as the scaling &
center-cropping transformations above). For instance, here's a linear projection layer
 that maps its inputs to a 16-dimensional feature space:

```python
dense = keras.layers.Dense(units=16)
```
"""
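Calling the layer on a batch of data applies the projection; for example (a quick check, assuming `tensorflow` is available):

import tensorflow as tf

x = tf.ones((2, 8))  # a batch of 2 samples with 8 features each
y = dense(x)         # the layer's weights are created on first call
print(y.shape)       # (2, 16)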
Example #5
from typing import Optional, Tuple

import tensorflow as tf
import tensorflow_datasets as tfds
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.layers.experimental.preprocessing import (
    CenterCrop, RandomContrast, RandomRotation, RandomTranslation, RandomZoom)

AUTOTUNE = tf.data.AUTOTUNE

# set_gpu_memory_growth, onehot_encoding, create_model, preprocess_input,
# ClippedValueLoss and AdditiveAngularMarginLoss are helpers defined elsewhere
# in the source repository.


def main(
    root_dir: str,
    split: str,
    input_shape: Tuple[int, int, int],
    n_classes: int,
    margin: float,
    scale: float,
    embedding_dimension: int,
    momentum: float,
    weight_decay: float,
    batch_size: int,
    epochs: int,
    seed: int,
    model_path: str,
    precision_policy: Optional[str] = None,
    **kwargs,
):
    set_gpu_memory_growth()

    if precision_policy is None:
        precision_policy = "float32"
    policy = tf.keras.mixed_precision.Policy(precision_policy)
    tf.keras.mixed_precision.set_global_policy(policy)

    read_config = tfds.ReadConfig(shuffle_seed=seed)
    builder = tfds.ImageFolder(root_dir)
    ds: tf.data.Dataset = builder.as_dataset(
        split=split,
        batch_size=batch_size,
        shuffle_files=True,
        decoders={"label": onehot_encoding(depth=n_classes)},
        read_config=read_config,
        as_supervised=True,
    )

    height, width, n_channels = input_shape
    data_augmentation = tf.keras.Sequential([
        RandomRotation(factor=0.05, fill_mode="nearest", seed=seed),
        RandomTranslation(height_factor=0.1,
                          width_factor=0.1,
                          fill_mode="wrap",
                          seed=seed),
        RandomZoom(height_factor=0.1, fill_mode="reflect", seed=seed),
        RandomContrast(factor=0.1, seed=seed),
        CenterCrop(height=height, width=width),
    ])

    ds = (ds.map(lambda x, y: (preprocess_input(x), y),
                 num_parallel_calls=AUTOTUNE).map(
                     lambda x, y: (data_augmentation(x), y),
                     num_parallel_calls=AUTOTUNE).unbatch())

    valid_size = 1000
    valid_ds = ds.take(valid_size).batch(batch_size).prefetch(AUTOTUNE)
    train_ds = (ds.skip(valid_size).shuffle(buffer_size=100000).batch(
        batch_size, drop_remainder=True).prefetch(AUTOTUNE))

    model = create_model(
        input_shape=input_shape,
        n_classes=n_classes,
        embedding_dimension=embedding_dimension,
        weights_decay=weight_decay,
        use_pretrain=False,
    )

    optimizer = tf.keras.optimizers.SGD(momentum=momentum)

    model_checkpoint = ModelCheckpoint(
        # "./model/weights.{epoch:03d}-{val_loss:.3f}.hdf5",
        model_path,
        monitor="val_loss",
        save_best_only=True,
    )

    def scheduler(epoch, lr):
        # Piecewise-constant schedule; the incoming lr is deliberately ignored
        if epoch < 30:
            return 1e-1
        elif epoch < 60:
            return 1e-2
        elif epoch < 90:
            return 1e-3
        else:
            return 1e-4

    lr_scheduler = tf.keras.callbacks.LearningRateScheduler(scheduler)

    tensorboard_callback = tf.keras.callbacks.TensorBoard(histogram_freq=1)

    model.compile(
        optimizer=optimizer,
        loss=ClippedValueLoss(
            loss_func=AdditiveAngularMarginLoss(
                loss_func=tf.keras.losses.CategoricalCrossentropy(),
                margin=margin,
                scale=scale,
                dtype=policy.compute_dtype,
            ),
            x_min=tf.keras.backend.epsilon(),
            x_max=1.0,
        ),
        metrics=[tf.keras.metrics.CategoricalAccuracy()],
    )
    # batch_size is omitted here: train_ds is an already-batched tf.data.Dataset,
    # and Keras rejects an explicit batch_size for dataset inputs.
    model.fit(
        train_ds,
        epochs=epochs,
        validation_data=valid_ds,
        callbacks=[
            model_checkpoint,
            lr_scheduler,
            tensorboard_callback,
        ],
        verbose=1,
    )
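`onehot_encoding` is a project-local helper. A plausible sketch using the `tfds.decode.make_decoder` pattern (the exact implementation in the source repository may differ):

import tensorflow as tf
import tensorflow_datasets as tfds

@tfds.decode.make_decoder()
def onehot_encoding(example, feature, depth):
    # Decode the label as usual, then one-hot encode it to `depth` classes
    return tf.one_hot(feature.decode_example(example), depth=depth)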
Example #6
import numpy as np
import tensorflow as tf
from tensorflow import keras

from tensorflow.keras.layers.experimental.preprocessing import CenterCrop, Rescaling

training_data = np.random.randint(0, 256,
                                  size=(64, 200, 200, 3)).astype("float32")

cropper = CenterCrop(height=150, width=150)
scaler = Rescaling(scale=1.0 / 255)

output_data = scaler(cropper(training_data))
print("shape:", output_data.shape)
print("min:", np.min(output_data))
print("max:", np.max(output_data))
Example #7
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers.experimental.preprocessing import CenterCrop
from tensorflow.keras.layers.experimental.preprocessing import Rescaling

# Build a small CNN
from tensorflow.keras import layers

# Let's say we expect our inputs to be RGB images of arbitrary size
inputs = keras.Input(shape=(None, None, 3))

# Center-crop images to 101 x 200 to fit the sample size
x = CenterCrop(height=101, width=200)(inputs)
# Rescale images to [0, 1]
x = Rescaling(scale=1. / 255)(x)
# Apply some convolution and pooling layers
x = layers.Conv2D(filters=32, kernel_size=(3, 3), padding='same', activation='relu')(x)
x = layers.MaxPooling2D(pool_size=(3, 3))(x)
x = layers.Conv2D(filters=32, kernel_size=(3, 3), padding='same', activation='relu')(x)
# Apply global average pooling to get flat feature vectors
x = layers.GlobalAveragePooling2D()(x)
# Add a dense layer
x = layers.Dense(20, activation='relu')(x)
# Add a dense classifier on top
num_classes = 10
outputs = layers.Dense(num_classes, activation='softmax')(x)
model = keras.Model(inputs=inputs, outputs=outputs)
model.summary()
# Compile, keeping accuracy as the reported metric
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=[keras.metrics.SparseCategoricalAccuracy(name='acc')])
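A quick smoke test on random data (the shapes below are arbitrary; each image just has to be at least 101 x 200 so the center crop fits):

x_train = np.random.randint(0, 256, size=(64, 128, 256, 3)).astype("float32")
y_train = np.random.randint(0, 10, size=(64,))  # sparse integer class labels
model.fit(x_train, y_train, batch_size=32, epochs=1)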
Example #8
directory="./miml_dataset/images",
x_col="Filenames",
batch_size=1,
seed=42,
shuffle=False,
class_mode=None,
target_size=(100,100))


# Let's say we expect our inputs to be RGB images of arbitrary size
inputs = keras.Input(shape=(None,None,3))
targetsize=512
from tensorflow.keras import layers
import math
# 1D cropping to fit sample size
x = CenterCrop(height=1,width=targetsize)(inputs)
#print(x.shape)
# Rescale images to [0, 1]
x = Rescaling(scale=1./255)(inputs)
# Apply some convolution and pooling layers
x = layers.Conv2D(filters=32, kernel_size=3, strides=2, padding='SAME', activation='relu')(x)
x = layers.Conv2D(filters=32, kernel_size=3, strides=2, padding='SAME', activation='relu')(x)
x = layers.Conv2D(filters=32, kernel_size=3, strides=2, padding='SAME', activation='relu')(x)
# Apply global average pooling to get flat feature vectors
x = layers.GlobalAveragePooling2D()(x)
# add a dense layer
x = layers.Dense(16, activation='relu')(x)
# Add a dense classifier on top
num_classes = 3
outputs = layers.Dense(num_classes, activation='sigmoid')(x)
model = keras.Model(inputs=inputs, outputs=outputs)
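The snippet stops at model construction. With independent sigmoid outputs over 3 classes (a multi-label setup), a typical next step would be (a sketch, not part of the original source):

model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['binary_accuracy'])
model.summary()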