Example #1
    def model(self):
        lr = self.lr
        augmentation = self.augmentation
        normalization = self.normalization

        # EMB1 image (convolutional)
        input0 = Input(shape=(128, 4, 1), name='EMB1')
        if (augmentation): X0 = RandomFlip(name='aug_reflect_0')(input0)
        else: X0 = input0

        if (normalization): X0 = NormalizationBlock(axes=[1, 2])(X0)
        X0 = Conv2D(32, (4, 2), activation='relu')(X0)
        X0 = MaxPooling2D(pool_size=(2, 2))(X0)
        X0 = Dropout(0.2)(X0)
        X0 = Flatten()(X0)
        X0 = Dense(128, activation='relu')(X0)

        # EMB2 image (convolutional)
        input1 = Input(shape=(16, 16, 1), name='EMB2')
        if (augmentation): X1 = RandomFlip(name='aug_reflect_1')(input1)
        else: X1 = input1

        if (normalization): X1 = NormalizationBlock(axes=[1, 2])(X1)
        X1 = Conv2D(32, (4, 4), activation='relu')(X1)
        X1 = MaxPooling2D(pool_size=(2, 2))(X1)
        X1 = Dropout(0.2)(X1)
        X1 = Flatten()(X1)
        X1 = Dense(128, activation='relu')(X1)

        # EMB3 image (convolutional)
        input2 = Input(shape=(8, 16, 1), name='EMB3')
        if (augmentation): X2 = RandomFlip(name='aug_reflect_2')(input2)
        else: X2 = input2

        if (normalization): X2 = NormalizationBlock(axes=[1, 2])(X2)
        X2 = Conv2D(32, (2, 4), activation='relu')(X2)
        X2 = MaxPooling2D(pool_size=(2, 2))(X2)
        X2 = Dropout(0.2)(X2)
        X2 = Flatten()(X2)
        X2 = Dense(128, activation='relu')(X2)

        # concatenate outputs from the three networks above
        X = Concatenate(axis=1)([X0, X1, X2])  # remember that axis=0 is batch!
        X = Dense(50, activation='relu')(X)

        # final output
        X = Dense(2, activation='softmax')(X)
        model = Model(inputs=[input0, input1, input2], outputs=X)

        # compile model
        optimizer = Adam(learning_rate=lr)
        model.compile(loss='categorical_crossentropy',
                      optimizer=optimizer,
                      metrics=['acc'])
        return model
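Since this model has three named image inputs, it can be trained on a dict keyed by the Input layer names. A minimal smoke-test sketch with random data, assuming net holds the compiled model returned by the method above (batch size and label encoding are illustrative):

import numpy as np

batch = 8
x = {
    'EMB1': np.random.rand(batch, 128, 4, 1).astype('float32'),
    'EMB2': np.random.rand(batch, 16, 16, 1).astype('float32'),
    'EMB3': np.random.rand(batch, 8, 16, 1).astype('float32'),
}
y = np.eye(2)[np.random.randint(0, 2, size=batch)]  # one-hot labels for the 2-way softmax head
net.fit(x, y, epochs=1, batch_size=4)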
Example #2
    def model(self):
        lr = self.lr
        input_shape = self.input_shape
        f = self.f
        pool = self.pool
        augmentation = self.augmentation
        normalization = self.normalization

        X_in = Input(input_shape, name='input')

        # Augmentation: randomly flip the image during training
        if (augmentation): X = RandomFlip(name='aug_reflect')(X_in)
        else: X = X_in

        # Normalization: Rescale the image to have an integral of 1
        if (normalization): X = NormalizationBlock(axes=[1, 2])(X)

        X = Conv2D(32, f, name='conv1', activation='relu')(X)
        #temp_pool = (2,2) #TODO: Including this pooling caused model to fail to compile. Did this work in original CNN notebook w/ original parameter choices?
        #X = MaxPooling2D(temp_pool)(X)
        X = Conv2D(16, pool, activation='relu')(X)
        X = MaxPooling2D(pool)(X)
        X = Flatten()(X)
        X = Dense(128, activation='relu')(X)
        X = Dense(50, activation='relu')(X)
        X = Dense(2, kernel_initializer='normal', activation='softmax')(X)
        model = Model(inputs=X_in, outputs=X, name='SL_CNN')
        # compile model
        optimizer = Adam(learning_rate=lr)
        model.compile(loss='categorical_crossentropy',
                      optimizer=optimizer,
                      metrics=['acc'])
        return model
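NormalizationBlock is a project-specific layer that is not defined in these snippets. Going by the comment above ("rescale the image to have an integral of 1"), an assumed stand-in could simply divide each image by the sum of its pixels over the given axes; this is a sketch of the idea, not the original implementation:

import tensorflow as tf
from tensorflow.keras.layers import Layer


class UnitIntegralNormalization(Layer):
    """Assumed stand-in for NormalizationBlock(axes=[1, 2])."""

    def __init__(self, axes=(1, 2), **kwargs):
        super().__init__(**kwargs)
        self.axes = list(axes)

    def call(self, x):
        # Divide each image by its sum over the given axes so it integrates to 1.
        total = tf.reduce_sum(x, axis=self.axes, keepdims=True)
        return x / (total + 1e-8)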
Example #3
def base_model():

    input_img = Input(shape=(128, 128, 4))
    #x = RandomContrast([0.1, 1])(input_img)
    
    x = RandomFlip()(input_img)
    x = Conv2D(16, 11)(x)
    x = Activation('relu')(x)
    x = BatchNormalization()(x)
    x = Conv2D(32, 9)(x)
    x = Activation('relu')(x)
    x = BatchNormalization()(x)
    x = MaxPooling2D()(x)
    x = Conv2D(64, 7)(x)
    x = Activation('relu')(x)
    x = BatchNormalization()(x)
    x = MaxPooling2D()(x)
    x = Conv2D(128, 5)(x)    
    x = Activation('relu')(x)
    x = BatchNormalization()(x)
    x = MaxPooling2D()(x)
    x = Conv2D(256, 3)(x)
    x = Activation('relu')(x)
    x = BatchNormalization()(x)
    x = Flatten()(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(512, activation='relu')(x)
    x = Dropout(0.5)(x)

    output = Dense(1, activation='linear', name='class_dense')(x)

    model = Model(inputs=input_img, outputs=output)
    
    return model
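base_model is returned uncompiled and ends in a single linear unit, so it appears to be a regression network. A hypothetical compile step (optimizer, loss, and metric are assumptions, not taken from the original project):

model = base_model()
model.compile(optimizer='adam', loss='mse', metrics=['mae'])
model.summary()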
Example #4
def createVGG16(inputs, num_classes):

    x = RandomFlip()(inputs)
    x = RandomRotation(0.2)(x)

    x = Conv2D(64, (3, 3), padding='same')(x)
    x = Activation('relu')(BatchNormalization()(x))
    x = Conv2D(64, (3, 3), padding='same')(x)
    x = Activation('relu')(BatchNormalization()(x))
    x = MaxPooling2D((2, 2))(x)

    x = Conv2D(128, (3, 3), padding='same')(x)
    x = Activation('relu')(BatchNormalization()(x))
    x = Conv2D(128, (3, 3), padding='same')(x)
    x = Activation('relu')(BatchNormalization()(x))
    x = MaxPooling2D((2, 2))(x)

    x = Conv2D(256, (3, 3), padding='same')(x)
    x = Activation('relu')(BatchNormalization()(x))
    x = Conv2D(256, (3, 3), padding='same')(x)
    x = Activation('relu')(BatchNormalization()(x))
    x = Conv2D(256, (3, 3), padding='same')(x)
    x = Activation('relu')(BatchNormalization()(x))
    x = MaxPooling2D((2, 2))(x)

    x = Conv2D(512, (3, 3), padding='same')(x)
    x = Activation('relu')(BatchNormalization()(x))
    x = Conv2D(512, (3, 3), padding='same')(x)
    x = Activation('relu')(BatchNormalization()(x))
    x = Conv2D(512, (3, 3), padding='same')(x)
    x = Activation('relu')(BatchNormalization()(x))
    x = MaxPooling2D((2, 2))(x)

    x = Conv2D(512, (3, 3), padding='same')(x)
    x = Activation('relu')(BatchNormalization()(x))
    x = Conv2D(512, (3, 3), padding='same')(x)
    x = Activation('relu')(BatchNormalization()(x))
    x = Conv2D(512, (3, 3), padding='same')(x)
    x = Activation('relu')(BatchNormalization()(x))
    x = MaxPooling2D((2, 2))(x)

    x = Flatten()(x)
    x = Dropout(0.5)(x)
    x = Dense(4096)(x)
    x = Activation('relu')(x)
    x = Dropout(0.5)(x)
    x = Dense(4096)(x)
    x = Activation('relu')(x)
    logits = Dense(num_classes)(x)

    return logits
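createVGG16 returns raw logits rather than a compiled model, so the caller is expected to wrap them in a Model and pick a loss that operates on logits. A hypothetical wrapper (the 32x32 input size and 10 classes are assumptions):

from tensorflow.keras import Input, Model
from tensorflow.keras.losses import SparseCategoricalCrossentropy

inputs = Input(shape=(32, 32, 3))             # assumed CIFAR-like image size
logits = createVGG16(inputs, num_classes=10)  # assumed class count
model = Model(inputs, logits)
model.compile(optimizer='adam',
              loss=SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])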
Example #5
    def model(self):
        input_shape = self.input_shape
        kernel = self.kernel
        lr = self.lr
        dropout = self.dropout
        augmentation = self.augmentation
        normalization = self.normalization

        # Input images from all calorimeter layers.
        input0 = Input(shape=(128, 4, 1), name='EMB1')
        input1 = Input(shape=(16, 16, 1), name='EMB2')
        input2 = Input(shape=(8, 16, 1), name='EMB3')
        input3 = Input(shape=(4, 4, 1), name='TileBar0')
        input4 = Input(shape=(4, 4, 1), name='TileBar1')
        input5 = Input(shape=(2, 4, 1), name='TileBar2')
        inputs = [input0, input1, input2, input3, input4, input5]

        # Rescale our EMB images, and pass them through convolutions.
        EMB = ImageScaleBlock(input_shape,
                              normalization=True,
                              name_prefix='scaled_input_')(
                                  [input0, input1, input2])
        if (augmentation): EMB = RandomFlip(name='aug_reflect')(EMB)
        if (normalization): EMB = NormalizationBlock(axes=[1, 2, 3])(EMB)

        EMB = ZeroPadding2D((3, 3))(EMB)
        EMB = Conv2D(32, kernel, activation='relu')(EMB)
        EMB = MaxPooling2D(pool_size=(2, 2))(EMB)
        EMB = Flatten()(EMB)

        # For TileBar, just get some simple info.
        # Using ImageScaleBlock with final size of (1,1) will give us a list of integrals of the images.
        TiB = ImageScaleBlock(
            (1, 1), normalization=True,
            name_prefix='scaled_input_TiB_')([input3, input4, input5])
        TiB = Flatten()(TiB)
        if (normalization): TiB = NormalizationBlock(axes=[1])(TiB)

        X = Concatenate(axis=1)([EMB, TiB])
        X = Dense(128, activation='relu')(X)
        if (dropout > 0.): X = Dropout(dropout)(X)
        X = Dense(64, activation='relu')(X)
        if (dropout > 0.): X = Dropout(dropout)(X)
        output = Dense(2, activation='softmax')(X)

        model = Model(inputs=inputs, outputs=output)
        optimizer = Adam(learning_rate=lr)
        model.compile(loss='categorical_crossentropy',
                      optimizer=optimizer,
                      metrics=['acc'])
        return model
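ImageScaleBlock is another project-specific layer not shown here. Per the comment above, a target size of (1, 1) reduces each input image to its integral; ignoring the normalization flag, an assumed equivalent of that special case is a per-image sum followed by stacking:

import tensorflow as tf


def image_integrals(images):
    """Assumed equivalent of ImageScaleBlock((1, 1), ...) for a list of
    (batch, height, width, 1) tensors: one integral per image per event."""
    sums = [tf.reduce_sum(img, axis=[1, 2, 3]) for img in images]
    return tf.stack(sums, axis=1)  # shape: (batch, number_of_images)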
Example #6
def cifar_baseline_augmentation(x, padding_mode='ZEROS', cutout=True):
    padding_mode = padding_mode.upper()
    assert padding_mode in ['ZEROS', 'REFLECT']
    if padding_mode == 'ZEROS':
        x = ZeroPadding2D((4, 4), name='padding')(x)
    elif padding_mode == 'REFLECT':
        x = ReflectPadding2D((4, 4), name='padding')(x)

    x = RandomFlip(mode='horizontal', name='h_flip')(x)

    if cutout:
        x = RandomCutout(16, 0.0, name='cutout')(x)

    x = RandomCrop(32, 32, name='crop')(x)
    return x
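A possible way to wire this augmentation pipeline into a functional model; with padding_mode='ZEROS' and cutout=False only stock Keras layers are involved (ZeroPadding2D, RandomFlip, RandomCrop are assumed to be imported where the function is defined, and the backbone is left out):

from tensorflow.keras import Input

inputs = Input(shape=(32, 32, 3))
x = cifar_baseline_augmentation(inputs, padding_mode='ZEROS', cutout=False)
# ... a CIFAR backbone and classification head would follow here ...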
Example #7
    def model(self):
        input_shapes = [self.input_shape1, self.input_shape2]
        lr = self.lr
        dropout = self.dropout
        augmentation = self.augmentation
        normalization = self.normalization

        # Input images from all calorimeter layers.
        input0 = Input(shape=(128, 4, 1), name='EMB1')
        input1 = Input(shape=(16, 16, 1), name='EMB2')
        input2 = Input(shape=(8, 16, 1), name='EMB3')
        input3 = Input(shape=(4, 4, 1), name='TileBar0')
        input4 = Input(shape=(4, 4, 1), name='TileBar1')
        input5 = Input(shape=(2, 4, 1), name='TileBar2')
        inputs = [input0, input1, input2, input3, input4, input5]

        inputs_EMB = [input0, input1, input2]
        inputs_TileBar = [input3, input4, input5]

        Xs = []
        for i, input_list in enumerate([inputs_EMB, inputs_TileBar]):
            input_shape = input_shapes[i]
            # Rescale this group's images (EMB or TileBar) to a common shape.
            X = ImageScaleBlock(input_shape,
                                normalization=True,
                                name_prefix='scaled_input_{}_'.format(i))(input_list)
            if (augmentation):
                X = RandomFlip(name='aug_reflect_{}'.format(i))(X)
            if (normalization): X = NormalizationBlock(axes=[1, 2, 3])(X)
            X = ZeroPadding2D((3, 3))(X)
            X = Conv2D(32, (int(input_shape[0] / 4), int(input_shape[1] / 4)),
                       activation='relu')(X)
            X = MaxPooling2D(pool_size=(2, 2))(X)
            if (dropout > 0.): X = Dropout(dropout)(X)
            X = Flatten()(X)
            Xs.append(X)

        # concatenate results
        X = Concatenate(axis=1)(Xs)
        X = Dense(128, activation='relu')(X)
        output = Dense(2, activation='softmax')(X)

        model = Model(inputs=inputs, outputs=output)
        optimizer = Adam(learning_rate=lr)
        model.compile(loss='categorical_crossentropy',
                      optimizer=optimizer,
                      metrics=['acc'])
        return model
Example #8
    def __init__(self, time, sf) -> None:
        _time = time
        _sampling_frequency = sf
        _window_length = int(_time * _sampling_frequency)

        _img_shape = (_window_length,
                      PrivateConfigurator().getint('cnn', 'input_shape_y'), 3)

        _acc_shape = (PrivateConfigurator().getint('cnn', 'input_shape_x'),
                      PrivateConfigurator().getint('cnn', 'input_shape_y'))

        _output_size = PrivateConfigurator().getint('cnn', 'output')

        _data_augmentation = Sequential([
            RandomZoom(0.1),
            RandomFlip('horizontal', seed=28, input_shape=_img_shape),
            RandomRotation(0.1)
        ])

        _model_1_for_acc = Sequential([
            Conv1D(filters=64,
                   kernel_size=PrivateConfigurator().getint(
                       'm1_acc', 'conv-kernel'),
                   activation='relu',
                   input_shape=_acc_shape),
            MaxPooling1D(
                pool_size=PrivateConfigurator().getint('m1_acc', 'pool-size')),

            # Features Vector
            Flatten(),
            Dense(128, activation="relu",
                  kernel_initializer=Xavier(seed=28)),  # 1024
            Dropout(0.5),
            Dense(128, activation="relu",
                  kernel_initializer=Xavier(seed=28)),  # 1024
            Dropout(0.5),

            # Add an output layer
            Dense(_output_size, activation="softmax")
        ])

        self._models = {'m1_acc': _model_1_for_acc}
Example #9
def imagenet_baseline_augmentation(x):
    x = RandomFlip(mode='horizontal', name='h_flip')(x)
    return x
Example #10
def create_optpresso_model(input_shape: List) -> Sequential:
    model = Sequential()
    model.add(InputLayer(input_shape=input_shape))
    model.add(SubtractMeanLayer(mean=MEAN_IMG_VALUES))
    model.add(Rescaling(1.0 / 255))
    model.add(RandomFlip())
    model.add(RandomRotation(1))

    model.add(Convolution2D(
        32,
        (5, 5),
        padding="same",
    ))
    model.add(BatchNormalization())
    model.add(Activation("relu"))
    # model.add(SpatialDropout2D(0.3))
    model.add(Convolution2D(
        48,
        (5, 5),
        strides=(2, 2),
        padding="same",
    ))
    # model.add(BatchNormalization())
    # model.add(SpatialDropout2D(0.3))
    model.add(Activation("relu"))
    model.add(Convolution2D(
        48,
        (5, 5),
        strides=(2, 2),
        padding="same",
    ))
    # model.add(SpatialDropout2D(0.1))
    model.add(Activation("relu"))
    model.add(Convolution2D(
        64,
        (3, 3),
        strides=(2, 2),
        padding="same",
    ))
    # model.add(SpatialDropout2D(0.1))
    model.add(Activation("relu"))
    model.add(Convolution2D(
        64,
        (3, 3),
        strides=(2, 2),
        padding="same",
    ))
    model.add(Activation("relu"))
    model.add(Convolution2D(
        128,
        (3, 3),
        strides=(2, 2),
        padding="same",
    ))
    # model.add(SpatialDropout2D(0.15))
    model.add(Activation("relu"))
    model.add(Convolution2D(
        128,
        (3, 3),
        strides=(2, 2),
        padding="same",
    ))
    model.add(Flatten())
    model.add(Activation("relu"))
    model.add(Dense(128))
    model.add(Dropout(0.5))
    model.add(Activation("relu"))
    model.add(Dense(96))
    model.add(Dropout(0.5))
    model.add(Activation("relu"))
    model.add(Dense(64))
    model.add(Dropout(0.5))
    model.add(Activation("relu"))
    model.add(Dense(1, bias_initializer=Constant(MEAN_PULL_TIME)))

    return model
X = []
y = []

for features, label in training_data:
    X.append(features)
    y.append(label)

y = np.array(y)
X = np.array(X).reshape(-1, IMG_SIZE, IMG_SIZE, 3)

X = X / 255.0

input_shape = X.shape[1:]

model = Sequential()
model.add(RandomFlip("horizontal", input_shape=X.shape[1:]))
model.add(RandomRotation(0.1))
model.add(RandomZoom(0.1))
model.add(Conv2D(32, (3, 3), input_shape=X.shape[1:]))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))

model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Activation('relu'))
optimizer="Adam",
loss="sparse_categorical_crossentropy",
metrics=['accuracy'],
)
model_.fit(x_train, y_train, epochs=1, batch_size=16)
"""
When you don't have a large image dataset, it's a good practice
to artificially introduce sample diversity by applying random yet
realistic transformations to the training images. This helps
expose the model to different aspects of the training data while
slowing down overfitting.
"""
augmentations = Sequential([
    RandomFlip("horizontal"),
    RandomRotation(0.1),
    RandomZoom(0.1),
])

model_ = Sequential([
    augmentations,
    scaling_layer,
    model
])

model_.compile(
    optimizer="Adam",
    loss="sparse_categorical_crossentropy",
    metrics=['accuracy'],
)
model_.fit(x_train, y_train, epochs=1, batch_size=16)
Example #13
    def model(self):
        filter_sets = self.filter_sets
        f_vals = self.f_vals
        s_vals = self.s_vals
        i_vals = self.i_vals
        channels = self.channels
        ndense = self.ndense
        dense_units = self.dense_units
        input_shape = self.input_shape
        augmentation = self.augmentation
        normalization = self.normalization
        lr = self.lr
        decay = self.decay

        # Input images -- one for each channel, each channel's dimensions may be different.
        inputs = [
            Input((None, None, 1), name='input_' + str(i))
            for i in range(channels)
        ]
        energy_input = Input((1,), name='energy')
        eta_input = Input((1,), name='eta')

        # Rescale all the input images, so that their dimensions now match.
        # Note that we make sure to re-normalize the images so that we preserve their energies.
        X = ImageScaleBlock(input_shape,
                            normalization=True,
                            name_prefix='scaled_input_')(inputs)
        X = ZeroPadding2D((3, 3))(X)

        # Data augmentation.
        # With channels combined, we can now flip images in (eta,phi, eta&phi).
        # Note that these flips will not require making any changes to
        # the other inputs (energy, abs(eta)), so the augmentation is
        # as simple as flipping the images using built-in functions.
        # These augmentation functions will only be active during training.
        if (augmentation): X = RandomFlip(name='aug_reflect')(X)

        # Stage 1
        X = Conv2D(64, (7, 7),
                   strides=(2, 2),
                   name='conv1',
                   kernel_initializer=glorot_uniform(seed=0))(X)
        X = BatchNormalization(axis=3, name='bn_conv1')(X)
        X = Activation('relu')(X)
        X = MaxPooling2D((3, 3), strides=(2, 2))(X)

        n = len(f_vals)
        for i in range(n):
            filters = filter_sets[i]
            f = f_vals[i]
            s = s_vals[i]
            ib = i_vals[i]
            stage = i + 1  # 1st stage is Conv2D etc. before ResNet blocks
            X = ConvolutionBlock(f=f,
                                 filters=filters,
                                 stage=stage,
                                 block='a',
                                 s=s,
                                 normalization=normalization)(X)

            for j in range(ib):
                X = IdentityBlock(f=f,
                                  filters=filters,
                                  stage=stage,
                                  block=ascii_lowercase[j + 1],
                                  normalization=normalization)(X)

        # AVGPOOL
        pool_size = (2, 2)
        if (X.shape[1] == 1): pool_size = (1, 2)
        elif (X.shape[2] == 1): pool_size = (2, 1)
        X = AveragePooling2D(pool_size=pool_size, name="avg_pool")(X)

        # Transition into output: Add energy and eta info, and use a simple DNN.
        X = Flatten()(X)
        tensor_list = [X, energy_input, eta_input]
        X = Concatenate(axis=1)(tensor_list)

        if (dense_units <= 0): units = X.shape[1]
        else: units = dense_units
        for i in range(ndense):
            X = Dense(units=units,
                      activation='relu',
                      name='Dense{}'.format(i + 1))(X)
        X = Dense(units=1,
                  activation='linear',
                  name='output',
                  kernel_initializer='normal')(X)

        # Create model object.
        input_list = inputs + [energy_input, eta_input]
        model = Model(inputs=input_list, outputs=X, name='ResNet')

        # Compile the model
        if (self.opt == 'adam'):
            optimizer = Adam(learning_rate=lr, decay=decay)
        else:
            optimizer = SGD(learning_rate=lr, momentum=0.)
        model.compile(optimizer=optimizer, loss='mse', metrics=['mae', 'mse'])
        return model
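As the comment in the augmentation step notes, Keras preprocessing layers such as RandomFlip only randomize while training; at inference they pass inputs through unchanged. A quick, self-contained check:

import tensorflow as tf
from tensorflow.keras.layers import RandomFlip

images = tf.random.uniform((2, 8, 8, 1))
flip = RandomFlip()                          # flips horizontally and vertically by default
same = flip(images)                          # inference call: identity pass-through
maybe_flipped = flip(images, training=True)  # training call: applies random flips
print(bool(tf.reduce_all(same == images)))   # True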
Example #14
    def model(self):
        dropout = self.dropout
        decay = self.decay
        lr = self.lr
        augmentation = self.augmentation
        # Gather inputs -- the images, the reco energy and eta.
        EMB1 = Input(shape=(128, 4, 1), name='input_0')
        EMB2 = Input(shape=(16, 16, 1), name='input_1')
        EMB3 = Input(shape=(8, 16, 1), name='input_2')
        TB0 = Input(shape=(4, 4, 1), name='input_3')
        TB1 = Input(shape=(4, 4, 1), name='input_4')
        TB2 = Input(shape=(2, 4, 1), name='input_5')
        energy = Input(shape=(1,), name='energy')
        eta = Input(shape=(1,), name='eta')
        input_list = [EMB1, EMB2, EMB3, TB0, TB1, TB2, energy, eta]

        # Image augmentation.
        # Note: EMB1 and (EMB2+EMB3) are separately augmented, so they may be given different flips.
        #       Not sure if this is necessarily a problem with what we're doing.
        if (augmentation): EMB1 = RandomFlip(name='emb1_flip')(EMB1)

        # Perform some convolutions with EMB1
        x1 = Conv2D(32, (3, 3), padding='same', name='emb1_conv2d_1')(EMB1)
        x1 = Activation('relu')(x1)
        x1 = Conv2D(32, (3, 3), padding='same', name='emb1_conv2d_2')(x1)
        x1 = Activation('relu')(x1)
        x1 = MaxPooling2D(pool_size=(2, 1),
                          padding='same',
                          name='emb1_maxpool_3')(x1)
        x1 = Flatten(name='emb1_flatten_4')(x1)

        # Perform some convolutions with EMB2 + EMB3
        x2 = ImageScaleBlock(
            (16, 16), normalization=True,
            name_prefix='emb_stack')([EMB2, EMB3])  # merge EMB2 and EMB3

        if (augmentation): x2 = RandomFlip(name='emb23_flip')(x2)

        x2 = Conv2D(32, (1, 1), padding='same', name='emb23_conv1d_1')(x2)
        x2 = Activation('relu')(x2)
        x2 = Conv2D(64, (2, 2), padding='same', name='emb23_conv2d_2')(x2)
        x2 = Activation('relu')(x2)
        x2 = MaxPooling2D(pool_size=(2, 1),
                          padding='same',
                          name='emb23_maxpool_3')(x2)
        x2 = Flatten(name='emb23_flatten_4')(x2)

        # Mix down the results of the EMB convolutions.
        x = Concatenate(axis=1, name='emb_concat')([x1, x2])
        x = Dense(32, activation='relu', name='emb_dense_1')(x)
        x = Dropout(dropout, name='emb_dropout_2')(x)
        x = Dense(16, activation='relu', name='emb_dense_3')(x)
        x = Dropout(dropout, name='emb_dropout_4')(x)

        # Compute energy depth information
        depth = ImageScaleBlock(
            (1, 1), normalization=True,
            name_prefix='depth')([EMB1, EMB2, EMB3, TB0, TB1, TB2])
        depth = Flatten(name='depth_flatten')(depth)

        x = Concatenate(axis=1, name='concatenate_2')([x, depth, energy, eta])
        x = Dense(32, activation='relu', name='full_dense_1')(x)
        x = Dropout(dropout, name='full_dropout_2')(x)
        x = Dense(32, activation='relu', name='full_dense_3')(x)
        x = Dropout(dropout, name='full_dropout_4')(x)
        output = Dense(1, kernel_initializer='normal', activation='linear')(x)

        if (self.opt == 'adam'):
            optimizer = Adam(learning_rate=lr, decay=decay)
        else:
            optimizer = SGD(learning_rate=lr, momentum=0.)
        model = Model(inputs=input_list, outputs=output, name='Simple_CNN')
        model.compile(optimizer=optimizer, loss='mse', metrics=['mae', 'mse'])
        return model
Example #15
    def model(self):
        filter_sets = self.filter_sets
        f_vals = self.f_vals
        s_vals = self.s_vals
        i_vals = self.i_vals
        channels = self.channels
        input_shape = self.input_shape
        augmentation = self.augmentation
        normalization = self.normalization
        lr = self.lr
        classes = self.classes

        # Input images -- one for each channel, each channel's dimensions may be different.
        inputs = [
            Input((None, None, 1), name='input' + str(i))
            for i in range(channels)
        ]

        # Rescale all the input images, so that their dimensions now match.
        X = ImageScaleBlock(input_shape,
                            normalization=True,
                            name_prefix='scaled_input_')(inputs)
        if (normalization): X = NormalizationBlock(axes=[1, 2, 3])(X)

        X = ZeroPadding2D((3, 3))(X)

        # Data augmentation.
        # With channels combined, we can now flip images in (eta,phi, eta&phi).
        # Note that these flips will not require making any changes to
        # the other inputs (energy, abs(eta)), so the augmentation is
        # as simple as flipping the images using built-in functions.
        # These augmentation functions will only be active during training.
        if (augmentation): X = RandomFlip(name='aug_reflect')(X)

        # Stage 1
        X = Conv2D(64, (7, 7),
                   strides=(2, 2),
                   name='conv1',
                   kernel_initializer=glorot_uniform(seed=0))(X)
        X = BatchNormalization(axis=3, name='bn_conv1')(X)
        X = Activation('relu')(X)
        X = MaxPooling2D((3, 3), strides=(2, 2))(X)

        n = len(f_vals)
        for i in range(n):
            filters = filter_sets[i]
            f = f_vals[i]
            s = s_vals[i]
            ib = i_vals[i]
            stage = i + 1  # 1st stage is Conv2D etc. before ResNet blocks
            #X = convolutional_block(X, f=f, filters=filters, stage=stage, block='a', s=1)
            X = ConvolutionBlock(f=f,
                                 filters=filters,
                                 stage=stage,
                                 block='a',
                                 s=s,
                                 normalization=normalization)(X)

            for j in range(ib):
                #X = identity_block(X, f, filters, stage=stage, block=ascii_lowercase[j+1]) # will only cause naming issues if there are many id blocks
                X = IdentityBlock(f=f,
                                  filters=filters,
                                  stage=stage,
                                  block=ascii_lowercase[j + 1],
                                  normalization=normalization)(X)

        # AVGPOOL
        pool_size = (2, 2)
        if (X.shape[1] == 1): pool_size = (1, 2)
        elif (X.shape[2] == 1): pool_size = (2, 1)
        X = AveragePooling2D(pool_size=pool_size, name="avg_pool")(X)

        # output layer
        X = Flatten()(X)
        X = Dense(classes,
                  activation='softmax',
                  name='fc' + str(classes),
                  kernel_initializer=glorot_uniform(seed=0))(X)

        # Create model object.
        model = Model(inputs=inputs, outputs=X, name='ResNet')

        # Compile the model
        optimizer = Adam(learning_rate=lr)
        model.compile(loss='categorical_crossentropy',
                      optimizer=optimizer,
                      metrics=['acc'])
        return model
Example #16
#  for i in range(9):
#    ax = plt.subplot(3, 3, i + 1)
#    plt.imshow(images[i].numpy().astype("uint8"))
#    plt.title(class_names[labels[i]])
#    plt.axis("off")

#for image_batch, labels_batch in train_ds:
#  print(image_batch.shape)
#  print(labels_batch.shape)
#  break

train_ds = train_ds.cache().shuffle(256).prefetch(buffer_size=AUTOTUNE)
val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)

augment_data = Sequential([
    RandomFlip("horizontal", input_shape=(img_height, img_width, 3)),
    RandomContrast(0.5),  #, input_shape=(img_height, img_width, 3)),
    RandomZoom(0.2),
    RandomRotation(0.3),
])

model = Sequential([
    augment_data,
    Rescaling(1. / 255, input_shape=(img_height, img_width, 3)),
    layers.Conv2D(16, 3, padding='same', activation='relu'),
    layers.MaxPooling2D(),
    layers.Conv2D(32, 3, padding='same', activation='relu'),
    layers.MaxPooling2D(),
    layers.Conv2D(64, 3, padding='same', activation='relu'),
    layers.MaxPooling2D(),
    layers.Dropout(0.2),
Example #17
def createXception(inputs, num_classes):

    # data augmentation
    x = RandomFlip()(inputs)
    x = RandomRotation(0.2)(x)

    # entry flow
    x = Conv2D(32, (3, 3), strides=2, padding='same')(x)
    x = Activation('relu')(BatchNormalization()(x))
    x = Conv2D(64, (3, 3), padding='same')(x)
    x = Activation('relu')(BatchNormalization()(x))

    residual = Conv2D(128, (1, 1), strides=2, padding='same')(x)
    residual = BatchNormalization()(residual)

    x = SeparableConv2D(128, (3, 3), padding='same')(x)
    x = Activation('relu')(BatchNormalization()(x))
    x = SeparableConv2D(128, (3, 3), padding='same')(x)
    x = BatchNormalization()(x)
    x = MaxPooling2D((3, 3), strides=2, padding='same')(x)
    x = add([x, residual])

    residual = Conv2D(256, (1, 1), strides=2, padding='same')(x)
    residual = BatchNormalization()(residual)

    x = Activation('relu')(x)
    x = SeparableConv2D(256, (3, 3), padding='same')(x)
    x = Activation('relu')(BatchNormalization()(x))
    x = SeparableConv2D(256, (3, 3), padding='same')(x)
    x = BatchNormalization()(x)
    x = MaxPooling2D((3, 3), strides=2, padding='same')(x)
    x = add([x, residual])

    residual = Conv2D(728, (1, 1), strides=2, padding='same')(x)
    residual = BatchNormalization()(residual)

    x = Activation('relu')(x)
    x = SeparableConv2D(728, (3, 3), padding='same')(x)
    x = Activation('relu')(BatchNormalization()(x))
    x = SeparableConv2D(728, (3, 3), padding='same')(x)
    x = BatchNormalization()(x)
    x = MaxPooling2D((3, 3), strides=2, padding='same')(x)
    x = add([x, residual])

    # middle flow
    for i in range(8):
        residual = x
        x = Activation('relu')(x)
        x = SeparableConv2D(728, (3, 3), padding='same')(x)
        x = Activation('relu')(BatchNormalization()(x))
        x = SeparableConv2D(728, (3, 3), padding='same')(x)
        x = Activation('relu')(BatchNormalization()(x))
        x = SeparableConv2D(728, (3, 3), padding='same')(x)
        x = BatchNormalization()(x)
        x = add([x, residual])

    # exit flow
    residual = Conv2D(1024, (1, 1), strides=2, padding='same')(x)
    residual = BatchNormalization()(residual)

    x = Activation('relu')(x)
    x = SeparableConv2D(728, (3, 3), padding='same')(x)
    x = Activation('relu')(BatchNormalization()(x))
    x = SeparableConv2D(1024, (3, 3), padding='same')(x)
    x = BatchNormalization()(x)
    x = MaxPooling2D((3, 3), strides=2, padding='same')(x)
    x = add([x, residual])

    x = SeparableConv2D(1536, (3, 3), padding='same')(x)
    x = Activation('relu')(BatchNormalization()(x))
    x = SeparableConv2D(2048, (3, 3), padding='same')(x)
    x = Activation('relu')(BatchNormalization()(x))

    x = GlobalAveragePooling2D()(x)
    logits = Dense(num_classes, kernel_initializer='he_normal')(x)

    return logits
Example #18
    train_dataset = tf.data.Dataset.from_generator(
        data_generator(batch_size, train=True), (tf.uint8, tf.uint8),
        (tf.TensorShape([None, 218, 178, 3]), tf.TensorShape([None])))

    test_dataset = tf.data.Dataset.from_generator(
        data_generator(batch_size, train=False), (tf.uint8, tf.uint8),
        (tf.TensorShape([None, 218, 178, 3]), tf.TensorShape([None])))

    with mlflow.start_run():

        mlflow.tensorflow.autolog()

        model = Sequential()
        model.add(Input(shape=IMAGE_SHAPE))
        model.add(RandomFlip())

        for x in range(convolutions):
            model.add(Conv2D(32, (3, 3), strides=(1, 1), activation='relu'))
            model.add(MaxPooling2D())
            model.add(Dropout(0.5))

        model.add(Flatten())
        model.add(Dense(32, activation='relu'))
        model.add(Dense(1, activation='sigmoid'))

        model.compile(optimizer=Adam(),
                      loss=BinaryCrossentropy(),
                      metrics=['accuracy'])
        model.fit(train_dataset,
                  validation_data=test_dataset,
Example #19

class Saturation(Layer):
    def __init__(self, saturation_factor=1.5, **kwargs):
        super().__init__(**kwargs)
        self.saturation_factor = saturation_factor

    def call(self, data):
        # Saturation per pixel: converted from RGB -> HSV and S * saturation_factor
        return tf.image.adjust_saturation(data, self.saturation_factor)


augment = Sequential([
    Saturation(3),
    Brightness(0.1),
    RandomFlip('horizontal'),
    RandomRotation(0.4),
    RandomZoom(0.2)
])
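A hypothetical smoke test of the augment stack on a random float batch (the image size is an assumption, and Brightness is taken to be a custom layer defined elsewhere, like Saturation above); the random layers only perturb the images when called with training=True:

import tensorflow as tf

batch = tf.random.uniform((4, 224, 224, 3))  # assumed image size, values in [0, 1]
augmented = augment(batch, training=True)    # random layers are active only in training calls
print(augmented.shape)                       # (4, 224, 224, 3)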

# ----------------------------------------------------
# CNN Model for Fire/NoFire recognition
# ----------------------------------------------------


class FireModel:
    def __init__(self, base, preprocess):
        '''
        Wrapper class for a CNN model that augments incoming data, uses a pre-trained model as a
        base and is trained to perform binary classification.