Example #1
    def model(self):
        lr = self.lr
        augmentation = self.augmentation
        normalization = self.normalization

        # EMB1 image (convolutional)
        input0 = Input(shape=(128, 4, 1), name='EMB1')
        if (augmentation): X0 = RandomFlip(name='aug_reflect_0')(input0)
        else: X0 = input0

        if (normalization): X0 = NormalizationBlock(axes=[1, 2])(X0)
        X0 = Conv2D(32, (4, 2), activation='relu')(X0)
        X0 = MaxPooling2D(pool_size=(2, 2))(X0)
        X0 = Dropout(0.2)(X0)
        X0 = Flatten()(X0)
        X0 = Dense(128, activation='relu')(X0)

        # EMB2 image (convolutional)
        input1 = Input(shape=(16, 16, 1), name='EMB2')
        if (augmentation): X1 = RandomFlip(name='aug_reflect_1')(input1)
        else: X1 = input1

        if (normalization): X1 = NormalizationBlock(axes=[1, 2])(X1)
        X1 = Conv2D(32, (4, 4), activation='relu')(X1)
        X1 = MaxPooling2D(pool_size=(2, 2))(X1)
        X1 = Dropout(0.2)(X1)
        X1 = Flatten()(X1)
        X1 = Dense(128, activation='relu')(X1)

        # EMB3 image (convolutional)
        input2 = Input(shape=(8, 16, 1), name='EMB3')
        if (augmentation): X2 = RandomFlip(name='aug_reflect_2')(input2)
        else: X2 = input2

        if (normalization): X2 = NormalizationBlock(axes=[1, 2])(X2)
        X2 = Conv2D(32, (2, 4), activation='relu')(X2)
        X2 = MaxPooling2D(pool_size=(2, 2))(X2)
        X2 = Dropout(0.2)(X2)
        X2 = Flatten()(X2)
        X2 = Dense(128, activation='relu')(X2)

        # concatenate outputs from the three networks above
        X = Concatenate(axis=1)([X0, X1, X2])  # remember that axis=0 is batch!
        X = Dense(50, activation='relu')(X)

        # final output
        X = Dense(2, activation='softmax')(X)
        model = Model(inputs=[input0, input1, input2], outputs=X)

        # compile model
        optimizer = Adam(learning_rate=lr)
        model.compile(loss='categorical_crossentropy',
                      optimizer=optimizer,
                      metrics=['acc'])
        return model
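
These snippets omit their imports. A plausible common header, assuming TensorFlow 2.6+ (ImageScaleBlock, NormalizationBlock, ConvolutionBlock, and IdentityBlock are project-specific helpers, not Keras built-ins):

# Assumed imports for the examples below; in TF < 2.6 the Random*
# preprocessing layers live under layers.experimental.preprocessing.
from string import ascii_lowercase

import tensorflow as tf
from tensorflow.keras import Model, Sequential
from tensorflow.keras.layers import (
    Activation, AveragePooling2D, BatchNormalization, Concatenate, Conv2D,
    Dense, Dropout, Flatten, Input, Layer, MaxPooling2D, RandomFlip,
    RandomRotation, RandomZoom, ZeroPadding2D)
from tensorflow.keras.optimizers import Adam, SGD
from tensorflow.keras.initializers import glorot_uniform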
Example #2
    def model(self):
        lr = self.lr
        input_shape = self.input_shape
        f = self.f
        pool = self.pool
        augmentation = self.augmentation
        normalization = self.normalization

        X_in = Input(input_shape, name='input')

        # Augmentation: randomly flip the image during training
        if (augmentation): X = RandomFlip(name='aug_reflect')(X_in)
        else: X = X_in

        # Normalization: Rescale the image to have an integral of 1
        if (normalization): X = NormalizationBlock(axes=[1, 2])(X)

        X = Conv2D(32, f, name='conv1', activation='relu')(X)
        # TODO: including this extra pooling stage caused the model to fail
        # to compile. Did it work in the original CNN notebook with the
        # original parameter choices?
        #temp_pool = (2, 2)
        #X = MaxPooling2D(temp_pool)(X)
        X = Conv2D(16, pool, activation='relu')(X)
        X = MaxPooling2D(pool)(X)
        X = Flatten()(X)
        X = Dense(128, activation='relu')(X)
        X = Dense(50, activation='relu')(X)
        X = Dense(2, kernel_initializer='normal', activation='softmax')(X)
        model = Model(inputs=X_in, outputs=X, name='SL_CNN')
        # compile model
        optimizer = Adam(learning_rate=lr)
        model.compile(loss='categorical_crossentropy',
                      optimizer=optimizer,
                      metrics=['acc'])
        return model
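
NormalizationBlock is not shown in these examples. Going by the comment above ("rescale the image to have an integral of 1"), a minimal sketch of what it might look like, assuming it simply divides by the sum over the given axes:

import tensorflow as tf
from tensorflow.keras.layers import Layer

class NormalizationBlock(Layer):
    """Hypothetical sketch: rescale each sample so its values sum to 1
    over `axes`, preserving the relative distribution of energy."""

    def __init__(self, axes, epsilon=1e-8, **kwargs):
        super().__init__(**kwargs)
        self.axes = axes
        self.epsilon = epsilon

    def call(self, inputs):
        total = tf.reduce_sum(inputs, axis=self.axes, keepdims=True)
        return inputs / (total + self.epsilon)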
Example #3
    def model(self):
        input_shape = self.input_shape
        kernel = self.kernel
        lr = self.lr
        dropout = self.dropout
        augmentation = self.augmentation
        normalization = self.normalization

        # Input images from all calorimeter layers.
        input0 = Input(shape=(128, 4, 1), name='EMB1')
        input1 = Input(shape=(16, 16, 1), name='EMB2')
        input2 = Input(shape=(8, 16, 1), name='EMB3')
        input3 = Input(shape=(4, 4, 1), name='TileBar0')
        input4 = Input(shape=(4, 4, 1), name='TileBar1')
        input5 = Input(shape=(2, 4, 1), name='TileBar2')
        inputs = [input0, input1, input2, input3, input4, input5]

        # Rescale our EMB images, and pass them through convolutions.
        EMB = ImageScaleBlock(input_shape,
                              normalization=True,
                              name_prefix='scaled_input_')(
                                  [input0, input1, input2])
        if (augmentation): EMB = RandomFlip(name='aug_reflect')(EMB)
        if (normalization): EMB = NormalizationBlock(axes=[1, 2, 3])(EMB)

        EMB = ZeroPadding2D((3, 3))(EMB)
        EMB = Conv2D(32, kernel, activation='relu')(EMB)
        EMB = MaxPooling2D(pool_size=(2, 2))(EMB)
        EMB = Flatten()(EMB)

        # For TileBar, just get some simple info.
        # Using ImageScaleBlock with final size of (1,1) will give us a list of integrals of the images.
        TiB = ImageScaleBlock(
            (1, 1), normalization=True,
            name_prefix='scaled_input_TiB_')([input3, input4, input5])
        TiB = Flatten()(TiB)
        if (normalization): TiB = NormalizationBlock(axes=[1])(TiB)

        X = Concatenate(axis=1)([EMB, TiB])
        X = Dense(128, activation='relu')(X)
        if (dropout > 0.): X = Dropout(dropout)(X)
        X = Dense(64, activation='relu')(X)
        if (dropout > 0.): X = Dropout(dropout)(X)
        output = Dense(2, activation='softmax')(X)

        model = Model(inputs=inputs, outputs=output)
        optimizer = Adam(learning_rate=lr)
        model.compile(loss='categorical_crossentropy',
                      optimizer=optimizer,
                      metrics=['acc'])
        return model
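
ImageScaleBlock is also project code. Based on the comments here and in Example #5 ("re-normalize the images so that we preserve their energies"), a minimal sketch, assuming it resizes every input to a common shape and stacks the results along the channel axis:

import tensorflow as tf
from tensorflow.keras.layers import Layer

class ImageScaleBlock(Layer):
    """Hypothetical sketch: resize each input image to `target_shape`,
    optionally rescaling so each image keeps its original integral,
    then concatenate the results along the channel axis."""

    def __init__(self, target_shape, normalization=False,
                 name_prefix='', **kwargs):
        super().__init__(**kwargs)
        self.target_shape = tuple(target_shape[:2])
        self.normalization = normalization
        self.name_prefix = name_prefix  # kept only for API compatibility

    def call(self, inputs):
        scaled = []
        for x in inputs:
            y = tf.image.resize(x, self.target_shape)
            if self.normalization:
                # Rescale so sum(y) == sum(x), preserving total energy.
                old = tf.reduce_sum(x, axis=[1, 2, 3], keepdims=True)
                new = tf.reduce_sum(y, axis=[1, 2, 3], keepdims=True)
                y = y * old / (new + 1e-8)
            scaled.append(y)
        return tf.concat(scaled, axis=-1)

Under this reading, a (1, 1) target with normalization=True reduces each image to its integral, which matches the TileBar comment above.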
Example #4
    def model(self):
        input_shapes = [self.input_shape1, self.input_shape2]
        lr = self.lr
        dropout = self.dropout
        augmentation = self.augmentation
        normalization = self.normalization

        # Input images from all calorimeter layers.
        input0 = Input(shape=(128, 4, 1), name='EMB1')
        input1 = Input(shape=(16, 16, 1), name='EMB2')
        input2 = Input(shape=(8, 16, 1), name='EMB3')
        input3 = Input(shape=(4, 4, 1), name='TileBar0')
        input4 = Input(shape=(4, 4, 1), name='TileBar1')
        input5 = Input(shape=(2, 4, 1), name='TileBar2')
        inputs = [input0, input1, input2, input3, input4, input5]

        inputs_EMB = [input0, input1, input2]
        inputs_TileBar = [input3, input4, input5]

        Xs = []
        for i, input_list in enumerate([inputs_EMB, inputs_TileBar]):
            input_shape = input_shapes[i]
            # Scale only this group's inputs, with a per-branch name prefix
            # so that layer names do not collide between the two branches.
            X = ImageScaleBlock(input_shape,
                                normalization=True,
                                name_prefix='scaled_input_{}_'.format(i))(
                                    input_list)
            if (augmentation):
                X = RandomFlip(name='aug_reflect_{}'.format(i))(X)
            if (normalization): X = NormalizationBlock(axes=[1, 2, 3])(X)
            X = ZeroPadding2D((3, 3))(X)
            X = Conv2D(32, (int(input_shape[0] / 4), int(input_shape[1] / 4)),
                       activation='relu')(X)
            X = MaxPooling2D(pool_size=(2, 2))(X)
            if (dropout > 0.): X = Dropout(dropout)(X)
            X = Flatten()(X)
            Xs.append(X)

        # concatenate results
        X = Concatenate(axis=1)(Xs)
        X = Dense(128, activation='relu')(X)
        output = Dense(2, activation='softmax')(X)

        model = Model(inputs=inputs, outputs=output)
        optimizer = Adam(learning_rate=lr)
        model.compile(loss='categorical_crossentropy',
                      optimizer=optimizer,
                      metrics=['acc'])
        return model
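
A quick sanity check for a multi-input model like this one is to push a random batch through it; a hypothetical smoke test (`builder` stands in for an instance of the surrounding wrapper class):

import numpy as np

shapes = [(128, 4, 1), (16, 16, 1), (8, 16, 1),
          (4, 4, 1), (4, 4, 1), (2, 4, 1)]
batch = [np.random.rand(8, *s).astype('float32') for s in shapes]
m = builder.model()            # `builder`: hypothetical wrapper instance
print(m.predict(batch).shape)  # expect (8, 2)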
Example #5
    def model(self):
        filter_sets = self.filter_sets
        f_vals = self.f_vals
        s_vals = self.s_vals
        i_vals = self.i_vals
        channels = self.channels
        ndense = self.ndense
        dense_units = self.dense_units
        input_shape = self.input_shape
        augmentation = self.augmentation
        normalization = self.normalization
        lr = self.lr
        decay = self.decay

        # Input images -- one for each channel, each channel's dimensions may be different.
        inputs = [
            Input((None, None, 1), name='input_' + str(i))
            for i in range(channels)
        ]
        energy_input = Input((1,), name='energy')
        eta_input = Input((1,), name='eta')

        # Rescale all the input images, so that their dimensions now match.
        # Note that we make sure to re-normalize the images so that we preserve their energies.
        X = ImageScaleBlock(input_shape,
                            normalization=True,
                            name_prefix='scaled_input_')(inputs)
        X = ZeroPadding2D((3, 3))(X)

        # Data augmentation.
        # With channels combined, we can now flip images in (eta,phi, eta&phi).
        # Note that these flips will not require making any changes to
        # the other inputs (energy, abs(eta)), so the augmentation is
        # as simple as flipping the images using built-in functions.
        # These augmentation functions will only be active during training.
        if (augmentation): X = RandomFlip(name='aug_reflect')(X)

        # Stage 1
        X = Conv2D(64, (7, 7),
                   strides=(2, 2),
                   name='conv1',
                   kernel_initializer=glorot_uniform(seed=0))(X)
        X = BatchNormalization(axis=3, name='bn_conv1')(X)
        X = Activation('relu')(X)
        X = MaxPooling2D((3, 3), strides=(2, 2))(X)

        n = len(f_vals)
        for i in range(n):
            filters = filter_sets[i]
            f = f_vals[i]
            s = s_vals[i]
            ib = i_vals[i]
            stage = i + 1  # 1st stage is Conv2D etc. before ResNet blocks
            X = ConvolutionBlock(f=f,
                                 filters=filters,
                                 stage=stage,
                                 block='a',
                                 s=s,
                                 normalization=normalization)(X)

            for j in range(ib):
                X = IdentityBlock(f=f,
                                  filters=filters,
                                  stage=stage,
                                  block=ascii_lowercase[j + 1],
                                  normalization=normalization)(X)

        # AVGPOOL
        pool_size = (2, 2)
        if (X.shape[1] == 1): pool_size = (1, 2)
        elif (X.shape[2] == 1): pool_size = (2, 1)
        X = AveragePooling2D(pool_size=pool_size, name="avg_pool")(X)

        # Transition into output: Add energy and eta info, and use a simple DNN.
        X = Flatten()(X)
        tensor_list = [X, energy_input, eta_input]
        X = Concatenate(axis=1)(tensor_list)

        if (dense_units <= 0): units = X.shape[1]
        else: units = dense_units
        for i in range(ndense):
            X = Dense(units=units,
                      activation='relu',
                      name='Dense{}'.format(i + 1))(X)
        X = Dense(units=1,
                  activation='linear',
                  name='output',
                  kernel_initializer='normal')(X)

        # Create model object.
        input_list = inputs + [energy_input, eta_input]
        model = Model(inputs=input_list, outputs=X, name='ResNet')

        # Compile the model
        if (self.opt == 'adam'):
            optimizer = Adam(learning_rate=lr, decay=decay)
        else:
            optimizer = SGD(learning_rate=lr, momentum=0.)
        model.compile(optimizer=optimizer, loss='mse', metrics=['mae', 'mse'])
        return model
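
ConvolutionBlock and IdentityBlock wrap the standard ResNet building blocks (the commented-out identity_block/convolutional_block calls in Example #7 point at the classic formulation). A minimal sketch of the identity branch under that assumption; the project version also takes the normalization flag, omitted here:

from tensorflow.keras.layers import (Activation, Add, BatchNormalization,
                                     Conv2D)

def identity_block_sketch(X, f, filters, stage, block):
    """Hypothetical sketch of a classic ResNet identity block:
    a 1x1 -> fxf -> 1x1 bottleneck added back onto its input."""
    F1, F2, F3 = filters
    base = 'res{}{}_branch'.format(stage, block)
    shortcut = X

    X = Conv2D(F1, (1, 1), name=base + '2a')(X)
    X = BatchNormalization(axis=3)(X)
    X = Activation('relu')(X)

    X = Conv2D(F2, (f, f), padding='same', name=base + '2b')(X)
    X = BatchNormalization(axis=3)(X)
    X = Activation('relu')(X)

    X = Conv2D(F3, (1, 1), name=base + '2c')(X)
    X = BatchNormalization(axis=3)(X)

    X = Add()([X, shortcut])  # requires F3 == input channel count
    return Activation('relu')(X)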
Example #6
    def model(self):
        dropout = self.dropout
        decay = self.decay
        lr = self.lr
        augmentation = self.augmentation
        # Gather inputs -- the images, the reco energy and eta.
        EMB1 = Input(shape=(128, 4, 1), name='input_0')
        EMB2 = Input(shape=(16, 16, 1), name='input_1')
        EMB3 = Input(shape=(8, 16, 1), name='input_2')
        TB0 = Input(shape=(4, 4, 1), name='input_3')
        TB1 = Input(shape=(4, 4, 1), name='input_4')
        TB2 = Input(shape=(2, 4, 1), name='input_5')
        energy = Input(shape=(1,), name='energy')
        eta = Input(shape=(1,), name='eta')
        input_list = [EMB1, EMB2, EMB3, TB0, TB1, TB2, energy, eta]

        # Image augmentation.
        # Note: EMB1 and (EMB2+EMB3) are augmented separately, so they may
        #       receive different flips. Not sure whether this is actually
        #       a problem for what we're doing.
        if (augmentation): EMB1 = RandomFlip(name='emb1_flip')(EMB1)

        # Perform some convolutions with EMB1
        x1 = Conv2D(32, (3, 3), padding='same', name='emb1_conv2d_1')(EMB1)
        x1 = Activation('relu')(x1)
        x1 = Conv2D(32, (3, 3), padding='same', name='emb1_conv2d_2')(x1)
        x1 = Activation('relu')(x1)
        x1 = MaxPooling2D(pool_size=(2, 1),
                          padding='same',
                          name='emb1_maxpool_3')(x1)
        x1 = Flatten(name='emb1_flatten_4')(x1)

        # Perform some convolutions with EMB2 + EMB3
        x2 = ImageScaleBlock(
            (16, 16), normalization=True,
            name_prefix='emb_stack')([EMB2, EMB3])  # merge EMB2 and EMB3

        if (augmentation): x2 = RandomFlip(name='emb23_flip')(x2)

        x2 = Conv2D(32, (1, 1), padding='same', name='emb23_conv1d_1')(x2)
        x2 = Activation('relu')(x2)
        x2 = Conv2D(64, (2, 2), padding='same', name='emb23_conv2d_2')(x2)
        x2 = Activation('relu')(x2)
        x2 = MaxPooling2D(pool_size=(2, 1),
                          padding='same',
                          name='emb23_maxpool_3')(x2)
        x2 = Flatten(name='emb23_flatten_4')(x2)

        # Mix down the results of the EMB convolutions.
        x = Concatenate(axis=1, name='emb_concat')([x1, x2])
        x = Dense(32, activation='relu', name='emb_dense_1')(x)
        x = Dropout(dropout, name='emb_dropout_2')(x)
        x = Dense(16, activation='relu', name='emb_dense_3')(x)
        x = Dropout(dropout, name='emb_dropout_4')(x)

        # Compute energy depth information
        depth = ImageScaleBlock(
            (1, 1), normalization=True,
            name_prefix='depth')([EMB1, EMB2, EMB3, TB0, TB1, TB2])
        depth = Flatten(name='depth_flatten')(depth)

        x = Concatenate(axis=1, name='concatenate_2')([x, depth, energy, eta])
        x = Dense(32, activation='relu', name='full_dense_1')(x)
        x = Dropout(dropout, name='full_dropout_2')(x)
        x = Dense(32, activation='relu', name='full_dense_3')(x)
        x = Dropout(dropout, name='full_dropout_4')(x)
        output = Dense(1, kernel_initializer='normal', activation='linear')(x)

        if (self.opt == 'adam'):
            optimizer = Adam(learning_rate=lr, decay=decay)
        else:
            optimizer = SGD(learning_rate=lr, momentum=0.)
        model = Model(inputs=input_list, outputs=output, name='Simple_CNN')
        model.compile(optimizer=optimizer, loss='mse', metrics=['mae', 'mse'])
        return model
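
Since every Input above is named, the cleanest way to feed this model is a dict keyed by those names; a hypothetical training call (the arrays are stand-ins):

history = model.fit(
    x={'input_0': emb1, 'input_1': emb2, 'input_2': emb3,
       'input_3': tb0, 'input_4': tb1, 'input_5': tb2,
       'energy': reco_energy, 'eta': reco_eta},
    y=target_energy,  # regression target for the linear output
    epochs=10, batch_size=256, validation_split=0.1)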
Example #7
    def model(self):
        filter_sets = self.filter_sets
        f_vals = self.f_vals
        s_vals = self.s_vals
        i_vals = self.i_vals
        channels = self.channels
        input_shape = self.input_shape
        augmentation = self.augmentation
        normalization = self.normalization
        lr = self.lr
        classes = self.classes

        # Input images -- one for each channel, each channel's dimensions may be different.
        inputs = [
            Input((None, None, 1), name='input' + str(i))
            for i in range(channels)
        ]

        # Rescale all the input images, so that their dimensions now match.
        X = ImageScaleBlock(input_shape,
                            normalization=True,
                            name_prefix='scaled_input_')(inputs)
        if (normalization): X = NormalizationBlock(axes=[1, 2, 3])(X)

        X = ZeroPadding2D((3, 3))(X)

        # Data augmentation.
        # With channels combined, we can now flip images in (eta,phi, eta&phi).
        # Note that these flips will not require making any changes to
        # the other inputs (energy, abs(eta)), so the augmentation is
        # as simple as flipping the images using built-in functions.
        # These augmentation functions will only be active during training.
        if (augmentation): X = RandomFlip(name='aug_reflect')(X)

        # Stage 1
        X = Conv2D(64, (7, 7),
                   strides=(2, 2),
                   name='conv1',
                   kernel_initializer=glorot_uniform(seed=0))(X)
        X = BatchNormalization(axis=3, name='bn_conv1')(X)
        X = Activation('relu')(X)
        X = MaxPooling2D((3, 3), strides=(2, 2))(X)

        n = len(f_vals)
        for i in range(n):
            filters = filter_sets[i]
            f = f_vals[i]
            s = s_vals[i]
            ib = i_vals[i]
            stage = i + 1  # 1st stage is Conv2D etc. before ResNet blocks
            #X = convolutional_block(X, f=f, filters=filters, stage=stage, block='a', s=1)
            X = ConvolutionBlock(f=f,
                                 filters=filters,
                                 stage=stage,
                                 block='a',
                                 s=s,
                                 normalization=normalization)(X)

            for j in range(ib):
                #X = identity_block(X, f, filters, stage=stage, block=ascii_lowercase[j+1]) # will only cause naming issues if there are many id blocks
                X = IdentityBlock(f=f,
                                  filters=filters,
                                  stage=stage,
                                  block=ascii_lowercase[j + 1],
                                  normalization=normalization)(X)

        # AVGPOOL
        pool_size = (2, 2)
        if (X.shape[1] == 1): pool_size = (1, 2)
        elif (X.shape[2] == 1): pool_size = (2, 1)
        X = AveragePooling2D(pool_size=pool_size, name="avg_pool")(X)

        # output layer
        X = Flatten()(X)
        X = Dense(classes,
                  activation='softmax',
                  name='fc' + str(classes),
                  kernel_initializer=glorot_uniform(seed=0))(X)

        # Create model object.
        model = Model(inputs=inputs, outputs=X, name='ResNet')

        # Compile the model
        optimizer = Adam(learning_rate=lr)
        model.compile(loss='categorical_crossentropy',
                      optimizer=optimizer,
                      metrics=['acc'])
        return model
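
As the comments note, RandomFlip is only active during training; at inference time it passes inputs through unchanged. A quick check:

import tensorflow as tf
from tensorflow.keras.layers import RandomFlip

x = tf.random.uniform((4, 16, 16, 1))
flip = RandomFlip()
print(bool(tf.reduce_all(flip(x, training=False) == x)))  # True: no-op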
Example #8

class Saturation(Layer):
    def __init__(self, saturation_factor=1.5, **kwargs):
        super().__init__(**kwargs)
        self.saturation_factor = saturation_factor

    def call(self, data):
        # Adjust saturation per image: convert RGB -> HSV, multiply the
        # S channel by saturation_factor, then convert back to RGB.
        return tf.image.adjust_saturation(data, self.saturation_factor)


augment = Sequential([
    Saturation(3),
    Brightness(0.1),
    RandomFlip('horizontal'),
    RandomRotation(0.4),
    RandomZoom(0.2)
])
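
Applied as a layer, the pipeline transforms a whole batch in one call. Note that tf.image.adjust_saturation expects 3-channel RGB input, and that only the Random* layers are restricted to training=True; Saturation (and Brightness, presumably a custom layer defined alongside it) apply unconditionally:

images = tf.random.uniform((8, 224, 224, 3))  # dummy RGB batch
augmented = augment(images, training=True)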

# ----------------------------------------------------
# CNN Model for Fire/NoFire recognition
# ----------------------------------------------------


class FireModel:
    def __init__(self, base, preprocess):
        '''
        Wrapper class for a CNN model that augments incoming data, uses a
        pre-trained model as a base, and is trained to perform binary
        classification.