Example #1
def CNN_submodel_2(INPUTS, l2):
    conv1 = Conv2D(32, (1, 128), padding='same',
                   input_shape=(12, 1024, 1))(INPUTS)
    conv1 = BatchNormalization(axis=1)(conv1)
    conv1 = DepthwiseConv2D((12, 1),
                            depth_multiplier=3,
                            depthwise_constraint=max_norm(1.))(conv1)
    conv1 = BatchNormalization(axis=1)(conv1)
    conv1 = Activation('elu')(conv1)
    conv1 = AveragePooling2D((1, 8))(conv1)
    conv1 = Dropout(0.5)(conv1)

    conv2 = SeparableConv2D(64, (1, 64), padding='same')(conv1)
    conv2 = BatchNormalization(axis=1)(conv2)
    conv2 = Activation('elu')(conv2)
    conv2 = AveragePooling2D((1, 8))(conv2)
    conv2 = Dropout(0.5)(conv2)

    conv3 = SeparableConv2D(64, (1, 64), padding='same')(conv2)
    conv3 = BatchNormalization(axis=1)(conv3)
    conv3 = Activation('elu')(conv3)
    conv3 = AveragePooling2D((1, 8))(conv3)
    conv3 = Dropout(0.5)(conv3)

    flatten = Flatten()(conv3)
    dense = Dense(16,
                  activation="elu",
                  kernel_regularizer=keras.regularizers.l2(l2))(flatten)
    dense = Dense(2, kernel_constraint=max_norm(0.01))(dense)
    result = Activation('softmax')(dense)

    return Model(inputs=INPUTS, outputs=result)
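A minimal usage sketch for the submodel above (not part of the original snippet; it assumes the same Keras imports such as Input and Model, and an illustrative l2 strength):

inputs = Input(shape=(12, 1024, 1))      # 12 channels x 1024 samples x 1, as declared above
model = CNN_submodel_2(inputs, l2=0.01)  # l2 is the regularization strength; 0.01 is a placeholder
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()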
Example #2
def inception_resnet(nb_classes=3,
                     Chans=64,
                     Samples=321,
                     dropoutRate=0.5,
                     kernLength=64,
                     F1=8,
                     D=2,
                     F2=16,
                     norm_rate=0.25,
                     dropoutType='Dropout',
                     gpu=True):

    if dropoutType == 'SpatialDropout2D':
        dropoutType = SpatialDropout2D
    elif dropoutType == 'Dropout':
        dropoutType = Dropout
    else:
        raise ValueError('dropoutType must be one of SpatialDropout2D '
                         'or Dropout, passed as a string.')

    input1 = Input(shape=(1, Chans, Samples))

    block1 = Conv2D(F1, (1, kernLength),
                    padding='same',
                    input_shape=(1, Chans, Samples),
                    use_bias=False,
                    data_format='channels_first')(input1)
    block1 = BatchNormalization(axis=1)(block1)
    block1 = DepthwiseConv2D((Chans, 1),
                             use_bias=False,
                             depth_multiplier=D,
                             depthwise_constraint=max_norm(1.),
                             data_format='channels_first')(block1)
    block1 = BatchNormalization(axis=1)(block1)
    block1 = Activation('elu')(block1)
    block1 = AveragePooling2D((1, 2), data_format='channels_first')(block1)
    block1 = dropoutType(dropoutRate)(block1)

    block2 = SeparableConv2D(F2, (1, 16),
                             use_bias=False,
                             padding='same',
                             data_format='channels_first')(block1)
    block2 = BatchNormalization(axis=1)(block2)
    block2 = Activation('elu')(block2)
    block2 = AveragePooling2D((1, 4), data_format='channels_first')(block2)
    block2 = dropoutType(dropoutRate)(block2)

    block3 = Conv2D(F2, (1, 8), padding='same',
                    data_format='channels_first')(block2)
    block3 = BatchNormalization(axis=1)(block3)
    block3 = Activation('elu')(block3)
    block3 = AveragePooling2D((1, 8), data_format='channels_first')(block3)

    flatten = Flatten(name='flatten')(block3)

    dense = Dense(1, name='out',
                  kernel_constraint=max_norm(norm_rate))(flatten)
    sigmoid = Activation('sigmoid', name='sigmoid')(dense)

    return Model(inputs=input1, outputs=sigmoid)
Example #3
def EEGNet3D_Branch(nb_classes, XDim, YDim, Samples, dropoutRate, kernLength,
                    F1, D, F2, norm_rate, dropoutType, block):
    block1 = Conv3D(F1, (1, 1, kernLength),
                    padding='same',
                    input_shape=(XDim, YDim, Samples, 1),
                    use_bias=False)(block)
    block1 = BatchNormalization()(block1)
    block1 = Conv3D(D * F1, (XDim, YDim, 1),
                    groups=F1,
                    kernel_constraint=max_norm(1.),
                    use_bias=False)(block1)
    block1 = BatchNormalization()(block1)
    block1 = Activation('elu')(block1)
    block1 = AveragePooling3D((1, 1, 4))(block1)
    block1 = dropoutType(dropoutRate)(block1)

    block2 = Conv3D(F2, (1, 1, 16), groups=F2, use_bias=False,
                    padding='same')(block1)
    block2 = Conv3D(F2, (1, 1, 1), use_bias=False, padding='same')(block2)
    block2 = BatchNormalization()(block2)
    block2 = Activation('elu')(block2)
    block2 = AveragePooling3D((1, 1, 8))(block2)
    block2 = dropoutType(dropoutRate)(block2)

    flatten = Flatten()(block2)

    return Dense(nb_classes, kernel_constraint=max_norm(norm_rate))(flatten)
Example #4
    def __output_layer(self, connected_layer):
        if self.network_type == 'val-adv':
            # Reference: https://www.reddit.com/r/reinforcementlearning/comments/bu02ej/help_with_dueling_dqn/
            # Value & Advantage Layer
            val = Dense(1,
                        kernel_initializer='he_uniform',
                        kernel_constraint=max_norm(5),
                        name='Value')(connected_layer)
            val = Activation('linear')(val)
            adv = Dense(self.action_size,
                        kernel_initializer='he_uniform',
                        kernel_constraint=max_norm(5),
                        name='Advantage')(connected_layer)
            adv = Activation('linear')(adv)

            # Output layer
            mean = Lambda(lambda x: K.mean(x, axis=1, keepdims=True),
                          name='Mean')(adv)
            adv = Subtract(name='Advantage_Mean')([adv, mean])
            outputs = Add(name='Value_Advantage')([val, adv])

        else:
            outputs = Dense(self.action_size,
                            kernel_initializer='he_uniform',
                            kernel_constraint=max_norm(5),
                            name='Output')(connected_layer)
            outputs = Activation('linear')(outputs)

        return outputs
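The 'val-adv' branch above is the dueling-DQN head, Q(s, a) = V(s) + (A(s, a) - mean_a A(s, a)). A standalone sketch of the same composition outside the class, using the same layers and K (keras.backend) as above, with illustrative state and action sizes of 8 and 4:

state_input = Input(shape=(8,))
hidden = Dense(64, activation='relu', kernel_initializer='he_uniform')(state_input)
val = Dense(1)(hidden)                                        # V(s)
adv = Dense(4)(hidden)                                        # A(s, a)
mean_adv = Lambda(lambda x: K.mean(x, axis=1, keepdims=True))(adv)
q_values = Add()([val, Subtract()([adv, mean_adv])])          # Q = V + (A - mean(A))
dueling_model = Model(state_input, q_values)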
Example #5
def create_model(ModelInfo):
    # Define the regularization technique (e.g. reg = l1(0.001))
    model = keras.Sequential()
    model.add(layers.Dense(ModelInfo['Nerouns'][0], input_shape=[7],
                           activation=ModelInfo['Activation_Method'][0],
                           # optionally: kernel_initializer / bias_initializer
                           # from ModelInfo['W_Initialization_Method'][0]
                           activity_regularizer=l1(ModelInfo['Reguralization'][0]),
                           kernel_constraint=max_norm(ModelInfo['kernel_constraint'][0])))
    model.add(layers.Dropout(ModelInfo['Dropout_Value'][0]))
    for c in range(1, ModelInfo['Layers'][0]):
        print('Index=', c)
        model.add(layers.Dense(ModelInfo['Nerouns'][c],
                               activation=tf.nn.relu,
                               activity_regularizer=l1(ModelInfo['Reguralization'][c]),
                               kernel_constraint=max_norm(ModelInfo['kernel_constraint'][c])))
        model.add(layers.Dropout(ModelInfo['Dropout_Value'][c]))
        # model.add(layers.BatchNormalization())

    model.add(layers.Dense(1, activation=ModelInfo['Activation_Method'][0]))
    return model
Example #6
    def add_decoder_layers(self):
        self.model.add(
            lay.Conv2DTranspose(128, (3, 3),
                                kernel_constraint=max_norm(2.0),
                                kernel_initializer='he_uniform'))

        self.model.add(lay.BatchNormalization())
        self.model.add(lay.ReLU())

        self.model.add(
            lay.Conv2DTranspose(64, (3, 3),
                                kernel_initializer='he_uniform',
                                kernel_constraint=max_norm(2.0)))

        self.model.add(lay.BatchNormalization())
        self.model.add(lay.ReLU())

        self.model.add(
            lay.Conv2DTranspose(32, (3, 3),
                                kernel_initializer='he_uniform',
                                kernel_constraint=max_norm(2.0)))

        self.model.add(lay.BatchNormalization())
        self.model.add(lay.ReLU())

        self.model.add(
            lay.Conv2DTranspose(3, (3, 3),
                                activation='sigmoid',
                                kernel_constraint=max_norm(2.0),
                                padding='same',
                                kernel_initializer='he_uniform'))
Example #7
    def build(self):
        'encoder --> DCNN'
        encoder_input = Input(self.input_shape)
        en_conv = Conv2D(self.filter_1, (1, 64),
                         activation='elu',
                         padding="same",
                         kernel_constraint=max_norm(2.,
                                                    axis=(0, 1,
                                                          2)))(encoder_input)
        en_conv = BatchNormalization(axis=3, epsilon=1e-05,
                                     momentum=0.1)(en_conv)
        en_conv = AveragePooling2D(pool_size=self.pool_size_1)(en_conv)
        en_conv = Conv2D(self.filter_2, (1, 32),
                         activation='elu',
                         padding="same",
                         kernel_constraint=max_norm(2.,
                                                    axis=(0, 1, 2)))(en_conv)
        en_conv = BatchNormalization(axis=3, epsilon=1e-05,
                                     momentum=0.1)(en_conv)
        en_conv = AveragePooling2D(pool_size=self.pool_size_2)(en_conv)
        en_conv = Flatten()(en_conv)
        encoder_output = Dense(self.latent_dim,
                               kernel_constraint=max_norm(0.5))(en_conv)

        z = Dense(self.num_class,
                  activation='softmax',
                  kernel_constraint=max_norm(0.5),
                  name='classifier')(encoder_output)

        return models.Model(inputs=encoder_input,
                            outputs=[encoder_output, z],
                            name='MIN2Net_without_decoder')
Example #8
    def add_encoder_layers(self):
        self.model.add(
            lay.Conv2D(32, (3, 3),
                       activation='relu',
                       kernel_constraint=max_norm(2.0),
                       kernel_initializer='he_uniform'))

        self.model.add(lay.BatchNormalization())
        self.model.add(lay.ReLU())

        self.model.add(
            lay.Conv2D(64, (3, 3),
                       kernel_constraint=max_norm(2.0),
                       kernel_initializer='he_uniform'))

        self.model.add(lay.BatchNormalization())
        self.model.add(lay.ReLU())

        self.model.add(lay.Conv2D(128, (3, 3),
                                  kernel_initializer='he_uniform'))

        self.model.add(lay.BatchNormalization())
        self.model.add(lay.ReLU())

        self.model.add(
            lay.Conv2D(128, (3, 3),
                       kernel_initializer='he_uniform',
                       kernel_constraint=max_norm(2.0),
                       padding='same'))

        self.model.add(lay.BatchNormalization())
        self.model.add(lay.ReLU())
Example #9
def cifar_vgg_model(input_shape):
    vgg_base = VGG16(
        include_top=False,
        # weights=None,
        weights='imagenet',
        input_tensor=Input(shape=input_shape),
        input_shape=input_shape,
        pooling=None
    )
    x = vgg_base.output
    dropout1 = tf.keras.layers.Dropout(0.5)
    dropout2 = tf.keras.layers.Dropout(0.5)
    x = dropout1(x)
    x = Flatten(name='flatten')(x)
    x = Dense(4096, activation='relu', name='fc1', kernel_constraint=max_norm(2), trainable=True
              )(x)
    x = dropout2(x)
    x = Dense(1024, activation='relu', name='fc2', kernel_constraint=max_norm(2), trainable=True
              )(x)
    predictions = Dense(10, activation='softmax', name='predictions', trainable=True)(x)
    model = Model(inputs=vgg_base.input, outputs=predictions)
    learning_rate = 0.001
    lr_decay = 1e-6
    sgd = SGD(lr=learning_rate, decay=lr_decay, momentum=0.9, nesterov=True)
    model.compile(loss="categorical_crossentropy", optimizer=sgd, metrics=["accuracy"])
    return model
Example #10
def create_model():
    model = Sequential()
    model.add(
        Dense(16,
              input_dim=input_dim,
              kernel_constraint=max_norm(3),
              kernel_regularizer=regularizers.l2(0.001)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dropout(0.4))
    model.add(
        Dense(8,
              kernel_constraint=max_norm(3),
              kernel_regularizer=regularizers.l2(0.001)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dropout(0.4))
    model.add(
        Dense(output_dim,
              kernel_constraint=max_norm(3),
              kernel_regularizer=regularizers.l2(0.001)))
    model.add(BatchNormalization())
    model.add(Activation('sigmoid'))
    model.compile(optimizer=Adam(0.01),
                  loss='binary_crossentropy',
                  metrics=['accuracy'],
                  run_eagerly=True)  # loss=odds_loss
    return model
Example #11
    def build(self):
        input1 = Input(shape=self.input_shape)

        ##################################################################
        block1 = Conv2D(self.F1, (1, self.kernLength),
                        padding='same',
                        input_shape=self.input_shape,
                        use_bias=False)(input1)
        block1 = BatchNormalization()(block1)
        block1 = DepthwiseConv2D((self.Chans, 1),
                                 use_bias=False,
                                 depth_multiplier=self.D,
                                 depthwise_constraint=max_norm(1.))(block1)
        block1 = BatchNormalization()(block1)
        block1 = Activation('elu')(block1)
        block1 = AveragePooling2D((1, 4))(block1)
        block1 = Dropout(self.dropout_rate)(block1)

        block2 = SeparableConv2D(self.F2, (1, self.kernLength // 4),
                                 use_bias=False,
                                 padding='same')(block1)
        block2 = BatchNormalization()(block2)
        block2 = Activation('elu')(block2)
        block2 = AveragePooling2D((1, 8))(block2)
        block2 = Dropout(self.dropout_rate)(block2)

        flatten = Flatten(name='flatten')(block2)

        dense = Dense(self.num_class,
                      name='dense',
                      kernel_constraint=max_norm(self.norm_rate))(flatten)
        softmax = Activation('softmax', name='softmax')(dense)

        return Model(inputs=input1, outputs=softmax)
Example #12
def ShallowConvNet(nb_classes, Chans=64, Samples=128, dropoutRate=0.5, cpu=False):
    if cpu:
        input_shape = (Samples, Chans, 1)
        conv_filters = (25, 1)
        conv_filters2 = (1, Chans)
        pool_size = (45, 1)
        strides = (15, 1)
        axis = -1
    else:
        input_shape = (1, Chans, Samples)
        conv_filters = (1, 20)
        conv_filters2 = (Chans, 1)
        pool_size = (1, 45)
        strides = (1, 15)
        axis = 1

    input_main = Input(input_shape)
    block1 = Conv2D(20, conv_filters,
                    input_shape=input_shape,
                    kernel_constraint=max_norm(2., axis=(0, 1, 2)))(input_main)
    block1 = Conv2D(20, conv_filters2, use_bias=False,
                    kernel_constraint=max_norm(2., axis=(0, 1, 2)))(block1)
    block1 = BatchNormalization(axis=axis, epsilon=1e-05, momentum=0.1)(block1)
    block1 = Activation(square)(block1)
    block1 = AveragePooling2D(pool_size=pool_size, strides=strides)(block1)
    block1 = Activation(log)(block1)
    block1 = Dropout(dropoutRate)(block1)
    flatten = Flatten()(block1)
    dense = Dense(nb_classes, kernel_constraint=max_norm(0.5))(flatten)
    softmax = Activation('softmax')(dense)

    return Model(inputs=input_main, outputs=softmax)
Example #13
def EEGNet(nb_classes, Chans=64, Samples=128,
           dropoutRate=0.5, kernLength=64, F1=8,
           D=2, F2=16, norm_rate=0.25, dropoutType='Dropout', cpu=False):
    if dropoutType == 'SpatialDropout2D':
        dropoutType = SpatialDropout2D
    elif dropoutType == 'Dropout':
        dropoutType = Dropout
    else:
        raise ValueError('dropoutType must be one of SpatialDropout2D '
                         'or Dropout, passed as a string.')

    if cpu:
        input_shape = (Samples, Chans, 1)
        conv_filters = (kernLength, 1)
        depth_filters = (1, Chans)
        pool_size = (6, 1)
        pool_size2 = (12, 1)
        separable_filters = (20, 1)
        axis = -1
    else:
        input_shape = (1, Chans, Samples)
        conv_filters = (1, kernLength)
        depth_filters = (Chans, 1)
        pool_size = (1, 6)
        pool_size2 = (1, 12)
        separable_filters = (1, 20)
        axis = 1

    input1 = Input(shape=input_shape)
    block1 = Conv2D(F1, conv_filters, padding='same',
                    input_shape=input_shape,
                    use_bias=False)(input1)
    block1 = BatchNormalization(axis=axis)(block1)
    block1 = DepthwiseConv2D(depth_filters, use_bias=False,
                             depth_multiplier=D,
                             depthwise_constraint=max_norm(1.))(block1)
    block1 = BatchNormalization(axis=axis)(block1)
    block1 = Activation('elu')(block1)
    block1 = AveragePooling2D(pool_size)(block1)
    block1 = dropoutType(dropoutRate)(block1)

    block2 = SeparableConv2D(F2, separable_filters,
                             use_bias=False, padding='same')(block1)
    block2 = BatchNormalization(axis=axis)(block2)
    block2 = Activation('elu')(block2)
    block2 = AveragePooling2D(pool_size2)(block2)
    block2 = dropoutType(dropoutRate)(block2)

    flatten = Flatten(name='flatten')(block2)

    dense = Dense(nb_classes, name='dense',
                  kernel_constraint=max_norm(norm_rate))(flatten)
    softmax = Activation('softmax', name='softmax')(dense)

    return Model(inputs=input1, outputs=softmax)
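A minimal usage sketch for this EEGNet variant (illustrative values, not from the original snippet); with cpu=True the trials are expected in channels-last layout, shaped (n_trials, Samples, Chans, 1):

model = EEGNet(nb_classes=4, Chans=64, Samples=128, cpu=True)
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
# model.fit(X_train, y_onehot, batch_size=32, epochs=50)  # hypothetical training data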
Example #14
def classification_model(input_shape, class_num):
    model = Sequential()

    # Convolutional layer
    model.add(Conv2D(32, (3, 3), input_shape=input_shape, padding='same'))
    model.add(Activation('relu'))

    # Drop 20% of the activations to avoid overfitting
    model.add(Dropout(0.2))

    # Normalize inputs for the next layer
    model.add(BatchNormalization())

    # Second convolutional layer (complex representations)
    model.add(Conv2D(64, (3, 3), padding='same'))
    model.add(Activation('relu'))

    # Learn relevant patterns
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.2))
    model.add(BatchNormalization())

    # Repeat
    model.add(Conv2D(64, (3, 3), padding='same'))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.2))
    model.add(BatchNormalization())
    model.add(Conv2D(128, (3, 3), padding='same'))
    model.add(Activation('relu'))
    model.add(Dropout(0.2))
    model.add(BatchNormalization())

    # Flatten result
    model.add(Flatten())
    model.add(Dropout(0.2))

    # Dense layers
    model.add(Dense(256, kernel_constraint=max_norm(3)))
    model.add(Activation('relu'))
    model.add(Dropout(0.2))
    model.add(BatchNormalization())

    model.add(Dense(128, kernel_constraint=max_norm(3)))
    model.add(Activation('relu'))
    model.add(Dropout(0.2))
    model.add(BatchNormalization())

    # Prediction layers
    model.add(Dense(class_num))
    model.add(Activation('softmax'))

    return model
Example #15
def get_model():
    """
    Returns a compiled convolutional neural network model. Assume that the
    `input_shape` of the first layer is `(IMG_WIDTH, IMG_HEIGHT, 3)`.
    The output layer should have `NUM_CATEGORIES` units, one for each category.
    """
    # Create a convolutional neural network
    model = tf.keras.models.Sequential([

        # Convolutional layer #1. Learn 32 filters using a 3x3 kernel on the input shape
        tf.keras.layers.Conv2D(FILTERS, (KERNEL, KERNEL),
                               input_shape=(IMG_WIDTH, IMG_HEIGHT, IMG_THI),
                               padding="same"),
        tf.keras.layers.Activation("relu"),
        tf.keras.layers.BatchNormalization(),

        # Convolutional layer #2. Learn 64 filters using a 3x3 kernel, with pooling
        tf.keras.layers.Conv2D(FILTERS * 2, (KERNEL, KERNEL), padding="same"),
        tf.keras.layers.Activation("relu"),
        tf.keras.layers.MaxPooling2D(pool_size=(POOLSIZE, POOLSIZE)),
        tf.keras.layers.BatchNormalization(),

        # Convolutional layer #3. Learn 128 filters using a 3x3 kernel, with pooling
        tf.keras.layers.Conv2D(FILTERS * 4, (KERNEL, KERNEL), padding="same"),
        tf.keras.layers.Activation("relu"),
        tf.keras.layers.MaxPooling2D(pool_size=(POOLSIZE, POOLSIZE)),
        tf.keras.layers.BatchNormalization(),

        # Flatten units
        tf.keras.layers.Flatten(),

        # Add a Dense hidden layer #1: 256 units with dropout
        tf.keras.layers.Dense(FILTERS * 8, kernel_constraint=max_norm(3)),
        tf.keras.layers.Activation("relu"),
        tf.keras.layers.Dropout(DROPOUT),
        tf.keras.layers.BatchNormalization(),

        # Add a Dense hidden layer #2: 128 units with dropout
        tf.keras.layers.Dense(FILTERS * 4, kernel_constraint=max_norm(3)),
        tf.keras.layers.Activation("relu"),
        tf.keras.layers.Dropout(DROPOUT),
        tf.keras.layers.BatchNormalization(),

        # Add an output layer with output units for all categories
        tf.keras.layers.Dense(NUM_CATEGORIES),
        tf.keras.layers.Activation("softmax")
    ])

    # Compile the network
    model.compile(optimizer="adam",
                  loss="categorical_crossentropy",
                  metrics=["accuracy"])
    return model
Example #16
def EEGNet(nb_classes,
           Chans=64,
           Samples=128,
           dropoutRate=0.5,
           kernLength=64,
           F1=4,
           D=2,
           F2=8,
           norm_rate=0.25,
           dropoutType='Dropout'):

    if dropoutType == 'SpatialDropout2D':
        dropoutType = SpatialDropout2D
    elif dropoutType == 'Dropout':
        dropoutType = Dropout
    else:
        raise ValueError('dropoutType must be one of SpatialDropout2D '
                         'or Dropout, passed as a string.')

    input1 = Input(shape=(Chans, Samples, 1))

    ##################################################################
    # padding = 'valid' => no padding
    # padding = 'same' => output has the same height/width dimension as the input
    block1 = Conv2D(F1, (1, kernLength),
                    padding='same',
                    input_shape=(Chans, Samples, 1),
                    use_bias=False)(input1)
    block1 = BatchNormalization()(block1)
    block1 = DepthwiseConv2D((Chans, 1),
                             use_bias=False,
                             depth_multiplier=D,
                             depthwise_constraint=max_norm(1.))(block1)
    block1 = BatchNormalization()(block1)
    block1 = Activation('elu')(block1)
    block1 = AveragePooling2D((1, 16))(block1)
    block1 = dropoutType(dropoutRate)(block1)

    block2 = SeparableConv2D(F2, (1, 64), use_bias=False,
                             padding='same')(block1)
    block2 = BatchNormalization()(block2)
    block2 = Activation('elu')(block2)
    block2 = AveragePooling2D((1, 16))(block2)
    block2 = dropoutType(dropoutRate)(block2)

    flatten = Flatten(name='flatten')(block2)

    dense = Dense(nb_classes,
                  name='dense',
                  kernel_constraint=max_norm(norm_rate))(flatten)
    softmax = Activation('softmax', name='softmax')(dense)

    return Model(inputs=input1, outputs=softmax)
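A quick shape check of the padding note in the example above (illustrative, assuming Chans=64, kernLength=64 and channels-last inputs): 'same' preserves the 128-sample width, while 'valid' would shrink it to 128 - 64 + 1 = 65.

x = Input(shape=(64, 128, 1))
print(Conv2D(4, (1, 64), padding='same')(x).shape)   # (None, 64, 128, 4)
print(Conv2D(4, (1, 64), padding='valid')(x).shape)  # (None, 64, 65, 4)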
Example #17
    def build_model_conv(self, actions):
        """
        define the neural network model architecture for the deep q agent
        """

        model = tf.keras.models.Sequential()

        model.add(
            Conv2D(32, (2, 2),
                   padding='same',
                   kernel_initializer='he_uniform',
                   kernel_constraint=max_norm(3),
                   input_shape=(1, self.env.height, self.env.width)))
        model.add(BatchNormalization())
        model.add(Activation('tanh'))

        model.add(
            Conv2D(64, (2, 2),
                   padding='same',
                   kernel_initializer='he_uniform',
                   kernel_constraint=max_norm(3)))
        model.add(BatchNormalization())
        model.add(Activation('tanh'))

        model.add(
            Conv2D(64, (2, 2),
                   padding='same',
                   kernel_initializer='he_uniform',
                   kernel_constraint=max_norm(3)))
        model.add(BatchNormalization())
        model.add(Activation('tanh'))

        # model.add(MaxPooling2D(pool_size=(2,2)))

        # end of convolutional layers, start of 'hidden' dense layers
        model.add(Flatten())
        model.add(
            Dense(128,
                  kernel_initializer='he_uniform',
                  kernel_constraint=max_norm(3)))
        model.add(BatchNormalization())
        model.add(Activation('tanh'))
        model.add(Dropout(0.5))

        # Final dense layer
        model.add(Dense(actions))
        model.add(BatchNormalization())
        model.add(Activation('linear'))

        return model
Example #18
def ShallowConvNet(nb_classes,
                   Chans=64,
                   Samples=128,
                   is_denoising=False,
                   dropoutRate=0.5):
    """ Keras implementation of the Shallow Convolutional Network as described
    in Schirrmeister et al. (2017), Human Brain Mapping.

    Assumes the input is a 2-second EEG signal sampled at 128Hz. Note that in
    the original paper, they do temporal convolutions of length 25 for EEG
    data sampled at 250Hz. We instead use length 13, since our sampling rate
    is roughly half of the 250Hz used in the paper. The pool_size and strides
    in later layers are also approximately half of those used in the paper.

    Note that we use the max_norm constraint on all convolutional layers, as
    well as the classification layer. We also change the defaults for the
    BatchNormalization layer. We used this based on a personal communication
    with the original authors.

                     ours        original paper
    pool_size        1, 35       1, 75
    strides          1, 7        1, 15
    conv filters     1, 13       1, 25

    Note that this implementation has not been verified by the original
    authors. We do note that this implementation reproduces the results in the
    original paper with minor deviations.
    """

    # start the model
    input_main = Input((1, Chans, Samples))
    block1 = Conv2D(40, (1, 13),
                    input_shape=(1, Chans, Samples),
                    kernel_constraint=max_norm(2., axis=(0, 1, 2)))(input_main)
    # if is_denoising:
    #     block1 = Lambda(lambda t: denoising(t, name='denosing_1', embed=True, softmax=True))(block1)
    block1 = Conv2D(40, (Chans, 1),
                    use_bias=False,
                    kernel_constraint=max_norm(2., axis=(0, 1, 2)))(block1)

    block1 = BatchNormalization(axis=1, epsilon=1e-05, momentum=0.1)(block1)
    block1 = Activation(square)(block1)
    block1 = AveragePooling2D(pool_size=(1, 35), strides=(1, 7))(block1)
    block1 = Activation(log)(block1)
    block1 = Dropout(dropoutRate)(block1)
    flatten = Flatten()(block1)
    dense = Dense(nb_classes, kernel_constraint=max_norm(0.5))(flatten)
    softmax = Activation('softmax')(dense)

    return Model(inputs=input_main, outputs=softmax)
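A minimal usage sketch (not from the original code): it assumes the custom square and log activations defined alongside this model, TensorFlow's Keras backend, and a channels-first image data format, since the tensors are laid out as (1, Chans, Samples):

tf.keras.backend.set_image_data_format('channels_first')
model = ShallowConvNet(nb_classes=4, Chans=64, Samples=256)  # e.g. 2 s at 128 Hz
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])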
Example #19
def build_model(num_heroes):
    model = Sequential()
    model.add(Dropout(0.2, input_shape=(num_heroes, )))
    # model.add(Dense(128, activation='relu', input_dim=num_heroes))
    model.add(Dense(128, activation='relu', kernel_constraint=max_norm(3)))
    model.add(Dropout(0.2))
    model.add(Dense(64, activation='relu', kernel_constraint=max_norm(3)))
    model.add(Dropout(0.2))
    model.add(Dense(1, activation='sigmoid'))

    adam = Adam(lr=0.01)
    model.compile(optimizer=adam,
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    return model
Example #20
def EEGNet(input_layer, F1=4, kernLength=64, D=2, Chans=22, dropout=0.1):
    F2 = F1 * D
    block1 = Conv2D(F1, (kernLength, 1),
                    padding='same',
                    data_format='channels_last',
                    use_bias=False)(input_layer)
    block1 = BatchNormalization(axis=-1)(block1)
    block2 = DepthwiseConv2D((1, Chans),
                             use_bias=False,
                             depth_multiplier=D,
                             data_format='channels_last',
                             depthwise_constraint=max_norm(1.))(block1)
    block2 = BatchNormalization(axis=-1)(block2)
    block2 = Activation('elu')(block2)
    block2 = AveragePooling2D((8, 1), data_format='channels_last')(block2)
    block2 = Dropout(dropout)(block2)
    block3 = SeparableConv2D(F2, (16, 1),
                             data_format='channels_last',
                             use_bias=False,
                             padding='same')(block2)
    block3 = BatchNormalization(axis=-1)(block3)
    block3 = Activation('elu')(block3)
    block3 = AveragePooling2D((8, 1), data_format='channels_last')(block3)
    block3 = Dropout(dropout)(block3)
    return block3
Example #21
def get_untrained_l6_all_digit_model(inputs):
    model = Sequential()

    model.add(
        Conv2D(32, (3, 3),
               padding='same',
               activation='relu',
               input_shape=inputs.shape[1:]))
    model.add(Dropout(0.2))

    model.add(Conv2D(32, (3, 3), padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
    model.add(Dropout(0.2))

    model.add(Conv2D(64, (3, 3), padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Conv2D(128, (3, 3), padding='same', activation='relu'))
    model.add(Dropout(0.2))

    model.add(Conv2D(128, (3, 3), padding='same', activation='relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))

    model.add(Flatten())
    model.add(Dropout(0.2))
    model.add(Dense(1024, activation='relu', kernel_constraint=max_norm(3)))
    model.add(Dropout(0.2))
    model.add(Dense(10, activation='softmax'))

    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model
Example #22
    def define_discriminator(n_blocks, input_shape=(4, 4, 3)):
        weight_init = RandomNormal(stddev=0.02)
        weight_constr = max_norm(1.0)
        model_list = []

        img_input = Input(shape=input_shape)
        d = Conv2D(128, (1, 1), padding='same', kernel_initializer=weight_init, kernel_constraint=weight_constr)(img_input)
        d = LeakyReLU(alpha=0.2)(d)

        d = MinibatchStdDev()(d)
        d = Conv2D(128, (3, 3), padding='same', kernel_initializer=weight_init, kernel_constraint=weight_constr)(d)
        d = LeakyReLU(alpha=0.2)(d)

        d = Conv2D(128, (4, 4), padding='same', kernel_initializer=weight_init, kernel_constraint=weight_constr)(d)
        d = LeakyReLU(alpha=0.2)(d)

        d = Flatten()(d)
        out_class = Dense(1)(d)

        model = Model(img_input, out_class)
        model.compile(loss=ProGAN.wasserstein_loss, optimizer=Adam(lr=0.001, beta_1=0, beta_2=0.99, epsilon=10e-8))
        model_list.append([model, model])

        # Create sub_models
        for i in range(1, n_blocks):
            old_model = model_list[i - 1][0]
            new_models = Discriminator._add_discriminator_block(old_model)
            model_list.append(new_models)
        return model_list
Example #23
    def add_g(self, old_model):
        # weight initialization
        init = RandomNormal(stddev=0.02)
        # weight constraint
        const = max_norm(1.0)

        block_end = old_model.layers[-2].output

        upsampling = UpSampling2D()(block_end)
        g = Conv2D(128, (3, 3),
                   padding='same',
                   kernel_initializer=init,
                   kernel_constraint=const)(upsampling)
        g = PixelNormalization()(g)
        g = LeakyReLU(alpha=0.2)(g)
        g = Conv2D(128, (3, 3),
                   padding='same',
                   kernel_initializer=init,
                   kernel_constraint=const)(g)
        g = PixelNormalization()(g)
        g = LeakyReLU(alpha=0.2)(g)
        out_image = Conv2D(3, (1, 1),
                           padding='same',
                           kernel_initializer=init,
                           kernel_constraint=const)(g)

        model1 = Model(old_model.input, out_image)
        out_old = old_model.layers[-1]
        out_image2 = out_old(upsampling)
        merged = WeightedSum()([out_image2, out_image])

        model2 = Model(old_model.input, merged)
        return [model1, model2]
Example #24
    def __conv_block(self, x, r):

        if self.use_constraint:
            constraint = max_norm(self.constraint_rate)
        else:
            constraint = None
        for i in range(self.conv_blocks):
            x = tf.keras.layers.Conv2D(filters=self.filter_start * (r + 1),
                                       kernel_size=self.add_tuples(
                                           self.filter_size, (r + 1, r + 1)),
                                       kernel_constraint=constraint)(x)
            if self.use_bn:
                x = tf.keras.layers.BatchNormalization()(x)
            x = tf.keras.layers.Activation(self.activation)(x)
            x = tf.keras.layers.Conv2D(filters=self.filter_start * (r + 1),
                                       kernel_size=self.add_tuples(
                                           self.filter_size, (r + 1, r + 1)),
                                       kernel_constraint=constraint)(x)
            if self.use_bn:
                x = tf.keras.layers.BatchNormalization()(x)
            x = tf.keras.layers.Activation(self.activation)(x)
            if self.use_dropout:
                x = tf.keras.layers.Dropout(self.dropout_rate)(x)
            x = tf.keras.layers.MaxPooling2D(pool_size=self.pool_size)(x)
        return x
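The block above relies on a self.add_tuples helper that is not shown; a hypothetical version consistent with how it is called (element-wise tuple addition, so the kernel size grows with the block index r):

    @staticmethod
    def add_tuples(a, b):
        # hypothetical helper: e.g. add_tuples((3, 3), (1, 1)) -> (4, 4)
        return tuple(x + y for x, y in zip(a, b))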
Example #25
def TA_CSPNN(nb_classes,
             Channels=64,
             Timesamples=90,
             dropOut=0.25,
             timeKernelLen=50,
             Ft=11,
             Fs=6):

    # Input shape is (trials, 1, number of channels, number of time samples)

    input_e = Input(shape=(1, Channels, Timesamples))
    convL1 = Conv2D(Ft, (1, timeKernelLen),
                    padding='same',
                    input_shape=(1, Channels, Timesamples),
                    use_bias=False)(input_e)

    bNorm1 = BatchNormalization(axis=1)(convL1)

    convL2 = DepthwiseConv2D((Channels, 1),
                             use_bias=False,
                             depth_multiplier=Fs,
                             depthwise_constraint=max_norm(1.))(bNorm1)
    bNorm2 = BatchNormalization(axis=1)(convL2)

    lambdaL = Lambda(lambda x: x**2)(bNorm2)
    aPool = AveragePooling2D((1, Timesamples))(lambdaL)

    dOutL = Dropout(dropOut)(aPool)

    flatten = Flatten(name='flatten')(dOutL)

    dense = Dense(nb_classes, name='dense')(flatten)
    softmax = Activation('softmax', name='softmax')(dense)

    return Model(inputs=input_e, outputs=softmax)
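A minimal usage sketch (illustrative values, not from the original snippet); as the comment above notes, trials are shaped (n_trials, 1, Channels, Timesamples), so a channels-first image data format is assumed:

tf.keras.backend.set_image_data_format('channels_first')
model = TA_CSPNN(nb_classes=2, Channels=22, Timesamples=250)
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])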
Example #26
    def define_generator(latent_dim, n_blocks, output_shape=(4, 4, 3)):
        weight_init = RandomNormal(stddev=0.02)
        weight_constr = max_norm(1.0)
        model_list = []

        in_latent = Input(shape=(latent_dim,))

        g = Dense(128 * output_shape[0] * output_shape[1], kernel_initializer=weight_init, kernel_constraint=weight_constr)(in_latent)
        g = Reshape((output_shape[0], output_shape[1], 128))(g)

        g = Conv2D(128, (4, 4), padding='same', kernel_initializer=weight_init, kernel_constraint=weight_constr)(g)
        g = PixelNormalisation()(g)
        g = LeakyReLU(alpha=0.2)(g)
        g = Conv2D(128, (3, 3), padding='same', kernel_initializer=weight_init, kernel_constraint=weight_constr)(g)
        g = PixelNormalisation()(g)
        g = LeakyReLU(alpha=0.2)(g)

        img_output = Conv2D(output_shape[-1], (1, 1), padding='same', kernel_initializer=weight_init, kernel_constraint=weight_constr)(g)

        model = Model(in_latent, img_output)

        model_list.append([model, model])

        for i in range(1, n_blocks):
            old_model = model_list[i - 1][0]
            new_models = Generator._add_generator_block(old_model, output_shape)
            model_list.append(new_models)

        return model_list
Example #27
    def _add_generator_block(old_model, output_shape):
        weight_init = RandomNormal(stddev=0.02)
        weight_constr = max_norm(1.0)

        block_end = old_model.layers[-2].output
        upsampling = UpSampling2D()(block_end)

        g = Conv2D(128, (3, 3), padding='same', kernel_initializer=weight_init, kernel_constraint=weight_constr)(upsampling)
        g = PixelNormalisation()(g)
        g = LeakyReLU(alpha=0.2)(g)
        g = Conv2D(128, (3, 3), padding='same', kernel_initializer=weight_init, kernel_constraint=weight_constr)(g)
        g = PixelNormalisation()(g)
        g = LeakyReLU(alpha=0.2)(g)

        img_output_new = Conv2D(output_shape[-1], (1, 1), padding='same', kernel_initializer=weight_init, kernel_constraint=weight_constr)(g)

        straight_through_model = Model(old_model.input, img_output_new)

        old_output = old_model.layers[-1]
        img_output_old = old_output(upsampling)

        merged = WeightedSum()([img_output_old, img_output_new])

        fade_in_model = Model(old_model.input, merged)

        return straight_through_model, fade_in_model
Example #28
def EEGNetEncPartB(_input,
                   D=2,
                   D_pooling=4,
                   dropoutRate=0.25,
                   activation='elu',
                   l1_reg=0.000, l2_reg=0.000,
                   return_model=None):
    # Spatial-filter-like.
    # Applies D different filters along channels domain for each F1 (temporal filter above).
    # As our channels-domain filter is the number of channels, and we use padding='valid',
    # the channels domain is reduced from n_channels to 1.
    # (num_chans, num_samples, F1) --> (1, num_samples, F1*D)
    depth_regu = tf.keras.regularizers.l1_l2(l1=l1_reg, l2=l2_reg)
    _y = layers.DepthwiseConv2D((_input.shape.as_list()[1], 1),
                                padding='valid',
                                # use_bias=False,
                                depth_multiplier=D,
                                depthwise_regularizer=depth_regu,
                                depthwise_constraint=constraints.max_norm(1.))(_input)
    _y = layers.BatchNormalization()(_y)
    _y = layers.Activation(activation)(_y)

    # Smooth in the time-dimension
    # (1, num_samples, F1 * D) --> (1, num_samples // D_pooling, F1 * D)
    _y = layers.AveragePooling2D((1, D_pooling))(_y)
    _y = layers.Dropout(dropoutRate)(_y)

    if return_model is False:
        return _y
    else:
        return models.Model(inputs=_input, outputs=_y)
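A minimal sketch of chaining the block above onto a (num_chans, num_samples, F1) tensor, matching the shape comments (all values illustrative, not from the original code):

inp = layers.Input(shape=(22, 256, 8))                  # (num_chans, num_samples, F1)
enc = EEGNetEncPartB(inp, D=2, D_pooling=4, return_model=True)
enc.summary()                                           # final feature map: (1, 256 // 4, 8 * 2)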
Example #29
def DepthwiseConv2DBlock(_input,
                         depth_multiplier=4,
                         depth_pooling=1,
                         dropout_rate=0.25, dropout_type='Dropout',
                         activation='elu',
                         return_model=None):
    if dropout_type == 'SpatialDropout2D':
        dropout_type = layers.SpatialDropout2D
    elif dropout_type == 'Dropout':
        dropout_type = layers.Dropout
    else:
        raise ValueError('dropoutType must be one of SpatialDropout2D '
                         'or Dropout, passed as a string.')

    # The kernel spans spatial axis 1 (the channels dimension), not the batch dimension.
    _y = layers.DepthwiseConv2D((_input.shape.as_list()[1], 1), use_bias=False,
                                depth_multiplier=depth_multiplier,
                                depthwise_constraint=constraints.max_norm(1.))(_input)
    _y = layers.BatchNormalization()(_y)
    _y = layers.Activation(activation)(_y)
    if depth_pooling > 1:
        _y = layers.AveragePooling2D((1, depth_pooling))(_y)
    _y = dropout_type(dropout_rate)(_y)

    if return_model is False:
        return _y
    else:
        return models.Model(inputs=_input, outputs=_y)
Example #30
    def genb(self):
        model = Sequential()
        const = max_norm(1.0)
        init = RandomNormal(stddev=0.02)
        #########   b to a domain
        imgb = Input(shape=self.img_shape)
        imga = Input(shape=self.img_shape)
        noise1 = self.encoder3(imgb)
        noise2 = self.encoder4(imga)
        noise = Concatenate(axis=1)([noise1, noise2])
        dep = 16
        noise = Dense(256 * dep * dep,
                      kernel_initializer=init,
                      kernel_constraint=const)(noise)
        noise = Reshape((dep, dep, 256))(noise)

        model.add(
            Conv2DTranspose(256,
                            kernel_size=3,
                            strides=2,
                            padding="same",
                            kernel_initializer=init,
                            kernel_constraint=const))
        model.add(Activation("relu"))
        model.add(BatchNormalization(momentum=0.8))

        model.add(
            Conv2DTranspose(128,
                            kernel_size=3,
                            padding="same",
                            strides=2,
                            kernel_initializer=init,
                            kernel_constraint=const))
        model.add(Activation("relu"))
        model.add(BatchNormalization(momentum=0.8))

        model.add(
            Conv2DTranspose(64,
                            kernel_size=7,
                            padding="same",
                            strides=1,
                            kernel_initializer=init,
                            kernel_constraint=const))
        model.add(Activation("relu"))
        model.add(BatchNormalization(momentum=0.8))

        model.add(
            Conv2D(self.channels,
                   kernel_size=7,
                   strides=1,
                   padding="same",
                   kernel_initializer=init,
                   kernel_constraint=const))

        # model.add(Conv2D(self.channels, kernel_size=3, stride=1,padding="same", kernel_initializer=init, kernel_constraint=const))
        model.add(Activation("tanh"))

        image = model(noise)

        return Model([imgb, imga], image)