def one_block_model(self, input_tensor):
        """
        Method to model one CNN. It doesn't compile the model.
        :param input_tensor: tensor to feed the two paths
        :return: output: tensor, the output of the CNN
        """

        # localPath
        loc_path = Conv2D(64, (7, 7), data_format='channels_first', padding='valid', activation='relu', use_bias=True,
                         kernel_regularizer=regularizers.l1_l2(self.l1_rate, self.l2_rate),
                         kernel_constraint=max_norm(2.),
                         bias_constraint=max_norm(2.), kernel_initializer='lecun_uniform', bias_initializer='zeros')(input_tensor)
        loc_path = MaxPooling2D(pool_size=(4, 4), data_format='channels_first', strides=1, padding='valid')(loc_path)
        loc_path = Dropout(self.dropout_rate)(loc_path)
        loc_path = Conv2D(64, (3, 3), data_format='channels_first', padding='valid', activation='relu', use_bias=True,
                          kernel_initializer='lecun_uniform', bias_initializer='zeros',
                          kernel_regularizer=regularizers.l1_l2(self.l1_rate, self.l2_rate), kernel_constraint=max_norm(2.),
                          bias_constraint=max_norm(2.))(loc_path)
        loc_path = MaxPooling2D(pool_size=(2, 2), data_format='channels_first', strides=1, padding='valid')(loc_path)
        loc_path = Dropout(self.dropout_rate)(loc_path)
        # globalPath
        glob_path = Conv2D(160, (13, 13), data_format='channels_first', strides=1, padding='valid', activation='relu', use_bias=True,
                           kernel_initializer='lecun_uniform', bias_initializer='zeros',
                           kernel_regularizer=regularizers.l1_l2(self.l1_rate, self.l2_rate),
                           kernel_constraint=max_norm(2.),
                           bias_constraint=max_norm(2.))(input_tensor)
        glob_path = Dropout(self.dropout_rate)(glob_path)
        # concatenation of the two path
        path = Concatenate(axis=1)([loc_path, glob_path])
        # output layer
        output = Conv2D(5, (21, 21), data_format='channels_first', strides=1, padding='valid', activation='softmax', use_bias=True,
                        kernel_initializer='lecun_uniform', bias_initializer='zeros')(path)
        return output
Example 2
def test_dense():
    layer_test(layers.Dense,
               kwargs={'units': 3},
               input_shape=(3, 2))

    layer_test(layers.Dense,
               kwargs={'units': 3},
               input_shape=(3, 4, 2))

    layer_test(layers.Dense,
               kwargs={'units': 3},
               input_shape=(None, None, 2))

    layer_test(layers.Dense,
               kwargs={'units': 3},
               input_shape=(3, 4, 5, 2))

    layer_test(layers.Dense,
               kwargs={'units': 3,
                       'kernel_regularizer': regularizers.l2(0.01),
                       'bias_regularizer': regularizers.l1(0.01),
                       'activity_regularizer': regularizers.L1L2(l1=0.01, l2=0.01),
                       'kernel_constraint': constraints.MaxNorm(1),
                       'bias_constraint': constraints.max_norm(1)},
               input_shape=(3, 2))

    layer = layers.Dense(3,
                         kernel_regularizer=regularizers.l1(0.01),
                         bias_regularizer='l1')
    layer.build((None, 4))
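    # one loss comes from the kernel regularizer, one from the bias regularizer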
    assert len(layer.losses) == 2
Example 3
def test_max_norm():
    array = get_example_array()
    for m in get_test_values():
        norm_instance = constraints.max_norm(m)
        normed = norm_instance(K.variable(array))
        assert(np.all(K.eval(normed) < m))

    # a more explicit example
    norm_instance = constraints.max_norm(2.0)
    x = np.array([[0, 0, 0], [1.0, 0, 0], [3, 0, 0], [3, 3, 3]]).T
    x_normed_target = np.array([[0, 0, 0], [1.0, 0, 0],
                                [2.0, 0, 0],
                                [2. / np.sqrt(3),
                                 2. / np.sqrt(3),
                                 2. / np.sqrt(3)]]).T
    x_normed_actual = K.eval(norm_instance(K.variable(x)))
    assert_allclose(x_normed_actual, x_normed_target, rtol=1e-05)
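The expected values above follow directly from how max_norm rescales: columns whose L2 norm exceeds the limit are scaled down to exactly that limit, so [3, 0, 0] becomes [2, 0, 0] and [3, 3, 3] (norm 3*sqrt(3)) ends up with 2/sqrt(3) in each entry. A standalone NumPy sketch of the same projection (a hypothetical helper, not part of the test suite):

import numpy as np

def max_norm_project(w, max_value=2.0, axis=0):
    # rescale columns (axis=0) whose L2 norm exceeds max_value down to max_value
    norms = np.sqrt(np.sum(np.square(w), axis=axis, keepdims=True))
    return w * np.clip(norms, 0, max_value) / (norms + 1e-7)

x = np.array([[0, 0, 0], [1.0, 0, 0], [3, 0, 0], [3, 3, 3]]).T
print(max_norm_project(x))  # matches x_normed_target up to the 1e-7 epsilon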
Example 4
def res_first(input_tensor, filters, kernel_size):
    eps = 1.1e-5
    nb_filter1, nb_filter2 = filters
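    # `bias`, `maxnorm` and `dropout_rate` are module-level settings assumed to be defined outside this snippet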
    x = Conv1D(filters=nb_filter1,
               kernel_initializer=initializers.he_normal(seed=1),
               kernel_size=kernel_size,
               padding='same',
               use_bias=bias,
               kernel_constraint=max_norm(maxnorm))(input_tensor)  ##
    x = BatchNormalization(epsilon=eps, axis=-1)(x)
    x = Scale(axis=-1)(x)
    x = Activation('relu')(x)
    x = Dropout(rate=dropout_rate, seed=1)(x)
    x = Conv1D(filters=nb_filter2,
               kernel_initializer=initializers.he_normal(seed=1),
               kernel_size=kernel_size,
               padding='same',
               use_bias=bias,
               kernel_constraint=max_norm(maxnorm))(x)  ##
    x = add([x, input_tensor])
    return x
Example 5
def modelo_CNN2(X_train, y_train, individual):
    """
    Creates a CNN2 model.
    :param X_train: training data
    :param y_train: labels of the training data
    :param individual: dictionary with the model's hyperparameters
    :return: the model and its training history
    """
    warnings.filterwarnings('ignore')
    model = Sequential()
    call = [
        EarlyStopping(monitor='loss', mode='min', patience=15, verbose=1),
    ]
    if individual['filters'] == 0:
        filters = 16
    elif individual['filters'] == 1:
        filters = 32
    else:
        filters = 64

    if individual['kernel_size'] == 0:
        kernel_size = 2
    elif individual['kernel_size'] == 1:
        kernel_size = 3
    elif individual['kernel_size'] == 2:
        kernel_size = 5
    else:
        kernel_size = 11

    for i in range(individual['num_conv']):
        d = 2**i
        model.add(
            Conv1D(filters=filters,
                   kernel_size=kernel_size,
                   activation='relu',
                   input_shape=(X_train.shape[1], 1),
                   padding='causal',
                   dilation_rate=d,
                   kernel_constraint=max_norm(3)))
        model.add(SpatialDropout1D(round(individual['dropout'], 1)))
        if individual['norm'] == 0:
            model.add(BatchNormalization())

    model.add(Flatten())
    model.add(Dense(1))
    model.compile(loss='mse', optimizer='Adam')
    history = model.fit(X_train,
                        y_train,
                        epochs=60,
                        verbose=0,
                        batch_size=filters,
                        callbacks=call)
    return model, history
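A minimal call sketch for the snippet above; the dictionary keys are the ones the function reads, while the concrete values (and the shape of X_train, expected as (samples, timesteps, 1)) are hypothetical:

individual = {'filters': 1,      # -> 32 filters
              'kernel_size': 2,  # -> kernel size 5
              'num_conv': 3,     # three dilated causal Conv1D blocks (dilation 1, 2, 4)
              'dropout': 0.25,
              'norm': 0}         # 0 -> add BatchNormalization after each block
model, history = modelo_CNN2(X_train, y_train, individual)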
Example 6
def ShallowConvNet(model_input, cfg, nb_classes, dropoutRate=0.5):
    # article: EEGNet: a compact convolutional neural network for EEG-based brain–computer interfaces
    # changed as described in the comments
    Chans = cfg.chans
    block1 = Conv2D(40, (1, 25),
                    input_shape=(Chans, Samples, 1),
                    kernel_constraint=max_norm(2.,
                                               axis=(0, 1, 2)))(model_input)
    block1 = Conv2D(40, (Chans, 1),
                    use_bias=False,
                    kernel_constraint=max_norm(2., axis=(0, 1, 2)))(block1)
    block1 = BatchNormalization(axis=-1, epsilon=1e-05, momentum=0.1)(block1)
    block1 = Activation(square)(block1)
    block1 = AveragePooling2D(pool_size=(1, 75), strides=(1, 15))(block1)
    block1 = Activation(log)(block1)
    block1 = Dropout(dropoutRate)(block1)
    flatten = Flatten()(block1)
    dense = Dense(nb_classes, kernel_constraint=max_norm(0.5))(flatten)
    softmax = Activation('softmax')(dense)

    return Model(inputs=model_input, outputs=softmax)
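ShallowConvNet above relies on `square` and `log` activation helpers (plus a module-level `Samples`) that are defined elsewhere; a minimal sketch of how such helpers are commonly written, an assumption rather than code from this source:

from keras import backend as K

def square(x):
    # elementwise square used in place of a conventional nonlinearity
    return K.square(x)

def log(x):
    # clipped log so the activation stays finite after average pooling
    return K.log(K.clip(x, min_value=1e-7, max_value=10000))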
Example 7
def baseline_model():
    # create model
    model = Sequential()

    model.add(
        Dense(20,
              input_dim=4,
              kernel_initializer='glorot_uniform',
              kernel_constraint=max_norm(1.),
              bias_constraint=max_norm(0.4),
              activation='relu'))
    model.add(
        Dense(10,
              kernel_initializer='glorot_uniform',
              kernel_constraint=max_norm(1.),
              bias_constraint=max_norm(0.4),
              activation='relu'))
    model.add(
        Dense(3,
              kernel_initializer='glorot_uniform',
              kernel_constraint=max_norm(1.),
              bias_constraint=max_norm(0.4),
              activation='softmax'))
    # Compile model
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model
Example 8
 def build_branch(inputs,
                  branchName,
                  numCategories,
                  numVggDenseNodes=512,
                  finalAct="softmax"):
     # Add additional layers for each branch
     # DENSE(512) => DENSE(512) => OUTPUT
     x = Dense(numVggDenseNodes,
               activation='relu',
               name=str(branchName + "_dense" + str(numVggDenseNodes) +
                        "_1"),
               kernel_constraint=max_norm(3),
               bias_constraint=max_norm(3))(inputs)
     x = Dropout(rate=0.5)(x)
     x = Dense(numVggDenseNodes,
               activation='relu',
               name=str(branchName + "_dense" + str(numVggDenseNodes) +
                        "_2"),
               kernel_constraint=max_norm(3),
               bias_constraint=max_norm(3))(x)
     x = Dropout(rate=0.5)(x)
     x = Dense(numCategories,
               activation=finalAct,
               name=str(branchName + "_output"),
               kernel_constraint=max_norm(3),
               bias_constraint=max_norm(3))(x)
     return x
Example 9
def EEGNet(model_input,
           cfg,
           nb_classes,
           dropoutRate=0.5,
           kernLength=64,
           F1=8,
           D=2,
           F2=16,
           norm_rate=0.25):
    # article: EEGNet: a compact convolutional neural network for EEG-based brain–computer interfaces
    # changed as described in the comments
    Chans = cfg.chans
    block1 = Conv2D(F1, (1, kernLength),
                    padding='same',
                    input_shape=(Chans, Samples, 1),
                    use_bias=False)(model_input)
    block1 = BatchNormalization()(
        block1)  # not sure about the axis; it was axis=1 before

    block1 = DepthwiseConv2D((Chans, 1),
                             use_bias=False,
                             depth_multiplier=D,
                             depthwise_constraint=max_norm(1.))(block1)
    block1 = BatchNormalization()(block1)
    block1 = Activation('elu')(block1)
    block1 = AveragePooling2D((1, 4))(block1)
    block1 = Dropout(dropoutRate)(block1)

    block2 = SeparableConv2D(F2, (1, 16), use_bias=False,
                             padding='same')(block1)  # it was (1, 16) before
    block2 = BatchNormalization()(block2)
    block2 = Activation('elu')(block2)
    block2 = AveragePooling2D((1, 8))(block2)
    block2 = Dropout(dropoutRate)(block2)

    flatten = Flatten()(block2)
    dense = Dense(nb_classes, kernel_constraint=max_norm(norm_rate))(flatten)
    softmax = Activation('softmax')(dense)

    return Model(inputs=model_input, outputs=softmax)
Example 10
def get_VGG16(n_classes=5,
              dropout_rate_classif=0.5,
              dropout_bbox=0.5,
              N_trainable=19,
              BN=False):
    modelVGG16 = VGG16(include_top=False, weights='imagenet')
    if BN:
        GAP = GlobalAveragePooling2D()(BatchNormalization()(modelVGG16.output))
    else:
        GAP = GlobalAveragePooling2D()(modelVGG16.output)

    if dropout_rate_classif > 0:
        classification = Dense(n_classes,
                               activation='softmax',
                               name='category_output',
                               kernel_constraint=max_norm(1.))(
                                   Dropout(dropout_rate_classif)(GAP))
    else:
        classification = Dense(n_classes,
                               activation='softmax',
                               name='category_output',
                               kernel_constraint=max_norm(1.))(GAP)

    if dropout_bbox > 0:
        bounding_box = Dense(4,
                             name='bounding_box',
                             kernel_constraint=max_norm(2.))(
                                 Dropout(dropout_bbox)(GAP))
    else:
        bounding_box = Dense(4,
                             name='bounding_box',
                             kernel_constraint=max_norm(2.))(GAP)

    model = Model(inputs=modelVGG16.input,
                  outputs=[classification, bounding_box])
    for layer in model.layers[N_trainable:]:
        layer.trainable = True
    for layer in model.layers[:N_trainable]:
        layer.trainable = False
    return model
Example 11
    def build(self, input_shape):
        _, H, W, _ = input_shape

        
        if self.constraint is not None:
            constraint = constraints.max_norm(self.constraint, axis=0)
        else:
            constraint = None
            
        if self.regularizer is not None:
            regularizer = regularizers.l2(self.regularizer)
        else:
            regularizer = None
        
        self.P1_encode = self.add_weight(name='mode1_encode', 
                                          shape=(H, self.h),
                                          initializer='he_normal',
                                          constraint=constraint,
                                          regularizer=regularizer)
        
        self.P2_encode = self.add_weight(name='mode2_encode', 
                                      shape=(W, self.w),
                                      initializer='he_normal',
                                      constraint=constraint,
                                      regularizer=regularizer)
        
        self.P3_encode = self.add_weight(name='mode3_encode', 
                                      shape=(3, self.d),
                                      constraint=constraint,
                                      regularizer=regularizer,
                                      initializer='he_normal')
    
        if self.separate_decoder:
            self.P1_decode = self.add_weight(name='mode1_decode', 
                                              shape=(self.h, H),
                                              initializer='he_normal',
                                              constraint=constraint,
                                              regularizer=regularizer)
            
            self.P2_decode = self.add_weight(name='mode2_decode', 
                                          shape=(self.w, W),
                                          initializer='he_normal',
                                          constraint=constraint,
                                          regularizer=regularizer)
            
            self.P3_decode = self.add_weight(name='mode3_decode', 
                                          shape=(self.d, 3),
                                          constraint=constraint,
                                          regularizer=regularizer,
                                          initializer='he_normal')
        
        super(TensorSensing, self).build(input_shape)  # Be sure to call this at the end
Example 12
    def build_generator(self, n_blocks=5):

        dep = 4
        noise = Input(shape=(self.latent_dim, ))
        l = Input(shape=(1, ))

        label = Flatten()(Embedding(self.classnum, self.latent_dim)(l))

        ### from n to z space
        model_input = multiply([noise, label])

        # weight initialization
        init = RandomNormal(stddev=0.02)
        # weight constraint
        const = max_norm(1.0)

        model_list = list()

        g = Dense(256 * dep * dep,
                  activation="relu",
                  input_dim=self.latent_dim)(model_input)
        g = Reshape((dep, dep, 256))(g)

        g = Conv2D(128, (3, 3),
                   padding='same',
                   kernel_initializer=init,
                   kernel_constraint=const)(g)
        g = PixelNormalization()(g)
        g = LeakyReLU(alpha=0.2)(g)

        g = Conv2D(128, (3, 3),
                   padding='same',
                   kernel_initializer=init,
                   kernel_constraint=const)(g)
        g = PixelNormalization()(g)
        g = LeakyReLU(alpha=0.2)(g)

        out_image = Conv2D(self.channels, (1, 1),
                           padding='same',
                           kernel_initializer=init,
                           kernel_constraint=const)(g)

        model = Model([noise, l], out_image)

        model_list.append([model, model])

        for i in range(1, n_blocks):
            old_model = model_list[i - 1][0]
            models = self.add_g(old_model, i)
            model_list.append(models)

        return model_list
Example 13
    def ScaleBlock(self,
                   input_layer,
                   output_channel,
                   kernel_size,
                   prev_lstm_state=None,
                   name=None):
        strides = 1

        # start_features = 32
        start_features = 8

        in_block = self.InBlock(input_layer,
                                start_features,
                                kernel_size=kernel_size,
                                strides=strides)

        eblock1 = self.EBlock(in_block, kernel_size, strides=strides)

        eblock2 = self.EBlock(eblock1, kernel_size, strides=strides)

        prev_shape = list(int_shape(eblock2))
        new_shape = prev_shape
        features = new_shape[-1]
        new_shape.pop(0)
        new_shape = (1, *new_shape)

        lstm = Reshape(target_shape=new_shape)(eblock2)

        # BUG: can't use initial_state -> https://github.com/keras-team/keras/issues/9761#issuecomment-567915470
        lstm = ConvLSTM2D(features,
                          kernel_size=kernel_size,
                          padding="same",
                          return_state=False,
                          input_shape=new_shape,
                          data_format='channels_last',
                          kernel_constraint=max_norm(3))(lstm)
        lstm_state = None

        dblock2 = self.DBlock(lstm, kernel_size=kernel_size, strides=strides)

        dblock1 = self.DBlock(dblock2,
                              block_to_connect=eblock1,
                              kernel_size=kernel_size,
                              strides=strides)

        out_block = self.OutBlock(dblock1,
                                  output_channel,
                                  block_to_connect=in_block,
                                  kernel_size=kernel_size,
                                  name=f"{name}_output")

        return out_block, lstm_state
Example 14
def build_model(
    vocab_size: int,
    sequence_length: int,
    output_units: Sequence[int],
    embed_dim: int,
    windows: Iterable[int],
    num_pos: int = 0,
    constraint: Optional[float] = None,
    static_embs: bool = False,
    classification: bool = False,
) -> Model:
    """Build CNN model."""
    input_layer_args = InputLayerArgs(
        num_pos=num_pos,
        mask_zero=False,
        embed_dim=embed_dim,
        pos_embed_dim=POS_EMB_DIM,
        vocab_size=vocab_size,
        sequence_len=sequence_length,
        static_embeddings=static_embs,
    )
    inputs, embedding_layer = build_inputs_and_embeddings(input_layer_args)

    pooled_feature_maps = []
    for kernel_size in windows:
        conv_layer = Conv1D(filters=100,
                            kernel_size=kernel_size,
                            activation="relu")(embedding_layer)
        pooled_feature_maps.extend([
            # GlobalAveragePooling1D()(conv_layer),
            GlobalMaxPooling1D()(conv_layer)
        ])
    merged = Concatenate(name=REPRESENTATION_LAYER)(pooled_feature_maps)
    dropout_layer = Dropout(0.5)(merged)

    kernel_constraint = constraint and max_norm(constraint)
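    # the `and` idiom above leaves kernel_constraint as None when no constraint value is given,
    # and otherwise wraps the value in a max_norm instance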
    activation = "softmax" if classification else "sigmoid"
    outputs = [
        Dense(
            output_units[0],
            activation=activation,
            kernel_constraint=kernel_constraint,
            name=OUTPUT_NAME,
        )(dropout_layer)
    ]
    if len(output_units) > 1:
        aux_out = Dense(output_units[1],
                        activation="softmax",
                        name=AUX_OUTPUT_NAME)(dropout_layer)
        outputs.append(aux_out)

    return Model(inputs=inputs, outputs=outputs)
Example 15
    def ScaleBlock(self,
                   input_layer,
                   output_channel,
                   kernel_size,
                   prev_lstm_state=None,
                   name_output=None):
        strides = 1

        start_features = 32

        in_block = self.InBlock(input_layer,
                                start_features,
                                kernel_size=kernel_size,
                                strides=strides)

        eblock1 = self.EBlock(in_block, kernel_size, strides=strides)

        eblock2 = self.EBlock(eblock1, kernel_size, strides=strides)

        if self.use_lstm:
            prev_shape = list(int_shape(eblock2))
            new_shape = prev_shape[1] * prev_shape[2] * prev_shape[3]
            hidden_features = 32
            lstm = Reshape(target_shape=(1, new_shape))(eblock2)
            # output,
            lstm, h, c = LSTM(hidden_features,
                              return_state=True,
                              kernel_constraint=max_norm(3))(lstm,
                                                             prev_lstm_state)
            # lstm output = (batch,hidden_features)
            lstm_state = [h, c]
            # Allow to use the same hidden features at different scale
            lstm = Dense(new_shape)(lstm)
            # Restore the correct shape
            lstm = Reshape(target_shape=(prev_shape[1], prev_shape[2],
                                         prev_shape[3]))(lstm)

        else:
            lstm_state = None
            lstm = eblock2

        dblock2 = self.DBlock(lstm, kernel_size=kernel_size, strides=strides)

        dblock1 = self.DBlock(dblock2, eblock1, kernel_size, strides)

        out_block = self.OutBlock(dblock1,
                                  output_channel,
                                  block_to_connect=in_block,
                                  features=32,
                                  name_output=f"{name_output}_output")

        return out_block, lstm_state
Example 16
def sequential_model(n_channels, n_timepoints):
    model = Sequential()
    model.add(BatchNormalization(input_shape=(n_channels, n_timepoints)))
    model.add(
        LSTM(128,
             dropout=0.5,
             recurrent_constraint=max_norm(2.),
             return_sequences=True))
    model.add(
        LSTM(128,
             dropout=0.5,
             recurrent_constraint=max_norm(2.),
             return_sequences=True))
    model.add(Dropout(0.5))
    model.add(Flatten())
    model.add(Dense(1, activation='sigmoid'))

    model.compile(loss='binary_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])

    return model
Example 17
def big_filters_model(args, n_channels):
    hit_shapes = Input(shape=(IMAGE_SIZE, IMAGE_SIZE, n_channels),
                       name='hit_shape_input')
    infos = Input(shape=(len(dataset.featureLabs), ), name='info_input')

    conv = Conv2D(128, (5, 5),
                  activation='relu',
                  padding='valid',
                  data_format="channels_last",
                  name='conv1')(hit_shapes)

    flat = Flatten()(conv)
    concat = concatenate([flat, infos])

    drop = Dropout(args.dropout)(concat)
    dense = Dense(256,
                  activation='relu',
                  kernel_constraint=max_norm(args.maxnorm),
                  name='dense1')(drop)
    drop = Dropout(args.dropout)(dense)
    dense = Dense(64,
                  activation='relu',
                  kernel_constraint=max_norm(args.maxnorm),
                  name='dense2')(drop)
    drop = Dropout(args.dropout)(dense)
    pred = Dense(2,
                 activation='softmax',
                 kernel_constraint=max_norm(args.maxnorm),
                 name='output')(drop)

    model = Model(inputs=[hit_shapes, infos], outputs=pred)
    my_sgd = optimizers.SGD(lr=args.lr,
                            decay=1e-5,
                            momentum=args.momentum,
                            nesterov=True)
    model.compile(optimizer=my_sgd,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
Example 18
def ShallowConvNet(input_shape):
    """ Keras implementation of the Shallow Convolutional Network as described
    in Schirrmeister et al. (2017), arXiv:1703.05051
    
    Assumes the input is a 2-second EEG signal sampled at 128Hz. Note that in 
    the original paper, they do temporal convolutions of length 25 for EEG
    data sampled at 250Hz. We instead use length 13 since the sampling rate is 
    roughly half of the 250Hz used in the paper. The pool_size and strides
    in later layers are also approximately half of what is used in the paper.
    
                     ours        original paper
    pool_size        1, 35       1, 75
    strides          1, 7        1, 15
    conv filters     1, 13       1, 25
    """

    #    if K.image_data_format() == 'channels_first':
    #        input_shape = (1, Chans, Samples)
    #    else:
    #        input_shape = (Chans, Samples, 1)
    #    print(input_shape)
    # start the model
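    # `n_ch`, `n_samp`, `n_class`, `dropout_rate`, `square` and `safe_log` are module-level names assumed to be defined elsewhere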
    input_EEG = Input(input_shape)
    block1 = Conv2D(10, (1, 25),
                    input_shape=(1, n_ch, n_samp),
                    kernel_constraint=max_norm(2.))(input_EEG)
    block1 = Conv2D(10, (n_ch, 1),
                    use_bias=False,
                    kernel_constraint=max_norm(2.))(block1)
    block1 = BatchNormalization(axis=1, epsilon=1e-05, momentum=0.1)(block1)
    block1 = Activation(square)(block1)
    block1 = AveragePooling2D(pool_size=(1, 30), strides=(1, 10))(block1)
    block1 = Activation(safe_log)(block1)
    block1 = Dropout(dropout_rate)(block1)
    flatten = Flatten()(block1)
    dense = Dense(n_class, kernel_constraint=max_norm(0.5))(flatten)
    softmax = Activation('softmax')(dense)

    return Model(inputs=input_EEG, outputs=softmax)
Example 19
def get_VGG16(n_classes=5,
              dropout_rate_1=0.5,
              dropout_rate_2=0.25,
              N_trainable=19):
    modelVGG16 = VGG16(include_top=False, weights='imagenet')
    GAP = GlobalAveragePooling2D()(modelVGG16.output)
    classification = Dense(n_classes,
                           activation='softmax',
                           name='category_output',
                           kernel_constraint=max_norm(1.))(
                               Dropout(dropout_rate_1)(GAP))
    bounding_box = Dense(4,
                         name='bounding_box',
                         kernel_constraint=max_norm(2.))(
                             Dropout(dropout_rate_2)(GAP))
    model = Model(inputs=modelVGG16.input,
                  outputs=[classification, bounding_box])
    for layer in model.layers[N_trainable:]:
        layer.trainable = True
    for layer in model.layers[:N_trainable]:
        layer.trainable = False
    return model
Example 20
def create_model():
    # create model, insert code here
    model = Sequential()
    model.add(Dense(60 , input_dim=60,  kernel_initializer='normal', activation='relu'))
    model.add(Dropout(0.3))
    model.add(Dense(30, kernel_constraint=max_norm(2.) , kernel_initializer='normal', activation='relu'))
    model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))


    # Compile model
    sgd = SGD(lr=0.1, momentum=0.9, decay=0.0, nesterov=False)
    model.compile(loss='binary_crossentropy', optimizer=sgd, metrics=['accuracy'])
    return model
Example 21
def DeepconvnetOriginConstraint(model_input, cfg, nb_classes=2, dropoutRate=0.5):
    '''
    Based on DeepconvnetOrigin, with constraints added in order to compare the effect of kernel_constraint.
    '''


    chans = len(cfg.elec)

    blk1       = Conv2D(25, (1, 10),kernel_constraint=max_norm(2., axis=(0, 1, 2)))(model_input)
    blk1       = Conv2D(25, (chans, 1),use_bias=False,kernel_constraint=max_norm(2., axis=(0, 1, 2)))(blk1)
    blk1       = BatchNormalization(epsilon=1e-05, momentum=0.1)(blk1)
    blk1       = Activation('elu')(blk1)
    blk1       = MaxPooling2D(pool_size=(1, 3), strides=(1, 2))(blk1)
    blk1       = Dropout(dropoutRate)(blk1)
  
    blk2       = Conv2D(50, (1, 10),use_bias=False,kernel_constraint=max_norm(2., axis=(0, 1, 2)))(blk1)
    blk2       = BatchNormalization(epsilon=1e-05, momentum=0.1)(blk2)
    blk2       = Activation('elu')(blk2)
    blk2       = MaxPooling2D(pool_size=(1, 3), strides=(1, 2))(blk2)
    blk2       = Dropout(dropoutRate)(blk2)
    
    blk3       = Conv2D(100, (1, 10),use_bias=False,kernel_constraint=max_norm(2., axis=(0, 1, 2)))(blk2)
    blk3       = BatchNormalization(epsilon=1e-05, momentum=0.1)(blk3)
    blk3       = Activation('elu')(blk3)
    blk3       = MaxPooling2D(pool_size=(1, 3), strides=(1, 2))(blk3)
    blk3       = Dropout(dropoutRate)(blk3)
    
    blk4       = Conv2D(200, (1, 10),use_bias=False,kernel_constraint=max_norm(2., axis=(0, 1, 2)))(blk3)
    blk4       = BatchNormalization(epsilon=1e-05, momentum=0.1)(blk4)
    blk4       = Activation('elu')(blk4)
    blk4       = MaxPooling2D(pool_size=(1, 3), strides=(1, 2))(blk4)
    blk4       = Dropout(dropoutRate)(blk4)
    
    flatten      = Flatten()(blk4)
    
    dense        = Dense(nb_classes)(flatten)
    softmax      = Activation('softmax')(dense)
    
    return Model(inputs=model_input, outputs=softmax)
Example 22
def DeepConvNet(nb_classes, Chans=64, Samples=256, dropoutRate=0.5):
    """ Keras implementation of the Deep Convolutional Network as described in
    Schirrmeister et al. (2017), Human Brain Mapping.
    
    This implementation assumes the input is a 2-second EEG signal sampled at 
    128Hz, as opposed to signals sampled at 250Hz as described in the original
    paper. We also perform temporal convolutions of length (1, 5) as opposed
    to (1, 10) due to this sampling rate difference. 
    
    Note that we use the max_norm constraint on all convolutional layers, as 
    well as the classification layer. We also change the defaults for the
    BatchNormalization layer. We used this based on a personal communication 
    with the original authors.
    
                      ours        original paper
    pool_size        1, 2        1, 5
    strides          1, 2        1, 5
    conv filters     1, 5        1, 10
    
    Note that this implementation has not been verified by the original 
    authors. 
    
    """

    # start the model
    input_main = Input((1, Chans, Samples))
    block1 = Conv2D(25, (1, 5),
                    input_shape=(1, Chans, Samples),
                    kernel_constraint=max_norm(2.))(input_main)
    block1 = Conv2D(25, (Chans, 1), kernel_constraint=max_norm(2.))(block1)
    block1 = BatchNormalization(axis=1, epsilon=1e-05, momentum=0.1)(block1)
    block1 = Activation('elu')(block1)
    block1 = MaxPooling2D(pool_size=(1, 2), strides=(1, 2))(block1)
    block1 = Dropout(dropoutRate)(block1)

    block2 = Conv2D(50, (1, 5), kernel_constraint=max_norm(2.))(block1)
    block2 = BatchNormalization(axis=1, epsilon=1e-05, momentum=0.1)(block2)
    block2 = Activation('elu')(block2)
    block2 = MaxPooling2D(pool_size=(1, 2), strides=(1, 2))(block2)
    block2 = Dropout(dropoutRate)(block2)

    block3 = Conv2D(100, (1, 5), kernel_constraint=max_norm(2.))(block2)
    block3 = BatchNormalization(axis=1, epsilon=1e-05, momentum=0.1)(block3)
    block3 = Activation('elu')(block3)
    block3 = MaxPooling2D(pool_size=(1, 2), strides=(1, 2))(block3)
    block3 = Dropout(dropoutRate)(block3)

    block4 = Conv2D(200, (1, 5), kernel_constraint=max_norm(2.))(block3)
    block4 = BatchNormalization(axis=1, epsilon=1e-05, momentum=0.1)(block4)
    block4 = Activation('elu')(block4)
    block4 = MaxPooling2D(pool_size=(1, 2), strides=(1, 2))(block4)
    block4 = Dropout(dropoutRate)(block4)

    flatten = Flatten()(block4)

    dense = Dense(nb_classes, kernel_constraint=max_norm(0.5))(flatten)
    softmax = Activation('softmax')(dense)

    return Model(inputs=input_main, outputs=softmax)
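A minimal usage sketch for DeepConvNet; the class count and the compile settings below are illustrative assumptions, not taken from the original source:

model = DeepConvNet(nb_classes=4, Chans=64, Samples=256, dropoutRate=0.5)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()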
Example 23
def train_NN(activ_fcn, X_train, y_train, X_valid, y_valid):

    # Initialize the constructor
    model = Sequential()

    # Add layers
    model.add(Dense(100, activation=activ_fcn, input_shape=(100, )))
    model.add(Dense(100, activation=activ_fcn, kernel_constraint=max_norm(2.)))
    model.add(Dense(10, activation='softmax'))

    # Model summary
    model.summary()

    # Train the NN
    model.compile(loss='categorical_crossentropy',
                  optimizer=adam_decay,
                  metrics=['accuracy'])

    filepath = "weights.best.hdf5"
    checkpoint = ModelCheckpoint(filepath,
                                 monitor='val_acc',
                                 verbose=1,
                                 save_best_only=True,
                                 mode='max')
    callbacks_list = [checkpoint]

    history = model.fit(X_train,
                        y_train,
                        epochs=100,
                        shuffle=True,
                        batch_size=25,
                        verbose=1,
                        validation_data=(X_valid, y_valid),
                        callbacks=callbacks_list)

    # summarize history for accuracy
    plt.plot(history.history['acc'])
    plt.plot(history.history['val_acc'])
    plt.title('model accuracy')
    plt.ylabel('accuracy')
    plt.xlabel('epoch')
    plt.legend(['train', 'validation'], loc='upper left')
    plt.show()
    # summarize history for loss
    plt.plot(history.history['loss'])
    plt.plot(history.history['val_loss'])
    plt.title('model loss')
    plt.ylabel('loss')
    plt.xlabel('epoch')
    plt.legend(['train', 'validation'], loc='upper left')
    plt.show()
Example 24
def res_first(input_tensor, num_filt, kernel_size, random_seed, padding, bias,
              maxnorm, l2_reg, eps, bn_momentum, activation_function,
              dropout_rate, subsam, trainable):
    num_filt1, num_filt2 = num_filt
    x = Conv1D(num_filt1,
               kernel_size=kernel_size,
               kernel_initializer=initializers.he_normal(seed=random_seed),
               padding='same',
               use_bias=bias,
               strides=subsam,
               kernel_constraint=max_norm(maxnorm),
               trainable=trainable,
               kernel_regularizer=l2(l2_reg))(input_tensor)  ##
    x = BatchNormalization(epsilon=eps, momentum=bn_momentum, axis=-1)(x)
    x = Activation(activation_function)(x)
    x = Dropout(rate=dropout_rate, seed=random_seed)(x)
    # x = MaxPooling1D(pool_size=subsam)(x)
    x = Conv1D(
        num_filt2,
        kernel_size=kernel_size,
        kernel_initializer=initializers.he_normal(seed=random_seed),
        padding='same',
        use_bias=bias,
        # strides=subsam,
        kernel_constraint=max_norm(maxnorm),
        trainable=trainable,
        kernel_regularizer=l2(l2_reg))(x)  ##
    short = Conv1D(num_filt2,
                   kernel_size=1,
                   kernel_initializer=initializers.he_normal(seed=random_seed),
                   padding='same',
                   use_bias=bias,
                   kernel_constraint=max_norm(maxnorm),
                   strides=subsam,
                   trainable=trainable,
                   kernel_regularizer=l2(l2_reg))(input_tensor)
    x = add([x, short])
    return x
Example 25
def create_model(x_train, y_train, x_test, y_test):
    
    batch_size = 256
    epochs = 1
    learning_rate = 0.8713270582626444
    momentum = 0.8671876498073315
    decay = 0.0
    early_stop_th = 10**-5
    input_dim = (784,)

    dropout_1 = 0.026079803111884514
    dropout_2 = 0.4844455237320119

    # Stop the training if the accuracy is not moving more than a delta
    # keras.callbacks.History is by default added to all keras model
    # callbacks = [EarlyStopping(monitor='acc', min_delta=early_stop_th, patience=5, verbose=0, mode='auto')]

    # Code up the network
    x_input = Input(input_dim)
    x = Dropout(dropout_1)(x_input)
    x = Dense(1024, activation='relu', name="dense1", kernel_constraint=max_norm({{uniform(0.9, 5)}}))(x)
    x = Dropout(dropout_2)(x)
    x = Dense(1024, activation='relu', name="dense2", kernel_constraint=max_norm({{uniform(0.9, 5)}}))(x)
    predictions = Dense(10, activation='softmax')(x)

    # Optimizer
    sgd = optimizers.SGD(lr=learning_rate, momentum=momentum, decay=0, nesterov=False)


    # Create and train model
    model = Model(inputs=x_input, outputs=predictions)
    model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])
    model.fit(x=x_train, y=y_train, validation_split=0.1, batch_size=batch_size, epochs=epochs, verbose=1)
    metrics = model.evaluate(x=x_test, y=y_test, batch_size=batch_size, verbose=0, sample_weight=None, steps=None)


    accuracy = metrics[1]
    return {'loss': 1-accuracy, 'status': STATUS_OK, 'model': model}
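The {{uniform(0.9, 5)}} placeholders are hyperas search-space templates; a typical driver sketch (the `data` function and the search settings are assumptions, not part of this snippet):

from hyperopt import Trials, tpe
from hyperas import optim

best_run, best_model = optim.minimize(model=create_model,
                                      data=data,  # a function returning x_train, y_train, x_test, y_test
                                      algo=tpe.suggest,
                                      max_evals=10,
                                      trials=Trials())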
Example 26
def model_3_LSTM_advanced2(X_train, Y_train, Var):
    maxnorm = 3.
    batch_size = X_train.shape[0]
    n_frames = X_train.shape[2]
    model = Sequential()
    model.add(Masking(mask_value=Var.mask_value, input_shape=(X_train.shape[1], X_train.shape[2])))
    # model.add(Dropout(0.2, noise_shape=(None, 1, X_train.shape[2])))
    model.add(Dense(Var.Dense_Unit, activation=Var.activationF, kernel_constraint=max_norm(max_value=3.)))
    model.add(Bidirectional(LSTM(Var.hidden_units, return_sequences=True,
                                 kernel_regularizer=regularizers.l2(0.001),
                                 activity_regularizer=regularizers.l1(0.001),
                                 kernel_constraint=max_norm(max_value=3.),
                                 dropout=Var.dropout)))
    model.add(Bidirectional(LSTM(Var.hidden_units, return_sequences=True,
                                 kernel_regularizer=regularizers.l2(0.001),
                                 activity_regularizer=regularizers.l1(0.001),
                                 kernel_constraint=max_norm(max_value=3.),
                                 dropout=Var.dropout)))
    model.add(Dropout(0.5, noise_shape=(None, 1, Var.hidden_units * 2)))
    model.add(Dense(Y_train.shape[-1], activation='softmax', kernel_constraint=max_norm(max_value=3.)))
    model.summary()

    return model
Example 27
def resnet_layer(inputs,
                 prefix,
                 num_filters=16,
                 kernel_size=3,
                 strides=1,
                 activation='relu',
                 batch_normalization=True,
                 conv_first=True,
                 regularizer=None,
                 constraint=None):
    
    
    if regularizer is not None:
        regularizer = regularizers.l2(regularizer)
    
    if constraint is not None:
        constraint = constraints.max_norm(constraint, axis=[0,1,2])
        

    

    x = inputs
    if conv_first:
        x = Conv2D(num_filters,
                  kernel_size=kernel_size,
                  strides=strides,
                  padding='same',
                  kernel_initializer='he_normal',
                  kernel_regularizer=regularizer,
                  kernel_constraint=constraint,
                  name=prefix + '_conv1')(x)
        
        if batch_normalization:
            x = BN(name=prefix + '_BN')(x)
        if activation is not None:
            x = Activation(activation, name=prefix + '_activation')(x)
    else:
        if batch_normalization:
            x = BN(name=prefix + '_BN')(x)
        if activation is not None:
            x = Activation(activation, name=prefix + '_activation')(x)
        x = Conv2D(num_filters,
                   kernel_size=kernel_size,
                   strides=strides,
                   padding='same',
                   kernel_initializer='he_normal',
                   kernel_regularizer=regularizer,
                   kernel_constraint=constraint,
                   name=prefix + '_conv1')(x)
    return x
Example 28
def train_random_model():

    total_length = 21324

    embeddings_random = np.random.rand(total_length, 300)
    embedding_layer = Embedding(input_dim=total_length,
                                output_dim=300,
                                weights=[embeddings_random],
                                trainable=True)

    input_vec = Input(shape=(23, ))
    embedding_out = embedding_layer(input_vec)

    conv1 = Conv1D(100, 3, activation='relu',
                   kernel_constraint=max_norm(3))(embedding_out)
    pool1 = MaxPooling1D(2)(conv1)
    out1 = Flatten()(pool1)

    conv2 = Conv1D(100, 4, activation='relu',
                   kernel_constraint=max_norm(3))(embedding_out)
    pool2 = MaxPooling1D(2)(conv2)
    out2 = Flatten()(pool2)

    conv3 = Conv1D(100, 5, activation='relu',
                   kernel_constraint=max_norm(3))(embedding_out)
    pool3 = MaxPooling1D(2)(conv3)
    out3 = Flatten()(pool3)

    final_out = Concatenate()([out1, out2, out3])

    final_out = Dropout(0.5)(final_out)

    final_out = Dense(1, activation='sigmoid')(final_out)

    model = Model(inputs=input_vec, outputs=final_out)

    return model
Example 29
def model_2():
    input_cords = Input(shape=(30, ))

    x = Dense(1024, kernel_constraint=max_norm(1))(input_cords)
    x = BatchNormalization(trainable=False)(x)
    x = Activation("relu")(x)
    x = Dropout(0.5)(x)

    x = Dense(1024, kernel_constraint=max_norm(1))(x)
    x = BatchNormalization(trainable=False)(x)
    x = Activation("relu")(x)
    x = Dropout(0.5)(x)

    x = Dense(1024, kernel_constraint=max_norm(1))(x)
    x = BatchNormalization(trainable=False)(x)
    x = Activation("relu")(x)
    x = Dropout(0.5)(x)

    x = Concatenate()([input_cords, x])

    x = Dense(1024, kernel_constraint=max_norm(1))(x)
    x = BatchNormalization(trainable=False)(x)
    x = Activation("relu")(x)
    x = Dropout(0.5)(x)

    x = Dense(1024, kernel_constraint=max_norm(1))(x)
    x = BatchNormalization(trainable=False)(x)
    x = Activation("relu")(x)
    x = Dropout(0.5)(x)

    x = Concatenate()([input_cords, x])

    x = Dense(45)(x)
    output_cords = Activation("relu")(x)

    model = Model(inputs=input_cords, outputs=output_cords)
    return model
Example 30
def dense_model(args, n_channels):
    hit_shapes = Input(shape=(IMAGE_SIZE, IMAGE_SIZE, n_channels),
                       name='hit_shape_input')
    infos = Input(shape=(len(dataset.featurelabs), ), name='info_input')
    flat = Flatten()(hit_shapes)
    concat = concatenate([flat, infos])

    b_norm = BatchNormalization()(concat)
    dense = Dense(256,
                  activation='relu',
                  kernel_constraint=max_norm(args.maxnorm),
                  name='dense1')(b_norm)
    drop = Dropout(args.dropout)(dense)
    dense = Dense(128,
                  activation='relu',
                  kernel_constraint=max_norm(args.maxnorm),
                  name='dense2')(drop)
    drop = Dropout(args.dropout)(dense)
    dense = Dense(64,
                  activation='relu',
                  kernel_constraint=max_norm(args.maxnorm),
                  name='dense3')(drop)
    drop = Dropout(args.dropout)(dense)
    pred = Dense(2,
                 activation='softmax',
                 kernel_constraint=max_norm(args.maxnorm),
                 name='output')(drop)

    model = Model(inputs=[hit_shapes, infos], outputs=pred)
    my_sgd = optimizers.SGD(lr=args.lr,
                            decay=1e-4,
                            momentum=args.momentum,
                            nesterov=True)
    model.compile(optimizer=my_sgd,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model
Example 31
def get_VGG16_world(n_classes=5,
                    input_shape=(375, 500, 3),
                    dropout_class=0.5,
                    dropout_confidence=0.5,
                    dropout_bbox=0.5,
                    N_trainable=19,
                    activation_class='softmax',
                    activation_bbox=None,
                    activation_confidence='sigmoid'):
    modelVGG16 = VGG16(include_top=False,
                       weights='imagenet',
                       input_shape=input_shape)
    GAP = GlobalAveragePooling2D()(BatchNormalization()(modelVGG16.output))

    classification = Dense(n_classes,
                           activation=activation_class,
                           name='category_output',
                           kernel_constraint=max_norm(1.))(
                               Dropout(dropout_class)(GAP))
    bounding_box = Dense(4,
                         activation=activation_bbox,
                         name='bounding_box',
                         kernel_constraint=max_norm(2.))(
                             Dropout(dropout_bbox)(GAP))
    confidence = Dense(1,
                       activation=activation_confidence,
                       name='obj_confidence',
                       kernel_constraint=max_norm(2.))(
                           Dropout(dropout_confidence)(GAP))
    all_outs = Concatenate(name='concatenated_outputs')(
        [classification, bounding_box, confidence])
    model = Model(inputs=modelVGG16.input, outputs=[all_outs])
    for layer in model.layers[N_trainable:]:
        layer.trainable = True
    for layer in model.layers[:N_trainable]:
        layer.trainable = False
    return model
Example 32
    def create_model(self):
        hidden_policy = self.states_ph

        for i in range(self.num_layers):
            hidden_policy = tf.layers.batch_normalization(hidden_policy)
            hidden_policy = tf.layers.dense(hidden_policy, self.num_neuron_per_layer,
                                            activation=tf.keras.activations.relu,
                                            kernel_constraint=max_norm(16),
                                            bias_constraint=max_norm(16))

        policy_output = (tf.layers.dense(hidden_policy, self.action_size,
                                         activation=tf.keras.activations.softmax,
                                         kernel_constraint=max_norm(16),
                                         bias_constraint=max_norm(16))
                         + 0.000001)  # for numeric stability

        hidden_value = self.states_ph
        for i in range(self.num_layers):
            hidden_value = tf.layers.batch_normalization(hidden_value)
            hidden_value = tf.layers.dense(hidden_value, self.num_neuron_per_layer,
                                           activation=tf.keras.activations.relu,
                                           kernel_constraint=max_norm(16),
                                           bias_constraint=max_norm(16))

        value_output = tf.squeeze(tf.layers.dense(hidden_value, 1, activation=tf.keras.activations.linear,
                                                  kernel_constraint=max_norm(16),
                                                  bias_constraint=max_norm(16)), 1)

        advantages = self.advantages_ph

        r = policy_output / self.old_policies_ph

        # Lclip
        policy_loss = -tf.minimum(tf.multiply(r, advantages),
                                  tf.multiply(tf.clip_by_value(r, 1 - self.clip_epsilon,
                                                               1 + self.clip_epsilon),
                                              advantages))

        value_loss = tf.reduce_mean(tf.square(value_output - self.accumulated_reward_ph))

        entropy_loss = -policy_output * tf.log(policy_output)

        full_loss = policy_loss + self.c1 * value_loss + self.c2 * entropy_loss

        train_op = tf.train.AdamOptimizer().minimize(full_loss)

        return policy_output, value_output, train_op
Example 33
 def getDiscriminator(self,im_dim,droprate,momentum,alpha):
     in_data = Input(shape=(im_dim,im_dim))
     if len(backend.tensorflow_backend._get_available_gpus()) > 0:
         out = Bidirectional(CuDNNLSTM(im_dim,return_sequences=True,
                                       kernel_constraint=max_norm(3),recurrent_constraint=max_norm(3),
                                       bias_constraint=max_norm(3)))(in_data)
     else:
         out = Bidirectional(LSTM(im_dim,return_sequences=True,
                                  kernel_constraint=max_norm(3),
                                  recurrent_constraint=max_norm(3),bias_constraint=max_norm(3)))(in_data)
     out = Conv1D(im_dim, kernel_size=2, dilation_rate=2, padding="same")(out)
     out = Activation("relu")(out)
     out = Reshape((im_dim,im_dim,1))(out)
     # block 1
     out = Conv2D(256, kernel_size=4, dilation_rate=2)(out)
     out = LeakyReLU(alpha=alpha)(out)
     out = Dropout(droprate)(out)
     # block 2
     out = Conv2D(128, kernel_size=3, dilation_rate=2)(out)
     out = BatchNormalization(momentum=momentum)(out)
     out = LeakyReLU(alpha=alpha)(out)
     out = Dropout(droprate)(out)
     # block 3
     out = Conv2D(64, kernel_size=2, dilation_rate=2)(out)
     out = BatchNormalization(momentum=momentum)(out)
     out = LeakyReLU(alpha=alpha)(out)
     out = Dropout(droprate)(out)
     # block 4
     out = Conv2D(32, kernel_size=2, dilation_rate=2)(out)
     out = BatchNormalization(momentum=momentum)(out)
     out = LeakyReLU(alpha=alpha)(out)
     out = Dropout(droprate)(out)
     # dense output
     out = GlobalMaxPool2D()(out)
     out = Dense(1)(out)
     out = Activation("sigmoid")(out)
     return Model(inputs=in_data,outputs=out)
Example 34
def define_discriminator(n_blocks, input_shape=(4, 4, 3)):
    # weight initialization
    init = RandomNormal(stddev=0.02)
    # weight constraint
    const = max_norm(1.0)
    model_list = list()
    # base model input
    in_image = Input(shape=input_shape)
    # conv 1x1
    d = Conv2D(128, (1, 1),
               padding='same',
               kernel_initializer=init,
               kernel_constraint=const)(in_image)
    d = LeakyReLU(alpha=0.2)(d)
    # conv 3x3 (output block)
    d = MinibatchStdev()(d)
    d = Conv2D(128, (3, 3),
               padding='same',
               kernel_initializer=init,
               kernel_constraint=const)(d)
    d = LeakyReLU(alpha=0.2)(d)
    # conv 4x4
    d = Conv2D(128, (4, 4),
               padding='same',
               kernel_initializer=init,
               kernel_constraint=const)(d)
    d = LeakyReLU(alpha=0.2)(d)
    # dense output layer
    d = Flatten()(d)
    out_class = Dense(1)(d)
    # define model
    model = Model(in_image, out_class)
    # compile model
    model.compile(loss=wasserstein_loss,
                  optimizer=Adam(lr=0.001,
                                 beta_1=0,
                                 beta_2=0.99,
                                 epsilon=10e-8))
    # store model
    model_list.append([model, model])
    # create submodels
    for i in range(1, n_blocks):
        # get prior model without the fade-on
        old_model = model_list[i - 1][0]
        # create new model for next resolution
        models = add_discriminator_block(old_model)
        # store model
        model_list.append(models)
    return model_list
Example 35
    def one_block_model(self, input_tensor):
        """
        Model for the twoPathways CNN.
        It doesn't compile the model.
        It consists of two streams, namely:
        local_path and global_path, joined
        in a final stream named path
        local_path is articulated through:
            1st convolution 64x7x7 + relu
            1st maxpooling  4x4
            1st Dropout with rate: 0.5
            2nd convolution 64x3x3 + relu
            2nd maxpooling 2x2
            2nd dropout with rate: 0.5
        global_path is articulated through:
            convolution 160x13x13 + relu
            dropout with rate: 0.5
        path is articulated through:
            convolution 5x21x21

        :param input_tensor: tensor to feed the two paths
        :return: output: tensor, the output of the CNN
        """

        # localPath
        loc_path = Conv2D(64, (7, 7), padding='valid', activation='relu', use_bias=True,
                          kernel_regularizer=regularizers.l1_l2(self.l1_rate, self.l2_rate),
                          kernel_constraint=max_norm(2.),
                          bias_constraint=max_norm(2.))(input_tensor)
        loc_path = MaxPooling2D(pool_size=(4, 4), strides=1, padding='valid')(loc_path)
        loc_path = Dropout(self.dropout_rate)(loc_path)
        loc_path = Conv2D(64, (3, 3), padding='valid', activation='relu', use_bias=True,
                         kernel_regularizer=regularizers.l1_l2(self.l1_rate, self.l2_rate),
                         kernel_constraint=max_norm(2.),
                         bias_constraint=max_norm(2.))(loc_path)
        loc_path = MaxPooling2D(pool_size=(2, 2), strides=1, padding='valid')(loc_path)
        loc_path = Dropout(self.dropout_rate)(loc_path)
        # globalPath
        glob_path = Conv2D(160, (13, 13), strides=1, padding='valid', activation='relu', use_bias=True,
                           kernel_regularizer=regularizers.l1_l2(self.l1_rate, self.l2_rate),
                           kernel_constraint=max_norm(2.),
                           bias_constraint=max_norm(2.))(input_tensor)
        glob_path = Dropout(self.dropout_rate)(glob_path)
        # concatenation of the two path
        path = Concatenate(axis=-1)([loc_path, glob_path])
        # output layer
        output = Conv2D(5, (21, 21), strides=1, padding='valid', activation='softmax', use_bias=True)(path)
        return output
Example 36
    def twoBlocksDCNN(self):
        """


        :param in_channels: int, number of input channel
        :param in_shape: int, dim of the input image
        :return: Model, TwoPathCNN compiled
        """
        input = Input(shape=(65, 65, 4))
        # localPath
        locPath = Conv2D(64, (7, 7), padding='valid', activation='relu', use_bias=True,
                         kernel_regularizer=regularizers.l1_l2(self.l1_rate, self.l2_rate),
                         kernel_constraint=max_norm(2.),
                         bias_constraint=max_norm(2.))(input)
        locPath = MaxPooling2D(pool_size=(4, 4), strides=1, padding='valid')(locPath)
        locPath = Dropout(self.dropout_rate)(locPath)
        locPath = Conv2D(64, (3, 3), padding='valid', activation='relu', use_bias=True,
                         kernel_regularizer=regularizers.l1_l2(self.l1_rate, self.l2_rate),
                         kernel_constraint=max_norm(2.),
                         bias_constraint=max_norm(2.))(locPath)
        locPath = MaxPooling2D(pool_size=(2, 2), strides=1, padding='valid')(locPath)
        locPath = Dropout(self.dropout_rate)(locPath)
        # globalPath
        globPath = Conv2D(160, (13, 13), strides=1, padding='valid', activation='relu', use_bias=True,
                          kernel_regularizer=regularizers.l1_l2(self.l1_rate, self.l2_rate),
                          kernel_constraint=max_norm(2.),
                          bias_constraint=max_norm(2.))(input)
        globPath = Dropout(self.dropout_rate)(globPath)
        # concatenation of the two path
        path = Concatenate(axis=-1)([locPath, globPath])
        # output layer
        cnn1 = Conv2D(5, (21, 21), padding='valid', activation='softmax', use_bias=True)(path)
        #second CNN
        input_cnn2 = Input(shape=(33, 33, 4))
        conc_input = Concatenate(axis=-1)([input_cnn2, cnn1])
        locPath2 = Conv2D(64, (7, 7), padding='valid', activation='relu', use_bias=True,
                         kernel_regularizer=regularizers.l1_l2(self.l1_rate, self.l2_rate),
                         kernel_constraint=max_norm(2.),
                         bias_constraint=max_norm(2.))(conc_input)
        locPath2 = MaxPooling2D(pool_size=(4, 4), strides=1, padding='valid')(locPath2)
        locPath2 = Dropout(self.dropout_rate)(locPath2)
        locPath2 = Conv2D(64, (3, 3), padding='valid', activation='relu', use_bias=True,
                         kernel_regularizer=regularizers.l1_l2(self.l1_rate, self.l2_rate),
                         kernel_constraint=max_norm(2.),
                         bias_constraint=max_norm(2.))(locPath2)
        locPath2 = MaxPooling2D(pool_size=(2, 2), strides=1, padding='valid')(locPath2)
        locPath2 = Dropout(self.dropout_rate)(locPath2)
        # globalPath
        globPath2 = Conv2D(160, (13, 13), padding='valid', activation='relu', use_bias=True,
                          kernel_regularizer=regularizers.l1_l2(self.l1_rate, self.l2_rate),
                          kernel_constraint=max_norm(2.),
                          bias_constraint=max_norm(2.))(input_cnn2)
        globPath2 = Dropout(self.dropout_rate)(globPath2)
        # concatenation of the two path
        path2 = Concatenate(axis=-1)([locPath2, globPath2])
        # output layer
        output = Conv2D(5, (21, 21), strides=1, padding='valid', activation='softmax', use_bias=True)(path2)
        #compiling model
        model = Model(inputs=[input, input_cnn2], outputs=output)
        sgd = SGD(lr=self.learning_rate, momentum=self.momentum_rate, decay=self.decay_rate, nesterov=False)
        model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
        print('DCNN done!')
        return model
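Shape bookkeeping for twoBlocksDCNN (each stride-1 'valid' convolution shrinks a side by kernel_size - 1, and each stride-1 pooling by pool_size - 1): the 65x65 input becomes 59 -> 56 -> 54 -> 53 along the local path and 53 along the global path, and the first block's closing 21x21 convolution yields a 33x33, 5-channel map, which is exactly why it can be concatenated with the 33x33x4 input_cnn2. The second block then shrinks 33x33 to 21x21 on both paths, so its final 21x21 softmax convolution leaves a 1x1 map with 5 class scores.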