Example #1
def MCNN(trainX1,trainX2,trainY1,valX1,valX2,valY1,input_1,input_2, i,class_weights):
    onehot_secstr = conv.Conv1D(5, 10, kernel_initializer='glorot_normal',kernel_regularizer=l2(0.04), padding='valid', name='0_secstr')(input_1)
    onehot_secstr = Dropout(0.6)(onehot_secstr)
    onehot_secstr = keras.layers.advanced_activations.PReLU(alpha_initializer='zeros', alpha_regularizer=None,alpha_constraint=None, shared_axes=None)(onehot_secstr)
    onehot_secstr = core.Flatten()(onehot_secstr)
    onehot_secstr2 = conv.Conv1D(9, 4, kernel_initializer='glorot_normal',kernel_regularizer=l2(0.02), padding='valid', name='1_secstr')(input_1)
    onehot_secstr2 = Dropout(0.4)(onehot_secstr2)
    onehot_secstr2 = keras.layers.advanced_activations.PReLU(alpha_initializer='zeros', alpha_regularizer=None,alpha_constraint=None, shared_axes=None)(onehot_secstr2)
    onehot_secstr2 = core.Flatten()(onehot_secstr2)
    output_onehot_sec = concatenate([onehot_secstr, onehot_secstr2], axis=-1)
    onehot_x = conv.Conv1D(5, 10, kernel_initializer='glorot_normal',kernel_regularizer=l2(0.04), padding='valid', name='0')(input_2)
    onehot_x = Dropout(0.6)(onehot_x)
    onehot_x = keras.layers.advanced_activations.PReLU(alpha_initializer='zeros', alpha_regularizer=None,alpha_constraint=None, shared_axes=None)(onehot_x)
    onehot_x = core.Flatten()(onehot_x)
    onehot_x2 = conv.Conv1D(9, 4, kernel_initializer='glorot_normal',kernel_regularizer=l2(0.02), padding='valid', name='1')(input_2)
    onehot_x2 = Dropout(0.4)(onehot_x2)
    onehot_x2 = keras.layers.advanced_activations.PReLU(alpha_initializer='zeros', alpha_regularizer=None,alpha_constraint=None, shared_axes=None)(onehot_x2)
    onehot_x2 = core.Flatten()(onehot_x2)
    output_onehot_seq = concatenate([onehot_x, onehot_x2], axis=-1)
    final_output = concatenate([output_onehot_sec, output_onehot_seq])
    dense_out = Dense(100, kernel_initializer='glorot_normal', activation='softplus', name='dense_concat')(final_output)
    out = Dense(2, activation="softmax", kernel_initializer='glorot_normal', name='6')(dense_out)
    ########## Set Net ##########
    cnn = Model(inputs=[input_1,input_2], outputs=out)
    cnn.summary()
    nadam = Nadam(lr=0.001)
    #early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=20, verbose=1, mode='auto')
    cnn.compile(loss='binary_crossentropy', optimizer=nadam, metrics=[keras.metrics.binary_accuracy])  # Nadam
    early_stopping = EarlyStopping(monitor='val_loss', patience=20)
    checkpointer = ModelCheckpoint(filepath='%d-secstr_seq_denseconcat_60perc.h5' % i, verbose=1,save_best_only=True, monitor='val_loss', mode='min')
    fitHistory = cnn.fit([trainX1, trainX2], trainY1, batch_size=256, epochs=500, validation_data=([valX1, valX2], valY1), class_weight=class_weights, callbacks=[checkpointer, early_stopping])
    history_dict = fitHistory.history
    myjson_file = "myhist_dict_secstr_seq_denseconcat_60perc_" + str(i)
    json.dump(history_dict, open(myjson_file, 'w'))
    return cnn, fitHistory
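A hypothetical smoke test for the MCNN builder above. The window lengths and channel counts (41×3 and 33×4) and the dummy arrays are illustrative assumptions, not values from the original project, and the Keras imports used throughout these examples (conv, Dense, l2, and so on) are assumed to be in scope:

import numpy as np
from keras.layers import Input
from keras.utils.np_utils import to_categorical

input_1 = Input(shape=(41, 3))    # assumed secondary-structure window
input_2 = Input(shape=(33, 4))    # assumed one-hot sequence window

trainX1 = np.random.rand(100, 41, 3)
trainX2 = np.random.rand(100, 33, 4)
trainY1 = to_categorical(np.random.randint(2, size=100), 2)
valX1 = np.random.rand(20, 41, 3)
valX2 = np.random.rand(20, 33, 4)
valY1 = to_categorical(np.random.randint(2, size=20), 2)

cnn, hist = MCNN(trainX1, trainX2, trainY1, valX1, valX2, valY1,
                 input_1, input_2, i=0, class_weights={0: 1.0, 1: 1.0})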
Example #2
def model(input):
    x = conv.Conv1D(100,
                    3,
                    activation=config.get('first layer', 'activation'),
                    kernel_initializer=config.get('first layer',
                                                  'kernel_initializer'),
                    kernel_regularizer=l2(
                        config.getfloat('first layer', 'kernel_regularizer')),
                    padding=config.get('first layer', 'padding'),
                    name='Conv1')(input)
    x = Dropout(config.getfloat('first layer', 'dropout'), name='drop1')(x)
    x = BatchNormalization()(x)
    x = MaxPooling1D(pool_size=2, strides=None, padding='valid')(x)

    x = conv.Conv1D(200,
                    7,
                    activation=config.get('second layer', 'activation'),
                    kernel_initializer=config.get('second layer',
                                                  'kernel_initializer'),
                    kernel_regularizer=l2(
                        config.getfloat('second layer', 'kernel_regularizer')),
                    padding=config.get('second layer', 'padding'),
                    name='Conv2')(x)
    x = Dropout(config.getfloat('second layer', 'dropout'), name='drop2')(x)
    x = BatchNormalization()(x)
    x = MaxPooling1D(pool_size=2, strides=None, padding='valid')(x)

    x = GRU(units=64, return_sequences=True)(x)
    x = Dropout(0.2)(x)

    x = GRU(units=64, return_sequences=True)(x)
    x = Dropout(0.2)(x)

    output = core.Flatten()(x)
    output = BatchNormalization()(output)
    output = Dropout(config.getfloat('flatten layer', 'dropout'),
                     name='dropo3')(output)

    output = Dense(config.getint('first dense layer', 'units'),
                   kernel_initializer=config.get('first dense layer',
                                                 'kernel_initializer'),
                   activation=config.get('first dense layer', 'activation'),
                   name='Denseo1')(output)
    output = Dropout(config.getfloat('first dense layer', 'dropout'),
                     name='dropo4')(output)
    output = BatchNormalization()(output)
    out = Dense(2,
                activation="softmax",
                kernel_initializer='glorot_normal',
                name='Denseo2')(output)

    #  ########## Set Cnn ##########
    cnn = Model(inputs=input, outputs=out)
    cnn.summary()
    adam = Adam(lr=0.0005)
    cnn.compile(loss='binary_crossentropy',
                optimizer=adam,
                metrics=[keras.metrics.binary_accuracy])
    return cnn
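The builder above reads every hyperparameter from a ConfigParser-style `config` object. A minimal sketch of an INI file it could parse — the section and option names are taken from the calls above, while the values are placeholders:

import configparser

config = configparser.ConfigParser()
config.read_string("""
[first layer]
activation = relu
kernel_initializer = glorot_normal
kernel_regularizer = 0.01
padding = same
dropout = 0.3

[second layer]
activation = relu
kernel_initializer = glorot_normal
kernel_regularizer = 0.01
padding = same
dropout = 0.4

[flatten layer]
dropout = 0.3

[first dense layer]
units = 128
kernel_initializer = glorot_normal
activation = relu
dropout = 0.5
""")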
Example #3
def test_conv_1d():
    batch_size = 2
    steps = 8
    input_dim = 2
    kernel_size = 3
    filters = 3

    for padding in _convolution_paddings:
        for strides in [1, 2]:
            if padding == 'same' and strides != 1:
                continue
            layer_test(convolutional.Conv1D,
                       kwargs={
                           'filters': filters,
                           'kernel_size': kernel_size,
                           'padding': padding,
                           'strides': strides
                       },
                       input_shape=(batch_size, steps, input_dim))

            layer_test(convolutional.Conv1D,
                       kwargs={
                           'filters': filters,
                           'kernel_size': kernel_size,
                           'padding': padding,
                           'kernel_regularizer': 'l2',
                           'bias_regularizer': 'l2',
                           'activity_regularizer': 'l2',
                           'kernel_constraint': 'max_norm',
                           'bias_constraint': 'max_norm',
                           'strides': strides
                       },
                       input_shape=(batch_size, steps, input_dim))

    # Test dilation
    layer_test(convolutional.Conv1D,
               kwargs={
                   'filters': filters,
                   'kernel_size': kernel_size,
                   'padding': padding,
                   'dilation_rate': 2,
                   'activation': None
               },
               input_shape=(batch_size, steps, input_dim))

    convolutional.Conv1D(filters=filters,
                         kernel_size=kernel_size,
                         padding=padding,
                         input_shape=(input_dim, ))
Example #4
File: CNN.py Project: Jonson-Sun/CL
import keras.layers.convolutional as conv
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten


def build1():
	# 1-D convolution
	inputshape = (40, 100)
	active = 'elu'
	print('# adding layers ++++++++++++++++++++++++++++++++++++++++')
	model = Sequential()
	model.add(conv.Conv1D(filters=20, kernel_size=10, input_shape=inputshape, activation=active))
	model.add(Dropout(0.2) )
	#model.add(conv.Conv1D(filters=20,kernel_size=(5),activation=active))
	#model.add(Dropout(0.2) )
	
	model.add(Flatten() )
	model.add(Dense(4) )
	model.add(Activation('softmax') )
	return model
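A hypothetical smoke test for build1(), using the imports added above; the random data and training settings are illustrative only:

import numpy as np

model = build1()
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
X = np.random.rand(8, 40, 100)                 # matches inputshape = (40, 100)
y = np.eye(4)[np.random.randint(4, size=8)]    # one-hot labels for the 4-way softmax
model.fit(X, y, epochs=1, batch_size=4)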
Example #5
class LayerCorrectnessTest(test_combinations.TestCase):
    def setUp(self):
        super(LayerCorrectnessTest, self).setUp()
        # Set two virtual CPUs to test MirroredStrategy with multiple devices
        cpus = tf.config.list_physical_devices('CPU')
        tf.config.set_logical_device_configuration(cpus[0], [
            tf.config.LogicalDeviceConfiguration(),
            tf.config.LogicalDeviceConfiguration(),
        ])

    def _create_model_from_layer(self, layer, input_shapes):
        inputs = [layers.Input(batch_input_shape=s) for s in input_shapes]
        if len(inputs) == 1:
            inputs = inputs[0]
        y = layer(inputs)
        model = models.Model(inputs, y)
        model.compile('sgd', 'mse')
        return model

    @parameterized.named_parameters(
        ('LeakyReLU', activation.LeakyReLU, (2, 2)),
        ('PReLU', activation.PReLU, (2, 2)), ('ELU', activation.ELU, (2, 2)),
        ('ThresholdedReLU', activation.ThresholdedReLU,
         (2, 2)), ('Softmax', activation.Softmax,
                   (2, 2)), ('ReLU', activation.ReLU, (2, 2)),
        ('Conv1D', lambda: convolutional.Conv1D(2, 2), (2, 2, 1)),
        ('Conv2D', lambda: convolutional.Conv2D(2, 2), (2, 2, 2, 1)),
        ('Conv3D', lambda: convolutional.Conv3D(2, 2), (2, 2, 2, 2, 1)),
        ('Conv2DTranspose', lambda: convolutional.Conv2DTranspose(2, 2),
         (2, 2, 2, 2)),
        ('SeparableConv2D', lambda: convolutional.SeparableConv2D(2, 2),
         (2, 2, 2, 1)),
        ('DepthwiseConv2D', lambda: convolutional.DepthwiseConv2D(2, 2),
         (2, 2, 2, 1)), ('UpSampling2D', reshaping.UpSampling2D, (2, 2, 2, 1)),
        ('ZeroPadding2D', reshaping.ZeroPadding2D,
         (2, 2, 2, 1)), ('Cropping2D', reshaping.Cropping2D, (2, 3, 3, 1)),
        ('ConvLSTM2D', lambda: conv_lstm2d.ConvLSTM2D(4, kernel_size=(2, 2)),
         (4, 4, 4, 4, 4)), ('Dense', lambda: core.Dense(2), (2, 2)),
        ('Dropout', lambda: regularization.Dropout(0.5), (2, 2)),
        ('SpatialDropout2D', lambda: regularization.SpatialDropout2D(0.5),
         (2, 2, 2, 2)), ('Activation', lambda: core.Activation('sigmoid'),
                         (2, 2)), ('Reshape', lambda: reshaping.Reshape(
                             (1, 4, 1)), (2, 2, 2)),
        ('Permute', lambda: reshaping.Permute(
            (2, 1)), (2, 2, 2)), ('Attention', attention.Attention, [
                (2, 2, 3), (2, 3, 3), (2, 3, 3)
            ]), ('AdditiveAttention', attention.AdditiveAttention, [
                (2, 2, 3), (2, 3, 3), (2, 3, 3)
            ]), ('Embedding', lambda: core.Embedding(4, 4),
                 (2, 4), 2e-3, 2e-3, np.random.randint(4, size=(2, 4))),
        ('LocallyConnected1D',
         lambda: locally_connected.LocallyConnected1D(2, 2),
         (2, 2, 1)), ('LocallyConnected2D',
                      lambda: locally_connected.LocallyConnected2D(2, 2),
                      (2, 2, 2, 1)), ('Add', merging.Add, [(2, 2), (2, 2)]),
        ('Subtract', merging.Subtract, [(2, 2), (2, 2)]),
        ('Multiply', merging.Multiply, [
            (2, 2), (2, 2)
        ]), ('Average', merging.Average, [(2, 2), (2, 2)]),
        ('Maximum', merging.Maximum, [
            (2, 2), (2, 2)
        ]), ('Minimum', merging.Minimum, [
            (2, 2), (2, 2)
        ]), ('Concatenate', merging.Concatenate, [
            (2, 2), (2, 2)
        ]), ('Dot', lambda: merging.Dot(1), [(2, 2), (2, 2)]),
        ('GaussianNoise', lambda: regularization.GaussianNoise(0.5), (2, 2)),
        ('GaussianDropout', lambda: regularization.GaussianDropout(0.5),
         (2, 2)), ('AlphaDropout', lambda: regularization.AlphaDropout(0.5),
                   (2, 2)),
        ('BatchNormalization', batch_normalization.BatchNormalization,
         (2, 2), 1e-2, 1e-2),
        ('LayerNormalization', layer_normalization.LayerNormalization,
         (2, 2)), ('LayerNormalizationUnfused',
                   lambda: layer_normalization.LayerNormalization(axis=1),
                   (2, 2, 2)), ('MaxPooling2D', pooling.MaxPooling2D,
                                (2, 2, 2, 1)),
        ('AveragePooling2D', pooling.AveragePooling2D,
         (2, 2, 2, 1)), ('GlobalMaxPooling2D', pooling.GlobalMaxPooling2D,
                         (2, 2, 2, 1)),
        ('GlobalAveragePooling2D', pooling.GlobalAveragePooling2D,
         (2, 2, 2, 1)), ('SimpleRNN', lambda: simple_rnn.SimpleRNN(units=4),
                         (4, 4, 4), 1e-2, 1e-2),
        ('SimpleRNN_stateful',
         lambda: simple_rnn.SimpleRNN(units=4, stateful=True), (4, 4, 4), 1e-2,
         1e-2), ('GRU', lambda: gru_v1.GRU(units=4),
                 (4, 4, 4)), ('LSTM', lambda: lstm_v1.LSTM(units=4),
                              (4, 4, 4)), ('GRUV2', lambda: gru.GRU(units=4),
                                           (4, 4, 4)),
        ('GRUV2_stateful', lambda: gru.GRU(units=4, stateful=True),
         (4, 4, 4)), ('LSTMV2', lambda: lstm.LSTM(units=4), (4, 4, 4)),
        ('LSTMV2_stateful', lambda: lstm.LSTM(units=4, stateful=True),
         (4, 4, 4)), ('TimeDistributed',
                      lambda: time_distributed.TimeDistributed(core.Dense(2)),
                      (2, 2, 2)),
        ('Bidirectional',
         lambda: bidirectional.Bidirectional(simple_rnn.SimpleRNN(units=4)),
         (2, 2, 2)),
        ('AttentionLayerCausal', lambda: attention.Attention(causal=True), [
            (2, 2, 3), (2, 3, 3), (2, 3, 3)
        ]), ('AdditiveAttentionLayerCausal',
             lambda: attention.AdditiveAttention(causal=True), [
                 (2, 3, 4), (2, 3, 4), (2, 3, 4)
             ]), ('NormalizationAdapt', _create_normalization_layer_with_adapt,
                  (4, 4)),
        ('NormalizationNoAdapt', _create_normalization_layer_without_adapt,
         (4, 4)), ('Resizing', lambda: image_preprocessing.Resizing(3, 3),
                   (2, 5, 5, 1)),
        ('Rescaling', lambda: image_preprocessing.Rescaling(2., 1.),
         (6, 6)), ('CenterCrop', lambda: image_preprocessing.CenterCrop(3, 3),
                   (2, 5, 5, 1)))
    def test_layer(self,
                   f32_layer_fn,
                   input_shape,
                   rtol=2e-3,
                   atol=2e-3,
                   input_data=None):
        """Tests a layer by comparing the float32 and mixed precision weights.

    A float32 layer, a mixed precision layer, and a distributed mixed precision
    layer are run. The three layers are identical other than their dtypes and
    distribution strategies. The outputs after predict() and weights after fit()
    are asserted to be close.

    Args:
      f32_layer_fn: A function returning a float32 layer. The other two layers
        will automatically be created from this
      input_shape: The shape of the input to the layer, including the batch
        dimension. Or a list of shapes if the layer takes multiple inputs.
      rtol: The relative tolerance to be asserted.
      atol: The absolute tolerance to be asserted.
      input_data: A Numpy array with the data of the input. If None, input data
        will be randomly generated
    """

        if (f32_layer_fn == reshaping.ZeroPadding2D
                and tf.test.is_built_with_rocm()):
            return
        if isinstance(input_shape[0], int):
            input_shapes = [input_shape]
        else:
            input_shapes = input_shape
        strategy = create_mirrored_strategy()
        f32_layer = f32_layer_fn()

        # Create the layers
        assert f32_layer.dtype == f32_layer._compute_dtype == 'float32'
        config = f32_layer.get_config()
        config['dtype'] = policy.Policy('mixed_float16')
        mp_layer = f32_layer.__class__.from_config(config)
        distributed_mp_layer = f32_layer.__class__.from_config(config)

        # Compute per_replica_input_shapes for the distributed model
        global_batch_size = input_shapes[0][0]
        assert global_batch_size % strategy.num_replicas_in_sync == 0, (
            'The number of replicas, %d, does not divide the global batch size of '
            '%d' % (strategy.num_replicas_in_sync, global_batch_size))
        per_replica_batch_size = (global_batch_size //
                                  strategy.num_replicas_in_sync)
        per_replica_input_shapes = [(per_replica_batch_size, ) + s[1:]
                                    for s in input_shapes]

        # Create the models
        f32_model = self._create_model_from_layer(f32_layer, input_shapes)
        mp_model = self._create_model_from_layer(mp_layer, input_shapes)
        with strategy.scope():
            distributed_mp_model = self._create_model_from_layer(
                distributed_mp_layer, per_replica_input_shapes)

        # Set all model weights to the same values
        f32_weights = f32_model.get_weights()
        mp_model.set_weights(f32_weights)
        distributed_mp_model.set_weights(f32_weights)

        # Generate input data
        if input_data is None:
            # Cast inputs to float16 to avoid measuring error from having f16 layers
            # cast to float16.
            input_data = [
                np.random.normal(size=s).astype('float16')
                for s in input_shapes
            ]
            if len(input_data) == 1:
                input_data = input_data[0]

        # Assert all models have close outputs.
        f32_output = f32_model.predict(input_data)
        mp_output = mp_model.predict(input_data)
        self.assertAllClose(mp_output, f32_output, rtol=rtol, atol=atol)
        self.assertAllClose(distributed_mp_model.predict(input_data),
                            f32_output,
                            rtol=rtol,
                            atol=atol)

        # Run fit() on models
        output = np.random.normal(
            size=f32_model.outputs[0].shape).astype('float16')
        for model in f32_model, mp_model, distributed_mp_model:
            model.fit(input_data, output, batch_size=global_batch_size)

        # Assert all models have close weights
        f32_weights = f32_model.get_weights()
        self.assertAllClose(mp_model.get_weights(),
                            f32_weights,
                            rtol=rtol,
                            atol=atol)
        self.assertAllClose(distributed_mp_model.get_weights(),
                            f32_weights,
                            rtol=rtol,
                            atol=atol)
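The test relies on a create_mirrored_strategy() helper that is not part of this excerpt. A minimal sketch, assuming it simply distributes over the two virtual CPUs configured in setUp (the helper in the actual Keras test suite may differ):

def create_mirrored_strategy():
    # Mirror across the two logical CPU devices registered in setUp().
    return tf.distribute.MirroredStrategy(['cpu:0', 'cpu:1'])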
Example #6
def OOKCNN(trainX,
           trainY,
           nb_epoch,
           earlystop=None,
           compiletimes=0,
           compilemodels=None,
           batch_size=2048,
           class_weights={
               0: 1,
               1: 1
           },
           predict=False):
    #Set Oneofkey Network Size and Data
    input_row = trainX.shape[2]
    input_col = trainX.shape[3]
    trainX_t = trainX
    # Early_stop
    if (earlystop is not None):
        early_stopping = EarlyStopping(monitor='val_loss',
                                       mode='min',
                                       patience=earlystop)
    # set to a very big value since earlystop used
    nb_epoch = nb_epoch
    # TrainX_t For Shape
    trainX_t.shape = (trainX_t.shape[0], input_row, input_col)
    input = Input(shape=(input_row, input_col))

    # params = {'dropout1': 0.09055921027754717, 'dropout2': 0.6391239298866936, 'dropout3': 0.4494981811340072,
    #  'dropout4': 0.13858850326177857, 'dropout5': 0.37168935754516325, 'layer1_node': 21.380001953812567,
    #  'layer1_size': 1, 'layer2_node': 42.3937544103545, 'layer2_size': 16, 'layer3_node': 184.87943202539697,
    #  'layer4_node': 61.85302597240724, 'layer5_node': 415.9952475249118, 'nb_epoch': 178, 'windowSize': 16}

    # layer1_node = int(params["layer1_node"])
    # layer2_node = int(params["layer2_node"])
    # layer3_node = int(params["layer3_node"])
    # layer4_node = int(params["layer4_node"])
    # layer5_node = int(params["layer5_node"])
    # layer1_size = params["layer1_size"]
    # layer2_size = params["layer2_size"]
    # dropout1 = params["dropout1"]
    # dropout2 = params["dropout2"]
    # dropout3 = params["dropout3"]
    # dropout4 = params["dropout4"]
    # dropout5 = params["dropout5"]

    if compiletimes == 0:
        # Total Set Classes
        nb_classes = 2
        # Total Set Batch_size
        batch_size = 8192
        # Total Set Optimizer
        # optimizer = SGD(lr=0.0001, momentum=0.9, nesterov= True)
        optimization = 'Nadam'
        #begin of Oneofkey Network

        # x = conv.Conv1D(layer1_node, layer1_size, name="layer1", kernel_initializer="glorot_normal",
        #                 kernel_regularizer=l2(0), padding="same")(input)
        # x = Dropout(dropout1)(input)
        # x = Activation('softsign')(x)
        #
        # x = conv.Conv1D(layer2_node, layer2_size, name="layer2", kernel_initializer="glorot_normal",
        #                 kernel_regularizer=l2(0), padding="same")(x)
        # x = Dropout(dropout2)(x)
        # x = Activation('softsign')(x)
        #
        # output_x = core.Flatten()(x)
        # output = BatchNormalization()(output_x)
        # output = Dropout(dropout3)(output)
        #
        # # attention_probs = Dense(1155, activation='softmax', name='attention_probs')(output)
        # # attention_mul = Multiply()([output, attention_probs])
        #
        # output = Dense(layer3_node, kernel_initializer='glorot_normal', activation='relu', name='layer3')(output)
        # output = Dropout(dropout4)(output)
        # output = Dense(layer4_node, kernel_initializer='glorot_normal', activation="relu", name='layer4')(output)
        # output = Dropout(dropout5)(output)
        # output = Dense(layer5_node, kernel_initializer='glorot_normal', activation="relu", name='layer5')(output)
        # End of Oneofkey Network
        # out = Dense(nb_classes, kernel_initializer='glorot_normal', activation='softmax', kernel_regularizer=l2(0.001),
        #             name='7')(output)
        #
        # cnn = Model(input, out)
        # cnn.compile(loss=keras.losses.binary_crossentropy, optimizer=optimization, metrics=[keras.metrics.binary_accuracy])
        x = conv.Conv1D(51,
                        2,
                        name="0",
                        kernel_initializer="glorot_normal",
                        kernel_regularizer=l2(0),
                        padding="same")(input)
        x = Dropout(0.3)(x)
        x = Activation('softsign')(x)

        x = conv.Conv1D(21,
                        3,
                        name="1",
                        kernel_initializer="glorot_normal",
                        kernel_regularizer=l2(0),
                        padding="same")(x)
        x = Dropout(0.4)(x)
        x = Activation('softsign')(x)

        # x = conv.Conv1D(21, 5, name="2", kernel_initializer="glorot_normal", kernel_regularizer=l2(0), padding="same")(x)
        # x = Dropout(0.4)(x)
        # x = Activation('softsign')(x)
        #
        # x = conv.Conv1D(101, 7, name="3", kernel_initializer="glorot_normal", kernel_regularizer=l2(0), padding="same")(x)
        # x = Activation('softsign')(x)
        # # x_reshape = core.Reshape((x._keras_shape[2], x._keras_shape[1]))(x)
        # x = Dropout(0.4)(x)

        output_x = core.Flatten()(x)
        output = BatchNormalization()(output_x)
        output = Dropout(0.3)(output)

        # attention_probs = Dense(1155, activation='softmax', name='attention_probs')(output)
        # attention_mul = Multiply()([output, attention_probs])

        output = Dense(128,
                       kernel_initializer='glorot_normal',
                       activation='relu',
                       name='4')(output)
        output = Dropout(0.2)(output)
        output = Dense(64,
                       kernel_initializer='glorot_normal',
                       activation="relu",
                       name='5')(output)
        output = Dropout(0.2)(output)
        output = Dense(415,
                       kernel_initializer='glorot_normal',
                       activation="relu",
                       name='6')(output)
        # End of Oneofkey Network
        out = Dense(nb_classes,
                    kernel_initializer='glorot_normal',
                    activation='softmax',
                    kernel_regularizer=l2(0.001),
                    name='7')(output)

        cnn = Model(input, out)
        cnn.compile(loss=keras.losses.binary_crossentropy,
                    optimizer=optimization,
                    metrics=[keras.metrics.binary_accuracy])
    else:
        cnn = compilemodels

    oneofkclass_weights = class_weights

    if (predict is False):
        if (trainY is not None):
            if (earlystop is None):
                fitHistory = cnn.fit(trainX_t,
                                     trainY,
                                     batch_size=batch_size,
                                     epochs=nb_epoch)
            else:
                # checkpointer = ModelCheckpoint(filepath='oneofk.h5', verbose=1, save_best_only=True)
                weight_checkpointer = ModelCheckpoint(
                    filepath='oneofkweight9.h5',
                    verbose=0,
                    save_best_only=True,
                    monitor='val_binary_accuracy',
                    mode='max',
                    save_weights_only=True)
                fitHistory = cnn.fit(
                    trainX_t,
                    trainY,
                    batch_size=batch_size,
                    epochs=nb_epoch,
                    shuffle=True,
                    validation_split=0.2,
                    callbacks=[early_stopping, weight_checkpointer],
                    class_weight=oneofkclass_weights,
                    verbose=0)
        else:
            fitHistory = cnn.fit(trainX_t,
                                 trainY,
                                 batch_size=batch_size,
                                 epochs=nb_epoch)
    return cnn
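A hypothetical call to OOKCNN. The 4-D input layout (samples, 1, rows, cols) is inferred from the shape[2]/shape[3] indexing at the top of the function; the window size, channel count, and data are made up:

import numpy as np
from keras.utils.np_utils import to_categorical

trainX = np.random.rand(256, 1, 33, 21)   # (samples, 1, rows, cols)
trainY = to_categorical(np.random.randint(2, size=256), 2)

model = OOKCNN(trainX, trainY, nb_epoch=3, earlystop=2, compiletimes=0)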
Example #7
def MCNN_best(trainX1, trainX2, trainY1, valX1, valX2, valY1, input_1, input_2,
              i, class_weights, t):
    if (t == 0):
        print("####################################bootstrap iteration ", t,
              "#############fold iteration ", i, "\n")
        onehot_secstr = conv.Conv1D(
            5,
            10,
            kernel_initializer='glorot_normal',
            kernel_regularizer=l2(0.0010949235793883667),
            padding='valid',
            name='0_secstr')(input_1)
        onehot_secstr = Dropout(0.745150134528914)(onehot_secstr)
        onehot_secstr = keras.layers.advanced_activations.PReLU(
            alpha_initializer='zeros',
            alpha_regularizer=None,
            alpha_constraint=None,
            shared_axes=None)(onehot_secstr)
        onehot_secstr = core.Flatten()(onehot_secstr)
        onehot_secstr2 = conv.Conv1D(
            9,
            4,
            kernel_initializer='glorot_normal',
            kernel_regularizer=l2(0.03405758144816304),
            padding='valid',
            name='1_secstr')(input_1)
        onehot_secstr2 = Dropout(0.36965944352568686)(onehot_secstr2)
        onehot_secstr2 = keras.layers.advanced_activations.PReLU(
            alpha_initializer='zeros',
            alpha_regularizer=None,
            alpha_constraint=None,
            shared_axes=None)(onehot_secstr2)
        onehot_secstr2 = core.Flatten()(onehot_secstr2)
        output_onehot_sec = concatenate([onehot_secstr, onehot_secstr2],
                                        axis=-1)
        onehot_x = conv.Conv1D(5,
                               10,
                               kernel_initializer='glorot_normal',
                               kernel_regularizer=l2(0.03217477728270726),
                               padding='valid',
                               name='0')(input_2)
        onehot_x = Dropout(0.6653716368558287)(onehot_x)
        onehot_x = keras.layers.advanced_activations.PReLU(
            alpha_initializer='zeros',
            alpha_regularizer=None,
            alpha_constraint=None,
            shared_axes=None)(onehot_x)
        onehot_x = core.Flatten()(onehot_x)
        onehot_x2 = conv.Conv1D(9,
                                4,
                                kernel_initializer='glorot_normal',
                                kernel_regularizer=l2(0.01608962762003551),
                                padding='valid',
                                name='1')(input_2)
        onehot_x2 = Dropout(0.038045356303735206)(onehot_x2)
        onehot_x2 = keras.layers.advanced_activations.PReLU(
            alpha_initializer='zeros',
            alpha_regularizer=None,
            alpha_constraint=None,
            shared_axes=None)(onehot_x2)
        onehot_x2 = core.Flatten()(onehot_x2)
        output_onehot_seq = concatenate([onehot_x, onehot_x2], axis=-1)
        final_output = concatenate([output_onehot_sec, output_onehot_seq])
        dense_out = Dense(512,
                          kernel_initializer='glorot_normal',
                          activation='softplus',
                          name='dense_concat')(final_output)
        out = Dense(2,
                    activation="softmax",
                    kernel_initializer='glorot_normal',
                    name='6')(dense_out)
        ########## Set Net ##########
        cnn = Model(inputs=[input_1, input_2], outputs=out)
        cnn.load_weights('weightsfile_hyperasbest.h5')
        cnn.summary()
        adam = Adam(lr=0.08582474007227135)
        nadam = Nadam(lr=0.0014045290291504406)
        rmsprop = RMSprop(lr=0.037289952982092284)
        sgd = SGD(lr=0.01373965388919854)
        optim = sgd
        ##        choiceval = {{choice(['adam', 'sgd', 'rmsprop','nadam'])}}
        ##        if choiceval == 'adam':
        ##            optim = adam
        ##        elif choiceval == 'rmsprop':
        ##            optim = rmsprop
        ##        elif choiceval=='nadam':
        ##            optim = nadam
        ##        else:
        ##            optim = sgd
        cnn.compile(loss='binary_crossentropy',
                    optimizer=optim,
                    metrics=[keras.metrics.binary_accuracy])
        early_stopping = EarlyStopping(monitor='val_loss', patience=20)
        checkpointer = ModelCheckpoint(
            filepath='%d-secstr_seq_denseconcat_60perc_best.h5' % i,
            verbose=1,
            save_best_only=True,
            monitor='val_loss',
            mode='min')
        fitHistory = cnn.fit([trainX1, trainX2],
                             trainY1,
                             batch_size=32,
                             epochs=500,
                             validation_data=([valX1, valX2], valY1),
                             callbacks=[checkpointer, early_stopping],
                             class_weight=class_weights)
        myjson_file = "myhist_" + "dict_" + "secstr_seq_denseconcat_60perc_best_" + str(
            i)
        json.dump(fitHistory.history, open(myjson_file, 'w'))
        return cnn, fitHistory
    else:
        print("####################################bootstrap iteration ", t,
              "#############fold iteration ", i, "\n")
        cnn = models.load_model('%d-secstr_seq_denseconcat_60perc_best.h5' % i)
        early_stopping = EarlyStopping(monitor='val_loss', patience=20)
        checkpointer = ModelCheckpoint(
            filepath='%d-secstr_seq_denseconcat_60perc_best.h5' % i,
            verbose=1,
            save_best_only=True,
            monitor='val_loss',
            mode='min')
        fitHistory = cnn.fit([trainX1, trainX2],
                             trainY1,
                             batch_size=32,
                             epochs=500,
                             validation_data=([valX1, valX2], valY1),
                             class_weight=class_weights,
                             callbacks=[checkpointer, early_stopping])
        myjson_file = "myhist_" + "dict_" + "secstr_seq_denseconcat_60perc_best_" + str(
            i)
        json.dump(fitHistory.history, open(myjson_file, 'a'))
        return cnn, fitHistory
Example #8
def MCNN_26(trainX1, trainX2, trainY1, valX1, valX2, valY1, input_1, input_2,
            i, class_weights, t):
    if (t == 0):
        print("####################################bootstrap iteration ", t,
              "#############fold iteration ", i, "\n")
        onehot_secstr = conv.Conv1D(
            5,
            10,
            kernel_initializer='glorot_normal',
            kernel_regularizer=l2(0.011206678796947282),
            padding='valid',
            name='0_secstr')(input_1)
        onehot_secstr = Dropout(0.9942741825824339)(onehot_secstr)
        onehot_secstr = keras.layers.advanced_activations.PReLU(
            alpha_initializer='zeros',
            alpha_regularizer=None,
            alpha_constraint=None,
            shared_axes=None)(onehot_secstr)
        onehot_secstr = core.Flatten()(onehot_secstr)
        onehot_secstr2 = conv.Conv1D(
            9,
            4,
            kernel_initializer='glorot_normal',
            kernel_regularizer=l2(0.04663753230167181),
            padding='valid',
            name='1_secstr')(input_1)
        onehot_secstr2 = Dropout(0.4084429796653032)(onehot_secstr2)
        onehot_secstr2 = keras.layers.advanced_activations.PReLU(
            alpha_initializer='zeros',
            alpha_regularizer=None,
            alpha_constraint=None,
            shared_axes=None)(onehot_secstr2)
        onehot_secstr2 = core.Flatten()(onehot_secstr2)
        output_onehot_sec = concatenate([onehot_secstr, onehot_secstr2],
                                        axis=-1)
        onehot_x = conv.Conv1D(5,
                               10,
                               kernel_initializer='glorot_normal',
                               kernel_regularizer=l2(0.032491576988669696),
                               padding='valid',
                               name='0')(input_2)
        onehot_x = Dropout(0.5471399358933519)(onehot_x)
        onehot_x = keras.layers.advanced_activations.PReLU(
            alpha_initializer='zeros',
            alpha_regularizer=None,
            alpha_constraint=None,
            shared_axes=None)(onehot_x)
        onehot_x = core.Flatten()(onehot_x)
        onehot_x2 = conv.Conv1D(9,
                                4,
                                kernel_initializer='glorot_normal',
                                kernel_regularizer=l2(0.021775346680719416),
                                padding='valid',
                                name='1')(input_2)
        onehot_x2 = Dropout(0.10926522237224338)(onehot_x2)
        onehot_x2 = keras.layers.advanced_activations.PReLU(
            alpha_initializer='zeros',
            alpha_regularizer=None,
            alpha_constraint=None,
            shared_axes=None)(onehot_x2)
        onehot_x2 = core.Flatten()(onehot_x2)
        output_onehot_seq = concatenate([onehot_x, onehot_x2], axis=-1)
        final_output = concatenate([output_onehot_sec, output_onehot_seq])
        dense_out = Dense(30,
                          kernel_initializer='glorot_normal',
                          activation='softplus',
                          name='dense_concat')(final_output)
        out = Dense(2,
                    activation="softmax",
                    kernel_initializer='glorot_normal',
                    name='6')(dense_out)
        ########## Set Net ##########
        cnn = Model(inputs=[input_1, input_2], outputs=out)
        cnn.load_weights('weightsfile_hyperas26.h5')
        cnn.summary()
        adam = Adam(lr=0.06971582946481189)
        nadam = Nadam(lr=0.010482932560304255)
        rmsprop = RMSprop(lr=0.031598749327261345)
        sgd = SGD(lr=0.008615890670714792)
        optim = sgd
        ##        choiceval = {{choice(['adam', 'sgd', 'rmsprop','nadam'])}}
        ##        if choiceval == 'adam':
        ##            optim = adam
        ##        elif choiceval == 'rmsprop':
        ##            optim = rmsprop
        ##        elif choiceval=='nadam':
        ##            optim = nadam
        ##        else:
        ##            optim = sgd
        cnn.compile(loss='binary_crossentropy',
                    optimizer=optim,
                    metrics=[keras.metrics.binary_accuracy])
        early_stopping = EarlyStopping(monitor='val_loss', patience=20)
        checkpointer = ModelCheckpoint(
            filepath='%d-secstr_seq_denseconcat_60perc_trial26.h5' % i,
            verbose=1,
            save_best_only=True,
            monitor='val_loss',
            mode='min')
        fitHistory = cnn.fit([trainX1, trainX2],
                             trainY1,
                             batch_size=32,
                             epochs=500,
                             validation_data=([valX1, valX2], valY1),
                             callbacks=[checkpointer, early_stopping],
                             class_weight=class_weights)
        myjson_file = "myhist_" + "dict_" + "secstr_seq_denseconcat_60perc_trial26_" + str(
            i)
        json.dump(fitHistory.history, open(myjson_file, 'w'))
        return cnn, fitHistory
    else:
        print("####################################bootstrap iteration ", t,
              "#############fold iteration ", i, "\n")
        cnn = models.load_model('%d-secstr_seq_denseconcat_60perc_trial26.h5' %
                                i)
        early_stopping = EarlyStopping(monitor='val_loss', patience=20)
        checkpointer = ModelCheckpoint(
            filepath='%d-secstr_seq_denseconcat_60perc_trial26.h5' % i,
            verbose=1,
            save_best_only=True,
            monitor='val_loss',
            mode='min')
        fitHistory = cnn.fit([trainX1, trainX2],
                             trainY1,
                             batch_size=32,
                             epochs=500,
                             validation_data=([valX1, valX2], valY1),
                             class_weight=class_weights,
                             callbacks=[checkpointer, early_stopping])
        myjson_file = "myhist_" + "dict_" + "secstr_seq_denseconcat_60perc_trial26_" + str(
            i)
        json.dump(fitHistory.history, open(myjson_file, 'a'))
        return cnn, fitHistory
Example #9
def MCNN(trainX1,trainX2,trainY1,valX1,valX2,valY1,testX1,testX2,testY):
    import os
    import json
    import time
    import pickle
    import random
    import numpy as np
    import pandas as pd
    from pandas import DataFrame
    import matplotlib
    matplotlib.use('Agg')
    import matplotlib.pyplot as plt
    from matplotlib import offsetbox
    import tensorflow as tf
    #import pirna_kmer as pk
    #import phy_net as pn
    import keras.metrics
    import keras.utils.np_utils as kutils
    import keras.layers.core as core
    import keras.layers.convolutional as conv
    import keras.models as models
    from keras.utils import np_utils
    from keras.utils.np_utils import to_categorical
    from keras.models import Sequential, Model, load_model
    from keras.layers import Input, Dense, Dropout, BatchNormalization, Activation, Flatten
    from keras.layers import Convolution2D as Conv2D
    from keras.layers import MaxPooling2D
    from keras.layers.merge import concatenate
    from keras.callbacks import (EarlyStopping, ModelCheckpoint, Callback,
                                 LearningRateScheduler, History, TensorBoard)
    from keras.regularizers import l1, l2, l1_l2
    from keras.optimizers import Nadam, Adam, RMSprop, SGD
    from keras.wrappers.scikit_learn import KerasClassifier
    from sklearn import svm
    from sklearn.manifold import TSNE
    from sklearn.metrics import (precision_score, recall_score, f1_score,
                                 accuracy_score, precision_recall_curve,
                                 roc_curve, auc, average_precision_score,
                                 matthews_corrcoef)
    from sklearn.model_selection import (train_test_split, cross_val_score,
                                         StratifiedKFold)
    from sklearn.preprocessing import LabelEncoder, StandardScaler
    from sklearn.pipeline import Pipeline
    from sklearn.utils.class_weight import compute_class_weight

    remark = ''  # The mark written in the result file.
    row1,col1 = trainX1[0].shape
    input_1 = Input(shape=(row1,col1))
    row2,col2 = trainX2[0].shape
    input_2 = Input(shape=(row2,col2))
    NAME = "combined_secstr_seq_CNN_model_emboss-{}".format(int(time.time()))
    tensorboard = TensorBoard(log_dir="logs/{}".format(NAME))
    onehot_secstr = conv.Conv1D(5, 10, kernel_initializer='glorot_normal',kernel_regularizer=l2({{uniform(0.0001, 0.1)}}), padding='valid', name='0_secstr')(input_1)
    onehot_secstr = Dropout({{uniform(0, 1)}})(onehot_secstr)
    onehot_secstr = keras.layers.advanced_activations.PReLU(alpha_initializer='zeros', alpha_regularizer=None,alpha_constraint=None, shared_axes=None)(onehot_secstr)
    onehot_secstr = core.Flatten()(onehot_secstr)
    onehot_secstr2 = conv.Conv1D(9, 4, kernel_initializer='glorot_normal',kernel_regularizer=l2({{uniform(0.0001, 0.1)}}), padding='valid', name='1_secstr')(input_1)
    onehot_secstr2 = Dropout({{uniform(0, 1)}})(onehot_secstr2)
    onehot_secstr2 = keras.layers.advanced_activations.PReLU(alpha_initializer='zeros', alpha_regularizer=None,alpha_constraint=None, shared_axes=None)(onehot_secstr2)
    onehot_secstr2 = core.Flatten()(onehot_secstr2)
    output_onehot_sec = concatenate([onehot_secstr, onehot_secstr2], axis=-1)
    onehot_x = conv.Conv1D(5, 10, kernel_initializer='glorot_normal',kernel_regularizer=l2({{uniform(0.0001, 0.1)}}), padding='valid', name='0')(input_2)
    onehot_x = Dropout({{uniform(0, 1)}})(onehot_x)
    onehot_x = keras.layers.advanced_activations.PReLU(alpha_initializer='zeros', alpha_regularizer=None,alpha_constraint=None, shared_axes=None)(onehot_x)
    onehot_x = core.Flatten()(onehot_x)
    onehot_x2 = conv.Conv1D(9, 4, kernel_initializer='glorot_normal',kernel_regularizer=l2({{uniform(0.0001, 0.1)}}), padding='valid', name='1')(input_2)
    onehot_x2 = Dropout({{uniform(0, 1)}})(onehot_x2)
    onehot_x2 = keras.layers.advanced_activations.PReLU(alpha_initializer='zeros', alpha_regularizer=None,alpha_constraint=None, shared_axes=None)(onehot_x2)
    onehot_x2 = core.Flatten()(onehot_x2)
    output_onehot_seq = concatenate([onehot_x, onehot_x2], axis=-1)
    final_output = concatenate([output_onehot_sec, output_onehot_seq])
    dense_out = Dense({{choice([20,30,50,60,64,70,80,90,100, 128, 256, 512, 1024])}}, kernel_initializer='glorot_normal', activation='softplus', name='dense_concat')(final_output)
    out = Dense(2, activation="softmax", kernel_initializer='glorot_normal', name='6')(dense_out)
    ########## Set Net ##########
    cnn = Model(inputs=[input_1,input_2], outputs=out)
    cnn.summary()
    adam = Adam(lr={{uniform(0.0001, 0.1)}})
    nadam = Nadam(lr={{uniform(0.0001, 0.1)}})
    rmsprop = RMSprop(lr={{uniform(0.0001, 0.1)}})
    sgd = SGD(lr={{uniform(0.0001, 0.1)}})
    choiceval = {{choice(['adam', 'sgd', 'rmsprop','nadam'])}}
    if choiceval == 'adam':
        optim = adam
    elif choiceval == 'rmsprop':
        optim = rmsprop
    elif choiceval=='nadam':
        optim = nadam
    else:
        optim = sgd
    globalvars.globalVar += 1
    #early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=20, verbose=1, mode='auto')
    cnn.compile(loss='binary_crossentropy', optimizer=optim, metrics=[keras.metrics.binary_accuracy])
    early_stopping = EarlyStopping(monitor='val_loss', patience=20)
    checkpointer = ModelCheckpoint(filepath='%d-secstr_seq_denseconcat.h5' % globalvars.globalVar, verbose=1,save_best_only=True, monitor='val_loss', mode='min')
    fitHistory = cnn.fit([trainX1,trainX2], trainY1, batch_size={{choice([32,64,128,256,512])}}, epochs=500, validation_data=([valX1,valX2], valY1), callbacks=[checkpointer,early_stopping,tensorboard], class_weight=cwt.class_weights)
    myjson_file = "myhist_" +"_dict" + "_hyperas_model_trial_" +str(globalvars.globalVar)
    json.dump(fitHistory.history, open(myjson_file, 'w'))
    score, acc = cnn.evaluate([valX1, valX2], valY1, batch_size=32)
    pred_proba = cnn.predict([valX1,valX2], batch_size=32)
    pred_score = pred_proba[:, 1]
    true_class = valY1[:, 1]
    pred_class = (pred_score > 0.5).astype(int)  # f1_score expects hard labels, not probabilities
    f1_sc = f1_score(true_class, pred_class)
    print('F1 score:', f1_sc)
    print('Test score:', score)
    print('accuracy:', acc)
    return {'loss': -f1_sc, 'status': STATUS_OK, 'model': cnn}
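The double-brace expressions above ({{uniform(...)}}, {{choice(...)}}) are hyperas search-space templates, not plain Python; hyperas rewrites the function once per trial. A sketch of the usual driver, assuming a data() function that returns the nine arrays MCNN expects (the loader and max_evals value are hypothetical):

from hyperas import optim
from hyperopt import tpe, Trials

best_run, best_model = optim.minimize(model=MCNN,
                                      data=data,        # hypothetical loader returning the nine arrays
                                      algo=tpe.suggest,
                                      max_evals=30,
                                      trials=Trials())
print('Best hyperparameters:', best_run)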
Example #10
def MultiCNN(train_ook_X,
             trainAAIndexX,
             train_ook_Y,
             nb_epoch,
             earlystop=None,
             compiletimes=0,
             batch_size=2048,
             predict=False,
             compileModel=None,
             class_weight={
                 0: 0.5,
                 1: 0.5
             },
             verbose=1,
             model_id=0):
    # Set Oneofkey Data
    ook_row = train_ook_X.shape[2]
    ook_col = train_ook_X.shape[3]
    ook_x_t = train_ook_X
    ook_x_t.shape = (ook_x_t.shape[0], ook_row, ook_col)
    ook_input = Input(shape=(ook_row, ook_col))
    # AAindex
    aaindex_x_t = trainAAIndexX
    aaindex_row = trainAAIndexX.shape[2]
    aaindex_col = trainAAIndexX.shape[3]
    aaindex_x_t.shape = (trainAAIndexX.shape[0], aaindex_row, aaindex_col)
    aaindex_input = Input(shape=(aaindex_row, aaindex_col))

    if (earlystop is not None):
        early_stopping = EarlyStopping(monitor='val_loss',
                                       mode='min',
                                       patience=earlystop)

    nb_epoch = nb_epoch
    # TrainX_t For Shape
    if compiletimes == 0:
        # Total Set Classes
        nb_classes = 2
        # Total Set Batch_size
        batch_size = 8192
        # Total Set Optimizer
        # optimizer = SGD(lr=0.0001, momentum=0.9, nesterov= True)
        optimization = 'Nadam'
        # begin of Oneofkey Network
        ook_x = conv.Conv1D(51,
                            2,
                            name="0",
                            kernel_initializer="glorot_normal",
                            kernel_regularizer=l2(0),
                            padding="same")(ook_input)
        ook_x = Dropout(0.3)(ook_x)
        ook_x = Activation('softsign')(ook_x)

        ook_x = conv.Conv1D(21,
                            3,
                            name="1",
                            kernel_initializer="glorot_normal",
                            kernel_regularizer=l2(0),
                            padding="same")(ook_x)
        ook_x = Dropout(0.4)(ook_x)
        ook_x = Activation('softsign')(ook_x)

        output_ook_x = core.Flatten()(ook_x)
        output_ook_x = BatchNormalization()(output_ook_x)
        output_ook_x = Dropout(0.3)(output_ook_x)

        output_ook_x = Dense(128,
                             kernel_initializer='glorot_normal',
                             activation='relu',
                             name='2')(output_ook_x)
        output_ook_x = Dropout(0.2)(output_ook_x)
        output_ook_x = Dense(64,
                             kernel_initializer='glorot_normal',
                             activation="relu",
                             name='3')(output_ook_x)
        output_ook_x = Dropout(0.2)(output_ook_x)
        # below modified
        output_ook_x = Dense(415,
                             kernel_initializer='glorot_normal',
                             activation="relu",
                             name='4')(output_ook_x)
        # output_ook_x = Dense(nb_classes, kernel_initializer='glorot_normal', activation='softmax', kernel_regularizer=l2(0.001),
        #             name='7')(output_ook_x)
        # End of Oneofkey Network

        # start with AAindex Dnn
        aaindex_x = core.Flatten()(aaindex_input)
        attention_probs = Dense(aaindex_row * aaindex_col,
                                activation='softmax',
                                name='5')(aaindex_x)
        aaindex_x = Multiply()([aaindex_x, attention_probs])
        aaindex_x = BatchNormalization()(aaindex_x)
        aaindex_x = Dense(256,
                          kernel_initializer='he_uniform',
                          activation='relu',
                          name='6')(aaindex_x)
        aaindex_x = Dropout(0.6)(aaindex_x)
        aaindex_x = Dense(128,
                          kernel_initializer='he_uniform',
                          activation='softplus',
                          name='7')(aaindex_x)
        # aaindex_x = BatchNormalization()(aaindex_x)
        aaindex_x = Dropout(0.55)(aaindex_x)

        aaindex_x = GaussianNoise(10)(aaindex_x)

        output_aaindex_x = Dense(64,
                                 kernel_initializer='glorot_normal',
                                 activation='relu',
                                 name='8')(aaindex_x)

        # output_aaindex_x = Dense(nb_classes, kernel_initializer='glorot_normal', activation='softmax',
        #                          kernel_regularizer=l2(0.001), name='19')(output_aaindex_x)

        # output = Maximum()([output_ook_x, output_aaindex_x])
        output = Concatenate()([output_ook_x, output_aaindex_x])
        output = BatchNormalization()(output)

        # output = Dense(64, activation="relu", kernel_initializer="he_normal", kernel_regularizer=l2(0.001), name="21")(output)
        # output = BatchNormalization()(output)
        output = Dense(128,
                       activation="relu",
                       kernel_initializer="he_normal",
                       kernel_regularizer=l2(0.001),
                       name="9")(output)
        output = Dropout(0.6)(output)
        output = Dense(64,
                       activation="relu",
                       kernel_initializer="he_normal",
                       kernel_regularizer=l2(0.001),
                       name="10")(output)
        output = Dropout(0.5)(output)
        output = Dense(16,
                       activation="relu",
                       kernel_initializer="he_normal",
                       kernel_regularizer=l2(0.001),
                       name="11")(output)
        out = Dense(nb_classes,
                    kernel_initializer='glorot_normal',
                    activation='softmax',
                    kernel_regularizer=l2(0.001),
                    name='12')(output)

        multinn = Model([ook_input, aaindex_input], out)
        multinn.compile(loss=keras.losses.binary_crossentropy,
                        optimizer=optimization,
                        metrics=[keras.metrics.binary_accuracy])

    else:
        multinn = compileModel

    oneofkclass_weights = class_weight

    if (earlystop is None):
        fitHistory = multinn.fit([ook_x_t, aaindex_x_t],
                                 train_ook_Y,
                                 batch_size=batch_size,
                                 epochs=nb_epoch)
    else:
        weight_checkpointer = ModelCheckpoint(filepath='temp/temp.h5',
                                              verbose=verbose,
                                              save_best_only=True,
                                              monitor='val_binary_accuracy',
                                              mode='auto',
                                              save_weights_only=True)
        fitHistory = multinn.fit(
            [ook_x_t, aaindex_x_t],
            train_ook_Y,
            batch_size=batch_size,
            epochs=nb_epoch,
            shuffle=True,
            validation_split=0.2,
            callbacks=[early_stopping, weight_checkpointer],
            class_weight=oneofkclass_weights,
            verbose=verbose)
    return multinn
Example #11
    def run_model(self, data, targets, batch_size, epochs):
        # double the sample - disabled
        #data = double_inverse_samples(data)
        #targets = double_inverse_samples(targets)

        test_size_1 = 0.25
        test_size_2 = 0.15
        drop_out = 0.5

        # split the data up into multiple sets: training, testing validation
        train_data, data_set_2, train_target, target_set_2 = train_test_split(
            data, targets, test_size=test_size_1, random_state=42)
        test_data, val_data, test_target, val_target = train_test_split(
            data_set_2, target_set_2, test_size=test_size_2, random_state=24)
        # pre-processing
        X_train = train_data.reshape(train_data.shape[0], train_data.shape[1],
                                     1)
        X_test = test_data.reshape(test_data.shape[0], test_data.shape[1], 1)
        y_train = np_utils.to_categorical(train_target, 2)
        y_test = np_utils.to_categorical(test_target, 2)
        val_data = val_data.reshape(val_data.shape[0], -1, 1)
        val_target = np_utils.to_categorical(val_target, 2)

        # create a linear model
        model = Sequential()
        # add a convolutional layer
        model.add(
            convolutional.Conv1D(filters=16,
                                 kernel_size=1,
                                 padding='same',
                                 strides=1,
                                 activation='relu',
                                 input_shape=X_train.shape[1:]))

        # add a max pooling layer
        model.add(pooling.MaxPooling1D(
            pool_size=1,
            padding='same',
        ))
        # add a convolutional layer
        model.add(
            convolutional.Conv1D(
                filters=32,
                kernel_size=2,
                padding='same',
                strides=1,
                activation='relu',
            ))
        # add a max pooling layer
        model.add(pooling.MaxPooling1D(
            pool_size=1,
            padding='same',
        ))

        # flatten the activation maps into a 1d vector
        model.add(Flatten())
        # add a dense layer with 128 neurons
        model.add(Dense(128))
        # set activation layer
        model.add(Activation('relu'))
        # set drop out rate
        model.add(Dropout(drop_out))
        # add a dense layer with 2 neurons
        model.add(Dense(2))
        # set softmax function to make the categories
        model.add(Activation('softmax'))
        # define adam optimizer
        adam = Adam(lr=0.001,
                    beta_1=0.9,
                    beta_2=0.999,
                    epsilon=1e-08,
                    decay=0.0)
        # compile mode to use cross entropy
        model.compile(optimizer=adam,
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])
        # fit the model and use cross validation
        model.fit(X_train,
                  y_train,
                  batch_size=batch_size,
                  epochs=epochs,
                  verbose=2,
                  validation_data=(val_data, val_target))
        # get the test loss and accuracy of our model
        test_loss, test_accuracy = model.evaluate(X_test, y_test, verbose=2)
        # get the validation loss and accuracy of our model
        val_loss, val_accuracy = model.evaluate(val_data,
                                                val_target,
                                                verbose=2)
        # collect metrics for output
        metrics = {
            "test_loss": test_loss,
            "test_accuracy": test_accuracy,
            "val_loss": val_loss,
            "val_accuracy": val_accuracy,
            "batch_size": batch_size,
            "epochs": epochs,
            "test_size_1": test_size_1,
            "test_size_2": test_size_2,
            "drop_out": drop_out
        }

        return metrics, model
Example #12
    def build(self, nb_genes, nb_classes):
        # params
        dropout_f = 0.25  # prevent overfitting
        dropout_c = 0.25
        dropout_d = 0.25

        # feature extraction
        nb_filters = 500
        kernel_size = 10
        L1CNN = 0
        actfun = 'relu'
        pool_size = 11  # window size for features

        # hidden layers
        units1 = 200  # number of nodes in hidden layer
        units2 = 150
        units3 = 120
        units4 = 100

        # compilation
        INIT_LR = 0.01  # initial learning rate
        lamb = 1.0

        # build the model
        input1 = Input(shape=(nb_genes, 1))
        feature1 = conv.Conv1D(nb_filters,
                               kernel_size,
                               padding='same',
                               kernel_initializer='he_normal',
                               kernel_regularizer=reg.l1(L1CNN))(input1)
        feature1 = Dropout(dropout_f)(feature1)
        feature1 = Activation(actfun)(feature1)
        feature1 = pool.MaxPooling1D(pool_size)(feature1)
        feature1 = Flatten()(feature1)

        input2 = Input(shape=(nb_genes, 1))
        feature2 = conv.Conv1D(nb_filters,
                               kernel_size,
                               padding='same',
                               kernel_initializer='he_normal',
                               kernel_regularizer=reg.l1(L1CNN))(input2)
        feature2 = Dropout(dropout_f)(feature2)
        feature2 = Activation(actfun)(feature2)
        feature2 = pool.MaxPooling1D(pool_size)(feature2)
        feature2 = Flatten()(feature2)

        hidden1_1 = Dense(units1, activation='relu')(feature1)
        target = Dense(units3, activation='relu')(hidden1_1)  # z1 -> z3
        hidden1_2 = Dense(units1, activation='relu')(feature2)
        context = Dense(units3, activation='relu')(hidden1_2)
        hidden2 = Dense(units2, activation='relu')(hidden1_1)  # z1 -> z2
        hidden4 = Dense(units4, activation='relu')(target)  # z3 -> z4
        concatenated = Concatenate(axis=1)([hidden2,
                                            hidden4])  # concatenate z2, z4
        concatenated = Dropout(dropout_c)(concatenated)

        similarity = Dot(axes=1,
                         normalize=True)([target, context
                                          ])  # setup for the validation model
        dot_product = Dot(axes=1)([target, context])
        dot_product = Reshape((1, ))(dot_product)
        dot_product = Dropout(dropout_d)(dot_product)

        output1 = Dense(nb_classes, activation='softmax',
                        name='output1')(concatenated)
        output2 = Dense(1, activation='sigmoid', name='output2')(dot_product)

        model = Model(inputs=[input1, input2],
                      outputs=[output1, output2],
                      name='graphSemiCNN')
        val_model = Model(inputs=[input1, input2],
                          outputs=similarity)  # similarity callback

        # compile the model
        losses = {
            'output1': 'categorical_crossentropy',
            'output2': 'binary_crossentropy',
        }
        lossWeights = {'output1': 1.0, 'output2': lamb}
        opt = SGD(lr=INIT_LR)
        model.compile(loss=losses,
                      loss_weights=lossWeights,
                      optimizer=opt,
                      metrics=['accuracy'])  # loss function: cross entropy

        return model, val_model
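Since graphSemiCNN has two named outputs, fit() takes one target per output. A made-up training call (nb_genes, nb_classes, and all arrays are placeholders; build is a method, so this would run inside the owning class):

import numpy as np

model, val_model = self.build(nb_genes=2000, nb_classes=5)  # inside the owning class
x1 = np.random.rand(64, 2000, 1)
x2 = np.random.rand(64, 2000, 1)
y_cls = np.eye(5)[np.random.randint(5, size=64)]    # targets for 'output1' (softmax)
y_sim = np.random.randint(2, size=(64, 1))          # targets for 'output2' (sigmoid)
model.fit([x1, x2], {'output1': y_cls, 'output2': y_sim}, epochs=1, batch_size=16)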
Example #13
File: cnn.py Project: Grotex/Saturn
from keras.layers import Input
from keras.layers import convolutional, pooling


class Config:
    # (excerpt: earlier configuration fields from the original file are not shown)
    def __init__(self):
        self.testing_x_path = "./data/PCA16_test.npy"
        self.batch_size = 16
        self.input_dim = (4, 4)
        self.max_epochs = 20
        self.lr = 1e-3
        self.cnn_model_weights_path = "./model/cnn_model_weights.h5"
        self.cnn_model_structure_path = "./model/cnn_model_structure.json"


cnn_config = Config()

inputs = Input(shape=cnn_config.input_dim)

x = convolutional.Conv1D(32,
                         kernel_size=3,
                         strides=1,
                         padding='same',
                         activation='relu')(inputs)
x = pooling.MaxPool1D(2)(x)
x = convolutional.Conv1D(64,
                         kernel_size=3,
                         strides=1,
                         padding='same',
                         activation='relu')(x)
x = pooling.MaxPool1D(2)(x)

# model.add(convolutional.Conv1D(32, kernel_size=3,
#                                strides=1, padding='same',
#                                activation='relu',
#                                input_shape=cnn_config.input_dim))
# model.add(pooling.MaxPool1D(2))
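The excerpt ends mid-model. A speculative completion under the same Config, flattening the conv stack into a small classification head; the class count (10) and head sizes are guesses, not taken from the Grotex/Saturn project:

from keras.layers import Flatten, Dense
from keras.models import Model
from keras.optimizers import Adam

x = Flatten()(x)
x = Dense(64, activation='relu')(x)
outputs = Dense(10, activation='softmax')(x)   # class count is a guess

model = Model(inputs=inputs, outputs=outputs)
model.compile(optimizer=Adam(lr=cnn_config.lr),
              loss='categorical_crossentropy',
              metrics=['accuracy'])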