Example #1
def MCNN(trainX1, trainX2, trainY1, valX1, valX2, valY1, input_1, input_2, i, class_weights):
    onehot_secstr = conv.Conv1D(5, 10, kernel_initializer='glorot_normal',kernel_regularizer=l2(0.04), padding='valid', name='0_secstr')(input_1)
    onehot_secstr = Dropout(0.6)(onehot_secstr)
    onehot_secstr = keras.layers.advanced_activations.PReLU(alpha_initializer='zeros', alpha_regularizer=None,alpha_constraint=None, shared_axes=None)(onehot_secstr)
    onehot_secstr = core.Flatten()(onehot_secstr)
    onehot_secstr2 = conv.Conv1D(9, 4, kernel_initializer='glorot_normal',kernel_regularizer=l2(0.02), padding='valid', name='1_secstr')(input_1)
    onehot_secstr2 = Dropout(0.4)(onehot_secstr2)
    onehot_secstr2 = keras.layers.advanced_activations.PReLU(alpha_initializer='zeros', alpha_regularizer=None,alpha_constraint=None, shared_axes=None)(onehot_secstr2)
    onehot_secstr2 = core.Flatten()(onehot_secstr2)
    output_onehot_sec = concatenate([onehot_secstr, onehot_secstr2], axis=-1)
    onehot_x = conv.Conv1D(5, 10, kernel_initializer='glorot_normal',kernel_regularizer=l2(0.04), padding='valid', name='0')(input_2)
    onehot_x = Dropout(0.6)(onehot_x)
    onehot_x = keras.layers.advanced_activations.PReLU(alpha_initializer='zeros', alpha_regularizer=None,alpha_constraint=None, shared_axes=None)(onehot_x)
    onehot_x = core.Flatten()(onehot_x)
    onehot_x2 = conv.Conv1D(9, 4, kernel_initializer='glorot_normal',kernel_regularizer=l2(0.02), padding='valid', name='1')(input_2)
    onehot_x2 = Dropout(0.4)(onehot_x2)
    onehot_x2 = keras.layers.advanced_activations.PReLU(alpha_initializer='zeros', alpha_regularizer=None,alpha_constraint=None, shared_axes=None)(onehot_x2)
    onehot_x2 = core.Flatten()(onehot_x2)
    output_onehot_seq = concatenate([onehot_x, onehot_x2], axis=-1)
    final_output = concatenate([output_onehot_sec, output_onehot_seq])
    dense_out = Dense(100, kernel_initializer='glorot_normal', activation='softplus', name='dense_concat')(final_output)
    out = Dense(2, activation="softmax", kernel_initializer='glorot_normal', name='6')(dense_out)
    ########## Set Net ##########
    cnn = Model(inputs=[input_1,input_2], outputs=out)
    cnn.summary()
    nadam = Nadam(lr=0.001)
    #early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=20, verbose=1, mode='auto')
    cnn.compile(loss='binary_crossentropy', optimizer=nadam, metrics=[keras.metrics.binary_accuracy])  # Nadam
    early_stopping = EarlyStopping(monitor='val_loss', patience=20)
    checkpointer = ModelCheckpoint(filepath='%d-secstr_seq_denseconcat_60perc.h5' % i, verbose=1,save_best_only=True, monitor='val_loss', mode='min')
    fitHistory = cnn.fit([trainX1, trainX2], trainY1, batch_size=256, epochs=500, validation_data=([valX1, valX2], valY1), class_weight=class_weights, callbacks=[checkpointer, early_stopping])
    history_dict = fitHistory.history
    myjson_file = "myhist_" +"dict_" + "secstr_seq_denseconcat_60perc_" +str(i)
    json.dump(history_dict, open(myjson_file, 'w'))
    return cnn, fitHistory
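A minimal usage sketch for MCNN. Everything here is a placeholder: the window length, alphabet sizes, and random data are hypothetical stand-ins for the one-hot secondary-structure and sequence windows the source expects:

import numpy as np
from keras.layers import Input
from keras.utils import to_categorical

# Hypothetical shapes: 33-step windows, 3-symbol and 21-symbol one-hot alphabets.
trainX1, valX1 = np.random.rand(1000, 33, 3), np.random.rand(200, 33, 3)
trainX2, valX2 = np.random.rand(1000, 33, 21), np.random.rand(200, 33, 21)
trainY1 = to_categorical(np.random.randint(2, size=1000), 2)
valY1 = to_categorical(np.random.randint(2, size=200), 2)

input_1 = Input(shape=(33, 3))
input_2 = Input(shape=(33, 21))
cnn, history = MCNN(trainX1, trainX2, trainY1, valX1, valX2, valY1,
                    input_1, input_2, 0, class_weights={0: 1.0, 1: 1.0})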
Example #2
def create_simple_model(num_classes, layer1_filters=32, layer2_filters=64):
    epochs = 5
    n_conv = 2
    model = models.Sequential()

    # First layer
    model.add(conv.ZeroPadding2D(
        (1, 1),
        input_shape=(1, IMG_COLS, IMG_ROWS),
    ))
    model.add(
        conv.Convolution2D(layer1_filters, n_conv, n_conv, activation="relu"))
    model.add(conv.MaxPooling2D(strides=(2, 2)))

    # Second layer
    model.add(conv.ZeroPadding2D((1, 1)))
    model.add(
        conv.Convolution2D(layer2_filters, n_conv, n_conv, activation="relu"))
    model.add(conv.MaxPooling2D(strides=(2, 2)))

    model.add(core.Flatten())
    model.add(core.Dropout(0.2))
    model.add(core.Dense(128, activation="relu"))
    model.add(core.Dense(num_classes, activation="softmax"))

    model.summary()
    model.compile(loss="categorical_crossentropy",
                  optimizer="adadelta",
                  metrics=["accuracy"])

    return model, epochs
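A usage sketch, assuming Keras is configured for channels-first images (the Keras 1-era 'th' ordering this snippet was written against) and that IMG_COLS/IMG_ROWS are module-level constants; both are assumptions, and the values below are illustrative:

import numpy as np

IMG_COLS, IMG_ROWS = 28, 28  # hypothetical; defined elsewhere in the source module
model, epochs = create_simple_model(num_classes=10)
x = np.random.rand(32, 1, IMG_COLS, IMG_ROWS)   # channels-first batch matching input_shape
y = np.eye(10)[np.random.randint(10, size=32)]  # one-hot labels for categorical_crossentropy
model.fit(x, y, epochs=epochs, batch_size=32)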
Example #3
    def __init__(self, img_size, nb_classes):
        batch_size = 128
        img_rows, img_cols = img_size

        nb_filters_1 = 32  # 64
        nb_filters_2 = 64  # 128
        nb_filters_3 = 128  # 256
        nb_conv = 3


        cnn = models.Sequential()

        cnn.add(conv.Convolution2D(nb_filters_1, nb_conv, nb_conv, activation="relu", input_shape=(img_rows, img_cols, 1),
                                   border_mode='same'))
        cnn.add(conv.Convolution2D(nb_filters_1, nb_conv, nb_conv, activation="relu", border_mode='same'))
        cnn.add(conv.MaxPooling2D(strides=(2, 2)))

        cnn.add(conv.Convolution2D(nb_filters_2, nb_conv, nb_conv, activation="relu", border_mode='same'))
        cnn.add(conv.Convolution2D(nb_filters_2, nb_conv, nb_conv, activation="relu", border_mode='same'))
        cnn.add(conv.MaxPooling2D(strides=(2, 2)))

        # cnn.add(conv.Convolution2D(nb_filters_3, nb_conv, nb_conv, activation="relu", border_mode='same'))
        # cnn.add(conv.Convolution2D(nb_filters_3, nb_conv, nb_conv, activation="relu", border_mode='same'))
        # cnn.add(conv.Convolution2D(nb_filters_3, nb_conv, nb_conv, activation="relu", border_mode='same'))
        # cnn.add(conv.Convolution2D(nb_filters_3, nb_conv, nb_conv, activation="relu", border_mode='same'))
        # cnn.add(conv.MaxPooling2D(strides=(2,2)))

        cnn.add(core.Flatten())
        cnn.add(core.Dropout(0.2))
        cnn.add(core.Dense(128, activation="relu"))  # 4096
        cnn.add(core.Dense(nb_classes, activation="softmax"))

        cnn.summary()
        cnn.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
        self.cnn = cnn
Example #4
def Dave_v3(input_tensor=None, load_weights=False):
    model = models.Sequential()
    model.add(
        convolutional.Convolution2D(16,
                                    3,
                                    3,
                                    input_shape=(32, 128, 3),
                                    activation='relu'))
    model.add(pooling.MaxPooling2D(pool_size=(2, 2)))
    model.add(convolutional.Convolution2D(32, 3, 3, activation='relu'))
    model.add(pooling.MaxPooling2D(pool_size=(2, 2)))
    model.add(convolutional.Convolution2D(64, 3, 3, activation='relu'))
    model.add(pooling.MaxPooling2D(pool_size=(2, 2)))
    model.add(core.Flatten())
    model.add(core.Dense(500, activation='relu'))
    #model.add(core.Dropout(.5))
    model.add(core.Dense(100, activation='relu'))
    #model.add(core.Dropout(.25))
    model.add(core.Dense(20, activation='relu'))
    model.add(core.Dense(1))
    model.add(
        Lambda(One_to_radius, output_shape=atan_layer_shape,
               name="prediction"))
    if load_weights:
        model.load_weights('./models/dave3/dave3.h5')
    model.compile(optimizer=optimizers.Adam(lr=1e-04),
                  loss='mean_squared_error')
    return model
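One_to_radius and atan_layer_shape are not shown in this example. A hypothetical reconstruction, consistent with steering-angle models that bound the regression output with atan (an assumption, not the source's code):

import tensorflow as tf

def One_to_radius(x):
    # Hypothetical: squash the unbounded Dense(1) output into a bounded angle via atan.
    return tf.multiply(tf.atan(x), 2)

def atan_layer_shape(input_shape):
    # The transform is elementwise, so the output shape equals the input shape.
    return input_shape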
Example #5
def convnet_simple_lion_keras(image_dims):
    model = keras.models.Sequential()

    model.add(core.Lambda(lambda x: (x / 255.0) - 0.5, input_shape=image_dims))

    model.add(
        convolutional.Conv2D(32, (3, 3), activation='relu', padding='same'))
    model.add(convolutional.MaxPooling2D(pool_size=(2, 2)))
    model.add(
        convolutional.Conv2D(64, (3, 3), activation='relu', padding='same'))
    model.add(convolutional.MaxPooling2D(pool_size=(2, 2)))
    model.add(
        convolutional.Conv2D(128, (3, 3), activation='relu', padding='same'))
    model.add(convolutional.MaxPooling2D(pool_size=(2, 2)))

    model.add(core.Flatten())

    model.add(core.Dense(512, activation='relu'))
    model.add(core.Dropout(0.5))
    model.add(core.Dense(1024, activation='relu'))
    model.add(core.Dropout(0.5))
    model.add(core.Dense(6, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['acc'])

    return model
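Because normalization happens inside the model (the Lambda layer maps 0-255 pixels to [-0.5, 0.5]), raw pixel arrays can be fed directly. A usage sketch with hypothetical dimensions:

import numpy as np

model = convnet_simple_lion_keras(image_dims=(96, 96, 3))  # input size is illustrative
x = np.random.randint(0, 256, size=(16, 96, 96, 3)).astype('float32')  # raw 0-255 pixels
y = np.eye(6)[np.random.randint(6, size=16)]
model.fit(x, y, epochs=1, batch_size=8)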
Example #6
def model(input):
    x = conv.Conv1D(100,
                    3,
                    activation=config.get('first layer', 'activation'),
                    kernel_initializer=config.get('first layer',
                                                  'kernel_initializer'),
                    kernel_regularizer=l2(
                        config.getfloat('first layer', 'kernel_regularizer')),
                    padding=config.get('first layer', 'padding'),
                    name='Conv1')(input)
    x = Dropout(config.getfloat('first layer', 'dropout'), name='drop1')(x)
    x = BatchNormalization()(x)
    x = MaxPooling1D(pool_size=2, strides=None, padding='valid')(x)

    x = conv.Conv1D(200,
                    7,
                    activation=config.get('second layer', 'activation'),
                    kernel_initializer=config.get('second layer',
                                                  'kernel_initializer'),
                    kernel_regularizer=l2(
                        config.getfloat('second layer', 'kernel_regularizer')),
                    padding=config.get('second layer', 'padding'),
                    name='Conv2')(x)
    x = Dropout(config.getfloat('second layer', 'dropout'), name='drop2')(x)
    x = BatchNormalization()(x)
    x = MaxPooling1D(pool_size=2, strides=None, padding='valid')(x)

    x = GRU(units=64, return_sequences=True)(x)
    x = Dropout(0.2)(x)

    x = GRU(units=64, return_sequences=True)(x)
    x = Dropout(0.2)(x)

    output = core.Flatten()(x)
    output = BatchNormalization()(output)
    output = Dropout(config.getfloat('flatten layer', 'dropout'),
                     name='dropo3')(output)

    output = Dense(config.getint('first dense layer', 'units'),
                   kernel_initializer=config.get('first dense layer',
                                                 'kernel_initializer'),
                   activation=config.get('first dense layer', 'activation'),
                   name='Denseo1')(output)
    output = Dropout(config.getfloat('first dense layer', 'dropout'),
                     name='dropo4')(output)
    output = BatchNormalization()(output)
    out = Dense(2,
                activation="softmax",
                kernel_initializer='glorot_normal',
                name='Denseo2')(output)

    #  ########## Set Cnn ##########
    cnn = Model(inputs=input, outputs=out)
    cnn.summary()
    adam = Adam(lr=0.0005)
    cnn.compile(loss='binary_crossentropy',
                optimizer=adam,
                metrics=[keras.metrics.binary_accuracy])  # Nadam
    return cnn
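All hyperparameters come from a module-level configparser object. A sketch of an INI layout with the sections and keys this function reads (the values are illustrative, not the source's):

import configparser

config = configparser.ConfigParser()
config.read_string("""
[first layer]
activation = relu
kernel_initializer = glorot_normal
kernel_regularizer = 0.01
padding = same
dropout = 0.3

[second layer]
activation = relu
kernel_initializer = glorot_normal
kernel_regularizer = 0.01
padding = same
dropout = 0.4

[flatten layer]
dropout = 0.3

[first dense layer]
units = 128
kernel_initializer = glorot_normal
activation = relu
dropout = 0.4
""")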
Example #7
def main(n_filters,
         conv_size,
         pool_size,
         dropout,
         patch_size,
         n_astro=7,
         out_path=None):
    # Imports must be in the function, or whenever we import this module, Keras
    # will dump to stdout.
    import keras.layers.core as core
    from keras.layers import Input, Dense, Concatenate
    import keras.layers.convolutional as conv
    import keras.layers.merge
    from keras.models import Model

    im_in = Input(shape=(1, patch_size, patch_size))
    astro_in = Input(shape=(n_astro, ))
    # 1 x 32 x 32
    conv1 = conv.Convolution2D(filters=n_filters,
                               kernel_size=(conv_size, conv_size),
                               padding='valid',
                               activation='relu',
                               data_format='channels_first')(im_in)
    # 32 x 28 x 28
    pool1 = conv.MaxPooling2D(pool_size=(pool_size, pool_size),
                              data_format='channels_first')(conv1)
    # 32 x 14 x 14
    conv2 = conv.Convolution2D(filters=n_filters,
                               kernel_size=(conv_size, conv_size),
                               padding='valid',
                               activation='relu',
                               data_format='channels_first')(pool1)
    # 32 x 10 x 10
    pool2 = conv.MaxPooling2D(pool_size=(pool_size, pool_size),
                              data_format='channels_first')(conv2)
    # 32 x 5 x 5
    conv3 = conv.Convolution2D(filters=n_filters,
                               kernel_size=(conv_size, conv_size),
                               padding='valid',
                               activation='relu',
                               data_format='channels_first')(pool2)
    # 32 x 1 x 1
    dropout = core.Dropout(dropout)(conv3)
    flatten = core.Flatten()(dropout)
    conc = Concatenate()([astro_in, flatten])
    lr = Dense(1, activation='sigmoid')(conc)

    model = Model(inputs=[astro_in, im_in], outputs=[lr])
    model.compile(loss='binary_crossentropy', optimizer='adadelta')

    model_json = model.to_json()

    if out_path is not None:
        with open(out_path, 'w') as f:
            f.write(model_json)

    return model_json
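The inline shape comments (1 x 32 x 32 down to 32 x 1 x 1) are consistent with conv_size=5 and pool_size=2 on a 32-pixel patch. A usage sketch that rebuilds the model from the returned JSON (the dropout value is illustrative):

from keras.models import model_from_json

model_json = main(n_filters=32, conv_size=5, pool_size=2,
                  dropout=0.5, patch_size=32)
model = model_from_json(model_json)
model.compile(loss='binary_crossentropy', optimizer='adadelta')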
Example #8
def Simple_Convo(train, nb_classes):
    batch_size = 128
    img_rows, img_cols = 56, 56

    nb_filters_1 = 32  # 64
    nb_filters_2 = 64  # 128
    nb_filters_3 = 128  # 256
    nb_conv = 3

    # train = np.concatenate([train, train], axis=1)
    trainX = train[:, 1:].reshape(train.shape[0], 28, 28, 1)
    trainX = trainX.astype(float)

    trainX /= 255.0
    trainX = np.concatenate([trainX, np.roll(trainX, 14, axis=1)], axis=1)
    trainX = np.concatenate([trainX, np.fliplr(np.roll(trainX, 7, axis=2))], axis=2)
    print(trainX.shape)

    cnn = models.Sequential()

    cnn.add(conv.Convolution2D(nb_filters_1, nb_conv, nb_conv, activation="relu", input_shape=(img_rows, img_cols, 1),
                               border_mode='same'))
    cnn.add(conv.Convolution2D(nb_filters_1, nb_conv, nb_conv, activation="relu", border_mode='same'))
    cnn.add(conv.MaxPooling2D(strides=(2, 2)))

    cnn.add(conv.Convolution2D(nb_filters_2, nb_conv, nb_conv, activation="relu", border_mode='same'))
    cnn.add(conv.Convolution2D(nb_filters_2, nb_conv, nb_conv, activation="relu", border_mode='same'))
    cnn.add(conv.MaxPooling2D(strides=(2, 2)))

    # cnn.add(conv.Convolution2D(nb_filters_3, nb_conv, nb_conv, activation="relu", border_mode='same'))
    # cnn.add(conv.Convolution2D(nb_filters_3, nb_conv, nb_conv, activation="relu", border_mode='same'))
    # cnn.add(conv.Convolution2D(nb_filters_3, nb_conv, nb_conv, activation="relu", border_mode='same'))
    # cnn.add(conv.Convolution2D(nb_filters_3, nb_conv, nb_conv, activation="relu", border_mode='same'))
    # cnn.add(conv.MaxPooling2D(strides=(2,2)))

    cnn.add(core.Flatten())
    cnn.add(core.Dropout(0.2))
    cnn.add(core.Dense(128, activation="relu"))  # 4096
    cnn.add(core.Dense(nb_classes, activation="softmax"))

    cnn.summary()
    return cnn
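The three array operations tile each 28 x 28 digit into a 56 x 56 canvas (a shifted copy stacked below, then a flipped, shifted copy appended to the right). A quick shape check of the same steps:

import numpy as np

x = np.zeros((10, 28, 28, 1))
x = np.concatenate([x, np.roll(x, 14, axis=1)], axis=1)            # rows: 28 -> 56
x = np.concatenate([x, np.fliplr(np.roll(x, 7, axis=2))], axis=2)  # cols: 28 -> 56
print(x.shape)  # (10, 56, 56, 1)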
Example #9
def mnist_model(input_shape):
  """Creates a MNIST model."""
  model = sequential_model_lib.Sequential()

  # Adding custom pass-through layer to visualize input images.
  model.add(LayerForImageSummary())

  model.add(
      conv_layer_lib.Conv2D(
          32, kernel_size=(3, 3), activation='relu', input_shape=input_shape))
  model.add(conv_layer_lib.Conv2D(64, (3, 3), activation='relu'))
  model.add(pool_layer_lib.MaxPooling2D(pool_size=(2, 2)))
  model.add(layer_lib.Dropout(0.25))
  model.add(layer_lib.Flatten())
  model.add(layer_lib.Dense(128, activation='relu'))
  model.add(layer_lib.Dropout(0.5))
  model.add(layer_lib.Dense(NUM_CLASSES, activation='softmax'))

  # Adding custom pass-through layer for summary recording.
  model.add(LayerForHistogramSummary())
  return model
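LayerForImageSummary and LayerForHistogramSummary are custom pass-through layers defined elsewhere in the source. A hypothetical sketch of the pattern using tf.keras and tf.summary (an assumption about their implementation, not the original code):

import tensorflow as tf

class LayerForImageSummary(tf.keras.layers.Layer):
    def call(self, inputs):
        # Record the incoming batch as an image summary, then pass it through unchanged.
        tf.summary.image('input_images', inputs, max_outputs=3)
        return inputs

class LayerForHistogramSummary(tf.keras.layers.Layer):
    def call(self, inputs):
        # Record the output distribution as a histogram, then pass it through unchanged.
        tf.summary.histogram('class_probabilities', inputs)
        return inputs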
Example #10
def model():
    model1 = Sequential()
    model1.add(
        Convolution2D(16, 3, 3, input_shape=(32, 128, 3), activation='relu'))
    model1.add(MaxPooling2D(pool_size=(2, 2)))
    model1.add(Convolution2D(32, 3, 3, activation='relu'))
    model1.add(MaxPooling2D(pool_size=(2, 2)))
    model1.add(Convolution2D(64, 3, 3, activation='relu'))
    model1.add(MaxPooling2D(pool_size=(2, 2)))
    model1.add(core.Flatten())
    model1.add(core.Dropout(0.5))
    model1.add(core.Dense(200))
    model1.add(Activation('relu'))
    model1.add(core.Dropout(0.25))
    model1.add(core.Dense(50))
    model1.add(Activation('relu'))
    model1.add(core.Dropout(0.25))
    model1.add(core.Dense(10))
    model1.add(Activation('relu'))
    model1.add(core.Dense(1))
    return model1
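The function returns an uncompiled single-output regression network. A usage sketch compiling it with mean squared error, as the similar steering models in this collection do:

m = model()
m.compile(optimizer='adam', loss='mean_squared_error')
m.summary()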
Example #11
 def testIgnoreSaveCounter(self):
   checkpoint_directory = self.get_temp_dir()
   checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
   with self.cached_session() as session:
     # Create and save a model using Saver() before using a Checkpoint. This
     # generates a snapshot without the Checkpoint's `save_counter`.
     model = sequential.Sequential()
     model.add(core.Flatten(input_shape=(1,)))
     model.add(core.Dense(1))
     name_saver = tf.compat.v1.train.Saver(model.trainable_variables)
     save_path = name_saver.save(
         sess=session, save_path=checkpoint_prefix, global_step=1)
     # Checkpoint.restore must successfully load that checkpoint.
     ckpt = tf.train.Checkpoint(model=model)
     status = ckpt.restore(save_path)
     status.assert_existing_objects_matched()
     # It should, however, refuse to load a checkpoint where an unrelated
     # `save_counter` variable is missing.
     model.layers[1].var = tf.Variable(0., name="save_counter")
     status = ckpt.restore(save_path)
     with self.assertRaises(AssertionError):
       status.assert_existing_objects_matched()
Example #12
def cpg_layers(params):
    layers = []
    if params.drop_in:
        layer = kcore.Dropout(params.drop_in)
        layers.append(('xd', layer))
    nb_layer = len(params.nb_filter)
    w_reg = kr.WeightRegularizer(l1=params.l1, l2=params.l2)
    for l in range(nb_layer):
        layer = kconv.Convolution2D(nb_filter=params.nb_filter[l],
                                    nb_row=1,
                                    nb_col=params.filter_len[l],
                                    activation=params.activation,
                                    init='glorot_uniform',
                                    W_regularizer=w_reg,
                                    border_mode='same')
        layers.append(('c%d' % (l + 1), layer))
        layer = kconv.MaxPooling2D(pool_size=(1, params.pool_len[l]))
        layers.append(('p%d' % (l + 1), layer))

    layer = kcore.Flatten()
    layers.append(('f1', layer))
    if params.drop_out:
        layer = kcore.Dropout(params.drop_out)
        layers.append(('f1d', layer))
    if params.nb_hidden:
        layer = kcore.Dense(params.nb_hidden,
                            activation='linear',
                            init='glorot_uniform')
        layers.append(('h1', layer))
        if params.batch_norm:
            layer = knorm.BatchNormalization()
            layers.append(('h1b', layer))
        layer = kcore.Activation(params.activation)
        layers.append(('h1a', layer))
        if params.drop_out:
            layer = kcore.Dropout(params.drop_out)
            layers.append(('h1d', layer))
    return layers
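cpg_layers returns (name, layer) pairs rather than a model. A sketch wiring them up with the Keras 1 functional API, using a hypothetical params object and input shape (SimpleNamespace stands in for whatever config class the source uses):

from types import SimpleNamespace
from keras.layers import Input
from keras.models import Model

params = SimpleNamespace(drop_in=0.1, nb_filter=[32, 64], filter_len=[5, 3],
                         pool_len=[2, 2], activation='relu', l1=0.0, l2=0.01,
                         drop_out=0.25, nb_hidden=128, batch_norm=True)

inp = Input(shape=(1, 1, 500))  # hypothetical channels-first (1 x seq_len) CpG window
x = inp
for name, layer in cpg_layers(params):
    x = layer(x)
model = Model(input=inp, output=x)  # Keras 1-style keywords, matching the layer API above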
Example #13
def model():
    inputs = ks.layers.Input(shape=(150, 150, 3))
    #conv2d
    x0 = ks.layers.Conv2D(128, (3, 3), padding='same')(inputs)
    x0 = b_n()(x0)
    x00 = ks.layers.core.Activation('relu')(x0)
    #residual
    x1 = ks.layers.Conv2D(128, (3, 3), padding='same')(inputs)
    x2 = b_n()(x1)
    x3 = ks.layers.core.Activation('relu')(x2)
    x4 = ks.layers.Conv2D(128, (3, 3), padding='same')(x3)
    x5 = b_n()(x4)
    x6 = ks.layers.add([x00, x5])
    x7 = ks.layers.core.Activation('relu')(x6)
    #residual end
    # residual
    x1 = ks.layers.Conv2D(128, (3, 3), padding='same')(x7)
    x2 = b_n()(x1)
    x3 = ks.layers.core.Activation('relu')(x2)
    x4 = ks.layers.Conv2D(128, (3, 3), padding='same')(x3)
    x5 = b_n()(x4)
    x6 = ks.layers.add([x00, x5])
    x7 = ks.layers.core.Activation('relu')(x6)
    # residual end
    # residual
    x1 = ks.layers.Conv2D(128, (3, 3), padding='same')(x7)
    x2 = b_n()(x1)
    x3 = ks.layers.core.Activation('relu')(x2)
    x4 = ks.layers.Conv2D(128, (3, 3), padding='same')(x3)
    x5 = b_n()(x4)
    x6 = ks.layers.add([x00, x5])
    x7 = ks.layers.core.Activation('relu')(x6)
    # residual end
    flat = core.Flatten()(x7)
    dense1 = Dense(1000, activation='relu')(flat)
    out = Dense(2)(dense1)
    model = ks.models.Model(inputs=inputs, outputs=out)
    return model
Example #14
def model(input):
    #  ######### First Network ##########
    x = GRU(units=64, return_sequences=True)(input)
    x = Dropout(0.2)(x)

    x = GRU(units=64, return_sequences=True)(x)
    x = Dropout(0.2)(x)

    output = core.Flatten()(x)
    output = BatchNormalization()(output)

    out = Dense(64, activation="relu",
                kernel_initializer='glorot_normal')(output)
    out = Dense(2, activation="softmax",
                kernel_initializer='glorot_normal')(out)

    #  ########## Set Cnn ##########
    cnn = Model(inputs=input, outputs=out)
    cnn.summary()
    adam = keras.optimizers.Adam()
    cnn.compile(loss='binary_crossentropy',
                optimizer=adam,
                metrics=[keras.metrics.binary_accuracy])  # Nadam
    return cnn
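A usage sketch: the argument must be a Keras Input tensor with a (timesteps, features) shape; the values below are hypothetical:

from keras.layers import Input

inp = Input(shape=(33, 21))  # hypothetical window length and feature count
cnn = model(inp)
# Train with trainX shaped (n_samples, 33, 21) and one-hot two-column labels:
# cnn.fit(trainX, trainY, validation_data=(valX, valY), epochs=..., batch_size=...)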
Example #15
if __name__ == '__main__':

    # Read the pre-split training and validation data

    df_train = pd.read_csv('train.csv')
    df_valid = pd.read_csv('test.csv')

    # CNN Model Architecture
    model = models.Sequential()
    model.add(convolutional.Convolution2D(16, 3, 3, input_shape=(32, 128, 3), activation='relu'))
    model.add(pooling.MaxPooling2D(pool_size=(2, 2)))
    model.add(convolutional.Convolution2D(32, 3, 3, activation='relu'))
    model.add(pooling.MaxPooling2D(pool_size=(2, 2)))
    model.add(convolutional.Convolution2D(64, 3, 3, activation='relu'))
    model.add(pooling.MaxPooling2D(pool_size=(2, 2)))
    model.add(core.Flatten())
    model.add(core.Dense(500, activation='relu'))
    model.add(core.Dropout(.5))
    model.add(core.Dense(100, activation='relu'))
    model.add(core.Dropout(.25))
    model.add(core.Dense(20, activation='relu'))
    model.add(core.Dense(1))
    model.compile(optimizer=optimizers.Adam(lr=1e-04), loss='mean_squared_error')

    # load the exist model
    model.load_weights("model.h5")

    # Continue training the model for 17 epochs
    history = model.fit_generator(
        generate_samples(df_train, ''),
        samples_per_epoch=df_train.shape[0],
        nb_epoch=17,  # 0.016
Example #16
 def test_flatten(self):
     layer = core.Flatten()
     self._runner(layer)
Example #17
cnn.add(kpool.MaxPooling2D(strides=(2, 2)))

conv_dim_2 = 3
cnn.add(kconv.ZeroPadding2D((1, 1)))
cnn.add(kconv.Convolution2D(num_filters_2, conv_dim_2, conv_dim_2, activation="relu"))
cnn.add(kpool.MaxPooling2D(strides=(2, 2)))

cnn.add(kconv.ZeroPadding2D((1, 1)))
cnn.add(kconv.Convolution2D(num_filters_2, conv_dim_2, conv_dim_2, activation="relu"))
cnn.add(kpool.MaxPooling2D(strides=(2, 2)))

cnn.add(kconv.ZeroPadding2D((1, 1)))
cnn.add(kconv.Convolution2D(num_filters_2, conv_dim_2, conv_dim_2, activation="relu"))


cnn.add(kcore.Flatten())
cnn.add(kcore.Dropout(0.5))
cnn.add(kcore.Dense(128, activation="relu")) # 4096
cnn.add(kcore.Dense(num_class, activation="softmax"))

cnn.summary()
myadadelta = kopt.Adadelta(lr=0.65, rho=0.95, epsilon=1e-08, decay=0.001)

cnn.compile(loss="categorical_crossentropy", optimizer=myadadelta, metrics=["accuracy"])
cnn.fit(train_X, train_Y, batch_size=128, nb_epoch=180, verbose=1)
#
test_X = test_data_raw.reshape(test_data_raw.shape[0], 1, 28, 28)
test_X = test_X.astype(float)
test_X /= 255.0

yPred = cnn.predict_classes(test_X)
Example #18
def HydrophobicityNetwork(trainX,
                          trainY,
                          valX,
                          valY,
                          physical_H_input,
                          folds,
                          train_time=None):

    if (train_time == 0):
        x = core.Flatten()(physical_H_input)
        x = BatchNormalization()(x)

        x = Dense(1024, init='glorot_normal', activation='relu')(x)
        x = BatchNormalization()(x)
        x = Dropout(0.4)(x)

        x = Dense(512, init='glorot_normal', activation='relu')(x)
        x = BatchNormalization()(x)
        x = Dropout(0.3)(x)

        x = Dense(256, init='glorot_normal', activation='relu')(x)
        x = BatchNormalization()(x)
        x = Dropout(0.2)(x)

        x = Dense(128, init='glorot_normal', activation='relu')(x)
        x = BatchNormalization()(x)
        x = Dropout(0.1)(x)

        physical_H_output = Dense(2,
                                  init='glorot_normal',
                                  activation='softmax',
                                  W_regularizer=l2(0.001))(x)

        HydrophobicityNetwork = Model(physical_H_input, physical_H_output)

        # optimization = SGD(lr=0.01, momentum=0.9, nesterov= True)
        optimization = 'Nadam'
        HydrophobicityNetwork.compile(loss='binary_crossentropy',
                                      optimizer=optimization,
                                      metrics=[keras.metrics.binary_accuracy])
    else:
        HydrophobicityNetwork = load_model('model/' + str(folds) + '/model/' +
                                           str(train_time - 1) +
                                           'HydrophobicityNetwork.h5')

    if (trainY is not None):
        weight_checkpointer = ModelCheckpoint(
            filepath='./model/' + str(folds) + '/weight/' + str(train_time) +
            'Hydrophobicityweight.h5',
            verbose=1,
            save_best_only=True,
            monitor='val_loss',
            mode='min',
            save_weights_only=True)
        early_stopping = EarlyStopping(monitor='val_loss',
                                       mode='min',
                                       patience=50)
        loss_checkpointer = LossModelCheckpoint(
            model_file_path='model/' + str(folds) + '/model/' +
            str(train_time) + 'HydrophobicityNetwork.h5',
            monitor_file_path='model/' + str(folds) + '/loss/' +
            str(train_time) + 'Hydrophobicityloss.json',
            verbose=1,
            save_best_only=True,
            monitor='val_loss',
            mode='min')

        fitHistory = HydrophobicityNetwork.fit(
            trainX,
            trainY,
            batch_size=512,
            epochs=5000,
            verbose=2,
            validation_data=(valX, valY),
            shuffle=True,
            class_weight='auto',
            callbacks=[early_stopping, loss_checkpointer, weight_checkpointer])
    return HydrophobicityNetwork
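LossModelCheckpoint is a custom callback not shown in the source. A hypothetical reconstruction of what its arguments suggest: save the best model like ModelCheckpoint, and dump the monitored metric's history to a JSON file:

import json
import numpy as np
from keras.callbacks import Callback

class LossModelCheckpoint(Callback):
    def __init__(self, model_file_path, monitor_file_path, verbose=0,
                 save_best_only=True, monitor='val_loss', mode='min'):
        super(LossModelCheckpoint, self).__init__()
        self.model_file_path = model_file_path
        self.monitor_file_path = monitor_file_path
        self.verbose = verbose
        self.save_best_only = save_best_only
        self.monitor = monitor
        self.mode = mode
        self.best = np.inf if mode == 'min' else -np.inf
        self.history = []

    def on_epoch_end(self, epoch, logs=None):
        current = (logs or {}).get(self.monitor)
        if current is None:
            return
        self.history.append(float(current))
        improved = current < self.best if self.mode == 'min' else current > self.best
        if improved:
            self.best = current
        if improved or not self.save_best_only:
            if self.verbose:
                print('Epoch %d: saving model to %s' % (epoch + 1, self.model_file_path))
            self.model.save(self.model_file_path)
        with open(self.monitor_file_path, 'w') as f:
            json.dump({self.monitor: self.history}, f)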
Example #19
def OOKCNN(trainX,
           trainY,
           nb_epoch,
           earlystop=None,
           compiletimes=0,
           compilemodels=None,
           batch_size=2048,
           class_weights={
               0: 1,
               1: 1
           },
           predict=False):
    #Set Oneofkey Network Size and Data
    input_row = trainX.shape[2]
    input_col = trainX.shape[3]
    trainX_t = trainX
    # Early_stop
    if (earlystop is not None):
        early_stopping = EarlyStopping(monitor='val_loss',
                                       mode='min',
                                       patience=earlystop)
    # nb_epoch is expected to be very large, since early stopping ends training
    # TrainX_t For Shape
    trainX_t.shape = (trainX_t.shape[0], input_row, input_col)
    input = Input(shape=(input_row, input_col))

    # params = {'dropout1': 0.09055921027754717, 'dropout2': 0.6391239298866936, 'dropout3': 0.4494981811340072,
    #  'dropout4': 0.13858850326177857, 'dropout5': 0.37168935754516325, 'layer1_node': 21.380001953812567,
    #  'layer1_size': 1, 'layer2_node': 42.3937544103545, 'layer2_size': 16, 'layer3_node': 184.87943202539697,
    #  'layer4_node': 61.85302597240724, 'layer5_node': 415.9952475249118, 'nb_epoch': 178, 'windowSize': 16}

    # layer1_node = int(params["layer1_node"])
    # layer2_node = int(params["layer2_node"])
    # layer3_node = int(params["layer3_node"])
    # layer4_node = int(params["layer4_node"])
    # layer5_node = int(params["layer5_node"])
    # layer1_size = params["layer1_size"]
    # layer2_size = params["layer2_size"]
    # dropout1 = params["dropout1"]
    # dropout2 = params["dropout2"]
    # dropout3 = params["dropout3"]
    # dropout4 = params["dropout4"]
    # dropout5 = params["dropout5"]

    if compiletimes == 0:
        # Total Set Classes
        nb_classes = 2
        # Total Set Batch_size
        batch_size = 8192
        # Total Set Optimizer
        # optimizer = SGD(lr=0.0001, momentum=0.9, nesterov= True)
        optimization = 'Nadam'
        #begin of Oneofkey Network

        # x = conv.Conv1D(layer1_node, layer1_size, name="layer1", kernel_initializer="glorot_normal",
        #                 kernel_regularizer=l2(0), padding="same")(input)
        # x = Dropout(dropout1)(input)
        # x = Activation('softsign')(x)
        #
        # x = conv.Conv1D(layer2_node, layer2_size, name="layer2", kernel_initializer="glorot_normal",
        #                 kernel_regularizer=l2(0), padding="same")(x)
        # x = Dropout(dropout2)(x)
        # x = Activation('softsign')(x)
        #
        # output_x = core.Flatten()(x)
        # output = BatchNormalization()(output_x)
        # output = Dropout(dropout3)(output)
        #
        # # attention_probs = Dense(1155, activation='softmax', name='attention_probs')(output)
        # # attention_mul = Multiply()([output, attention_probs])
        #
        # output = Dense(layer3_node, kernel_initializer='glorot_normal', activation='relu', name='layer3')(output)
        # output = Dropout(dropout4)(output)
        # output = Dense(layer4_node, kernel_initializer='glorot_normal', activation="relu", name='layer4')(output)
        # output = Dropout(dropout5)(output)
        # output = Dense(layer5_node, kernel_initializer='glorot_normal', activation="relu", name='layer5')(output)
        # End of Oneofkey Network
        # out = Dense(nb_classes, kernel_initializer='glorot_normal', activation='softmax', kernel_regularizer=l2(0.001),
        #             name='7')(output)
        #
        # cnn = Model(input, out)
        # cnn.compile(loss=keras.losses.binary_crossentropy, optimizer=optimization, metrics=[keras.metrics.binary_accuracy])
        x = conv.Conv1D(51,
                        2,
                        name="0",
                        kernel_initializer="glorot_normal",
                        kernel_regularizer=l2(0),
                        padding="same")(input)
        x = Dropout(0.3)(x)
        x = Activation('softsign')(x)

        x = conv.Conv1D(21,
                        3,
                        name="1",
                        kernel_initializer="glorot_normal",
                        kernel_regularizer=l2(0),
                        padding="same")(x)
        x = Dropout(0.4)(x)
        x = Activation('softsign')(x)

        # x = conv.Conv1D(21, 5, name="2", kernel_initializer="glorot_normal", kernel_regularizer=l2(0), padding="same")(x)
        # x = Dropout(0.4)(x)
        # x = Activation('softsign')(x)
        #
        # x = conv.Conv1D(101, 7, name="3", kernel_initializer="glorot_normal", kernel_regularizer=l2(0), padding="same")(x)
        # x = Activation('softsign')(x)
        # # x_reshape = core.Reshape((x._keras_shape[2], x._keras_shape[1]))(x)
        # x = Dropout(0.4)(x)

        output_x = core.Flatten()(x)
        output = BatchNormalization()(output_x)
        output = Dropout(0.3)(output)

        # attention_probs = Dense(1155, activation='softmax', name='attention_probs')(output)
        # attention_mul = Multiply()([output, attention_probs])

        output = Dense(128,
                       kernel_initializer='glorot_normal',
                       activation='relu',
                       name='4')(output)
        output = Dropout(0.2)(output)
        output = Dense(64,
                       kernel_initializer='glorot_normal',
                       activation="relu",
                       name='5')(output)
        output = Dropout(0.2)(output)
        output = Dense(415,
                       kernel_initializer='glorot_normal',
                       activation="relu",
                       name='6')(output)
        # End of Oneofkey Network
        out = Dense(nb_classes,
                    kernel_initializer='glorot_normal',
                    activation='softmax',
                    kernel_regularizer=l2(0.001),
                    name='7')(output)

        cnn = Model(input, out)
        cnn.compile(loss=keras.losses.binary_crossentropy,
                    optimizer=optimization,
                    metrics=[keras.metrics.binary_accuracy])
    else:
        cnn = compilemodels

    oneofkclass_weights = class_weights

    if (predict is False):
        if (trainY is not None):
            if (earlystop is None):
                fitHistory = cnn.fit(trainX_t,
                                     trainY,
                                     batch_size=batch_size,
                                     epochs=nb_epoch)
            else:
                # checkpointer = ModelCheckpoint(filepath='oneofk.h5', verbose=1, save_best_only=True)
                weight_checkpointer = ModelCheckpoint(
                    filepath='oneofkweight9.h5',
                    verbose=0,
                    save_best_only=True,
                    monitor='val_binary_accuracy',
                    mode='max',
                    save_weights_only=True)
                fitHistory = cnn.fit(
                    trainX_t,
                    trainY,
                    batch_size=batch_size,
                    epochs=nb_epoch,
                    shuffle=True,
                    validation_split=0.2,
                    callbacks=[early_stopping, weight_checkpointer],
                    class_weight=oneofkclass_weights,
                    verbose=0)
        else:
            fitHistory = cnn.fit(trainX_t,
                                 trainY,
                                 batch_size=batch_size,
                                 epochs=nb_epoch)
    return cnn
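The compiletimes/compilemodels arguments implement a compile-once, train-repeatedly pattern: the network is built only on the first call, and the already-compiled model is passed back in afterwards. A usage sketch (the round count and the resample_bootstrap helper are hypothetical):

cnn = None
for t in range(10):  # hypothetical number of bootstrap rounds
    # Hypothetical helper: yields trainX shaped (n, 1, rows, cols) and one-hot labels.
    trainX, trainY = resample_bootstrap()
    cnn = OOKCNN(trainX, trainY, nb_epoch=500, earlystop=20,
                 compiletimes=t, compilemodels=cnn)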
Example #20
cnn.add(conv.Convolution2D(64, 3, 3, activation="relu"))
cnn.add(conv.ZeroPadding2D((1, 1)))
cnn.add(conv.Convolution2D(64, 3, 3, activation="relu"))
cnn.add(conv.MaxPooling2D(strides=(2, 2)))

# cnn.add(conv.ZeroPadding2D((1, 1)))
# cnn.add(conv.Convolution2D(nb_filters_3, nb_conv, nb_conv, activation="relu"))
# cnn.add(conv.ZeroPadding2D((1, 1)))
# cnn.add(conv.Convolution2D(nb_filters_3, nb_conv, nb_conv, activation="relu"))
# cnn.add(conv.ZeroPadding2D((1, 1)))
# cnn.add(conv.Convolution2D(nb_filters_3, nb_conv, nb_conv, activation="relu"))
# cnn.add(conv.ZeroPadding2D((1, 1)))
# cnn.add(conv.Convolution2D(nb_filters_3, nb_conv, nb_conv, activation="relu"))
# cnn.add(conv.MaxPooling2D(strides=(2,2)))

cnn.add(core.Flatten())
cnn.add(core.Dropout(0.2))
cnn.add(core.Dense(128, activation="relu"))  # 4096
cnn.add(core.Dense(nb_classes, activation="softmax"))

cnn.summary()
cnn.compile(loss="categorical_crossentropy",
            optimizer="adadelta",
            metrics=["accuracy"])

cnn.fit(trainX, trainY, batch_size=128, nb_epoch=1, verbose=1)

testX = test.reshape(test.shape[0], 1, 48, 48)
testX = testX.astype(float)
testX /= 255.0
Example #21
def OtherNetwork(trainX,
                 trainY,
                 valX,
                 valY,
                 physical_O_input,
                 folds,
                 train_time=None):

    if (train_time == 0):
        x = core.Flatten()(physical_O_input)
        x = BatchNormalization()(x)

        x = Dense(256, init='glorot_normal', activation='relu')(x)
        x = BatchNormalization()(x)
        x = Dropout(0.2)(x)

        x = Dense(128, init='glorot_normal', activation='relu')(x)
        x = BatchNormalization()(x)
        x = Dropout(0.2)(x)

        x = Dense(64, init='glorot_normal', activation='relu')(x)
        x = BatchNormalization()(x)
        x = Dropout(0.1)(x)

        x = Dense(32, init='glorot_normal', activation='relu')(x)
        x = BatchNormalization()(x)

        physical_O_output = Dense(2,
                                  init='glorot_normal',
                                  activation='softmax',
                                  W_regularizer=l2(0.001))(x)

        OtherNetwork = Model(physical_O_input, physical_O_output)

        optimization = Nadam(lr=0.0001,
                             beta_1=0.9,
                             beta_2=0.999,
                             epsilon=1e-08,
                             schedule_decay=0.004)

        OtherNetwork.compile(loss='binary_crossentropy',
                             optimizer=optimization,
                             metrics=[keras.metrics.binary_accuracy])
    else:
        OtherNetwork = load_model('model/' + str(folds) + '/model/' +
                                  str(train_time - 1) + 'OtherNetwork.h5')

    if (trainY is not None):
        weight_checkpointer = ModelCheckpoint(
            filepath='./model/' + str(folds) + '/weight/' + str(train_time) +
            'Otherweight.h5',
            verbose=1,
            save_best_only=True,
            monitor='val_loss',
            mode='min',
            save_weights_only=True)
        early_stopping = EarlyStopping(monitor='val_loss',
                                       mode='min',
                                       patience=200)
        loss_checkpointer = LossModelCheckpoint(
            model_file_path='model/' + str(folds) + '/model/' +
            str(train_time) + 'OtherNetwork.h5',
            monitor_file_path='model/' + str(folds) + '/loss/' +
            str(train_time) + 'Otherloss.json',
            verbose=1,
            save_best_only=True,
            monitor='val_loss',
            mode='min')

        fitHistory = OtherNetwork.fit(
            trainX,
            trainY,
            batch_size=512,
            epochs=5000,
            verbose=2,
            validation_data=(valX, valY),
            shuffle=True,
            class_weight='auto',
            callbacks=[early_stopping, loss_checkpointer, weight_checkpointer])
    return OtherNetwork
Example #22
def MCNN_best(trainX1, trainX2, trainY1, valX1, valX2, valY1, input_1, input_2,
              i, class_weights, t):
    if (t == 0):
        print("####################################bootstrap iteration ", t,
              "#############fold iteration ", i, "\n")
        onehot_secstr = conv.Conv1D(
            5,
            10,
            kernel_initializer='glorot_normal',
            kernel_regularizer=l2(0.0010949235793883667),
            padding='valid',
            name='0_secstr')(input_1)
        onehot_secstr = Dropout(0.745150134528914)(onehot_secstr)
        onehot_secstr = keras.layers.advanced_activations.PReLU(
            alpha_initializer='zeros',
            alpha_regularizer=None,
            alpha_constraint=None,
            shared_axes=None)(onehot_secstr)
        onehot_secstr = core.Flatten()(onehot_secstr)
        onehot_secstr2 = conv.Conv1D(
            9,
            4,
            kernel_initializer='glorot_normal',
            kernel_regularizer=l2(0.03405758144816304),
            padding='valid',
            name='1_secstr')(input_1)
        onehot_secstr2 = Dropout(0.36965944352568686)(onehot_secstr2)
        onehot_secstr2 = keras.layers.advanced_activations.PReLU(
            alpha_initializer='zeros',
            alpha_regularizer=None,
            alpha_constraint=None,
            shared_axes=None)(onehot_secstr2)
        onehot_secstr2 = core.Flatten()(onehot_secstr2)
        output_onehot_sec = concatenate([onehot_secstr, onehot_secstr2],
                                        axis=-1)
        onehot_x = conv.Conv1D(5,
                               10,
                               kernel_initializer='glorot_normal',
                               kernel_regularizer=l2(0.03217477728270726),
                               padding='valid',
                               name='0')(input_2)
        onehot_x = Dropout(0.6653716368558287)(onehot_x)
        onehot_x = keras.layers.advanced_activations.PReLU(
            alpha_initializer='zeros',
            alpha_regularizer=None,
            alpha_constraint=None,
            shared_axes=None)(onehot_x)
        onehot_x = core.Flatten()(onehot_x)
        onehot_x2 = conv.Conv1D(9,
                                4,
                                kernel_initializer='glorot_normal',
                                kernel_regularizer=l2(0.01608962762003551),
                                padding='valid',
                                name='1')(input_2)
        onehot_x2 = Dropout(0.038045356303735206)(onehot_x2)
        onehot_x2 = keras.layers.advanced_activations.PReLU(
            alpha_initializer='zeros',
            alpha_regularizer=None,
            alpha_constraint=None,
            shared_axes=None)(onehot_x2)
        onehot_x2 = core.Flatten()(onehot_x2)
        output_onehot_seq = concatenate([onehot_x, onehot_x2], axis=-1)
        final_output = concatenate([output_onehot_sec, output_onehot_seq])
        dense_out = Dense(512,
                          kernel_initializer='glorot_normal',
                          activation='softplus',
                          name='dense_concat')(final_output)
        out = Dense(2,
                    activation="softmax",
                    kernel_initializer='glorot_normal',
                    name='6')(dense_out)
        ########## Set Net ##########
        cnn = Model(inputs=[input_1, input_2], outputs=out)
        cnn.load_weights('weightsfile_hyperasbest.h5')
        cnn.summary()
        adam = Adam(lr=0.08582474007227135)
        nadam = Nadam(lr=0.0014045290291504406)
        rmsprop = RMSprop(lr=0.037289952982092284)
        sgd = SGD(lr=0.01373965388919854)
        optim = sgd
        ##        choiceval = {{choice(['adam', 'sgd', 'rmsprop','nadam'])}}
        ##        if choiceval == 'adam':
        ##            optim = adam
        ##        elif choiceval == 'rmsprop':
        ##            optim = rmsprop
        ##        elif choiceval=='nadam':
        ##            optim = nadam
        ##        else:
        ##            optim = sgd
        cnn.compile(loss='binary_crossentropy',
                    optimizer=optim,
                    metrics=[keras.metrics.binary_accuracy])  # Nadam
        early_stopping = EarlyStopping(monitor='val_loss', patience=20)
        checkpointer = ModelCheckpoint(
            filepath='%d-secstr_seq_denseconcat_60perc_best.h5' % i,
            verbose=1,
            save_best_only=True,
            monitor='val_loss',
            mode='min')
        fitHistory = cnn.fit([trainX1, trainX2],
                             trainY1,
                             batch_size=32,
                             epochs=500,
                             validation_data=([valX1, valX2], valY1),
                             callbacks=[checkpointer, early_stopping],
                             class_weight=class_weights)
        myjson_file = "myhist_" + "dict_" + "secstr_seq_denseconcat_60perc_best_" + str(
            i)
        json.dump(fitHistory.history, open(myjson_file, 'w'))
        return cnn, fitHistory
    else:
        print("####################################bootstrap iteration ", t,
              "#############fold iteration ", i, "\n")
        cnn = models.load_model('%d-secstr_seq_denseconcat_60perc_best.h5' % i)
        early_stopping = EarlyStopping(monitor='val_loss', patience=20)
        checkpointer = ModelCheckpoint(
            filepath='%d-secstr_seq_denseconcat_60perc_best.h5' % i,
            verbose=1,
            save_best_only=True,
            monitor='val_loss',
            mode='min')
        fitHistory = cnn.fit([trainX1, trainX2],
                             trainY1,
                             batch_size=32,
                             epochs=500,
                             validation_data=([valX1, valX2], valY1),
                             class_weight=class_weights,
                             callbacks=[checkpointer, early_stopping])
        myjson_file = "myhist_" + "dict_" + "secstr_seq_denseconcat_60perc_best_" + str(
            i)
        json.dump(fitHistory.history, open(myjson_file, 'a'))
        return cnn, fitHistory
Example #23
def mixallDNNmodel(trainX,
                   trainY,
                   valX,
                   valY,
                   physical_D_input,
                   folds,
                   train_time=None):

    if (train_time == 0):

        x = core.Flatten()(physical_D_input)
        x = BatchNormalization()(x)

        x = Dense(2048, init='glorot_normal', activation='relu')(x)
        x = BatchNormalization()(x)
        x = Dropout(0.4)(x)

        x = Dense(512, init='glorot_normal', activation='relu')(x)
        x = BatchNormalization()(x)
        x = Dropout(0.4)(x)

        x = Dense(128, init='glorot_normal', activation='relu')(x)
        x = BatchNormalization()(x)
        x = Dropout(0.4)(x)

        x = Dense(64, init='glorot_normal', activation='relu')(x)
        x = BatchNormalization()(x)
        x = Dropout(0.1)(x)

        physical_D_output = Dense(2,
                                  init='glorot_normal',
                                  activation='softmax',
                                  W_regularizer=l2(0.001))(x)

        mixallDNNmodel = Model(physical_D_input, physical_D_output)

        optimization = 'Nadam'
        mixallDNNmodel.compile(loss='binary_crossentropy',
                               optimizer=optimization,
                               metrics=[keras.metrics.binary_accuracy])
    else:
        mixallDNNmodel = load_model('model/' + str(folds) + '/model/' +
                                    str(train_time - 1) + 'DNNNetwork.h5')

    if (trainY is not None):

        weight_checkpointer = ModelCheckpoint(
            filepath='./model/' + str(folds) + '/weight/' + str(train_time) +
            'DNNweight.h5',
            verbose=1,
            save_best_only=True,
            monitor='val_loss',
            mode='min',
            save_weights_only=True)
        loss_checkpointer = LossModelCheckpoint(
            model_file_path='model/' + str(folds) + '/model/' +
            str(train_time) + 'DNNNetwork.h5',
            monitor_file_path='model/' + str(folds) + '/loss/' +
            str(train_time) + 'DNNloss.json',
            verbose=1,
            save_best_only=True,
            monitor='val_loss',
            mode='min')
        early_stopping = EarlyStopping(monitor='val_loss',
                                       mode='min',
                                       patience=50)  # not defined in the source; patience value assumed
        fitHistory = mixallDNNmodel.fit(
            trainX,
            trainY,
            batch_size=4096,
            epochs=50,
            shuffle=True,
            callbacks=[early_stopping, loss_checkpointer, weight_checkpointer],
            class_weight='auto',
            validation_data=(valX, valY))
    return mixallDNNmodel
Example #24
def MultiCNN(train_ook_X,
             trainAAIndexX,
             train_ook_Y,
             nb_epoch,
             earlystop=None,
             compiletimes=0,
             batch_size=2048,
             predict=False,
             compileModel=None,
             class_weight={
                 0: 0.5,
                 1: 0.5
             },
             verbose=1,
             model_id=0):
    # Set Oneofkey Data
    ook_row = train_ook_X.shape[2]
    ook_col = train_ook_X.shape[3]
    ook_x_t = train_ook_X
    ook_x_t.shape = (ook_x_t.shape[0], ook_row, ook_col)
    ook_input = Input(shape=(ook_row, ook_col))
    # AAindex
    aaindex_x_t = trainAAIndexX
    aaindex_row = trainAAIndexX.shape[2]
    aaindex_col = trainAAIndexX.shape[3]
    aaindex_x_t.shape = (trainAAIndexX.shape[0], aaindex_row, aaindex_col)
    aaindex_input = Input(shape=(aaindex_row, aaindex_col))

    if (earlystop is not None):
        early_stopping = EarlyStopping(monitor='val_loss',
                                       mode='min',
                                       patience=earlystop)

    if compiletimes == 0:
        # Total Set Classes
        nb_classes = 2
        # Total Set Batch_size
        batch_size = 8192
        # Total Set Optimizer
        # optimizer = SGD(lr=0.0001, momentum=0.9, nesterov= True)
        optimization = 'Nadam'
        # begin of Oneofkey Network
        ook_x = conv.Conv1D(51,
                            2,
                            name="0",
                            kernel_initializer="glorot_normal",
                            kernel_regularizer=l2(0),
                            padding="same")(ook_input)
        ook_x = Dropout(0.3)(ook_x)
        ook_x = Activation('softsign')(ook_x)

        ook_x = conv.Conv1D(21,
                            3,
                            name="1",
                            kernel_initializer="glorot_normal",
                            kernel_regularizer=l2(0),
                            padding="same")(ook_x)
        ook_x = Dropout(0.4)(ook_x)
        ook_x = Activation('softsign')(ook_x)

        output_ook_x = core.Flatten()(ook_x)
        output_ook_x = BatchNormalization()(output_ook_x)
        output_ook_x = Dropout(0.3)(output_ook_x)

        output_ook_x = Dense(128,
                             kernel_initializer='glorot_normal',
                             activation='relu',
                             name='2')(output_ook_x)
        output_ook_x = Dropout(0.2)(output_ook_x)
        output_ook_x = Dense(64,
                             kernel_initializer='glorot_normal',
                             activation="relu",
                             name='3')(output_ook_x)
        output_ook_x = Dropout(0.2)(output_ook_x)
        # below modified
        output_ook_x = Dense(415,
                             kernel_initializer='glorot_normal',
                             activation="relu",
                             name='4')(output_ook_x)
        # output_ook_x = Dense(nb_classes, kernel_initializer='glorot_normal', activation='softmax', kernel_regularizer=l2(0.001),
        #             name='7')(output_ook_x)
        # End of Oneofkey Network

        # start with AAindex Dnn
        aaindex_x = core.Flatten()(aaindex_input)
        attention_probs = Dense(aaindex_row * aaindex_col,
                                activation='softmax',
                                name='5')(aaindex_x)
        aaindex_x = Multiply()([aaindex_x, attention_probs])
        aaindex_x = BatchNormalization()(aaindex_x)
        aaindex_x = Dense(256,
                          kernel_initializer='he_uniform',
                          activation='relu',
                          name='6')(aaindex_x)
        aaindex_x = Dropout(0.6)(aaindex_x)
        aaindex_x = Dense(128,
                          kernel_initializer='he_uniform',
                          activation='softplus',
                          name='7')(aaindex_x)
        # aaindex_x = BatchNormalization()(aaindex_x)
        aaindex_x = Dropout(0.55)(aaindex_x)

        aaindex_x = GaussianNoise(10)(aaindex_x)

        output_aaindex_x = Dense(64,
                                 kernel_initializer='glorot_normal',
                                 activation='relu',
                                 name='8')(aaindex_x)

        # output_aaindex_x = Dense(nb_classes, kernel_initializer='glorot_normal', activation='softmax',
        #                          kernel_regularizer=l2(0.001), name='19')(output_aaindex_x)

        # output = Maximum()([output_ook_x, output_aaindex_x])
        output = Concatenate()([output_ook_x, output_aaindex_x])
        output = BatchNormalization()(output)

        # output = Dense(64, activation="relu", kernel_initializer="he_normal", kernel_regularizer=l2(0.001), name="21")(output)
        # output = BatchNormalization()(output)
        output = Dense(128,
                       activation="relu",
                       kernel_initializer="he_normal",
                       kernel_regularizer=l2(0.001),
                       name="9")(output)
        output = Dropout(0.6)(output)
        output = Dense(64,
                       activation="relu",
                       kernel_initializer="he_normal",
                       kernel_regularizer=l2(0.001),
                       name="10")(output)
        output = Dropout(0.5)(output)
        output = Dense(16,
                       activation="relu",
                       kernel_initializer="he_normal",
                       kernel_regularizer=l2(0.001),
                       name="11")(output)
        out = Dense(nb_classes,
                    kernel_initializer='glorot_normal',
                    activation='softmax',
                    kernel_regularizer=l2(0.001),
                    name='12')(output)

        multinn = Model([ook_input, aaindex_input], out)
        multinn.compile(loss=keras.losses.binary_crossentropy,
                        optimizer=optimization,
                        metrics=[keras.metrics.binary_accuracy])

    else:
        multinn = compileModel

    oneofkclass_weights = class_weight

    if (earlystop is None):
        fitHistory = multinn.fit([ook_x_t, aaindex_x_t],
                                 train_ook_Y,
                                 batch_size=batch_size,
                                 epochs=nb_epoch)
    else:
        weight_checkpointer = ModelCheckpoint(filepath='temp/temp.h5',
                                              verbose=verbose,
                                              save_best_only=True,
                                              monitor='val_binary_accuracy',
                                              mode='auto',
                                              save_weights_only=True)
        fitHistory = multinn.fit(
            [ook_x_t, aaindex_x_t],
            train_ook_Y,
            batch_size=batch_size,
            epochs=nb_epoch,
            shuffle=True,
            validation_split=0.2,
            callbacks=[early_stopping, weight_checkpointer],
            class_weight=oneofkclass_weights,
            verbose=verbose)
    return multinn
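A usage sketch for the two-branch network; both inputs arrive as 4-D arrays that the function flattens to 3-D in place. All shapes are hypothetical:

import os
import numpy as np
from keras.utils import to_categorical

os.makedirs('temp', exist_ok=True)  # the weight checkpoint path assumes a temp/ directory

train_ook_X = np.random.rand(1024, 1, 33, 21)     # hypothetical one-of-key windows
trainAAIndexX = np.random.rand(1024, 1, 33, 531)  # hypothetical AAindex feature windows
train_ook_Y = to_categorical(np.random.randint(2, size=1024), 2)

multinn = MultiCNN(train_ook_X, trainAAIndexX, train_ook_Y,
                   nb_epoch=300, earlystop=30)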
Example #25
                                   strides=(1, 1),
                                   padding='same',
                                   activation='relu')(pool_layer2)
pool_layer3 = pooling.MaxPooling2D(pool_size=(2, 2),
                                   strides=None,
                                   padding='valid',
                                   data_format=None)(conv_layer4)
conv_layer5 = convolutional.Conv2D(32, (3, 3),
                                   strides=(1, 1),
                                   padding='same',
                                   activation='relu')(conv_layer3)
pool_layer4 = pooling.MaxPooling2D(pool_size=(2, 2),
                                   strides=None,
                                   padding='valid',
                                   data_format=None)(conv_layer5)
flatten_layer = core.Flatten()(pool_layer4)
hidden1 = core.Dense(64, activation='relu')(flatten_layer)

inp_2 = Input(shape=(img_width, img_height, 3))
conv_layer1_2 = convolutional.Conv2D(8, (3, 3),
                                     strides=(1, 1),
                                     padding='same',
                                     activation='relu')(inp_2)
conv_layer2_2 = convolutional.Conv2D(8, (3, 3),
                                     strides=(1, 1),
                                     padding='same',
                                     activation='relu')(conv_layer1_2)
pool_layer1_2 = pooling.MaxPooling2D(pool_size=(2, 2),
                                     strides=None,
                                     padding='valid',
                                     data_format=None)(conv_layer2_2)
Example #26
def MCNN(trainX1,trainX2,trainY1,valX1,valX2,valY1,testX1,testX2,testY):
    import json
    import time

    import keras.metrics
    import keras.layers.core as core
    import keras.layers.convolutional as conv
    from keras.layers import Input, Dense, Dropout
    from keras.layers.merge import concatenate
    from keras.models import Model
    from keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard
    from keras.optimizers import Nadam, Adam, RMSprop, SGD
    from keras.regularizers import l2
    from sklearn.metrics import f1_score
    from hyperopt import STATUS_OK  # return status for the hyperas objective

    # `globalvars` (trial counter) and `cwt` (precomputed class weights) are
    # expected to be provided at module level by the surrounding hyperas script.
    row1,col1 = trainX1[0].shape
    input_1 = Input(shape=(row1,col1))
    row2,col2 = trainX2[0].shape
    input_2 = Input(shape=(row2,col2))
    NAME = "combined_secstr_seq_CNN_model_emboss-{}".format(int(time.time()))
    tensorboard = TensorBoard(log_dir="logs/{}".format(NAME))
    onehot_secstr = conv.Conv1D(5, 10, kernel_initializer='glorot_normal',kernel_regularizer=l2({{uniform(0.0001, 0.1)}}), padding='valid', name='0_secstr')(input_1)
    onehot_secstr = Dropout({{uniform(0, 1)}})(onehot_secstr)
    onehot_secstr = keras.layers.advanced_activations.PReLU(alpha_initializer='zeros', alpha_regularizer=None,alpha_constraint=None, shared_axes=None)(onehot_secstr)
    onehot_secstr = core.Flatten()(onehot_secstr)
    onehot_secstr2 = conv.Conv1D(9, 4, kernel_initializer='glorot_normal',kernel_regularizer=l2({{uniform(0.0001, 0.1)}}), padding='valid', name='1_secstr')(input_1)
    onehot_secstr2 = Dropout({{uniform(0, 1)}})(onehot_secstr2)
    onehot_secstr2 = keras.layers.advanced_activations.PReLU(alpha_initializer='zeros', alpha_regularizer=None,alpha_constraint=None, shared_axes=None)(onehot_secstr2)
    onehot_secstr2 = core.Flatten()(onehot_secstr2)
    output_onehot_sec = concatenate([onehot_secstr, onehot_secstr2], axis=-1)
    onehot_x = conv.Conv1D(5, 10, kernel_initializer='glorot_normal',kernel_regularizer=l2({{uniform(0.0001, 0.1)}}), padding='valid', name='0')(input_2)
    onehot_x = Dropout({{uniform(0, 1)}})(onehot_x)
    onehot_x = keras.layers.advanced_activations.PReLU(alpha_initializer='zeros', alpha_regularizer=None,alpha_constraint=None, shared_axes=None)(onehot_x)
    onehot_x = core.Flatten()(onehot_x)
    onehot_x2 = conv.Conv1D(9, 4, kernel_initializer='glorot_normal',kernel_regularizer=l2({{uniform(0.0001, 0.1)}}), padding='valid', name='1')(input_2)
    onehot_x2 = Dropout({{uniform(0, 1)}})(onehot_x2)
    onehot_x2 = keras.layers.advanced_activations.PReLU(alpha_initializer='zeros', alpha_regularizer=None,alpha_constraint=None, shared_axes=None)(onehot_x2)
    onehot_x2 = core.Flatten()(onehot_x2)
    output_onehot_seq = concatenate([onehot_x, onehot_x2], axis=-1)
    final_output = concatenate([output_onehot_sec, output_onehot_seq])
    dense_out = Dense({{choice([20,30,50,60,64,70,80,90,100, 128, 256, 512, 1024])}}, kernel_initializer='glorot_normal', activation='softplus', name='dense_concat')(final_output)
    out = Dense(2, activation="softmax", kernel_initializer='glorot_normal', name='6')(dense_out)
    ########## Set Net ##########
    cnn = Model(inputs=[input_1,input_2], outputs=out)
    cnn.summary()
    adam = Adam(lr={{uniform(0.0001, 0.1)}})
    nadam = Nadam(lr={{uniform(0.0001, 0.1)}})
    rmsprop = RMSprop(lr={{uniform(0.0001, 0.1)}})
    sgd = SGD(lr={{uniform(0.0001, 0.1)}})
    choiceval = {{choice(['adam', 'sgd', 'rmsprop','nadam'])}}
    if choiceval == 'adam':
        optim = adam
    elif choiceval == 'rmsprop':
        optim = rmsprop
    elif choiceval=='nadam':
        optim = nadam
    else:
        optim = sgd
    globalvars.globalVar += 1
    #early_stopping = EarlyStopping(monitor='val_loss', min_delta=0, patience=20, verbose=1, mode='auto')
    cnn.compile(loss='binary_crossentropy', optimizer=optim, metrics=[keras.metrics.binary_accuracy])
    early_stopping = EarlyStopping(monitor='val_loss', patience=20)
    checkpointer = ModelCheckpoint(filepath='%d-secstr_seq_denseconcat.h5' % globalvars.globalVar, verbose=1,save_best_only=True, monitor='val_loss', mode='min')
    fitHistory = cnn.fit([trainX1, trainX2], trainY1,
                         batch_size={{choice([32,64,128,256,512])}},
                         epochs=500,
                         validation_data=([valX1, valX2], valY1),
                         callbacks=[checkpointer, early_stopping, tensorboard],
                         class_weight=cwt.class_weights)
    myjson_file = "myhist__dict_hyperas_model_trial_" + str(globalvars.globalVar)
    with open(myjson_file, 'w') as f:
        json.dump(fitHistory.history, f)
    score, acc = cnn.evaluate([valX1, valX2], valY1, batch_size=32)
    pred_proba = cnn.predict([valX1, valX2], batch_size=32)
    pred_class = (pred_proba[:, 1] > 0.5).astype(int)  # f1_score expects class labels, not probabilities
    true_class = valY1[:, 1].astype(int)
    f1_sc = f1_score(true_class, pred_class)
    print('F1 score:', f1_sc)
    print('Validation loss:', score)
    print('Validation accuracy:', acc)
    return {'loss': -f1_sc, 'status': STATUS_OK, 'model': cnn}
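A template like the one above is not called directly: hyperas rewrites the {{uniform(...)}} and {{choice(...)}} placeholders and then drives the search. A minimal driver sketch, assuming a hypothetical data() function whose return values match the MCNN signature:

from hyperas import optim
from hyperopt import Trials, tpe

# `data` is a hypothetical function returning
# (trainX1, trainX2, trainY1, valX1, valX2, valY1, testX1, testX2, testY),
# matching the parameters of MCNN above.
best_run, best_model = optim.minimize(model=MCNN,
                                      data=data,
                                      algo=tpe.suggest,
                                      max_evals=20,
                                      trials=Trials())
print('Best trial parameters:', best_run)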
Ejemplo n.º 27
0
def architecture():
    # Put the normalization function inside the model so that preprocessing
    # runs in a Lambda layer. Several variants of the Lambda layer caused
    # issues; this one works.
    def resize_normalize(image):
        """
        Resizes the (already cropped) image to 32x128 px and scales pixel
        values to the range [-0.5, 0.5].
        """
        from keras.backend import tf as ktf
        # The NVIDIA end-to-end paper recommends resizing to 66x200 for its
        # CNN; a smaller 32x128 input is used here instead.
        # image = cv2.resize(image, (66, 200))  # first attempt, via OpenCV
        resized = ktf.image.resize_images(image, (32, 128))
        # center pixel values around zero
        resized = resized / 255.0 - 0.5

        return resized

    print('Building the model architecture')
    #initialize model
    model = Sequential()
    #dropout = 0.5
    nonlinear = 'tanh'
    print('Adding the cropping layer')
    ### Convolution layers and parameters were taken from the "nvidia paper" on end-to-end autonomous steering.
    model.add(
        Cropping2D(cropping=((60, 20), (1, 1)), input_shape=(160, 320, 3)))
    print('Adding the Lambda preprocessing layer')
    model.add(
        Lambda(resize_normalize,
               input_shape=(160, 320, 3),
               output_shape=(32, 128, 3)))

    # Model architecture
    model.add(
        Convolution2D(16, 3, 3, input_shape=(32, 128, 3), activation='relu'))
    model.add(pooling.MaxPooling2D(pool_size=(2, 2)))
    model.add(Convolution2D(32, 3, 3, activation='relu'))
    model.add(pooling.MaxPooling2D(pool_size=(2, 2)))
    model.add(Convolution2D(64, 3, 3, activation='relu'))
    model.add(pooling.MaxPooling2D(pool_size=(2, 2)))
    model.add(core.Flatten())
    model.add(core.Dense(500, activation='relu'))
    model.add(core.Dropout(.5))
    model.add(core.Dense(100, activation='relu'))
    model.add(core.Dropout(.25))
    model.add(core.Dense(20, activation='relu'))
    model.add(core.Dense(1))
    model.compile(optimizer=optimizers.Adam(lr=1e-04),
                  loss='mean_squared_error')
    #model.add(Lambda(lambda x: resize_normalize(x), input_shape=(80,318,3), output_shape=(66, 200, 3)))
    # model.add(Convolution2D(24, 5, 5, name='conv1', subsample=(2, 2), activation=nonlinear))
    # model.add(Convolution2D(36, 5, 5, name='conv2', subsample=(2, 2), activation=nonlinear))
    # model.add(Convolution2D(48, 5, 5, name='conv3', subsample=(2, 2), activation=nonlinear))
    # model.add(Convolution2D(64, 3, 3, name='conv4', activation=nonlinear))
    # model.add(Convolution2D(64, 3, 3, name='conv5', activation=nonlinear))

    # ### Regression
    # model.add(Flatten())
    # model.add(Dropout(dropout))
    # model.add(Dense(1164, name='hidden1', activation=nonlinear))
    # model.add(Dropout(dropout))
    # model.add(Dense(100, name='hidden2', activation=nonlinear))
    # model.add(Dropout(dropout))
    # model.add(Dense(50, name='hidden3', activation=nonlinear))
    # model.add(Dropout(dropout))
    # model.add(Dense(10, name='hidden4', activation=nonlinear))
    # model.add(Dropout(dropout))
    # model.add(Dense(1, name='output', activation=nonlinear))

    # #model.compile(optimizer=optimizers.Adam(lr=1e-04), loss='mean_squared_error')
    # model.compile(optimizer='adam', loss='mse')
    print('Finished building the model')
    print(model.summary())
    return model
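A quick smoke test of the returned model on dummy frames (shapes follow the Cropping2D input above; purely illustrative):

import numpy as np

# Dummy 160x320 RGB frames and steering targets, matching the model's
# input_shape and its single regression output.
model = architecture()
X_dummy = np.random.rand(8, 160, 320, 3).astype('float32')
y_dummy = np.random.uniform(-1.0, 1.0, size=(8, 1))
model.fit(X_dummy, y_dummy, batch_size=4, epochs=1)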
Ejemplo n.º 28
0
conv6 = Conv2D(16, (3, 3),  # opening line reconstructed; filter count assumed to match the layer below
               activation='relu',
               padding='same',
               data_format='channels_first')(up3)
conv6 = Dropout(0.2)(conv6)
conv6 = Conv2D(16, (3, 3),
               activation='relu',
               padding='same',
               data_format='channels_first')(conv6)

conv7 = Conv2D(3, (1, 1),
               activation='relu',
               padding='same',
               data_format='channels_first')(conv6)
conv7 = core.Reshape((3, 128 * 128))(conv7)
#conv6 = core.Permute((3,1))(conv6)
conv7 = core.Flatten()(conv7)
#conv7 = core.Dense(64)(conv7)
#conv7 = core.Activation('relu')(conv7)
#conv7 = Dropout(0.2)(conv7)
conv7 = core.Dense(2)(conv7)

############
conv8 = core.Activation('softmax')(conv7)

model = Model(inputs=inputs, outputs=conv8)

# sgd = SGD(lr=0.01, decay=1e-6, momentum=0.3, nesterov=False)
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
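Because the head above ends in a two-way softmax trained with categorical_crossentropy, integer labels must be one-hot encoded before fitting. A small sketch with hypothetical labels:

import numpy as np
from keras.utils.np_utils import to_categorical

# `y_int` stands in for real integer class labels (0 or 1).
y_int = np.random.randint(0, 2, size=(16,))
y_onehot = to_categorical(y_int, num_classes=2)  # shape (16, 2) matches the Dense(2) output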
Ejemplo n.º 29
0
def test_flatten():
    layer = core.Flatten()
    _runner(layer)
def MCNN_26(trainX1, trainX2, trainY1, valX1, valX2, valY1, input_1, input_2,
            i, class_weights, t):
    if (t == 0):
        print("####################################bootstrap iteration ", t,
              "#############fold iteration ", i, "\n")
        onehot_secstr = conv.Conv1D(
            5,
            10,
            kernel_initializer='glorot_normal',
            kernel_regularizer=l2(0.011206678796947282),
            padding='valid',
            name='0_secstr')(input_1)
        onehot_secstr = Dropout(0.9942741825824339)(onehot_secstr)
        onehot_secstr = keras.layers.advanced_activations.PReLU(
            alpha_initializer='zeros',
            alpha_regularizer=None,
            alpha_constraint=None,
            shared_axes=None)(onehot_secstr)
        onehot_secstr = core.Flatten()(onehot_secstr)
        onehot_secstr2 = conv.Conv1D(
            9,
            4,
            kernel_initializer='glorot_normal',
            kernel_regularizer=l2(0.04663753230167181),
            padding='valid',
            name='1_secstr')(input_1)
        onehot_secstr2 = Dropout(0.4084429796653032)(onehot_secstr2)
        onehot_secstr2 = keras.layers.advanced_activations.PReLU(
            alpha_initializer='zeros',
            alpha_regularizer=None,
            alpha_constraint=None,
            shared_axes=None)(onehot_secstr2)
        onehot_secstr2 = core.Flatten()(onehot_secstr2)
        output_onehot_sec = concatenate([onehot_secstr, onehot_secstr2],
                                        axis=-1)
        onehot_x = conv.Conv1D(5,
                               10,
                               kernel_initializer='glorot_normal',
                               kernel_regularizer=l2(0.032491576988669696),
                               padding='valid',
                               name='0')(input_2)
        onehot_x = Dropout(0.5471399358933519)(onehot_x)
        onehot_x = keras.layers.advanced_activations.PReLU(
            alpha_initializer='zeros',
            alpha_regularizer=None,
            alpha_constraint=None,
            shared_axes=None)(onehot_x)
        onehot_x = core.Flatten()(onehot_x)
        onehot_x2 = conv.Conv1D(9,
                                4,
                                kernel_initializer='glorot_normal',
                                kernel_regularizer=l2(0.021775346680719416),
                                padding='valid',
                                name='1')(input_2)
        onehot_x2 = Dropout(0.10926522237224338)(onehot_x2)
        onehot_x2 = keras.layers.advanced_activations.PReLU(
            alpha_initializer='zeros',
            alpha_regularizer=None,
            alpha_constraint=None,
            shared_axes=None)(onehot_x2)
        onehot_x2 = core.Flatten()(onehot_x2)
        output_onehot_seq = concatenate([onehot_x, onehot_x2], axis=-1)
        final_output = concatenate([output_onehot_sec, output_onehot_seq])
        dense_out = Dense(30,
                          kernel_initializer='glorot_normal',
                          activation='softplus',
                          name='dense_concat')(final_output)
        out = Dense(2,
                    activation="softmax",
                    kernel_initializer='glorot_normal',
                    name='6')(dense_out)
        ########## Set Net ##########
        cnn = Model(inputs=[input_1, input_2], outputs=out)
        cnn.load_weights('weightsfile_hyperas26.h5')
        cnn.summary()
        # Optimizer and learning rate fixed from the best hyperas trial
        # (the search originally chose among Adam, SGD, RMSprop and Nadam;
        # SGD won).
        optim = SGD(lr=0.008615890670714792)
        cnn.compile(loss='binary_crossentropy',
                    optimizer=optim,
                    metrics=[keras.metrics.binary_accuracy])
        early_stopping = EarlyStopping(monitor='val_loss', patience=20)
        checkpointer = ModelCheckpoint(
            filepath='%d-secstr_seq_denseconcat_60perc_trial26.h5' % i,
            verbose=1,
            save_best_only=True,
            monitor='val_loss',
            mode='min')
        fitHistory = cnn.fit([trainX1, trainX2],
                             trainY1,
                             batch_size=32,
                             epochs=500,
                             validation_data=([valX1, valX2], valY1),
                             callbacks=[checkpointer, early_stopping],
                             class_weight=class_weights)
        myjson_file = "myhist_" + "dict_" + "secstr_seq_denseconcat_60perc_trial26_" + str(
            i)
        json.dump(fitHistory.history, open(myjson_file, 'w'))
        return cnn, fitHistory
    else:
        print("####################################bootstrap iteration ", t,
              "#############fold iteration ", i, "\n")
        cnn = models.load_model('%d-secstr_seq_denseconcat_60perc_trial26.h5' %
                                i)
        early_stopping = EarlyStopping(monitor='val_loss', patience=20)
        checkpointer = ModelCheckpoint(
            filepath='%d-secstr_seq_denseconcat_60perc_trial26.h5' % i,
            verbose=1,
            save_best_only=True,
            monitor='val_loss',
            mode='min')
        fitHistory = cnn.fit([trainX1, trainX2],
                             trainY1,
                             batch_size=32,
                             epochs=500,
                             validation_data=([valX1, valX2], valY1),
                             class_weight=class_weights,
                             callbacks=[checkpointer, early_stopping])
        myjson_file = "myhist_" + "dict_" + "secstr_seq_denseconcat_60perc_trial26_" + str(
            i)
        json.dump(fitHistory.history, open(myjson_file, 'a'))
        return cnn, fitHistory
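The `t` flag in MCNN_26 implements a warm start: iteration 0 builds the network and loads the hyperas trial weights, while later bootstrap iterations reload the fold's checkpoint and keep training. A sketch of the outer loop, with hypothetical resampling and fold setup:

# Outer-loop sketch for MCNN_26's warm-start scheme. `bootstrap_sample`,
# the fold arrays, `input_1`/`input_2` and `class_weights` are hypothetical
# names standing in for the real experiment setup.
for t in range(5):
    trainX1, trainX2, trainY1 = bootstrap_sample(X1, X2, Y, seed=t)
    cnn, hist = MCNN_26(trainX1, trainX2, trainY1,
                        valX1, valX2, valY1,
                        input_1, input_2, i, class_weights, t)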