Example #1
def make_model(input_shape):
    nn = models.Sequential()
    nn.add(
        layers.SeparableConv1D(64,
                               5,
                               activation='relu',
                               input_shape=(None, input_shape[-1])))
    nn.add(layers.BatchNormalization())
    nn.add(layers.SeparableConv1D(64, 5, activation='relu'))
    nn.add(layers.BatchNormalization())
    nn.add(layers.MaxPooling1D(3))
    nn.add(layers.Dropout(0.3))

    nn.add(layers.SeparableConv1D(128, 5, activation='relu'))
    nn.add(layers.BatchNormalization())
    nn.add(layers.SeparableConv1D(128, 5, activation='relu'))
    nn.add(layers.BatchNormalization())
    # nn.add(layers.MaxPooling1D(3))
    # nn.add(layers.Dropout(0.3))
    #
    # nn.add(layers.SeparableConv1D(512, 5, activation = 'relu'))
    # nn.add(layers.BatchNormalization())
    # nn.add(layers.SeparableConv1D(512, 5, activation = 'relu'))
    # nn.add(layers.BatchNormalization())

    nn.add(
        layers.Bidirectional(
            layers.LSTM(128,
                        dropout=0.3,
                        recurrent_dropout=0.3,
                        return_sequences=True)))
    nn.add(
        layers.Bidirectional(
            layers.LSTM(128,
                        dropout=0.3,
                        recurrent_dropout=0.3,
                        return_sequences=True)))
    nn.add(
        layers.Bidirectional(
            layers.LSTM(128,
                        dropout=0.3,
                        recurrent_dropout=0.3,
                        return_sequences=True)))
    nn.add(
        layers.Bidirectional(
            layers.LSTM(128, dropout=0.3, recurrent_dropout=0.3)))

    nn.add(layers.Dense(41, activation='softmax'))

    return nn
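
A minimal usage sketch for the model above, assuming the usual tensorflow.keras imports (layers, models) and purely synthetic data; the shapes, optimizer and loss below are illustrative assumptions, not part of the original example.

# Sketch only: shapes, optimizer and loss are assumptions.
import numpy as np
from tensorflow.keras import layers, models

x = np.random.rand(4, 100, 40).astype('float32')   # (batch, timesteps, features) -- hypothetical
y = np.random.randint(0, 41, size=(4,))            # integer labels for the 41-way softmax head

model = make_model(x.shape)                         # only input_shape[-1] (the feature count) is used
model.compile(optimizer='rmsprop',
              loss='sparse_categorical_crossentropy',
              metrics=['acc'])
model.fit(x, y, epochs=1, batch_size=2)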
Example #2
    def create_channel(self, x, kernel_size, feature_map):
        """
        Creates a single convolutional channel.

        Arguments:
            x           : Input for convolutional channel
            kernel_size : Kernel size for creating Conv1D
            feature_map : Number of filters (feature maps) for the SeparableConv1D

        Returns:
            x           : Channel including (Conv1D + {GlobalMaxPooling & GlobalAveragePooling} + Dense [+ Dropout])
        """
        x = layers.SeparableConv1D(feature_map,
                                   kernel_size=kernel_size,
                                   activation='relu',
                                   strides=1,
                                   padding='valid',
                                   depth_multiplier=4)(x)

        x1 = layers.GlobalMaxPooling1D()(x)
        x2 = layers.GlobalAveragePooling1D()(x)
        x = layers.concatenate([x1, x2])

        x = layers.Dense(self.hidden_units)(x)
        if self.dropout_rate:
            x = layers.Dropout(self.dropout_rate)(x)
        return x
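
The method above is taken from a larger class that is not shown; the sketch below is only an assumption of how such a channel-per-kernel-size model is typically assembled, with make_channel standing in for the bound create_channel method and all names and shapes being hypothetical.

# Sketch only: 'make_channel' stands in for the bound create_channel method
# of the (not shown) enclosing class; shapes and class counts are hypothetical.
from tensorflow.keras import layers, models

def build_multi_channel_model(make_channel, seq_len, features, num_classes,
                              kernel_sizes=(3, 4, 5), feature_map=100):
    inputs = layers.Input(shape=(seq_len, features))
    # one channel per kernel size, merged before the classifier head
    channels = [make_channel(inputs, kernel_size=k, feature_map=feature_map)
                for k in kernel_sizes]
    x = layers.concatenate(channels)
    outputs = layers.Dense(num_classes, activation='softmax')(x)
    return models.Model(inputs, outputs)

# hypothetical usage: model = build_multi_channel_model(obj.create_channel, 100, 300, 2)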
Example #3
def xception_layers(x: tf.Tensor, channels: int, batch_norm: bool = False) -> tf.Tensor:
    """Standard stack of layers used in Xception."""
    x = layers.Activation("relu")(x)
    x = layers.SeparableConv1D(channels, kernel_size=3, strides=1, padding="same")(x)
    if batch_norm:
        x = layers.BatchNormalization()(x)

    x = layers.Activation("relu")(x)
    x = layers.SeparableConv1D(channels, kernel_size=3, strides=1, padding="same")(x)
    if batch_norm:
        x = layers.BatchNormalization()(x)

    x = layers.Activation("relu")(x)
    x = layers.SeparableConv1D(channels, kernel_size=3, strides=1, padding="same")(x)
    if batch_norm:
        x = layers.BatchNormalization()(x)
    return x
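
A short usage sketch, assuming layers refers to tensorflow.keras.layers; the (timesteps, channels) input shape is an arbitrary example.

# Sketch only: the input shape is an arbitrary example.
from tensorflow.keras import layers, models

inputs = layers.Input(shape=(128, 32))
x = xception_layers(inputs, channels=64, batch_norm=True)
model = models.Model(inputs, x)
model.summary()   # three relu -> SeparableConv1D -> BatchNormalization blocks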
Example #4
def make_model(input_shape):
    nn = models.Sequential()
    nn.add(
        layers.SeparableConv1D(64,
                               7,
                               activation='relu',
                               input_shape=(None, input_shape[-1])))
    nn.add(layers.BatchNormalization())
    nn.add(layers.SeparableConv1D(64, 7, activation='relu'))
    nn.add(layers.BatchNormalization())
    nn.add(layers.MaxPooling1D(5))
    nn.add(layers.Dropout(0.3))

    nn.add(layers.SeparableConv1D(128, 7, activation='relu'))
    nn.add(layers.BatchNormalization())
    nn.add(layers.SeparableConv1D(128, 7, activation='relu'))
    nn.add(layers.BatchNormalization())
    nn.add(layers.MaxPooling1D(5))
    nn.add(layers.Dropout(0.3))

    nn.add(layers.SeparableConv1D(512, 7, activation='relu'))
    nn.add(layers.BatchNormalization())
    nn.add(layers.SeparableConv1D(512, 7, activation='relu'))
    nn.add(layers.BatchNormalization())
    nn.add(layers.GlobalAveragePooling1D())

    nn.add(layers.Dense(41, activation='softmax'))

    return nn
Example #5
    def __call__(self, x):
        # x.shape = (batch, seqlen, features)
        out = []
        for window_size in self.window_sizes:
            if window_size == 1:
                # no need to perform the convolution
                out.append(x)
            else:
                conv = kl.SeparableConv1D(1,
                                          kernel_size=window_size,
                                          padding='same',
                                          depthwise_initializer='ones',
                                          pointwise_initializer='ones',
                                          use_bias=False,
                                          trainable=False)
                out.append(conv(x))
        # (batch, seqlen, len(window_sizes))
        binp = kl.concatenate(out)
        return kl.Conv1D(1, kernel_size=1, use_bias=False)(binp)
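
A quick check of what the frozen layer above computes, assuming kl is tensorflow.keras.layers: with 'ones' depthwise and pointwise kernels and no bias, the non-trainable SeparableConv1D is just a moving-window sum over time (and channels).

# Sketch only: verifies the moving-window-sum behaviour on a toy sequence.
import numpy as np
from tensorflow.keras import layers as kl, Input, Model

inp = Input(shape=(None, 1))
conv = kl.SeparableConv1D(1, kernel_size=3, padding='same',
                          depthwise_initializer='ones',
                          pointwise_initializer='ones',
                          use_bias=False, trainable=False)
m = Model(inp, conv(inp))

x = np.arange(5, dtype='float32').reshape(1, 5, 1)   # [0, 1, 2, 3, 4]
print(m.predict(x).ravel())                          # window sums: [1. 3. 6. 9. 7.]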
Example #6
def Conv1DRegressorIn1(flag):
    K.clear_session()
    current_neighbor           = space['neighbor']
    current_idx_idx            = space['idx_idx']
    current_batch_size         = space['batch_size']

    current_dense_num          = space['dense_num']
    current_conv1D_filter_num1 = space['conv1D_filter_num1']
    current_conv1D_filter_num2 = space['conv1D_filter_num2']
    current_conv1D_filter_num3 = space['conv1D_filter_num3']

    summary = True
    verbose = 0

    #
    # setHyperParams
    #
    ## hypers for data
    neighbor = {{choice([50, 60, 70, 80, 90, 100, 110, 120, 130, 140])}}
    idx_idx = {{choice([0,1,2,3,4,5,6,7,8])}}
    idx_lst = [
        [x for x in range(158) if x not in [24, 26]],  # drop useless features
        [x for x in range(158) if x not in [24, 26] + [x for x in range(1, 6)] + [x for x in range(16, 22)] + [40, 42]],  # drop useless + redundant features
        [x for x in range(158) if x not in [24, 26] + [x for x in range(0, 22)]],  # drop useless + orientation features
        [x for x in range(158) if x not in [24, 26] + [22, 23, 26, 37, 38]],  # drop useless + depth features
        [x for x in range(158) if x not in [24, 26] + [x for x in range(27, 37)] + [x for x in range(40, 46)]],  # drop useless features + secondary-structure info
        # [x for x in range(158) if x not in [24, 26] + [x for x in range(27, 34)] + [x for x in range(40, 46)]],  # drop useless features + secondary-structure info 1
        # [x for x in range(158) if x not in [24, 26] + [x for x in range(34, 37)] + [x for x in range(40, 46)]],  # drop useless features + secondary-structure info 2
        [x for x in range(158) if x not in [24, 26] + [46, 47]],  # drop useless features + experimental conditions
        [x for x in range(158) if x not in [24, 26] + [39] + [x for x in range(57, 61)] + [x for x in range(48, 57)] + [x for x in range(61, 81)] + [x for x in range(140, 155)]],  # drop useless features + all atom encodings
        # [x for x in range(158) if x not in [24, 26] + [39] + [x for x in range(57, 61)] + [x for x in range(48, 57)] + [x for x in range(140, 145)]],  # drop useless features + atom encoding 1
        # [x for x in range(158) if x not in [24, 26] + [39] + [x for x in range(57, 61)] + [x for x in range(61, 77)] + [x for x in range(145, 153)]],  # drop useless features + atom encoding 2
        # [x for x in range(158) if x not in [24, 26] + [39] + [x for x in range(57, 61)] + [x for x in range(77, 81)] + [x for x in range(153, 155)]],  # drop useless features + atom encoding 3
        [x for x in range(158) if x not in [24, 26] + [x for x in range(81, 98)]],  # drop useless features + rosetta_energy
        [x for x in range(158) if x not in [24, 26] + [x for x in range(98, 140)] + [x for x in range(155, 158)]]  # drop useless features + msa
    ]
    idx = idx_lst[idx_idx]
    ## hypers for net
    lr = 1e-4  # 0.0001
    batch_size = {{choice([1, 16, 32, 64])}}
    epochs = 200
    padding_style = 'same'
    activator_Conv1D = 'elu'
    activator_Dense = 'tanh'
    dense_num = {{choice([64, 96, 128])}}
    conv1D_filter_num1 = {{choice([16, 32])}}
    conv1D_filter_num2 = {{choice([16, 32, 64])}}
    conv1D_filter_num3 = {{choice([32,64])}}
    dropout_rate_conv1D = 0.15
    dropout_rate_dense = 0.25
    initializer_Conv1D = initializers.lecun_uniform(seed=527)
    initializer_Dense = initializers.he_normal(seed=527)
    kernel_size = 5
    l2_rate = 0.001
    loss_type = logcosh
    metrics = ('mae', pearson_r, rmse)
    pool_size = 2

    def _data(fold_num, neighbor, idx):
        train_data_pth = '/dl/sry/mCNN/dataset/deepddg/npz/wild/cross_valid/cro_fold%s_train_center_CA_PCA_False_neighbor_140.npz' % fold_num
        val_data_pth = '/dl/sry/mCNN/dataset/deepddg/npz/wild/cross_valid/cro_fold%s_valid_center_CA_PCA_False_neighbor_140.npz' % fold_num

        ## train data
        train_data = np.load(train_data_pth)
        x_train = train_data['x']
        y_train = train_data['y']
        ddg_train = train_data['ddg'].reshape(-1)
        ## select kneighbor atoms
        x_train_kneighbor_lst = []
        for sample in x_train:
            dist_arr = sample[:, 0]
            indices = sorted(dist_arr.argsort()[:neighbor])
            x_train_kneighbor_lst.append(sample[indices, :])
        x_train = np.array(x_train_kneighbor_lst)
        ## idx
        x_train = x_train[:, :, idx]

        ## val data
        val_data = np.load(val_data_pth)
        x_val = val_data['x']
        y_val = val_data['y']
        ddg_val = val_data['ddg'].reshape(-1)
        ## select kneighbor atoms
        x_val_kneighbor_lst = []
        for sample in x_val:
            dist_arr = sample[:, 0]
            indices = sorted(dist_arr.argsort()[:neighbor])
            x_val_kneighbor_lst.append(sample[indices, :])
        x_val = np.array(x_val_kneighbor_lst)
        ##  idx
        x_val = x_val[:, :, idx]

        # sort row default is chain, pass

        # reshape and one-hot
        y_train = to_categorical(y_train)
        y_val = to_categorical(y_val)
        # normalization
        train_shape = x_train.shape
        val_shape = x_val.shape
        col_train = train_shape[-1]
        col_val = val_shape[-1]
        x_train = x_train.reshape((-1, col_train))
        x_val = x_val.reshape((-1, col_val))
        mean = x_train.mean(axis=0)
        std = x_train.std(axis=0)
        std[np.argwhere(std == 0)] = 0.01
        x_train -= mean
        x_train /= std
        x_val -= mean
        x_val /= std
        x_train = x_train.reshape(train_shape)
        x_val = x_val.reshape(val_shape)
        print('x_train: %s'
              '\ny_train: %s'
              '\nddg_train: %s'
              '\nx_val: %s'
              '\ny_val: %s'
              '\nddg_val: %s'
              % (x_train.shape, y_train.shape, ddg_train.shape,
                 x_val.shape, y_val.shape, ddg_val.shape))
        return x_train, y_train, ddg_train, x_val, y_val, ddg_val

    #
    # cross_valid
    #
    hyper_param_tag = '%s_%s_%s_%s_%s_%s_%s' % (
        current_neighbor, current_idx_idx, current_batch_size, current_dense_num,
        current_conv1D_filter_num1, current_conv1D_filter_num2, current_conv1D_filter_num3)
    modeldir = '/dl/sry/projects/from_hp/mCNN/src/Network/deepddg/opt_all_simpleNet/model/%s-%s' % (
        hyper_param_tag, time.strftime("%Y.%m.%d.%H.%M.%S", time.localtime()))
    os.makedirs(modeldir, exist_ok=True)
    opt_lst = []

    for k_count in range(1,11):
        print('\n** fold %s is processing **\n' % k_count)
        filepth = '%s/fold_%s_weights-best.h5' % (modeldir, k_count)
        my_callbacks = [
            callbacks.ReduceLROnPlateau(
                monitor='val_loss',
                factor=0.33,
                patience=5,
                verbose=verbose,
                mode='min',
                min_lr=1e-8,
            ),
            callbacks.EarlyStopping(
                monitor='val_loss',
                patience=10,
                verbose=verbose
            ),
            callbacks.ModelCheckpoint(
                filepath=filepth,
                monitor='val_mean_absolute_error',
                verbose=verbose,
                save_best_only=True,
                mode='min',
                save_weights_only=True)
        ]

        x_train, y_train, ddg_train, x_val, y_val, ddg_val = _data(k_count,neighbor,idx)
        row_num, col_num = x_train.shape[1:3]
        #
        # build net
        #
        network = models.Sequential()
        network.add(layers.Conv1D(filters=conv1D_filter_num1,
                                  kernel_size=kernel_size,
                                  kernel_initializer=initializer_Conv1D,
                                  kernel_regularizer=regularizers.l2(l2_rate),
                                  activation=activator_Conv1D,
                                  input_shape=(row_num, col_num)))
        network.add(layers.BatchNormalization(axis=-1))
        network.add(layers.MaxPooling1D(pool_size=pool_size,
                                        padding=padding_style))

        network.add(layers.SeparableConv1D(filters=conv1D_filter_num2,
                                           kernel_size=kernel_size,
                                           depthwise_initializer=initializer_Conv1D,
                                           pointwise_initializer=initializer_Conv1D,
                                           depthwise_regularizer=regularizers.l2(l2_rate),
                                           pointwise_regularizer=regularizers.l2(l2_rate),
                                           activation=activator_Conv1D))
        network.add(layers.Dropout(dropout_rate_conv1D))
        network.add(layers.BatchNormalization(axis=-1))
        network.add(layers.MaxPooling1D(pool_size=pool_size,
                                        padding=padding_style))

        network.add(layers.SeparableConv1D(filters=conv1D_filter_num3,
                                           kernel_size=3,
                                           depthwise_initializer=initializer_Conv1D,
                                           pointwise_initializer=initializer_Conv1D,
                                           depthwise_regularizer=regularizers.l2(l2_rate),
                                           pointwise_regularizer=regularizers.l2(l2_rate),
                                           activation=activator_Conv1D))
        network.add(layers.Dropout(dropout_rate_conv1D))
        network.add(layers.BatchNormalization(axis=-1))
        network.add(layers.MaxPooling1D(pool_size=pool_size,
                                        padding=padding_style))
        network.add(layers.Flatten())
        network.add(layers.Dense(dense_num,
                                 kernel_initializer=initializer_Dense,
                                 kernel_regularizer=regularizers.l2(l2_rate),
                                 activation=activator_Dense))
        network.add(layers.Dropout(dropout_rate_dense))
        network.add(layers.BatchNormalization(axis=-1))
        network.add(layers.Dense(1))
        if summary:
            trainable_count = int(
                np.sum([K.count_params(p) for p in set(network.trainable_weights)]))
            non_trainable_count = int(
                np.sum([K.count_params(p) for p in set(network.non_trainable_weights)]))

            print('Total params: {:,}'.format(trainable_count + non_trainable_count))
            print('Trainable params: {:,}'.format(trainable_count))
            print('Non-trainable params: {:,}'.format(non_trainable_count))
            # print(network.summary())
        # rmsp = optimizers.RMSprop(lr=0.0001)
        adam = optimizers.Adam(lr=lr)
        network.compile(optimizer=adam,  # 'rmsprop',  # SGD,adam,rmsprop
                        loss=loss_type,
                        metrics=list(metrics))  # mae = mean absolute error; accuracy
        result = network.fit(x=x_train,
                             y=ddg_train,
                             batch_size=batch_size,
                             epochs=epochs,
                             verbose=verbose,
                             callbacks=my_callbacks,
                             validation_data=(x_val, ddg_val),
                             shuffle=True,
                             )
        # print('\n----------History:\n%s'%result.history)
        #
        # save
        #
        save_train_cv(network, modeldir, result.history,k_count)
        opt_lst.append(np.mean(result.history['val_mean_absolute_error'][-10:]))
    opt_loss = np.mean(opt_lst)
    #
    # print hyper combination group and current loss value
    #
    print('\n@current_hyper_tag: %s'
          '\n@current optimized_loss: %s'
          %(hyper_param_tag, opt_loss))
    # return {'loss': validation_loss, 'status': STATUS_OK, 'model':model}
    return {'loss': opt_loss, 'status': STATUS_OK}
Example #7
def UNet_module_test(rd_input,
                     kernels,
                     conv_window_len,
                     maxpooling_len,
                     stride=1,
                     BN=True,
                     DropoutRate=0.2):

    initializer = 'glorot_uniform'

    ##################### Conv1 #########################
    conv1_0 = layers.SeparableConv1D(int(kernels[0]/4), 1, strides= stride , padding='same', \
        kernel_initializer=initializer)(rd_input)
    if BN: conv1_0 = layers.BatchNormalization()(conv1_0)
    conv1_0 = layers.Activation('relu')(conv1_0)

    conv1_1 = layers.SeparableConv1D(int(kernels[0]/4), 3, strides= stride , padding='same', \
        kernel_initializer=initializer)(rd_input)
    if BN: conv1_1 = layers.BatchNormalization()(conv1_1)
    conv1_1 = layers.Activation('relu')(conv1_1)

    conv1_2 = layers.SeparableConv1D(int(kernels[0]/4), 7, strides= stride, padding='same', \
        kernel_initializer=initializer)(rd_input)
    if BN: conv1_2 = layers.BatchNormalization()(conv1_2)
    conv1_2 = layers.Activation('relu')(conv1_2)

    conv1_3 = layers.SeparableConv1D(int(kernels[0]/4), 11, strides= stride, padding='same', \
        kernel_initializer=initializer)(rd_input)
    if BN: conv1_3 = layers.BatchNormalization()(conv1_3)
    conv1_3 = layers.Activation('relu')(conv1_3)

    conv1_4 = layers.SeparableConv1D(int(kernels[0]/4), 15, strides= stride, padding='same', \
        kernel_initializer=initializer)(rd_input)
    if BN: conv1_4 = layers.BatchNormalization()(conv1_4)
    conv1_4 = layers.Activation('relu')(conv1_4)

    conv1 = layers.Concatenate(-1)(
        [conv1_0, conv1_1, conv1_2, conv1_3, conv1_4])
    pool1 = layers.MaxPooling1D(maxpooling_len[0])(conv1)

    ##################### Conv2 ##########################
    conv2 = layers.Conv1D(kernels[1], 3, strides= stride, padding='same',\
        kernel_initializer=initializer)(pool1)
    if BN: conv2 = layers.BatchNormalization()(conv2)
    conv2 = layers.Activation('relu')(conv2)

    conv2 = layers.Conv1D(kernels[1], 3, strides= stride, padding='same',\
        kernel_initializer=initializer)(conv2)
    if BN: conv2 = layers.BatchNormalization()(conv2)
    conv2 = layers.Activation('relu')(conv2)

    pool2 = layers.MaxPooling1D(maxpooling_len[1])(conv2)

    ##################### conv3 ###########################
    conv3 = layers.Conv1D(kernels[2], 3, strides= stride,  padding='same',\
        kernel_initializer=initializer)(pool2)
    if BN: conv3 = layers.BatchNormalization()(conv3)
    conv3 = layers.Activation('relu')(conv3)

    conv3 = layers.Conv1D(kernels[2], 3, strides= stride, padding='same',\
        kernel_initializer=initializer)(conv3)
    if BN: conv3 = layers.BatchNormalization()(conv3)
    conv3 = layers.Activation('relu')(conv3)

    if DropoutRate > 0:
        drop3 = layers.Dropout(DropoutRate)(conv3)
    else:
        drop3 = conv3

    pool3 = layers.MaxPooling1D(maxpooling_len[2])(drop3)

    ####################  conv4 (U bottle) #####################
    conv4 = layers.Conv1D(kernels[3], 3, strides= 1, padding='same',\
        kernel_initializer=initializer)(pool3)
    if BN: conv4 = layers.BatchNormalization()(conv4)
    conv4 = layers.Activation('relu')(conv4)

    conv4 = layers.Conv1D(kernels[3], 3, strides= 1, padding='same',\
        kernel_initializer=initializer)(conv4)
    if BN: conv4 = layers.BatchNormalization()(conv4)
    conv4 = layers.Activation('relu')(conv4)

    if DropoutRate > 0:
        drop4 = layers.Dropout(DropoutRate)(conv4)
    else:
        drop4 = conv4

    ################### upSampling, upConv5 ##########################
    # up5 = layers.UpSampling1D(maxpooling_len[2])(drop4)
    up5 = Conv1DTranspose(drop4,
                          kernels[2],
                          3,
                          strides=maxpooling_len[2],
                          padding='same')

    merge5 = layers.Concatenate(-1)([drop3, up5])

    conv5 = layers.Conv1D(kernels[2], 3, padding='same', \
        kernel_initializer=initializer)(merge5)
    if BN: conv5 = layers.BatchNormalization()(conv5)
    conv5 = layers.Activation('relu')(conv5)

    conv5 = layers.Conv1D(kernels[2], 3, padding='same', \
        kernel_initializer=initializer)(conv5)
    if BN: conv5 = layers.BatchNormalization()(conv5)
    conv5 = layers.Activation('relu')(conv5)

    ################### upConv 6 ##############################
    #up6 = layers.UpSampling1D(maxpooling_len[1])(conv5)
    up6 = Conv1DTranspose(conv5,
                          kernels[1],
                          3,
                          strides=maxpooling_len[1],
                          padding='same')

    merge6 = layers.Concatenate(-1)([conv2, up6])

    conv6 = layers.Conv1D(kernels[1], 3, padding='same', \
        kernel_initializer=initializer)(merge6)
    if BN: conv6 = layers.BatchNormalization()(conv6)
    conv6 = layers.Activation('relu')(conv6)

    conv6 = layers.Conv1D(kernels[1], 3, padding='same',\
        kernel_initializer=initializer)(conv6)
    if BN: conv6 = layers.BatchNormalization()(conv6)
    conv6 = layers.Activation('relu')(conv6)

    ################### upConv 7 #########################
    #up7 = layers.UpSampling1D(maxpooling_len[0])(conv6)
    up7 = Conv1DTranspose(conv6,
                          kernels[0],
                          3,
                          strides=maxpooling_len[0],
                          padding='same')

    merge7 = layers.Concatenate(-1)([conv1, up7])

    conv7 = layers.Conv1D(kernels[0], 3, padding='same',\
        kernel_initializer=initializer)(merge7)
    if BN: conv7 = layers.BatchNormalization()(conv7)
    conv7 = layers.Activation('relu')(conv7)

    conv7 = layers.Conv1D(kernels[0], 3, padding='same', \
        kernel_initializer=initializer)(conv7)
    if BN: conv7 = layers.BatchNormalization()(conv7)
    conv7 = layers.Activation('relu')(conv7)

    ################## final output ######################
    conv8 = layers.Conv1D(kernels[0], 3, padding='same', \
        kernel_initializer=initializer)(conv7)
    if BN: conv8 = layers.BatchNormalization()(conv8)
    conv8 = layers.Activation('relu')(conv8)

    if DropoutRate > 0:
        conv8 = layers.Dropout(DropoutRate)(conv8)

    return conv8
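
Conv1DTranspose used above is a helper that is not included in the snippet. A common stand-in (an assumption, not necessarily the author's version) wraps Conv2DTranspose around a dummy spatial axis; newer TensorFlow releases (>= 2.3) also ship layers.Conv1DTranspose directly.

# Sketch only: a typical Conv1DTranspose helper matching the call signature used above.
from tensorflow.keras import layers
from tensorflow.keras import backend as K

def Conv1DTranspose(input_tensor, filters, kernel_size, strides=2, padding='same'):
    x = layers.Lambda(lambda t: K.expand_dims(t, axis=2))(input_tensor)   # (batch, len, 1, ch)
    x = layers.Conv2DTranspose(filters, (kernel_size, 1),
                               strides=(strides, 1), padding=padding)(x)
    x = layers.Lambda(lambda t: K.squeeze(t, axis=2))(x)                  # (batch, len*strides, filters)
    return x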
Example #8
def Conv1DRegressorIn1(x_train, y_train, ddg_train, x_val, y_val, ddg_val,
                       filepth):
    K.clear_session()
    summary = False
    verbose = 0

    #
    # setHyperParams
    #
    row_num, col_num = x_train.shape[1:3]
    batch_size = 32  #{{choice([128, 32, 64])}}  # 128#64
    epochs = 200
    padding_style = 'same'

    activator_Conv1D = 'elu'
    activator_Dense = 'tanh'
    dense_num = 64  # 64
    dilation1D_layers = 8
    dilation1D_filter_num = 16
    dilation_lower = 1
    dilation_upper = 16
    dropout_rate_dilation = 0.25  #{{uniform(0.1, 0.35)}}
    dropout_rate_reduce = 0.25  #{{uniform(0.1, 0.25)}}
    dropout_rate_dense = 0.25

    initializer_Conv1D = 'lecun_uniform'
    initializer_Dense = 'he_normal'
    kernel_size = 9  #{{choice([9, 5, 7, 3])}}
    l2_rate = 0.045  #{{uniform(0.01, 0.35)}}
    loss_type = logcosh
    lr = 0.0001
    metrics = ('mae', pearson_r, rmse)

    my_callbacks = [
        callbacks.ReduceLROnPlateau(
            monitor='val_loss',
            factor=0.33,
            patience=5,
        ),
        callbacks.EarlyStopping(
            monitor='val_loss',
            patience=10,
        ),
        callbacks.ModelCheckpoint(
            filepath=filepth,
            # monitor='val_pearson_r',
            monitor='val_loss',
            verbose=1,
            save_best_only=True,
            mode='min',
            save_weights_only=True)
    ]
    pool_size = 2
    reduce_layers = 5
    reduce1D_filter_num = 32
    residual_stride = 2

    network = models.Sequential()
    network.add(
        layers.SeparableConv1D(filters=16,
                               kernel_size=5,
                               activation='relu',
                               input_shape=(row_num, col_num)))
    network.add(layers.MaxPooling1D(pool_size=2))
    network.add(layers.SeparableConv1D(32, 5, activation='relu'))
    network.add(layers.MaxPooling1D(pool_size=2))
    network.add(layers.SeparableConv1D(64, 3, activation='relu'))
    network.add(layers.MaxPooling1D(pool_size=2))
    network.add(layers.Flatten())
    network.add(layers.Dense(128, activation='relu'))
    network.add(layers.Dropout(0.3))
    network.add(layers.Dense(16, activation='relu'))
    network.add(layers.Dropout(0.3))
    network.add(layers.Dense(1))
    # print(network.summary())
    # rmsp = optimizers.RMSprop(lr=0.0001,  decay=0.1)
    rmsp = optimizers.RMSprop(lr=0.0001)
    network.compile(
        optimizer=rmsp,  # 'rmsprop',  # SGD,adam,rmsprop
        loss='mae',
        metrics=list(metrics))  # mae = mean absolute error; accuracy
    result = network.fit(
        x=x_train,
        y=ddg_train,
        batch_size=batch_size,
        epochs=epochs,
        verbose=verbose,
        callbacks=my_callbacks,
        validation_data=(x_val, ddg_val),
        shuffle=True,
    )

    return network, result.history
Example #9
def TrainConv1D(x_train,
                y_train,
                x_val=None,
                y_val=None,
                class_weights_dict=None,
                filepth=None,
                epochs=200,
                lr=1e-2,
                verbose=1):
    summary = False
    batch_size = 128
    optimizer = 'adam'
    activator = 'relu'

    pool_size = 2
    init_Conv1D = initializers.lecun_uniform()
    init_Dense = initializers.he_normal()
    padding_style = 'same'
    drop_rate = 0.025
    l2_coeff = 1e-3
    loss_type = 'categorical_crossentropy'
    metrics = ('acc', )

    loop_conv_num = 4  # 100 -> 50 -> 25 -> 13 -> 7 -> 4
    dense_num = 128
    dropout_dense = 0.25

    if lr > 0:
        if optimizer == 'adam':
            chosen_optimizer = optimizers.Adam(lr=lr)
        elif optimizer == 'sgd':
            chosen_optimizer = optimizers.SGD(lr=lr)
        elif optimizer == 'rmsprop':
            chosen_optimizer = optimizers.RMSprop(lr=lr)

    if x_val is None or y_val is None:
        val_data = None
        my_callbacks = None
    else:
        val_data = (x_val, y_val)
        my_callbacks = [
            callbacks.ReduceLROnPlateau(
                monitor='val_loss',
                factor=0.3,
                patience=5,
                verbose=verbose,
            ),
            callbacks.EarlyStopping(
                monitor='val_acc',
                min_delta=1e-4,
                patience=20,
                mode='max',
                verbose=verbose,
            ),
        ]
        if filepth is not None:
            my_callbacks += [
                callbacks.ModelCheckpoint(
                    filepath=filepth,
                    monitor='val_acc',
                    mode='max',
                    save_best_only=True,
                    save_weights_only=True,
                    verbose=verbose,
                )
            ]
    #
    # build model
    #
    network = models.Sequential()
    network.add(
        layers.SeparableConv1D(filters=16,
                               kernel_size=5,
                               activation=activator,
                               padding=padding_style,
                               depthwise_initializer=init_Conv1D,
                               pointwise_initializer=init_Conv1D,
                               depthwise_regularizer=regularizers.l2(l2_coeff),
                               pointwise_regularizer=regularizers.l1(l2_coeff),
                               input_shape=(x_train.shape[1:])))
    network.add(layers.BatchNormalization(axis=-1))
    network.add(layers.Dropout(drop_rate))
    network.add(layers.MaxPooling1D(pool_size=pool_size,
                                    padding=padding_style))

    for _ in range(loop_conv_num):
        network.add(
            layers.SeparableConv1D(
                filters=32,
                kernel_size=5,
                activation=activator,
                padding=padding_style,
                depthwise_initializer=init_Conv1D,
                pointwise_initializer=init_Conv1D,
                depthwise_regularizer=regularizers.l2(l2_coeff),
                pointwise_regularizer=regularizers.l1(l2_coeff),
            ))
        network.add(layers.BatchNormalization(axis=-1))
        network.add(layers.Dropout(drop_rate))
        network.add(
            layers.MaxPooling1D(pool_size=pool_size, padding=padding_style))

    network.add(
        layers.SeparableConv1D(
            filters=64,
            kernel_size=3,
            activation=activator,
            padding=padding_style,
            depthwise_initializer=init_Conv1D,
            pointwise_initializer=init_Conv1D,
            depthwise_regularizer=regularizers.l2(l2_coeff),
            pointwise_regularizer=regularizers.l1(l2_coeff),
        ))
    network.add(layers.BatchNormalization(axis=-1))
    network.add(layers.Dropout(drop_rate))
    network.add(layers.MaxPooling1D(pool_size=pool_size,
                                    padding=padding_style))

    network.add(layers.Flatten())
    network.add(
        layers.Dense(units=dense_num,
                     kernel_initializer=init_Dense,
                     activation=activator))
    network.add(layers.Dropout(dropout_dense))
    network.add(
        layers.Dense(units=10,
                     kernel_initializer=init_Dense,
                     activation='softmax'))

    if summary:
        print(network.summary())

    network.compile(optimizer=chosen_optimizer,
                    loss=loss_type,
                    metrics=list(metrics))
    result = network.fit(x=x_train,
                         y=y_train,
                         batch_size=batch_size,
                         epochs=epochs,
                         verbose=verbose,
                         callbacks=my_callbacks,
                         validation_data=val_data,
                         shuffle=True,
                         class_weight=class_weights_dict)
    return network, result.history
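
A minimal usage sketch with synthetic data, assuming the snippet's own imports (layers, models, optimizers, callbacks, initializers, regularizers from keras/tf.keras) are in scope; the shapes are inferred from the Dense(10, softmax) head and the "100 -> 50 -> ..." pooling comment, so they are assumptions.

# Sketch only: synthetic 10-class data with 100 timesteps per sample.
import numpy as np
from tensorflow.keras.utils import to_categorical

x_train = np.random.rand(256, 100, 8).astype('float32')        # (samples, timesteps, features)
y_train = to_categorical(np.random.randint(0, 10, 256), 10)    # one-hot, 10 classes
x_val = np.random.rand(64, 100, 8).astype('float32')
y_val = to_categorical(np.random.randint(0, 10, 64), 10)

net, history = TrainConv1D(x_train, y_train, x_val, y_val,
                           epochs=2, lr=1e-3, verbose=1)
print(sorted(history.keys()))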
Example #10
def TrainResNet(x_train,
                y_train,
                x_val=None,
                y_val=None,
                class_weights_dict=None,
                filepth=None,
                epochs=200,
                lr=1e-2,
                verbose=1):
    summary = True
    batch_size = 128
    optimizer = 'adam'
    activator = 'relu'

    kernel_size = 3
    pool_size = 2
    init_Conv1D = initializers.lecun_uniform()
    init_Dense = initializers.he_normal()
    padding_style = 'same'
    dropout_rate = 0.025
    l2_coeff = 1e-3
    loss_type = 'categorical_crossentropy'
    metrics = ('acc', )

    ## used in the dilation loop
    dilation_lower = 1
    dilation_upper = 16
    dilation1D_layers = 16
    dilation1D_filter_num = 16

    ## used in the reduce loop
    residual_stride = 2
    reduce_layers = 6  # 100 -> 50 -> 25 -> 13 -> 7 -> 4 -> 2
    reduce1D_filter_num = 16

    dense_num = 128
    dropout_dense = 0.25

    if lr > 0:
        if optimizer == 'adam':
            chosen_optimizer = optimizers.Adam(lr=lr)
        elif optimizer == 'sgd':
            chosen_optimizer = optimizers.SGD(lr=lr)
        elif optimizer == 'rmsprop':
            chosen_optimizer = optimizers.RMSprop(lr=lr)

    if x_val is None or y_val is None:
        val_data = None
        my_callbacks = None
    else:
        val_data = (x_val, y_val)
        my_callbacks = [
            callbacks.ReduceLROnPlateau(
                monitor='val_loss',
                factor=0.3,
                patience=5,
                verbose=verbose,
            ),
            callbacks.EarlyStopping(
                monitor='val_acc',
                min_delta=1e-4,
                patience=20,
                mode='max',
                verbose=verbose,
            ),
        ]
        if filepth is not None:
            my_callbacks += [
                callbacks.ModelCheckpoint(
                    filepath=filepth,
                    monitor='val_acc',
                    mode='max',
                    save_best_only=True,
                    save_weights_only=True,
                    verbose=verbose,
                )
            ]

    #
    # build
    #
    ## basic Conv1D
    input_layer = Input(shape=x_train.shape[1:])

    y = layers.SeparableConv1D(filters=dilation1D_filter_num,
                               kernel_size=1,
                               padding=padding_style,
                               kernel_initializer=init_Conv1D,
                               activation=activator)(input_layer)
    res = layers.BatchNormalization(axis=-1)(y)

    ## loop with Conv1D with dilation (padding='same')
    for _ in range(dilation1D_layers):
        y = layers.SeparableConv1D(
            filters=dilation1D_filter_num,
            kernel_size=kernel_size,
            padding=padding_style,
            dilation_rate=dilation_lower,
            kernel_initializer=init_Conv1D,
            activation=activator,
            kernel_regularizer=regularizers.l2(l2_coeff))(res)
        y = layers.BatchNormalization(axis=-1)(y)
        y = layers.Dropout(dropout_rate)(y)
        y = layers.SeparableConv1D(
            filters=dilation1D_filter_num,
            kernel_size=kernel_size,
            padding=padding_style,
            dilation_rate=dilation_lower,
            kernel_initializer=init_Conv1D,
            activation=activator,
            kernel_regularizer=regularizers.l2(l2_coeff))(y)
        y = layers.BatchNormalization(axis=-1)(y)

        res = layers.add([y, res])

        dilation_lower *= 2
        if dilation_lower > dilation_upper:
            dilation_lower = 1

    ## residual blocks to reduce dimension.
    for _ in range(reduce_layers):
        y = layers.SeparableConv1D(
            filters=reduce1D_filter_num,
            kernel_size=kernel_size,
            padding=padding_style,
            kernel_initializer=init_Conv1D,
            activation=activator,
            kernel_regularizer=regularizers.l2(l2_coeff))(res)
        y = layers.BatchNormalization(axis=-1)(y)
        y = layers.Dropout(dropout_rate)(y)
        y = layers.MaxPooling1D(pool_size, padding=padding_style)(y)
        res = layers.SeparableConv1D(
            filters=reduce1D_filter_num,
            kernel_size=kernel_size,
            strides=residual_stride,
            padding=padding_style,
            kernel_initializer=init_Conv1D,
            activation=activator,
            kernel_regularizer=regularizers.l2(l2_coeff))(res)
        res = layers.add([y, res])

    ## flat & dense
    y = layers.Flatten()(y)
    y = layers.Dense(dense_num, activation=activator)(y)
    y = layers.BatchNormalization(axis=-1)(y)
    y = layers.Dropout(dropout_dense)(y)

    output_layer = layers.Dense(10, activation='softmax')(y)

    model = models.Model(inputs=input_layer, outputs=output_layer)

    if summary:
        model.summary()

    model.compile(
        optimizer=chosen_optimizer,
        loss=loss_type,
        metrics=list(metrics)  # accuracy
    )

    # K.set_session(tf.Session(graph=model.output.graph))
    # init = K.tf.global_variables_initializer()
    # K.get_session().run(init)

    result = model.fit(x=x_train,
                       y=y_train,
                       batch_size=batch_size,
                       epochs=epochs,
                       verbose=verbose,
                       callbacks=my_callbacks,
                       validation_data=val_data,
                       shuffle=True,
                       class_weight=class_weights_dict)
    return model, result.history