Code example #1
        #
        with open('%s/fold_%s_model.json' % (modeldir, k_count),
                  'r') as json_file:
            loaded_model_json = json_file.read()
        loaded_model = models.model_from_json(
            loaded_model_json)  # keras.models.model_from_yaml(yaml_string)
        loaded_model.load_weights(filepath=filepth)
        # loaded_model.compile(optimizer='rmsprop',  # SGD,adam,rmsprop
        #                      loss='mse',
        #                      metrics=['mae',pearson_r])  # mae: mean absolute error

        #
        # Test model
        #
        # pearson_coeff, std, mae = test_report_reg(model, x_test, ddg_test)
        pearson_coeff, std, mae = test_report_reg(loaded_model, x_test,
                                                  ddg_test)
        print('\n----------Predict:'
              '\npearson_coeff: %s, std: %s, mae: %s' %
              (pearson_coeff, std, mae))
        score_dict['pearson_coeff'].append(pearson_coeff)
        score_dict['std'].append(std)
        score_dict['mae'].append(mae)

        train_score_dict['pearson_coeff'].append(history_dict['pearson_r'][-1])
        train_score_dict['std'].append(history_dict['rmse'][-1])
        train_score_dict['mae'].append(history_dict['mean_absolute_error'][-1])

        es_train_score_dict['pearson_coeff'].append(
            history_dict['pearson_r'][-11])
        es_train_score_dict['std'].append(history_dict['rmse'][-11])
        es_train_score_dict['mae'].append(
            history_dict['mean_absolute_error'][-11])
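
The history keys read above (`history_dict['pearson_r']`, `history_dict['rmse']`) and the metric commented out in the `compile` call imply custom Keras metrics that are not shown in this excerpt. A minimal sketch of what such backend-level metrics could look like (these definitions are assumptions, not the project's actual code):

# Hypothetical Keras metrics -- a sketch, not taken from the mCNN project.
from keras import backend as K

def pearson_r(y_true, y_pred):
    # Pearson correlation computed with backend ops so it can be tracked during training.
    xm = y_true - K.mean(y_true)
    ym = y_pred - K.mean(y_pred)
    return K.sum(xm * ym) / (K.sqrt(K.sum(K.square(xm)) * K.sum(K.square(ym))) + K.epsilon())

def rmse(y_true, y_pred):
    # Root mean squared error as a Keras metric.
    return K.sqrt(K.mean(K.square(y_pred - y_true)))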
Code example #2
    #     validation_mae = np.amax(result.history['val_mean_absolute_error'])
    #     print('Best validation mae of epoch:', validation_mae)
    #     return {'loss': validation_mae, 'status': STATUS_OK}


if __name__ == '__main__':
    import sys
    import os
    import tensorflow as tf
    from keras.backend.tensorflow_backend import set_session  # used by the GPU session config below
    neighbor_obj, CUDA_rate = sys.argv[1:]
    ## config TF
    #os.environ['CUDA_VISIBLE_DEVICES'] = '0'
    from mCNN.queueGPU import queueGPU
    queueGPU()
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

    if CUDA_rate != 'full':
        config = tf.ConfigProto()
        if float(CUDA_rate) < 0.1:
            config.gpu_options.allow_growth = True
        else:
            config.gpu_options.per_process_gpu_memory_fraction = float(
                CUDA_rate)
        set_session(tf.Session(config=config))

    x_train, y_train, ddg_train, x_test, y_test, ddg_test, class_weights_dict, obj, kneighbor = data(
        neighbor_obj)
    model = Conv2DRegressorIn1(x_train, y_train, ddg_train, x_test, y_test,
                               ddg_test, class_weights_dict, obj, kneighbor)
    pearson_coeff, std = test_report_reg(model, x_test, ddg_test)
    print('\n--reg_pearson_coeff_%s: %s'
          '\n--reg_std_%s: %s' % (kneighbor, pearson_coeff, kneighbor, std))
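
`test_report_reg` is a project-local helper that is not shown in these excerpts; code example #1 unpacks three values from it (pearson_coeff, std, mae), while examples #2 and #3 unpack only two, so its signature evidently differs between revisions. A minimal sketch of the three-value form, assuming it reports the Pearson correlation, the standard deviation of the residuals, and the mean absolute error:

# Hypothetical sketch of test_report_reg -- an assumption, not the project's implementation.
import numpy as np
from scipy.stats import pearsonr

def test_report_reg(model, x_test, ddg_test):
    ddg_pred = model.predict(x_test).reshape(-1)
    ddg_true = np.asarray(ddg_test).reshape(-1)
    pearson_coeff, _ = pearsonr(ddg_true, ddg_pred)  # linear correlation between truth and prediction
    err = ddg_pred - ddg_true
    std = np.std(err)                                # spread of the residuals
    mae = np.mean(np.abs(err))                       # mean absolute error
    return pearson_coeff, std, mae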
Code example #3
File: regressor.py  Project: ruiyangsong/mCNN
import numpy as np
import tensorflow as tf
from keras import backend as K, layers, models, optimizers, callbacks
from keras.layers import Input
from hyperopt import STATUS_OK
# test_report_reg is a project-local helper (see the sketch after code example #2)


def Conv2DRegressorIn1(x_train, y_train, ddg_train, x_test, y_test, ddg_test,
                       class_weights_dict, obj):
    K.clear_session()
    summary = True
    verbose = 0
    # setHyperParams------------------------------------------------------------------------------------------------
    batch_size = 64
    epochs = {{choice([50, 100, 150, 200, 250])}}

    lr = {{loguniform(np.log(1e-4), np.log(1e-2))}}

    optimizer = {{choice(['adam', 'sgd', 'rmsprop'])}}

    activator = {{choice(['elu', 'relu', 'tanh'])}}

    basic_conv2D_layers = {{choice([1, 2])}}
    basic_conv2D_filter_num = {{choice([16, 32])}}

    loop_dilation2D_layers = {{choice([2, 4, 6])}}
    loop_dilation2D_filter_num = {{choice([16, 32, 64])}}  # used in the dilation loop
    loop_dilation2D_dropout_rate = {{uniform(0.001, 0.35)}}
    dilation_lower = 2
    dilation_upper = 16

    reduce_layers = 5  # each iteration halves the spatial dims via max pooling
    reduce_conv2D_filter_num = {{choice([8, 16,
                                         32])}}  # used to reduce the spatial dimensions
    reduce_conv2D_dropout_rate = {{uniform(0.001, 0.25)}}
    residual_stride = 2

    dense1_num = {{choice([64, 128, 256])}}
    dense2_num = {{choice([32, 64])}}

    drop_num = {{uniform(0.0001, 0.3)}}

    kernel_size = (3, 3)
    pool_size = (2, 2)
    initializer = 'random_uniform'
    padding_style = 'same'
    loss_type = 'mse'
    metrics = ('mae', )

    my_callbacks = [
        callbacks.ReduceLROnPlateau(
            monitor='val_loss',
            factor=0.8,
            patience=10,
        )
    ]

    if lr > 0:
        if optimizer == 'adam':
            chosed_optimizer = optimizers.Adam(lr=lr)
        elif optimizer == 'sgd':
            chosed_optimizer = optimizers.SGD(lr=lr)
        elif optimizer == 'rmsprop':
            chosed_optimizer = optimizers.RMSprop(lr=lr)

    # build --------------------------------------------------------------------------------------------------------
    ## basic Conv2D
    input_layer = Input(shape=x_train.shape[1:])
    y = layers.Conv2D(basic_conv2D_filter_num,
                      kernel_size,
                      padding=padding_style,
                      kernel_initializer=initializer,
                      activation=activator)(input_layer)
    y = layers.BatchNormalization(axis=-1)(y)
    if basic_conv2D_layers == 2:
        y = layers.Conv2D(basic_conv2D_filter_num,
                          kernel_size,
                          padding=padding_style,
                          kernel_initializer=initializer,
                          activation=activator)(y)
        y = layers.BatchNormalization(axis=-1)(y)

    ## loop with Conv2D with dilation (padding='same')
    for _ in range(loop_dilation2D_layers):
        y = layers.Conv2D(loop_dilation2D_filter_num,
                          kernel_size,
                          padding=padding_style,
                          dilation_rate=dilation_lower,
                          kernel_initializer=initializer,
                          activation=activator)(y)
        y = layers.BatchNormalization(axis=-1)(y)
        y = layers.Dropout(loop_dilation2D_dropout_rate)(y)
        dilation_lower *= 2
        if dilation_lower > dilation_upper:
            dilation_lower = 2

    ## Conv2D (padding='same') with max pooling and a strided residual shortcut to reduce the spatial dimensions.
    for _ in range(reduce_layers):
        y = layers.Conv2D(reduce_conv2D_filter_num,
                          kernel_size,
                          padding=padding_style,
                          kernel_initializer=initializer,
                          activation=activator)(y)
        y = layers.BatchNormalization(axis=-1)(y)
        y = layers.Dropout(reduce_conv2D_dropout_rate)(y)
        y = layers.MaxPooling2D(pool_size, padding=padding_style)(y)
        residual = layers.Conv2D(reduce_conv2D_filter_num,
                                 1,
                                 strides=residual_stride,
                                 padding='same')(input_layer)
        y = layers.add([y, residual])
        residual_stride *= 2

    ## flat & dense
    y = layers.Flatten()(y)
    y = layers.Dense(dense1_num, activation=activator)(y)
    y = layers.BatchNormalization(axis=-1)(y)
    y = layers.Dropout(drop_num)(y)
    y = layers.Dense(dense2_num, activation=activator)(y)
    y = layers.BatchNormalization(axis=-1)(y)
    y = layers.Dropout(drop_num)(y)

    output_layer = layers.Dense(1)(y)

    model = models.Model(inputs=input_layer, outputs=output_layer)

    if summary:
        model.summary()

    model.compile(
        optimizer=chosed_optimizer,
        loss=loss_type,
        metrics=list(metrics)  # i.e. ['mae']
    )

    K.set_session(tf.Session(graph=model.output.graph))
    init = K.tf.global_variables_initializer()
    K.get_session().run(init)

    result = model.fit(
        x=x_train,
        y=ddg_train,
        batch_size=batch_size,
        epochs=epochs,
        verbose=verbose,
        callbacks=my_callbacks,
        validation_data=(x_test, ddg_test),
        shuffle=True,
    )
    # print('\n----------History:\n%s'%result.history)

    if obj == 'test_report_reg':
        pearson_coeff, std = test_report_reg(model, x_test, ddg_test)
        print('\n----------Predict:\npearson_coeff: %s, std: %s' %
              (pearson_coeff, std))
        objective = pearson_coeff * 2 + std
        return {'loss': -objective, 'status': STATUS_OK}

    elif obj == 'val_mae':
        validation_mae = np.amin(result.history['val_mean_absolute_error'])  # lowest (best) validation MAE across epochs
        print('Best validation mae of epoch:', validation_mae)
        return {'loss': validation_mae, 'status': STATUS_OK}
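
The double-brace placeholders ({{choice(...)}}, {{uniform(...)}}, {{loguniform(...)}}) and the {'loss': ..., 'status': STATUS_OK} return value follow the hyperas template convention, so this function is meant to be compiled and searched by hyperas rather than called directly. A sketch of a driver along those lines (max_evals and the zero-argument data function are assumptions; the project's data() in code example #2 takes a neighbor_obj argument, which would have to be bound beforehand):

# Hypothetical hyperas driver -- a sketch, not taken from the project.
from hyperopt import Trials, tpe
from hyperas import optim

if __name__ == '__main__':
    best_run, best_model = optim.minimize(model=Conv2DRegressorIn1,
                                          data=data,        # hyperas expects a zero-argument data() returning the sets used above
                                          algo=tpe.suggest,
                                          max_evals=50,
                                          trials=Trials())
    print('Best hyperparameters:', best_run)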