Code example #1
def test_load_data(h5file):
    @custom_preprocessor
    class PlusOnePreprocessor(BasePreprocessor):
        def transform(self, x, y):
            return x + 1, y + 1

    actual_dr = load_data(load_json_config(
        read_file('tests/json/dataset_config.json')))

    expected_dr = HDF5Reader(
        filename=h5file,
        batch_size=8,
        preprocessors=PlusOnePreprocessor(),
        x_name='input',
        y_name='target',
        train_folds=[0, 1, 2],
        val_folds=[3],
        test_folds=[4, 5]
    )

    assert isinstance(actual_dr, HDF5Reader)
    assert isinstance(actual_dr.preprocessors[0], PlusOnePreprocessor)

    actual_train_data = actual_dr.train_generator
    actual_val_data = actual_dr.val_generator
    actual_test_data = actual_dr.test_generator

    expected_train_data = expected_dr.train_generator
    expected_val_data = expected_dr.val_generator
    expected_test_data = expected_dr.test_generator

    check_equal_data_generator(actual_train_data, expected_train_data)
    check_equal_data_generator(actual_val_data, expected_val_data)
    check_equal_data_generator(actual_test_data, expected_test_data)
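The helper check_equal_data_generator used above comes from the test suite and is not shown in these examples. Below is a minimal sketch of what it might look like, assuming the train/val/test generators are iterable and yield (x, y) batches comparable with NumPy; the body is an assumption, only the name and call signature come from the test.

import itertools

import numpy as np


def check_equal_data_generator(actual_gen, expected_gen, n_batches=3):
    """Hypothetical helper: compare the first few (x, y) batches of two generators."""
    pairs = itertools.islice(zip(actual_gen, expected_gen), n_batches)
    for (actual_x, actual_y), (expected_x, expected_y) in pairs:
        # Compare batch contents element-wise; raises AssertionError on mismatch.
        np.testing.assert_allclose(actual_x, expected_x)
        np.testing.assert_allclose(actual_y, expected_y)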
Code example #2
def test_load_params():
    params = load_params(
        load_json_config(read_file('tests/json/model_param_config.json')))

    assert isinstance(params['optimizer'], Adam)
    assert isinstance(params['loss'], BinaryFbetaLoss)
    assert isinstance(params['metrics'][0], BinaryFbeta)
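The test only asserts the types of the loaded objects, so the contents of tests/json/model_param_config.json are not fixed by it. Below is a hypothetical sketch of that config, written as a Python dict with Keras-style class_name/config entries; the key names and layout are assumptions, and only the resulting types (Adam, BinaryFbetaLoss, BinaryFbeta) follow from the assertions above.

# Hypothetical config layout -- the real schema is defined by load_params.
model_param_config = {
    'optimizer': {'class_name': 'Adam', 'config': {}},
    'loss': {'class_name': 'BinaryFbetaLoss', 'config': {}},
    'metrics': [{'class_name': 'BinaryFbeta', 'config': {}}],
}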
Code example #3
def test_load_train_params():
    params = load_train_params(load_json_config(
        read_file('tests/json/train_param_config.json')))

    assert params['epochs'] == 5
    assert len(params['callbacks']) == 3

    assert isinstance(params['callbacks'][0], CSVLogger)
    assert isinstance(params['callbacks'][1], ModelCheckpoint)
    assert isinstance(params['callbacks'][2], TerminateOnNaN)
Code example #4
def test_load_vnet_model_resize():
    architecture = load_json_config(
        read_file('tests/json/vnet_architecture.json')
    )

    input_params = {'shape': [129, 129, 129, 3]}

    model = load_architecture(architecture, input_params)

    assert np.all(model.input_shape == (None, 129, 129, 129, 3))
    assert np.all(model.output_shape == (None, 129, 129, 129, 1))
Code example #5
def test_generate_resnet_model():
    architecture = generate_resnet_architecture(n_upsampling=3,
                                                n_filter=64,
                                                stride=2)

    input_params = {'shape': [128, 128, 3]}

    model = load_architecture(architecture, input_params)
    model.summary()

    expected_model = load_architecture(
        load_json_config('tests/json/resnet_architecture.json'), input_params)

    assert check_same_models(model, expected_model)
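check_same_models is another test-suite helper that is referenced but not defined in these examples. Here is a minimal sketch under the assumption that two Keras models count as equal when they have the same layer types and weight shapes in the same order; the comparison criteria are an assumption, only the name and usage come from the tests.

def check_same_models(model, expected_model):
    """Hypothetical helper: compare two Keras models layer by layer."""
    if len(model.layers) != len(expected_model.layers):
        return False
    for layer, expected_layer in zip(model.layers, expected_model.layers):
        if type(layer) is not type(expected_layer):
            return False
        # Compare weight shapes, not values: initial weights are random.
        actual_shapes = [w.shape for w in layer.get_weights()]
        expected_shapes = [w.shape for w in expected_layer.get_weights()]
        if actual_shapes != expected_shapes:
            return False
    return True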
Code example #6
def test_load_sequential_model():
    architecture = load_json_config(
        read_file('tests/json/sequential_architecture.json'))

    input_params = {'shape': [32, 32]}

    model = load_architecture(architecture, input_params)

    input_layer = Input(shape=(32, 32))
    flatten = Flatten()(input_layer)
    dense = Dense(units=128, activation='relu')(flatten)
    dropout = Dropout(rate=0.2)(dense)
    output_layer = Dense(units=10, activation='softmax')(dropout)

    expected_model = Model(inputs=input_layer, outputs=output_layer)

    assert check_same_models(model, expected_model)
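The expected model in this test pins down the layer sequence (Flatten, Dense(128, relu), Dropout(0.2), Dense(10, softmax)). One plausible shape for tests/json/sequential_architecture.json is sketched below as a Python dict with Keras-style class_name/config entries; the actual schema expected by load_architecture may differ.

# Hypothetical architecture config -- layer parameters match the expected
# model above, but the key names are an assumption.
sequential_architecture = {
    'type': 'Sequential',
    'layers': [
        {'class_name': 'Flatten', 'config': {}},
        {'class_name': 'Dense', 'config': {'units': 128, 'activation': 'relu'}},
        {'class_name': 'Dropout', 'config': {'rate': 0.2}},
        {'class_name': 'Dense', 'config': {'units': 10, 'activation': 'softmax'}},
    ],
}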
Code example #7
def test_create_voxresnet_json():
    expected = generate_voxresnet_architecture()
    generate_voxresnet_json(FILE_NAME)
    actual = load_json_config(FILE_NAME)

    assert np.all(expected == actual)
Code example #8
def test_load_vnet_model():
    architecture = load_json_config(
        read_file('tests/json/vnet_architecture.json')
    )

    input_params = {'shape': [128, 128, 128, 3]}

    model = load_architecture(architecture, input_params)

    def conv_layers(filters, pre_layer):
        conv = BatchNormalization()(
            Conv3D(filters,
                   kernel_size=3,
                   activation='relu',
                   padding='same')(pre_layer))

        return BatchNormalization()(
            Conv3D(filters,
                   kernel_size=3,
                   activation='relu',
                   padding='same')(conv))

    input_layer = Input(shape=[128, 128, 128, 3])

    conv_1 = conv_layers(4, input_layer)
    max_pool_1 = MaxPooling3D()(conv_1)

    conv_2 = conv_layers(8, max_pool_1)
    max_pool_2 = MaxPooling3D()(conv_2)

    conv_3 = conv_layers(16, max_pool_2)
    max_pool_3 = MaxPooling3D()(conv_3)

    conv_4 = conv_layers(32, max_pool_3)
    max_pool_4 = MaxPooling3D()(conv_4)

    conv_5 = conv_layers(64, max_pool_4)
    max_pool_5 = MaxPooling3D()(conv_5)

    conv_t_kwargs = {'kernel_size': 3,
                     'strides': [2, 2, 2],
                     'padding': 'same'}

    conv_6 = conv_layers(128, max_pool_5)
    conv_trans_1 = Conv3DTranspose(32, **conv_t_kwargs)(conv_6)

    upconv_1 = conv_layers(64, concatenate([conv_5, conv_trans_1]))
    conv_trans_2 = Conv3DTranspose(16, **conv_t_kwargs)(upconv_1)

    upconv_2 = conv_layers(32, concatenate([conv_4, conv_trans_2]))
    conv_trans_3 = Conv3DTranspose(8, **conv_t_kwargs)(upconv_2)

    upconv_3 = conv_layers(16, concatenate([conv_3, conv_trans_3]))
    conv_trans_4 = Conv3DTranspose(4, **conv_t_kwargs)(upconv_3)

    upconv_4 = conv_layers(8, concatenate([conv_2, conv_trans_4]))
    conv_trans_5 = Conv3DTranspose(2, **conv_t_kwargs)(upconv_4)

    upconv_5 = conv_layers(4, concatenate([conv_1, conv_trans_5]))
    output = Conv3D(1, kernel_size=1, activation='sigmoid')(upconv_5)

    expected_model = Model(inputs=input_layer, outputs=output)

    assert check_same_models(model, expected_model)