Example #1
def test_save_load():
    """Test the save/load operations of PGNN"""
    PhysicsGuidedNeuralNetwork.seed(0)
    model = PhysicsGuidedNeuralNetwork(p_fun=p_fun_pythag,
                                       hidden_layers=HIDDEN_LAYERS,
                                       loss_weights=(0.0, 1.0),
                                       n_features=2,
                                       n_labels=1,
                                       feature_names=['a', 'b'],
                                       output_names=['c'])

    model.fit(X, Y_NOISE, P, n_batch=4, n_epoch=20)
    y_pred = model.predict(X)

    with tempfile.TemporaryDirectory() as td:
        fpath = os.path.join(td, 'tempfile.pkl')
        model.save(fpath)
        loaded = PhysicsGuidedNeuralNetwork.load(fpath)

    assert len(model.layers) == len(loaded.layers)
    for layer0, layer1 in zip(model.layers, loaded.layers):
        for i in range(len(layer0.weights)):
            assert layer0.weights[i].shape == layer1.weights[i].shape
            assert np.allclose(layer0.weights[i], layer1.weights[i])

    y_pred_loaded = loaded.predict(X)
    assert np.allclose(y_pred, y_pred_loaded)
    assert loaded.feature_names == ['a', 'b']
    assert loaded.output_names == ['c']
    assert isinstance(model._optimizer, Adam)
    assert isinstance(loaded._optimizer, Adam)
    assert model._optimizer.get_config() == loaded._optimizer.get_config()
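
This and several of the following examples rely on module-level fixtures (X, Y, Y_NOISE, P, HIDDEN_LAYERS, p_fun_pythag) and imports that are defined elsewhere in the test module. The sketch below shows one plausible setup for the Pythagorean toy problem; the exact shapes, noise level, and p_fun_pythag signature are assumptions for illustration, not the module's actual definitions.

# Hypothetical fixture sketch -- the real test module defines its own versions.
import os
import tempfile

import numpy as np
import pytest
import tensorflow as tf
from tensorflow.keras.layers import (LSTM, Activation, BatchNormalization,
                                     Conv3D, Dense, Dropout, Flatten,
                                     InputLayer)
from tensorflow.keras.optimizers import Adam

from phygnn import PhysicsGuidedNeuralNetwork

N = 100
A, B = np.meshgrid(np.linspace(-1, 1, N), np.linspace(-1, 1, N))
A, B = A.flatten(), B.flatten()

X = np.vstack((A, B)).T                        # features: a, b
Y = np.sqrt(A ** 2 + B ** 2).reshape(-1, 1)    # true label: c = sqrt(a^2 + b^2)
Y_NOISE = Y + np.random.normal(0, 0.1, Y.shape)
P = X.copy()                                   # physics-loss input

HIDDEN_LAYERS = [{'units': 64, 'activation': 'relu', 'name': 'relu1'},
                 {'units': 64, 'activation': 'relu', 'name': 'relu2'}]


def p_fun_pythag(model, y_true, y_predicted, p):
    """Assumed physics loss penalizing deviation from the Pythagorean rule."""
    p = tf.convert_to_tensor(p, dtype=tf.float32)
    y_physical = tf.expand_dims(tf.sqrt(p[:, 0] ** 2 + p[:, 1] ** 2), 1)
    return tf.math.reduce_mean(tf.math.abs(y_predicted - y_physical))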
Example #2
def test_conv3d():
    """Test a phygnn model with a conv3d layer. The data in this test is
    garbage, just a test on shapes and save/load functionality"""

    input_layer = {
        'class': 'Conv3D',
        'filters': 2,
        'kernel_size': 3,
        'activation': 'relu'
    }
    hidden_layers = [{'units': 64, 'activation': 'relu'}, {'class': 'Flatten'}]
    output_layer = {'units': 24}
    model = PhysicsGuidedNeuralNetwork(p_fun=p_fun_pythag,
                                       hidden_layers=hidden_layers,
                                       input_layer=input_layer,
                                       output_layer=output_layer,
                                       loss_weights=(1.0, 0.0),
                                       n_features=1,
                                       n_labels=24)

    train_x_bad = np.random.uniform(-1, 1, (50, 12, 7, 7, 2))
    train_x = np.random.uniform(-1, 1, (50, 12, 7, 7, 1))
    train_y = np.random.uniform(-1, 1, (50, 24))

    assert len(model.layers) == 5, "conv layers did not get added!"
    assert isinstance(model.layers[0], Conv3D)
    assert isinstance(model.layers[1], Dense)
    assert isinstance(model.layers[2], Activation)
    assert isinstance(model.layers[3], Flatten)
    assert isinstance(model.layers[4], Dense)

    # test raise on bad feature channel dimension
    with pytest.raises(AssertionError):
        model.fit(train_x_bad, train_y, train_x, n_batch=1, n_epoch=10)

    model.fit(train_x, train_y, train_x, n_batch=1, n_epoch=10)
    y_pred = model.predict(train_x)
    assert y_pred.shape == (50, 24)

    with tempfile.TemporaryDirectory() as td:
        fpath = os.path.join(td, 'tempfile.pkl')
        model.save(fpath)
        loaded = PhysicsGuidedNeuralNetwork.load(fpath)

    assert len(model.layers) == len(loaded.layers)
    for layer0, layer1 in zip(model.layers, loaded.layers):
        for i in range(len(layer0.weights)):
            assert layer0.weights[i].shape == layer1.weights[i].shape
            assert np.allclose(layer0.weights[i], layer1.weights[i])

    y_pred_loaded = loaded.predict(train_x)

    assert np.allclose(y_pred, y_pred_loaded)
    assert len(model.layers) == len(loaded.layers)
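
For context on why the (50, 24) prediction shape falls out of this stack, here is a plain-Keras shape sketch. It is not the phygnn layer builder itself; valid padding and the layer grouping shown are assumptions.

from tensorflow.keras import Input, Sequential
from tensorflow.keras.layers import Conv3D, Dense, Flatten

# Shape walk-through for one sample with kernel_size=3 and no padding.
m = Sequential([
    Input(shape=(12, 7, 7, 1)),       # (spatial dims..., n_feature_channels)
    Conv3D(2, 3, activation='relu'),  # -> (None, 10, 5, 5, 2)
    Dense(64, activation='relu'),     # -> (None, 10, 5, 5, 64); Dense acts on the last axis
    Flatten(),                        # -> (None, 16000)
    Dense(24),                        # -> (None, 24), matching n_labels=24
])
assert m.output_shape == (None, 24)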
Example #3
def test_batch_norm():
    """Test the addition of BatchNormalization layers"""
    HIDDEN_LAYERS = [
        {
            'units': 64
        },
        {
            'batch_normalization': {
                'axis': 1
            }
        },
        {
            'activation': 'relu'
        },
        {
            'units': 64,
            'activation': 'relu',
            'batch_normalization': {
                'axis': 1
            }
        },
    ]
    model = PhysicsGuidedNeuralNetwork(p_fun=p_fun_pythag,
                                       hidden_layers=HIDDEN_LAYERS,
                                       loss_weights=(0.0, 1.0),
                                       n_features=2,
                                       n_labels=1)

    assert len(model.layers) == 8, "Batch norm layers did not get added!"
    assert isinstance(model.layers[0], InputLayer)
    assert isinstance(model.layers[1], Dense)
    assert isinstance(model.layers[2], BatchNormalization)
    assert isinstance(model.layers[3], Activation)
    assert isinstance(model.layers[4], Dense)
    assert isinstance(model.layers[5], BatchNormalization)
    assert isinstance(model.layers[6], Activation)

    model.fit(X, Y_NOISE, P, n_batch=1, n_epoch=10)
    y_pred = model.predict(X)

    with tempfile.TemporaryDirectory() as td:
        fpath = os.path.join(td, 'tempfile.pkl')
        model.save(fpath)
        loaded = PhysicsGuidedNeuralNetwork.load(fpath)

    y_pred_loaded = loaded.predict(X)

    assert np.allclose(y_pred, y_pred_loaded)
    assert len(model.layers) == len(loaded.layers)
Example #4
def test_classification():
    """Test the phygnn model as a classifier without the pfun"""
    PhysicsGuidedNeuralNetwork.seed(0)
    model = PhysicsGuidedNeuralNetwork(p_fun=None,
                                       hidden_layers=hidden_layers,
                                       output_layer=output_layer,
                                       loss_weights=(1.0, 0.0),
                                       metric='binary_crossentropy',
                                       learning_rate=0.05,
                                       n_features=2,
                                       n_labels=1)
    model.fit(features, labels, features, n_batch=1, n_epoch=50)

    y_pred = model.predict(features)
    accuracy = 100 * (np.round(y_pred) == labels.values).sum() / len(labels)
    assert accuracy > 99
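
features, labels, hidden_layers, and output_layer are fixtures from the surrounding test module and are not shown in this snippet. One compatible setup could look like the sketch below; the two-cluster data and the sigmoid output layer are assumptions chosen so that binary_crossentropy and the rounded-prediction accuracy check make sense.

import numpy as np
import pandas as pd

# Hypothetical binary-classification fixtures: two separable 2-D clusters.
n = 500
x0 = np.random.normal(-1.0, 0.3, (n, 2))
x1 = np.random.normal(1.0, 0.3, (n, 2))
features = pd.DataFrame(np.vstack((x0, x1)), columns=['x1', 'x2'])
labels = pd.DataFrame(np.repeat([0, 1], n), columns=['y'])

hidden_layers = [{'units': 16, 'activation': 'relu'},
                 {'units': 16, 'activation': 'relu'}]
output_layer = {'units': 1, 'activation': 'sigmoid'}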
Example #5
def test_lstm():
    """Test a phygnn model with a conv1d layer. The data in this test is
    garbage, just a test on shapes and creation. Save/load doesnt work yet
    for lstm"""

    input_layer = {'class': 'LSTM', 'units': 24, 'return_sequences': True}
    hidden_layers = [{'units': 64, 'activation': 'relu'}]
    output_layer = {'units': 24}
    model = PhysicsGuidedNeuralNetwork(p_fun=p_fun_pythag,
                                       hidden_layers=hidden_layers,
                                       input_layer=input_layer,
                                       output_layer=output_layer,
                                       loss_weights=(1.0, 0.0),
                                       n_features=2,
                                       n_labels=24)

    train_x = np.random.uniform(-1, 1, (50, 12, 2))
    train_y = np.random.uniform(-1, 1, (50, 12, 24))

    assert len(model.layers) == 4, "lstm layers did not get added!"
    assert isinstance(model.layers[0], LSTM)
    assert isinstance(model.layers[1], Dense)
    assert isinstance(model.layers[2], Activation)
    assert isinstance(model.layers[3], Dense)

    model.fit(train_x, train_y, train_x, n_batch=1, n_epoch=10)
    y_pred = model.predict(train_x)
    assert y_pred.shape == (50, 12, 24)

    with tempfile.TemporaryDirectory() as td:
        fpath = os.path.join(td, 'tempfile.pkl')
        model.save(fpath)
        loaded = PhysicsGuidedNeuralNetwork.load(fpath)

    assert len(model.layers) == len(loaded.layers)
    for layer0, layer1 in zip(model.layers, loaded.layers):
        for i in range(len(layer0.weights)):
            assert layer0.weights[i].shape == layer1.weights[i].shape
            assert np.allclose(layer0.weights[i], layer1.weights[i])

    y_pred_loaded = loaded.predict(train_x)

    assert np.allclose(y_pred, y_pred_loaded)
    assert len(model.layers) == len(loaded.layers)
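
Because the input layer uses return_sequences=True, the time axis is preserved all the way through the stack, which is why train_y and y_pred are (50, 12, 24) rather than (50, 24). A plain-Keras sketch of that shape behavior (an illustration, not the phygnn layer builder):

from tensorflow.keras import Input, Sequential
from tensorflow.keras.layers import LSTM, Dense

m = Sequential([
    Input(shape=(12, 2)),             # (timesteps, n_features)
    LSTM(24, return_sequences=True),  # -> (None, 12, 24), one output per timestep
    Dense(64, activation='relu'),     # -> (None, 12, 64)
    Dense(24),                        # -> (None, 12, 24), matching train_y above
])
assert m.output_shape == (None, 12, 24)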
Example #6
def test_nn():
    """Test the basic NN operation of the PGNN without weighting pfun."""
    PhysicsGuidedNeuralNetwork.seed(0)
    model = PhysicsGuidedNeuralNetwork(p_fun=p_fun_pythag,
                                       hidden_layers=HIDDEN_LAYERS,
                                       loss_weights=(1.0, 0.0),
                                       n_features=2,
                                       n_labels=1,
                                       feature_names=['a', 'b'],
                                       output_names=['c'])
    model.fit(X, Y_NOISE, P, n_batch=4, n_epoch=20)

    test_mae = np.mean(np.abs(model.predict(X) - Y))

    assert len(model.layers) == 6
    assert len(model.weights) == 6
    assert len(model.history) == 20
    assert model.history.validation_loss.values[-1] < 0.15
    assert test_mae < 0.15
Example #7
def test_save_load():
    """Test the save/load operations of PGNN"""
    PhysicsGuidedNeuralNetwork.seed(0)
    model = PhysicsGuidedNeuralNetwork(p_fun=p_fun_pythag,
                                       hidden_layers=HIDDEN_LAYERS,
                                       loss_weights=(0.0, 1.0),
                                       n_features=2,
                                       n_labels=1,
                                       feature_names=['a', 'b'],
                                       output_names=['c'])

    model.fit(X, Y_NOISE, P, n_batch=4, n_epoch=20)
    y_pred = model.predict(X)

    model.save(FPATH)
    loaded = PhysicsGuidedNeuralNetwork.load(FPATH)
    y_pred_loaded = loaded.predict(X)
    assert np.allclose(y_pred, y_pred_loaded)
    assert loaded.feature_names == ['a', 'b']
    assert loaded.output_names == ['c']
    os.remove(FPATH)
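
FPATH is a module-level constant defined outside this snippet. A minimal sketch of how it could be declared (the filename and location are assumptions):

import os

# Hypothetical pickle path next to the test module; the test removes it when done.
FPATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'test_model.pkl')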
Example #8
def test_dropouts():
    """Test the dropout rate kwargs for adding dropout layers."""
    HIDDEN_LAYERS = [{
        'units': 64
    }, {
        'activation': 'relu'
    }, {
        'dropout': 0.1
    }, {
        'units': 64,
        'activation': 'relu',
        'name': 'relu2',
        'dropout': 0.1
    }]
    model = PhysicsGuidedNeuralNetwork(p_fun=p_fun_pythag,
                                       hidden_layers=HIDDEN_LAYERS,
                                       loss_weights=(0.0, 1.0),
                                       n_features=2,
                                       n_labels=1)

    assert len(model.layers) == 8, "dropout layers did not get added!"
    assert isinstance(model.layers[0], InputLayer)
    assert isinstance(model.layers[1], Dense)
    assert isinstance(model.layers[2], Activation)
    assert isinstance(model.layers[3], Dropout)
    assert isinstance(model.layers[4], Dense)
    assert isinstance(model.layers[5], Activation)
    assert isinstance(model.layers[6], Dropout)

    model.fit(X, Y_NOISE, P, n_batch=4, n_epoch=20)
    y_pred = model.predict(X)

    model.save(FPATH)
    loaded = PhysicsGuidedNeuralNetwork.load(FPATH)
    y_pred_loaded = loaded.predict(X)
    assert np.allclose(y_pred, y_pred_loaded)
    assert len(model.layers) == len(loaded.layers)
    os.remove(FPATH)
Example #9
def test_phygnn():
    """Test the operation of the PGNN with weighting pfun."""
    PhysicsGuidedNeuralNetwork.seed(0)
    model = PhysicsGuidedNeuralNetwork(p_fun=p_fun_pythag,
                                       hidden_layers=HIDDEN_LAYERS,
                                       loss_weights=(0.0, 1.0),
                                       n_features=2,
                                       n_labels=1)
    model.fit(X, Y_NOISE, P, n_batch=4, n_epoch=20)

    test_mae = np.mean(np.abs(model.predict(X) - Y))

    assert len(model.layers) == 6
    assert len(model.weights) == 6
    assert len(model.history) == 20
    assert isinstance(model.layers[0], InputLayer)
    assert isinstance(model.layers[1], Dense)
    assert isinstance(model.layers[2], Activation)
    assert isinstance(model.layers[3], Dense)
    assert isinstance(model.layers[4], Activation)
    assert isinstance(model.layers[5], Dense)
    assert model.history.validation_loss.values[-1] < 0.015
    assert test_mae < 0.015
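
The only configuration difference between test_nn (loss_weights=(1.0, 0.0)) and this test (loss_weights=(0.0, 1.0)) is how the two loss terms are weighted: the first entry scales the data loss against the noisy labels and the second scales the physics loss from p_fun. A hedged sketch of how the combined objective is assumed to be formed:

def combined_loss(y_true, y_pred, p, model, loss_weights, metric_fn, p_fun):
    """Illustrative two-term phygnn-style objective (an assumption, not the
    library's exact implementation)."""
    data_loss = metric_fn(y_true, y_pred)            # e.g. MAE against Y_NOISE
    physics_loss = p_fun(model, y_true, y_pred, p)   # e.g. p_fun_pythag
    return loss_weights[0] * data_loss + loss_weights[1] * physics_loss

Under that reading, (1.0, 0.0) trains on the noisy labels alone while (0.0, 1.0) trains purely on the physics term, which is consistent with this test asserting a much tighter error bound against the noise-free Y (0.015 versus 0.15 in test_nn).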
Example #10
def test_dropouts():
    """Test the dropout rate kwargs for adding dropout layers."""
    hidden_layers_1 = [{
        'units': 64
    }, {
        'activation': 'relu'
    }, {
        'units': 64,
        'activation': 'relu',
        'name': 'relu2'
    }]
    hidden_layers_2 = [{
        'units': 64
    }, {
        'activation': 'relu'
    }, {
        'dropout': 0.1
    }, {
        'units': 64,
        'activation': 'relu',
        'name': 'relu2',
        'dropout': 0.1
    }]
    PhysicsGuidedNeuralNetwork.seed()
    model_1 = PhysicsGuidedNeuralNetwork(p_fun=p_fun_pythag,
                                         hidden_layers=hidden_layers_1,
                                         loss_weights=(0.0, 1.0),
                                         n_features=2,
                                         n_labels=1)
    PhysicsGuidedNeuralNetwork.seed()
    model_2 = PhysicsGuidedNeuralNetwork(p_fun=p_fun_pythag,
                                         hidden_layers=hidden_layers_2,
                                         loss_weights=(0.0, 1.0),
                                         n_features=2,
                                         n_labels=1)

    assert len(model_1.layers) == 6
    assert len(model_2.layers) == 8, "dropout layers did not get added!"
    assert isinstance(model_2.layers[0], InputLayer)
    assert isinstance(model_2.layers[1], Dense)
    assert isinstance(model_2.layers[2], Activation)
    assert isinstance(model_2.layers[3], Dropout)
    assert isinstance(model_2.layers[4], Dense)
    assert isinstance(model_2.layers[5], Activation)
    assert isinstance(model_2.layers[6], Dropout)

    PhysicsGuidedNeuralNetwork.seed()
    model_1.fit(X, Y_NOISE, P, n_batch=4, n_epoch=20)

    PhysicsGuidedNeuralNetwork.seed()
    model_2.fit(X, Y_NOISE, P, n_batch=4, n_epoch=20)

    y_pred_1 = model_1.predict(X)
    y_pred_2 = model_2.predict(X)

    # make sure dropouts don't predict the same as non-dropout
    diff = np.abs(y_pred_1 - y_pred_2)
    assert not np.allclose(y_pred_1, y_pred_2)
    assert np.max(diff) > 0.1

    with tempfile.TemporaryDirectory() as td:
        fpath = os.path.join(td, 'tempfile.pkl')
        model_2.save(fpath)
        loaded = PhysicsGuidedNeuralNetwork.load(fpath)

    y_pred_loaded = loaded.predict(X)
    assert np.allclose(y_pred_2, y_pred_loaded)
    assert len(model_2.layers) == len(loaded.layers)