Code Example #1
File: model_test.py Project: kellylab/Fireworks
def test_multiple_Models_training_in_pipeline():
    """
    Here, model A pipes its output into B
    """
    A = DummyModel({'m': [3.]}, out_column='y1')
    B = DummyModel({'m': [1.], 'b': [2.]}, input=A, in_column='y1', out_column='y')
    A.freeze('b')
    B.freeze('m')
    training_data = generate_linear_model_data()
    m = training_data[1]['m']
    b = training_data[1]['b']
    errors = training_data[1]['errors']
    minibatcher = get_minibatcher(training_data[0])
    assert (A.m == 3.).all()
    assert (B.m == 1).all()
    assert (A.b == 0).all()
    assert (B.b == 2.).all()
    batch = next(minibatcher)
    batch.to_tensors()
    A(batch)
    train_model(B, minibatcher, models=[A, B])
    assert (A.m - m < .6).all()
    assert (B.b != 2).all()
    assert (B.m == 1).all()
    assert (A.b == 0).all()
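These tests lean on a get_minibatcher helper that is not shown in this excerpt. A minimal sketch of what such a helper might do, assuming it simply shuffles the dataset and yields fixed-size minibatches using the ShufflerPipe and BatchingPipe pipes that appear in Code Example #2 (the helper's actual definition and batch size live in the test module and are not reproduced here):

def get_minibatcher(data, batch_size=25):
    # Assumed sketch: shuffle the rows, then group them into minibatches.
    # The tests call next() on the result and convert each batch to tensors
    # themselves, so no tensor conversion is done here.
    shuffled = ShufflerPipe(data)
    return BatchingPipe(shuffled, batch_size=batch_size)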
Code Example #2
def test_IgniteJunction():

    # Instantiate model
    model = DummyModel({'m': [1.]})
    # Instantiate dataset
    data, labels = generate_linear_model_data()
    to_tensor = lambda m: m.to_tensors()
    batcher = FunctionPipe(
        BatchingPipe(ShufflerPipe(data), batch_size=50),
        function=to_tensor,
    )
    # Instantiate engine
    junkie = training.IgniteJunction(
        {'model': model, 'dataset': batcher},
        loss=loss,
        optimizer='Adam',
    )
    # Train and test that changes took place
    assert (model.m == 1.).all()
    assert (model.b == 0.).all()
    junkie.train()
    assert not (model.m == 1.).all()
    assert not (model.b == 0.).all()
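The snippet above passes a loss callable into training.IgniteJunction but never defines it. A minimal sketch of such a loss, assuming the junction calls it on the Message after the model has run and that the prediction and label columns are named 'y_pred' and 'y' (both names are assumptions; the real test defines its own loss elsewhere):

import torch.nn as nn

mse = nn.MSELoss()

def loss(batch):
    # Assumed column names: compare the model's prediction against the label.
    return mse(batch['y_pred'], batch['y'])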
Code Example #3
File: model_test.py Project: kellylab/Fireworks
def test_one_Model_training():

    A = DummyModel({'m': [0.]})
    B = model_from_module(LinearModule)()
    training_data = generate_linear_model_data()
    m = training_data[1]['m']
    b = training_data[1]['b']
    errors = training_data[1]['errors']
    minibatcher = get_minibatcher(training_data[0])
    train_model(A, minibatcher)
    # For some reason, this model struggles to learn the y-intercept.
    assert (m - A.m < .6).all()
    train_model(B, minibatcher)
    assert (m - B.m < .6).all()

    assert (A.m - B.m < .6).all()  # The two models should have learned similar slopes
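model_from_module appears to wrap an ordinary torch.nn.Module as a Fireworks Model, but the LinearModule it wraps is not shown here. A plausible sketch, assuming the module learns a slope m and intercept b for y = m*x + b and reads/writes Message columns named 'x' and 'y' (all of these names are assumptions):

import torch
from torch import nn

class LinearModule(nn.Module):
    # Assumed sketch of a single-feature linear model with learnable slope and intercept.
    def __init__(self):
        super().__init__()
        self.m = nn.Parameter(torch.tensor([0.]))
        self.b = nn.Parameter(torch.tensor([0.]))

    def forward(self, batch):
        # Assumed column layout: read 'x', write the prediction into 'y'.
        batch['y'] = self.m * batch['x'] + self.b
        return batch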
Code Example #4
File: model_test.py Project: kellylab/Fireworks
def test_multiple_Models_training_in_junction():
    """
    Here, model A is provided as a component of B
    """
    A = DummyModel({'m': [1.], 'b': [3.]}, out_column='z')
    B = LinearJunctionModel(components={'b': [.5], 'f': A})
    training_data = generate_linear_model_data()
    m = training_data[1]['m']
    b = training_data[1]['b']
    errors = training_data[1]['errors']
    minibatcher = get_minibatcher(training_data[0])
    batch = next(minibatcher)
    batch.to_tensors()
    B(batch)
    assert (A.m == 1.).all()
    train_model(B, minibatcher, models=[A, B])
    assert (A.m != 1).all()
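LinearJunctionModel is not defined in this excerpt. Based on how it is used here and in Code Example #6 (y = f(x) + b, with the slope term delegated to the 'f' component), its forward pass might look roughly like the following simplified sketch; the class below is an illustration only, not the library's Model:

import torch

class LinearJunctionModelSketch:
    # Assumed, simplified sketch: delegate to the 'f' component (model A above),
    # then add an intercept. In the real Model, 'b' would be a trainable parameter.
    def __init__(self, components):
        self.f = components['f']
        self.b = torch.tensor(components['b'])

    def __call__(self, batch):
        batch = self.f(batch)             # A writes its output into its out_column ('z')
        batch['y'] = batch['z'] + self.b  # y = f(x) + b
        return batch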
Code Example #5
def test_train_test_split():

    # Test using a Message as input
    data, metadata = generate_linear_model_data(50)
    train, test = pr.train_test_split(data)
    assert len(train) == 40
    assert len(test) == 10
    train[0:20]  # Slicing either split should not raise
    test[0:4]
    # Check that training set and test set are different
    for train_row in train:
        for test_row in test:
            assert train_row != test_row

    # Test using a Pipe as input
    cache = CachingPipe(data)
    train2, test2 = pr.train_test_split(cache)
    train2[0:20]
    test2[0:3]

    for train_row in train2:  # Check that training set and test set are different
        for test_row in test2:
            assert train_row != test_row
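Judging by the sizes asserted above (50 rows split into 40 and 10), pr.train_test_split defaults to an 80/20 split. The index bookkeeping behind such a split might look like this purely illustrative sketch; the library function itself returns Message/Pipe objects rather than index lists:

import random

def train_test_split_indices(n_rows, test_fraction=0.2):
    # Illustrative only: shuffle the row indices and reserve 20% for the test set.
    indices = list(range(n_rows))
    random.shuffle(indices)
    cutoff = int(n_rows * test_fraction)
    return indices[cutoff:], indices[:cutoff]  # train indices, test indices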
Code Example #6
File: model_test.py Project: kellylab/Fireworks
def test_multiple_Models_training_via_junction():
    """
    Here, model B takes a junction A as input that randomly calls one of C, D, or E
    Hence, B implements y = f(x) + b, where f is randomly C, D, or E
    """
    C = DummyModel({'m': [1.], 'b': [0.]}, out_column='z')
    D = DummyModel({'m': [2.], 'b': [0.]}, out_column='z')
    E = DummyModel({'m': [3.], 'b': [0.]}, out_column='z')
    C.freeze('b')
    D.freeze('b')
    E.freeze('b')
    A = RandomJunction(components={'C': C, 'D': D, 'E': E})
    B = LinearJunctionModel(components={'b': [3.], 'f': A})
    training_data = generate_linear_model_data(n=750)
    m = training_data[1]['m']
    b = training_data[1]['b']
    errors = training_data[1]['errors']
    minibatcher = get_minibatcher(training_data[0])
    batch = next(minibatcher)
    batch.to_tensors()
    # A should route each call to a randomly chosen component, so repeated
    # calls on the same batch should eventually produce different outputs.
    first_output = B(batch)['y']
    outputs_vary = False
    for _ in range(20):
        if (B(batch)['y'] != first_output).all():
            outputs_vary = True
            break
    assert outputs_vary
    train_model(B, minibatcher, models=[B, C, D, E])
    # Test that all Models trained
    assert (C.m - m < .6).all()
    assert (D.m - m < .6).all()
    assert (E.m - m < .6).all()
    assert (C.m != D.m).all()
    assert (D.m != E.m).all()
    assert (E.m != C.m).all()
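RandomJunction is what the first assertion in this test relies on: each call through the junction should be routed to a randomly chosen component, so repeated calls on the same batch eventually disagree. A minimal sketch of that routing idea, assuming a plain call-forwarding junction (not the library's actual implementation):

import random

class RandomJunctionSketch:
    # Assumed sketch: forward every call to a randomly selected component.
    def __init__(self, components):
        self.components = components

    def __call__(self, batch):
        chosen = random.choice(list(self.components.values()))
        return chosen(batch)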
Code Example #7
File: model_test.py Project: kellylab/Fireworks
def test_enable_disable():
    def get_dummys():
        """
        This Model increments its internal _count variable by the length of the Message being accessed on update.
        On inference, it simply appends the input to itself and returns that.
        """
        dummy = DummyUpdateModel()
        tummy = DummyUpdateModel(input=dummy)
        assert dummy._count == 0
        assert tummy._count == 0
        return dummy, tummy

    data, meta = generate_linear_model_data()
    batch = data[2:10]
    l = len(batch)

    # Test updates and inference for one model
    dummy, tummy = get_dummys()
    out = dummy(batch)
    assert dummy._count == l
    assert tummy._count == 0
    assert out == batch.append(batch)

    # Test updates and inference for multiple Models
    dummy, tummy = get_dummys()
    out = tummy(batch)
    assert dummy._count == l
    assert tummy._count == 2 * l
    assert out == batch.append(batch).append(batch).append(batch)  # 4x

    # Test disable updates for one Model
    dummy, tummy = get_dummys()
    tummy.disable_updates()
    out = tummy(batch)
    assert dummy._count == l
    assert tummy._count == 0
    assert out == batch.append(batch).append(batch).append(batch)  # 4x

    # Test disable updates for all Models recursively
    dummy, tummy = get_dummys()
    tummy.disable_updates_all()
    out = tummy(batch)
    assert dummy._count == 0
    assert tummy._count == 0
    assert out == batch.append(batch).append(batch).append(batch)  # 4x
    # Test reenabling updates for one Model
    tummy.enable_updates()
    out = tummy(batch)
    assert dummy._count == 0
    assert tummy._count == 2 * l
    assert out == batch.append(batch).append(batch).append(batch)  # 4x
    # Test reenabling updates for all Models
    tummy.enable_updates_all()
    out = tummy(batch)
    assert dummy._count == l
    assert tummy._count == 4 * l
    assert out == batch.append(batch).append(batch).append(batch)  # 4x

    # Test disable inference for one Model
    dummy, tummy = get_dummys()
    tummy.disable_inference()
    out = tummy(batch)
    assert dummy._count == l
    assert tummy._count == 2 * l
    assert out == batch.append(batch)  # 2x from the first one

    # Test disable inference for all Models recursively
    dummy, tummy = get_dummys()
    tummy.disable_inference_all()
    out = tummy(batch)
    assert dummy._count == l
    assert tummy._count == l
    assert out == batch  # Identity
    # Test reenabling inference for one Model
    tummy.enable_inference()
    out = tummy(batch)
    assert dummy._count == 2 * l
    assert tummy._count == 2 * l
    assert out == batch.append(batch)  # 2x from the first one
    # Test reenabling inference for all Models
    tummy.enable_inference_all()
    out = tummy(batch)
    assert dummy._count == 3 * l
    assert tummy._count == 4 * l
    assert out == batch.append(batch).append(batch).append(batch)  # 4x from the first one
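The docstring in get_dummys describes DummyUpdateModel precisely: on update it increments an internal _count by the length of the incoming Message, and on inference it appends the input to itself. A minimal sketch of a class with that behaviour (the method names and call protocol are assumptions; the enable/disable plumbing exercised above comes from the Model base class and is omitted):

class DummyUpdateModelSketch:
    # Assumed sketch mirroring the docstring: count rows on update,
    # double the batch on inference.
    def __init__(self, input=None):
        self.input = input
        self._count = 0

    def update(self, message):
        self._count += len(message)

    def inference(self, message):
        return message.append(message)

    def __call__(self, message):
        # If this model wraps another (as tummy wraps dummy), run the wrapped model first.
        if self.input is not None:
            message = self.input(message)
        self.update(message)
        return self.inference(message)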