Example #1
File: test_mlp.py Project: ddofer/breze
def test_awn_fit():
    X = np.random.standard_normal((10, 2))
    Z = np.random.standard_normal((10, 1))
    # Compare the target against only the first target.shape[1] columns of the
    # prediction; the network's output may carry more columns than the target.
    loss = lambda target, prediction: squared(target, prediction[:, :target.shape[1]])
    mlp = AwnNetwork(
        2, [10], 1, ['rectifier'], 'identity', loss, max_iter=10)
    mlp.fit(X, Z)
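
All of these snippets assume NumPy and Theano are imported as np and T, and that the breze-specific names (squared, the network classes, theano_floatx) are imported from the library. A preamble along these lines should cover them, though the exact module paths are an assumption and may differ between breze versions and forks:

import numpy as np
import theano
import theano.tensor as T

# Assumed import paths -- verify against your breze version/fork before use.
from breze.arch.component.loss import squared
from breze.learn.utils import theano_floatx
from breze.learn.mlp import FastDropoutNetwork
# AwnNetwork's module path is not shown in these examples.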
Example #2
File: test_simple.py Project: Wiebke/breze
def test_linear_regression():
    inpt = T.matrix('inpt')
    inpt.tag.test_value = np.zeros((3, 10))
    target = T.matrix('target')
    target.tag.test_value = np.zeros((3, 2))

    l = AffineNonlinear(inpt, 10, 2, 'tanh')

    loss = squared(target, l.output).sum(1).mean()

    m = SupervisedModel(inpt=inpt, target=target, output=l.output, loss=loss,
                        parameters=l.parameters)

    f_predict = m.function([m.inpt], m.output)
    f_loss = m.function([m.inpt, m.target], m.loss)

    X = np.zeros((20, 10))
    Z = np.zeros((20, 2))

    Y = f_predict(X)

    assert Y.shape == (20, 2), 'output has wrong shape'

    l = f_loss(X, Z)

    assert np.array(l).ndim == 0, 'loss is not a scalar'
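
The inpt.tag.test_value and target.tag.test_value assignments only take effect when Theano's test-value mechanism is enabled; when running such a test standalone, it is typically switched on before the graph is built, for example:

import theano

# Make Theano evaluate every op on the attached test values while the graph is
# being built, so shape mismatches surface immediately instead of at run time.
theano.config.compute_test_value = 'raise'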
Example #3
File: test_mlp.py Project: gabobert/breze
def test_fd_predict():
    X = np.random.standard_normal((10, 2))
    X, = theano_floatx(X)
    loss = lambda target, prediction: squared(target, prediction[:, :target.shape[1]])
    mlp = FastDropoutNetwork(
        2, [10], 1, ['rectifier'], 'identity', loss, max_iter=10)
    mlp.predict(X)
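
theano_floatx presumably casts the NumPy arrays to Theano's configured float type before they reach the network; a rough stand-in (a hypothetical helper, not breze's implementation) could look like this:

import numpy as np
import theano

def floatx(*arrays):
    # Hypothetical stand-in for breze's theano_floatx: cast every array to
    # theano.config.floatX and return them as a tuple, so `X, = floatx(X)`
    # and `X, Z = floatx(X, Z)` both work as in the examples.
    return tuple(np.asarray(a, dtype=theano.config.floatX) for a in arrays)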
Example #4
def test_linear_regression():
    inpt = T.matrix('inpt')
    inpt.tag.test_value = np.zeros((3, 10))
    target = T.matrix('target')
    target.tag.test_value = np.zeros((3, 2))

    l = AffineNonlinear(inpt, 10, 2, 'tanh')

    loss = squared(target, l.output).sum(1).mean()

    m = SupervisedModel(inpt=inpt,
                        target=target,
                        output=l.output,
                        loss=loss,
                        parameters=l.parameters)

    f_predict = m.function([m.inpt], m.output)
    f_loss = m.function([m.inpt, m.target], m.loss)

    X = np.zeros((20, 10))
    Z = np.zeros((20, 2))

    Y = f_predict(X)

    assert Y.shape == (20, 2), 'output has wrong shape'

    l = f_loss(X, Z)

    assert np.array(l).ndim == 0, 'loss is not a scalar'
Example #5
def test_squared():
    X, Y = T.matrix(), T.matrix()
    X.tag.test_value = test_X
    Y.tag.test_value = test_Y
    dist = squared(X, Y).sum()
    f = theano.function([X, Y], dist, mode='FAST_COMPILE')
    res = f(test_X, test_Y)
    assert np.allclose(res, 0.0097), 'squared loss not working'
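
From the way it is reduced in these tests, squared(X, Y) is the coordinate-wise squared difference of its arguments; a plain NumPy sketch of the same quantity (a re-implementation for illustration, not breze's code) is:

import numpy as np

def squared_np(target, prediction):
    # Coordinate-wise squared difference. Summing everything gives the scalar
    # checked above; summing over axis=0 or axis=1 gives the column-wise and
    # row-wise variants in examples #7 and #8.
    return (target - prediction) ** 2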
Example #6
File: test_mlp.py Project: korhammer/breze
def test_fd_iter_fit():
    X = np.random.standard_normal((10, 2))
    Z = np.random.standard_normal((10, 1))
    loss = lambda target, prediction: squared(target, prediction[:, :target.shape[1]])
    mlp = FastDropoutNetwork(
        2, [10], 1, ['rectifier'], 'identity', loss, max_iter=10)
    for i, info in enumerate(mlp.iter_fit(X, Z)):
        if i >= 10:
            break
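
The enumerate/break loop is only there to cap the number of iter_fit updates; reusing mlp, X and Z from the example above, itertools.islice expresses the same cap a little more directly:

import itertools

# Draw at most ten updates from the (open-ended) iter_fit generator.
for info in itertools.islice(mlp.iter_fit(X, Z), 10):
    pass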
Example #7
def test_squared_colwise():
    X, Y = T.matrix(), T.matrix()
    X.tag.test_value = test_X
    Y.tag.test_value = test_Y
    dist = squared(X, Y).sum(axis=0)
    f = theano.function([X, Y], dist, mode='FAST_COMPILE')
    res = f(test_X, test_Y)
    correct = np.allclose(res, [0.0018, 0.005, 0.0029])
    assert correct, 'squared loss colwise not working'
Example #8
def test_squared_rowwise():
    X, Y = T.matrix(), T.matrix()
    X.tag.test_value = test_X
    Y.tag.test_value = test_Y
    dist = squared(X, Y).sum(axis=1)
    f = theano.function([X, Y], dist, mode='FAST_COMPILE')
    res = f(test_X, test_Y)
    correct = roughly(res, [0.0014, 0.0083])
    assert correct, 'squared loss rowwise not working'
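
Examples #7 and #8 differ only in the reduction axis; three column sums and two row sums imply that test_X and test_Y are 2x3 matrices. As a quick reminder of the NumPy/Theano axis convention:

import numpy as np

d = np.zeros((2, 3))        # same shape as the implied test_X / test_Y
print(d.sum(axis=0).shape)  # (3,) -- column-wise, one value per column
print(d.sum(axis=1).shape)  # (2,) -- row-wise, one value per row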
Example #9
File: test_mlp.py Project: gabobert/breze
def test_fd_iter_fit():
    X = np.random.standard_normal((10, 2))
    Z = np.random.standard_normal((10, 1))
    X, Z = theano_floatx(X, Z)
    loss = lambda target, prediction: squared(target, prediction[:, :target.shape[1]])
    mlp = FastDropoutNetwork(
        2, [10], 1, ['rectifier'], 'identity', loss, max_iter=10)
    for i, info in enumerate(mlp.iter_fit(X, Z)):
        if i >= 10:
            break