Пример #1
0
def trainTest():
    """Overfit a tiny Conv3D module on random data with plain SGD.

    Prints the MSE error every 5th iteration; used as a smoke test that
    the module's forward/backward/updateParams cycle reduces the loss.
    """
    batchsize, inmaps, d, h, w = 5, 1, 3, 3, 3
    outmaps = 1
    size = 3

    hostData = np.random.normal(
        0.0, 1.0, (batchsize, inmaps, d, h, w)).astype(np.float32)
    hostTarget = np.random.normal(
        0.0, 1.0, (batchsize, outmaps, 1, 1, 1)).astype(np.float32)

    data, target = gpuarray.to_gpu(hostData), gpuarray.to_gpu(hostTarget)

    conv = Conv3D(inmaps, outmaps, size)

    from PuzzleLib.Cost.MSE import MSE
    cost = MSE()

    # Fixed step size for every iteration
    learnRate = 1e-2

    for step in range(100):
        conv(data)
        error, grad = cost(conv.data, target)

        conv.backward(grad)
        conv.updateParams(learnRate)

        if (step + 1) % 5 == 0:
            print("Iteration #%d error: %s" % (step + 1, error))
Пример #2
0
def trainTest():
    """Overfit a GroupLinear module on random data with plain SGD.

    Prints the MSE error every 5th iteration; acts as a smoke test for
    the forward/backward/updateParams cycle of grouped linear layers.
    """
    groups, insize, outsize = 16, 128, 32
    batchsize = 32

    hostData = np.random.normal(
        0.0, 1.0, (batchsize, groups, insize)).astype(np.float32)
    hostTarget = np.random.normal(
        0.0, 1.0, (batchsize, groups, outsize)).astype(np.float32)

    data, target = gpuarray.to_gpu(hostData), gpuarray.to_gpu(hostTarget)

    grpLinear = GroupLinear(groups, insize, outsize)

    from PuzzleLib.Cost.MSE import MSE
    cost = MSE()

    # Fixed step size for every iteration
    learnRate = 1e-1

    for step in range(100):
        grpLinear(data)
        error, grad = cost(grpLinear.data, target)

        grpLinear.backward(grad)
        grpLinear.updateParams(learnRate)

        if (step + 1) % 5 == 0:
            print("Iteration #%d error: %s" % (step + 1, error))
Пример #3
0
def trainTest(dtype):
    """Train a Linear module running in the given compute dtype.

    The module computes in `dtype` (e.g. float16) while the MSE cost is
    evaluated in float32, so activations and gradients are cast at the
    boundary when the two dtypes differ.

    :param dtype: numpy dtype the Linear module computes in.
    """
    insize, outsize = 500, 100

    data = gpuarray.to_gpu(np.random.randn(32, insize).astype(dtype))
    target = gpuarray.to_gpu(np.random.randn(32, outsize).astype(np.float32))

    linear = Linear(insize, outsize)
    linear.calcMode(dtype)

    from PuzzleLib.Cost.MSE import MSE
    cost = MSE()

    learnRate = 1e-1

    # dtype never changes inside the loop, so decide the cast once
    nativeFp32 = dtype == np.float32

    for step in range(100):
        linear(data)

        if nativeFp32:
            outdata = linear.data
        else:
            outdata = linear.data.astype(np.float32)

        error, grad = cost(outdata, target)

        # Cast the gradient back to the module's compute dtype if needed
        linear.backward(grad if nativeFp32 else grad.astype(dtype))
        linear.updateParams(learnRate)

        if (step + 1) % 5 == 0:
            print("Iteration #%d error: %s" % (step + 1, error))
Пример #4
0
def trainTest():
    """Train a relu RNN to reproduce a random target sequence with SGD.

    Prints the MSE error every 5th iteration as a convergence smoke test.
    """
    seqlen, batchsize, insize, hsize = 10, 32, 64, 32

    data = gpuarray.to_gpu(
        np.random.randn(seqlen, batchsize, insize).astype(np.float32))
    target = gpuarray.to_gpu(
        np.random.normal(
            0.0, 1.0, (seqlen, batchsize, hsize)).astype(np.float32))

    rnn = RNN(insize, hsize, mode="relu", getSequences=True)
    rnn(data)  # initial forward pass before the training loop, as in the original

    from PuzzleLib.Cost.MSE import MSE
    cost = MSE()

    # Fixed step size for every iteration
    learnRate = 1e-1

    for step in range(200):
        rnn(data)
        error, grad = cost(rnn.data, target)

        rnn.backward(grad)
        rnn.updateParams(learnRate)

        if (step + 1) % 5 == 0:
            print("Iteration #%d error: %s" % (step + 1, error))
Пример #5
0
def errorTest():
    """Check the Multi cost container against host-side reference values.

    Feeds the same prediction/target pair through MSE and Abs branches and
    asserts each branch's error matches the numpy-computed equivalent:
    MSE as ||t - p||^2 / (2 * N), Abs as ||t - p||_1 / N.
    """
    hostPred = np.random.randn(10, 10).astype(np.float32)
    hostTarget = np.random.randn(10, 10).astype(np.float32)

    pred, target = gpuarray.to_gpu(hostPred), gpuarray.to_gpu(hostTarget)

    from PuzzleLib.Cost.Abs import Abs
    from PuzzleLib.Cost.MSE import MSE

    multi = Multi().append(MSE()).append(Abs())
    multi([pred, pred], [target, target])

    diff = target.get() - pred.get()
    elems = np.prod(target.shape)

    hostError = [
        np.linalg.norm(diff)**2 / (2.0 * elems),
        np.linalg.norm(diff.ravel(), ord=1) / elems
    ]

    assert np.isclose(multi.error[0], hostError[0])
    assert np.isclose(multi.error[1], hostError[1])
Пример #6
0
def trainHardTest(optCls, dtype, *args, **kwargs):
    """Fit a small conv net in reduced precision with the given optimizer.

    Builds Conv2D -> BatchNorm2D -> relu -> Conv2D, switches the net to
    `dtype`, and appends a Cast back to float32 so the MSE cost always
    sees float32 activations. Prints the error every 5th iteration.

    :param optCls: optimizer class to instantiate.
    :param dtype: numpy dtype the network computes in.
    :param args: positional arguments forwarded to the optimizer.
    :param kwargs: keyword arguments forwarded to the optimizer.
    """
    from PuzzleLib.Containers.Sequential import Sequential

    from PuzzleLib.Modules.Conv2D import Conv2D
    from PuzzleLib.Modules.BatchNorm2D import BatchNorm2D
    from PuzzleLib.Modules.Activation import Activation, relu
    from PuzzleLib.Modules.Cast import Cast

    from PuzzleLib.Cost.MSE import MSE

    net = Sequential()

    for module in (
            Conv2D(4, 8, 5, pad=1), BatchNorm2D(8), Activation(relu),
            Conv2D(8, 16, 5, pad=1)):
        net.append(module)

    net.calcMode(dtype)
    # Cast is appended after calcMode so it stays a dtype -> float32 bridge
    net.append(Cast(intype=dtype, outtype=np.float32))

    optimizer = optCls(*args, **kwargs)
    optimizer.setupOn(net, useGlobalState=True)

    cost = MSE()

    data = gpuarray.to_gpu(np.random.randn(4, 4, 5, 5).astype(dtype))
    target = gpuarray.to_gpu(np.random.randn(4, 16, 1, 1).astype(np.float32))

    for step in range(200):
        error, grad = cost(net(data), target)

        optimizer.zeroGradParams()
        net.backward(grad)
        optimizer.update()

        if (step + 1) % 5 == 0:
            print("Iteration #%d error: %s" % (step + 1, error))
Пример #7
0
def trainSimpleTest(optCls, dtype, *args, **kwargs):
    """Fit a small MLP in reduced precision with the given optimizer.

    Builds a 128 -> 64 -> 32 -> 16 relu MLP, switches it to `dtype`, and
    appends a Cast back to float32 so the MSE cost always sees float32
    activations. Prints the error every 5th iteration.

    :param optCls: optimizer class to instantiate.
    :param dtype: numpy dtype the network computes in.
    :param args: positional arguments forwarded to the optimizer.
    :param kwargs: keyword arguments forwarded to the optimizer.
    """
    from PuzzleLib.Containers.Sequential import Sequential

    from PuzzleLib.Modules.Linear import Linear
    from PuzzleLib.Modules.Activation import Activation, relu
    from PuzzleLib.Modules.Cast import Cast

    from PuzzleLib.Cost.MSE import MSE

    net = Sequential()

    for module in (
            Linear(128, 64, useBias=False), Activation(relu),
            Linear(64, 32, useBias=False), Activation(relu),
            Linear(32, 16)):
        net.append(module)

    net.calcMode(dtype)
    # Cast is appended after calcMode so it stays a dtype -> float32 bridge
    net.append(Cast(intype=dtype, outtype=np.float32))

    optimizer = optCls(*args, **kwargs)
    optimizer.setupOn(net, useGlobalState=True)

    cost = MSE()

    data = gpuarray.to_gpu(np.random.randn(16, 128).astype(dtype))
    target = gpuarray.to_gpu(np.random.randn(16, 16).astype(np.float32))

    for step in range(200):
        error, grad = cost(net(data), target)

        optimizer.zeroGradParams()
        net.backward(grad)
        optimizer.update()

        if (step + 1) % 5 == 0:
            print("Iteration #%d error: %s" % (step + 1, error))