Example #1
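# Builds a small convolutional net, attaches CrossEntropy and NesterovSGD, and trains it
# with Trainer on random GPU data, printing the error after every batch.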
def onDeviceTest():
    from PuzzleLib.Containers import Sequential
    from PuzzleLib.Modules import Conv2D, MaxPool2D, Activation, relu, Flatten, Linear

    from PuzzleLib.Cost.CrossEntropy import CrossEntropy
    from PuzzleLib.Optimizers.NesterovSGD import NesterovSGD

    data = gpuarray.to_gpu(
        np.random.randn(10000, 3, 28, 28).astype(np.float32))
    dataTarget = gpuarray.to_gpu(
        np.random.randint(low=0, high=10, size=(10000, )).astype(np.int32))

    seq = Sequential()
    seq.append(Conv2D(3, 16, 9))
    seq.append(MaxPool2D())
    seq.append(Activation(relu))

    seq.append(Conv2D(16, 32, 5))
    seq.append(MaxPool2D())
    seq.append(Activation(relu))

    seq.append(Flatten())
    seq.append(Linear(3 * 3 * 32, 10))

    entr = CrossEntropy()

    opt = NesterovSGD()
    opt.setupOn(seq)

    def onBatchFinish(train):
        print("Finished batch #%d, error=%s" %
              (train.currBatch, train.cost.getError()))

    trainer = Trainer(seq, entr, opt, onBatchFinish=onBatchFinish)
    trainer.train(data, dataTarget)
Example #2
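# Applies the SMORMS3 update kernel to GPU tensors and checks the result against
# a float32 host-side reference of the same update.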
def calcTest(dtype, atol):
    lr, epsilon = 1e-3, 1e-16
    shape = (11, 13)

    hostW = np.random.randn(*shape).astype(dtype)
    hostDw = np.random.randn(*shape).astype(dtype)
    hostMem = (1.0 + np.random.randn(*shape)**2).astype(np.float32)
    hostMg = np.random.randn(*shape).astype(np.float32)
    hostMs = np.random.randn(*shape).astype(np.float32)**2

    w, dw = gpuarray.to_gpu(hostW), gpuarray.to_gpu(hostDw)
    mem, mg, ms = gpuarray.to_gpu(hostMem), gpuarray.to_gpu(
        hostMg), gpuarray.to_gpu(hostMs)

    smorms3Ker(w.dtype)(w, dw, mem, mg, ms, lr, epsilon)

    hostW, hostDw = hostW.astype(np.float32), hostDw.astype(np.float32)

    r = 1.0 / (1.0 + hostMem)
    hostMg = (1.0 - r) * hostMg + r * hostDw
    hostMs = (1.0 - r) * hostMs + r * hostDw**2
    x = hostMg**2 / (hostMs + epsilon)

    hostMem = 1.0 + hostMem * (1.0 - x)
    hostW += hostDw * np.minimum(lr, x) / (np.sqrt(hostMs) + epsilon)

    hostW, hostDw = hostW.astype(dtype), hostDw.astype(dtype)

    assert np.allclose(hostMem, mem.get(), atol=atol)
    assert np.allclose(hostMg, mg.get(), atol=atol)
    assert np.allclose(hostMs, ms.get(), atol=atol)
    assert np.allclose(hostW, w.get(), atol=atol)
Example #3
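# Verifies Dropout forward and backward against host math built from the saved random mask,
# then checks that eval mode passes data through unchanged.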
def dropoutTest(dtype):
    hostData = np.random.randn(11, 13, 4, 3).astype(dtype)
    data = gpuarray.to_gpu(hostData)

    dropout = Dropout()
    dropout.calcMode(dtype)

    dropout(data)

    hostRands = dropout.rands.get()[:data.size].reshape(data.shape)

    hostOutData = hostData * (hostRands < dropout.partition) / (1.0 -
                                                                dropout.p)
    assert np.allclose(hostOutData, dropout.data.get())

    hostGrad = np.random.randn(*dropout.data.shape).astype(dtype)
    grad = gpuarray.to_gpu(hostGrad)

    dropout.backward(grad)

    hostInGrad = hostGrad * (hostRands < dropout.partition) / (1.0 - dropout.p)
    assert np.allclose(hostInGrad, dropout.grad.get())

    dropout.evalMode()
    dropout(data)

    hostOutData = hostData
    assert np.allclose(hostOutData, dropout.data.get())
Example #4
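# Verifies that Replicate produces the requested number of output copies and that
# its backward pass sums the incoming gradients.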
def replicateTest(dtype):
    hostData = np.random.randn(10, 10, 3, 3).astype(dtype)
    data = gpuarray.to_gpu(hostData)

    times = 3

    repl = Replicate(times)
    repl.calcMode(dtype)

    repl(data)

    assert len(repl.data) == times

    hostGrad = [
        np.random.randn(10, 10, 3, 3).astype(dtype) for _ in range(times)
    ]
    grad = [gpuarray.to_gpu(gr) for gr in hostGrad]

    repl.backward(grad)

    hostInGrad = np.zeros(grad[0].shape, dtype=dtype)
    for i in range(times):
        hostInGrad += hostGrad[i]

    assert np.allclose(hostInGrad, repl.grad.get())
Example #5
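# Verifies KMaxPool forward and backward against a numpy reference built with
# partition/argpartition along the pooling axis.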
def unittest():
    topk = 5
    axis = 2

    data = gpuarray.to_gpu(np.random.randn(32, 10, 16).astype(np.float32))

    kmaxpool = KMaxPool(topk=topk, axis=axis)
    kmaxpool(data)

    hostData = data.get()

    hostOutData = np.partition(hostData, -topk, axis=axis)[:, :, -topk:]
    hostIndices = np.argpartition(hostData, -topk, axis=axis)[:, :, -topk:]

    hostInIndices = np.argsort(hostOutData, axis=axis)

    tup = (np.arange(hostOutData.shape[0])[:, None, None],
           np.arange(hostOutData.shape[1])[None, :, None], hostInIndices)
    hostIndices = hostIndices[tup]
    hostOutData = hostOutData[tup]

    assert np.allclose(kmaxpool.data.get(), hostOutData)

    grad = gpuarray.to_gpu(
        np.random.randn(*data.shape[:axis], topk).astype(np.float32))
    kmaxpool.backward(grad)

    hostGrad = grad.get()
    hostInGrad = np.zeros(hostData.shape, dtype=np.float32)

    tup = (np.arange(hostInGrad.shape[0])[:, None, None],
           np.arange(hostInGrad.shape[1])[None, :, None], hostIndices)
    hostInGrad[tup] = hostGrad

    assert np.allclose(hostInGrad, kmaxpool.grad.get())
Example #6
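# Benchmarks a VGG-16 training step with timeKernel (100 iterations) before and after
# net.optimizeForShape.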
def main():
    net = loadVGG(None, "16")

    batchsize = 16
    size = (batchsize, 3, 224, 224)

    batch = np.random.normal(size=size).astype(dtype=np.float32)
    batch = gpuarray.to_gpu(batch)

    labels = np.random.randint(low=0,
                               high=1000,
                               size=(batchsize, ),
                               dtype=np.int32)
    labels = gpuarray.to_gpu(labels)

    optimizer = SGD()
    optimizer.setupOn(net)

    cost = CrossEntropy(maxlabels=1000)
    trainer = Trainer(net, cost, optimizer)

    print("Started benchmarking %s ..." % net.name)
    timeKernel(trainer.train,
               args=(batch, labels),
               looplength=100,
               logname="Before optimizing %s" % net.name,
               normalize=True)

    net.optimizeForShape(size)
    timeKernel(trainer.train,
               args=(batch, labels),
               looplength=100,
               logname="After optimizing %s" % net.name,
               normalize=True)
Example #7
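# Verifies Dropout2D (whole feature maps dropped) forward and backward against host math,
# then checks that eval mode passes data through unchanged.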
def dropout2dTest(dtype):
    batchsize, maps, height, width = 11, 13, 4, 3

    hostData = np.random.randn(batchsize, maps, height, width).astype(dtype)
    data = gpuarray.to_gpu(hostData)

    dropout2d = Dropout2D()
    dropout2d.calcMode(dtype)

    dropout2d(data)

    hostRands = dropout2d.rands.get()[:batchsize * maps].reshape(
        batchsize, maps)[:, :, np.newaxis, np.newaxis]

    hostOutData = hostData * (hostRands < dropout2d.partition) / (1.0 -
                                                                  dropout2d.p)
    assert np.allclose(hostOutData, dropout2d.data.get())

    hostGrad = np.random.randn(*dropout2d.data.shape).astype(dtype)
    grad = gpuarray.to_gpu(hostGrad)

    dropout2d.backward(grad)

    hostInGrad = hostGrad * (hostRands < dropout2d.partition) / (1.0 -
                                                                 dropout2d.p)
    assert np.allclose(hostInGrad, dropout2d.grad.get())

    dropout2d.evalMode()
    dropout2d(data)

    hostOutData = hostData
    assert np.allclose(hostOutData, dropout2d.data.get())
Example #8
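# Verifies Split against np.split and checks that its backward pass concatenates
# the branch gradients back together.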
def splitTest(dtype):
    batchsize, groups, size = 5, 3, 4

    hostData = np.random.randn(batchsize, groups, size).astype(dtype)
    data = gpuarray.to_gpu(hostData)

    split = Split(axis=2, sections=(3, 1))
    split.calcMode(dtype)

    split(data)

    hostOutData = np.split(hostData, [split.sections[0]], axis=split.axis)
    assert all(
        np.allclose(hostOutData[i], split.data[i].get())
        for i in range(len(hostOutData)))

    hostGrad = [
        np.random.randn(*split.data[i].shape).astype(dtype)
        for i in range(len(split.data))
    ]
    grad = [gpuarray.to_gpu(gr) for gr in hostGrad]

    split.backward(grad)

    hostInGrad = np.concatenate(hostGrad, axis=split.axis)
    assert np.allclose(hostInGrad, split.grad.get())
Example #9
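# Applies the Adam update kernel to GPU tensors and checks the result against
# a float32 host-side reference of the same update.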
def calcTest(dtype, atol):
    alpha, beta1, beta2, epsilon = 0.01, 0.9, 0.999, 1e-8
    shape = (11, 13)

    hostW = np.random.randn(*shape).astype(dtype)
    hostDw = np.random.randn(*shape).astype(dtype)
    hostMs = (1.0 + np.random.randn(*shape)**2).astype(np.float32)
    hostMg = np.random.randn(*shape).astype(np.float32)

    w, dw = gpuarray.to_gpu(hostW), gpuarray.to_gpu(hostDw)
    ms, mg = gpuarray.to_gpu(hostMs), gpuarray.to_gpu(hostMg)

    fix1, fix2 = 1.0 - beta1, 1.0 - beta2
    lr = alpha * math.sqrt(fix2) / fix1

    adamKer(w.dtype)(w, dw, mg, ms, lr, fix1, fix2, epsilon)

    hostW, hostDw = hostW.astype(np.float32), hostDw.astype(np.float32)

    hostMg = (1 - fix1) * hostMg + fix1 * hostDw
    hostMs = (1 - fix2) * hostMs + fix2 * hostDw**2
    hostW += lr * hostMg / (np.sqrt(hostMs) + epsilon)

    hostW, hostDw = hostW.astype(dtype), hostDw.astype(dtype)

    assert np.allclose(hostMg, mg.get(), atol=atol)
    assert np.allclose(hostMs, ms.get(), atol=atol)
    assert np.allclose(hostW, w.get(), atol=atol)
Example #10
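# Verifies reflect-mode Pad1D: the forward pass mirrors the borders and the backward pass
# folds the mirrored gradients back into the input gradient.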
def reflectTest():
    batchsize, maps, insize = 4, 8, 48
    lpad, rpad = 2, 3

    data = gpuarray.to_gpu(
        np.random.randn(batchsize, maps, insize).astype(np.float32))

    reflectpad = Pad1D(pad=(lpad, rpad), mode=PadMode.reflect)
    reflectpad(data)

    hostData, hostOutData = data.get(), reflectpad.data.get()
    outsize = hostOutData.shape[2]

    assert np.allclose(hostOutData[:, :, lpad:insize + lpad], hostData)
    assert np.allclose(hostOutData[:, :, :lpad][:, :, ::-1],
                       hostData[:, :, 1:lpad + 1])
    assert np.allclose(hostOutData[:, :, insize + lpad:][:, :, ::-1],
                       hostData[:, :, insize - 1 - rpad:insize - 1])

    grad = gpuarray.to_gpu(
        np.random.randn(batchsize, maps, outsize).astype(np.float32))
    reflectpad.backward(grad)

    hostGrad, hostInGrad = grad.get(), reflectpad.grad.get()

    assert np.allclose(hostInGrad[:, :, lpad + 1:insize - rpad - 1],
                       hostGrad[:, :, 2 * lpad + 1:outsize - 2 * rpad - 1])
    assert np.allclose(
        hostInGrad[:, :, 1:lpad + 1], hostGrad[:, :, :lpad][:, :, ::-1] +
        hostGrad[:, :, lpad + 1:2 * lpad + 1])
    assert np.allclose(
        hostInGrad[:, :, insize - rpad - 1:insize - 1],
        hostGrad[:, :, outsize - rpad:][:, :, ::-1] +
        hostGrad[:, :, outsize - 2 * rpad - 1:outsize - rpad - 1])
Example #11
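# Fits a ReLU RNN to random sequence targets with MSE; the printed error should decrease
# over the iterations.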
def trainTest():
    seqlen, batchsize, insize, hsize = 10, 32, 64, 32

    data = gpuarray.to_gpu(
        np.random.randn(seqlen, batchsize, insize).astype(np.float32))
    target = gpuarray.to_gpu(
        np.random.normal(0.0, 1.0,
                         (seqlen, batchsize, hsize)).astype(np.float32))

    rnn = RNN(insize, hsize, mode="relu", getSequences=True)
    rnn(data)

    from PuzzleLib.Cost.MSE import MSE
    mse = MSE()

    for i in range(200):
        learnRate = 1e-1

        rnn(data)
        error, grad = mse(rnn.data, target)

        rnn.backward(grad)
        rnn.updateParams(learnRate)

        if (i + 1) % 5 == 0:
            print("Iteration #%d error: %s" % (i + 1, error))
Example #12
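# Fits a single Conv3D layer to random volumetric targets with MSE; the printed error
# should decrease over the iterations.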
def trainTest():
    batchsize, inmaps, d, h, w = 5, 1, 3, 3, 3
    outmaps = 1
    size = 3

    data = gpuarray.to_gpu(
        np.random.normal(0.0, 1.0,
                         (batchsize, inmaps, d, h, w)).astype(np.float32))
    conv = Conv3D(inmaps, outmaps, size)

    from PuzzleLib.Cost.MSE import MSE
    mse = MSE()

    target = gpuarray.to_gpu(
        np.random.normal(0.0, 1.0,
                         (batchsize, outmaps, 1, 1, 1)).astype(np.float32))

    for i in range(100):
        learnRate = 1e-2

        conv(data)
        error, grad = mse(conv.data, target)

        conv.backward(grad)
        conv.updateParams(learnRate)

        if (i + 1) % 5 == 0:
            print("Iteration #%d error: %s" % (i + 1, error))
Example #13
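# Runs three inputs through a Parallel container (one Sequential branch, two Identity
# branches), concatenates the outputs, checks gradient routing, then slices the container.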
def unittest():
    from PuzzleLib.Containers.Sequential import Sequential
    from PuzzleLib.Modules import Linear, Activation, sigmoid, Identity, Concat

    data1 = gpuarray.to_gpu(np.random.randn(128, 128).astype(np.float32))
    data2 = gpuarray.to_gpu(np.random.randn(128, 16).astype(np.float32))
    data3 = gpuarray.to_gpu(np.random.randn(128, 32).astype(np.float32))

    seq = Sequential()
    seq.append(Linear(128, 64))
    seq.append(Activation(sigmoid))

    parallel = Parallel()
    parallel.append(seq)
    parallel.append(Identity())
    parallel.append(Identity())

    concat = Concat(axis=1)

    parallel([data1, data2, data3])
    concat(parallel.data)

    assert np.allclose(data2.get(), concat.data.get()[:, 64:64 + 16])

    grad = gpuarray.to_gpu(np.random.randn(128, 112).astype(np.float32))
    concat.backward(grad)
    parallel.backward(concat.grad)

    assert np.allclose(grad.get()[:, 64:64 + 16], parallel.grad[1].get())

    parallel = parallel[::2]
    parallel([data1, data3])
Example #14
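# Verifies Linear forward and backward (output, input gradient, weight and bias gradients)
# against host matrix products.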
def calcTest(dtype, atol):
    insize, outsize = 5, 1

    hostData = np.random.randn(5, insize).astype(dtype)
    data = gpuarray.to_gpu(hostData)

    linear = Linear(insize, outsize, initscheme=("xavier", "avg"))
    linear.calcMode(dtype)

    linear(data)

    hostGrad = np.random.randn(*linear.data.shape).astype(dtype)
    grad = gpuarray.to_gpu(hostGrad)

    linear.backward(grad)

    hostW, hostBias = linear.W.get(), linear.b.get()

    hostOutData = np.dot(hostData, hostW) + hostBias[np.newaxis, :]
    hostInGrad = np.dot(hostGrad, hostW.T)

    hostWGrad = np.dot(hostData.T, hostGrad)
    hostBiasGrad = np.sum(hostGrad, axis=0)

    assert np.allclose(hostOutData, linear.data.get(), atol=atol)
    assert np.allclose(hostInGrad, linear.grad.get(), atol=atol)

    assert np.allclose(hostWGrad, linear.vars["W"].grad.get(), atol=atol)
    assert np.allclose(hostBiasGrad, linear.vars["b"].grad.get(), atol=atol)
Example #15
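# Fits a Linear layer with MSE in the given dtype, casting data and gradients to float32
# for the cost when needed.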
def trainTest(dtype):
    insize, outsize = 500, 100

    hostData = np.random.randn(32, insize).astype(dtype)
    hostTarget = np.random.randn(32, outsize).astype(np.float32)

    data, target = gpuarray.to_gpu(hostData), gpuarray.to_gpu(hostTarget)

    linear = Linear(insize, outsize)
    linear.calcMode(dtype)

    from PuzzleLib.Cost.MSE import MSE
    mse = MSE()

    learnRate = 1e-1

    for i in range(100):
        linear(data)

        outdata = linear.data if dtype == np.float32 else linear.data.astype(
            np.float32)
        error, grad = mse(outdata, target)

        linear.backward(grad if dtype == np.float32 else grad.astype(dtype))
        linear.updateParams(learnRate)

        if (i + 1) % 5 == 0:
            print("Iteration #%d error: %s" % (i + 1, error))
Example #16
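# Verifies NoiseInjector in multiplicative-uniform and additive-gaussian modes using the
# module's saved random tensor.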
def noiseInjectorTest(dtype):
    hostData = np.random.randn(10, 3, 16, 16).astype(dtype)
    data = gpuarray.to_gpu(hostData)

    injector = NoiseInjector(mode="mul",
                             noisetype="uniform",
                             params=(0.0, 10.0))
    injector.calcMode(dtype)

    injector(data)
    assert np.allclose(injector.data.get(), hostData * injector.rands.get())

    hostGrad = np.random.randn(*data.shape).astype(dtype)
    grad = gpuarray.to_gpu(hostGrad)

    injector.backward(grad)
    assert np.allclose(injector.grad.get(), hostGrad * injector.rands.get())

    injector = NoiseInjector(mode="add",
                             noisetype="gaussian",
                             params=(0.0, 1.0))
    injector.calcMode(dtype)

    injector(data)
    assert np.allclose(injector.data.get(), hostData + injector.rands.get())

    injector.backward(grad)
    assert np.allclose(injector.grad.get(), hostGrad)
Example #17
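# Verifies the Hinge cost and its gradient against a per-element host loop, then checks
# the validate() path.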
def errorValTest():
    batchsize, size = 20, 4

    scores = gpuarray.to_gpu(
        np.random.randn(batchsize, size).astype(np.float32))
    labels = gpuarray.to_gpu(
        (np.random.randint(low=0, high=2, size=(batchsize, size)) * 2 -
         1).astype(np.int32))

    hinge = Hinge()
    error, grad = hinge(scores, labels)

    hostScores, hostLabels = scores.get(), labels.get()

    hostGrad = np.empty(grad.shape, dtype=np.float32)
    hostError = 0.0

    for b in range(batchsize):
        for n in range(size):
            val = hostLabels[b, n] * hostScores[b, n]

            hostGrad[b, n] = hostLabels[b, n] / batchsize / size if val < 1.0 else 0.0
            hostError += max(0.0, 1.0 - val) / batchsize / size

    assert np.allclose(hostGrad, grad.get())
    assert np.isclose(hostError, error)

    error = hinge.validate(scores, labels)
    assert np.isclose(hostError, error)
Example #18
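# Builds a two-layer Sequential net, checks forward/backward shapes, then runs a sliced
# copy of the container.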
def simpleNetTest():
    from PuzzleLib.Modules import Linear, Activation, sigmoid

    data = gpuarray.to_gpu(np.random.randn(128, 128).astype(np.float32))

    seq = Sequential()

    seq.append(Linear(128, 64))
    seq.append(Activation(sigmoid))

    seq.append(Linear(64, 32))
    seq.append(Activation(sigmoid))

    seq(data)
    assert seq.data.shape == (128, 32)

    grad = gpuarray.to_gpu(np.random.randn(128, 32).astype(np.float32))
    seq.backward(grad)
    seq.updateParams(1e-4)
    assert seq.grad.shape == data.shape

    data = gpuarray.to_gpu(np.random.randn(64, 128).astype(np.float32))
    seq = seq[:2]
    seq(data)
    assert seq.data.shape == (64, 64)
Example #19
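# Wires Linear, Split, Concat and Activation nodes into a Graph, then checks the output
# and gradient shapes against the shape-inference helpers.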
def calcTest():
	from PuzzleLib.Modules import Linear, Split, Concat, Activation, relu

	v1 = Linear(100, 50, name="v1").node()
	h1 = Split(axis=1, sections=(20, 20, 10), name="h1").node(v1)

	v2 = Linear(100, 50, name="v2").node()
	h2 = Concat(axis=1, name="h2").node((h1, [1, 2]), v2)
	h3 = Activation(relu, name="h3").node(h2)

	h4 = Concat(axis=1, name="h4").node((h1, 0), h3)

	mlp = Graph(inputs=[v1, v2], outputs=h4)

	v1data = gpuarray.to_gpu(np.random.randn(5, 100).astype(np.float32))
	v2data = gpuarray.to_gpu(np.random.randn(5, 100).astype(np.float32))

	mlp.optimizeForShape([v1data.shape, v2data.shape])
	mlp([v1data, v2data])

	assert mlp.data.shape == (5, 100)
	assert mlp.dataShapeFrom([v1data.shape, v2data.shape]) == mlp.data.shape

	grad = gpuarray.to_gpu(np.random.randn(*mlp.data.shape).astype(np.float32))
	mlp.backward(grad)

	assert len(mlp.grad) == 2 and mlp.grad[0].shape == mlp.grad[1].shape == (5, 100)
	assert mlp.gradShapeFrom(grad.shape) == [gr.shape for gr in mlp.grad]
Example #20
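# Verifies Concat along the batch axis and checks that its backward pass splits the
# gradient back by the original batch sizes.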
def alongBatchAxis():
    data = []
    for _ in range(3):
        data.append(
            gpuarray.to_gpu(
                np.random.randn(np.random.randint(low=5, high=10), 10, 5,
                                3).astype(np.float32)))

    concat = Concat(axis=0)
    concat(data)

    hostOutData = np.concatenate([d.get() for d in data], axis=0)
    assert np.allclose(hostOutData, concat.data.get())

    grad = gpuarray.to_gpu(
        np.random.randn(*hostOutData.shape).astype(np.float32))
    concat.backward(grad)

    stride = 0
    hostInGrad = []
    for i in range(len(data)):
        hostInGrad.append(grad.get()[stride:stride + data[i].shape[0], :])
        stride += data[i].shape[0]

    assert all([
        np.allclose(hostInGrad[i], concat.grad[i].get())
        for i in range(len(data))
    ])
Example #21
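# Fits a GroupLinear layer to random grouped targets with MSE; the printed error should
# decrease over the iterations.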
def trainTest():
    groups, insize, outsize = 16, 128, 32
    batchsize = 32

    data = gpuarray.to_gpu(
        np.random.normal(0.0, 1.0,
                         (batchsize, groups, insize)).astype(np.float32))
    target = gpuarray.to_gpu(
        np.random.normal(0.0, 1.0,
                         (batchsize, groups, outsize)).astype(np.float32))

    grpLinear = GroupLinear(groups, insize, outsize)

    from PuzzleLib.Cost.MSE import MSE
    mse = MSE()

    for i in range(100):
        learnRate = 1e-1

        grpLinear(data)
        error, grad = mse(grpLinear.data, target)

        grpLinear.backward(grad)
        grpLinear.updateParams(learnRate)

        if (i + 1) % 5 == 0:
            print("Iteration #%d error: %s" % (i + 1, error))
Example #22
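# Verifies SwapAxes forward and backward against np.swapaxes for every pair of axes.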
def swapAxesTest(dtype):
    shape = (10, 3, 5, 4, 2)

    for axis1 in range(len(shape)):
        for axis2 in range(axis1 + 1, len(shape)):
            hostData = np.random.randn(*shape).astype(dtype)
            data = gpuarray.to_gpu(hostData)

            swapaxes = SwapAxes(axis1, axis2)
            swapaxes.calcMode(dtype)

            swapaxes(data)

            hostOutData = np.swapaxes(hostData, axis1=axis1, axis2=axis2)
            assert np.allclose(hostOutData, swapaxes.data.get())

            hostGrad = np.random.randn(*swapaxes.data.shape).astype(dtype)
            grad = gpuarray.to_gpu(hostGrad)

            swapaxes.backward(grad)

            hostInGrad = np.swapaxes(hostGrad, axis1=axis2, axis2=axis1)

            assert swapaxes.grad.shape == data.shape
            assert np.allclose(hostInGrad, swapaxes.grad.get())
Example #23
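# Applies the RMSProp-Graves update kernel to GPU tensors and checks the result against
# a float32 host-side reference of the same update.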
def calcTest(dtype, atol):
    lr, alpha, mr, epsilon = 0.01, 0.95, 0.9, 10.0
    shape = (11, 13)

    hostW = np.random.randn(*shape).astype(dtype)
    hostDw = np.random.randn(*shape).astype(dtype)
    hostMs = (5.0 + np.random.randn(*shape)**2).astype(dtype)
    hostMg = np.random.randn(*shape).astype(dtype)
    hostDelta = np.random.randn(*shape).astype(dtype)

    w, dw = gpuarray.to_gpu(hostW), gpuarray.to_gpu(hostDw)
    ms, mg, delta = gpuarray.to_gpu(hostMs), gpuarray.to_gpu(
        hostMg), gpuarray.to_gpu(hostDelta)

    rmspropGravesKer(w.dtype)(w, dw, mg, ms, delta, lr, alpha, mr, epsilon)

    hostW, hostDw = hostW.astype(np.float32), hostDw.astype(np.float32)
    hostMs, hostMg, hostDelta = hostMs.astype(np.float32), hostMg.astype(
        np.float32), hostDelta.astype(np.float32)

    hostMg = alpha * hostMg + (1 - alpha) * hostDw
    hostMs = alpha * hostMs + (1 - alpha) * hostDw**2
    hostDelta = mr * hostDelta + lr * hostDw / np.sqrt(hostMs - hostMg**2 +
                                                       epsilon)
    hostW += hostDelta

    hostW, hostDw = hostW.astype(dtype), hostDw.astype(dtype)
    hostMs, hostMg, hostDelta = hostMs.astype(dtype), hostMg.astype(
        dtype), hostDelta.astype(dtype)

    assert np.allclose(hostMg, mg.get(), atol=atol)
    assert np.allclose(hostMs, ms.get(), atol=atol)
    assert np.allclose(hostDelta, delta.get(), atol=atol)
    assert np.allclose(hostW, w.get(), atol=atol)
Example #24
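# Verifies constant-mode Pad1D: the forward pass pads with fillValue and the backward pass
# crops the padding off the gradient.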
def constantTest():
    data = gpuarray.to_gpu(np.random.randn(3, 4, 5).astype(np.float32))

    lpad, rpad = 0, 1
    fillValue = -0.1

    padmod = Pad1D(pad=(lpad, rpad),
                   mode=PadMode.constant,
                   fillValue=fillValue)
    padmod(data)

    assert padmod.dataShapeFrom(data.shape) == padmod.data.shape

    hostData, hostOutData = data.get(), padmod.data.get()
    assert np.allclose(hostOutData[:, :, lpad:hostOutData.shape[2] - rpad],
                       hostData)

    assert np.isclose(hostOutData[0, 0, hostOutData.shape[2] - 1], fillValue)

    grad = gpuarray.to_gpu(
        np.random.randn(*hostOutData.shape).astype(np.float32))
    padmod.backward(grad)

    assert padmod.gradShapeFrom(grad.shape) == data.shape
    assert np.allclose(padmod.grad.get(),
                       grad.get()[:, :, lpad:grad.shape[2] - rpad])
Example #25
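# Builds a small convolutional net and reports its CrossEntropy validation error on random
# GPU data through Validator.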
def onDeviceTest():
    from PuzzleLib.Containers import Sequential
    from PuzzleLib.Modules import Conv2D, MaxPool2D, Activation, relu, Flatten, Linear

    from PuzzleLib.Cost.CrossEntropy import CrossEntropy

    data = gpuarray.to_gpu(
        np.random.randn(10000, 3, 28, 28).astype(np.float32))
    dataTarget = gpuarray.to_gpu(
        np.random.randint(low=0, high=10, size=(10000, )).astype(np.int32))

    seq = Sequential()
    seq.append(Conv2D(3, 16, 9))
    seq.append(MaxPool2D())
    seq.append(Activation(relu))

    seq.append(Conv2D(16, 32, 5))
    seq.append(MaxPool2D())
    seq.append(Activation(relu))

    seq.append(Flatten())
    seq.append(Linear(3 * 3 * 32, 10))

    entr = CrossEntropy()

    val = Validator(seq, entr)
    print("Validation error on small data: %s" %
          val.validate(data, dataTarget))
Example #26
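# Verifies the L1 SVM cost and its gradient against a per-element host loop, then checks
# validate() and the predicted classes.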
def l1SVMTest():
    batchsize, size = 20, 4

    scores = gpuarray.to_gpu(
        np.random.randn(batchsize, size).astype(np.float32))
    labels = gpuarray.to_gpu(
        np.random.randint(low=0, high=size, size=(batchsize, ),
                          dtype=np.int32))

    svm = SVM(mode="l1")
    error, grad = svm(scores, labels)

    hostScores, hostLabels = scores.get(), labels.get()

    hostGrad = np.empty(grad.shape, dtype=np.float32)
    hostError = 0.0

    for b in range(batchsize):
        for n in range(size):
            cls = 2 * (hostLabels[b] == n) - 1
            val = hostScores[b, n] * cls

            hostGrad[b, n] = cls / batchsize / size if val < 1 else 0.0
            hostError += max(0.0, 1.0 - val) / batchsize / size

    assert np.allclose(hostGrad, grad.get())
    assert np.isclose(hostError, error)

    error = svm.validate(scores, labels)
    print("Validation error: %s" % error)
    assert np.allclose(np.argmax(scores.get(), axis=1), svm.mostProb.get())
Example #27
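# Verifies MaxPool2D forward output against a simple host-side max-downsampling reference.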
def unittest():
    batchsize, maps, h, w = 1, 1, 6, 6
    data = gpuarray.to_gpu(
        np.random.randn(batchsize, maps, h, w).astype(np.float32))

    maxpool2d = MaxPool2D()
    maxpool2d(data)

    grad = gpuarray.to_gpu(
        np.random.randn(*maxpool2d.data.shape).astype(np.float32))
    maxpool2d.backward(grad)

    def maxDownSample2d(dat, factor):
        trimrows = dat.shape[0] // factor * factor
        trimcols = dat.shape[1] // factor * factor

        maxSoFar = None
        first = True

        for coff in range(factor):
            for roff in range(factor):
                hopped = dat[roff:trimrows:factor, coff:trimcols:factor]
                if first:
                    maxSoFar = hopped
                    first = False
                else:
                    maxSoFar = np.maximum(maxSoFar, hopped)

        return maxSoFar

    hostOutData = maxDownSample2d(data.get()[0, 0], 2)
    assert np.allclose(hostOutData, maxpool2d.data.get()[0, 0])
Example #28
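# Verifies MoveAxis forward and backward against np.moveaxis for every source/destination pair.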
def moveAxisTest(dtype):
    shape = (10, 3, 5, 4, 2)

    for src in range(len(shape)):
        for dst in range(len(shape)):
            if src == dst:
                continue

            hostData = np.random.randn(*shape).astype(dtype)
            data = gpuarray.to_gpu(hostData)

            moveaxis = MoveAxis(src, dst)
            moveaxis.calcMode(dtype)

            moveaxis(data)

            hostOutData = np.moveaxis(hostData, source=src, destination=dst)
            assert np.allclose(hostOutData, moveaxis.data.get())

            hostGrad = np.random.randn(*moveaxis.data.shape).astype(dtype)
            grad = gpuarray.to_gpu(hostGrad)

            moveaxis.backward(grad)

            hostInGrad = np.moveaxis(hostGrad, source=dst, destination=src)

            assert moveaxis.grad.shape == data.shape
            assert np.allclose(hostInGrad, moveaxis.grad.get())
Example #29
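# Runs gradientCheck on the net built by buildNet with the BCE cost and random data.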
def main():
    net = buildNet()
    cost = BCE()

    data = gpuarray.to_gpu(np.random.randn(1, 1, 6, 6).astype(np.float32))
    target = gpuarray.to_gpu(np.random.randint(0, 2, size=(1, )))

    gradientCheck(net, data, target, cost)
Example #30
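# Verifies an RNN returning only its last state: the forward output, input gradient and
# parameter gradients are compared with a host-side ReLU RNN reference.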
def lastStateTest():
    seqlen, batchsize, insize, hsize = 5, 3, 6, 5

    hostData = np.random.randn(seqlen, batchsize, insize).astype(np.float32)
    data = gpuarray.to_gpu(hostData)

    rnn = RNN(insize, hsize, mode="relu", getSequences=False)
    rnn(data)

    hostOutData = np.zeros((seqlen + 1, batchsize, hsize), dtype=np.float32)
    params = {name: param.get() for name, param in rnn.params[0].items()}

    for d in range(seqlen):
        res = np.dot(hostData[d], params["wi"].T) + np.dot(hostOutData[d], params["ri"].T) + \
           params["bwi"] + params["bri"]
        hostOutData[d + 1] = (res > 0.0) * res

    assert np.allclose(hostOutData[-1], rnn.data.get())

    hostGrad = np.random.randn(*rnn.data.shape).astype(np.float32)
    grad = gpuarray.to_gpu(hostGrad)

    rnn.backward(grad)

    hostGrad = np.zeros((seqlen, batchsize, hsize), dtype=np.float32)
    hostGrad[-1] = grad.get()

    hostAccGrad = np.zeros((seqlen + 1, batchsize, hsize), dtype=np.float32)
    hostInGrad = np.zeros((seqlen, batchsize, insize), dtype=np.float32)

    for d in range(seqlen):
        acc = (hostGrad[seqlen - d - 1] + np.dot(hostAccGrad[seqlen - d], params["ri"])) * \
           (hostOutData[seqlen - d] > 0)

        hostAccGrad[seqlen - d - 1] = acc
        hostInGrad[seqlen - d - 1] = np.dot(acc, params["wi"])

    assert np.allclose(hostInGrad, rnn.grad.get())

    hostRiGrad = np.zeros(params["ri"].shape, dtype=np.float32)
    hostWiGrad = np.zeros(params["wi"].shape, dtype=np.float32)
    hostBiGrad = np.zeros(params["bwi"].shape, dtype=np.float32)

    for d in range(seqlen):
        hostRiGrad += np.dot(hostAccGrad[seqlen - d - 1].T,
                             hostOutData[seqlen - d - 1])
        hostWiGrad += np.dot(hostAccGrad[seqlen - d - 1].T,
                             hostData[seqlen - d - 1])
        hostBiGrad += np.sum(hostAccGrad[seqlen - d - 1], axis=0)

    _, dwparams = acquireRnnParams(rnn.descRnn, w=rnn.dw)
    dwparams = dwparams[0]

    assert np.allclose(hostRiGrad, dwparams["ri"].get())
    assert np.allclose(hostWiGrad, dwparams["wi"].get())
    assert np.allclose(hostBiGrad, dwparams["bwi"].get())
    assert np.allclose(hostBiGrad, dwparams["bri"].get())