Example #1
0
def simpleNetTest():
    from PuzzleLib.Modules import Linear, Activation, sigmoid

    batch = gpuarray.to_gpu(np.random.randn(128, 128).astype(np.float32))

    # Two-layer MLP: 128 -> 64 -> 32, sigmoid after each Linear.
    net = Sequential()
    for module in (Linear(128, 64), Activation(sigmoid),
                   Linear(64, 32), Activation(sigmoid)):
        net.append(module)

    net(batch)
    assert net.data.shape == (128, 32)

    # One backward/update step; input gradient must match the input shape.
    outgrad = gpuarray.to_gpu(np.random.randn(128, 32).astype(np.float32))
    net.backward(outgrad)
    net.updateParams(1e-4)
    assert net.grad.shape == batch.shape

    # Slicing keeps only the first Linear + Activation pair (output width 64).
    batch = gpuarray.to_gpu(np.random.randn(64, 128).astype(np.float32))
    net = net[:2]
    net(batch)
    assert net.data.shape == (64, 64)
Example #2
0
def main():
    batchsize, insize = 16, 1000

    # Stem: insize -> 1000 followed by relu.
    inNode = Linear(insize, 1000, name="linear1").node()
    stem = Activation(relu, name="relu1").node(inNode)

    # Two parallel 1000 -> 800 branches merged elementwise by Add.
    branch1 = Linear(1000, 800, name="linear2").node(stem)
    branch1 = Activation(relu, name="relu2").node(branch1)

    branch2 = Linear(1000, 800, name="linear3").node(stem)
    branch2 = Activation(relu, name="relu3").node(branch2)

    outNode = Add(name="add").node(branch1, branch2)
    graph = Graph(inputs=inNode, outputs=outNode, name="graph")

    data = gpuarray.to_gpu(
        np.random.randn(batchsize, insize).astype(np.float32))

    engine = buildVINOEngine(graph, (batchsize, insize),
                             savepath="../TestData")

    # The compiled engine must reproduce the reference graph's output.
    outdata = graph(data)
    enginedata = engine(data)
    assert np.allclose(outdata.get(), enginedata.get())

    benchModels(graph, engine, data)
Example #3
0
def graphTest():
    from PuzzleLib.Modules import Linear, Activation, relu, Add

    net = Sequential()
    net.append(Linear(10, 10))
    net.append(Activation(relu))

    # Inner graph: one branch runs through a nested Sequential, the other
    # through a plain Linear; Add merges the two.
    inp = Linear(10, 10).node()
    hidden = Activation(relu).node(inp)

    subseq = Sequential()
    subseq.append(Linear(10, 10))
    subseq.append(Activation(relu))

    left = subseq.node(hidden)
    right = Linear(10, 10).node(hidden)
    merged = Add(name="add").node(left, right)

    net.append(Graph(inputs=inp, outputs=merged))

    net.append(Linear(10, 10))
    net.append(Activation(relu))

    data = gpuarray.to_gpu(np.random.randn(1, 10).astype(np.float32))
    outdata = net(data)

    net.reset()

    # Converting the whole Sequential to a Graph must not change its output.
    converted = toGraph(net)
    graphdata = converted(data)
    assert np.allclose(outdata.get(), graphdata.get())
Example #4
0
def buildNet():
    from PuzzleLib.Containers import Sequential
    from PuzzleLib.Modules import Conv2D, MaxPool2D, Activation, relu, Flatten, Linear

    net = Sequential()

    # Three conv -> pool -> relu stages; (inmaps, outmaps, wscale) per stage.
    for inmaps, outmaps, wscale in ((3, 32, 0.0001), (32, 32, 0.01), (32, 64, 0.01)):
        net.append(Conv2D(inmaps, outmaps, 5, pad=2, wscale=wscale,
                          initscheme="gaussian"))
        net.append(MaxPool2D(3, 2))
        net.append(Activation(relu))

    net.append(Flatten())

    # Infer the flattened width from a 1x3x32x32 probe shape.
    flatsize = net.dataShapeFrom((1, 3, 32, 32))[1]
    net.append(Linear(flatsize, 64, wscale=0.1, initscheme="gaussian"))
    net.append(Activation(relu))

    net.append(Linear(64, 10, wscale=0.1, initscheme="gaussian"))
    return net
Example #5
0
def onDeviceTest():
    from PuzzleLib.Containers import Sequential
    from PuzzleLib.Modules import Conv2D, MaxPool2D, Activation, relu, Flatten, Linear

    from PuzzleLib.Cost.CrossEntropy import CrossEntropy
    from PuzzleLib.Optimizers.NesterovSGD import NesterovSGD

    # Synthetic on-device dataset: 10k 3x28x28 samples, 10 classes.
    data = gpuarray.to_gpu(
        np.random.randn(10000, 3, 28, 28).astype(np.float32))
    dataTarget = gpuarray.to_gpu(
        np.random.randint(low=0, high=10, size=(10000, )).astype(np.int32))

    net = Sequential()
    for module in (Conv2D(3, 16, 9), MaxPool2D(), Activation(relu),
                   Conv2D(16, 32, 5), MaxPool2D(), Activation(relu),
                   Flatten(), Linear(3 * 3 * 32, 10)):
        net.append(module)

    cost = CrossEntropy()

    optimizer = NesterovSGD()
    optimizer.setupOn(net)

    def onBatchFinish(train):
        print("Finished batch #%d, error=%s" %
              (train.currBatch, train.cost.getError()))

    trainer = Trainer(net, cost, optimizer, onBatchFinish=onBatchFinish)
    trainer.train(data, dataTarget)
Example #6
0
def onHostTest():
    from PuzzleLib.Containers import Sequential
    from PuzzleLib.Modules import Conv2D, MaxPool2D, Activation, relu, Flatten, Linear

    from PuzzleLib.Cost.CrossEntropy import CrossEntropy

    # Host-side (numpy) dataset; validateFromHost handles the device transfer.
    data = np.random.randn(50000, 3, 28, 28).astype(np.float32)
    dataTarget = np.random.randint(low=0, high=10,
                                   size=(50000, )).astype(np.int32)

    net = Sequential()
    for module in (Conv2D(3, 16, 9), MaxPool2D(), Activation(relu),
                   Conv2D(16, 32, 5), MaxPool2D(), Activation(relu),
                   Flatten(), Linear(3 * 3 * 32, 10)):
        net.append(module)

    cost = CrossEntropy()

    validator = Validator(net, cost)
    validator.validateFromHost(data, dataTarget)
    print("Validation error on big data: %s" % validator.error)
Example #7
0
def unittest():
    from PuzzleLib.Containers.Sequential import Sequential
    from PuzzleLib.Modules import Linear, Activation, sigmoid, Identity, Concat

    data1 = gpuarray.to_gpu(np.random.randn(128, 128).astype(np.float32))
    data2 = gpuarray.to_gpu(np.random.randn(128, 16).astype(np.float32))
    data3 = gpuarray.to_gpu(np.random.randn(128, 32).astype(np.float32))

    branch = Sequential()
    branch.append(Linear(128, 64))
    branch.append(Activation(sigmoid))

    # Three-way Parallel: an MLP branch plus two pass-through Identity branches.
    parallel = Parallel()
    for module in (branch, Identity(), Identity()):
        parallel.append(module)

    concat = Concat(axis=1)

    parallel([data1, data2, data3])
    concat(parallel.data)

    # Identity branch #1 forwards data2 unchanged: columns 64..80 of the concat.
    assert np.allclose(data2.get(), concat.data.get()[:, 64:64 + 16])

    grad = gpuarray.to_gpu(np.random.randn(128, 112).astype(np.float32))
    concat.backward(grad)
    parallel.backward(concat.grad)

    # The same column slice must come back as branch #1's gradient.
    assert np.allclose(grad.get()[:, 64:64 + 16], parallel.grad[1].get())

    # Striding drops the middle branch; the remaining two still run.
    parallel = parallel[::2]
    parallel([data1, data3])
Example #8
0
def calcTest():
	from PuzzleLib.Modules import Linear, Split, Concat, Activation, relu

	# Input v1 is projected 100 -> 50, then split into (20, 20, 10) along axis 1.
	v1 = Linear(100, 50, name="v1").node()
	h1 = Split(axis=1, sections=(20, 20, 10), name="h1").node(v1)

	v2 = Linear(100, 50, name="v2").node()

	# Sections 1 and 2 of the split join v2's projection, go through relu,
	# then section 0 is concatenated back on: 20 + (20 + 10 + 50) = 100.
	h2 = Concat(axis=1, name="h2").node((h1, [1, 2]), v2)
	h3 = Activation(relu, name="h3").node(h2)

	h4 = Concat(axis=1, name="h4").node((h1, 0), h3)

	mlp = Graph(inputs=[v1, v2], outputs=h4)

	v1data = gpuarray.to_gpu(np.random.randn(5, 100).astype(np.float32))
	v2data = gpuarray.to_gpu(np.random.randn(5, 100).astype(np.float32))

	shapes = [v1data.shape, v2data.shape]
	mlp.optimizeForShape(shapes)
	mlp([v1data, v2data])

	assert mlp.data.shape == (5, 100)
	assert mlp.dataShapeFrom(shapes) == mlp.data.shape

	grad = gpuarray.to_gpu(np.random.randn(*mlp.data.shape).astype(np.float32))
	mlp.backward(grad)

	# Both inputs receive a gradient of their own (5, 100) shape.
	assert len(mlp.grad) == 2
	assert mlp.grad[0].shape == mlp.grad[1].shape == (5, 100)
	assert mlp.gradShapeFrom(grad.shape) == [gr.shape for gr in mlp.grad]
Example #9
0
def matchTest():
	from PuzzleLib.Containers import Sequential, Parallel
	from PuzzleLib.Modules import Linear, Activation, sigmoid, Replicate, Concat

	# Container-based version of the topology.
	seq = Sequential()
	seq.append(Linear(128, 64, name="linear-1"))
	seq.append(Activation(sigmoid))
	seq.append(Replicate(times=2))

	heads = Parallel()
	heads.append(Linear(64, 10, name="linear-2"))
	heads.append(Linear(64, 5, name="linear-3"))
	seq.append(heads)

	seq.append(Concat(axis=1))

	# Graph-based version of the same topology.
	v1 = Linear(128, 64, name="linear-1").node()
	h1 = Activation(sigmoid).node(v1)

	h2 = Linear(64, 10, name="linear-2").node(h1)
	h3 = Linear(64, 5, name="linear-3").node(h1)

	h4 = Concat(axis=1).node(h2, h3)

	mlp = Graph(inputs=v1, outputs=h4)

	# Copy weights and biases layer by layer so both nets compute the same map.
	for layername in ("linear-1", "linear-2", "linear-3"):
		mlp.getByName(layername).W.set(seq.getByName(layername).W)
		mlp.getByName(layername).b.set(seq.getByName(layername).b)

	data = gpuarray.to_gpu(np.random.randn(32, 128).astype(np.float32))
	seq(data)
	mlp(data)

	assert np.allclose(seq.data.get(), mlp.data.get())

	grad = gpuarray.to_gpu(np.random.randn(32, 15).astype(np.float32))
	seq.backward(grad)
	mlp.backward(grad)

	assert np.allclose(seq.grad.get(), mlp.grad.get())
Example #10
0
def buildNet():
    from PuzzleLib.Containers import Sequential, Parallel
    from PuzzleLib.Modules import Linear, Activation, relu, Replicate, Concat

    net = Sequential()

    # 20 -> 10 -> 5 MLP stem with relu after each Linear.
    net.append(Linear(20, 10, name="linear-1"))
    net.append(Activation(relu, name="relu-1"))
    net.append(Linear(10, 5, name="linear-2"))
    net.append(Activation(relu, name="relu-2"))

    # Fan out into two heads (widths 2 and 3), then concat back to width 5.
    net.append(Replicate(times=2, name="repl"))

    heads = Parallel()
    heads.append(Linear(5, 2, name="linear-3-1"))
    heads.append(Linear(5, 3, name="linear-3-2"))
    net.append(heads)

    net.append(Concat(axis=1, name="concat"))

    return net
Example #11
0
def buildNet():
    from PuzzleLib.Containers import Sequential
    from PuzzleLib.Modules import Conv2D, MaxPool2D, Activation, relu, Flatten, Linear

    net = Sequential(name="lenet-5-like")

    # Two conv -> pool -> relu stages; (inmaps, outmaps, kernel size) per stage.
    for inmaps, outmaps, size in ((1, 16, 3), (16, 32, 4)):
        net.append(Conv2D(inmaps, outmaps, size))
        net.append(MaxPool2D())
        net.append(Activation(relu))

    # Classifier head on the 32x5x5 feature map.
    net.append(Flatten())
    net.append(Linear(32 * 5 * 5, 1024))
    net.append(Activation(relu))

    net.append(Linear(1024, 10))
    return net
Example #12
0
def complexNetTest():
    from PuzzleLib.Modules import Conv2D, MaxPool2D, Activation, relu, Flatten

    batch = gpuarray.to_gpu(
        np.random.randn(128, 3, 150, 150).astype(np.float32))

    # Two conv -> pool -> relu stages followed by a flatten.
    net = Sequential()
    for module in (Conv2D(3, 16, 11), MaxPool2D(), Activation(relu),
                   Conv2D(16, 16, 5), MaxPool2D(), Activation(relu),
                   Flatten()):
        net.append(module)

    net(batch)

    # Full backward pass plus one SGD-style parameter update.
    grad = gpuarray.to_gpu(np.random.randn(*net.data.shape).astype(np.float32))
    net.backward(grad)
    net.updateParams(1e-4)
Example #13
0
def buildGraph():
    from PuzzleLib.Containers import Graph
    from PuzzleLib.Modules import Linear, Activation, relu, Concat

    # 20 -> 10 stem with two 10 -> 5 heads concatenated into a width-10 output.
    inp = Linear(20, 10, name="linear-1").node()
    stem = Activation(relu, name="relu-1").node(inp)

    head1 = Linear(10, 5, name="linear-2").node(stem)
    head2 = Linear(10, 5, name="linear-3").node(stem)

    output = Concat(axis=1, name="concat").node(head1, head2)
    return Graph(inputs=inp, outputs=output)
Example #14
0
def mixedTest():
	from PuzzleLib.Containers import Graph, Sequential
	from PuzzleLib.Modules import Linear, Split, Concat, Activation, relu

	# Graph subnet: v1 is split (20, 20, 10); sections 1 and 2 join v2,
	# pass through relu, then section 0 is concatenated back on.
	v1 = Linear(100, 50, name="v1").node()
	h1 = Split(axis=1, sections=(20, 20, 10), name="h1").node(v1)

	v2 = Linear(100, 50, name="v2").node()
	h2 = Concat(axis=1, name="h2").node((h1, [1, 2]), v2)
	h3 = Activation(relu, name="h3").node(h2)

	h4 = Concat(axis=1, name="h4").node((h1, 0), h3)
	mlp = Graph(inputs=[v1, v2], outputs=h4)

	# Wrap the graph inside a Sequential and render the combined topology.
	net = Sequential()
	net.append(Linear(10, 200))
	net.append(Split(axis=1, sections=(100, 100)))
	net.append(mlp)
	net.append(Activation(relu))

	drawBoard(net, filename="./TestData/mixed.gv", view=False, modulesOnly=False)
Example #15
0
def onDeviceTest():
    from PuzzleLib.Containers import Sequential
    from PuzzleLib.Modules import Conv2D, MaxPool2D, Activation, relu, Flatten, Linear

    # Synthetic on-device batch: 10k 3x28x28 samples.
    data = gpuarray.to_gpu(
        np.random.randn(10000, 3, 28, 28).astype(np.float32))

    net = Sequential()
    for module in (Conv2D(3, 16, 9), MaxPool2D(), Activation(relu),
                   Conv2D(16, 32, 5), MaxPool2D(), Activation(relu),
                   Flatten(), Linear(3 * 3 * 32, 10)):
        net.append(module)

    # Run inference-only batches, reporting progress after each one.
    calc = Calculator(net)
    calc.onBatchFinish = lambda calculator: print("Finished batch #%d" %
                                                  calculator.currBatch)
    calc.calc(data)
def loadNet(modelpath=None, name="OpenPoseFaceNet"):
    """Build the OpenPose face network: a VGG-style backbone plus 6 stacked refinement stages.

    modelpath -- optional path to pretrained weights; when given, weights are
                 loaded by unique layer names and the net is put in eval mode.
    name      -- name assigned to the returned Sequential container.
    Returns the assembled Sequential network.
    """
    net = Sequential(name=name)

    # --- VGG-style backbone: conv/relu blocks separated by 2x2 max pooling ---
    net.append(Conv2D(3, 64, 3, pad=1, name="conv1_1"))
    net.append(Activation(relu, name="conv1_1_re"))
    net.append(Conv2D(64, 64, 3, pad=1, name="conv1_2"))
    net.append(Activation(relu, name="conv1_2_re"))

    net.append(MaxPool2D(2, 2, name="pool1"))

    net.append(Conv2D(64, 128, 3, pad=1, name="conv2_1"))
    net.append(Activation(relu, name="conv2_1_re"))
    net.append(Conv2D(128, 128, 3, pad=1, name="conv2_2"))
    net.append(Activation(relu, name="conv2_2_re"))

    net.append(MaxPool2D(2, 2, name="pool2"))

    net.append(Conv2D(128, 256, 3, pad=1, name="conv3_1"))
    net.append(Activation(relu, name="conv3_1_re"))
    net.append(Conv2D(256, 256, 3, pad=1, name="conv3_2"))
    net.append(Activation(relu, name="conv3_2_re"))
    net.append(Conv2D(256, 256, 3, pad=1, name="conv3_3"))
    net.append(Activation(relu, name="conv3_3_re"))
    net.append(Conv2D(256, 256, 3, pad=1, name="conv3_4"))
    net.append(Activation(relu, name="conv3_4_re"))

    net.append(MaxPool2D(2, 2, name="pool3"))

    net.append(Conv2D(256, 512, 3, pad=1, name="conv4_1"))
    net.append(Activation(relu, name="conv4_1_re"))
    net.append(Conv2D(512, 512, 3, pad=1, name="conv4_2"))
    net.append(Activation(relu, name="conv4_2_re"))
    net.append(Conv2D(512, 512, 3, pad=1, name="conv4_3"))
    net.append(Activation(relu, name="conv4_3_re"))
    net.append(Conv2D(512, 512, 3, pad=1, name="conv4_4"))
    net.append(Activation(relu, name="conv4_4_re"))

    net.append(Conv2D(512, 512, 3, pad=1, name="conv5_1"))
    net.append(Activation(relu, name="conv5_1_re"))
    net.append(Conv2D(512, 512, 3, pad=1, name="conv5_2"))
    net.append(Activation(relu, name="conv5_2_re"))

    # Reduce to the 128-channel CPM feature map shared by all stages.
    net.append(Conv2D(512, 128, 3, pad=1, name="conv5_3_CPM"))
    net.append(Activation(relu, name="conv5_3_CPM_re"))

    # Duplicate the CPM features: one copy feeds the stage chain, one the shortcut.
    net.append(Replicate(2))

    # Each shortcut passes the CPM features through unchanged; each branch
    # replicates its own output for the next stage + shortcut pair.
    shortcut0 = Sequential()
    shortcut0.append(Identity())

    branch0 = Sequential()
    branch0.append(Replicate(2))

    shortcut1 = Sequential()
    shortcut1.append(Identity())

    branch1 = Sequential()
    branch1.append(Replicate(2))

    shortcut2 = Sequential()
    shortcut2.append(Identity())

    branch2 = Sequential()
    branch2.append(Replicate(2))

    shortcut3 = Sequential()
    shortcut3.append(Identity())

    branch3 = Sequential()
    branch3.append(Replicate(2))

    shortcut4 = Sequential()
    shortcut4.append(Identity())

    # branch4 is stage 1: CPM features -> 512 -> 71 output maps.
    branch4 = Sequential()
    branch4.append(Conv2D(128, 512, 1, pad=0, name="conv6_1_CPM"))
    branch4.append(Activation(relu, name="conv6_1_CPM_re"))
    branch4.append(Conv2D(512, 71, 1, pad=0, name="conv6_2_CPM"))

    # Nest the stages innermost-first: branch4 (stage 1) ends up deepest,
    # and `net` (the backbone) wraps everything as stage 6's container.
    branches = [branch4, branch3, branch2, branch1, branch0, net]
    shortcuts = [shortcut4, shortcut3, shortcut2, shortcut1, shortcut0, None]

    for branchIdx, branch in enumerate(branches):
        if branchIdx == 0:
            continue  # branch4 (stage 1) has no previous stage to wrap

        # Run the previous stage in parallel with its shortcut, then concat:
        # 71 stage maps + 128 shortcut features = 199 channels.
        branch.append(Parallel().append(branches[branchIdx - 1]).append(
            shortcuts[branchIdx - 1]))
        branch.append(
            Concat(name="features_in_stage_%d" % (branchIdx + 1), axis=1))

        # Refinement stage: five 7x7 convs, a 1x1 mixing conv, and a
        # final 1x1 conv down to the 71 output maps.
        branch.append(
            Conv2D(199, 128, 7, pad=3,
                   name="Mconv1_stage%d" % (branchIdx + 1)))
        branch.append(
            Activation(relu, name="Mconv1_stage%d_re" % (branchIdx + 1)))
        branch.append(
            Conv2D(128, 128, 7, pad=3,
                   name="Mconv2_stage%d" % (branchIdx + 1)))
        branch.append(
            Activation(relu, name="Mconv2_stage%d_re" % (branchIdx + 1)))
        branch.append(
            Conv2D(128, 128, 7, pad=3,
                   name="Mconv3_stage%d" % (branchIdx + 1)))
        branch.append(
            Activation(relu, name="Mconv3_stage%d_re" % (branchIdx + 1)))
        branch.append(
            Conv2D(128, 128, 7, pad=3,
                   name="Mconv4_stage%d" % (branchIdx + 1)))
        branch.append(
            Activation(relu, name="Mconv4_stage%d_re" % (branchIdx + 1)))
        branch.append(
            Conv2D(128, 128, 7, pad=3,
                   name="Mconv5_stage%d" % (branchIdx + 1)))
        branch.append(
            Activation(relu, name="Mconv5_stage%d_re" % (branchIdx + 1)))
        branch.append(
            Conv2D(128, 128, 1, pad=0,
                   name="Mconv6_stage%d" % (branchIdx + 1)))
        branch.append(
            Activation(relu, name="Mconv6_stage%d_re" % (branchIdx + 1)))
        branch.append(
            Conv2D(128, 71, 1, pad=0, name="Mconv7_stage%d" % (branchIdx + 1)))

    if modelpath is not None:
        net.load(modelpath, assumeUniqueNames=True, name=name)
        net.evalMode()

    return net
Example #17
0
 def __init__(self, config, name=None):
     """Container with a dense hidden_size -> hidden_size layer and a tanh activation.

     config is expected to expose hidden_size; name is forwarded to the base container.
     """
     super().__init__(name)
     # Capture the constructor arguments (self, config, name) for the blueprint
     # mechanism — presumably used to re-instantiate the container; keep this
     # call before any extra locals are introduced.
     self.registerBlueprint(locals())
     self.append(Linear(config.hidden_size, config.hidden_size, name='dense'))
     # NOTE(review): the activation is stored as an attribute, NOT appended to
     # the container — confirm that callers apply it explicitly.
     self.activation = Activation('tanh')