Example #1
    def testToyRegression(self):
        """Tests a toy regression end to end.

        The test code carries a simple toy regression in the form
            y = 2.0 x1 + 1.5 x2 + 0.5
        by randomly generating gaussian inputs and calculating the ground
        truth outputs in the net as well. It uses a standard SGD to then
        train the parameters.
        """
        workspace.ResetWorkspace()
        init_net = core.Net("init")
        W = init_net.UniformFill([], "W", shape=[1, 2], min=-1., max=1.)
        B = init_net.ConstantFill([], "B", shape=[1], value=0.0)
        W_gt = init_net.GivenTensorFill([],
                                        "W_gt",
                                        shape=[1, 2],
                                        values=[2.0, 1.5])
        B_gt = init_net.GivenTensorFill([], "B_gt", shape=[1], values=[0.5])
        LR = init_net.ConstantFill([], "LR", shape=[1], value=-0.1)
        ONE = init_net.ConstantFill([], "ONE", shape=[1], value=1.)
        ITER = init_net.ConstantIntFill([], "ITER", shape=[1], value=0)

        train_net = core.Net("train")
        X = train_net.GaussianFill([], "X", shape=[64, 2], mean=0.0, std=1.0)
        Y_gt = X.FC([W_gt, B_gt], "Y_gt")
        Y_pred = X.FC([W, B], "Y_pred")
        dist = train_net.SquaredL2Distance([Y_gt, Y_pred], "dist")
        loss = dist.AveragedLoss([], ["loss"])
        # Get gradients for all the computations above. Note that we don't
        # actually need the gradient of the Y_gt computation, but we'll just
        # leave it there. In many cases one would load X and Y_gt from disk,
        # so there would be no operator computing the Y_gt input anyway.
        input_to_grad = train_net.AddGradientOperators([loss], skip=2)
        # updates
        train_net.Iter(ITER, ITER)
        train_net.LearningRate(ITER,
                               "LR",
                               base_lr=-0.1,
                               policy="step",
                               stepsize=20,
                               gamma=0.9)
        train_net.WeightedSum([W, ONE, input_to_grad[str(W)], LR], W)
        train_net.WeightedSum([B, ONE, input_to_grad[str(B)], LR], B)
        for blob in [loss, W, B]:
            train_net.Print(blob, [])

        # Assemble the plan and run it (CPU).
        plan = core.Plan("toy_regression")
        plan.AddStep(core.ExecutionStep("init", init_net))
        plan.AddStep(core.ExecutionStep("train", train_net, 200))

        workspace.RunPlan(plan)
        W_result = workspace.FetchBlob("W")
        B_result = workspace.FetchBlob("B")
        np.testing.assert_array_almost_equal(W_result, [[2.0, 1.5]], decimal=2)
        np.testing.assert_array_almost_equal(B_result, [0.5], decimal=2)
        workspace.ResetWorkspace()
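
The two WeightedSum calls above are the entire SGD update: because LR is filled with a negative value (base_lr=-0.1), adding the scaled gradient moves the parameters downhill. A minimal NumPy sketch of the same arithmetic, with plain arrays standing in for the Caffe2 blobs:

import numpy as np

W = np.random.uniform(-1.0, 1.0, size=(1, 2))  # current weights, as in UniformFill
dW = np.random.randn(1, 2)                     # stand-in for the gradient blob
lr = -0.1                                      # negative scale, so addition descends
# WeightedSum([W, ONE, dW, LR], W) computes W <- 1.0 * W + lr * dW in place
W = 1.0 * W + lr * dW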
Example #2
 def testRunPlan(self):
     plan = core.Plan("test-plan")
     plan.AddNets([self.net])
     plan.AddStep(core.ExecutionStep("test-step", self.net))
     self.assertEqual(workspace.RunPlan(plan.Proto().SerializeToString()),
                      True)
     self.assertEqual(workspace.HasBlob("testblob"), True)
Example #3
 def testRunPlanInBackground(self):
     plan = core.Plan("test-plan")
     plan.AddStep(core.ExecutionStep("test-step", self.net))
     background_plan = workspace.RunPlanInBackground(plan)
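     # spin-wait until the background plan reports completion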
     while not background_plan.is_done():
         pass
     self.assertEqual(background_plan.is_succeeded(), True)
     self.assertEqual(workspace.HasBlob("testblob"), True)
Example #4
def benchmark(ws, net, warmups=5, iters=100):
    for _ in range(warmups):
        ws.run(net)
    plan = core.Plan("plan")
    plan.AddStep(core.ExecutionStep("test-step", net, iters))
    before = time.time()
    ws.run(plan)
    after = time.time()
    print("Timing network, time taken per-iteration: {:.6f}ms".format((
        after - before) / float(iters) * 1000.0))
    return after - before
Example #5
def benchmark(net, warmups=5, iters=100):
    for _ in range(warmups):
        workspace.RunNetOnce(net.Proto().SerializeToString())
    plan = core.Plan("plan")
    plan.AddStep(core.ExecutionStep("test-step", net, iters))
    before = time.time()
    workspace.RunPlan(plan.Proto().SerializeToString())
    after = time.time()
    print("Timing network, time taken per-iteration: {:.6f}ms".format(
        (after - before) / float(iters) * 1000.0))
    return after - before
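
Both benchmark variants expect a fully constructed net; a hypothetical call site for the Example #5 variant, with a trivial one-op net (the net and blob names here are illustrative):

from caffe2.python import core, workspace

net = core.Net("bench-net")
net.ConstantFill([], ["out"], shape=[1], value=1.0)
total_seconds = benchmark(net, warmups=2, iters=10)  # prints per-iteration time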
Example #6
    def test_plan_run(self, blob_name, plan_name, net_name, value):
        ws = workspace.C.Workspace()
        plan = core.Plan(plan_name)
        net = core.Net(net_name)
        net.ConstantFill([], [blob_name], shape=[1], value=value)

        plan.AddStep(core.ExecutionStep("step", nets=[net], num_iter=1))

        ws.run(plan)
        self.assertIn(blob_name, ws.blobs)
        self.assertIn(net.Name(), ws.nets)
        np.testing.assert_allclose(
            [value], ws.blobs[blob_name].fetch(), atol=1e-4, rtol=1e-4)
Example #7
 def test_multithreaded_evaluation(self, x, n, w):
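     # f is the Python op body: it resizes the output and copies the input through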
     def f(inputs, outputs):
         outputs[0].reshape(inputs[0].shape)
         outputs[0].data[...] = inputs[0].data
     ops = [CreatePythonOperator(f, ["x"], [str(i)]) for i in range(n)]
     net = core.Net("net")
     net.Proto().op.extend(ops)
     net.Proto().type = "dag"
     net.Proto().num_workers = w
     iters = 100
     plan = core.Plan("plan")
     plan.AddStep(core.ExecutionStep("test-step", net, iters))
     workspace.FeedBlob("x", x)
     workspace.RunPlan(plan.Proto().SerializeToString())
     for i in range(n):
         y = workspace.FetchBlob(str(i))
         np.testing.assert_almost_equal(x, y)
Example #8
                    stride=1, order="NHWC").MaxPool([],
                                                    kernel=2,
                                                    stride=2,
                                                    order="NHWC"))
softmax = pool2.Flatten().FC([W3, B3]).Relu().FC([W4, B4]).Softmax()

# Cross entropy, and accuracy
xent = softmax.LabelCrossEntropy([label], "xent")
# The loss function.
loss = xent.AveragedLoss([], ["loss"])
# Get gradients for the loss.
train_net.AddGradientOperators([loss])
accuracy = softmax.Accuracy([label], "accuracy")
# parameter update.
for param in params:
    train_net.WeightedSum([param, ONE, param.Grad(), LR], param)
LR = train_net.Mul([LR, DECAY], "LR")
train_net.Print([accuracy], [])

# Run all on GPU.
#init_net.RunAllOnGPU()
#train_net.RunAllOnGPU()

plan = core.Plan("mnist_lenet_gc")
plan.AddNets([init_net, train_net])
plan.AddStep(core.ExecutionStep("init", init_net))
plan.AddStep(core.ExecutionStep("train", train_net, 1000))

with open('mnist_lenet_group_convolution_nhwc.pbtxt', 'w') as fid:
    fid.write(str(plan.Proto()))
Example #9
 def testConstructPlanFromSteps(self):
     step = core.ExecutionStep("test-step-as-plan", self.net)
     self.assertEqual(workspace.RunPlan(step), True)
     self.assertEqual(workspace.HasBlob("testblob"), True)
Example #10
def Benchmark(model_gen, arg):
    model, input_size = model_gen(arg.order)
    model.Proto().type = arg.net_type
    model.Proto().num_workers = arg.num_workers

    # So that the benchmark runs without feeding any external inputs, add the
    # data and label blobs to the parameter initialization net as well.

    if arg.order == "NCHW":
        input_shape = [arg.batch_size, 3, input_size, input_size]
    else:
        input_shape = [arg.batch_size, input_size, input_size, 3]
        if arg.model == "MLP":
            input_shape = [arg.batch_size, input_size]

    model.param_init_net.GaussianFill([],
                                      "data",
                                      shape=input_shape,
                                      mean=0.0,
                                      std=1.0)
    model.param_init_net.UniformIntFill([],
                                        "label",
                                        shape=[arg.batch_size],
                                        min=0,
                                        max=999)

    if arg.forward_only:
        print('{}: running forward only.'.format(arg.model))
    else:
        print('{}: running forward-backward.'.format(arg.model))
        model.AddGradientOperators(["loss"])
        AddParameterUpdate(model)

        if arg.order == 'NHWC':
            print(
                '==WARNING==\n'
                'NHWC order with CuDNN may not be supported yet, so I might\n'
                'exit suddenly.')

    if not arg.cpu:
        model.param_init_net.RunAllOnGPU()
        model.net.RunAllOnGPU()

    if arg.dump_model:
        # Writes out the pbtxt for benchmarks on e.g. Android
        with open("{0}_init_batch_{1}.pbtxt".format(arg.model, arg.batch_size),
                  "w") as fid:
            fid.write(str(model.param_init_net.Proto()))
            with open("{0}.pbtxt".format(arg.model, arg.batch_size),
                      "w") as fid:
                fid.write(str(model.net.Proto()))

    workspace.RunNetOnce(model.param_init_net)
    workspace.CreateNet(model.net)
    for i in range(arg.warmup_iterations):
        workspace.RunNet(model.net.Proto().name)

    plan = core.Plan("plan")
    plan.AddStep(core.ExecutionStep("run", model.net, arg.iterations))
    start = time.time()
    workspace.RunPlan(plan)
    print('Spent: {}'.format((time.time() - start) / arg.iterations))
    if arg.layer_wise_benchmark:
        print('Layer-wise benchmark.')
        workspace.BenchmarkNet(model.net.Proto().name, 1, arg.iterations, True)
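
AddParameterUpdate is defined elsewhere in the benchmark script. A minimal sketch of what such a helper could look like, reusing the Iter/LearningRate/WeightedSum pattern from Examples #1 and #8 (the blob names and hyperparameters are illustrative, not the script's actual values):

def AddParameterUpdate(model):
    # Illustrative plain-SGD update, not the benchmark's real helper.
    ITER = model.param_init_net.ConstantIntFill([], "ITER", shape=[1], value=0)
    ONE = model.param_init_net.ConstantFill([], "ONE", shape=[1], value=1.0)
    model.net.Iter(ITER, ITER)
    LR = model.net.LearningRate(ITER, "LR", base_lr=-0.1, policy="fixed")
    for param in model.params:
        # param_to_grad is populated by AddGradientOperators above
        model.net.WeightedSum([param, ONE, model.param_to_grad[param], LR], param)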
Example #11
W8 = init_net.ConstantFill([], "W8", shape=[1000, 4096])
B8 = init_net.ConstantFill([], "B8", shape=[1000], value=0.0)
pred = (pool5_flatten.FC([W6, B6]).Relu().Dropout(outputs=2)[0].FC(
    [W7, B7]).Relu().Dropout(outputs=2)[0].FC([W8, B8]).Softmax())
xent = pred.LabelCrossEntropy([label], "xent")
# The loss function.
loss = xent.AveragedLoss([], ["loss"])
test_net.AddGradientOperators(first=2)
test_net.Print([loss], [])

dump_net = core.Net("dump")
for blob in [
        data, pool1, pool1a, pool1b, pool2, conv3a, conv3b, conv4a, conv4b,
        conv5a, conv5b, pool5_flatten
]:
    dump_net.SaveFloatTensor([blob], [], file=str(blob))

init_net.RunAllOnGPU()
test_net.RunAllOnGPU()
dump_net.RunAllOnGPU()

plan = core.Plan("alexnet")
plan.AddNets([init_net, test_net, dump_net])
plan.AddStep(core.ExecutionStep("init", init_net))
plan.AddStep(core.ExecutionStep("first_run", test_net))
#plan.AddStep(core.ExecutionStep("subsequent_run", test_net, 10))
plan.AddStep(core.ExecutionStep("dump", dump_net))

with open('alexnet.pbtxt', 'w') as fid:
    fid.write(str(plan.Proto()))
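
The dumped file is a text-format PlanDef, so it can be parsed back and executed later, mirroring the serialized-plan call from Example #2. A minimal sketch, assuming the standard Caffe2 proto bindings:

from caffe2.proto import caffe2_pb2
from caffe2.python import workspace
from google.protobuf import text_format

plan_def = caffe2_pb2.PlanDef()
with open('alexnet.pbtxt') as fid:
    text_format.Parse(fid.read(), plan_def)      # parse the text proto back
workspace.RunPlan(plan_def.SerializeToString())  # same entry point as Example #2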