Example #1
    def test_concrete_program(self):
        with fluid.dygraph.guard(fluid.CPUPlace()):
            x = to_variable(np.ones([4, 10]).astype('float32'))
            y = to_variable(np.ones([4, 10]).astype('float32') * 2)
            int_val = 4.

            net = SimpleNet()
            # We can get the concrete_program by specifying InputSpec information; there is no need to fake an input.
            net.add_func = declarative(net.add_func,
                                       input_spec=[
                                           InputSpec([-1, 10]),
                                           InputSpec([-1, 10], name='y')
                                       ])
            cp1 = net.add_func.concrete_program
            self.assertTrue(cp1.inputs[-1].shape == (-1, 10))
            self.assertTrue(cp1.inputs[-1].name == 'y')

            # generate another program
            net.add_func = declarative(
                net.add_func,
                input_spec=[InputSpec([10]),
                            InputSpec([10], name='label')])
            cp2 = net.add_func.concrete_program
            self.assertTrue(cp2.inputs[-1].shape == (10, ))
            self.assertTrue(cp2.inputs[-1].name == 'label')
            # Note(Aurelius84): A new instance is returned each time `declarative(foo)` is called,
            # so the number of cached programs is 1.
            self.assertTrue(len(net.add_func.program_cache) == 1)
            self.assertTrue(cp1 != cp2)
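
The SimpleNet layer used here (and again in Examples #2, #4, #12 and #14) is defined elsewhere in the test module. Below is a minimal sketch consistent with how these tests call it; the layer sizes, method bodies and decorator placement are assumptions, not the original definition.

import numpy as np
import paddle.fluid as fluid
from paddle.fluid.dygraph import declarative
from paddle.static import InputSpec


class SimpleNet(fluid.dygraph.Layer):
    def __init__(self):
        super(SimpleNet, self).__init__()
        # assumed sizes: they only need to accept the [.., 10] inputs above
        self.linear = fluid.dygraph.Linear(10, 3)

    @declarative(input_spec=[InputSpec(shape=[None, 10], dtype='float32')])
    def forward(self, x, a=1, b=2):
        return self.inner_function(x)

    @declarative
    def inner_function(self, x):
        return self.linear(x)

    # deliberately undecorated: the tests wrap it with `declarative(...)`
    def add_func(self, x, y):
        return x + y

    @declarative
    def func_with_list(self, l, int_val=1):
        x, y = l
        return x + y + int_val

    @declarative
    def func_with_dict(self, d):
        return d['x'] + d['y']

    @declarative
    def func_with_list_dict(self, dl):
        bias, d = dl[0], dl[1]
        return d['x'] + d['y'] + bias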
Example #2
    def test_input_spec(self):
        net = SimpleNet()
        net = declarative(net, input_spec=[InputSpec([None, 8, 10])])
        self.assertTrue(len(net.forward.inputs) == 1)
        self.assertTrue(len(net.forward.program_cache) == 1)
        input_shape = net.forward.inputs[0].shape
        self.assertListEqual(list(input_shape), [-1, 8, 10])

        # re-decorate with a different input_spec
        net = declarative(net, input_spec=[InputSpec([None, 16, 10])])
        input_shape = net.forward.inputs[0].shape
        self.assertListEqual(list(input_shape), [-1, 16, 10])
Example #3
    def test_with_different_input(self):
        with fluid.dygraph.guard(fluid.CPUPlace()):
            x_data = np.ones([16, 10]).astype('float32')
            y_data = np.ones([10]).astype('float32') * 2
            z_data = np.ones([10]).astype('float32') * 2.2

            foo = declarative(foo_func)

            # [16, 10] + [10] (varbase)
            out_1 = foo(to_variable(x_data), to_variable(y_data))
            self.assertTrue(np.allclose(x_data + y_data, out_1.numpy()))
            self.assertTrue(len(foo.program_cache) == 1)
            self.assertTrue(len(foo.program_cache.concrete_programs()) == 1)

            # [16, 10] + [10] (numpy)
            out_2 = foo(to_variable(x_data), y_data)
            self.assertTrue(np.allclose(x_data + y_data, out_2.numpy()))
            self.assertTrue(len(foo.program_cache) == 1)

            # [16, 10] + [10] (numpy): different values, same shapes and dtypes
            out_3 = foo(to_variable(x_data), z_data)
            self.assertTrue(np.allclose(x_data + z_data, out_3.numpy()))
            # hits the cached program
            self.assertTrue(len(foo.program_cache) == 1)

            # [16, 10] + [10] (numpy) with a different value for the default argument (c=3)
            out_4 = foo(to_variable(x_data), z_data, 3)
            self.assertTrue(np.allclose(x_data + z_data, out_4.numpy()))
            # creates a new program
            self.assertTrue(len(foo.program_cache) == 2)
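
The foo_func wrapped above (and reused via get_concrete_program in Example #5) is not shown in this snippet. A plausible definition, consistent with the cache behaviour asserted here (two Tensor arguments plus two non-Tensor defaults), is:

def foo_func(a, b, c=1, d=2):
    # Only the Tensor signature (shape/dtype of `a` and `b`) and the concrete
    # values of the non-Tensor arguments `c`/`d` enter the cache key, which is
    # why passing c=3 above creates a second program while new data of the
    # same shape hits the cached one.
    z = a + b
    return z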
Example #4
    def test_with_input_spec(self):
        with fluid.dygraph.guard(fluid.CPUPlace()):
            x = to_variable(np.ones([4, 10]).astype('float32'))
            y = to_variable(np.ones([4, 10]).astype('float32') * 2)
            int_val = 4.

            net = SimpleNet()

            # 1. each method holds an independent program cache
            out = net(x)
            self.assertTrue(len(net.forward.program_cache) == 1)

            # 2. test save load
            net.inner_function(x)
            jit.save(net, './simple_net')
            infer_net = fluid.dygraph.jit.load('./simple_net')
            pred = infer_net(x)
            self.assertTrue(np.allclose(out.numpy(), pred.numpy()))

            # 3. we can decorate any method
            x_2 = to_variable(np.ones([4, 20]).astype('float32'))
            # uses `declarative(func)` instead of `@declarative`
            net.add_func = declarative(net.add_func)
            out = net.add_func(x_2, np.ones([20]).astype('float32'))
            self.assertTrue(len(net.add_func.program_cache) == 1)

            # 5. test input with list
            out = net.func_with_list([x, y], int_val)

            # 6. test input with dict
            out = net.func_with_dict({'x': x, 'y': y})

            # 7. test input with a list that contains a dict
            int_np = np.ones([1]).astype('float32')
            out = net.func_with_list_dict([int_np, {'x': x, 'y': y}])
Example #5
    def test_get_concrete_program(self):

        foo = declarative(foo_func)

        # 1. specify the InputSpec for `x`/`y`
        concrete_program_1 = foo.get_concrete_program(
            InputSpec([None, 10]), InputSpec([10]))
        self.assertTrue(len(foo.program_cache) == 1)

        # 2. specify `c`/`d` explicitly with their default values
        concrete_program_2 = foo.get_concrete_program(
            InputSpec([None, 10]), InputSpec([10]), 1, 2)
        self.assertTrue(concrete_program_2 == concrete_program_1)
        self.assertTrue(len(foo.program_cache) == 1)

        # 3. specify `c` = 2
        concrete_program_3 = foo.get_concrete_program(
            InputSpec([None, 10]), InputSpec([10]), c=2)
        self.assertTrue(concrete_program_3 != concrete_program_1)
        self.assertTrue(len(foo.program_cache) == 2)

        # 4. specify x.shape = [10]
        concrete_program_4 = foo.get_concrete_program(
            InputSpec([10]), InputSpec([10]))
        self.assertTrue(concrete_program_4 != concrete_program_1)
        self.assertTrue(len(foo.program_cache) == 3)

        # 5. only specify the InputSpec of x
        with self.assertRaises(ValueError):
            concrete_program_5 = foo.get_concrete_program(InputSpec([10]))

        # 6. specify an unknown kwarg `e`=4
        with self.assertRaises(TypeError):
            concrete_program_5 = foo.get_concrete_program(
                InputSpec([10]), InputSpec([10]), e=4)
Example #6
    def run_dygraph(self, func, to_static=False):

        with fluid.dygraph.guard(self.place):
            x_v = fluid.dygraph.to_variable(self.x)
            if to_static:
                ret = declarative(func)(x_v)
            else:
                ret = func(x_v)
            return ret.numpy()
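
A helper like this is typically paired with a test that compares the dynamic-graph result against the converted static-graph result. A hypothetical usage (the method and attribute names below are assumptions, not part of the original test):

    def test_transformed_static_result(self):
        # same function run eagerly and through declarative() should agree
        dygraph_res = self.run_dygraph(self.dyfunc, to_static=False)
        static_res = self.run_dygraph(self.dyfunc, to_static=True)
        self.assertTrue(np.allclose(dygraph_res, static_res))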
Example #7
    def _run(self, to_static):
        with fluid.dygraph.guard(self.place):
            if to_static:
                out = declarative(self.func)(self.x_data)
            else:
                out = self.func(self.x_data)

            if isinstance(out, fluid.core.VarBase):
                out = out.numpy()
            return out
Example #8
    def _run(self, to_static):
        with fluid.dygraph.guard():
            if self.x is None or self.y is None:
                self.fake_input()

            if to_static:
                out = declarative(nested_input)(self.x, self.y)
            else:
                out = nested_input(self.x, self.y)

        return out.numpy()
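
nested_input and the fake_input fixture are defined elsewhere in the test module; the point of the test is that declarative accepts nested containers of Tensors as arguments. An illustrative, hypothetical definition of nested_input (the exact nesting is an assumption):

def nested_input(x, y):
    # `y` is assumed to be a nested structure such as
    # {'a': Tensor, 'b': [Tensor, Tensor]}; packing and unpacking this
    # structure is what the dygraph-to-static conversion has to handle
    res = x + y['a']
    for t in y['b']:
        res = res + t
    return res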
Example #9
    def _run(self, to_static):
        with fluid.dygraph.guard():
            if self.x is None or self.y is None:
                self.x = fake_data([10, 16])
                self.y = fake_data([10, 16])

            if to_static:
                out = declarative(nested_output)(self.x, self.y)
            else:
                out = nested_output(self.x, self.y)

        return out
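
fake_data and nested_output are likewise defined outside this snippet. Plausible stand-ins, consistent with how they are called here (the returned structure of nested_output is an assumption):

def fake_data(shape):
    # builds a dygraph Tensor filled with random float32 data
    return fluid.dygraph.to_variable(
        np.random.random(shape).astype('float32'))


def nested_output(x, y):
    # returns a nested structure of Tensors so the test can check that the
    # converted program preserves the output structure
    sum_res = x + y
    sub_res = x - y
    return {'sum': sum_res, 'pair': [sub_res, sum_res]}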
Example #10
    def test_error(self):
        func = declarative(dyfunc_to_variable)

        paddle.enable_static()

        # Running the callable object decorated by '@paddle.jit.to_static'
        # fails if it is NOT called in dynamic mode.
        with self.assertRaises(RuntimeError):
            func(np.ones(5).astype("int32"))

        program_trans.enable(False)
        with self.assertRaises(AssertionError):
            # AssertionError: We Only support to_variable in imperative mode,
            #  please use fluid.dygraph.guard() as context to run it in imperative Mode
            func(np.ones(5).astype("int32"))
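
dyfunc_to_variable and program_trans come from the surrounding module. Under the usual conventions of these tests they would be a small function that calls fluid.dygraph.to_variable and a shared ProgramTranslator instance; treat the following as an assumed sketch:

from paddle.fluid.dygraph import ProgramTranslator

program_trans = ProgramTranslator()


def dyfunc_to_variable(x):
    # converts the numpy input to a dygraph Tensor; calling this under
    # static mode is exactly what the test above expects to fail
    res = fluid.dygraph.to_variable(x)
    return res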
Example #11
    def test_nest_output(self):
        x = fluid.dygraph.to_variable(
            np.random.random((4, 8)).astype('float32'))

        net = LinearNetWithNestOut(8, 8)
        dy_outs = flatten(net(x))
        net = declarative(net, input_spec=[InputSpec([None, 8], name='x')])

        model_path = "net_with_nest_out/model"
        paddle.jit.save(net, model_path)

        load_net = paddle.jit.load(model_path)
        load_outs = flatten(load_net(x))

        self.assertTrue(len(dy_outs) == 4)
        for dy_out, load_out in zip(dy_outs, load_outs):
            self.assertTrue(np.allclose(dy_out.numpy(), load_out.numpy()))
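
LinearNetWithNestOut is defined elsewhere; any layer whose forward returns a nested structure that flattens to four Tensors would satisfy the assertions above. A sketch under that assumption:

class LinearNetWithNestOut(fluid.dygraph.Layer):
    def __init__(self, in_size, out_size):
        super(LinearNetWithNestOut, self).__init__()
        self._linear_1 = fluid.dygraph.Linear(in_size, out_size)
        self._linear_2 = fluid.dygraph.Linear(in_size, out_size)

    def forward(self, x):
        y = self._linear_1(x)
        z = self._linear_2(y)
        out = y + z
        loss = fluid.layers.mean(out)
        # nested output: flatten(...) yields 4 Tensors (y, z, loss, out)
        return y, [(z, loss), out]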
Example #12
    def test_with_error(self):
        with fluid.dygraph.guard(fluid.CPUPlace()):
            x = to_variable(np.ones([4, 10]).astype('float32'))
            y = to_variable(np.ones([4, 10]).astype('float32') * 2)
            int_val = 4.

            net = SimpleNet()

            # 1. kwargs and input_spec should not be specified at the same time
            with self.assertRaises(ValueError):
                net(x, a=1, other_kwarg=2)

            # 2. requires len(input_spec) <= len(args)
            with self.assertRaises(ValueError):
                net.add_func = declarative(net.add_func,
                                           input_spec=[
                                               InputSpec([-1, 10]),
                                               InputSpec([-1, 10]),
                                               InputSpec([10])
                                           ])
                net.add_func(x, y)
Example #13
    def test_with_input_spec(self):
        net = LinearNetReturnLoss(8, 8)
        # set x.shape = [None, 8]
        net.forward = declarative(net.forward,
                                  input_spec=[InputSpec([None, 8], name='x')])

        model_path = "input_spec.output_spec/model"
        # check inputs and outputs
        self.assertTrue(len(net.forward.inputs) == 1)
        input_x = net.forward.inputs[0]
        self.assertTrue(input_x.shape == (-1, 8))
        self.assertTrue(input_x.name == 'x')

        # 1. prune loss
        output_spec = net.forward.outputs[:1]
        paddle.jit.save(net, model_path, output_spec=output_spec)

        # 2. load to infer
        infer_layer = paddle.jit.load(model_path)
        x = fluid.dygraph.to_variable(
            np.random.random((4, 8)).astype('float32'))
        pred = infer_layer(x)
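
LinearNetReturnLoss is also defined outside this snippet; the essential property is that forward returns two outputs so that output_spec can prune the loss at save time. A sketch under that assumption:

class LinearNetReturnLoss(fluid.dygraph.Layer):
    def __init__(self, in_size, out_size):
        super(LinearNetReturnLoss, self).__init__()
        self._linear = fluid.dygraph.Linear(in_size, out_size)

    def forward(self, x):
        y = self._linear(x)
        z = self._linear(y)
        loss = fluid.layers.mean(z)
        # two outputs: output_spec = net.forward.outputs[:1] keeps only `z`
        return z, loss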
Example #14
    def test_fake_input(self):
        net = SimpleNet()
        net = declarative(net)
        y = net(self.x)
        self.assertTrue(len(net.forward.program_cache) == 1)