Ejemplo n.º 1
0
 def test_detection_output(self):
     """detection_output over fixed-shape inputs should build and its
     last dimension should be 6."""
     prog = Program()
     with program_guard(prog):
         # All inputs use explicit shapes with no batch dim appended.
         prior = layers.data(
             name='prior_box',
             shape=[10, 4],
             append_batch_size=False,
             dtype='float32')
         prior_var = layers.data(
             name='prior_box_var',
             shape=[10, 4],
             append_batch_size=False,
             dtype='float32')
         target = layers.data(
             name='target_box',
             shape=[2, 10, 4],
             append_batch_size=False,
             dtype='float32')
         cls_scores = layers.data(
             name='scores',
             shape=[2, 10, 20],
             append_batch_size=False,
             dtype='float32')
         detections = layers.detection_output(
             scores=cls_scores,
             loc=target,
             prior_box=prior,
             prior_box_var=prior_var)
         self.assertIsNotNone(detections)
         # The asserted contract: last dim of the output is 6.
         self.assertEqual(detections.shape[-1], 6)
     print(prog)
Ejemplo n.º 2
0
    def test_grad(self):
        """Gradient flows through a tensor -> array -> tensor round trip.

        The summed gradient of mean(x) w.r.t. the 10 fed values should be
        close to 1.0 (each element receives 1/N).
        """
        place = core.CPUPlace()
        program = Program()

        with program_guard(program):
            # stop_gradient=False so x@GRAD is created by append_backward.
            x = layers.data(
                name='x', shape=[1], dtype='float32', stop_gradient=False)
            table = layers.lod_rank_table(x, level=0)
            array = layers.lod_tensor_to_array(x, table)
            result = layers.array_to_lod_tensor(array, table)

            mean = layers.mean(result)

            # Builds the backward graph; makes the @GRAD var fetchable below.
            append_backward(mean)

        tensor = core.LoDTensor()
        tensor.set(numpy.arange(10).reshape(10, 1).astype('float32'), place)
        # Sequence offsets [0, 3, 9, 10] -> sequence lengths 3, 6, 1.
        tensor.set_lod([[0, 3, 9, 10]])

        g_vars = program.global_block().var(x.name + "@GRAD")

        exe = Executor(place)
        # Fetch raw LoD tensors (return_numpy=False) and sum each one.
        g_out = [
            numpy.array(item).sum()
            for item in exe.run(program,
                                feed={'x': tensor},
                                fetch_list=[g_vars],
                                return_numpy=False)
        ]
        g_out_sum = numpy.array(g_out).sum()

        self.assertAlmostEqual(1.0, g_out_sum, delta=0.1)
Ejemplo n.º 3
0
    def test_detection_api(self):
        """Smoke-test the wiring of iou_similarity, box_coder,
        bipartite_match and target_assign inside one program."""
        program = Program()
        with program_guard(program):
            x = layers.data(name='x', shape=[4], dtype='float32')
            y = layers.data(name='y', shape=[4], dtype='float32')
            z = layers.data(name='z', shape=[4], dtype='float32', lod_level=1)
            iou = layers.iou_similarity(x=x, y=y)
            bcoder = layers.box_coder(
                prior_box=x,
                prior_box_var=y,
                target_box=z,
                code_type='encode_center_size')
            self.assertIsNotNone(iou)
            self.assertIsNotNone(bcoder)

            # One-to-one matching driven by the IoU matrix.
            matched_indices, matched_dist = layers.bipartite_match(iou)
            self.assertIsNotNone(matched_indices)
            self.assertIsNotNone(matched_dist)

            # target_assign with an int32 ground-truth input ...
            gt = layers.data(
                name='gt', shape=[1, 1], dtype='int32', lod_level=1)
            trg, trg_weight = layers.target_assign(
                gt, matched_indices, mismatch_value=0)
            self.assertIsNotNone(trg)
            self.assertIsNotNone(trg_weight)

            # ... and again with a float32 ground-truth input.
            gt2 = layers.data(
                name='gt2', shape=[10, 4], dtype='float32', lod_level=1)
            trg, trg_weight = layers.target_assign(
                gt2, matched_indices, mismatch_value=0)
            self.assertIsNotNone(trg)
            self.assertIsNotNone(trg_weight)

        print(str(program))
Ejemplo n.º 4
0
    def main(self, tensor, expect_array, expect_lod, expect_max_len, level=0):
        """Round-trip *tensor* through lod_tensor_to_array/array_to_lod_tensor
        and compare intermediates against the expectations.

        Args:
            tensor: input LoD tensor fed as 'x'.
            expect_array: expected tensor-array contents, or None to skip.
            expect_lod: expected LoD of the array entries, or None to skip.
            expect_max_len: expected result of max_sequence_len.
            level: LoD level used to build the rank table.
        """
        place = self.place()
        program = Program()
        with program_guard(program):
            x = layers.data(name='x', shape=[10])
            # persistable=True keeps these vars alive in the scope so they
            # can be inspected after exe.run() below.
            x.persistable = True
            table = layers.lod_rank_table(x, level=level)
            max_len = layers.max_sequence_len(table)
            max_len.persistable = True
            array = layers.lod_tensor_to_array(x, table)
            array.persistable = True

            result = layers.array_to_lod_tensor(array, table)
            result.persistable = True
        exe = Executor(place)
        scope = core.Scope()
        exe.run(program, feed={'x': tensor}, scope=scope)
        # Pull the intermediate tensor array straight out of the scope.
        var = scope.find_var(array.name)
        array = var.get_lod_tensor_array()
        if expect_array is not None and expect_lod is not None:
            self.check_array_same(array, expect_array, expect_lod)
        # The round trip must reproduce the original tensor exactly.
        self.check_tensor_same(scope.find_var(result.name).get_tensor(), tensor)

        self.assertEqual(
            numpy.array(scope.find_var(max_len.name).get_tensor())[0],
            expect_max_len)
Ejemplo n.º 5
0
    def test_select(self):
        """A Select with a channel_send case on a buffered channel should
        complete, and the sent value should be receivable afterwards."""
        with framework.program_guard(framework.Program()):
            # capacity=1 makes the channel buffered, so the send below
            # does not block waiting for a receiver.
            ch1 = fluid.make_channel(
                dtype=core.VarDesc.VarType.LOD_TENSOR, capacity=1)

            result1 = self._create_tensor('return_value',
                                          core.VarDesc.VarType.LOD_TENSOR,
                                          core.VarDesc.VarType.FP64)

            input_value = fill_constant(
                shape=[1], dtype=core.VarDesc.VarType.FP64, value=10)

            with fluid.Select() as select:
                with select.case(fluid.channel_send, ch1, input_value):
                    # Execute something.
                    pass

                with select.default():
                    pass

            # This should not block because we are using a buffered channel.
            result1, status = fluid.channel_recv(ch1, result1)
            fluid.channel_close(ch1)

            cpu = core.CPUPlace()
            exe = Executor(cpu)

            # The value pushed through the channel must come back out.
            result = exe.run(fetch_list=[result1])
            self.assertEqual(result[0][0], 10)
Ejemplo n.º 6
0
 def test_row_conv(self):
     """row_conv over a LoD input should produce a variable."""
     prog = Program()
     with program_guard(prog):
         seq = layers.data(name='x', shape=[16], dtype='float32', lod_level=1)
         conv_out = layers.row_conv(input=seq, future_context_size=2)
         self.assertIsNotNone(conv_out)
     print(prog)
Ejemplo n.º 7
0
    def test_simple_conv2d(self):
        """A single conv2d layer should be appendable to a fresh program."""
        main = Program()
        with program_guard(main, startup_program=Program()):
            img = layers.data(name='pixel', shape=[3, 48, 48], dtype='int32')
            layers.conv2d(input=img, num_filters=3, filter_size=[4, 4])

        print(main)
    def test_override(self):
        """Exercise every overridden rich-comparison operator for each
        place/dtype combination, each in its own fresh program pair."""
        # One lambda per overridden comparison operator.
        comparisons = [
            lambda _a, _b: _a == _b,
            lambda _a, _b: _a != _b,
            lambda _a, _b: _a < _b,
            lambda _a, _b: _a <= _b,
            lambda _a, _b: _a > _b,
            lambda _a, _b: _a >= _b,
        ]

        # Always check CPU; add GPU when available.
        places = [fluid.CPUPlace()]
        if fluid.core.is_compiled_with_cuda():
            places.append(fluid.CUDAPlace(0))

        dtypes = ['int32', 'float32']

        for place in places:
            for dtype in dtypes:
                for fn in comparisons:
                    # Fresh main/startup programs keep checks independent.
                    with framework.program_guard(framework.Program(),
                                                 framework.Program()):
                        self.check_result(fn, place, dtype)
Ejemplo n.º 9
0
    def test_recognize_digits_conv(self):
        """Build a two-stage conv-pool MNIST classifier with a softmax fc
        head and cross-entropy loss; only graph construction is tested."""
        program = Program()
        with program_guard(program, startup_program=Program()):
            images = layers.data(
                name='pixel', shape=[1, 28, 28], dtype='float32')
            label = layers.data(name='label', shape=[1], dtype='int32')
            # First conv + pool stage.
            conv_pool_1 = nets.simple_img_conv_pool(
                input=images,
                filter_size=5,
                num_filters=2,
                pool_size=2,
                pool_stride=2,
                act="relu")
            # Second conv + pool stage stacked on the first.
            conv_pool_2 = nets.simple_img_conv_pool(
                input=conv_pool_1,
                filter_size=5,
                num_filters=4,
                pool_size=2,
                pool_stride=2,
                act="relu")

            predict = layers.fc(input=conv_pool_2, size=10, act="softmax")
            cost = layers.cross_entropy(input=predict, label=label)
            avg_cost = layers.mean(cost)

        print(str(program))
Ejemplo n.º 10
0
 def test_sequence_reshape(self):
     """sequence_reshape to a wider dimension should build a variable."""
     prog = Program()
     with program_guard(prog):
         seq = layers.data(name='x', shape=[8], dtype='float32', lod_level=1)
         reshaped = layers.sequence_reshape(input=seq, new_dim=16)
         self.assertIsNotNone(reshaped)
     print(prog)
Ejemplo n.º 11
0
 def test_softmax(self):
     """softmax over an fc projection should build without error."""
     prog = Program()
     with program_guard(prog):
         inp = layers.data(name='data', shape=[10], dtype='float32')
         projected = layers.fc(input=inp, size=20)
         self.assertIsNotNone(layers.softmax(projected))
     print(prog)
Ejemplo n.º 12
0
 def test_lod_reset(self):
     """lod_reset borrowing y's LoD should build and be printable."""
     prog = Program()
     with program_guard(prog):
         flat = layers.data(name='x', shape=[10], dtype='float32')
         lod_ref = layers.data(
             name='y', shape=[10, 20], dtype='float32', lod_level=2)
         print(layers.lod_reset(x=flat, y=lod_ref))
     print(prog)
Ejemplo n.º 13
0
 def test_sequence_softmax(self):
     """sequence_softmax over an fc projection of LoD data should build."""
     prog = Program()
     with program_guard(prog):
         sequences = layers.data(
             name='seq_data', shape=[10, 10], dtype='float32', lod_level=1)
         projected = layers.fc(input=sequences, size=20)
         self.assertIsNotNone(layers.sequence_softmax(projected))
     print(prog)
Ejemplo n.º 14
0
 def test_topk(self):
     """topk should return both a values and an indices variable."""
     prog = Program()
     with program_guard(prog):
         logits = layers.data(name="label", shape=[200], dtype="float32")
         top_vals, top_idx = layers.topk(logits, k=5)
         self.assertIsNotNone(top_vals)
         self.assertIsNotNone(top_idx)
     print(prog)
Ejemplo n.º 15
0
 def test_smooth_l1(self):
     """smooth_l1 between prediction and label should build a loss var."""
     prog = Program()
     with program_guard(prog):
         pred = layers.data(name='x', shape=[4], dtype='float32')
         target = layers.data(name='label', shape=[4], dtype='float32')
         l1_loss = layers.smooth_l1(pred, target)
         self.assertIsNotNone(l1_loss)
     print(prog)
Ejemplo n.º 16
0
 def test_softmax_with_cross_entropy(self):
     """softmax_with_cross_entropy on logits and int64 labels should build."""
     prog = Program()
     with program_guard(prog):
         logits = layers.data(name='x', shape=[16], dtype='float32')
         labels = layers.data(name='label', shape=[1], dtype='int64')
         xent = layers.softmax_with_cross_entropy(logits, labels)
         self.assertIsNotNone(xent)
     print(prog)
Ejemplo n.º 17
0
 def test_sequence_expand(self):
     """sequence_expand against a level-2 LoD reference should build."""
     prog = Program()
     with program_guard(prog):
         src = layers.data(name='x', shape=[10], dtype='float32')
         ref = layers.data(
             name='y', shape=[10, 20], dtype='float32', lod_level=2)
         expanded = layers.sequence_expand(x=src, y=ref, ref_level=1)
         self.assertIsNotNone(expanded)
     print(prog)
Ejemplo n.º 18
0
 def test_switch(self):
     """check_switch should map each input value to its branch id."""
     cases = {(-0.1, 0), (0.1, 1), (1.1, 2), (2.1, 3)}
     for value, expected in cases:
         # Fresh programs per case so the switches don't interfere.
         main = framework.Program()
         startup = framework.Program()
         with framework.program_guard(main, startup):
             self.assertEqual(self.check_switch(value), expected)
Ejemplo n.º 19
0
 def test_im2sequence(self):
     """im2sequence over an image-shaped input should build a variable."""
     print("test_im2sequence")
     prog = Program()
     with program_guard(prog):
         img = layers.data(name='x', shape=[3, 128, 128], dtype='float32')
         seq = layers.im2sequence(
             input=img, stride=[1, 1], filter_size=[2, 2])
         self.assertIsNotNone(seq)
     print(prog)
Ejemplo n.º 20
0
 def test_multiplex(self):
     """multiplex selecting between two inputs by index should build."""
     prog = Program()
     with program_guard(prog):
         first = layers.data(name='x1', shape=[4], dtype='float32')
         second = layers.data(name='x2', shape=[4], dtype='float32')
         selector = layers.data(name='index', shape=[1], dtype='int32')
         selected = layers.multiplex(inputs=[first, second], index=selector)
         self.assertIsNotNone(selected)
     print(prog)
Ejemplo n.º 21
0
 def test_upsampling_bilinear2d(self):
     """Both the out_shape and the scale call forms should build."""
     prog = Program()
     with program_guard(prog):
         img = layers.data(name='x', shape=[3, 9, 6], dtype="float32")
         # Explicit output shape.
         by_shape = layers.upsampling_bilinear2d(img, out_shape=[12, 12])
         self.assertIsNotNone(by_shape)
         # Scale factor instead of an explicit shape.
         by_scale = layers.upsampling_bilinear2d(img, scale=3)
         self.assertIsNotNone(by_scale)
     print(prog)
Ejemplo n.º 22
0
 def test_label_smooth(self):
     """label_smooth over a one-hot encoding should build a variable."""
     prog = Program()
     with program_guard(prog):
         raw_label = layers.data(name="label", shape=[1], dtype="float32")
         one_hot = layers.one_hot(input=raw_label, depth=10)
         smoothed = layers.label_smooth(
             label=one_hot, epsilon=0.1, dtype="float32")
         self.assertIsNotNone(smoothed)
     print(prog)
Ejemplo n.º 23
0
 def test_roi_pool(self):
     """roi_pool over a feature map and LoD rois should build."""
     prog = Program()
     with program_guard(prog):
         feat = layers.data(name="x", shape=[256, 30, 30], dtype="float32")
         regions = layers.data(
             name="rois", shape=[4], dtype="float32", lod_level=1)
         pooled = layers.roi_pool(feat, regions, 7, 7, 0.6)
         self.assertIsNotNone(pooled)
     print(prog)
Ejemplo n.º 24
0
 def test_sigmoid_cross_entropy(self):
     """sigmoid_cross_entropy_with_logits should build a loss variable."""
     prog = Program()
     with program_guard(prog):
         logits = layers.data(name='data', shape=[10], dtype='float32')
         targets = layers.data(name='label', shape=[10], dtype='float32')
         loss = layers.sigmoid_cross_entropy_with_logits(
             x=logits, label=targets)
         self.assertIsNotNone(loss)
     print(prog)
Ejemplo n.º 25
0
    def test_fit_line_inference_model(self):
        """Train a tiny linear-regression model, save it as an inference
        model, reload it and check the reloaded program reproduces the
        trained loss and the expected feed/fetch metadata."""
        MODEL_DIR = "./tmp/inference_model"

        init_program = Program()
        program = Program()

        with program_guard(program, init_program):
            x = layers.data(name='x', shape=[2], dtype='float32')
            y = layers.data(name='y', shape=[1], dtype='float32')

            y_predict = layers.fc(input=x, size=1, act=None)

            cost = layers.square_error_cost(input=y_predict, label=y)
            avg_cost = layers.mean(cost)

            sgd_optimizer = optimizer.SGDOptimizer(learning_rate=0.001)
            sgd_optimizer.minimize(avg_cost, init_program)

        place = core.CPUPlace()
        exe = executor.Executor(place)

        exe.run(init_program, feed={}, fetch_list=[])

        # The feed data never changes, so build it once outside the loop
        # (this also guarantees the tensors exist for the fetches below
        # instead of relying on loop-variable leakage).
        tensor_x = np.array(
            [[1, 1], [1, 2], [3, 4], [5, 2]]).astype("float32")
        tensor_y = np.array([[-2], [-3], [-7], [-7]]).astype("float32")

        # `range` instead of the Python-2-only `xrange`.
        for i in range(100):
            exe.run(program,
                    feed={'x': tensor_x,
                          'y': tensor_y},
                    fetch_list=[avg_cost])

        save_inference_model(MODEL_DIR, ["x", "y"], [avg_cost], exe, program)
        expected = exe.run(program,
                           feed={'x': tensor_x,
                                 'y': tensor_y},
                           fetch_list=[avg_cost])[0]

        # `reload` is a builtin on Python 2 only; import it on Python 3.
        try:
            from importlib import reload
        except ImportError:
            pass
        reload(executor)  # reload to build a new scope
        exe = executor.Executor(place)

        [infer_prog, feed_var_names, fetch_vars] = load_inference_model(
            MODEL_DIR, exe)

        outs = exe.run(
            infer_prog,
            feed={feed_var_names[0]: tensor_x,
                  feed_var_names[1]: tensor_y},
            fetch_list=fetch_vars)
        actual = outs[0]

        self.assertEqual(feed_var_names, ["x", "y"])
        self.assertEqual(len(fetch_vars), 1)
        self.assertEqual(str(fetch_vars[0]), str(avg_cost))
        self.assertEqual(expected, actual)
Ejemplo n.º 26
0
    def test_program_clone_with_parameter(self):
        """Cloning a program must carry its parameters into the copy."""
        main = Program()
        startup = Program()
        with program_guard(main, startup):
            inp = layers.data(name='x', shape=[784], dtype='float32')
            hidden = layers.fc(input=inp, size=100)
            layers.fc(input=hidden, size=100)

        cloned = main.clone()
        # The fc layers created parameters; the clone must retain them.
        self.assertNotEqual(0, len(cloned.blocks[0].all_parameters()))
Ejemplo n.º 27
0
    def test_ifelse(self):
        """Train a small IfElse MNIST network until the loss drops below
        1.0; fail the test if it never does within PASS_NUM passes."""
        prog = Program()
        startup_prog = Program()
        with program_guard(prog, startup_prog):
            image = layers.data(name='x', shape=[784], dtype='float32')

            label = layers.data(name='y', shape=[1], dtype='int64')

            limit = layers.fill_constant_batch_size_like(
                input=label, dtype='int64', shape=[1], value=5.0)
            cond = layers.less_than(x=label, y=limit)
            ie = layers.IfElse(cond)

            # True branch: 100-unit hidden layer.
            with ie.true_block():
                true_image = ie.input(image)
                hidden = layers.fc(input=true_image, size=100, act='tanh')
                prob = layers.fc(input=hidden, size=10, act='softmax')
                ie.output(prob)

            # False branch: 200-unit hidden layer.
            with ie.false_block():
                false_image = ie.input(image)
                hidden = layers.fc(input=false_image, size=200, act='tanh')
                prob = layers.fc(input=hidden, size=10, act='softmax')
                ie.output(prob)

            prob = ie()
            loss = layers.cross_entropy(input=prob[0], label=label)
            avg_loss = layers.mean(loss)

            optimizer = MomentumOptimizer(learning_rate=0.001, momentum=0.9)
            optimizer.minimize(avg_loss, startup_prog)
        train_reader = paddle.batch(
            paddle.reader.shuffle(
                paddle.dataset.mnist.train(), buf_size=8192),
            batch_size=200)

        place = core.CPUPlace()
        exe = Executor(place)

        # Fix: run the programs built above; the original referenced an
        # undefined `kwargs` dict here.
        exe.run(startup_prog)
        PASS_NUM = 100
        for pass_id in range(PASS_NUM):
            for data in train_reader():
                # List comprehensions instead of map(): on Python 3 map()
                # returns a lazy iterator, which np.array cannot consume.
                x_data = np.array([x[0] for x in data]).astype("float32")
                y_data = np.array([x[1] for x in data]).astype("int64")
                y_data = y_data.reshape((y_data.shape[0], 1))

                outs = exe.run(prog,
                               feed={'x': x_data,
                                     'y': y_data},
                               fetch_list=[avg_loss])
                # print() function form (the original used the Python-2
                # print statement).
                print(outs[0])
                if outs[0] < 1.0:
                    return
        self.assertFalse(True)
Ejemplo n.º 28
0
def save_program_desc(network_func):
    """Build *network_func*'s graph into fresh startup/main programs and
    dump both serialized program descs to disk.

    Args:
        network_func: callable that appends the network to the current
            program; invoked with with_optimize=False.
    """
    startup_program = framework.Program()
    train_program = framework.Program()

    with framework.program_guard(train_program, startup_program):
        network_func(with_optimize=False)

    # serialize_to_string() returns raw protobuf bytes, so the files must
    # be opened in binary mode ("w" breaks on Python 3 and can corrupt
    # the bytes via newline translation).
    with open("startup_program", "wb") as f:
        f.write(startup_program.desc.serialize_to_string())
    with open("main_program", "wb") as f:
        f.write(train_program.desc.serialize_to_string())
Ejemplo n.º 29
0
 def test_dynamic_lstmp(self):
     """dynamic_lstmp with a projection should build from an fc input."""
     prog = Program()
     with program_guard(prog):
         hidden_dim, proj_dim = 16, 8
         seq = layers.data(
             name='seq_data', shape=[10, 10], dtype='float32', lod_level=1)
         # The LSTMP input needs 4 * hidden_dim gate activations.
         gates = layers.fc(input=seq, size=4 * hidden_dim)
         lstmp_out = layers.dynamic_lstmp(
             input=gates, size=4 * hidden_dim, proj_size=proj_dim)
         self.assertIsNotNone(lstmp_out)
     print(prog)
Ejemplo n.º 30
0
    def test_slice(self):
        """layers.slice should accept list-valued axes/starts/ends."""
        axes = [0, 1, 2]
        starts = [1, 0, 2]
        ends = [3, 3, 4]

        prog = Program()
        with program_guard(prog):
            tensor = layers.data(
                name="input", shape=[3, 4, 5, 6], dtype='float32')

            layers.slice(tensor, axes=axes, starts=starts, ends=ends)
Ejemplo n.º 31
0
    def test_fit_a_line(self):
        """A linear-regression graph with square_error_cost should build."""
        prog = Program()
        with program_guard(prog, startup_program=Program()):
            features = layers.data(name='x', shape=[13], dtype='float32')
            prediction = layers.fc(input=features, size=1, act=None)
            target = layers.data(name='y', shape=[1], dtype='float32')
            sq_cost = layers.square_error_cost(input=prediction, label=target)
            mean_cost = layers.mean(sq_cost)
            self.assertIsNotNone(mean_cost)

        print(prog)
Ejemplo n.º 32
0
 def test_dynamic_lstmp(self):
     """Building dynamic_lstmp on top of an fc layer should succeed."""
     prog = Program()
     with program_guard(prog):
         hidden_dim, proj_dim = 16, 8
         sequences = layers.data(
             name='seq_data', shape=[10, 10], dtype='float32', lod_level=1)
         # fc produces the 4 * hidden_dim gate inputs the LSTMP expects.
         gate_input = layers.fc(input=sequences, size=4 * hidden_dim)
         result = layers.dynamic_lstmp(
             input=gate_input, size=4 * hidden_dim, proj_size=proj_dim)
         self.assertIsNotNone(result)
     print(prog)
    def test_error(self):
        """Running a program containing layers.case under ParallelExecutor
        on more than one device should raise NotImplementedError.

        NOTE(review): LR and SEED are not defined in this snippet —
        presumably module-level constants; confirm they exist.
        """
        startup_program = Program()
        main_program = Program()
        use_cuda = core.is_compiled_with_cuda()
        with program_guard(main_program, startup_program):

            # Both branches just minimize the loss with their optimizer.
            def fn_1(opt, avg_loss):
                opt.minimize(avg_loss)

            def fn_2(opt, avg_loss):
                opt.minimize(avg_loss)

            x = fluid.layers.data("X", [10], 'float32')
            hidden = layers.fc(x, 5)
            avg_loss = layers.mean(hidden)

            adam = optimizer.Adam(learning_rate=LR)
            sgd = optimizer.SGD(learning_rate=LR)

            # Constant-true condition: fn_1 (Adam) is always chosen.
            cond = layers.fill_constant([1], 'bool', True)

            layers.case([(cond, lambda: fn_1(adam, avg_loss))],
                        lambda: fn_2(sgd, avg_loss))

        cpu_place = fluid.CPUPlace()
        cuda_place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()

        for place in [cpu_place, cuda_place]:

            exe = fluid.Executor(place)
            exe.run(startup_program)

            np.random.seed(SEED)

            # NOTE(liym27):
            # This test needs to run in multi cards to test NotImplementedError.
            # Here, move this test from RUN_TYPE=DIST in tests/unittests/CMakeList.txt,
            # to use multi cards ** only on CPU ** not GPU to reduce CI time.
            os.environ['CPU_NUM'] = str(2)

            pe_exe = fluid.ParallelExecutor(use_cuda=use_cuda,
                                            main_program=main_program,
                                            loss_name=avg_loss.name)
            num_devices = pe_exe.device_count

            # Wrapped so assertRaises can invoke the failing run lazily.
            def not_implemented_error():
                pe_exe.run(feed={
                    'X':
                    np.random.random(size=[64, 10]).astype('float32'),
                },
                           fetch_list=[avg_loss.name])

            if num_devices > 1:
                self.assertRaises(NotImplementedError, not_implemented_error)
Ejemplo n.º 34
0
def save_program_desc():
    """Build the MNIST classification graph into fresh programs and dump
    both serialized program descs to disk."""
    startup_program = framework.Program()
    train_program = framework.Program()

    with framework.program_guard(train_program, startup_program):
        minist_classfication_network()

    # serialize_to_string() returns raw protobuf bytes, so the files must
    # be opened in binary mode ("w" breaks on Python 3 and can corrupt
    # the bytes via newline translation).
    with open("startup_program", "wb") as f:
        f.write(startup_program.desc.serialize_to_string())
    with open("main_program", "wb") as f:
        f.write(train_program.desc.serialize_to_string())
Ejemplo n.º 35
0
    def not_test_ifelse(self):
        """Train a small IfElse MNIST network until the loss drops below
        1.0; fail if it never does within PASS_NUM passes.

        The `not_test_` prefix keeps this out of the default test run.
        """
        prog = Program()
        startup_prog = Program()
        with program_guard(prog, startup_prog):
            image = layers.data(name='x', shape=[784], dtype='float32')

            label = layers.data(name='y', shape=[1], dtype='int64')

            # Route samples by label < 5 into the true/false branches.
            limit = layers.fill_constant(shape=[1], dtype='int64', value=5)
            cond = layers.less_than(x=label, y=limit)
            ie = layers.IfElse(cond)

            # True branch: 100-unit hidden layer.
            with ie.true_block():
                true_image = ie.input(image)
                hidden = layers.fc(input=true_image, size=100, act='tanh')
                prob = layers.fc(input=hidden, size=10, act='softmax')
                ie.output(prob)

            # False branch: 200-unit hidden layer.
            with ie.false_block():
                false_image = ie.input(image)
                hidden = layers.fc(input=false_image, size=200, act='tanh')
                prob = layers.fc(input=hidden, size=10, act='softmax')
                ie.output(prob)

            prob = ie()
            loss = layers.cross_entropy(input=prob[0], label=label)
            avg_loss = layers.mean(loss)

            optimizer = MomentumOptimizer(learning_rate=0.001, momentum=0.9)
            optimizer.minimize(avg_loss, startup_prog)
        train_reader = paddle.batch(
            paddle.reader.shuffle(
                paddle.dataset.mnist.train(), buf_size=8192),
            batch_size=200)

        place = core.CPUPlace()
        exe = Executor(place)

        exe.run(startup_prog)
        PASS_NUM = 100
        for pass_id in range(PASS_NUM):
            for data in train_reader():
                x_data = np.array([x[0] for x in data]).astype("float32")
                y_data = np.array([x[1] for x in data]).astype("int64")
                y_data = y_data.reshape((y_data.shape[0], 1))

                outs = exe.run(prog,
                               feed={'x': x_data,
                                     'y': y_data},
                               fetch_list=[avg_loss])
                print(outs[0])
                # Early success: loss converged below the threshold.
                if outs[0] < 1.0:
                    return
        self.assertFalse(True)
Ejemplo n.º 36
0
    def test_sampling_id(self):
        """sampling_id over a fixed-shape probability input should build."""
        prog = Program()
        with program_guard(prog):
            probs = layers.data(name="X",
                                shape=[13, 11],
                                dtype='float32',
                                append_batch_size=False)

            sampled = layers.sampling_id(probs)
            self.assertIsNotNone(sampled)
        print(prog)
Ejemplo n.º 37
0
    def test_gaussian_random_batch_size_like(self):
        """gaussian_random_batch_size_like should build against an input."""
        prog = Program()
        with program_guard(prog):
            ref = layers.data(name="input", shape=[13, 11], dtype='float32')

            # -1 in shape: batch dim is taken from `ref` at runtime.
            noise = layers.gaussian_random_batch_size_like(
                ref, shape=[-1, 11], mean=1.0, std=2.0)
            self.assertIsNotNone(noise)
        print(prog)
Ejemplo n.º 38
0
    def test_fit_a_line(self):
        """Constructing a least-squares regression graph should succeed."""
        main = Program()
        with program_guard(main, startup_program=Program()):
            inputs = layers.data(name='x', shape=[13], dtype='float32')
            fitted = layers.fc(input=inputs, size=1, act=None)
            labels = layers.data(name='y', shape=[1], dtype='float32')
            errors = layers.square_error_cost(input=fitted, label=labels)
            avg_error = layers.mean(errors)
            self.assertIsNotNone(avg_error)

        print(main)
Ejemplo n.º 39
0
    def test_spectral_norm(self):
        """spectral_norm over a 4-D weight tensor should build."""
        prog = Program()
        with program_guard(prog):
            weight = layers.data(name='weight',
                                 shape=[2, 3, 32, 32],
                                 dtype="float32",
                                 append_batch_size=False)
            normalized = layers.spectral_norm(weight, dim=1, power_iters=1)
            self.assertIsNotNone(normalized)

        print(prog)
Ejemplo n.º 40
0
 def test_roi_pool(self):
     """roi_pool on a feature map with LoD region proposals should build."""
     prog = Program()
     with program_guard(prog):
         feature_map = layers.data(
             name="x", shape=[256, 30, 30], dtype="float32")
         proposals = layers.data(name="rois",
                                 shape=[4],
                                 dtype="float32",
                                 lod_level=1)
         pooled = layers.roi_pool(feature_map, proposals, 7, 7, 0.6)
         self.assertIsNotNone(pooled)
     print(prog)
 def test_multiclass_nms(self):
     """multiclass_nms over boxes and per-class scores should build."""
     prog = Program()
     with program_guard(prog):
         boxes = layers.data(name='bboxes',
                             shape=[-1, 10, 4],
                             dtype='float32')
         cls_scores = layers.data(name='scores',
                                  shape=[-1, 10],
                                  dtype='float32')
         kept = layers.multiclass_nms(boxes, cls_scores, 0.3, 400, 200, 0.7)
         self.assertIsNotNone(kept)
 def test_box_coder_api(self):
     """box_coder with a list-valued prior_box_var should build."""
     prog = Program()
     with program_guard(prog):
         priors = layers.data(name='x', shape=[4], dtype='float32')
         targets = layers.data(
             name='z', shape=[4], dtype='float32', lod_level=1)
         encoded = layers.box_coder(prior_box=priors,
                                    prior_box_var=[0.1, 0.2, 0.1, 0.2],
                                    target_box=targets,
                                    code_type='encode_center_size')
         self.assertIsNotNone(encoded)
     print(prog)
    def setUp(self):
        """Build a linear-regression program with an SGD minimize step and
        store it on self.program for the tests to inspect."""
        program = Program()
        with program_guard(program, startup_program=Program()):
            x = layers.data(name='x', shape=[13], dtype='float32')
            y_predict = layers.fc(input=x, size=1, act=None)
            y = layers.data(name='y', shape=[1], dtype='float32')
            cost = layers.square_error_cost(input=y_predict, label=y)
            avg_cost = layers.mean(cost)
            opt = optimizer.SGD(learning_rate=0.001)
            # minimize() appends the optimize ops; its return value is
            # kept only to rebind the local name.
            opt = opt.minimize(avg_cost)

        self.program = program
Ejemplo n.º 44
0
 def test_sequence_slice(self):
     """sequence_slice with assigned offset/length tensors should build."""
     prog = Program()
     with program_guard(prog):
         import numpy as np
         sequences = layers.data(
             name='x', shape=[10, 5], dtype='float32', lod_level=1)
         # Per-sequence slice offsets and lengths as constant tensors.
         offsets = layers.assign(input=np.array([[0, 1]]).astype('int32'))
         lengths = layers.assign(input=np.array([[2, 1]]).astype('int32'))
         sliced = layers.sequence_slice(
             input=sequences, offset=offsets, length=lengths)
         self.assertIsNotNone(sliced)
     print(prog)
Ejemplo n.º 45
0
 def test_generate_mask_labels(self):
     """generate_mask_labels should emit rois, per-roi mask flags and
     int32 masks whose widths match the asserted contract."""
     program = Program()
     with program_guard(program):
         im_info = layers.data(
             name='im_info',
             shape=[1, 3],
             dtype='float32',
             lod_level=1,
             append_batch_size=False)
         gt_classes = layers.data(
             name='gt_classes',
             shape=[2, 1],
             dtype='int32',
             lod_level=1,
             append_batch_size=False)
         is_crowd = layers.data(
             name='is_crowd',
             shape=[2, 1],
             dtype='int32',
             lod_level=1,
             append_batch_size=False)
         # Ground-truth polygon segments, three LoD levels deep.
         gt_segms = layers.data(
             name='gt_segms',
             shape=[20, 2],
             dtype='float32',
             lod_level=3,
             append_batch_size=False)
         rois = layers.data(
             name='rois',
             shape=[4, 4],
             dtype='float32',
             lod_level=1,
             append_batch_size=False)
         labels_int32 = layers.data(
             name='labels_int32',
             shape=[4, 1],
             dtype='int32',
             lod_level=1,
             append_batch_size=False)
         num_classes = 5
         resolution = 14
         outs = fluid.layers.generate_mask_labels(
             im_info=im_info,
             gt_classes=gt_classes,
             is_crowd=is_crowd,
             gt_segms=gt_segms,
             rois=rois,
             labels_int32=labels_int32,
             num_classes=num_classes,
             resolution=resolution)
         mask_rois, roi_has_mask_int32, mask_int32 = outs
         # unittest assertions instead of bare `assert`, which is
         # silently stripped when Python runs with -O.
         self.assertEqual(mask_rois.shape[1], 4)
         self.assertEqual(mask_int32.shape[1],
                          num_classes * resolution * resolution)
Ejemplo n.º 46
0
 def test_softmax_with_cross_entropy(self):
     """Both the (loss, softmax) and the loss-only call forms should build."""
     prog = Program()
     with program_guard(prog):
         logits = layers.data(name='x', shape=[16], dtype='float32')
         labels = layers.data(name='label', shape=[1], dtype='int64')
         # return_softmax=True yields the loss plus the softmax output.
         loss, probs = layers.softmax_with_cross_entropy(
             logits, labels, return_softmax=True)
         self.assertIsNotNone(loss)
         self.assertIsNotNone(probs)
         # Default form returns only the loss.
         loss_only = layers.softmax_with_cross_entropy(logits, labels)
         self.assertIsNotNone(loss_only)
     print(prog)
    def setUp(self):
        """Build a linear-regression program with an SGD minimize step and
        store it on self.program for the tests to inspect."""
        program = Program()
        with program_guard(program, startup_program=Program()):
            x = layers.data(name='x', shape=[13], dtype='float32')
            y_predict = layers.fc(input=x, size=1, act=None)
            y = layers.data(name='y', shape=[1], dtype='float32')
            cost = layers.square_error_cost(input=y_predict, label=y)
            avg_cost = layers.mean(cost)
            opt = optimizer.SGD(learning_rate=0.001)
            # minimize() appends the optimize ops; its return value is
            # kept only to rebind the local name.
            opt = opt.minimize(avg_cost)

        self.program = program
Ejemplo n.º 48
0
 def test_im2sequence(self):
     """im2sequence with a runtime input_image_size tensor should build."""
     program = Program()
     with program_guard(program):
         x = layers.data(name='x', shape=[3, 128, 128], dtype='float32')
         # NOTE(review): shape=[] looks odd for the image-size input —
         # presumably the actual size tensor is supplied at feed time;
         # confirm against the im2sequence op's expectations.
         y = layers.data(name='y', shape=[], dtype='float32')
         output = layers.im2sequence(input=x,
                                     input_image_size=y,
                                     stride=[1, 1],
                                     filter_size=[2, 2],
                                     out_stride=[1, 1])
         self.assertIsNotNone(output)
     print(str(program))
Ejemplo n.º 49
0
    def test_nested_net(self):
        """A while_loop nested inside another while_loop should produce the
        same sums as the equivalent plain-Python double loop below."""

        # Outer loop: runs while i < loop_len1 (2 iterations).
        def external_cond(i, j, init, sums):
            return layers.less_than(i, loop_len1)

        def external_body(i, j, init, sums):
            # Inner loop: runs while j < loop_len2 (3 iterations).
            def internal_cond(j, init, sums):
                return layers.less_than(j, loop_len2)

            def internal_body(j, init, sums):
                init = layers.elementwise_add(x=init, y=ones)
                sums = layers.elementwise_add(x=init, y=sums)
                j = layers.increment(j)
                return [j, init, sums]

            result = layers.while_loop(internal_cond, internal_body,
                                       [j, init, sums])
            j = result[0]
            init = result[1]
            sums = result[2]
            sums = layers.elementwise_add(x=init, y=sums)
            i = layers.increment(i)
            return [i, j, init, sums]

        main_program = Program()
        startup_program = Program()
        with program_guard(main_program, startup_program):
            i = layers.zeros(shape=[1], dtype='int64')
            j = layers.zeros(shape=[1], dtype='int64')
            init = fluid.data(name='init', shape=[3, 3], dtype='float32')
            sums = fluid.data(name='sums', shape=[3, 3], dtype='float32')
            loop_len1 = layers.fill_constant(shape=[1], dtype='int64', value=2)
            loop_len2 = layers.fill_constant(shape=[1], dtype='int64', value=3)
            ones = layers.fill_constant(shape=[3, 3], dtype='float32', value=1)

            out = layers.while_loop(external_cond, external_body,
                                    [i, j, init, sums])

            data = np.random.rand(3, 3).astype('float32')
            data_sums = np.zeros([3, 3]).astype('float32')

        place = fluid.CUDAPlace(0) if core.is_compiled_with_cuda(
        ) else fluid.CPUPlace()
        exe = fluid.Executor(place)
        res = exe.run(main_program,
                      feed={'init': data,
                            'sums': data_sums},
                      fetch_list=out)
        # Reference computation in plain numpy mirroring one outer pass:
        # three inner accumulations plus two outer ones.
        for i in range(3):
            data = np.add(data, 1)
            data_sums = np.add(data, data_sums)
        for j in range(2):
            data_sums = np.add(data, data_sums)
        # out = [i, j, init, sums]; res[3] is the final sums tensor.
        self.assertTrue(np.allclose(np.asarray(res[3]), data_sums))
Ejemplo n.º 50
0
def save_program_desc(network_func,
                      startup_path="startup_program",
                      main_path="main_program"):
    """Build *network_func* inside fresh programs and serialize both descs.

    Constructs a new startup/train program pair, runs ``network_func`` under
    a program guard to populate them, and writes each program's protobuf
    desc to disk.

    Args:
        network_func: callable that adds the network to the current program;
            invoked as ``network_func(with_optimize=is_trainable)``.
        startup_path: output file for the startup program desc
            (default keeps the original hard-coded name).
        main_path: output file for the main/train program desc
            (default keeps the original hard-coded name).

    NOTE(review): ``is_trainable`` is not defined inside this function — it
    must exist as a module-level name, otherwise this raises ``NameError``;
    confirm at the call site.
    """
    startup_program = framework.Program()
    train_program = framework.Program()

    with framework.program_guard(train_program, startup_program):
        # build graph here
        network_func(with_optimize=is_trainable)

    with open(startup_path, "wb") as f:
        f.write(startup_program.desc.serialize_to_string())
    with open(main_path, "wb") as f:
        f.write(train_program.desc.serialize_to_string())
Ejemplo n.º 51
0
    def test_var_dict(self):
        """while_loop must thread dict / list / list-of-dict loop vars intact."""

        def _cond(counter, limit, dict_arg, list_arg, list_dict_arg):
            return layers.less_than(counter, limit)

        def _body(counter, limit, dict_arg, list_arg, list_dict_arg):
            # Rebind and then increment the dict-held var.
            dict_arg["test_key"] = counter
            dict_arg["test_key"] += 1

            # Reshape the list-held var and bump every element.
            list_arg[0] = fluid.layers.reshape(list_arg[0], [2, -1]) + 1

            # Increment, then pass the dict-in-list var through relu.
            list_dict_arg[0]["test_key"] += 1
            list_dict_arg[0]["test_key"] = fluid.layers.relu(
                list_dict_arg[0]["test_key"])

            counter = layers.increment(counter)
            return [counter, limit, dict_arg, list_arg, list_dict_arg]

        main_program = Program()
        startup_program = Program()
        with program_guard(main_program, startup_program):
            counter = layers.zeros(shape=[1], dtype='int64')
            limit = layers.fill_constant(shape=[1], dtype='int64', value=10)
            seed_var = layers.fill_constant(shape=[1], dtype='int64', value=0)

            var_dict = {"test_key": seed_var}
            var_list = [
                layers.fill_constant(shape=[1, 2], dtype='int64', value=0)
            ]
            var_list_dict = [{
                "test_key":
                layers.fill_constant(shape=[1], dtype='float32', value=0)
            }]

            counter, limit, var_dict, var_list, var_list_dict = layers.while_loop(
                _cond, _body,
                [counter, limit, var_dict, var_list, var_list_dict])
        if core.is_compiled_with_cuda():
            place = fluid.CUDAPlace(0)
        else:
            place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        res = exe.run(main_program,
                      fetch_list=[
                          var_dict["test_key"], var_list[0],
                          var_list_dict[0]["test_key"]
                      ])
        expected = [
            np.full((1), 10, dtype=np.int64),
            np.full((2, 1), 10, dtype=np.int64),
            np.full((1), 10, dtype=np.float32),
        ]
        for got, want in zip(res, expected):
            self.assertTrue(np.allclose(np.asarray(got), want))
Ejemplo n.º 52
0
    def test_decay(self):
        """Run each fluid decay layer against its Python reference function."""
        staircase_kwargs = {
            "learning_rate": 1.0,
            "decay_steps": 5,
            "decay_rate": 0.5,
            "staircase": True
        }
        # Same settings with staircase disabled (values are scalars, so a
        # shallow copy is sufficient).
        smooth_kwargs = dict(staircase_kwargs, staircase=False)

        cases = [
            (exponential_decay, layers.exponential_decay, staircase_kwargs),
            (exponential_decay, layers.exponential_decay, smooth_kwargs),
            (natural_exp_decay, layers.natural_exp_decay, staircase_kwargs),
            (natural_exp_decay, layers.natural_exp_decay, smooth_kwargs),
            (inverse_time_decay, layers.inverse_time_decay, staircase_kwargs),
            (inverse_time_decay, layers.inverse_time_decay, smooth_kwargs),
            (polynomial_decay, layers.polynomial_decay, {
                "learning_rate": 1.0,
                "decay_steps": 5,
                "cycle": True
            }),
            (polynomial_decay, layers.polynomial_decay, {
                "learning_rate": 1.0,
                "decay_steps": 5,
                "cycle": False
            }),
            (piecewise_decay, layers.piecewise_decay, {
                "boundaries": [3, 6, 9],
                "values": [0.1, 0.2, 0.3, 0.4]
            }),
            (cosine_decay, layers.cosine_decay, {
                "learning_rate": 0.1,
                "step_each_epoch": 100,
                "epochs": 120
            }),
            (noam_decay, layers.noam_decay, {
                "d_model": 0.01,
                "warmup_steps": 200,
                "learning_rate": 2.0
            }),
        ]

        for ref_fn, fluid_fn, kwargs in cases:
            print("class=" + self.__class__.__name__ + " decay_fn=" +
                  ref_fn.__name__ + " kwargs=" + str(kwargs))
            # Fresh programs per case so graphs never leak between checks.
            main_program = framework.Program()
            startup_program = framework.Program()
            with framework.program_guard(main_program, startup_program):
                self.check_decay(ref_fn, fluid_fn, kwargs)
Ejemplo n.º 53
0
def get_program(layer, input_spec, output_spec, **configs):
    """Lower a dygraph ``Layer`` to a pruned, inference-ready static Program.

    Mirrors the core of ``paddle.jit.save``: translate ``layer.forward`` to a
    static graph, tag each requested output with a uniquely-named scale op,
    strip stale feed/fetch ops, prune the graph to the chosen inputs/outputs,
    and re-attach fresh feed/fetch ops.

    Args:
        layer: a dygraph ``Layer``; anything else raises ``TypeError``.
        input_spec: input specs forwarded to ``paddle.jit.to_static`` when
            ``layer.forward`` is not already a ``StaticFunction``.
        output_spec: subset of outputs to keep (passed to ``_get_output_vars``).
        **configs: accepted for call-compatibility; not read in this body.

    Returns:
        A tuple ``(main_program, feed_var_names, target_vars)``.

    Raises:
        RuntimeError: when the ProgramTranslator is disabled.
        TypeError: when ``layer`` is not a ``Layer``.
    """
    paddle.jit.set_verbosity(0)
    prog_translator = program_translator.ProgramTranslator()
    if not prog_translator.enable_to_static:
        raise RuntimeError(
            "The paddle.jit.save doesn't work when setting ProgramTranslator.enable to False."
        )
    if isinstance(layer, Layer):
        if isinstance(layer.forward, program_translator.StaticFunction):
            concrete_program = layer.forward.concrete_program
        else:
            # transform in jit.save, if input_spec is incomplete, declarative will throw error
            layer = paddle.jit.to_static(layer, input_spec=input_spec)
            concrete_program = layer.forward.concrete_program
            # the input_spec has been used in declarative, which is equal to
            # @declarative with input_spec and jit.save without input_spec,
            # avoid needless warning
            input_spec = None
    else:
        raise TypeError(
            "The input Layer should be 'Layer', but received  type is %s." %
            type(layer))
    feed_var_names = paddle.fluid.dygraph.jit._get_input_var_names(
        concrete_program.inputs, input_spec)
    target_vars = paddle.fluid.dygraph.jit._get_output_vars(
        concrete_program.outputs, output_spec)
    # Clone so the translated program cached on the layer stays untouched.
    main_program = concrete_program.main_program.clone()
    with program_guard(main_program):
        # Append scale(var, 1.) per target so each fetched output gets a
        # unique, predictable name ("save_infer_model/scale_<i>").
        uniq_target_vars = []
        for i, var in enumerate(target_vars):
            if isinstance(var, Variable):
                var = layers.scale(var,
                                   1.,
                                   name="save_infer_model/scale_{}".format(i))
            uniq_target_vars.append(var)
        target_vars = uniq_target_vars
    global_block = main_program.global_block()
    # Clear target flags and drop leftover feed/fetch ops; removal runs in
    # reverse index order so earlier removals don't shift later indices.
    need_to_remove_op_index = []
    for i, op in enumerate(global_block.ops):
        op.desc.set_is_target(False)
        if op.type == "feed" or op.type == "fetch":
            need_to_remove_op_index.append(i)
    for index in need_to_remove_op_index[::-1]:
        global_block._remove_op(index)
    main_program.desc.flush()
    # Prune to the ops needed to compute target_vars from the feeds, then
    # strip training-only constructs for inference.
    main_program = main_program._prune_with_input(
        feeded_var_names=feed_var_names, targets=target_vars)
    main_program = main_program._inference_optimize(prune_read_op=True)
    fetch_var_names = [v.name for v in target_vars]
    prepend_feed_ops(main_program, feed_var_names)
    append_fetch_ops(main_program, fetch_var_names)
    return main_program, feed_var_names, target_vars
Ejemplo n.º 54
0
 def test_prelu(self):
     """prelu in 'channel' mode builds cleanly inside a program guard."""
     prog = Program()
     with program_guard(prog):
         x = layers.data(name="input",
                         shape=[5, 200, 100, 100],
                         dtype="float32")
         result = layers.prelu(x,
                               'channel',
                               param_attr=ParamAttr(initializer=Constant(1.0)),
                               name='prelu')
         self.assertIsNotNone(result)
     print(str(prog))
Ejemplo n.º 55
0
 def test_pad2d(self):
     """pad2d with reflect mode and NCHW layout compiles into the program."""
     prog = Program()
     with program_guard(prog):
         x = layers.data(name="input",
                         shape=[3, 100, 100],
                         dtype="float32")
         padded = layers.pad2d(x,
                               paddings=[1, 2, 3, 4],
                               mode='reflect',
                               data_format='NCHW',
                               name="shape")
         self.assertIsNotNone(padded)
     print(str(prog))
Ejemplo n.º 56
0
 def test_yolo_box_with_scale(self):
     """yolo_box with scale_x_y yields both boxes and scores outputs."""
     prog = Program()
     with program_guard(prog):
         feat = layers.data(name='x', shape=[30, 7, 7], dtype='float32')
         size = layers.data(name='img_size', shape=[2], dtype='int32')
         boxes, scores = layers.yolo_box(feat, size, [10, 13, 30, 13], 10,
                                         0.01, 32, scale_x_y=1.2)
         for out in (boxes, scores):
             self.assertIsNotNone(out)
Ejemplo n.º 57
0
 def test_adaptive_pool2d(self):
     """adaptive_pool2d accepts both list and scalar output sizes."""
     prog = Program()
     with program_guard(prog):
         x = layers.data(name='x', shape=[3, 224, 224], dtype='float32')
         # Exercise the same call sequence for [3, 3] and for scalar 3.
         for out_size in ([3, 3], 3):
             self.assertIsNotNone(
                 layers.adaptive_pool2d(x, out_size, pool_type='avg'))
             pooled, idx = layers.adaptive_pool2d(
                 x, out_size, require_index=True)
             self.assertIsNotNone(pooled)
             self.assertIsNotNone(idx)
Ejemplo n.º 58
0
 def test_distribute_fpn_proposals_error(self):
     """int32 rois must be rejected with TypeError."""
     prog = Program()
     with program_guard(prog):
         bad_rois = fluid.data(
             name='data_error', shape=[10, 4], dtype='int32', lod_level=1)
         with self.assertRaises(TypeError):
             layers.distribute_fpn_proposals(
                 fpn_rois=bad_rois,
                 min_level=2,
                 max_level=5,
                 refer_level=4,
                 refer_scale=224)
Ejemplo n.º 59
0
 def _append_scale_to_output(self, program):
     """Append a scale(var, 1.) op after each output var, then refresh descs."""
     # Step 1: append a uniquely-named scale op per recorded output.
     scaled_vars = []
     with framework.program_guard(program):
         for idx, out_desc in enumerate(self._output_descs):
             src = program.global_block().var(out_desc.name())
             scaled_vars.append(
                 nn.scale(src,
                          1.,
                          name="translated_layer/scale_{}".format(idx)))
     # Step 2: point the recorded descs at the scaled vars (in place).
     for idx, scaled in enumerate(scaled_vars):
         self._output_descs[idx] = scaled.desc
Ejemplo n.º 60
0
 def test_box_decoder_and_assign(self):
     program = Program()
     with program_guard(program):
         pb = fluid.data(name='prior_box', shape=[None, 4], dtype='float32')
         pbv = fluid.data(name='prior_box_var', shape=[4], dtype='float32')
         loc = fluid.data(
             name='target_box', shape=[None, 4 * 81], dtype='float32')
         scores = fluid.data(
             name='scores', shape=[None, 81], dtype='float32')
         decoded_box, output_assign_box = fluid.layers.box_decoder_and_assign(
             pb, pbv, loc, scores, 4.135)
         self.assertIsNotNone(decoded_box)
         self.assertIsNotNone(output_assign_box)