Example #1
    def test_recognize_digits_conv(self):
        program = Program()
        with program_guard(program, startup_program=Program()):
            images = layers.data(
                name='pixel', shape=[1, 28, 28], dtype='float32')
            label = layers.data(name='label', shape=[1], dtype='int32')
            conv_pool_1 = nets.simple_img_conv_pool(
                input=images,
                filter_size=5,
                num_filters=2,
                pool_size=2,
                pool_stride=2,
                act="relu")
            conv_pool_2 = nets.simple_img_conv_pool(
                input=conv_pool_1,
                filter_size=5,
                num_filters=4,
                pool_size=2,
                pool_stride=2,
                act="relu")

            predict = layers.fc(input=conv_pool_2, size=10, act="softmax")
            cost = layers.cross_entropy(input=predict, label=label)
            avg_cost = layers.mean(cost)

        print(str(program))
Example #2
 def test_detection_output(self):
     program = Program()
     with program_guard(program):
         pb = layers.data(
             name='prior_box',
             shape=[10, 4],
             append_batch_size=False,
             dtype='float32')
         pbv = layers.data(
             name='prior_box_var',
             shape=[10, 4],
             append_batch_size=False,
             dtype='float32')
         loc = layers.data(
             name='target_box',
             shape=[2, 10, 4],
             append_batch_size=False,
             dtype='float32')
         scores = layers.data(
             name='scores',
             shape=[2, 10, 20],
             append_batch_size=False,
             dtype='float32')
         out = layers.detection_output(
             scores=scores, loc=loc, prior_box=pb, prior_box_var=pbv)
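          # each row of the output is [label, confidence, xmin, ymin, xmax, ymax]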
         self.assertIsNotNone(out)
         self.assertEqual(out.shape[-1], 6)
     print(str(program))
Example #3
    def test_detection_api(self):
        program = Program()
        with program_guard(program):
            x = layers.data(name='x', shape=[4], dtype='float32')
            y = layers.data(name='y', shape=[4], dtype='float32')
            z = layers.data(name='z', shape=[4], dtype='float32', lod_level=1)
            iou = layers.iou_similarity(x=x, y=y)
            bcoder = layers.box_coder(
                prior_box=x,
                prior_box_var=y,
                target_box=z,
                code_type='encode_center_size')
            self.assertIsNotNone(iou)
            self.assertIsNotNone(bcoder)

            matched_indices, matched_dist = layers.bipartite_match(iou)
            self.assertIsNotNone(matched_indices)
            self.assertIsNotNone(matched_dist)

            gt = layers.data(
                name='gt', shape=[1, 1], dtype='int32', lod_level=1)
            trg, trg_weight = layers.target_assign(
                gt, matched_indices, mismatch_value=0)
            self.assertIsNotNone(trg)
            self.assertIsNotNone(trg_weight)

            gt2 = layers.data(
                name='gt2', shape=[10, 4], dtype='float32', lod_level=1)
            trg, trg_weight = layers.target_assign(
                gt2, matched_indices, mismatch_value=0)
            self.assertIsNotNone(trg)
            self.assertIsNotNone(trg_weight)

        print(str(program))
Example #4
    def create_rnn_op(self):
        x = layers.data(
            shape=[self.sent_len, self.batch_size, self.input_dim],
            dtype='float32',
            name='x',
            append_batch_size=False,
            **self.p_info)
        x.stop_gradient = False
        h_boot = layers.data(
            shape=[self.input_dim],
            dtype='float32',
            name='h_boot',
            **self.p_info)
        h_boot.stop_gradient = False

        rnn = layers.StaticRNN(main_program=self.main_program)
        with rnn.step():
            h_pre = rnn.memory(init=h_boot)
            x_t = rnn.step_input(x)

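            # recurrence: h_t = scale * (h_{t-1} + x_t)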
            h = layers.scale(
                x=layers.elementwise_add(
                    x=h_pre, y=x_t, **self.p_info),
                scale=self.py_rnn.scale,
                **self.p_info)

            rnn.update_memory(h_pre, h)
            rnn.output(h)

        return rnn()
Example #5
 def test_softmax_with_cross_entropy(self):
     program = Program()
     with program_guard(program):
         x = layers.data(name='x', shape=[16], dtype='float32')
         y = layers.data(name='label', shape=[1], dtype='int64')
         loss = layers.softmax_with_cross_entropy(x, y)
         self.assertIsNotNone(loss)
     print(str(program))
Example #6
 def test_smooth_l1(self):
     program = Program()
     with program_guard(program):
         x = layers.data(name='x', shape=[4], dtype='float32')
         y = layers.data(name='label', shape=[4], dtype='float32')
         loss = layers.smooth_l1(x, y)
         self.assertIsNotNone(loss)
     print(str(program))
Example #7
 def test_sequence_expand(self):
     program = Program()
     with program_guard(program):
         x = layers.data(name='x', shape=[10], dtype='float32')
         y = layers.data(
             name='y', shape=[10, 20], dtype='float32', lod_level=2)
         self.assertIsNotNone(layers.sequence_expand(x=x, y=y, ref_level=1))
     print(str(program))
Example #8
 def test_lod_reset(self):
     program = Program()
     with program_guard(program):
         x = layers.data(name='x', shape=[10], dtype='float32')
         y = layers.data(
             name='y', shape=[10, 20], dtype='float32', lod_level=2)
         print(layers.lod_reset(x=x, y=y))
     print(str(program))
Example #9
 def test_multiplex(self):
     program = Program()
     with program_guard(program):
         x1 = layers.data(name='x1', shape=[4], dtype='float32')
         x2 = layers.data(name='x2', shape=[4], dtype='float32')
         index = layers.data(name='index', shape=[1], dtype='int32')
         out = layers.multiplex(inputs=[x1, x2], index=index)
         self.assertIsNotNone(out)
     print(str(program))
Example #10
 def test_sigmoid_cross_entropy(self):
     program = Program()
     with program_guard(program):
         dat = layers.data(name='data', shape=[10], dtype='float32')
         lbl = layers.data(name='label', shape=[10], dtype='float32')
         self.assertIsNotNone(
             layers.sigmoid_cross_entropy_with_logits(
                 x=dat, label=lbl))
     print(str(program))
Example #11
 def test_roi_pool(self):
     program = Program()
     with program_guard(program):
         x = layers.data(name="x", shape=[256, 30, 30], dtype="float32")
         rois = layers.data(
             name="rois", shape=[4], dtype="float32", lod_level=1)
         output = layers.roi_pool(x, rois, 7, 7, 0.6)
         self.assertIsNotNone(output)
     print(str(program))
Example #12
def get_usr_combined_features():
    # FIXME(dzh): the old API integer_value(10) may have performed a range
    # check; currently there is no user-configured check.

    USR_DICT_SIZE = paddle.dataset.movielens.max_user_id() + 1

    uid = layers.data(name='user_id', shape=[1], dtype='int64')

    usr_emb = layers.embedding(
        input=uid,
        dtype='float32',
        size=[USR_DICT_SIZE, 32],
        param_attr='user_table',
        is_sparse=IS_SPARSE)

    usr_fc = layers.fc(input=usr_emb, size=32)

    USR_GENDER_DICT_SIZE = 2

    usr_gender_id = layers.data(name='gender_id', shape=[1], dtype='int64')

    usr_gender_emb = layers.embedding(
        input=usr_gender_id,
        size=[USR_GENDER_DICT_SIZE, 16],
        param_attr='gender_table',
        is_sparse=IS_SPARSE)

    usr_gender_fc = layers.fc(input=usr_gender_emb, size=16)

    USR_AGE_DICT_SIZE = len(paddle.dataset.movielens.age_table)
    usr_age_id = layers.data(name='age_id', shape=[1], dtype="int64")

    usr_age_emb = layers.embedding(
        input=usr_age_id,
        size=[USR_AGE_DICT_SIZE, 16],
        is_sparse=IS_SPARSE,
        param_attr='age_table')

    usr_age_fc = layers.fc(input=usr_age_emb, size=16)

    USR_JOB_DICT_SIZE = paddle.dataset.movielens.max_job_id() + 1
    usr_job_id = layers.data(name='job_id', shape=[1], dtype="int64")

    usr_job_emb = layers.embedding(
        input=usr_job_id,
        size=[USR_JOB_DICT_SIZE, 16],
        param_attr='job_table',
        is_sparse=IS_SPARSE)

    usr_job_fc = layers.fc(input=usr_job_emb, size=16)

    concat_embed = layers.concat(
        input=[usr_fc, usr_gender_fc, usr_age_fc, usr_job_fc], axis=1)

    usr_combined_features = layers.fc(input=concat_embed, size=200, act="tanh")

    return usr_combined_features
Example #13
    def test_fit_line_inference_model(self):
        MODEL_DIR = "./tmp/inference_model"

        init_program = Program()
        program = Program()

        with program_guard(program, init_program):
            x = layers.data(name='x', shape=[2], dtype='float32')
            y = layers.data(name='y', shape=[1], dtype='float32')

            y_predict = layers.fc(input=x, size=1, act=None)

            cost = layers.square_error_cost(input=y_predict, label=y)
            avg_cost = layers.mean(cost)

            sgd_optimizer = optimizer.SGDOptimizer(learning_rate=0.001)
            sgd_optimizer.minimize(avg_cost, init_program)

        place = core.CPUPlace()
        exe = executor.Executor(place)

        exe.run(init_program, feed={}, fetch_list=[])

        for i in range(100):
            tensor_x = np.array(
                [[1, 1], [1, 2], [3, 4], [5, 2]]).astype("float32")
            tensor_y = np.array([[-2], [-3], [-7], [-7]]).astype("float32")

            exe.run(program,
                    feed={'x': tensor_x,
                          'y': tensor_y},
                    fetch_list=[avg_cost])

        save_inference_model(MODEL_DIR, ["x", "y"], [avg_cost], exe, program)
        expected = exe.run(program,
                           feed={'x': tensor_x,
                                 'y': tensor_y},
                           fetch_list=[avg_cost])[0]

        import importlib
        importlib.reload(executor)  # reload to build a new scope
        exe = executor.Executor(place)

        [infer_prog, feed_var_names, fetch_vars] = load_inference_model(
            MODEL_DIR, exe)

        outs = exe.run(
            infer_prog,
            feed={feed_var_names[0]: tensor_x,
                  feed_var_names[1]: tensor_y},
            fetch_list=fetch_vars)
        actual = outs[0]

        self.assertEqual(feed_var_names, ["x", "y"])
        self.assertEqual(len(fetch_vars), 1)
        self.assertEqual(str(fetch_vars[0]), str(avg_cost))
        self.assertEqual(expected, actual)
Example #14
    def test_ifelse(self):
        prog = Program()
        startup_prog = Program()
        with program_guard(prog, startup_prog):
            image = layers.data(name='x', shape=[784], dtype='float32')

            label = layers.data(name='y', shape=[1], dtype='int64')

            limit = layers.fill_constant_batch_size_like(
                input=label, dtype='int64', shape=[1], value=5.0)
            cond = layers.less_than(x=label, y=limit)
            ie = layers.IfElse(cond)
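            # IfElse splits the batch by cond: each branch only sees the rows
            # for which its condition holds, and ie() merges the outputs back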

            with ie.true_block():
                true_image = ie.input(image)
                hidden = layers.fc(input=true_image, size=100, act='tanh')
                prob = layers.fc(input=hidden, size=10, act='softmax')
                ie.output(prob)

            with ie.false_block():
                false_image = ie.input(image)
                hidden = layers.fc(input=false_image, size=200, act='tanh')
                prob = layers.fc(input=hidden, size=10, act='softmax')
                ie.output(prob)

            prob = ie()
            loss = layers.cross_entropy(input=prob[0], label=label)
            avg_loss = layers.mean(loss)

            optimizer = MomentumOptimizer(learning_rate=0.001, momentum=0.9)
            optimizer.minimize(avg_loss, startup_prog)
        train_reader = paddle.batch(
            paddle.reader.shuffle(
                paddle.dataset.mnist.train(), buf_size=8192),
            batch_size=200)

        place = core.CPUPlace()
        exe = Executor(place)

        exe.run(startup_prog)
        PASS_NUM = 100
        for pass_id in range(PASS_NUM):
            for data in train_reader():
                x_data = np.array([x[0] for x in data]).astype("float32")
                y_data = np.array([x[1] for x in data]).astype("int64")
                y_data = y_data.reshape((y_data.shape[0], 1))

                outs = exe.run(prog,
                               feed={'x': x_data,
                                     'y': y_data},
                               fetch_list=[avg_loss])
                print(outs[0])
                if outs[0] < 1.0:
                    return
        self.fail("loss did not drop below 1.0 within %d passes" % PASS_NUM)
Example #15
    def test_fit_a_line(self):
        program = Program()
        with program_guard(program, startup_program=Program()):
            x = layers.data(name='x', shape=[13], dtype='float32')
            y_predict = layers.fc(input=x, size=1, act=None)
            y = layers.data(name='y', shape=[1], dtype='float32')
            cost = layers.square_error_cost(input=y_predict, label=y)
            avg_cost = layers.mean(cost)
            self.assertIsNotNone(avg_cost)

        print(str(program))
Example #16
    def test_simple_forward(self):
        d0 = layers.data(
            "d0", shape=[10], append_batch_size=False, dtype='float32')
        d1 = layers.data(
            "d1", shape=[10], append_batch_size=False, dtype='float32')
        d2 = layers.data(
            "d2", shape=[10], append_batch_size=False, dtype='float32')
        i = layers.zeros(shape=[1], dtype='int64')
        i.stop_gradient = True
        init = layers.zeros(shape=[10], dtype='float32')
        mem_array = layers.array_write(x=init, i=i)
        data_array = layers.array_write(x=d0, i=i)

        i = layers.increment(i)
        layers.array_write(d1, i, array=data_array)

        i = layers.increment(i)
        layers.array_write(d2, i, array=data_array)

        i = layers.zeros(shape=[1], dtype='int64')
        i.stop_gradient = True

        array_len = layers.fill_constant(shape=[1], dtype='int64', value=3)
        array_len.stop_gradient = True
        cond = layers.less_than(x=i, y=array_len)

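        # loop invariant: mem_array[i + 1] = data_array[i] + mem_array[i], so
        # after three steps the last slot holds d0 + d1 + d2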
        while_op = layers.While(cond=cond)
        with while_op.block():
            d = layers.array_read(array=data_array, i=i)
            prev = layers.array_read(array=mem_array, i=i)
            result = layers.sums(input=[d, prev])

            i = layers.increment(x=i, in_place=True)
            layers.array_write(result, i=i, array=mem_array)
            layers.less_than(x=i, y=array_len, cond=cond)

        sum_result = layers.array_read(array=mem_array, i=i)
        loss = layers.mean(sum_result)

        append_backward(loss)

        cpu = core.CPUPlace()
        exe = Executor(cpu)
        d = []

        for i in range(3):
            d.append(numpy.random.random(size=[10]).astype('float32'))

        outs = exe.run(feed={'d0': d[0],
                             'd1': d[1],
                             'd2': d[2]},
                       fetch_list=[sum_result])
        self.assertAlmostEqual(numpy.sum(d), numpy.sum(outs[0]), delta=0.01)
Example #17
    def setUp(self):
        program = Program()
        with program_guard(program, startup_program=Program()):
            x = layers.data(name='x', shape=[13], dtype='float32')
            y_predict = layers.fc(input=x, size=1, act=None)
            y = layers.data(name='y', shape=[1], dtype='float32')
            cost = layers.square_error_cost(input=y_predict, label=y)
            avg_cost = layers.mean(cost)
            opt = optimizer.SGD(learning_rate=0.001)
            opt = opt.minimize(avg_cost)

        self.program = program
Example #18
def get_mov_combined_features():

    MOV_DICT_SIZE = paddle.dataset.movielens.max_movie_id() + 1

    mov_id = layers.data(name='movie_id', shape=[1], dtype='int64')

    mov_emb = layers.embedding(
        input=mov_id,
        dtype='float32',
        size=[MOV_DICT_SIZE, 32],
        param_attr='movie_table',
        is_sparse=IS_SPARSE)

    mov_fc = layers.fc(input=mov_emb, size=32)

    CATEGORY_DICT_SIZE = len(paddle.dataset.movielens.movie_categories())

    category_id = layers.data(
        name='category_id', shape=[1], dtype='int64', lod_level=1)

    mov_categories_emb = layers.embedding(
        input=category_id, size=[CATEGORY_DICT_SIZE, 32], is_sparse=IS_SPARSE)

    mov_categories_hidden = layers.sequence_pool(
        input=mov_categories_emb, pool_type="sum")

    MOV_TITLE_DICT_SIZE = len(paddle.dataset.movielens.get_movie_title_dict())

    mov_title_id = layers.data(
        name='movie_title', shape=[1], dtype='int64', lod_level=1)

    mov_title_emb = layers.embedding(
        input=mov_title_id, size=[MOV_TITLE_DICT_SIZE, 32], is_sparse=IS_SPARSE)

    mov_title_conv = nets.sequence_conv_pool(
        input=mov_title_emb,
        num_filters=32,
        filter_size=3,
        act="tanh",
        pool_type="sum")

    concat_embed = layers.concat(
        input=[mov_fc, mov_categories_hidden, mov_title_conv], axis=1)

    # FIXME(dzh) : need tanh operator
    mov_combined_features = layers.fc(input=concat_embed, size=200, act="tanh")

    return mov_combined_features
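
In the full movielens example these two feature towers are combined with a cosine similarity and regressed against the 1-5 rating; a minimal sketch, assuming the helpers above and a float32 'score' label:

def model():
    usr_combined_features = get_usr_combined_features()
    mov_combined_features = get_mov_combined_features()

    # cosine similarity between the two towers, scaled to the rating range
    inference = layers.cos_sim(X=usr_combined_features, Y=mov_combined_features)
    scale_infer = layers.scale(x=inference, scale=5.0)

    label = layers.data(name='score', shape=[1], dtype='float32')
    square_cost = layers.square_error_cost(input=scale_infer, label=label)
    avg_cost = layers.mean(square_cost)

    return scale_infer, avg_cost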
Example #19
 def test_lstm_unit(self):
     program = Program()
     with program_guard(program):
         x_t_data = layers.data(
             name='x_t_data', shape=[10, 10], dtype='float32')
         x_t = layers.fc(input=x_t_data, size=10)
         prev_hidden_data = layers.data(
             name='prev_hidden_data', shape=[10, 30], dtype='float32')
         prev_hidden = layers.fc(input=prev_hidden_data, size=30)
         prev_cell_data = layers.data(
             name='prev_cell', shape=[10, 30], dtype='float32')
         prev_cell = layers.fc(input=prev_cell_data, size=30)
         self.assertIsNotNone(
             layers.lstm_unit(
                 x_t=x_t, hidden_t_prev=prev_hidden, cell_t_prev=prev_cell))
     print(str(program))
Example #20
    def test_forward(self):
        data = layers.data(name='X', shape=[1], dtype='float32')
        data.stop_gradient = False
        cond = layers.ConditionalBlock(inputs=[data])
        out = layers.create_tensor(dtype='float32')
        with cond.block():
            hidden = layers.fc(input=data, size=10)
            layers.assign(hidden, out)

        cpu = core.CPUPlace()
        exe = Executor(cpu)
        exe.run(default_startup_program())

        x = numpy.random.random(size=(10, 1)).astype('float32')

        outs = exe.run(feed={'X': x}, fetch_list=[out])[0]
        print(outs)
        loss = layers.mean(out)
        append_backward(loss=loss)
        outs = exe.run(
            feed={'X': x},
            fetch_list=[
                default_main_program().block(0).var(data.name + "@GRAD")
            ])[0]
        print(outs)
Example #21
 def test_softmax(self):
     program = Program()
     with program_guard(program):
         data = layers.data(name='data', shape=[10], dtype='float32')
         hid = layers.fc(input=data, size=20)
         self.assertIsNotNone(layers.softmax(hid))
     print(str(program))
Example #22
 def test_sequence_reshape(self):
     program = Program()
     with program_guard(program):
         x = layers.data(name='x', shape=[8], dtype='float32', lod_level=1)
         out = layers.sequence_reshape(input=x, new_dim=16)
         self.assertIsNotNone(out)
     print(str(program))
Example #23
    def test_simple_conv2d(self):
        program = Program()
        with program_guard(program, startup_program=Program()):
            images = layers.data(name='pixel', shape=[3, 48, 48], dtype='int32')
            layers.conv2d(input=images, num_filters=3, filter_size=[4, 4])

        print(str(program))
Example #24
    def test_nce(self):
        window_size = 5
        words = []
        for i in range(window_size):
            words.append(
                layers.data(
                    name='word_{0}'.format(i), shape=[1], dtype='int64'))

        dict_size = 10000
        label_word = int(window_size / 2) + 1

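        # embed every context word except the label word; NCE then scores the
        # label against sampled negatives instead of a full softmax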
        embs = []
        for i in range(window_size):
            if i == label_word:
                continue

            emb = layers.embedding(
                input=words[i],
                size=[dict_size, 32],
                param_attr='emb.w',
                is_sparse=True)

            embs.append(emb)

        embs = layers.concat(input=embs, axis=1)
        loss = layers.nce(input=embs,
                          label=words[label_word],
                          num_total_classes=dict_size,
                          param_attr='nce.w',
                          bias_attr='nce.b')
        avg_loss = layers.mean(loss)
        self.assertIsNotNone(avg_loss)
        print(str(default_main_program()))
Example #25
 def test_row_conv(self):
     program = Program()
     with program_guard(program):
         x = layers.data(name='x', shape=[16], dtype='float32', lod_level=1)
         out = layers.row_conv(input=x, future_context_size=2)
         self.assertIsNotNone(out)
     print(str(program))
Example #26
def train_decoder(context, is_sparse):
    # decoder
    trg_language_word = pd.data(
        name="target_language_word", shape=[1], dtype='int64', lod_level=1)
    trg_embedding = pd.embedding(
        input=trg_language_word,
        size=[dict_size, word_dim],
        dtype='float32',
        is_sparse=is_sparse,
        param_attr=fluid.ParamAttr(name='vemb'))

    rnn = pd.DynamicRNN()
    with rnn.block():
        current_word = rnn.step_input(trg_embedding)
        pre_state = rnn.memory(init=context)
        current_state = pd.fc(input=[current_word, pre_state],
                              size=decoder_size,
                              act='tanh')

        current_score = pd.fc(input=current_state,
                              size=target_dict_dim,
                              act='softmax')
        rnn.update_memory(pre_state, current_state)
        rnn.output(current_score)

    return rnn()
Example #27
    def test_grad(self):
        place = core.CPUPlace()
        program = Program()

        with program_guard(program):
            x = layers.data(
                name='x', shape=[1], dtype='float32', stop_gradient=False)
            table = layers.lod_rank_table(x, level=0)
            array = layers.lod_tensor_to_array(x, table)
            result = layers.array_to_lod_tensor(array, table)

            mean = layers.mean(result)

            append_backward(mean)

        tensor = core.LoDTensor()
        tensor.set(numpy.arange(10).reshape(10, 1).astype('float32'), place)
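        # offset-based LoD: 10 rows split into sequences of lengths 3, 6 and 1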
        tensor.set_lod([[0, 3, 9, 10]])

        g_vars = program.global_block().var(x.name + "@GRAD")

        exe = Executor(place)
        g_out = [
            numpy.array(item).sum()
            for item in exe.run(program,
                                feed={'x': tensor},
                                fetch_list=[g_vars],
                                return_numpy=False)
        ]
        g_out_sum = numpy.array(g_out).sum()

        self.assertAlmostEqual(1.0, g_out_sum, delta=0.1)
Example #28
 def test_mul(self):
     a = data(name='a', shape=[784], dtype='float32')
     b = data(
         name='b',
         shape=[784, 100],
         dtype='float32',
         append_batch_size=False)
     out = mul(x=a, y=b)
     place = core.CPUPlace()
     a_np = numpy.random.random((100, 784)).astype('float32')
     b_np = numpy.random.random((784, 100)).astype('float32')
     exe = Executor(place)
     outs = exe.run(feed={'a': a_np, 'b': b_np}, fetch_list=[out])
     out = outs[0]
     self.assertEqual((100, 100), out.shape)
     self.assertTrue(numpy.allclose(out, numpy.dot(a_np, b_np)))
Example #29
    def main(self, tensor, expect_array, expect_lod, expect_max_len, level=0):
        place = self.place()
        program = Program()
        with program_guard(program):
            x = layers.data(name='x', shape=[10])
            x.persistable = True
            table = layers.lod_rank_table(x, level=level)
            max_len = layers.max_sequence_len(table)
            max_len.persistable = True
            array = layers.lod_tensor_to_array(x, table)
            array.persistable = True

            result = layers.array_to_lod_tensor(array, table)
            result.persistable = True
        exe = Executor(place)
        scope = core.Scope()
        exe.run(program, feed={'x': tensor}, scope=scope)
        var = scope.find_var(array.name)
        array = var.get_lod_tensor_array()
        if expect_array is not None and expect_lod is not None:
            self.check_array_same(array, expect_array, expect_lod)
        self.check_tensor_same(scope.find_var(result.name).get_tensor(), tensor)

        self.assertEqual(
            numpy.array(scope.find_var(max_len.name).get_tensor())[0],
            expect_max_len)
Example #30
 def build_network(self, only_forward, **kargs):
     x = layers.data('x', shape=[3], dtype='float32', lod_level=1)
     x.stop_gradient = False
     printed = layers.Print(input=x, **kargs)
     if only_forward: return printed
     loss = layers.mean(printed)
     append_backward(loss=loss)
     return loss
Example #31
def train_program(is_sparse):
    context = encoder(is_sparse)
    rnn_out = train_decoder(context, is_sparse)
    label = pd.data(name="target_language_next_word",
                    shape=[1],
                    dtype='int64',
                    lod_level=1)
    cost = pd.cross_entropy(input=rnn_out, label=label)
    avg_cost = pd.mean(cost)
    return avg_cost
Example #32
 def test_sequence_softmax(self):
     program = Program()
     with program_guard(program):
         seq_data = layers.data(name='seq_data',
                                shape=[10, 10],
                                dtype='float32',
                                lod_level=1)
         seq = layers.fc(input=seq_data, size=20)
         self.assertIsNotNone(layers.sequence_softmax(seq))
     print(str(program))
Example #33
 def test_im2sequence(self):
     print("test_im2sequence")
     program = Program()
     with program_guard(program):
         x = layers.data(name='x', shape=[3, 128, 128], dtype='float32')
         output = layers.im2sequence(input=x,
                                     stride=[1, 1],
                                     filter_size=[2, 2])
         self.assertIsNotNone(output)
     print(str(program))
Example #34
 def test_label_smooth(self):
     program = Program()
     with program_guard(program):
         label = layers.data(name="label", shape=[1], dtype="float32")
         one_hot_label = layers.one_hot(input=label, depth=10)
         smooth_label = layers.label_smooth(label=one_hot_label,
                                            epsilon=0.1,
                                            dtype="float32")
         self.assertIsNotNone(smooth_label)
     print(str(program))
Example #35
    def test_yolov3_loss_with_scale(self):
        program = Program()
        with program_guard(program):
            x = layers.data(name='x', shape=[30, 7, 7], dtype='float32')
            gt_box = layers.data(name='gt_box', shape=[10, 4], dtype='float32')
            gt_label = layers.data(name='gt_label', shape=[10], dtype='int32')
            gt_score = layers.data(name='gt_score', shape=[10], dtype='float32')
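            # positional args below: anchors=[10, 13, 30, 13], anchor_mask=[0, 1],
            # class_num=10, ignore_thresh=0.7, downsample_ratio=32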
            loss = layers.yolov3_loss(
                x,
                gt_box,
                gt_label, [10, 13, 30, 13], [0, 1],
                10,
                0.7,
                32,
                gt_score=gt_score,
                use_label_smooth=False,
                scale_x_y=1.2)

            self.assertIsNotNone(loss)
Example #36
    def func(self, place):
        # the shape of the input variable must be fully specified and must not include -1.
        shape = [2, 3, 7, 9]
        eps = 0.005
        dtype = np.float64

        x = layers.data('x', shape, False, dtype)
        y = layers.data('y', shape[:-1], False, dtype)
        x.persistable = True
        y.persistable = True
        out = layers.elementwise_add(x, y, axis=0)
        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)
        y_arr = np.random.uniform(-1, 1, shape[:-1]).astype(dtype)

        gradient_checker.double_grad_check([x, y],
                                           out,
                                           x_init=[x_arr, y_arr],
                                           place=place,
                                           eps=eps)
Example #37
 def encoder(input_name):
     input_ids = layers.data(
         name=input_name, shape=[1], dtype='int64', lod_level=1)
     input_embedding = layers.embedding(
         input=input_ids,
         size=[source_dict_dim, embedding_dim],
         dtype='float32',
         is_sparse=True)
      encoder_out = bi_lstm_encoder(
          input_seq=input_embedding, gate_size=embedding_dim)
     return encoder_out
Example #38
    def test_serialize_program_and_persistables(self):
        init_program = fluid.default_startup_program()
        program = fluid.default_main_program()

        # fake program without feed/fetch
        with program_guard(program, init_program):
            x = layers.data(name='x', shape=[2], dtype='float32')
            y = layers.data(name='y', shape=[1], dtype='float32')

            y_predict = layers.fc(input=x, size=1, act=None)

            cost = layers.square_error_cost(input=y_predict, label=y)
            avg_cost = layers.mean(cost)

            sgd_optimizer = optimizer.SGDOptimizer(learning_rate=0.001)
            sgd_optimizer.minimize(avg_cost, init_program)

        place = core.CPUPlace()
        exe = executor.Executor(place)
        exe.run(init_program, feed={}, fetch_list=[])

        tensor_x = np.array([[1, 1], [1, 2], [5, 2]]).astype("float32")
        tensor_y = np.array([[-2], [-3], [-7]]).astype("float32")
        for i in six.moves.xrange(3):
            exe.run(program,
                    feed={
                        'x': tensor_x,
                        'y': tensor_y
                    },
                    fetch_list=[avg_cost])

        # test if return type of serialize_program is bytes
        res1 = paddle.static.io.serialize_program([x, y], [avg_cost])
        self.assertTrue(isinstance(res1, bytes))
        # test if return type of serialize_persistables is bytes
        res2 = paddle.static.io.serialize_persistables([x, y], [avg_cost], exe)
        self.assertTrue(isinstance(res2, bytes))
        # test if variables in program is empty
        res = paddle.static.io._serialize_persistables(Program(), None)
        self.assertEqual(res, None)
        self.assertRaises(TypeError, paddle.static.io.deserialize_persistables,
                          None, None, None)
Example #39
    def __init__(self, config, dataset, place):
        logging.info('model is: %s', self.__class__.__name__)
        self.config = config
        self.graph = dataset.graph
        self.place = place
        self.edge_types = sorted(self.graph.edge_types_info())
        logging.info('edge_types in model: %s' % str(self.edge_types))
        neg_num = dataset.config['neg_num']

        # hyper parameters
        self.num_nodes = self.graph.num_nodes
        self.embedding_size = self.config['dimensions']
        self.embedding_u_size = self.config['edge_dim']
        self.dim_a = self.config['att_dim']
        self.att_head = self.config['att_head']
        self.edge_type_count = len(self.edge_types)
        self.u_num = self.edge_type_count

        self.gw = heter_graph_wrapper.HeterGraphWrapper(
            name="heter_graph",
            place=place,
            edge_types=self.graph.edge_types_info(),
            node_feat=self.graph.node_feat_info(),
            edge_feat=self.graph.edge_feat_info())

        self.train_inputs = fl.data('train_inputs',
                                    shape=[None],
                                    dtype='int64')

        self.train_labels = fl.data('train_labels',
                                    shape=[None, 1, 1],
                                    dtype='int64')

        self.train_types = fl.data('train_types',
                                   shape=[None, 1],
                                   dtype='int64')

        self.train_negs = fl.data('train_negs',
                                  shape=[None, neg_num, 1],
                                  dtype='int64')

        self.forward()
Example #40
    def test_word_embedding(self):
        program = Program()
        with program_guard(program, startup_program=Program()):
            dict_size = 10000
            embed_size = 32
            first_word = layers.data(name='firstw', shape=[1], dtype='int64')
            second_word = layers.data(name='secondw', shape=[1], dtype='int64')
            third_word = layers.data(name='thirdw', shape=[1], dtype='int64')
            forth_word = layers.data(name='forthw', shape=[1], dtype='int64')
            next_word = layers.data(name='nextw', shape=[1], dtype='int64')

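            # all four embeddings share one weight matrix via param_attr='shared_w'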
            embed_first = layers.embedding(input=first_word,
                                           size=[dict_size, embed_size],
                                           dtype='float32',
                                           param_attr='shared_w')
            embed_second = layers.embedding(input=second_word,
                                            size=[dict_size, embed_size],
                                            dtype='float32',
                                            param_attr='shared_w')

            embed_third = layers.embedding(input=third_word,
                                           size=[dict_size, embed_size],
                                           dtype='float32',
                                           param_attr='shared_w')
            embed_forth = layers.embedding(input=forth_word,
                                           size=[dict_size, embed_size],
                                           dtype='float32',
                                           param_attr='shared_w')

            concat_embed = layers.concat(
                input=[embed_first, embed_second, embed_third, embed_forth],
                axis=1)

            hidden1 = layers.fc(input=concat_embed, size=256, act='sigmoid')
            predict_word = layers.fc(input=hidden1,
                                     size=dict_size,
                                     act='softmax')
            cost = layers.cross_entropy(input=predict_word, label=next_word)
            avg_cost = layers.mean(cost)
            self.assertIsNotNone(avg_cost)

        print(str(program))
Example #41
 def test_dynamic_lstmp(self):
     program = Program()
     with program_guard(program):
         hidden_dim, proj_dim = 16, 8
         seq_data = layers.data(
             name='seq_data', shape=[10, 10], dtype='float32', lod_level=1)
         fc_out = layers.fc(input=seq_data, size=4 * hidden_dim)
         self.assertIsNotNone(
             layers.dynamic_lstmp(
                 input=fc_out, size=4 * hidden_dim, proj_size=proj_dim))
     print(str(program))
Example #42
    def func(self, place):
        x_shape = [2, 3, 4, 5]
        pad = [1, 0, 1, 0, 1, 0, 1, 0]
        dtype = np.float64

        x = layers.data('x', x_shape, False, dtype)
        x.persistable = True
        out = paddle.nn.functional.pad(x, pad)
        x_arr = np.random.uniform(-1, 1, x_shape).astype(dtype)

        gradient_checker.double_grad_check([x], out, x_init=x_arr, place=place)
Example #43
 def run_local(self, place):
     main = fluid.Program()
     with fluid.program_guard(main):
         x = layers.data(shape=[32, 32],
                         dtype='float32',
                         name='X',
                         append_batch_size=False)
         fluid.initializer.Constant(value=2.3)(x, main.global_block())
         o = layers.scale(x=x, scale=10.0)
     exe = fluid.Executor(place)
     self.local_out = exe.run(main, fetch_list=[o])
 def get_model(self, main_prog, startup_program):
     ring_id = 0
     nranks = 2
     with fluid.program_guard(main_prog, startup_program):
         tindata = layers.data(name="tindata",
                               shape=[10, 1000],
                               dtype='float32')
         toutdata = fluid.layers.collective._c_reducescatter(
             tindata, nranks)
         toutdata = fluid.layers.collective._c_sync_comm_stream(toutdata, 0)
         return toutdata
Example #45
    def test_spectral_norm(self):
        program = Program()
        with program_guard(program):
            weight = layers.data(name='weight',
                                 shape=[2, 3, 32, 32],
                                 dtype="float32",
                                 append_batch_size=False)
            out = layers.spectral_norm(weight, dim=1, power_iters=1)
            self.assertIsNotNone(out)

        print(str(program))
Example #46
    def func(self, place):
        # the shape of the input variable must be fully specified and must not include -1.
        x_shape = [7, 11]
        y_shape = [11, 9]
        eps = 0.005
        dtype = np.float64

        x = layers.data('x', x_shape, False, dtype)
        x.persistable = True
        y = layers.data('y', y_shape, False, dtype)
        y.persistable = True
        out = layers.mul(x, y)
        x_arr = np.random.uniform(-1, 1, x_shape).astype(dtype)
        y_arr = np.random.uniform(-1, 1, y_shape).astype(dtype)

        gradient_checker.double_grad_check([x, y],
                                           out,
                                           x_init=[x_arr, y_arr],
                                           place=place,
                                           eps=eps)
Example #47
 def test_multiclass_nms2(self):
     program = Program()
     with program_guard(program):
         bboxes = layers.data(name='bboxes',
                              shape=[-1, 10, 4],
                              dtype='float32')
         scores = layers.data(name='scores',
                              shape=[-1, 10],
                              dtype='float32')
         output = layers.multiclass_nms2(bboxes, scores, 0.3, 400, 200, 0.7)
         output2, index = layers.multiclass_nms2(bboxes,
                                                 scores,
                                                 0.3,
                                                 400,
                                                 200,
                                                 0.7,
                                                 return_index=True)
         self.assertIsNotNone(output)
         self.assertIsNotNone(output2)
         self.assertIsNotNone(index)
Example #48
 def func(self, place):
     shape = [2, 3, 7, 9]
     eps = 0.0005
     dtype = np.float64
     x = layers.data('x', shape, False, dtype=dtype)
     x.persistable = True
     y = layers.sigmoid(x)
     x_arr = np.random.random(shape).astype(dtype)
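      # nudge near-zero inputs away from zero; the finite-difference check is
      # unreliable when |x| is comparable to the perturbation eps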
     x_arr[np.abs(x_arr) < 0.005] = 0.002
     gradient_checker.triple_grad_check(
         [x], y, x_init=x_arr, place=place, eps=eps)
Example #49
    def __init__(self, args, gw, num_class):
        self.args = args
        self.num_layers = self.args.num_layers
        self.hidden_size = self.args.hidden_size
        self.train_eps = self.args.train_eps
        self.pool_type = self.args.pool_type
        self.dropout_prob = self.args.dropout_prob
        self.num_class = num_class

        self.gw = gw
        self.labels = fl.data(name="labels", shape=[None, 1], dtype="int64")
Example #50
    def func(self, place):
        x_shape = [2, 3, 4, 5]
        pad = [1, 1, 1, 1]
        dtype = np.float64

        x1 = layers.data('x1', x_shape, False, dtype)
        x2 = layers.data('x2', x_shape, False, dtype)
        x1.persistable = True
        x2.persistable = True
        out = paddle.concat([x1, x2], axis=0)
        x2_arr = np.random.uniform(-1, 1, x_shape).astype(dtype)
        x1_arr = np.random.uniform(-1, 1, x_shape).astype(dtype)

        gradient_checker.double_grad_check(
            [x1, x2], out, x_init=[x1_arr, x2_arr], place=place)
        gradient_checker.double_grad_check_for_dygraph(
            self.concat_wrapper, [x1, x2],
            out,
            x_init=[x1_arr, x2_arr],
            place=place)
Example #51
    def test_slice(self):
        starts = [1, 0, 2]
        ends = [3, 3, 4]
        axes = [0, 1, 2]

        program = Program()
        with program_guard(program):
            input = layers.data(
                name="input", shape=[3, 4, 5, 6], dtype='float32')

            out = layers.slice(input, axes=axes, starts=starts, ends=ends)
Example #52
    def func(self, place):
        x_shape = [2, 3, 4, 5]
        perm = [0, 2, 3, 1]
        dtype = np.float64

        x = layers.data('x', x_shape, False, dtype)
        x.persistable = True
        out = paddle.transpose(x, perm)
        x_arr = np.random.uniform(-1, 1, x_shape).astype(dtype)

        gradient_checker.double_grad_check([x], out, x_init=x_arr, place=place)
Example #53
    def test_gaussian_random_batch_size_like(self):
        program = Program()
        with program_guard(program):
            input = layers.data(name="input", shape=[13, 11], dtype='float32')

            out = layers.gaussian_random_batch_size_like(input,
                                                         shape=[-1, 11],
                                                         mean=1.0,
                                                         std=2.0)
            self.assertIsNotNone(out)
        print(str(program))
Example #54
    def test_save_inference_model(self):
        MODEL_DIR = "./tmp/inference_model2"
        init_program = Program()
        program = Program()

        # fake program without feed/fetch
        with program_guard(program, init_program):
            x = layers.data(name='x', shape=[2], dtype='float32')
            y = layers.data(name='y', shape=[1], dtype='float32')

            y_predict = layers.fc(input=x, size=1, act=None)

            cost = layers.square_error_cost(input=y_predict, label=y)
            avg_cost = layers.mean(cost)

        place = core.CPUPlace()
        exe = executor.Executor(place)
        exe.run(init_program, feed={}, fetch_list=[])

        save_inference_model(MODEL_DIR, ["x", "y"], [avg_cost], exe, program)
Example #55
    def test_sampling_id(self):
        program = Program()
        with program_guard(program):
            x = layers.data(name="X",
                            shape=[13, 11],
                            dtype='float32',
                            append_batch_size=False)

            out = layers.sampling_id(x)
            self.assertIsNotNone(out)
        print(str(program))
Example #56
 def test_sequence_scatter(self):
     program = Program()
     with program_guard(program):
         x = layers.data(name='x',
                         shape=[3, 6],
                         append_batch_size=False,
                         dtype='float32')
         idx = layers.data(name='idx',
                           shape=[12, 1],
                           append_batch_size=False,
                           dtype='int32',
                           lod_level=1)
         updates = layers.data(name='updates',
                               shape=[12, 1],
                               append_batch_size=False,
                               dtype='float32',
                               lod_level=1)
         out = layers.sequence_scatter(input=x, index=idx, updates=updates)
         self.assertIsNotNone(out)
     print(str(program))
Example #57
 def test_lstm_unit(self):
     program = Program()
     with program_guard(program):
         x_t_data = layers.data(name='x_t_data',
                                shape=[10, 10],
                                dtype='float32')
         x_t = layers.fc(input=x_t_data, size=10)
         prev_hidden_data = layers.data(name='prev_hidden_data',
                                        shape=[10, 30],
                                        dtype='float32')
         prev_hidden = layers.fc(input=prev_hidden_data, size=30)
         prev_cell_data = layers.data(name='prev_cell',
                                      shape=[10, 30],
                                      dtype='float32')
         prev_cell = layers.fc(input=prev_cell_data, size=30)
         self.assertIsNotNone(
             layers.lstm_unit(x_t=x_t,
                              hidden_t_prev=prev_hidden,
                              cell_t_prev=prev_cell))
     print(str(program))
Example #58
    def test_linear_chain_crf(self):
        program = Program()
        with program_guard(program, startup_program=Program()):
            label_dict_len = 10
            images = layers.data(name='pixel', shape=[784], dtype='float32')
            label = layers.data(name='label', shape=[1], dtype='int32')
            hidden = layers.fc(input=images, size=128)
            crf = layers.linear_chain_crf(input=hidden,
                                          label=label,
                                          param_attr=ParamAttr(name="crfw"))
            crf_decode = layers.crf_decoding(input=hidden,
                                             param_attr=ParamAttr(name="crfw"))
            layers.chunk_eval(input=crf_decode,
                              label=label,
                              chunk_scheme="IOB",
                              num_chunk_types=(label_dict_len - 1) // 2)
            self.assertIsNotNone(crf)
            self.assertIsNotNone(crf_decode)

        print(str(program))
Example #59
    def test_categorical_distribution(self,
                                      batch_size=2,
                                      dims=3,
                                      tolerance=1e-6):
        test_program = fluid.Program()

        logits_np = np.random.randn(batch_size, dims).astype('float32')
        other_logits_np = np.random.randn(batch_size, dims).astype('float32')

        with fluid.program_guard(test_program):
            logits = layers.data(name='logits', shape=[dims], dtype='float32')
            other_logits = layers.data(name='other_logits',
                                       shape=[dims],
                                       dtype='float32')

            categorical_np = Categorical(logits_np)
            other_categorical_np = Categorical(other_logits_np)

            entropy_np = categorical_np.entropy()
            kl_np = categorical_np.kl_divergence(other_categorical_np)

        self.executor.run(fluid.default_main_program())

        np_categorical = CategoricalNumpy(logits_np)
        np_other_categorical = CategoricalNumpy(other_logits_np)
        gt_entropy_np = np_categorical.entropy()
        gt_kl_np = np_categorical.kl_divergence(np_other_categorical)

        # result calculated by paddle
        [output_entropy_np,
         output_kl_np] = self.executor.run(program=test_program,
                                           feed={'logits': logits_np},
                                           fetch_list=[entropy_np, kl_np])
        np.testing.assert_allclose(output_entropy_np,
                                   gt_entropy_np,
                                   rtol=tolerance,
                                   atol=tolerance)
        np.testing.assert_allclose(output_kl_np,
                                   gt_kl_np,
                                   rtol=tolerance,
                                   atol=tolerance)
Example #60
    def __init__(self, args, dataset):
        self.args = args
        self.dataset = dataset
        self.hidden_size = self.args.hidden_size
        self.embed_dim = self.args.embed_dim
        self.dropout_prob = self.args.dropout_rate
        self.pool_type = self.args.pool_type
        self._init_vars = []

        graph_data = []
        g, label = self.dataset[0]
        graph_data.append(g)
        g, label = self.dataset[1]
        graph_data.append(g)

        batch_graph = pgl.graph.MultiGraph(graph_data)
        graph_data = batch_graph
        graph_data.edge_feat["feat"] = graph_data.edge_feat["feat"].astype(
            "int64")
        graph_data.node_feat["feat"] = graph_data.node_feat["feat"].astype(
            "int64")
        self.graph_wrapper = GraphWrapper(
            name="graph",
            place=F.CPUPlace(),
            node_feat=graph_data.node_feat_info(),
            edge_feat=graph_data.edge_feat_info())

        self.atom_encoder = AtomEncoder(name="atom", emb_dim=self.embed_dim)
        self.bond_encoder = BondEncoder(name="bond", emb_dim=self.embed_dim)

        self.labels = L.data("labels",
                             shape=[None, self.args.num_class],
                             dtype="float32",
                             append_batch_size=False)

        self.unmask = L.data("unmask",
                             shape=[None, self.args.num_class],
                             dtype="float32",
                             append_batch_size=False)

        self.build_model()