    def test_forward(self):
        data = layers.data(name='X', shape=[1], dtype='float32')
        data.stop_gradient = False
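        # ops created inside cond.block() form a sub-block run by the
        # ConditionalBlock op; layers.assign copies the block-local fc result
        # into 'out' so it is visible outside the block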
        cond = layers.ConditionalBlock(inputs=[data])
        out = layers.create_tensor(dtype='float32')
        with cond.block():
            hidden = layers.fc(input=data, size=10)
            layers.assign(hidden, out)

        cpu = core.CPUPlace()
        exe = Executor(cpu)
        exe.run(default_startup_program())

        x = numpy.random.random(size=(10, 1)).astype('float32')

        outs = exe.run(feed={'X': x}, fetch_list=[out])[0]
        print(outs)
        loss = layers.mean(x=out)
        append_backward_ops(loss=loss)
        outs = exe.run(feed={'X': x},
                       fetch_list=[
                           default_main_program().block(0).var(data.name +
                                                               "@GRAD")
                       ])[0]
        print(outs)

    def test_shrink_rnn_memory(self):
        x = layers.data('x', shape=[100], dtype='float32')
        x.stop_gradient = False
        table = layers.lod_rank_table(x=x)
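        # lod_rank_table orders the level-0 sequences of 'x' by length
        # (longest first); shrink_memory uses it to drop, at step i, the rows
        # whose sequences have already ended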
        i = layers.zeros(dtype='int64', shape=[1])
        mem1 = layers.shrink_memory(x=x, i=i, table=table)
        i = layers.increment(x=i)
        i.stop_gradient = True
        mem2 = layers.shrink_memory(x=mem1, i=i, table=table)
        i = layers.increment(x=i)
        i.stop_gradient = True
        mem3 = layers.shrink_memory(x=mem2, i=i, table=table)

        cpu = core.CPUPlace()
        tensor = core.LoDTensor()
        tensor.set_lod([[0, 2, 5, 6]])
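        # LoD [[0, 2, 5, 6]] marks three sequences of lengths 2, 3 and 1, so
        # steps 0, 1 and 2 keep 3, 2 and 1 rows respectively (checked below)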
        tensor_np = numpy.random.random(size=(3, 100)).astype('float32')
        tensor.set(tensor_np, cpu)
        exe = Executor(cpu)
        outs = exe.run(feed={'x': tensor}, fetch_list=[mem1, mem2, mem3])
        self.assertTrue(numpy.allclose(tensor_np[0:3], outs[0]))
        self.assertTrue(numpy.allclose(tensor_np[0:2], outs[1]))
        self.assertTrue(numpy.allclose(tensor_np[0:1], outs[2]))

        mem3_mean = layers.mean(x=mem3)
        append_backward_ops(loss=mem3_mean)
        x_grad = exe.run(
            feed={'x': tensor},
            fetch_list=[
                default_main_program().global_block().var('x@GRAD')
            ])[0]
        self.assertAlmostEqual(1.0, x_grad.sum(), delta=0.1)

    def test_grad(self):
        place = core.CPUPlace()
        program = Program()

        x = layers.data(
            name='x',
            shape=[1],
            dtype='float32',
            main_program=program,
            stop_gradient=False)
        y = layers.data(
            name='y',
            shape=[1],
            dtype='bool',
            main_program=program,
            stop_gradient=False)

        level = 0
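        # split_lod_tensor routes each level-0 sequence to out_true or
        # out_false according to 'mask'; merge_lod_tensor reassembles them in
        # the original order, so 'out' equals 'x' and d(mean)/dx sums to 1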

        out_true, out_false = layers.split_lod_tensor(
            input=x, mask=y, level=level, main_program=program)
        out = layers.merge_lod_tensor(
            in_true=out_true,
            in_false=out_false,
            mask=y,
            x=x,
            level=level,
            main_program=program)
        mean = layers.mean(x=out, main_program=program)

        append_backward_ops(mean)

        tensor = core.LoDTensor()
        tensor.set(np.arange(10).reshape(10, 1).astype('float32'), place)
        tensor.set_lod([[0, 3, 9, 10]])
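        # three sequences (lengths 3, 6 and 1); the mask below selects a
        # branch per sequence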

        mask_np = np.array([0, 1, 0]).astype('bool')
        mask_np = np.expand_dims(mask_np, axis=1)

        mask = core.LoDTensor()
        mask.set(mask_np, place)

        exe = Executor(place)
        scope = core.Scope()

        g_vars = program.global_block().var(x.name + "@GRAD")
        g_out = [
            item.sum()
            for item in map(np.array,
                            exe.run(program,
                                    feed={'x': tensor,
                                          'y': mask},
                                    fetch_list=[g_vars],
                                    scope=scope,
                                    return_numpy=False))
        ]

        g_out_sum = np.array(g_out).sum()

        self.assertAlmostEqual(1.0, g_out_sum, delta=0.1)
    def test_grad(self):
        place = core.CPUPlace()
        program = Program()

        x = layers.data(
            name='x',
            shape=[1],
            dtype='float32',
            main_program=program,
            stop_gradient=False)
        table = layers.lod_rank_table(x, level=0, main_program=program)
        array = layers.lod_tensor_to_array(x, table, main_program=program)
        result = layers.array_to_lod_tensor(array, table, main_program=program)
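        # x -> array -> tensor is an identity round trip, so the gradient of
        # mean(result) w.r.t. x should again sum to 1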

        mean = layers.mean(x=result, main_program=program)

        append_backward_ops(mean)

        tensor = core.LoDTensor()
        tensor.set(numpy.arange(10).reshape(10, 1).astype('float32'), place)
        tensor.set_lod([[0, 3, 9, 10]])

        g_vars = program.global_block().var(x.name + "@GRAD")

        exe = Executor(place)
        g_out = [
            numpy.array(item).sum()
            for item in exe.run(program,
                                feed={'x': tensor},
                                fetch_list=[g_vars],
                                return_numpy=False)
        ]
        g_out_sum = numpy.array(g_out).sum()

        self.assertAlmostEqual(1.0, g_out_sum, delta=0.1)
# Example No. 7
    def test_recognize_digits_conv(self):
        program = Program()
        with program_guard(program, startup_program=Program()):
            images = layers.data(name='pixel',
                                 shape=[1, 28, 28],
                                 dtype='float32')
            label = layers.data(name='label', shape=[1], dtype='int32')
            conv_pool_1 = nets.simple_img_conv_pool(input=images,
                                                    filter_size=5,
                                                    num_filters=2,
                                                    pool_size=2,
                                                    pool_stride=2,
                                                    act="relu")
            conv_pool_2 = nets.simple_img_conv_pool(input=conv_pool_1,
                                                    filter_size=5,
                                                    num_filters=4,
                                                    pool_size=2,
                                                    pool_stride=2,
                                                    act="relu")

            predict = layers.fc(input=conv_pool_2, size=10, act="softmax")
            cost = layers.cross_entropy(input=predict, label=label)
            avg_cost = layers.mean(x=cost)

            program.append_backward(avg_cost)

        print(str(program))
# Example No. 9
    def test_ifelse(self):
        kwargs = {'startup_program': Program(), 'main_program': Program()}
        image = layers.data(name='x', shape=[784], dtype='float32', **kwargs)

        label = layers.data(name='y', shape=[1], dtype='int64', **kwargs)

        limit = layers.fill_constant_batch_size_like(
            input=label, dtype='int64', shape=[1], value=5.0, **kwargs)

        cond = layers.less_than(x=label, y=limit, **kwargs)

        ie = layers.IfElse(cond, **kwargs)
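        # IfElse partitions the batch by 'cond' (label < 5): each branch
        # processes only its own rows, and ie() merges the branch outputs
        # back into one batch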

        with ie.true_block():
            true_image = ie.input(image)
            hidden = layers.fc(input=true_image, size=100, act='tanh', **kwargs)
            prob = layers.fc(input=hidden, size=10, act='softmax', **kwargs)
            ie.output(prob)

        with ie.false_block():
            false_image = ie.input(image)
            hidden = layers.fc(input=false_image,
                               size=200,
                               act='tanh',
                               **kwargs)
            prob = layers.fc(input=hidden, size=10, act='softmax', **kwargs)
            ie.output(prob)

        prob = ie()
        loss = layers.cross_entropy(input=prob[0], label=label, **kwargs)
        avg_loss = layers.mean(x=loss, **kwargs)

        optimizer = MomentumOptimizer(learning_rate=0.001, momentum=0.9)
        optimizer.minimize(avg_loss, kwargs['startup_program'])
        train_reader = paddle.batch(
            paddle.reader.shuffle(
                paddle.dataset.mnist.train(), buf_size=8192),
            batch_size=200)

        place = core.CPUPlace()
        exe = Executor(place)

        exe.run(kwargs['startup_program'])
        PASS_NUM = 100
        for pass_id in range(PASS_NUM):
            for data in train_reader():
                x_data = np.array([item[0] for item in data]).astype("float32")
                y_data = np.array([item[1] for item in data]).astype("int64")
                y_data = y_data.reshape((y_data.shape[0], 1))

                outs = exe.run(kwargs['main_program'],
                               feed={'x': x_data,
                                     'y': y_data},
                               fetch_list=[avg_loss])
                print(outs[0])
                if outs[0] < 1.0:
                    return
        self.fail("loss never fell below 1.0")
# Example No. 10
    def setUp(self):
        self.setup_program()
        self.data_field = {"x", "h_boot"}

        self.input_shape = (self.sent_len, self.batch_size, self.input_dim)
        self.output_shape = (self.sent_len, self.batch_size, self.input_dim)
        self.py_rnn = PySimpleRNN1(self.input_shape, self.output_shape)

        self.output = layers.mean(x=self.create_rnn_op(), **self.p_info)
# Example No. 12
    def setUp(self):
        self.setup_program()

        self.data_field = {"x"}

        self.input_shape = (self.sent_len, self.batch_size, self.input_dim)
        self.output_shape = (self.sent_len, self.batch_size, self.input_dim)
        self.py_rnn = RecurrentOpNoMemBootTest.PySimpleRNN4(self.input_shape,
                                                            self.output_shape)
        self.output = layers.mean(x=self.create_rnn_op(), **self.p_info)
        print(self.main_program)
# Example No. 13
    def setUp(self):
        self.setup_program()

        self.data_field = {"x", "h_boot1", "h_boot2"}

        self.input_shape = (self.sent_len, self.batch_size, self.input_dim)
        self.output_shape = (self.sent_len, self.batch_size, self.input_dim)
        self.py_rnn = RecurrentOpMultipleMemoryTest.PySimpleRNN3(
            self.input_shape, self.output_shape)

        self.output = layers.mean(x=self.create_rnn_op(), **self.p_info)
# Example No. 16
    def test_simple_forward(self):
        d0 = layers.data(
            "d0", shape=[10], append_batch_size=False, dtype='float32')
        d1 = layers.data(
            "d1", shape=[10], append_batch_size=False, dtype='float32')
        d2 = layers.data(
            "d2", shape=[10], append_batch_size=False, dtype='float32')
        i = layers.zeros(shape=[1], dtype='int64')
        i.stop_gradient = True
        init = layers.zeros(shape=[10], dtype='float32')
        mem_array = layers.array_write(x=init, i=i)
        data_array = layers.array_write(x=d0, i=i)

        i = layers.increment(i)
        layers.array_write(d1, i, array=data_array)

        i = layers.increment(i)
        layers.array_write(d2, i, array=data_array)

        i = layers.zeros(shape=[1], dtype='int64')
        i.stop_gradient = True

        array_len = layers.fill_constant(shape=[1], dtype='int64', value=3)
        array_len.stop_gradient = True
        cond = layers.less_than(x=i, y=array_len)

        while_op = layers.While(cond=cond)
        with while_op.block():
            d = layers.array_read(array=data_array, i=i)
            prev = layers.array_read(array=mem_array, i=i)
            result = layers.sums(input=[d, prev])

            i = layers.increment(x=i, in_place=True)
            layers.array_write(result, i=i, array=mem_array)
            layers.less_than(x=i, y=array_len, cond=cond)
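            # re-evaluating less_than into 'cond' inside the block is what
            # lets the While op terminate once i reaches array_len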

        sum_result = layers.array_read(array=mem_array, i=i)
        loss = layers.mean(x=sum_result)

        append_backward_ops(loss)

        cpu = core.CPUPlace()
        exe = Executor(cpu)
        d = []

        for _ in range(3):
            d.append(numpy.random.random(size=[10]).astype('float32'))

        outs = exe.run(feed={'d0': d[0],
                             'd1': d[1],
                             'd2': d[2]},
                       fetch_list=[sum_result])
        self.assertAlmostEqual(numpy.sum(d), numpy.sum(outs[0]), delta=0.01)
# Example No. 17
    def test_fit_a_line(self):
        program = Program()
        with program_guard(program, startup_program=Program()):
            x = layers.data(name='x', shape=[13], dtype='float32')
            y_predict = layers.fc(input=x, size=1, act=None)
            y = layers.data(name='y', shape=[1], dtype='float32')
            cost = layers.square_error_cost(input=y_predict, label=y)
            avg_cost = layers.mean(x=cost)
            self.assertIsNotNone(avg_cost)
            program.append_backward(avg_cost)

        print(str(program))
# Example No. 19
    def test_recognize_digits_mlp(self):
        program = Program()
        with program_guard(program, startup_program=Program()):
            # program_guard switches the default program, so the layers below
            # are built into `program`
            images = layers.data(name='pixel', shape=[784], dtype='float32')
            label = layers.data(name='label', shape=[1], dtype='int32')
            hidden1 = layers.fc(input=images, size=128, act='relu')
            hidden2 = layers.fc(input=hidden1, size=64, act='relu')
            predict = layers.fc(input=hidden2, size=10, act='softmax')
            cost = layers.cross_entropy(input=predict, label=label)
            avg_cost = layers.mean(x=cost)
            self.assertIsNotNone(avg_cost)

        print(str(program))
# Example No. 21
def model():
    usr_combined_features = get_usr_combined_features()
    mov_combined_features = get_mov_combined_features()

    # need cos sim
    inference = layers.cos_sim(X=usr_combined_features, Y=mov_combined_features)

    label = layers.data(name='score', shape=[1], dtype='float32')

    square_cost = layers.square_error_cost(input=inference, label=label)

    avg_cost = layers.mean(x=square_cost)

    return avg_cost
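
A minimal, hypothetical training driver for model(), sketched after the
executor pattern the other examples in this file use (SGDOptimizer, Executor
and framework.default_startup_program are the names those examples assume).
The train() wrapper and the learning rate are illustrative; the actual feed
keys are defined by get_usr_combined_features / get_mov_combined_features and
are not shown here.

def train():
    avg_cost = model()
    sgd = optimizer.SGDOptimizer(learning_rate=0.001)
    sgd.minimize(avg_cost)

    place = core.CPUPlace()
    exe = Executor(place)
    exe.run(framework.default_startup_program())
    # each pass then runs the main program over a paddle.batch reader,
    # feeding every data layer (the user/movie feature inputs plus 'score')
    # and fetching the cost:
    #   outs = exe.run(framework.default_main_program(),
    #                  feed={...}, fetch_list=[avg_cost])
    return exe, avg_cost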
# Example No. 23
    def test_word_embedding(self):
        program = Program()
        with program_guard(program, startup_program=Program()):
            dict_size = 10000
            embed_size = 32
            first_word = layers.data(name='firstw', shape=[1], dtype='int64')
            second_word = layers.data(name='secondw', shape=[1], dtype='int64')
            third_word = layers.data(name='thirdw', shape=[1], dtype='int64')
            forth_word = layers.data(name='forthw', shape=[1], dtype='int64')
            next_word = layers.data(name='nextw', shape=[1], dtype='int64')

            embed_first = layers.embedding(
                input=first_word,
                size=[dict_size, embed_size],
                dtype='float32',
                param_attr='shared_w')
            embed_second = layers.embedding(
                input=second_word,
                size=[dict_size, embed_size],
                dtype='float32',
                param_attr='shared_w')

            embed_third = layers.embedding(
                input=third_word,
                size=[dict_size, embed_size],
                dtype='float32',
                param_attr='shared_w')
            embed_forth = layers.embedding(
                input=forth_word,
                size=[dict_size, embed_size],
                dtype='float32',
                param_attr='shared_w')
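            # all four embeddings share one weight matrix via
            # param_attr='shared_w'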

            concat_embed = layers.concat(
                input=[embed_first, embed_second, embed_third, embed_forth],
                axis=1)

            hidden1 = layers.fc(input=concat_embed, size=256, act='sigmoid')
            predict_word = layers.fc(input=hidden1,
                                     size=dict_size,
                                     act='softmax')
            cost = layers.cross_entropy(input=predict_word, label=next_word)
            avg_cost = layers.mean(x=cost)
            self.assertIsNotNone(avg_cost)

        print(str(program))
# Example No. 26
# TODO(dzhwinter): refine the initializer and random seed setting
SIZE = 10
input_shape = conv_pool_2.shape
param_shape = [reduce(lambda a, b: a * b, input_shape[1:], 1)] + [SIZE]
scale = (2.0 / (param_shape[0]**2 * SIZE))**0.5
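# param_shape flattens conv_pool_2's per-image dims into the fc fan-in
# (functools.reduce under Python 3); 'scale' is the normal-initializer
# standard deviation derived from that fan-in and SIZE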

predict = layers.fc(input=conv_pool_2,
                    size=SIZE,
                    act="softmax",
                    param_initializer=NormalInitializer(loc=0.0,
                                                        scale=scale,
                                                        seed=SEED))

cost = layers.cross_entropy(input=predict, label=label)
avg_cost = layers.mean(x=cost)
optimizer = AdamOptimizer(learning_rate=0.001, beta1=0.9, beta2=0.999)
opts = optimizer.minimize(avg_cost)

accuracy = evaluator.Accuracy(input=predict, label=label)

train_reader = paddle.batch(paddle.dataset.mnist.train(),
                            batch_size=BATCH_SIZE)

place = core.CPUPlace()
exe = Executor(place)

exe.run(framework.default_startup_program())

for pass_id in range(PASS_NUM):
    accuracy.reset(exe)

    def test_read_write(self):
        x = [
            layers.data(name='x0', shape=[100]),
            layers.data(name='x1', shape=[100]),
            layers.data(name='x2', shape=[100])
        ]

        for each_x in x:
            each_x.stop_gradient = False

        i = layers.zeros(shape=[1], dtype='int64')
        i.stop_gradient = False
        arr = layers.array_write(x=x[0], i=i)
        i = layers.increment(x=i)
        arr = layers.array_write(x=x[1], i=i, array=arr)
        i = layers.increment(x=i)
        arr = layers.array_write(x=x[2], i=i, array=arr)

        i = layers.zeros(shape=[1], dtype='int64')
        i.stop_gradient = False
        a0 = layers.array_read(array=arr, i=i)
        i = layers.increment(x=i)
        a1 = layers.array_read(array=arr, i=i)
        i = layers.increment(x=i)
        a2 = layers.array_read(array=arr, i=i)

        mean_a0 = layers.mean(x=a0)
        mean_a1 = layers.mean(x=a1)
        mean_a2 = layers.mean(x=a2)

        a_sum = layers.sums(input=[mean_a0, mean_a1, mean_a2])
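        # the mean of each tensor read back from the array must match the
        # mean of the corresponding input, so a_sum should equal x_sum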

        mean_x0 = layers.mean(x=x[0])
        mean_x1 = layers.mean(x=x[1])
        mean_x2 = layers.mean(x=x[2])

        x_sum = layers.sums(input=[mean_x0, mean_x1, mean_x2])

        scope = core.Scope()
        cpu = core.CPUPlace()

        exe = Executor(cpu)

        tensor = numpy.random.random(size=(100, 100)).astype('float32')

        outs = exe.run(feed={
            'x0': tensor,
            'x1': tensor,
            'x2': tensor
        },
                       fetch_list=[a_sum, x_sum],
                       scope=scope)
        self.assertEqual(outs[0], outs[1])

        total_sum = layers.sums(input=[a_sum, x_sum])
        total_sum_scaled = layers.scale(x=total_sum, scale=1 / 6.0)

        append_backward_ops(total_sum_scaled)

        g_vars = [
            default_main_program().global_block().var(each_x.name + "@GRAD")
            for each_x in x
        ]
        g_out = [
            item.sum() for item in exe.run(feed={
                'x0': tensor,
                'x1': tensor,
                'x2': tensor
            },
                                           fetch_list=g_vars)
        ]
        g_out_sum = numpy.array(g_out).sum()

        # since the final gradient is 1 and the network is linear (array
        # read/write, sums, scale and mean ops only), each input gradient
        # should also sum to 1
        self.assertAlmostEqual(1.0, g_out_sum, delta=0.1)
# Example No. 28
    def test_raw_api(self):
        kwargs = {'startup_program': Program(), 'main_program': Program()}
        image = layers.data(name='x', shape=[784], dtype='float32', **kwargs)

        label = layers.data(name='y', shape=[1], dtype='int64', **kwargs)

        limit = layers.fill_constant_batch_size_like(
            input=label, dtype='int64', shape=[1], value=5.0, **kwargs)

        cond = layers.less_than(x=label, y=limit, **kwargs)
        true_image, false_image = layers.split_lod_tensor(
            input=image, mask=cond, **kwargs)

        true_out = layers.create_tensor(dtype='float32', **kwargs)
        true_cond = layers.ConditionalBlock([true_image], **kwargs)

        with true_cond.block():
            hidden = layers.fc(input=true_image, size=100, act='tanh', **kwargs)
            prob = layers.fc(input=hidden, size=10, act='softmax', **kwargs)
            layers.assign(input=prob, output=true_out, **kwargs)

        false_out = layers.create_tensor(dtype='float32', **kwargs)
        false_cond = layers.ConditionalBlock([false_image], **kwargs)

        with false_cond.block():
            hidden = layers.fc(input=false_image,
                               size=200,
                               act='tanh',
                               **kwargs)
            prob = layers.fc(input=hidden, size=10, act='softmax', **kwargs)
            layers.assign(input=prob, output=false_out, **kwargs)

        prob = layers.merge_lod_tensor(
            in_true=true_out, in_false=false_out, mask=cond, x=image, **kwargs)
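        # merge_lod_tensor recombines the two branch outputs into a single
        # batch, using the same mask that split the input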
        loss = layers.cross_entropy(input=prob, label=label, **kwargs)
        avg_loss = layers.mean(x=loss, **kwargs)

        optimizer = MomentumOptimizer(learning_rate=0.001, momentum=0.9)
        optimizer.minimize(avg_loss, kwargs['startup_program'])

        train_reader = paddle.batch(
            paddle.reader.shuffle(
                paddle.dataset.mnist.train(), buf_size=8192),
            batch_size=200)

        place = core.CPUPlace()
        exe = Executor(place)

        exe.run(kwargs['startup_program'])
        PASS_NUM = 100
        for pass_id in range(PASS_NUM):
            for data in train_reader():
                x_data = np.array([item[0] for item in data]).astype("float32")
                y_data = np.array([item[1] for item in data]).astype("int64")
                y_data = np.expand_dims(y_data, axis=1)

                outs = exe.run(kwargs['main_program'],
                               feed={'x': x_data,
                                     'y': y_data},
                               fetch_list=[avg_loss])
                print(outs[0])
                if outs[0] < 1.0:
                    return
        self.fail("loss never fell below 1.0")

    def test_fit_line_inference_model(self):
        MODEL_DIR = "./tmp/inference_model"

        init_program = Program()
        program = Program()
        x = layers.data(
            name='x',
            shape=[2],
            dtype='float32',
            main_program=program,
            startup_program=init_program)
        y = layers.data(
            name='y',
            shape=[1],
            dtype='float32',
            main_program=program,
            startup_program=init_program)

        y_predict = layers.fc(input=x,
                              size=1,
                              act=None,
                              main_program=program,
                              startup_program=init_program)

        cost = layers.square_error_cost(
            input=y_predict,
            label=y,
            main_program=program,
            startup_program=init_program)
        avg_cost = layers.mean(
            x=cost, main_program=program, startup_program=init_program)

        sgd_optimizer = optimizer.SGDOptimizer(learning_rate=0.001)
        sgd_optimizer.minimize(avg_cost, init_program)

        place = core.CPUPlace()
        exe = executor.Executor(place)

        exe.run(init_program, feed={}, fetch_list=[])

        for i in range(100):
            tensor_x = np.array(
                [[1, 1], [1, 2], [3, 4], [5, 2]]).astype("float32")
            tensor_y = np.array([[-2], [-3], [-7], [-7]]).astype("float32")

            exe.run(program,
                    feed={'x': tensor_x,
                          'y': tensor_y},
                    fetch_list=[avg_cost])

        save_inference_model(MODEL_DIR, ["x", "y"], [avg_cost], exe, program)
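        # record the cost from the live program so it can be compared with
        # the output of the reloaded inference program below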
        expected = exe.run(program,
                           feed={'x': tensor_x,
                                 'y': tensor_y},
                           fetch_list=[avg_cost])[0]

        importlib.reload(executor)  # reload to build a new scope
        exe = executor.Executor(place)

        [infer_prog, feed_var_names, fetch_vars] = load_inference_model(
            MODEL_DIR, exe)

        outs = exe.run(
            infer_prog,
            feed={feed_var_names[0]: tensor_x,
                  feed_var_names[1]: tensor_y},
            fetch_list=fetch_vars)
        actual = outs[0]

        self.assertEqual(feed_var_names, ["x", "y"])
        self.assertEqual(len(fetch_vars), 1)
        self.assertEqual(str(fetch_vars[0]), str(avg_cost))
        self.assertEqual(expected, actual)