Example #1
    def test_selected_rows(self):
        place = core.CPUPlace()
        height = 10
        rows = [0, 4, 7]
        row_numel = 12
        selected_rows = core.SelectedRows(rows, height)
        np_array = np.ones((len(rows), row_numel)).astype("float32")
        np_array[0, 0] = 2.0
        np_array[2, 8] = 4.0
        tensor = selected_rows.get_tensor()
        tensor.set(np_array, place)

        # compare rows
        self.assertEqual(0, selected_rows.rows()[0])
        self.assertEqual(4, selected_rows.rows()[1])
        self.assertEqual(7, selected_rows.rows()[2])

        # compare height
        self.assertEqual(10, selected_rows.height())

        # compare tensor
        self.assertAlmostEqual(2.0,
                               selected_rows.get_tensor().get_float_element(0))
        self.assertAlmostEqual(1.0,
                               selected_rows.get_tensor().get_float_element(1))
        self.assertAlmostEqual(
            4.0,
            selected_rows.get_tensor().get_float_element(2 * row_numel + 8))
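These snippets predate the paddle.fluid package rename, so they all lean on a common set of legacy imports that the examples never show. A minimal sketch, assuming the old paddle.v2.fluid layout (the module paths are an assumption, not part of the snippets):

    import numpy
    import numpy as np
    import paddle.v2.fluid.core as core
    import paddle.v2.fluid.layers as layers
    from paddle.v2.fluid.executor import Executor
    from paddle.v2.fluid.framework import (Program, default_main_program,
                                           default_startup_program)
    from paddle.v2.fluid.backward import append_backward_ops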
Example #2
    def test_forward(self):
        data = layers.data(name='X', shape=[1], dtype='float32')
        data.stop_gradient = False
        cond = layers.ConditionalBlock(inputs=[data])
        out = layers.create_tensor(dtype='float32')
        with cond.block():
            hidden = layers.fc(input=data, size=10)
            layers.assign(hidden, out)

        cpu = core.CPUPlace()
        exe = Executor(cpu)
        exe.run(default_startup_program())

        x = numpy.random.random(size=(10, 1)).astype('float32')

        outs = exe.run(feed={'X': x}, fetch_list=[out])[0]
        print(outs)
        loss = layers.mean(x=out)
        append_backward_ops(loss=loss)
        outs = exe.run(feed={'X': x},
                       fetch_list=[
                           default_main_program().block(0).var(data.name +
                                                               "@GRAD")
                       ])[0]
        print(outs)
Example #3
    def test_grad(self):
        place = core.CPUPlace()
        program = Program()

        x = layers.data(name='x',
                        shape=[1],
                        dtype='float32',
                        main_program=program,
                        stop_gradient=False)
        table = layers.lod_rank_table(x, level=0, main_program=program)
        array = layers.lod_tensor_to_array(x, table, main_program=program)
        result = layers.array_to_lod_tensor(array, table, main_program=program)

        mean = layers.mean(x=result, main_program=program)

        append_backward_ops(mean)

        tensor = core.LoDTensor()
        tensor.set(numpy.arange(10).reshape(10, 1).astype('float32'), place)
        tensor.set_lod([[0, 3, 9, 10]])

        g_vars = program.global_block().var(x.name + "@GRAD")

        exe = Executor(place)
        g_out = [
            numpy.array(item).sum() for item in exe.run(program,
                                                        feed={'x': tensor},
                                                        fetch_list=[g_vars],
                                                        return_numpy=False)
        ]
        g_out_sum = numpy.array(g_out).sum()

        # mean over the 10 inputs gives dmean/dx_i = 0.1 each, so the grads sum to 1.0
        self.assertAlmostEqual(1.0, g_out_sum, delta=0.1)
Example #4
    def setUp(self):
        self.program = Program()
        self.fake_program = Program()
        self.place = core.CPUPlace()

        self.input_names = ['X', 'Out']
        self.input_vars = {
            name: self.program.global_block().create_var(name=name,
                                                         shape=[2, 3],
                                                         dtype='float32')
            for name in self.input_names
        }
        self.input_vars["Out@GRAD"] = \
            self.fake_program.global_block().create_var(
                name="Out@GRAD", shape=[2, 3], dtype='float32')

        self.output_names = ['X@GRAD']
        self.output_vars = {
            name: self.program.global_block().create_var(name=name,
                                                         shape=[2, 3],
                                                         dtype='float32')
            for name in self.output_names
        }

        self.program.global_block().append_op(type='rnn_memory_helper_grad',
                                              inputs=self.input_vars,
                                              outputs=self.output_vars,
                                              attrs={})
Example #5
    def test_get_set(self):
        scope = core.Scope()
        arr = scope.var('tmp_lod_tensor_array')
        tensor_array = arr.get_lod_tensor_array()
        self.assertEqual(0, len(tensor_array))
        cpu = core.CPUPlace()
        for i in xrange(10):
            t = core.LoDTensor()
            t.set(numpy.array([i], dtype='float32'), cpu)
            t.set_lod([[0, 1]])
            tensor_array.append(t)

        self.assertEqual(10, len(tensor_array))

        for i in xrange(10):
            t = tensor_array[i]
            self.assertEqual(numpy.array(t), numpy.array([i], dtype='float32'))
            self.assertEqual([[0, 1]], t.lod())

            t = core.LoDTensor()
            t.set(numpy.array([i + 10], dtype='float32'), cpu)
            t.set_lod([[0, 2]])
            tensor_array[i] = t
            t = tensor_array[i]
            self.assertEqual(numpy.array(t),
                             numpy.array([i + 10], dtype='float32'))
            self.assertEqual([[0, 2]], t.lod())
Example #6
    def test_shrink_rnn_memory(self):
        x = layers.data('x', shape=[100], dtype='float32')
        x.stop_gradient = False
        table = layers.lod_rank_table(x=x)
        i = layers.zeros(dtype='int64', shape=[1])
        mem1 = layers.shrink_memory(x=x, i=i, table=table)
        i = layers.increment(x=i)
        i.stop_gradient = True
        mem2 = layers.shrink_memory(x=mem1, i=i, table=table)
        i = layers.increment(x=i)
        i.stop_gradient = True
        mem3 = layers.shrink_memory(x=mem2, i=i, table=table)

        cpu = core.CPUPlace()
        tensor = core.LoDTensor()
        tensor.set_lod([[0, 2, 5, 6]])  # three sequences of lengths 2, 3, and 1
        tensor_np = numpy.random.random(size=(3, 100)).astype('float32')
        tensor.set(tensor_np, cpu)
        exe = Executor(cpu)
        outs = exe.run(feed={'x': tensor}, fetch_list=[mem1, mem2, mem3])
        self.assertTrue(numpy.allclose(tensor_np[0:3], outs[0]))
        self.assertTrue(numpy.allclose(tensor_np[0:2], outs[1]))
        self.assertTrue(numpy.allclose(tensor_np[0:1], outs[2]))

        mem3_mean = layers.mean(x=mem3)
        append_backward_ops(loss=mem3_mean)
        x_grad = exe.run(
            feed={'x': tensor},
            fetch_list=[main_program.global_block().var('x@GRAD')])[0]
        self.assertAlmostEqual(1.0, x_grad.sum(), delta=0.1)
Example #7
 def setUp(self):
     self.scope = core.Scope()
     self.ctx = core.DeviceContext.create(core.CPUPlace())
     self._create_ids()
     self._create_scores()
     self._create_pre_ids()
     self.scope.var('selected_ids')
     self.scope.var('selected_scores')
Example #8
 def setup_program(self):
     self.main_program = Program()
     self.startup_program = Program()
     self.p_info = {
         "main_program": self.main_program,
         "startup_program": self.startup_program
     }
     self.place = core.CPUPlace()
Example #9
 def forward(self):
     self.scope = core.Scope()
     self.create_global_variables()
     self.create_cond_op()
     self.create_sub_net()
     ctx = core.DeviceContext.create(core.CPUPlace())
     self.condop.run(self.scope, ctx)
     return np.array(self.scope.find_var("Out").get_tensor())
Example #10
 def test_array_length(self):
     tmp = layers.zeros(shape=[10], dtype='int32')
     i = layers.fill_constant(shape=[1], dtype='int64', value=10)
     arr = layers.array_write(tmp, i=i)
     arr_len = layers.array_length(arr)
     cpu = core.CPUPlace()
     exe = Executor(cpu)
     result = exe.run(fetch_list=[arr_len])[0]
     self.assertEqual(11, result[0])  # writing at index 10 grew the array to length 11
Example #11
    def test_simple_forward(self):
        d0 = layers.data(
            "d0", shape=[10], append_batch_size=False, dtype='float32')
        d1 = layers.data(
            "d1", shape=[10], append_batch_size=False, dtype='float32')
        d2 = layers.data(
            "d2", shape=[10], append_batch_size=False, dtype='float32')
        i = layers.zeros(shape=[1], dtype='int64')
        i.stop_gradient = True
        init = layers.zeros(shape=[10], dtype='float32')
        mem_array = layers.array_write(x=init, i=i)
        data_array = layers.array_write(x=d0, i=i)

        i = layers.increment(i)
        layers.array_write(d1, i, array=data_array)

        i = layers.increment(i)
        layers.array_write(d2, i, array=data_array)

        i = layers.zeros(shape=[1], dtype='int64')
        i.stop_gradient = True

        array_len = layers.fill_constant(shape=[1], dtype='int64', value=3)
        array_len.stop_gradient = True
        cond = layers.less_than(x=i, y=array_len)

        while_op = layers.While(cond=cond)
        with while_op.block():
            d = layers.array_read(array=data_array, i=i)
            prev = layers.array_read(array=mem_array, i=i)
            result = layers.sums(input=[d, prev])

            i = layers.increment(x=i, in_place=True)
            layers.array_write(result, i=i, array=mem_array)
            layers.less_than(x=i, y=array_len, cond=cond)

        sum_result = layers.array_read(array=mem_array, i=i)
        loss = layers.mean(x=sum_result)

        append_backward_ops(loss)

        cpu = core.CPUPlace()
        exe = Executor(cpu)
        d = []

        for i in xrange(3):
            d.append(numpy.random.random(size=[10]).astype('float32'))

        outs = exe.run(feed={'d0': d[0],
                             'd1': d[1],
                             'd2': d[2]},
                       fetch_list=[sum_result])
        self.assertAlmostEqual(numpy.sum(d), numpy.sum(outs[0]), delta=0.01)
Example #12
def run_benchmark(model, args):
    if args.use_cprof:
        pr = cProfile.Profile()
        pr.enable()
    start_time = time.time()
    word_dict = paddle.dataset.imdb.word_dict()

    print("load word dict successfully")

    dict_dim = len(word_dict)

    data = fluid.layers.data(name="words",
                             shape=[1],
                             dtype="int64",
                             lod_level=1)
    label = fluid.layers.data(name="label", shape=[1], dtype="int64")

    prediction = model(data, dict_dim)
    cost = fluid.layers.cross_entropy(input=prediction, label=label)
    avg_cost = fluid.layers.mean(x=cost)
    adam_optimizer = fluid.optimizer.Adam(learning_rate=0.002)
    adam_optimizer.minimize(avg_cost)
    accuracy = fluid.evaluator.Accuracy(input=prediction, label=label)

    train_reader = paddle.batch(paddle.reader.shuffle(
        paddle.dataset.imdb.train(word_dict), buf_size=25000),
                                batch_size=args.batch_size)
    place = core.CPUPlace() if args.device == 'CPU' else core.CUDAPlace(0)
    exe = fluid.Executor(place)
    exe.run(fluid.default_startup_program())

    for it, pass_id in enumerate(xrange(args.pass_num)):
        accuracy.reset(exe)
        if it == args.iterations:
            break
        for data in train_reader():
            tensor_words = to_lodtensor(map(lambda x: x[0], data), place)

            label = np.array(map(lambda x: x[1], data)).astype("int64")
            label = label.reshape([args.batch_size, 1])

            tensor_label = fluid.LoDTensor()
            tensor_label.set(label, place)

            loss, acc = exe.run(fluid.default_main_program(),
                                feed={
                                    "words": tensor_words,
                                    "label": tensor_label
                                },
                                fetch_list=[avg_cost] + accuracy.metrics)
            pass_acc = accuracy.eval(exe)
            print("Iter: %d, loss: %s, acc: %s, pass_acc: %s" %
                  (it, str(loss), str(acc), str(pass_acc)))
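Both run_benchmark variants (and the translation example further down) call a to_lodtensor helper that the snippets never define. A minimal sketch in the style of the legacy fluid benchmarks, assuming each element of data is a list of int64 token ids:

    def to_lodtensor(data, place):
        # Record cumulative sequence offsets, e.g. lengths [3, 6, 1] -> [0, 3, 9, 10].
        lod = [0]
        for seq in data:
            lod.append(lod[-1] + len(seq))
        # Flatten all sequences into a single [total_len, 1] int64 column.
        flattened = np.concatenate(data, axis=0).astype("int64")
        flattened = flattened.reshape([len(flattened), 1])
        res = fluid.LoDTensor()
        res.set(flattened, place)
        res.set_lod([lod])
        return res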
Example #13
def run_benchmark(model, args):
    if args.use_cprof:
        pr = cProfile.Profile()
        pr.enable()
    start_time = time.time()
    word_dict = paddle.dataset.imdb.word_dict()

    print("load word dict successfully")

    dict_dim = len(word_dict)
    data = fluid.layers.data(name="words",
                             shape=[args.seq_len * args.batch_size, 1],
                             append_batch_size=False,
                             dtype="int64",
                             lod_level=1)
    label = fluid.layers.data(name="label",
                              shape=[args.batch_size, 1],
                              append_batch_size=False,
                              dtype="int64")
    prediction = model(data, dict_dim)
    cost = fluid.layers.cross_entropy(input=prediction, label=label)
    avg_cost = fluid.layers.mean(x=cost)
    adam_optimizer = fluid.optimizer.Adam(learning_rate=0.002)
    adam_optimizer.minimize(avg_cost)
    accuracy = fluid.evaluator.Accuracy(input=prediction, label=label)

    train_reader = paddle.batch(
        paddle.reader.shuffle(paddle.dataset.imdb.train(word_dict),
                              buf_size=25000),  # only for speed
        batch_size=args.batch_size)
    place = core.CPUPlace() if args.device == 'CPU' else core.CUDAPlace(0)
    exe = fluid.Executor(place)
    exe.run(fluid.default_startup_program())
    for it, pass_id in enumerate(xrange(args.pass_num)):
        accuracy.reset(exe)
        if it == args.iterations:
            break
        for batch_id, data in enumerate(train_reader()):
            chopped_data = chop_data(data,
                                     chop_len=args.seq_len,
                                     batch_size=args.batch_size)
            tensor_words, tensor_label = prepare_feed_data(chopped_data, place)

            loss, acc = exe.run(fluid.default_main_program(),
                                feed={
                                    "words": tensor_words,
                                    "label": tensor_label
                                },
                                fetch_list=[avg_cost] + accuracy.metrics)
            pass_acc = accuracy.eval(exe)
            print("pass=%d, batch=%d, loss=%f, acc=%f, pass_acc=%f" %
                  (it, batch_id, loss, acc, pass_acc))
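This variant also relies on chop_data and prepare_feed_data, which are not shown. A plausible sketch, assuming chop_data keeps only samples whose word sequence has at least chop_len tokens and trims each to exactly that length:

    def chop_data(data, chop_len=80, batch_size=50):
        # data is a list of (word_ids, label) pairs; drop short sequences,
        # trim the rest to chop_len, and cap the batch size.
        data = [(x[0][:chop_len], x[1]) for x in data if len(x[0]) >= chop_len]
        return data[:batch_size]

    def prepare_feed_data(data, place):
        tensor_words = to_lodtensor([x[0] for x in data], place)
        label = np.array([x[1] for x in data]).astype("int64")
        label = label.reshape([len(data), 1])
        tensor_label = fluid.LoDTensor()
        tensor_label.set(label, place)
        return tensor_words, tensor_label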
Example #14
    def setUp(self):
        self.program = Program()
        self.place = core.CPUPlace()

        self.X = self.program.global_block().create_var(name='X',
                                                        shape=[2, 3],
                                                        dtype='float32')
        self.Out = self.program.global_block().create_var(name='Out',
                                                          shape=[2, 3],
                                                          dtype='float32')
        self.program.global_block().append_op(type='rnn_memory_helper',
                                              inputs={"X": self.X},
                                              outputs={"Out": self.Out},
                                              attrs={})
Example #15
def paddle_random_normal(shape, loc=.0, scale=1., seed=1, dtype="float32"):
    program = framework.Program()
    block = program.global_block()
    w = block.create_var(
        dtype="float32",
        shape=shape,
        lod_level=0,
        name="param",
        initializer=initializer.NormalInitializer(
            loc=.0, scale=scale, seed=seed))
    place = core.CPUPlace()
    exe = Executor(place)
    out = exe.run(program, fetch_list=[w])
    return np.array(out[0])
Example #16
def run_benchmark(model, args):
    if args.use_cprof:
        pr = cProfile.Profile()
        pr.enable()
    start_time = time.time()
    images = fluid.layers.data(name='pixel', shape=[1, 28, 28], dtype=DTYPE)
    label = fluid.layers.data(name='label', shape=[1], dtype='int64')
    predict = model(images)

    cost = fluid.layers.cross_entropy(input=predict, label=label)
    avg_cost = fluid.layers.mean(x=cost)
    opt = fluid.optimizer.AdamOptimizer(
        learning_rate=0.001, beta1=0.9, beta2=0.999)
    opt.minimize(avg_cost)

    accuracy = fluid.evaluator.Accuracy(input=predict, label=label)

    train_reader = paddle.batch(
        paddle.dataset.mnist.train(), batch_size=args.batch_size)

    place = core.CPUPlace()
    exe = fluid.Executor(place)

    exe.run(fluid.default_startup_program())

    for pass_id in range(args.pass_num):
        accuracy.reset(exe)
        pass_start = time.clock()
        for batch_id, data in enumerate(train_reader()):
            img_data = np.array(
                map(lambda x: x[0].reshape([1, 28, 28]), data)).astype(DTYPE)
            y_data = np.array(map(lambda x: x[1], data)).astype("int64")
            y_data = y_data.reshape([len(y_data), 1])

            start = time.clock()
            outs = exe.run(fluid.default_main_program(),
                           feed={"pixel": img_data,
                                 "label": y_data},
                           fetch_list=[avg_cost] + accuracy.metrics)
            end = time.clock()
            loss = np.array(outs[0])
            acc = np.array(outs[1])
            print("pass=%d, batch=%d, loss=%f, error=%f, elapse=%f" %
                  (pass_id, batch_id, loss, 1 - acc, (end - start) / 1000))

        pass_end = time.clock()
        test_avg_acc = eval_test(exe, accuracy, avg_cost)
        pass_acc = accuracy.eval(exe)
        print("pass=%d, test_avg_acc=%f, test_avg_acc=%f, elapse=%f" %
              (pass_id, pass_acc, test_avg_acc, (pass_end - pass_start) / 1000))
Example #17
    def test_lod_rank_table(self):
        x = data(name='x', shape=[100])
        cpu = core.CPUPlace()
        rank_table = lod_rank_table(x=x, level=1)
        rank_table.persistable = True
        exe = Executor(cpu)
        scope = core.Scope()

        tensor = core.LoDTensor()
        tensor.set(numpy.random.random(size=(17, 100)), cpu)
        tensor.set_lod([[0, 1, 3], [0, 5, 6, 7], [0, 3, 4, 9, 10, 13, 16, 17]])
        exe.run(scope=scope, feed={'x': tensor})
        var = scope.find_var(rank_table.name)
        table = var.get_lod_rank_table()
        self.assertEqual([(0, 5), (1, 1), (2, 1)], table.items())
Example #18
 def test_mul(self):
     a = data(name='a', shape=[784], dtype='float32')
     b = data(
         name='b',
         shape=[784, 100],
         dtype='float32',
         append_batch_size=False)
     out = mul(x=a, y=b)
     place = core.CPUPlace()
     a_np = numpy.random.random((100, 784)).astype('float32')
     b_np = numpy.random.random((784, 100)).astype('float32')
     exe = Executor(place)
     outs = exe.run(feed={'a': a_np, 'b': b_np}, fetch_list=[out])
     out = outs[0]
     self.assertEqual((100, 100), out.shape)
     self.assertTrue(numpy.allclose(out, numpy.dot(a_np, b_np)))
Example #19
    def check_grad(self,
                   inputs_to_check,
                   output_names,
                   no_grad_set=None,
                   numeric_grad_delta=0.005,
                   in_place=False,
                   max_relative_error=0.005,
                   user_defined_grads=None):
        self.scope = core.Scope()
        op_inputs = self.inputs if hasattr(self, "inputs") else dict()
        op_outputs = self.outputs if hasattr(self, "outputs") else dict()
        op_attrs = self.attrs if hasattr(self, "attrs") else dict()
        self.op = create_op(self.scope, self.op_type, op_inputs, op_outputs,
                            op_attrs)

        if no_grad_set is None:
            no_grad_set = set()

        if not isinstance(output_names, list):
            output_names = [output_names]

        numeric_grads = user_defined_grads or [
            get_numeric_gradient(
                self.scope,
                self.op,
                self.inputs,
                input_to_check,
                output_names,
                delta=numeric_grad_delta,
                in_place=in_place) for input_to_check in inputs_to_check
        ]
        cpu_place = core.CPUPlace()
        cpu_analytic_grads = self._get_gradient(inputs_to_check, cpu_place,
                                                output_names, no_grad_set)

        self.__assert_is_close(numeric_grads, cpu_analytic_grads,
                               inputs_to_check, max_relative_error,
                               "Gradient Check On %s" % str(cpu_place))

        if core.is_compile_gpu() and self.op.support_gpu():
            gpu_place = core.GPUPlace(0)
            gpu_analytic_grads = self._get_gradient(inputs_to_check, gpu_place,
                                                    output_names, no_grad_set)

            self.__assert_is_close(numeric_grads, gpu_analytic_grads,
                                   inputs_to_check, max_relative_error,
                                   "Gradient Check On %s" % str(gpu_place))
Example #20
    def test_lod_tensor_init(self):
        scope = core.Scope()
        place = core.CPUPlace()
        lod_py = [[0, 2, 5], [0, 2, 4, 5]]
        lod_tensor = core.LoDTensor(lod_py)

        lod_tensor.set_dims([5, 2, 3, 4])
        lod_tensor.alloc_float(place)
        tensor_array = numpy.array(lod_tensor)
        tensor_array[0, 0, 0, 0] = 1.0
        tensor_array[0, 0, 0, 1] = 2.0
        lod_tensor.set(tensor_array, place)

        lod_v = numpy.array(lod_tensor)
        self.assertAlmostEqual(1.0, lod_v[0, 0, 0, 0])
        self.assertAlmostEqual(2.0, lod_v[0, 0, 0, 1])
        self.assertListEqual(lod_py, lod_tensor.lod())
Example #21
 def test_param(self):
     shape = [784, 100]
     val = 1.0625
     b = main_program.global_block()
     param = b.create_parameter(name='fc.w',
                                shape=shape,
                                dtype='float32',
                                initializer=ConstantInitializer(val))
     self.assertIsNotNone(param)
     self.assertEqual('fc.w', param.name)
     self.assertEqual((784, 100), param.shape)
     self.assertEqual(core.DataType.FP32, param.dtype)
     self.assertEqual(0, param.block.idx)
     exe = Executor(core.CPUPlace())
     p = exe.run(main_program, fetch_list=[param])[0]
     self.assertTrue(np.allclose(p, np.ones(shape) * val))
     p = io.get_parameter_value_by_name('fc.w', exe, main_program)
     self.assertTrue(np.allclose(np.array(p), np.ones(shape) * val))
Example #22
    def test_int_tensor(self):
        scope = core.Scope()
        var = scope.var("test_tensor")
        place = core.CPUPlace()

        tensor = var.get_tensor()

        tensor.set_dims([1000, 784])
        tensor.alloc_int(place)
        tensor_array = numpy.array(tensor)
        self.assertEqual((1000, 784), tensor_array.shape)
        tensor_array[3, 9] = 1
        tensor_array[19, 11] = 2
        tensor.set(tensor_array, place)

        tensor_array_2 = numpy.array(tensor)
        self.assertEqual(1, tensor_array_2[3, 9])
        self.assertEqual(2, tensor_array_2[19, 11])
Example #23
    def test_int_lod_tensor(self):
        place = core.CPUPlace()
        scope = core.Scope()
        var_lod = scope.var("test_lod_tensor")
        lod_tensor = var_lod.get_tensor()

        lod_tensor.set_dims([4, 4, 6])
        lod_tensor.alloc_int(place)
        array = numpy.array(lod_tensor)
        array[0, 0, 0] = 3
        array[3, 3, 5] = 10
        lod_tensor.set(array, place)
        lod_tensor.set_lod([[0, 2, 4]])

        lod_v = numpy.array(lod_tensor)
        self.assertTrue(numpy.alltrue(array == lod_v))

        lod = lod_tensor.lod()
        self.assertEqual(0, lod[0][0])
        self.assertEqual(2, lod[0][1])
        self.assertEqual(4, lod[0][2])
Example #24
    def test_feed_fetch(self):
        scope = core.Scope()
        place = core.CPUPlace()
        input_array = np.ones((4, 4, 6)).astype("float32")
        input_array[0, 0, 0] = 3
        input_array[3, 3, 5] = 10
        input_tensor = core.LoDTensor([[0, 2, 4]])
        input_tensor.set(input_array, place)

        core.set_feed_variable(scope, input_tensor, "feed", 0)

        output_tensor = core.get_fetch_variable(scope, "feed", 0)

        output_lod = output_tensor.lod()
        self.assertEqual(0, output_lod[0][0])
        self.assertEqual(2, output_lod[0][1])
        self.assertEqual(4, output_lod[0][2])

        output_array = np.array(output_tensor)
        self.assertEqual(3, output_array[0, 0, 0])
        self.assertEqual(10, output_array[3, 3, 5])
Example #25
def main():
    rnn_out = encoder_decoder()
    label = layers.data(name="target_language_next_word",
                        shape=[1],
                        dtype='int64',
                        lod_level=1)
    cost = layers.cross_entropy(input=rnn_out, label=label)
    avg_cost = fluid.layers.mean(x=cost)

    optimizer = fluid.optimizer.Adagrad(learning_rate=1e-4)
    optimizer.minimize(avg_cost)

    train_data = paddle.batch(paddle.reader.shuffle(
        paddle.dataset.wmt14.train(dict_size), buf_size=1000),
                              batch_size=batch_size)

    place = core.CPUPlace()
    exe = Executor(place)

    exe.run(framework.default_startup_program())

    batch_id = 0
    for pass_id in xrange(2):
        for data in train_data():
            word_data = to_lodtensor(map(lambda x: x[0], data), place)
            trg_word = to_lodtensor(map(lambda x: x[1], data), place)
            trg_word_next = to_lodtensor(map(lambda x: x[2], data), place)
            outs = exe.run(framework.default_main_program(),
                           feed={
                               'src_word_id': word_data,
                               'target_language_word': trg_word,
                               'target_language_next_word': trg_word_next
                           },
                           fetch_list=[avg_cost])
            avg_cost_val = np.array(outs[0])
            print('pass_id=' + str(pass_id) + ' batch=' + str(batch_id) +
                  " avg_cost=" + str(avg_cost_val))
            if batch_id > 3:
                exit(0)
            batch_id += 1
Example #26
    def test_float_lod_tensor(self):
        place = core.CPUPlace()
        scope = core.Scope()
        var_lod = scope.var("test_lod_tensor")

        lod_tensor = var_lod.get_tensor()
        lod_tensor.set_dims([5, 2, 3, 4])
        lod_tensor.alloc_float(place)

        tensor_array = numpy.array(lod_tensor)
        self.assertEqual((5, 2, 3, 4), tensor_array.shape)
        tensor_array[0, 0, 0, 0] = 1.0
        tensor_array[0, 0, 0, 1] = 2.0
        lod_tensor.set(tensor_array, place)

        lod_v = numpy.array(lod_tensor)
        self.assertAlmostEqual(1.0, lod_v[0, 0, 0, 0])
        self.assertAlmostEqual(2.0, lod_v[0, 0, 0, 1])
        self.assertEqual(len(lod_tensor.lod()), 0)

        lod_py = [[0, 2, 5], [0, 2, 4, 5]]
        lod_tensor.set_lod(lod_py)
        lod = lod_tensor.lod()
        self.assertListEqual(lod_py, lod)
Example #27
                    act="softmax",
                    param_initializer=NormalInitializer(loc=0.0,
                                                        scale=scale,
                                                        seed=SEED))

cost = layers.cross_entropy(input=predict, label=label)
avg_cost = layers.mean(x=cost)
optimizer = AdamOptimizer(learning_rate=0.001, beta1=0.9, beta2=0.999)
opts = optimizer.minimize(avg_cost)

accuracy = evaluator.Accuracy(input=predict, label=label)

train_reader = paddle.batch(paddle.dataset.mnist.train(),
                            batch_size=BATCH_SIZE)

place = core.CPUPlace()
exe = Executor(place)

exe.run(framework.default_startup_program())

for pass_id in range(PASS_NUM):
    accuracy.reset(exe)
    pass_start = time.clock()
    for batch_id, data in enumerate(train_reader()):
        img_data = np.array(map(lambda x: x[0].reshape([1, 28, 28]),
                                data)).astype(DTYPE)
        y_data = np.array(map(lambda x: x[1], data)).astype("int64")
        y_data = y_data.reshape([len(y_data), 1])

        tensor_img = core.LoDTensor()
        tensor_y = core.LoDTensor()
Example #28
 def place(self):
     return core.CPUPlace()
Example #29
 def test_sparse_sgd(self):
     places = [core.CPUPlace()]
     if core.is_compile_gpu():
         places.append(core.GPUPlace(0))
     for place in places:
         self.check_with_place(place)
Example #30
def create_tensor(scope, name, shape, np_data):
    tensor = scope.var(name).get_tensor()
    tensor.set_dims(shape)
    tensor.set(np_data, core.CPUPlace())
    return tensor
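A short usage sketch for the helper above (the variable names are illustrative):

    scope = core.Scope()
    w = np.random.random((2, 3)).astype("float32")
    tensor = create_tensor(scope, "W", [2, 3], w)
    # The scope now owns a variable named "W" whose tensor holds w.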