Example #1
File: op_test.py Project: hedaoyuan/Paddle
    def check_output_with_place(self, place):
        self.scope = core.Scope()
        op_inputs = self.inputs if hasattr(self, "inputs") else dict()
        op_attrs = self.attrs if hasattr(self, "attrs") else dict()
        self.op = create_op(self.scope, self.op_type, op_inputs, self.outputs,
                            op_attrs)
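        # Skip the check when targeting a GPU place but the op has no GPU kernel.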
        if isinstance(place, core.GPUPlace) and not self.op.support_gpu():
            return
        set_input(self.scope, self.op, self.inputs, place)
        self.op.infer_shape(self.scope)
        ctx = core.DeviceContext.create(place)
        self.op.run(self.scope, ctx)

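        # Compare each output in the scope with the expected numpy arrays;
        # duplicable outputs are stored as several sub-variables per name.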
        for out_name, out_dup in Operator.get_op_outputs(self.op.type()):
            if out_dup:
                sub_out = self.outputs[out_name]
                for sub_out_name in sub_out:
                    actual = np.array(
                        self.scope.find_var(sub_out_name).get_tensor())
                    expect = sub_out[sub_out_name]
                    self.assertTrue(np.allclose(actual, expect, atol=1e-05),
                                    "output name: " + out_name + "has diff")
            else:
                actual = np.array(self.scope.find_var(out_name).get_tensor())
                expect = self.outputs[out_name]
                self.assertTrue(np.allclose(actual, expect, atol=1e-05),
                                "output name: " + out_name + "has diff")
Example #2
File: op_test.py Project: hedaoyuan/Paddle
    def check_grad(self,
                   inputs_to_check,
                   output_names,
                   no_grad_set=None,
                   in_place=False,
                   max_relative_error=0.005):
        self.scope = core.Scope()
        op_inputs = self.inputs if hasattr(self, "inputs") else dict()
        op_attrs = self.attrs if hasattr(self, "attrs") else dict()
        self.op = create_op(self.scope, self.op_type, op_inputs, self.outputs,
                            op_attrs)
        if no_grad_set is None:
            no_grad_set = set()

        if not isinstance(output_names, list):
            output_names = [output_names]

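        # Finite-difference (numeric) gradients serve as the reference values.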
        numeric_grads = [
            get_numeric_gradient(self.scope,
                                 self.op,
                                 self.inputs,
                                 input_to_check,
                                 output_names,
                                 in_place=in_place)
            for input_to_check in inputs_to_check
        ]
        grad_names = [
            grad_var_name(input_to_check) for input_to_check in inputs_to_check
        ]

        cpu_place = core.CPUPlace()
        cpu_analytic_grads = [
            get_gradient(self.scope, self.op, self.inputs, self.outputs,
                         grad_name, cpu_place, no_grad_set)
            for grad_name in grad_names
        ]

        self.__assert_is_close(numeric_grads, cpu_analytic_grads, grad_names,
                               max_relative_error,
                               "Gradient Check On %s" % str(cpu_place))

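        # When compiled with GPU support and the op has a GPU kernel, repeat the
        # analytic-gradient check on GPU and compare it against the CPU results.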
        if core.is_compile_gpu() and self.op.support_gpu():
            gpu_place = core.GPUPlace(0)
            gpu_analytic_grads = [
                get_gradient(self.scope, self.op, self.inputs, self.outputs,
                             grad_name, gpu_place, no_grad_set)
                for grad_name in grad_names
            ]

            self.__assert_is_close(numeric_grads, gpu_analytic_grads,
                                   grad_names, max_relative_error,
                                   "Gradient Check On %s" % str(gpu_place))

            # zip works on both Python 2 and 3; itertools.izip was Python 2 only.
            for c_grad, g_grad, name in zip(cpu_analytic_grads,
                                            gpu_analytic_grads, grad_names):
                self.assertTrue(np.allclose(c_grad, g_grad, atol=1e-4),
                                "output name: " + name + " has diff")
Example #3
    def forward(self):
        self.scope = core.Scope()
        self.create_global_variables()
        self.create_rnn_op()
        self.create_step_net()
        ctx = core.DeviceContext.create(core.CPUPlace())
        self.rnnop.infer_shape(self.scope)
        self.rnnop.run(self.scope, ctx)
        return np.array(self.scope.find_var("h").get_tensor())
Example #4
    def test_add_op(self):
        x = np.random.random((10, 1)).astype("float32")
        y = np.random.random((10, 1)).astype("float32")
        z = x + y
        scope = core.Scope()
        add_op = create_op(scope, "add", {'X': x, 'Y': y}, {'Out': z}, dict())
        arr = get_numeric_gradient(scope, add_op, {
            'X': x,
            'Y': y
        }, 'X', ['Out'])
        self.assertAlmostEqual(arr.mean(), 1.0, delta=1e-4)
Example #5
    def uniform_random_test(self, place):
        scope = core.Scope()
        scope.new_var("X").get_tensor()

        op = Operator("uniform_random",
                      Out="X",
                      dims=[1000, 784],
                      min=-5.0,
                      max=10.0,
                      seed=10)

        op.infer_shape(scope)
        ctx = core.DeviceContext.create(place)
        op.run(scope, ctx)
        tensor = numpy.array(scope.find_var("X").get_tensor())
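        # The sample mean of U(-5.0, 10.0) should be near (min + max) / 2 = 2.5.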
        self.assertAlmostEqual(tensor.mean(), 2.5, delta=0.1)
Example #6
    def gaussian_random_test(self, place):
        scope = core.Scope()
        scope.new_var("Out").get_tensor()

        op = Operator("gaussian_random",
                      Out="Out",
                      dims=[1000, 784],
                      mean=.0,
                      std=1.,
                      seed=10)

        op.infer_shape(scope)
        context = core.DeviceContext.create(place)
        op.run(scope, context)
        tensor = numpy.array(scope.find_var("Out").get_tensor())
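        # With mean=0 and std=1, the sample mean and std should be near 0 and 1.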
        self.assertAlmostEqual(numpy.mean(tensor), .0, delta=0.1)
        self.assertAlmostEqual(numpy.std(tensor), 1., delta=0.1)
Example #7
    def test_lod_tensor_init(self):
        scope = core.Scope()
        place = core.CPUPlace()
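        # Two-level LoD in offset form: each level lists cumulative sequence
        # boundaries; the last offset (5) matches the tensor's first dimension.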
        lod_py = [[0, 2, 5], [0, 2, 4, 5]]
        lod_tensor = core.LoDTensor(lod_py)

        lod_tensor.set_dims([5, 2, 3, 4])
        lod_tensor.alloc_float(place)
        tensor_array = numpy.array(lod_tensor)
        tensor_array[0, 0, 0, 0] = 1.0
        tensor_array[0, 0, 0, 1] = 2.0
        lod_tensor.set(tensor_array, place)

        lod_v = numpy.array(lod_tensor)
        self.assertAlmostEqual(1.0, lod_v[0, 0, 0, 0])
        self.assertAlmostEqual(2.0, lod_v[0, 0, 0, 1])
        self.assertListEqual(lod_py, lod_tensor.lod())
Example #8
        def test_all(self):
            scope = core.Scope()
            kwargs = dict()
            places = [core.CPUPlace()]
            if core.is_compile_gpu():
                places.append(core.GPUPlace(0))

            for place in places:
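                # Bind every declared input to a scope variable; inputs the test
                # does not provide are passed as the @EMPTY@ placeholder name.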
                for in_name in Operator.get_op_input_names(self.type):
                    if hasattr(self, "inputs") and in_name in self.inputs:
                        kwargs[in_name] = in_name
                        var = scope.new_var(in_name).get_tensor()
                        arr = self.inputs[in_name]
                        var.set_dims(arr.shape)
                        var.set(arr, place)
                    else:
                        kwargs[in_name] = "@EMPTY@"

                for out_name in Operator.get_op_output_names(self.type):
                    if not hasattr(self, "outputs"):
                        raise ValueError(
                            "The test op must set self.outputs dict.")
                    if out_name not in self.outputs:
                        raise ValueError(
                            "The %s is not in self.outputs dict." % (out_name))
                    kwargs[out_name] = out_name
                    scope.new_var(out_name).get_tensor()

                for attr_name in Operator.get_op_attr_names(self.type):
                    if hasattr(self, "attrs") and attr_name in self.attrs:
                        kwargs[attr_name] = self.attrs[attr_name]

                op = Operator(self.type, **kwargs)
                if isinstance(place, core.GPUPlace) and not op.support_gpu():
                    return

                op.infer_shape(scope)

                ctx = core.DeviceContext.create(place)
                op.run(scope, ctx)

                for out_name in Operator.get_op_output_names(self.type):
                    actual = numpy.array(scope.find_var(out_name).get_tensor())
                    expect = self.outputs[out_name]
                    self.assertTrue(numpy.allclose(actual, expect, atol=1e-05),
                                    "output name: " + out_name + "has diff")
Example #9
    def test_int_tensor(self):
        scope = core.Scope()
        var = scope.new_var("test_tensor")
        place = core.CPUPlace()

        tensor = var.get_tensor()

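        # Allocate integer storage on the place before viewing it as numpy.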
        tensor.set_dims([1000, 784])
        tensor.alloc_int(place)
        tensor_array = numpy.array(tensor)
        self.assertEqual((1000, 784), tensor_array.shape)
        tensor_array[3, 9] = 1
        tensor_array[19, 11] = 2
        tensor.set(tensor_array, place)

        tensor_array_2 = numpy.array(tensor)
        self.assertEqual(1, tensor_array_2[3, 9])
        self.assertEqual(2, tensor_array_2[19, 11])
Example #10
    def test_int_lod_tensor(self):
        place = core.CPUPlace()
        scope = core.Scope()
        var_lod = scope.new_var("test_lod_tensor")
        lod_tensor = var_lod.get_tensor()

        lod_tensor.set_dims([4, 4, 6])
        lod_tensor.alloc_int(place)
        array = numpy.array(lod_tensor)
        array[0, 0, 0] = 3
        array[3, 3, 5] = 10
        lod_tensor.set(array, place)
        lod_tensor.set_lod([[0, 2, 4]])

        lod_v = numpy.array(lod_tensor)
        self.assertTrue(numpy.alltrue(array == lod_v))

        lod = lod_tensor.lod()
        self.assertEqual(0, lod[0][0])
        self.assertEqual(2, lod[0][1])
        self.assertEqual(4, lod[0][2])
Example #11
    def test_softmax_op(self):
        def stable_softmax(x):
            """Compute the softmax of vector x in a numerically stable way."""
            shiftx = x - np.max(x)
            exps = np.exp(shiftx)
            return exps / np.sum(exps)

        def label_softmax_grad(Y, dY):
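            # Row-wise softmax backward: dX[i] = Y[i] * (dY[i] - dot(Y[i], dY[i])).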
            dX = Y * 0.0
            for i in range(Y.shape[0]):
                d = np.dot(Y[i, :], dY[i, :])
                dX[i, :] = Y[i, :] * (dY[i, :] - d)
            return dX

        X = np.random.random((2, 2)).astype("float32")
        Y = np.apply_along_axis(stable_softmax, 1, X)
        dY = np.ones(Y.shape)
        dX = label_softmax_grad(Y, dY)

        scope = core.Scope()
        softmax_op = create_op(scope, "softmax", {"X": X}, {"Y": Y}, dict())

        arr = get_numeric_gradient(scope, softmax_op, {"X": X}, "X", "Y")
        # assert_almost_equal takes an integer count of decimal places, not a
        # tolerance: decimal=2 checks agreement to roughly 1e-2.
        np.testing.assert_almost_equal(arr, dX, decimal=2)
Example #12
    def test_float_lod_tensor(self):
        place = core.CPUPlace()
        scope = core.Scope()
        var_lod = scope.new_var("test_lod_tensor")

        lod_tensor = var_lod.get_tensor()
        lod_tensor.set_dims([5, 2, 3, 4])
        lod_tensor.alloc_float(place)

        tensor_array = numpy.array(lod_tensor)
        self.assertEqual((5, 2, 3, 4), tensor_array.shape)
        tensor_array[0, 0, 0, 0] = 1.0
        tensor_array[0, 0, 0, 1] = 2.0
        lod_tensor.set(tensor_array, place)

        lod_v = numpy.array(lod_tensor)
        self.assertAlmostEqual(1.0, lod_v[0, 0, 0, 0])
        self.assertAlmostEqual(2.0, lod_v[0, 0, 0, 1])
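        # A newly allocated tensor carries an empty LoD until set_lod is called.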
        self.assertEqual(len(lod_tensor.lod()), 0)

        lod_py = [[0, 2, 5], [0, 2, 4, 5]]
        lod_tensor.set_lod(lod_py)
        lod = lod_tensor.lod()
        self.assertListEqual(lod_py, lod)
Example #13
import paddle.v2.framework.core as core
from paddle.v2.framework.op import Operator
import numpy
import paddle.v2 as paddle

BATCH_SIZE = 100

scope = core.Scope()
place = core.CPUPlace()
# if you want to test GPU training, you can use gpu place
# place = core.GPUPlace(0)
dev_ctx = core.DeviceContext.create(place)

init_net = core.Net.create()
forward_net = core.Net.create()
backward_net = None
optimize_net = core.Net.create()


def atomic_id():
    id = 0
    while True:
        yield id
        id += 1


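# Bind the generator's __next__ method so each uniq_id() call returns the next
# integer: 0, 1, 2, ...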
uniq_id = atomic_id().__next__  # was atomic_id().next under Python 2


def data_layer(name, dims):
    var = scope.new_var(name)