Example 1
def get_numeric_gradient(scope,
                         op,
                         inputs,
                         input_to_check,
                         output_names,
                         delta=0.005,
                         in_place=False):

    set_input(scope, op, inputs, core.CPUPlace())
    op.infer_shape(scope)

    tensor_to_check = scope.find_var(input_to_check).get_tensor()

    def product(dim):
        return reduce(lambda a, b: a * b, dim, 1)

    ctx = core.DeviceContext.create(core.CPUPlace())

    def get_output():
        # Run the op once, then sum every requested output tensor.
        op.run(scope, ctx)
        total = 0.0
        for output_name in output_names:
            total += np.array(scope.find_var(output_name).get_tensor()).sum()
        return total

    tensor_size = product(tensor_to_check.get_dims())
    gradient_flat = np.zeros(shape=(tensor_size, ), dtype='float32')
    # Compute the numeric gradient one element at a time: the loop below
    # perturbs each element of the input tensor in turn.
    for i in xrange(tensor_size):
        if in_place:
            set_input(scope, op, inputs, core.CPUPlace())

        # Read one input element through its flat index i.
        origin = tensor_to_check.get_float_element(i)
        # Add delta, run the op, and sum the resulting output tensors.
        x_pos = origin + delta
        tensor_to_check.set_float_element(i, x_pos)
        y_pos = get_output()

        if in_place:
            set_input(scope, op, inputs, core.CPUPlace())

        x_neg = origin - delta
        tensor_to_check.set_float_element(i, x_neg)
        y_neg = get_output()

        tensor_to_check.set_float_element(i, origin)
        gradient_flat[i] = (y_pos - y_neg) / (2 * delta)  # central difference

    return gradient_flat.reshape(tensor_to_check.get_dims())
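
The same central-difference idea can be sketched without the Paddle scope machinery. The following is a minimal NumPy illustration; the function f and the input x are hypothetical stand-ins for the op and the tensor under check:

import numpy as np

def numeric_gradient(f, x, delta=0.005):
    # f is a scalar-valued function of x (stand-in for the op's summed output).
    grad = np.zeros_like(x)
    flat_x = x.reshape(-1)        # view: edits write through to x
    flat_g = grad.reshape(-1)
    for i in range(flat_x.size):
        origin = flat_x[i]
        flat_x[i] = origin + delta
        y_pos = f(x)
        flat_x[i] = origin - delta
        y_neg = f(x)
        flat_x[i] = origin        # restore before moving on
        flat_g[i] = (y_pos - y_neg) / (2 * delta)
    return grad

# Usage: the gradient of sum(x**2) is 2*x.
x = np.random.rand(3, 4).astype('float32')
g = numeric_gradient(lambda t: (t ** 2).sum(), x)
assert np.allclose(g, 2 * x, atol=1e-2)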
Example 2
    def check_grad(self,
                   inputs_to_check,
                   output_names,
                   no_grad_set=None,
                   in_place=False,
                   max_relative_error=0.005):
        self.scope = core.Scope()
        op_inputs = self.inputs if hasattr(self, "inputs") else dict()
        op_attrs = self.attrs if hasattr(self, "attrs") else dict()
        self.op = create_op(self.scope, self.op_type, op_inputs, self.outputs,
                            op_attrs)
        if no_grad_set is None:
            no_grad_set = set()

        if not isinstance(output_names, list):
            output_names = [output_names]

        numeric_grads = [
            get_numeric_gradient(self.scope,
                                 self.op,
                                 self.inputs,
                                 input_to_check,
                                 output_names,
                                 in_place=in_place)
            for input_to_check in inputs_to_check
        ]
        grad_names = [
            grad_var_name(input_to_check) for input_to_check in inputs_to_check
        ]

        cpu_place = core.CPUPlace()
        cpu_analytic_grads = [
            get_gradient(self.scope, self.op, self.inputs, self.outputs,
                         grad_name, cpu_place, no_grad_set)
            for grad_name in grad_names
        ]

        self.__assert_is_close(numeric_grads, cpu_analytic_grads, grad_names,
                               max_relative_error,
                               "Gradient Check On %s" % str(cpu_place))

        if core.is_compile_gpu() and self.op.support_gpu():
            gpu_place = core.GPUPlace(0)
            gpu_analytic_grads = [
                get_gradient(self.scope, self.op, self.inputs, self.outputs,
                             grad_name, gpu_place, no_grad_set)
                for grad_name in grad_names
            ]

            self.__assert_is_close(numeric_grads, gpu_analytic_grads,
                                   grad_names, max_relative_error,
                                   "Gradient Check On %s" % str(gpu_place))

            for c_grad, g_grad, name in itertools.izip(cpu_analytic_grads,
                                                       gpu_analytic_grads,
                                                       grad_names):
                self.assertTrue(np.allclose(c_grad, g_grad, atol=1e-4),
                                "output name: " + name + " has diff")
Example 3
    def forward(self):
        self.scope = core.Scope()
        self.create_global_variables()
        self.create_rnn_op()
        self.create_step_net()
        ctx = core.DeviceContext.create(core.CPUPlace())
        self.rnnop.infer_shape(self.scope)
        self.rnnop.run(self.scope, ctx)
        return np.array(self.scope.find_var("h").get_tensor())
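
The infer_shape-then-run sequence above is the core execution pattern throughout these examples: output shapes are propagated through the op first, and only then does it execute against a device context. Stripped to its essentials (op and scope are assumed to already exist):

ctx = core.DeviceContext.create(core.CPUPlace())
op.infer_shape(scope)  # propagate shapes before running
op.run(scope, ctx)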
Example 4
    def test_lod_tensor_init(self):
        scope = core.Scope()
        place = core.CPUPlace()
        lod_py = [[0, 2, 5], [0, 2, 4, 5]]
        lod_tensor = core.LoDTensor(lod_py)

        lod_tensor.set_dims([5, 2, 3, 4])
        lod_tensor.alloc_float(place)
        tensor_array = numpy.array(lod_tensor)
        tensor_array[0, 0, 0, 0] = 1.0
        tensor_array[0, 0, 0, 1] = 2.0
        lod_tensor.set(tensor_array, place)

        lod_v = numpy.array(lod_tensor)
        self.assertAlmostEqual(1.0, lod_v[0, 0, 0, 0])
        self.assertAlmostEqual(2.0, lod_v[0, 0, 0, 1])
        self.assertListEqual(lod_py, lod_tensor.lod())
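
The LoD (level-of-detail) lists are cumulative row offsets, and both levels here end at 5, the number of rows: the first level splits the rows into sequences [0, 2) and [2, 5), and the second level refines that into [0, 2), [2, 4), [4, 5). A small helper (hypothetical, for illustration) recovers sequence lengths from one offset level:

def lod_to_lengths(offsets):
    # [0, 2, 5] -> [2, 3]
    return [b - a for a, b in zip(offsets, offsets[1:])]

assert lod_to_lengths([0, 2, 5]) == [2, 3]
assert lod_to_lengths([0, 2, 4, 5]) == [2, 2, 1]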
Example 5
        def test_all(self):
            scope = core.Scope()
            kwargs = dict()
            places = [core.CPUPlace()]
            if core.is_compile_gpu():
                places.append(core.GPUPlace(0))

            for place in places:
                for in_name in Operator.get_op_input_names(self.type):
                    if hasattr(self, "inputs") and in_name in self.inputs:
                        kwargs[in_name] = in_name
                        var = scope.new_var(in_name).get_tensor()
                        arr = self.inputs[in_name]
                        var.set_dims(arr.shape)
                        var.set(arr, place)
                    else:
                        kwargs[in_name] = "@EMPTY@"

                for out_name in Operator.get_op_output_names(self.type):
                    if not hasattr(self, "outputs"):
                        raise ValueError(
                            "The test op must set self.outputs dict.")
                    if out_name not in self.outputs:
                        raise ValueError(
                            "The %s is not in self.outputs dict." % (out_name))
                    kwargs[out_name] = out_name
                    scope.new_var(out_name).get_tensor()

                for attr_name in Operator.get_op_attr_names(self.type):
                    if hasattr(self, "attrs") and attr_name in self.attrs:
                        kwargs[attr_name] = self.attrs[attr_name]

                op = Operator(self.type, **kwargs)
                if isinstance(place, core.GPUPlace) and not op.support_gpu():
                    return

                op.infer_shape(scope)

                ctx = core.DeviceContext.create(place)
                op.run(scope, ctx)

                for out_name in Operator.get_op_output_names(self.type):
                    actual = numpy.array(scope.find_var(out_name).get_tensor())
                    expect = self.outputs[out_name]
                    self.assertTrue(numpy.allclose(actual, expect, atol=1e-05),
                                    "output name: " + out_name + " has diff")
Example 6
    def test_int_tensor(self):
        scope = core.Scope()
        var = scope.new_var("test_tensor")
        place = core.CPUPlace()

        tensor = var.get_tensor()

        tensor.set_dims([1000, 784])
        tensor.alloc_int(place)
        tensor_array = numpy.array(tensor)
        self.assertEqual((1000, 784), tensor_array.shape)
        tensor_array[3, 9] = 1
        tensor_array[19, 11] = 2
        tensor.set(tensor_array, place)

        tensor_array_2 = numpy.array(tensor)
        self.assertEqual(1, tensor_array_2[3, 9])
        self.assertEqual(2, tensor_array_2[19, 11])
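
As this test implies, numpy.array(tensor) copies the data out of the tensor: edits to the NumPy array only become visible through the tensor once they are written back. The round trip in condensed form:

arr = numpy.array(tensor)  # copy out
arr[3, 9] = 1              # edit the copy
tensor.set(arr, place)     # copy back in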
Example 7
    def test_int_lod_tensor(self):
        place = core.CPUPlace()
        scope = core.Scope()
        var_lod = scope.new_var("test_lod_tensor")
        lod_tensor = var_lod.get_tensor()

        lod_tensor.set_dims([4, 4, 6])
        lod_tensor.alloc_int(place)
        array = numpy.array(lod_tensor)
        array[0, 0, 0] = 3
        array[3, 3, 5] = 10
        lod_tensor.set(array, place)
        lod_tensor.set_lod([[0, 2, 4]])

        lod_v = numpy.array(lod_tensor)
        self.assertTrue(numpy.alltrue(array == lod_v))

        lod = lod_tensor.lod()
        self.assertEqual(0, lod[0][0])
        self.assertEqual(2, lod[0][1])
        self.assertEqual(4, lod[0][2])
Example 8
    def test_float_lod_tensor(self):
        place = core.CPUPlace()
        scope = core.Scope()
        var_lod = scope.new_var("test_lod_tensor")

        lod_tensor = var_lod.get_tensor()
        lod_tensor.set_dims([5, 2, 3, 4])
        lod_tensor.alloc_float(place)

        tensor_array = numpy.array(lod_tensor)
        self.assertEqual((5, 2, 3, 4), tensor_array.shape)
        tensor_array[0, 0, 0, 0] = 1.0
        tensor_array[0, 0, 0, 1] = 2.0
        lod_tensor.set(tensor_array, place)

        lod_v = numpy.array(lod_tensor)
        self.assertAlmostEqual(1.0, lod_v[0, 0, 0, 0])
        self.assertAlmostEqual(2.0, lod_v[0, 0, 0, 1])
        self.assertEqual(len(lod_tensor.lod()), 0)

        lod_py = [[0, 2, 5], [0, 2, 4, 5]]
        lod_tensor.set_lod(lod_py)
        lod = lod_tensor.lod()
        self.assertListEqual(lod_py, lod)
Example 9
    def test_cpu(self):
        self.gaussian_random_test(place=core.CPUPlace())
Example 10
def create_tensor(scope, name, shape, np_data):
    tensor = scope.new_var(name).get_tensor()
    tensor.set_dims(shape)
    tensor.set(np_data, core.CPUPlace())
    return tensor
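
A usage sketch for this helper (the scope and data here are illustrative):

scope = core.Scope()
data = numpy.random.random((2, 3)).astype("float32")
x_tensor = create_tensor(scope, "X", [2, 3], data)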
Example 11
    def check_output(self):
        places = [core.CPUPlace()]
        if core.is_compile_gpu():
            places.append(core.GPUPlace(0))
        for place in places:
            self.check_output_with_place(place)
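
check_output_with_place is not shown in this listing; a plausible sketch, modeled on the run-and-compare loop in Example 5 (self.op, self.scope, and self.outputs are assumed to be set up elsewhere on the test class):

    def check_output_with_place(self, place):
        # Hypothetical sketch: run the op at `place`, then compare every
        # output tensor against the expected arrays in self.outputs.
        ctx = core.DeviceContext.create(place)
        self.op.infer_shape(self.scope)
        self.op.run(self.scope, ctx)
        for out_name, expect in self.outputs.iteritems():
            actual = numpy.array(self.scope.find_var(out_name).get_tensor())
            self.assertTrue(numpy.allclose(actual, expect, atol=1e-05),
                            "output name: " + out_name + " has diff")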
Example 12
import paddle.v2.framework.core as core
from paddle.v2.framework.op import Operator
import numpy
import paddle.v2 as paddle

BATCH_SIZE = 100

scope = core.Scope()
place = core.CPUPlace()
# To test GPU training, use a GPU place instead:
# place = core.GPUPlace(0)
dev_ctx = core.DeviceContext.create(place)

init_net = core.Net.create()
forward_net = core.Net.create()
backward_net = None
optimize_net = core.Net.create()


def atomic_id():
    # Infinite generator yielding consecutive ids: 0, 1, 2, ...
    counter = 0
    while True:
        yield counter
        counter += 1


uniq_id = atomic_id().next  # Python 2: each call returns the next unique id


def data_layer(name, dims):
    var = scope.new_var(name)
    var.get_tensor().set_dims(dims)  # assumed completion; the source listing truncates here
    return name
Example 13
    def test_uniform_random_cpu(self):
        self.uniform_random_test(place=core.CPUPlace())