Example #1
 def _get_places(self):
     if self.dtype == np.float16:
         if core.is_compiled_with_cuda() and core.op_support_gpu(
                 self.op_type):
             place = core.CUDAPlace(0)
             if core.is_float16_supported(place):
                 return [place]
         else:
             return []
     places = [fluid.CPUPlace()]
     if core.is_compiled_with_cuda() and core.op_support_gpu(self.op_type):
         places.append(core.CUDAPlace(0))
     return places
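Most of the examples on this page share one place-selection pattern: start from a CPU place and append a CUDA place only when the binary was compiled with CUDA and the op has a GPU kernel. A minimal, self-contained sketch of that pattern (the helper name get_test_places and the op name "batch_norm" are only illustrative):

    import paddle.fluid as fluid
    import paddle.fluid.core as core

    def get_test_places(op_type="batch_norm"):
        # Always test on CPU; add a GPU place only when both the build
        # and the op support it.
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda() and core.op_support_gpu(op_type):
            places.append(fluid.CUDAPlace(0))
        return places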
Example #2
    def test_static(self):
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda() and core.op_support_gpu("batch_norm"):
            places.append(fluid.CUDAPlace(0))
        for p in places:
            exe = fluid.Executor(p)
            shape = [4, 10, 16, 16]

            def compute(x_np, is_test, trainable_statistics):
                with program_guard(Program(), Program()):
                    bn = fluid.dygraph.BatchNorm(
                        shape[1],
                        is_test=is_test,
                        trainable_statistics=trainable_statistics)
                    x = fluid.data(name='x',
                                   shape=x_np.shape,
                                   dtype=x_np.dtype)
                    y = bn(x)
                    exe.run(fluid.default_startup_program())
                    r = exe.run(feed={'x': x_np}, fetch_list=[y])[0]
                return r

            x = np.random.randn(*shape).astype("float32")
            y1 = compute(x, False, False)
            y2 = compute(x, True, True)
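            # is_test=True with trainable_statistics=True still normalizes with
            # the current batch's statistics, so both runs should match.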
            self.assertTrue(np.allclose(y1, y2))
Example #3
 def test_name(self):
     places = [fluid.CPUPlace()]
     if core.is_compiled_with_cuda() and core.op_support_gpu("batch_norm"):
         places.append(fluid.CUDAPlace(0))
     for p in places:
         with fluid.dygraph.guard(p):
             batch_norm1d = paddle.nn.BatchNorm1D(1, name="test")
Example #4
    def test_numerical_accuracy(self):
        paddle.disable_static()
        shapes = [(2, 6), (2, 6, 4), (2, 6, 4, 4), (2, 6, 6, 6, 2),
                  (2, 6, 6, 6, 2, 3)]
        np.random.seed(10)
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda() and core.op_support_gpu("group_norm"):
            places.append(fluid.CUDAPlace(0))

        for place in places:
            for shape in shapes:
                scale = np.array([1]).astype("float32")
                bias = np.array([0]).astype("float32")
                data = np.random.random(shape).astype("float32")
                expect_res1 = group_norm_naive_for_general_dimension(
                    data, scale, bias, epsilon=1e-5, groups=6)
                expect_res2 = group_norm_naive_for_general_dimension(
                    data, scale, bias, epsilon=1e-5, groups=2)

                gn1 = paddle.nn.GroupNorm(num_channels=6, num_groups=6)
                gn2 = paddle.nn.GroupNorm(num_channels=6, num_groups=2)
                data_pd = paddle.to_tensor(data)
                result1 = gn1(data_pd).numpy()
                result2 = gn2(data_pd).numpy()
                self.assertTrue(np.allclose(result1, expect_res1, atol=1e-5))
                self.assertTrue(np.allclose(result2, expect_res2, atol=1e-5))
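The reference helper group_norm_naive_for_general_dimension is called above but not included in this excerpt. A plausible NumPy sketch of such a reference, assuming an (N, C, ...) layout with channels on axis 1 and scale/bias broadcast over the channel axis (the function name and signature are taken from the call site), is:

    import numpy as np

    def group_norm_naive_for_general_dimension(x, scale, bias, epsilon, groups):
        # Flatten each (sample, group) slice, normalize it with its own mean and
        # variance, then restore the shape and apply the affine parameters.
        N = x.shape[0]
        grouped = x.reshape((N * groups, -1))
        mean = grouped.mean(axis=1, keepdims=True)
        var = grouped.var(axis=1, keepdims=True)
        normalized = ((grouped - mean) / np.sqrt(var + epsilon)).reshape(x.shape)
        affine_shape = (1, -1) + (1,) * (x.ndim - 2)
        return normalized * scale.reshape(affine_shape) + bias.reshape(affine_shape)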
Example #5
    def test_static(self):
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda() and core.op_support_gpu(
                "instance_norm"):
            places.append(fluid.CUDAPlace(0))
        for p in places:
            exe = fluid.Executor(p)
            shape = [4, 10, 16, 16]

            def compute_v1(x_np):
                with program_guard(Program(), Program()):
                    ins = fluid.dygraph.InstanceNorm(shape[1])
                    x = fluid.data(name='x',
                                   shape=x_np.shape,
                                   dtype=x_np.dtype)
                    y = ins(x)
                    exe.run(fluid.default_startup_program())
                    r = exe.run(feed={'x': x_np}, fetch_list=[y])[0]
                return r

            def compute_v2(x_np):
                with program_guard(Program(), Program()):
                    ins = paddle.nn.InstanceNorm2D(shape[1])
                    x = fluid.data(name='x',
                                   shape=x_np.shape,
                                   dtype=x_np.dtype)
                    y = ins(x)
                    exe.run(fluid.default_startup_program())
                    r = exe.run(feed={'x': x_np}, fetch_list=[y])[0]
                return r

            x = np.random.randn(*shape).astype("float32")
            y1 = compute_v1(x)
            y2 = compute_v2(x)
            self.assertTrue(np.allclose(y1, y2))
Example #6
    def test_dygraph(self):
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda() and core.op_support_gpu("group_norm"):
            places.append(fluid.CUDAPlace(0))
        for p in places:
            shape = [2, 2, 2, 2]

            def compute_v1(x):
                with fluid.dygraph.guard(p):
                    gn = fluid.dygraph.GroupNorm(channels=2, groups=2)
                    y = gn(fluid.dygraph.to_variable(x))
                return y.numpy()

            def compute_v2(x):
                with fluid.dygraph.guard(p):
                    gn = paddle.nn.GroupNorm(num_channels=2, num_groups=2)
                    y = gn(fluid.dygraph.to_variable(x))
                return y.numpy()

            def test_weight_bias_false():
                with fluid.dygraph.guard(p):
                    gn = paddle.nn.GroupNorm(
                        num_channels=2,
                        num_groups=2,
                        weight_attr=False,
                        bias_attr=False)

            x = np.random.randn(*shape).astype("float32")
            y1 = compute_v1(x)
            y2 = compute_v2(x)
            result = np.allclose(y1, y2, atol=1e-5)
            if not result:
                print("y1:", y1, "\ty2:", y2)
            self.assertTrue(result)
            test_weight_bias_false()
Example #7
    def test_static(self):
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda() and core.op_support_gpu("group_norm"):
            places.append(fluid.CUDAPlace(0))
        for p in places:
            exe = fluid.Executor(p)
            shape = [2, 6, 2, 2]

            def compute_v1(x_np):
                with program_guard(Program(), Program()):
                    gn = fluid.dygraph.GroupNorm(channels=6, groups=2)
                    x = fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype)
                    y = gn(x)
                    exe.run(fluid.default_startup_program())
                    r = exe.run(feed={'x': x_np}, fetch_list=[y])[0]
                return r

            def compute_v2(x_np):
                with program_guard(Program(), Program()):
                    gn = paddle.nn.GroupNorm(num_channels=6, num_groups=2)
                    x = fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype)
                    y = gn(x)
                    exe.run(fluid.default_startup_program())
                    r = exe.run(feed={'x': x_np}, fetch_list=[y])[0]
                return r

            x = np.random.randn(*shape).astype("float32")
            y1 = compute_v1(x)
            y2 = compute_v2(x)
            self.assertTrue(np.allclose(y1, y2, atol=1e-5))
Example #8
 def check_output_customized(self, checker):
     places = [core.CPUPlace()]
     if core.is_compiled_with_cuda() and core.op_support_gpu(self.op_type):
         places.append(core.CUDAPlace(0))
     for place in places:
         outs = self.calc_output(place)
         outs = [np.array(out) for out in outs]
         checker(outs)
Example #9
 def check_output_customized(self, checker):
     places = [core.CPUPlace()]
     if core.is_compiled_with_cuda() and core.op_support_gpu(self.op_type):
         places.append(core.CUDAPlace(0))
     for place in places:
         outs = self.calc_output(place)
         outs = [np.array(out) for out in outs]
         checker(outs)
Example #10
 def setUp(self):
     self.original_dtyep = paddle.get_default_dtype()
     # MIOPEN does not support double-precision (float64) data
     if core.is_compiled_with_rocm():
         paddle.set_default_dtype("float32")
     else:
         paddle.set_default_dtype("float64")
     self.places = [fluid.CPUPlace()]
     if core.is_compiled_with_cuda() and core.op_support_gpu("batch_norm"):
         self.places.append(fluid.CUDAPlace(0))
Example #11
    def test_check_output(self):
        places = [core.CPUPlace()]
        if core.is_compiled_with_cuda() and core.op_support_gpu("batch_norm"):
            places.append(core.CUDAPlace(0))

        for place in places:
            for data_format in ["NCHW", "NHWC"]:
                self.check_with_place(place, data_format, self.dtype,
                                      [2, 3, 4, 5])
                self.check_with_place(place, data_format, self.dtype, [2, 3])
Example #12
    def test_check_output(self):
        places = [core.CPUPlace()]
        if core.is_compiled_with_cuda() and core.op_support_gpu("batch_norm"):
            places.append(core.CUDAPlace(0))

        for place in places:
            for data_format in ["NCHW", "NHWC"]:
                self.check_with_place(place, data_format, self.dtype,
                                      [2, 3, 4, 5])
                self.check_with_place(place, data_format, self.dtype, [2, 3])
Example #13
 def _get_places(self):
     if self.dtype == np.float16:
         if core.is_compiled_with_cuda() and core.op_support_gpu(
                 self.op_type):
             place = core.CUDAPlace(0)
             if core.is_float16_supported(place):
                 return [place]
             else:
                 return []
         else:
             return []
     places = [fluid.CPUPlace()]
     cpu_only = self._cpu_only if hasattr(self, '_cpu_only') else False
     use_ngraph = bool(os.getenv("FLAGS_use_ngraph", False))
     if use_ngraph:
         cpu_only = True
     if core.is_compiled_with_cuda() and core.op_support_gpu(self.op_type)\
        and not cpu_only:
         places.append(core.CUDAPlace(0))
     return places
Example #14
    def test_dygraph(self):
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda() and core.op_support_gpu("batch_norm"):
            places.append(fluid.CUDAPlace(0))
        for p in places:
            shape = [4, 10, 4, 4]

            def compute_v1(x, is_test, trainable_statistics):
                with fluid.dygraph.guard(p):
                    bn = fluid.dygraph.BatchNorm(
                        shape[1],
                        is_test=is_test,
                        trainable_statistics=trainable_statistics)
                    y = bn(fluid.dygraph.to_variable(x))
                return y.numpy()

            def compute_v2(x):
                with fluid.dygraph.guard(p):
                    bn = paddle.nn.BatchNorm2d(shape[1])
                    y = bn(fluid.dygraph.to_variable(x))
                return y.numpy()

            def compute_v3(x, is_test, trainable_statistics):
                with fluid.dygraph.guard(p):
                    bn = fluid.dygraph.BatchNorm(
                        shape[1],
                        is_test=is_test,
                        param_attr=fluid.ParamAttr(
                            initializer=fluid.initializer.Constant(1.0),
                            trainable=False),
                        bias_attr=fluid.ParamAttr(
                            initializer=fluid.initializer.Constant(0.0),
                            trainable=False),
                        trainable_statistics=trainable_statistics)
                    y = bn(fluid.dygraph.to_variable(x))
                return y.numpy()

            def compute_v4(x):
                with fluid.dygraph.guard(p):
                    bn = paddle.nn.BatchNorm2d(shape[1],
                                               weight_attr=False,
                                               bias_attr=False)
                    y = bn(fluid.dygraph.to_variable(x))
                return y.numpy()

            x = np.random.randn(*shape).astype("float32")
            y1 = compute_v1(x, False, False)
            y2 = compute_v2(x)
            y3 = compute_v3(x, False, False)
            y4 = compute_v4(x)
            self.assertTrue(np.allclose(y1, y2))
            self.assertTrue(np.allclose(y3, y4))
Example #15
    def test_dygraph(self):
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda() and core.op_support_gpu("layer_norm"):
            places.append(fluid.CUDAPlace(0))
        for p in places:
            shape = [4, 10, 4, 4]

            def compute_v0(x):
                with fluid.dygraph.guard(p):
                    ln = fluid.dygraph.LayerNorm(shape[1:])
                    y = ln(fluid.dygraph.to_variable(x))
                return y.numpy()

            def compute_v1(x):
                with fluid.dygraph.guard(p):
                    x = fluid.dygraph.to_variable(x)
                    y = paddle.nn.functional.layer_norm(x, shape[1:])
                return y.numpy()

            def compute_v2(x):
                with fluid.dygraph.guard(p):
                    x = fluid.dygraph.to_variable(x)
                    y = paddle.nn.functional.layer_norm(x, tuple(shape[1:]))
                return y.numpy()

            def compute_v3(x):
                with fluid.dygraph.guard(p):
                    ln = fluid.dygraph.LayerNorm(shape[-1])
                    y = ln(fluid.dygraph.to_variable(x))
                return y.numpy()

            def compute_v4(x):
                with fluid.dygraph.guard(p):
                    x = fluid.dygraph.to_variable(x)
                    y = paddle.nn.functional.layer_norm(x, shape[-1])
                return y.numpy()

            x = np.random.randn(*shape).astype("float32")
            y0 = compute_v0(x)
            y1 = compute_v1(x)
            y2 = compute_v2(x)
            self.assertTrue(np.allclose(y0, y1))
            self.assertTrue(np.allclose(y0, y2))
            y3 = compute_v3(x)
            y4 = compute_v4(x)
            self.assertTrue(np.allclose(y3, y4))

            self.assertRaises(ValueError,
                              paddle.nn.functional.layer_norm,
                              x=x,
                              normalized_shape=1.0)
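            # normalized_shape must be an int or a list/tuple of ints, so a
            # float value is expected to raise ValueError.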
Example #16
    def test_error(self):
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda() and core.op_support_gpu("batch_norm"):
            places.append(fluid.CUDAPlace(0))
        for p in places:
            #paddle.disable_static()
            x_data_4 = np.random.random(size=(2, 1, 3, 3)).astype('float32')
            x_data_3 = np.random.random(size=(2, 1, 3)).astype('float32')

            def error1d_dataformat():
                x_data_4 = np.random.random(size=(2, 1, 3,
                                                  3)).astype('float32')
                batch_norm1d = paddle.nn.BatchNorm1d(1, data_format='NCDHW')
                batch_norm1d(fluid.dygraph.to_variable(x_data_4))

            def error2d_dataformat():
                x_data_3 = np.random.random(size=(2, 1, 3)).astype('float32')
                batch_norm2d = paddle.nn.BatchNorm2d(1, data_format='NCDHW')
                batch_norm2d(fluid.dygraph.to_variable(x_data_3))

            def error3d_dataformat():
                x_data_4 = np.random.random(size=(2, 1, 3,
                                                  3)).astype('float32')
                batch_norm3d = paddle.nn.BatchNorm3d(1, data_format='NCL')
                batch_norm3d(fluid.dygraph.to_variable(x_data_4))

            def error1d():
                x_data_4 = np.random.random(size=(2, 1, 3,
                                                  3)).astype('float32')
                batch_norm1d = paddle.nn.BatchNorm1d(1)
                batch_norm1d(fluid.dygraph.to_variable(x_data_4))

            def error2d():
                x_data_3 = np.random.random(size=(2, 1, 3)).astype('float32')
                batch_norm2d = paddle.nn.BatchNorm2d(1)
                batch_norm2d(fluid.dygraph.to_variable(x_data_3))

            def error3d():
                x_data_4 = np.random.random(size=(2, 1, 3,
                                                  3)).astype('float32')
                batch_norm3d = paddle.nn.BatchNorm3d(1)
                batch_norm3d(fluid.dygraph.to_variable(x_data_4))

            with fluid.dygraph.guard(p):
                self.assertRaises(ValueError, error1d)
                self.assertRaises(ValueError, error2d)
                self.assertRaises(ValueError, error3d)
                self.assertRaises(ValueError, error1d_dataformat)
                self.assertRaises(ValueError, error2d_dataformat)
                self.assertRaises(ValueError, error3d_dataformat)
Example #17
 def check_grad(self,
                inputs_to_check,
                output_names,
                no_grad_set=None,
                numeric_grad_delta=0.005,
                in_place=False,
                max_relative_error=0.005,
                user_defined_grads=None):
     places = [core.CPUPlace()]
     if core.is_compiled_with_cuda() and core.op_support_gpu(self.op_type):
         places.append(core.CUDAPlace(0))
     for place in places:
         self.check_grad_with_place(place, inputs_to_check, output_names,
                                    no_grad_set, numeric_grad_delta,
                                    in_place, max_relative_error,
                                    user_defined_grads)
Example #18
 def check_grad(self,
                inputs_to_check,
                output_names,
                no_grad_set=None,
                numeric_grad_delta=0.005,
                in_place=False,
                max_relative_error=0.005,
                user_defined_grads=None):
     places = [core.CPUPlace()]
     if core.is_compiled_with_cuda() and core.op_support_gpu(self.op_type):
         places.append(core.CUDAPlace(0))
     for place in places:
         self.check_grad_with_place(place, inputs_to_check, output_names,
                                    no_grad_set, numeric_grad_delta,
                                    in_place, max_relative_error,
                                    user_defined_grads)
Example #19
    def test_dygraph(self):
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda() and core.op_support_gpu("group_norm"):
            places.append(fluid.CUDAPlace(0))
        shapes = [[2, 2, 2, 2], [2, 2, 4], [4, 2], [4, 2, 6, 6, 2],
                  [2, 2, 2, 2, 2, 2]]
        for p in places:

            def compute_v1(x):
                with fluid.dygraph.guard(p):
                    gn = fluid.dygraph.GroupNorm(channels=2, groups=2)
                    y = gn(fluid.dygraph.to_variable(x))
                return y.numpy()

            def compute_v2(x):
                with fluid.dygraph.guard(p):
                    gn = paddle.nn.GroupNorm(num_channels=2, num_groups=2)
                    y = gn(fluid.dygraph.to_variable(x))
                return y.numpy()

            def test_weight_bias_false():
                with fluid.dygraph.guard(p):
                    gn = paddle.nn.GroupNorm(
                        num_channels=2,
                        num_groups=2,
                        weight_attr=False,
                        bias_attr=False)

            def test_nn_exception():
                with fluid.dygraph.guard(p):

                    def attr_data_format():
                        out = paddle.nn.GroupNorm(
                            num_groups=2, num_channels=2, data_format="NHWC")

                    self.assertRaises(ValueError, attr_data_format)

            for shape in shapes:
                x = np.random.randn(*shape).astype("float32")
                y1 = compute_v1(x)
                y2 = compute_v2(x)
                result = np.allclose(y1, y2, atol=1e-5)
                if not result:
                    print("y1:", y1, "\ty2:", y2)
                self.assertTrue(result)
                test_weight_bias_false()
                test_nn_exception()
Example #20
    def test_dygraph(self):
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda() and core.op_support_gpu("batch_norm"):
            places.append(fluid.CUDAPlace(0))
        for p in places:
            shape = [4, 10, 4, 4]

            def compute(x, is_test, trainable_statistics):
                with fluid.dygraph.guard(p):
                    bn = fluid.dygraph.BatchNorm(
                        shape[1],
                        is_test=is_test,
                        trainable_statistics=trainable_statistics)
                    y = bn(fluid.dygraph.to_variable(x))
                return y.numpy()

            x = np.random.randn(*shape).astype("float32")
            y1 = compute(x, False, False)
            y2 = compute(x, True, True)
            self.assertTrue(np.allclose(y1, y2))
Example #21
    def test_dygraph(self):
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda() and core.op_support_gpu("layer_norm"):
            places.append(fluid.CUDAPlace(0))
        for p in places:
            shape = [4, 10, 4, 4]

            def compute_v1(x):
                with fluid.dygraph.guard(p):
                    ln = fluid.dygraph.LayerNorm(shape[1:])
                    y = ln(fluid.dygraph.to_variable(x))
                return y.numpy()

            def compute_v2(x):
                with fluid.dygraph.guard(p):
                    ln = paddle.nn.LayerNorm(shape[1:])
                    y = ln(fluid.dygraph.to_variable(x))
                return y.numpy()

            x = np.random.randn(*shape).astype("float32")
            y1 = compute_v1(x)
            y2 = compute_v2(x)
            self.assertTrue(np.allclose(y1, y2))
Example #22
    def test_error(self):
        places = [fluid.CPUPlace()]
        if core.is_compiled_with_cuda() and core.op_support_gpu(
                "instance_norm"):
            places.append(fluid.CUDAPlace(0))
        for p in places:
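            # Each helper below feeds a tensor whose rank does not match the
            # layer's expected input rank, so a ValueError is expected.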

            def error1d():
                x_data_4 = np.random.random(size=(2, 1, 3,
                                                  3)).astype('float32')
                instance_norm1d = paddle.nn.InstanceNorm1D(1)
                instance_norm1d(fluid.dygraph.to_variable(x_data_4))

            def error2d():
                x_data_3 = np.random.random(size=(2, 1, 3)).astype('float32')
                instance_norm2d = paddle.nn.InstanceNorm2D(1)
                instance_norm2d(fluid.dygraph.to_variable(x_data_3))

            def error3d():
                x_data_4 = np.random.random(size=(2, 1, 3,
                                                  3)).astype('float32')
                instance_norm3d = paddle.nn.InstanceNorm3D(1)
                instance_norm3d(fluid.dygraph.to_variable(x_data_4))

            def weight_bias_false():
                x_data_4 = np.random.random(size=(2, 1, 3,
                                                  3)).astype('float32')
                instance_norm3d = paddle.nn.InstanceNorm3D(1,
                                                           weight_attr=False,
                                                           bias_attr=False)

            with fluid.dygraph.guard(p):
                weight_bias_false()
                self.assertRaises(ValueError, error1d)
                self.assertRaises(ValueError, error2d)
                self.assertRaises(ValueError, error3d)
Example #23
    def test_forward_backward(self):
        def test_with_place(place, data_layout, shape):
            # attr
            epsilon = 0.00001
            momentum = 0.9
            if data_layout == "NCHW":
                n, c, h, w = shape[0], shape[1], shape[2], shape[3]
            else:
                n, h, w, c = shape[0], shape[1], shape[2], shape[3]
            scale_shape = [c]

            np.random.seed(123)
            x = np.random.random_sample(shape).astype(np.float32)
            scale = np.random.random_sample(scale_shape).astype(np.float32)
            bias = np.random.random_sample(scale_shape).astype(np.float32)
            mean = np.zeros(scale_shape).astype(np.float32)
            variance = np.ones(scale_shape).astype(np.float32)

            y_grad = np.random.random_sample(shape).astype(np.float32)

            y, mean_out, variance_out, saved_mean, saved_variance, x_grad, scale_grad, bias_grad = self.ref_forward_backward(
                x, y_grad, scale, bias, mean, variance, epsilon, momentum,
                shape, data_layout)

            var_dict = locals()
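            # locals() snapshots the NumPy reference arrays above so they can be
            # fed to the program and compared against its outputs by name.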
            var_dict['y@GRAD'] = y_grad

            var_names = [
                'x', 'scale', 'bias', 'mean', 'variance', 'y', 'saved_mean',
                'saved_variance'
            ]
            ground_truth = {name: var_dict[name] for name in var_names}

            program = fluid.Program()
            with fluid.program_guard(program):
                block = program.global_block()
                for name in ground_truth:
                    block.create_var(name=name,
                                     dtype='float32',
                                     shape=ground_truth[name].shape)
                bn_op = block.append_op(
                    type="batch_norm",
                    inputs={
                        "X": block.var('x'),
                        "Scale": block.var('scale'),
                        "Bias": block.var('bias'),
                        "Mean": block.var('mean'),
                        "Variance": block.var('variance')
                    },
                    outputs={
                        "Y": block.var('y'),
                        "MeanOut": block.var('mean'),  # share the same memory
                        "VarianceOut":
                        block.var('variance'),  # share the same memory
                        "SavedMean": block.var('saved_mean'),
                        "SavedVariance": block.var('saved_variance')
                    },
                    attrs={
                        "momentum": momentum,
                        "epsilon": epsilon,
                        "is_test": False,
                        "data_layout": data_layout,
                        "use_mkldnn": self.use_mkldnn
                    })
                block.create_var(name='y@GRAD', dtype='float32', shape=y.shape)

                # generate backward op_desc
                grad_op_desc_list, op_grad_to_var = core.get_grad_op_desc(
                    bn_op.desc, set(), [])
                grad_op_desc = grad_op_desc_list[0]
                new_op_desc = block.desc.append_op()
                new_op_desc.copy_from(grad_op_desc)
                for var_name in grad_op_desc.output_arg_names():
                    block.desc.var(var_name.encode("ascii"))
                grad_op_desc.infer_var_type(block.desc)
                grad_op_desc.infer_shape(block.desc)
                for arg in grad_op_desc.output_arg_names():
                    grad_var = block.desc.find_var(arg.encode("ascii"))
                    grad_var.set_dtype(core.VarDesc.VarType.FP32)

                exe = fluid.Executor(place)
                out = exe.run(
                    program,
                    feed={
                        name: var_dict[name]
                        for name in
                        ['x', 'scale', 'bias', 'mean', 'variance', 'y@GRAD']
                    },
                    fetch_list=[
                        'y', 'mean', 'variance', 'saved_mean',
                        'saved_variance', 'x@GRAD', 'scale@GRAD', 'bias@GRAD'
                    ])

            self.__assert_close(y, out[0], "y")
            self.__assert_close(mean_out, out[1], "mean")
            self.__assert_close(variance_out, out[2], "variance", 1e-3)
            self.__assert_close(saved_mean, out[3], "saved_mean")
            self.__assert_close(saved_variance, out[4], "saved_variance", 1e-3)
            self.__assert_close(x_grad, out[5], "x_grad")
            self.__assert_close(scale_grad, out[6], "scale_grad")
            self.__assert_close(bias_grad, out[7], "bias_grad")

            print "op test forward passed: ", str(place), data_layout

        places = [core.CPUPlace()]

        if core.is_compiled_with_cuda() and core.op_support_gpu("batch_norm"):
            places.append(core.CUDAPlace(0))

        for place in places:
            for data_format in self.data_formats:
                test_with_place(place, data_format, [2, 3, 4, 5])
Example #24
    def check_forward_backward(self, shape, begin_norm_axis):
        def test_with_place(place, shape, begin_norm_axis):
            # attr
            epsilon = 0.00001
            x_shape = shape
            D = reduce(mul, x_shape[begin_norm_axis:len(x_shape)], 1)
            scale_shape = [D]

            np.random.seed(123)
            x = np.random.random_sample(x_shape).astype(np.float32)
            scale = np.random.random_sample(scale_shape).astype(np.float32)
            bias = np.random.random_sample(scale_shape).astype(np.float32)
            y_grad = np.random.random_sample(x_shape).astype(np.float32)

            # reference forward & backward
            y, mean, variance = _reference_layer_norm_naive(
                x, scale, bias, epsilon, begin_norm_axis)
            x_grad, scale_grad, bias_grad = _reference_layer_norm_grad(
                x, y_grad, scale, mean, variance, begin_norm_axis)

            var_dict = locals()
            var_dict['y@GRAD'] = y_grad
            var_names = [
                'x', 'scale', 'bias', 'mean', 'variance', 'y', 'y@GRAD'
            ]
            ground_truth = {name: var_dict[name] for name in var_names}

            program = fluid.Program()
            with fluid.program_guard(program):
                block = program.global_block()
                for name in ground_truth:
                    block.create_var(
                        name=name,
                        dtype='float32',
                        shape=ground_truth[name].shape)
                layer_norm_op = block.append_op(
                    type="layer_norm",
                    inputs={
                        "X": block.var('x'),
                        "Scale": block.var('scale'),
                        "Bias": block.var('bias'),
                    },
                    outputs={
                        "Y": block.var('y'),
                        "Mean": block.var('mean'),  # share the same memory
                        "Variance":
                        block.var('variance'),  # share the same memory
                    },
                    attrs={
                        "epsilon": epsilon,
                        "begin_norm_axis": begin_norm_axis
                    })

                # generate backward op_desc
                grad_op_desc_list, op_grad_to_var = core.get_grad_op_desc(
                    layer_norm_op.desc, set(), [])
                grad_op_desc = grad_op_desc_list[0]
                new_op_desc = block.desc.append_op()
                new_op_desc.copy_from(grad_op_desc)
                for var_name in grad_op_desc.output_arg_names():
                    block.desc.var(var_name.encode("ascii"))
                grad_op_desc.infer_var_type(block.desc)
                grad_op_desc.infer_shape(block.desc)
                for arg in grad_op_desc.output_arg_names():
                    grad_var = block.desc.find_var(arg.encode("ascii"))
                    grad_var.set_dtype(core.VarDesc.VarType.FP32)

                exe = fluid.Executor(place)
                out = exe.run(program,
                              feed={
                                  name: var_dict[name]
                                  for name in ['x', 'scale', 'bias', 'y@GRAD']
                              },
                              fetch_list=[
                                  'y', 'mean', 'variance', 'x@GRAD',
                                  'scale@GRAD', 'bias@GRAD'
                              ])
                self.__assert_close(y, out[0], "y")
                self.__assert_close(mean, out[1], "mean")
                self.__assert_close(variance, out[2], "variance", 1e-3)
                self.__assert_close(x_grad, out[3], "x_grad")
                self.__assert_close(scale_grad, out[4], "scale_grad", 1e-3)
                self.__assert_close(bias_grad, out[5], "bias_grad")

        places = [core.CPUPlace()]
        if core.is_compiled_with_cuda() and core.op_support_gpu("layer_norm"):
            places.append(core.CUDAPlace(0))

        for place in places:
            test_with_place(place, shape, begin_norm_axis)
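_reference_layer_norm_naive and _reference_layer_norm_grad are defined elsewhere in the test file. For orientation, a hedged sketch of what the forward reference plausibly computes (flatten everything from begin_norm_axis onward into one row per sample, normalize each row, then apply scale and bias) is shown below; the backward reference is omitted:

    import numpy as np

    def _reference_layer_norm_naive(x, scale, bias, epsilon, begin_norm_axis=1):
        # Collapse the normalized axes into one row per sample and normalize
        # each row with its own mean and variance.
        x_shape = x.shape
        left = int(np.prod(x_shape[:begin_norm_axis]))
        right = int(np.prod(x_shape[begin_norm_axis:]))
        x2 = x.reshape(left, right)
        mean = x2.mean(axis=1)
        var = x2.var(axis=1)
        y = (x2 - mean.reshape(left, 1)) / np.sqrt(var.reshape(left, 1) + epsilon)
        y = (y * scale.reshape(1, right) + bias.reshape(1, right)).reshape(x_shape)
        return y, mean, var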
Example #25
            'dropout_prob': 0.0,
        }
        self.outputs = {
            'Out': self.inputs['X'],
            'Mask': np.ones((32, 64)).astype('uint8')
        }

    def test_check_output(self):
        self.check_output()

    def test_check_grad_normal(self):
        self.check_grad(['X'], 'Out', max_relative_error=0.05)


@unittest.skipIf(
    not core.is_compiled_with_cuda() or not core.op_support_gpu("dropout"),
    "core is not compiled with CUDA or core is not support dropout")
@skip_check_grad_ci(reason="For inference, check_grad is not required.")
class TestFP16DropoutOp(OpTest):
    def setUp(self):
        self.op_type = "dropout"
        self.init_test_case()

        x = np.random.random(self.input_size).astype("float16")
        out = x * (1.0 - self.prob)
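        # With is_test=True and the default dropout implementation, the op just
        # scales the input by (1 - dropout_prob), which the expected output encodes.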
        self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
        self.attrs = {
            'dropout_prob': self.prob,
            'fix_seed': self.fix_seed,
            'is_test': True
        }
Example #26
 def check_output(self, atol=1e-5):
     places = [core.CPUPlace()]
     if core.is_compiled_with_cuda() and core.op_support_gpu(self.op_type):
         places.append(core.CUDAPlace(0))
     for place in places:
         self.check_output_with_place(place, atol)
Example #27
 def test_case(self):
     self.assertEqual(core.is_compiled_with_cuda(),
                      core.op_support_gpu("sum"))
Example #28
    def test_forward_backward(self):
        def test_with_place(place, data_layout, shape):
            # attr
            epsilon = 0.00001
            momentum = 0.9
            if data_layout == "NCHW":
                n, c, h, w = shape[0], shape[1], shape[2], shape[3]
            else:
                n, h, w, c = shape[0], shape[1], shape[2], shape[3]
            scale_shape = [c]

            np.random.seed(123)
            x = np.random.random_sample(shape).astype(np.float32)
            scale = np.random.random_sample(scale_shape).astype(np.float32)
            bias = np.random.random_sample(scale_shape).astype(np.float32)
            mean = np.zeros(scale_shape).astype(np.float32)
            variance = np.ones(scale_shape).astype(np.float32)

            y_grad = np.random.random_sample(shape).astype(np.float32)

            y, mean_out, variance_out, saved_mean, saved_variance, x_grad, scale_grad, bias_grad = self.ref_forward_backward(
                x, y_grad, scale, bias, mean, variance, epsilon, momentum,
                shape, data_layout)

            var_dict = locals()
            var_dict['y@GRAD'] = y_grad

            var_names = [
                'x', 'scale', 'bias', 'mean', 'variance', 'y', 'saved_mean',
                'saved_variance'
            ]
            ground_truth = {name: var_dict[name] for name in var_names}

            program = fluid.Program()
            with fluid.program_guard(program):
                block = program.global_block()
                for name in ground_truth:
                    block.create_var(
                        name=name,
                        dtype='float32',
                        shape=ground_truth[name].shape)
                bn_op = block.append_op(
                    type="batch_norm",
                    inputs={
                        "X": block.var('x'),
                        "Scale": block.var('scale'),
                        "Bias": block.var('bias'),
                        "Mean": block.var('mean'),
                        "Variance": block.var('variance')
                    },
                    outputs={
                        "Y": block.var('y'),
                        "MeanOut": block.var('mean'),  # share the same memory
                        "VarianceOut":
                        block.var('variance'),  # share the same memory
                        "SavedMean": block.var('saved_mean'),
                        "SavedVariance": block.var('saved_variance')
                    },
                    attrs={
                        "momentum": momentum,
                        "epsilon": epsilon,
                        "is_test": False,
                        "data_layout": data_layout,
                        "use_mkldnn": self.use_mkldnn
                    })
                block.create_var(name='y@GRAD', dtype='float32', shape=y.shape)

                # generate backward op_desc
                grad_op_desc_list, op_grad_to_var = core.get_grad_op_desc(
                    bn_op.desc, set(), [])
                grad_op_desc = grad_op_desc_list[0]
                new_op_desc = block.desc.append_op()
                new_op_desc.copy_from(grad_op_desc)
                for var_name in grad_op_desc.output_arg_names():
                    block.desc.var(var_name.encode("ascii"))
                grad_op_desc.infer_var_type(block.desc)
                grad_op_desc.infer_shape(block.desc)
                for arg in grad_op_desc.output_arg_names():
                    grad_var = block.desc.find_var(arg.encode("ascii"))
                    grad_var.set_dtype(core.VarDesc.VarType.FP32)

                exe = fluid.Executor(place)
                out = exe.run(
                    program,
                    feed={
                        name: var_dict[name]
                        for name in
                        ['x', 'scale', 'bias', 'mean', 'variance', 'y@GRAD']
                    },
                    fetch_list=[
                        'y', 'mean', 'variance', 'saved_mean', 'saved_variance',
                        'x@GRAD', 'scale@GRAD', 'bias@GRAD'
                    ])

            self.__assert_close(y, out[0], "y")
            self.__assert_close(mean_out, out[1], "mean")
            self.__assert_close(variance_out, out[2], "variance", 1e-3)
            self.__assert_close(saved_mean, out[3], "saved_mean")
            self.__assert_close(saved_variance, out[4], "saved_variance", 1e-3)
            self.__assert_close(x_grad, out[5], "x_grad")
            self.__assert_close(scale_grad, out[6], "scale_grad")
            self.__assert_close(bias_grad, out[7], "bias_grad")

            print "op test forward passed: ", str(place), data_layout

        places = [core.CPUPlace()]

        if core.is_compiled_with_cuda() and core.op_support_gpu("batch_norm"):
            places.append(core.CUDAPlace(0))

        for place in places:
            for data_format in self.data_formats:
                test_with_place(place, data_format, [2, 3, 4, 5])
Example #29
    def check_forward_backward(self):
        def test_with_place(place):
            out_grad = np.random.random_sample(self.x.shape).astype(np.float32)
            x_grad = out_grad
            sum_axis = list(range(0, len(self.x.shape)))
            del sum_axis[self.axis]
            y_grad = np.sum(out_grad, axis=tuple(sum_axis))

            var_dict = locals()
            var_dict['y'] = self.y
            var_dict['x'] = self.x
            var_dict['out'] = self.out
            var_dict['y@GRAD'] = y_grad
            var_dict['x@GRAD'] = x_grad
            var_dict['out@GRAD'] = out_grad

            var_names = ['x', 'y', 'out', 'y@GRAD', 'x@GRAD', 'out@GRAD']
            ground_truth = {name: var_dict[name] for name in var_names}

            program = fluid.Program()
            with fluid.program_guard(program):
                block = program.global_block()
                for name in ground_truth:
                    block.create_var(
                        name=name,
                        dtype='float32',
                        shape=ground_truth[name].shape)
                elementwise_add_op = block.append_op(
                    type="elementwise_add",
                    inputs={
                        "X": block.var('x'),
                        "Y": block.var('y'),
                    },
                    outputs={"Out": block.var('out'), },
                    attrs={"axis": self.axis, })

                # generate backward op_desc
                grad_op_desc_list, op_grad_to_var = core.get_grad_op_desc(
                    elementwise_add_op.desc, set(), [])
                grad_op_desc = grad_op_desc_list[0]
                new_op_desc = block.desc.append_op()
                new_op_desc.copy_from(grad_op_desc)
                for var_name in grad_op_desc.output_arg_names():
                    block.desc.var(var_name.encode("ascii"))
                grad_op_desc.infer_var_type(block.desc)
                grad_op_desc.infer_shape(block.desc)
                for arg in grad_op_desc.output_arg_names():
                    grad_var = block.desc.find_var(arg.encode("ascii"))
                    grad_var.set_dtype(core.VarDesc.VarType.FP32)

                exe = fluid.Executor(place)
                out = exe.run(program,
                              feed={
                                  name: var_dict[name]
                                  for name in ['x', 'y', 'out@GRAD']
                              },
                              fetch_list=['x@GRAD', 'y@GRAD'])
                self.__assert_close(x_grad, out[0], "x@GRAD")
                self.__assert_close(y_grad, out[1], "y@GRAD", atol=1.4)

        places = [core.CPUPlace()]
        if core.is_compiled_with_cuda() and core.op_support_gpu(
                "elementwise_add"):
            places.append(core.CUDAPlace(0))

        for place in places:
            test_with_place(place)
Example #30
 def setUp(self):
     self.original_dtyep = paddle.get_default_dtype()
     paddle.set_default_dtype("float64")
     self.places = [fluid.CPUPlace()]
     if core.is_compiled_with_cuda() and core.op_support_gpu("batch_norm"):
         self.places.append(fluid.CUDAPlace(0))
Example #31
 def setUp(self):
     self.places = [fluid.CPUPlace()]
     if core.is_compiled_with_cuda() and core.op_support_gpu("batch_norm"):
         self.places.append(fluid.CUDAPlace(0))
     self.init_test()
Example #32
 def test_case(self):
     self.assertEqual(core.is_compiled_with_cuda(),
                      core.op_support_gpu("sum"))
Example #33
 def init_test_case(self):
     self.epsilon = 1e-5
     self.places = [core.CPUPlace()]
     if core.is_compiled_with_cuda() and core.op_support_gpu(
             "instance_norm"):
         self.places.append(core.CUDAPlace(0))
Example #34
    def test_forward_backward(self):
        def test_with_place(place, shape):
            epsilon = self.epsilon
            n, c, h, w = shape[0], shape[1], shape[2], shape[3]
            scale_shape = [c]
            mean_shape = [n * c]

            np.random.seed()
            x = np.random.random_sample(shape).astype(np.float32)
            scale = np.random.random_sample(scale_shape).astype(np.float32)
            bias = np.random.random_sample(scale_shape).astype(np.float32)
            mean, variance = self.set_global_mean_var(mean_shape, x)
            d_y = np.random.random_sample(shape).astype(np.float32)

            y, saved_mean, variance_tmp = _reference_instance_norm_naive(
                x, scale, bias, epsilon, mean, variance)

            saved_variance = 1 / np.sqrt(variance_tmp + epsilon)
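            # The op's SavedVariance output stores this inverse standard
            # deviation rather than the raw variance.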

            d_x, d_scale, d_bias = _reference_instance_norm_grad(
                x, d_y, scale, saved_mean, saved_variance, epsilon)

            var_dict = locals()
            var_dict['y@GRAD'] = d_y
            var_dict['x@GRAD'] = d_x
            var_dict['scale@GRAD'] = d_scale
            var_dict['bias@GRAD'] = d_bias

            var_names = [
                'x', 'scale', 'bias', 'y', 'saved_mean', 'saved_variance'
            ]
            ground_truth = {name: var_dict[name] for name in var_names}

            program = fluid.Program()
            with fluid.program_guard(program):
                block = program.global_block()
                for name in ground_truth:
                    block.create_var(
                        name=name,
                        dtype='float32',
                        shape=ground_truth[name].shape)
                in_op = block.append_op(
                    type="instance_norm",
                    inputs={
                        "X": block.var("x"),
                        "Scale": block.var("scale"),
                        "Bias": block.var("bias"),
                    },
                    outputs={
                        "Y": block.var("y"),
                        "SavedMean": block.var("saved_mean"),
                        "SavedVariance": block.var("saved_variance")
                    },
                    attrs={"epsilon": epsilon, })

                block.create_var(name="y@GRAD", dtype='float32', shape=y.shape)

                grad_op_desc_list, op_grad_to_var = core.get_grad_op_desc(
                    in_op.desc, self.no_grad_set, [])
                grad_op_desc = grad_op_desc_list[0]
                new_op_desc = block.desc.append_op()
                new_op_desc.copy_from(grad_op_desc)
                for var_name in grad_op_desc.output_arg_names():
                    block.desc.var(var_name.encode("ascii"))
                grad_op_desc.infer_var_type(block.desc)
                grad_op_desc.infer_shape(block.desc)
                for arg in grad_op_desc.output_arg_names():
                    grad_var = block.desc.find_var(arg.encode("ascii"))
                    grad_var.set_dtype(core.VarDesc.VarType.FP32)

                program._sync_with_cpp()

                exe = fluid.Executor(place)
                out = exe.run(program,
                              feed={
                                  name: var_dict[name]
                                  for name in ['x', 'scale', 'bias', 'y@GRAD']
                              },
                              fetch_list=self.fetch_list)

            for id, name in enumerate(self.fetch_list):
                self.__assert_close(var_dict[name], out[id], name)
            print("op test forward passes: ", str(place))

        places = [core.CPUPlace()]

        if core.is_compiled_with_cuda() and core.op_support_gpu(
                "instance_norm"):
            places.append(core.CUDAPlace(0))
        for place in places:
            test_with_place(place, self.shape)
Example #35
    def check_forward_backward(self, shape, begin_norm_axis):
        def test_with_place(place, shape, begin_norm_axis):
            # attr
            epsilon = 0.00001
            x_shape = shape
            D = reduce(mul, x_shape[begin_norm_axis:len(x_shape)], 1)
            scale_shape = [D]

            np.random.seed(123)
            x = np.random.random_sample(x_shape).astype(np.float32)
            scale = np.random.random_sample(scale_shape).astype(np.float32)
            bias = np.random.random_sample(scale_shape).astype(np.float32)
            y_grad = np.random.random_sample(x_shape).astype(np.float32)

            # reference forward & backward
            y, mean, variance = _reference_layer_norm_naive(
                x, scale, bias, epsilon, begin_norm_axis)
            x_grad, scale_grad, bias_grad = _reference_layer_norm_grad(
                x, y_grad, scale, mean, variance, begin_norm_axis)

            var_dict = locals()
            var_dict['y@GRAD'] = y_grad
            var_names = [
                'x', 'scale', 'bias', 'mean', 'variance', 'y', 'y@GRAD'
            ]
            ground_truth = {name: var_dict[name] for name in var_names}

            program = fluid.Program()
            with fluid.program_guard(program):
                block = program.global_block()
                for name in ground_truth:
                    block.create_var(name=name,
                                     dtype='float32',
                                     shape=ground_truth[name].shape)
                layer_norm_op = block.append_op(
                    type="layer_norm",
                    inputs={
                        "X": block.var('x'),
                        "Scale": block.var('scale'),
                        "Bias": block.var('bias'),
                    },
                    outputs={
                        "Y": block.var('y'),
                        "Mean": block.var('mean'),  # share the same memory
                        "Variance":
                        block.var('variance'),  # share the same memory
                    },
                    attrs={
                        "epsilon": epsilon,
                        "begin_norm_axis": begin_norm_axis
                    })

                # generate backward op_desc
                grad_op_desc_list, op_grad_to_var = core.get_grad_op_desc(
                    layer_norm_op.desc, set(), [])
                grad_op_desc = grad_op_desc_list[0]
                new_op_desc = block.desc.append_op()
                new_op_desc.copy_from(grad_op_desc)
                for var_name in grad_op_desc.output_arg_names():
                    block.desc.var(var_name.encode("ascii"))
                grad_op_desc.infer_var_type(block.desc)
                grad_op_desc.infer_shape(block.desc)
                for arg in grad_op_desc.output_arg_names():
                    grad_var = block.desc.find_var(arg.encode("ascii"))
                    grad_var.set_dtype(core.VarDesc.VarType.FP32)

                exe = fluid.Executor(place)
                out = exe.run(program,
                              feed={
                                  name: var_dict[name]
                                  for name in ['x', 'scale', 'bias', 'y@GRAD']
                              },
                              fetch_list=[
                                  'y', 'mean', 'variance', 'x@GRAD',
                                  'scale@GRAD', 'bias@GRAD'
                              ])
                self.__assert_close(y, out[0], "y")
                self.__assert_close(mean, out[1], "mean")
                self.__assert_close(variance, out[2], "variance", 1e-3)
                self.__assert_close(x_grad, out[3], "x_grad")
                self.__assert_close(scale_grad, out[4], "scale_grad", 1e-3)
                self.__assert_close(bias_grad, out[5], "bias_grad")

        places = [core.CPUPlace()]
        if core.is_compiled_with_cuda() and core.op_support_gpu(
                "layer_norm") and self.use_cudnn:
            places.append(core.CUDAPlace(0))

        for place in places:
            test_with_place(place, shape, begin_norm_axis)
Example #36
    def test_forward_backward(self):
        def test_with_place(place, data_layout, shape):
            # attr
            epsilon = self.epsilon
            momentum = self.momentum
            if data_layout == "NCHW":
                n, c, h, w = shape[0], shape[1], shape[2], shape[3]
            else:
                n, h, w, c = shape[0], shape[1], shape[2], shape[3]
            scale_shape = [c]

            np.random.seed(123)
            x = np.random.random_sample(shape).astype(np.float32)
            scale = np.random.random_sample(scale_shape).astype(np.float32)
            bias = np.random.random_sample(scale_shape).astype(np.float32)
            mean, variance = self.set_mean_variance(scale_shape, x, data_layout)
            y_grad = np.random.random_sample(shape).astype(np.float32)
            momentum_var = np.array([momentum]).astype(np.float32)

            y, mean_out, variance_out, saved_mean, saved_variance, x_grad, scale_grad, bias_grad = self.ref_forward_backward(
                x, y_grad, scale, bias, mean, variance, epsilon, momentum,
                shape, data_layout)

            var_dict = locals()
            var_dict['y@GRAD'] = y_grad
            var_dict['x@GRAD'] = x_grad
            var_dict['scale@GRAD'] = scale_grad
            var_dict['bias@GRAD'] = bias_grad

            var_names = [
                'x', 'scale', 'bias', 'mean', 'variance', 'y', 'saved_mean',
                'saved_variance', 'momentum_var'
            ]
            ground_truth = {name: var_dict[name] for name in var_names}

            program = fluid.Program()
            with fluid.program_guard(program):
                block = program.global_block()
                for name in ground_truth:
                    block.create_var(
                        name=name,
                        dtype='float32',
                        shape=ground_truth[name].shape)
                inputs = {
                    "X": block.var('x'),
                    "Scale": block.var('scale'),
                    "Bias": block.var('bias'),
                    "Mean": block.var('mean'),
                    "Variance": block.var('variance')
                }
                attrs = {
                    "epsilon": epsilon,
                    "is_test": False,
                    "data_layout": data_layout,
                    "use_mkldnn": self.use_mkldnn,
                    "fuse_with_relu": self.fuse_with_relu,
                    "use_global_stats": self.use_global_stats
                }
                if self.use_momentum_variable:
                    inputs['MomentumTensor'] = block.var('momentum_var')
                else:
                    attrs['momentum'] = momentum

                outputs = {
                    "Y": block.var('y'),
                    "MeanOut": block.var('mean'),  # share memory
                    "VarianceOut": block.var('variance'),  # share memory
                    "SavedMean": block.var('saved_mean'),
                    "SavedVariance": block.var('saved_variance')
                }
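                # batch_norm also declares a ReserveSpace output; it is scratch
                # memory reused by the cuDNN backward kernel.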
                block.create_var(name="reserve_space", dtype='float32')
                outputs["ReserveSpace"] = block.var('reserve_space')
                bn_op = block.append_op(
                    type="batch_norm",
                    inputs=inputs,
                    outputs=outputs,
                    attrs=attrs)
                block.create_var(name='y@GRAD', dtype='float32', shape=y.shape)

                # generate backward op_desc
                grad_op_desc_list, op_grad_to_var = core.get_grad_op_desc(
                    bn_op.desc, self.no_grad_set, [])
                grad_op_desc = grad_op_desc_list[0]
                new_op_desc = block.desc.append_op()
                new_op_desc.copy_from(grad_op_desc)
                for var_name in grad_op_desc.output_arg_names():
                    block.desc.var(var_name.encode("ascii"))
                grad_op_desc.infer_var_type(block.desc)
                grad_op_desc.infer_shape(block.desc)
                for arg in grad_op_desc.output_arg_names():
                    grad_var = block.desc.find_var(arg.encode("ascii"))
                    grad_var.set_dtype(core.VarDesc.VarType.FP32)

                program._sync_with_cpp()

                exe = fluid.Executor(place)
                out = exe.run(program,
                              feed={
                                  name: var_dict[name]
                                  for name in [
                                      'x', 'scale', 'bias', 'mean', 'variance',
                                      'y@GRAD', 'momentum_var'
                                  ]
                              },
                              fetch_list=self.fetch_list)

            for id, name in enumerate(self.fetch_list):
                if name == 'variance':
                    self.__assert_close(
                        var_dict[name], out[id], name, atol=1e-3)
                    continue
                self.__assert_close(var_dict[name], out[id], name)
            print("op test forward passed: ", str(place), data_layout)

        places = [core.CPUPlace()]

        if core.is_compiled_with_cuda() and core.op_support_gpu("batch_norm"):
            places.append(core.CUDAPlace(0))

        for place in places:
            for data_format in self.data_formats:
                test_with_place(place, data_format, [2, 3, 4, 5])
Example #37
 def check_output(self, atol=1e-5):
     places = [core.CPUPlace()]
     if core.is_compiled_with_cuda() and core.op_support_gpu(self.op_type):
         places.append(core.CUDAPlace(0))
     for place in places:
         self.check_output_with_place(place, atol)