Example #1
    def func(self, place):
        shape = [2, 4, 3, 3]
        eps = 0.005
        dtype = np.float64
        if core.is_compiled_with_rocm():
            dtype = np.float32
        x = layers.data('x', shape, False, dtype)
        y = layers.conv2d_transpose(
            x, 2, filter_size=1, groups=1, bias_attr=False)
        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)

        w = fluid.default_main_program().global_block().all_parameters()
        w_arr = []
        for p in w:
            w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype))
        if core.is_compiled_with_rocm():
            # HIP will sometimes fail if atol is not specified
            gradient_checker.double_grad_check(
                [x] + w,
                y,
                x_init=[x_arr] + w_arr,
                place=place,
                eps=eps,
                atol=1e-4)
        else:
            gradient_checker.double_grad_check(
                [x] + w, y, x_init=[x_arr] + w_arr, place=place, eps=eps)
        gradient_checker.double_grad_check_for_dygraph(
            self.conv_transpose_wrapper, [x] + w,
            y,
            x_init=[x_arr] + w_arr,
            place=place)
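The dygraph double-grad check above passes self.conv_transpose_wrapper, which this snippet does not define; a minimal sketch of such a wrapper, assuming the paddle.nn.functional dygraph API, might look like:

    def conv_transpose_wrapper(self, x):
        # x is the [input, filter] list assembled by the gradient checker;
        # mirror the static conv2d_transpose op with the dygraph functional API (assumed).
        return paddle.nn.functional.conv2d_transpose(x[0], x[1], groups=1)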
Example #2
    def func(self, place):
        shape = [2, 3, 3, 2]
        eps = 0.005
        dtype = np.float64
        if core.is_compiled_with_rocm():
            dtype = np.float32
        x = layers.data('x', shape, False, dtype)
        y = layers.conv2d_transpose(input=x,
                                    num_filters=2,
                                    filter_size=1,
                                    padding=[1, 1],
                                    bias_attr=False,
                                    use_cudnn=True,
                                    groups=1,
                                    data_format="NHWC")
        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)

        w = fluid.default_main_program().global_block().all_parameters()
        w_arr = []
        for p in w:
            w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype))
        if core.is_compiled_with_rocm():
            # HIP will sometimes fail if atol is not specified
            gradient_checker.double_grad_check([x] + w,
                                               y,
                                               x_init=[x_arr] + w_arr,
                                               place=place,
                                               eps=eps,
                                               atol=1e-4)
        else:
            gradient_checker.double_grad_check([x] + w,
                                               y,
                                               x_init=[x_arr] + w_arr,
                                               place=place,
                                               eps=eps)
Example #3
    def net(self, input):
        c1, c2, c3, c4, c5 = self.backbone(input)
        channels = [64, 128, 256, 512]
        x = ConvUp(c5, c4, channels[2], name='up5')
        x = ConvUp(x, c3, channels[1], name='up6')
        x = ConvUp(x, c2, channels[0], name='up7')
        x = ConvUp(x, c1, channels[0], name='up8')
        x = FL.conv2d_transpose(x, num_filters=self.out_channels, filter_size=2, stride=2)

        return x
Example #4
    def __call__(self, input):
        # num_filters, output_size, filter_size, padding, stride, dilation,
        # groups, use_cudnn and act are resolved from the enclosing scope
        # (not shown); only attr_holder lives on the instance.
        return layers.conv2d_transpose(
            input=input,
            num_filters=num_filters,
            output_size=output_size,
            filter_size=filter_size,
            padding=padding,
            stride=stride,
            dilation=dilation,
            groups=groups,
            param_attr=self.attr_holder.param_attr,
            bias_attr=self.attr_holder.bias_attr,
            use_cudnn=use_cudnn,
            act=act)
Example #5
    def func(self, place):
        shape = [2, 4, 3, 3]
        eps = 0.005
        dtype = np.float64
        x = layers.data('x', shape, False, dtype)
        y = layers.conv2d_transpose(x,
                                    2,
                                    filter_size=1,
                                    groups=1,
                                    bias_attr=False)
        x_arr = np.random.uniform(-1, 1, shape).astype(dtype)

        w = fluid.default_main_program().global_block().all_parameters()
        w_arr = []
        for p in w:
            w_arr.append(np.random.uniform(-1, 1, p.shape).astype(dtype))
        gradient_checker.double_grad_check([x] + w,
                                           y,
                                           x_init=[x_arr] + w_arr,
                                           place=place,
                                           eps=eps)
Example #6
    def test_conv2d_transpose(self):
        program = Program()
        with program_guard(program):
            img = layers.data(name='pixel', shape=[3, 2, 2], dtype='float32')
            layers.conv2d_transpose(input=img, num_filters=10, output_size=28)
        print(str(program))
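When output_size is given without filter_size, the layer infers the kernel size from the transposed-convolution shape formula output = (input - 1) * stride - 2 * padding + filter_size; a variant of the test above that captures and prints the resulting shape (the variable name out is assumed):

    def test_conv2d_transpose_shape(self):
        program = Program()
        with program_guard(program):
            img = layers.data(name='pixel', shape=[3, 2, 2], dtype='float32')
            out = layers.conv2d_transpose(input=img, num_filters=10, output_size=28)
            # 2x2 input, stride 1, no padding -> a 27x27 kernel is inferred
            print(out.shape)  # (-1, 10, 28, 28)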
Example #7
def ConvUp(x1, x2, out_channels, name=None):
    # Double the spatial size of x1 (2x2 kernel, stride 2) and halve its channels,
    # then concatenate with the skip feature x2 and refine with a double conv block.
    x1 = FL.conv2d_transpose(x1, num_filters=x1.shape[1] // 2, filter_size=2, stride=2)
    x = FL.concat([x1, x2], axis=1)
    x = DoubleConv_up(x, out_channels, name=name + "_doubleconv")
    return x
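ConvUp relies on the 2x2 kernel with stride 2 to double the spatial size of x1 before concatenating it with the skip feature x2; a minimal, self-contained shape check with assumed input sizes:

import paddle.fluid as fluid
from paddle.fluid import layers

prog = fluid.Program()
with fluid.program_guard(prog):
    x1 = layers.data(name='x1', shape=[128, 16, 16], dtype='float32')  # CHW; batch dim is appended
    up = layers.conv2d_transpose(x1, num_filters=128 // 2, filter_size=2, stride=2)
    # H_out = (H_in - 1) * stride - 2 * padding + filter_size = 15 * 2 + 2 = 32
    print(up.shape)  # (-1, 64, 32, 32): channels halved, H and W doubled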