def test_static_graph_functional(self):
    """Check F.pixel_shuffle in static-graph mode for NCHW and NHWC layouts.

    Runs on CPU always, and additionally on CUDA when the build supports it,
    comparing executor results against the precomputed numpy references
    stored on the fixture (self.out_1_np / self.out_2_np).
    """
    devices = [False, True] if core.is_compiled_with_cuda() else [False]
    for with_cuda in devices:
        device = paddle.CUDAPlace(0) if with_cuda else paddle.CPUPlace()
        paddle.enable_static()
        # Declare one NCHW and one NHWC input variable.
        nchw_in = paddle.fluid.data(name="x", shape=[2, 9, 4, 4], dtype="float64")
        nhwc_in = paddle.fluid.data(name="x2", shape=[2, 4, 4, 9], dtype="float64")
        nchw_out = F.pixel_shuffle(nchw_in, 3)
        nhwc_out = F.pixel_shuffle(nhwc_in, 3, "NHWC")
        executor = paddle.static.Executor(place=device)
        got_nchw = executor.run(fluid.default_main_program(),
                                feed={"x": self.x_1_np},
                                fetch_list=nchw_out,
                                use_prune=True)
        got_nhwc = executor.run(fluid.default_main_program(),
                                feed={"x2": self.x_2_np},
                                fetch_list=nhwc_out,
                                use_prune=True)
        assert np.allclose(got_nchw, self.out_1_np)
        assert np.allclose(got_nhwc, self.out_2_np)
def run_dygraph(self, up_factor, data_format):
    """Check pixel shuffle in dygraph mode via both the layer and functional APIs.

    Args:
        up_factor (int): upscale factor passed to PixelShuffle.
        data_format (str): either "NCHW" or "NHWC"; selects the input layout.

    Builds a random input of the matching shape, computes the reference with
    pixel_shuffle_np, then verifies paddle.nn.PixelShuffle and
    F.pixel_shuffle against it on CPU (and CUDA when available).
    """
    n, c, h, w = 2, 9, 4, 4
    if data_format == "NCHW":
        shape = [n, c, h, w]
    if data_format == "NHWC":
        shape = [n, h, w, c]
    x = np.random.random(shape).astype("float64")
    npresult = pixel_shuffle_np(x, up_factor, data_format)
    for use_cuda in ([False, True] if core.is_compiled_with_cuda() else [False]):
        place = paddle.CUDAPlace(0) if use_cuda else paddle.CPUPlace()
        paddle.disable_static(place=place)
        pixel_shuffle = paddle.nn.PixelShuffle(up_factor, data_format=data_format)
        result = pixel_shuffle(paddle.to_tensor(x))
        self.assertTrue(np.allclose(result.numpy(), npresult))
        # BUG FIX: the functional path previously hard-coded factor 3, so any
        # up_factor other than 3 was never actually tested (and would have
        # mismatched the npresult reference). Use the parameter instead.
        result_functional = F.pixel_shuffle(paddle.to_tensor(x), up_factor, data_format)
        self.assertTrue(np.allclose(result_functional.numpy(), npresult))
def forward(self, x):
    """Pixel-shuffle the input using this layer's configured upscale factor."""
    factor = self.upscale_factor
    return F.pixel_shuffle(x, factor)
def error_data_format():
    """Call F.pixel_shuffle with an invalid data_format; expected to raise."""
    with paddle.fluid.dygraph.guard():
        data = np.random.random([2, 9, 4, 4]).astype("float64")
        pixel_shuffle = F.pixel_shuffle(paddle.to_tensor(data), 3, "WOW")
def error_upscale_factor():
    """Call F.pixel_shuffle with a non-integer factor; expected to raise."""
    with paddle.fluid.dygraph.guard():
        data = np.random.random([2, 9, 4, 4]).astype("float64")
        pixel_shuffle = F.pixel_shuffle(paddle.to_tensor(data), 3.33)