def test_out(self):
    with fluid.program_guard(fluid.Program()):
        data = fluid.data(shape=[10], dtype="float32", name="data")
        data_t = paddle.t(data)
        place = fluid.MLUPlace(0)
        exe = fluid.Executor(place)
        data_np = np.random.random([10]).astype("float32")
        result, = exe.run(feed={"data": data_np}, fetch_list=[data_t])
        expected_result = np.transpose(data_np)
        self.assertEqual((result == expected_result).all(), True)

    with fluid.program_guard(fluid.Program()):
        data = fluid.data(shape=[10, 5], dtype="float32", name="data")
        data_t = paddle.t(data)
        place = fluid.MLUPlace(0)
        exe = fluid.Executor(place)
        data_np = np.random.random([10, 5]).astype("float32")
        result, = exe.run(feed={"data": data_np}, fetch_list=[data_t])
        expected_result = np.transpose(data_np)
        self.assertEqual((result == expected_result).all(), True)

    with fluid.program_guard(fluid.Program()):
        data = fluid.data(shape=[1, 5], dtype="float32", name="data")
        data_t = paddle.t(data)
        place = fluid.MLUPlace(0)
        exe = fluid.Executor(place)
        data_np = np.random.random([1, 5]).astype("float32")
        result, = exe.run(feed={"data": data_np}, fetch_list=[data_t])
        expected_result = np.transpose(data_np)
        self.assertEqual((result == expected_result).all(), True)

    with fluid.dygraph.guard():
        np_x = np.random.random([10]).astype("float32")
        data = fluid.dygraph.to_variable(np_x)
        z = paddle.t(data)
        np_z = z.numpy()
        z_expected = np.array(np.transpose(np_x))
        self.assertEqual((np_z == z_expected).all(), True)

    with fluid.dygraph.guard():
        np_x = np.random.random([10, 5]).astype("float32")
        data = fluid.dygraph.to_variable(np_x)
        z = paddle.t(data)
        np_z = z.numpy()
        z_expected = np.array(np.transpose(np_x))
        self.assertEqual((np_z == z_expected).all(), True)

    with fluid.dygraph.guard():
        np_x = np.random.random([1, 5]).astype("float32")
        data = fluid.dygraph.to_variable(np_x)
        z = paddle.t(data)
        np_z = z.numpy()
        z_expected = np.array(np.transpose(np_x))
        self.assertEqual((np_z == z_expected).all(), True)
def test_errors(self):
    with program_guard(Program(), Program()):
        # The inputs of elementwise_add must be Variables.
        x1 = fluid.create_lod_tensor(np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]],
                                     fluid.MLUPlace(0))
        y1 = fluid.create_lod_tensor(np.array([-1, 3, 5, 5]), [[1, 1, 1, 1]],
                                     fluid.MLUPlace(0))
        self.assertRaises(TypeError, fluid.layers.elementwise_add, x1, y1)

        # The input dtype of elementwise_add must be float16 or float32.
        x2 = fluid.layers.data(name='x2', shape=[3, 4, 5, 6], dtype="uint8")
        y2 = fluid.layers.data(name='y2', shape=[3, 4, 5, 6], dtype="uint8")
        self.assertRaises(TypeError, fluid.layers.elementwise_add, x2, y2)
def __test_vs(self, place=fluid.MLUPlace(0)):
    paddle.disable_static(place=place)

    linear_old = paddle.nn.Linear(
        2,
        2,
        weight_attr=paddle.nn.initializer.Constant(value=2.0),
        bias_attr=paddle.nn.initializer.Constant(value=2.0))
    momentum_old = paddle.fluid.optimizer.Momentum(
        learning_rate=0.01,
        momentum=0.9,
        parameter_list=linear_old.parameters(),
        regularization=paddle.fluid.regularizer.L2Decay(
            regularization_coeff=0.1))
    self.__update_params(momentum=momentum_old, linear=linear_old)

    linear_new = paddle.nn.Linear(
        2,
        2,
        weight_attr=paddle.nn.initializer.Constant(value=2.0),
        bias_attr=paddle.nn.initializer.Constant(value=2.0))
    momentum_new = paddle.fluid.contrib.optimizer.Momentum(
        learning_rate=0.01,
        momentum=0.9,
        parameter_list=linear_new.parameters(),
        regularization=paddle.fluid.regularizer.L2Decay(
            regularization_coeff=0.1))
    self.__update_params(momentum=momentum_new, linear=linear_new)

    self.assertEqual(
        (linear_old.weight.numpy() == linear_new.weight.numpy()).all(), True,
        'the param weights updated by the two Momentum optimizers should be equal'
    )
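# `__update_params` is referenced above but not shown in this excerpt. A
# minimal sketch of what such a helper could look like, assuming it simply
# runs a few forward/backward/minimize steps on the given linear layer (the
# step count and the constant input below are illustrative assumptions, not
# the test's actual implementation):
def __update_params(self, momentum, linear):
    for _ in range(10):
        # Constant input; only the update rule is being compared.
        inp = paddle.to_tensor(np.ones([2, 2], dtype="float32"))
        loss = paddle.mean(linear(inp))
        loss.backward()
        momentum.minimize(loss)
        linear.clear_gradients()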
def run_trainer(self, args):
    train_prog = fluid.Program()
    startup_prog = fluid.Program()
    endpoints = args["endpoints"].split(",")
    rank = args["trainerid"]
    current_endpoint = args["currentendpoint"]
    nranks = 2
    paddle.distributed.init_parallel_env()
    device_id = int(os.getenv("FLAGS_selected_mlus", "0"))
    place = fluid.MLUPlace(device_id)
    np.random.seed(os.getpid())
    np_data_type = DataTypeCast(args["data_type"])
    indata = np.random.random((10, 1000)).astype(np_data_type)
    if args['static_mode']:
        result = self.get_model(train_prog, startup_prog, rank)
        exe = fluid.Executor(place)
        exe.run(startup_prog)
        fetch_list = []
        for elem in result:
            fetch_list.append(elem.name)
        out = exe.run(train_prog,
                      feed={'tindata': indata},
                      fetch_list=fetch_list)
    else:
        out = self.get_model(train_prog, startup_prog, rank, indata)
    sys.stdout.buffer.write(pickle.dumps(out))
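# `DataTypeCast` comes from the collective test utilities and maps the dtype
# name passed on the command line to the matching NumPy dtype. A minimal
# sketch, assuming only the dtypes these MLU collective tests exercise need
# to be handled (the exact mapping table is an assumption):
def DataTypeCast(data_type):
    np_data_types = {
        "float16": np.float16,
        "float32": np.float32,
        "int32": np.int32,
    }
    if data_type not in np_data_types:
        raise ValueError("unsupported data type: %s" % data_type)
    return np_data_types[data_type]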
def test_name(self):
    places = [fluid.CPUPlace()]
    if core.is_compiled_with_mlu():
        places.append(fluid.MLUPlace(0))
    for p in places:
        with fluid.dygraph.guard(p):
            batch_norm1d = paddle.nn.BatchNorm1D(1, name="test")
def test_static(self):
    places = [fluid.CPUPlace()]
    if core.is_compiled_with_mlu():
        places.append(fluid.MLUPlace(0))
    for p in places:
        exe = fluid.Executor(p)
        shape = [4, 10, 16, 16]

        def compute(x_np, is_test, trainable_statistics):
            with program_guard(Program(), Program()):
                bn = fluid.dygraph.BatchNorm(
                    shape[1],
                    is_test=is_test,
                    trainable_statistics=trainable_statistics)
                x = fluid.data(name='x', shape=x_np.shape, dtype=x_np.dtype)
                y = bn(x)
                exe.run(fluid.default_startup_program())
                r = exe.run(feed={'x': x_np}, fetch_list=[y])[0]
            return r

        x = np.random.randn(*shape).astype("float32")
        y1 = compute(x, False, False)
        y2 = compute(x, True, True)
        self.assertTrue(np.allclose(y1, y2))
def test_out(self):
    with fluid.program_guard(fluid.Program(), fluid.Program()):
        data = fluid.layers.data('data', shape=[-1, 10], dtype='float32')
        x0, x1 = paddle.split(data, num_or_sections=(3, 7), axis=1)
        place = fluid.MLUPlace(0)
        exe = fluid.Executor(place)
        input1 = np.random.random([1, 10]).astype('float32')
        r0, r1 = exe.run(feed={"data": input1}, fetch_list=[x0, x1])
        ex_x0, ex_x1 = np.split(input1, (3, ), axis=1)
        self.assertTrue(np.allclose(ex_x0, r0))
        self.assertTrue(np.allclose(ex_x1, r1))
def setUp(self):
    self.endpoints = os.getenv("PADDLE_TRAINER_ENDPOINTS").split(',')
    self.current_endpoint = os.getenv("PADDLE_CURRENT_ENDPOINT")
    self.nranks = len(self.endpoints)
    self.rank = self.endpoints.index(self.current_endpoint)
    self.mlu_id = int(os.getenv("FLAGS_selected_mlus"))
    self.place = fluid.MLUPlace(self.mlu_id)
    self.exe = fluid.Executor(self.place)
    self.endpoints.remove(self.current_endpoint)
    self.other_endpoints = self.endpoints
    # Rank 0 blocks until the other trainers' server ports are reachable.
    if self.rank == 0:
        wait_server_ready(self.other_endpoints)
def test_dygraph(self):
    places = [fluid.CPUPlace()]
    if core.is_compiled_with_mlu():
        places.append(fluid.MLUPlace(0))
    for p in places:
        shape = [4, 10, 4, 4]

        def compute_v1(x, is_test, trainable_statistics):
            with fluid.dygraph.guard(p):
                bn = fluid.dygraph.BatchNorm(
                    shape[1],
                    is_test=is_test,
                    trainable_statistics=trainable_statistics)
                y = bn(fluid.dygraph.to_variable(x))
            return y.numpy()

        def compute_v2(x):
            with fluid.dygraph.guard(p):
                bn = paddle.nn.BatchNorm2D(shape[1])
                y = bn(fluid.dygraph.to_variable(x))
            return y.numpy()

        def compute_v3(x, is_test, trainable_statistics):
            with fluid.dygraph.guard(p):
                bn = fluid.dygraph.BatchNorm(
                    shape[1],
                    is_test=is_test,
                    param_attr=fluid.ParamAttr(
                        initializer=fluid.initializer.Constant(1.0),
                        trainable=False),
                    bias_attr=fluid.ParamAttr(
                        initializer=fluid.initializer.Constant(0.0),
                        trainable=False),
                    trainable_statistics=trainable_statistics)
                y = bn(fluid.dygraph.to_variable(x))
            return y.numpy()

        def compute_v4(x):
            with fluid.dygraph.guard(p):
                bn = paddle.nn.BatchNorm2D(
                    shape[1], weight_attr=False, bias_attr=False)
                y = bn(fluid.dygraph.to_variable(x))
            return y.numpy()

        x = np.random.randn(*shape).astype("float32")
        y1 = compute_v1(x, False, False)
        y2 = compute_v2(x)
        y3 = compute_v3(x, False, False)
        y4 = compute_v4(x)
        self.assertTrue(np.allclose(y1, y2))
        self.assertTrue(np.allclose(y3, y4))
def test_error(self):
    places = [fluid.CPUPlace()]
    if core.is_compiled_with_mlu():
        places.append(fluid.MLUPlace(0))
    for p in places:

        # Each helper feeds an input whose rank or data_format does not match
        # the layer, so the forward call must raise ValueError.
        def error1d_dataformat():
            x_data_4 = np.random.random(size=(2, 1, 3, 3)).astype('float32')
            batch_norm1d = paddle.nn.BatchNorm1D(1, data_format='NCDHW')
            batch_norm1d(fluid.dygraph.to_variable(x_data_4))

        def error2d_dataformat():
            x_data_3 = np.random.random(size=(2, 1, 3)).astype('float32')
            batch_norm2d = paddle.nn.BatchNorm2D(1, data_format='NCDHW')
            batch_norm2d(fluid.dygraph.to_variable(x_data_3))

        def error3d_dataformat():
            x_data_4 = np.random.random(size=(2, 1, 3, 3)).astype('float32')
            batch_norm3d = paddle.nn.BatchNorm3D(1, data_format='NCL')
            batch_norm3d(fluid.dygraph.to_variable(x_data_4))

        def error1d():
            x_data_4 = np.random.random(size=(2, 1, 3, 3)).astype('float32')
            batch_norm1d = paddle.nn.BatchNorm1D(1)
            batch_norm1d(fluid.dygraph.to_variable(x_data_4))

        def error2d():
            x_data_3 = np.random.random(size=(2, 1, 3)).astype('float32')
            batch_norm2d = paddle.nn.BatchNorm2D(1)
            batch_norm2d(fluid.dygraph.to_variable(x_data_3))

        def error3d():
            x_data_4 = np.random.random(size=(2, 1, 3, 3)).astype('float32')
            batch_norm3d = paddle.nn.BatchNorm3D(1)
            batch_norm3d(fluid.dygraph.to_variable(x_data_4))

        with fluid.dygraph.guard(p):
            self.assertRaises(ValueError, error1d)
            self.assertRaises(ValueError, error2d)
            self.assertRaises(ValueError, error3d)
            self.assertRaises(ValueError, error1d_dataformat)
            self.assertRaises(ValueError, error2d_dataformat)
            self.assertRaises(ValueError, error3d_dataformat)
def test_declarative(self):
    with fluid.program_guard(fluid.Program()):

        def gen_data():
            return {
                "x": np.array([2, 3, 4]).astype('float32'),
                "y": np.array([1, 5, 2]).astype('float32')
            }

        x = fluid.data(name="x", shape=[3], dtype='float32')
        y = fluid.data(name="y", shape=[3], dtype='float32')
        z = self._executed_api(x, y)

        place = fluid.MLUPlace(0)
        exe = fluid.Executor(place)
        z_value = exe.run(feed=gen_data(), fetch_list=[z.name])
        z_expected = np.array([3., 8., 6.])
        self.assertEqual((z_value == z_expected).all(), True)
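# `_executed_api` is bound per test class so the same body can exercise
# different variants of the op. A minimal sketch of one plausible binding,
# assuming the element-wise add case (which matches z_expected = x + y above):
def _executed_api(self, x, y, name=None):
    return paddle.add(x, y, name)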
def test_errors(self):
    with program_guard(Program(), Program()):
        # The input type of accuracy_op must be Variable.
        x1 = fluid.create_lod_tensor(np.array([[-1]]), [[1]],
                                     fluid.MLUPlace(0))
        label = fluid.layers.data(name='label', shape=[-1, 1], dtype="int32")
        self.assertRaises(TypeError, fluid.layers.accuracy, x1, label)
        self.assertRaises(TypeError, paddle.metric.accuracy, x1, label)

        # The input dtype of accuracy_op must be float32 or float64.
        x2 = fluid.layers.data(name='x2', shape=[4], dtype="int32")
        self.assertRaises(TypeError, fluid.layers.accuracy, x2, label)
        self.assertRaises(TypeError, paddle.metric.accuracy, x2, label)

        # float16 input is accepted, so these calls should not raise.
        x3 = fluid.layers.data(name='input', shape=[-1, 2], dtype="float16")
        fluid.layers.accuracy(input=x3, label=label)
        paddle.metric.accuracy(input=x3, label=label)
def test_dygraph(self):
    places = [fluid.CPUPlace()]
    if core.is_compiled_with_mlu():
        places.append(fluid.MLUPlace(0))
    for p in places:
        shape = [4, 10, 4, 4]

        def compute(x, is_test, trainable_statistics):
            with fluid.dygraph.guard(p):
                bn = fluid.dygraph.BatchNorm(
                    shape[1],
                    is_test=is_test,
                    trainable_statistics=trainable_statistics)
                y = bn(fluid.dygraph.to_variable(x))
            return y.numpy()

        x = np.random.randn(*shape).astype("float32")
        y1 = compute(x, False, False)
        y2 = compute(x, True, True)
        self.assertTrue(np.allclose(y1, y2))
def run_trainer(self, args):
    train_prog = fluid.Program()
    startup_prog = fluid.Program()
    endpoints = args["endpoints"].split(",")
    rank = args["trainerid"]
    current_endpoint = args["currentendpoint"]
    nranks = 2
    self.initCommunicator(startup_prog, rank, nranks, True, current_endpoint,
                          endpoints)
    self.rank = rank
    result = self.get_model(train_prog, startup_prog, args["col_type"])
    device_id = int(os.getenv("FLAGS_selected_mlus", "0"))
    place = fluid.MLUPlace(device_id)
    exe = fluid.Executor(place)
    exe.run(startup_prog)
    np.random.seed(os.getpid())
    np_data_type = DataTypeCast(args["data_type"])
    indata = np.random.random((10, 1000)).astype(np_data_type)
    out = exe.run(train_prog,
                  feed={'tindata': indata},
                  fetch_list=[result.name])
    sys.stdout.buffer.write(pickle.dumps(out))
def test_momentum_static(self):
    paddle.enable_static()
    place = fluid.MLUPlace(0)
    main = fluid.Program()
    with fluid.program_guard(main):
        x = fluid.layers.data(name='x', shape=[13], dtype='float32')
        y = fluid.layers.data(name='y', shape=[1], dtype='float32')
        y_predict = fluid.layers.fc(input=x, size=1, act=None)
        cost = fluid.layers.square_error_cost(input=y_predict, label=y)
        avg_cost = fluid.layers.mean(cost)

        momentum_optimizer = paddle.fluid.contrib.optimizer.Momentum(
            learning_rate=0.1, momentum=0.9)
        momentum_optimizer.minimize(avg_cost)

        fetch_list = [avg_cost]
        train_reader = paddle.batch(paddle.dataset.uci_housing.train(),
                                    batch_size=1)
        feeder = fluid.DataFeeder(place=place, feed_list=[x, y])
        exe = fluid.Executor(place)
        exe.run(fluid.default_startup_program())
        for data in train_reader():
            exe.run(main, feed=feeder.feed(data), fetch_list=fetch_list)
def test_api(self):
    x_NHWC = np.random.random([2, 5, 5, 3]).astype("float32")
    x_NCHW = np.random.random([2, 3, 5, 5]).astype("float32")

    input_NHWC = fluid.layers.data(name="input_NHWC",
                                   shape=[2, 5, 5, 3],
                                   append_batch_size=False,
                                   dtype="float32")
    input_NCHW = fluid.layers.data(name="input_NCHW",
                                   shape=[2, 3, 5, 5],
                                   append_batch_size=False,
                                   dtype="float32")
    input_NHWC_negative = fluid.layers.data(name="input_NHWC_negative",
                                            shape=[2, -1, 5, 3],
                                            append_batch_size=False,
                                            dtype="float32")
    input_NCHW_negative = fluid.layers.data(name="input_NCHW_negative",
                                            shape=[2, 3, -1, -1],
                                            append_batch_size=False,
                                            dtype="float32")

    ksize = [3, 3]
    out_1 = fluid.layers.pool2d(input=input_NHWC,
                                pool_size=ksize,
                                pool_type="max",
                                pool_padding=[1, 1],
                                data_format="NHWC")
    out_2 = fluid.layers.pool2d(input=input_NHWC,
                                pool_size=ksize,
                                pool_type="avg",
                                pool_padding=[[0, 0], [1, 1], [1, 1], [0, 0]],
                                data_format="NHWC")
    out_3 = fluid.layers.pool2d(input=input_NCHW,
                                pool_size=ksize,
                                pool_type="avg",
                                pool_padding=[[0, 0], [0, 0], [1, 1], [1, 1]],
                                data_format="NCHW")
    out_4 = fluid.layers.pool2d(input=input_NCHW,
                                pool_size=ksize,
                                pool_type="avg",
                                pool_padding=[1, 2, 1, 0],
                                data_format="NCHW")
    # test VALID
    out_5 = fluid.layers.pool2d(input=input_NCHW,
                                pool_size=ksize,
                                pool_type="avg",
                                pool_padding="VALID",
                                data_format="NCHW")
    out_6 = fluid.layers.pool2d(input=input_NHWC,
                                pool_size=ksize,
                                pool_type="max",
                                pool_padding="VALID",
                                data_format="NHWC")
    # test SAME
    out_7 = fluid.layers.pool2d(input=input_NCHW,
                                pool_size=[4, 4],
                                pool_type="avg",
                                pool_padding="SAME",
                                data_format="NCHW")
    out_8 = fluid.layers.pool2d(input=input_NHWC,
                                pool_size=[4, 4],
                                pool_type="max",
                                pool_padding="SAME",
                                data_format="NHWC")
    # test negative (dynamic) dimensions
    out_9 = fluid.layers.pool2d(input=input_NHWC_negative,
                                pool_size=ksize,
                                pool_type="avg",
                                pool_padding=[0, 0],
                                data_format="NHWC")
    assert out_9.shape == (2, -1, 3, 3)
    out_10 = fluid.layers.pool2d(input=input_NCHW_negative,
                                 pool_size=ksize,
                                 pool_type="avg",
                                 pool_padding=[0, 0],
                                 data_format="NCHW")
    assert out_10.shape == (2, 3, -1, -1)

    exe = fluid.Executor(place=fluid.MLUPlace(0))
    [res_1, res_2, res_3, res_4, res_5, res_6, res_7, res_8] = exe.run(
        fluid.default_main_program(),
        feed={
            "input_NHWC": x_NHWC,
            "input_NCHW": x_NCHW,
            "input_NHWC_negative": x_NHWC,
            "input_NCHW_negative": x_NCHW
        },
        fetch_list=[out_1, out_2, out_3, out_4, out_5, out_6, out_7, out_8])

    assert np.allclose(
        res_1,
        pool2D_forward_naive(x=x_NHWC,
                             ksize=ksize,
                             pool_type="max",
                             strides=[1, 1],
                             paddings=[1, 1],
                             data_format="NHWC"))
    assert np.allclose(
        res_2,
        pool2D_forward_naive(x=x_NHWC,
                             ksize=ksize,
                             pool_type="avg",
                             strides=[1, 1],
                             paddings=[1, 1, 1, 1],
                             data_format="NHWC"))
    assert np.allclose(res_3,
                       pool2D_forward_naive(x=x_NCHW,
                                            ksize=ksize,
                                            pool_type="avg",
                                            strides=[1, 1],
                                            paddings=[1, 1, 1, 1],
                                            data_format="NCHW"),
                       rtol=0.07,
                       atol=1e-05)
    assert np.allclose(res_4,
                       pool2D_forward_naive(x=x_NCHW,
                                            ksize=ksize,
                                            pool_type="avg",
                                            strides=[1, 1],
                                            paddings=[1, 2, 1, 0],
                                            data_format="NCHW"),
                       rtol=0.07,
                       atol=1e-05)
    # VALID
    assert np.allclose(
        res_5,
        pool2D_forward_naive(
            x=x_NCHW,
            ksize=ksize,
            pool_type="avg",
            strides=[1, 1],
            paddings=[10, 20],  # any value works; padding is ignored by VALID
            padding_algorithm="VALID",
            data_format="NCHW"),
        rtol=0.07,
        atol=1e-05)
    assert np.allclose(
        res_6,
        pool2D_forward_naive(x=x_NHWC,
                             ksize=ksize,
                             pool_type="max",
                             strides=[1, 1],
                             paddings=[10, 20],
                             padding_algorithm="VALID",
                             data_format="NHWC"))
    # SAME
    assert np.allclose(res_7,
                       pool2D_forward_naive(x=x_NCHW,
                                            ksize=[4, 4],
                                            pool_type="avg",
                                            strides=[1, 1],
                                            paddings=[10, 20],
                                            padding_algorithm="SAME",
                                            data_format="NCHW"),
                       rtol=0.07,
                       atol=1e-05)
    assert np.allclose(
        res_8,
        pool2D_forward_naive(x=x_NHWC,
                             ksize=[4, 4],
                             pool_type="max",
                             strides=[1, 1],
                             paddings=[10, 20],
                             padding_algorithm="SAME",
                             data_format="NHWC"))
def setUp(self):
    self.places = [fluid.CPUPlace()]
    if core.is_compiled_with_mlu():
        self.places.append(fluid.MLUPlace(0))
    self.init_test()
def test_vs(self, place=fluid.MLUPlace(0)):
    places = [fluid.MLUPlace(0)]
    for place in places:
        self.__test_vs(place=place)
def setUp(self):
    self.original_dtyep = paddle.get_default_dtype()
    paddle.set_default_dtype("float32")
    self.places = [fluid.CPUPlace()]
    if core.is_compiled_with_mlu():
        self.places.append(fluid.MLUPlace(0))
def test_errors(self):
    with program_guard(Program(), Program()):
        # The input type of cast_op must be Variable.
        x1 = fluid.create_lod_tensor(np.array([[-1]]), [[1]],
                                     fluid.MLUPlace(0))
        self.assertRaises(TypeError, fluid.layers.cast, x1, 'int32')