def test_case9(self):
    paddle.disable_static()
    model = ModelCase5()
    predictor = LatencyPredictor()
    model_file, param_file = save_det_model(model,
                                            input_shape=[1, 255, 13, 13],
                                            save_dir="./inference_model",
                                            data_type='fp32')
    pbmodel_file = opt_model(model_file=model_file,
                             param_file=param_file,
                             optimize_out_type='protobuf')

    paddle.enable_static()
    with open(pbmodel_file, "rb") as f:
        fluid_program = paddle.fluid.framework.Program.parse_from_string(
            f.read())
        graph = paddleslim.core.GraphWrapper(fluid_program)
        graph_keys = predictor._get_key_info_from_graph(graph=graph)
        assert len(graph_keys) > 0
def test_moveaxis1(self):
    x_np = np.random.randn(2, 3, 4, 5, 7).astype('float32')
    expected = np.moveaxis(x_np, [0, 4, 3, 2], [1, 3, 2, 0])
    paddle.enable_static()
    with paddle.static.program_guard(fluid.Program()):
        x = paddle.static.data("x", shape=[2, 3, 4, 5, 7], dtype='float32')
        out = paddle.moveaxis(x, [0, 4, 3, 2], [1, 3, 2, 0])
        exe = paddle.static.Executor()
        out_np = exe.run(feed={"x": x_np}, fetch_list=[out])[0]

    self.assertEqual(np.array_equal(out_np, expected), True)

    paddle.disable_static()
    x = paddle.to_tensor(x_np)
    out = paddle.moveaxis(x, [0, 4, 3, 2], [1, 3, 2, 0])
    self.assertEqual(out.shape, [4, 2, 5, 7, 3])
    self.assertEqual(np.array_equal(out.numpy(), expected), True)
    paddle.enable_static()
def test_matrix_nms_error(self):
    with self.static_graph():
        bboxes = paddle.static.data(name='bboxes',
                                    shape=[7, 1200, 4],
                                    dtype='float32')
        scores = paddle.static.data(name='data_error',
                                    shape=[7, 21, 1200],
                                    dtype='int32')
        self.assertRaises(TypeError,
                          ops.matrix_nms,
                          bboxes=bboxes,
                          scores=scores,
                          score_threshold=0.01,
                          post_threshold=0.,
                          nms_top_k=400,
                          keep_top_k=200,
                          return_index=True)

    paddle.disable_static()
def test_dygraph_functional(place, input_np, label_np, reduction='mean',
                            weight_np=None):
    paddle.disable_static()
    input = paddle.to_tensor(input_np)
    label = paddle.to_tensor(label_np)

    if weight_np is not None:
        weight = paddle.to_tensor(weight_np)
        dy_res = paddle.nn.functional.binary_cross_entropy(
            input, label, weight=weight, reduction=reduction)
    else:
        dy_res = paddle.nn.functional.binary_cross_entropy(
            input, label, reduction=reduction)
    dy_result = dy_res.numpy()
    paddle.enable_static()
    return dy_result
def test_1(self):
    x_data = np.arange(3, 6).reshape((3, 1)).astype(np.int64)
    y_data = np.arange(6, 12).reshape((3, 2)).astype(np.float32)
    paddle.disable_static(paddle.CPUPlace())
    x = paddle.to_tensor(x_data, stop_gradient=False)
    y = paddle.to_tensor(y_data, stop_gradient=False)

    embedding = paddle.nn.Embedding(10, 3, sparse=True)

    w0 = np.full(shape=(10, 3), fill_value=2).astype(np.float32)
    embedding.weight.set_value(w0)

    adam = paddle.optimizer.Adam(parameters=[embedding.weight],
                                 learning_rate=0.01)
    adam.clear_grad()

    out = embedding(x)
    out.backward()
    adam.step()
def run_gpu_fleet_api_trainer(self, args):
    import paddle.distributed.fleet as fleet
    import paddle.distributed.fleet.base.role_maker as role_maker
    # 1. enable dygraph
    paddle.disable_static()

    # 2. init seed
    seed = 90
    paddle.static.default_startup_program().random_seed = seed
    paddle.static.default_main_program().random_seed = seed
    np.random.seed(seed)
    random.seed(seed)
    # get trainer id
    args.trainer_id = paddle.distributed.get_rank()

    # 3. init parallel env
    if args.update_method == "nccl2":
        fleet.init(is_collective=True)

    # 4. train model
    model, train_reader, opt = self.get_model()
    if args.update_method == "nccl2":
        opt = fleet.distributed_optimizer(opt)
        model = fleet.distributed_model(model)

    out_losses = []
    for step_id, data in enumerate(train_reader()):
        data = self._get_data(data, args)
        if step_id == RUN_STEP:
            break
        loss = self.run_one_loop(model, opt, data)
        out_losses.append(loss.numpy())

        if args.update_method == "nccl2":
            loss = model.scale_loss(loss)

        loss.backward()
        if args.update_method == "nccl2":
            model.apply_collective_grads()

        opt.step()
        opt.clear_grad()
    print_to_out(out_losses)
def test_initialized_list_and_error(self):
    paddle.disable_static()
    init_data = [
        numpy.random.random(shape).astype('float32') for shape in self.shapes
    ]
    array = paddle.tensor.create_array(
        'float32', [paddle.to_tensor(x) for x in init_data])
    for res, gt in zip(array, init_data):
        self.assertTrue(numpy.array_equal(res, gt))

    # test for None
    array = paddle.tensor.create_array('float32')
    self.assertTrue(isinstance(array, list))
    self.assertEqual(len(array), 0)

    # test error
    with self.assertRaises(TypeError):
        paddle.tensor.create_array('float32', 'str')
def test_api(self):
    shape = [1000, 784]
    place = paddle.CUDAPlace(
        0) if core.is_compiled_with_cuda() else paddle.CPUPlace()
    paddle.disable_static(place)
    x1 = paddle.randn(shape, 'float32')
    x2 = paddle.randn(shape, 'float64')

    dim_1 = paddle.fluid.layers.fill_constant([1], "int64", 20)
    dim_2 = paddle.fluid.layers.fill_constant([1], "int32", 50)
    x3 = paddle.randn(shape=[dim_1, dim_2, 784])

    var_shape = paddle.to_tensor(np.array(shape))
    x4 = paddle.randn(var_shape)

    for out in [x1, x2, x3, x4]:
        self.assertAlmostEqual(np.mean(out.numpy()), .0, delta=0.1)
        self.assertAlmostEqual(np.std(out.numpy()), 1., delta=0.1)
    paddle.enable_static()
def setUp(self):
    # Since `set_device` sets global state, call it in `setUp` rather than in
    # `__init__` to avoid inheriting a wrong device set by another test case.
    place = paddle.set_device(self.place)
    paddle.disable_static(place)
    rnn1 = SimpleRNN(16,
                     32,
                     2,
                     time_major=self.time_major,
                     direction=self.direction)
    rnn2 = paddle.nn.SimpleRNN(16,
                               32,
                               2,
                               time_major=self.time_major,
                               direction=self.direction)
    convert_params_for_net(rnn1, rnn2)

    self.rnn1 = rnn1
    self.rnn2 = rnn2
def test_class(self):
    paddle.disable_static()
    for place in self.places:
        input_shape = (3, 4, 5, 6, 7)
        pad = [1, 2, 2, 1, 1, 0]
        value = 100
        input_data = np.random.rand(*input_shape).astype(np.float32)

        pad_reflection = nn.Pad3D(padding=pad, mode="reflect")
        pad_replication = nn.Pad3D(padding=pad, mode="replicate")
        pad_constant = nn.Pad3D(padding=pad, mode="constant", value=value)
        pad_circular = nn.Pad3D(padding=pad, mode="circular")

        data = paddle.to_tensor(input_data)

        output = pad_reflection(data)
        np_out = self._get_numpy_out(input_data,
                                     pad,
                                     "reflect",
                                     data_format="NCDHW")
        self.assertTrue(np.allclose(output.numpy(), np_out))

        output = pad_replication(data)
        np_out = self._get_numpy_out(input_data,
                                     pad,
                                     "replicate",
                                     data_format="NCDHW")
        self.assertTrue(np.allclose(output.numpy(), np_out))

        output = pad_constant(data)
        np_out = self._get_numpy_out(input_data,
                                     pad,
                                     "constant",
                                     value=value,
                                     data_format="NCDHW")
        self.assertTrue(np.allclose(output.numpy(), np_out))

        output = pad_circular(data)
        np_out = self._get_numpy_out(input_data,
                                     pad,
                                     "circular",
                                     data_format="NCDHW")
        self.assertTrue(np.allclose(output.numpy(), np_out))
def test_moveaxis2(self):
    x_np = np.random.randn(2, 3, 5)
    expected = np.moveaxis(x_np, -2, -1)
    paddle.enable_static()
    with paddle.static.program_guard(fluid.Program()):
        x = paddle.static.data("x", shape=[2, 3, 5], dtype='float64')
        out = x.moveaxis(-2, -1)
        exe = paddle.static.Executor()
        out_np = exe.run(feed={"x": x_np}, fetch_list=[out])[0]

    self.assertEqual(np.array_equal(out_np, expected), True)

    paddle.disable_static()
    x = paddle.to_tensor(x_np)
    out = x.moveaxis(-2, -1)
    self.assertEqual(out.shape, [2, 5, 3])
    self.assertEqual(np.array_equal(out.numpy(), expected), True)
    paddle.enable_static()
def test_dygraph_api(self):
    paddle.disable_static(self.place)
    # x dtype in ["bool", "int32", "int64", "float16", "float32", "float64"]
    for x in [
            self.x_bool, self.x_int32, self.x_int64, self.x_float16,
            self.x_float32, self.x_float64
    ]:
        x_inputs = paddle.to_tensor(x)
        # self.dtype in ["bool", "int32", "int64", "float16", "float32", "float64"]
        for dtype in self.dtype:
            out = paddle.randint_like(x_inputs, low=-100, high=100,
                                      dtype=dtype)
            self.assertEqual(out.numpy().dtype, np.dtype(dtype))
            self.assertTrue(
                ((out.numpy() >= -100) & (out.numpy() <= 100)).all())
    paddle.enable_static()
def test_variable(self):
    paddle.enable_static()
    with paddle.static.program_guard(paddle.static.Program(),
                                     paddle.static.Program()):
        x = paddle.static.data(name='x', shape=[3, 2], dtype='float32')
        y = paddle.static.data(name='y', shape=[3, 2], dtype='float32')
        self.assertEqual(
            paddle.jit.dy2static.convert_shape_compare(x, "is", x, "is not",
                                                       y), True)
        self.assertEqual(
            paddle.jit.dy2static.convert_shape_compare(x, "is not", x,
                                                       "is not", y), False)
        self.assertEqual(
            paddle.jit.dy2static.convert_shape_compare(x, "is", x, "is", y),
            False)

        eq_out = paddle.jit.dy2static.convert_shape_compare(x, "==", y)
        not_eq_out = paddle.jit.dy2static.convert_shape_compare(x, "!=", y)
        long_eq_out = paddle.jit.dy2static.convert_shape_compare(
            x, "==", x, "!=", y)

        place = paddle.CUDAPlace(
            0) if paddle.is_compiled_with_cuda() else paddle.CPUPlace()
        exe = paddle.static.Executor(place)
        x_y_eq_out = exe.run(
            feed={
                "x": np.ones([3, 2]).astype(np.float32),
                "y": np.ones([3, 2]).astype(np.float32)
            },
            fetch_list=[eq_out, not_eq_out, long_eq_out])
        np.testing.assert_array_equal(
            np.array(x_y_eq_out), np.array([[True], [False], [False]]))

        set_a_zero = np.ones([3, 2]).astype(np.float32)
        set_a_zero[0][0] = 0.0
        x_y_not_eq_out = exe.run(
            feed={
                "x": np.ones([3, 2]).astype(np.float32),
                "y": set_a_zero
            },
            fetch_list=[eq_out, not_eq_out, long_eq_out])
        np.testing.assert_array_equal(
            np.array(x_y_not_eq_out), np.array([[False], [True], [True]]))
    paddle.disable_static()
def test_static(self):
    paddle.enable_static()
    main_program = paddle.static.Program()
    startup_program = paddle.static.Program()
    x_np = np.random.random(size=(4, 4)).astype('float32')
    y_np = np.random.random(size=(4, 4)).astype('float32')
    label_np = np.random.randint(2, size=(4, 1)).astype('int64')
    with paddle.static.program_guard(main_program, startup_program):
        x = paddle.static.data(name="x", shape=[4, 4], dtype='float32')
        y = paddle.static.data(name="y", shape=[4, 4], dtype='float32')
        label = paddle.static.data(name="label", shape=[4, 1], dtype='int64')

        z = paddle.add(x, y)
        var = y[0, :]
        z[0, :] = var

        prediction = paddle.static.nn.fc(x=z, size=2, activation='softmax')

        cost = paddle.nn.functional.cross_entropy(input=prediction,
                                                  label=label)
        loss = paddle.mean(cost)
        sgd = paddle.optimizer.SGD(learning_rate=0.01)
        sgd.minimize(loss)

        exe = paddle.static.Executor(paddle.CPUPlace())
        exe.run(startup_program)

        var_grad, z_grad = exe.run(
            main_program,
            feed={
                "x": x_np,
                "y": y_np,
                "label": label_np
            },
            fetch_list=[var.name + "@GRAD", z.name + "@GRAD"])

        self.assertTrue((var_grad == z_grad[0, :]).all())
    paddle.disable_static()
def test_tensor_str(self):
    paddle.enable_static()
    paddle.disable_static(paddle.CPUPlace())
    paddle.seed(10)
    a = paddle.rand([10, 20])
    paddle.set_printoptions(4, 100, 3)
    a_str = str(a)

    expected = '''Tensor(shape=[10, 20], dtype=float32, place=CPUPlace, stop_gradient=True,
       [[0.2727, 0.5489, 0.8655, ..., 0.2916, 0.8525, 0.9000],
        [0.3806, 0.8996, 0.0928, ..., 0.9535, 0.8378, 0.6409],
        [0.1484, 0.4038, 0.8294, ..., 0.0148, 0.6520, 0.4250],
        ...,
        [0.3426, 0.1909, 0.7240, ..., 0.4218, 0.2676, 0.5679],
        [0.5561, 0.2081, 0.0676, ..., 0.9778, 0.3302, 0.9559],
        [0.2665, 0.8483, 0.5389, ..., 0.4956, 0.6862, 0.9178]])'''

    self.assertEqual(a_str, expected)
    paddle.enable_static()
def init_evaluate(self, eval_dataset: paddle.io.Dataset, batch_size: int,
                  num_workers: int) -> paddle.io.DataLoader:
    use_gpu = True
    place = paddle.CUDAPlace(
        ParallelEnv().dev_id) if use_gpu else paddle.CPUPlace()
    paddle.disable_static(place)

    batch_sampler = paddle.io.DistributedBatchSampler(
        eval_dataset, batch_size=batch_size, shuffle=False, drop_last=False)

    loader = paddle.io.DataLoader(eval_dataset,
                                  batch_sampler=batch_sampler,
                                  places=place,
                                  num_workers=num_workers,
                                  return_list=True)
    return loader
def conj_static(func, shape, dtype, np_input):
    paddle.enable_static()
    paddle.set_device("cpu")
    with static.scope_guard(static.Scope()):
        with static.program_guard(static.Program()):
            x = static.data(name="x", shape=shape, dtype=dtype)
            x.stop_gradient = False
            out = func(x)
            sum_out = paddle.sum(out)
            static.append_backward(sum_out)

            exe = static.Executor()
            exe.run(static.default_startup_program())

            out_v, x_grad_v = exe.run(static.default_main_program(),
                                      feed={"x": np_input},
                                      fetch_list=[out.name, x.name + "@GRAD"])
    paddle.disable_static()
    return out_v, x_grad_v
def test_dygraph_single(self):
    paddle.disable_static()
    paddle.distributed.init_parallel_env()

    layer = LinearNet()
    loss_fn = nn.MSELoss()
    adam = paddle.optimizer.Adam(learning_rate=0.001,
                                 parameters=layer.parameters())

    adam = fleet.distributed_optimizer(adam)
    dp_layer = fleet.distributed_model(layer)
    for step in range(2):
        inputs = paddle.randn([10, 10], 'float32')
        outputs = dp_layer(inputs)
        labels = paddle.randn([10, 1], 'float32')
        loss = loss_fn(outputs, labels)
        loss.backward()
        adam.step()
        adam.clear_grad()
def main():
    # Switch to dynamic graph (imperative) mode.
    paddle.disable_static()
    content = load_image(FLAGS.content_image)
    style = load_image(FLAGS.style_image, shape=tuple(content.shape[-2:]))

    net = StyleTransferModel()
    model = paddle.Model(net)

    style_loss = StyleTransferLoss()

    # Initialize the image to be generated from the content image.
    target = net.create_parameter(shape=content.shape)
    target.set_value(content.numpy())

    optimizer = fluid.optimizer.Adam(parameter_list=[target],
                                     learning_rate=FLAGS.lr)
    model.prepare(optimizer, style_loss)

    content_features = model.test_batch(content)
    style_features = model.test_batch(style)

    # Combine the two sets of features and pass them to the loss function
    # as the label.
    feats = style_features + [content_features[-2]]

    # Train for FLAGS.steps steps, logging the loss every 500 steps.
    steps = FLAGS.steps
    for i in range(steps):
        outs = model.train_batch(target, feats)
        if i % 500 == 0:
            print('iters:', i, 'loss:', outs[0][0])

    if not os.path.exists(FLAGS.save_dir):
        os.makedirs(FLAGS.save_dir)
    # Save the generated image.
    name = FLAGS.content_image.split(os.sep)[-1]
    output_path = os.path.join(FLAGS.save_dir, 'generated_' + name)
    cv2.imwrite(
        output_path,
        cv2.cvtColor((image_restore(target) * 255).astype('uint8'),
                     cv2.COLOR_RGB2BGR))
def test_collect_fpn_proposals_error(self):
    def generate_input(bbox_type, score_type, name):
        multi_bboxes = []
        multi_scores = []
        for i in range(4):
            bboxes = paddle.static.data(name='rois' + name + str(i),
                                        shape=[10, 4],
                                        dtype=bbox_type,
                                        lod_level=1)
            scores = paddle.static.data(name='scores' + name + str(i),
                                        shape=[10, 1],
                                        dtype=score_type,
                                        lod_level=1)
            multi_bboxes.append(bboxes)
            multi_scores.append(scores)
        return multi_bboxes, multi_scores

    with self.static_graph():
        bbox1 = paddle.static.data(name='rois',
                                   shape=[5, 10, 4],
                                   dtype='float32',
                                   lod_level=1)
        score1 = paddle.static.data(name='scores',
                                    shape=[5, 10, 1],
                                    dtype='float32',
                                    lod_level=1)
        bbox2, score2 = generate_input('int32', 'float32', '2')

        self.assertRaises(TypeError,
                          ops.collect_fpn_proposals,
                          multi_rois=bbox1,
                          multi_scores=score1,
                          min_level=2,
                          max_level=5,
                          post_nms_top_n=2000)
        self.assertRaises(TypeError,
                          ops.collect_fpn_proposals,
                          multi_rois=bbox2,
                          multi_scores=score2,
                          min_level=2,
                          max_level=5,
                          post_nms_top_n=2000)

    paddle.disable_static()
def tracking(self,
             video_stream,
             output_dir='mot_result',
             visualization=True,
             draw_threshold=0.5,
             use_gpu=False):
    '''
    Track a video and save the prediction results into output_dir if
    visualization is set to True.

    video_stream: the video path
    output_dir: the directory in which to save the results
    visualization: if True, save the results as a video, otherwise not
    draw_threshold: the score threshold for drawing prediction results
    use_gpu: if True, run the computation on GPU, otherwise on CPU
    '''
    self.video_stream = video_stream
    self.output_dir = output_dir
    self.visualization = visualization
    self.draw_threshold = draw_threshold
    self.use_gpu = use_gpu

    cfg = load_config(
        os.path.join(self.directory, 'config',
                     'jde_darknet53_30e_1088x608.yml'))
    check_config(cfg)

    place = 'gpu:0' if use_gpu else 'cpu'
    place = paddle.set_device(place)
    paddle.disable_static()
    tracker = StreamTracker(cfg, mode='test')

    # load weights
    tracker.load_weights_jde(self.pretrained_model)
    signal.signal(signal.SIGINT, self.signalhandler)

    # inference
    tracker.videostream_predict(video_stream=video_stream,
                                output_dir=output_dir,
                                data_type='mot',
                                model_type='JDE',
                                visualization=visualization,
                                draw_threshold=draw_threshold)
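# --- Usage sketch (not part of the original sources) -------------------------
# A minimal, hypothetical example of calling the `tracking` method above.
# `JDETrackerModule` is an assumed name for the class that owns the method,
# and 'input_video.mp4' is a placeholder path; neither appears in the snippet.
tracker_module = JDETrackerModule()
tracker_module.tracking(video_stream='input_video.mp4',  # local video to track
                        output_dir='mot_result',         # where results are written
                        visualization=True,              # save the annotated video
                        draw_threshold=0.5,              # score threshold for drawing
                        use_gpu=False)                   # run on CPU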
def func_dirac(self):
    self.config()
    paddle.set_default_dtype(self.dtype)

    paddle.disable_static()
    conv = self.conv_layer(self.in_channels,
                           self.out_channels,
                           self.kernel_size,
                           weight_attr=self.weight_attr)
    weight_dygraph = conv.weight.numpy()

    paddle.enable_static()
    start_prog = paddle.static.Program()
    main_prog = paddle.static.Program()
    with paddle.static.program_guard(main_prog, start_prog):
        inp = paddle.rand(self.input_shape)
        conv = self.conv_layer(self.in_channels,
                               self.out_channels,
                               self.kernel_size,
                               weight_attr=self.weight_attr)

        output = conv(inp)
        block = start_prog.global_block()
        self.assertEqual(len(block.ops), self.num_ops)
        self.assertEqual(block.ops[0].type, 'fill_constant')
        self.assertEqual(block.ops[1].type, 'reshape2')
        self.assertEqual(block.ops[2].type, 'assign_value')
        self.assertEqual(block.ops[3].type, 'assign_value')
        self.assertEqual(block.ops[4].type, 'scatter')
        self.assertEqual(block.ops[5].type, 'reshape2')

        exe = paddle.static.Executor()
        exe.run(start_prog)
        fetch = exe.run(main_prog, fetch_list=[inp, output, conv.weight])
        conv_input = fetch[0]
        conv_output = fetch[1]
        weight_static = fetch[2]

    self.check_result(weight_dygraph, weight_static, conv_input, conv_output)
def _test_dygraph(self, place, kwargs):
    paddle.disable_static(place)

    best = float("-10000") if kwargs['mode'] == "max" else float("10000")
    current_lr = 1.0
    cooldown_counter = 0
    num_bad_epochs = 0
    var_list = [best, current_lr, cooldown_counter, num_bad_epochs]

    linear = paddle.nn.Linear(10, 10)
    scheduler = paddle.optimizer.lr.ReduceOnPlateau(**kwargs)
    adam = paddle.optimizer.Adam(learning_rate=scheduler,
                                 parameters=linear.parameters())

    for epoch in range(20):
        for batch_id in range(1):
            x = paddle.to_tensor(epoch).astype('float32')
            loss = paddle.sin(x)
            loss.backward()
            adam.step()
            adam.clear_grad()

        scheduler.step(loss)
        # get lr from paddle
        current_lr = adam.get_lr()
        # get lr from python
        expected_lr = reduce_lr_on_plateau(kwargs['factor'],
                                           kwargs['threshold'],
                                           kwargs['cooldown'],
                                           kwargs['patience'], kwargs['mode'],
                                           kwargs['threshold_mode'], loss,
                                           var_list)
        self.assertEqual(current_lr, expected_lr)

    state_dict = adam.state_dict()
    scheduler1 = paddle.optimizer.lr.ReduceOnPlateau(**kwargs)
    adam1 = paddle.optimizer.Adam(learning_rate=scheduler1,
                                  parameters=linear.parameters())
    adam1.set_state_dict(state_dict)
    self.assertEqual(scheduler.cooldown_counter, scheduler1.cooldown_counter)
    self.assertEqual(scheduler.best.numpy()[0], scheduler1.best)
    self.assertEqual(scheduler.num_bad_epochs, scheduler1.num_bad_epochs)
    self.assertEqual(scheduler.last_epoch, scheduler1.last_epoch)
    self.assertEqual(scheduler.last_lr, scheduler1.last_lr)
def test_save_load(self):
    paddle.disable_static()
    paddle.set_device('gpu')

    amp_level = {"level": "O1", "init_loss_scaling": 128}
    paddle.seed(2021)
    model = self.get_model(amp_level)
    transform = T.Compose([T.Transpose(), T.Normalize([127.5], [127.5])])
    train_dataset = MNIST(mode='train', transform=transform)
    model.fit(train_dataset,
              epochs=1,
              batch_size=64,
              num_iters=2,
              log_freq=1)
    model.save('./lenet_amp')

    with paddle.fluid.unique_name.guard():
        paddle.seed(2021)
        new_model = self.get_model(amp_level)
        train_dataset = MNIST(mode='train', transform=transform)
        new_model.fit(train_dataset,
                      epochs=1,
                      batch_size=64,
                      num_iters=1,
                      log_freq=1)

    # not equal before load
    self.assertNotEqual(new_model._scaler.state_dict()['incr_count'],
                        model._scaler.state_dict()['incr_count'])
    print((new_model._scaler.state_dict()['incr_count'],
           model._scaler.state_dict()['incr_count']))

    # equal after load
    new_model.load('./lenet_amp')
    self.assertEqual(new_model._scaler.state_dict()['incr_count'],
                     model._scaler.state_dict()['incr_count'])
    self.assertEqual(new_model._scaler.state_dict()['decr_count'],
                     model._scaler.state_dict()['decr_count'])
    self.assertTrue(
        np.array_equal(
            new_model._optimizer.state_dict()
            ['conv2d_1.w_0_moment1_0'].numpy(),
            model._optimizer.state_dict()
            ['conv2d_1.w_0_moment1_0'].numpy()))
def func_uva_sample_result(self):
    paddle.disable_static()
    if paddle.fluid.core.is_compiled_with_cuda():
        row = None
        if fluid.framework.in_dygraph_mode():
            row = paddle.fluid.core.eager.to_uva_tensor(
                self.row.astype(self.row.dtype), 0)
            sorted_eid = paddle.fluid.core.eager.to_uva_tensor(
                self.sorted_eid.astype(self.sorted_eid.dtype), 0)
        else:
            row = paddle.fluid.core.to_uva_tensor(
                self.row.astype(self.row.dtype))
            sorted_eid = paddle.fluid.core.to_uva_tensor(
                self.sorted_eid.astype(self.sorted_eid.dtype))
        colptr = paddle.to_tensor(self.colptr)
        nodes = paddle.to_tensor(self.nodes)

        edge_src, edge_dst, sample_index, reindex_nodes, edge_eids = \
            paddle.incubate.graph_khop_sampler(row,
                                               colptr,
                                               nodes,
                                               self.sample_sizes,
                                               sorted_eids=sorted_eid,
                                               return_eids=True)
        edge_src = edge_src.reshape([-1])
        edge_dst = edge_dst.reshape([-1])
        sample_index = sample_index.reshape([-1])

        for i in range(len(edge_src)):
            edge_src[i] = sample_index[edge_src[i]]
            edge_dst[i] = sample_index[edge_dst[i]]

        for n in self.nodes:
            edge_src_n = edge_src[edge_dst == n]
            if edge_src_n.shape[0] == 0:
                continue
            self.assertTrue(
                edge_src_n.shape[0] == paddle.unique(edge_src_n).shape[0])
            self.assertTrue(
                edge_src_n.shape[0] == self.sample_sizes[0]
                or edge_src_n.shape[0] == len(self.dst_src_dict[n]))
            in_neighbors = np.isin(edge_src_n.numpy(), self.dst_src_dict[n])
            self.assertTrue(np.sum(in_neighbors) == in_neighbors.shape[0])
def test_type_error(unit_test, use_gpu, type_str_map):
    def check_type(op_str, x, y, binary_op):
        op = getattr(paddle, op_str)
        error_type = TypeError
        if isinstance(x, np.ndarray):
            x = paddle.to_tensor(x)
            y = paddle.to_tensor(y)
            error_type = BaseException
        if binary_op:
            if type_str_map['x'] != 'bool' or type_str_map['y'] != 'bool':
                unit_test.assertRaises(error_type, op, x=x, y=y)
            if not fluid.in_dygraph_mode():
                unit_test.assertRaises(error_type, op, x=x, y=y, out=1)
        else:
            if type_str_map['x'] != 'bool':
                unit_test.assertRaises(error_type, op, x=x)
            if not fluid.in_dygraph_mode():
                unit_test.assertRaises(error_type, op, x=x, out=1)

    place = paddle.CPUPlace()
    if use_gpu and fluid.core.is_compiled_with_cuda():
        place = paddle.CUDAPlace(0)
    for op_data in TEST_META_OP_DATA:
        meta_data = dict(op_data)
        binary_op = meta_data['binary_op']

        paddle.disable_static(place)
        x = np.random.choice(a=[0, 1], size=[10]).astype(type_str_map['x'])
        y = np.random.choice(a=[0, 1], size=[10]).astype(type_str_map['y'])
        check_type(meta_data['op_str'], x, y, binary_op)

        paddle.enable_static()
        startup_program = paddle.static.Program()
        main_program = paddle.static.Program()
        with paddle.static.program_guard(main_program, startup_program):
            x = paddle.static.data(name='x',
                                   shape=[10],
                                   dtype=type_str_map['x'])
            y = paddle.static.data(name='y',
                                   shape=[10],
                                   dtype=type_str_map['y'])
            check_type(meta_data['op_str'], x, y, binary_op)
def run_trainer_with_spawn(self, args):
    paddle.disable_static()
    fluid.default_startup_program().random_seed = seed
    fluid.default_main_program().random_seed = seed
    np.random.seed(seed)
    random.seed(seed)
    args.trainer_id = dist.get_rank()

    if args.update_method == "nccl2":
        dist.init_parallel_env()

    model, train_reader, opt = self.get_model()
    if args.update_method == "nccl2":
        if args.find_unused_parameters:
            model = paddle.DataParallel(model, find_unused_parameters=True)
        else:
            model = paddle.DataParallel(model, find_unused_parameters=False)

    out_losses = []
    for step_id, data in enumerate(train_reader()):
        data = self._get_data(data, args)
        if step_id == RUN_STEP:
            break
        if step_id % 3 != 0:
            if args.update_method == "nccl2":
                with model.no_sync():
                    loss = self.run_one_loop(model, opt, data)
                    loss.backward()
            else:
                loss = self.run_one_loop(model, opt, data)
                loss.backward()
        else:
            loss = self.run_one_loop(model, opt, data)
            loss.backward()
            opt.minimize(loss)
            print_to_err(
                type(self).__name__,
                "loss at step %d: %f" % (step_id, loss.numpy()))
            out_losses.append(loss.numpy())
            model.clear_gradients()
    print_to_out(out_losses)
    return out_losses
def test_export_deploy_model(self):
    self.set_seed()
    np.random.seed(201)

    save_dir = os.path.join(tempfile.mkdtemp(),
                            '.cache_test_export_deploy_model')
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    for dynamic in [True, False]:
        paddle.disable_static() if dynamic else None
        prog_translator = ProgramTranslator()
        prog_translator.enable(False) if not dynamic else None
        net = LeNet()
        inputs = [InputSpec([None, 1, 28, 28], 'float32', 'x')]
        model = Model(net, inputs)
        model.prepare()

        tensor_img = np.array(np.random.random((1, 1, 28, 28)),
                              dtype=np.float32)

        model.save(save_dir, training=False)
        ori_results = model.predict_batch(tensor_img)
        fluid.disable_dygraph() if dynamic else None

        place = fluid.CPUPlace() if not fluid.is_compiled_with_cuda(
        ) else fluid.CUDAPlace(0)
        new_scope = fluid.Scope()
        with fluid.scope_guard(new_scope):
            exe = fluid.Executor(place)
            [inference_program, feed_target_names,
             fetch_targets] = (paddle.static.io.load_inference_model(
                 path_prefix=save_dir, executor=exe))
            results = exe.run(inference_program,
                              feed={feed_target_names[0]: tensor_img},
                              fetch_list=fetch_targets)
            np.testing.assert_allclose(results,
                                       ori_results,
                                       rtol=1e-5,
                                       atol=1e-7)
        paddle.enable_static()

    shutil.rmtree(save_dir)
def test_yolo_box_error(self):
    with self.static_graph():
        # x shape [N, C, H, W]; C = K * (5 + class_num), class_num=10, K=2
        x = paddle.static.data(name='x',
                               shape=[1, 30, 7, 7],
                               dtype='float32')
        origin_shape = paddle.static.data(name='origin_shape',
                                          shape=[1, 2],
                                          dtype='int32')

        self.assertRaises(TypeError,
                          ops.yolo_box,
                          x,
                          origin_shape, [10, 13, 30, 13],
                          10.123,
                          0.01,
                          32,
                          scale_x_y=1.2)

    paddle.disable_static()
def setUp(self, use_gpu=False, batch_size=3, dims=5):
    self.use_gpu = use_gpu
    if not use_gpu:
        self.place = fluid.CPUPlace()
        self.gpu_id = -1
    else:
        self.place = fluid.CUDAPlace(0)
        self.gpu_id = 0
    self.batch_size = batch_size
    self.dims = dims
    self.init_numpy_data(batch_size, dims)

    paddle.disable_static(self.place)
    self.init_dynamic_data(batch_size, dims)

    paddle.enable_static()
    self.test_program = fluid.Program()
    self.executor = fluid.Executor(self.place)
    self.init_static_data(batch_size, dims)