def test_rmsprop(self):
    place = paddle.XPUPlace(0)
    paddle.enable_static()
    main = fluid.Program()
    with fluid.program_guard(main):
        x = fluid.layers.data(name='x', shape=[13], dtype='float32')
        y = fluid.layers.data(name='y', shape=[1], dtype='float32')
        y_predict = fluid.layers.fc(input=x, size=1, act=None)
        cost = fluid.layers.square_error_cost(input=y_predict, label=y)
        avg_cost = fluid.layers.mean(cost)

        # Note: the optimizer is constructed with the Linear layer's
        # parameters, but minimize() runs on the static-graph avg_cost
        # defined above.
        linear = paddle.nn.Linear(13, 5)
        rms_optimizer = paddle.optimizer.RMSProp(
            learning_rate=0.1, parameters=linear.parameters())
        rms_optimizer.minimize(avg_cost)

        fetch_list = [avg_cost]
        train_reader = paddle.batch(
            paddle.dataset.uci_housing.train(), batch_size=1)
        feeder = fluid.DataFeeder(place=place, feed_list=[x, y])
        exe = fluid.Executor(place)
        exe.run(fluid.default_startup_program())
        for data in train_reader():
            exe.run(main, feed=feeder.feed(data), fetch_list=fetch_list)

def test_check_grad_stopgrad_dscale_dbias(self):
    if core.is_compiled_with_xpu():
        paddle.enable_static()
        place = paddle.XPUPlace(0)
        self.check_grad_with_place(
            place, ['X'], 'Out', no_grad_set=set(['Scale', 'Bias']))

def test_check_output(self):
    if paddle.is_compiled_with_xpu():
        place = paddle.XPUPlace(0)
        outs = self.calc_output(place)
        outs = [np.array(out) for out in outs]
        outs.sort(key=len)
        self.verify_output(outs)

def test_check_grad_normal(self):
    if paddle.is_compiled_with_xpu():
        place = paddle.XPUPlace(0)
        self.check_grad_with_place(
            place, ['X', 'Y'], 'Out',
            check_dygraph=(not self.use_mkldnn))

def setUp(self):
    self.place = paddle.XPUPlace(0)
    self.op_type = "batch_norm"
    self.dtype = np.float32
    self.shape = [2, 3, 4, 5]
    self.data_layout = "NCHW"
    self.epsilon = 1e-05
    self.momentum = 0.9
    self.set_attrs()

    if self.data_layout == "NHWC":
        channel_size = self.shape[3]
    elif self.data_layout == "NCHW":
        channel_size = self.shape[1]
    else:
        raise ValueError(
            "Unsupported data layout! Only NCHW and NHWC are supported, "
            "but received " + self.data_layout)

    np.random.seed(1024)
    self.x_np = np.random.random_sample(self.shape).astype(self.dtype)
    self.scale_np = np.random.random_sample(
        [channel_size]).astype(self.dtype)
    self.bias_np = np.random.random_sample(
        [channel_size]).astype(self.dtype)
    self.mean_np = np.zeros([channel_size]).astype(self.dtype)
    self.variance_np = np.ones([channel_size]).astype(self.dtype)
    self.saved_mean_np = np.zeros([channel_size]).astype(self.dtype)
    self.saved_variance_np = np.ones([channel_size]).astype(self.dtype)

def test_check_grad(self):
    self.check_grad_with_place(
        inputs_to_check=['W'],
        output_names='Out',
        # set(['Ids']), not set('Ids'): the latter would split the
        # string into the characters {'I', 'd', 's'}.
        no_grad_set=set(['Ids']),
        place=paddle.XPUPlace(0),
        in_place=True)

def _run_static_single(use_cuda, use_xpu, use_npu):
    """
    Test the simple network with the executor running directly,
    using a single CPU/GPU/XPU/NPU.

    Args:
        use_cuda (bool): Whether to run with CUDA.
        use_xpu (bool): Whether to run with XPU.
        use_npu (bool): Whether to run with NPU.
    """
    paddle.enable_static()
    with paddle.static.scope_guard(paddle.static.Scope()):
        train_prog = paddle.static.Program()
        startup_prog = paddle.static.Program()
        startup_prog.random_seed = 1
        with paddle.static.program_guard(train_prog, startup_prog):
            input, out, weight = _simple_network()
            param_grads = paddle.static.append_backward(
                out, parameter_list=[weight.name])[0]

        if use_cuda:
            place = paddle.CUDAPlace(0)
        elif use_xpu:
            place = paddle.XPUPlace(0)
        elif use_npu:
            place = paddle.NPUPlace(0)
        else:
            place = paddle.CPUPlace()

        exe = paddle.static.Executor(place)
        exe.run(startup_prog)
        exe.run(train_prog,
                feed={input.name: _prepare_data(1)},
                fetch_list=[out.name, param_grads[1].name])
    paddle.disable_static()

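# `_simple_network` and `_prepare_data` are helpers defined elsewhere in the
# test module; their definitions are not part of this excerpt. The following
# is a minimal sketch of what they might look like -- the shapes, names, and
# initializer value here are assumptions, not the actual Paddle helpers:
def _simple_network():
    # One data input, one learnable weight, and a scalar output so that
    # append_backward has a loss to differentiate.
    input = paddle.static.data(
        name="input", shape=[None, 2, 2], dtype="float32")
    weight = paddle.create_parameter(
        shape=[2, 2],
        dtype="float32",
        default_initializer=paddle.nn.initializer.Constant(value=0.5))
    out = paddle.sum(paddle.matmul(input, weight))
    return input, out, weight


def _prepare_data(batch_size):
    # Produce a feed value matching the input shape declared above.
    return np.ones((batch_size, 2, 2), dtype="float32")
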
def test_rmsprop(self):
    places = [paddle.XPUPlace(0)]
    size = (128, 320)
    for place in places:
        for centered in [False]:
            with fluid.scope_guard(core.Scope()):
                self.check_with_place(
                    place, is_sparse=False, centered=centered, size=size)

            with fluid.scope_guard(core.Scope()):
                self.check_with_place(
                    place,
                    is_sparse=True,
                    centered=centered,
                    row_num=512,
                    size=size)

            with fluid.scope_guard(core.Scope()):
                self.check_with_place(
                    place,
                    is_sparse=True,
                    centered=centered,
                    row_num=60,
                    size=size)

def test_check_grad(self):
    if self.need_check_grad and core.is_compiled_with_xpu():
        paddle.enable_static()
        place = paddle.XPUPlace(0)
        self.check_grad_with_place(
            place, set(['Input', 'Filter']), 'Output')

def test_check_grad_normal(self):
    if paddle.is_compiled_with_xpu():
        place = paddle.XPUPlace(0)
        self.check_grad_with_place(
            place, ['X', 'Y'], 'Out',
            max_relative_error=self.max_relative_error)

def test_check_grad_ignore_y(self):
    place = paddle.XPUPlace(0)
    paddle.enable_static()
    self.check_grad_with_place(
        place, ['X'], 'Out',
        max_relative_error=0.1,
        no_grad_set=set('Y'))

def test_check_grad(self):
    if paddle.is_compiled_with_xpu():
        paddle.enable_static()
        place = paddle.XPUPlace(0)
        self.check_grad_with_place(
            place, ["Logits"], "Loss", max_relative_error=0.2)

def test_static(self):
    with fluid.program_guard(fluid.Program(), fluid.Program()):
        input = np.random.random([12, 14]).astype("float32")
        x = fluid.layers.data(
            name='x',
            shape=[12, 14],
            append_batch_size=False,
            dtype="float32")

        positive_2 = fluid.layers.fill_constant([1], "int32", 12)
        expand_shape = fluid.layers.data(
            name="expand_shape",
            shape=[2],
            append_batch_size=False,
            dtype="int32")

        out_1 = paddle.expand(x, shape=[12, 14])
        out_2 = paddle.expand(x, shape=[positive_2, 14])
        out_3 = paddle.expand(x, shape=expand_shape)

        g0 = fluid.backward.calc_gradient(out_2, x)

        exe = fluid.Executor(place=paddle.XPUPlace(0))
        res_1, res_2, res_3 = exe.run(
            fluid.default_main_program(),
            feed={
                "x": input,
                "expand_shape": np.array([12, 14]).astype("int32")
            },
            fetch_list=[out_1, out_2, out_3])

        # Expanding a [12, 14] tensor to shape [12, 14] is a no-op,
        # so every result should equal the input.
        assert np.array_equal(res_1, np.tile(input, (1, 1)))
        assert np.array_equal(res_2, np.tile(input, (1, 1)))
        assert np.array_equal(res_3, np.tile(input, (1, 1)))

def test_check_output(self):
    ids = self.inputs['Ids']
    flatten_idx = ids.flatten()
    padding_idx = np.random.choice(flatten_idx, 1)[0]
    # Rows selected by padding_idx must come back as all zeros.
    self.outputs['Out'][np.squeeze(ids == padding_idx)] = np.zeros(31)
    self.attrs = {'padding_idx': cpt.long_type(padding_idx)}
    self.check_output_with_place(place=paddle.XPUPlace(0))

def setUp(self):
    self.set_xpu()
    self.place = paddle.XPUPlace(0)
    self.op_type = "gather_nd"
    xnp = np.random.random((5, 20)).astype(typename)
    self.inputs = {'X': xnp, 'Index': np.array([1]).astype("int32")}
    self.outputs = {'Out': self.inputs["X"][self.inputs["Index"]]}

def test_check_grad_ignore_y(self):
    if paddle.is_compiled_with_xpu():
        place = paddle.XPUPlace(0)
        self.check_grad_with_place(
            place, ['X'], 'Out',
            max_relative_error=0.006,
            no_grad_set=set('Y'))

def test_check_grad(self):
    if paddle.is_compiled_with_xpu():
        paddle.enable_static()
        place = paddle.XPUPlace(0)
        self.check_grad_with_place(
            place, ['X'], 'Out', max_relative_error=1.e1)

def setUp(self):
    self.init_dtype()
    self.set_xpu()
    self.op_type = "expand_as_v2"
    self.place = paddle.XPUPlace(0)
    self.set_inputs()
    self.set_output()

def setUp(self):
    self.init_dtype()
    self.set_xpu()
    self.op_type = "adam"
    self.place = paddle.XPUPlace(0)
    self.set_data()
    self.set_attrs()
    self.set_shape()
    self.set_inputs()
    self.set_steps()
    param_out, moment1_out, moment2_out = adam_step(
        self.inputs, self.attrs)
    self.outputs = {
        'Moment1Out': moment1_out,
        'Moment2Out': moment2_out,
        'ParamOut': param_out,
        'Beta1PowOut':
            np.array([self.beta1_pow]).astype("float32") * self.beta1,
        'Beta2PowOut':
            np.array([self.beta2_pow]).astype("float32") * self.beta2
    }

def test_check_output(self):
    for _ in range(self.num_steps):
        param_out, moment1_out, moment2_out = adam_step(
            self.inputs, self.attrs)

        beta1_pow_out = self.inputs['Beta1Pow'] * self.beta1
        beta2_pow_out = self.inputs['Beta2Pow'] * self.beta2
        self.outputs = {
            'Moment1Out': moment1_out,
            'Moment2Out': moment2_out,
            'ParamOut': param_out,
            'Beta1PowOut': beta1_pow_out,
            'Beta2PowOut': beta2_pow_out
        }

        # Verify output for this step
        self.check_output_with_place(place=paddle.XPUPlace(0), atol=1e-2)

        # Output of this step becomes input for next step
        self.inputs['Param'] = param_out
        self.inputs['Moment1'] = moment1_out
        self.inputs['Moment2'] = moment2_out

        # Update powers of Beta1 and Beta2 for next time step
        self.inputs['Beta1Pow'] = beta1_pow_out
        self.inputs['Beta2Pow'] = beta2_pow_out

        # Randomize gradient for next step
        self.inputs['Grad'] = np.random.uniform(
            -1, 1, (102, 105)).astype("float32")

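# `adam_step` is the NumPy reference used by the two Adam tests above; its
# definition is not shown in this excerpt. Below is a minimal sketch of a
# plain (non-amsgrad) Adam update under that interface. Reading beta1/beta2
# from the attrs dict is an assumption here:
def adam_step(inputs, attributes):
    param = inputs['Param']
    grad = inputs['Grad']
    moment1 = inputs['Moment1']
    moment2 = inputs['Moment2']
    lr = inputs['LearningRate']
    beta1_pow = inputs['Beta1Pow']
    beta2_pow = inputs['Beta2Pow']
    beta1 = attributes['beta1']
    beta2 = attributes['beta2']
    epsilon = attributes['epsilon']

    # Standard Adam: update the biased first and second moment estimates,
    # then apply the bias-corrected learning rate.
    moment1_out = beta1 * moment1 + (1 - beta1) * grad
    moment2_out = beta2 * moment2 + (1 - beta2) * np.square(grad)
    lr_t = lr * np.sqrt(1 - beta2_pow) / (1 - beta1_pow)
    param_out = param - lr_t * (
        moment1_out / (np.sqrt(moment2_out) + epsilon))
    return param_out, moment1_out, moment2_out
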
def test_check_grad_ignore_x(self):
    if paddle.is_compiled_with_xpu():
        place = paddle.XPUPlace(0)
        self.check_grad_with_place(
            place, ['Y'], 'Out',
            no_grad_set=set("X"),
            max_relative_error=self.max_relative_error)

def setUp(self):
    self.op_type = "flatten"
    self.use_xpu = True
    self.place = paddle.XPUPlace(0)
    self.init_test_case()
    self.inputs = {"X": np.random.random(self.in_shape).astype("float32")}
    self.init_attrs()
    self.outputs = {"Out": self.inputs["X"].reshape(self.new_shape)}

def test_check_grad(self):
    # Gradient checking is skipped for integer dtypes.
    if self.dtype in ('int64', 'int32'):
        return
    if paddle.is_compiled_with_xpu():
        paddle.enable_static()
        place = paddle.XPUPlace(0)
        self.check_grad_with_place(place, self.get_x_names(), 'Y')

def test_check_grad(self):
    if core.is_compiled_with_xpu():
        paddle.enable_static()
        place = paddle.XPUPlace(0)
        self.check_grad_with_place(
            place, {'Input', 'Offset', 'Mask', 'Filter'},
            'Output',
            max_relative_error=0.06)

def setUp(self):
    self.set_xpu()
    self.op_type = "sigmoid_cross_entropy_with_logits"
    self.place = paddle.XPUPlace(0)
    self.init_dtype()
    self.set_inputs()
    self.set_output()

def test_check_output(self):
    # Only check when X and Y have the same rank and the same leading
    # (batch) dimension.
    if paddle.is_compiled_with_xpu() and \
            len(self.inputs['X'].shape) == len(self.inputs['Y'].shape) and \
            self.inputs['X'].shape[0] == self.inputs['Y'].shape[0]:
        place = paddle.XPUPlace(0)
        self.check_output_with_place(place, atol=1e-3)

def test_check_grad(self):
    if self.dtype == np.float16 or (
            hasattr(self, "no_need_check_grad") and
            self.no_need_check_grad):
        return
    if core.is_compiled_with_xpu():
        paddle.enable_static()
        place = paddle.XPUPlace(0)
        self.check_grad_with_place(place, {'Input', 'Filter'}, 'Output')

def test_check_grad(self):
    # TODO(wangzhongpu): support mkldnn op in dygraph mode
    if self.dtype == np.float16:
        return
    if core.is_compiled_with_xpu():
        paddle.enable_static()
        place = paddle.XPUPlace(0)
        self.check_grad_with_place(place, {'Input', 'Filter'}, 'Output')

def test_check_grad_ignore_y(self):
    if paddle.is_compiled_with_xpu():
        place = paddle.XPUPlace(0)
        self.check_grad_with_place(
            place, ['X'], 'Out',
            no_grad_set=set('Y'),
            check_dygraph=(not self.use_mkldnn))

def setUp(self):
    self.op_type = 'huber_loss'
    self.place = paddle.XPUPlace(0)
    self.dtype = self.in_type
    self.set_inputs()
    self.set_attrs()
    self.set_outputs()