def forward(self, x):
    # Encode: stack of linear layers, each followed by SELU.
    for i in range(self.number_of_layers):
        x = self._param_encoder[i](x)
        x = F.selu(x)
    if self._dp_drop_prob > 0:  # apply dropout only on the code layer
        x = self.drop(x)
    # Decode: mirror stack back up to the input dimension.
    for i in range(self.number_of_layers):
        x = self._param_decoder[i](x)
        x = F.selu(x)
    return x
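# A minimal constructor sketch, NOT from the source, showing the attributes the
# forward pass above assumes (self._param_encoder, self._param_decoder,
# self.number_of_layers, self._dp_drop_prob, self.drop). The framework
# (paddle.nn, imported as nn) and the layer sizes are hypothetical placeholders
# chosen for illustration.
def __init__(self, layer_sizes=(128, 64, 32), dp_drop_prob=0.5):
    super().__init__()
    self.number_of_layers = len(layer_sizes) - 1
    # Mirror-image stacks of linear layers for the encoder and decoder.
    self._param_encoder = nn.LayerList(
        [nn.Linear(layer_sizes[i], layer_sizes[i + 1])
         for i in range(self.number_of_layers)])
    self._param_decoder = nn.LayerList(
        [nn.Linear(layer_sizes[i + 1], layer_sizes[i])
         for i in reversed(range(self.number_of_layers))])
    self._dp_drop_prob = dp_drop_prob
    self.drop = nn.Dropout(p=dp_drop_prob)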
def test_errors(self):
    with paddle.static.program_guard(paddle.static.Program()):
        # The input type must be Variable.
        self.assertRaises(TypeError, F.selu, 1)
        # The input dtype must be float16, float32, or float64.
        x_int32 = paddle.fluid.data(
            name='x_int32', shape=[12, 10], dtype='int32')
        self.assertRaises(TypeError, F.selu, x_int32)
        # The scale must be greater than 1.0.
        x_fp32 = paddle.fluid.data(
            name='x_fp32', shape=[12, 10], dtype='float32')
        self.assertRaises(ValueError, F.selu, x_fp32, -1.0)
        # The alpha must be no less than 0.
        self.assertRaises(ValueError, F.selu, x_fp32, 1.6, -1.0)
        # A float16 input dtype is supported.
        x_fp16 = paddle.fluid.data(
            name='x_fp16', shape=[12, 10], dtype='float16')
        F.selu(x_fp16)
def test_dygraph_api(self):
    paddle.disable_static(self.place)
    x = paddle.to_tensor(self.x_np)
    out1 = F.selu(x, self.scale, self.alpha)
    selu = paddle.nn.SELU(self.scale, self.alpha)
    out2 = selu(x)
    out_ref = ref_selu(self.x_np, self.scale, self.alpha)
    for r in [out1, out2]:
        self.assertEqual(np.allclose(out_ref, r.numpy()), True)
    paddle.enable_static()
def test_static_api(self):
    with paddle.static.program_guard(paddle.static.Program()):
        x = paddle.fluid.data('X', self.x_np.shape, self.x_np.dtype)
        out1 = F.selu(x, self.scale, self.alpha)
        selu = paddle.nn.SELU(self.scale, self.alpha)
        out2 = selu(x)
        exe = paddle.static.Executor(self.place)
        res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
    out_ref = ref_selu(self.x_np, self.scale, self.alpha)
    for r in res:
        self.assertEqual(np.allclose(out_ref, r), True)
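# The two tests above compare against a ref_selu helper defined elsewhere in
# the file. A minimal NumPy sketch of such a reference, assuming the standard
# SELU definition selu(x) = scale * (x if x > 0 else alpha * (exp(x) - 1))
# and its usual default constants:
import numpy as np

def ref_selu(x,
             scale=1.0507009873554804934193349852946,
             alpha=1.6732632423543772848170429916717):
    # Elementwise: scale * x for positives, scale * alpha * (e^x - 1) otherwise.
    x = np.asarray(x)
    return scale * np.where(x > 0, x, alpha * (np.exp(x) - 1))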