def test_ceil_11():
    """Check paddle.ceil export against ONNX opset 11."""
    model = Net()
    model.eval()
    # APIOnnx(net, name, ver_list, delta=1e-6, rtol=1e-5)
    checker = APIOnnx(model, 'ceil', [11])
    data = randtool("float", -1, 1, [3, 3]).astype('float32')
    checker.set_input_data("input_data", paddle.to_tensor(data))
    checker.run()
def test_nn_functional_LogSigmoid_dtype():
    """
    api: paddle.nn.functional.log_sigmoid
    op version: 12
    """
    # NOTE(review): docstring previously said `log_softmax`, but this test is
    # named and saved as LogSigmoid — corrected to log_sigmoid.
    # NOTE(review): Net is built with dtype='float64' while the input below is
    # cast to float32 — presumably the net converts internally; confirm intent.
    op = Net(dtype='float64')
    op.eval()
    # net, name, ver_list, delta=1e-6, rtol=1e-5
    obj = APIOnnx(op, 'nn_functional_LogSigmoid', [12])
    obj.set_input_data(
        "input_data",
        paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype('float32')))
    obj.run()
def test_argmax_dtype():
    """Check paddle.argmax (output dtype int32) export against ONNX opset 11."""
    model = Net(dtype="int32")
    model.eval()
    # APIOnnx(net, name, ver_list, delta=1e-6, rtol=1e-5)
    checker = APIOnnx(model, 'argmax', [11])
    data = randtool("float", -1, 1, [3, 10]).astype('float32')
    checker.set_input_data("input_data", paddle.to_tensor(data))
    checker.run()
def test_tile_12():
    """Check paddle.tile export against ONNX opset 12."""
    model = Net()
    model.eval()
    # APIOnnx(net, name, ver_list, delta=1e-6, rtol=1e-5)
    checker = APIOnnx(model, 'tile', [12])
    data = randtool("float", -1, 1, [3, 10]).astype('float32')
    checker.set_input_data("input_data", paddle.to_tensor(data))
    checker.run()
def test_any_axis():
    """Check paddle.any with axis=1 export against ONNX opset 12."""
    model = Net(axis=1)
    model.eval()
    # APIOnnx(net, name, ver_list, delta=1e-6, rtol=1e-5)
    checker = APIOnnx(model, 'any', [12])
    # random floats cast to bool give a mixed True/False input mask
    mask = randtool("float", -1, 1, [4, 3, 10]).astype('bool')
    checker.set_input_data("input_data", paddle.to_tensor(mask))
    checker.run()
def test_log1p_7():
    """Check paddle.log1p export against ONNX opset 7."""
    model = Net()
    model.eval()
    # APIOnnx(net, name, ver_list, delta=1e-6, rtol=1e-5)
    checker = APIOnnx(model, 'log1p', [7])
    data = randtool("float", -1, 1, [3, 10]).astype('float32')
    checker.set_input_data("input_data", paddle.to_tensor(data))
    checker.run()
def test_mask_select_12():
    """Check paddle.mask_select export against ONNX opset 12."""
    model = Net()
    model.eval()
    # APIOnnx(net, name, ver_list, delta=1e-6, rtol=1e-5)
    checker = APIOnnx(model, 'mask_select', [12])
    values = paddle.to_tensor([10, 4, 5, 6]).astype('float32')
    mask = paddle.to_tensor([1, 0, 1, 0]).astype('bool')
    checker.set_input_data("input_data", values, mask)
    checker.run()
def test_softshrink_threshold():
    """Check paddle.softshrink with threshold=1 export against ONNX opset 12."""
    model = Net(threshold=1)
    model.eval()
    # APIOnnx(net, name, ver_list, delta=1e-6, rtol=1e-5)
    checker = APIOnnx(model, 'softshrink', [12])
    data = randtool("float", -1, 1, [3, 10]).astype('float32')
    checker.set_input_data("input_data", paddle.to_tensor(data))
    checker.run()
def test_Conv2DTranspose_9_output_padding_1():
    """Check paddle.nn.Conv2DTranspose with output_padding export, ONNX opset 9."""
    model = Net(output_padding=1, stride=[3, 2], padding=[1, 2])
    model.eval()
    # APIOnnx(net, name, ver_list, delta=1e-6, rtol=1e-5)
    checker = APIOnnx(model, 'nn_Conv2DTranspose', [9])
    data = randtool("float", -1, 1, [3, 1, 10, 10]).astype('float32')
    checker.set_input_data("input_data", paddle.to_tensor(data))
    checker.run()
def test_concat_10():
    """
    api: paddle.concat
    op version: 10
    """
    # NOTE(review): docstring previously said "op version: 9", but the test
    # runs opset 10 — corrected to match the ver_list below.
    op = Net()
    op.eval()
    # net, name, ver_list, delta=1e-6, rtol=1e-5
    obj = APIOnnx(op, 'concat', [10])
    obj.set_input_data(
        "input_data",
        paddle.to_tensor(randtool("float", -1, 1, [3, 10]).astype('float32')),
        paddle.to_tensor(randtool("float", 0, 1, [3, 10]).astype('float32')))
    obj.run()
def test_Conv1D_11_padding_0():
    """Check paddle.nn.Conv1D with per-dimension padding export, ONNX opset 11."""
    model = Net(padding=[[0, 0], [0, 0], [1, 2]])
    model.eval()
    # APIOnnx(net, name, ver_list, delta=1e-6, rtol=1e-5)
    checker = APIOnnx(model, 'nn_Conv1D', [11])
    data = randtool("float", -1, 1, [3, 1, 10]).astype('float32')
    checker.set_input_data("input_data", paddle.to_tensor(data))
    checker.run()
def test_logical_xor_12():
    """Check paddle.logical_xor export against ONNX opset 12."""
    model = Net()
    model.eval()
    # APIOnnx(net, name, ver_list, delta=1e-6, rtol=1e-5)
    checker = APIOnnx(model, 'logical_xor', [12])
    # random floats cast to bool yield two independent boolean operands
    lhs = randtool("float", -1, 1, [3, 10]).astype('bool')
    rhs = randtool("float", 0, 1, [3, 10]).astype('bool')
    checker.set_input_data("input_data", paddle.to_tensor(lhs),
                           paddle.to_tensor(rhs))
    checker.run()
def test_meshgrid_3():
    """Check paddle.meshgrid over three 1-D inputs, ONNX opsets 11 and 12."""
    model = Net_3()
    model.eval()
    # APIOnnx(net, name, ver_list, delta=1e-6, rtol=1e-5)
    checker = APIOnnx(model, 'meshgrid', [11, 12])
    xs = paddle.to_tensor([1, 2, 3]).astype('float32')
    ys = paddle.to_tensor([5, 6]).astype('float32')
    zs = paddle.to_tensor([1, 2, 3, 4]).astype('float32')
    checker.set_input_data("input_data", xs, ys, zs)
    checker.run()
def test_initializer_Uniform_base():
    """
    api: paddle.initializer.Uniform
    op version: 9, 10, 11, 12
    """
    # NOTE(review): docstring previously said only "op version: 9", but the
    # test runs opsets 9-12 — corrected to match the ver_list below.
    op = Net()
    op.eval()
    # net, name, ver_list, delta=1e-6, rtol=1e-5
    obj = APIOnnx(op, 'nn_initializer_Uniform', [9, 10, 11, 12])
    obj.set_input_data(
        "input_data",
        paddle.to_tensor(
            randtool("float", -1, 1, [3, 1, 2]).astype('float32')))
    obj.run()
def test_BatchNorm2D_11():
    """Check paddle.nn.BatchNorm2D export against ONNX opset 11."""
    model = Net()
    model.eval()
    # APIOnnx(net, name, ver_list, delta=1e-6, rtol=1e-5)
    checker = APIOnnx(model, 'nn_BatchNorm2D', [11])
    data = randtool("float", -1, 1, [3, 1, 10, 10]).astype('float32')
    checker.set_input_data("input_data", paddle.to_tensor(data))
    checker.run()
def test_Conv2D_dilation_2_9():
    """Check paddle.nn.Conv2D with dilation export against ONNX opset 9."""
    # NOTE(review): the test name says dilation_2 but dilation=3 is passed —
    # presumably the name is stale; confirm against the test plan.
    model = Net(in_channels=16, out_channels=16, dilation=3)
    model.eval()
    # APIOnnx(net, name, ver_list, delta=1e-6, rtol=1e-5)
    checker = APIOnnx(model, 'Conv2D', [9])
    data = randtool("float", -1, 1, [3, 16, 10, 10]).astype('float32')
    checker.set_input_data("input_data", paddle.to_tensor(data))
    checker.run()
def test_MaxPool2D_base_Padding_0():
    """Check paddle.nn.MaxPool2D with per-dimension padding, ONNX opsets 9-12."""
    model = Net(kernel_size=5, padding=[[0, 0], [0, 0], [1, 2], [3, 4]])
    model.eval()
    # APIOnnx(net, name, ver_list, delta=1e-6, rtol=1e-5)
    checker = APIOnnx(model, 'nn_MaxPool2D', [9, 10, 11, 12])
    data = randtool("float", -1, 1, [3, 1, 10, 10]).astype('float32')
    checker.set_input_data("input_data", paddle.to_tensor(data))
    checker.run()
def test_Conv1DTranspose_12_Padding_tuple1():
    """
    api: paddle.nn.Conv1DTranspose
    op version: 9, 10, 11, 12, 13
    """
    # NOTE(review): docstring previously said only "op version: 12", but the
    # test runs opsets 9-13 — corrected to match the ver_list below.
    op = Net(padding=(1, 2))
    op.eval()
    # net, name, ver_list, delta=1e-6, rtol=1e-5
    obj = APIOnnx(op, 'nn_Conv1DTranspose', [9, 10, 11, 12, 13])
    obj.set_input_data(
        "input_data",
        paddle.to_tensor(
            randtool("float", -1, 1, [3, 1, 10]).astype('float32')))
    obj.run()
def test_logsumexp_keepdim():
    """Check paddle.logsumexp with keepdim=True export against ONNX opset 12."""
    model = Net(keepdim=True)
    model.eval()
    # APIOnnx(net, name, ver_list, delta=1e-6, rtol=1e-5)
    checker = APIOnnx(model, 'logsumexp', [12])
    data = randtool("float", -1, 1, [3, 3, 10]).astype('float32')
    checker.set_input_data("input_data", paddle.to_tensor(data))
    checker.run()
def test_Conv3D_10():
    """
    api: paddle.nn.Conv3D
    op version: 10
    """
    op = Net()
    op.eval()
    # net, name, ver_list, delta=1e-6, rtol=1e-5
    # NOTE(review): the save name was 'Conv2D_Dropout' — an apparent
    # copy-paste slip in a Conv3D test; renamed to 'Conv3D'.
    obj = APIOnnx(op, 'Conv3D', [10])
    obj.set_input_data(
        "input_data",
        paddle.to_tensor(
            randtool("float", -1, 1, [3, 1, 5, 10, 10]).astype('float32')))
    obj.run()
def test_nonzero_base():
    """Check paddle.nonzero export against ONNX opsets 9-12."""
    model = Net()
    model.eval()
    # APIOnnx(net, name, ver_list, delta=1e-6, rtol=1e-5)
    checker = APIOnnx(model, 'nonzero', [9, 10, 11, 12])
    data = randtool("float", -1, 1, [3, 3, 3]).astype('float32')
    checker.set_input_data("input_data", paddle.to_tensor(data))
    checker.run()
def test_nn_functional_interpolate_nearest_date_format():
    """Check paddle.nn.functional.interpolate with NHWC layout, ONNX opset 11."""
    model = Net(size=[4, 12], data_format='NHWC')
    model.eval()
    # APIOnnx(net, name, ver_list, delta=1e-6, rtol=1e-5)
    checker = APIOnnx(model, 'nn_functional_interpolate', [11])
    data = randtool("float", -1, 1, [2, 2, 2, 10]).astype('float32')
    checker.set_input_data("input_data", paddle.to_tensor(data))
    checker.run()
def test_atan_9():
    """Check paddle.atan export against ONNX opset 9."""
    model = Net()
    model.eval()
    # APIOnnx(net, name, ver_list, delta=1e-6, rtol=1e-5)
    checker = APIOnnx(model, 'atan', [9])
    data = randtool("float", -1, 1, [3, 3, 3]).astype('float32')
    checker.set_input_data("input_data", paddle.to_tensor(data))
    checker.run()
def test_nn_functional_interpolate_bicubic_align_corners():
    """Check bicubic paddle.nn.functional.interpolate with align_corners, opset 11."""
    model = Net(mode='bicubic', scale_factor=1.5, align_corners=True)
    model.eval()
    # APIOnnx(net, name, ver_list, delta=1e-6, rtol=1e-5)
    checker = APIOnnx(model, 'nn_functional_interpolate', [11])
    data = randtool("float", -1, 1, [1, 2, 2, 5]).astype('float32')
    checker.set_input_data("input_data", paddle.to_tensor(data))
    checker.run()
def test_Conv1D_11_padding_replicate():
    """
    api: paddle.nn.Conv1D
    op version: 9, 10, 11, 12
    """
    # NOTE(review): docstring previously said only "op version: 11", but the
    # test runs opsets 9-12 — corrected to match the ver_list below.
    op = Net(padding=2, padding_mode='replicate')
    op.eval()
    # net, name, ver_list, delta=1e-6, rtol=1e-5
    obj = APIOnnx(op, 'nn_Conv1D', [9, 10, 11, 12])
    obj.set_input_data(
        "input_data",
        paddle.to_tensor(
            randtool("float", -1, 1, [3, 1, 10]).astype('float32')))
    obj.run()
def test_nn_functional_interpolate_nearest_scale_factor_tuple():
    """Check paddle.nn.functional.interpolate with tuple scale_factor, opset 11."""
    model = Net(scale_factor=(1, 2))
    model.eval()
    # APIOnnx(net, name, ver_list, delta=1e-6, rtol=1e-5)
    checker = APIOnnx(model, 'nn_functional_interpolate', [11])
    data = randtool("float", -1, 1, [2, 3, 6, 10]).astype('float32')
    checker.set_input_data("input_data", paddle.to_tensor(data))
    checker.run()
def test_Conv2DTranspose_9_10_11_12():
    """Check paddle.nn.Conv2DTranspose export against ONNX opsets 9-12."""
    model = Net()
    model.eval()
    # APIOnnx(net, name, ver_list, delta=1e-6, rtol=1e-5)
    checker = APIOnnx(model, 'nn_Conv2DTranspose', [9, 10, 11, 12])
    data = randtool("float", -1, 1, [3, 1, 10, 10]).astype('float32')
    checker.set_input_data("input_data", paddle.to_tensor(data))
    checker.run()
def test_GroupNorm_12():
    """Check paddle.nn.GroupNorm export against ONNX opset 12."""
    model = Net()
    model.eval()
    # APIOnnx(net, name, ver_list, delta=1e-6, rtol=1e-5)
    checker = APIOnnx(model, 'nn_GroupNorm', [12])
    data = randtool("float", -1, 1, [5, 10, 8, 8]).astype('float32')
    checker.set_input_data("input_data", paddle.to_tensor(data))
    checker.run()
def test_Hardswish_10():
    """Check paddle.nn.Hardswish export against ONNX opset 10."""
    model = Net()
    model.eval()
    # APIOnnx(net, name, ver_list, delta=1e-6, rtol=1e-5)
    checker = APIOnnx(model, 'nn_Hardswish', [10])
    data = randtool("float", -1, 1, [3, 1, 10, 10]).astype('float32')
    checker.set_input_data("input_data", paddle.to_tensor(data))
    checker.run()
def test_Conv2D_padding_1_9():
    """
    api: paddle.nn.Conv2D
    op version: 9
    """
    # NOTE(review): docstring previously said paddle.nn.Conv3D, but the test
    # name, save name, and 4-D input all indicate Conv2D — corrected.
    op = Net(padding=[1, 2, 3, 4])
    op.eval()
    # net, name, ver_list, delta=1e-6, rtol=1e-5
    obj = APIOnnx(op, 'Conv2D', [9])
    obj.set_input_data(
        "input_data",
        paddle.to_tensor(
            randtool("float", -1, 1, [3, 1, 10, 10]).astype('float32')))
    obj.run()