def test_Conv2DTranspose_9_10_11_12_group():
    """
    api: paddle.Conv2DTranspose
    op version: 9, 10, 11, 12
    """
    # Grouped transposed conv: groups == in_channels == out_channels,
    # with explicit per-dimension [[before, after], ...] padding.
    op = Net(
        in_channels=16,
        out_channels=16,
        groups=16,
        padding=[[0, 0], [0, 0], [1, 2], [2, 3]])
    op.eval()
    # APIOnnx(net, name, ver_list, delta=1e-6, rtol=1e-5)
    obj = APIOnnx(op, 'nn_Conv2DTranspose', [9, 10, 11, 12])
    obj.set_input_data(
        "input_data",
        paddle.to_tensor(
            randtool("float", -1, 1, [16, 16, 10, 10]).astype('float32')))
    obj.run()
def test_topk_base():
    """
    api: paddle.topk
    op version: 11, 12
    """
    op = Net()
    op.eval()
    # APIOnnx(net, name, ver_list, delta=1e-10, rtol=1e-11)
    obj = APIOnnx(op, 'topk', [11, 12])
    obj.set_input_data("input_data",
                       paddle.to_tensor([[1, 4, 5, 7], [2, 6, 2, 5]]))
    obj.run()
def test_broadcast_to_base():
    """
    api: paddle.broadcast_to
    op version: 9, 10, 11, 12
    """
    model = Net()
    model.eval()
    # Export/compare harness (defaults: delta=1e-10, rtol=1e-11).
    checker = APIOnnx(model, 'broadcast_to', [9, 10, 11, 12])
    data = paddle.to_tensor([1, 2, 3], dtype='int32')
    checker.set_input_data("input_data", data)
    checker.run()
def test_floor_divide_base():
    """
    api: paddle.floor_divide
    op version: 9, 10, 11, 12
    """
    op = Net()
    op.eval()
    # APIOnnx(net, name, ver_list, delta=1e-10, rtol=1e-11)
    obj = APIOnnx(op, 'floor_divide', [9, 10, 11, 12])
    obj.set_input_data("input_data",
                       paddle.to_tensor([2, 3, 8, 7]),
                       paddle.to_tensor([1, 5, 3, 3]))
    obj.run()
def test_GRU_base():
    """
    api: paddle.nn.GRU
    op version: 9, 10, 11, 12
    """
    model = Net()
    model.eval()
    # Export/compare harness (defaults: delta=1e-10, rtol=1e-11).
    checker = APIOnnx(model, 'nn_GRU', [9, 10, 11, 12])
    # Random sequence input plus initial hidden state.
    seq = paddle.randn((4, 23, 16))
    h0 = paddle.randn((2, 4, 32))
    checker.set_input_data("input_data", seq, h0)
    checker.run()
def test_mod_12():
    """
    api: paddle.mod
    op version: 12
    """
    model = Net()
    model.eval()
    # Export/compare harness (defaults: delta=1e-6, rtol=1e-5).
    checker = APIOnnx(model, 'mod', [12])
    lhs = paddle.to_tensor([2, 3, 8, 7])
    rhs = paddle.to_tensor([1, 5, 3, 3])
    checker.set_input_data("input_data", lhs, rhs)
    checker.run()
def test_floor_mod_broadcasting():
    """
    api: paddle.floor_mod
    op version: 12
    """
    model = Net()
    model.eval()
    # Export/compare harness (defaults: delta=1e-6, rtol=1e-5).
    checker = APIOnnx(model, 'floor_mod', [12])
    # Second operand is length-1, so it broadcasts against the first.
    lhs = paddle.to_tensor([2, 3, 8, 7])
    rhs = paddle.to_tensor([3])
    checker.set_input_data("input_data", lhs, rhs)
    checker.run()
def test_clip_11():
    """
    api: paddle.clip
    op version: 11
    """
    model = Net()
    model.eval()
    # Export/compare harness (defaults: delta=1e-6, rtol=1e-5).
    checker = APIOnnx(model, 'clip', [11])
    data = randtool("float", -1, 1, [3, 10]).astype('float32')
    checker.set_input_data("input_data", paddle.to_tensor(data))
    checker.run()
def test_Embedding_base():
    """
    api: paddle.Embedding
    op version: 9, 10, 11, 12
    """
    model = Net()
    model.eval()
    # Export/compare harness (defaults: delta=1e-6, rtol=1e-5).
    checker = APIOnnx(model, 'nn_Embedding', [9, 10, 11, 12])
    # int64 index tensor of shape (3, 1) holding ids 3..5.
    ids = np.arange(3, 6).reshape((3, 1)).astype(np.int64)
    checker.set_input_data("input_data", paddle.to_tensor(ids))
    checker.run()
def test_expand_9():
    """
    api: paddle.expand
    op version: 9
    """
    model = Net()
    model.eval()
    # Export/compare harness (defaults: delta=1e-6, rtol=1e-5).
    checker = APIOnnx(model, 'expand', [9])
    data = randtool("float", -1, 1, [3, 10]).astype('float32')
    checker.set_input_data("input_data", paddle.to_tensor(data))
    checker.run()
def test_nn_functional_thresholded_relu_threshold():
    """
    api: paddle.nn.thresholded_relu
    op version: 12
    """
    # Non-default threshold exercises the attribute export path.
    model = Net(threshold=2)
    model.eval()
    # Export/compare harness (defaults: delta=1e-6, rtol=1e-5).
    checker = APIOnnx(model, 'nn_functional_thresholded_relu', [12])
    data = randtool("float", -1, 1, [3, 10]).astype('float32')
    checker.set_input_data("input_data", paddle.to_tensor(data))
    checker.run()
def test_logical_not_12():
    """
    api: paddle.logical_not
    op version: 12
    """
    model = Net()
    model.eval()
    # Export/compare harness (defaults: delta=1e-6, rtol=1e-5).
    checker = APIOnnx(model, 'logical_not', [12])
    # Random floats cast to bool give a mixed True/False mask.
    mask = randtool("float", -1, 1, [3, 10]).astype('bool')
    checker.set_input_data("input_data", paddle.to_tensor(mask))
    checker.run()
def test_unsqueeze_9_multil_negative_axis():
    """
    api: paddle.unsqueeze
    op version: 9, 10, 11, 12, 13
    """
    # Multiple insertion axes including a negative one.
    op = Net(axis=[1, 2, 3, -1])
    op.eval()
    # APIOnnx(net, name, ver_list, delta=1e-6, rtol=1e-5)
    obj = APIOnnx(op, 'unsqueeze', [9, 10, 11, 12, 13])
    obj.set_input_data(
        "input_data",
        paddle.to_tensor(randtool("float", -1, 1, [5, 10]).astype('float32')))
    obj.run()
def test_mask_select_12():
    """
    api: paddle.mask_select
    op version: 12
    """
    model = Net()
    model.eval()
    # Export/compare harness (defaults: delta=1e-6, rtol=1e-5).
    checker = APIOnnx(model, 'mask_select', [12])
    values = paddle.to_tensor([10, 4, 5, 6]).astype('float32')
    mask = paddle.to_tensor([1, 0, 1, 0]).astype('bool')
    checker.set_input_data("input_data", values, mask)
    checker.run()
def test_meshgrid_unlikeSize():
    """
    api: paddle.meshgrid
    op version: 11, 12
    """
    model = Net()
    model.eval()
    # Export/compare harness (defaults: delta=1e-6, rtol=1e-5).
    checker = APIOnnx(model, 'meshgrid', [11, 12])
    # Inputs of different lengths (3 vs 2).
    xs = paddle.to_tensor([1, 2, 3]).astype('float32')
    ys = paddle.to_tensor([5, 6]).astype('float32')
    checker.set_input_data("input_data", xs, ys)
    checker.run()
def test_register_buffer():
    """
    api: register_buffer
    op version: 9
    """
    model = Net()
    model.eval()
    # Export/compare harness (defaults: delta=1e-6, rtol=1e-5).
    checker = APIOnnx(model, 'register_buffer', [9])
    data = randtool("float", -1, 1, [1]).astype('float32')
    checker.set_input_data("input_data", paddle.to_tensor(data))
    checker.run()
def test_unique_return_all():
    """
    api: paddle.unique
    op version: 12
    """
    # Variant net that requests all of unique's optional outputs.
    model = Net_mult_all()
    model.eval()
    # Export/compare harness (defaults: delta=1e-6, rtol=1e-5).
    checker = APIOnnx(model, 'unique', [12])
    data = randtool("float", -1, 1, [3, 10]).astype('float32')
    checker.set_input_data("input_data", paddle.to_tensor(data))
    checker.run()
def test_nn_functional_LogSigmoid_axis():
    """
    api: paddle.nn.functional.log_softmax
    op version: 12
    """
    # NOTE(review): function/model name says LogSigmoid but the docstring
    # and the axis kwarg suggest log_softmax — confirm which op Net wraps.
    model = Net(axis=1)
    model.eval()
    # Export/compare harness (defaults: delta=1e-6, rtol=1e-5).
    checker = APIOnnx(model, 'nn_functional_LogSigmoid', [12])
    data = randtool("float", -1, 1, [3, 10]).astype('float32')
    checker.set_input_data("input_data", paddle.to_tensor(data))
    checker.run()
def test_any_10():
    """
    api: paddle.any
    op version: 10
    """
    model = Net()
    model.eval()
    # Export/compare harness (defaults: delta=1e-6, rtol=1e-5).
    checker = APIOnnx(model, 'any', [10])
    # Random floats cast to bool give a mixed True/False mask.
    mask = randtool("float", -1, 1, [3, 10]).astype('bool')
    checker.set_input_data("input_data", paddle.to_tensor(mask))
    checker.run()
def test_hardtanh_12():
    """
    api: paddle.hardtanh
    op version: 12
    """
    model = Net()
    model.eval()
    # Export/compare harness (defaults: delta=1e-6, rtol=1e-5).
    checker = APIOnnx(model, 'hardtanh', [12])
    data = randtool("float", -1, 1, [3, 10]).astype('float32')
    checker.set_input_data("input_data", paddle.to_tensor(data))
    checker.run()
def test_acos_7():
    """
    api: paddle.acos
    op version: 7
    """
    model = Net()
    model.eval()
    # Export/compare harness (defaults: delta=1e-6, rtol=1e-5).
    checker = APIOnnx(model, 'acos', [7])
    # Values in [-1, 1] keep acos within its real domain.
    data = randtool("float", -1, 1, [3, 3, 3]).astype('float32')
    checker.set_input_data("input_data", paddle.to_tensor(data))
    checker.run()
def test_hardshrink_10():
    """
    api: paddle.nn.hardshrink
    op version: 10
    """
    model = Net()
    model.eval()
    # Export/compare harness (defaults: delta=1e-6, rtol=1e-5).
    checker = APIOnnx(model, 'nn_hardshrink', [10])
    data = randtool("float", -1, 1, [3, 10]).astype('float32')
    checker.set_input_data("input_data", paddle.to_tensor(data))
    checker.run()
def test_softshrink_threshold():
    """
    api: paddle.softshrink
    op version: 12
    """
    # Non-default threshold exercises the attribute export path.
    model = Net(threshold=1)
    model.eval()
    # Export/compare harness (defaults: delta=1e-6, rtol=1e-5).
    checker = APIOnnx(model, 'softshrink', [12])
    data = randtool("float", -1, 1, [3, 10]).astype('float32')
    checker.set_input_data("input_data", paddle.to_tensor(data))
    checker.run()
def test_all_keepdim():
    """
    api: paddle.all
    op version: 12
    """
    # keepdim=True retains the reduced axis as size 1.
    model = Net(keepdim=True)
    model.eval()
    # Export/compare harness (defaults: delta=1e-6, rtol=1e-5).
    checker = APIOnnx(model, 'all', [12])
    mask = randtool("float", -1, 1, [4, 3, 10]).astype('bool')
    checker.set_input_data("input_data", paddle.to_tensor(mask))
    checker.run()
def test_gather_axis():
    """
    api: paddle.gather
    op version: 12
    """
    # Gather along axis 1 instead of the default axis.
    model = Net(axis=1)
    model.eval()
    # Export/compare harness (defaults: delta=1e-6, rtol=1e-5).
    checker = APIOnnx(model, 'gather', [12])
    data = randtool("float", -1, 1, [3, 10]).astype('float32')
    checker.set_input_data("input_data", paddle.to_tensor(data))
    checker.run()
def test_Conv2DTranspose_9_output_padding_1():
    """
    api: paddle.Conv2DTranspose
    op version: 9
    """
    # output_padding combined with asymmetric stride/padding.
    model = Net(output_padding=1, stride=[3, 2], padding=[1, 2])
    model.eval()
    # Export/compare harness (defaults: delta=1e-6, rtol=1e-5).
    checker = APIOnnx(model, 'nn_Conv2DTranspose', [9])
    data = randtool("float", -1, 1, [3, 1, 10, 10]).astype('float32')
    checker.set_input_data("input_data", paddle.to_tensor(data))
    checker.run()
def test_Conv2DTranspose_9_10_11_12():
    """
    api: paddle.Conv2DTranspose
    op version: 9, 10, 11, 12
    """
    model = Net()
    model.eval()
    # Export/compare harness (defaults: delta=1e-6, rtol=1e-5).
    checker = APIOnnx(model, 'nn_Conv2DTranspose', [9, 10, 11, 12])
    data = randtool("float", -1, 1, [3, 1, 10, 10]).astype('float32')
    checker.set_input_data("input_data", paddle.to_tensor(data))
    checker.run()
def test_nonzero_base():
    """
    api: paddle.nonzero
    op version: 9, 10, 11, 12
    """
    model = Net()
    model.eval()
    # Export/compare harness (defaults: delta=1e-6, rtol=1e-5).
    checker = APIOnnx(model, 'nonzero', [9, 10, 11, 12])
    data = randtool("float", -1, 1, [3, 3, 3]).astype('float32')
    checker.set_input_data("input_data", paddle.to_tensor(data))
    checker.run()
def test_Conv1D_11_padding_replicate():
    """
    api: paddle.nn.Conv1D
    op version: 9, 10, 11, 12
    """
    # 'replicate' padding mode with symmetric padding of 2.
    op = Net(padding=2, padding_mode='replicate')
    op.eval()
    # APIOnnx(net, name, ver_list, delta=1e-6, rtol=1e-5)
    obj = APIOnnx(op, 'nn_Conv1D', [9, 10, 11, 12])
    obj.set_input_data(
        "input_data",
        paddle.to_tensor(
            randtool("float", -1, 1, [3, 1, 10]).astype('float32')))
    obj.run()
def test_Conv1D_11_padding_0():
    """
    api: paddle.nn.Conv1D
    op version: 11
    """
    # Explicit per-dimension [[before, after], ...] padding.
    model = Net(padding=[[0, 0], [0, 0], [1, 2]])
    model.eval()
    # Export/compare harness (defaults: delta=1e-6, rtol=1e-5).
    checker = APIOnnx(model, 'nn_Conv1D', [11])
    data = randtool("float", -1, 1, [3, 1, 10]).astype('float32')
    checker.set_input_data("input_data", paddle.to_tensor(data))
    checker.run()