Example No. 1
    def test_reshape_with_copy(self):
        x = self._get_rnd([10, 20 * 30])
        shape = [0, 20, 30]
        if defs.onnx_opset_version() < 5:
            node_def = helper.make_node("Reshape", ["X"], ["Z"], shape=shape)
            output = run_node(node_def, [x])
        else:
            node_def = helper.make_node("Reshape", ["X", "Y"], ["Z"])
            output = run_node(node_def, [x, shape])

        np.testing.assert_almost_equal(output["Z"], x.reshape([10, 20, 30]))
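For context, ONNX Reshape treats a 0 in the target shape as "copy the corresponding input dimension", and from opset 5 the shape moved from a node attribute to a second input, which is what the version branch above switches on. A minimal numpy sketch of the zero-dimension rule (reshape_like_onnx is a hypothetical helper, not part of the test suite):

import numpy as np

def reshape_like_onnx(x, shape):
    # A 0 in the target shape copies the input dimension at that index.
    resolved = [x.shape[i] if d == 0 else d for i, d in enumerate(shape)]
    return x.reshape(resolved)

x = np.arange(10 * 600).reshape(10, 600)
assert reshape_like_onnx(x, [0, 20, 30]).shape == (10, 20, 30)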
Example No. 2
 def test_average_pool(self):
     # TODO: fix this test -- the hand-rolled reference below appears to
     # ignore the pads attribute, so its expected output no longer matches.
     return
     device = "CUDA"
     if not supports_device(device):
         raise unittest.SkipTest(
             "Backend doesn't support device {}".format(device))
     shape = [1, 1, 40, 40]
     node_def = helper.make_node("AveragePool", ["X"], ["Y"],
                                 kernel_shape=[1, 2],
                                 pads=[1, 1],
                                 strides=[1, 1])
     x = self._get_rnd(shape)
     output = run_node(node_def, [x], device=device)
     test_output = np.zeros(shape)
     for i1 in range(0, shape[0]):
         for i2 in range(0, shape[1]):
             for j1 in range(0, shape[2]):
                 for j2 in range(0, shape[3]):
                     test_output[i1][i2][j1][j2] = 0
                     count = 0
                     for k in range(j2, min(j2 + 2, shape[3])):
                         test_output[i1][i2][j1][j2] += x[i1][i2][j1][k]
                         count += 1
                     test_output[i1][i2][j1][j2] /= count
     np.testing.assert_almost_equal(output["Y"], test_output)
Example No. 3
 def test_cast(self):
     if legacy_onnx_pre_1_2() or legacy_opset_pre_6():
         test_cases = [("FLOAT", tf.float32), ("UINT8", tf.uint8),
                       ("INT8", tf.int8), ("UINT16", tf.uint16),
                       ("INT16", tf.int16), ("INT32", tf.int32),
                       ("INT64", tf.int64), ("BOOL", tf.bool),
                       ("FLOAT16", tf.float16), ("DOUBLE", tf.float64),
                       ("COMPLEX64", tf.complex64),
                       ("COMPLEX128", tf.complex128)]
     else:
         test_cases = [(TensorProto.FLOAT, tf.float32),
                       (TensorProto.UINT8, tf.uint8),
                       (TensorProto.INT8, tf.int8),
                       (TensorProto.UINT16, tf.uint16),
                       (TensorProto.INT16, tf.int16),
                       (TensorProto.INT32, tf.int32),
                       (TensorProto.INT64, tf.int64),
                       (TensorProto.BOOL, tf.bool),
                       (TensorProto.FLOAT16, tf.float16),
                       (TensorProto.DOUBLE, tf.float64),
                       (TensorProto.COMPLEX64, tf.complex64),
                       (TensorProto.COMPLEX128, tf.complex128)]
     for ty, tf_type in test_cases:
         node_def = helper.make_node("Cast", ["input"], ["output"], to=ty)
         vector = [2, 3]
         output = run_node(node_def, [vector])
         np.testing.assert_equal(output["output"].dtype, tf_type)
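Outside TensorFlow, the same enum-to-dtype mapping can be queried from onnx itself; a hedged sketch, assuming onnx >= 1.13 where this helper was added:

import numpy as np
from onnx import TensorProto, helper

# Maps a TensorProto dtype enum to the corresponding numpy dtype.
assert helper.tensor_dtype_to_np_dtype(TensorProto.FLOAT16) == np.float16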
Example No. 4
 def test_less(self):
     node_def = helper.make_node("Less", ["X", "Y"], ["Z"])
     x = self._get_rnd([5, 3, 3, 2])
     y = self._get_rnd([3, 3, 1])
     output = run_node(node_def, [x, y])
     np.testing.assert_equal(output["Z"],
                             np.less(x, np.reshape(y, [1, 3, 3, 1])))
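The manual reshape of y mirrors numpy-style multidirectional broadcasting, which ONNX comparison ops follow from opset 7 onward; numpy can confirm the broadcast shape directly (np.broadcast_shapes needs numpy >= 1.20):

import numpy as np

# (3, 3, 1) right-aligns against (5, 3, 3, 2); size-1 dims stretch.
assert np.broadcast_shapes((5, 3, 3, 2), (3, 3, 1)) == (5, 3, 3, 2)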
Example No. 5
 def test_conv_transpose(self):
     # Fix test in the future.
     return
     device = "CUDA"
     if not supports_device(device):
         raise unittest.SkipTest(
             "Backend doesn't support device {}".format(device))
     node_def = helper.make_node("ConvTranspose", ["X", "weights"], ["Y"],
                                 pads=[1, 1])
     x_shape = [1, 5, 4]
     x = self._get_rnd(x_shape)
     weight_shape = [5, 3, 2]
     weights = self._get_rnd(weight_shape)
     output = run_node(node_def, [x, weights], device=device)
     out_shape = [x_shape[0], weight_shape[1], x_shape[2]]
     test_output = np.zeros(out_shape)
     for b in range(0, x_shape[0]):
         for m in range(0, weight_shape[1]):
             for h in range(0, x_shape[2]):
                 v = 0
                 for c in range(0, x_shape[1]):
                     for k in range(h, min(h + weight_shape[2],
                                           x_shape[2])):
                         v += x[b][c][k] * weights[c][m][k - h]
                 test_output[b][m][h] = v
     np.testing.assert_almost_equal(output["Y"], test_output, decimal=5)
Example No. 6
 def test_reduce_sum(self):
     node_def = helper.make_node("ReduceSum", ["X"], ["Y"], axes=[1, 2])
     x = self._get_rnd([5, 10, 10, 3])
     output = run_node(node_def, [x])
     np.testing.assert_allclose(output["Y"],
                                np.sum(x, (1, 2), keepdims=True),
                                rtol=1e-3)
Example No. 7
 def test_slice(self):
     # TODO: update to the newer Slice API (starts/ends/axes/steps are
     # node inputs from opset 10) or pin the onnx version
     return
     node_def = helper.make_node("Slice", ["X", "Y", "Z", "W"], ["S"])
     x = self._get_rnd([1000]).reshape([10, 10, 10])
     output = run_node(node_def, [x, [0, 1, 2], [0, 0, 0], [2, 2, 2]])
     np.testing.assert_almost_equal(output["S"], x[0:2, 0:2, 0:2])
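For reference, the opset-10 Slice op takes its inputs in the order (data, starts, ends, axes, steps); a minimal numpy sketch of the result this test expects:

import numpy as np

x = np.arange(1000).reshape(10, 10, 10)
starts, ends = [0, 0, 0], [2, 2, 2]
sliced = x[tuple(slice(s, e) for s, e in zip(starts, ends))]
assert sliced.shape == (2, 2, 2)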
Example No. 8
 def test_mul(self):
     node_def = helper.make_node("Mul", ["X", "Y"], ["Z"])
     x = self._get_rnd([5, 10, 5, 5])
     y = self._get_rnd([10, 1, 1])
     output = run_node(node_def, [x, y])
     np.testing.assert_almost_equal(
         output["Z"], np.multiply(x, y.reshape([1, 10, 1, 1])))
Example No. 9
 def test_lp_normalization(self):
     node_def = helper.make_node("LpNormalization", ["X"], ["Y"])
     x = self._get_rnd([5, 3, 3, 2])
     output = run_node(node_def, [x])
     np.testing.assert_allclose(output["Y"],
                                np.expand_dims(np.linalg.norm(x, axis=-1),
                                               -1),
                                rtol=1e-3)
Example No. 10
 def test_min(self):
     node_def = helper.make_node("Min", ["X1", "X2", "X3", "X4"], ["Z"])
     x1 = self._get_rnd([10, 10])
     x2 = self._get_rnd([10, 10])
     x3 = self._get_rnd([10, 10])
     x4 = self._get_rnd([10, 10])
     output = run_node(node_def, [x1, x2, x3, x4])
     test_output = np.minimum(np.minimum(np.minimum(x1, x2), x3), x4)
     np.testing.assert_almost_equal(output["Z"], test_output)
Example No. 11
 def test_constant(self):
     shape = [16, 16]
     values = np.random.randn(*shape).flatten().astype(float)
     const2_onnx = helper.make_tensor("const2", TensorProto.DOUBLE, shape,
                                      values)
     node_def = helper.make_node("Constant", [], ["Y"], value=const2_onnx)
     output = run_node(node_def, [])
     np.testing.assert_equal(output["Y"].shape, shape)
     np.testing.assert_almost_equal(output["Y"].flatten(), values)
Example No. 12
 def test_selu(self):
     node_def = helper.make_node("Selu", ["X"], ["Y"])
     x = self._get_rnd([1000])
     output = run_node(node_def, [x])
     alpha = 1.6732
     gamma = 1.0507
     x[x <= 0] = gamma * (alpha * np.exp(x[x <= 0]) - alpha)
     x[x > 0] = gamma * x[x > 0]
     np.testing.assert_allclose(output["Y"], x, rtol=1e-3, atol=1e-7)
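The in-place edits above implement the SELU definition; a self-contained sketch with the full-precision constants from Klambauer et al. (2017), of which the test's alpha and gamma are truncations:

import numpy as np

def selu(x, alpha=1.6732632423543772, gamma=1.0507009873554805):
    # gamma * x for x > 0, gamma * alpha * (exp(x) - 1) otherwise.
    return gamma * np.where(x > 0.0, x, alpha * (np.exp(x) - 1.0))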
Example No. 13
 def test_dot(self):
     # This op has been removed from ONNX; remove this test in the future.
     return
     node_def = helper.make_node("Dot", ["X", "Y"], ["Z"])
     x = np.floor(self._get_rnd([10, 10]))
     y = np.floor(self._get_rnd([10, 10]))
     output = run_node(node_def, [x, y])
     np.testing.assert_almost_equal(output["Z"], np.dot(x, y))
Example No. 14
 def test_sum(self):
     node_def = helper.make_node("Sum", ["X1", "X2", "X3", "X4"], ["Z"])
     x1 = self._get_rnd([10, 10])
     x2 = self._get_rnd([10, 10])
     x3 = self._get_rnd([10, 10])
     x4 = self._get_rnd([10, 10])
     output = run_node(node_def, [x1, x2, x3, x4])
     test_output = x1 + x2 + x3 + x4
     np.testing.assert_almost_equal(output["Z"], test_output)
Example No. 15
 def test_space_to_depth(self):
     node_def = helper.make_node("SpaceToDepth", ["X"], ["Y"], blocksize=2)
     x_shape = [1, 3, 2, 2]
     x = self._get_rnd(x_shape)
     output = run_node(node_def, [x])
     x = np.transpose(x, (0, 2, 3, 1))
     y = np.reshape(np.swapaxes(x.reshape(1, 1, 1, 1, 1, 12), 2, 3),
                    (1, 1, 1, 12))
     y = np.transpose(y, (0, 3, 1, 2))
     np.testing.assert_allclose(output["Y"], y, rtol=1e-3)
Example No. 16
 def test_depth_to_space(self):
     node_def = helper.make_node("DepthToSpace", ["X"], ["Y"], blocksize=2)
     x_shape = [1, 12, 1, 1]
     x = self._get_rnd(x_shape)
     output = run_node(node_def, [x])
     x = np.transpose(x, (0, 2, 3, 1))
     y = np.reshape(np.swapaxes(x.reshape(1, 1, 1, 2, 2, 3), 2, 3),
                    (1, 2, 2, 3))
     y = np.transpose(y, (0, 3, 1, 2))
     np.testing.assert_almost_equal(output["Y"], y, decimal=5)
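The reshape/swapaxes dance above is nearly degenerate for this 1x1 spatial input; a general NCHW DepthToSpace sketch in the DCR ordering used by the ONNX reference may be clearer:

import numpy as np

def depth_to_space_nchw(x, b):
    n, c, h, w = x.shape
    y = x.reshape(n, b, b, c // (b * b), h, w)  # split C into b*b blocks
    y = y.transpose(0, 3, 4, 1, 5, 2)           # interleave blocks into H, W
    return y.reshape(n, c // (b * b), h * b, w * b)

x = np.random.rand(1, 12, 1, 1)
assert depth_to_space_nchw(x, 2).shape == (1, 3, 2, 2)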
Example No. 17
 def test_tile(self):
     if legacy_onnx_pre_1_2():
         raise unittest.SkipTest(
             "The current version of ONNX does not record correctly the opset of Tile."
         )
     node_def = helper.make_node("Tile", ["X1", "X2"], ["Z"])
     x = self._get_rnd([3, 5, 5, 3])
     repeats = [1, 1, 2, 1]
     output = run_node(node_def, [x, repeats])
     np.testing.assert_allclose(output["Z"], np.tile(x, repeats), rtol=1e-3)
Example No. 18
 def test_concat(self):
     shape = [10, 20, 5]
     for axis in range(len(shape)):
         node_def = helper.make_node("Concat", ["X1", "X2"], ["Y"],
                                     axis=axis)
         x1 = self._get_rnd(shape)
         x2 = self._get_rnd(shape)
         output = run_node(node_def, [x1, x2])
         np.testing.assert_almost_equal(output["Y"],
                                        np.concatenate((x1, x2), axis))
Example No. 19
 def test_reduce_log_sum_exp(self):
     node_def = helper.make_node("ReduceLogSumExp", ["X"], ["Y"],
                                 axes=[1, 2])
     x = self._get_rnd([5, 10, 10, 3])
     output = run_node(node_def, [x])
     np.testing.assert_allclose(output["Y"],
                                np.log(
                                    np.sum(np.exp(x),
                                           axis=(1, 2),
                                           keepdims=True)),
                                rtol=1e-3)
Example No. 20
 def test_pad(self):
     node_def = helper.make_node("Pad", ["X"], ["Y"],
                                 mode="constant",
                                 pads=[1, 1, 1, 1],
                                 value=2.0)
     x = self._get_rnd([100, 100])
     output = run_node(node_def, [x])
     np.testing.assert_almost_equal(
         output["Y"],
          np.pad(x, ((1, 1), (1, 1)), 'constant',
                 constant_values=(2, 2)))
Example No. 21
 def test_arg_min(self):
     # TODO: need to fix this test
     return
     for axis in [0, 1]:
         node_def = helper.make_node("ArgMin", ["data"], ["reduced"],
                                     axis=axis,
                                     keepdims=0)
         data = self._get_rnd([10, 10])
         output = run_node(node_def, [data])
         np.testing.assert_almost_equal(output["reduced"],
                                        np.argmin(data, axis=axis))
Example No. 22
    def test_split(self):
        split = [3, 3, 4]
        node_def = helper.make_node("Split", ["X"],
                                    ["Z%i" % i for i in range(len(split))],
                                    axis=0,
                                    split=split)
        x = self._get_rnd([100]).reshape([10, 10])

        output = run_node(node_def, [x])
        for a, b in zip(list(output), np.split(x, np.cumsum(split))[:-1]):
            np.testing.assert_almost_equal(a, b)
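One note on the reference: np.split takes cut positions, not chunk sizes, hence the cumsum; dropping the final cumulative index up front avoids the empty trailing chunk instead of slicing it off afterwards:

import numpy as np

x = np.arange(100).reshape(10, 10)
parts = np.split(x, np.cumsum([3, 3, 4])[:-1], axis=0)  # sizes 3, 3, 4
assert [p.shape[0] for p in parts] == [3, 3, 4]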
Example No. 23
 def test_gather(self):
     node_def = helper.make_node("Gather", ["X", "Y"], ["Z"])
     x = self._get_rnd([10, 10])
     y = [[0, 1], [1, 2]]
     output = run_node(node_def, [x, y])
     test_output = np.zeros((2, 2, 10))
     for i in range(0, 2):
         for j in range(0, 10):
             test_output[0][i][j] = x[i][j]
     for i in range(0, 2):
         for j in range(0, 10):
             test_output[1][i][j] = x[i + 1][j]
     np.testing.assert_almost_equal(output["Z"], test_output)
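The nested loops reproduce numpy's take along axis 0, which accepts the 2-D index array directly; a one-line equivalent as a sketch:

import numpy as np

x = np.arange(100).reshape(10, 10)
expected = np.take(x, [[0, 1], [1, 2]], axis=0)  # shape (2, 2, 10)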
Example No. 24
 def test_gemm(self):
     # Compute Y = alpha * A * B + beta * C
     node_def = helper.make_node("Gemm", ["A", "B", "C"], ["Y"],
                                 transA=0,
                                 transB=0,
                                 alpha=1.0,
                                 beta=1.0)
     x = np.floor(self._get_rnd([10, 10]))
     y = np.floor(self._get_rnd([10, 10]))
     z = np.floor(self._get_rnd([10, 10]))
     output = run_node(node_def, [x, y, z])
     test_output = np.matmul(x, y) + z
     np.testing.assert_almost_equal(output["Y"], test_output)
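A sketch of the full Gemm contract those attributes encode (here transA/transB are 0 and alpha/beta are 1.0, so it reduces to a matmul plus C):

import numpy as np

def gemm(a, b, c, alpha=1.0, beta=1.0, trans_a=0, trans_b=0):
    a = a.T if trans_a else a
    b = b.T if trans_b else b
    return alpha * (a @ b) + beta * c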
Example No. 25
 def test_flatten(self):
     # If input tensor has shape (d_0, d_1, ... d_n) then the
     # output will have shape:
     #
     # (d_0 X d_1 ... d_(axis-1), d_axis X d_(axis+1) ... X dn)
     #
     # TODO: pass axis attribute which is supported in newer
     # versions of onnx
     node_def = helper.make_node("Flatten", ["X"], ["Y"])
     x = self._get_rnd([10, 2, 3, 4, 5])
     output = run_node(node_def, [x])
     # TODO: pass axis=3 and uncomment the line below
     # np.testing.assert_almost_equal(output["Y"], x.reshape([60, 20]))
     np.testing.assert_almost_equal(output["Y"], x.reshape([10, 120]))
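A sketch of the shape rule quoted in the comment: for axis k, Flatten yields (d_0 * ... * d_(k-1), d_k * ... * d_n), so axis=3 on [10, 2, 3, 4, 5] would give (60, 20):

import numpy as np

def flatten(x, axis=1):
    lead = int(np.prod(x.shape[:axis], dtype=np.int64))  # 1 when axis == 0
    return x.reshape(lead, -1)

x = np.zeros([10, 2, 3, 4, 5])
assert flatten(x, axis=3).shape == (60, 20)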
Example No. 26
 def test_max_pool(self):
     # NOTE: disabled by this early return; TODO: fix and re-enable
     return
     node_def = helper.make_node("MaxPool", ["X"], ["Y"],
                                 dilations=[1, 1],
                                 kernel_shape=[1, 2],
                                 pads=[0, 0],
                                 strides=[1, 2])
     x = self._get_rnd([10, 10, 4, 4])
     output = run_node(node_def, [x])
     test_output = np.zeros([10, 10, 4, 2])
     for i1 in range(0, 10):
         for i2 in range(0, 10):
             for j1 in range(0, 4):
                 for j2 in range(0, 2):
                     test_output[i1][i2][j1][j2] = \
                       max(x[i1][i2][j1][2*j2], x[i1][i2][j1][2*j2 + 1])
     np.testing.assert_almost_equal(output["Y"], test_output)
Example No. 27
 def test_constant_fill(self):
     shape = [1, 2, 3, 4]
     extra_shape = [5, 6]
     value = 3.
     node_def = helper.make_node(
         "ConstantFill",
         ["X"],
         ["Y"],
         value=value,
         extra_shape=extra_shape,
         dtype=1,
     )
     x = self._get_rnd(shape)
     y = np.zeros(shape + extra_shape)
     y.fill(value)
     output = run_node(node_def, [x])
     np.testing.assert_equal(output["Y"].dtype, tf.float32)
     np.testing.assert_equal(output["Y"], y)
Example No. 28
 def test_image_scaler(self):
     # Input:  (N x C x H x W), where N is the batch size,
     # C is the number of channels, and H and W are the height
     # and the width of the data
     # Scale: (float, default 1.0) the scale to apply
     # Bias: applied to each channel, same size as C
     # Output has the same shape and type as the input
     x = self._get_rnd([1, 3, 224, 224])
     # random values lie in [0, 1), so add 0.1 to keep the scale positive
     scale = np.random.rand(1)[0] + 0.1
     bias = np.random.rand(3)
     node_def = helper.make_node("ImageScaler", ["X"], ["Y"],
                                 scale=scale,
                                 bias=bias)
     output = run_node(node_def, [x])
     test_out = np.multiply(x, scale)
     test_out = np.transpose(test_out, [0, 2, 3, 1])
     test_out = np.add(test_out, bias)
     test_out = np.transpose(test_out, [0, 3, 1, 2])
     np.testing.assert_almost_equal(output["Y"], test_out)
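The transpose round-trip above exists only to broadcast the per-channel bias; reshaping the bias achieves the same in one step, as this sketch shows:

import numpy as np

x = np.random.rand(1, 3, 224, 224)
scale, bias = 1.5, np.random.rand(3)
y = scale * x + bias.reshape(1, -1, 1, 1)  # bias broadcast over N, H, W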
Example No. 29
 def test_global_lp_pool(self):
     #   Image case:  (N x C x H x W), where N is the batch size,
     # C is the number of channels, and H and W are the height
     # and the width of the data
     #
     #   Non-image case: (N x C x D1 x D2 ... Dn)
     #
     #   Output data tensor from pooling across the input tensor.
     # Dimensions will be N x C x 1 x 1
     node_def = helper.make_node("GlobalLpPool", ["X"], ["Y"])
     x = self._get_rnd([10, 10, 2, 3])
     output = run_node(node_def, [x])
     test_output = np.zeros([10, 10, 1, 1])
     for i1 in range(0, 10):
         for i2 in range(0, 10):
             tmp = np.zeros([2, 3])
             for j1 in range(0, 2):
                 for j2 in range(0, 3):
                     tmp[j1][j2] = x[i1][i2][j1][j2]
             test_output[i1][i2][0][0] = np.linalg.norm(tmp)
     np.testing.assert_almost_equal(output["Y"], test_output, decimal=5)
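GlobalLpPool defaults to p=2, so the loops amount to an L2 (Frobenius) norm over the flattened spatial dims; a vectorized sketch:

import numpy as np

x = np.random.rand(10, 10, 2, 3)
y = np.linalg.norm(x.reshape(10, 10, -1), axis=-1)[..., None, None]
assert y.shape == (10, 10, 1, 1)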
Example No. 30
 def test_batch_normalization(self):
     if legacy_opset_pre_6():
         raise unittest.SkipTest("Backend doesn't support consumed flag")
     node_def = helper.make_node("BatchNormalization",
                                 ["X", "scale", "bias", "mean", "var"],
                                 ["Y"],
                                 epsilon=0.001)
     x_shape = [3, 5, 4, 2]
     param_shape = [5]
     _param_shape = [1, 5, 1, 1]
     x = self._get_rnd(x_shape, 0, 1)
     m = self._get_rnd(param_shape, 0, 1)
     _m = m.reshape(_param_shape)
     v = self._get_rnd(param_shape, 0, 1)
     _v = v.reshape(_param_shape)
     scale = self._get_rnd(param_shape, 0, 1)
     _scale = scale.reshape(_param_shape)
     bias = self._get_rnd(param_shape, 0, 1)
     _bias = bias.reshape(_param_shape)
     golden = self._batch_normalization(x, _m, _v, _bias, _scale, 0.001)
     output = run_node(node_def, [x, scale, bias, m, v])
     np.testing.assert_almost_equal(output["Y"], golden, decimal=5)
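For reference, the golden value from the suite's _batch_normalization helper should match the standard inference formula, with each per-channel parameter broadcast as (1, C, 1, 1); a hedged sketch:

import numpy as np

def batch_norm_inference(x, scale, bias, mean, var, epsilon=1e-3):
    shape = (1, x.shape[1], 1, 1)  # per-channel params broadcast over N, H, W
    scale, bias, mean, var = (p.reshape(shape) for p in (scale, bias, mean, var))
    return scale * (x - mean) / np.sqrt(var + epsilon) + bias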