Example #1
 def test_flatten(self):
     # If input tensor has shape (d_0, d_1, ... d_n) then the
     # output will have shape:
     #
     # (d_0 x d_1 x ... x d_(axis-1), d_axis x d_(axis+1) x ... x d_n)
     #
     # TODO: pass axis attribute which is supported in newer
     # versions of onnx
     node_def = helper.make_node("Flatten", ["X"], ["Y"])
     x = self._get_rnd([10, 2, 3, 4, 5])
     output = run_node(node_def, [x])
     # TODO: pass axis=3 and uncomment the line below
     # np.testing.assert_almost_equal(output["Y"], x.reshape([60, 20]))
     np.testing.assert_almost_equal(output["Y"], x.reshape([10, 120]))
 def test_gemm(self):
     # Compute Y = alpha * A * B + beta * C
     node_def = helper.make_node("Gemm", ["A", "B", "C"], ["Y"],
                                 transA=0,
                                 transB=0,
                                 broadcast=1,
                                 alpha=1.0,
                                 beta=1.0)
     x = np.floor(self._get_rnd([10, 10]))
     y = np.floor(self._get_rnd([10, 10]))
     z = np.floor(self._get_rnd([10, 10]))
     output = run_node(node_def, [x, y, z])
     test_output = np.matmul(x, y) + z
     np.testing.assert_almost_equal(output["Y"], test_output)
 def test_slice(self):
     # test case 1 with normal inputs
     axes = [0, 1, 2]
     starts = [0, 0, 0]
     ends = [2, 2, 2]
     node_def = helper.make_node("Slice", ["X"], ["S"],
                                 axes=axes,
                                 starts=starts,
                                 ends=ends)
     x = self._get_rnd([1000]).reshape([10, 10, 10])
     output = run_node(node_def, [x])
     np.testing.assert_almost_equal(output["S"], x[0:2, 0:2, 0:2])
     # test case 2 with negative, out-of-bound and default inputs
     axes = [0, 2]
     starts = [0, -7]
     ends = [-8, 20]
     node_def = helper.make_node("Slice", ["X"], ["S"],
                                 axes=axes,
                                 starts=starts,
                                 ends=ends)
     x = self._get_rnd([1000]).reshape([10, 10, 10])
     output = run_node(node_def, [x])
     np.testing.assert_almost_equal(output["S"], x[0:-8, :, -7:20])
 def test_compress(self):
     if legacy_opset_pre_ver(9):
         raise unittest.SkipTest(
             "ONNX version {} doesn't support Compress.".format(
                 defs.onnx_opset_version()))
     axis = 1
     node_def = helper.make_node("Compress",
                                 inputs=['X', 'condition'],
                                 outputs=['Y'],
                                 axis=axis)
     x = self._get_rnd([5, 5, 5])
     cond = np.array([1, 0, 1])
     output = run_node(node_def, inputs=[x, cond])
     np.testing.assert_almost_equal(output['Y'],
                                    np.compress(cond, x, axis=axis))
Example #5
 def test_max_pool(self):
     node_def = helper.make_node("MaxPool", ["X"], ["Y"],
                                 kernel_shape=[1, 2],
                                 pads=[0, 0],
                                 strides=[1, 2])
     x = self._get_rnd([10, 10, 4, 4])
     output = run_node(node_def, [x])
     test_output = np.zeros([10, 10, 4, 2])
     for i1 in range(0, 10):
         for i2 in range(0, 10):
             for j1 in range(0, 4):
                 for j2 in range(0, 2):
                     test_output[i1][i2][j1][j2] = \
                       max(x[i1][i2][j1][2*j2], x[i1][i2][j1][2*j2 + 1])
     np.testing.assert_almost_equal(output["Y"], test_output)
Example #6
 def test_onehot(self):
   if legacy_opset_pre_ver(9):
     raise unittest.SkipTest("ONNX version {} doesn't support OneHot.".format(
         defs.onnx_opset_version()))
   indices = np.array([[0, 2], [1, 2], [0, 1]])
   depth = np.array([5], dtype=np.int32)
   on_value = 6.0
   off_value = 2.0
   values = np.array([off_value, on_value])
   node_def = helper.make_node(
       'OneHot', inputs=['indices', 'depth', 'values'], outputs=['y'], axis=-1)
    # depth is a one-element tensor; index it so np.arange gets a scalar.
    y = (np.arange(depth[0]) == indices[..., None]).astype(int)
   y = y * (on_value - off_value) + off_value
   output = run_node(node_def, inputs=[indices, depth, values])
   np.testing.assert_equal(output['y'], y)
Example #7
  def test_mean_variance_normalization(self):
    if legacy_opset_pre_ver(9):
      raise unittest.SkipTest(
          "ONNX version {} doesn't support MeanVarianceNormalization.".format(
              defs.onnx_opset_version()))

    input_data = self._get_rnd([2, 2, 2, 2])
    # Calculate expected output data using formula:
    # (Input - Mean) / SD
    mean = np.mean(input_data, keepdims=1, axis=(0, 2, 3))
    std = np.std(input_data, keepdims=1, axis=(0, 2, 3))
    expected_output = (input_data - mean) / std
    # Without the "axes" attribute, normalization should default to axes=[0, 2, 3]
    node_def = helper.make_node("MeanVarianceNormalization", ["X"], ["Y"])
    output = run_node(node_def, [input_data])
    np.testing.assert_almost_equal(output["Y"], expected_output, decimal=5)
 def test_eye_like(self):
     if legacy_opset_pre_ver(9):
         raise unittest.SkipTest(
             "ONNX version {} doesn't support EyeLike.".format(
                 defs.onnx_opset_version()))
     for shape in [[6, 10], [10, 6]]:
         for off_diagonal_offset in [-10, -6, -3, 0, 3, 6, 7, 10]:
             node_def = helper.make_node("EyeLike", ['x'], ['y'],
                                         dtype=1,
                                         k=off_diagonal_offset)
             x = np.random.randint(0, 100, size=shape, dtype=np.int32)
             y = np.eye(shape[0],
                        shape[1],
                        k=off_diagonal_offset,
                        dtype=np.float32)
             output = run_node(node_def, [x])
             np.testing.assert_equal(output['y'], y)
 def test_cast(self):
     for ty, tf_type in [(TensorProto.FLOAT, tf.float32),
                         (TensorProto.UINT8, tf.uint8),
                         (TensorProto.INT8, tf.int8),
                         (TensorProto.UINT16, tf.uint16),
                         (TensorProto.INT16, tf.int16),
                         (TensorProto.INT32, tf.int32),
                         (TensorProto.INT64, tf.int64),
                         (TensorProto.BOOL, tf.bool),
                         (TensorProto.FLOAT16, tf.float16),
                         (TensorProto.DOUBLE, tf.float64),
                         (TensorProto.COMPLEX64, tf.complex64),
                         (TensorProto.COMPLEX128, tf.complex128)]:
         node_def = helper.make_node("Cast", ["input"], ["output"], to=ty)
         vector = [2, 3]
         output = run_node(node_def, [vector])
         np.testing.assert_equal(output["output"].dtype, tf_type)
 def test_constant_fill(self):
     shape = [1, 2, 3, 4]
     extra_shape = [5, 6]
     value = 3.
     node_def = helper.make_node(
         "ConstantFill",
         ["X"],
         ["Y"],
         value=value,
         extra_shape=extra_shape,
         dtype=1,
     )
     x = self._get_rnd(shape)
     y = np.zeros(shape + extra_shape)
     y.fill(value)
     output = run_node(node_def, [x])
     np.testing.assert_equal(output["Y"].dtype, tf.float32)
     np.testing.assert_equal(output["Y"], y)
Example #11
 def test_cast(self):
   for ty, tf_type in [("float", tf.float32),
                       ("uint8", tf.uint8),
                       ("int8", tf.int8),
                       ("uint16", tf.uint16),
                       ("int16", tf.int16),
                       ("int32", tf.int32),
                       ("int64", tf.int64),
                       ("bool", tf.bool),
                       ("float16", tf.float16),
                       ("double", tf.float64),
                       ("complex64", tf.complex64),
                       ("complex128", tf.complex128)]:
     node_def = helper.make_node("Cast", ["input"], ["output"],
                                 to=ty)
     vector = [2, 3]
     output = run_node(node_def, [vector])
     np.testing.assert_equal(output["output"].dtype, tf_type)
Example #12
 def test_image_scaler(self):
   # Input:  (N x C x H x W), where N is the batch size,
   # C is the number of channels, and H and W are the height
   # and the width of the data
   # Scale: (float, default 1.0) the scale to apply
   # Bias: applied to each channel, same size as C
   # Output has same shape and type as input
   x = self._get_rnd([1, 3, 224, 224])
   # random distribution over [0, 1), so add 0.1
   scale = np.random.rand(1)[0] + 0.1
   bias = np.random.rand(3)
   node_def = helper.make_node(
       "ImageScaler", ["X"], ["Y"], scale=scale, bias=bias)
   output = run_node(node_def, [x])
   test_out = np.multiply(x, scale)
   test_out = np.transpose(test_out, [0, 2, 3, 1])
   test_out = np.add(test_out, bias)
   test_out = np.transpose(test_out, [0, 3, 1, 2])
   np.testing.assert_almost_equal(output["Y"], test_out)
Example #13
 def test_batch_normalization(self):
     node_def = helper.make_node("BatchNormalization",
                                 ["X", "scale", "bias", "mean", "var"],
                                 ["Y"],
                                 consumed_inputs=[0, 0, 0, 1, 1],
                                 epsilon=0.001)
     x_shape = [3, 5, 4, 2]
     param_shape = [5]
     _param_shape = [1, 5, 1, 1]
     x = self._get_rnd(x_shape, 0, 1)
     m = self._get_rnd(param_shape, 0, 1)
     _m = m.reshape(_param_shape)
     v = self._get_rnd(param_shape, 0, 1)
     _v = v.reshape(_param_shape)
     scale = self._get_rnd(param_shape, 0, 1)
     _scale = scale.reshape(_param_shape)
     bias = self._get_rnd(param_shape, 0, 1)
     _bias = bias.reshape(_param_shape)
     golden = self._batch_normalization(x, _m, _v, _bias, _scale, 0.001)
     output = run_node(node_def, [x, scale, bias, m, v])
     np.testing.assert_almost_equal(output["Y"], golden, decimal=5)
Example #14
 def test_batch_normalization(self):
   if legacy_opset_pre_ver(6):
     raise unittest.SkipTest("Backend doesn't support consumed flag")
   node_def = helper.make_node(
       "BatchNormalization", ["X", "scale", "bias", "mean", "var"], ["Y"],
       epsilon=0.001)
   x_shape = [3, 5, 4, 2]
   param_shape = [5]
   _param_shape = [1, 5, 1, 1]
   x = self._get_rnd(x_shape, 0, 1)
   m = self._get_rnd(param_shape, 0, 1)
   _m = m.reshape(_param_shape)
   v = self._get_rnd(param_shape, 0, 1)
   _v = v.reshape(_param_shape)
   scale = self._get_rnd(param_shape, 0, 1)
   _scale = scale.reshape(_param_shape)
   bias = self._get_rnd(param_shape, 0, 1)
   _bias = bias.reshape(_param_shape)
   golden = self._batch_normalization(x, _m, _v, _bias, _scale, 0.001)
   output = run_node(node_def, [x, scale, bias, m, v])
   np.testing.assert_almost_equal(output["Y"], golden, decimal=5)
Example #15
 def test_global_lp_pool(self):
   #   Image case:  (N x C x H x W), where N is the batch size,
   # C is the number of channels, and H and W are the height
   # and the width of the data
   #
   #   Non-image case: (N x C x D1 x D2 ... Dn)
   #
   #   Output data tensor from pooling across the input tensor.
   # Dimensions will be N x C x 1 x 1
   node_def = helper.make_node("GlobalLpPool", ["X"], ["Y"])
   x = self._get_rnd([10, 10, 2, 3])
   output = run_node(node_def, [x])
   test_output = np.zeros([10, 10, 1, 1])
   for i1 in range(0, 10):
     for i2 in range(0, 10):
       tmp = np.zeros([2, 3])
       for j1 in range(0, 2):
         for j2 in range(0, 3):
           tmp[j1][j2] = x[i1][i2][j1][j2]
       test_output[i1][i2][0][0] = np.linalg.norm(tmp)
   np.testing.assert_almost_equal(output["Y"], test_output, decimal=5)
Example #16
 def test_constant_fill(self):
   if not legacy_opset_pre_ver(9):
     raise unittest.SkipTest(
         "ONNX version {} doesn't support ConstantFill.".format(
             defs.onnx_opset_version()))
   shape = [1, 2, 3, 4]
   extra_shape = [5, 6]
   value = 3.
   node_def = helper.make_node(
       "ConstantFill",
       ["X"],
       ["Y"],
       value=value,
       extra_shape=extra_shape,
       dtype=1,
   )
   x = self._get_rnd(shape)
   y = np.zeros(shape + extra_shape)
   y.fill(value)
   output = run_node(node_def, [x])
   np.testing.assert_equal(output["Y"].dtype, tf.float32)
   np.testing.assert_equal(output["Y"], y)
Example #17
 def test_is_inf(self):
   if legacy_opset_pre_ver(10):
      raise unittest.SkipTest(
          "ONNX version {} doesn't support IsInf.".format(
              defs.onnx_opset_version()))
    input = np.array([-1.2, np.nan, np.inf, 2.8, -np.inf, np.inf],
                     dtype=np.float32)
    expected_output = {
        "node_def": np.isinf(input),
        "node_def_neg_false": np.isposinf(input),
        "node_def_pos_false": np.isneginf(input)
    }
    node_defs = {
        "node_def": helper.make_node("IsInf", ["X"], ["Y"]),
        "node_def_neg_false": helper.make_node("IsInf", ["X"], ["Y"],
                                               detect_negative=0),
        "node_def_pos_false": helper.make_node("IsInf", ["X"], ["Y"],
                                               detect_positive=0)
    }
   for key in node_defs:
     output = run_node(node_defs[key], [input])
     np.testing.assert_equal(output["Y"], expected_output[key])
Example #18
  def test_conv(self):
    device = "CUDA"
    if not supports_device(device):
      raise unittest.SkipTest(
          "Backend doesn't support device {}".format(device))

    N, C, H, W = 4, 3, 5, 5
    x_shape = [N, C, H, W]
    K, kH, kW = 6, 3, 3
    weight_shape = [K, C, kH, kW]
    node_def = helper.make_node(
        "Conv", ["X", "weights"], ["Y"],
        pads=[1, 1, 1, 1],
        kernel_shape=[kH, kW])

    x = self._get_rnd(x_shape)
    weights = self._get_rnd(weight_shape)
    output = run_node(node_def, [x, weights], device=device)

    out_shape = [N, K, H, W]
    test_output = np.zeros(out_shape)
    for n in range(N):
      for c in range(C):
        for h in range(H):
          for w in range(W):
            for k in range(K):
              for kh in range(kH):
                for kw in range(kW):
                  h_in_range = (h - kH // 2 + kh) < H and (
                      h - kH // 2 + kh) >= 0
                  w_in_range = (w - kW // 2 + kw) < W and (
                      w - kW // 2 + kw) >= 0
                  if h_in_range and w_in_range:
                    test_output[n][k][h][w] += (
                        x[n][c][h - kH // 2 + kh][w - kW // 2 + kw] *
                        weights[k][c][kh][kw])

    np.testing.assert_almost_equal(output["Y"], test_output, decimal=5)
 def test_l_r_n(self):
     # Each input value is divided by:
     #
     # (bias+(alpha/size)*sum(xi^2 for every xi in the local region))^beta
     alpha = 2.0
     beta = 1.0
     bias = 5.0
     size = 3
     node_def = helper.make_node("LRN", ["X"], ["Y"],
                                 alpha=alpha,
                                 beta=beta,
                                 bias=bias,
                                 size=size)
     x = self._get_rnd([10, 2, 10, 10])
     output = run_node(node_def, [x])
     test_output = np.zeros([10, 10, 10, 2])
     x = np.transpose(x, axes=[0, 2, 3, 1])
     for i1 in range(0, 10):
         for i2 in range(0, 10):
             for j1 in range(0, 10):
                 for j2 in range(0, 2):
                     sqr_sum = 0.
                     # size of 3 means radius 1 in TF speak
                     # i.e. the immediate neighbouring values
                     # if "previous" neighbour exists
                     if j2 > 0:
                         sqr_sum += x[i1][i2][j1][j2 -
                                                  1] * x[i1][i2][j1][j2 - 1]
                     # current value
                     sqr_sum += x[i1][i2][j1][j2] * x[i1][i2][j1][j2]
                     # if "next" neighbour exists
                     if j2 < 2 - 1:
                         sqr_sum += x[i1][i2][j1][j2 +
                                                  1] * x[i1][i2][j1][j2 + 1]
                     test_output[i1][i2][j1][j2] = \
                       x[i1][i2][j1][j2] / ((bias + (alpha * 1. / size) * sqr_sum) ** beta)
     test_output = np.transpose(test_output, axes=[0, 3, 1, 2])
     np.testing.assert_almost_equal(output["Y"], test_output)
 def test_cast(self):
   if legacy_onnx_pre_ver(1, 2) or legacy_opset_pre_ver(6):
     test_cases = [("FLOAT", tf.float32), ("UINT8", tf.uint8), ("INT8",
                                                                tf.int8),
                   ("UINT16", tf.uint16), ("INT16", tf.int16),
                   ("INT32", tf.int32), ("INT64", tf.int64), ("BOOL", tf.bool),
                   ("FLOAT16", tf.float16), ("DOUBLE", tf.float64),
                   ("COMPLEX64", tf.complex64), ("COMPLEX128", tf.complex128)]
   else:
     test_cases = [(TensorProto.FLOAT, tf.float32),
                   (TensorProto.UINT8, tf.uint8), (TensorProto.INT8, tf.int8),
                   (TensorProto.UINT16, tf.uint16),
                   (TensorProto.INT16, tf.int16), (TensorProto.INT32,
                                                   tf.int32),
                   (TensorProto.INT64, tf.int64), (TensorProto.BOOL, tf.bool),
                   (TensorProto.FLOAT16, tf.float16),
                   (TensorProto.DOUBLE, tf.float64),
                   (TensorProto.COMPLEX64, tf.complex64),
                   (TensorProto.COMPLEX128, tf.complex128)]
   for ty, tf_type in test_cases:
     node_def = helper.make_node("Cast", ["input"], ["output"], to=ty)
     vector = [2, 3]
     output = run_node(node_def, [vector])
     np.testing.assert_equal(output["output"].dtype, tf_type)
Example #21
 def test_transpose(self):
   node_def = helper.make_node("Transpose", ["X"], ["Y"], perm=[0, 2, 1])
   x = self._get_rnd([1000]).reshape([10, 10, 10])
   output = run_node(node_def, [x])
   np.testing.assert_almost_equal(output["Y"], np.transpose(x, (0, 2, 1)))
Example #22
 def test_tanh(self):
   node_def = helper.make_node("Tanh", ["X"], ["Y"])
   x = self._get_rnd([1000]) + 1.0
   output = run_node(node_def, [x])
   np.testing.assert_almost_equal(output["Y"], np.tanh(x), decimal=5)
Example #23
 def test_sub(self):
   node_def = helper.make_node("Sub", ["X", "Y"], ["Z"])
   x = self._get_rnd([10, 10])
   y = self._get_rnd([10, 10])
   output = run_node(node_def, [x, y])
   np.testing.assert_almost_equal(output["Z"], np.subtract(x, y))
Example #24
 def test_squeeze(self):
   node_def = helper.make_node("Squeeze", ["X"], ["Y"], axes=[2])
   x = np.array([[[0], [1], [2]]])
   output = run_node(node_def, [x])
   np.testing.assert_almost_equal(output["Y"], np.squeeze(x, axis=2))
Example #25
 def test_softsign(self):
   node_def = helper.make_node("Softsign", ["X"], ["Y"])
   x = self._get_rnd([3, 4, 5])
   output = run_node(node_def, [x])
   np.testing.assert_almost_equal(output["Y"], x / (1 + np.abs(x)))
Example #26
 def test_softplus(self):
   node_def = helper.make_node("Softplus", ["X"], ["Y"])
   x = self._get_rnd([3, 4, 5])
   output = run_node(node_def, [x])
   np.testing.assert_almost_equal(output["Y"], np.log(np.exp(x) + 1))
Example #27
 def test_size(self):
   node_def = helper.make_node("Size", ["X"], ["Y"])
   x = self._get_rnd([5, 10, 10, 3])
   output = run_node(node_def, [x])
   np.testing.assert_almost_equal(output["Y"], np.size(x))
Example #28
 def test_sigmoid(self):
   node_def = helper.make_node("Sigmoid", ["X"], ["Y"])
   x = self._get_rnd([1000])
   output = run_node(node_def, [x])
   np.testing.assert_almost_equal(output["Y"], 1 / (1 + np.exp(-x)))
Example #29
 def test_shape(self):
   node_def = helper.make_node("Shape", ["X"], ["Y"])
   x = self._get_rnd([5, 10, 10, 3])
   output = run_node(node_def, [x])
   np.testing.assert_allclose(output["Y"], np.shape(x))
Example #30
 def test_pow(self):
   node_def = helper.make_node("Pow", ["X", "Y"], ["Z"])
   x = self._get_rnd(1000) / 2.0 + 0.5
   y = self._get_rnd(1000) / 2.0 + 0.5
   output = run_node(node_def, [x, y])
   np.testing.assert_almost_equal(output["Z"], np.power(x, y))