Example #1
 def rnn_get_activation(cls, name, alpha, beta):
     if name not in cls.ONNX_ACTIVATION_MAPPING:
         exception.OP_UNSUPPORTED_EXCEPT(
             "Activation function {} for {}".format(name, cls.__name__),
             "Tensorflow")
     activation = cls.ONNX_ACTIVATION_MAPPING[name]
     kwargs = {}
     if name == "affine":
         kwargs["scale"] = alpha
         kwargs["shift"] = beta
         activation = activation(**kwargs)
     elif name == "elu":
         if alpha != 1:
             exception.OP_UNSUPPORTED_EXCEPT(
                 "Activation function {} with alpha={} for {}".format(
                     name, alpha, cls.__name__), "Tensorflow")
     elif name == "hard_sigmoid":
         if alpha != 0.2 or beta != 0.5:
             exception.OP_UNSUPPORTED_EXCEPT(
                 "Activation function {} with alpha={}, beta={} for {}".
                 format(name, alpha, beta, cls.__name__), "Tensorflow")
     elif name == "leaky_relu":
         kwargs["alpha"] = alpha or 0.01
         activation = partial(activation, **kwargs)
     elif name == "thresholded_relu":
         kwargs["theta"] = alpha
         activation = activation(**kwargs)
     return activation
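A minimal usage sketch, assuming the function is exposed as a classmethod on an RNN handler whose ONNX_ACTIVATION_MAPPING maps ONNX activation names to TensorFlow callables (the handler name below is hypothetical):

 # Hypothetical handler; the mapping might contain e.g.
 # {"tanh": tf.tanh, "leaky_relu": tf.nn.leaky_relu, ...}
 act = SomeRNNHandler.rnn_get_activation("leaky_relu", alpha=0.1, beta=None)
 # -> functools.partial(tf.nn.leaky_relu, alpha=0.1)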
Example #2
    def pool_v11(cls, node, input_dict, pooling_type, strict=True):
        x = input_dict[node.inputs[0]]

        kernel_shape = node.attrs["kernel_shape"]

        spatial_size = len(kernel_shape)
        x_rank = spatial_size + 2

        strides = node.attrs.get("strides", [1] * spatial_size)
        dilations = node.attrs.get("dilations", [1] * spatial_size)
        ceil_mode = bool(node.attrs.get("ceil_mode", 0))
        pads = node.attrs.get("auto_pad", "NOTSET")
        if pads == "NOTSET":
            pads = node.attrs.get("pads", [0] * spatial_size * 2)

        if spatial_size > 3:
            exception.OP_UNSUPPORTED_EXCEPT(
                "MaxPool with {}D input".format(x_rank), "Tensorflow")
        if pooling_type == "MAX_WITH_ARGMAX" and x_rank != 4:
            exception.OP_UNSUPPORTED_EXCEPT(
                "MaxPool with {}D input".format(x_rank), "Tensorflow")
        if node.attrs.get("storage_order", 0) != 0:
            exception.OP_UNSUPPORTED_EXCEPT("MaxPool with column major",
                                            "Tensorflow")

        storage_format, _ = get_data_format(x_rank)

        need_trans = storage_format.startswith("NC")
        # default so compute_format is always defined for the transpose below
        compute_format = storage_format
        if need_trans:
            compute_format = "N" + storage_format[2:] + "C"
            x = tf.transpose(x,
                             perm=get_perm_from_formats(
                                 storage_format, compute_format))

        dp = DilatedPooling(input=x,
                            kernel_shape=kernel_shape,
                            strides=strides,
                            dilations=dilations,
                            padding=pads,
                            ceil_mode=ceil_mode)

        # select the correct op depending on the pooling type
        pooling_op = lambda: (dp.dilated_maxpool(), None) if \
            pooling_type == "MAX" else dp.dilated_maxpool_with_argmax()

        # select the correct transpose ops depending on the input storage format
        perm = get_perm_from_formats(compute_format, storage_format)
        postprocess_op = lambda pooled, argmax: (
            tf.transpose(pooled, perm=perm)
            if need_trans else pooled, tf.transpose(argmax, perm=perm)
            if need_trans and argmax is not None else argmax)

        pooled, argmax = pooling_op()
        pooled, argmax = postprocess_op(pooled, argmax)

        result = [pooled] if argmax is None else [pooled, argmax]

        return result
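The method returns a one- or two-element list depending on whether an argmax output was produced; a caller might unpack it as below (hedged sketch; the owning class name is hypothetical):

 outputs = PoolHandler.pool_v11(node, input_dict, "MAX_WITH_ARGMAX")
 pooled = outputs[0]
 argmax = outputs[1] if len(outputs) > 1 else None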
Example #3
    def args_check(cls, node, **kwargs):
        x = kwargs["tensor_dict"][node.inputs[0]]
        x_shape = x.get_shape().as_list()
        if len(x_shape) != 4:
            exception.OP_UNSUPPORTED_EXCEPT("Upsample without 4D input",
                                            "Tensorflow")

        if node.attrs.get("mode",
                          "nearest").lower() not in ["nearest", "bilinear"]:
            exception.OP_UNSUPPORTED_EXCEPT(
                "Upsample without nearest or bilinear", "Tensorflow")
Example #4
 def args_check(cls, node, **kwargs):
     unsupported_dtype = [
         tf.int8, tf.int16, tf.uint8, tf.uint16, tf.uint32, tf.uint64
     ]
     x = kwargs["tensor_dict"][node.inputs[0]]
     y = kwargs["tensor_dict"][node.inputs[1]]
     if x.dtype in unsupported_dtype:
         exception.OP_UNSUPPORTED_EXCEPT("Mod Dividend in " + str(x.dtype),
                                         "Tensorflow")
     if y.dtype in unsupported_dtype:
         exception.OP_UNSUPPORTED_EXCEPT("Mod Divisor in " + str(y.dtype),
                                         "Tensorflow")
Example #5
 def args_check(cls, node, **kwargs):
     tensor_dict = kwargs["tensor_dict"]
     indices = tensor_dict[node.inputs[0]]
     depth = tensor_dict[node.inputs[1]]
     if indices.dtype not in [tf.uint8, tf.int32, tf.int64]:
         exception.OP_UNSUPPORTED_EXCEPT(
             "OneHot indices must be in uint8 or int32 or int64 " +
             "but it is currently in " + str(indices.dtype) + " which",
             "Tensorflow")
     if depth.dtype not in [tf.int32]:
         exception.OP_UNSUPPORTED_EXCEPT(
             "OneHot depth must be in int32 but it is currently in " +
             str(depth.dtype) + " which", "Tensorflow")
Example #6
 def args_check(cls, node, **kwargs):
   direction = node.attrs.get("direction", "forward")
   num_directions = 2 if direction == "bidirectional" else 1
   if "clip" in node.attrs:
     exception.OP_UNSUPPORTED_EXCEPT("GRU with clip", "Tensorflow")
   if node.attrs.get("linear_before_reset", 0):
     exception.OP_UNSUPPORTED_EXCEPT("GRU with linear_before_reset",
                                     "Tensorflow")
   if "activations" in node.attrs:
     activations = list(map(lambda x: x.lower(), node.attrs["activations"]))
     if activations[0] != "sigmoid":
       exception.OP_UNSUPPORTED_EXCEPT("GRU without sigmoid for `z` and `r`",
                                       "Tensorflow")
     if num_directions == 2:
       if activations[2] != "sigmoid":
         exception.OP_UNSUPPORTED_EXCEPT("GRU without sigmoid for `z` and `r`",
                                         "Tensorflow")
Example #7
 def args_check(cls, node, **kwargs):
     x = kwargs["tensor_dict"][node.inputs[0]]
     # uint64 cannot upcast to any tensorflow supported datatype
     # for tf.clip_by_value that didn't lose precision
     if x.dtype == tf.uint64:
         exception.OP_UNSUPPORTED_EXCEPT(
             "Clip input, min and max in " + str(x.dtype) + " datatype",
             "Tensorflow")
Example #8
 def args_check(cls, node, **kwargs):
     supported_dtype = [
         tf.bfloat16, tf.half, tf.float32, tf.float64, tf.uint8, tf.uint16,
         tf.int8, tf.int16, tf.int32, tf.int64, tf.complex64, tf.complex128
     ]
     x = kwargs["tensor_dict"][node.inputs[0]]
     if x.dtype not in supported_dtype:
         exception.OP_UNSUPPORTED_EXCEPT(
             "CumSum input in " + str(x.dtype) + " which", "Tensorflow")
Example #9
 def version_1(cls, node, **kwargs):
     if node.op_type == "Conv1D":
         d = 1
     elif node.op_type == "Conv2D":
         d = 2
     elif node.op_type == "Conv3D":
         d = 3
     else:
         exception.OP_UNSUPPORTED_EXCEPT(node.op_type, "Tensorflow")
     return cls.conv_op(node, d=d, **kwargs)
Example #10
 def args_check(cls, node, **kwargs):
   direction = node.attrs.get("direction", "forward")
   num_directions = 2 if direction == "bidirectional" else 1
   if node.attrs.get("input_forget", 0):
     # TODO(fumihwh): warning
     pass
   if "activations" in node.attrs:
     activations = list(map(lambda x: x.lower(), node.attrs["activations"]))
     if activations[0] != "sigmoid":
       exception.OP_UNSUPPORTED_EXCEPT("LSTM without sigmoid for `f`",
                                       "Tensorflow")
     if activations[1] != activations[2]:
       exception.OP_UNSUPPORTED_EXCEPT(
           "LSTM without same activation for `g` and `h`", "Tensorflow")
     if num_directions == 2:
       if activations[3] != "sigmoid":
         exception.OP_UNSUPPORTED_EXCEPT("LSTM without sigmoid for `f`",
                                         "Tensorflow")
       if activations[4] != activations[5]:
         exception.OP_UNSUPPORTED_EXCEPT(
             "LSTM without same activation for `g` and `h`", "Tensorflow")
Example #11
  def args_check(cls, node, **kwargs):
    x = kwargs["tensor_dict"][node.inputs[0]]
    y = kwargs["tensor_dict"][node.inputs[1]]

    # throw an error if the data type is not natively supported by
    # Tensorflow, cannot be safely cast, and auto-cast option is False
    if x.dtype in cls.cast_map and cls.cast_map[x.dtype] is None:
      exception.DTYPE_NOT_CAST_EXCEPT(
          "Mod input " + node.inputs[0] + " with data type '" +
          data_type.tf_to_np_str(x.dtype) + "'",
          data_type.tf_to_np_str_list(cls.supported_types))

    # throw an error if inputs A and B are not in the same data type
    if x.dtype != y.dtype:
      exception.OP_UNSUPPORTED_EXCEPT("Mod with inputs in different data types",
                                      "Tensorflow")
Example #12
 def args_check(cls, node, **kwargs):
     x = kwargs["tensor_dict"][node.inputs[0]]
     x_shape = x.get_shape().as_list()
     if len(x_shape) != 4:
         exception.OP_UNSUPPORTED_EXCEPT("Resize required 4D input",
                                         "Tensorflow")
Example #13
  def conv(cls, node, input_dict, transpose=False):
    """ Convolution method for both conv and transposed conv
    For transposed conv,
      Attr pads is not used for input, but declares how much output is padded.
      Here, output means output from transposed conv which already pad output_padding if set.
      So the pseudo explanation for output should be:
        output = conv_transpose_output + output_padding - pads
      And conv_transpose_output shape should be:
        conv_transpose_output_shape[i] = strides[i] * (input_shape[i] - 1) + kernel_shape[i]
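       For example (illustrative numbers only): with strides = [2, 2], a
       3x3 spatial input and a 2x2 kernel, each spatial dim of
       conv_transpose_output is 2 * (3 - 1) + 2 = 6; with output_padding = 1
       and a total pad of 1 on that dim, the final output is 6 + 1 - 1 = 6.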
    """
    x = input_dict[node.inputs[0]]
    x_rank = len(x.get_shape())
    x_shape = tf_shape(x, tf.int32)
    spatial_size = x_rank - 2

    storage_format, compute_format = get_data_format(x_rank)
    compute_c_idx = compute_format.find("C")
    spatial_format = "".join([d for d in compute_format if d not in ["N", "C"]])

    in_weights = input_dict[node.inputs[1]]
    weights_rank = len(in_weights.get_shape())
    # Translate weights from (C x M x KH x KW) to (KH x KW x M x C) for
    # transposed conv, or from (M x C x KH x KW) to (KH x KW x C x M) for
    # regular conv; the same permutation covers both layouts.
    perm = list(range(2, weights_rank)) + [1, 0]

    if "kernel_shape" in node.attrs.keys():
      kernel_shape = node.attrs["kernel_shape"]
      if in_weights.get_shape().is_fully_defined():
        assert in_weights.get_shape().as_list()[2:] == kernel_shape, (
            "kernel_shape "
            "attr of convolution does not match the actual weight "
            "passed to this operation, attr {}, actual {}").format(
                kernel_shape,
                in_weights.get_shape().as_list())
    else:
      kernel_shape = tf_shape(in_weights, tf.int32)[2:]

    weights = tf.transpose(in_weights, perm)
    dilations = node.attrs.get("dilations", [1] * spatial_size)
    strides = node.attrs.get("strides", [1] * spatial_size)

    pads = node.attrs.get("pads", [0, 0] * spatial_size)

    # Check auto_pad nonexistent or NOTSET first
    if "auto_pad" not in node.attrs or node.attrs["auto_pad"] == "NOTSET":
      if not transpose:
        if pads != [0, 0] * spatial_size:
          x = PadMixin.get_padding_as_op(x, pads)
        pad_mode = "VALID"
      else:
        pad_mode = "NOTSET"
    # Then we use auto_pad to setup pad_mode
    elif node.attrs["auto_pad"] == "SAME_UPPER":
      pad_mode = "SAME"
    elif node.attrs["auto_pad"] == "VALID":
      pad_mode = "VALID"
    elif node.attrs["auto_pad"] == "SAME_LOWER":
      pad_mode = PAD_TF_INCOMPATIBLE
    else:
      raise ValueError("Invalid auto_pad attribute: {}".format(
          node.attrs["auto_pad"]))

    # Currently auto_pad = SAME_LOWER is not supported
    if pad_mode is PAD_TF_INCOMPATIBLE:
      if transpose:
        exception.OP_UNSUPPORTED_EXCEPT(
            "ConvTranspose with auto_pad `SAME_LOWER`", "Tensorflow")
      else:
        exception.OP_UNSUPPORTED_EXCEPT("Conv with auto_pad `SAME_LOWER`",
                                        "Tensorflow")

    group = node.attrs.get("group", 1)
    weight_shape = weights.get_shape().as_list()
    # Is this a depthwise convolution we can support?
    depthwise = (x_rank == 4 and len(weight_shape) == 4 and group != 1 and
                 not transpose and not (None in weight_shape))
    if depthwise and isinstance(x_shape, np.ndarray):
      depthwise = bool(group == x_shape[1])

    if depthwise is True:
      # Depthwise convolution.
      # The convolution kernel layout in tf.depthwise_conv is:
      # [filter_height, filter_width, in_channels, channel_multiplier]
      # Weight is now (KH x KW X C/g X M), or more precisely, (KH x KW X C/g X (g * M/g)),
      # we reshape it to (KH x KW x C x M/g)
      # NOTE: Assuming weight has fixed shape.

      depthwise_filter_shape = weight_shape[0:2] + [
          -1, weight_shape[3] // group
      ]
      weights = tf.reshape(weights, depthwise_filter_shape)

      if not sys_config.device == 'CUDA':
        # transpose input to NHWC layout
        x = tf.transpose(x,
                         perm=get_perm_from_formats(storage_format,
                                                    compute_format))
      weight_groups = [weights]
      xs = [x]
    else:
      weight_groups = tf.split(weights, num_or_size_splits=group, axis=-1)
      if sys_config.device == 'CUDA':
        if group == 1:
          xs = [x]
        else:
          xs = tf.split(x, num_or_size_splits=group, axis=1)
      else:
        x = tf.transpose(x,
                         perm=get_perm_from_formats(storage_format,
                                                    compute_format))
        if group == 1:
          xs = [x]
        else:
          xs = tf.split(x, num_or_size_splits=group, axis=-1)

    if transpose:
      if dilations != [1] * spatial_size:
        raise RuntimeError("Cannot set non-1 dilation for conv transpose.")
      convolved = []
      # this is a workaround for TensorFlow AutoGraph not correctly
      # detecting x; fixed in tf>=2.2.0
      x = None
      for (x, weight) in zip(xs, weight_groups):
        x_spatial_shape = [
            x_shape[storage_format.find(d)] for d in spatial_format
        ]
        weights_shape = tf_shape(weights, tf.int32)
        output_shape = node.attrs.get("output_shape", None)
        conv_output_shape = [x_shape[storage_format.find("N")]]

        # calculate output shape
        if pad_mode == "NOTSET":
          if output_shape is None:
            conv_output_shape += [
                strides[i] * x_spatial_shape[i] - strides[i] +
                (kernel_shape[i] - 1) * dilations[i] + 1
                for i in list(range(spatial_size))
            ]
          else:
            conv_output_shape += [
                s + pads[i] + pads[spatial_size + i]
                for i, s in enumerate(output_shape[-2:])
            ]
          conv_output_shape.insert(compute_c_idx, weights_shape[-2])

          # make strides to match input rank
          strides_full = [1] + strides
          strides_full.insert(compute_c_idx, 1)

          # get corresponding function in tf
          if spatial_size == 1:
            conv_func = tf.nn.conv1d_transpose
            strides_full = strides[0]
          elif spatial_size == 2:
            conv_func = tf.nn.conv2d_transpose
          elif spatial_size == 3:
            conv_func = tf.nn.conv3d_transpose
          else:
            raise NotImplementedError(
                "Transposed convolution for {}d is not implemented in Tensorflow"
                .format(spatial_size))

          # use raw input x to do transposed conv
          conv_rs = conv_func(x,
                              weight,
                              conv_output_shape,
                              strides_full,
                              padding="VALID",
                              data_format=compute_format)

          # pad output first by output_padding attr
          if "output_padding" in node.attrs and output_shape is None:
            output_padding = [[0, 0]
                             ] + [[0, p] for p in node.attrs["output_padding"]]
            output_padding.insert(compute_c_idx, [0, 0])
            conv_rs = tf.pad(conv_rs, output_padding)

          # remove pads set in pads attr
          conv_rs_shape = tf_shape(conv_rs, tf.int32)
          conv_rs_shape_list = [
              conv_rs_shape[i] for i in range(conv_rs.shape.rank)
          ]
          begin = [0] + pads[:spatial_size]
          begin.insert(compute_c_idx, 0)
          size = [
              s if d in ["N", "C"] else s - pads[spatial_format.find(d)] -
              pads[spatial_format.find(d) + spatial_size]
              for d, s in zip(compute_format, conv_rs_shape_list)
          ]

          conv_rs = tf.slice(conv_rs, begin=begin, size=size)

          convolved.append(conv_rs)
        else:
          # No need to check pads if auto_pad is specifically provided.
          # The assumption is that once auto_pad is provided as either VALID
          # or SAME_UPPER (SAME_LOWER is currently not supported in TF) the
          # output_shape will always be inferred. That is, the output_shape
          # and output_padding will not be used in this case.
          if pad_mode == "VALID":
            conv_output_shape += [
                strides[i] * (x_spatial_shape[i] - 1) + weights_shape[i]
                for i in list(range(spatial_size))
            ]
          else:
            conv_output_shape += [
                strides[i] * x_spatial_shape[i]
                for i in list(range(spatial_size))
            ]
          conv_output_shape.insert(compute_c_idx, weights_shape[-2])

          # make strides to match input rank
          strides_full = [1] + strides
          strides_full.insert(compute_c_idx, 1)

          # get corresponding function in tf
          if spatial_size == 1:
            conv_func = tf.nn.conv1d_transpose
            strides_full = strides[0]
          elif spatial_size == 2:
            conv_func = tf.nn.conv2d_transpose
          elif spatial_size == 3:
            conv_func = tf.nn.conv3d_transpose
          else:
            raise NotImplementedError(
                "Transposed convolution for {}d is not implemented in Tensorflow"
                .format(spatial_size))

          # use raw input x to do transposed conv
          conv_rs = conv_func(x,
                              weight,
                              conv_output_shape,
                              strides_full,
                              padding=pad_mode,
                              data_format=compute_format)
          convolved.append(conv_rs)

    else:  # not transpose:
      if depthwise is True:
        if compute_format == "NHWC":
          strides = [1] + strides + [1]
        elif compute_format == 'NCHW':
          strides = [1, 1] + strides
        else:
          raise ValueError("Invalid compute_format: {}".format(compute_format))

        convolved = [
            tf.nn.depthwise_conv2d(x,
                                   weight,
                                   padding=pad_mode,
                                   strides=strides,
                                   dilations=dilations,
                                   data_format=compute_format)
            for (x, weight) in zip(xs, weight_groups)
        ]

      else:
        convolved = [
            tf.nn.convolution(x,
                              weight,
                              padding=pad_mode,
                              strides=strides,
                              dilations=dilations,
                              data_format=compute_format)
            for (x, weight) in zip(xs, weight_groups)
        ]

    if len(node.inputs) == 2:
      if sys_config.device == 'CUDA':
        output = tf.concat(convolved, axis=1)
      else:
        output = tf.concat(convolved, axis=-1)
        output = tf.transpose(output,
                              perm=get_perm_from_formats(
                                  compute_format, storage_format))
    else:
      bias = input_dict[node.inputs[2]]
      bias = cls.explicit_broadcast([x, bias], compute_c_idx)

      if sys_config.device == 'CUDA':
        output = tf.concat(convolved, axis=1)
        output = tf.add(output, bias)
      else:
        output = tf.concat(convolved, axis=-1)
        output = tf.add(output, bias)
        output = tf.transpose(output,
                              perm=get_perm_from_formats(
                                  compute_format, storage_format))

    return [output]
Example #14
  def pool(cls, node, input_dict, pooling_type, strict=True):
    x = input_dict[node.inputs[0]]

    kernel_shape = node.attrs["kernel_shape"]

    spatial_size = len(kernel_shape)
    x_rank = spatial_size + 2

    strides = node.attrs.get("strides", [1] * spatial_size)
    dilations = node.attrs.get("dilations", [1] * spatial_size)
    ceil_mode = bool(node.attrs.get("ceil_mode", 0))
    pads = node.attrs.get("auto_pad", "NOTSET")
    p = node.attrs.get("p", 2)

    if pads == "NOTSET":
      pads = node.attrs.get("pads", [0] * spatial_size * 2)
      # In case shape is fully defined, check if pads match
      # SAME padding in Tensorflow
      if x.shape.is_fully_defined() and pads != [0] * spatial_size * 2:
        in_shape = x.get_shape()
        same_paddings = calc_pads_same(in_shape[1:x_rank - 1], kernel_shape,
                                       strides, dilations, "SAME_UPPER")
        if pads == same_paddings:
          pads = "SAME_UPPER"

    count_include_pad = bool(node.attrs.get("count_include_pad", 0))
    if pooling_type == "AVG":
      pooling_name = "AveragePool"
    elif pooling_type == "MAX":
      pooling_name = "MaxPool"
    elif pooling_type == "MAX_WITH_ARGMAX":
      pooling_name = "MaxPoolWithArgmax"
    elif pooling_type == "LP":
      pooling_name = "LpPool"

    if spatial_size > 3:
      exception.OP_UNSUPPORTED_EXCEPT(
          pooling_name + " with {}D input".format(x_rank), "Tensorflow")
    if pooling_type == "MAX_WITH_ARGMAX" and x_rank != 4:
      exception.OP_UNSUPPORTED_EXCEPT(
          pooling_name + " with {}D input".format(x_rank), "Tensorflow")
    if node.attrs.get("storage_order", 0) != 0:
      exception.OP_UNSUPPORTED_EXCEPT(pooling_name + " with column major",
                                      "Tensorflow")

    x_dtype = x.dtype
    # For max_pool and max_pool_with_argmax, TensorFlow does not support
    # NCHW input in int8 or uint8, so we cast to float16 in order to run
    # with the NCHW data format
    need_cast = pooling_type in [
        'MAX', 'MAX_WITH_ARGMAX'
    ] and sys_config.device == 'CUDA' and x_dtype in [tf.int8, tf.uint8]
    x = tf.cast(x, tf.float16) if need_cast else x

    dp = DilatedPooling(input=x,
                        kernel_shape=kernel_shape,
                        strides=strides,
                        dilations=dilations,
                        padding=pads,
                        ceil_mode=ceil_mode,
                        pooling_type=pooling_type,
                        count_include_pad=count_include_pad,
                        p=p)
    if not dp.is_supported():
      if strict:
        logger.warning("Using the pooling op in compatibility mode. "
                       "This means your graph cannot be serialized.")

        result = tf.numpy_function(py_pool, [
            x, kernel_shape, strides, dilations, pads, ceil_mode, pooling_type,
            False
        ], x.dtype)

        if x.shape.is_fully_defined():
          shape = x.get_shape()
          output_shape = shape[0:2] + calc_output_shape(
              shape[2:x_rank], kernel_shape, strides, dilations, pads,
              ceil_mode)
        else:
          output_shape = [None] * x_rank
        result.set_shape(output_shape)
        return [result]
      else:
        exception.OP_UNSUPPORTED_EXCEPT(
            "strict == 0 and " + pooling_name + " arguments not compatible",
            "Tensorflow")

    from absl import logging
    logging.set_verbosity(logging.INFO)

    def dilated_pool():
      return (dp.dilated_pool(), None)

    # select correct op depending on the pooling type
    pooling_op = dilated_pool if pooling_type in ["MAX", "AVG", "LP"] else \
        dp.dilated_maxpool_with_argmax

    def postprocess(pooled, argmax):

      def convert_NHWC_indices_to_NCHW_indices(argmax):
        # i - index in NCHW
        # I - index in NHWC
        # C - number of channels
        # b - batch = I // CHW
        # c - channel = I % C
        # H - height
        # W - width
        # I = i - c(HW - 1) + (C - 1)(i - bCHW - cHW)
        # i = (I + c(HW - 1) + (C - 1)(bCHW + cHW))/C
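        #
        # Worked example (illustrative): C = 2, H = W = 2, so HW = 4 and
        # CHW = 8. NHWC index I = 5 decodes to b = 0, h = 1, w = 0, c = 1:
        # (5 + 1*(4 - 1) + (2 - 1)*(0*8 + 1*4)) // 2 = 12 // 2 = 6,
        # which matches the NCHW index 0*8 + 1*4 + 1*2 + 0 = 6.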

        # x_shape will always be in NCHW format here,
        # because maxpool_with_argmax only support 2d input
        x_shape = tf_shape(x)
        N = x_shape[0]
        C = x_shape[1]
        H = x_shape[2]
        W = x_shape[3]
        HW = tf.math.multiply(H, W)
        CHW = tf.math.multiply(C, HW)
        argmax_b = tf.math.floordiv(argmax, CHW)
        argmax_c = tf.math.floormod(argmax, C)
        new_ind = tf.math.add(
            argmax, tf.math.multiply(argmax_c, tf.math.subtract(HW, 1)))
        new_ind = tf.math.add(
            new_ind,
            tf.math.multiply(
                tf.math.subtract(C, 1),
                tf.math.add(tf.math.multiply(argmax_b, CHW),
                            tf.math.multiply(argmax_c, HW))))
        new_ind = tf.math.floordiv(new_ind, C)

        # add batch dimension into the argmax index
        batch_offsets = tf.math.multiply(tf.range(N, dtype=new_ind.dtype), CHW)
        for _ in range(new_ind.shape.rank - 1):
          batch_offsets = tf.expand_dims(batch_offsets, -1)
        new_ind = tf.math.add(new_ind, batch_offsets)

        return new_ind

      if argmax is not None:
        argmax = convert_NHWC_indices_to_NCHW_indices(argmax)

      # select the correct transpose ops depending on the input storage format
      perm = get_perm_from_formats(dp.compute_format, dp.storage_format)

      pooled = tf.transpose(pooled, perm=perm) if dp.need_trans else pooled
      pooled = tf.cast(pooled, x_dtype) if need_cast else pooled
      argmax = tf.transpose(
          argmax, perm=perm) if dp.need_trans and argmax is not None else argmax

      return pooled, argmax

    pooled, argmax = pooling_op()
    pooled, argmax = postprocess(pooled, argmax)

    result = [pooled] if argmax is None else [pooled, argmax]

    return result
Example #15
 def args_check(cls, node, **kwargs):
   x = kwargs["tensor_dict"][node.inputs[0]]
   x_shape = x.get_shape().as_list()
   if len(x_shape) != 4:
     exception.OP_UNSUPPORTED_EXCEPT("Resize required 4D input", "Tensorflow")
   if cls.SINCE_VERSION >= 11:
     # supported attributes combination
     # ____________________________________________________________________________________________________________________________________________________
     # | mode    | coordinate_transformation_mode | cubic_coeff_a | exclude_outside | extrapolation_value | nearest_mode      | scales        | sizes     |
     # |_________|________________________________|_______________|_________________|_____________________|___________________|_______________|___________|
     # | nearest | align_corners                  | not apply     | 0               | not apply           | round_prefer_ceil | supported (1) | supported |
     # |---------|--------------------------------|---------------|-----------------|---------------------|-------------------|---------------|-----------|
     # | nearest | asymmetric                     | not apply     | 0               | not apply           | floor             | supported (1) | supported |
     # |---------|--------------------------------|---------------|-----------------|---------------------|-------------------|---------------|-----------|
     # | nearest | tf_half_pixel_for_nn           | not apply     | 0               | not apply           | floor             | supported (1) | supported |
     # |---------|--------------------------------|---------------|-----------------|---------------------|-------------------|---------------|-----------|
     # | linear  | align_corners                  | not apply     | 0               | not apply           | not apply         | supported (1) | supported |
     # |---------|--------------------------------|---------------|-----------------|---------------------|-------------------|---------------|-----------|
     # | linear  | asymmetric                     | not apply     | 0               | not apply           | not apply         | supported (1) | supported |
     # |---------|--------------------------------|---------------|-----------------|---------------------|-------------------|---------------|-----------|
     # | linear  | half_pixel                     | not apply     | 0               | not apply           | not apply         | supported (1) | supported |
     # |---------|--------------------------------|---------------|-----------------|---------------------|-------------------|---------------|-----------|
     # | cubic   | align_corners                  | -0.5          | 1               | not apply           | not apply         | supported (1) | supported |
     # |---------|--------------------------------|---------------|-----------------|---------------------|-------------------|---------------|-----------|
     # | cubic   | asymmetric                     | -0.5          | 1               | not apply           | not apply         | supported (1) | supported |
     # |---------|--------------------------------|---------------|-----------------|---------------------|-------------------|---------------|-----------|
     # | cubic   | half_pixel                     | -0.5          | 1               | not apply           | not apply         | supported (1) | supported |
     # |---------|--------------------------------|---------------|-----------------|---------------------|-------------------|---------------|-----------|
     # | nearest | tf_crop_and_resize             | not apply     | 0               | any float value     | round_prefer_ceil | supported     | supported |
     # |---------|--------------------------------|---------------|-----------------|---------------------|-------------------|---------------|-----------|
     # | linear  | tf_crop_and_resize             | not apply     | 0               | any float value     | not apply         | supported     | supported |
     # |---------|--------------------------------|---------------|-----------------|---------------------|-------------------|---------------|-----------|
     # Note:
      # 1. The accuracy of your model will go down if the height and the width of the new sizes (scales * original sizes) are not whole numbers.
     coordinate_transformation_mode = node.attrs.get(
         "coordinate_transformation_mode", "half_pixel")
     cubic_coeff_a = node.attrs.get("cubic_coeff_a", -0.75)
     exclude_outside = node.attrs.get("exclude_outside", 0)
     mode = node.attrs.get("mode", "nearest")
     nearest_mode = node.attrs.get("nearest_mode", "round_prefer_floor")
     if coordinate_transformation_mode == "pytorch_half_pixel":
       exception.OP_UNSUPPORTED_EXCEPT(
           "Resize coordinate_transformation_mode=pytorch_half_pixel",
           "Tensorflow")
     if (coordinate_transformation_mode == "half_pixel" and mode == "nearest"
        ) or (coordinate_transformation_mode == "tf_half_pixel_for_nn" and
              mode in ["linear", "cubic"]) or (
                  coordinate_transformation_mode == "tf_crop_and_resize" and
                  mode == "cubic"):
        exception.OP_UNSUPPORTED_EXCEPT(
            "Resize coordinate_transformation_mode=" +
            coordinate_transformation_mode + " and mode=" + mode, "Tensorflow")
     if (exclude_outside == 1 and
         mode in ["nearest", "linear"]) or (exclude_outside == 0 and
                                            mode == "cubic"):
       exception.OP_UNSUPPORTED_EXCEPT(
           "Resize mode=" + mode + " and exclude_outside=" +
           str(exclude_outside), "Tensorflow")
      if cubic_coeff_a != -0.5 and mode == "cubic":
        exception.OP_UNSUPPORTED_EXCEPT(
            "Resize mode=cubic and cubic_coeff_a=" + str(cubic_coeff_a),
            "Tensorflow")
     if mode == "nearest":
       if (nearest_mode in [
           "round_prefer_floor", "ceil"
       ]) or (coordinate_transformation_mode in [
           "align_corners", "tf_crop_and_resize"
       ] and nearest_mode == "floor") or (coordinate_transformation_mode in [
           "asymmetric", "tf_half_pixel_for_nn"
       ] and nearest_mode == "round_prefer_ceil"):
         exception.OP_UNSUPPORTED_EXCEPT(
             "Resize coordinate_transformation_mode=" +
             coordinate_transformation_mode +
             ", mode=nearest and nearest_mode=" + nearest_mode, "Tensorflow")
Example #16
  def pool_v11(cls, node, input_dict, pooling_type, strict=True):
    x = input_dict[node.inputs[0]]
    orig_x = x

    kernel_shape = node.attrs["kernel_shape"]

    spatial_size = len(kernel_shape)
    x_rank = spatial_size + 2

    strides = node.attrs.get("strides", [1] * spatial_size)
    dilations = node.attrs.get("dilations", [1] * spatial_size)
    ceil_mode = bool(node.attrs.get("ceil_mode", 0))
    pads = node.attrs.get("auto_pad", "NOTSET")
    if pads == "NOTSET":
      pads = node.attrs.get("pads", [0] * spatial_size * 2)

    count_include_pad = bool(node.attrs.get("count_include_pad", 0))
    if pooling_type == "AVG":
      pooling_name = "AveragePool"
    elif pooling_type == "MAX":
      pooling_name = "MaxPool"
    elif pooling_type == "MAX_WITH_ARGMAX":
      pooling_name = "MaxPoolWithArgmax"

    if spatial_size > 3:
      exception.OP_UNSUPPORTED_EXCEPT(
          pooling_name + " with {}D input".format(x_rank), "Tensorflow")
    if pooling_type == "MAX_WITH_ARGMAX" and x_rank != 4:
      exception.OP_UNSUPPORTED_EXCEPT(
          pooling_name + " with {}D input".format(x_rank), "Tensorflow")
    if node.attrs.get("storage_order", 0) != 0:
      exception.OP_UNSUPPORTED_EXCEPT(pooling_name + " with column major",
                                      "Tensorflow")

    storage_format, _ = get_data_format(x_rank)

    need_trans = storage_format.startswith("NC")
    # default so compute_format is always defined for the transpose below
    compute_format = storage_format
    if need_trans:
      compute_format = "N" + storage_format[2:] + "C"
      x = tf.transpose(x, perm=get_perm_from_formats(storage_format,
                                                     compute_format))

    dp = DilatedPooling(input=x, kernel_shape=kernel_shape, strides=strides,
                        dilations=dilations, padding=pads, ceil_mode=ceil_mode,
                        pooling_type=pooling_type,
                        count_include_pad=count_include_pad)
    if not dp.is_supported():
      if strict:
        warnings.warn(
            "Using the pooling op in compatibility mode. "
            "This means your graph cannot be serialized.", UserWarning)

        return [tf.py_func(py_pool, [orig_x, kernel_shape, strides,
                                     dilations, pads, ceil_mode,
                                     pooling_type, False], orig_x.dtype)]
      else:
        exception.OP_UNSUPPORTED_EXCEPT("strict == 0 and " + pooling_name +
                                        " arguments not compatible",
                                        "Tensorflow")

    def dilated_pool():
      return (dp.dilated_pool(), None)

    # select correct op depending on the pooling type
    pooling_op = dilated_pool if pooling_type in ["MAX", "AVG"] else \
        dp.dilated_maxpool_with_argmax

    # select the correct transpose ops depending on the input storage format
    perm = get_perm_from_formats(compute_format, storage_format)

    def postprocess(pooled, argmax):
      return (tf.transpose(pooled, perm=perm) if need_trans else pooled,
              tf.transpose(argmax, perm=perm) if need_trans and argmax
              is not None else argmax)

    pooled, argmax = pooling_op()
    pooled, argmax = postprocess(pooled, argmax)

    result = [pooled] if argmax is None else [pooled, argmax]

    return result
Example #17
    def pool(cls, node, input_dict, pool_func, pooling_type, strict=True):
        x = input_dict[node.inputs[0]]
        x_rank = len(x.get_shape())
        x_shape = x.get_shape().as_list()
        spatial_size = x_rank - 2

        if spatial_size > 3:
            exception.OP_UNSUPPORTED_EXCEPT(
                "MaxPool with {}D input".format(x_rank), "Tensorflow")

        support_cuda = supports_device("CUDA")
        storage_format, compute_format = get_data_format(x_rank)

        kernel_shape = node.attrs["kernel_shape"]
        strides = node.attrs.get("strides", [1] * spatial_size)
        pads = node.attrs.get("pads", None)
        pad = PAD_TF_INCOMPATIBLE
        # from version 7
        count_include_pad = node.attrs.get("count_include_pad", 0)

        auto_pad = node.attrs.get("auto_pad", "NOTSET")
        # if auto_pad is NOTSET, we check pads
        if auto_pad == "NOTSET":
            # If padding is specified, try to recover it from explicit padding
            # specification to tensorflow padding mode:
            if pads is not None:
                pad = cls._get_tf_pad(x_shape[2:], kernel_shape, strides, pads)
            else:
                pad = "VALID"
        else:
            if auto_pad == "SAME_UPPER":
                pad = "SAME"
            elif auto_pad == "VALID":
                pad = "VALID"
            elif auto_pad == "SAME_LOWER":
                pad = PAD_TF_INCOMPATIBLE
            if count_include_pad == 1:
                _, pads = cls._pool_get_shapes(auto_pad, x_shape[2:],
                                               kernel_shape, strides,
                                               [0] * spatial_size * 2)

        if pooling_type in ("AVG", "MAX"):
            if strict and count_include_pad == 0:
                if pad is PAD_TF_INCOMPATIBLE:
                    return cls._compatibility_pool(node, input_dict,
                                                   pooling_type)
            else:
                if pads != [0] * spatial_size * 2:
                    x = PadMixin.get_padding_as_op(x, pads)
                pad = "VALID"
        elif pooling_type == "MAX_WITH_ARGMAX":
            if pad is PAD_TF_INCOMPATIBLE:
                exception.OP_UNSUPPORTED_EXCEPT(
                    "MaxPoolWithArgmax with pad is None or incompatible mode",
                    "Tensorflow")
            if x_rank != 4:
                exception.OP_UNSUPPORTED_EXCEPT(
                    "MaxPoolWithArgmax with {}D input".format(x_rank),
                    "Tensorflow")
            if node.attrs.get("storage_order", 0) != 0:
                exception.OP_UNSUPPORTED_EXCEPT(
                    "MaxPoolWithArgmax with column major", "Tensorflow")

            need_trans = storage_format != "NHWC"
            if need_trans:
                x = tf.transpose(x,
                                 perm=get_perm_from_formats(
                                     storage_format, "NHWC"))
            pooled, argmax = pool_func(x, [1] + kernel_shape + [1],
                                       padding=pad,
                                       strides=[1] + strides + [1])
            if need_trans:
                pooled = tf.transpose(pooled,
                                      perm=get_perm_from_formats(
                                          "NHWC", storage_format))
                argmax = tf.transpose(argmax,
                                      perm=get_perm_from_formats(
                                          "NHWC", storage_format))

            return [pooled, argmax]

        if support_cuda:
            pooled = pool_func(x,
                               kernel_shape,
                               padding=pad,
                               strides=strides,
                               data_format=compute_format)
        else:
            x = tf.transpose(x,
                             perm=get_perm_from_formats(
                                 storage_format, compute_format))
            pooled = pool_func(x,
                               kernel_shape,
                               padding=pad,
                               strides=strides,
                               data_format=compute_format)
            pooled = tf.transpose(pooled,
                                  perm=get_perm_from_formats(
                                      compute_format, storage_format))

        return [pooled]
Example #18
 def args_check(cls, node, **kwargs):
     if "clip" in node.attrs:
         exception.OP_UNSUPPORTED_EXCEPT("RNN with clip", "Tensorflow")
Example #19
 def args_check(cls, node, **kwargs):
     data_format = node.attr.get("data_format", "NHWC").decode()
     if data_format not in ["NHWC", "NCHW"]:
         exception.OP_UNSUPPORTED_EXCEPT(
             "{} with data_format {}".format(node.op_type, data_format),
             "ONNX")
Example #20
    def conv(cls, node, input_dict, transpose=False):
        """ Convolution method for both conv and transposed conv
    For transposed conv,
      Attr pads is not used for input, but declares how much output is padded.
      Here, output means output from transposed conv which already pad output_padding if set.
      So the pseudo explanation for output should be:
        output = conv_transpose_output + output_padding - pads
      And conv_transpose_output shape should be:
        conv_transpose_output_shape[i] = strides[i] * (input_shape[i] - 1) + kernel_shape[i]
    """
        x = input_dict[node.inputs[0]]
        x_rank = len(x.get_shape())
        x_shape = x.get_shape().as_list()
        spatial_size = x_rank - 2

        support_cuda = supports_device("CUDA")
        storage_format, compute_format = get_data_format(x_rank)
        compute_c_idx = compute_format.find("C")
        spatial_format = "".join(
            [d for d in compute_format if d not in ["N", "C"]])

        in_weights = input_dict[node.inputs[1]]
        weights_rank = len(in_weights.get_shape())
        # Translate weights from (C x M x KH x KW) to (KH x KW x M x C) for
        # transposed conv, or from (M x C x KH x KW) to (KH x KW x C x M)
        # for regular conv; the same permutation covers both layouts.
        perm = list(range(2, weights_rank)) + [1, 0]

        if "kernel_shape" in node.attrs.keys():
            kernel_shape = node.attrs["kernel_shape"]
            assert in_weights.get_shape().as_list()[2:] == kernel_shape, (
                "kernel_shape "
                "attr of convolution does not match the actual weight "
                "passed to this operation, attr {}, actual {}").format(
                    kernel_shape,
                    in_weights.get_shape().as_list())

        weights = tf.transpose(in_weights, perm)
        dilations = node.attrs.get("dilations", [1] * spatial_size)
        strides = node.attrs.get("strides", [1] * spatial_size)

        pads = node.attrs.get("pads", [0, 0] * spatial_size)

        # Check auto_pad nonexistent or NOTSET first
        if "auto_pad" not in node.attrs or node.attrs["auto_pad"] == "NOTSET":
            if not transpose:
                if pads != [0, 0] * spatial_size:
                    x = PadMixin.get_padding_as_op(x, pads)
                pad_mode = "VALID"
            else:
                pad_mode = "NOTSET"
        # Then we use auto_pad to setup pad_mode
        elif node.attrs["auto_pad"] == "SAME_UPPER":
            pad_mode = "SAME"
        elif node.attrs["auto_pad"] == "VALID":
            pad_mode = "VALID"
        elif node.attrs["auto_pad"] == "SAME_LOWER":
            pad_mode = PAD_TF_INCOMPATIBLE
        else:
            raise ValueError("Invalid auto_pad attribute: {}".format(
                node.attrs["auto_pad"]))

        # Currently auto_pad = SAME_LOWER is not supported
        if pad_mode is PAD_TF_INCOMPATIBLE:
            if transpose:
                exception.OP_UNSUPPORTED_EXCEPT(
                    "ConvTranspose with auto_pad `SAME_LOWER`", "Tensorflow")
            else:
                exception.OP_UNSUPPORTED_EXCEPT(
                    "Conv with auto_pad `SAME_LOWER`", "Tensorflow")

        group = node.attrs.get("group", 1)

        weight_groups = tf.split(weights, num_or_size_splits=group, axis=-1)

        if support_cuda:
            xs = tf.split(x, num_or_size_splits=group, axis=1)
        else:
            x = tf.transpose(x,
                             perm=get_perm_from_formats(
                                 storage_format, compute_format))
            xs = tf.split(x, num_or_size_splits=group, axis=-1)

        if transpose:
            if dilations != [1] * spatial_size:
                raise RuntimeError(
                    "Cannot set non-1 dilation for conv transpose.")
            convolved = []
            for (x, weight) in zip(xs, weight_groups):
                x_spatial_shape = [
                    x_shape[storage_format.find(d)] for d in spatial_format
                ]
                weights_shape = weights.get_shape().as_list()
                output_shape = node.attrs.get("output_shape", None)
                conv_output_shape = [x_shape[storage_format.find("N")]]

                # calculate output shape
                if pad_mode == "NOTSET":
                    if output_shape is None:
                        conv_output_shape += [
                            strides[i] * x_spatial_shape[i] +
                            max(weights_shape[i] - strides[i], 0)
                            for i in list(range(spatial_size))
                        ]
                    else:
                        conv_output_shape += [
                            s + pads[i] + pads[spatial_size + i]
                            for i, s in enumerate(output_shape[-2:])
                        ]
                    conv_output_shape.insert(compute_c_idx, weights_shape[-2])

                    # make strides to match input rank
                    strides_full = [1] + strides
                    strides_full.insert(compute_c_idx, 1)

                    # get corresponding function in tf
                    if spatial_size == 1:
                        conv_func = tf.nn.conv1d_transpose
                        strides_full = strides[0]
                    elif spatial_size == 2:
                        conv_func = tf.nn.conv2d_transpose
                    elif spatial_size == 3:
                        conv_func = tf.nn.conv3d_transpose
                    else:
                        raise NotImplementedError(
                            "Transposed convolution for {}d is not implemented in Tensorflow"
                            .format(spatial_size))

                    # use raw input x to do transposed conv
                    conv_rs = conv_func(x,
                                        weight,
                                        conv_output_shape,
                                        strides_full,
                                        padding="VALID",
                                        data_format=compute_format)

                    # pad output first by output_padding attr
                    if "output_padding" in node.attrs and output_shape is None:
                        output_padding = [[
                            0, 0
                        ]] + [[0, p] for p in node.attrs["output_padding"]]
                        output_padding.insert(compute_c_idx, [0, 0])
                        conv_rs = tf.pad(conv_rs, output_padding)

                    # remove pads set in pads attr
                    conv_rs_shape = conv_rs.get_shape().as_list()
                    begin = [0] + pads[:spatial_size]
                    begin.insert(compute_c_idx, 0)
                    size = [
                        s if d in ["N", "C"] else s -
                        pads[spatial_format.find(d)] -
                        pads[spatial_format.find(d) + spatial_size]
                        for d, s in zip(compute_format, conv_rs_shape)
                    ]
                    conv_rs = tf.slice(conv_rs, begin=begin, size=size)

                    convolved.append(conv_rs)
                else:
                    # No need to check pads if auto_pad is specifically provided.
                    # The assumption is that once auto_pad is provided as either VALID
                    # or SAME_UPPER (SAME_LOWER is currently not supported in TF) the
                    # output_shape will always be inferred. That is, the output_shape
                    # and output_padding will not be used in this case.
                    if pad_mode == "VALID":
                        conv_output_shape += [
                            strides[i] * (x_spatial_shape[i] - 1) +
                            weights_shape[i] for i in list(range(spatial_size))
                        ]
                    else:
                        conv_output_shape += [
                            strides[i] * x_spatial_shape[i]
                            for i in list(range(spatial_size))
                        ]
                    conv_output_shape.insert(compute_c_idx, weights_shape[-2])

                    # make strides to match input rank
                    strides_full = [1] + strides
                    strides_full.insert(compute_c_idx, 1)

                    # get corresponding function in tf
                    if spatial_size == 1:
                        conv_func = tf.contrib.nn.conv1d_transpose
                        strides_full = strides[0]
                    elif spatial_size == 2:
                        conv_func = tf.nn.conv2d_transpose
                    elif spatial_size == 3:
                        conv_func = tf.nn.conv3d_transpose
                    else:
                        raise NotImplementedError(
                            "Transposed convolution for {}d is not implemented in Tensorflow"
                            .format(spatial_size))

                    # use raw input x to do transposed conv
                    conv_rs = conv_func(x,
                                        weight,
                                        conv_output_shape,
                                        strides_full,
                                        padding=pad_mode,
                                        data_format=compute_format)
                    convolved.append(conv_rs)

        else:
            convolved = [
                tf.nn.convolution(x,
                                  weight,
                                  padding=pad_mode,
                                  strides=strides,
                                  dilations=dilations,
                                  data_format=compute_format)
                for (x, weight) in zip(xs, weight_groups)
            ]

        if len(node.inputs) == 2:
            if support_cuda:
                output = tf.concat(convolved, axis=1)
            else:
                output = tf.concat(convolved, axis=-1)
                output = tf.transpose(output,
                                      perm=get_perm_from_formats(
                                          compute_format, storage_format))
        else:
            bias = input_dict[node.inputs[2]]
            bias = cls.explicit_broadcast([x, bias], compute_c_idx)

            if support_cuda:
                output = tf.concat(convolved, axis=1)
                output = tf.add(output, bias)
            else:
                output = tf.concat(convolved, axis=-1)
                output = tf.add(output, bias)
                output = tf.transpose(output,
                                      perm=get_perm_from_formats(
                                          compute_format, storage_format))

        return [output]
Example #21
    def args_check(cls, node, **kwargs):
        # update cast maps based on the auto_cast config option
        cls.x_cast_map[tf.uint64] = tf.int64 if sys_config.auto_cast else None
        cls.x_cast_map[
            tf.complex64] = tf.float64 if sys_config.auto_cast else None
        cls.x_cast_map[
            tf.complex128] = tf.float64 if sys_config.auto_cast else None
        cls.roi_cast_map[
            tf.float64] = tf.float32 if sys_config.auto_cast else None

        x = kwargs["tensor_dict"][node.inputs[0]]
        x_shape = x.get_shape().as_list()
        x_dtype = x.dtype
        if len(x_shape) != 4:
            exception.OP_UNSUPPORTED_EXCEPT("Resize required 4D input",
                                            "Tensorflow")
        if x_dtype in cls.x_cast_map and cls.x_cast_map[x_dtype] is None:
            exception.DTYPE_NOT_CAST_EXCEPT(
                "Resize input " + node.inputs[0] + " with data type '" +
                data_type.tf_to_np_str(x_dtype) + "'",
                data_type.tf_to_np_str_list(cls.x_supported_types))
        if cls.SINCE_VERSION >= 11:
            # supported attributes combination
            # ____________________________________________________________________________________________________________________________________________________
            # | mode    | coordinate_transformation_mode | cubic_coeff_a | exclude_outside | extrapolation_value | nearest_mode      | scales        | sizes     |
            # |_________|________________________________|_______________|_________________|_____________________|___________________|_______________|___________|
            # | nearest | align_corners                  | not apply     | 0               | not apply           | round_prefer_ceil | supported (1) | supported |
            # |---------|--------------------------------|---------------|-----------------|---------------------|-------------------|---------------|-----------|
            # | nearest | asymmetric                     | not apply     | 0               | not apply           | floor             | supported (1) | supported |
            # |---------|--------------------------------|---------------|-----------------|---------------------|-------------------|---------------|-----------|
            # | nearest | tf_half_pixel_for_nn           | not apply     | 0               | not apply           | floor             | supported (1) | supported |
            # |---------|--------------------------------|---------------|-----------------|---------------------|-------------------|---------------|-----------|
            # | linear  | align_corners                  | not apply     | 0               | not apply           | not apply         | supported (1) | supported |
            # |---------|--------------------------------|---------------|-----------------|---------------------|-------------------|---------------|-----------|
            # | linear  | asymmetric                     | not apply     | 0               | not apply           | not apply         | supported (1) | supported |
            # |---------|--------------------------------|---------------|-----------------|---------------------|-------------------|---------------|-----------|
            # | linear  | half_pixel                     | not apply     | 0               | not apply           | not apply         | supported (1) | supported |
            # |---------|--------------------------------|---------------|-----------------|---------------------|-------------------|---------------|-----------|
            # | cubic   | align_corners                  | -0.5          | 1               | not apply           | not apply         | supported (1) | supported |
            # |---------|--------------------------------|---------------|-----------------|---------------------|-------------------|---------------|-----------|
            # | cubic   | asymmetric                     | -0.5          | 1               | not apply           | not apply         | supported (1) | supported |
            # |---------|--------------------------------|---------------|-----------------|---------------------|-------------------|---------------|-----------|
            # | cubic   | half_pixel                     | -0.5          | 1               | not apply           | not apply         | supported (1) | supported |
            # |---------|--------------------------------|---------------|-----------------|---------------------|-------------------|---------------|-----------|
            # | nearest | tf_crop_and_resize             | not apply     | 0               | any float value     | round_prefer_ceil | supported     | supported |
            # |---------|--------------------------------|---------------|-----------------|---------------------|-------------------|---------------|-----------|
            # | linear  | tf_crop_and_resize             | not apply     | 0               | any float value     | not apply         | supported     | supported |
            # |---------|--------------------------------|---------------|-----------------|---------------------|-------------------|---------------|-----------|
            # Note:
            # 1. The accuracy of your model will go down if the height and width of the new size (scales * original sizes) are not whole numbers.
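            # For instance (hypothetical numbers): a 10x10 input with
            # scales=[1, 1, 0.8, 0.8] targets exactly 8x8, while
            # scales=[1, 1, 0.75, 0.75] targets 7.5x7.5 and must round,
            # shifting the sample grid and therefore the output values.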
            coordinate_transformation_mode = node.attrs.get(
                "coordinate_transformation_mode", "half_pixel")
            cubic_coeff_a = node.attrs.get("cubic_coeff_a", -0.75)
            exclude_outside = node.attrs.get("exclude_outside", 0)
            mode = node.attrs.get("mode", "nearest")
            nearest_mode = node.attrs.get("nearest_mode", "round_prefer_floor")
            if coordinate_transformation_mode == "tf_crop_and_resize":
                if x_dtype in cls.cr_x_cast_map and cls.cr_x_cast_map[
                        x_dtype] is None:
                    exception.DTYPE_NOT_CAST_EXCEPT(
                        "Resize input " + node.inputs[0] +
                        " with data type '" + data_type.tf_to_np_str(x_dtype) +
                        "'",
                        data_type.tf_to_np_str_list(cls.cr_x_supported_types))
                roi = kwargs["tensor_dict"][node.inputs[1]]
                roi_dtype = roi.dtype
                if roi_dtype in cls.roi_cast_map and cls.roi_cast_map[
                        roi_dtype] is None:
                    exception.DTYPE_NOT_CAST_EXCEPT(
                        "Resize input " + node.inputs[1] +
                        " with data type '" +
                        data_type.tf_to_np_str(roi_dtype) + "'",
                        data_type.tf_to_np_str_list(cls.roi_supported_types))
            if coordinate_transformation_mode == "pytorch_half_pixel":
                exception.OP_UNSUPPORTED_EXCEPT(
                    "Resize coordinate_transformation_mode=pytorch_half_pixel",
                    "Tensorflow")
            if (coordinate_transformation_mode == "half_pixel"
                    and mode == "nearest"
                ) or (coordinate_transformation_mode == "tf_half_pixel_for_nn"
                      and mode in ["linear", "cubic"]) or (
                          coordinate_transformation_mode
                          == "tf_crop_and_resize" and mode == "cubic"):
                exception.OP_UNSUPPORTED_EXCEPT(
                    "Resize coordinate_transformation_mode=" +
                    coordinate_transformation_mode + " and  mode=" + mode,
                    "Tensorflow")
            if (exclude_outside == 1 and mode in ["nearest", "linear"]) or (
                    exclude_outside == 0 and mode == "cubic"):
                exception.OP_UNSUPPORTED_EXCEPT(
                    "Resize mode=" + mode + " and exclude_outside=" +
                    str(exclude_outside), "Tensorflow")
            if cubic_coeff_a != -0.5 and mode == "cubic":
                exception.OP_UNSUPPORTED_EXCEPT(
                    "Resize mode=cubic and cubic_coeff_a=" + cubic_coeff_a,
                    "Tensorflow")
            if mode == "nearest":
                if (nearest_mode in [
                        "round_prefer_floor", "ceil"
                ]) or (coordinate_transformation_mode
                       in ["align_corners", "tf_crop_and_resize"]
                       and nearest_mode == "floor") or (
                           coordinate_transformation_mode
                           in ["asymmetric", "tf_half_pixel_for_nn"]
                           and nearest_mode == "round_prefer_ceil"):
                    exception.OP_UNSUPPORTED_EXCEPT(
                        "Resize coordinate_transformation_mode=" +
                        coordinate_transformation_mode +
                        ", mode=nearest and nearest_mode=" + nearest_mode,
                        "Tensorflow")
Example #22
    def _common(cls, node, **kwargs):
        body = node.attrs["body"]
        tensor_dict = kwargs["tensor_dict"]
        M = tensor_dict[node.inputs[0]] if tensor_dict[
            node.inputs[0]].dtype == tf.int64 else None
        cond = None if tensor_dict[
            node.inputs[1]].dtype == tf.string else tf.cast(
                tensor_dict[node.inputs[1]], tf.bool)
        v_initial = [
            tensor_dict[graph_input] for graph_input in node.inputs[2:]
        ]
        v_shapes = [v.get_shape() for v in v_initial]
        current_opset = [make_opsetid(cls.DOMAIN, cls.VERSION)]
        # outputs of the body will be in this format:
        # (condition, loop carried dependencies..., scan_outputs...)
        scan_outputs_start_index = 1 + len(v_initial)
        scan_outputs = [
            tf.TensorArray(dtype=data_type.onnx2tf(
                body.output[i].type.tensor_type.elem_type),
                           size=0,
                           dynamic_size=True)
            for i in range(scan_outputs_start_index, len(body.output))
        ]
        scan_outputs_shapes = [tf.TensorShape(None) for o in scan_outputs]

        def run_subgraph(cond, v, scan_outputs):
            input_values = {}
            input_values[body.input[0].name] = M
            input_values[body.input[1].name] = cond
            for i in range(2, len(body.input)):
                input_values[body.input[i].name] = v[i - 2]
            tensor_dict = onnx_tf.backend.onnx_graph_to_tensorflow_ops(
                graph_def=body, input_values=input_values, opset=current_opset)
            outputs = [tensor_dict[output.name] for output in body.output]
            for i in range(scan_outputs_start_index, len(outputs)):
                s_index = i - scan_outputs_start_index
                insert_index = scan_outputs[s_index].size()
                scan_outputs[s_index] = scan_outputs[s_index].write(
                    insert_index, outputs[i])
            return outputs[0], outputs[
                1:scan_outputs_start_index], scan_outputs

        # for loop
        if M is not None and cond is None:
            M = tf.cast(M, tf.int32)
            condition = lambda cond, v, scan_outputs: True
            _, v_final, scan_outputs = tf.while_loop(
                cond=condition,
                body=run_subgraph,
                loop_vars=["", v_initial, scan_outputs],
                shape_invariants=[
                    tf.TensorShape(None), v_shapes, scan_outputs_shapes
                ],
                maximum_iterations=M)
        # while and do-while loop
        elif M is None and cond is not None:
            condition = lambda cond, v, scan_outputs: tf.reduce_all(
                tf.equal(cond, True))
            cond, v_final, scan_outputs = tf.while_loop(
                cond=condition,
                body=run_subgraph,
                loop_vars=[cond, v_initial, scan_outputs],
                shape_invariants=[
                    tf.TensorShape(None), v_shapes, scan_outputs_shapes
                ])
        # combine for loop and while loop together
        elif M is not None and cond is not None:
            M = tf.cast(M, tf.int32)
            condition = lambda cond, v, scan_outputs: tf.reduce_all(
                tf.equal(cond, True))
            cond, v_final, scan_outputs = tf.while_loop(
                cond=condition,
                body=run_subgraph,
                loop_vars=[cond, v_initial, scan_outputs],
                shape_invariants=[
                    tf.TensorShape(None), v_shapes, scan_outputs_shapes
                ],
                maximum_iterations=M)
        else:  # M is None and cond is None
            exception.OP_UNSUPPORTED_EXCEPT(
                "Loop with neither M nor cond set",
                "Tensorflow (PS: to create a do-while loop, " +
                "set cond to True or 1)")

        scan_outputs_tensors = [o.stack() for o in scan_outputs]
        if scan_outputs_start_index == len(body.output):
            # there is no scan_output in the body graph
            return [v_final]
        else:
            return [v_final, scan_outputs_tensors]
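Under the hood this is an ordinary tf.while_loop carrying a TensorArray. A self-contained toy analogue of the trip-count-only ("for") case, assuming TF 2.x eager execution; variable names are illustrative:

import tensorflow as tf

# Toy analogue of the Loop lowering above: the body returns
# (cond, loop-carried value, scan outputs), and termination is driven
# entirely by maximum_iterations.
M = tf.constant(5, tf.int32)
carried = tf.constant(0, tf.int32)
scan = tf.TensorArray(tf.int32, size=0, dynamic_size=True)

def body(cond, v, scan):
    v = v + 1                          # loop-carried dependency
    scan = scan.write(scan.size(), v)  # scan output, one slice per step
    return cond, v, scan

_, v_final, scan_final = tf.while_loop(
    cond=lambda cond, v, scan: True,   # always true; M caps the trip count
    body=body,
    loop_vars=[tf.constant(True), carried, scan],
    maximum_iterations=M)

print(v_final.numpy())             # 5
print(scan_final.stack().numpy())  # [1 2 3 4 5]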
Example #23
    def pool(cls, node, input_dict, pooling_type, strict=True):
        x = input_dict[node.inputs[0]]
        orig_x = x

        kernel_shape = node.attrs["kernel_shape"]

        spatial_size = len(kernel_shape)
        x_rank = spatial_size + 2

        kernel_shape = node.attrs["kernel_shape"]
        strides = node.attrs.get("strides", [1] * spatial_size)
        dilations = node.attrs.get("dilations", [1] * spatial_size)
        ceil_mode = bool(node.attrs.get("ceil_mode", 0))
        pads = node.attrs.get("auto_pad", "NOTSET")
        p = node.attrs.get("p", 2)

        if pads == "NOTSET":
            pads = node.attrs.get("pads", [0] * spatial_size * 2)
            # In case shape is fully defined, check if pads match
            # SAME padding in Tensorflow
            if x.shape.is_fully_defined() and pads != [0] * spatial_size * 2:
                in_shape = x.get_shape().as_list()
                same_paddings = calc_pads_same(in_shape[1:x_rank - 1],
                                               kernel_shape, strides,
                                               dilations, "SAME_UPPER")
                if pads == same_paddings:
                    pads = "SAME_UPPER"

        count_include_pad = bool(node.attrs.get("count_include_pad", 0))
        if pooling_type == "AVG":
            pooling_name = "AveragePool"
        elif pooling_type == "MAX":
            pooling_name = "MaxPool"
        elif pooling_type == "MAX_WITH_ARGMAX":
            pooling_name = "MaxPoolWithArgmax"
        elif pooling_type == "LP":
            pooling_name = "LpPool"

        if spatial_size > 3:
            exception.OP_UNSUPPORTED_EXCEPT(
                pooling_name + " with {}D input".format(x_rank), "Tensorflow")
        if pooling_type == "MAX_WITH_ARGMAX" and x_rank != 4:
            exception.OP_UNSUPPORTED_EXCEPT(
                pooling_name + " with {}D input".format(x_rank), "Tensorflow")
        if node.attrs.get("storage_order", 0) != 0:
            exception.OP_UNSUPPORTED_EXCEPT(
                pooling_name + " with column major", "Tensorflow")

        storage_format, _ = get_data_format(x_rank)

        # default to the storage format so compute_format is always defined
        compute_format = storage_format
        need_trans = storage_format.startswith("NC")
        if need_trans:
            compute_format = "N" + storage_format[2:] + "C"
            x = tf.transpose(x,
                             perm=get_perm_from_formats(
                                 storage_format, compute_format))

        dp = DilatedPooling(input=x,
                            kernel_shape=kernel_shape,
                            strides=strides,
                            dilations=dilations,
                            padding=pads,
                            ceil_mode=ceil_mode,
                            pooling_type=pooling_type,
                            count_include_pad=count_include_pad,
                            p=p)
        if not dp.is_supported():
            if strict:
                logger.warning(
                    "Using the pooling op in compatibility mode. "
                    "This means your graph cannot be serialized.")

                result = tf.py_func(py_pool, [
                    orig_x, kernel_shape, strides, dilations, pads, ceil_mode,
                    pooling_type, False
                ], orig_x.dtype)

                if orig_x.shape.is_fully_defined():
                    shape = orig_x.get_shape().as_list()
                    output_shape = shape[0:2] + calc_output_shape(
                        shape[2:x_rank], kernel_shape, strides, dilations,
                        pads, ceil_mode)
                else:
                    output_shape = [None] * x_rank
                result.set_shape(output_shape)
                return [result]
            else:
                exception.OP_UNSUPPORTED_EXCEPT(
                    "strict == 0 and " + pooling_name +
                    " arguments not compatible", "Tensorflow")

        def dilated_pool():
            return (dp.dilated_pool(), None)

        # select correct op depending on the pooling type
        pooling_op = dilated_pool if pooling_type in ["MAX", "AVG", "LP"] else \
            dp.dilated_maxpool_with_argmax

        # select the correct transpose ops depending on the input storage format
        perm = get_perm_from_formats(compute_format, storage_format)

        def postprocess(pooled, argmax):
            return (tf.transpose(pooled, perm=perm) if need_trans else pooled,
                    tf.transpose(argmax, perm=perm)
                    if need_trans and argmax is not None else argmax)

        pooled, argmax = pooling_op()
        pooled, argmax = postprocess(pooled, argmax)

        result = [pooled] if argmax is None else [pooled, argmax]

        return result
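The NOTSET branch above folds explicit pads back into TensorFlow's SAME padding when they coincide. A hypothetical stand-in for calc_pads_same (the real helper lives in the converter's utilities) shows the arithmetic:

import math

# Hypothetical re-implementation of calc_pads_same(..., "SAME_UPPER"):
# per spatial dim, compute the padding TensorFlow's SAME scheme would
# apply, placing the extra pixel at the end. The returned layout matches
# ONNX pads: [begin_0, begin_1, ..., end_0, end_1, ...].
def calc_pads_same_upper(in_shape, kernel_shape, strides, dilations):
    begins, ends = [], []
    for i, dim in enumerate(in_shape):
        out = math.ceil(dim / strides[i])
        eff_kernel = (kernel_shape[i] - 1) * dilations[i] + 1
        total = max((out - 1) * strides[i] + eff_kernel - dim, 0)
        begins.append(total // 2)  # SAME_UPPER: smaller half first
        ends.append(total - total // 2)
    return begins + ends

# A 3x3 stride-1 kernel needs pad 1 on each side of a 5x5 input:
assert calc_pads_same_upper([5, 5], [3, 3], [1, 1], [1, 1]) == [1, 1, 1, 1]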
Example #24
    def _common(cls, node, **kwargs):
        body = node.attrs["body"]
        tensor_dict = kwargs["tensor_dict"]
        M = tensor_dict[node.inputs[0]] if node.inputs[0] != "" else None
        M = tf.where(tf.greater(M, tf.int32.max),
                     tf.constant(tf.int32.max, tf.int32), tf.cast(
                         M, tf.int32)) if M is not None else M
        cond_init = tf.cast(tensor_dict[node.inputs[1]],
                            tf.bool) if node.inputs[1] != "" else None
        v_init = [tensor_dict[graph_input] for graph_input in node.inputs[2:]]
        v_shapes = [
            tf.TensorShape([None for i in range(v.shape.rank)]) for v in v_init
        ]
        iter_cnt_init = np.int64(0)
        current_opset = [make_opsetid(cls.DOMAIN, cls.VERSION)]
        # outputs of the body will be in this format:
        # (condition, loop carried dependencies..., scan_outputs...)
        scan_outputs_start_index = 1 + len(v_init)
        scan_outputs_init = [
            tf.TensorArray(dtype=data_type.onnx2tf(
                body.output[i].type.tensor_type.elem_type),
                           size=0,
                           dynamic_size=True)
            for i in range(scan_outputs_start_index, len(body.output))
        ]
        scan_outputs_shapes = [tf.TensorShape(None) for o in scan_outputs_init]

        def run_subgraph(iter_cnt, cond, v, scan_outputs):
            subgraph_tensor_dict = dict(tensor_dict)
            subgraph_tensor_dict[body.input[0].name] = iter_cnt
            subgraph_tensor_dict[body.input[1].name] = cond
            for i in range(2, len(body.input)):
                subgraph_tensor_dict[body.input[i].name] = v[i - 2]
            subgraph_tensor_dict = onnx_tf.backend.onnx_graph_to_tensorflow_ops(
                subgraph=body,
                tensor_dict=subgraph_tensor_dict,
                opset=current_opset)
            outputs = [
                subgraph_tensor_dict[output.name] for output in body.output
            ]
            for i in range(scan_outputs_start_index, len(outputs)):
                s_index = i - scan_outputs_start_index
                insert_index = scan_outputs[s_index].size()
                scan_outputs[s_index] = scan_outputs[s_index].write(
                    insert_index, outputs[i])
            iter_cnt += 1
            return iter_cnt, outputs[0], outputs[
                1:scan_outputs_start_index], scan_outputs

        # for loop
        if M is not None and cond_init is None:
            condition = lambda iter_cnt, cond, v, scan_outputs: True
            iter_cnt_final, _, v_final, scan_outputs_final = tf.while_loop(
                cond=condition,
                body=run_subgraph,
                loop_vars=[iter_cnt_init, "", v_init, scan_outputs_init],
                shape_invariants=[
                    tf.TensorShape([]),
                    tf.TensorShape(None), v_shapes, scan_outputs_shapes
                ],
                maximum_iterations=M)
        # while and do-while loop
        elif M is None and cond_init is not None:
            condition = lambda iter_cnt, cond, v, scan_outputs: tf.reduce_all(
                tf.equal(cond, True))
            iter_cnt_final, cond_final, v_final, scan_outputs_final = tf.while_loop(
                cond=condition,
                body=run_subgraph,
                loop_vars=[
                    iter_cnt_init, cond_init, v_init, scan_outputs_init
                ],
                shape_invariants=[
                    tf.TensorShape([]),
                    tf.TensorShape(None), v_shapes, scan_outputs_shapes
                ])
        # combine for loop and while loop together
        elif M is not None and cond_init is not None:
            condition = lambda iter_cnt, cond, v, scan_outputs: tf.reduce_all(
                tf.equal(cond, True))
            iter_cnt_final, cond_final, v_final, scan_outputs_final = tf.while_loop(
                cond=condition,
                body=run_subgraph,
                loop_vars=[
                    iter_cnt_init, cond_init, v_init, scan_outputs_init
                ],
                shape_invariants=[
                    tf.TensorShape([]),
                    tf.TensorShape(None), v_shapes, scan_outputs_shapes
                ],
                maximum_iterations=M)
        else:  # M is None and cond_init is None
            exception.OP_UNSUPPORTED_EXCEPT(
                "Loop with neither M nor cond set",
                "Tensorflow (PS: to create a do-while loop, " +
                "set cond to True or 1)")

        if scan_outputs_start_index == len(body.output):
            # there is no scan_output in the body graph
            return v_final
        else:
            # if the loop ran at least once, keep the scan outputs as-is
            def true_fn():
                return scan_outputs_final

            # if the loop didn't run at all then recreate the scan_outputs'
            # TensorArrays with fully defined element_shapes (unknown dims
            # pinned to 0), so tensorflow can stack an empty tensor and
            # append it to v_final
            def false_fn():
                new_scan_outputs = []
                for i in range(scan_outputs_start_index, len(body.output)):
                    exp_elem_shape = scan_outputs_init[
                        i - scan_outputs_start_index].element_shape
                    elem_shape = []
                    for j in range(exp_elem_shape.rank):
                        shape_j = 0 if exp_elem_shape[
                            j] is None else exp_elem_shape[j]
                        elem_shape.append(shape_j)
                    new_scan_outputs.append(
                        tf.TensorArray(
                            dtype=data_type.onnx2tf(
                                body.output[i].type.tensor_type.elem_type),
                            size=0,
                            element_shape=tf.TensorShape(elem_shape)))
                return new_scan_outputs

            scan_out_final = tf.cond(tf.greater(iter_cnt_final, 0), true_fn,
                                     false_fn)
            scan_outputs_tensors = [o.stack() for o in scan_out_final]
            return v_final + scan_outputs_tensors
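The false_fn dance above exists because stacking a never-written TensorArray needs a fully known element_shape; pinning unknown dims to 0 lets stack() produce a well-formed empty tensor. A minimal demonstration, assuming TF 2.x:

import tensorflow as tf

# A TensorArray that was never written to, with a fully known
# element_shape: stack() yields an empty [0, 3] tensor instead of failing.
ta = tf.TensorArray(tf.float32, size=0, element_shape=tf.TensorShape([3]))
print(ta.stack().shape)  # (0, 3)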
Example #25
    def args_check(cls, node, **kwargs):
        if node.attrs.get("training_mode", 0) == 1:
            exception.OP_UNSUPPORTED_EXCEPT(
                "BatchNormalization with training_mode=1",
                "Tensorflow converter")