Example #1
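Resolves an ONNX RNN activation name to a concrete TensorFlow function, binding the `alpha`/`beta` attributes where possible and rejecting parameter values that TensorFlow cannot express. The snippets on this page evidently come from the onnx-tensorflow converter's handler classes, which is where `exception.OP_UNSUPPORTED_EXCEPT`, `cls`, and the node objects originate.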
def rnn_get_activation(cls, name, alpha, beta):
    if name not in cls.ONNX_ACTIVATION_MAPPING:
        exception.OP_UNSUPPORTED_EXCEPT(
            "Activation function {} for {}".format(name, cls.__name__),
            "Tensorflow")
    activation = cls.ONNX_ACTIVATION_MAPPING[name]
    kwargs = {}
    if name == "affine":
        kwargs["scale"] = alpha
        kwargs["shift"] = beta
        activation = activation(**kwargs)
    elif name == "elu":
        # TensorFlow's elu has no tunable alpha, so only the ONNX
        # default (alpha=1) is accepted.
        if alpha != 1:
            exception.OP_UNSUPPORTED_EXCEPT(
                "Activation function {} with alpha={} for {}".format(
                    name, alpha, cls.__name__), "Tensorflow")
    elif name == "hard_sigmoid":
        # Only the ONNX defaults (alpha=0.2, beta=0.5) are accepted.
        if alpha != 0.2 or beta != 0.5:
            exception.OP_UNSUPPORTED_EXCEPT(
                "Activation function {} with alpha={}, beta={} for {}".format(
                    name, alpha, beta, cls.__name__), "Tensorflow")
    elif name == "leaky_relu":
        # 0.01 is the ONNX default slope for LeakyRelu.
        kwargs["alpha"] = alpha or 0.01
        activation = partial(activation, **kwargs)
    elif name == "thresholded_relu":
        kwargs["theta"] = alpha
        activation = activation(**kwargs)
    return activation
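For orientation, a minimal sketch of a call site. The handler class and mapping below are illustrative assumptions, not the library's actual definitions; only the helper's signature comes from the example above, and it is assumed to sit at module scope with `partial` and `exception` importable.

import tensorflow as tf
from functools import partial  # used inside rnn_get_activation

class DemoRNNHandler(object):
    # Hypothetical mapping; the real table covers more names.
    ONNX_ACTIVATION_MAPPING = {
        "sigmoid": tf.sigmoid,
        "tanh": tf.tanh,
        "leaky_relu": tf.nn.leaky_relu,
    }
    # Reuse the helper above as a classmethod.
    rnn_get_activation = classmethod(rnn_get_activation)

# alpha=None falls back to the 0.01 default slope seen above.
act = DemoRNNHandler.rnn_get_activation("leaky_relu", None, None)
print(act(tf.constant([-1.0, 2.0])))  # approx. [-0.01, 2.0]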
Example #2
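Input validation for an ONNX Upsample node: only 4D inputs and the nearest/bilinear interpolation modes are supported.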
def args_check(cls, node, **kwargs):
    x = kwargs["tensor_dict"][node.inputs[0]]
    x_shape = x.get_shape().as_list()
    if len(x_shape) != 4:
        exception.OP_UNSUPPORTED_EXCEPT("Upsample without 4D input",
                                        "Tensorflow")

    if node.attrs.get("mode", "nearest").lower() not in ["nearest", "bilinear"]:
        exception.OP_UNSUPPORTED_EXCEPT(
            "Upsample without nearest or bilinear", "Tensorflow")
Example #3
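Pre-conversion checks for an ONNX GRU node: `clip` and `linear_before_reset` are rejected outright, and the gate activation for `z`/`r` must be sigmoid in each direction.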
def args_check(cls, node, **kwargs):
  direction = node.attrs.get("direction", "forward")
  num_directions = 2 if direction == "bidirectional" else 1
  if "clip" in node.attrs:
    exception.OP_UNSUPPORTED_EXCEPT("GRU with clip", "Tensorflow")
  if node.attrs.get("linear_before_reset", 0):
    exception.OP_UNSUPPORTED_EXCEPT("GRU with linear_before_reset",
                                    "Tensorflow")
  if "activations" in node.attrs:
    # ONNX GRU lists activations as [f, g] per direction; f drives the
    # z and r gates and must be sigmoid.
    activations = list(map(lambda x: x.lower(), node.attrs["activations"]))
    if activations[0] != "sigmoid":
      exception.OP_UNSUPPORTED_EXCEPT("GRU without sigmoid for `z` and `r`",
                                      "Tensorflow")
    if num_directions == 2:
      if activations[2] != "sigmoid":
        exception.OP_UNSUPPORTED_EXCEPT("GRU without sigmoid for `z` and `r`",
                                        "Tensorflow")
Example #4
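Routes Conv1D/Conv2D/Conv3D nodes to a shared `conv_op` with the matching spatial rank `d`; any other op type is rejected.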
def version_1(cls, node, **kwargs):
  # Map the op name to its spatial rank d.
  if node.op_type == "Conv1D":
    d = 1
  elif node.op_type == "Conv2D":
    d = 2
  elif node.op_type == "Conv3D":
    d = 3
  else:
    exception.OP_UNSUPPORTED_EXCEPT(node.op_type, "Tensorflow")
  return cls.conv_op(node, d=d, **kwargs)
Example #5
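Attribute checks for an ONNX LSTM node: `f` must be sigmoid and `g` must equal `h` in each direction, while a set `input_forget` is tolerated for now (note the TODO).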
def args_check(cls, node, **kwargs):
    direction = node.attrs.get("direction", "forward")
    num_directions = 2 if direction == "bidirectional" else 1
    if node.attrs.get("input_forget", 0):
        # TODO(fumihwh): warning
        pass
    if "activations" in node.attrs:
        # ONNX LSTM lists activations as [f, g, h] per direction:
        # f for the gates, g for the cell update, h for the hidden output.
        activations = list(
            map(lambda x: x.lower(), node.attrs["activations"]))
        if activations[0] != "sigmoid":
            exception.OP_UNSUPPORTED_EXCEPT("LSTM without sigmoid for `f`",
                                            "Tensorflow")
        if activations[1] != activations[2]:
            exception.OP_UNSUPPORTED_EXCEPT(
                "LSTM without same activation for `g` and `h`", "Tensorflow")
        if num_directions == 2:
            if activations[3] != "sigmoid":
                exception.OP_UNSUPPORTED_EXCEPT(
                    "LSTM without sigmoid for `f`", "Tensorflow")
            if activations[4] != activations[5]:
                exception.OP_UNSUPPORTED_EXCEPT(
                    "LSTM without same activation for `g` and `h`",
                    "Tensorflow")
Example #6
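A shared pooling implementation (the `pooling_type` values suggest it backs the AveragePool/MaxPool handlers): it translates ONNX `pads`/`auto_pad`/`count_include_pad` semantics into a TensorFlow padding mode, falls back to an explicit pad op or a compatibility path when the two models do not line up, and enforces the extra constraints of MaxPoolWithArgmax.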
def pool(cls, node, input_dict, pool_func, pooling_type, strict=True):
  x = input_dict[node.inputs[0]]
  x_rank = len(x.get_shape())
  x_shape = x.get_shape().as_list()
  spatial_size = x_rank - 2

  support_cuda = supports_device("CUDA")
  storage_format, compute_format = get_data_format(x_rank)

  kernel_shape = node.attrs["kernel_shape"]
  strides = node.attrs.get("strides", [1] * spatial_size)
  pads = node.attrs.get("pads", None)
  pad = PAD_TF_INCOMPATIBLE
  # from version 7
  count_include_pad = node.attrs.get("count_include_pad", 0)

  # If explicit pads are specified, try to map them onto a TensorFlow
  # padding mode:
  if pads is not None:
    pad = cls._get_tf_pad(x_shape[2:], kernel_shape, strides, pads)
  else:
    # Neither pads nor auto_pad is specified; assume no padding.
    if "auto_pad" not in node.attrs:
      pad = "VALID"
    # Otherwise consult auto_pad.
    else:
      if node.attrs["auto_pad"] == "SAME_UPPER":
        pad = "SAME"
      elif node.attrs["auto_pad"] == "VALID":
        pad = "VALID"
      elif node.attrs["auto_pad"] == "SAME_LOWER":
        pad = PAD_TF_INCOMPATIBLE
      if count_include_pad == 1:
        _, pads = cls._pool_get_shapes(node.attrs["auto_pad"], x_shape[2:],
                                       kernel_shape, strides,
                                       [0] * spatial_size * 2)

  if pooling_type in ("AVG", "MAX"):
    if strict and count_include_pad == 0:
      if pad is PAD_TF_INCOMPATIBLE:
        return cls._compatibility_pool(node, input_dict, pooling_type)
    else:
      if pads != [0] * spatial_size * 2:
        x = PadMixin.get_padding_as_op(x, pads)
      pad = "VALID"
  elif pooling_type == "MAX_WITH_ARGMAX":
    if pad is PAD_TF_INCOMPATIBLE:
      exception.OP_UNSUPPORTED_EXCEPT(
          "MaxPoolWithArgmax with pad is None or incompatible mode",
          "Tensorflow")
    if x_rank != 4:
      exception.OP_UNSUPPORTED_EXCEPT(
          "MaxPoolWithArgmax with {}D input".format(x_rank), "Tensorflow")
    if node.attrs.get("storage_order", 0) != 0:
      exception.OP_UNSUPPORTED_EXCEPT("MaxPoolWithArgmax with column major",
                                      "Tensorflow")

    need_trans = storage_format != "NHWC"
    if need_trans:
      x = tf.transpose(x, perm=get_perm_from_formats(storage_format, "NHWC"))
    pooled, argmax = pool_func(
        x, [1] + kernel_shape + [1], padding=pad, strides=[1] + strides + [1])
    if need_trans:
      pooled = tf.transpose(
          pooled, perm=get_perm_from_formats("NHWC", storage_format))
      argmax = tf.transpose(
          argmax, perm=get_perm_from_formats("NHWC", storage_format))

    return [pooled, argmax]

  if support_cuda:
    pooled = pool_func(
        x,
        kernel_shape,
        padding=pad,
        strides=strides,
        data_format=compute_format)
  else:
    # Without CUDA, compute in a supported format and transpose back.
    x = tf.transpose(
        x, perm=get_perm_from_formats(storage_format, compute_format))
    pooled = pool_func(
        x,
        kernel_shape,
        padding=pad,
        strides=strides,
        data_format=compute_format)
    pooled = tf.transpose(
        pooled, perm=get_perm_from_formats(compute_format, storage_format))

  return [pooled]
Example #7
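Evidently a frontend (TensorFlow-to-ONNX) check, which is why the unsupported framework reported is "ONNX": only the NHWC and NCHW data formats are accepted.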
def args_check(cls, node, **kwargs):
  # node.attr holds raw TensorFlow attribute bytes, hence the decode().
  data_format = node.attr.get("data_format", "NHWC").decode()
  if data_format not in ["NHWC", "NCHW"]:
    exception.OP_UNSUPPORTED_EXCEPT("{} with data_format {}".format(
        node.op_type, data_format), "ONNX")
Example #8
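The pattern at its smallest: ONNX RNN's `clip` attribute has no counterpart in this backend, so it is rejected outright.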
def args_check(cls, node, **kwargs):
    if "clip" in node.attrs:
        exception.OP_UNSUPPORTED_EXCEPT("RNN with clip", "Tensorflow")
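Every example above funnels into the same helper, whose definition is not shown on this page. Below is a minimal sketch consistent with the call sites, assuming it does nothing more than format a message and raise; the actual onnx-tensorflow implementation may differ.

class OpUnsupportedException(object):
    # Sketch: combine the offending op/attribute description with the
    # framework that lacks support, then raise.
    def __call__(self, op_or_detail, framework):
        raise RuntimeError(
            "{} is not supported in {}.".format(op_or_detail, framework))

OP_UNSUPPORTED_EXCEPT = OpUnsupportedException()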