Example #1
def nll_loss(g, self, target, weight, reduction, ignore_index):
    # none reduction : onnx::Constant[value={0}]
    # mean reduction : onnx::Constant[value={1}]
    # sum reduction : onnx::Constant[value={2}]
    reduction = sym_help._maybe_get_const(reduction, "i")
    reduction_vals = ["none", "mean", "sum"]
    reduction = reduction_vals[reduction]

    # in onnx NegativeLogLikelihoodLoss specification, ignore_index is optional without default value.
    # therefore we need to set ignore_index attribute even if it is not specified (e.g. ignore_index=-100).
    ignore_index = sym_help._maybe_get_const(ignore_index, "i")
    if weight.node().mustBeNone():
        nllloss = g.op("NegativeLogLikelihoodLoss", self, target, reduction_s=reduction, ignore_index_i=ignore_index)
    else:
        nllloss = g.op("NegativeLogLikelihoodLoss", self, target, weight, reduction_s=reduction, ignore_index_i=ignore_index)

    return nllloss
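A minimal export sketch (an assumption, not part of the listing) that exercises this symbolic: a module whose forward calls F.nll_loss is converted to the ONNX NegativeLogLikelihoodLoss op at opset 12 or later.

import torch
import torch.nn.functional as F

class NLLModel(torch.nn.Module):
    def forward(self, log_probs, target):
        # log_probs: (N, C) log-probabilities, target: (N,) class indices
        return F.nll_loss(log_probs, target, reduction="mean", ignore_index=-100)

# torch.onnx.export(NLLModel(),
#                   (torch.randn(3, 5).log_softmax(dim=1), torch.tensor([1, 0, 4])),
#                   "nll.onnx", opset_version=12)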
Example #2
def full(g, sizes, value, dtype, layout, device, pin_memory=False):
    const_value = sym_help._maybe_get_const(value, 't')
    if sym_help._is_value(const_value):
        tmp = zeros(g, sizes, dtype, layout, device)
        return sym_opset9.add(g, tmp, value, g.op("Constant", value_t=torch.tensor(1)))
    else:
        dtype = sym_help._get_const(dtype, 'i', 'dtype')
        return _constant_fill(g, sizes, dtype, const_value)
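A minimal sketch (an assumption, not shown above) of a module that reaches this symbolic: when the fill value can be reduced to a constant, the _constant_fill branch is taken; a data-dependent value falls back to zeros followed by add (the fully static case may also be constant-folded by the tracer).

import torch

class FullModel(torch.nn.Module):
    def forward(self, x):
        # constant fill value -> const_value is a plain tensor, not a graph Value
        return torch.full((2, 3), 7.0) + x

# torch.onnx.export(FullModel(), torch.randn(2, 3), "full.onnx", opset_version=11)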
Example #3
def full(g, sizes, value, dtype, layout, device, pin_memory=False):
    const_value = symbolic_helper._maybe_get_const(value, "t")
    if symbolic_helper._is_value(const_value):
        tmp = zeros(g, sizes, dtype, layout, device)
        return opset9.add(g, tmp, value,
                          g.op("Constant", value_t=torch.tensor(1)))
    else:
        dtype = symbolic_helper._get_const(dtype, "i", "dtype")
        return _constant_fill(g, sizes, dtype, const_value)
Example #4
def nll_loss(g, self, target, weight, reduction, ignore_index):
    # reduction: 0->none, 1->mean, 2->sum
    reduction = sym_help._maybe_get_const(reduction, 'i')
    reduction_vals = ['none', 'mean', 'sum']
    reduction = reduction_vals[reduction]
    output = g.op("com.microsoft::NegativeLogLikelihoodLossInternal",
                    self, target, weight, ignore_index, reduction_s=reduction)
    output.setType(self.type())
    return output
Example #5
def celu(g, self, alpha):
    alpha = symbolic_helper._maybe_get_const(alpha, "f")
    # if the input is of type double cast it to float
    if self.type().scalarType() == "Double":
        self = g.op("Cast", self, to_i=_C_onnx.TensorProtoDataType.FLOAT)
        out = g.op("Celu", self, alpha_f=alpha)
        return g.op("Cast", out, to_i=_C_onnx.TensorProtoDataType.DOUBLE)

    return g.op("Celu", self, alpha_f=alpha)
Example #6
def celu(g, self, alpha):
    alpha = sym_help._maybe_get_const(alpha, 'f')
    # if the input is of type double cast it to float
    if self.type().scalarType() == 'Double':
        self = g.op("Cast", self, to_i=sym_help.cast_pytorch_to_onnx['Float'])
        out = g.op("Celu", self, alpha_f=alpha)
        return g.op("Cast", out, to_i=sym_help.cast_pytorch_to_onnx['Double'])

    return g.op("Celu", self, alpha_f=alpha)
Example #7
def __interpolate(g, input, size, scale_factor, mode, align_corners):
    mode = sym_help._maybe_get_const(mode, 's')
    if 'linear' in mode:
        mode = 'linear'
    if 'cubic' in mode:
        mode = 'cubic'
    align_corners = sym_help._maybe_get_const(align_corners, 'b')
    align_corners = False if sym_help._is_none(align_corners) else align_corners
    coordinate_transformation_mode = "asymmetric" if mode == "nearest" \
        else "align_corners" if align_corners else "pytorch_half_pixel"
    # roi only takes effect with coordinate_transformation_mode="tf_crop_and_resize"
    roi = g.op("Constant", value_t=torch.tensor([], dtype=torch.float32))

    if not sym_help._is_none(size) :
        input_size = input.type().sizes()
        input_size = g.op("Constant", value_t=torch.tensor(input_size[0:2], dtype=torch.int64))
        is_scalar = ((sym_help._maybe_get_const(size, 't').dim() == 0))
        if is_scalar:
            size = unsqueeze(g, size, 0)
            size = [size for i in range(input.type().dim() - 2)]
            size = g.op("Concat", *size, axis_i=0)
        size = g.op("Concat", input_size, size, axis_i=0)
        scales = g.op("Constant", value_t=torch.tensor([], dtype=torch.float32))
        return g.op("Resize",
                    input,
                    roi,
                    scales,
                    size,
                    coordinate_transformation_mode_s=coordinate_transformation_mode,
                    cubic_coeff_a_f=-0.75,  # only valid when mode="cubic"
                    mode_s=mode,  # nearest, linear, or cubic
                    nearest_mode_s="floor")
    else:  # if not sym_help._is_none(scales)
        scales = sym_help._interpolate_get_scales(g, scale_factor, input.type().dim())
        return g.op("Resize",
                    input,
                    roi,
                    scales,
                    coordinate_transformation_mode_s=coordinate_transformation_mode,
                    cubic_coeff_a_f=-0.75,  # only valid when mode="cubic"
                    mode_s=mode,  # nearest, linear, or cubic
                    nearest_mode_s="floor")  # only valid when mode="nearest"
Example #8
def topk(g, self, k, dim, largest, sorted, out=None):
    if out is not None:
        _unimplemented("TopK", "Out parameter is not supported for topk")
    if not largest:
        _unimplemented("TopK", "Ascending TopK is not supported")
    k = sym_help._maybe_get_const(k, 'i')
    if not sym_help._is_value(k):
        k = g.op("Constant", value_t=torch.tensor(k, dtype=torch.int64))
    from torch.onnx.symbolic_opset9 import unsqueeze
    k = unsqueeze(g, k, 0)
    return g.op("TopK", self, k, axis_i=dim, outputs=2)
Example #9
def cross_entropy_loss(g, self, target, weight, reduction, ignore_index):
    # reduction: 0->none, 1->mean, 2->sum
    reduction = sym_help._maybe_get_const(reduction, 'i')
    reduction_vals = ['none', 'mean', 'sum']
    reduction = reduction_vals[reduction]
    output, log_prob = g.op("com.microsoft::SoftmaxCrossEntropyLossInternal",
                            self, target, weight, ignore_index,
                            reduction_s=reduction, outputs=2)
    output.setType(self.type())
    log_prob.setType(self.type())
    return output
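A hedged sketch of how a contrib-op symbolic like this is typically attached (an assumption, not shown in the listing): torch.onnx.register_custom_op_symbolic binds the function to the aten op for a given opset; the com.microsoft ops it emits are only understood by ONNX Runtime.

import torch.onnx

# Assumes cross_entropy_loss above is importable and that this PyTorch version
# allows overriding aten ops via register_custom_op_symbolic.
# torch.onnx.register_custom_op_symbolic("aten::cross_entropy_loss",
#                                        cross_entropy_loss, 12)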
Example #10
def cross_entropy_loss(g, self, target, weight, reduction, ignore_index, label_smoothing):
    # none reduction : onnx::Constant[value={0}]
    # mean reduction : onnx::Constant[value={1}]
    # sum reduction : onnx::Constant[value={2}]
    reduction = sym_help._maybe_get_const(reduction, "i")
    reduction_vals = ["none", "mean", "sum"]
    reduction = reduction_vals[reduction]

    label_smoothing = sym_help._maybe_get_const(label_smoothing, "f")
    if label_smoothing > 0.0:
        raise RuntimeError("Unsupported: ONNX does not support label_smoothing")

    # in onnx SoftmaxCrossEntropyLoss specification, ignore_index is optional without default value.
    # therefore we need to set ignore_index attribute even if it is not specified (e.g. ignore_index=-100).
    ignore_index = sym_help._maybe_get_const(ignore_index, "i")
    if weight.node().mustBeNone():
        celoss = g.op("SoftmaxCrossEntropyLoss", self, target, reduction_s=reduction, ignore_index_i=ignore_index)
    else:
        celoss = g.op("SoftmaxCrossEntropyLoss", self, target, weight, reduction_s=reduction, ignore_index_i=ignore_index)

    return celoss
Example #11
def cross_entropy_loss(g, self, target, weight, reduction, ignore_index, label_smoothing=0.0):
    label_smoothing = sym_help._maybe_get_const(label_smoothing, "f")
    if label_smoothing > 0.0:
        raise RuntimeError("Unsupported: ONNX does not support label_smoothing")

    # reduction: 0->none, 1->mean, 2->sum
    reduction = sym_help._maybe_get_const(reduction, "i")
    reduction_vals = ["none", "mean", "sum"]
    reduction = reduction_vals[reduction]
    output, log_prob = g.op(
        "com.microsoft::SoftmaxCrossEntropyLossInternal",
        self,
        target,
        weight,
        ignore_index,
        reduction_s=reduction,
        outputs=2,
    )
    output.setType(self.type())
    log_prob.setType(self.type())
    return output
Example #12
def view(g, self, size):
    size = sym_help._maybe_get_const(size, 'is')
    if sym_help._is_value(size):
        shape = size
    else:
        if self.isCompleteTensor():
            self_sizes = self.type().sizes()
            if self_sizes and len(size) == 2 and self_sizes[0] == size[0]:
                old_type, self = _try_cast_integer_to_float(g, self)
                return _cast_to_type(g, g.op("Flatten", self, axis_i=1), old_type)
        shape = g.op("Constant", value_t=torch.LongTensor(size))
    return g.op("Reshape", self, shape)
Example #13
def nll_loss(g, self, target, weight, reduction, ignore_index):
    # none reduction : onnx::Constant[value={0}]
    # mean reduction : onnx::Constant[value={1}]
    # sum reduction : onnx::Constant[value={2}]
    reduction = sym_help._maybe_get_const(reduction, 'i')
    reduction_vals = ['none', 'mean', 'sum']
    reduction = reduction_vals[reduction]

    # when ignore_index is not specified, ignore_index == onnx::Constant[value={-100}]
    ignore_index = sym_help._maybe_get_const(ignore_index, 'i')
    if ignore_index == -100:
        if weight.node().mustBeNone():
            return g.op("NegativeLogLikelihoodLoss",
                        self,
                        target,
                        reduction_s=reduction)
        else:
            return g.op("NegativeLogLikelihoodLoss",
                        self,
                        target,
                        weight,
                        reduction_s=reduction)

    # if ignore_index is specified, pass it through as the ignore_index attribute
    if weight.node().mustBeNone():
        nllloss = g.op("NegativeLogLikelihoodLoss",
                       self,
                       target,
                       reduction_s=reduction,
                       ignore_index_i=ignore_index)
    else:
        nllloss = g.op("NegativeLogLikelihoodLoss",
                       self,
                       target,
                       weight,
                       reduction_s=reduction,
                       ignore_index_i=ignore_index)

    return nllloss
Example #14
def max_pool2d(g, self, kernel_size, stride, padding, dilation, ceil_mode):
    stride_val = sym_help._maybe_get_const(stride, 'is')
    if not stride_val:
        stride = kernel_size
    return g.op("com.microsoft::ATenOp",
                self,
                kernel_size,
                stride,
                padding,
                dilation,
                ceil_mode,
                name_s='aten::max_pool2d_with_indices',
                outputs=2)[0]
Example #15
def max_pool2d(g, self, kernel_size, stride, padding, dilation, ceil_mode):
    stride_val = sym_help._maybe_get_const(stride, 'is')
    if not stride_val:
        stride = kernel_size
    return g.op("org.pytorch.aten::ATen",
                self,
                kernel_size,
                stride,
                padding,
                dilation,
                ceil_mode,
                operator_s='aten::max_pool2d_with_indices',
                outputs=2)[0]
Example #16
def symbolic_fn(g, input, output_size, align_corners=None):
    sym_help._interpolate_warning(interpolate_mode)
    align_corners = sym_help._maybe_get_scalar(align_corners)
    if align_corners:
        return _unimplemented(name, "align_corners == True")
    output_size = sym_help._maybe_get_const(output_size, 'is')
    if sym_help._is_value(output_size):
        return _unimplemented(name, "torch._C.Value (output_size) indexing")
    else:
        scales = [1. if i < 2 else
                  float(output_size[-(dim - i)]) / float(input.type().sizes()[-(dim - i)])
                  for i in range(0, dim)]
    return g.op("Upsample", input, mode_s=interpolate_mode, scales_f=scales)
Example #17
def celu(g, self, alpha):
    alpha = symbolic_helper._maybe_get_const(alpha, "f")
    # if the input is of type double cast it to float
    if self.type().scalarType() == "Double":
        self = g.op("Cast",
                    self,
                    to_i=symbolic_helper.cast_pytorch_to_onnx["Float"])
        out = g.op("Celu", self, alpha_f=alpha)
        return g.op("Cast",
                    out,
                    to_i=symbolic_helper.cast_pytorch_to_onnx["Double"])

    return g.op("Celu", self, alpha_f=alpha)
Example #18
def avg_pool2d(g, self, kernel_size, stride, padding, ceil_mode,
               count_include_pad, divisor_override):
    stride_val = sym_help._maybe_get_const(stride, 'is')
    if not stride_val:
        stride = kernel_size
    return g.op("com.microsoft::ATenOp",
                self,
                kernel_size,
                stride,
                padding,
                ceil_mode,
                count_include_pad,
                divisor_override,
                name_s='aten::avg_pool2d')
Example #19
def avg_pool2d(g, self, kernel_size, stride, padding, ceil_mode,
               count_include_pad, divisor_override):
    stride_val = sym_help._maybe_get_const(stride, 'is')
    if not stride_val:
        stride = kernel_size
    return g.op("org.pytorch.aten::ATen",
                self,
                kernel_size,
                stride,
                padding,
                ceil_mode,
                count_include_pad,
                divisor_override,
                operator_s='aten::avg_pool2d')
Example #20
def __interpolate(g, input, size, scale_factor, mode, align_corners, recompute_scale_factor):
    align_corners = sym_help._maybe_get_const(align_corners, 'b')
    if not sym_help._is_none(align_corners) and align_corners:
        return _unimplemented("interpolate", "align_corners == True")

    if not sym_help._is_none(scale_factor) and sym_help._is_value(scale_factor):
        return _unimplemented("interpolate", "dynamic scales in opset 8")

    if not sym_help._is_none(size) and sym_help._is_value(size):
        return _unimplemented("interpolate", "dynamic size in opset 8")

    scales, mode = sym_help._interpolate_get_scales_and_mode(g, input, size, scale_factor,
                                                             mode , align_corners)
    return g.op("Upsample", input, mode_s=mode, scales_f=scales)
Example #21
def repeat(g, self, repeats):
    if not sym_help._is_value(repeats):
        repeats = g.op("Constant", value_t=torch.LongTensor(repeats))
    if sym_help._is_packed_list(repeats):  
        repeat_size_len = len(sym_help._unpack_list(repeats))
    else:
        const_repeats = sym_help._maybe_get_const(repeats, 'is')
        repeat_size_len = len(const_repeats)
    if self.isCompleteTensor():
        sizes = self.type().sizes()
        diff_dims = repeat_size_len - len(sizes)
        if diff_dims > 0:
            self = sym_opset9.view(g, self, [1] * diff_dims + sizes)
    return g.op("Tile", self, repeats)
Example #22
def upsample_nearest2d(g, input, output_size, align_corners=None):
    align_corners = sym_help._maybe_get_scalar(align_corners)
    if align_corners:
        return _unimplemented("upsample_neareset2d", "align_corners == True")

    output_size = sym_help._maybe_get_const(output_size, 'is')
    if sym_help._is_value(output_size):
        return _unimplemented("upsample_nearest2d",
                              "torch._C.Value (output_size) indexing")
    else:
        height_scale = float(output_size[-2]) / input.type().sizes()[-2]
        width_scale = float(output_size[-1]) / input.type().sizes()[-1]
        scales = [1., 1., height_scale, width_scale]
        return g.op("Upsample", input, mode_s="nearest", scales_f=scales)
Example #23
    def symbolic_fn(g, input, output_size, align_corners=None):
        if align_corners:
            return _unimplemented(name, "align_corners == True")

        output_size = sym_help._maybe_get_const(output_size, 'is')
        if sym_help._is_value(output_size):
            offset = 2
            offsets = g.op("Constant", value_t=torch.tensor([1. for i in range(offset)]))
            dividend = g.op("Cast", output_size, to_i=sym_help.cast_pytorch_to_onnx["Float"])
            divisor = sym_help._slice_helper(g, g.op("Shape", input), axes=[0], ends=[dim], starts=[offset])
            divisor = g.op("Cast", divisor, to_i=sym_help.cast_pytorch_to_onnx["Float"])
            scale_dims = g.op("Div", dividend, divisor)
            scales = g.op("Concat", offsets, scale_dims, axis_i=0)
        else:
            scales_constant = [1. if i < 2 else
                               float(output_size[-(dim - i)]) / float(input.type().sizes()[-(dim - i)])
                               for i in range(0, dim)]
            scales = g.op("Constant", value_t=torch.tensor(scales_constant))
        return g.op("Resize", input, scales, mode_s=interpolate_mode)
Example #24
def symbolic_fn(g, input, output_size, align_corners=None):
    align_corners = sym_help._maybe_get_scalar(align_corners)
    output_size = sym_help._maybe_get_const(output_size, 'is')
    if sym_help._is_value(output_size):
        offsets = g.op("Constant", value_t=torch.ones(offset, dtype=torch.int64))
        output_size = g.op("Concat", offsets, output_size, axis_i=0)
    else:
        output_size = [1 if i < 2 else output_size[-(dim - i)] for i in range(0, dim)]
        output_size = g.op("Constant", value_t=torch.tensor(output_size))
    coordinate_transformation_mode = "asymmetric" if interpolate_mode == "nearest" \
        else "align_corners" if align_corners else "pytorch_half_pixel"
    empty_tensor = g.op("Constant", value_t=torch.tensor([], dtype=torch.float32))
    return g.op("Resize",
                input,
                empty_tensor,  # roi only takes effect with coordinate_transformation_mode="tf_crop_and_resize"
                empty_tensor,  # scales is not needed since we are sending out_size
                output_size,
                coordinate_transformation_mode_s=coordinate_transformation_mode,
                cubic_coeff_a_f=-0.75,  # only valid when mode="cubic"
                mode_s=interpolate_mode,  # nearest, linear, or cubic
                nearest_mode_s="floor")  # only valid when mode="nearest"
Example #25
def symbolic_fn(g, input, output_size, *args):
    scales, align_corners = symbolic_helper._get_interpolate_attributes(
        g, interpolate_mode, args
    )
    symbolic_helper._interpolate_warning(interpolate_mode)
    align_corners = symbolic_helper._maybe_get_scalar(align_corners)
    if align_corners:
        return symbolic_helper._unimplemented(name, "align_corners == True", input)
    output_size = symbolic_helper._maybe_get_const(output_size, "is")
    if symbolic_helper._is_value(output_size):
        return symbolic_helper._unimplemented(
            name, "torch._C.Value (output_size) indexing"
        )
    if scales is None:
        scales = [
            1.0
            if i < 2
            else float(output_size[-(dim - i)])
            / float(input.type().sizes()[-(dim - i)])
            for i in range(0, dim)
        ]
    return g.op("Upsample", input, mode_s=interpolate_mode, scales_f=scales)
Example #26
def binary_cross_entropy_with_logits(g, input, target, weight, pos_weight,
                                     reduction):
    p = g.op("Constant", value_t=torch.tensor([1]))
    sig_x = opset9.sigmoid(g, input)
    log_sig_x = opset9.log(g, sig_x)
    sub_1_x = opset9.sub(g, p, sig_x)
    sub_1_y = opset9.sub(g, p, target)
    log_1_x = opset9.log(g, sub_1_x)
    if pos_weight is None or symbolic_helper._is_none(pos_weight):
        output = opset9.neg(
            g,
            opset9.add(g, opset9.mul(g, target, log_sig_x),
                       opset9.mul(g, sub_1_y, log_1_x)),
        )
    else:
        output = opset9.neg(
            g,
            opset9.add(
                g,
                opset9.mul(g, opset9.mul(g, target, log_sig_x), pos_weight),
                opset9.mul(g, sub_1_y, log_1_x),
            ),
        )

    if weight is not None and not symbolic_helper._is_none(weight):
        output = opset9.mul(g, weight, output)

    reduction = symbolic_helper._maybe_get_const(reduction, "i")
    if reduction == 0:
        return output
    elif reduction == 1:
        return g.op("ReduceMean", output, keepdims_i=0)
    elif reduction == 2:
        return g.op("ReduceSum", output, keepdims_i=0)
    else:
        return symbolic_helper._onnx_unsupported(
            "binary_cross_entropy_with_logits with reduction other than none, mean, or sum",
            input,
        )
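A small numeric sanity check (an assumption, not from the listing) of the decomposition above, which computes -[target * log(sigmoid(x)) + (1 - target) * log(1 - sigmoid(x))] and then applies the requested reduction.

import torch
import torch.nn.functional as F

x = torch.randn(4, 3)
t = torch.rand(4, 3)
manual = -(t * torch.log(torch.sigmoid(x)) + (1 - t) * torch.log(1 - torch.sigmoid(x)))
print(torch.allclose(manual.mean(), F.binary_cross_entropy_with_logits(x, t)))  # expect: True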
Example #27
def _get_arange_dtype(dtype):
    dtype = sym_help._maybe_get_const(dtype, 'i')
    return dtype
Example #28
def __interpolate(g, input, size, scale_factor, mode, align_corners):
    mode = sym_help._maybe_get_const(mode, 's')
    if 'linear' in mode:
        mode = 'linear'
    if 'cubic' in mode:
        mode = 'cubic'
    align_corners = sym_help._maybe_get_const(align_corners, 'b')
    align_corners = False if not isinstance(align_corners,
                                            bool) else align_corners
    coordinate_transformation_mode = "asymmetric" if mode == "nearest" \
        else "align_corners" if align_corners else "pytorch_half_pixel"
    # roi only takes effect with coordinate_transformation_mode="tf_crop_and_resize"
    roi = g.op("Constant", value_t=torch.tensor([], dtype=torch.float32))

    if not sym_help._is_none(size):
        input_size = g.op("Shape", input)
        input_size = sym_help._slice_helper(g,
                                            input_size,
                                            axes=[0],
                                            ends=[2],
                                            starts=[0])
        # in some cases size is not a packed list but size is a scalar
        # We need to also verify that (sym_help._maybe_get_const(size, 't').dim() == 0)
        # but this information is not always available. Try to get the dim,
        # and if not assume that it is not a scalar.
        try:
            is_scalar = not sym_help._is_packed_list(size) and (
                (sym_help._maybe_get_const(size, 't').dim() == 0))
        except AttributeError:
            is_scalar = not sym_help._is_packed_list(size)
            if not is_scalar:
                warnings.warn(
                    "Cannot verify if the output_size is a scalar while exporting interpolate. Assuming that it is not a scalar."
                )

        if is_scalar:
            if not input.type().dim():
                return sym_help._unimplemented(
                    "interpolate (with a scalar output_size)",
                    "missing input shape (try giving an array of output_size values)"
                )
            size = unsqueeze(g, size, 0)
            size = [size for i in range(input.type().dim() - 2)]
            size = g.op("Concat", *size, axis_i=0)
        size = g.op("Cast", size, to_i=sym_help.cast_pytorch_to_onnx['Long'])
        size = g.op("Concat", input_size, size, axis_i=0)
        scales = g.op("Constant",
                      value_t=torch.tensor([], dtype=torch.float32))
        return g.op(
            "Resize",
            input,
            roi,
            scales,
            size,
            coordinate_transformation_mode_s=coordinate_transformation_mode,
            cubic_coeff_a_f=-0.75,  # only valid when mode="cubic"
            mode_s=mode,  # nearest, linear, or cubic
            nearest_mode_s="floor")
    else:  # if not sym_help._is_none(scales)
        if not input.type().dim():
            return sym_help._unimplemented("interpolate (with scales)",
                                           "missing input shape")
        scales = sym_help._interpolate_get_scales(g, scale_factor,
                                                  input.type().dim())
        return g.op(
            "Resize",
            input,
            roi,
            scales,
            coordinate_transformation_mode_s=coordinate_transformation_mode,
            cubic_coeff_a_f=-0.75,  # only valid when mode="cubic"
            mode_s=mode,  # nearest, linear, or cubic
            nearest_mode_s="floor")  # only valid when mode="nearest"
Example #29
def gather(g, self, dim, index, sparse_grad=False):
    if sym_help._maybe_get_const(sparse_grad, 'i'):
        return _unimplemented("gather", "sparse_grad == True")
    if sym_help._operator_export_type == torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK:
        return g.op("ATen", self, dim, index, sparse_grad, operator_s="gather")
    return g.op("GatherElements", self, index, axis_i=dim)
Example #30
def unfold(g, input, dimension, size, step):
    const_size = sym_help._maybe_get_const(size, 'i')
    const_step = sym_help._maybe_get_const(step, 'i')
    if not sym_help._is_value(const_size) and not sym_help._is_value(
            const_step):
        from torch.onnx.symbolic_opset9 import unfold as _unfold
        return _unfold(g, input, dimension, const_size, const_step)
    if sym_help._operator_export_type == torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK:
        return g.op("ATen",
                    input,
                    operator_s="unfold",
                    dimension_i=dimension,
                    size_i=size,
                    step_i=step)

    sizedim = sym_help._get_tensor_dim_size(input, dimension)
    if sizedim is not None:
        low_start = g.op("Constant", value_t=torch.tensor(0))
        low_end = g.op("Constant", value_t=torch.tensor(sizedim))
        hi_end = g.op("Constant", value_t=torch.tensor(sizedim + 1))
        low_indices = g.op("Range", low_start, low_end, step)
        hi_indices = g.op("Range", size, hi_end, step)

        low_size = sym_help._size_helper(
            g, low_indices, g.op("Constant", value_t=torch.tensor(0)))
        hi_size = sym_help._size_helper(
            g, hi_indices, g.op("Constant", value_t=torch.tensor(0)))

        ndim = sym_help._get_tensor_rank(input)
        perm = list(range(0, ndim))
        perm.append(perm.pop(dimension))

        unsqueeze_list = []
        loop_condition = g.op("Constant", value_t=torch.tensor(1))
        loop_condition = g.op("Cast", loop_condition, to_i=9)
        loop_len = g.op("Min", low_size, hi_size)
        loop = g.op("Loop", loop_len, loop_condition)

        loop_block = _add_block(loop.node())
        block_input_iter = _add_input_to_block(loop_block)
        cond = _add_input_to_block(loop_block)

        starts = loop_block.op("Gather", low_indices, block_input_iter)
        ends = loop_block.op("Gather", hi_indices, block_input_iter)
        axes = loop_block.op("Constant", value_t=torch.tensor([2]))
        starts = sym_help._unsqueeze_helper(loop_block, starts, [0])
        ends = sym_help._unsqueeze_helper(loop_block, ends, [0])
        stack = loop_block.op("Slice", input, starts, ends, axes)

        unsqueeze = sym_help._unsqueeze_helper(
            loop_block, loop_block.op("Transpose", stack, perm_i=perm),
            [dimension])
        unsqueeze_list.append(unsqueeze)
        concat = loop_block.op("Concat", *unsqueeze_list, axis_i=0)

        cond_out = loop_block.op("Cast", loop_condition, to_i=9)
        _add_output_to_block(loop_block, cond_out)
        _add_output_to_block(loop_block, concat)

        loop_output = loop.node().output()
        perm = [0, 1, 2, 3, 4]
        perm[0], perm[dimension + 1] = perm[dimension + 1], perm[0]
        transpose = g.op("Transpose", loop_output, perm_i=perm)
        squeeze = sym_help._squeeze_helper(g, transpose, [0])

        return squeeze
    else:
        return _unimplemented("Unfold", "input size not accessible")