Example No. 1
from torch.onnx import symbolic_helper as sym_help
from torch.onnx import symbolic_opset9 as opset9


def mul(g, x, y, op_scale, op_zero_point):
    # Dequantize both inputs, multiply in float via opset9, then requantize
    # the result with the output's scale and zero point.
    x, _, _, _ = sym_help.dequantize_helper(g, x)
    y, _, _, _ = sym_help.dequantize_helper(g, y)

    output = opset9.mul(g, x, y)

    return sym_help.quantize_helper(g, output, op_scale, op_zero_point)
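
For intuition, here is an eager-mode analogue of the same dequantize -> float multiply -> requantize pattern; a minimal sketch with illustrative scales and zero points, not taken from any real model:

import torch

qx = torch.quantize_per_tensor(torch.randn(4), scale=0.1, zero_point=128,
                               dtype=torch.quint8)
qy = torch.quantize_per_tensor(torch.randn(4), scale=0.1, zero_point=128,
                               dtype=torch.quint8)
# Dequantize, multiply in float, then requantize with the output parameters,
# just as the symbolic wires dequantize_helper -> mul -> quantize_helper.
out = torch.quantize_per_tensor(qx.dequantize() * qy.dequantize(),
                                scale=0.05, zero_point=128,
                                dtype=torch.quint8)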
Example No. 2
def group_norm_symbolic(g, input, num_groups, weight, bias, eps,
                        cudnn_enabled):
    from torch.onnx.symbolic_opset9 import reshape, mul, add, reshape_as

    channels_num = input.type().sizes()[1]

    if num_groups == channels_num:
        output = g.op('InstanceNormalization',
                      input,
                      weight,
                      bias,
                      epsilon_f=eps)
    else:
        # Reshape from [n, g * cg, h, w] to [1, n * g, cg * h, w].
        x = reshape(g, input, [0, num_groups, -1, 0])
        x = reshape(g, x, [1, -1, 0, 0])
        # Normalize channel-wise.
        x = g.op('MeanVarianceNormalization', x, axes_i=[2, 3])
        # Reshape back.
        x = reshape_as(g, x, input)
        # Apply affine transform.
        x = mul(g, x, reshape(g, weight, [1, channels_num, 1, 1]))
        output = add(g, x, reshape(g, bias, [1, channels_num, 1, 1]))

    return output
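
The reshape trick in the else branch can be verified in eager mode: viewing the input as [1, n * g, cg * h, w] and normalizing over axes 2 and 3 reproduces the per-group statistics of GroupNorm. A minimal sketch (shapes and eps are arbitrary; note that ONNX MeanVarianceNormalization itself applies no epsilon):

import torch
import torch.nn.functional as F

n, groups, cg, h, w = 2, 4, 3, 5, 5
x = torch.randn(n, groups * cg, h, w)
weight, bias = torch.randn(groups * cg), torch.randn(groups * cg)

# [n, g*cg, h, w] -> [1, n*g, cg*h, w], normalize over axes (2, 3),
# mirroring the MeanVarianceNormalization node above.
y = x.reshape(1, n * groups, cg * h, w)
y = (y - y.mean((2, 3), keepdim=True)) / \
    (y.var((2, 3), unbiased=False, keepdim=True) + 1e-5).sqrt()
y = y.reshape_as(x) * weight.view(1, -1, 1, 1) + bias.view(1, -1, 1, 1)

print(torch.allclose(y, F.group_norm(x, groups, weight, bias, eps=1e-5),
                     atol=1e-5))  # True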
Example No. 3
import torch
from torch.onnx import symbolic_helper as sym_help


def addcmul_symbolic(g, self, tensor1, tensor2, value=1, out=None):
    from torch.onnx.symbolic_opset9 import add, mul

    if out is not None:
        sym_help._unimplemented("addcmul",
                                "Out parameter is not supported for addcmul")

    x = mul(g, tensor1, tensor2)
    value = sym_help._maybe_get_scalar(value)
    if sym_help._scalar(value) != 1:
        value = sym_help._if_scalar_type_as(g, value, x)
        if not sym_help._is_value(value):
            value = g.op("Constant",
                         value_t=torch.tensor(value, dtype=torch.float32))
        x = mul(g, x, value)
    return add(g, self, x)
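
The graph built above computes self + value * tensor1 * tensor2; a quick eager-mode check against torch.addcmul (values are arbitrary):

import torch

a, t1, t2 = torch.randn(3), torch.randn(3), torch.randn(3)
# mul(tensor1, tensor2), scaled by value, then added to self.
print(torch.allclose(torch.addcmul(a, t1, t2, value=2.0),
                     a + 2.0 * t1 * t2))  # True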
Example No. 4
def normal(g, loc, scale, seed):
    from torch.onnx.symbolic_opset9 import add, mul

    # If you can sample from a given distribution with mean 0 and variance 1,
    # then you can easily sample from a location-scale transformation of that
    # distribution, which has mean μ and variance σ². If x is a sample from a
    # mean 0, variance 1 distribution, then
    #       σx + μ
    # is a sample with mean μ and variance σ².
    result = mul(g, scale, g.op("RandomNormalLike", loc))
    return add(g, result, loc)
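
The location-scale identity the comment relies on is easy to check in eager mode (sample size is arbitrary):

import torch

mu, sigma = 3.0, 0.5
x = torch.randn(1_000_000)          # samples with mean 0, variance 1
sample = sigma * x + mu             # σx + μ
print(sample.mean(), sample.std())  # ≈ 3.0 and ≈ 0.5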
Example No. 5
from torch.onnx import symbolic_helper
from torch.onnx import symbolic_opset9 as opset9


def mul(g, x, y, op_scale, op_zero_point):
    # Same dequantize -> float mul -> requantize pattern as Example No. 1,
    # written against the newer torch.onnx module layout.
    x, _, _, _ = symbolic_helper.dequantize_helper(g, x)
    y, _, _, _ = symbolic_helper.dequantize_helper(g, y)

    output = opset9.mul(g, x, y)

    return symbolic_helper.quantize_helper(g, output, op_scale,
                                           op_zero_point)
Example No. 6
import torch
from torch.onnx import symbolic_helper as sym_help


def binary_cross_entropy_with_logits(g, input, target, weight, pos_weight,
                                     reduction):
    from torch.onnx.symbolic_opset9 import sigmoid, log, sub, neg, mul, add

    # Builds -[pos_weight * target * log(sigmoid(x))
    #          + (1 - target) * log(1 - sigmoid(x))] node by node.
    p = g.op("Constant", value_t=torch.tensor([1]))
    sig_x = sigmoid(g, input)
    log_sig_x = log(g, sig_x)
    sub_1_x = sub(g, p, sig_x)
    sub_1_y = sub(g, p, target)
    log_1_x = log(g, sub_1_x)
    if pos_weight is None or sym_help._is_none(pos_weight):
        output = neg(
            g, add(g, mul(g, target, log_sig_x), mul(g, sub_1_y, log_1_x)))
    else:
        output = neg(
            g,
            add(g, mul(g, mul(g, target, log_sig_x), pos_weight),
                mul(g, sub_1_y, log_1_x)))

    if weight is not None and not sym_help._is_none(weight):
        output = mul(g, weight, output)

    reduction = sym_help._maybe_get_const(reduction, 'i')
    if reduction == 0:
        return output
    elif reduction == 1:
        return g.op("ReduceMean", output)
    elif reduction == 2:
        return g.op("ReduceSum", output)
    else:
        return sym_help._onnx_unsupported(
            "binary_cross_entropy_with_logits with reduction other than none, mean, or sum"
        )
Example No. 7
import torch
from torch.onnx import symbolic_helper
from torch.onnx import symbolic_opset9 as opset9


def binary_cross_entropy_with_logits(g, input, target, weight, pos_weight,
                                     reduction):
    # Same construction as Example No. 6 with the newer module layout;
    # keepdims_i=0 reduces the loss to a scalar, matching PyTorch's output.
    p = g.op("Constant", value_t=torch.tensor([1]))
    sig_x = opset9.sigmoid(g, input)
    log_sig_x = opset9.log(g, sig_x)
    sub_1_x = opset9.sub(g, p, sig_x)
    sub_1_y = opset9.sub(g, p, target)
    log_1_x = opset9.log(g, sub_1_x)
    if pos_weight is None or symbolic_helper._is_none(pos_weight):
        output = opset9.neg(
            g,
            opset9.add(g, opset9.mul(g, target, log_sig_x),
                       opset9.mul(g, sub_1_y, log_1_x)),
        )
    else:
        output = opset9.neg(
            g,
            opset9.add(
                g,
                opset9.mul(g, opset9.mul(g, target, log_sig_x), pos_weight),
                opset9.mul(g, sub_1_y, log_1_x),
            ),
        )

    if weight is not None and not symbolic_helper._is_none(weight):
        output = opset9.mul(g, weight, output)

    reduction = symbolic_helper._maybe_get_const(reduction, "i")
    if reduction == 0:
        return output
    elif reduction == 1:
        return g.op("ReduceMean", output, keepdims_i=0)
    elif reduction == 2:
        return g.op("ReduceSum", output, keepdims_i=0)
    else:
        return symbolic_helper._onnx_unsupported(
            "binary_cross_entropy_with_logits with reduction other than none, mean, or sum",
            input,
        )
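
Both versions build the textbook formula -[pos_weight * y * log(sigmoid(x)) + (1 - y) * log(1 - sigmoid(x))]. An eager-mode sanity check against torch.nn.functional (shapes arbitrary, per-element weight omitted; the default reduction is the mean, matching the reduction == 1 branch):

import torch
import torch.nn.functional as F

x, y = torch.randn(4, 3), torch.rand(4, 3)
pw = torch.rand(3)
manual = -(pw * y * torch.log(torch.sigmoid(x))
           + (1 - y) * torch.log(1 - torch.sigmoid(x)))
print(torch.allclose(manual.mean(),
                     F.binary_cross_entropy_with_logits(x, y,
                                                        pos_weight=pw)))
# True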
Example No. 8
import torch
from torch.onnx import symbolic_helper as sym_help


def multiclass_nms_core_symbolic(g,
                                 multi_bboxes,
                                 multi_scores,
                                 score_thr,
                                 nms_cfg,
                                 max_num=-1):

    from torch.onnx.symbolic_opset9 import reshape, squeeze
    from torch.onnx.symbolic_opset10 import _slice

    def cast(x, dtype):
        return g.op('Cast', x, to_i=sym_help.cast_pytorch_to_onnx[dtype])

    def get_size(x, dim):
        shape = g.op('Shape', x)
        dim = _slice(g, shape, axes=[0], starts=[dim], ends=[dim + 1])
        return cast(dim, 'Long')

    nms_op_type = nms_cfg.get('type', 'nms')
    assert nms_op_type == 'nms'
    assert 'iou_thr' in nms_cfg
    iou_threshold = nms_cfg['iou_thr']
    assert 0 <= iou_threshold <= 1

    # Transpose and reshape input tensors to fit ONNX NonMaxSuppression.
    multi_bboxes = reshape(g, multi_bboxes, [0, -1, 4])
    multi_bboxes = g.op('Transpose', multi_bboxes, perm_i=[1, 0, 2])

    batches_num = get_size(multi_bboxes, 0)
    spatial_num = get_size(multi_bboxes, 1)

    multi_scores = g.op('Transpose', multi_scores, perm_i=[1, 0])
    scores_shape = g.op('Concat',
                        batches_num,
                        g.op('Constant', value_t=torch.LongTensor([-1])),
                        spatial_num,
                        axis_i=0)
    multi_scores = reshape(g, multi_scores, scores_shape)
    classes_num = get_size(multi_scores, 1)

    assert max_num > 0

    indices = g.op(
        'NonMaxSuppression', multi_bboxes, multi_scores,
        g.op('Constant', value_t=torch.LongTensor([max_num])),
        g.op('Constant', value_t=torch.FloatTensor([iou_threshold])),
        g.op('Constant', value_t=torch.FloatTensor([score_thr])))

    # Flatten bboxes and scores.
    multi_bboxes_flat = reshape(g, multi_bboxes, [-1, 4])
    multi_scores_flat = reshape(g, multi_scores, [-1])

    # Flatten indices.
    batch_indices = _slice(g, indices, axes=[1], starts=[0], ends=[1])
    class_indices = _slice(g, indices, axes=[1], starts=[1], ends=[2])
    box_indices = _slice(g, indices, axes=[1], starts=[2], ends=[3])

    def add(*args, dtype='Long'):
        x = g.op('Add', args[0], args[1])
        if dtype is not None:
            x = cast(x, dtype)
        return x

    def mul(*args, dtype='Long'):
        x = g.op('Mul', args[0], args[1])
        if dtype is not None:
            x = cast(x, dtype)
        return x

    flat_box_indices = add(mul(batch_indices, spatial_num), box_indices)
    flat_score_indices = add(
        mul(add(mul(batch_indices, classes_num), class_indices), spatial_num),
        box_indices)

    # Select bboxes.
    out_bboxes = reshape(
        g, g.op('Gather', multi_bboxes_flat, flat_box_indices, axis_i=0),
        [-1, 4])
    out_scores = reshape(
        g, g.op('Gather', multi_scores_flat, flat_score_indices, axis_i=0),
        [-1, 1])
    # Limitation of this implementation: either the batch size or the number
    # of classes must be one, because class and batch indices are summed.
    class_indices = reshape(g, cast(add(class_indices, batch_indices),
                                    'Float'), [-1, 1])

    # Combine bboxes, scores and labels into a single tensor.
    # This is a workaround for a PyTorch bug (feature?) that limits
    # ONNX operations to a single output tensor.
    out_combined_bboxes = g.op('Concat',
                               out_bboxes,
                               out_scores,
                               class_indices,
                               axis_i=1)

    # Get the top scored bboxes only.
    elements_num = sym_help._size_helper(
        g, out_scores, dim=g.op('Constant', value_t=torch.LongTensor([0])))
    max_num = g.op('Constant', value_t=torch.LongTensor([max_num]))
    if sym_help._export_onnx_opset_version < 12:
        kn = g.op('Concat', max_num, elements_num, axis_i=0)
        kn = g.op('ReduceMin', kn, keepdims_i=0)
    else:
        kn = g.op('Min', max_num, elements_num)
    _, top_indices = sym_help._topk_helper(g, out_scores, kn, dim=0)
    # top_indices = squeeze(g, top_indices, dim=1)
    top_indices = reshape(g, top_indices, [-1])
    out_combined_bboxes = g.op('Gather',
                               out_combined_bboxes,
                               top_indices,
                               axis_i=0)

    return out_combined_bboxes
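
The flattened-index arithmetic behind flat_score_indices can be sanity-checked in eager mode: for scores of shape [batches, classes, spatial], element (b, c, s) of the flattened tensor sits at (b * classes + c) * spatial + s (shapes below are arbitrary):

import torch

batches, classes, spatial = 2, 3, 4
scores = torch.randn(batches, classes, spatial)
flat = scores.reshape(-1)
b, c, s = 1, 2, 3
idx = (b * classes + c) * spatial + s
print(torch.equal(flat[idx], scores[b, c, s]))  # True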