Example #1
def elem_cnt(
    inputs: remote_blob_util.BlobDef,
    dtype: Optional[dtype_util.dtype] = None,
    name: Optional[str] = None,
) -> remote_blob_util.BlobDef:
    op_conf = op_conf_util.OperatorConf()
    setattr(op_conf, "name",
            name if name is not None else id_util.UniqueStr("ElemCnt_"))
    op_conf.shape_elem_cnt_conf.x = inputs.unique_name

    op_conf.shape_elem_cnt_conf.exclude_axis_conf.SetInParent()
    if dtype is not None:
        op_conf.shape_elem_cnt_conf.data_type = dtype.oneflow_proto_dtype
    op_conf.shape_elem_cnt_conf.y = "y"
    interpret_util.Forward(op_conf)
    out_lbi = logical_blob_id_util.LogicalBlobId()
    setattr(out_lbi, "op_name", op_conf.name)
    setattr(out_lbi, "blob_name", "y")
    return remote_blob_util.RemoteBlob(out_lbi)
Example #2
def distribute_clone(x, name=None):
    if name is None:
        name = id_util.UniqueStr("DistributeClone_")
    op_conf = op_conf_util.OperatorConf()
    op_conf.name = name
    setattr(op_conf.distribute_clone_conf, "in", x.unique_name)
    parallel_size = oneflow.current_scope().device_parallel_desc_symbol.parallel_num
    op_conf.distribute_clone_conf.out.extend(
        ["out_%d" % i for i in range(parallel_size)]
    )
    interpret_util.ConsistentForward(op_conf)
    ret = []
    for i in range(parallel_size):
        out = "out_%d" % i
        lbi = logical_blob_id_util.LogicalBlobId()
        lbi.op_name = op_conf.name
        lbi.blob_name = out
        ret.append(remote_blob_util.RemoteBlob(lbi))
    return tuple(ret)
Example #3
def decode_random(
    shape: Sequence[int],
    dtype: flow.dtype,
    batch_size: int = 1,
    initializer: Optional[initializer_conf_util.InitializerConf] = None,
    tick: Optional[oneflow._oneflow_internal.BlobDesc] = None,
    name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
    op_conf = op_conf_util.OperatorConf()

    if name is None:
        name = id_util.UniqueStr("DecodeRandom_")
    assert isinstance(name, str)
    op_conf.name = name

    assert isinstance(shape, (list, tuple))
    op_conf.decode_random_conf.shape.dim.extend(shape)

    assert dtype is not None
    setattr(
        op_conf.decode_random_conf,
        "data_type",
        oneflow._oneflow_internal.deprecated.GetProtoDtype4OfDtype(dtype),
    )

    op_conf.decode_random_conf.batch_size = batch_size

    if initializer is not None:
        op_conf.decode_random_conf.data_initializer.CopyFrom(initializer)
    else:
        op_conf.decode_random_conf.data_initializer.CopyFrom(
            flow.random_uniform_initializer())

    if tick:
        op_conf.decode_random_conf.tick = tick.unique_name
    op_conf.decode_random_conf.out = "out"

    lbi = logical_blob_id_util.LogicalBlobId()
    lbi.op_name = op_conf.name
    lbi.blob_name = "out"

    interpret_util.ConsistentForward(op_conf)
    return remote_blob_util.RemoteBlob(lbi)
Example #4
def argsort(
    input: remote_blob_util.BlobDef,
    direction: str = "ASCENDING",
    name: Optional[str] = None,
) -> remote_blob_util.BlobDef:
    """This operator sorts the input Blob and return the indices of sorted Blob. 

    Args:
        input (remote_blob_util.BlobDef): A Blob
        direction (str, optional): The direction in which to sort the Blob values. If the direction is "ASCENDING", the input is sorted in ascending order; otherwise it is sorted in descending order. Defaults to "ASCENDING".
        name (Optional[str], optional): The name for the operation. Defaults to None.

    Returns:
        remote_blob_util.BlobDef: The indices of the sorted Blob

    For example: 

    .. code-block:: python 

        import oneflow as flow
        import numpy as np
        import oneflow.typing as tp


        @flow.global_function()
        def argsort_Job(x: tp.Numpy.Placeholder((5, ))
        ) -> tp.Numpy:
            return flow.argsort(input=x, 
                                direction='ASCENDING')

        x = np.array([10, 2, 9, 3, 7]).astype("float32")
        out = argsort_Job(x)

        # out [1 3 4 2 0]

    """
    assert direction in ["ASCENDING", "DESCENDING"]
    return (
        flow.user_op_builder(name if name is not None else id_util.UniqueStr("ArgSort_"))
        .Op("arg_sort")
        .Input("in", [input])
        .Output("out")
        .Attr("direction", direction)
        .Build()
        .InferAndTryRun()
        .RemoteBlobList()[0]
    )
Example #5
def dynamic_reshape(x: remote_blob_util.BlobDef,
                    shape: Sequence[int],
                    name: Optional[str] = None) -> remote_blob_util.BlobDef:
    assert isinstance(shape, (tuple, list))
    shape = list(shape)
    op_conf = op_conf_util.OperatorConf()
    setattr(
        op_conf,
        "name",
        name if name is not None else id_util.UniqueStr("DynamicReshape_"),
    )
    setattr(op_conf.dynamic_reshape_conf, "in", x.unique_name)
    op_conf.dynamic_reshape_conf.shape.dim.extend(shape)
    setattr(op_conf.dynamic_reshape_conf, "out", "out")
    interpret_util.Forward(op_conf)
    lbi = logical_blob_id_util.LogicalBlobId()
    lbi.op_name = op_conf.name
    lbi.blob_name = "out"
    return remote_blob_util.RemoteBlob(lbi)
Example #6
def object_segm_poly_to_mask(poly: BlobDef,
                             poly_index: BlobDef,
                             image_size: BlobDef,
                             name: Optional[str] = None) -> BlobDef:
    assert isinstance(poly, BlobDef)
    assert isinstance(poly_index, BlobDef)
    assert isinstance(image_size, BlobDef)
    assert poly.shape[0] == poly_index.shape[0]
    assert poly.shape[0] == image_size.shape[0]

    if name is None:
        name = id_util.UniqueStr("ObjectSegmPolyToMask_")

    op = (
        flow.user_op_builder(name)
        .Op("object_segmentation_polygon_to_mask")
        .Input("poly", [poly])
        .Input("poly_index", [poly_index])
        .Input("image_size", [image_size])
        .Output("out")
        .Build()
    )
    return op.InferAndTryRun().SoleOutputBlob()
Example #7
def _default_initializer_for_determining(tensor):
    assert not tensor.is_determined
    undetermined_tensor = tensor._undetermined_tensor
    variable_name = id_util.UniqueStr("tensor_")

    blob = None

    @global_function_or_identity()
    def job():
        nonlocal blob
        with tensor._placement_scope():
            blob = flow.get_variable(
                name=variable_name,
                shape=tuple(undetermined_tensor.shape),
                dtype=undetermined_tensor.dtype,
                initializer=undetermined_tensor.data_initializer,
            )

    job()
    if undetermined_tensor.is_consistent:
        determined_tensor = oneflow_api.ConsistentTensor(
            undetermined_tensor.shape,
            undetermined_tensor.dtype,
            undetermined_tensor.sbp,
            undetermined_tensor.placement,
            undetermined_tensor.is_lazy,
            undetermined_tensor.requires_grad,
            True,
            undetermined_tensor.retain_grad,
        )
    else:
        determined_tensor = oneflow_api.LocalTensor(
            undetermined_tensor.shape,
            undetermined_tensor.dtype,
            undetermined_tensor.device,
            undetermined_tensor.is_lazy,
            undetermined_tensor.requires_grad,
            True,
            undetermined_tensor.retain_grad,
        )
    determined_tensor._set_blob_object(blob.blob_object)
    return determined_tensor
Example #8
    def Version_1(cls, ctx, node, **kwargs):
        """Sign op."""
        # T sign = Sign(T Input)
        node_dtype = ctx.get_dtype(node.output[0])
        util.MakeSure(node_dtype, "Dtype of {} is None".format(node.name))
        if node_dtype in [
                onnx_pb.TensorProto.COMPLEX64,
                onnx_pb.TensorProto.COMPLEX128,
        ]:
            raise ValueError("dtype " + str(node_dtype) +
                             " is not supported in onnx for now")
        zero_name = id_util.UniqueStr("{}_zero".format(node.name))
        ctx.MakeConst(zero_name, np.array(0, dtype=np.float32))
        if node_dtype not in [
                onnx_pb.TensorProto.FLOAT16,
                onnx_pb.TensorProto.FLOAT,
                onnx_pb.TensorProto.DOUBLE,
        ]:
            cast_node_0 = ctx.MakeNode("Cast", [node.input[0]],
                                       {"to": onnx_pb.TensorProto.FLOAT})
            greater_node = ctx.MakeNode("Greater",
                                        [cast_node_0.output[0], zero_name])
            less_node = ctx.MakeNode("Less",
                                     [cast_node_0.output[0], zero_name])
        else:
            greater_node = ctx.MakeNode("Greater", [node.input[0], zero_name])
            less_node = ctx.MakeNode("Less", [node.input[0], zero_name])
        cast_node_1 = ctx.MakeNode("Cast", [greater_node.output[0]],
                                   {"to": node_dtype})
        cast_node_2 = ctx.MakeNode("Cast", [less_node.output[0]],
                                   {"to": node_dtype})

        shapes = node.output_shapes
        dtypes = node.output_dtypes
        ctx.RemoveNode(node.name)
        ctx.MakeNode(
            "Sub",
            [cast_node_1.output[0], cast_node_2.output[0]],
            outputs=[node.output[0]],
            shapes=shapes,
            dtypes=dtypes,
        )
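The converter above lowers Sign into Greater/Less comparisons followed by Casts and a Sub. A minimal numpy sketch of the identity it relies on, independent of ONNX:

import numpy as np

# The decomposition the converter emits: sign(x) = cast(x > 0) - cast(x < 0)
x = np.array([-3.0, 0.0, 5.0], dtype=np.float32)
sign = (x > 0).astype(np.float32) - (x < 0).astype(np.float32)
assert np.array_equal(sign, np.sign(x))  # [-1., 0., 1.]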
Example #9
def clip_by_value(
    values: remote_blob_util.BlobDef,
    min_value: Optional[Union[int, float]] = None,
    max_value: Optional[Union[int, float]] = None,
    name: Optional[str] = None,
) -> remote_blob_util.BlobDef:
    if name is None:
        name = id_util.UniqueStr("ClipByValue_")

    if min_value is not None and max_value is not None:
        op_builder = (
            flow.user_op_builder(name)
            .Op("clip_by_scalar")
            .Attr("floating_min", float(min_value))
            .Attr("integral_min", int(min_value))
            .Attr("floating_max", float(max_value))
            .Attr("integral_max", int(max_value))
        )
    elif min_value is not None:
        op_builder = (
            flow.user_op_builder(name)
            .Op("clip_by_scalar_min")
            .Attr("floating_min", float(min_value))
            .Attr("integral_min", int(min_value))
        )
    elif max_value is not None:
        op_builder = (
            flow.user_op_builder(name)
            .Op("clip_by_scalar_max")
            .Attr("floating_max", float(max_value))
            .Attr("integral_max", int(max_value))
        )
    else:
        raise ValueError("min_value and max_value cannot be None at the same time")

    return (
        op_builder.Input("x", [values])
        .Output("y")
        .Build()
        .InferAndTryRun()
        .RemoteBlobList()[0]
    )
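A short usage sketch in the same lazy style as the argsort example above; it assumes the operator is exported as flow.clip_by_value, and the job name and values are illustrative:

import numpy as np
import oneflow as flow
import oneflow.typing as tp

@flow.global_function()
def clip_job(x: tp.Numpy.Placeholder((5,))) -> tp.Numpy:
    # clamp every element into [0, 5]
    return flow.clip_by_value(x, min_value=0.0, max_value=5.0)

out = clip_job(np.array([-2.0, 1.0, 7.0, 3.0, 9.0], dtype=np.float32))
# out: [0. 1. 5. 3. 5.]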
Example #10
def _numpy_initializer_for_determining(tensor):
    assert not tensor.is_determined
    undetermined_tensor = tensor._undetermined_tensor
    assert undetermined_tensor.numpy_data is not None
    variable_name = id_util.UniqueStr("tensor_")

    @global_function_or_identity()
    def set_numpy_data():
        with tensor._placement_scope():
            flow.get_variable(
                name=variable_name,
                shape=tuple(undetermined_tensor.shape),
                dtype=undetermined_tensor.dtype,
                initializer=undetermined_tensor.data_initializer,
            )

    set_numpy_data()
    flow.load_variables({variable_name: undetermined_tensor.numpy_data})
    blob = flow.get_all_variables()[variable_name]
    if undetermined_tensor.is_consistent:
        determined_tensor = oneflow_api.ConsistentTensor(
            undetermined_tensor.shape,
            undetermined_tensor.dtype,
            undetermined_tensor.sbp,
            undetermined_tensor.placement,
            undetermined_tensor.is_lazy,
            undetermined_tensor.requires_grad,
            True,
            undetermined_tensor.retain_grad,
        )
    else:
        determined_tensor = oneflow_api.LocalTensor(
            undetermined_tensor.shape,
            undetermined_tensor.dtype,
            undetermined_tensor.device,
            undetermined_tensor.is_lazy,
            undetermined_tensor.requires_grad,
            True,
            undetermined_tensor.retain_grad,
        )
    determined_tensor._set_blob_object(blob.blob_object)
    return determined_tensor
Example #11
def gen_tensor_buffer(
    shape: Sequence[int],
    shape_list: Sequence[Sequence[int]],
    value_list: Sequence[float],
    data_type: Optional[flow.dtype] = flow.float32,
    dynamic_out: Optional[bool] = False,
    name: Optional[str] = None,
) -> oneflow_api.BlobDesc:
    r"""This operator generates a tensor buffer blob.

    Args:
        shape (Sequence[int]): shape of output blob
        shape_list (Sequence[Sequence[int]]): shapes for tensor buffer in output blob
        value_list (Sequence[float]): values for tensor buffer in output blob
        data_type (Optional[flow.dtype]): data type for tensor buffer in output blob
        dynamic_out (Optional[bool]): if output is a dynamic blob
        name (Optional[str]): The name for the operation. Defaults to None.

    Returns:
        BlobDesc: The result Blob.

    For example:

    .. code-block:: python

        import oneflow as flow

        @flow.global_function(function_config=func_config)
        def GenTensorBufferJob():
            with flow.scope.placement("cpu", "0:0"):
                x = flow.gen_tensor_buffer((2,), [(2, 1), (1, 2)], [0.0, 1.0])
                y = flow.tensor_buffer_to_list_of_tensors(x, (100, 100), flow.float, True)
                return y
                
        # y_0.shape (2, 1), y_1.shape (1, 2)
    """
    return (
        flow.user_op_builder(name if name is not None else id_util.UniqueStr("GenTensorBuffer_"))
        .Op("gen_tensor_buffer")
        .Output("out")
        .Attr("shape", shape)
        .Attr("shape_list", shape_list)
        .Attr("value_list", value_list)
        .Attr("data_type", data_type)
        .Attr("dynamic_out", dynamic_out)
        .Build()
        .InferAndTryRun()
        .RemoteBlobList()[0]
    )
Example #12
def relu(
    x: remote_blob_util.BlobDef, name: Optional[str] = None
) -> remote_blob_util.BlobDef:
    r"""ReLU activation

    Args:
        x: Input `Blob`.
    Returns:
        A `Blob`
    """

    return (
        flow.user_op_builder(name if name is not None else id_util.UniqueStr("Relu_"))
        .Op("relu")
        .Input("in", [x])
        .Output("out")
        .Build()
        .InferAndTryRun()
        .RemoteBlobList()[0]
    )
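A usage sketch mirroring the argsort example earlier; it assumes the op is exported as flow.math.relu:

import numpy as np
import oneflow as flow
import oneflow.typing as tp

@flow.global_function()
def relu_job(x: tp.Numpy.Placeholder((3,))) -> tp.Numpy:
    return flow.math.relu(x)

out = relu_job(np.array([-1.0, 0.0, 2.0], dtype=np.float32))
# out: [0. 0. 2.]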
Example #13
 def FollowInputs(self, node, num, space=""):
     """Follow inputs for (helpful for debugging)."""
     val = []
     top = space == ""
     if num == 0:
         return []
     val.append("{}{} {} {}".format(
         space,
         node.type,
         node.name,
         self.get_shape(id_util.UniqueStr(node.name)),
     ))
     space += "    "
     for j in node.inputs:
         val.extend(self.FollowInputs(j, num - 1, space))
     if top:
         print("\n".join(reversed(val)))
         print()
         return []
     return val
Example #14
def _WrapConcatWithCast(ctx, node):
    """wrap concat in casts for opset < 8 since it only supports."""
    supported_types = [onnx_pb.TensorProto.FLOAT, onnx_pb.TensorProto.FLOAT16]
    dtype = ctx.get_dtype(node.output[0])
    need_casting = dtype not in supported_types
    if need_casting:
        output_name = node.output[0]
        # cast each input to float
        for i, inp in enumerate(node.inputs):
            input_cast = ctx.InsertNewNodeOnInput(node, "Cast", node.input[i])
            input_cast.set_attr("to", onnx_pb.TensorProto.FLOAT)
            ctx.set_dtype(input_cast.output[0], onnx_pb.TensorProto.FLOAT)
        next_nodes = ctx.FindOutputConsumers(node.output[0])
        # cast output back to dtype unless the next op is a cast
        if next_nodes[0].type != "Cast":
            op_name = id_util.UniqueStr(node.name)
            output_cast = ctx.InsertNewNodeOnOutput("Cast", output_name, name=op_name)
            output_cast.set_attr("to", dtype)
            ctx.set_dtype(output_cast.output[0], dtype)
            ctx.CopyShape(output_name, output_cast.output[0])
Example #15
def OFRecordRawDecoder(
    input_blob: BlobDef,
    blob_name: str,
    shape: Sequence[int],
    dtype: dtype_util.dtype,
    dim1_varying_length: bool = False,
    auto_zero_padding: bool = False,
    name: Optional[str] = None,
) -> BlobDef:
    if name is None:
        name = id_util.UniqueStr("OFRecordRawDecoder_")
    return (
        flow.user_op_builder(name)
        .Op("ofrecord_raw_decoder")
        .Input("in", [input_blob])
        .Output("out")
        .Attr("name", blob_name)
        .Attr("shape", shape)
        .Attr("data_type", dtype)
        .Attr("dim1_varying_length", dim1_varying_length)
        .Attr("auto_zero_padding", auto_zero_padding)
        .Build()
        .InferAndTryRun()
        .RemoteBlobList()[0]
    )
Example #16
 def __init__(
         self,
         shape,
         dtype,
         name=None,
         distribute=oneflow_api.distribute.auto(),
 ):
     lbi = lbi_util.LogicalBlobId()
     if name is None:
         name = id_util.UniqueStr("Input_")
     lbi.set_op_name(name)
     lbi.set_blob_name("out")
     self.lbi_ = lbi
     assert type(shape) is tuple
     for dim in shape:
         assert type(dim) is int
         assert dim > 0
     self.shape_ = shape
     self.dtype_ = dtype
     self.distribute_ = distribute
Example #17
def image_decode(
    images_bytes_buffer: BlobDef,
    dtype: dtype_util.dtype = dtype_util.uint8,
    color_space: str = "BGR",
    name: Optional[str] = None,
) -> BlobDef:
    # TODO: check that color_space is valid
    if name is None:
        name = id_util.UniqueStr("ImageDecode_")

    op = (
        flow.user_op_builder(name)
        .Op("image_decode")
        .Input("in", [images_bytes_buffer])
        .Output("out")
        .Attr("color_space", color_space)
        .Attr("data_type", dtype)
        .Build()
    )
    return op.InferAndTryRun().SoleOutputBlob()
Example #18
def _default_initializer_for_determining(undetermined_tensor):
    assert not undetermined_tensor.is_consistent
    variable_name = id_util.UniqueStr("tensor_")
    blob = flow.get_variable(
        name=variable_name,
        shape=tuple(undetermined_tensor.shape),
        dtype=undetermined_tensor.dtype,
        initializer=undetermined_tensor.data_initializer,
    )
    determined_tensor = oneflow_api.LocalTensor(
        undetermined_tensor.shape,
        undetermined_tensor.dtype,
        undetermined_tensor.device,
        undetermined_tensor.is_lazy,
        undetermined_tensor.requires_grad,
        True,
        undetermined_tensor.retain_grad,
    )
    determined_tensor._set_blob_object(blob.blob_object)
    return determined_tensor, variable_name
Example #19
def tanh(
    x: remote_blob_util.BlobDef, name: Optional[str] = None
) -> remote_blob_util.BlobDef:
    r"""Computes hyperbolic tangent of `x` element-wise.

    Args:
        x: Input `Blob`.
    Returns:
        A `Blob`
    """

    return (
        flow.user_op_builder(name if name is not None else id_util.UniqueStr("TanH_"))
        .Op("tanh")
        .Input("in", [x])
        .Output("out")
        .Build()
        .InferAndTryRun()
        .RemoteBlobList()[0]
    )
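A usage sketch in the same style, assuming the op is exported as flow.math.tanh:

import numpy as np
import oneflow as flow
import oneflow.typing as tp

@flow.global_function()
def tanh_job(x: tp.Numpy.Placeholder((3,))) -> tp.Numpy:
    return flow.math.tanh(x)

out = tanh_job(np.array([-1.0, 0.0, 1.0], dtype=np.float32))
# out is approximately [-0.7616 0. 0.7616]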
Example #20
def _MakeNewBlobObjectLike(builder, blob_object, new_parallel_desc_symbol):
    op_conf = op_conf_pb.OperatorConf()
    op_conf.name = id_util.UniqueStr("Input")
    op_conf.device_tag = new_parallel_desc_symbol.device_tag
    op_conf.input_conf.out = "out"
    cfg_interface_blob_conf = (
        oneflow_api.oneflow.core.operator.interface_blob_conf.InterfaceBlobConf()
    )
    blob_object.op_arg_parallel_attr.DumpToInterfaceBlobConf(cfg_interface_blob_conf)
    blob_object.op_arg_blob_attr.DumpToInterfaceBlobConf(cfg_interface_blob_conf)
    text_format.Parse(str(cfg_interface_blob_conf), op_conf.input_conf.blob_conf)
    op_conf.scope_symbol_id = oneflow.current_scope().symbol_id
    upstream_signature = op_node_signature_pb.OpNodeSignature()
    op_attribute = c_api_util.InferOpConf(op_conf, upstream_signature)
    parallel_conf = new_parallel_desc_symbol.parallel_conf
    bn_in_op2blob_object = {}
    builder.RawStatelessCall(
        op_attribute, parallel_conf, bn_in_op2blob_object=bn_in_op2blob_object
    )
    return bn_in_op2blob_object["out"]
Example #21
def tensor_to_tensor_buffer(
    x: BlobDef,
    instance_dims: int,
    name: Optional[str] = None,
) -> BlobDef:
    r"""Converts the TensorBuffer Blob to dense Tensor.

    Args:
        x: Input `Blob`.
        instance_dims: The number of dimensions to convert to TensorBuffer.
        name: Name for the operator.
    Returns:
        A `Blob`.
    """
    if name is None:
        name = id_util.UniqueStr("TensorToTensorBuffer_")
    return (
        flow.user_op_builder(name)
        .Op("tensor_to_tensor_buffer")
        .Input("in", [x])
        .Output("out")
        .Attr("instance_dims", instance_dims)
        .Build()
        .InferAndTryRun()
        .RemoteBlobList()[0]
    )
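A minimal sketch of the conversion above, assuming the export name flow.tensor_to_tensor_buffer; the shapes are illustrative:

import numpy as np
import oneflow as flow
import oneflow.typing as tp

@flow.global_function()
def to_buffer_job(x: tp.Numpy.Placeholder((2, 3, 4))) -> None:
    # The trailing instance_dims axes of each instance are packed into one
    # TensorBuffer entry, so the output blob has shape (2,) holding (3, 4) tensors.
    flow.tensor_to_tensor_buffer(x, instance_dims=2)

to_buffer_job(np.ones((2, 3, 4), dtype=np.float32))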
Example #22
def dynamic_binary_concat(
    input_blob_list: Sequence[oneflow_api.BlobDesc],
    source_blob: input_blob_util.ArgBlobDef,
    source_sbp: str = "S:0",
    name: Optional[str] = None,
) -> oneflow_api.BlobDesc:
    op_conf = op_conf_util.OperatorConf()
    if name is None:
        op_conf.name = id_util.UniqueStr("DynamicBinaryConcat_")
    else:
        op_conf.name = name

    in_lbns = []
    for in_blob in input_blob_list:
        in_lbns.append(in_blob.unique_name)

    getattr(op_conf.dynamic_binary_concat_conf, "in").extend(in_lbns)
    # op_conf.dynamic_binary_concat_conf.in[:] = in_lbns
    op_conf.dynamic_binary_concat_conf.out = "out"
    op_conf.dynamic_binary_concat_conf.out_data_type = (
        source_blob.dtype.oneflow_proto_dtype)
    op_conf.dynamic_binary_concat_conf.out_shape.dim.extend(
        list(source_blob.shape))
    if "S" in source_sbp:
        axis = int(source_sbp.split(":")[-1])
        op_conf.dynamic_binary_concat_conf.out_sbp.split_parallel.axis = axis
    elif "B" in source_sbp:
        op_conf.dynamic_binary_concat_conf.out_sbp.broadcast_parallel.SetInParent(
        )
    elif "P" in source_sbp:
        op_conf.dynamic_binary_concat_conf.out_sbp.partial_sum_parallel.SetInParent(
        )
    else:
        print("Error! invalid sbp str:", source_sbp)
        op_conf.dynamic_binary_concat_conf.out_sbp.SetInParent()

    interpret_util.Forward(op_conf)
    out_lbi = logical_blob_id_util.LogicalBlobId()
    out_lbi.op_name = op_conf.name
    out_lbi.blob_name = "out"
    return remote_blob_util.RemoteBlob(out_lbi)
Example #23
def tensor_list_split(
        input_tensor_list: remote_blob_util.BlobDef,
        name: Optional[str] = None) -> Tuple[remote_blob_util.BlobDef]:
    if name is None:
        name = id_util.UniqueStr("TensorListSplit_")

    output_size = input_tensor_list.shape[0]
    op_conf = op_conf_util.OperatorConf()
    setattr(op_conf, "name", name)
    setattr(op_conf.tensor_list_split_conf, "in",
            input_tensor_list.unique_name)
    op_conf.tensor_list_split_conf.out.extend(
        ["out_{}".format(i) for i in range(output_size)])
    interpret_util.Forward(op_conf)
    ret = []
    for i in range(output_size):
        out_lbi = logical_blob_id_util.LogicalBlobId()
        setattr(out_lbi, "op_name", op_conf.name)
        setattr(out_lbi, "blob_name", "out_{}".format(i))
        ret.append(remote_blob_util.RemoteBlob(out_lbi))
    return tuple(ret)
Example #24
def image_batch_align(
    images: BlobDef,
    shape: Sequence[int],
    dtype: dtype_util.dtype,
    alignment: int,
    name: Optional[str] = None,
) -> BlobDef:
    if name is None:
        name = id_util.UniqueStr("ImageBatchAlign_")

    op = (
        flow.user_op_builder(name)
        .Op("image_batch_align")
        .Input("in", [images])
        .Output("out")
        .Attr("shape", shape)
        .Attr("data_type", dtype)
        .Attr("alignment", alignment)
        .Build()
    )
    return op.InferAndTryRun().SoleOutputBlob()
Example #25
def cast(x: remote_blob_util.BlobDef,
         dtype: dtype_util.dtype,
         name: Optional[str] = None) -> remote_blob_util.BlobDef:
    r"""Return a `Blob` of given data type `dtype` and indentical shape to `x`

    Args:
        x (remote_blob_util.BlobDef): Input `Blob`.
        dtype (dtype_util.dtype): A OneFlow data type. For instance, `oneflow.float`.
        name (Optional[str], optional): This operator's name. Defaults to None.

    Returns:
        remote_blob_util.BlobDef: A `Blob`
    """
    if x.dtype == dtype:
        return x
    if name is None:
        name = id_util.UniqueStr("Cast_")

    return (flow.user_op_builder(name).Op("cast").Input(
        "in", [x]).Output("out").Attr(
            "dtype", dtype).Build().InferAndTryRun().RemoteBlobList()[0])
Example #26
def unsorted_segment_sum_like(
    data: remote_blob_util.BlobDef,
    segment_ids: remote_blob_util.BlobDef,
    like: remote_blob_util.BlobDef,
    axis: int = 0,
    name: Optional[str] = None,
) -> remote_blob_util.BlobDef:
    return (
        flow.user_op_builder(
            name if name is not None else id_util.UniqueStr("UnsortedSegmentSumLike_")
        )
        .Op("unsorted_segment_sum_like")
        .Input("data", [data])
        .Input("segment_ids", [segment_ids])
        .Input("like", [like])
        .Output("out")
        .Attr("axis", int(axis))
        .Build()
        .InferAndTryRun()
        .RemoteBlobList()[0]
    )
Example #27
    def _ShapeHandler(self, trans, node):
        # input > trans > shape  can be changed into  input > shape > gather
        if not self._NodesHasSingleConsumerNode([trans]):
            return False

        output_shape = self._g.get_shape(node.output_tensor_names[0])
        output_dtype = self._g.get_dtype(node.output_tensor_names[0])
        self._g.RemoveNode(trans.name)
        self._g.RemoveNode(node.name)
        shape_node = self._g.MakeNode("Shape", [trans.input_tensor_names[0]])
        const_node = self._g.MakeConst(
            id_util.UniqueStr("Const"), np.array(trans.attrs["perm"])
        )
        gather_node = self._g.MakeNode(
            "Gather",
            [shape_node.output_tensor_names[0], const_node.output_tensor_names[0]],
            outputs=node.output_tensor_names,
        )
        self._g.set_shape(gather_node.output_tensor_names[0], output_shape)
        self._g.set_dtype(gather_node.output_tensor_names[0], output_dtype)
        return True
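The rewrite above relies on the identity shape(transpose(x, perm)) == gather(shape(x), perm), which a couple of numpy lines can confirm:

import numpy as np

x = np.zeros((2, 3, 4))
perm = [2, 0, 1]
# shape of the transposed tensor == original shape gathered by perm
assert np.transpose(x, perm).shape == tuple(np.array(x.shape)[perm])  # (4, 2, 3)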
Example #28
def stack(
    inputs: Sequence[remote_blob_util.BlobDef], axis: int, name: Optional[str] = None
) -> remote_blob_util.BlobDef:
    if not isinstance(inputs, (list, tuple)):
        inputs = [inputs]

    if axis < 0:
        axis = axis + len(inputs[0].shape)

    assert axis == 0, "Only support dim0 stack now."

    op_conf = op_conf_util.OperatorConf()
    setattr(op_conf, "name", name or id_util.UniqueStr("Stack_"))
    getattr(op_conf.stack_conf, "in").extend([input.unique_name for input in inputs])
    setattr(op_conf.stack_conf, "axis", axis)
    setattr(op_conf.stack_conf, "out", "out")
    interpret_util.Forward(op_conf)
    lbi = logical_blob_id_util.LogicalBlobId()
    lbi.op_name = op_conf.name
    lbi.blob_name = "out"
    return remote_blob_util.RemoteBlob(lbi)
Example #29
def sync_dynamic_resize(
    inputs: remote_blob_util.BlobDef,
    size: remote_blob_util.BlobDef,
    name: Optional[str] = None,
) -> remote_blob_util.BlobDef:
    op_conf = op_conf_util.OperatorConf()
    setattr(
        op_conf,
        "name",
        name if name is not None else id_util.UniqueStr("SyncDynamicResize_"),
    )
    setattr(op_conf.sync_dynamic_resize_conf, "in", inputs.unique_name)
    setattr(op_conf.sync_dynamic_resize_conf, "size", size.unique_name)
    setattr(op_conf.sync_dynamic_resize_conf, "axis", 0)
    setattr(op_conf.sync_dynamic_resize_conf, "out", "out")
    setattr(op_conf.sync_dynamic_resize_conf, "eager", flow.eager_execution_enabled())
    interpret_util.Forward(op_conf)
    out_lbi = logical_blob_id_util.LogicalBlobId()
    setattr(out_lbi, "op_name", op_conf.name)
    setattr(out_lbi, "blob_name", "out")
    return remote_blob_util.RemoteBlob(out_lbi)
Example #30
def TensorProtoFromNumpy(
    arr: np.ndarray, name=None, external_data=False, export_path=None
):
    if name is None:
        name = id_util.UniqueStr("tensor_")
    tp = numpy_helper.from_array(arr, name)
    # value with size < 1024 bytes will remain in .onnx file
    # (like what pytorch does)
    if (not external_data) or arr.nbytes < 1024:
        return tp
    assert tp.HasField("raw_data")
    tp.ClearField("raw_data")
    export_dir = os.path.dirname(export_path)
    filename = GenerateValidFilename(name)
    with open(os.path.join(export_dir, filename), "wb") as f:
        arr.tofile(f)
    tp.data_location = onnx_pb.TensorProto.EXTERNAL
    external_data = tp.external_data.add()
    external_data.key = "location"
    external_data.value = filename
    return tp
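A hedged usage sketch of the helper above; the tensor names and export path are illustrative:

import numpy as np

# Arrays under 1024 bytes stay inline in the proto; larger arrays are spilled
# to a sidecar file next to export_path when external_data=True.
small_tp = TensorProtoFromNumpy(np.zeros((4,), dtype=np.float32), name="bias")
large_tp = TensorProtoFromNumpy(
    np.zeros((512, 512), dtype=np.float32),
    name="weight",
    external_data=True,
    export_path="/tmp/model.onnx",  # raw data lands in /tmp alongside the model
)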