Example 1
def CheckAndCompleteUserOpConf(op_conf_proto):
    # Round-trip through the C++ backend: serialize the conf, let the backend
    # check and complete the user op, then parse the result back into protobuf.
    serialized_op_conf = str(text_format.MessageToString(op_conf_proto))
    new_op_conf, error = oneflow_api.CheckAndCompleteUserOpConf(serialized_op_conf)
    if error.has_error_type():
        raise JobBuildAndInferCfgError(error)
    return text_format.Parse(new_op_conf, op_conf_util.OperatorConf())
Example 2
def dynamic_binary_split(
    x: input_blob_util.ArgBlobDef,
    base_shift: int = 2,
    out_num: int = 2,
    name: Optional[str] = None,
) -> List[remote_blob_util.BlobDef]:
    op_conf = op_conf_util.OperatorConf()
    if name is None:
        op_conf.name = id_util.UniqueStr("DynamicBinarySplit_")
    else:
        op_conf.name = name

    obns = []
    out_remote_blobs = []
    for i in range(out_num):
        obns.append("out_" + str(i))

    # "in" is a Python keyword, so the protobuf field must be set via setattr:
    setattr(op_conf.dynamic_binary_split_conf, "in", x.unique_name)
    op_conf.dynamic_binary_split_conf.out[:] = obns
    op_conf.dynamic_binary_split_conf.base_shift = base_shift

    interpret_util.Forward(op_conf)
    for i in range(out_num):
        out_lbi = logical_blob_id_util.LogicalBlobId()
        out_lbi.op_name = op_conf.name
        out_lbi.blob_name = obns[i]
        out_remote_blobs.append(remote_blob_util.RemoteBlob(out_lbi))

    return out_remote_blobs
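
A minimal usage sketch for `dynamic_binary_split`, assuming a OneFlow 0.x lazy-mode session; the placeholder shape and `out_num` are illustrative, and the function is called as defined above rather than through any particular export path:

import oneflow as flow
import oneflow.typing as tp

@flow.global_function()
def split_job(x: tp.Numpy.Placeholder((64,), dtype=flow.float32)):
    # Builds out_num output blobs from `x`; piece sizes follow base_shift.
    pieces = dynamic_binary_split(x, base_shift=2, out_num=4)
    assert len(pieces) == 4  # one RemoteBlob per "out_i" output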
Example 3
def decode_random(
    shape: Sequence[int],
    dtype: flow.dtype,
    batch_size: int = 1,
    initializer: Optional[initializer_conf_util.InitializerConf] = None,
    tick: Optional[oneflow._oneflow_internal.BlobDesc] = None,
    name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
    op_conf = op_conf_util.OperatorConf()
    if name is None:
        name = id_util.UniqueStr("DecodeRandom_")
    assert isinstance(name, str)
    op_conf.name = name
    assert isinstance(shape, (list, tuple))
    op_conf.decode_random_conf.shape.dim.extend(shape)
    assert dtype is not None
    setattr(
        op_conf.decode_random_conf,
        "data_type",
        oneflow._oneflow_internal.deprecated.GetProtoDtype4OfDtype(dtype),
    )
    op_conf.decode_random_conf.batch_size = batch_size
    if initializer is not None:
        op_conf.decode_random_conf.data_initializer.CopyFrom(initializer)
    else:
        op_conf.decode_random_conf.data_initializer.CopyFrom(
            flow.random_uniform_initializer())
    if tick:
        op_conf.decode_random_conf.tick = tick.unique_name
    op_conf.decode_random_conf.out = "out"
    lbi = logical_blob_id_util.LogicalBlobId()
    lbi.op_name = op_conf.name
    lbi.blob_name = "out"
    interpret_util.ConsistentForward(op_conf)
    return remote_blob_util.RemoteBlob(lbi)
Example 4
def elem_cnt(
    input_blob: remote_blob_util.BlobDef,
    axis: Optional[Sequence[int]] = None,
    dtype: Optional[dtype_util.dtype] = None,
    name: Optional[str] = None,
) -> remote_blob_util.BlobDef:
    op_conf = op_conf_util.OperatorConf()
    setattr(
        op_conf,
        "name",
        name if name is not None else id_util.UniqueStr("ShapeElemCnt_"),
    )
    op_conf.shape_elem_cnt_conf.x = input_blob.unique_name
    if axis is None:
        op_conf.shape_elem_cnt_conf.exclude_axis_conf.SetInParent()
    else:
        assert isinstance(axis, (tuple, list))
        op_conf.shape_elem_cnt_conf.include_axis_conf.axis.extend(axis)
    if dtype is not None:
        op_conf.shape_elem_cnt_conf.data_type = dtype.oneflow_proto_dtype
    op_conf.shape_elem_cnt_conf.y = "y"
    interpret_util.Forward(op_conf)
    out_lbi = logical_blob_id_util.LogicalBlobId()
    out_lbi.op_name = op_conf.name
    out_lbi.blob_name = "y"
    return remote_blob_util.RemoteBlob(out_lbi)
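
A hedged usage sketch for `elem_cnt`, again assuming a OneFlow 0.x lazy-mode session and calling the function as defined above:

import oneflow as flow
import oneflow.typing as tp

@flow.global_function()
def elem_cnt_job(x: tp.Numpy.Placeholder((2, 3, 4), dtype=flow.float32)) -> tp.Numpy:
    # With axis=None the full element count is produced: 2 * 3 * 4 = 24.
    # Passing axis=[1, 2] instead would count 3 * 4 = 12.
    return elem_cnt(x, dtype=flow.int32)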
Example 5
def indexed_slices_reduce_sum(
    indices: input_blob_util.ArgBlobDef,
    values: input_blob_util.ArgBlobDef,
    name: Optional[str] = None,
) -> Tuple[remote_blob_util.BlobDef, remote_blob_util.BlobDef, remote_blob_util.BlobDef]:
    op_conf = op_conf_util.OperatorConf()
    if name is None:
        op_conf.name = id_util.UniqueStr("IndexedSlicesReduceSum_")
    else:
        op_conf.name = name

    op_conf.indexed_slices_reduce_sum_conf.x_indices = indices.unique_name
    op_conf.indexed_slices_reduce_sum_conf.x_values = values.unique_name
    op_conf.indexed_slices_reduce_sum_conf.y_indices = "y_indices"
    op_conf.indexed_slices_reduce_sum_conf.y_values = "y_values"
    op_conf.indexed_slices_reduce_sum_conf.num_unique = "num_unique"

    interpret_util.Forward(op_conf)
    y_indices_lbi = logical_blob_id_util.LogicalBlobId()
    y_indices_lbi.op_name = op_conf.name
    y_indices_lbi.blob_name = "y_indices"
    y_values_lbi = logical_blob_id_util.LogicalBlobId()
    y_values_lbi.op_name = op_conf.name
    y_values_lbi.blob_name = "y_values"
    num_unique_lbi = logical_blob_id_util.LogicalBlobId()
    num_unique_lbi.op_name = op_conf.name
    num_unique_lbi.blob_name = "num_unique"

    return (
        remote_blob_util.RemoteBlob(y_indices_lbi),
        remote_blob_util.RemoteBlob(y_values_lbi),
        remote_blob_util.RemoteBlob(num_unique_lbi),
    )
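
A usage sketch for `indexed_slices_reduce_sum` (OneFlow 0.x lazy mode assumed; the shapes are illustrative):

import oneflow as flow
import oneflow.typing as tp

@flow.global_function()
def reduce_job(
    indices: tp.Numpy.Placeholder((8,), dtype=flow.int32),
    values: tp.Numpy.Placeholder((8, 4), dtype=flow.float32),
):
    # Rows of `values` sharing an index are summed; `num_unique` reports the
    # number of distinct indices actually present.
    y_indices, y_values, num_unique = indexed_slices_reduce_sum(indices, values)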
Example 6
def build(builder):
    # `op_name`, `start`, `stop`, and `input_blob` are captured from the
    # enclosing function.
    op_conf = op_conf_pb.OperatorConf()
    # device_tag doesn't matter for the logical_slice op
    device_tag = oneflow.current_scope().device_parallel_desc_symbol.device_tag
    op_conf.device_tag = device_tag
    op_conf.name = op_name
    op_conf.user_conf.op_type_name = "logical_slice"
    op_conf.user_conf.input["x"].s.append("{}/x_0".format(op_name))
    op_conf.user_conf.output["y"].s.append("{}/y_0".format(op_name))
    input_blob_object = input_blob.blob_object
    parallel_conf = input_blob_object.parallel_desc_symbol.parallel_conf
    op_conf.user_conf.attr["parallel_conf"].at_string = str(parallel_conf)
    op_conf.user_conf.attr["start"].at_list_int64.val[:] = start
    op_conf.user_conf.attr["stop"].at_list_int64.val[:] = stop
    op_conf.user_conf.attr["step"].at_list_int64.val[:] = [1] * len(start)
    bn_in_op2blob_object = oneflow_api.deprecated.BnInOp2BlobObject()
    bn_in_op2blob_object["x_0"] = input_blob_object
    scope_symbol_id = _GetScopeSymbolIdFromEagerBlob(input_blob)
    op_attribute = op_infer_util.Infer(op_conf, bn_in_op2blob_object, scope_symbol_id)
    cfg_op_attribute = oneflow_api.deprecated.MakeOpAttributeByString(str(op_attribute))
    builder.StatelessCall(
        cfg_op_attribute,
        parallel_conf,
        bn_in_op2blob_object,
        boxing_util.BoxingTo,
        vm_util._FindOrCreateDelegateBlobObject,
    )
    Yield(bn_in_op2blob_object["y_0"])
Example 7
def tensor_buffer_to_tensor_list(
    input: remote_blob_util.BlobDef,
    shape: Sequence[int],
    dtype: dtype_util.dtype,
    name: Optional[str] = None,
) -> remote_blob_util.BlobDef:
    if name is None:
        name = id_util.UniqueStr("TensorBufferToList_")

    op_conf = op_conf_util.OperatorConf()
    setattr(op_conf, "name", name)
    setattr(op_conf.tensor_buffer_to_tensor_list_conf, "in", input.unique_name)
    setattr(op_conf.tensor_buffer_to_tensor_list_conf, "out", "out")
    op_conf.tensor_buffer_to_tensor_list_conf.shape.dim[:] = list(shape)
    setattr(
        op_conf.tensor_buffer_to_tensor_list_conf,
        "data_type",
        dtype.oneflow_proto_dtype,
    )
    interpret_util.Forward(op_conf)

    lbi = logical_blob_id_util.LogicalBlobId()
    lbi.op_name = op_conf.name
    lbi.blob_name = "out"
    return remote_blob_util.RemoteBlob(lbi)
Example 8
def BuildAssignInstruction(builder):
    # `OP_PREFIX`, `start`, `stop`, `ref_blob`, `ref_blob_object`, and
    # `value_blob_object` are captured from the enclosing function.
    op_conf = op_conf_pb.OperatorConf()
    # device_tag doesn't matter for the logical_slice_assign op
    device_tag = oneflow.current_scope().device_parallel_desc_symbol.device_tag
    op_conf.device_tag = device_tag
    op_name = id_util.UniqueStr(OP_PREFIX)
    op_conf.name = op_name
    op_conf.user_conf.op_type_name = "logical_slice_assign"
    op_conf.user_conf.input["value"].s.append("{}/value_0".format(op_name))
    op_conf.user_conf.input["ref"].s.append("{}/ref_0".format(op_name))
    parallel_conf = ref_blob_object.parallel_desc_symbol.parallel_conf
    op_conf.user_conf.attr["parallel_conf"].at_string = str(parallel_conf)
    op_conf.user_conf.attr["start"].at_list_int64.val[:] = start
    op_conf.user_conf.attr["stop"].at_list_int64.val[:] = stop
    op_conf.user_conf.attr["step"].at_list_int64.val[:] = [1] * len(start)
    bn_in_op2blob_object = oneflow_api.deprecated.BnInOp2BlobObject()
    bn_in_op2blob_object["ref_0"] = ref_blob_object
    bn_in_op2blob_object["value_0"] = value_blob_object
    scope_symbol_id = _GetScopeSymbolIdFromEagerBlob(ref_blob)
    op_attribute = op_infer_util.Infer(op_conf, bn_in_op2blob_object, scope_symbol_id)
    cfg_op_attribute = oneflow_api.deprecated.MakeOpAttributeByString(str(op_attribute))
    builder.StatelessCall(
        cfg_op_attribute,
        parallel_conf,
        bn_in_op2blob_object,
        boxing_util.BoxingTo,
        vm_util._FindOrCreateDelegateBlobObject,
    )
Example 9
def parallel_cast(input, name=None, distribute=None, gradient_distribute=None):
    assert not oneflow.eager_execution_enabled()
    op_conf = op_conf_util.OperatorConf()
    setattr(
        op_conf,
        "name",
        name if name is not None else id_util.UniqueStr("ParallelCast_"),
    )
    op_conf.parallel_cast_conf.out = "out"
    setattr(op_conf.parallel_cast_conf, "in", input.unique_name)

    def to_split_axis(dist):
        # A split distribute carries its axis; broadcast is encoded as an
        # OptInt64 with the value field cleared.
        split_axis = data_type_util.OptInt64()
        if type(dist) is distribute_util.SplitDistribute:
            split_axis.value = dist.axis
        elif type(dist) is distribute_util.BroadcastDistribute:
            split_axis.ClearField("value")
        else:
            raise NotImplementedError
        return split_axis

    if distribute is not None:
        op_conf.parallel_cast_conf.split_axis.CopyFrom(to_split_axis(distribute))
    if gradient_distribute is not None:
        op_conf.parallel_cast_conf.gradient_split_axis.CopyFrom(
            to_split_axis(gradient_distribute)
        )
    compile_context.CurJobAddOp(op_conf)
    lbi = logical_blob_id_util.LogicalBlobId()
    lbi.op_name = op_conf.name
    lbi.blob_name = "out"
    return remote_blob_util.RemoteBlob(lbi)
Example 10
def broadcast_to_compatible_with(
    x: remote_blob_util.BlobDef,
    compatible: Sequence[remote_blob_util.BlobDef],
    name: Optional[str] = None,
) -> remote_blob_util.BlobDef:
    r"""Returns a 'Blob' with the shape can be broadcasted by other shapes

    Args:
        x (remote_blob_util.BlobDef): a 'Blob'
        compatible (Sequence[remote_blob_util.BlobDef]): Sequence of different shape
        name (Optional[str], optional): This operator's name. Defaults to None.

    Returns:
        remote_blob_util.BlobDef: A 'Blob' with the biggest shape
    """
    assert isinstance(compatible, (list, tuple))
    if name is None:
        name = id_util.UniqueStr("BroadcastToCompatibleWith_")

    op_conf = op_conf_util.OperatorConf()
    setattr(op_conf, "name", name)
    setattr(op_conf.broadcast_to_compatible_with_conf, "x", x.unique_name)
    setattr(op_conf.broadcast_to_compatible_with_conf, "y", "y")
    op_conf.broadcast_to_compatible_with_conf.compatible.extend(
        [cp.unique_name for cp in compatible])
    interpret_util.Forward(op_conf)

    ret_lbi = logical_blob_id_util.LogicalBlobId()
    ret_lbi.op_name = op_conf.name
    ret_lbi.blob_name = "y"
    return remote_blob_util.RemoteBlob(ret_lbi)
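
Since the docstring carries no code example, here is a hedged sketch (OneFlow 0.x lazy mode assumed; the shapes are illustrative):

import oneflow as flow
import oneflow.typing as tp

@flow.global_function()
def bcast_job(
    x: tp.Numpy.Placeholder((1, 4), dtype=flow.float32),
    like: tp.Numpy.Placeholder((3, 4), dtype=flow.float32),
) -> tp.Numpy:
    # x with shape (1, 4) is broadcast against the shape of `like`, giving (3, 4).
    return broadcast_to_compatible_with(x, [like])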
Example 11
def ofrecord_loader(
    ofrecord_dir: str,
    batch_size: int = 1,
    data_part_num: int = 1,
    part_name_prefix: str = "part-",
    part_name_suffix_length: int = -1,
    shuffle: bool = False,
    shuffle_buffer_size: int = 1024,
    name: Optional[str] = None,
) -> remote_blob_util.BlobDef:
    if name is None:
        name = id_util.UniqueStr("OFRecord_Loader_")

    op_conf = op_conf_util.OperatorConf()
    op_conf.name = name

    op_conf.record_load_conf.out = "out"
    op_conf.record_load_conf.data_dir = ofrecord_dir
    op_conf.record_load_conf.data_part_num = data_part_num
    op_conf.record_load_conf.batch_size = batch_size
    op_conf.record_load_conf.part_name_prefix = part_name_prefix
    if part_name_suffix_length != -1:
        op_conf.record_load_conf.part_name_suffix_length = part_name_suffix_length
    if shuffle:
        op_conf.record_load_conf.random_shuffle_conf.buffer_size = shuffle_buffer_size
    lbi = logical_blob_id_util.LogicalBlobId()
    lbi.op_name = name
    lbi.blob_name = "out"

    interpret_util.ConsistentForward(op_conf)
    return remote_blob_util.RemoteBlob(lbi)
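
A usage sketch for `ofrecord_loader`; the dataset path is a hypothetical placeholder, and in practice the returned record blob feeds decoder ops (not shown):

import oneflow as flow

@flow.global_function()
def load_job():
    # Reads 4 part files named "part-*" and shuffles with a 1024-record buffer.
    records = ofrecord_loader(
        "/dataset/ofrecord", batch_size=32, data_part_num=4, shuffle=True
    )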
Example 12
def ConstructNaiveBoxingOpConf(
    produced_blob_object,
    consumer_op_arg_parallel_attr,
    in_parallel_num,
    out_parallel_num,
):
    op_conf = op_conf_pb.OperatorConf()
    op_conf.name = "undefined_boxing_op_name"
    op_conf.device_tag = "cpu"
    op_conf.boxing_conf.lbi.op_name = "undefined_boxing_op_name"
    op_conf.boxing_conf.lbi.blob_name = "undefined_boxing_blob_name"
    op_conf.boxing_conf.in_num = in_parallel_num
    op_conf.boxing_conf.out_num = out_parallel_num
    in_sbp_parallel = produced_blob_object.op_arg_parallel_attr.sbp_parallel
    if in_sbp_parallel.HasField("split_parallel"):
        op_conf.boxing_conf.concat_box.axis = in_sbp_parallel.split_parallel.axis
    elif in_parallel_num == 1:
        op_conf.boxing_conf.concat_box.axis = 0
    else:
        assert in_sbp_parallel.HasField("partial_sum_parallel")
        op_conf.boxing_conf.add_box.SetInParent()
    out_sbp_parallel = consumer_op_arg_parallel_attr.sbp_parallel
    if out_sbp_parallel.HasField("split_parallel"):
        out_axis = out_sbp_parallel.split_parallel.axis
    else:
        assert out_parallel_num == 1
        out_axis = 0
    op_conf.boxing_conf.split_box.axis = out_axis
    shape = produced_blob_object.op_arg_blob_attr.shape
    op_conf.boxing_conf.split_box.part_num.extend(
        balanced_splitter.BalancedPartNums(shape[out_axis], out_parallel_num))
    bn_in_op2blob_object = {("in_%s" % i): produced_blob_object
                            for i in range(in_parallel_num)}
    return op_infer_util.Infer(op_conf, bn_in_op2blob_object)
Example 13
def BuildAssignInstruction(builder):
    # `OP_PREFIX`, `start`, `stop`, `ref_blob`, `ref_blob_object`, and
    # `value_blob_object` are captured from the enclosing function.
    op_conf = op_conf_pb.OperatorConf()
    # device_tag doesn't matter for the logical_slice_assign op
    device_tag = oneflow.current_scope().device_parallel_desc_symbol.device_tag
    op_conf.device_tag = device_tag
    op_name = id_util.UniqueStr(OP_PREFIX)
    op_conf.name = op_name
    op_conf.user_conf.op_type_name = "logical_slice_assign"
    op_conf.user_conf.input["value"].s.append("{}/value_0".format(op_name))
    op_conf.user_conf.input["ref"].s.append("{}/ref_0".format(op_name))
    parallel_conf = ref_blob_object.parallel_desc_symbol.parallel_conf
    op_conf.user_conf.attr["parallel_conf"].at_string = str(parallel_conf)
    op_conf.user_conf.attr["start"].at_list_int64.val[:] = start
    op_conf.user_conf.attr["stop"].at_list_int64.val[:] = stop
    op_conf.user_conf.attr["step"].at_list_int64.val[:] = [1] * len(start)
    bn_in_op2blob_object = {
        "ref_0": ref_blob_object,
        "value_0": value_blob_object,
    }
    scope_symbol_id = _GetScopeSymbolIdFromEagerBlob(ref_blob)
    op_attribute = op_infer_util.Infer(op_conf, bn_in_op2blob_object, scope_symbol_id)
    builder.StatelessCall(
        op_attribute,
        parallel_conf=parallel_conf,
        bn_in_op2blob_object=bn_in_op2blob_object,
    )
Example 14
def constant_like(
    like: remote_blob_util.BlobDef,
    value: Union[int, float],
    dtype: Optional[dtype_util.dtype] = None,
    name: Optional[str] = None,
) -> remote_blob_util.BlobDef:
    op_conf = op_conf_util.OperatorConf()
    setattr(
        op_conf,
        "name",
        name if name is not None else id_util.UniqueStr("ConstantLike_"),
    )
    setattr(op_conf.constant_like_conf, "like", like.unique_name)
    if isinstance(value, int):
        op_conf.constant_like_conf.int_operand = value
    elif isinstance(value, float):
        op_conf.constant_like_conf.float_operand = value
    else:
        raise NotImplementedError
    if dtype is not None:
        setattr(op_conf.constant_like_conf, "data_type",
                dtype.oneflow_proto_dtype)
    setattr(op_conf.constant_like_conf, "out", "out")
    interpret_util.Forward(op_conf)
    out_lbi = logical_blob_id_util.LogicalBlobId()
    setattr(out_lbi, "op_name", op_conf.name)
    setattr(out_lbi, "blob_name", "out")
    return remote_blob_util.RemoteBlob(out_lbi)
Example 15
def build(builder):
    # `op_name`, `start`, `stop`, and `input_blob` are captured from the
    # enclosing function.
    op_conf = op_conf_pb.OperatorConf()
    # device_tag doesn't matter for the logical_slice op
    device_tag = oneflow.current_scope().device_parallel_desc_symbol.device_tag
    op_conf.device_tag = device_tag
    op_conf.name = op_name
    op_conf.user_conf.op_type_name = "logical_slice"
    op_conf.user_conf.input["x"].s.append("{}/x_0".format(op_name))
    op_conf.user_conf.output["y"].s.append("{}/y_0".format(op_name))
    input_blob_object = input_blob.blob_object
    parallel_conf = input_blob_object.parallel_desc_symbol.parallel_conf
    op_conf.user_conf.attr["parallel_conf"].at_string = str(parallel_conf)
    op_conf.user_conf.attr["start"].at_list_int64.val[:] = start
    op_conf.user_conf.attr["stop"].at_list_int64.val[:] = stop
    op_conf.user_conf.attr["step"].at_list_int64.val[:] = [1] * len(start)
    bn_in_op2blob_object = {"x_0": input_blob_object}
    scope_symbol_id = _GetScopeSymbolIdFromEagerBlob(input_blob)
    op_attribute = op_infer_util.Infer(op_conf, bn_in_op2blob_object, scope_symbol_id)
    builder.StatelessCall(
        op_attribute,
        parallel_conf=parallel_conf,
        bn_in_op2blob_object=bn_in_op2blob_object,
    )
    Yield(bn_in_op2blob_object["y_0"])
Example 16
def build(builder):
    # `op_name`, `start`, `stop`, `input_blob_object`, and `scope_symbol_id`
    # are captured from the enclosing function.
    op_conf = op_conf_pb.OperatorConf()
    device_tag = flow.current_scope().device_parallel_desc_symbol.device_tag
    op_conf.device_tag = device_tag
    op_conf.name = op_name
    op_conf.user_conf.op_type_name = "logical_slice"
    op_conf.user_conf.input["x"].s.append("{}/x_0".format(op_name))
    op_conf.user_conf.output["y"].s.append("{}/y_0".format(op_name))
    parallel_conf = input_blob_object.parallel_desc_symbol.parallel_conf
    op_conf.user_conf.attr["parallel_conf"].at_string = str(parallel_conf)
    op_conf.user_conf.attr["start"].at_list_int64.val[:] = start
    op_conf.user_conf.attr["stop"].at_list_int64.val[:] = stop
    op_conf.user_conf.attr["step"].at_list_int64.val[:] = [1] * len(start)
    bn_in_op2blob_object = oneflow._oneflow_internal.deprecated.BnInOp2BlobObject()
    bn_in_op2blob_object["x_0"] = input_blob_object
    op_attribute = op_infer_util.Infer(op_conf, bn_in_op2blob_object, scope_symbol_id)
    cfg_op_attribute = oneflow._oneflow_internal.deprecated.MakeOpAttributeByString(
        str(op_attribute)
    )
    builder.StatelessCall(
        cfg_op_attribute,
        parallel_conf,
        bn_in_op2blob_object,
        boxing_util.BoxingTo,
    )
    Yield(bn_in_op2blob_object["y_0"])
Example 17
def argwhere(
    condition: remote_blob_util.BlobDef,
    dtype: Optional[dtype_util.dtype] = None,
    name: Optional[str] = None,
) -> remote_blob_util.BlobDef:
    if name is None:
        name = id_util.UniqueStr("ArgWhere_")

    op_conf = op_conf_util.OperatorConf()
    setattr(op_conf, "name", name)
    setattr(op_conf.arg_where_conf, "in", condition.unique_name)
    setattr(op_conf.arg_where_conf, "out", "out")
    setattr(op_conf.arg_where_conf, "out_size", "out_size")
    if dtype is not None:
        setattr(op_conf.arg_where_conf, "data_type", dtype.oneflow_proto_dtype)
    interpret_util.Forward(op_conf)

    arg_where_out_lbi = logical_blob_id_util.LogicalBlobId()
    setattr(arg_where_out_lbi, "op_name", op_conf.name)
    setattr(arg_where_out_lbi, "blob_name", "out")

    arg_where_out_size_lbi = logical_blob_id_util.LogicalBlobId()
    setattr(arg_where_out_size_lbi, "op_name", op_conf.name)
    setattr(arg_where_out_size_lbi, "blob_name", "out_size")

    arg_where_out = remote_blob_util.RemoteBlob(arg_where_out_lbi)
    arg_where_out_size = remote_blob_util.RemoteBlob(arg_where_out_size_lbi)
    return sync_dynamic_resize(arg_where_out, arg_where_out_size)
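
A hedged sketch for `argwhere`; because the output size depends on the data, the job below assumes the mirrored view and a `ListNumpy` return, mirroring the style of the docstring examples elsewhere on this page:

import oneflow as flow
import oneflow.typing as tp

func_config = flow.FunctionConfig()
func_config.default_logical_view(flow.scope.mirrored_view())

@flow.global_function(function_config=func_config)
def argwhere_job(x: tp.Numpy.Placeholder((4,), dtype=flow.float32)) -> tp.ListNumpy:
    # Returns the indices of the non-zero entries of `x`.
    return argwhere(x, dtype=flow.int32)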
Example 18
def InputOpByArgBlobDef(blob_def):
    assert isinstance(blob_def, input_blob_util.ArgBlobDef)
    op_conf = op_conf_util.OperatorConf()
    op_conf.name = blob_def.op_name
    op_conf.input_conf.out = blob_def.blob_name
    op_conf.input_conf.blob_conf.CopyFrom(blob_def.ToInterfaceBlobConf())
    blob_def.AddAndInferOp(op_conf)
    return remote_blob_util.RemoteBlob(blob_def.lbi)
Example 19
def CheckAndCompleteUserOpConf(op_conf_proto):
    serialized_op_conf = str(text_format.MessageToString(op_conf_proto))
    new_op_conf, error_str = oneflow_internal.CheckAndCompleteUserOpConf(
        serialized_op_conf)
    error = text_format.Parse(error_str, error_util.ErrorProto())
    if error.HasField("error_type"):
        raise JobBuildAndInferError(error)
    return text_format.Parse(new_op_conf, op_conf_util.OperatorConf())
Example 20
def _AssignOpConf():
    op_conf = op_conf_pb.OperatorConf()
    op_conf.name = "assign"
    op_conf.assign_conf.ref = "assign/ref"
    op_conf.assign_conf.value = "assign/value"
    device_tag = oneflow.current_scope().device_parallel_desc_symbol.device_tag
    op_conf.device_tag = device_tag
    return op_conf
Example 21
def __init__(self, op_name, op_type_name=None):
    # Constructor of a user-op builder class (shown standalone here).
    self.op_conf_ = op_conf_util.OperatorConf()
    self.op_conf_.name = op_name
    if op_type_name is not None:
        self.op_conf_.user_conf.op_type_name = op_type_name
    device_tag = oneflow.current_scope().device_parallel_desc_symbol.device_tag
    self.op_conf_.device_tag = device_tag
    self.output_arg_key_list_ = []
Example 22
def _SystemAssignOpConf(ref, value, name=None):
    if name is None:
        name = id_util.UniqueStr("Assign_")
    op_conf = op_conf_util.OperatorConf()
    op_conf.name = name
    op_conf.assign_conf.ref = ref.unique_name
    op_conf.assign_conf.value = value.unique_name
    return op_conf
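
`_SystemAssignOpConf` only builds a protobuf message, so it can be inspected without a session; `_FakeBlob` below is a hypothetical stand-in for anything exposing `unique_name`:

class _FakeBlob:
    def __init__(self, unique_name):
        self.unique_name = unique_name  # hypothetical stand-in attribute

conf = _SystemAssignOpConf(_FakeBlob("var/out"), _FakeBlob("new_value/out"))
# conf.name is "Assign_<n>"; conf.assign_conf.ref == "var/out";
# conf.assign_conf.value == "new_value/out"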
Example 23
def tensor_list_split(
        input_tensor_list: oneflow_api.BlobDesc,
        name: Optional[str] = None) -> Tuple[oneflow_api.BlobDesc, ...]:
    """This operator splits the input `TensorList`. 

    Args:
        input_tensor_list (oneflow_api.BlobDesc): The input `TensorList`. 
        name (Optional[str], optional): The name for the operation. Defaults to None.

    Returns:
        Tuple[oneflow_api.BlobDesc]: A Tuple of `ListNumpy`. 

    For example: 

    .. code-block:: python 

        import oneflow as flow
        import numpy as np
        import oneflow.typing as tp
        from typing import Tuple


        func_config = flow.FunctionConfig()
        func_config.default_data_type(flow.float)
        func_config.default_logical_view(flow.scope.mirrored_view())
        @flow.global_function(function_config=func_config)
        def tensorList_split_Job(x: tp.ListListNumpy.Placeholder(shape=(2, 5, 4), dtype=flow.float32),
        ) -> Tuple[tp.ListNumpy, tp.ListNumpy]:
            return flow.tensor_list_split(x)


        x = np.random.rand(1, 3, 2).astype(np.float32)
        y = np.random.rand(1, 2, 2).astype(np.float32)
        out = tensorList_split_Job([[x, y]])

        # out[0][0].shape (3, 2)
        # out[1][0].shape (2, 2)

    """
    if name is None:
        name = id_util.UniqueStr("TensorListSplit_")

    output_size = input_tensor_list.shape[0]
    op_conf = op_conf_util.OperatorConf()
    setattr(op_conf, "name", name)
    setattr(op_conf.tensor_list_split_conf, "in",
            input_tensor_list.unique_name)
    op_conf.tensor_list_split_conf.out.extend(
        ["out_{}".format(i) for i in range(output_size)])
    interpret_util.Forward(op_conf)
    ret = []
    for i in range(output_size):
        out_lbi = logical_blob_id_util.LogicalBlobId()
        setattr(out_lbi, "op_name", op_conf.name)
        setattr(out_lbi, "blob_name", "out_{}".format(i))
        ret.append(remote_blob_util.RemoteBlob(out_lbi))
    return tuple(ret)
Example 24
def _GetEagerNcclAllReduce(parallel_conf, ibn2blob_object):
    op_conf = op_conf_pb.OperatorConf()
    op_conf.device_tag = "gpu"
    op_conf.name = "eager_nccl_all_reduce"
    op_conf.user_conf.op_type_name = "eager_nccl_all_reduce"
    op_conf.user_conf.input["in"].s.append("eager_nccl_all_reduce/in_0")
    op_conf.user_conf.output["out"].s.append("eager_nccl_all_reduce/out_0")
    op_conf.user_conf.attr["parallel_conf"].at_string = str(parallel_conf)
    return op_infer_util.Infer(op_conf, ibn2blob_object)
Example 25
def _GenModelSaveOpConf(var_blobs, path_lbi):
    op_conf = op_conf_util.OperatorConf()
    op_conf.name = "model_save"
    op_conf.device_tag = "cpu"
    op_conf.model_save_conf.path = "{}/{}".format(path_lbi.op_name, path_lbi.blob_name)
    for blob in var_blobs:
        getattr(op_conf.model_save_conf, "in").append(blob.logical_blob_name)
        getattr(op_conf.model_save_conf, "key").append(blob.logical_blob_name)

    return op_conf
Example 26
def _MakeCopyHdOpConfAndRetLbi():
    op_conf = op_conf_pb.OperatorConf()
    op_conf.name = "copy_hd"
    op_conf.device_tag = "gpu"
    setattr(op_conf.copy_conf, "in", "%s/in" % op_conf.name)
    op_conf.copy_conf.out = "out"
    lbi = logical_blob_id_util.LogicalBlobId()
    lbi.op_name = op_conf.name
    lbi.blob_name = "out"
    return op_conf, lbi
Example 27
def LazyConsistentWatch(blob_watched, handler):
    handler_uuid = str(uuid.uuid1())
    op_conf = op_conf_util.OperatorConf()
    op_conf.name = id_util.UniqueStr("ForeignWatch_")
    setattr(op_conf.foreign_watch_conf, "in", blob_watched.unique_name)
    op_conf.foreign_watch_conf.handler_uuid = handler_uuid
    device_name = blob_watched.parallel_conf.device_name(0)
    with oneflow.scope.placement("cpu", "0:0"):
        compile_context.CurJobAddOp(op_conf)
    watcher_util.BindUuidAndHandler(handler_uuid, blob_watched, handler)
Example 28
def _MakeInputOpConfAndRetLbi(arg_blob_def):
    assert isinstance(arg_blob_def, input_blob_def.ArgBlobDef)
    op_conf = op_conf_util.OperatorConf()
    op_conf.name = id_util.UniqueStr("Input_")
    op_conf.input_conf.out = "out"
    op_conf.input_conf.blob_conf.CopyFrom(arg_blob_def.ToInterfaceBlobConf())
    lbi = logical_blob_id_util.LogicalBlobId()
    lbi.op_name = op_conf.name
    lbi.blob_name = op_conf.input_conf.out
    return op_conf, lbi
Example 29
def GenerateVariableOpConf(
        name,
        shape,
        dtype=None,
        initializer=None,
        regularizer=None,
        trainable=None,
        model_name=None,
        random_seed=None,
        distribute=oneflow_api.distribute.broadcast(),
):
    op_conf = op_conf_util.OperatorConf()
    op_conf.name = name
    op_conf.variable_conf.shape.dim.extend(shape)

    assert dtype is not None
    op_conf.variable_conf.data_type = oneflow_api.deprecated.GetProtoDtype4OfDtype(
        dtype)

    if rt_mode.CurrentMode() == rt_mode.NORMAL_MODE:
        root_path = None
    else:
        root_path = (compile_context.GetCurJobConfigProto().
                     default_initialize_with_snapshot_path())
        dir_path = os.path.join(root_path, name)
        file_path = os.path.join(dir_path, "out")
    if root_path and os.path.isfile(file_path):
        op_conf.variable_conf.initialize_with_snapshot.path = dir_path
        op_conf.variable_conf.initialize_with_snapshot.key = "out"
    else:
        if root_path:
            print("{} not found, will be initialized".format(file_path))
        if initializer is not None:
            op_conf.variable_conf.initializer.CopyFrom(initializer)

    if regularizer is not None:
        op_conf.variable_conf.regularizer.CopyFrom(regularizer)

    if trainable is not None:
        op_conf.variable_conf.trainable = trainable

    if model_name is not None:
        op_conf.variable_conf.model_name = model_name

    if type(distribute) is oneflow_api.distribute.SplitDistribute:
        op_conf.variable_conf.split_axis.value = distribute.axis
    else:
        op_conf.variable_conf.split_axis.ClearField("value")

    if random_seed is not None:
        op_conf.variable_conf.random_seed = random_seed

    op_conf.variable_conf.out = "out"
    return op_conf
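
A sketch of calling `GenerateVariableOpConf`; the name, shape, and initializer values are illustrative assumptions:

import oneflow as flow

conf = GenerateVariableOpConf(
    name="weight",
    shape=(128, 64),
    dtype=flow.float32,
    initializer=flow.random_normal_initializer(stddev=0.02),
    trainable=True,
)
# conf.variable_conf.shape.dim holds [128, 64]; conf.variable_conf.out == "out"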
Example 30
def tensor_list_to_tensor_buffer(
        input: oneflow_api.BlobDesc,
        name: Optional[str] = None) -> oneflow_api.BlobDesc:
    """This operator converts `TensorList` to `TensorBuffer`. 

    Refer to `Concept Explanation <https://docs.oneflow.org/basics_topics/concept_explanation.html#3tensorbuffer-tensorlist>`_ 
    for more about TensorList. 

    Args:
        input (oneflow_api.BlobDesc): The input `TensorList`. 
        name (Optional[str], optional): The name for the operation. Defaults to None.

    Returns:
        oneflow_api.BlobDesc: The result Blob. 

    For example: 

    .. code-block:: python 

        import oneflow as flow
        import numpy as np
        import oneflow.typing as tp

        func_config = flow.FunctionConfig()
        func_config.default_data_type(flow.float)
        func_config.default_logical_view(flow.scope.mirrored_view())
        @flow.global_function(function_config=func_config)
        def tensorList_to_tensorBuffer_Job(x: tp.ListListNumpy.Placeholder(shape=(2, 5, 4), dtype=flow.float32),
        ) -> tp.ListListNumpy:
            x = flow.tensor_list_to_tensor_buffer(input=x)
            return flow.tensor_buffer_to_tensor_list(x, 
                                                    shape=(5, 4), 
                                                    dtype=flow.float32)

        x = np.random.rand(1, 3, 2).astype(np.float32)
        y = np.random.rand(1, 2, 2).astype(np.float32)
        out = tensorList_to_tensorBuffer_Job([[x, y]])

        # out[0][0].shape (1, 3, 2)

    """
    if name is None:
        name = id_util.UniqueStr("TensorListToBuffer_")

    op_conf = op_conf_util.OperatorConf()
    setattr(op_conf, "name", name)
    setattr(op_conf.tensor_list_to_tensor_buffer_conf, "in", input.unique_name)
    setattr(op_conf.tensor_list_to_tensor_buffer_conf, "out", "out")
    interpret_util.Forward(op_conf)

    lbi = logical_blob_id_util.LogicalBlobId()
    lbi.op_name = op_conf.name
    lbi.blob_name = "out"
    return remote_blob_util.RemoteBlob(lbi)