def indexed_slices_reduce_sum(
    indices: input_blob_util.ArgBlobDef,
    values: input_blob_util.ArgBlobDef,
    name: Optional[str] = None,
) -> Tuple[
    remote_blob_util.BlobDef,
    remote_blob_util.BlobDef,
    remote_blob_util.BlobDef,
]:
    op_conf = op_conf_util.OperatorConf()
    if name is None:
        op_conf.name = id_util.UniqueStr("IndexedSlicesReduceSum_")
    else:
        op_conf.name = name
    op_conf.indexed_slices_reduce_sum_conf.x_indices = indices.unique_name
    op_conf.indexed_slices_reduce_sum_conf.x_values = values.unique_name
    op_conf.indexed_slices_reduce_sum_conf.y_indices = "y_indices"
    op_conf.indexed_slices_reduce_sum_conf.y_values = "y_values"
    op_conf.indexed_slices_reduce_sum_conf.num_unique = "num_unique"
    interpret_util.Forward(op_conf)
    y_indices_lbi = logical_blob_id_util.LogicalBlobId()
    y_indices_lbi.op_name = op_conf.name
    y_indices_lbi.blob_name = "y_indices"
    y_values_lbi = logical_blob_id_util.LogicalBlobId()
    y_values_lbi.op_name = op_conf.name
    y_values_lbi.blob_name = "y_values"
    num_unique_lbi = logical_blob_id_util.LogicalBlobId()
    num_unique_lbi.op_name = op_conf.name
    num_unique_lbi.blob_name = "num_unique"
    return (
        remote_blob_util.RemoteBlob(y_indices_lbi),
        remote_blob_util.RemoteBlob(y_values_lbi),
        remote_blob_util.RemoteBlob(num_unique_lbi),
    )
def argwhere(
    condition: remote_blob_util.BlobDef,
    dtype: Optional[dtype_util.dtype] = None,
    name: Optional[str] = None,
) -> remote_blob_util.BlobDef:
    if name is None:
        name = id_util.UniqueStr("ArgWhere_")
    op_conf = op_conf_util.OperatorConf()
    setattr(op_conf, "name", name)
    setattr(op_conf.arg_where_conf, "in", condition.unique_name)
    setattr(op_conf.arg_where_conf, "out", "out")
    setattr(op_conf.arg_where_conf, "out_size", "out_size")
    if dtype is not None:
        setattr(op_conf.arg_where_conf, "data_type", dtype.oneflow_proto_dtype)
    interpret_util.Forward(op_conf)
    arg_where_out_lbi = logical_blob_id_util.LogicalBlobId()
    setattr(arg_where_out_lbi, "op_name", op_conf.name)
    setattr(arg_where_out_lbi, "blob_name", "out")
    arg_where_out_size_lbi = logical_blob_id_util.LogicalBlobId()
    setattr(arg_where_out_size_lbi, "op_name", op_conf.name)
    setattr(arg_where_out_size_lbi, "blob_name", "out_size")
    arg_where_out = remote_blob_util.RemoteBlob(arg_where_out_lbi)
    arg_where_out_size = remote_blob_util.RemoteBlob(arg_where_out_size_lbi)
    return sync_dynamic_resize(arg_where_out, arg_where_out_size)
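# Hedged usage sketch for argwhere. Assumptions not established by this file:
# that the op is exported as flow.argwhere, and that a OneFlow 0.x
# mirrored-view global-function environment (like the examples further down)
# is available. Illustrative only; not part of this module's API.
def _argwhere_usage_sketch():
    import numpy as np
    import oneflow as flow
    import oneflow.typing as tp

    func_config = flow.FunctionConfig()
    func_config.default_logical_view(flow.scope.mirrored_view())

    @flow.global_function(function_config=func_config)
    def argwhere_job(
        x: tp.ListNumpy.Placeholder(shape=(4,), dtype=flow.float32)
    ) -> tp.ListNumpy:
        # The output holds coordinates of non-zero entries, so its first
        # dimension is dynamic; sync_dynamic_resize (see above) trims it.
        return flow.argwhere(x)

    out = argwhere_job([np.array([0.0, 1.0, 0.0, 2.0], dtype=np.float32)])
    # Expected: out[0] lists the non-zero coordinates [[1], [3]].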
def ofrecord_loader(
    ofrecord_dir: str,
    batch_size: int = 1,
    data_part_num: int = 1,
    part_name_prefix: str = "part-",
    part_name_suffix_length: int = -1,
    shuffle: bool = False,
    shuffle_buffer_size: int = 1024,
    name: Optional[str] = None,
) -> remote_blob_util.BlobDef:
    if name is None:
        name = id_util.UniqueStr("OFRecord_Loader_")
    op_conf = op_conf_util.OperatorConf()
    op_conf.name = name
    op_conf.record_load_conf.out = "out"
    op_conf.record_load_conf.data_dir = ofrecord_dir
    op_conf.record_load_conf.data_part_num = data_part_num
    op_conf.record_load_conf.batch_size = batch_size
    op_conf.record_load_conf.part_name_prefix = part_name_prefix
    if part_name_suffix_length != -1:
        op_conf.record_load_conf.part_name_suffix_length = part_name_suffix_length
    if shuffle:
        op_conf.record_load_conf.random_shuffle_conf.buffer_size = shuffle_buffer_size
    lbi = logical_blob_id_util.LogicalBlobId()
    lbi.op_name = name
    lbi.blob_name = "out"
    interpret_util.ConsistentForward(op_conf)
    return remote_blob_util.RemoteBlob(lbi)
def parallel_cast(input, name=None, distribute=None, gradient_distribute=None):
    assert not oneflow.eager_execution_enabled()
    op_conf = op_conf_util.OperatorConf()
    setattr(
        op_conf,
        "name",
        name if name is not None else id_util.UniqueStr("ParallelCast_"),
    )
    op_conf.parallel_cast_conf.out = "out"
    setattr(op_conf.parallel_cast_conf, "in", input.unique_name)

    def to_split_axis(dist):
        split_axis = data_type_util.OptInt64()
        if type(dist) is distribute_util.SplitDistribute:
            split_axis.value = dist.axis
        elif type(dist) is distribute_util.BroadcastDistribute:
            split_axis.ClearField("value")
        else:
            raise NotImplementedError
        return split_axis

    if distribute is not None:
        op_conf.parallel_cast_conf.split_axis.CopyFrom(to_split_axis(distribute))
    if gradient_distribute is not None:
        op_conf.parallel_cast_conf.gradient_split_axis.CopyFrom(
            to_split_axis(gradient_distribute)
        )
    compile_context.CurJobAddOp(op_conf)
    lbi = logical_blob_id_util.LogicalBlobId()
    lbi.op_name = op_conf.name
    lbi.blob_name = "out"
    return remote_blob_util.RemoteBlob(lbi)
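# Hedged usage sketch for parallel_cast. Assumes the 0.x exports
# flow.parallel_cast and flow.distribute.split/broadcast (neither is defined
# in this file); a lazy, non-eager session is required, per the assert above.
def _parallel_cast_usage_sketch():
    import oneflow as flow

    def force_broadcast(x):
        # Re-interpret a blob's parallel distribution: keep the forward value
        # broadcast across devices while splitting the gradient on axis 0.
        return flow.parallel_cast(
            x,
            distribute=flow.distribute.broadcast(),
            gradient_distribute=flow.distribute.split(0),
        )

    return force_broadcast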
def dynamic_binary_split(
    x: input_blob_util.ArgBlobDef,
    base_shift: int = 2,
    out_num: int = 2,
    name: Optional[str] = None,
) -> List[remote_blob_util.BlobDef]:
    op_conf = op_conf_util.OperatorConf()
    if name is None:
        op_conf.name = id_util.UniqueStr("DynamicBinarySplit_")
    else:
        op_conf.name = name
    obns = []
    out_remote_blobs = []
    for i in range(out_num):
        obns.append("out_" + str(i))
    # "in" is a Python keyword, so setattr is used instead of attribute syntax.
    setattr(op_conf.dynamic_binary_split_conf, "in", x.unique_name)
    op_conf.dynamic_binary_split_conf.out[:] = obns
    op_conf.dynamic_binary_split_conf.base_shift = base_shift
    interpret_util.Forward(op_conf)
    for i in range(out_num):
        out_lbi = logical_blob_id_util.LogicalBlobId()
        out_lbi.op_name = op_conf.name
        out_lbi.blob_name = obns[i]
        out_remote_blobs.append(remote_blob_util.RemoteBlob(out_lbi))
    return out_remote_blobs
def elem_cnt(
    input_blob: remote_blob_util.BlobDef,
    axis: Optional[Sequence[int]] = None,
    dtype: Optional[dtype_util.dtype] = None,
    name: Optional[str] = None,
) -> remote_blob_util.BlobDef:
    op_conf = op_conf_util.OperatorConf()
    setattr(
        op_conf,
        "name",
        name if name is not None else id_util.UniqueStr("ShapeElemCnt_"),
    )
    op_conf.shape_elem_cnt_conf.x = input_blob.unique_name
    if axis is None:
        op_conf.shape_elem_cnt_conf.exclude_axis_conf.SetInParent()
    else:
        assert isinstance(axis, (tuple, list))
        op_conf.shape_elem_cnt_conf.include_axis_conf.axis.extend(axis)
    if dtype is not None:
        op_conf.shape_elem_cnt_conf.data_type = dtype.oneflow_proto_dtype
    op_conf.shape_elem_cnt_conf.y = "y"
    interpret_util.Forward(op_conf)
    out_lbi = logical_blob_id_util.LogicalBlobId()
    out_lbi.op_name = op_conf.name
    out_lbi.blob_name = "y"
    return remote_blob_util.RemoteBlob(out_lbi)
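# Hedged usage sketch for elem_cnt with an explicit axis list. The export
# path flow.elem_cnt and the default lazy global-function environment are
# assumptions of this sketch; shapes are illustrative only.
def _elem_cnt_usage_sketch():
    import numpy as np
    import oneflow as flow
    import oneflow.typing as tp

    @flow.global_function()
    def elem_cnt_job(
        x: tp.Numpy.Placeholder(shape=(2, 3, 4), dtype=flow.float32)
    ) -> tp.Numpy:
        # Count elements over axes 1 and 2 only: 3 * 4 = 12.
        return flow.elem_cnt(x, axis=[1, 2], dtype=flow.int32)

    out = elem_cnt_job(np.zeros((2, 3, 4), dtype=np.float32))
    # Expected: out == 12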
def EagerReturnRemoteBlob(remote_blob, allow_cpu_return_op=True):
    if not hob.is_trainable(None):
        return remote_blob
    op_conf, lbi, scope = _GetReturnOpConfAndOutLbiAndScope(
        remote_blob, allow_cpu_return_op
    )
    if remote_blob.blob_object.op_arg_parallel_attr.is_mirrored():
        add_and_infer = compile_context.CurJobAddMirroredOp
    else:
        add_and_infer = compile_context.CurJobAddConsistentOp
    op_attribute = add_and_infer(op_conf, scope)

    def BuildInstruction(builder):
        get_blob_scope = blob_register_util.BnInOp2BlobObjectScope
        with get_blob_scope(blob_register, op_attribute) as bn_in_op2blob_object:
            cfg_op_attribute = oneflow_api.deprecated.MakeOpAttributeByString(
                str(op_attribute)
            )
            builder.StatelessCall(
                cfg_op_attribute,
                remote_blob.blob_object.parallel_desc_symbol.parallel_conf,
                bn_in_op2blob_object,
                boxing_util.BoxingTo,
                vm_util._FindOrCreateDelegateBlobObject,
            )

    vm_util.LogicalRun(BuildInstruction)
    return remote_blob_util.RemoteBlob(lbi)
def constant_like(
    like: remote_blob_util.BlobDef,
    value: Union[int, float],
    dtype: Optional[dtype_util.dtype] = None,
    name: Optional[str] = None,
) -> remote_blob_util.BlobDef:
    op_conf = op_conf_util.OperatorConf()
    setattr(
        op_conf,
        "name",
        name if name is not None else id_util.UniqueStr("ConstantLike_"),
    )
    setattr(op_conf.constant_like_conf, "like", like.unique_name)
    if isinstance(value, int):
        op_conf.constant_like_conf.int_operand = value
    elif isinstance(value, float):
        op_conf.constant_like_conf.float_operand = value
    else:
        raise NotImplementedError
    if dtype is not None:
        setattr(op_conf.constant_like_conf, "data_type", dtype.oneflow_proto_dtype)
    setattr(op_conf.constant_like_conf, "out", "out")
    interpret_util.Forward(op_conf)
    out_lbi = logical_blob_id_util.LogicalBlobId()
    setattr(out_lbi, "op_name", op_conf.name)
    setattr(out_lbi, "blob_name", "out")
    return remote_blob_util.RemoteBlob(out_lbi)
def tensor_buffer_to_tensor_list(
    input: remote_blob_util.BlobDef,
    shape: Sequence[int],
    dtype: dtype_util.dtype,
    name: Optional[str] = None,
) -> remote_blob_util.BlobDef:
    if name is None:
        name = id_util.UniqueStr("TensorBufferToList_")
    op_conf = op_conf_util.OperatorConf()
    setattr(op_conf, "name", name)
    setattr(op_conf.tensor_buffer_to_tensor_list_conf, "in", input.unique_name)
    setattr(op_conf.tensor_buffer_to_tensor_list_conf, "out", "out")
    op_conf.tensor_buffer_to_tensor_list_conf.shape.dim[:] = list(shape)
    setattr(
        op_conf.tensor_buffer_to_tensor_list_conf,
        "data_type",
        dtype.oneflow_proto_dtype,
    )
    interpret_util.Forward(op_conf)
    lbi = logical_blob_id_util.LogicalBlobId()
    lbi.op_name = op_conf.name
    lbi.blob_name = "out"
    return remote_blob_util.RemoteBlob(lbi)
def broadcast_to_compatible_with(
    x: remote_blob_util.BlobDef,
    compatible: Sequence[remote_blob_util.BlobDef],
    name: Optional[str] = None,
) -> remote_blob_util.BlobDef:
    r"""Returns a `Blob` with `x` broadcast to a shape compatible with every blob in `compatible`.

    Args:
        x (remote_blob_util.BlobDef): a `Blob`
        compatible (Sequence[remote_blob_util.BlobDef]): a sequence of `Blob`s whose shapes `x` is broadcast against
        name (Optional[str], optional): This operator's name. Defaults to None.

    Returns:
        remote_blob_util.BlobDef: a `Blob` with the resulting broadcast shape
    """
    assert isinstance(compatible, (list, tuple))
    if name is None:
        name = id_util.UniqueStr("BroadcastToCompatibleWith_")
    op_conf = op_conf_util.OperatorConf()
    setattr(op_conf, "name", name)
    setattr(op_conf.broadcast_to_compatible_with_conf, "x", x.unique_name)
    setattr(op_conf.broadcast_to_compatible_with_conf, "y", "y")
    op_conf.broadcast_to_compatible_with_conf.compatible.extend(
        [cp.unique_name for cp in compatible]
    )
    interpret_util.Forward(op_conf)
    ret_lbi = logical_blob_id_util.LogicalBlobId()
    ret_lbi.op_name = op_conf.name
    ret_lbi.blob_name = "y"
    return remote_blob_util.RemoteBlob(ret_lbi)
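# Hedged usage sketch for broadcast_to_compatible_with. Assumes the export
# flow.broadcast_to_compatible_with and the standard 0.x flow.constant op
# (neither established by this file); shapes are illustrative.
def _broadcast_to_compatible_with_usage_sketch():
    import numpy as np
    import oneflow as flow
    import oneflow.typing as tp

    @flow.global_function()
    def broadcast_job(
        x: tp.Numpy.Placeholder(shape=(4, 1), dtype=flow.float32)
    ) -> tp.Numpy:
        # Broadcast (4, 1) against (1, 5); the result takes shape (4, 5).
        like = flow.constant(0.0, dtype=flow.float32, shape=(1, 5))
        return flow.broadcast_to_compatible_with(x, [like])

    out = broadcast_job(np.ones((4, 1), dtype=np.float32))
    # Expected: out.shape == (4, 5)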
def InputOpByArgBlobDef(blob_def):
    assert isinstance(blob_def, input_blob_util.ArgBlobDef)
    op_conf = op_conf_util.OperatorConf()
    op_conf.name = blob_def.op_name
    op_conf.input_conf.out = blob_def.blob_name
    op_conf.input_conf.blob_conf.CopyFrom(blob_def.ToInterfaceBlobConf())
    blob_def.AddAndInferOp(op_conf)
    return remote_blob_util.RemoteBlob(blob_def.lbi)
def LazyReturnRemoteBlob(remote_blob, allow_cpu_return_op=True):
    assert isinstance(
        remote_blob,
        (oneflow_api.LazyMirroredBlob, oneflow_api.LazyConsistentBlob),
    )
    op_conf, lbi, scope = _GetReturnOpConfAndOutLbiAndScope(
        remote_blob, allow_cpu_return_op
    )
    compile_context.CurJobAddOp(op_conf, scope)
    return remote_blob_util.RemoteBlob(lbi)
def tensor_list_split(
    input_tensor_list: oneflow_api.BlobDesc, name: Optional[str] = None
) -> Tuple[oneflow_api.BlobDesc, ...]:
    """This operator splits the input `TensorList`.

    Args:
        input_tensor_list (oneflow_api.BlobDesc): The input `TensorList`.
        name (Optional[str], optional): The name for the operation. Defaults to None.

    Returns:
        Tuple[oneflow_api.BlobDesc, ...]: A tuple of `Blob`s, each retrieved as `ListNumpy` at runtime.

    For example:

    .. code-block:: python

        import oneflow as flow
        import numpy as np
        import oneflow.typing as tp
        from typing import Tuple


        func_config = flow.FunctionConfig()
        func_config.default_data_type(flow.float)
        func_config.default_logical_view(flow.scope.mirrored_view())

        @flow.global_function(function_config=func_config)
        def tensorList_split_Job(x: tp.ListListNumpy.Placeholder(shape=(2, 5, 4), dtype=flow.float32),
        ) -> Tuple[tp.ListNumpy, tp.ListNumpy]:
            return flow.tensor_list_split(x)


        x = np.random.rand(1, 3, 2).astype(np.float32)
        y = np.random.rand(1, 2, 2).astype(np.float32)
        out = tensorList_split_Job([[x, y]])

        # out[0][0].shape (3, 2)
        # out[1][0].shape (2, 2)

    """
    if name is None:
        name = id_util.UniqueStr("TensorListSplit_")

    output_size = input_tensor_list.shape[0]
    op_conf = op_conf_util.OperatorConf()
    setattr(op_conf, "name", name)
    setattr(op_conf.tensor_list_split_conf, "in", input_tensor_list.unique_name)
    op_conf.tensor_list_split_conf.out.extend(
        ["out_{}".format(i) for i in range(output_size)]
    )
    interpret_util.Forward(op_conf)
    ret = []
    for i in range(output_size):
        out_lbi = logical_blob_id_util.LogicalBlobId()
        setattr(out_lbi, "op_name", op_conf.name)
        setattr(out_lbi, "blob_name", "out_{}".format(i))
        ret.append(remote_blob_util.RemoteBlob(out_lbi))
    return tuple(ret)
def tensor_list_to_tensor_buffer(
    input: oneflow_api.BlobDesc, name: Optional[str] = None
) -> oneflow_api.BlobDesc:
    """This operator converts `TensorList` to `TensorBuffer`.

    Refer to `Concept Explanation <https://docs.oneflow.org/basics_topics/concept_explanation.html#3tensorbuffer-tensorlist>`_
    for more about TensorList.

    Args:
        input (oneflow_api.BlobDesc): The input `TensorList`.
        name (Optional[str], optional): The name for the operation. Defaults to None.

    Returns:
        oneflow_api.BlobDesc: The result Blob.

    For example:

    .. code-block:: python

        import oneflow as flow
        import numpy as np
        import oneflow.typing as tp


        func_config = flow.FunctionConfig()
        func_config.default_data_type(flow.float)
        func_config.default_logical_view(flow.scope.mirrored_view())

        @flow.global_function(function_config=func_config)
        def tensorList_to_tensorBuffer_Job(x: tp.ListListNumpy.Placeholder(shape=(2, 5, 4), dtype=flow.float32),
        ) -> tp.ListListNumpy:
            x = flow.tensor_list_to_tensor_buffer(input=x)
            return flow.tensor_buffer_to_tensor_list(x,
                                                     shape=(5, 4),
                                                     dtype=flow.float32)


        x = np.random.rand(1, 3, 2).astype(np.float32)
        y = np.random.rand(1, 2, 2).astype(np.float32)
        out = tensorList_to_tensorBuffer_Job([[x, y]])

        # out[0][0].shape (1, 3, 2)

    """
    if name is None:
        name = id_util.UniqueStr("TensorListToBuffer_")

    op_conf = op_conf_util.OperatorConf()
    setattr(op_conf, "name", name)
    setattr(op_conf.tensor_list_to_tensor_buffer_conf, "in", input.unique_name)
    setattr(op_conf.tensor_list_to_tensor_buffer_conf, "out", "out")
    interpret_util.Forward(op_conf)
    lbi = logical_blob_id_util.LogicalBlobId()
    lbi.op_name = op_conf.name
    lbi.blob_name = "out"
    return remote_blob_util.RemoteBlob(lbi)
def unique_with_counts(
    x: input_blob_util.ArgBlobDef,
    out_idx: flow.dtype = flow.int32,
    name: Optional[str] = None,
) -> Tuple[
    oneflow_api.BlobDesc,
    oneflow_api.BlobDesc,
    oneflow_api.BlobDesc,
    oneflow_api.BlobDesc,
]:
    op_conf = op_conf_util.OperatorConf()
    if name is None:
        op_conf.name = id_util.UniqueStr("UniqueWithCounts_")
    else:
        op_conf.name = name
    op_conf.unique_with_counts_conf.x = x.unique_name
    op_conf.unique_with_counts_conf.y = "y"
    op_conf.unique_with_counts_conf.idx = "idx"
    op_conf.unique_with_counts_conf.count = "count"
    op_conf.unique_with_counts_conf.num_unique = "num_unique"
    op_conf.unique_with_counts_conf.out_idx = oneflow_api.deprecated.GetProtoDtype4OfDtype(
        out_idx
    )
    interpret_util.Forward(op_conf)
    y_lbi = logical_blob_id_util.LogicalBlobId()
    y_lbi.op_name = op_conf.name
    y_lbi.blob_name = "y"
    idx_lbi = logical_blob_id_util.LogicalBlobId()
    idx_lbi.op_name = op_conf.name
    idx_lbi.blob_name = "idx"
    count_lbi = logical_blob_id_util.LogicalBlobId()
    count_lbi.op_name = op_conf.name
    count_lbi.blob_name = "count"
    num_unique_lbi = logical_blob_id_util.LogicalBlobId()
    num_unique_lbi.op_name = op_conf.name
    num_unique_lbi.blob_name = "num_unique"
    return (
        remote_blob_util.RemoteBlob(y_lbi),
        remote_blob_util.RemoteBlob(idx_lbi),
        remote_blob_util.RemoteBlob(count_lbi),
        remote_blob_util.RemoteBlob(num_unique_lbi),
    )
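# Hedged usage sketch for unique_with_counts. The export path
# flow.unique_with_counts is an assumption of this sketch. A mirrored-view
# job is used since the four outputs (y, idx, count, num_unique) are
# dynamically sized.
def _unique_with_counts_usage_sketch():
    from typing import Tuple

    import numpy as np
    import oneflow as flow
    import oneflow.typing as tp

    func_config = flow.FunctionConfig()
    func_config.default_logical_view(flow.scope.mirrored_view())

    @flow.global_function(function_config=func_config)
    def unique_job(
        x: tp.ListNumpy.Placeholder(shape=(5,), dtype=flow.int32)
    ) -> Tuple[tp.ListNumpy, tp.ListNumpy, tp.ListNumpy, tp.ListNumpy]:
        return flow.unique_with_counts(x)

    y, idx, count, num_unique = unique_job(
        [np.array([3, 1, 3, 2, 1], dtype=np.int32)]
    )
    # Expected: num_unique[0] == 3, and count gives each unique value's frequency.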
def distribute_add(xs, name=None):
    assert oneflow.placement.current_scope().parallel_size == len(xs)
    if name is None:
        name = id_util.UniqueStr("DistributeAdd_")
    op_conf = op_conf_util.OperatorConf()
    op_conf.name = name
    getattr(op_conf.distribute_add_conf, "in").extend(
        [_SoleConsistentLbn(x) for x in xs]
    )
    op_conf.distribute_add_conf.out = "out"
    interpret_util.ConsistentForward(op_conf)
    lbi = logical_blob_id_util.LogicalBlobId()
    lbi.op_name = op_conf.name
    lbi.blob_name = "out"
    return remote_blob_util.RemoteBlob(lbi)
def image_decoder_random_crop_resize(
    input_blob: oneflow_api.BlobDesc,
    target_width: int,
    target_height: int,
    num_attempts: Optional[int] = None,
    seed: Optional[int] = None,
    random_area: Optional[Sequence[float]] = None,
    random_aspect_ratio: Optional[Sequence[float]] = None,
    num_workers: Optional[int] = None,
    warmup_size: Optional[int] = None,
    max_num_pixels: Optional[int] = None,
    name: Optional[str] = None,
) -> oneflow_api.BlobDesc:
    if name is None:
        name = id_util.UniqueStr("ImageDecoderRandomCropResize_")
    op_conf = op_conf_util.OperatorConf()
    op_conf.name = name
    conf = op_conf.image_decoder_random_crop_resize_conf
    setattr(conf, "in", input_blob.unique_name)
    conf.out = "out"
    conf.target_width = target_width
    conf.target_height = target_height
    if num_attempts is not None:
        conf.num_attempts = num_attempts
    if seed is not None:
        conf.seed = seed
    if random_area is not None:
        assert len(random_area) == 2
        conf.random_area_min = random_area[0]
        conf.random_area_max = random_area[1]
    if random_aspect_ratio is not None:
        assert len(random_aspect_ratio) == 2
        conf.random_aspect_ratio_min = random_aspect_ratio[0]
        conf.random_aspect_ratio_max = random_aspect_ratio[1]
    if num_workers is not None:
        conf.num_workers = num_workers
    if warmup_size is not None:
        conf.warmup_size = warmup_size
    if max_num_pixels is not None:
        conf.max_num_pixels = max_num_pixels
    interpret_util.Forward(op_conf)
    lbi = logical_blob_id_util.LogicalBlobId()
    lbi.op_name = op_conf.name
    lbi.blob_name = "out"
    return remote_blob_util.RemoteBlob(lbi)
def unpack(input, unpack_num, name=None):
    assert not oneflow.eager_execution_enabled()
    op_conf = op_conf_util.OperatorConf()
    setattr(
        op_conf,
        "name",
        name if name is not None else id_util.UniqueStr("Unpack_"),
    )
    setattr(op_conf.unpack_conf, "in", input.unique_name)
    op_conf.unpack_conf.out = "out"
    op_conf.unpack_conf.unpack_num = unpack_num
    compile_context.CurJobAddOp(op_conf)
    lbi = logical_blob_id_util.LogicalBlobId()
    lbi.op_name = op_conf.name
    lbi.blob_name = "out"
    return remote_blob_util.RemoteBlob(lbi)
def distribute_concat(xs, axis=0, name=None):
    assert (
        oneflow.current_scope().device_parallel_desc_symbol.parallel_num == len(xs)
    )
    if name is None:
        name = id_util.UniqueStr("DistributeConcat_")
    op_conf = op_conf_util.OperatorConf()
    op_conf.name = name
    getattr(op_conf.distribute_concat_conf, "in").extend(
        [_SoleConsistentLbn(x) for x in xs]
    )
    op_conf.distribute_concat_conf.axis = axis
    op_conf.distribute_concat_conf.out = "out"
    interpret_util.ConsistentForward(op_conf)
    lbi = logical_blob_id_util.LogicalBlobId()
    lbi.op_name = op_conf.name
    lbi.blob_name = "out"
    return remote_blob_util.RemoteBlob(lbi)
def acc(one, max_acc_num, name=None):
    assert not oneflow.eager_execution_enabled()
    op_conf = op_conf_util.OperatorConf()
    setattr(
        op_conf,
        "name",
        name if name is not None else id_util.UniqueStr("Acc_"),
    )
    op_conf.acc_conf.one = one.unique_name
    op_conf.acc_conf.acc = "acc"
    op_conf.acc_conf.max_acc_num = max_acc_num
    compile_context.CurJobAddOp(op_conf)
    lbi = logical_blob_id_util.LogicalBlobId()
    lbi.op_name = op_conf.name
    lbi.blob_name = "acc"
    return remote_blob_util.RemoteBlob(lbi)
def tensor_list_to_tensor_buffer(
    input: remote_blob_util.BlobDef, name: Optional[str] = None
) -> remote_blob_util.BlobDef:
    if name is None:
        name = id_util.UniqueStr("TensorListToBuffer_")
    op_conf = op_conf_util.OperatorConf()
    setattr(op_conf, "name", name)
    setattr(op_conf.tensor_list_to_tensor_buffer_conf, "in", input.unique_name)
    setattr(op_conf.tensor_list_to_tensor_buffer_conf, "out", "out")
    interpret_util.Forward(op_conf)
    lbi = logical_blob_id_util.LogicalBlobId()
    lbi.op_name = op_conf.name
    lbi.blob_name = "out"
    return remote_blob_util.RemoteBlob(lbi)
def square_sum(
    x: input_blob_util.ArgBlobDef, name: Optional[str] = None
) -> remote_blob_util.BlobDef:
    op_conf = op_conf_util.OperatorConf()
    if name is None:
        op_conf.name = id_util.UniqueStr("SquareSum_")
    else:
        op_conf.name = name
    op_conf.square_sum_conf.x = x.unique_name
    op_conf.square_sum_conf.y = "y"
    interpret_util.Forward(op_conf)
    lbi = logical_blob_id_util.LogicalBlobId()
    lbi.op_name = op_conf.name
    lbi.blob_name = "y"
    return remote_blob_util.RemoteBlob(lbi)
def dynamic_binary_concat(
    input_blob_list: Sequence[remote_blob_util.BlobDef],
    source_blob: input_blob_util.ArgBlobDef,
    source_sbp: str = "S:0",
    name: Optional[str] = None,
) -> remote_blob_util.BlobDef:
    op_conf = op_conf_util.OperatorConf()
    if name is None:
        op_conf.name = id_util.UniqueStr("DynamicBinaryConcat_")
    else:
        op_conf.name = name
    in_lbns = []
    for in_blob in input_blob_list:
        in_lbns.append(in_blob.unique_name)
    # "in" is a Python keyword, so getattr is used instead of attribute syntax.
    getattr(op_conf.dynamic_binary_concat_conf, "in").extend(in_lbns)
    op_conf.dynamic_binary_concat_conf.out = "out"
    op_conf.dynamic_binary_concat_conf.out_data_type = (
        source_blob.dtype.oneflow_proto_dtype
    )
    op_conf.dynamic_binary_concat_conf.out_shape.dim.extend(list(source_blob.shape))
    if source_blob.batch_axis is not None:
        op_conf.dynamic_binary_concat_conf.out_batch_axis.value = source_blob.batch_axis
    else:
        op_conf.dynamic_binary_concat_conf.out_batch_axis.SetInParent()
    if "S" in source_sbp:
        axis = int(source_sbp.split(":")[-1])
        op_conf.dynamic_binary_concat_conf.out_sbp.split_parallel.axis = axis
    elif "B" in source_sbp:
        op_conf.dynamic_binary_concat_conf.out_sbp.broadcast_parallel.SetInParent()
    elif "P" in source_sbp:
        op_conf.dynamic_binary_concat_conf.out_sbp.partial_sum_parallel.SetInParent()
    else:
        raise ValueError("invalid sbp str: " + source_sbp)
    interpret_util.Forward(op_conf)
    out_lbi = logical_blob_id_util.LogicalBlobId()
    out_lbi.op_name = op_conf.name
    out_lbi.blob_name = "out"
    return remote_blob_util.RemoteBlob(out_lbi)
def reshape(
    x: remote_blob_util.BlobDef, shape: Sequence[int], name: Optional[str] = None
) -> remote_blob_util.BlobDef:
    r"""Reshapes a blob.

    Args:
        x: A `Blob`.
        shape: Shape of the output blob. At most one dimension may be -1,
            in which case it is inferred from the remaining dimensions.
        name: A name for the operation (optional).

    Returns:
        A `Blob` with the same type as `x`.
    """
    x = flow.cast_to_current_logical_view(x)
    assert isinstance(shape, (tuple, list))
    shape = list(shape)
    assert all(dim == -1 or dim > 0 for dim in shape)
    assert shape.count(-1) <= 1
    if not x.is_dynamic:
        if name is None:
            name = id_util.UniqueStr("Reshape_")
        return (
            flow.user_op_builder(name)
            .Op("reshape")
            .Input("in", [x])
            .Output("out")
            .Attr("shape", infer_shape(x, shape))
            .Build()
            .InferAndTryRun()
            .RemoteBlobList()[0]
        )
    else:
        op_conf = op_conf_util.OperatorConf()
        setattr(
            op_conf,
            "name",
            name if name is not None else id_util.UniqueStr("DynamicReshape_"),
        )
        setattr(op_conf.dynamic_reshape_conf, "in", x.unique_name)
        op_conf.dynamic_reshape_conf.shape.dim.extend(list(shape))
        setattr(op_conf.dynamic_reshape_conf, "out", "out")
        interpret_util.Forward(op_conf)
        lbi = logical_blob_id_util.LogicalBlobId()
        lbi.op_name = op_conf.name
        lbi.blob_name = "out"
        return remote_blob_util.RemoteBlob(lbi)
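# Hedged usage sketch for reshape. Assumes the export flow.reshape and a
# default lazy global-function environment. With a static (non-dynamic)
# input, this takes the user-op "reshape" path above; a single -1 dimension
# is inferred, as validated by the asserts.
def _reshape_usage_sketch():
    import numpy as np
    import oneflow as flow
    import oneflow.typing as tp

    @flow.global_function()
    def reshape_job(
        x: tp.Numpy.Placeholder(shape=(4, 6), dtype=flow.float32)
    ) -> tp.Numpy:
        # -1 is inferred as 24 / 8 = 3.
        return flow.reshape(x, shape=(8, -1))

    out = reshape_job(np.arange(24, dtype=np.float32).reshape(4, 6))
    # Expected: out.shape == (8, 3)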
def decode_ofrecord(
    ofrecord_dir: str,
    blobs: Sequence[BlobConf],
    batch_size: int = 1,
    data_part_num: int = 1,
    part_name_prefix: str = "part-",
    part_name_suffix_length: int = -1,
    shuffle: bool = False,
    buffer_size: int = 1024,
    name: Optional[str] = None,
) -> Tuple[remote_blob_util.BlobDef, ...]:
    print(
        "WARNING:",
        "oneflow.data.decode_ofrecord is deprecated and does NOT work in eager mode, please use: \n",
        "    1) ofrecord = oneflow.data.ofrecord_reader(...) to read ofrecord; \n",
        "    2) image = oneflow.data.ofrecord_image_decoder(...) to decode image; \n",
        "    3) raw = oneflow.data.ofrecord_raw_decoder(...) to decode raw data like label; \n",
        traceback.format_stack()[-2],
    )
    assert not flow.eager_execution_enabled()

    if name is None:
        name = id_util.UniqueStr("Decode_")

    lbis = []
    op_conf = op_conf_util.OperatorConf()
    op_conf.name = name
    op_conf.decode_ofrecord_conf.data_dir = ofrecord_dir
    op_conf.decode_ofrecord_conf.data_part_num = data_part_num
    op_conf.decode_ofrecord_conf.batch_size = batch_size
    op_conf.decode_ofrecord_conf.part_name_prefix = part_name_prefix
    op_conf.decode_ofrecord_conf.part_name_suffix_length = part_name_suffix_length
    if shuffle:
        op_conf.decode_ofrecord_conf.random_shuffle_conf.buffer_size = buffer_size
    for blob_conf in blobs:
        op_conf.decode_ofrecord_conf.blob.extend([blob_conf.to_proto()])
        lbi = logical_blob_id_util.LogicalBlobId()
        lbi.op_name = name
        lbi.blob_name = blob_conf.name
        lbis.append(lbi)
    interpret_util.ConsistentForward(op_conf)
    return tuple(remote_blob_util.RemoteBlob(lbi) for lbi in lbis)
def distribute_clone(x, name=None):
    if name is None:
        name = id_util.UniqueStr("DistributeClone_")
    op_conf = op_conf_util.OperatorConf()
    op_conf.name = name
    setattr(op_conf.distribute_clone_conf, "in", x.unique_name)
    parallel_size = oneflow.placement.current_scope().parallel_size
    op_conf.distribute_clone_conf.out.extend(
        ["out_%d" % i for i in range(parallel_size)]
    )
    interpret_util.ConsistentForward(op_conf)
    ret = []
    for i in range(parallel_size):
        out = "out_%d" % i
        lbi = logical_blob_id_util.LogicalBlobId()
        lbi.op_name = op_conf.name
        lbi.blob_name = out
        ret.append(remote_blob_util.RemoteBlob(lbi))
    return tuple(ret)
def decode_random(
    shape: Sequence[int],
    dtype: flow.dtype,
    batch_size: int = 1,
    initializer: Optional[initializer_conf_util.InitializerConf] = None,
    tick: Optional[oneflow_api.BlobDesc] = None,
    name: Optional[str] = None,
) -> oneflow_api.BlobDesc:
    op_conf = op_conf_util.OperatorConf()

    if name is None:
        name = id_util.UniqueStr("DecodeRandom_")
    assert isinstance(name, str)
    op_conf.name = name

    assert isinstance(shape, (list, tuple))
    op_conf.decode_random_conf.shape.dim.extend(shape)

    assert dtype is not None
    setattr(
        op_conf.decode_random_conf,
        "data_type",
        oneflow_api.deprecated.GetProtoDtype4OfDtype(dtype),
    )

    op_conf.decode_random_conf.batch_size = batch_size

    if initializer is not None:
        op_conf.decode_random_conf.data_initializer.CopyFrom(initializer)
    else:
        op_conf.decode_random_conf.data_initializer.CopyFrom(
            flow.random_uniform_initializer()
        )

    if tick:
        op_conf.decode_random_conf.tick = tick.unique_name
    op_conf.decode_random_conf.out = "out"

    lbi = logical_blob_id_util.LogicalBlobId()
    lbi.op_name = op_conf.name
    lbi.blob_name = "out"

    interpret_util.ConsistentForward(op_conf)
    return remote_blob_util.RemoteBlob(lbi)
def elem_cnt(
    inputs: remote_blob_util.BlobDef,
    dtype: Optional[dtype_util.dtype] = None,
    name: Optional[str] = None,
) -> remote_blob_util.BlobDef:
    op_conf = op_conf_util.OperatorConf()
    setattr(
        op_conf, "name", name if name is not None else id_util.UniqueStr("ElemCnt_")
    )
    op_conf.shape_elem_cnt_conf.x = inputs.unique_name

    op_conf.shape_elem_cnt_conf.exclude_axis_conf.SetInParent()
    if dtype is not None:
        op_conf.shape_elem_cnt_conf.data_type = dtype.oneflow_proto_dtype
    op_conf.shape_elem_cnt_conf.y = "y"
    interpret_util.Forward(op_conf)

    out_lbi = logical_blob_id_util.LogicalBlobId()
    setattr(out_lbi, "op_name", op_conf.name)
    setattr(out_lbi, "blob_name", "y")
    return remote_blob_util.RemoteBlob(out_lbi)
def dynamic_reshape(
    x: remote_blob_util.BlobDef, shape: Sequence[int], name: Optional[str] = None
) -> remote_blob_util.BlobDef:
    assert isinstance(shape, (tuple, list))
    shape = list(shape)
    op_conf = op_conf_util.OperatorConf()
    setattr(
        op_conf,
        "name",
        name if name is not None else id_util.UniqueStr("DynamicReshape_"),
    )
    setattr(op_conf.dynamic_reshape_conf, "in", x.unique_name)
    op_conf.dynamic_reshape_conf.shape.dim.extend(list(shape))
    setattr(op_conf.dynamic_reshape_conf, "out", "out")
    interpret_util.Forward(op_conf)
    lbi = logical_blob_id_util.LogicalBlobId()
    lbi.op_name = op_conf.name
    lbi.blob_name = "out"
    return remote_blob_util.RemoteBlob(lbi)
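# Hedged usage sketch for dynamic_reshape. The export path
# flow.dynamic_reshape and the behavior of -1 inference at runtime are
# assumptions of this sketch. A mirrored-view job is used because the op
# exists precisely for blobs whose leading dimension is dynamic.
def _dynamic_reshape_usage_sketch():
    import numpy as np
    import oneflow as flow
    import oneflow.typing as tp

    func_config = flow.FunctionConfig()
    func_config.default_logical_view(flow.scope.mirrored_view())

    @flow.global_function(function_config=func_config)
    def dynamic_reshape_job(
        x: tp.ListNumpy.Placeholder(shape=(4, 6), dtype=flow.float32)
    ) -> tp.ListNumpy:
        # -1 is resolved at runtime from the actual (dynamic) element count.
        return flow.dynamic_reshape(x, shape=(-1, 3))

    out = dynamic_reshape_job([np.zeros((2, 6), dtype=np.float32)])
    # Expected: out[0].shape == (4, 3) for the (2, 6) runtime input.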
def distribute_split(x, axis=0, name=None):
    if name is None:
        name = id_util.UniqueStr("DistributeSplit_")
    op_conf = op_conf_util.OperatorConf()
    op_conf.name = name
    setattr(op_conf.distribute_split_conf, "in", x.unique_name)
    op_conf.distribute_split_conf.axis = axis
    parallel_size = oneflow.current_scope().device_parallel_desc_symbol.parallel_num
    op_conf.distribute_split_conf.out.extend(
        ["out_%d" % i for i in range(parallel_size)]
    )
    interpret_util.ConsistentForward(op_conf)
    ret = []
    for i in range(parallel_size):
        out = "out_%d" % i
        lbi = logical_blob_id_util.LogicalBlobId()
        lbi.op_name = op_conf.name
        lbi.blob_name = out
        ret.append(remote_blob_util.RemoteBlob(lbi))
    return tuple(ret)