def tensor_buffer_to_tensor(
    x: oneflow._oneflow_internal.BlobDesc,
    dtype: flow.dtype,
    instance_shape: Sequence[int],
    name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
    """This operator converts the Blob's type from TensorBuffer to Tensor.
    Some operators output data of type `TensorBuffer`; you can use this operator
    to convert it back to `Tensor`.

    Refer to `Concept Explanation <https://docs.oneflow.org/basics_topics/concept_explanation.html#3tensorbuffer-tensorlist>`_
    for more about TensorBuffer.

    Args:
        x (oneflow._oneflow_internal.BlobDesc): Input `Blob`.
        dtype (flow.dtype): The data dtype.
        instance_shape (Sequence[int]): The shape of each TensorBuffer instance.
        name (Optional[str], optional): The name for the operation. Defaults to None.

    Returns:
        oneflow._oneflow_internal.BlobDesc: A `Blob`.

    For example:

    .. code-block:: python

        import oneflow.compatible.single_client as flow
        import numpy as np
        import oneflow.compatible.single_client.typing as tp

        @flow.global_function()
        def tensor_buffer_to_tensor_Job(x: tp.Numpy.Placeholder(shape=(4, 16, 64, 64), dtype=flow.float32),
        ) -> tp.Numpy:
            x = flow.tensor_to_tensor_buffer(x, instance_dims=2)
            return flow.tensor_buffer_to_tensor(x, instance_shape=(64, 64), dtype=flow.float)

        x = np.random.randn(4, 16, 64, 64).astype(np.float32)
        out = tensor_buffer_to_tensor_Job(x)

        # out.shape (4, 16, 64, 64)

    """
    if name is None:
        name = id_util.UniqueStr("TensorBufferToTensor_")
    return (
        flow.user_op_builder(name)
        .Op("tensor_buffer_to_tensor")
        .Input("in", [x])
        .Output("out")
        .Attr("dtype", dtype)
        .Attr("instance_shape", instance_shape)
        .Build()
        .InferAndTryRun()
        .RemoteBlobList()[0]
    )
def __init__(self, op_type_name, op_name=None):
    if op_name is None:
        op_name = id_util.UniqueStr(op_type_name)
    self._builder = oneflow._oneflow_internal.one.OpBuilder(op_type_name, op_name)
    self._op = None
    self._op_type_name = op_type_name
def parallel_cast(input, name=None, distribute=None, gradient_distribute=None):
    if name is None:
        name = id_util.UniqueStr("ParallelCast_")

    def distribute_to_str(dist):
        dist_str = ""
        if dist is None:
            pass
        elif type(dist) is oneflow._oneflow_internal.distribute.SplitDistribute:
            dist_str = "S({})".format(dist.axis)
        elif type(dist) is oneflow._oneflow_internal.distribute.BroadcastDistribute:
            dist_str = "B"
        else:
            raise ValueError("unsupported distribute")
        return dist_str

    sbp_parallel = distribute_to_str(distribute)
    grad_sbp_parallel = distribute_to_str(gradient_distribute)
    op = (
        flow.user_op_builder(name)
        .Op("parallel_cast")
        .Input("in", [input])
        .Output("out")
        .Attr("sbp_parallel", sbp_parallel)
        .Attr("grad_sbp_parallel", grad_sbp_parallel)
        .Build()
    )
    return op.InferAndTryRun().SoleOutputBlob()
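# Usage sketch (not part of the original module; the placement string and blob
# name are illustrative). `parallel_cast` re-labels how a blob is distributed
# across devices inside a lazy-mode job, e.g. split along axis 0 in the forward
# pass while restoring gradients as broadcast:
#
#     with flow.scope.placement("gpu", "0:0-3"):
#         y = parallel_cast(
#             x,
#             distribute=flow.distribute.split(0),
#             gradient_distribute=flow.distribute.broadcast(),
#         )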
def hierarchical_parallel_cast(input, nd_sbp, grad_mode, grad_nd_sbp, name):
    if name is None:
        name = id_util.UniqueStr("HierarchicalParallelCast_")

    def distribute_to_str(dist):
        if dist is None:
            return ""
        elif type(dist) is str:
            return dist
        elif type(dist) is oneflow._oneflow_internal.distribute.SplitDistribute:
            return "S({})".format(dist.axis)
        elif type(dist) is oneflow._oneflow_internal.distribute.BroadcastDistribute:
            return "B"
        else:
            raise ValueError("unsupported distribute")

    op = (
        flow.user_op_builder(name)
        .Op("hierarchical_parallel_cast")
        .Input("in", [input])
        .Output("out")
        .Attr("nd_sbp", list(map(distribute_to_str, nd_sbp)))
        .Attr("grad_mode", grad_mode or "restore")
        .Attr(
            "grad_nd_sbp",
            list(map(distribute_to_str, grad_nd_sbp)) if grad_nd_sbp else [],
        )
        .Build()
    )
    return op.InferAndTryRun().SoleOutputBlob()
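# Usage sketch (illustrative, not from the original source). With a 2-D device
# hierarchy, `nd_sbp` takes one entry per hierarchy level; string forms such as
# "S(0)" and "B" are accepted directly by `distribute_to_str` above. Every
# parameter here is required, so `grad_nd_sbp` and `name` must be passed
# explicitly:
#
#     y = hierarchical_parallel_cast(
#         x, nd_sbp=["S(0)", "B"], grad_mode="restore", grad_nd_sbp=[], name=None
#     )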
def decode_random(
    shape: Sequence[int],
    dtype: flow.dtype,
    batch_size: int = 1,
    initializer: Optional[initializer_conf_util.InitializerConf] = None,
    tick: Optional[oneflow._oneflow_internal.BlobDesc] = None,
    name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
    op_conf = op_conf_util.OperatorConf()
    if name is None:
        name = id_util.UniqueStr("DecodeRandom_")
    assert isinstance(name, str)
    op_conf.name = name
    assert isinstance(shape, (list, tuple))
    op_conf.decode_random_conf.shape.dim.extend(shape)
    assert dtype is not None
    setattr(
        op_conf.decode_random_conf,
        "data_type",
        oneflow._oneflow_internal.deprecated.GetProtoDtype4OfDtype(dtype),
    )
    op_conf.decode_random_conf.batch_size = batch_size
    if initializer is not None:
        op_conf.decode_random_conf.data_initializer.CopyFrom(initializer)
    else:
        op_conf.decode_random_conf.data_initializer.CopyFrom(
            flow.random_uniform_initializer()
        )
    if tick:
        op_conf.decode_random_conf.tick = tick.unique_name
    op_conf.decode_random_conf.out = "out"
    lbi = logical_blob_id_util.LogicalBlobId()
    lbi.op_name = op_conf.name
    lbi.blob_name = "out"
    interpret_util.ConsistentForward(op_conf)
    return remote_blob_util.RemoteBlob(lbi)
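# Usage sketch (illustrative). `decode_random` is a source op: it takes no
# input blob and emits `batch_size` random instances of `shape` per step,
# assuming the batch size becomes the leading output dimension:
#
#     @flow.global_function()
#     def decode_random_job() -> tp.Numpy:
#         return decode_random(shape=(4, 3), dtype=flow.float32, batch_size=8)
#
#     out = decode_random_job()  # expected out.shape: (8, 4, 3)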
def _GetCpu0VariableBlobFromNumpy(
    np_array: np.ndarray, dtype: flow.dtype
) -> oneflow._oneflow_internal.EagerConsistentBlob:
    """
    Add a variable on cpu 0, and feed the value of `np_array`

    Note: dtype argument cannot be eliminated by
    convert_numpy_dtype_to_oneflow_dtype(np_array.dtype), because
    np.int8 == np.char and
    numpy_dtype_to_oneflow_dtype(oneflow_dtype_to_numpy_dtype(flow.int8))
    may be flow.char
    """
    with flow.scope.placement("cpu", "0:0"):
        op_name = id_util.UniqueStr(OP_PREFIX)
        op_conf = get_variable.GenerateVariableOpConf(
            name=op_name,
            shape=np_array.shape,
            dtype=dtype,
            initializer=initializer_util.zeros_initializer(dtype=dtype),
            trainable=False,
        )
        current_parallel_desc_sym = flow.current_scope().device_parallel_desc_symbol
        device_tag = current_parallel_desc_sym.device_tag
        op_conf.device_tag = device_tag
        op_attribute = op_infer_util.Infer(op_conf, {})
        var_blob = get_variable.CreateEagerVariableBlob(
            op_attribute, job_name=FAKE_JOB_NAME
        )
        interface_op_read_and_write.FeedValueToInterfaceBlobObject(
            var_blob.blob_object, np_array
        )
        return var_blob
def replication_pad2d(
    x: oneflow._oneflow_internal.BlobDesc,
    padding: Union[int, tuple, list],
    name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
    """Pads the input tensor using replication of the input boundary.

    Args:
        x (oneflow._oneflow_internal.BlobDesc): input blob, only supports "NCHW" format.
        padding (Union[int, tuple, list]): The size of the padding.
            If it is an int, the same padding is used in all dimensions;
            if it is a 4-element tuple or list, it is interpreted as
            :math:`(\\text{padding\\_left}, \\text{padding\\_right}, \\text{padding\\_top}, \\text{padding\\_bottom})`.
        name (Optional[str], optional): The name for the operation. Defaults to None.

    Returns:
        oneflow._oneflow_internal.BlobDesc: The padded Blob.

    For example:

    .. code-block:: python

        import oneflow.compatible.single_client as flow
        import oneflow.compatible.single_client.typing as tp
        import numpy as np

        @flow.global_function()
        def pad_Job(x: tp.Numpy.Placeholder((1, 2, 3, 3))
        ) -> tp.Numpy:
            return flow.replication_pad2d(x, padding=[2, 2, 1, 1])

        x = np.arange(18).reshape((1, 2, 3, 3)).astype(np.float32)
        out = pad_Job(x)

        # out [[[[ 0.  0.  0.  1.  2.  2.  2.]
        #        [ 0.  0.  0.  1.  2.  2.  2.]
        #        [ 3.  3.  3.  4.  5.  5.  5.]
        #        [ 6.  6.  6.  7.  8.  8.  8.]
        #        [ 6.  6.  6.  7.  8.  8.  8.]]
        #
        #       [[ 9.  9.  9. 10. 11. 11. 11.]
        #        [ 9.  9.  9. 10. 11. 11. 11.]
        #        [12. 12. 12. 13. 14. 14. 14.]
        #        [15. 15. 15. 16. 17. 17. 17.]
        #        [15. 15. 15. 16. 17. 17. 17.]]]]

    """
    (H, W) = (x.shape[2], x.shape[3])
    if isinstance(padding, (tuple, list)):
        assert len(padding) == len(x.shape), ValueError(
            "padding boundary must be the same size as input dims"
        )
        boundary = [padding[0], padding[1], padding[2], padding[3]]
    elif isinstance(padding, int):
        boundary = [padding, padding, padding, padding]
    else:
        raise ValueError("padding must be int or list or tuple!")
    return (
        flow.user_op_builder(
            name if name is not None else id_util.UniqueStr("Replication_Pad2d_")
        )
        .Op("replication_pad2d")
        .Input("x", [x])
        .Output("y")
        .Attr("padding", list(boundary))
        .Build()
        .InferAndTryRun()
        .RemoteBlobList()[0]
    )
def distributed_partial_fc_sample(
    weight: oneflow._oneflow_internal.BlobDesc,
    label: oneflow._oneflow_internal.BlobDesc,
    num_sample: int,
    name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
    parallel_num = flow.current_scope().device_parallel_desc_symbol.parallel_num
    assert num_sample % parallel_num == 0
    assert weight.shape[0] % parallel_num == 0
    return (
        flow.user_op_builder(
            name
            if name is not None
            else id_util.UniqueStr("DistributedPartialFcSample_")
        )
        .Op("distributed_partial_fc_sample")
        .Input("weight", [weight])
        .Input("label", [label])
        .Attr("num_sample", num_sample)
        .Output("mapped_label")
        .Output("sampled_label")
        .Output("sampled_weight")
        .Build()
        .InferAndTryRun()
        .RemoteBlobList()
    )
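# Usage sketch (illustrative; the blob names are assumptions). Samples
# `num_sample` class centers per step for partial-FC style training; per the
# asserts above, both `num_sample` and the class dimension of `weight` must
# divide evenly by the parallel number. Outputs follow the declared order:
#
#     (mapped_label, sampled_label, sampled_weight) = distributed_partial_fc_sample(
#         weight=fc_weight,   # (num_classes, embedding_size) variable
#         label=labels,       # (batch_size,) integer class labels
#         num_sample=num_sample,
#     )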
def BuildAssignInstruction(builder):
    op_conf = op_conf_pb.OperatorConf()
    device_tag = flow.current_scope().device_parallel_desc_symbol.device_tag
    op_conf.device_tag = device_tag
    op_name = id_util.UniqueStr(OP_PREFIX)
    op_conf.name = op_name
    op_conf.user_conf.op_type_name = "logical_slice_assign"
    op_conf.user_conf.input["value"].s.append("{}/value_0".format(op_name))
    op_conf.user_conf.input["ref"].s.append("{}/ref_0".format(op_name))
    parallel_conf = ref_blob_object.parallel_desc_symbol.parallel_conf
    op_conf.user_conf.attr["parallel_conf"].at_string = str(parallel_conf)
    op_conf.user_conf.attr["start"].at_list_int64.val[:] = start
    op_conf.user_conf.attr["stop"].at_list_int64.val[:] = stop
    op_conf.user_conf.attr["step"].at_list_int64.val[:] = [1] * len(start)
    bn_in_op2blob_object = oneflow._oneflow_internal.deprecated.BnInOp2BlobObject()
    bn_in_op2blob_object["ref_0"] = ref_blob_object
    bn_in_op2blob_object["value_0"] = value_blob_object
    op_attribute = op_infer_util.Infer(op_conf, bn_in_op2blob_object, scope_symbol_id)
    cfg_op_attribute = oneflow._oneflow_internal.deprecated.MakeOpAttributeByString(
        str(op_attribute)
    )
    builder.StatelessCall(
        cfg_op_attribute, parallel_conf, bn_in_op2blob_object, boxing_util.BoxingTo
    )
def square_sum(
    x: oneflow._oneflow_internal.BlobDesc, name: Optional[str] = None
) -> oneflow._oneflow_internal.BlobDesc:
    return (
        flow.user_op_builder(
            name if name is not None else id_util.UniqueStr("SquareSum_")
        )
        .Op("square_sum")
        .Input("x", [x])
        .Output("y")
        .Build()
        .InferAndTryRun()
        .RemoteBlobList()[0]
    )
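# Usage sketch (illustrative). `square_sum` reduces a blob to a single scalar,
# sum(x * x), which is useful for gradient-norm style bookkeeping:
#
#     y = square_sum(x)  # scalar blob holding the sum of squares of x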
def count_not_finite(
    x: oneflow._oneflow_internal.BlobDesc, name: Optional[str] = None
) -> oneflow._oneflow_internal.BlobDesc:
    return (
        flow.user_op_builder(
            name if name is not None else id_util.UniqueStr("CountNotFinite_")
        )
        .Op("count_not_finite")
        .Input("x", [x])
        .Output("y")
        .Build()
        .InferAndTryRun()
        .RemoteBlobList()[0]
    )
def _SystemAssignOpConf(ref, value, name=None):
    if name is None:
        name = id_util.UniqueStr("Assign_")
    op_conf = op_conf_util.OperatorConf()
    op_conf.name = name
    op_conf.assign_conf.ref = ref.unique_name
    op_conf.assign_conf.value = value.unique_name
    return op_conf
def _LogicalSlice(
    input_blob_object: oneflow._oneflow_internal.BlobObject,
    start: Sequence[int],
    stop: Sequence[int],
    scope_symbol_id: int,
) -> np.ndarray:
    """
    Construct a logical_slice op and run it by oneflow eager,
    return the sliced result as a numpy ndarray
    """
    op_name = id_util.UniqueStr(OP_PREFIX)

    def AsyncSlice(Yield):
        def build(builder):
            op_conf = op_conf_pb.OperatorConf()
            device_tag = flow.current_scope().device_parallel_desc_symbol.device_tag
            op_conf.device_tag = device_tag
            op_conf.name = op_name
            op_conf.user_conf.op_type_name = "logical_slice"
            op_conf.user_conf.input["x"].s.append("{}/x_0".format(op_name))
            op_conf.user_conf.output["y"].s.append("{}/y_0".format(op_name))
            parallel_conf = input_blob_object.parallel_desc_symbol.parallel_conf
            op_conf.user_conf.attr["parallel_conf"].at_string = str(parallel_conf)
            op_conf.user_conf.attr["start"].at_list_int64.val[:] = start
            op_conf.user_conf.attr["stop"].at_list_int64.val[:] = stop
            op_conf.user_conf.attr["step"].at_list_int64.val[:] = [1] * len(start)
            bn_in_op2blob_object = (
                oneflow._oneflow_internal.deprecated.BnInOp2BlobObject()
            )
            bn_in_op2blob_object["x_0"] = input_blob_object
            op_attribute = op_infer_util.Infer(
                op_conf, bn_in_op2blob_object, scope_symbol_id
            )
            cfg_op_attribute = oneflow._oneflow_internal.deprecated.MakeOpAttributeByString(
                str(op_attribute)
            )
            builder.StatelessCall(
                cfg_op_attribute,
                parallel_conf,
                bn_in_op2blob_object,
                boxing_util.BoxingTo,
            )
            Yield(bn_in_op2blob_object["y_0"])

        oneflow._oneflow_internal.deprecated.LogicalRun(build)

    lbi = lbi_util.LogicalBlobId()
    lbi.set_op_name(op_name)
    lbi.set_blob_name(op_name)
    blob_object = async_util.Await(1, AsyncSlice)[0]
    blob = oneflow._oneflow_internal.EagerConsistentBlob(
        lbi,
        blob_object=blob_object,
        blob_register=blob_register,
        job_name=FAKE_JOB_NAME,
    )
    return blob.numpy()
def multi_count_not_finite(
    x: Optional[Sequence[oneflow._oneflow_internal.BlobDesc]] = None,
    name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
    return (
        flow.user_op_builder(
            name if name is not None else id_util.UniqueStr("MultiCountNotFinite_")
        )
        .Op("multi_count_not_finite")
        .Input("x", x)
        .Output("y")
        .Build()
        .InferAndTryRun()
        .RemoteBlobList()[0]
    )
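# Usage sketch (illustrative). `count_not_finite` above counts the non-finite
# (NaN/Inf) elements of a single blob; `multi_count_not_finite` folds the same
# count over a list of blobs into one scalar:
#
#     n_bad = count_not_finite(x)                     # scalar count for one blob
#     n_bad_total = multi_count_not_finite([x1, x2])  # scalar count over several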
def indexed_slices_reduce_sum(
    indices: input_blob_util.ArgBlobDef,
    values: input_blob_util.ArgBlobDef,
    name: Optional[str] = None,
) -> Tuple[oneflow._oneflow_internal.BlobDesc]:
    op = (
        flow.user_op_builder(
            name if name is not None else id_util.UniqueStr("IndexedSlicesReduceSum_")
        )
        .Op("indexed_slices_reduce_sum")
        .Input("x_indices", [indices])
        .Input("x_values", [values])
        .Output("y_indices")
        .Output("y_values")
        .Output("num_unique")
        .Build()
    )
    return op.InferAndTryRun().RemoteBlobList()
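# Usage sketch (illustrative). Merges duplicate rows of a sparse
# (indices, values) pair by summing values that share an index, as in sparse
# gradient aggregation; the outputs follow the declared order above:
#
#     y_indices, y_values, num_unique = indexed_slices_reduce_sum(indices, values)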
def eager_nccl_all_reduce(
    x: oneflow._oneflow_internal.BlobDesc,
    parallel_conf: str,
    name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
    return (
        flow.user_op_builder(
            name if name is not None else id_util.UniqueStr("EagerNcclAllReduce_")
        )
        .Op("eager_nccl_all_reduce")
        .Input("in", [x])
        .Output("out")
        .Attr("parallel_conf", parallel_conf)
        .Build()
        .InferAndTryRun()
        .RemoteBlobList()[0]
    )
def _MakeInputOpConfAndRetLbi(arg_blob_def):
    assert isinstance(arg_blob_def, input_blob_def.ArgBlobDef)
    op_conf = op_conf_util.OperatorConf()
    op_conf.name = id_util.UniqueStr("Input_")
    op_conf.input_conf.out = "out"
    op_conf.input_conf.blob_conf.CopyFrom(arg_blob_def.ToInterfaceBlobConf())
    lbi = logical_blob_id_util.LogicalBlobId()
    lbi.op_name = op_conf.name
    lbi.blob_name = op_conf.input_conf.out
    return (op_conf, lbi)
def flush_summary_writer(name=None):
    """Flush the summary writer

    Args:
        name: This operator's name
    """
    if name is None:
        name = id_util.UniqueStr("FlushWriter_")
    flow.user_op_builder(name).Op("flush_summary_writer").Build().InferAndTryRun()
def smooth_l1_loss(
    prediction: oneflow._oneflow_internal.BlobDesc,
    label: oneflow._oneflow_internal.BlobDesc,
    beta: float = 1.0,
    name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
    """This operator computes the smooth l1 loss.

    The equation is:

    .. math::

        & out = \\frac{(\\beta*x)^2}{2}, \\left|x\\right|<\\frac{1}{{\\beta}^2}

        & out = \\left|x\\right|-\\frac{0.5}{{\\beta}^2}, otherwise

    where :math:`x` is the elementwise difference between `prediction` and `label`.

    Args:
        prediction (oneflow._oneflow_internal.BlobDesc): The prediction Blob
        label (oneflow._oneflow_internal.BlobDesc): The label Blob
        beta (float, optional): The :math:`\\beta` in the equation. Defaults to 1.0.
        name (Optional[str], optional): The name for the operation. Defaults to None.

    Returns:
        oneflow._oneflow_internal.BlobDesc: The result Blob

    For example:

    .. code-block:: python

        import oneflow.compatible.single_client as flow
        import numpy as np
        import oneflow.compatible.single_client.typing as tp

        @flow.global_function()
        def smooth_l1_loss_Job(prediction: tp.Numpy.Placeholder((5, )),
                               label: tp.Numpy.Placeholder((5, ))
        ) -> tp.Numpy:
            return flow.smooth_l1_loss(prediction=prediction,
                                       label=label)

        prediction = np.array([0.1, 0.4, 0.3, 0.5, 0.9]).astype(np.float32)
        label = np.array([0.3, 0.9, 2.5, 0.4, 0.3]).astype(np.float32)
        out = smooth_l1_loss_Job(prediction, label)

        # out [0.02       0.12499999 1.7        0.005      0.17999998]

    """
    op = (
        flow.user_op_builder(
            name if name is not None else id_util.UniqueStr("SmoothL1Loss_")
        )
        .Op("smooth_l1_loss")
        .Input("input", [prediction])
        .Input("target", [label])
        .Output("out")
    )
    op.Attr("beta", float(beta))
    return op.Build().InferAndTryRun().RemoteBlobList()[0]
def assign(ref, value, dtype=None, name=None):
    if name is None:
        name = id_util.UniqueStr("Assign_")
    op = (
        flow.consistent_user_op_builder(name)
        .Op("assign")
        .Input("ref", [ref])
        .Input("value", [value])
        .Build()
    )
    op.InferAndTryRun()
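# Usage sketch (illustrative; the variable name is an assumption). `assign`
# writes `value` into an existing mutable blob such as a variable, in place,
# and returns nothing:
#
#     v = flow.get_variable(
#         "v", shape=(2, 3), dtype=flow.float32,
#         initializer=flow.zeros_initializer(),
#     )
#     assign(v, new_value)  # v now holds the contents of new_value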
def create_summary_writer(logdir, name=None):
    """Create a summary writer object

    Args:
        logdir: log dir
        name: This operator's name
    """
    if name is None:
        name = id_util.UniqueStr("CreateWriter_")
    flow.user_op_builder(name).Op("create_summary_writer").Attr(
        "logdir", logdir
    ).Build().InferAndTryRun()
def _sort_at_last_dim(
    input: oneflow._oneflow_internal.BlobDesc,
    direction: str = "ASCENDING",
    name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
    assert direction in ["ASCENDING", "DESCENDING"]
    return (
        flow.user_op_builder(name if name is not None else id_util.UniqueStr("Sort_"))
        .Op("sort")
        .Input("in", [input])
        .Output("out")
        .Attr("direction", direction)
        .Build()
        .InferAndTryRun()
        .RemoteBlobList()[0]
    )
def forward(self, x: oneflow._oneflow_internal.BlobDesc):
    if self.call_seq_no == 0:
        name = self.module_name
    else:
        name = id_util.UniqueStr("Bernoulli_")
    return (
        self.op_module_builder.OpName(name)
        .Input("in", [x])
        .Build()
        .InferAndTryRun()
        .SoleOutputBlob()
    )
def acc(one, max_acc_num, name=None):
    assert not flow.eager_execution_enabled()
    return (
        flow.user_op_builder(name if name is not None else id_util.UniqueStr("Acc_"))
        .Op("acc")
        .Input("in", [one])
        .Output("out")
        .Attr("max_acc_num", max_acc_num)
        .Build()
        .InferAndTryRun()
        .RemoteBlobList()[0]
    )
def unpack(input, unpack_num, name=None):
    assert not flow.eager_execution_enabled()
    return (
        flow.user_op_builder(
            name if name is not None else id_util.UniqueStr("Unpack_")
        )
        .Op("unpack")
        .Input("in", [input])
        .Output("out")
        .Attr("unpack_num", unpack_num)
        .Build()
        .InferAndTryRun()
        .RemoteBlobList()[0]
    )
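# Usage sketch (illustrative) for the `acc` / `unpack` pair in lazy-mode,
# gradient-accumulation style pipelines. `unpack` yields a per-step slice of a
# large batch over `unpack_num` successive steps, while `acc` sums a blob over
# `max_acc_num` steps before emitting it; both require lazy (non-eager) mode:
#
#     piece = unpack(big_batch, unpack_num=4)  # one quarter of the batch per step
#     total = acc(step_loss, max_acc_num=4)    # loss accumulated across 4 steps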
def write_pb(value, step=None, name=None):
    """Write raw protobuf data to log file

    Args:
        value: A 'Blob' with dtype 'flow.int8'
        step: A 'Blob' with one value and dtype 'flow.int64'
        name: This operator's name
    """
    if name is None:
        name = id_util.UniqueStr("WritePb_")
    flow.user_op_builder(name).Op("summary_write_pb").Input("in", [value]).Input(
        "step", [step]
    ).Build().InferAndTryRun()
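# Usage sketch (illustrative; the log directory and blob names are
# assumptions) for the summary-writer lifecycle defined by
# `create_summary_writer`, `write_pb`, and `flush_summary_writer` above:
#
#     create_summary_writer("/tmp/oneflow_summary")
#     write_pb(serialized_pb_blob, step=step_blob)
#     flush_summary_writer()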
def gen_tensor_buffer(
    shape: Sequence[int],
    shape_list: Sequence[Sequence[int]],
    value_list: Sequence[float],
    data_type: Optional[flow.dtype] = flow.float32,
    dynamic_out: Optional[bool] = False,
    name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
    """This operator generates a tensor buffer blob.

    Args:
        shape (Sequence[int]): shape of output blob
        shape_list (Sequence[Sequence[int]]): shapes for tensor buffer in output blob
        value_list (Sequence[float]): values for tensor buffer in output blob
        data_type (Optional[flow.dtype]): data type for tensor buffer in output blob
        dynamic_out (Optional[bool]): if output is a dynamic blob
        name (Optional[str]): The name for the operation. Defaults to None.

    Returns:
        BlobDesc: The result Blob.

    For example:

    .. code-block:: python

        import oneflow.compatible.single_client as flow

        func_config = flow.FunctionConfig()

        @flow.global_function(function_config=func_config)
        def GenTensorBufferJob():
            with flow.scope.placement("cpu", "0:0"):
                x = flow.gen_tensor_buffer((2,), [(2, 1), (1, 2)], [0.0, 1.0])
                y = flow.tensor_buffer_to_list_of_tensors(x, (100, 100), flow.float, True)
                return y

        # y_0.shape (2, 1), y_1.shape (1, 2)

    """
    return (
        flow.user_op_builder(
            name if name is not None else id_util.UniqueStr("GenTensorBuffer_")
        )
        .Op("gen_tensor_buffer")
        .Output("out")
        .Attr("shape", shape)
        .Attr("shape_list", shape_list)
        .Attr("value_list", value_list)
        .Attr("data_type", data_type)
        .Attr("dynamic_out", dynamic_out)
        .Build()
        .InferAndTryRun()
        .RemoteBlobList()[0]
    )
def zero_pad2d(
    x: oneflow._oneflow_internal.BlobDesc,
    padding: Union[int, tuple, list],
    name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
    """Pads the input tensor using zeros.

    Args:
        x (oneflow._oneflow_internal.BlobDesc): input blob, only supports "NCHW" format.
        padding (Union[int, tuple, list]): The size of the padding.
            If it is an int, the same padding is used in all dimensions;
            if it is a 4-element tuple or list, it is interpreted as
            :math:`(\\text{padding\\_left}, \\text{padding\\_right}, \\text{padding\\_top}, \\text{padding\\_bottom})`.
        name (Optional[str], optional): The name for the operation. Defaults to None.

    Returns:
        oneflow._oneflow_internal.BlobDesc: The padded Blob.

    For example:

    .. code-block:: python

        import oneflow.compatible.single_client as flow
        import oneflow.compatible.single_client.typing as tp
        import numpy as np

        @flow.global_function()
        def pad_Job(x: tp.Numpy.Placeholder((1, 2, 3, 3))
        ) -> tp.Numpy:
            return flow.zero_pad2d(x, padding=[2, 2, 1, 1])

        x = np.arange(18).reshape((1, 2, 3, 3)).astype(np.float32)
        out = pad_Job(x)

        # out [[[[ 0.  0.  0.  0.  0.  0.  0.]
        #        [ 0.  0.  0.  1.  2.  0.  0.]
        #        [ 0.  0.  3.  4.  5.  0.  0.]
        #        [ 0.  0.  6.  7.  8.  0.  0.]
        #        [ 0.  0.  0.  0.  0.  0.  0.]]
        #
        #       [[ 0.  0.  0.  0.  0.  0.  0.]
        #        [ 0.  0.  9. 10. 11.  0.  0.]
        #        [ 0.  0. 12. 13. 14.  0.  0.]
        #        [ 0.  0. 15. 16. 17.  0.  0.]
        #        [ 0.  0.  0.  0.  0.  0.  0.]]]]

    """
    if name is None:
        name = id_util.UniqueStr("Zero_Pad2d_")
    return constant_pad2d(x, padding, 0.0, name)
def sort(
    input: oneflow._oneflow_internal.BlobDesc,
    axis: int = -1,
    direction: str = "ASCENDING",
    name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
    """This operator sorts the input Blob at specified axis.

    Args:
        input (oneflow._oneflow_internal.BlobDesc): A Blob
        axis (int, optional): dimension to be sorted. Defaults to the last dim (-1)
        direction (str, optional): The direction in which to sort the Blob values.
            If the direction is "ASCENDING", the input will be sorted in ascending
            order; otherwise it will be sorted in descending order.
            Defaults to "ASCENDING".
        name (Optional[str], optional): The name for the operation. Defaults to None.

    Returns:
        oneflow._oneflow_internal.BlobDesc: The sorted Blob

    For example:

    .. code-block:: python

        import oneflow.compatible.single_client as flow
        import numpy as np
        import oneflow.compatible.single_client.typing as tp

        @flow.global_function()
        def sort_Job(x: tp.Numpy.Placeholder((5, ))
        ) -> tp.Numpy:
            return flow.sort(input=x)

        x = np.array([10, 2, 9, 3, 7]).astype("float32")
        out = sort_Job(x)

        # out [ 2.  3.  7.  9. 10.]

    """
    assert direction in ["ASCENDING", "DESCENDING"]
    name = name if name is not None else id_util.UniqueStr("Sort_")
    num_axes = len(input.shape)
    axis = axis if axis >= 0 else axis + num_axes
    assert 0 <= axis < num_axes, "axis out of range"
    if axis == num_axes - 1:
        return _sort_at_last_dim(input, direction, name)
    else:
        perm = get_perm_when_transpose_axis_to_last_dim(num_axes, axis)
        x = flow.transpose(input, perm, False, True, name + "_transpose")
        x = _sort_at_last_dim(x, direction, name)
        return flow.transpose(
            x, get_inversed_perm(perm), False, True, name + "_inverse_transpose"
        )
def image_decoder_random_crop_resize(
    input_blob: oneflow._oneflow_internal.BlobDesc,
    target_width: int,
    target_height: int,
    num_attempts: Optional[int] = None,
    seed: Optional[int] = None,
    random_area: Optional[Sequence[float]] = None,
    random_aspect_ratio: Optional[Sequence[float]] = None,
    num_workers: Optional[int] = None,
    warmup_size: Optional[int] = None,
    max_num_pixels: Optional[int] = None,
    name: Optional[str] = None,
) -> Tuple[oneflow._oneflow_internal.BlobDesc]:
    if name is None:
        name = id_util.UniqueStr("ImageDecoderRandomCropResize_")
    op_conf = op_conf_util.OperatorConf()
    op_conf.name = name
    setattr(
        op_conf.image_decoder_random_crop_resize_conf, "in", input_blob.unique_name
    )
    op_conf.image_decoder_random_crop_resize_conf.out = "out"
    op_conf.image_decoder_random_crop_resize_conf.target_width = target_width
    op_conf.image_decoder_random_crop_resize_conf.target_height = target_height
    if num_attempts is not None:
        op_conf.image_decoder_random_crop_resize_conf.num_attempts = num_attempts
    if seed is not None:
        op_conf.image_decoder_random_crop_resize_conf.seed = seed
    if random_area is not None:
        assert len(random_area) == 2
        op_conf.image_decoder_random_crop_resize_conf.random_area_min = random_area[0]
        op_conf.image_decoder_random_crop_resize_conf.random_area_max = random_area[1]
    if random_aspect_ratio is not None:
        assert len(random_aspect_ratio) == 2
        op_conf.image_decoder_random_crop_resize_conf.random_aspect_ratio_min = (
            random_aspect_ratio[0]
        )
        op_conf.image_decoder_random_crop_resize_conf.random_aspect_ratio_max = (
            random_aspect_ratio[1]
        )
    if num_workers is not None:
        op_conf.image_decoder_random_crop_resize_conf.num_workers = num_workers
    if warmup_size is not None:
        op_conf.image_decoder_random_crop_resize_conf.warmup_size = warmup_size
    if max_num_pixels is not None:
        op_conf.image_decoder_random_crop_resize_conf.max_num_pixels = max_num_pixels
    interpret_util.Forward(op_conf)
    lbi = logical_blob_id_util.LogicalBlobId()
    lbi.op_name = op_conf.name
    lbi.blob_name = "out"
    return remote_blob_util.RemoteBlob(lbi)