def api_nccl_fusion_all_reduce_use_buffer(val: bool) -> None:
    r"""Whether or not to use a buffer during the nccl fusion process.

    Args:
        val (bool): True or False
    """
    return enable_if.unique([nccl_fusion_all_reduce_use_buffer, do_nothing])(val)

def api_reserved_device_mem_mbyte(val: int) -> None:
    r"""Set up the size of reserved device memory.

    Args:
        val (int): memory size in megabytes, e.g. 1024 (MB)
    """
    return enable_if.unique([reserved_device_mem_mbyte, do_nothing])(val)

def api_thread_enable_local_message_queue(val: bool) -> None:
    """Whether or not to enable threads to use a local message queue.

    Args:
        val (bool): True or False
    """
    return enable_if.unique([thread_enable_local_message_queue, do_nothing])(val)

def api_numa_aware_cuda_malloc_host(val: bool = True) -> None:
    r"""Whether or not to make CUDA host memory allocation NUMA-aware.

    Args:
        val (bool, optional): True or False. Defaults to True.
    """
    return enable_if.unique([enable_numa_aware_cuda_malloc_host, do_nothing])(val)

def api_rdma_mem_block_mbyte(val: int) -> None:
    r"""Set up the memory block size in RDMA mode.

    Args:
        val (int): block size in megabytes, e.g. 1024 (MB)
    """
    return enable_if.unique([rdma_mem_block_mbyte, do_nothing])(val)

def api_distribute_concat(
    xs: Sequence[oneflow._oneflow_internal.BlobDesc],
    axis: int = 0,
    name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
    func = enable_if.unique([distribute_concat])
    return func(xs, axis=axis, name=name)

def api_cpu_device_num(val: int) -> None:
    r"""Set the number of CPUs on each machine to run oneflow on. Usually you don't need to set this.

    Args:
        val (int): number of CPUs. It is identical on every machine.
    """
    return enable_if.unique([cpu_device_num, do_nothing])(val)

def api_enable_mem_chain_merge(val: bool = True) -> None:
    r"""Whether or not to enable MemChain merge.

    Args:
        val (bool, optional): True or False. Defaults to True.
    """
    return enable_if.unique([enable_mem_chain_merge, do_nothing])(val=val)

def api_nccl_use_compute_stream(val: bool = False) -> None:
    r"""Whether or not nccl uses the compute stream, to reuse nccl memory and speed up.

    Args:
        val (bool, optional): True or False. Defaults to False.
    """
    return enable_if.unique([nccl_use_compute_stream, do_nothing])(val=val)

def api_collect_act_event(val: bool = True) -> None:
    r"""Whether or not to collect act events.

    Args:
        val (bool, optional): True or False. Defaults to True.
    """
    return enable_if.unique([collect_act_event, do_nothing])(val=val)

def api_enable_fusion(val: bool = True) -> None:
    r"""Whether or not to allow fusion of operators.

    Args:
        val (bool, optional): True or False. Defaults to True.
    """
    return enable_if.unique([enable_fusion, do_nothing])(val=val)

def api_enable_model_io_v2(val):
    r"""Whether or not to use version 2 of the model input/output function.

    Args:
        val (bool): True or False
    """
    return enable_if.unique([enable_model_io_v2, do_nothing])(val)

def api_enable_legacy_model_io(val: bool = True):
    r"""Whether or not to use legacy model IO.

    Args:
        val (bool, optional): True or False. Defaults to True.
    """
    return enable_if.unique([enable_legacy_model_io, do_nothing])(val)

def api_persistence_buf_byte(val: int) -> None:
    r"""Set up the buffer size for persistence, in bytes.

    Args:
        val (int): buffer size, e.g. 1024 (bytes)
    """
    return enable_if.unique([persistence_buf_byte, do_nothing])(val)

def api_nccl_enable_mixed_fusion(val: bool) -> None:
    r"""Whether or not to use nccl mixed fusion.

    Args:
        val (bool): True or False
    """
    return enable_if.unique([nccl_enable_mixed_fusion, do_nothing])(val)

def api_nccl_num_streams(val: int) -> None:
    r"""Set up the number of parallel nccl streams used for boxing.

    Args:
        val (int): number of streams
    """
    return enable_if.unique([nccl_num_streams, do_nothing])(val)

def api_oneflow_function(
    type: str = "predict", function_config: FunctionConfig = None,
) -> Callable[[Callable], Callable]:
    r"""Creates a callable OneFlow global function from a Python function.

    For instance::

        @oneflow.global_function(flow.FunctionConfig())
        def train():
            # your model

    Args:
        function_config (FunctionConfig, optional): a `FunctionConfig` object. Defaults to FunctionConfig().

    Returns:
        Callable[[Callable], Callable]: a callable which is called to execute the compiled function
    """
    if isinstance(type, FunctionConfig):
        function_config = type
        print(
            """WARNING: flow.global_function(func_config) is deprecated. Please replace it with flow.global_function(type, func_config).
            """
        )
        print(traceback.format_stack()[-2])
    else:
        assert type in ["train", "predict"]
        if function_config is None:
            function_config = FunctionConfig()
        if type == "train":
            function_config.function_desc.job_config_proto.mutable_train_conf()
        else:
            function_config.function_desc.job_config_proto.mutable_predict_conf()
    api = enable_if.unique([eager_oneflow_function, lazy_oneflow_function])
    return api(function_config)

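# A minimal usage sketch of the decorator above, once it is exposed as
# `flow.global_function`. The names `oneflow.typing`, `tp.Numpy.Placeholder`, and
# `flow.math.relu` are not defined in this file and are assumed from the public
# OneFlow 0.x lazy-mode API; treat this as an illustrative sketch, not part of
# this module.
#
#     import numpy as np
#     import oneflow as flow
#     import oneflow.typing as tp
#
#     @flow.global_function(type="predict")
#     def relu_job(x: tp.Numpy.Placeholder((1, 10))) -> tp.Numpy:
#         return flow.math.relu(x)
#
#     out = relu_job(np.random.rand(1, 10).astype(np.float32))
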
def api_nccl_fusion_threshold_mb(val: int) -> None:
    r"""Set up the threshold for operator fusion, in megabytes.

    Args:
        val (int): threshold size, e.g. 10 (MB)
    """
    return enable_if.unique([nccl_fusion_threshold_mb, do_nothing])(val)

def api_add_loss(loss: oneflow_api.BlobDesc) -> None:
    r"""Mark a `Blob` as a loss. Auto grad starts at every loss blob. It doesn't
    have to be the output of a typical "loss" operator like softmax loss; it can
    also be a `Blob` produced by any operator.

    Args:
        loss: A `Blob`.
    """
    return enable_if.unique([lazy_add_loss, eager_add_loss])(loss)

def api_machine_num(val: int) -> None:
    r"""Set the number of machines/nodes available for running the job.

    Args:
        val (int): available number of machines
    """
    return enable_if.unique([machine_num, do_nothing])(val)

def api_max_mdsave_worker_num(val: int) -> None:
    r"""Set up the maximum number of workers for the mdsave process.

    Args:
        val (int): max number of workers
    """
    return enable_if.unique([max_mdsave_worker_num, do_nothing])(val)

def api_nccl_fusion_all_gather(val: bool) -> None:
    r"""Whether or not to use nccl fusion during the all-gather process.

    Args:
        val (bool): True or False
    """
    return enable_if.unique([nccl_fusion_all_gather, do_nothing])(val)

def api_compute_thread_pool_size(val: int) -> None:
    r"""Set up the size of the compute thread pool.

    Args:
        val (int): size of the thread pool
    """
    return enable_if.unique([compute_thread_pool_size, do_nothing])(val)

def api_nccl_fusion_broadcast(val: bool) -> None:
    r"""Whether or not to use nccl fusion during the broadcast process.

    Args:
        val (bool): True or False
    """
    return enable_if.unique([nccl_fusion_broadcast, do_nothing])(val)

def api_rdma_recv_msg_buf_mbyte(val: int) -> None:
    r"""Set up the buffer size for receiving messages in RDMA mode.

    Args:
        val (int): buffer size, e.g. 1024 (MB)
    """
    return enable_if.unique([rdma_recv_msg_buf_mbyte, do_nothing])(val)

def api_nccl_fusion_max_ops(val: int) -> None:
    r"""Set the maximum number of ops for nccl fusion.

    Args:
        val (int): maximum number of ops
    """
    return enable_if.unique([nccl_fusion_max_ops, do_nothing])(val)

def api_load_library(val: str) -> None:
    r"""Load a library needed by the job.

    Args:
        val (str): path to the shared object file
    """
    return enable_if.unique([load_library, do_nothing])(val)

def api_nccl_enable_all_to_all(val: bool) -> None:
    r"""Whether or not to use nccl all2all during s2s boxing.

    Args:
        val (bool): True or False
    """
    return enable_if.unique([nccl_enable_all_to_all, do_nothing])(val)

def api_enable_debug_mode(val: bool) -> None:
    r"""Whether or not to use debug mode.

    Args:
        val (bool): True or False
    """
    return enable_if.unique([enable_debug_mode, do_nothing])(val)

def api_save_downloaded_file_to_local_fs(val: bool = True) -> None:
    r"""Whether or not to save downloaded files to the local file system.

    Args:
        val (bool, optional): True or False. Defaults to True.
    """
    return enable_if.unique([save_downloaded_file_to_local_fs, do_nothing])(val=val)

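# A minimal sketch of how the config setters in this file are typically called,
# assuming they are re-exported under `flow.config.*` by the public API (the
# export decorators are not shown in this file, so the exact public names are an
# assumption); the shared-object path below is a hypothetical placeholder.
#
#     import oneflow as flow
#
#     flow.config.machine_num(1)
#     flow.config.cpu_device_num(4)
#     flow.config.load_library("/path/to/custom_ops.so")  # hypothetical path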