Example 1
# Import paths below assume the oneflow.framework layout; they may vary by version.
import oneflow
import oneflow.framework.scope_util as scope_util


def MakeScopeSymbol(job_conf, parallel_conf, is_mirrored):
    # Convert the optional parallel hierarchy into an internal Size object.
    parallel_hierarchy = None
    if parallel_conf.has_hierarchy():
        parallel_hierarchy = oneflow._oneflow_internal.Size(
            tuple(parallel_conf.hierarchy().dim())
        )
    # Build the initial scope for this job/placement and return its symbol id.
    return scope_util.MakeInitialScope(
        job_conf,
        parallel_conf.device_tag(),
        list(parallel_conf.device_name()),
        parallel_hierarchy,
        is_mirrored,
    ).symbol_id
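
From the accessors it calls (has_hierarchy(), hierarchy().dim(), device_tag(), device_name()), the parallel_conf argument is a config object with method-style getters, not a plain protobuf message. A minimal, hypothetical stand-in showing only the interface MakeScopeSymbol relies on (this class is an illustration, not part of the OneFlow API):

class StubParallelConf:
    # Hypothetical stand-in; mirrors only the accessors MakeScopeSymbol uses.
    def has_hierarchy(self):
        return False  # no explicit hierarchy -> parallel_hierarchy stays None

    def device_tag(self):
        return "cpu"  # device type tag, e.g. "cpu" or "cuda"

    def device_name(self):
        return ["0:0"]  # "<machine_id>:<device_id>" entries

With such an object, MakeScopeSymbol(job_conf, StubParallelConf(), is_mirrored=False) builds a scope on machine 0, device 0 and returns its symbol id.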
Example 2
from contextlib import contextmanager


@contextmanager
def graph_build_context(config_proto, session):
    # lazy_mode, JobBuildAndInferCtx, and BlockScopeContext are assumed to be
    # in scope (defined in the surrounding module).
    prev_scope = oneflow._oneflow_internal.GetCurrentScope()
    new_scope = scope_util.MakeInitialScope(
        config_proto,
        "cpu",  # NOTE(chengcheng): graph init scope is useless, just set cpu 0:0 for test.
        ["0:0"],
        None,  # TODO(): set hierarchy from user graph config
        False,  # is_mirrored
    )

    # Enter lazy (graph) mode, open the job build-and-infer context, and
    # install the new scope for the duration of graph building.
    with lazy_mode.guard(True):
        with JobBuildAndInferCtx(config_proto):
            with BlockScopeContext(prev_scope, new_scope):
                yield
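
Because the bare yield makes this a generator, the @contextmanager decorator (added above) is what lets it be used in a with statement. A sketch of the intended call pattern; the function name build_graph is illustrative, not from the example:

def build_graph(config_proto, session):
    # While the context is active, lazy mode is on and ops are traced
    # into the job under construction instead of executing eagerly.
    with graph_build_context(config_proto, session):
        ...  # trace the model's forward computation here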
Example 3
@contextmanager  # requires `from contextlib import contextmanager` at module top
def open(self, job_name, signature=None, batch_size=None):
    # Session method: opens a job build-and-infer context, configures the
    # job, and yields the session with `job_name` as the current job.
    self._check_status(self.SessionStatus.OPEN)
    c_api_util.JobBuildAndInferCtx_Open(job_name)
    if signature is not None:
        self.set_job_signature(job_name, signature)
    if isinstance(batch_size, int):
        self.set_job_batch_size(job_name, batch_size)
    job_conf = self._get_job_conf(job_name)
    c_api_util.CurJobBuildAndInferCtx_SetJobConf(job_conf)
    # NOTE(chengcheng): placement_util is unavailable.
    # tag_and_dev_ids = placement_util.GetDefaultMachineDeviceIds(
    #     self.config_proto_.resource
    # )
    scope = scope_util.MakeInitialScope(
        job_conf, "cpu", ["0:0"], None, self.is_mirrored_
    )
    with runtime_mode.ModeScope(runtime_mode.GLOBAL_MODE):
        with scope_util.ScopeContext(scope):
            self.cur_job_name_ = job_name
            yield self
            self.cur_job_name_ = None
    oneflow._oneflow_internal.JobBuildAndInferCtx_Close()
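
With the decorator in place, open works in a with statement. A hypothetical usage sketch; the InferenceSession class name and constructor are assumptions, not shown in the example:

session = InferenceSession()  # hypothetical session class exposing open()
with session.open("infer_job", batch_size=4) as sess:
    # Inside the block, sess.cur_job_name_ == "infer_job"; it is reset
    # to None when the block exits.
    ...  # configure the job here
# By this point JobBuildAndInferCtx_Close() has been called.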