Example 1
def _construct_with_data(
    self,
    *args,
    dtype=None,
    device=None,
    requires_grad=False,
    retain_grad=False,
    placement=None,
    sbp=None,
    is_consistent=False,
    is_lazy=False,
):
    # Normalize the input data to a numpy array of the requested dtype,
    # whether it arrives as a list/tuple or as a numpy array.
    numpy_data = None
    if _input_args_is_tuple_or_list(*args):
        numpy_data = np.array(args[0]).astype(
            flow.convert_oneflow_dtype_to_numpy_dtype(dtype))
    elif _input_args_is_numpy(*args):
        numpy_data = args[0].astype(
            flow.convert_oneflow_dtype_to_numpy_dtype(dtype))
    # Derive the tensor shape from the numpy data and defer the actual
    # initialization until the tensor is determined.
    shape = oneflow_api.Size(tuple(numpy_data.shape))
    self._determining_initializer = _numpy_initializer_for_determining
    self._undetermined_tensor = UndeterminedTensor(
        shape,
        dtype,
        device=device,
        requires_grad=requires_grad,
        retain_grad=retain_grad,
        placement=placement,
        sbp=sbp,
        is_consistent=is_consistent,
        is_lazy=is_lazy,
        numpy_data=numpy_data,
    )
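
The step worth noting here is the conversion just before UndeterminedTensor is built: a numpy shape becomes OneFlow's own shape type via oneflow_api.Size(tuple(...)). A minimal sketch of that conversion in isolation, assuming an environment where the oneflow_api binding is importable (newer OneFlow releases expose the same bindings as oneflow._oneflow_internal, so adjust the import for your version):

import numpy as np
import oneflow_api

data = np.zeros((2, 3), dtype=np.float32)
# Size is constructed from a plain tuple, exactly as in _construct_with_data.
shape = oneflow_api.Size(tuple(data.shape))
print(shape)  # describes a 2x3 tensor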
Example 2
def __init__(
    self,
    shape,
    dtype,
    device=None,
    requires_grad=False,
    retain_grad=False,
    placement=None,
    sbp=None,
    is_consistent=False,
    is_lazy=False,
    data_initializer=None,
):
    # Coerce any iterable shape into an oneflow_api.Size.
    if not isinstance(shape, oneflow_api.Size):
        if not isinstance(shape, tuple):
            shape = tuple(shape)
        shape = oneflow_api.Size(shape)
    # Fall back to an empty initializer and the CPU device when not given.
    data_initializer = (data_initializer if data_initializer is not None
                        else flow.empty_initializer(dtype=dtype))
    device = device if device is not None else oneflow_api.device("cpu")
    self.shape = shape
    self.dtype = dtype
    self.device = device
    self.requires_grad = requires_grad
    self.retain_grad = retain_grad
    self.placement = placement
    self.sbp = sbp
    self.is_consistent = is_consistent
    self.is_lazy = is_lazy
    self.data_initializer = data_initializer
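
The first branch above funnels three possible shape representations (a Size, a tuple, or any other iterable of ints) into a single Size. A small sketch of the same funnel as a standalone helper; to_size is a name introduced here for illustration, not an OneFlow API, and it again assumes oneflow_api is importable:

import oneflow_api

def to_size(shape):
    # Mirrors the coercion in UndeterminedTensor.__init__: anything that
    # is not already a Size becomes a tuple first, since Size is built
    # from a tuple.
    if isinstance(shape, oneflow_api.Size):
        return shape
    if not isinstance(shape, tuple):
        shape = tuple(shape)
    return oneflow_api.Size(shape)

print(to_size([2, 3]))   # list input
print(to_size((2, 3)))   # tuple input, same result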
Example 3
@contextmanager  # from contextlib; InterpretScope is entered via "with" and yields once
def InterpretScope(session, function_desc, config_proto):
    job_conf = function_desc.job_config_proto
    job_conf.set_job_name(function_desc.job_func.__name__)
    placement_scope = function_desc.function_attribute.default_placement_scope
    if placement_scope is None:
        tag_and_dev_ids = placement_util.GetDefaultMachineDeviceIds(session.resource)
        hierarchy = None
    else:
        assert isinstance(placement_scope, placement_ctx.EmptyPlacementScope)
        tag_and_dev_ids = (
            placement_scope.device_tag,
            placement_scope.machine_device_ids,
        )
        hierarchy = placement_scope.hierarchy

    distribute_strategy = function_desc.function_attribute.default_distribute_strategy
    if distribute_strategy is None:
        distribute_strategy = distribute_util.DistributeConsistentStrategy()
    is_mirrored = isinstance(
        distribute_strategy, distribute_util.DistributeMirroredStrategy
    )
    assert isinstance(hierarchy, (list, tuple)) or hierarchy is None
    if hierarchy is not None:
        hierarchy = oneflow_api.Size(tuple(hierarchy))
    scope = scope_util.MakeInitialScope(
        job_conf, *tag_and_dev_ids, hierarchy, is_mirrored
    )
    with _JobBuildAndInferCtx(job_conf.job_name()), distribute_strategy:
        c_api_util.CurJobBuildAndInferCtx_SetJobConf(job_conf)
        with runtime_mode.ModeScope(runtime_mode.GLOBAL_MODE):
            with scope_util.ScopeContext(scope):
                yield
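
The bare yield at the end makes sense because InterpretScope is a generator-based context manager: everything before the yield runs when the with block is entered, the caller's body runs at the yield, and the nested with statements unwind on exit. A self-contained sketch of the same pattern in plain Python (the names are illustrative, not OneFlow APIs):

from contextlib import contextmanager

@contextmanager
def interpret_scope(job_name):
    print("enter job-build context for", job_name)  # setup before yield
    try:
        yield  # the body of the caller's "with" runs here
    finally:
        print("leave job-build context for", job_name)  # teardown on exit

with interpret_scope("train_job"):
    print("building the job...")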
Example 4
def MakeScopeSymbol(job_conf, parallel_conf, is_mirrored):
    # Translate the optional device hierarchy in parallel_conf into a Size.
    parallel_hierarchy = None
    if parallel_conf.has_hierarchy():
        parallel_hierarchy = oneflow_api.Size(
            tuple(parallel_conf.hierarchy().dim()))
    return scope_util.MakeInitialScope(
        job_conf,
        parallel_conf.device_tag(),
        list(parallel_conf.device_name()),
        parallel_hierarchy,
        is_mirrored,
    ).symbol_id
Example 5
def GetNormalModePlacementScope(device_tag,
                                machine_device_ids,
                                hierarchy=None):
    # Normalize machine_device_ids to a list of device id strings.
    if isinstance(machine_device_ids, tuple):
        machine_device_ids = list(machine_device_ids)
    if not isinstance(machine_device_ids, list):
        machine_device_ids = [machine_device_ids]
    sess = session_ctx.GetDefaultSession()
    if hierarchy is not None:
        hierarchy = oneflow_api.Size(tuple(hierarchy))
    scope = scope_util.MakeScope(
        lambda old_scope, builder: builder.BuildScopeWithNewParallelDesc(
            old_scope, device_tag, machine_device_ids, hierarchy))
    return scope_util.ScopeContext(scope)
Example 6
def GetGlobalModePlacementScope(device_tag, machine_device_ids, hierarchy=None):
    # Wrap a bare device id into a list so downstream code can iterate.
    if not isinstance(machine_device_ids, (list, tuple)):
        machine_device_ids = [machine_device_ids]
    sess = session_ctx.GetDefaultSession()
    if hierarchy is not None:
        hierarchy = oneflow_api.Size(tuple(hierarchy))

    def BuildScope(old_scope, builder):
        return builder.BuildScopeWithNewParallelDesc(
            old_scope, device_tag, machine_device_ids, hierarchy
        )

    scope_ctx = scope_util.ScopeContext(scope_util.MakeScope(BuildScope))
    return placement_ctx.GlobalModePlacementScope(scope_ctx)
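
Both placement-scope helpers start by normalizing machine_device_ids so that a single device id string and a list of them reach BuildScopeWithNewParallelDesc in the same form. That normalization is plain Python and can be exercised without OneFlow; normalize_device_ids and the id strings below are illustrative only:

def normalize_device_ids(machine_device_ids):
    # Same funnel as the two helpers above: tuples and bare scalars
    # both end up as lists.
    if not isinstance(machine_device_ids, (list, tuple)):
        machine_device_ids = [machine_device_ids]
    return list(machine_device_ids)

print(normalize_device_ids("0:0-3"))         # ['0:0-3']
print(normalize_device_ids(("0:0", "1:0")))  # ['0:0', '1:0']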
Example 7
def _construct_determined_tensor_with_numpy(
    self,
    dtype=None,
    device=None,
    requires_grad=False,
    retain_grad=False,
    is_lazy=False,
    numpy_data=None,
):
    shape = oneflow_api.Size(tuple(numpy_data.shape))
    # Only local tensor will be created
    self._local_or_consistent_tensor = _initialized_job(
        shape=shape,
        dtype=dtype,
        device=device,
        requires_grad=requires_grad,
        retain_grad=retain_grad,
        is_lazy=is_lazy,
        numpy_data=numpy_data,
    )
    self._undetermined_tensor = None