Example No. 1
def CreateEagerVariableBlob(op_attribute, job_name=""):
    bn_in_op2blob_object = oneflow_api.deprecated.BnInOp2BlobObject()

    def BuildInstruction(builder):
        parallel_conf = (
            oneflow.current_scope().device_parallel_desc_symbol.parallel_conf)
        cfg_op_attribute = oneflow_api.deprecated.MakeOpAttributeByString(
            str(op_attribute))
        builder.StatelessCall(
            cfg_op_attribute,
            parallel_conf,
            bn_in_op2blob_object,
            boxing_util.BoxingTo,
        )

    oneflow_api.deprecated.LogicalRun(BuildInstruction)
    lbi = lbi_util.LogicalBlobId()
    lbi.set_op_name(op_attribute.op_conf.name)
    lbi.set_blob_name(op_attribute.op_conf.variable_conf.out)
    if not isinstance(lbi, lbi_util.LogicalBlobId):
        cfg_lbi = lbi_util.LogicalBlobId()
        cfg_lbi.set_op_name(lbi.op_name)
        cfg_lbi.set_blob_name(lbi.blob_name)
        lbi = cfg_lbi
    return oneflow_api.EagerConsistentBlob(
        lbi,
        blob_object=bn_in_op2blob_object["out"],
        blob_register=blob_register,
        job_name=job_name,
    )
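Note that blob_register in this snippet (and in several below) appears to be a module-level blob register defined elsewhere in the original OneFlow source; it is not defined inside the function. A minimal, hypothetical call-site sketch, where variable_op_attribute stands in for an OpAttribute produced by OneFlow's op inference:

# Hypothetical usage (variable_op_attribute is a placeholder, not from this snippet):
var_blob = CreateEagerVariableBlob(variable_op_attribute, job_name="")
value = var_blob.numpy()  # EagerConsistentBlob exposes numpy(); see Example No. 4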
Example No. 2
def CreateEagerVariableBlob(op_attribute, job_name=""):
    bn_in_op2blob_object = {}

    def BuildInstruction(builder):
        parallel_conf = (
            oneflow.current_scope().device_parallel_desc_symbol.parallel_conf)
        builder.StatelessCall(op_attribute,
                              parallel_conf,
                              bn_in_op2blob_object=bn_in_op2blob_object)

    vm_util.LogicalRun(BuildInstruction)
    lbi = lbi_util.LogicalBlobId()
    lbi.set_op_name(op_attribute.op_conf.name)
    lbi.set_blob_name(op_attribute.op_conf.variable_conf.out)
    if not isinstance(lbi, lbi_util.LogicalBlobId):
        cfg_lbi = lbi_util.LogicalBlobId()
        cfg_lbi.set_op_name(lbi.op_name)
        cfg_lbi.set_blob_name(lbi.blob_name)
        lbi = cfg_lbi
    return oneflow_api.EagerConsistentBlob(
        lbi,
        blob_object=bn_in_op2blob_object["out"],
        blob_register=blob_register,
        job_name=job_name,
    )
Example No. 3
def _CreateEagerInputBlobAndFeedValue(arg_blob_def, arg_ndarray):
    _CheckInputArgBlobDefValueMatch(arg_blob_def, arg_ndarray)
    arg_blob_object, lbi = _MakeInputBlobObject(arg_blob_def)
    FeedValueToEagerBlob(arg_blob_object, arg_blob_def, arg_ndarray)
    get_blob = None
    if not isinstance(lbi, lbi_util.LogicalBlobId):
        cfg_lbi = lbi_util.LogicalBlobId()
        cfg_lbi.set_op_name(lbi.op_name)
        cfg_lbi.set_blob_name(lbi.blob_name)
        lbi = cfg_lbi
    if isinstance(arg_blob_def, input_blob_def.FixedTensorDef):

        def get_blob(lbi, blob_object, blob_register):
            blob = oneflow_api.EagerConsistentBlob(lbi, blob_object,
                                                   blob_register)
            with oneflow.scope.consistent_view():
                return oneflow.identity(blob)

    elif isinstance(arg_blob_def, input_blob_def.MirroredTensorDef):
        get_blob = oneflow_api.EagerMirroredBlob
    else:
        raise NotImplementedError
    return get_blob(lbi,
                    blob_object=arg_blob_object,
                    blob_register=blob_register)
Example No. 4
def _LogicalSlice(
    input_blob_object: oneflow_api.BlobObject,
    start: Sequence[int],
    stop: Sequence[int],
    scope_symbol_id: int,
) -> np.ndarray:
    """
    Construct a logical_slice op, run it eagerly with OneFlow, and
    return the sliced result as a numpy ndarray.
    """
    op_name = id_util.UniqueStr(OP_PREFIX)

    def AsyncSlice(Yield):
        def build(builder):
            op_conf = op_conf_pb.OperatorConf()
            # device_tag doesn't matter for logical_slice op
            device_tag = (
                oneflow.current_scope().device_parallel_desc_symbol.device_tag)
            op_conf.device_tag = device_tag
            op_conf.name = op_name
            op_conf.user_conf.op_type_name = "logical_slice"
            op_conf.user_conf.input["x"].s.append("{}/x_0".format(op_name))
            op_conf.user_conf.output["y"].s.append("{}/y_0".format(op_name))
            parallel_conf = input_blob_object.parallel_desc_symbol.parallel_conf
            op_conf.user_conf.attr["parallel_conf"].at_string = str(
                parallel_conf)
            op_conf.user_conf.attr["start"].at_list_int64.val[:] = start
            op_conf.user_conf.attr["stop"].at_list_int64.val[:] = stop
            op_conf.user_conf.attr["step"].at_list_int64.val[:] = [
                1
            ] * len(start)
            bn_in_op2blob_object = oneflow_api.deprecated.BnInOp2BlobObject()
            bn_in_op2blob_object["x_0"] = input_blob_object
            op_attribute = op_infer_util.Infer(op_conf, bn_in_op2blob_object,
                                               scope_symbol_id)
            cfg_op_attribute = oneflow_api.deprecated.MakeOpAttributeByString(
                str(op_attribute))
            builder.StatelessCall(
                cfg_op_attribute,
                parallel_conf,
                bn_in_op2blob_object,
                boxing_util.BoxingTo,
            )
            Yield(bn_in_op2blob_object["y_0"])

        oneflow_api.deprecated.LogicalRun(build)

    lbi = lbi_util.LogicalBlobId()
    lbi.set_op_name(op_name)
    lbi.set_blob_name(op_name)

    blob_object = async_util.Await(1, AsyncSlice)[0]

    blob = oneflow_api.EagerConsistentBlob(
        lbi,
        blob_object=blob_object,
        blob_register=blob_register,
        job_name=FAKE_JOB_NAME,
    )
    return blob.numpy()
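Below is a hedged usage sketch for _LogicalSlice. The blob object and scope symbol id are placeholders: obtaining them depends on OneFlow internals that are not part of this example.

# Hypothetical call site (placeholder names, not from the original source):
# slice the first two rows of a 2-D eager variable's underlying blob object.
sliced = _LogicalSlice(
    some_variable_blob.blob_object,   # oneflow_api.BlobObject (assumed available)
    start=[0, 0],                     # inclusive per-axis start indices
    stop=[2, 4],                      # exclusive per-axis stop indices
    scope_symbol_id=scope_symbol_id,  # symbol id of the relevant scope (assumed)
)
print(sliced.shape)  # (2, 4) numpy ndarray, given a source blob at least that large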
Example No. 5
 def __init__(self,
              lbi,
              job_name="",
              distribute=oneflow_api.distribute.auto()):
     if not isinstance(lbi, lbi_util.LogicalBlobId):
         cfg_lbi = lbi_util.LogicalBlobId()
         cfg_lbi.set_op_name(lbi.op_name)
         cfg_lbi.set_blob_name(lbi.blob_name)
         lbi = cfg_lbi
     oneflow_api.LazyMirroredBlob.__init__(self, lbi, job_name, distribute)
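Example No. 6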
 def build(builder):
     blob_object = GetEagerInterfaceBlob(op_name).blob_object
     lbi = lbi_util.LogicalBlobId()
     lbi.set_op_name(op_name)
     op_attribute = sess.OpAttribute4InterfaceOpName(op_name)
     assert len(op_attribute.output_bns) == 1
     lbi.set_blob_name(op_attribute.output_bns[0])
     if not isinstance(lbi, lbi_util.LogicalBlobId):
         cfg_lbi = lbi_util.LogicalBlobId()
         cfg_lbi.set_op_name(lbi.op_name)
         cfg_lbi.set_blob_name(lbi.blob_name)
         lbi = cfg_lbi
     if blob_object.op_arg_parallel_attr.is_mirrored():
         remote_blob = oneflow_api.EagerMirroredBlob(
             lbi, blob_object, blob_register, job_name)
     else:
         remote_blob = oneflow_api.EagerConsistentBlob(
             lbi, blob_object, blob_register, job_name)
     value = remote_blob.numpy()
     Yield(value)
Example No. 7
 def __init__(self,
              lbi,
              job_name=None,
              distribute=oneflow_api.distribute.auto()):
     cfg_lbi = lbi_util.LogicalBlobId()
     cfg_lbi.set_op_name(lbi.op_name)
     cfg_lbi.set_blob_name(lbi.blob_name)
     oneflow_api.BlobDesc.__init__(self, cfg_lbi, distribute)
     if job_name is None:
         job_name = oneflow_api.JobBuildAndInferCtx_GetCurrentJobName()
     self.job_name_ = job_name
     self.parallel_size_ = 0
Example No. 8
def _AddAndInferMirroredOp(mirrored_lbn, op_conf, sub_consistent_blob_list):
    compile_context.CurJobAddMirroredOp(op_conf)
    job_name = oneflow_api.JobBuildAndInferCtx_GetCurrentJobName()
    num_sub_lbi = c_api_util.JobBuildAndInferCtx_MirroredBlobGetNumSubLbi(
        job_name, mirrored_lbn)
    for i in range(num_sub_lbi):
        sub_lbi = c_api_util.JobBuildAndInferCtx_MirroredBlobGetSubLbi(
            job_name, mirrored_lbn, i)
        lbi = lbi_util.LogicalBlobId()
        lbi.set_op_name(sub_lbi.op_name)
        lbi.set_blob_name(sub_lbi.blob_name)
        sub_consistent_blob_list.append(
            oneflow_api.ConsistentBlob(lbi, "", oneflow_api.distribute.auto()))
Example No. 9
 def __init__(self, shape, dtype, batch_axis, name=None):
     lbi = lbi_util.LogicalBlobId()
     if name is None:
         name = id_util.UniqueStr("Input_")
     lbi.set_op_name(name)
     lbi.set_blob_name("out")
     oneflow_api.BlobDesc.__init__(self, lbi, oneflow_api.distribute.auto())
     assert type(shape) is tuple
     for dim in shape:
         assert type(dim) is int
         assert dim > 0
     self.shape_ = shape
     self.dtype_ = dtype
     self.batch_axis_ = batch_axis
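Example No. 10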
        def Build(builder, Yield):
            blob_object = _GetInterfaceBlobObject(builder, op_name)
            lbi = lbi_util.LogicalBlobId()
            lbi.set_op_name(op_name)
            op_attribute = sess.OpAttribute4InterfaceOpName(op_name)
            assert len(op_attribute.output_bns) == 1
            lbi.set_blob_name(op_attribute.output_bns[0])
            if blob_object.op_arg_parallel_attr.is_mirrored():
                remote_blob = oneflow_api.EagerMirroredBlob(
                    lbi, blob_object, blob_register, job_name)
            else:
                remote_blob = oneflow_api.EagerConsistentBlob(
                    lbi, blob_object, blob_register, job_name)

            Yield(remote_blob)
Example No. 11
def _MakeEagerLogicalBlob(op_attribute, obn, blob_register):
    lbi = op_attribute.arg_signature.bn_in_op2lbi[obn]
    blob_object = blob_register.GetObject4BlobName(
        "%s/%s" % (lbi.op_name, lbi.blob_name)
    )
    mirrored_sig_map = op_attribute.mirrored_signature.bn_in_op2opt_mirrored_parallel
    if not isinstance(lbi, lbi_util.LogicalBlobId):
        cfg_lbi = lbi_util.LogicalBlobId()
        cfg_lbi.set_op_name(lbi.op_name)
        cfg_lbi.set_blob_name(lbi.blob_name)
        lbi = cfg_lbi
    if mirrored_sig_map[obn].HasField("mirrored_parallel"):
        return oneflow_api.EagerMirroredBlob(lbi, blob_object, default_blob_register)
    else:
        return oneflow_api.EagerConsistentBlob(lbi, blob_object, default_blob_register)
Example No. 12
 def __init__(
     self, shape, dtype, name=None, distribute=oneflow_api.distribute.auto(),
 ):
     lbi = lbi_util.LogicalBlobId()
     if name is None:
         name = id_util.UniqueStr("Input_")
     lbi.set_op_name(name)
     lbi.set_blob_name("out")
     self.lbi_ = lbi
     assert type(shape) is tuple
     for dim in shape:
         assert type(dim) is int
         assert dim > 0
     self.shape_ = shape
     self.dtype_ = dtype
     self.distribute_ = distribute
Example No. 13
 def __init__(
         self,
         lbi,
         blob_object=None,
         job_name="",
         distribute=oneflow_api.distribute.auto(),
 ):
     if not isinstance(lbi, lbi_util.LogicalBlobId):
         cfg_lbi = lbi_util.LogicalBlobId()
         cfg_lbi.set_op_name(lbi.op_name)
         cfg_lbi.set_blob_name(lbi.blob_name)
         lbi = cfg_lbi
     if job_name is None:
         job_name = ""
     oneflow_api.MirroredBlob.__init__(self, lbi, job_name, distribute)
     self._Init(blob_object)
Example No. 14
def LazyRemoteBlob(lbi, **kw):
    job_name = oneflow_api.JobBuildAndInferCtx_GetCurrentJobName()
    lbn = lbi.op_name + "/" + lbi.blob_name
    blob_type = oneflow_api.LazyConsistentBlob
    if c_api_util.JobBuildAndInferCtx_IsMirroredBlob(job_name, lbn):
        blob_type = oneflow_api.LazyMirroredBlob
    if not isinstance(lbi, lbi_util.LogicalBlobId):
        cfg_lbi = lbi_util.LogicalBlobId()
        cfg_lbi.set_op_name(lbi.op_name)
        cfg_lbi.set_blob_name(lbi.blob_name)
        lbi = cfg_lbi
    job_name = ""
    if ("job_name" in kw) and (kw["job_name"] is not None):
        job_name = kw["job_name"]
    distribute = oneflow_api.distribute.auto()
    if "distribute" in kw:
        distribute = kw["distribute"]
    return blob_type(lbi, job_name, distribute)
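A short, hedged sketch of how LazyRemoteBlob might be called. The lbi argument is read with attribute access (lbi.op_name) before being converted, so callers pass a protobuf-style LogicalBlobId rather than a cfg lbi_util.LogicalBlobId; the names below are placeholders.

# Hypothetical call site inside an active job-build context (placeholder names):
pb_lbi = some_op_attribute.arg_signature.bn_in_op2lbi["out"]  # protobuf LogicalBlobId
remote_blob = LazyRemoteBlob(pb_lbi)  # job_name defaults to "", distribute to auto()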
Example No. 15
 def __init__(
         self,
         lbi,
         blob_object=None,
         job_name="",
         distribute=oneflow_api.distribute.auto(),
 ):
     if not isinstance(lbi, lbi_util.LogicalBlobId):
         cfg_lbi = lbi_util.LogicalBlobId()
         cfg_lbi.set_op_name(lbi.op_name)
         cfg_lbi.set_blob_name(lbi.blob_name)
         lbi = cfg_lbi
     if job_name is None:
         job_name = ""
     logical_blob_name = lbi.op_name() + "/" + lbi.blob_name()
     EagerBlobTrait.__init__(self)
     oneflow_api.MirroredBlob.__init__(self, lbi, job_name, distribute)
     self._Init(logical_blob_name, blob_object, blob_register)
Example No. 16
def _Watch(op_attribute, parallel_conf, blob_register):
    lbi = op_attribute.arg_signature.bn_in_op2lbi["in"]
    uuid = op_attribute.op_conf.foreign_watch_conf.handler_uuid
    lbn = "%s/%s" % (lbi.op_name, lbi.blob_name)
    in_blob_object = blob_register.GetObject4BlobName(lbn)
    if not isinstance(lbi, lbi_util.LogicalBlobId):
        cfg_lbi = lbi_util.LogicalBlobId()
        cfg_lbi.set_op_name(lbi.op_name)
        cfg_lbi.set_blob_name(lbi.blob_name)
        lbi = cfg_lbi
    if in_blob_object.op_arg_parallel_attr.is_mirrored():
        blob = oneflow_api.EagerMirroredBlob(lbi, in_blob_object,
                                             default_blob_register)
    else:
        blob = oneflow_api.EagerConsistentBlob(lbi, in_blob_object,
                                               default_blob_register)
    uuid2watch_handler = session_ctx.GetDefaultSession().uuid2watch_handler
    assert uuid in uuid2watch_handler
    uuid2watch_handler[uuid](blob)
    del uuid2watch_handler[uuid]
Example No. 17
def EagerLogicalBlob(lbi, **kw):
    job_name = oneflow_api.JobBuildAndInferCtx_GetCurrentJobName()
    lbn = lbi.op_name + "/" + lbi.blob_name
    if not isinstance(lbi, lbi_util.LogicalBlobId):
        cfg_lbi = lbi_util.LogicalBlobId()
        cfg_lbi.set_op_name(lbi.op_name)
        cfg_lbi.set_blob_name(lbi.blob_name)
        lbi = cfg_lbi
    blob_type = oneflow_api.EagerConsistentBlob
    if c_api_util.JobBuildAndInferCtx_IsMirroredBlob(job_name, lbn):
        blob_type = oneflow_api.EagerMirroredBlob
    job_name = ""
    if ("job_name" in kw) and (kw["job_name"] is not None):
        job_name = kw["job_name"]
    blob_object = None
    if "blob_object" in kw:
        blob_object = kw["blob_object"]
    distribute = oneflow_api.distribute.auto()
    if "distribute" in kw:
        distribute = kw["distribute"]
    return blob_type(lbi, blob_object, blob_register, job_name, distribute)