Example #1
0
def get_args_copy(args, kwargs):
    """Duplicate positional and keyword arguments for a second invocation.

    Tensors are duplicated with ``clone().detach()`` so the copies share no
    autograd history with the originals; every other value is duplicated with
    ``copy.deepcopy``. Returns ``(copied_args_list, copied_kwargs_dict)``.
    """

    def _duplicate(value):
        # Tensors must be cloned/detached rather than deep-copied.
        if flow.is_tensor(value):
            return value.clone().detach()
        return copy.deepcopy(value)

    copy_args = [_duplicate(arg) for arg in args]
    copy_kwargs = {key: _duplicate(value) for key, value in kwargs.items()}
    return copy_args, copy_kwargs
Example #2
0
def get_fake_program_more_detail(oneflow, mode, func, args=None, kwargs=None):
    """Print a yellow-highlighted trace of a traced call for debugging.

    Prints a banner for *mode*, the entered function name *func*, the tensor
    involved (the bound receiver ``oneflow.__self__`` when available,
    otherwise *oneflow* itself when it is a tensor), the call's *args* and
    *kwargs* when provided, and the fake-program note, then a leave banner.
    Output only; returns None.

    NOTE(review): the trailing escape in each f-string is ``\\033[1;33m``
    (yellow again) rather than the reset ``\\033[0m`` — presumably intended
    as a reset; left unchanged to preserve output byte-for-byte.
    """
    print(f"\033[1;33m============= {mode} ================\033[1;33m")
    print(f"\033[1;33mEnter {func} function\033[1;33m")
    # A bound method exposes its receiver via __self__; print it when it is
    # a tensor. Attribute access on arbitrary objects may raise, so fall
    # back to printing the object itself when it is a tensor.
    # Fixed: was a bare ``except:``, which would also swallow
    # KeyboardInterrupt/SystemExit.
    try:
        if "__self__" in dir(oneflow) and flow.is_tensor(oneflow.__self__):
            print(f"\033[1;33m{oneflow.__self__}\033[1;33m")
    except Exception:
        if flow.is_tensor(oneflow):
            print(f"\033[1;33m{oneflow}\033[1;33m")
    if args is not None:
        print(f"\033[1;33m{args}\033[1;33m")
    if kwargs is not None:
        print(f"\033[1;33m{kwargs}\033[1;33m")
    print_note_fake_program()
    print(f"\033[1;33mLeave {func} function\033[1;33m")
    # Switch the terminal color back to white before the trailing blank lines.
    print(f"\033[1;37m\033[1;37m")
    print("\n\n")
Example #3
0
def as_tensor(data, dtype=None, device=None):
    """Convert *data* to a oneflow tensor, sharing memory when possible.

    An existing tensor is returned unchanged when its dtype and device
    already match; a numpy array goes through ``flow.from_numpy`` (shared
    memory) when no dtype/device conversion is needed; any other input
    (tuple, list, scalar) is materialized through a fresh numpy array and
    copied into a new tensor.
    """
    if flow.is_tensor(data):
        wanted_dtype = data.dtype if dtype is None else dtype
        wanted_device = data.device if device is None else device
        if data.dtype is wanted_dtype and data.device is wanted_device:
            return data
        return data.to(dtype=wanted_dtype, device=wanted_device)

    if isinstance(data, (np.ndarray)):
        # from_numpy only applies when the target lives on the CPU.
        cpu_target = (device is None) or (device.type == "cpu")
        if dtype is None:
            if cpu_target:
                return flow.from_numpy(data)
            return flow.tensor(data, device=device)
        if data.dtype not in numpy_dtype_to_oneflow_dtype_dict:
            raise TypeError(
                "numpy-ndarray holds elements of unsupported datatype")
        inferred_dtype = numpy_dtype_to_oneflow_dtype_dict[data.dtype]
        if inferred_dtype is dtype and cpu_target:
            # Requested dtype matches the array's own dtype: zero-copy.
            return flow.from_numpy(data)
        if cpu_target:
            return flow.tensor(data, dtype=dtype)
        return flow.tensor(data, dtype=dtype, device=device)

    # Tuple, list, or scalar input: no memory sharing in this case.
    result = flow.tensor(np.array(data))
    if device is not None:
        result = result.to(device)
    if dtype is not None:
        result = result.to(dtype)
    return result
Example #4
0
def as_tensor(data, dtype=None, device=None):
    """Convert *data* to a oneflow tensor, sharing memory when possible.

    Tensors are returned as-is when dtype and device already match; numpy
    arrays use ``flow.from_numpy`` (shared memory) when the target is CPU
    and no dtype conversion is needed; everything else is handed to
    ``flow.tensor`` and then moved/cast as requested (no memory sharing).
    """
    if flow.is_tensor(data):
        dtype = data.dtype if dtype is None else dtype
        device = data.device if device is None else device
        if not (data.dtype is dtype and data.device is device):
            data = data.to(dtype=dtype, device=device)
        return data

    if isinstance(data, (np.ndarray)):
        # from_numpy is only valid for CPU-resident targets.
        wants_cpu = (device is None) or (device.type == "cpu")
        if dtype is None:
            if wants_cpu:
                return flow.from_numpy(data)
            return flow.tensor(data, device=device)
        inferred = flow.framework.dtype.convert_numpy_dtype_to_oneflow_dtype(
            data.dtype
        )
        if inferred is dtype and wants_cpu:
            # Requested dtype matches the array's own dtype: zero-copy path.
            return flow.from_numpy(data)
        if wants_cpu:
            return flow.tensor(data, dtype=dtype)
        return flow.tensor(data, dtype=dtype, device=device)

    # Fallback: build a brand-new tensor (not shared memory in this case).
    result = flow.tensor(data)
    if device is not None:
        result = result.to(device)
    if dtype is not None:
        result = result.to(dtype)
    return result
Example #5
0
                    def dual_method(self, *args, **kwargs):
                        """Run the named method on both PyTorch and OneFlow and pair the results.

                        Resolves ``method_name`` on both backends, splits the incoming
                        arguments per backend via ``get_args``, runs PyTorch first (and
                        classifies its failure), then runs OneFlow eagerly and — when
                        ``testing_graph`` is set — also inside an ``nn.Graph``, checking
                        the graph result against the eager one.
                        """
                        pytorch_method = getattr(pytorch, method_name)
                        oneflow_method = getattr(oneflow, method_name)
                        # get_args yields backend-specific copies of the same call.
                        (
                            pytorch_args,
                            pytorch_kwargs,
                            oneflow_args,
                            oneflow_kwargs,
                        ) = get_args(pytorch_method, *args, **kwargs)
                        try:
                            pytorch_res = pytorch_method(
                                *pytorch_args, **pytorch_kwargs)
                            # Track tensors produced by the call so they can be
                            # distinguished from input tensors later.
                            if isinstance(pytorch_res, torch_original.Tensor):
                                call_tensor_id.append(id(pytorch_res))
                        except Exception as e:
                            # When aligning exceptions, try OneFlow too: if both fail
                            # the failure is "supported by neither"; if only PyTorch
                            # fails, warn that the implementations may diverge.
                            if align_exception:
                                try:
                                    oneflow_res = oneflow_method(
                                        *oneflow_args, **oneflow_kwargs)
                                except Exception as ee:
                                    raise BothDoNotSupportError(e,
                                                                ee) from None
                                print(
                                    "PyTorch has an error but OneFlow is ok, maybe you should check your implementation to align with PyTorch."
                                )
                            raise PyTorchDoesNotSupportError(e)

                        if testing_graph:
                            # Snapshot the OneFlow arguments before the eager call so
                            # the graph run sees pristine inputs (tensors are
                            # cloned/detached, everything else deep-copied).
                            tensor_graph_args = []
                            for arg in oneflow_args:
                                if flow.is_tensor(arg):
                                    copy_arg = arg.clone().detach()
                                else:
                                    copy_arg = copy.deepcopy(arg)
                                tensor_graph_args.append(copy_arg)
                            tensor_graph_kwargs = {}
                            for key, value in oneflow_kwargs.items():
                                if flow.is_tensor(value):
                                    tensor_graph_kwargs[key] = value.clone(
                                    ).detach()
                                else:
                                    tensor_graph_kwargs[key] = copy.deepcopy(
                                        value)

                        oneflow_res = oneflow_method(*oneflow_args,
                                                     **oneflow_kwargs)
                        if testing_graph:

                            # Wrap the method call in a Graph that closes over the
                            # copied arguments made above.
                            class TestGraphOfTensorMethod(flow.nn.Graph):
                                def __init__(self):
                                    super().__init__()

                                def build(self):
                                    return oneflow_method(
                                        *tensor_graph_args,
                                        **tensor_graph_kwargs)

                            try:
                                test_g = TestGraphOfTensorMethod()
                                if verbose:
                                    print("Run graph of method: ",
                                          repr(oneflow))
                                    test_g.debug(3)
                                test_g_res = test_g()
                            except Exception as e:
                                print_note_fake_program()
                                raise OneFlowGraphBuildOrRunError(e)
                            # Compare eager vs. graph output (element-wise for tuples).
                            if isinstance(test_g_res, tuple):
                                for _, g_res in enumerate(test_g_res):
                                    check_eager_graph_tensor(
                                        oneflow_res, g_res)
                            else:
                                check_eager_graph_tensor(
                                    oneflow_res, test_g_res)
                        return GetDualObject("unused", pytorch_res,
                                             oneflow_res)
Example #6
0
                    def dual_method(self, *args, **kwargs):
                        """Invoke the wrapped callable on PyTorch and OneFlow and pair results.

                        Runs the PyTorch side first (classifying its failure), then the
                        OneFlow side eagerly; when ``testing_graph`` is set the OneFlow
                        call is additionally exercised inside an ``nn.Graph`` (module or
                        functional flavour) and the graph output is checked against the
                        eager output. Returns a ``GetDualObject`` pairing both results.
                        """
                        # get_args yields backend-specific copies of the same call.
                        (
                            pytorch_args,
                            pytorch_kwargs,
                            oneflow_args,
                            oneflow_kwargs,
                        ) = get_args(pytorch, *args, **kwargs)

                        try:
                            pytorch_res = pytorch(*pytorch_args,
                                                  **pytorch_kwargs)

                            if isinstance(pytorch_res, torch_original.Tensor):
                                # `.to("cpu")` results and identity-returning calls are
                                # treated as extra inputs rather than produced tensors.
                                # NOTE(review): pytorch_kwargs["device"] will KeyError if
                                # kwargs are non-empty but lack "device" — confirm callers.
                                if (hasattr(pytorch, "__name__")
                                        and pytorch.__name__ == "to" and
                                    ((len(pytorch_args) > 0
                                      and pytorch_args[0] == "cpu") or
                                     (len(pytorch_kwargs) > 0
                                      and pytorch_kwargs["device"] == "cpu"))):
                                    extra_input_tensor.add(pytorch_res)
                                elif (len(pytorch_args) > 0 and isinstance(
                                        pytorch_args[0], torch_original.Tensor)
                                      and id(
                                          pytorch_args[0]) == id(pytorch_res)):
                                    extra_input_tensor.add(pytorch_res)
                                else:
                                    call_tensor_id.append(id(pytorch_res))

                        except Exception as e:
                            # When aligning exceptions, try OneFlow too: if both fail the
                            # failure is "supported by neither"; if only PyTorch fails,
                            # warn that the implementations may diverge.
                            if align_exception:
                                try:
                                    oneflow_res = oneflow(
                                        *oneflow_args, **oneflow_kwargs)
                                except Exception as ee:
                                    raise BothDoNotSupportError(e,
                                                                ee) from None
                                print(
                                    "PyTorch has an error but OneFlow is ok, maybe you should check your implementation to align with PyTorch."
                                )
                            raise PyTorchDoesNotSupportError(e)

                        if name in postulate:
                            # Postulated APIs: trust the PyTorch result and convert it.
                            oneflow_res = torch_tensor_to_flow(pytorch_res)
                        else:
                            if testing_graph:
                                # Snapshot the OneFlow arguments before the eager call so
                                # the graph run sees pristine inputs (tensors are
                                # cloned/detached, everything else deep-copied).
                                graph_args = []
                                for arg in oneflow_args:
                                    if flow.is_tensor(arg):
                                        copy_arg = arg.clone().detach()
                                    else:
                                        copy_arg = copy.deepcopy(arg)
                                    graph_args.append(copy_arg)
                                graph_kwargs = {}
                                for key, value in oneflow_kwargs.items():
                                    if flow.is_tensor(value):
                                        graph_kwargs[key] = value.clone(
                                        ).detach()
                                    else:
                                        graph_kwargs[key] = copy.deepcopy(
                                            value)

                            if isinstance(oneflow,
                                          flow.nn.Module) and testing_graph:
                                # Deep-copy the module for the graph run and move it to
                                # the device of the (last) tensor argument when local.
                                graph_train_oneflow = copy.deepcopy(oneflow)
                                if not is_consistent():
                                    arg_device_type = "cpu"
                                    for arg in oneflow_args:
                                        if flow.is_tensor(arg):
                                            arg_device_type = arg.device.type
                                    graph_train_oneflow = graph_train_oneflow.to(
                                        arg_device_type)

                            oneflow_res = oneflow(*oneflow_args,
                                                  **oneflow_kwargs)
                            if testing_graph:
                                find_check_module_func = True
                                # APIs that are never graph-tested.
                                ignore_apis_list = ["tensor", "train"]
                                test_g_res = []
                                if isinstance(oneflow, flow.nn.Module):
                                    of_sgd = flow.optim.SGD(
                                        graph_train_oneflow.parameters(),
                                        lr=0.001,
                                        momentum=0.9,
                                    )
                                    # Count real parameters to decide whether a backward
                                    # pass inside the graph makes sense.
                                    graph_train_parameters_len = 0
                                    for param in oneflow._parameters.values():
                                        if param is not None:
                                            graph_train_parameters_len += 1

                                    class TestGraphOfModule(flow.nn.Graph):
                                        def __init__(self):
                                            super().__init__()
                                            self.test_module = graph_train_oneflow
                                            if (global_backward and
                                                    graph_train_parameters_len
                                                ):
                                                self.add_optimizer(of_sgd)

                                        def build(self, *args):
                                            res = self.test_module(*args)
                                            forward_res = res
                                            # Optionally run a backward pass, but still
                                            # return the forward result for comparison.
                                            if (global_backward and
                                                    graph_train_parameters_len
                                                ):
                                                res = res.sum()
                                                res.backward()
                                            return forward_res

                                    test_g = TestGraphOfModule()
                                    if verbose:
                                        print("Run graph of module: ",
                                              repr(oneflow))
                                        test_g.debug(3)
                                    # When testing module methods, kwargs are not considered.
                                    test_g_res = test_g(*graph_args)
                                elif oneflow.__name__ in ignore_apis_list:
                                    find_check_module_func = False
                                # 1. "oneflow.nn.modules" not in oneflow.__module__: For avoid run nn.Module branch graph test, like fold op call Fold Module actually.
                                # 2. inspect.isfunction(oneflow): Compared with the ordinary flow.xxx, oneflow.nn.modules.math_ops series op exist an extra layer of python wrapper.
                                # 3. inspect.ismethod(oneflow) and "oneflow.nn.modules" in oneflow.__module__:  For op that only has Tensor.xxx method, and call oneflow.xxx actually, like masked_fill.
                                elif (("oneflow.nn.modules"
                                       not in oneflow.__module__)
                                      or inspect.isfunction(oneflow)
                                      or (inspect.ismethod(oneflow)
                                          and "oneflow.nn.modules"
                                          in oneflow.__module__)):

                                    class TestGraphOfFunctional(flow.nn.Graph):
                                        def __init__(self):
                                            super().__init__()

                                        def build(self):
                                            return oneflow(
                                                *graph_args, **graph_kwargs)

                                    try:
                                        # When the tensor on the cpu executes to to the cpu in nn.Graph, a check error will be reported.
                                        if (oneflow.__name__ == "to"
                                                or oneflow.__name__ == "_to"):
                                            if isinstance(
                                                    oneflow_res, flow.Tensor):
                                                if (oneflow_args and
                                                        oneflow_res.device.type
                                                        == oneflow_args[0]
                                                    ) or (
                                                        oneflow_kwargs and
                                                        oneflow_res.device.type
                                                        == oneflow_kwargs[
                                                            "device"]):
                                                    test_g_res = oneflow_res
                                            else:
                                                pass
                                            if verbose:
                                                print(
                                                    "Run graph of function: ",
                                                    repr(oneflow),
                                                    ", graph check is intentionally skiped.",
                                                )
                                        elif oneflow.__name__ == "Parameter":
                                            # nn.Graph donot deal with Parameter creation.
                                            test_g_res = oneflow_res
                                            if verbose:
                                                print(
                                                    "Run graph of function: ",
                                                    repr(oneflow),
                                                    ", graph check is intentionally skiped.",
                                                )
                                        else:
                                            test_g = TestGraphOfFunctional()
                                            if verbose:
                                                print(
                                                    "Run graph of function: ",
                                                    repr(oneflow),
                                                )
                                                test_g.debug(3)
                                            test_g_res = test_g()
                                    except Exception as e:
                                        print_note_fake_program()
                                        raise OneFlowGraphBuildOrRunError(e)
                                if find_check_module_func:
                                    # Compare eager vs. graph output (element-wise for tuples).
                                    if isinstance(test_g_res, tuple):
                                        for _, g_res in enumerate(test_g_res):
                                            check_eager_graph_tensor(
                                                oneflow_res, g_res)
                                    else:
                                        check_eager_graph_tensor(
                                            oneflow_res, test_g_res)

                        return GetDualObject("unused", pytorch_res,
                                             oneflow_res)
Example #7
0
def oneflow_eager_run_with_graph_check(oneflow, oneflow_args, oneflow_kwargs,
                                       testing_graph, verbose, *args):
    """Run *oneflow* eagerly and, optionally, under nn.Graph, checking both agree.

    Executes the callable (module or function) eagerly via
    ``get_oneflow_eager_res``. When ``testing_graph`` is true it also builds
    the matching graph (module graph via ``get_module_graph_test``, functional
    graph via ``get_functional_graph_res``) on pristine argument copies and
    compares the graph output against the eager output with
    ``check_eager_graph_tensor``. Returns the eager result.
    """
    if testing_graph:
        # Copy arguments up-front so the graph run sees inputs untouched by
        # the eager call.
        graph_args, graph_kwargs = get_args_copy(oneflow_args, oneflow_kwargs)

        if isinstance(oneflow, flow.nn.Module):
            # Deep-copy the module for the graph run; when local (not global),
            # move it to the device of the (last) tensor argument.
            graph_train_oneflow = copy.deepcopy(oneflow)
            if not is_global():
                arg_device_type = "cpu"
                for arg in oneflow_args:
                    if flow.is_tensor(arg):
                        arg_device_type = arg.device.type
                graph_train_oneflow = graph_train_oneflow.to(arg_device_type)

        else:
            graph_functional_oneflow = copy.deepcopy(oneflow)

    oneflow_res = get_oneflow_eager_res(oneflow, oneflow_args, oneflow_kwargs,
                                        verbose)
    if testing_graph:
        if verbose:
            print(
                "After running eager module or functional: ",
                repr(oneflow),
            )
        find_check_module_func = True
        # APIs that are never graph-tested.
        ignore_apis_list = ["tensor", "train"]
        test_g_res = []
        if isinstance(oneflow, flow.nn.Module):
            test_g = get_module_graph_test(graph_train_oneflow, oneflow, *args)
            if verbose:
                print("Run graph of module: ", repr(oneflow))
                test_g.debug(2)
            # When testing module methods, kwargs are not considered.
            test_g_res = test_g(*graph_args)
            if verbose:
                print(
                    "The result after running graph module: ",
                    test_g_res,
                )
        elif oneflow.__name__ in ignore_apis_list:
            find_check_module_func = False
        # 1. "oneflow.nn.modules" not in oneflow.__module__: For avoid run nn.Module branch graph test, like fold op call Fold Module actually.
        # 2. inspect.isfunction(oneflow): Compared with the ordinary flow.xxx, oneflow.nn.modules.math_ops series op exist an extra layer of python wrapper.
        # 3. inspect.ismethod(oneflow) and "oneflow.nn.modules" in oneflow.__module__:  For op that only has Tensor.xxx method, and call oneflow.xxx actually, like masked_fill.
        elif ((oneflow.__module__ is not None and
               ("oneflow.nn.modules" not in oneflow.__module__))
              or inspect.isfunction(oneflow)
              or (inspect.ismethod(oneflow)
                  and "oneflow.nn.modules" in oneflow.__module__)):

            test_g_res = get_functional_graph_res(
                graph_functional_oneflow,
                oneflow,
                oneflow_res,
                oneflow_args,
                oneflow_kwargs,
                verbose,
                *graph_args,
                **graph_kwargs,
            )
        if find_check_module_func:
            # Compare eager vs. graph output (element-wise for tuples).
            if isinstance(test_g_res, tuple):
                for _, g_res in enumerate(test_g_res):
                    check_eager_graph_tensor(oneflow_res, g_res)
            else:
                check_eager_graph_tensor(oneflow_res, test_g_res)
    return oneflow_res