Example #1
def _lazy_init():
    global _initialized, _cudart, _original_pid, _queued_calls
    if _initialized:
        return
    if _in_bad_fork:
        from sys import version_info
        if version_info < (3, 4):
            msg = ("To use CUDA with multiprocessing, you must use Python "
                   "3.4+ and the 'spawn' start method")
        else:
            msg = ("To use CUDA with multiprocessing, you must use the "
                   "'spawn' start method")
        raise RuntimeError("Cannot re-initialize CUDA in forked subprocess. " +
                           msg)
    _check_driver()
    torch._C._cuda_init()
    _cudart = _load_cudart()
    _cudart.cudaGetErrorName.restype = ctypes.c_char_p
    _cudart.cudaGetErrorString.restype = ctypes.c_char_p
    _original_pid = os.getpid()
    _initialized = True
    # Important to do this after _initialized, since some queued calls
    # may themselves call _lazy_init()
    for queued_call, orig_traceback in _queued_calls:
        try:
            queued_call()
        except Exception as e:
            msg = (
                "CUDA call failed lazily at initialization with error: {}\n\n"
                "CUDA call was originally invoked at:\n\n{}").format(
                    str(e), orig_traceback)
            raise_from(DeferredCudaCallError(msg), e)
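
Every example on this page uses raise_from, the six-style exception-chaining helper (PyTorch vendored it as torch._six.raise_from). On Python 3 it is equivalent to the raise ... from ... statement; a minimal sketch of that behavior, assuming the six-style signature:

# Python 3 equivalent of raise_from(value, from_value); the six helper on
# Python 2 simply raised `value` without chaining.
def raise_from(value, from_value):
    raise value from from_value

# try:
#     1 / 0
# except ZeroDivisionError as e:
#     raise_from(RuntimeError("wrapped"), e)  # RuntimeError with __cause__ set
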
Example #2
def __call__(cls, *args, **kwargs):
    # NOTE: this is called whenever an instance of this class is created
    # The super call below will call __new__ and __init__, and we will
    # patch things later.
    try:
        obj = super(CompiledModuleMeta, cls).__call__(*args, **kwargs)
    except TypeError as e:
        # If this fails here, the user probably didn't use this as a class decorator
        if "super" in str(e):
            raise_from(
                TypeError(
                    "torch.jit.compile must be used as a class decorator; "
                    "using it on an already defined class is not valid."
                    "\n\nOriginal error: {}".format(str(e))),
                e)
        else:
            raise

    compiled_fn = torch._C.CompiledFunction(
        nderivs, optimize, obj.forward, arg.__name__)
    compiled_fn.enabled = enabled
    obj.compiled_fn = compiled_fn
    obj.forward = lambda *args: compiled_fn(args, list(obj.parameters()))
    obj.has_trace_for = lambda *args: compiled_fn.has_trace_for(
        args, list(obj.parameters()))
    return obj
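
The metaclass above backs the old torch.jit.compile class-decorator API; nderivs, optimize, enabled, and arg are closure variables from the surrounding decorator. A hedged sketch of the call site it implies (the keyword names are an assumption inferred from those closure variables, not a documented signature):

# Hypothetical usage of the since-removed torch.jit.compile class decorator:
# @torch.jit.compile(nderivs=1, optimize=True, enabled=True)
# class MyModule(torch.nn.Module):
#     def forward(self, x):
#         return x * 2
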
Example #3
def _lazy_init():
    global _initialized, _cudart, _original_pid, _queued_calls
    if _initialized:
        return
    if _in_bad_fork:
        from sys import version_info
        if version_info < (3, 4):
            msg = ("To use CUDA with multiprocessing, you must use Python "
                   "3.4+ and the 'spawn' start method")
        else:
            msg = ("To use CUDA with multiprocessing, you must use the "
                   "'spawn' start method")
        raise RuntimeError(
            "Cannot re-initialize CUDA in forked subprocess. " + msg)
    _check_driver()
    torch._C._cuda_init()
    _cudart = _load_cudart()
    _cudart.cudaGetErrorName.restype = ctypes.c_char_p
    _cudart.cudaGetErrorString.restype = ctypes.c_char_p
    _original_pid = os.getpid()
    _initialized = True
    # Important to do this after _initialized, since some queued calls
    # may themselves call _lazy_init()
    for queued_call, orig_traceback in _queued_calls:
        try:
            queued_call()
        except Exception as e:
            msg = ("CUDA call failed lazily at initialization with error: {}\n\n"
                   "CUDA call was originally invoked at:\n\n{}").format(str(e), orig_traceback)
            raise_from(DeferredCudaCallError(msg), e)
Example #4
def exception_catcher(self, batch):
    try:
        yield
    except Exception as e:
        wrapper = EngineException(
            epoch=self._epochs,
            iteration=self._iterations,
            batch=self.batch_id_fn(batch) if self.batch_id_fn else batch,
            cause=e,
        )
        raise_from(wrapper, e)
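
The bare yield means exception_catcher can only work as a generator-based context manager, so in the full source it is presumably decorated with contextlib.contextmanager. A self-contained sketch of the same wrap-and-reraise pattern (the names catching and process are illustrative):

from contextlib import contextmanager

@contextmanager
def catching(batch):
    try:
        yield
    except Exception as e:
        # Attach the failing batch to the error, keeping `e` as the cause.
        raise RuntimeError("failed on batch {!r}".format(batch)) from e

# with catching(batch):
#     process(batch)
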
Example #5
def _parse(self, value, function, fmt):
    """
    Parse a string into a value, and format a nice ValueError if it fails.
    Returns `function(value)`.
    Any `ValueError` raised is caught and a new `ValueError` is raised
    with message `fmt.format(e)`, where `e` is the caught `ValueError`.
    """
    try:
        return function(value)
    except ValueError as e:
        raise_from(ValueError(fmt.format(e)), None)
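
Passing None as the cause makes raise_from suppress the implicit exception context, so callers see only the formatted message. A standalone sketch of the same behavior in Python 3 syntax (the helper name parse is illustrative):

def parse(value, function, fmt):
    try:
        return function(value)
    except ValueError as e:
        raise ValueError(fmt.format(e)) from None

# parse('12a', int, 'line 3: malformed x1: {}')
# -> ValueError: line 3: malformed x1: invalid literal for int() with base 10: '12a'
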
Example #6
    def _read_annotations(self, csv_reader, classes):
        result = {}
        for line, row in enumerate(csv_reader):
            line += 1

            try:
                img_file, x1, y1, x2, y2, class_name = row[:6]
            except ValueError:
                raise_from(
                    ValueError(
                        'line {}: format should be \'img_file,x1,y1,x2,y2,class_name\' or \'img_file,,,,,\''
                        .format(line)), None)

            if img_file not in result:
                result[img_file] = []

            # If a row contains only an image path, it's an image without annotations.
            if (x1, y1, x2, y2, class_name) == ('', '', '', '', ''):
                continue

            x1 = self._parse(x1, int,
                             'line {}: malformed x1: {{}}'.format(line))
            y1 = self._parse(y1, int,
                             'line {}: malformed y1: {{}}'.format(line))
            x2 = self._parse(x2, int,
                             'line {}: malformed x2: {{}}'.format(line))
            y2 = self._parse(y2, int,
                             'line {}: malformed y2: {{}}'.format(line))

            # Check that the bounding box is valid.
            if x2 <= x1:
                raise ValueError(
                    'line {}: x2 ({}) must be higher than x1 ({})'.format(
                        line, x2, x1))
            if y2 <= y1:
                raise ValueError(
                    'line {}: y2 ({}) must be higher than y1 ({})'.format(
                        line, y2, y1))

            # Check that the class name is one of the known classes.
            if class_name not in classes:
                raise ValueError(
                    'line {}: unknown class name: \'{}\' (classes: {})'.format(
                        line, class_name, classes))

            result[img_file].append({
                'x1': x1,
                'x2': x2,
                'y1': y1,
                'y2': y2,
                'class': class_name
            })
        return result
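
The accepted CSV layout follows from the checks above: six columns per row, with the last five left empty for an image that has no annotations. A hedged illustration of valid input (file names and the class mapping are invented):

import csv, io

rows = io.StringIO(
    "img_001.jpg,10,20,110,220,cat\n"   # one annotation for img_001.jpg
    "img_002.jpg,,,,,\n"                # img_002.jpg has no annotations
)
# result = self._read_annotations(csv.reader(rows), {'cat': 0})
# -> {'img_001.jpg': [{'x1': 10, ...}], 'img_002.jpg': []}
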
Example #7
def _lazy_init():
    global _initialized, _queued_calls
    if is_initialized() or hasattr(_tls, 'is_initializing'):
        return
    with _initialization_lock:
        # We be double-checked locking, boys!  This is OK because
        # the above test was GIL protected anyway.  The inner test
        # is for when a thread blocked on some other thread which was
        # doing the initialization; when they get the lock, they will
        # find there is nothing left to do.
        if is_initialized():
            return
        # It is important to prevent other threads from entering _lazy_init
        # immediately, while we are still guaranteed to have the GIL, because some
        # of the C calls we make below will release the GIL
        if _is_in_bad_fork():
            from sys import version_info
            if version_info < (3, 4):
                msg = ("To use CUDA with multiprocessing, you must use Python "
                       "3.4+ and the 'spawn' start method")
            else:
                msg = ("To use CUDA with multiprocessing, you must use the "
                       "'spawn' start method")
            raise RuntimeError(
                "Cannot re-initialize CUDA in forked subprocess. " + msg)
        if not hasattr(torch._C, '_cuda_getDeviceCount'):
            raise AssertionError("Torch not compiled with CUDA enabled")
        if _cudart is None:
            raise AssertionError(
                "libcudart functions unavailable. It looks like you have a broken build?"
            )
        # This function throws if there's a driver initialization error, no GPUs
        # are found or any other error occurs
        torch._C._cuda_init()
        # Some of the queued calls may reentrantly call _lazy_init();
        # we need to just return without initializing in that case.
        # However, we must not let any *other* threads in!
        _tls.is_initializing = True
        try:
            for queued_call, orig_traceback in _queued_calls:
                try:
                    queued_call()
                except Exception as e:
                    msg = (
                        "CUDA call failed lazily at initialization with error: {}\n\n"
                        "CUDA call was originally invoked at:\n\n{}").format(
                            str(e), orig_traceback)
                    raise_from(DeferredCudaCallError(msg), e)
        finally:
            delattr(_tls, 'is_initializing')
        _initialized = True
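
The shape above is double-checked locking: a lock-free fast path, then a second check under the lock so threads that blocked on the initializer find nothing left to do. A minimal generic sketch of the pattern (names are illustrative, not from torch):

import threading

_lock = threading.Lock()
_ready = False

def lazy_setup():
    global _ready
    if _ready:            # fast path: no lock once initialized
        return
    with _lock:
        if _ready:        # re-check: another thread may have won the race
            return
        # ... one-time initialization goes here ...
        _ready = True
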
Example #8
def _lazy_init():
    global _initialized, _cudart, _original_pid, _queued_calls
    if _initialized or hasattr(_tls, 'is_initializing'):
        return
    with _initialization_lock:
        # We be double-checked locking, boys!  This is OK because
        # the above test was GIL protected anyway.  The inner test
        # is for when a thread blocked on some other thread which was
        # doing the initialization; when they get the lock, they will
        # find there is nothing left to do.
        if _initialized:
            return
        # It is important to prevent other threads from entering _lazy_init
        # immediately, while we are still guaranteed to have the GIL, because some
        # of the C calls we make below will release the GIL
        if _in_bad_fork:
            from sys import version_info
            if version_info < (3, 4):
                msg = ("To use CUDA with multiprocessing, you must use Python "
                       "3.4+ and the 'spawn' start method")
            else:
                msg = ("To use CUDA with multiprocessing, you must use the "
                       "'spawn' start method")
            raise RuntimeError(
                "Cannot re-initialize CUDA in forked subprocess. " + msg)
        _check_driver()
        torch._C._cuda_init()
        _cudart = _load_cudart()
        _cudart.cudaGetErrorName.restype = ctypes.c_char_p
        _cudart.cudaGetErrorString.restype = ctypes.c_char_p
        _original_pid = os.getpid()
        # Some of the queued calls may reentrantly call _lazy_init();
        # we need to just return without initializing in that case.
        # However, we must not let any *other* threads in!
        _tls.is_initializing = True
        try:
            for queued_call, orig_traceback in _queued_calls:
                try:
                    queued_call()
                except Exception as e:
                    msg = (
                        "CUDA call failed lazily at initialization with error: {}\n\n"
                        "CUDA call was originally invoked at:\n\n{}").format(
                            str(e), orig_traceback)
                    raise_from(DeferredCudaCallError(msg), e)
        finally:
            delattr(_tls, 'is_initializing')
        _initialized = True
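
Setting restype matters because ctypes assumes every foreign function returns a C int by default; declaring c_char_p makes cudaGetErrorName/cudaGetErrorString come back as Python byte strings. A standalone illustration against libc rather than CUDA (strerror is just a convenient C function returning a string; this sketch assumes a POSIX system):

import ctypes
import ctypes.util

libc = ctypes.CDLL(ctypes.util.find_library("c"))
libc.strerror.restype = ctypes.c_char_p   # default restype would be int
libc.strerror.argtypes = [ctypes.c_int]
print(libc.strerror(2))                   # b'No such file or directory'
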
Example #9
def __new_init(self, *args, **kwargs):
    try:
        # __old_init is assumed to handle super call
        self.__old_init(*args, **kwargs)
    except TypeError as e:
        # If this fails here, the user probably didn't use this as a class
        # decorator
        if "super" in str(e):
            raise_from(TypeError("torch.jit.compile must be used as a class decorator; "
                                 "using it on an already defined class is not valid."
                                 "\n\nOriginal error: {}".format(str(e))), e)
        else:
            raise
    model_name = self.__model_name if self.__model_name else type(self).__name__
    self.__name = "jit_{}_{}".format(model_name, _CompiledMixin.__next_id)
    _CompiledMixin.__next_id += 1
    self.__ktrace_cache = {}
    self.__next_ktrace_id = 0
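
Example #9 replaces a class's __init__ with a wrapper that delegates to the saved original and then patches extra bookkeeping on; the double-underscore attributes are name-mangled per class, which keeps the mixin's state from colliding with user attributes. A minimal self-contained sketch of the wrap-and-delegate idea (all names are hypothetical):

def instrument(cls):
    old_init = cls.__init__

    def new_init(self, *args, **kwargs):
        old_init(self, *args, **kwargs)  # run the original initializer first
        self._instrumented = True        # then bolt extra state onto the instance

    cls.__init__ = new_init
    return cls

@instrument
class Model:
    def __init__(self, n):
        self.n = n

assert Model(3)._instrumented
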
Example #10
def __init__(self, *args, **kwargs):
    torch._C.CompiledFunction.__init__(self,
                                       nderivs, optimize, enabled,
                                       self.forward,
                                       arg.__name__)
    try:
        old_init(self, *args, **kwargs)
    except TypeError as e:
        # If this fails here, the user probably didn't use this as a class decorator
        if "super" in str(e):
            raise_from(TypeError("torch.jit.compile must be used as a class decorator; "
                                 "using it on an already defined class is not valid."
                                 "\n\nOriginal error: {}".format(str(e))), e)
        else:
            raise
    # NOTE: This can't be done in CompiledFunction constructor,
    # because self.parameters() isn't well defined by then
    # (Module constructor hasn't run yet).
    self.set_captured_vars(list(self.parameters()))
Example #11
def __call__(cls, *args, **kwargs):
    # NOTE: this is called whenever an instance of this class is created
    # The super call below will call __new__ and __init__, and we will
    # patch things later.
    try:
        obj = super(CompiledModuleMeta, cls).__call__(*args, **kwargs)
    except TypeError as e:
        # If this fails here, the user probably didn't use this as a class decorator
        if "super" in str(e):
            raise_from(TypeError("torch.jit.compile must be used as a class decorator; "
                                 "using it on an already defined class is not valid."
                                 "\n\nOriginal error: {}".format(str(e))), e)
        else:
            raise

    compiled_fn = torch._C.CompiledFunction(nderivs, optimize,
                                            obj.forward,
                                            arg.__name__)
    compiled_fn.enabled = enabled
    obj.compiled_fn = compiled_fn
    obj.forward = lambda *args: compiled_fn(args, list(obj.parameters()))
    obj.has_trace_for = lambda *args: compiled_fn.has_trace_for(args, list(obj.parameters()))
    return obj
Example #12
def _lazy_init():
    global _initialized, _original_pid, _queued_calls
    if _initialized or hasattr(_tls, 'is_initializing'):
        return
    with _initialization_lock:
        # We be double-checked locking, boys!  This is OK because
        # the above test was GIL protected anyway.  The inner test
        # is for when a thread blocked on some other thread which was
        # doing the initialization; when they get the lock, they will
        # find there is nothing left to do.
        if _initialized:
            return
        # It is important to prevent other threads from entering _lazy_init
        # immediately, while we are still guaranteed to have the GIL, because some
        # of the C calls we make below will release the GIL
        if _in_bad_fork:
            raise RuntimeError(
                "Cannot re-initialize HammerBlade in forked subprocess.")
        # TODO: enable this
        # _check_driver()
        torch._C._hammerblade_init()
        _original_pid = os.getpid()
        # Some of the queued calls may reentrantly call _lazy_init();
        # we need to just return without initializing in that case.
        # However, we must not let any *other* threads in!
        _tls.is_initializing = True
        try:
            for queued_call, orig_traceback in _queued_calls:
                try:
                    queued_call()
                except Exception as e:
                    msg = ("HammerBlade call failed lazily at initialization with error: {}\n\n"
                           "HammerBlade call was originally invoked at:\n\n{}").format(str(e), orig_traceback)
                    raise_from(DeferredHammerBladeCallError(msg), e)
        finally:
            delattr(_tls, 'is_initializing')
        _initialized = True
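
Across these _lazy_init variants, _queued_calls holds (callable, traceback-string) pairs recorded before the backend was initialized, so a failure during replay can point at where the call was originally made. A minimal sketch of that deferral bookkeeping (function and variable names are illustrative):

import traceback

_queued = []      # (callable, formatted stack captured at enqueue time)
_ready = False

def call_or_queue(fn):
    if _ready:
        fn()
    else:
        # Remember both the call and where it came from, for later diagnostics.
        _queued.append((fn, "".join(traceback.format_stack())))

def replay_queued():
    for fn, origin in _queued:
        try:
            fn()
        except Exception as e:
            raise RuntimeError("deferred call failed: {}\n"
                               "originally invoked at:\n{}".format(e, origin)) from e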