Example #1
import contextlib
import torch.multiprocessing as mp

@contextlib.contextmanager
def fs_sharing():
    """Temporarily switch the tensor sharing strategy to 'file_system'."""
    prev_strategy = mp.get_sharing_strategy()
    mp.set_sharing_strategy('file_system')
    try:
        yield
    finally:
        # Restore whatever strategy was active before entering the block.
        mp.set_sharing_strategy(prev_strategy)
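A minimal usage sketch for the helper above; it assumes fs_sharing() from Example #1 is in scope and simply shares one tensor inside the block:

import torch

# Hypothetical usage; assumes fs_sharing() from Example #1 is importable here.
with fs_sharing():
    t = torch.zeros(8)
    t.share_memory_()  # backed by a named shared-memory file while the block is active
# On exit the previous strategy (usually 'file_descriptor' on Linux) is restored.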
Example #2
import contextlib

# torch.multiprocessing (not the stdlib module) provides the sharing-strategy API.
import torch.multiprocessing as multiprocessing

@contextlib.contextmanager
def fs_sharing():
    prev_strategy = multiprocessing.get_sharing_strategy()
    multiprocessing.set_sharing_strategy('file_system')
    try:
        yield
    finally:
        multiprocessing.set_sharing_strategy(prev_strategy)
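Not every platform supports both strategies, so it can help to inspect what is available before switching; a small sketch using only the public torch.multiprocessing API (not taken from the examples above):

import torch.multiprocessing as mp

# Typically {'file_descriptor', 'file_system'} on Linux and {'file_system'}
# on macOS and Windows; 'file_system' is always among the options.
print(mp.get_all_sharing_strategies())
print(mp.get_sharing_strategy())  # the strategy currently in effect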
Example #3
    def has_shm_files(self, wait=True):
        if not HAS_SHM_FILES:
            return False
        result = self._has_shm_files()
        if result and mp.get_sharing_strategy() == 'file_system' and wait:
            # Shared-memory files left by the 'file_system' strategy are cleaned
            # up asynchronously, so wait briefly and check again before reporting.
            time.sleep(0.5)
            return self._has_shm_files()
        return result
Example #4
    @classmethod
    def _new_shared(cls, size):
        """Creates a new storage in shared memory with the same data type"""
        from torch.multiprocessing import get_sharing_strategy
        if cls.is_cuda:
            # CUDA storages are shared via CUDA IPC, not POSIX shared memory.
            return cls(size)
        elif get_sharing_strategy() == 'file_system':
            return cls._new_using_filename(size)
        else:
            return cls._new_using_fd(size)
Example #5
    @classmethod
    def _new_shared(cls, size, *, device='cpu'):
        """Creates a new storage in shared memory with the same data type"""
        from torch.multiprocessing import get_sharing_strategy
        device = torch.device(device)
        if device.type == 'cuda':
            # CUDA storages are shared via CUDA IPC and need no special allocation.
            return cls(size, device=device)
        elif get_sharing_strategy() == 'file_system':
            return cls._new_using_filename_cpu(size)
        else:
            return cls._new_using_fd_cpu(size)
Example #6
    def share_memory_(self):
        """Moves the storage to shared memory.

        This is a no-op for storages already in shared memory and for CUDA
        storages, which do not need to be moved for sharing across processes.
        Storages in shared memory cannot be resized.

        Returns: self
        """
        from torch.multiprocessing import get_sharing_strategy
        if self.is_cuda:
            pass  # CUDA doesn't use POSIX shared memory
        elif get_sharing_strategy() == 'file_system':
            self._share_filename_cpu_()
        else:
            self._share_fd_cpu_()
        return self
Example #7
    def share_memory_(self):
        """Moves the storage to shared memory.

        This is a no-op for storages already in shared memory and for CUDA
        storages, which do not need to be moved for sharing across processes.
        Storages in shared memory cannot be resized.

        Returns: self
        """
        from torch.multiprocessing import get_sharing_strategy
        if self.is_cuda:
            pass  # CUDA doesn't use POSIX shared memory
        elif get_sharing_strategy() == 'file_system':
            self._share_filename_()
        else:
            self._share_fd_()
        return self
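A minimal end-to-end sketch of how share_memory_ above is usually reached from user code: the parent moves a tensor into shared memory and a child process updates it in place (illustrative only; names such as worker are not from the examples above):

import torch
import torch.multiprocessing as mp


def worker(t):
    # The child sees the same underlying storage, so this in-place update
    # is visible to the parent after the process joins.
    t += 1


if __name__ == '__main__':
    t = torch.zeros(3)
    t.share_memory_()  # moves the tensor's storage into shared memory
    p = mp.Process(target=worker, args=(t,))
    p.start()
    p.join()
    print(t)  # tensor([1., 1., 1.])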
Example #8
    @classmethod
    def init(cls: Type, spawn_multiproc: str = 'spawn',
             seed_kwargs: Dict[str, Any] = {}):
        """Initialize the PyTorch framework.  This includes:

          * Configuration of PyTorch multiprocessing so subprocesses can access
            the GPU, and

          * Setting the random seed state.

        This needs to be initialized at the very beginning of your program.

        Example::

            def main():
                from zensols.deeplearn import TorchConfig
                TorchConfig.init()

        **Note**: this method is separate from :meth:`set_random_seed` because
        that method is called by the framework to reset the seed after a model
        is unpickled.

        :see: :mod:`torch.multiprocessing`

        :see: :meth:`set_random_seed`

        """
        if cls._RANDOM_SEED is None:
            cls.set_random_seed(**seed_kwargs)
            try:
                cur = mp.get_sharing_strategy()
                if logger.isEnabledFor(logging.INFO):
                    logger.info('invoking pool with torch spawn ' +
                                f'method: {spawn_multiproc}, current: {cur}')
                if spawn_multiproc:
                    mp.set_start_method('spawn')
                else:
                    mp.set_start_method('forkserver', force=True)
            except RuntimeError as e:
                msg = str(e)
                if msg != 'context has already been set':
                    logger.warning(f'could not invoke spawn on pool: {e}')
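The try/except in init exists because Python's set_start_method may only be called once per process unless force=True is passed; a short sketch, independent of the zensols code above, showing the RuntimeError it guards against:

import torch.multiprocessing as mp

if __name__ == '__main__':
    mp.set_start_method('spawn')
    try:
        # A second call without force=True fails once a start method is set.
        mp.set_start_method('spawn')
    except RuntimeError as e:
        print(e)  # "context has already been set"
    # force=True, as init() uses for 'forkserver', bypasses that check.
    mp.set_start_method('spawn', force=True)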