Example #1
    def show_leaks(self, output: str = 'counts', fail: bool = True):
        """Show all resources/memory leaks in the current facade.  First, this
        deallocates the facade, then prints any lingering objects using
        :class:`~zensols.persist.Deallocatable`.

        **Important**: :obj:`allocation_tracking` must be set to ``True`` for
        this to work.

        :param output: one of ``stack``, ``counts``, or ``tensors``

        :param fail: if ``True``, raise an exception if there are any
                     un-deallocated references found

        """
        if self._facade is None:
            raise DeepLearnError('No facade created yet')
        if self.allocation_tracking:
            self._facade.deallocate()
            if output == 'counts':
                Deallocatable._print_undeallocated(only_counts=True, fail=fail)
            elif output == 'stack':
                Deallocatable._print_undeallocated(include_stack=True, fail=fail)
            elif output == 'tensors':
                TorchConfig.write_in_memory_tensors()
            else:
                raise DeepLearnError(f'Unknown output type: {output}')
            self._facade = None
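
A minimal sketch of the tracking cycle that ``show_leaks`` wraps, using only the ``Deallocatable`` calls shown in these examples; the ``LeakyHolder`` class is hypothetical and exists only to illustrate an object that is never deallocated:

from zensols.persist import Deallocatable

class LeakyHolder(Deallocatable):
    """Hypothetical holder used only to demonstrate allocation tracking."""
    def __init__(self):
        super().__init__()
        self.buf = list(range(1000))

    def deallocate(self):
        # release held references before recording the deallocation
        del self.buf
        super().deallocate()

# must be enabled before objects are created, as in the examples above
Deallocatable.ALLOCATION_TRACKING = True
held = LeakyHolder()
freed = LeakyHolder()
freed.deallocate()
# reports `held` (never deallocated); `freed` does not appear
Deallocatable._print_undeallocated(only_counts=True, fail=False)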
Example #2
 def deallocate(self):
     with time('deallocated attribute', logging.DEBUG):
         if self.state in ('d', 't'):
             attrs = self.attributes
             # drop each reference, then clear the attribute dict itself
             for arr in tuple(attrs.values()):
                 del arr
             attrs.clear()
             del attrs
         self._decoded_state.deallocate()
     if hasattr(self, 'batch_stash'):
         del self.batch_stash
     if hasattr(self, 'data_point_ids'):
         del self.data_point_ids
     if hasattr(self, '_data_points'):
         Deallocatable._try_deallocate(self._data_points)
         del self._data_points
     with time('deallocated feature context', logging.DEBUG):
         if hasattr(self, '_feature_context_inst') and \
            self._feature_context_inst is not None:
             for ctx in self._feature_context_inst.values():
                 self._try_deallocate(ctx)
             self._feature_context_inst.clear()
             del self._feature_context_inst
     self.state = 'k'
     super().deallocate()
     logger.debug(f'deallocated batch: {self.id}')
Example #3
def find_leaks():
    #logging.getLogger('zensols.persist.annotation.Deallocatable').setLevel(logging.DEBUG)
    Deallocatable.ALLOCATION_TRACKING = True
    #Deallocatable.PRINT_TRACE = True
    fac = create_facade()
    executor = fac.executor

    if 0:  # flip to 1 to dump the stash class graph while debugging
        from zensols.config import ClassExplorer
        from zensols.persist import Stash
        ce = ClassExplorer({Stash})
        ce.write(fac.executor.dataset_stash)

    executor.train()
    executor.test()
    fac.deallocate()
    Deallocatable._print_undeallocated(True)
Example #4
 def __post_init__(self, decoded_attributes):
     super().__post_init__()
     Deallocatable.__init__(self)
     # TODO: this class conflates key split and delegate stash functionality
     # in the `split_stash_container`.  An instance of this type serves the
     # purpose, but it need not be.  Instead it just needs to be both a
     # SplitKeyContainer and a Stash.  This probably should be split out in
     # to two different fields.
     cont = self.split_stash_container
     if not isinstance(cont, SplitStashContainer) \
        and (not isinstance(cont, SplitKeyContainer) or
             not isinstance(cont, Stash)):
         raise DeepLearnError('Expecting SplitStashContainer but got ' +
                              f'{self.split_stash_container.__class__}')
     self.data_point_id_sets_path.parent.mkdir(parents=True, exist_ok=True)
     self._batch_data_point_sets = PersistedWork(
         self.data_point_id_sets_path, self)
     self.priming = False
     self.decoded_attributes = decoded_attributes
     self._update_comp_stash_attribs()
Example #5
    def cleanup(self, include_cuda: bool = True, quiet: bool = False):
        """Report memory leaks, run the Python garbage collector and optionally empty
        the CUDA cache.

        :param include_cuda: if ``True`` clear the GPU cache

        :param quiet: do not report unallocated objects, regardless of the
                      setting of :obj:`allocation_tracking`

        """
        if self.allocation_tracking and not quiet:
            include_stack = (self.allocation_tracking == 'stack')
            only_counts = (self.allocation_tracking == 'counts')
            Deallocatable._print_undeallocated(include_stack, only_counts)
        self.deallocate()
        Deallocatable._deallocate_all()
        gc.collect()
        if include_cuda:
            # free up memory in the GPU
            TorchConfig.empty_cache()
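
The body of ``cleanup`` can be reproduced with the same calls used across these examples; a sketch, assuming only that ``facade`` is a deallocatable facade instance passed in by the caller (the ``manual_cleanup`` name is hypothetical):

import gc
from zensols.persist import Deallocatable
from zensols.deeplearn import TorchConfig

def manual_cleanup(facade, include_cuda: bool = True):
    # mirror of cleanup() above: deallocate, sweep remaining trackables,
    # run the garbage collector, then optionally clear the GPU cache
    facade.deallocate()
    Deallocatable._deallocate_all()
    gc.collect()
    if include_cuda:
        TorchConfig.empty_cache()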
Example #6
 def test_facade(self):
     Deallocatable.ALLOCATION_TRACKING = True
     facade = ModelFacade(self.config, progress_bar=False)
     facade.writer = None
     facade.train()
     res = facade.test()
     self.validate_results(res)
     facade.deallocate()
     path = Path('target/iris/model')
     facade = ModelFacade.load_from_path(path, progress_bar=False)
     facade.writer = None
     res = facade.test()
     self.validate_results(res)
     facade.deallocate()
     #Deallocatable._print_undeallocated(True)
     self.assertEqual(0, Deallocatable._num_deallocations())
     Deallocatable.ALLOCATION_TRACKING = False
Example #7
 def __post_init__(self):
     super().__post_init__()
     Deallocatable.__init__(self)
     if logger.isEnabledFor(logging.DEBUG):
         logger.debug(f'split stash post init: {self.dataframe_path}')
     self._dataframe = PersistedWork(self.dataframe_path, self, mkdir=True)
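
A ``PersistedWork`` created this way typically pairs with a ``persisted`` method of the same attribute name, which caches the method's return value in memory and pickles it to the given path. A hedged sketch of that pairing (the class, method, and data are hypothetical):

from pathlib import Path
from zensols.persist import persisted, PersistedWork

class FrameHolder(object):
    """Hypothetical class illustrating the PersistedWork/persisted pairing."""
    def __init__(self, path: Path):
        # cache results in memory and persist them to `path` on first use
        self._frame = PersistedWork(path, self, mkdir=True)

    @persisted('_frame')
    def get_frame(self):
        # expensive computation; runs once, then loads from the cache
        return {'rows': 3}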
Example #8
def end():
    print('deallocations:')
    Deallocatable._print_undeallocated(True)
Example #9
def end_dealloc():
    print('deallocations:', Deallocatable._num_deallocations())
    Deallocatable._print_undeallocated(True)
Example #10
 def deallocate(self):
     super().deallocate()
     Deallocatable._try_deallocate(self.decoder)
     self.recur.deallocate()
     self.recur_settings.deallocate()
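
This illustrates the composite pattern used throughout these examples: each ``deallocate`` releases its own children and calls ``super().deallocate()``, while ``_try_deallocate`` guards attributes that may not be deallocatable. A minimal sketch with hypothetical classes:

from zensols.persist import Deallocatable

class Child(Deallocatable):
    def deallocate(self):
        super().deallocate()

class Parent(Deallocatable):
    def __init__(self):
        super().__init__()
        self.child = Child()
        self.plain = object()

    def deallocate(self):
        super().deallocate()
        # cascade to owned deallocatables
        self.child.deallocate()
        # a no-op for objects that do not support deallocation
        Deallocatable._try_deallocate(self.plain)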