Example #1
    def __init__(self, file, fps, resolution):
        self.file = bd.process_path(file)
        self.extension = f'.{file.split(".")[-1]}'
        self.writer = None
        self.context_count = 0
        self.fourcc = Video.LDR_VID_EXT[self.extension]['fourcc']
        self.fps = float(fps)
        self.resolution = resolution
        directory = os.path.dirname(file)
        bd.process_path(directory, create=True)
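Every example in this section funnels paths through bd.process_path before touching the filesystem. The helper itself is not shown; below is a minimal sketch of the behavior implied by its call sites (normalization, plus directory creation when create=True), for orientation only:

import os

def process_path(path, create=False):
    # Hypothetical stand-in for bd.process_path, inferred from the call
    # sites above: normalize the path and optionally create the directory.
    path = os.path.abspath(os.path.expanduser(path))
    if create:
        os.makedirs(path, exist_ok=True)
    return path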
Example #2
    def __new__(cls, filename, fps=None, resolution=None):
        if not isinstance(filename, str):
            raise RuntimeError(
                f'Invalid filename, expected str type, got: {type(filename)}')
        filename = bd.process_path(filename)
        all_extensions = (list(Video.HDR_VID_EXT.keys()) +
                          list(Video.HDR_IMG_EXT.keys()) +
                          list(Video.LDR_IMG_EXT.keys()) +
                          list(Video.LDR_VID_EXT.keys()))
        extension = '.' + filename.split(".")[-1]
        if extension not in all_extensions:
            raise RuntimeError(f'Unsupported extension: {extension}')
        is_ldr_video_file = extension in Video.LDR_VID_EXT

        _id = (filename, extension)
        if _id in Video._VIDEOS:
            return Video._VIDEOS[_id]
        self = super().__new__(cls)
        Video._VIDEOS[_id] = self
        self.is_ldr_video_file = is_ldr_video_file
        self.extension = extension
        self.filename = filename
        self.dirname = os.path.dirname(filename)
        self.fps = fps
        self.resolution = resolution
        return self
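Because __new__ memoizes instances in Video._VIDEOS keyed on (filename, extension), constructing the same file twice returns the same object, and the later call's arguments never reach the cached instance. A usage sketch, assuming '.mp4' is registered in Video.LDR_VID_EXT:

a = Video('clip.mp4', fps=30)
b = Video('clip.mp4', fps=60)
assert a is b  # cached instance; b.fps stays 30 unless an __init__ reruns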
Example #3
    def __init__(self,
                 name,
                 fields=None,
                 directory=".",
                 delimiter=',',
                 resume=True):
        if fields is None:
            fields = ['value']
        _check_valid_fields(fields)
        if not name.endswith('.csv'):
            name = name + '.csv'
        self.directory = bd.process_path(directory, True)
        self.file = os.path.join(self.directory, name)
        self.fields = fields
        self.logger = logging.getLogger(name)
        self.logger.setLevel(logging.INFO)
        self.logger.propagate = False
        # Write header
        if not resume or not os.path.exists(self.file):
            with open(self.file, 'w') as f:
                f.write(delimiter.join(fields) + '\n')

        file_handler = logging.FileHandler(self.file)
        # Adding underscore to avoid clashes with reserved words from logging
        field_tmpl = delimiter.join([f'{{_{x}}}' for x in fields])

        file_handler.setFormatter(logging.Formatter(field_tmpl, style='{'))
        self.logger.addHandler(file_handler)
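The {_field} template means each row is a log record whose underscore-prefixed attributes fill the columns; Example 17 below shows the class is exposed as bd.CSVLogger and that instances are callable. A hedged usage sketch driving the underlying logger directly (the class presumably wraps this in a __call__):

log = CSVLogger('loss', fields=['epoch', 'value'], directory='./logs')
# The extra dict becomes attributes on the LogRecord, filling '{_epoch},{_value}'.
log.logger.info('', extra={'_epoch': 1, '_value': 0.25})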
Example #4
    def validate_paths(self):
        stgs = self.store['user_settings']
        paths = stgs['base_dirs']
        paths = list(set([boardom.process_path(path) for path in paths]))
        stgs['base_dirs'] = [
            path for path in paths if not is_subdir(path, paths)
        ]
Example #5
    def __init__(self, directory, db_map_size=10485760, readonly=False):
        self.readonly = readonly
        directory = bd.process_path(directory, create=True)
        self.db_dirname = os.path.join(directory, 'lmdb_data')
        self.env = lmdb.open(self.db_dirname,
                             map_size=db_map_size,
                             subdir=True,
                             readonly=self.readonly)
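A usage sketch against the standard lmdb transaction API (the wrapper's class name, Store here, is an assumption):

store = Store('./cache')
with store.env.begin(write=True) as txn:
    txn.put(b'key', b'value')
with store.env.begin() as txn:
    assert txn.get(b'key') == b'value'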
Example #6
    def __init__(
        self,
        data_root_path,
        only_color=False,
        mode='training',
        preprocess=None,
        load_fn=None,
        need_labels=True,
    ):
        super().__init__()
        modes = {
            'training': 'train_standard',
            'validation': 'val',
            'testing': 'test',
        }
        assert mode in modes
        data_root_path = bd.process_path(data_root_path)
        flist_root = os.path.join(data_root_path, mode)
        txt_file_list = 'places365_{0}{1}.txt'.format(
            modes[mode], '_color' if only_color else '')
        txt_file_list = os.path.join(data_root_path, txt_file_list)

        with open(txt_file_list) as file:
            contents = file.readlines()

        self.need_labels = need_labels

        if mode == 'testing':
            flist = [x.strip().split(' ')[0] for x in contents]
            self.file_list = [os.path.join(flist_root, x) for x in flist]
            self.need_labels = False
        elif mode == 'validation':
            flist, labels = zip(*[x.strip().split(' ') for x in contents])
            self.file_list = [os.path.join(flist_root, x) for x in flist]
            self.labels = [int(x) for x in labels]
        else:
            flist, labels = zip(*[x.strip().split(' ') for x in contents])
            self.file_list = [os.path.join(flist_root, x[1:]) for x in flist]
            self.labels = [int(x) for x in labels]

        class_name_list = os.path.join(data_root_path,
                                       'categories_places365.txt')
        with open(class_name_list) as file:
            contents = file.readlines()

        cls_ind = [x.strip().split(' ') for x in contents]
        cls_names, indices = zip(*cls_ind)
        cls_names = [x[3:] for x in cls_names]
        indices = [int(x) for x in indices]
        self.cls_to_ind = {c: l for c, l in zip(cls_names, indices)}
        self.ind_to_cls = {str(l): c for c, l in zip(cls_names, indices)}

        self.preprocess = preprocess
        self.load = bd.imread if load_fn is None else load_fn
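A usage sketch, assuming this dataset class is named Places365 and data_root_path holds the standard Places365 list files:

ds = Places365('/data/places365', mode='validation')
img = ds.load(ds.file_list[0])    # bd.imread unless load_fn was given
label = ds.labels[0]
print(ds.ind_to_cls[str(label)])  # class name for that label index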
Example #7
    def __init__(self):
        self.directory = bd.process_path(bd.main_file_path())
        self.lockfile = os.path.join(self.directory, '.bd.gitlock')
        if not os.path.isdir(self.directory):
            raise RuntimeError(f'[boardom] Invalid directory {self.directory}')
        self.is_git_dir = None
        self.status = None
        self.status_codes = None
        self.status_files = None
        self.current_branch = None
        self.has_branches = None
        self.has_unstaged = None
Example #8
    def setup(self, *cfg_files, extra=None, use_sysargv=True):
        if not self._prv['done_setup']:
            bd.log('Processing configuration')
            arglist = []
            #  if use_sysargv is bd.Null:
            #      use_sysargv = self._prv['is_core_config']
            if not isinstance(use_sysargv, bool):
                raise RuntimeError(
                    f'use_sysargv expected a bool value. Got {type(use_sysargv)}'
                )
            if use_sysargv:
                arglist += sys.argv[1:]
            if cfg_files:
                cfg_files = [bd.process_path(f) for f in cfg_files]
                arglist += cfg_files
            if extra is not None:
                arglist += extra

            self._update_data_from_parsed(self._parse(arglist))

            all_groups = self._prv['all_groups']
            if all_groups:
                bd.log(f'Groups defined: {self._prv["all_groups"]}')

            # Register automatic arguments
            self._prv['data']['time_configured'] = _create_datum(
                time.strftime("%Y/%m/%d %H:%M:%S")
            )
            self._prv['data']['process_id'] = _create_datum(_PROCESS_ID)
            self._prv['data']['session_path'] = _create_datum(None)

            # Leave this here, as the Logger functions called later on (in the subprocess)
            # and accessing cfg.project_path and cfg.session_name
            # depend on correctly identifying if _prv['done_setup'] is True or False
            self._prv['done_setup'] = True

            #  # If using logger, notify with session_id.  This is to change the ID
            #  # from the execution_id to the session_path
            #  if bd.BoardomLogger._started:
            #      # CFG needs to be sent first (lmdb requires session_path)
            #      bd.BoardomLogger()._send_cfg_full()
            #      bd.BoardomLogger()._start_lmdb()
            bd.log('Config done.')

        elif cfg_files:
            raise RuntimeError(
                'Could not setup from config files as bd.setup() was already called.'
            )

        return self
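A usage sketch: setup() merges sys.argv, the listed config files, and any extra argument strings into a single arglist before parsing. It assumes bd.setup forwards to this method on a global config object; the '.bd' extension is inferred from Example 18, and the paths and flag are illustrative:

cfg = bd.setup('defaults.bd', 'experiment.bd',
               extra=['--lr', '0.01'],
               use_sysargv=False)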
Example #9
    def setup_tensorboard_logger_from_cfg(
        self,
        values_key,
        step_key,
        mode='scalar',
        category='plots',
        fields=None,
        cfg=None,
        **kwargs,
    ):
        cfg = _prepare_cfg(cfg, ['session_path'])
        directory = bd.process_path(
            os.path.join(cfg.session_path, 'tensorboard'))
        return self.setup_tensorboard_logger(values_key, step_key, mode,
                                             category, fields, directory,
                                             **kwargs)
Example #10
    def setup_csv_logger_from_cfg(self,
                                  state_key,
                                  fields,
                                  cfg=None,
                                  delimiter=',',
                                  resume=True):
        cfg = _prepare_cfg(cfg, ['session_path'])
        directory = bd.process_path(os.path.join(cfg.session_path, 'csv'),
                                    create=True)
        return self.setup_csv_logger(
            state_key,
            fields=fields,
            directory=directory,
            delimiter=delimiter,
            resume=resume,
        )
Example #11
def load_dng(filename, **kwargs):
    """Loads a dng image file from disk into a float32 Numpy Array (OpenCV view).

    Requires rawpy.

    Args:
        filename (str): Name of the dng image file.
        **kwargs: Extra keyword arguments to pass to `rawpy.postprocess()`.
    """
    import rawpy

    filename = bd.process_path(filename)
    with rawpy.imread(filename) as raw:
        default_kwargs = dict(gamma=(1, 1), no_auto_bright=True, output_bps=16)
        default_kwargs.update(kwargs)
        img = raw.postprocess(**default_kwargs)
    return bd.rgb2bgr(img, -1).astype('float32')
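Usage sketch; any rawpy.postprocess() keyword can be forwarded to override the defaults set above (the file path is illustrative):

img = load_dng('shot.dng', use_camera_wb=True)  # float32, OpenCV (BGR) layout
print(img.shape, img.dtype)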
Example #12
    def _parse_cfg_file(self, config_file, subfile=False):
        config_file = bd.process_path(config_file)
        if not os.path.isfile(config_file):
            raise FileNotFoundError(config_file)
        with open(config_file) as f:
            for count, line in enumerate(f):
                yield from self._process_line(line.strip('\n'), count + 1,
                                              config_file)
        # Only check for unclosed elements at the top level, not in subfiles
        if subfile:
            return
        for element_type, (_, _, current_list) in self.elements_store.items():
            if current_list:
                elts = ', '.join(
                    [f'\'{x}\'' for elm in current_list for x in elm])
                raise SyntaxError(
                    f'Following {element_type}(s) were not closed\n\t{elts}'
                    f'\nin file: {config_file}')
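The method is a generator: each config line expands into parsed data items, and unclosed multi-line elements raise once the top-level file is exhausted. A consumption sketch, using the public name parse_cfg_file that Example 22 calls (file path illustrative):

parser = ConfigFileParser()
for data in parser.parse_cfg_file('experiment.bd'):
    print(data)  # one parsed entry per logical config line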
Example #13
def imread(filename):
    """Reads an image file from disk into a Numpy Array (OpenCV view).

    Args:
        filename (str): Name of the image file.
    """
    filename = bd.process_path(filename)
    ext = os.path.splitext(filename)[1]
    if ext.lower() == '.pfm':
        return load_pfm(filename)
    elif ext.lower() == '.dng':
        return load_dng(filename)
    else:
        loaded = cv2.imread(filename,
                            flags=cv2.IMREAD_ANYDEPTH + cv2.IMREAD_COLOR)
        if loaded is None:
            raise IOError('Could not read {0}'.format(filename))
        else:
            return loaded
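Usage sketch: the loader dispatches on extension, so .pfm and .dng take the custom paths above while everything else goes through OpenCV with IMREAD_ANYDEPTH and IMREAD_COLOR (file paths illustrative):

ldr = imread('photo.png')   # 8/16-bit BGR via cv2.imread
hdr = imread('render.pfm')  # float32 via load_pfm
raw = imread('shot.dng')    # float32 via load_dng (requires rawpy)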
Example #14
    def setup_tensorboard_logger(
        self,
        values_key,
        step_key,
        mode='scalar',
        category='plots',
        fields=None,
        directory='.',
        **kwargs,
    ):
        if 'tensorboard' not in self:
            directory = bd.process_path(directory, create=True)
            writer = SummaryWriter(directory, **kwargs)
            self.tensorboard = writer
        elif not isinstance(self.tensorboard, SummaryWriter):
            raise RuntimeError(
                f'self.tensorboard is not a SummaryWriter: {type(self.tensorboard)}'
            )
        else:
            writer = self.tensorboard

        category = category.rstrip('/') + '/'

        if mode in ['scalar', 'scalars']:
            get_value_fn = _value_getter(self, values_key, fields)

            def log_fn(values, tag=''):
                tag = f'_{tag}' if tag != '' else ''
                new_cat = category.rstrip('/') + tag.rstrip('/') + '/'
                if step_key not in self:
                    return
                step = self[step_key]
                for key, val in values.items():
                    writer.add_scalar(f'{new_cat}{values_key}.{key}',
                                      val,
                                      global_step=step)

        else:
            raise RuntimeError(
                f'Tensorboard logger mode "{mode}" not supported.')

        return GenericLogger(get_value_fn, log_fn, values_key, fields)
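A usage sketch, assuming the containing object is a dict-like state holding the values under values_key and the step under step_key, and that calling the returned GenericLogger fetches the values and forwards them to log_fn:

state['loss'] = {'train': 0.3, 'val': 0.4}
state['step'] = 100
tb_logger = state.setup_tensorboard_logger('loss', 'step', category='plots')
tb_logger()  # presumably writes plots/loss.train and plots/loss.val at step 100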
Example #15
def load_pfm(filename):
    """Loads a pfm image file from disk into a Numpy Array (OpenCV view).

    Supports HDR and LDR image formats.

    Args:
        filename (str): Name of the pfm image file.
    """
    filename = bd.process_path(filename)
    with open(filename, "r", encoding="ISO-8859-1") as file:
        nc = 3 if file.readline().rstrip() == "PF" else 1
        width, height = [int(x) for x in file.readline().rstrip().split()]
        shape = (height, width, nc)
        img = np.fromfile(
            file,
            '{0}{1}'.format(
                "<" if float(file.readline().rstrip()) < 0 else ">", 'f'),
        )
        img = np.reshape(img, shape)
        return np.flip(np.flip(img, 2), 0).copy()
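For reference, the PFM header this reader expects: a type line ('PF' for 3 channels, 'Pf' for 1), a dimensions line, and a scale line whose sign encodes endianness (negative means little-endian). Rows are stored bottom-up, hence the final vertical flip; the channel flip yields the BGR (OpenCV) view. Usage sketch:

img = load_pfm('depth.pfm')  # float32, HxWxC, BGR, top-down
print(img.shape, img.dtype)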
Example #16
    def __init__(self,
                 data_root_path,
                 data_extensions,
                 load_fn,
                 preprocess=None):
        super().__init__()
        data_root_path = bd.process_path(data_root_path)
        self.file_list = []
        for root, _, fnames in sorted(os.walk(data_root_path)):
            for fname in fnames:
                if any(fname.lower().endswith(extension)
                       for extension in data_extensions):
                    self.file_list.append(os.path.join(root, fname))
        if len(self.file_list) == 0:
            msg = 'Could not find any files with extensions:\n[{0}]\nin\n{1}'
            raise RuntimeError(
                msg.format(', '.join(data_extensions), data_root_path))

        self.preprocess = preprocess
        self.load_fn = load_fn
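A usage sketch, assuming the class is a map-style dataset named FolderDataset:

ds = FolderDataset('/data/images',
                   data_extensions=['.png', '.jpg'],
                   load_fn=bd.imread)
print(len(ds.file_list), 'files found')
img = ds.load_fn(ds.file_list[0])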
Example #17
    def setup_csv_logger(self, state_key, fields, directory='.', **kwargs):
        directory = bd.process_path(directory, create=True)
        if 'csv_writers' not in self:
            self.csv_writers = {}
        csv_writers = self.csv_writers

        def get_writer(tag):
            tag = f'_{tag}' if tag != '' else ''
            desc = f'{state_key.replace(".","_")}{tag}'
            writer = csv_writers.get(desc, None)
            if writer is None:
                writer = bd.CSVLogger(desc,
                                      fields=fields,
                                      directory=directory,
                                      **kwargs)
                csv_writers[desc] = writer
            return writer

        get_value_fn = _value_getter(self, state_key, fields)

        def log_fn(values, tag=''):
            get_writer(tag)(values)

        return GenericLogger(get_value_fn, log_fn, state_key, fields)
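Usage sketch; each distinct tag lazily creates one bd.CSVLogger named after state_key with dots replaced by underscores, and calling the returned GenericLogger presumably fetches the current values and appends a row:

csv_logger = state.setup_csv_logger('metrics.loss',
                                    fields=['epoch', 'value'],
                                    directory='./csv')
csv_logger()  # appends to ./csv/metrics_loss.csv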
Example #18
def _create_session(cfg, session_name=None):
    cfg.setup()
    if not cfg._prv['has_core_config']:
        raise RuntimeError('Cannot create_session without core configuration')
    session = Session()

    # Configure session name
    if isinstance(session_name, str):
        _set(cfg, 'session_name', session_name)
    elif isinstance(session_name, Callable):
        session_name = session_name(cfg)
        _set(cfg, 'session_name', session_name)
    elif session_name is None:
        session_name = _get(cfg, 'session_name')
    else:
        raise RuntimeError(
            f'Unknown type for session_name parameter: {type(session_name)}')
    bd.log(f'Creating {session_name} session.')

    project_path = bd.process_path(_get(cfg, 'project_path'), create=True)
    bd.log(f'Project path: {project_path}.')

    session_path = os.path.join(project_path, session_name)
    bd.make_dir(session_path)

    boardom_path = bd.make_dir(os.path.join(session_path, '.boardom'))
    session_file = os.path.join(boardom_path, BD_FILENAME)
    # TODO: Improve Management of Session Files
    #     -- Maybe use a single file?
    #     -- Maybe add information
    if not os.path.exists(session_file):
        with open(session_file, 'w') as f:
            f.write('42')

    # Maybe create log
    create_log = _get(cfg, 'log_stdout')
    if create_log:
        log_name = f'{session_name}_{_PROCESS_ID}.log'
        logdir = os.path.join(session_path, 'log')
        logdir = bd.process_path(logdir, create=True)
        logfile = os.path.join(logdir, log_name)
        logfile = bd.number_file_if_exists(logfile)
        bd.log(f'Creating log file at {logfile}')
        session.stream_replicator = bd.replicate_std_stream(logfile, 'stdout')

    # Maybe copy config files
    cfg_files = cfg._prv['cfg_files']
    copy_config_files = _get(cfg, 'copy_config_files')
    if copy_config_files:
        for i, filename in enumerate(cfg_files):
            config_path = os.path.join(session_path, 'cfg')
            bd.make_dir(config_path)
            if i == 0:
                bd.log(f'Copying configuration files to {config_path}')
            fname, ext = os.path.splitext(filename)
            copied_config_filename = f'{fname}_{_PROCESS_ID}{ext}'
            bd.copy_file_to_dir(
                filename,
                config_path,
                number=True,
                new_name=copied_config_filename,
            )

    # Maybe save full config
    save_full_config = _get(cfg, 'save_full_config')
    if save_full_config:
        config_path = os.path.join(session_path, 'cfg')
        bd.make_dir(config_path)
        config_file = os.path.join(config_path, f'full_cfg_{_PROCESS_ID}.bd')
        config_file = bd.number_file_if_exists(config_file)
        bd.log(f'Saving full configuration at: {config_file}')

        # Makes an entry for the saved settings file
        def _make_entry(key, val):
            if isinstance(val, (list, tuple)):
                val = ' '.join([str(x) for x in val])
            return f'{key} {val}'

        args_to_print = [
            _make_entry(key, val) for key, val in cfg.__dict__.items()
        ]
        args_to_print.sort()
        bd.write_string_to_file('\n'.join(args_to_print), config_file)

    autocommit = _get(cfg, 'autocommit')
    only_run_same_hash = _get(cfg, 'only_run_same_hash')
    _, _, autohash = maybe_autocommit(autocommit, only_run_same_hash,
                                      session_path)
    pid_fname = f'process.{_PROCESS_ID}'
    if autohash is not None:
        pid_fname += f'.{autohash}'

    #  process_dir = bd.make_dir(os.path.join(boardom_path, 'processes'))
    #  process_id_file = os.path.join(process_dir, pid_fname)
    #
    #  if os.path.exists(process_id_file):
    #      raise RuntimeError(
    #          'Process File Already Exists?!? That is unlucky. Please run again..'
    #          f'\n id: {process_id_file}'
    #      )
    #  else:
    #      with open(process_id_file, 'w') as f:
    #          f.write('42')
    if _get(cfg, 'print_cfg'):
        bd.write('-' * 80)
        bd.write(cfg)
        bd.write('-' * 80)

    cfg._prv['data']['session_path'] = _create_datum(session_path)
    return session
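For orientation, the on-disk layout this function produces, assembled from the code above (BD_FILENAME is defined elsewhere; log/ and cfg/ appear only when the corresponding cfg flags are set):

project_path/
    session_name/
        .boardom/<BD_FILENAME>          # marker file containing '42'
        log/<session_name>_<PID>.log    # only if cfg.log_stdout
        cfg/                            # copied configs and full_cfg_<PID>.bd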
Example #19
    def __init__(self, filename, extension):
        self.directory = bd.process_path(filename, create=True)
        self.fname = os.path.basename(filename)[:-len(extension)]
        self.extension = extension
        self.current_frame = 1
Example #20
def _is_valid_config_file(x):
    has_cfg_ext = any(x.lower().endswith(y) for y in _CONFIG_EXTENSIONS)
    return os.path.isfile(bd.process_path(x)) and has_cfg_ext
Example #21
    def _add_path(self, path):
        path = bd.process_path(path)
        self.engine.addpath(self.engine.genpath(path))
Example #22
    def _parse(self, arglist):
        # First parse all the input files and also get extra arguments
        # The data from the config files goes into self._prv['data']
        cfg_file_parser = ConfigFileParser()
        extra_argv = []
        for x in arglist:
            if _is_valid_config_file(x):
                fname = bd.process_path(x)
                self._prv['cfg_files'].append(fname)
                for data in cfg_file_parser.parse_cfg_file(fname):
                    self._add_cfg_file_line_data(data)
            else:
                extra_argv.append(x)
        self._prv['all_groups'] = cfg_file_parser.all_groups

        # Check that all arguments provided in the config files are registered for the parser
        # If not, give meaningful errors.
        # This is automatically handled by the parser, but we do this here to easily track
        # the config file error
        all_argnames = {
            x['flag'][2:]: i for i, x in enumerate(self._prv['argparse_arguments'])
        }
        for arg_name in self._prv['data']:
            if arg_name not in all_argnames:
                for arg_data in self._prv['data'][arg_name].values():
                    meta = arg_data['meta']
                    file, line, count = meta['file'], meta['line'], meta['count']
                    raise RuntimeError(
                        f'Could not find registered argument for \'{arg_name}\' '
                        f'provided in \'{file}\', line {count}:\n\t{line}'
                    )

        # Create parser and all arguments
        parser = argparse.ArgumentParser(allow_abbrev=False, conflict_handler='resolve')
        for arg in self._prv['argparse_arguments']:
            base_flag_name = arg['flag']
            arg_name = arg['flag'][2:]
            help_str = arg['help'] if 'help' in arg else None
            if arg_name.startswith('_'):
                raise ValueError(f'Argument \'{arg_name}\' starts with "_".')

            parser.add_argument(
                base_flag_name,
                help=help_str,
                **{k: v for k, v in arg.items() if k not in ['flag', 'help']},
            )

            # Also add all grouped versions of the argument, defined in the config files
            if arg_name in self._prv['data']:
                for group in self._prv['data'][arg_name]:
                    # Ignore default group (already added)
                    if group.is_default:
                        continue
                    flag_name = group.build_full_argname(base_flag_name)
                    group_help = f'{help_str} ({group})' if help_str else None
                    kwargs = {k: v for k, v in arg.items() if k not in ['flag', 'help']}
                    parser.add_argument(flag_name, help=group_help, **kwargs)

        # Generate argv for parser
        arg_list = []
        for arg_name, data in self._prv['data'].items():
            for group, values in data.items():
                name = group.build_full_argname(arg_name)
                for x in [f'--{name}'] + values['value']:
                    arg_list.append(x)

        return parser.parse_args(arg_list + extra_argv)
Example #23
def visualize_parameters(
    net,
    modules=None,
    match_names=None,
    param_names=None,
    tag='',
    save_path='.',
    histogram=True,
    bins=100,
    rename_fn=None,
):
    """Visualizes a network's parameters on an image grid or histogram.

    Args:
        net (nn.Module): The network whose parameters are to be visualized.
        modules (list or tuple, optional): List of class definitions. Modules
            that are instances of any of them are visualized, e.g. nn.Conv2d
            (default None).
        match_names (list or tuple, optional): List of strings. If a module's
            type name contains one of the strings then it is visualized
            (default None).
        param_names (list or tuple, optional): List of strings. If any
            parameters of the module contain one of the strings then they are
            visualized (default None).
        tag (str, optional): String tag to attach to saved images (default '').
        save_path (str, optional): Path to save visualisation results
            (default '.').
        histogram (bool, optional): If True then the visualization is a
            histogram, otherwise it is an image grid (default True).
        bins (int, optional): Number of bins for the histogram, if `histogram`
            is True (default 100).

    Note:
        * If modules or match_names are not provided then no parameters will be
          visualized.
        * If param_names are not provided then no parameters will be visualized.
    """
    save_path = bd.process_path(save_path, True)
    modules = process_none(modules)
    match_names = process_none(match_names)
    param_names = process_none(param_names)
    rename_fn = rename_fn or bd.identity
    for module_name, mod in net.named_modules():
        name_match = any(
            [torch.typename(mod).find(x) >= 0 for x in match_names])
        instance_match = any([isinstance(mod, x) for x in modules])
        if instance_match or name_match:
            params = {x: _get_tensor(getattr(mod, x)) for x in param_names}
            for tensor_name, data in params.items():
                title = '{0}-{1}-{2}'.format(tag, rename_fn(module_name),
                                             tensor_name)
                if data is None:
                    continue
                if histogram:
                    img = bd.torch2cv(data)
                    df = pd.DataFrame(img.reshape(img.size))
                    fig, ax = plt.subplots()
                    df.hist(bins=bins, ax=ax)
                    fig.savefig(
                        os.path.join(save_path, '{0}.png'.format(title)))
                    plt.close(fig)
                else:
                    if data.dim() > 1:
                        img = bd.torch2cv(bd.make_grid(data))
                        to_save = (bd.map_range(img) * 255).astype(int)
                        cv2.imwrite(
                            os.path.join(save_path, '{0}.png'.format(title)),
                            to_save,
                        )
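A usage sketch saving weight histograms for every convolutional layer (net is any nn.Module; the save path is illustrative):

import torch.nn as nn

visualize_parameters(net,
                     modules=[nn.Conv2d],
                     param_names=['weight'],
                     tag='epoch1',
                     save_path='./vis',
                     histogram=True)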
Example #24
    def get_logging_dir_from_cfg(self, cfg=None):
        cfg = _prepare_cfg(cfg, ['session_path'])
        return bd.process_path(os.path.join(cfg.session_path, 'csv'),
                               create=True)
Example #25
def _hook_generator(
    do_input=False,
    do_output=True,
    tag='',
    save_path='.',
    replace=True,
    histogram=True,
    bins=100,
    mode='forward',
    param_names=None,
    rename_fn=None,
    ext='.jpg',
):
    save_path = bd.process_path(save_path, True)
    tensor_names = (['in', 'out'] if mode in ['forward', 'parameters'] else
                    ['grad_in', 'grad_out'])

    def get_hook(module_name):
        counter = 1

        def hook(module, inp=None, out=None):
            nonlocal counter
            rename_module = rename_fn or bd.identity
            added_tag = tag if tag == '' else tag + '_'
            if mode == 'parameters':
                tensors = {
                    x: _get_tensor(getattr(module, x))
                    for x in param_names
                }
            else:
                tensor_coll = [
                    (tensor_names[0], inp, do_input),
                    (tensor_names[1], out, do_output),
                ]
                tensors = {}
                for name, x, to_do in tensor_coll:
                    if not to_do:
                        continue
                    for d in _get_tensor_dict(x, name):
                        tensors.update(d)
            for tensor_name, data in tensors.items():
                if data is None:
                    continue
                title_end = '' if replace else '-{0:06d}'.format(counter)
                title_end = title_end + '-hist' if histogram else title_end
                title = f'{added_tag}{rename_module(module_name)}_{tensor_name}{title_end}'
                if histogram:
                    img = bd.torch2cv(data)
                    df = pd.DataFrame(img.reshape(img.size))
                    fig, ax = plt.subplots()
                    df.hist(bins=bins, ax=ax)
                    fig.savefig(os.path.join(save_path, f'{title}{ext}'))
                    plt.close(fig)
                else:
                    if data.dim() > 1:
                        img = bd.torch2cv(bd.make_grid(data))
                        to_save = bd.map_range(img, 0, 255).astype(int)
                        cv2.imwrite(
                            os.path.join(save_path, f'{title}{ext}'),
                            to_save,
                        )
            counter = counter + 1

        return hook

    return get_hook
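A usage sketch: the returned factory builds one hook per module name, registered through torch's standard hook API (net is any nn.Module; the save path is illustrative):

get_hook = _hook_generator(do_output=True, save_path='./acts',
                           histogram=False, mode='forward')
handles = [m.register_forward_hook(get_hook(name))
           for name, m in net.named_modules()]
# ...run a forward pass, then detach the hooks:
for h in handles:
    h.remove()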