Example #1
    def plot_convergence(self, task_ind, sub_task_ind=0, measures=None,
                         fig_size=None, gridspec_kw=None):
        """
        Plot measure values for saved iterates.

        This shows the convergence behavior with respect to the measures.

        Parameters
        ----------
        task_ind : int
            Index of the task.
        sub_task_ind : int, optional
            Index of the sub-task (default ``0``).
        measures : [list of ] :class:`.Measure`, optional
            Measures to apply. Each measure is plotted in a subplot.
            If `None` is passed, all measures in ``result['measure_values']``
            are used.
        fig_size : (float, float), optional
            Figure size in inches as ``(width, height)``, passed to
            :meth:`matplotlib.figure.Figure.set_size_inches`.
        gridspec_kw : dict, optional
            Dictionary passed as ``gridspec_kw`` to
            :func:`matplotlib.pyplot.subplots`.

        Returns
        -------
        ax : :class:`np.ndarray` of :class:`matplotlib.axes.Axes`
            The axes the measure values were plotted in.
        """
        row = self.results.loc[task_ind, sub_task_ind]
        iterates_measure_values = row['misc'].get('iterates_measure_values')
        if not iterates_measure_values:
            iterates = row['misc'].get('iterates')
            if not iterates:
                raise ValueError(
                    "no 'iterates_measure_values' or 'iterates' in results "
                    "of task {}{}".format(
                        task_ind, '.{}'.format(sub_task_ind) if
                        len(self.results.loc[task_ind]) > 1 else ''))
        if measures is None:
            measures = row['measure_values'].keys()
        elif isinstance(measures, Measure):
            measures = [measures]
        fig, ax = plt.subplots(len(measures), 1, gridspec_kw=gridspec_kw)
        if not isinstance(ax, np.ndarray):
            ax = np.array([ax])
        if fig_size is not None:
            fig.set_size_inches(fig_size)
        fig.suptitle('convergence of {}'.format(row['reconstructor'].name))
        for measure, ax_ in zip(measures, ax.flat):
            if isinstance(measure, str):
                measure = Measure.get_by_short_name(measure)
            if iterates_measure_values:
                errors = np.mean([iters_mvs[measure.short_name] for iters_mvs
                                  in iterates_measure_values], axis=0)
            else:
                ground_truth = row['test_data'].ground_truth
                errors = np.mean([[measure.apply(x, g) for x in iters] for
                                  iters, g in zip(iterates, ground_truth)],
                                 axis=0)
            ax_.plot(errors)
            ax_.set_title(measure.short_name)
        return ax
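The method above averages the stored per-pair measure values over the test pairs before plotting each curve. A minimal standalone sketch of that averaging-and-plotting step, using synthetic data in place of a real results row (the 'psnr' key and the random values are purely illustrative, not part of the library):

import numpy as np
import matplotlib.pyplot as plt

# hypothetical per-pair iterate measure values: 3 test pairs, 50 iterates each
iterates_measure_values = [{'psnr': np.sort(np.random.rand(50))}
                           for _ in range(3)]

fig, ax = plt.subplots(1, 1)
# average the curve of each test pair element-wise over the pairs
errors = np.mean([imv['psnr'] for imv in iterates_measure_values], axis=0)
ax.plot(errors)
ax.set_title('psnr')
plt.show()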
Example #2
    def apply_measures(self, measures, task_ind=None):
        """Apply (additional) measures to reconstructions.

        This is not possible if the reconstructions were not saved, in which
        case a :class:`ValueError` is raised.

        Parameters
        ----------
        measures : list of :class:`.Measure`
            Measures to apply.
        task_ind : int or list of ints, optional
            Indexes of tasks to which the measures shall be applied.
            If `None`, this is interpreted as "all results".

        Raises
        ------
        ValueError
            If reconstructions are missing or `task_ind` is not valid.
        """
        if task_ind is None:
            indexes = self.results.index.levels[0]
        elif np.isscalar(task_ind):
            indexes = [task_ind]
        elif isinstance(task_ind, list):
            indexes = task_ind
        else:
            raise ValueError('`task_ind` must be a scalar, a list of ints or '
                             '`None`')
        for i in indexes:
            rows = self.results.loc[i]
            for j in range(len(rows)):
                row = rows.loc[j]
                if row['reconstructions'] is None:
                    raise ValueError(
                        'reconstructions missing in task {}{}'.format(
                            i, '.{}'.format(j) if len(rows) > 1 else ''))
                for measure in measures:
                    if isinstance(measure, str):
                        measure = Measure.get_by_short_name(measure)
                    row['measure_values'][measure.short_name] = [
                        measure.apply(r, g)
                        for r, g in zip(row['reconstructions'],
                                        row['test_data'].ground_truth)
                    ]
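For a single row, applying a measure amounts to evaluating it pair-wise on the stored reconstructions and ground truths. A self-contained sketch of that step, with a hypothetical mean-squared-error function standing in for ``Measure.apply`` and random arrays standing in for the stored data:

import numpy as np

def mse(reconstruction, ground_truth):
    # stand-in for Measure.apply of a distance-type measure
    return float(np.mean((reconstruction - ground_truth) ** 2))

reconstructions = [np.random.rand(8, 8) for _ in range(4)]
ground_truth = [np.random.rand(8, 8) for _ in range(4)]

measure_values = {}
measure_values['mse'] = [mse(r, g)
                         for r, g in zip(reconstructions, ground_truth)]
print(measure_values['mse'])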
Example #3
def save_if_best_reconstructor(measure_values, iterations=None):
    measure = save_best_reconstructor.get('measure', measures[0])
    if isinstance(measure, str):
        measure = Measure.get_by_short_name(measure)
    loss_sign = 1 if measure.measure_type == 'distance' else -1
    cur_loss = loss_sign * np.mean(measure_values[measure.short_name])
    if cur_loss < best_loss:
        if iterations is not None:
            reconstructor.hyper_params['iterations'] = iterations
        reconstructor.save_params(save_best_reconstructor['path'])
        return cur_loss
    return best_loss
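This helper relies on a sign convention: distance-type measures are minimized directly, while other measure types (quality measures) are negated, so a single "smaller is better" comparison covers both. A small self-contained sketch of that convention (measure type and values are made up for illustration):

import numpy as np

def signed_loss(measure_type, values):
    # distance-type: smaller is better; other types: larger is better
    loss_sign = 1 if measure_type == 'distance' else -1
    return loss_sign * np.mean(values)

best_loss = np.inf
for values in ([0.12, 0.10], [0.09, 0.11]):  # e.g. per-pair error values
    cur_loss = signed_loss('distance', values)
    if cur_loss < best_loss:
        best_loss = cur_loss  # here the reconstructor parameters would be saved
print(best_loss)  # 0.1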
Example #4
    def plot_performance(self,
                         measure,
                         reconstructors=None,
                         test_data=None,
                         weighted_average=False,
                         **kwargs):
        """
        Plot average measure values for different reconstructors.
        The values have to be computed previously, e.g. by
        :meth:`apply_measures`.

        The average is computed over all rows of :attr:`results` with the
        specified `test_data` that store the requested `measure` value.

        Note that for tasks with multiple sub-tasks, all of them are used when
        computing the average (i.e., the measure values for all hyper parameter
        choices are averaged).

        Parameters
        ----------
        measure : :class:`.Measure` or str
            The measure to plot (or its :attr:`~.Measure.short_name`).
        reconstructors : sequence of :class:`.Reconstructor`, optional
            The reconstructors to compare. If `None` (default), all
            reconstructors that are found in the results are compared.
        test_data : [sequence of ] :class:`.DataPairs`, optional
            Test data to take into account for computing the mean value.
            By default, all test data is used.
        weighted_average : bool, optional
            Whether to weight the rows according to the number of pairs in
            their test data.
            Default: ``False``, i.e. all rows are weighted equally.
            If ``True``, all test data pairs are weighted equally.
        kwargs : dict, optional
            Keyword arguments passed to :func:`matplotlib.pyplot.subplots`.

        Returns
        -------
        ax : `matplotlib.axes.Axes`
            The axes the performance was plotted in.
        """
        if not isinstance(measure, Measure):
            measure = Measure.get_by_short_name(measure)
        if reconstructors is None:
            reconstructors = self.results['reconstructor'].unique()
        if isinstance(test_data, DataPairs):
            test_data = [test_data]
        mask = [
            measure.short_name in row['measure_values'].keys()
            and row['reconstructor'] in reconstructors
            and (test_data is None or row['test_data'] in test_data)
            for _, row in self.results.iterrows()
        ]
        rows = self.results[mask]
        v = []
        for reconstructor in reconstructors:
            r_rows = rows[rows['reconstructor'] == reconstructor]
            values = [
                mvs[measure.short_name] for mvs in r_rows['measure_values']
            ]
            weights = None
            if weighted_average:
                weights = [
                    len(test_data.observations)
                    for test_data in r_rows['test_data']
                ]
            v.append(np.average(values, weights=weights))
        fig, ax = plt.subplots(**kwargs)
        ax.bar(range(len(v)), v)
        ax.set_xticks(range(len(v)))
        ax.set_xticklabels([r.name for r in reconstructors], rotation=30)
        ax.set_title(measure.name)
        return ax
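The bar heights are (optionally weighted) means over the matching rows. A short sketch of the difference between the two averaging modes, with made-up per-row mean values and test-data sizes:

import numpy as np

values = [0.82, 0.85, 0.80]   # hypothetical mean measure values of three rows
weights = [10, 10, 30]        # number of pairs in each row's test data

print(np.average(values))                   # rows weighted equally
print(np.average(values, weights=weights))  # test data pairs weighted equally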
Example #5
    def run(self,
            save_reconstructions=True,
            reuse_iterates=True,
            show_progress='text'):
        """Run all tasks and return the results.

        The returned :class:`ResultTable` object is also stored as
        :attr:`results`.

        Parameters
        ----------
        save_reconstructions : bool, optional
            Whether the reconstructions should be saved in the results.
            The default is ``True``.

            If measures shall be applied after this method returns, it must be
            ``True``.

            If ``False``, no iterates (intermediate reconstructions) will be
            saved, even if ``task['options']['save_iterates']==True``.

        reuse_iterates : bool, optional
            Whether to reuse iterates from other sub-tasks if possible.
            The default is ``True``.

            If this option is ``True`` and there are sub-tasks whose hyper
            parameter choices differ only in the number of iterations of an
            :class:`IterativeReconstructor`, only the sub-task with the
            maximum number of iterations is run, and the results for the other
            sub-tasks are obtained from the stored intermediate iterates.

            Note 1: If enabled, the callbacks assigned to the reconstructor
            will only be run for those sub-tasks with the maximum number of
            iterations.

            Note 2: If the reconstructor is non-deterministic, this option can
            affect the results as the same realization is used for multiple
            sub-tasks.

        show_progress : str, optional
            Whether and how to show progress. Options are:

                ``'text'`` (default)
                    print a line before running each task
                ``'tqdm'``
                    show a progress bar with ``tqdm``
                `None`
                    do not show progress

        Returns
        -------
        results : :class:`ResultTable`
            The results.
        """
        row_list = []
        with std_out_err_redirect_tqdm(None if show_progress ==
                                       'tqdm' else sys.stdout) as orig_stdout:
            for i, task in enumerate(
                    tqdm(self.tasks,
                         desc='task',
                         file=orig_stdout,
                         disable=(show_progress != 'tqdm'))):
                if show_progress == 'text':
                    print('running task {i}/{num_tasks} ...'.format(
                        i=i, num_tasks=len(self.tasks)))
                test_data = task['test_data']
                reconstructor = task['reconstructor']
                if test_data.ground_truth is None and task['measures']:
                    raise ValueError('missing ground truth, cannot apply '
                                     'measures')
                measures = [(measure if isinstance(measure, Measure) else
                             Measure.get_by_short_name(measure))
                            for measure in task['measures']]
                options = task['options']
                skip_training = options.get('skip_training', False)
                save_best_reconstructor = options.get(
                    'save_best_reconstructor')
                save_iterates = (save_reconstructions
                                 and options.get('save_iterates'))

                hp_choices = task.get('hyper_param_choices')
                if hp_choices:
                    # run all hyper param choices as sub-tasks
                    retrain_param_keys = [
                        k for k, v in reconstructor.HYPER_PARAMS.items()
                        if v.get('retrain', False)
                    ]
                    orig_hyper_params = reconstructor.hyper_params.copy()

                    def _warn_if_invalid_keys(keys):
                        for k in keys:
                            if k not in reconstructor.HYPER_PARAMS.keys():
                                warn("choice for unknown hyper parameter '{}' "
                                     "for reconstructor of type '{}' will be "
                                     'ignored'.format(k, type(reconstructor)))

                    if isinstance(hp_choices, dict):
                        _warn_if_invalid_keys(hp_choices.keys())
                        keys_retrain_first = sorted(
                            hp_choices.keys(),
                            key=lambda k: k not in retrain_param_keys)
                        # if isinstance(reconstructor, IterativeReconstructor):
                        #     # 'iterations' treated specially to re-use iterates
                        #     keys_retrain_first.remove('iterations')
                        #     hp_choices_iterations = hp_choices.get(
                        #         'iterations',
                        #         [orig_hyper_params['iterations']])
                        param_values = [
                            hp_choices.get(k, [orig_hyper_params[k]])
                            for k in keys_retrain_first
                        ]
                        hp_choice_list = [
                            dict(zip(keys_retrain_first, v))
                            for v in product(*param_values)
                        ]
                    else:
                        hp_choice_list = hp_choices
                        for hp_choice in hp_choice_list:
                            _warn_if_invalid_keys(hp_choice.keys())
                        # if isinstance(reconstructor, IterativeReconstructor):
                        #     # no special support for re-using iterates
                        #     hp_choices_iterations = []
                    if (isinstance(reconstructor, IterativeReconstructor)
                            and reuse_iterates):
                        reuse_iterates_from = []
                        for j, hp_choice_j in enumerate(hp_choice_list):
                            iter_j = hp_choice_j.get(
                                'iterations', orig_hyper_params['iterations'])
                            (k_max, iter_max) = (-1, iter_j)
                            for k, hp_choice_k in enumerate(hp_choice_list):
                                iter_k = hp_choice_k.get(
                                    'iterations',
                                    orig_hyper_params['iterations'])
                                if iter_k > iter_max:
                                    hp_choice_j_rem = hp_choice_j.copy()
                                    hp_choice_j_rem.pop('iterations')
                                    hp_choice_k_rem = hp_choice_k.copy()
                                    hp_choice_k_rem.pop('iterations')
                                    if hp_choice_j_rem == hp_choice_k_rem:
                                        (k_max, iter_max) = (k, iter_k)
                            reuse_iterates_from.append(k_max)
                    if save_best_reconstructor:
                        if len(measures) == 0 and len(hp_choice_list) > 1:
                            warn("No measures are chosen to be evaluated, so "
                                 "no best reconstructor can be selected. Will "
                                 "not save like requested by "
                                 "'save_best_reconstructor' option.")
                            save_best_reconstructor = None
                        else:
                            best_loss = np.inf
                    row_sub_list = [None] * len(hp_choice_list)
                    # run sub-tasks
                    for j, hp_choice in enumerate(
                            tqdm(hp_choice_list,
                                 desc='sub-task',
                                 file=orig_stdout,
                                 disable=(show_progress != 'tqdm'),
                                 leave=False)):
                        if show_progress == 'text':
                            print('sub-task {j}/{n} ...'.format(
                                j=j, n=len(hp_choice_list)))
                        train = (isinstance(reconstructor,
                                            LearnedReconstructor)
                                 and (j == 0 or any(
                                     (hp_choice.get(k, orig_hyper_params[k]) !=
                                      reconstructor.hyper_params[k]
                                      for k in retrain_param_keys))))
                        reconstructor.hyper_params = orig_hyper_params.copy()
                        reconstructor.hyper_params.update(hp_choice)
                        # if (isinstance(reconstructor, IterativeReconstructor)
                        #         and hp_choices_iterations):
                        #     reconstructor.hyper_params['iterations'] = max(
                        #         hp_choices_iterations)  # only largest number
                        if train and not skip_training:
                            reconstructor.train(task['dataset'])
                        run_sub_task = not (isinstance(reconstructor,
                                                       IterativeReconstructor)
                                            and reuse_iterates
                                            and reuse_iterates_from[j] != -1)
                        if run_sub_task:
                            return_rows_iterates = None
                            if (isinstance(reconstructor,
                                           IterativeReconstructor)
                                    and reuse_iterates):
                                # determine the iteration numbers needed for
                                # other sub-tasks
                                return_iterates_for = [
                                    k for k, from_k in enumerate(
                                        reuse_iterates_from) if from_k == j
                                ]  # sub-task indices
                                return_rows_iterates = [
                                    hp_choice_list[k].get(
                                        'iterations',
                                        orig_hyper_params['iterations'])
                                    for k in return_iterates_for
                                ]  # iterations
                            row = self._run_task(
                                reconstructor=reconstructor,
                                test_data=test_data,
                                measures=measures,
                                hp_choice=hp_choice,
                                return_rows_iterates=return_rows_iterates,
                                options=options,
                                save_reconstructions=save_reconstructions,
                                save_iterates=save_iterates,
                            )
                            if return_rows_iterates is not None:
                                (row, rows_iterates) = row
                                # assign rows for other sub-tasks
                                for r_i, k in enumerate(return_iterates_for):
                                    rows_iterates[r_i]['task_ind'] = i
                                    rows_iterates[r_i]['sub_task_ind'] = k
                                    row_sub_list[k] = rows_iterates[r_i]
                            # assign row for current sub-task
                            row['task_ind'] = i
                            row['sub_task_ind'] = j
                            row_sub_list[j] = row
                            if save_best_reconstructor:

                                def save_if_best_reconstructor(
                                        measure_values, iterations=None):
                                    measure = save_best_reconstructor.get(
                                        'measure', measures[0])
                                    if isinstance(measure, str):
                                        measure = Measure.get_by_short_name(
                                            measure)
                                    loss_sign = (1 if measure.measure_type
                                                 == 'distance' else -1)
                                    cur_loss = (loss_sign * np.mean(
                                        measure_values[measure.short_name]))
                                    if cur_loss < best_loss:
                                        if iterations is not None:
                                            reconstructor.hyper_params[
                                                'iterations'] = iterations
                                        reconstructor.save_params(
                                            save_best_reconstructor['path'])
                                        return cur_loss
                                    return best_loss

                                best_loss = save_if_best_reconstructor(
                                    row['measure_values'])
                                if return_rows_iterates is not None:
                                    for row_iterates, iterations in zip(
                                            rows_iterates,
                                            return_rows_iterates):
                                        best_loss = save_if_best_reconstructor(
                                            row_iterates['measure_values'],
                                            iterations=iterations)
                    reconstructor.hyper_params = orig_hyper_params.copy()
                    row_list += row_sub_list
                else:
                    # run task (with hyper params as they are)
                    if (isinstance(reconstructor, LearnedReconstructor)
                            and not skip_training):
                        reconstructor.train(task['dataset'])

                    row = self._run_task(
                        reconstructor=reconstructor,
                        test_data=test_data,
                        measures=measures,
                        hp_choice=None,
                        return_rows_iterates=None,
                        options=options,
                        save_reconstructions=save_reconstructions,
                        save_iterates=save_iterates,
                    )
                    row['task_ind'] = i
                    row['sub_task_ind'] = 0
                    row_list.append(row)
                    if save_best_reconstructor:
                        reconstructor.save_params(
                            save_best_reconstructor['path'])

        self.results = ResultTable(row_list)
        return self.results
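The ``reuse_iterates`` bookkeeping above assigns, to every hyper parameter choice, the index of the choice whose iterates it can reuse (or ``-1`` if it has to be run itself): the reusable choice is the one with the largest number of iterations that agrees on all other parameters. A standalone sketch of that assignment with illustrative parameter names ('gamma' and 'iterations' are made up):

hp_choice_list = [
    {'iterations': 10, 'gamma': 0.1},
    {'iterations': 50, 'gamma': 0.1},
    {'iterations': 50, 'gamma': 0.2},
]

reuse_iterates_from = []
for j, hp_j in enumerate(hp_choice_list):
    k_max, iter_max = -1, hp_j['iterations']
    for k, hp_k in enumerate(hp_choice_list):
        if hp_k['iterations'] > iter_max:
            # compare the choices with 'iterations' removed
            rem_j = {key: v for key, v in hp_j.items() if key != 'iterations'}
            rem_k = {key: v for key, v in hp_k.items() if key != 'iterations'}
            if rem_j == rem_k:
                k_max, iter_max = k, hp_k['iterations']
    reuse_iterates_from.append(k_max)

print(reuse_iterates_from)  # [1, -1, -1]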
Example #6
def optimize_hyper_params(reconstructor,
                          validation_data,
                          measure,
                          dataset=None,
                          HYPER_PARAMS_override=None,
                          hyperopt_max_evals=1000,
                          hyperopt_max_evals_retrain=1000,
                          hyperopt_rstate=None,
                          show_progressbar=True,
                          tqdm_file=None):
    """Optimize hyper parameters of a reconstructor.

    Parameters
    ----------
    reconstructor : :class:`.Reconstructor`
        The reconstructor.
    validation_data : :class:`.DataPairs`
        The validation data on which the performance is measured.
    measure : :class:`.Measure` or str
        The measure to use as the objective. The sign is chosen automatically
        depending on the measure's :attr:`~.Measure.measure_type`.
    dataset : :class:`.Dataset`, optional
        The dataset used for training `reconstructor` if it is a
        :class:`LearnedReconstructor`.
    HYPER_PARAMS_override : dict, optional
        Hyper parameter specification overriding the defaults
        in ``type(reconstructor).HYPER_PARAMS``.
        The structure of this dict is the same as the structure
        of :attr:`Reconstructor.HYPER_PARAMS`, except that all
        fields are optional.
        Here, each value of a dict for one parameter is treated
        as an entity, i.e. specifying the dict
        ``HYPER_PARAMS[...]['grid_search_options']`` overrides
        the whole dict, not only the specified keys in it.
    hyperopt_max_evals : int, optional
        Number of evaluations for different combinations of the parameters that
        are optimized by ``hyperopt`` and that do not require retraining.
        Should be chosen depending on the complexity of the dependence on
        these parameters and on their number.
    hyperopt_max_evals_retrain : int, optional
        Number of evaluations for different combinations of the parameters that
        are optimized by ``hyperopt`` and that require retraining.
        Should be chosen depending on the complexity of the dependence on
        these parameters and on their number.
    hyperopt_rstate : :class:`np.random.RandomState`, optional
        Random state for the random searches performed by ``hyperopt``.
    show_progressbar : bool, optional
        Whether to show a progress bar for the optimization. Default: ``True``.
    tqdm_file : file-like object, optional
        File/stream to pass to ``tqdm``.

    Returns
    -------
    best_hyper_params : dict
        The best hyper parameters found for `reconstructor`.
    """
    if isinstance(measure, str):
        measure = Measure.get_by_short_name(measure)
    if dataset is None and isinstance(reconstructor, LearnedReconstructor):
        raise ValueError('dataset required for training of '
                         '`LearnedReconstructor`')

    if HYPER_PARAMS_override is None:
        HYPER_PARAMS_override = {}
    for k in HYPER_PARAMS_override.keys():
        if k not in type(reconstructor).HYPER_PARAMS.keys():
            warn("unknown hyper param '{}' for reconstructor of type '{}'".
                 format(k, type(reconstructor)))

    params = {}
    params_retrain = {}
    for k, v in type(reconstructor).HYPER_PARAMS.items():
        param = v.copy()
        param.update(HYPER_PARAMS_override.get(k, {}))
        param.setdefault('method', 'grid_search')
        retrain = v.get('retrain', False)
        if retrain:
            params_retrain[k] = param
        else:
            params[k] = param

    loss_sign = 1 if measure.measure_type == 'distance' else -1

    def fn(x):
        reconstructor.hyper_params.update(x)
        reconstructions = [
            reconstructor.reconstruct(observation)
            for observation in validation_data.observations
        ]
        measure_values = [
            measure.apply(r, g)
            for r, g in zip(reconstructions, validation_data.ground_truth)
        ]
        loss = loss_sign * np.mean(measure_values)

        return {'status': 'ok', 'loss': loss}

    def fn_retrain(x):
        reconstructor.hyper_params.update(x)
        reconstructor.train(dataset)

        best_sub_hp = _optimize_hyper_params_impl(
            reconstructor,
            fn,
            params,
            hyperopt_max_evals=hyperopt_max_evals,
            hyperopt_rstate=hyperopt_rstate,
            show_progressbar=False)

        reconstructions = [
            reconstructor.reconstruct(observation)
            for observation in validation_data.observations
        ]
        measure_values = [
            measure.apply(r, g)
            for r, g in zip(reconstructions, validation_data.ground_truth)
        ]
        loss = loss_sign * np.mean(measure_values)

        return {'status': 'ok', 'loss': loss, 'best_sub_hp': best_sub_hp}

    if params_retrain:
        best_hyper_params = _optimize_hyper_params_impl(
            reconstructor,
            fn_retrain,
            params_retrain,
            hyperopt_max_evals=hyperopt_max_evals_retrain,
            hyperopt_rstate=hyperopt_rstate,
            show_progressbar=show_progressbar,
            tqdm_file=tqdm_file)
    else:
        best_hyper_params = _optimize_hyper_params_impl(
            reconstructor,
            fn,
            params,
            hyperopt_max_evals=hyperopt_max_evals,
            hyperopt_rstate=hyperopt_rstate,
            show_progressbar=show_progressbar,
            tqdm_file=tqdm_file)

    return best_hyper_params
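The objective passed to ``hyperopt`` reconstructs the validation observations with the candidate hyper parameters and averages the measure over the pairs. A self-contained sketch of that pattern, with a dummy reconstruction function and a hypothetical 'smoothing' parameter standing in for a real :class:`.Reconstructor`:

import numpy as np

observations = [np.random.rand(16) for _ in range(5)]
ground_truth = [o + 0.05 * np.random.randn(16) for o in observations]

def reconstruct(observation, smoothing):
    # dummy stand-in for reconstructor.reconstruct with one hyper parameter
    return observation * (1.0 - smoothing)

def fn(x):
    # objective in the format expected by hyperopt ('loss' is minimized)
    reconstructions = [reconstruct(obs, **x) for obs in observations]
    measure_values = [np.mean((r - g) ** 2)  # distance-type measure, sign +1
                      for r, g in zip(reconstructions, ground_truth)]
    return {'status': 'ok', 'loss': np.mean(measure_values)}

print(fn({'smoothing': 0.1}))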
Example #7
    def run(self, save_reconstructions=True, show_progress='text'):
        """Run all tasks and return the results.

        The returned :class:`ResultTable` object is also stored as
        :attr:`results`.

        Parameters
        ----------
        save_reconstructions : bool, optional
            Whether the reconstructions should be saved in the results.
            The default is ``True``.

            If measures shall be applied after this method returns, it must be
            ``True``.

            If ``False``, no iterates (intermediate reconstructions) will be
            saved, even if ``task['options']['save_iterates']==True``.

        show_progress : str, optional
            Whether and how to show progress. Options are:

                ``'text'`` (default)
                    print a line before running each task
                ``'tqdm'``
                    show a progress bar with ``tqdm``
                `None`
                    do not show progress

        Returns
        -------
        results : :class:`ResultTable`
            The results.
        """
        row_list = []
        with std_out_err_redirect_tqdm(None if show_progress ==
                                       'tqdm' else sys.stdout) as orig_stdout:
            for i, task in enumerate(
                    tqdm(self.tasks,
                         desc='task',
                         file=orig_stdout,
                         disable=(show_progress != 'tqdm'))):
                if show_progress == 'text':
                    print('running task {i}/{num_tasks} ...'.format(
                        i=i, num_tasks=len(self.tasks)))
                test_data = task['test_data']
                reconstructor = task['reconstructor']
                if test_data.ground_truth is None and task['measures']:
                    raise ValueError('missing ground truth, cannot apply '
                                     'measures')
                measures = [(measure if isinstance(measure, Measure) else
                             Measure.get_by_short_name(measure))
                            for measure in task['measures']]
                options = task['options']
                skip_training = options.get('skip_training', False)
                save_best_reconstructor = options.get(
                    'save_best_reconstructor')
                save_iterates = (save_reconstructions
                                 and options.get('save_iterates'))

                hp_choices = task.get('hyper_param_choices')
                if hp_choices:
                    # run all hyper param choices as sub-tasks
                    retrain_param_keys = [
                        k for k, v in reconstructor.HYPER_PARAMS.items()
                        if v.get('retrain', False)
                    ]
                    orig_hyper_params = reconstructor.hyper_params.copy()

                    def _warn_if_invalid_keys(keys):
                        for k in keys:
                            if k not in reconstructor.HYPER_PARAMS.keys():
                                warn("choice for unknown hyper parameter '{}' "
                                     "for reconstructor of type '{}' will be "
                                     'ignored'.format(k, type(reconstructor)))

                    if isinstance(hp_choices, dict):
                        _warn_if_invalid_keys(hp_choices.keys())
                        keys_retrain_first = sorted(
                            hp_choices.keys(),
                            key=lambda k: k not in retrain_param_keys)
                        param_values = [
                            hp_choices.get(k, [orig_hyper_params[k]])
                            for k in keys_retrain_first
                        ]
                        hp_choice_list = [
                            dict(zip(keys_retrain_first, v))
                            for v in product(*param_values)
                        ]
                    else:
                        hp_choice_list = hp_choices
                        for hp_choice in hp_choice_list:
                            _warn_if_invalid_keys(hp_choice.keys())
                    if save_best_reconstructor:
                        if len(measures) == 0 and len(hp_choice_list) > 1:
                            warn("No measures are chosen to be evaluated, so "
                                 "no best reconstructor can be selected. Will "
                                 "not save like requested by "
                                 "'save_best_reconstructor' option.")
                            save_best_reconstructor = None
                        else:
                            best_loss = np.inf
                    for j, hp_choice in enumerate(
                            tqdm(hp_choice_list,
                                 desc='sub-task',
                                 file=orig_stdout,
                                 disable=(show_progress != 'tqdm'),
                                 leave=False)):
                        if show_progress == 'text':
                            print('sub-task {j}/{n} ...'.format(
                                j=j, n=len(hp_choice_list)))
                        train = (isinstance(reconstructor,
                                            LearnedReconstructor)
                                 and (j == 0 or any(
                                     (hp_choice.get(k, orig_hyper_params[k]) !=
                                      reconstructor.hyper_params[k]
                                      for k in retrain_param_keys))))
                        reconstructor.hyper_params = orig_hyper_params.copy()
                        reconstructor.hyper_params.update(hp_choice)
                        if train and not skip_training:
                            reconstructor.train(task['dataset'])
                        row = self._run_task(
                            reconstructor=reconstructor,
                            test_data=test_data,
                            measures=measures,
                            hp_choice=hp_choice,
                            options=options,
                            save_reconstructions=save_reconstructions,
                            save_iterates=save_iterates,
                        )
                        row['task_ind'] = i
                        row['sub_task_ind'] = j
                        row_list.append(row)
                        if save_best_reconstructor:
                            measure = save_best_reconstructor.get(
                                'measure', measures[0])
                            if isinstance(measure, str):
                                measure = Measure.get_by_short_name(measure)
                            loss_sign = (1 if measure.measure_type
                                         == 'distance' else -1)
                            cur_loss = (loss_sign * np.mean(
                                row['measure_values'][measure.short_name]))
                            if cur_loss < best_loss:
                                reconstructor.save_params(
                                    save_best_reconstructor['path'])
                                best_loss = cur_loss
                    reconstructor.hyper_params = orig_hyper_params.copy()
                else:
                    # run task (with hyper params as they are)
                    if (isinstance(reconstructor, LearnedReconstructor)
                            and not skip_training):
                        reconstructor.train(task['dataset'])

                    row = self._run_task(
                        reconstructor=reconstructor,
                        test_data=test_data,
                        measures=measures,
                        hp_choice=None,
                        options=options,
                        save_reconstructions=save_reconstructions,
                        save_iterates=save_iterates,
                    )
                    row['task_ind'] = i
                    row['sub_task_ind'] = 0
                    row_list.append(row)
                    if save_best_reconstructor:
                        reconstructor.save_params(
                            save_best_reconstructor['path'])

        self.results = ResultTable(row_list)
        return self.results
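When ``hyper_param_choices`` is given as a dict, it is expanded into one sub-task per combination of values via ``itertools.product``. A short sketch of that expansion with illustrative keys (the real code additionally sorts parameters requiring retraining to the front):

from itertools import product

hp_choices = {'gamma': [0.1, 0.2], 'iterations': [10, 50]}  # made-up keys

keys = sorted(hp_choices.keys())
param_values = [hp_choices[k] for k in keys]
hp_choice_list = [dict(zip(keys, v)) for v in product(*param_values)]
print(hp_choice_list)
# [{'gamma': 0.1, 'iterations': 10}, {'gamma': 0.1, 'iterations': 50},
#  {'gamma': 0.2, 'iterations': 10}, {'gamma': 0.2, 'iterations': 50}]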