Example #1
    def _update(self):
        assert self.data is not None
        # collect all learners for which results have not yet been computed
        need_update = [(i, item) for (i, item) in enumerate(self.learners)
                       if item.results is None]
        if not need_update:
            return

        learners = [item.learner for _, item in need_update]

        if self.testdata is None:
            # compute the learning curve result for all learners in one go
            results = learning_curve(
                learners,
                self.data,
                folds=self.folds,
                proportions=self.curvePoints,
            )
        else:
            results = learning_curve_with_test_data(
                learners,
                self.data,
                self.testdata,
                times=self.folds,
                proportions=self.curvePoints,
            )
        # split the combined result into per learner/model results
        results = [
            list(Results.split_by_model(p_results)) for p_results in results
        ]

        for i, (_, item) in enumerate(need_update):
            item.results = [p_results[i] for p_results in results]
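Example #1 follows a simple caching pattern: collect the entries whose results are still missing, evaluate only those learners in one combined run, split the combined result per learner, and write each slice back to the entry it came from. Below is a minimal, self-contained sketch of that bookkeeping; FakeResults and fake_learning_curve are hypothetical stand-ins for Orange's Results and the widget's learning_curve helper, not the actual API.

    class FakeResults:
        def __init__(self, per_learner_scores):
            self.per_learner_scores = per_learner_scores

        def split_by_model(self):
            # one single-learner result per learner, like Results.split_by_model
            return [FakeResults([s]) for s in self.per_learner_scores]

    def fake_learning_curve(learners, proportions):
        # one combined result per curve point, covering every learner at once
        return [FakeResults([f"{l}@{p}" for l in learners]) for p in proportions]

    items = [{"learner": "tree", "results": None},
             {"learner": "knn", "results": ["already cached"]}]

    need_update = [(i, it) for i, it in enumerate(items) if it["results"] is None]
    learners = [it["learner"] for _, it in need_update]
    combined = fake_learning_curve(learners, proportions=[0.5, 1.0])
    per_model = [r.split_by_model() for r in combined]
    for i, (_, it) in enumerate(need_update):
        # one per-learner result for every curve point, in need_update order
        it["results"] = [p_results[i] for p_results in per_model]

Only the "tree" entry is recomputed here; the cached "knn" entry is left untouched, which is the point of the need_update filter.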
Example #2
    def _update(self):
        assert self.data is not None
        # collect all learners for which results have not yet been computed
        need_update = [(id, learner) for id, learner in self.learners.items()
                       if self.results[id] is None]
        if not need_update:
            return
        learners = [learner for _, learner in need_update]

        self.progressBarInit()
        if self.testdata is None:
            # compute the learning curve result for all learners in one go
            results = learning_curve(
                learners,
                self.data,
                folds=self.folds,
                proportions=self.curvePoints,
                callback=lambda value: self.progressBarSet(100 * value))
        else:
            results = learning_curve_with_test_data(
                learners,
                self.data,
                self.testdata,
                times=self.folds,
                proportions=self.curvePoints,
                callback=lambda value: self.progressBarSet(100 * value))

        self.progressBarFinished()
        # split the combined result into per learner/model results
        results = [
            list(Results.split_by_model(p_results)) for p_results in results
        ]

        for i, (id, learner) in enumerate(need_update):
            self.results[id] = [p_results[i] for p_results in results]
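Example #2 differs from Example #1 mainly in the progress reporting: the evaluation routine is given a callback that it calls with a fraction in [0, 1], and the widget rescales that to a percentage on its progress bar. A stripped-down sketch of the same wiring, with a hypothetical ProgressBar and run_evaluation standing in for the widget and the learning_curve helper:

    class ProgressBar:
        # hypothetical stand-in for the widget's progressBar* methods
        def init(self):
            print("progress: start")

        def set(self, percent):
            print(f"progress: {percent:.0f}%")

        def finish(self):
            print("progress: done")

    def run_evaluation(steps, callback=None):
        # hypothetical long-running job; reports the completed fraction in [0, 1]
        for i in range(steps):
            # ... one unit of work per step ...
            if callback is not None:
                callback((i + 1) / steps)
        return "combined results"

    bar = ProgressBar()
    bar.init()
    results = run_evaluation(4, callback=lambda value: bar.set(100 * value))
    bar.finish()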
Example #3
    def _update(self):
        assert self.data is not None
        # collect all learners for which results have not yet been computed
        need_update = [(id, learner) for id, learner in self.learners.items()
                       if self.results[id] is None]
        if not need_update:
            return
        learners = [learner for _, learner in need_update]

        if self.testdata is None:
            # compute the learning curve result for all learners in one go
            results = learning_curve(
                learners, self.data, folds=self.folds,
                proportions=self.curvePoints,
            )
        else:
            results = learning_curve_with_test_data(
                learners, self.data, self.testdata, times=self.folds,
                proportions=self.curvePoints,
            )
        # split the combined result into per learner/model results
        results = [list(Results.split_by_model(p_results)) for p_results in results]

        for i, (id, learner) in enumerate(need_update):
            self.results[id] = [p_results[i] for p_results in results]
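Examples #2 and #3 key both the learners and their cached results by the same input id, so a later call to _update only recomputes entries whose slot in self.results is None. A small sketch of how such a cache is typically kept consistent when learners are added, replaced, or removed; set_learner and remove_learner are hypothetical helpers, not methods of the widget shown above.

    learners = {}   # input id -> learner
    results = {}    # input id -> list of per-curve-point results, or None

    def set_learner(id_, learner):
        learners[id_] = learner
        results[id_] = None          # invalidate only this learner's cached curve

    def remove_learner(id_):
        learners.pop(id_, None)
        results.pop(id_, None)

    set_learner(1, "naive bayes")
    set_learner(2, "random forest")
    # only entries still marked None would be recomputed by _update()
    print([id_ for id_ in learners if results[id_] is None])   # -> [1, 2]

Replacing a learner clears only its own cached curve, which is what makes the `self.results[id] is None` test in _update sufficient.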
Example #4
    def _update(self):
        assert self.data is not None
        # collect all learners for which results have not yet been computed

        need_update = [(id, learner) for id, learner in self.learners.items()
                       if self.results[id] is None]
        if not need_update:  # every learner already has results; nothing to do
            return

        learners = [learner for _, learner in need_update]

        # compute the learning curve result for all learners in one go
        results = learning_curve(
            learners,
            self.data,
            folds=self.folds,
            proportions=self.curvePoints,
        )  # one combined result per curve point (proportion)
        # split the combined result into per learner/model results
        results = [
            list(Results.split_by_model(p_results)) for p_results in results
        ]

        for i, (id, learner) in enumerate(need_update):
            self.results[id] = [p_results[i] for p_results in results]
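The comment on the learning_curve(...) call in Example #4 is really asking about the shape of the returned value: after the split, results is a sequence with one entry per curve point, and each entry holds one single-learner result per learner in need_update (the asserts in Example #5 below check exactly this). A plain-list illustration of that layout, with strings standing in for Results objects:

    curve_points = [0.25, 0.5, 1.0]
    need_update = [(10, "tree"), (20, "logreg")]     # (input id, learner)

    # results[point][learner_index]: one single-learner result per learner and point
    results = [[f"{learner}@{p}" for _, learner in need_update]
               for p in curve_points]

    assert len(results) == len(curve_points)
    assert all(len(row) == len(need_update) for row in results)

    per_learner = {id_: [row[i] for row in results]
                   for i, (id_, _) in enumerate(need_update)}
    print(per_learner[10])   # -> ['tree@0.25', 'tree@0.5', 'tree@1.0']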
Example #5
    def _task_finished(self, f):
        """
        Parameters
        ----------
        f : Future
            The future instance holding the result of learner evaluation.
        """
        assert self.thread() is QThread.currentThread()
        assert self._task is not None
        assert self._task.future is f
        assert f.done()

        self._task = None
        self.progressBarFinished()

        try:
            results = f.result()  # type: List[Results]
        except Exception as ex:
            # Log the exception with a traceback
            log = logging.getLogger()
            log.exception(__name__, exc_info=True)
            self.error("Exception occurred during evaluation: {!r}".format(ex))
            # clear all results
            for key in self.results.keys():
                self.results[key] = None
        else:
            # split the combined result into per learner/model results ...
            results = [
                list(Results.split_by_model(p_results))
                for p_results in results
            ]  # type: List[List[Results]]
            assert all(len(r.learners) == 1 for r1 in results for r in r1)
            assert len(results) == len(self.curvePoints)

            learners = [r.learners[0] for r in results[0]]
            learner_id = {
                learner: id_
                for id_, learner in self.learners.items()
            }

            # ... and update self.results
            for i, learner in enumerate(learners):
                id_ = learner_id[learner]
                self.results[id_] = [p_results[i] for p_results in results]
        # [end-snippet-9]
        # update the display
        self._update_curve_points()
        self._update_table()
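Example #5 (and the near-identical Example #6 below) moves the evaluation to a background task and handles its completion in _task_finished: the result is read from the Future inside a try/except, an error clears every cached entry, and a successful result is split per model and written back as before. A self-contained sketch of the same Future handling using concurrent.futures directly; evaluate and results_cache are hypothetical stand-ins for the submitted evaluation and the widget's self.results.

    from concurrent.futures import ThreadPoolExecutor

    results_cache = {1: None, 2: None}    # hypothetical stand-in for self.results

    def evaluate():
        # pretend evaluation; raise an exception here to exercise the error branch
        return ["curve for learner 1", "curve for learner 2"]

    with ThreadPoolExecutor(max_workers=1) as pool:
        future = pool.submit(evaluate)

    assert future.done()     # the pool has shut down, so the task has finished
    try:
        computed = future.result()
    except Exception as ex:
        print(f"Exception occurred during evaluation: {ex!r}")
        for key in results_cache:
            results_cache[key] = None     # drop any stale results on failure
    else:
        for key, value in zip(results_cache, computed):
            results_cache[key] = value

    print(results_cache)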
Example #6
    def _task_finished(self, f):
        """
        Parameters
        ----------
        f : Future
            The future instance holding the result of learner evaluation.
        """
        assert self.thread() is QThread.currentThread()
        assert self._task is not None
        assert self._task.future is f
        assert f.done()

        self._task = None
        self.progressBarFinished()

        try:
            results = f.result()  # type: List[Results]
        except Exception as ex:
            # Log the exception with a traceback
            log = logging.getLogger()
            log.exception(__name__, exc_info=True)
            self.error("Exception occurred during evaluation: {!r}"
                       .format(ex))
            # clear all results
            for key in self.results.keys():
                self.results[key] = None
        else:
            # split the combined result into per learner/model results ...
            results = [list(Results.split_by_model(p_results))
                       for p_results in results]  # type: List[List[Results]]
            assert all(len(r.learners) == 1 for r1 in results for r in r1)
            assert len(results) == len(self.curvePoints)

            learners = [r.learners[0] for r in results[0]]
            learner_id = {learner: id_ for id_, learner in self.learners.items()}

            # ... and update self.results
            for i, learner in enumerate(learners):
                id_ = learner_id[learner]
                self.results[id_] = [p_results[i] for p_results in results]
# [end-snippet-9]
        # update the display
        self._update_curve_points()
        self._update_table()
    def _update(self):
        assert self.data is not None
        # collect all learners for which results have not yet been computed
        need_update = [(id, learner) for id, learner in self.learners.items()
                       if self.results[id] is None]
        if not need_update:
            return

        learners = [learner for _, learner in need_update]

        self.progressBarInit()
        # compute the learning curve result for all learners in one go
        results = learning_curve(
            learners, self.data, folds=self.folds,
            proportions=self.curvePoints,
            callback=lambda value: self.progressBarSet(100 * value)
        )
        self.progressBarFinished()
        # split the combined result into per learner/model results
        results = [list(Results.split_by_model(p_results))
                   for p_results in results]

        for i, (id, learner) in enumerate(need_update):
            self.results[id] = [p_results[i] for p_results in results]
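One detail worth noting in Examples #5 and #6: because the background task only hands back per-model results, the handler inverts the id -> learner mapping to find which cache slot each result belongs to, which works as long as each learner object appears under a single id. A minimal sketch of that reverse lookup, with strings standing in for learner objects:

    learners = {101: "tree", 102: "svm"}            # input id -> learner
    learner_id = {learner: id_ for id_, learner in learners.items()}

    returned = ["svm", "tree"]                      # order used by the evaluation
    results = {}
    for i, learner in enumerate(returned):
        # i is this learner's column in every per-curve-point result
        results[learner_id[learner]] = f"column {i} of each curve point"

    print(results)   # {102: 'column 0 of each curve point', 101: 'column 1 of each curve point'}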