Example #1
    def evaluate(self):
        iterator = self._iterators['main']
        eval_func = self.eval_func or self._targets['main']

        if hasattr(iterator, 'reset'):
            iterator.reset()
            it = iterator
        else:
            it = copy.copy(iterator)

        summary = reporter_module.DictSummary()
        pred_labels = []
        gt_labels = []

        for batch in it:
            observation = {}
            with reporter_module.report_scope(observation):
                in_arrays = self.converter(batch, self.device)
                with function.no_backprop_mode():
                    if isinstance(in_arrays, tuple):
                        eval_func(*in_arrays)
                    elif isinstance(in_arrays, dict):
                        eval_func(**in_arrays)
                    else:
                        eval_func(in_arrays)
                if eval_func.predictions is not None:
                    pred_labels.extend(cuda.to_cpu(eval_func.predictions))
                    gt_labels.extend(cuda.to_cpu(eval_func.gt))

            summary.add(observation)

        observation = summary.compute_mean()

        if self.label_names is not None and len(pred_labels) > 0:
            pred_labels = np.array(pred_labels)
            gt_labels = np.array(gt_labels)
            result = eval_semantic_segmentation(pred_labels, gt_labels)
            report = {
                'miou': result['miou'],
                'pixel_acc': result['pixel_accuracy'],
                'mean_class_acc': result['mean_class_accuracy']
            }
            for l, label_name in enumerate(self.label_names):
                try:
                    report['iou/{:s}'.format(label_name)] = result['iou'][l]
                    report['class_acc/{:s}'.format(
                        label_name)] = result['class_accuracy'][l]
                except IndexError:
                    report['iou/{:s}'.format(label_name)] = np.nan
                    report['class_acc/{:s}'.format(label_name)] = np.nan

            with reporter_module.report_scope(observation):
                reporter_module.report(report, eval_func)

        return observation
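An evaluator like this is normally attached to a trainer as an extension. A minimal wiring sketch, assuming a hypothetical SegEvaluator class that defines the evaluate() above, plus pre-existing trainer, model, and val_dataset objects:

    # Hypothetical wiring; SegEvaluator, trainer, model, and val_dataset
    # are illustrative assumptions, not part of the example above.
    import chainer

    val_iter = chainer.iterators.SerialIterator(
        val_dataset, batch_size=1, repeat=False, shuffle=False)
    trainer.extend(
        SegEvaluator(val_iter, model, label_names=('sky', 'building', 'road')),
        trigger=(1, 'epoch'))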
Example #2
    def evaluate_roc(self, trainer):
        iterator = self._iterators['main']
        eval_func = self.eval_func or self._targets['main']

        if self.eval_hook:
            self.eval_hook(self)

        if hasattr(iterator, 'reset'):
            iterator.reset()
            it = iterator
        else:
            it = copy.copy(iterator)

        y_total = []
        t_total = []
        length = len(it.dataset) // it.batch_size
        from tqdm import tqdm
        pbar = tqdm(total=length)
        for batch in it:
            in_arrays = self.converter(batch, self.device)

            with chainer.no_backprop_mode(), chainer.using_config('train', False):
                y = eval_func(*in_arrays[:-1])
                t = in_arrays[-1]
            y_data = cuda.to_cpu(y.data)
            t_data = cuda.to_cpu(t)
            y_total.extend(y_data)
            t_total.extend(t_data)
            pbar.update(1)
        y_total = numpy.concatenate(y_total).ravel()
        t_total = numpy.concatenate(t_total).ravel()
        index = numpy.where(t_total != -1)[0]
        y_total = y_total[index]
        t_total = t_total[index]
        d = {'label': t_total, 'score': y_total}
        from pathlib import Path
        out_path = Path('./valid_result') / str(self.epoch)
        out_path.mkdir(parents=True, exist_ok=True)  # parent dir may not exist yet
        numpy.savez(out_path / ('validation_' + str(self.rank)), **d)
        observation = {}
        with reporter.report_scope(observation):
            roc_auc = metrics.roc_auc_score(t_total, F.sigmoid(y_total).data)
            reporter.report({'roc_auc_': roc_auc}, self._targets['main'])
            reporter.report({'loss': F.sigmoid_cross_entropy(y_total, t_total).data},
                            self._targets['main'])
            reporter.report({'accuracy': F.binary_accuracy(y_total, t_total).data},
                            self._targets['main'])
        return observation
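A side note on evaluate_roc: ROC AUC depends only on the ranking of the scores, and the sigmoid is monotonic, so passing F.sigmoid(y_total) to roc_auc_score yields the same value as passing the raw logits. A quick standalone check:

    # AUC is rank-based, so a monotonic transform of the scores is a no-op.
    import numpy
    from sklearn import metrics

    scores = numpy.array([-2.0, 0.5, 1.3])
    labels = numpy.array([0, 1, 1])
    auc_raw = metrics.roc_auc_score(labels, scores)
    auc_sig = metrics.roc_auc_score(labels, 1.0 / (1.0 + numpy.exp(-scores)))
    assert abs(auc_raw - auc_sig) < 1e-12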
Example #3
    def evaluate(self):
        val_iter = self.get_iterator('main')
        model = self.get_target('main')

        it = copy.copy(val_iter)

        summary = reporter.DictSummary()
        res = []
        for i, batch in enumerate(it):
            observation = {}
            with reporter.report_scope(observation):
                imgs, pafs, heatmaps, ignore_mask = self.converter(batch, self.device)
                with function.no_backprop_mode():
                    x_data = preprocess(imgs)

                    pafs_ys, heatmaps_ys = model(x_data)

                    loss, paf_loss_log, heatmap_loss_log = compute_loss(
                        imgs, pafs_ys, heatmaps_ys, pafs, heatmaps, ignore_mask)

                    observation['val/loss'] = cuda.to_cpu(loss.data)
                    observation['val/paf'] = sum(paf_loss_log)
                    observation['val/heat'] = sum(heatmap_loss_log)
            summary.add(observation)
        return summary.compute_mean()
Example #4
    def evaluate(self):
        iterator = self._iterators['main']
        target = self._targets['main']

        annType = 'bbox'  # one of 'segm', 'bbox', 'keypoints'
        if hasattr(iterator, 'reset'):
            iterator.reset()
            it = iterator
        else:
            it = copy.copy(iterator)

        in_values, out_values, rest_values = apply_prediction_to_iterator(
            target.predict, it)
        # delete unused iterators explicitly
        del in_values

        pred_bboxes, _, pred_labels, pred_scores, pred_masks = out_values

        if len(rest_values) == 3:
            gt_bboxes, gt_labels, gt_difficults = rest_values
        elif len(rest_values) == 2:
            gt_bboxes, gt_labels = rest_values
            gt_difficults = None
        elif len(rest_values) == 5:
            gt_bboxes, gt_labels, _, _, _ = rest_values
            gt_difficults = None
        pred_bboxes = iter(list(pred_bboxes))
        pred_labels = iter(list(pred_labels))
        pred_scores = iter(list(pred_scores))
        gt_bboxes = iter(list(gt_bboxes))
        gt_labels = iter(list(gt_labels))
        data_dict = []
        for i, (pred_bbox, pred_label, pred_score) in \
            enumerate(zip(pred_bboxes, pred_labels, pred_scores)):
            for bbox, label, score in zip(pred_bbox, pred_label, pred_score):
                A = {
                    "image_id": int(self.ids[i]),
                    "category_id": int(label),
                    "bbox": bbox.tolist(),
                    "score": float(score)
                }
                data_dict.append(A)
        if len(data_dict) > 0:
            cocoGt = self.cocoanns
            cocoDt = cocoGt.loadRes(data_dict)
            cocoEval = COCOeval(self.cocoanns, cocoDt, annType)
            cocoEval.params.imgIds = [int(id_) for id_ in self.ids]
            cocoEval.evaluate()
            cocoEval.accumulate()
            cocoEval.summarize()
            report = {
                'map': cocoEval.stats[0]
            }  # report COCO AP (IoU=0.50:0.95)
        else:
            report = {'map': 0}
        observation = {}
        with reporter.report_scope(observation):
            reporter.report(report, target)
        return observation
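For reference, a self-contained sketch of the COCOeval flow used above; the annotation path and the single result dict are placeholders, and note that COCO expects boxes as (x, y, width, height):

    # Minimal pycocotools flow with placeholder inputs.
    from pycocotools.coco import COCO
    from pycocotools.cocoeval import COCOeval

    coco_gt = COCO('annotations/instances_val.json')  # assumed GT file
    coco_dt = coco_gt.loadRes([{
        'image_id': 1,
        'category_id': 1,
        'bbox': [10.0, 20.0, 50.0, 80.0],  # x, y, width, height
        'score': 0.9,
    }])
    coco_eval = COCOeval(coco_gt, coco_dt, 'bbox')
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()
    mean_ap = coco_eval.stats[0]  # AP @ IoU=0.50:0.95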
Example #5
    def test_report_key(self, metrics_fun, compute_metrics):
        repo = chainer.Reporter()

        link = Classifier(predictor=DummyPredictor(),
                          metrics_fun=metrics_fun)
        link.compute_metrics = compute_metrics
        repo.add_observer('target', link)
        with repo:
            observation = {}
            with reporter.report_scope(observation):
                link(self.x, self.t)

        # print('observation ', observation)
        actual_keys = set(observation.keys())
        if compute_metrics:
            if metrics_fun is None:
                assert set(['target/loss']) == actual_keys
            elif isinstance(metrics_fun, dict):
                assert set(['target/loss', 'target/user_key']) == actual_keys
            elif callable(metrics_fun):
                assert set(['target/loss', 'target/accuracy']) == actual_keys
            else:
                raise TypeError()
        else:
            assert set(['target/loss']) == actual_keys
Example #6
    def evaluate(self):
        iterator = self._iterators['main']
        target = self._targets['main']

        if hasattr(iterator, 'reset'):
            iterator.reset()
            it = iterator
        else:
            it = copy.copy(iterator)

        in_values, out_values, rest_values = apply_to_iterator(
            target.predict,
            it,
            n_input=2,
        )
        # delete unused iterators explicitly
        del in_values

        pred_labels, = out_values
        gt_labels, = rest_values

        report = eval_sigmoid_segmentation(pred_labels,
                                           gt_labels,
                                           channel_names=self.channel_names)

        observation = {}
        with reporter.report_scope(observation):
            reporter.report(report, target)
        return observation
Example #7
 def evaluate(self):
     """Evaluate the model."""
     val_iter = self.get_iterator('main')
     loss = 0
     nll = 0
     count = 0
     self.model.eval()
     with torch.no_grad():
         for batch in copy.copy(val_iter):
             x, t = concat_examples(batch,
                                    device=self.device[0],
                                    padding=(0, -100))
             if self.device[0] == -1:
                 l, n, c = self.model(x, t)
             else:
                 # apex does not support torch.nn.DataParallel
                 l, n, c = data_parallel(self.model, (x, t), self.device)
             loss += float(l.sum())
             nll += float(n.sum())
             count += int(c.sum())
     self.model.train()
     # report validation loss
     observation = {}
     with reporter.report_scope(observation):
         reporter.report({'loss': loss}, self.model.reporter)
         reporter.report({'nll': nll}, self.model.reporter)
         reporter.report({'count': count}, self.model.reporter)
     return observation
Example #8
    def evaluate(self):
        iterator = self._iterators['main']
        unet = self._targets['unet']
        #eval_func = self.eval_func or target

        if self.eval_hook:
            self.eval_hook(self)

        if hasattr(iterator, 'reset'):
            iterator.reset()
            it = iterator
        else:
            it = copy.copy(iterator)

        summary = reporter_module.DictSummary()

        for batch in it:
            observation = {}
            with reporter_module.report_scope(observation):
                ground_truth, data = self.converter(batch, self.device)
                with chainer.using_config("train", False):
                    with chainer.no_backprop_mode():
                        predict = unet(data)
                        ground_truth = ground_truth[:, :, 20:72, 20:72, 20:72]
                #observation['vali/unet/loss'] = self.loss_softmax_cross_entropy(predict,ground_truth)
                observation['vali/unet/dice'] = self.dice_coefficent(
                    predict, ground_truth)
            summary.add(observation)
            #print(observation)

        return summary.compute_mean()
Example #9
    def evaluate(self):
        iterator = self._iterators['main']
        target = self._targets['main']

        if hasattr(iterator, 'reset'):
            iterator.reset()
            it = iterator
        else:
            it = copy.copy(iterator)

        in_values, out_values, rest_values = apply_to_iterator(
            self.predict_func, it)
        # delete unused iterators explicitly
        del in_values

        pred_labels, pred_scores = out_values
        gt_labels, = rest_values

        result = eval_multi_label_classification(pred_labels, pred_scores,
                                                 gt_labels)

        report = {'map': result['map']}

        if self.label_names is not None:
            for l, label_name in enumerate(self.label_names):
                try:
                    report['ap/{:s}'.format(label_name)] = result['ap'][l]
                except IndexError:
                    report['ap/{:s}'.format(label_name)] = np.nan

        observation = {}
        with reporter.report_scope(observation):
            reporter.report(report, target)
        return observation
Example #10
    def evaluate(self):
        val_iter = self.get_iterator('main')
        model = self.get_target('main')

        it = copy.copy(val_iter)

        summary = reporter.DictSummary()
        res = []
        for i, batch in enumerate(it):
            observation = {}
            with reporter.report_scope(observation):
                imgs, pafs, heatmaps, ignore_mask = self.converter(
                    batch, self.device)
                with function.no_backprop_mode():
                    x_data = preprocess(imgs)

                    pafs_ys, heatmaps_ys = model(x_data)

                    loss, paf_loss_log, heatmap_loss_log = compute_loss(
                        imgs, pafs_ys, heatmaps_ys, pafs, heatmaps,
                        ignore_mask)

                    observation['val/loss'] = cuda.to_cpu(loss.data)
                    observation['val/paf'] = sum(paf_loss_log)
                    observation['val/heat'] = sum(heatmap_loss_log)
            summary.add(observation)
        return summary.compute_mean()
Example #11
    def evaluate(self):
        iterator = self._iterators['main']
        target = self._targets['main']

        if hasattr(iterator, 'reset'):
            iterator.reset()
            it = iterator
        else:
            it = copy.copy(iterator)

        pred_bboxes, pred_labels, pred_scores, gt_values =\
            apply_detection_link(target, it)
        if len(gt_values) == 3:
            gt_bboxes, gt_labels, gt_difficults = gt_values
        elif len(gt_values) == 2:
            gt_bboxes, gt_labels = gt_values
            gt_difficults = None

        eval_ = eval_detection_voc(pred_bboxes,
                                   pred_labels,
                                   pred_scores,
                                   gt_bboxes,
                                   gt_labels,
                                   gt_difficults,
                                   use_07_metric=self.use_07_metric)

        observation = {}
        with reporter.report_scope(observation):
            reporter.report({'map': eval_['map']}, target)
        return observation
Example #12
    def evaluate(self):
        from chainer import reporter
        import copy

        iterator = self._iterators['main']
        target = self._targets['main']
        eval_func = self.eval_func or target

        if self.eval_hook:
            self.eval_hook(self)
        it = copy.copy(iterator)
        summary = reporter.DictSummary()

        for batch in it:
            observation = {}
            with reporter.report_scope(observation):
                in_arrays = self.converter(batch, self.device)
                if isinstance(in_arrays, tuple):
                    eval_func(*in_arrays)
                elif isinstance(in_arrays, dict):
                    eval_func(**in_arrays)
                else:
                    eval_func(in_arrays)

            summary.add(observation)

        return summary.compute_mean()
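Loops like this only collect values because the target calls chainer.report while report_scope is active. A minimal sketch of a model that reports into the scope (the layer sizes are arbitrary):

    import chainer
    import chainer.functions as F
    import chainer.links as L

    class ReportingClassifier(chainer.Chain):
        """Illustrative Chain; any model that calls chainer.report works."""

        def __init__(self, n_in=784, n_out=10):
            super(ReportingClassifier, self).__init__()
            with self.init_scope():
                self.fc = L.Linear(n_in, n_out)

        def __call__(self, x, t):
            y = self.fc(x)
            loss = F.softmax_cross_entropy(y, t)
            # These values land in the observation dict of the enclosing
            # report_scope, which summary.add() then accumulates.
            chainer.report({'loss': loss, 'accuracy': F.accuracy(y, t)}, self)
            return loss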
Example #13
    def evaluate(self):
        """Main evaluate routine for CustomEvaluator."""
        iterator = self._iterators['main']

        if self.eval_hook:
            self.eval_hook(self)

        if hasattr(iterator, 'reset'):
            iterator.reset()
            it = iterator
        else:
            it = copy.copy(iterator)

        summary = reporter_module.DictSummary()

        self.model.eval()
        with torch.no_grad():
            for batch in it:
                observation = {}
                with reporter_module.report_scope(observation):
                    # read scp files
                    # x: original json with loaded features
                    #    will be converted to chainer variable later
                    x = self.converter(batch, self.device)
                    if self.ngpu == 0:
                        self.model(*x)
                    else:
                        # apex does not support torch.nn.DataParallel
                        data_parallel(self.model, x, range(self.ngpu))

                summary.add(observation)
        self.model.train()

        return summary.compute_mean()
Example #14
    def evaluate(self):
        bt = time.time()
        with chainer.no_backprop_mode():
            references = []
            hypotheses = []
            observation = {}
            with reporter.report_scope(observation):
                for i in range(0, len(self.test_data), self.batch):
                    src, trg = zip(*self.test_data[i:i + self.batch])
                    references.extend([[t.tolist()] for t in trg])

                    src = [chainer.dataset.to_device(self.device, x)
                           for x in src]

                    if self.comm.rank == 0:
                        self.model.translate(src, self.max_length)

                    elif self.comm.rank == 1:
                        ys = [y.tolist()
                              for y in self.model.translate(
                                  src, self.max_length)]
                        hypotheses.extend(ys)

                if self.comm.rank == 1:
                    bleu = bleu_score.corpus_bleu(
                        references, hypotheses,
                        smoothing_function=bleu_score.SmoothingFunction().method1)
                    reporter.report({'bleu': bleu}, self.model)
        et = time.time()

        if self.comm.rank == 1:
            print('BleuEvaluator(single)::evaluate(): '
                  'took {:.3f} [s]'.format(et - bt))
            sys.stdout.flush()
        return observation
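The [[t.tolist()]] wrapping above follows nltk's convention that each sentence may have several references. A tiny standalone check, assuming nltk.translate.bleu_score as used in the snippet:

    from nltk.translate import bleu_score

    # One sentence, one reference; tokens may be any hashables (ints here).
    references = [[[5, 8, 13, 21]]]
    hypotheses = [[5, 8, 13, 21]]
    bleu = bleu_score.corpus_bleu(
        references, hypotheses,
        smoothing_function=bleu_score.SmoothingFunction().method1)
    assert abs(bleu - 1.0) < 1e-9  # perfect match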
Example #15
    def evaluate(self):
        iterator = self._iterators['main']
        target = self._targets['main']

        if self.eval_hook:
            self.eval_hook(self)
        it = copy.copy(iterator)
        summary = reporter_module.DictSummary()

        acc = 0
        count = 0
        observation = {}
        with reporter_module.report_scope(observation):
            for batch in it:
                target.predictor.reset_state()
                w, l = batch[0]
                xp = cuda.cupy if self.device >= 0 else np
                xp_words = [xp.array([_w], xp.int32) for _w in w]
                predict = xp.argmax(target.predictor(xp_words).data)
                if predict == l:
                    acc += 1
                count += 1

            summary.add({'main/validation/accuracy': acc / float(count)})

        return summary.compute_mean()
Example #16
    def evaluate(self):
        #target = self._targets['main']

        summary = reporter_module.DictSummary()
        for name, target in six.iteritems(self._targets):
            iterator = self._iterators['main']
            #target = self._targets['main']
            eval_func = self.eval_func or target

            if self.eval_hook:
                self.eval_hook(self)

            if hasattr(iterator, 'reset'):
                iterator.reset()
                it = iterator
            else:
                it = copy.copy(iterator)

            #summary = reporter_module.DictSummary()
            for batch in it:
                observation = {}
                with reporter_module.report_scope(observation):
                    in_arrays = self.converter(batch, self.device)
                    with function.no_backprop_mode():
                        if isinstance(in_arrays, tuple):
                            eval_func(*in_arrays)
                        elif isinstance(in_arrays, dict):
                            eval_func(**in_arrays)
                        else:
                            eval_func(in_arrays)

                summary.add(observation)
        return summary.compute_mean()
Example #17
    def evaluate(self):
        iterator = self._iterators['main']

        if self.eval_hook:
            self.eval_hook(self)

        if hasattr(iterator, 'reset'):
            iterator.reset()
            it = iterator
        else:
            it = copy.copy(iterator)

        summary = reporter_module.DictSummary()

        self.model.eval()
        with torch.no_grad():
            for batch in it:
                observation = {}
                with reporter_module.report_scope(observation):
                    # read scp files
                    # x: original json with loaded features
                    #    will be converted to chainer variable later
                    x = self.converter(batch, self.device)
                    self.model(*x)
                summary.add(observation)
        self.model.train()

        return summary.compute_mean()
Example #18
    def test_report_key(self, metrics_fun, compute_metrics):
        repo = chainer.Reporter()

        link = Regressor(predictor=DummyPredictor(),
                         metrics_fun=metrics_fun)
        link.compute_metrics = compute_metrics
        repo.add_observer('target', link)
        with repo:
            observation = {}
            with reporter.report_scope(observation):
                link(self.x, self.t)

        # print('observation ', observation)
        actual_keys = set(observation.keys())
        if compute_metrics:
            if metrics_fun is None:
                assert set(['target/loss']) == actual_keys
            elif isinstance(metrics_fun, dict):
                assert set(['target/loss', 'target/user_key']) == actual_keys
            elif callable(metrics_fun):
                assert set(['target/loss', 'target/metrics']) == actual_keys
            else:
                raise TypeError()
        else:
            assert set(['target/loss']) == actual_keys
Example #19
 def evaluate(self):
     """Evaluate the model."""
     val_iter = self.get_iterator("main")
     loss = 0
     nll = 0
     count = 0
     self.model.eval()
     with torch.no_grad():
         for batch in copy.copy(val_iter):
             x, t = concat_examples(batch[0],
                                    device=self.device[0],
                                    padding=(0, -100))
             aver_mask = concat_examples_pad_last_2dim(
                 batch[1], device=self.device[0], padding=0)
             if self.device[0] == -1:
                 l, n, c = self.model(x, t, aver_mask)
             else:
                 # apex does not support torch.nn.DataParallel
                 l, n, c = data_parallel(self.model, (x, t, aver_mask),
                                         self.device)
             loss += float(l.sum())
             nll += float(n.sum())
             count += int(c.sum())
     self.model.train()
     # report validation loss
     observation = {}
     with reporter.report_scope(observation):
         reporter.report({"loss": loss}, self.model.reporter)
         reporter.report({"nll": nll}, self.model.reporter)
         reporter.report({"count": count}, self.model.reporter)
     return observation
Example #20
    def evaluate(self):
        iterator = self._iterators['main']
        eval_func = self.eval_func or self._targets['main']
        if self.eval_hook:
            self.eval_hook(self)

        if hasattr(iterator, 'reset'):
            iterator.reset()
            it = iterator
        else:
            it = copy.copy(iterator)

        summary = reporter_module.DictSummary()

        while True:
            batch = it.next()
            observation = {}
            with reporter_module.report_scope(observation):
                in_arrays = self.converter(batch, self.device)
                with function.no_backprop_mode():
                    if isinstance(in_arrays, tuple):
                        eval_func(*in_arrays)
                    elif isinstance(in_arrays, dict):
                        eval_func(**in_arrays)
                    else:
                        eval_func(in_arrays)

            summary.add(observation)
            if it.is_new_epoch:
                break
        out = summary.compute_mean()
        print('#############################################', out)
        return out
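The while True / is_new_epoch pattern above is written for a repeating iterator; the plain `for batch in it` loops elsewhere on this page assume a non-repeating one instead. A sketch of constructing such an iterator (val_dataset is assumed):

    from chainer import iterators

    # repeat=False makes the iterator stop after one pass over the data,
    # so a plain `for batch in it:` loop visits the validation set once.
    val_iter = iterators.SerialIterator(
        val_dataset, batch_size=32, repeat=False, shuffle=False)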
Example #21
    def evaluate(self):
        iterator = self._iterators["main"]
        
        if self.eval_hook:
            self.eval_hook(self)
        
        if hasattr(iterator, "reset"):
            iterator.reset()
            it = iterator
        else:
            warnings.warn('This iterator does not have the reset method. Evaluator '
                          'copies the iterator instead of resetting. This behavior is '
                          'deprecated. Please implement the reset method.',
                          DeprecationWarning)
            it = copy.copy(iterator)
        
        summary = reporter_module.DictSummary()

        for batch in it:
            observation = {}
            with reporter_module.report_scope(observation):
                xs, adjs = self.converter(batch, self.device)
                xp = cuda.get_array_module(xs)
                with chainer.no_backprop_mode(), chainer.using_config("train", False):
                    hs = self.model(xs, adjs)
                    hs = F.reshape(hs, (-1, hs.shape[-1]))
                    xs = xs.reshape(-1)
                    loss = F.softmax_cross_entropy(hs, xs)
                    acc = F.accuracy(hs, xs)
                reporter_module.report({"accuracy": acc, "ce_loss": loss})
            summary.add(observation)

        return summary.compute_mean()
Example #22
    def evaluate(self):
        iterator = self._iterators['main']
        target = self._targets['main']

        if hasattr(iterator, 'reset'):
            iterator.reset()
            it = iterator
        else:
            it = copy.copy(iterator)

        in_values, out_values, rest_values = apply_to_iterator(
            target.predict, it)
        # delete unused iterators explicitly
        del in_values

        points, labels, scores = out_values
        gt_points, gt_labels = rest_values

        result = eval_projected_3d_bbox_single(
            points, scores, gt_points,
            self.vertex, self.intrinsics, diam=self.diam)
        report = result

        observation = {}
        with reporter.report_scope(observation):
            reporter.report(report, target)
        return observation
Example #23
    def evaluate(self):
        iterator = self._iterators['main']
        target = self._targets['main']

        if hasattr(iterator, 'reset'):
            iterator.reset()
            it = iterator
        else:
            it = copy.copy(iterator)

        in_values, out_values, rest_values = apply_to_iterator(
            target.predict, it)
        # delete unused iterators explicitly
        del in_values

        points, labels, scores = out_values
        gt_points, gt_labels = rest_values

        result = eval_projected_3d_bbox_single(points,
                                               scores,
                                               gt_points,
                                               self.vertex,
                                               self.intrinsics,
                                               diam=self.diam)
        report = result

        observation = {}
        with reporter.report_scope(observation):
            reporter.report(report, target)
        return observation
Example #24
    def evaluate(self):
        iterator = self._iterators['main']
        eval_func = self._targets['main']

        if hasattr(iterator, 'reset'):
            iterator.reset()
            it = iterator
        else:
            warnings.warn(
                'This iterator does not have the reset method. Evaluator '
                'copies the iterator instead of resetting. This behavior is '
                'deprecated. Please implement the reset method.',
                DeprecationWarning)
            it = copy.copy(iterator)

        summary = reporter_module.DictSummary()

        for x_batch, t_batch in it:
            observation = {}
            with reporter_module.report_scope(observation):
                with function.no_backprop_mode():
                    eval_func(x_batch, t_batch)

            summary.add(observation)

        return summary.compute_mean()
Example #25
    def evaluate(self):
        iterator = self._iterators['main']
        target = self._targets['main']
        eval_func = self.eval_func or target

        if self.eval_hook:
            self.eval_hook(self)

        batch = next(iterator)

        observation = {}
        with reporter_module.report_scope(observation):
            in_arrays = self.converter(batch, self.device)
            if isinstance(in_arrays, tuple):
                in_vars = tuple(variable.Variable(x, volatile='on')
                                for x in in_arrays)
                eval_func(*in_vars)
            elif isinstance(in_arrays, dict):
                in_vars = {key: variable.Variable(x, volatile='on')
                           for key, x in six.iteritems(in_arrays)}
                eval_func(**in_vars)
            else:
                in_var = variable.Variable(in_arrays, volatile='on')
                eval_func(in_var)

        return observation
Example #26
    def evaluate(self):
        iterator = self._iterators['main']

        if self.eval_hook:
            self.eval_hook(self)

        if hasattr(iterator, 'reset'):
            iterator.reset()
            it = iterator
        else:
            it = copy.copy(iterator)

        summary = reporter_module.DictSummary()

        self.model.eval()
        if not torch_is_old:
            torch.set_grad_enabled(False)

        for batch in it:
            observation = {}
            with reporter_module.report_scope(observation):
                # read scp files
                # x: original json with loaded features
                #    will be converted to chainer variable later
                x = self.converter(batch)
                self.model(x)
                delete_feat(x)

            summary.add(observation)

        self.model.train()
        if not torch_is_old:
            torch.set_grad_enabled(True)

        return summary.compute_mean()
Example #27
    def evaluate(self):
        '''evaluate over iterator'''
        iterator = self._iterators['main']
        eval_func = self.eval_func or self._targets['main']

        if self.eval_hook:
            self.eval_hook(self)

        if hasattr(iterator, 'reset'):
            iterator.reset()
            it = iterator
        else:
            it = copy.copy(iterator)

        summary = reporter_module.DictSummary()

        # for multi gpu calculation
        chainer.cuda.get_device_from_id(self.device).use()
        for batch in it:
            observation = {}
            with reporter_module.report_scope(observation):
                # read scp files
                # x: original json with loaded features
                #    will be converted to chainer variable later
                # batch only has one minibatch utterance, which is specified by batch[0]
                x = converter_kaldi(batch[0], self.reader)
                with function.no_backprop_mode():
                    eval_func(x)
                    delete_feat(x)

            summary.add(observation)

        return summary.compute_mean()
Example #28
    def evaluate(self):
        val_iter = self.get_iterator('main')
        model = self.get_target('main')

        it = copy.copy(val_iter)

        summary = reporter.DictSummary()
        res = []
        for i, batch in enumerate(it):
            observation = {}
            with reporter.report_scope(observation):
                imgs, pafs, heatmaps, ignore_mask = self.converter(
                    batch, self.device)
                with function.no_backprop_mode():
                    x_data = imgs.astype(np.float32).transpose(0, 3, 1,
                                                               2) / 256 - 0.5

                    inferenced_pafs, inferenced_heatmaps = model(x_data)

                    loss, pafs_loss_log, heatmaps_loss_log = compute_loss(
                        inferenced_pafs, inferenced_heatmaps, pafs, heatmaps,
                        ignore_mask)
                    observation['val/loss'] = cuda.to_cpu(loss.data)
            summary.add(observation)
        return summary.compute_mean()
Example #29
    def evaluate(self):
        iterator = self._iterators['main']
        eval_func = self.eval_func or self._targets['main']

        if hasattr(iterator, 'reset'):
            iterator.reset()
            it = iterator
        else:
            it = copy.copy(iterator)

        summary = reporter_module.DictSummary()

        for batch in it:
            observation = {}
            kwargs = {'train': False}
            with reporter_module.report_scope(observation):
                in_arrays = self.converter(batch, self.device)
                with function.no_backprop_mode():
                    if isinstance(in_arrays, tuple):
                        eval_func(*in_arrays, **kwargs)
                    elif isinstance(in_arrays, dict):
                        eval_func(**in_arrays, **kwargs)
                    else:
                        eval_func(in_arrays, **kwargs)

            summary.add(observation)

        observation = summary.compute_mean()

        return observation
Example #30
    def evaluate(self):
        iterator = self._iterators['main']
        target = self._targets['main']

        if hasattr(iterator, 'reset'):
            iterator.reset()
            it = iterator
        else:
            it = copy.copy(iterator)

        in_values, out_values, rest_values = apply_to_iterator(
            target.predict, it)

        # delete unused iterators explicitly
        del in_values

        pred_imgs, = out_values
        gt_imgs, = rest_values

        mse_list = []
        for pred_img, gt_img in zip(pred_imgs, gt_imgs):
            diff = (pred_img - gt_img).ravel()
            mse = diff.dot(diff) / diff.size
            mse_list.append(mse)

        report = {
            'loss': np.mean(mse_list),
        }

        observation = {}
        with reporter.report_scope(observation):
            reporter.report(report, target)
        return observation
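The per-image diff.dot(diff) / diff.size above is exactly the mean squared error; a small standalone check of the equivalence:

    import numpy as np

    pred_img = np.array([[0.0, 1.0], [2.0, 3.0]])
    gt_img = np.array([[0.0, 1.5], [2.0, 2.0]])
    diff = (pred_img - gt_img).ravel()
    assert np.isclose(diff.dot(diff) / diff.size,
                      np.mean((pred_img - gt_img) ** 2))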
Example #31
    def evaluate(self):
        bt = time.time()
        with chainer.no_backprop_mode():
            references = []
            hypotheses = []
            observation = {}
            with reporter.report_scope(observation):
                for i in range(0, len(self.test_data), self.batch):
                    src, trg = zip(*self.test_data[i:i + self.batch])
                    references.extend([[t.tolist()] for t in trg])

                    src = [chainer.dataset.to_device(self.device, x)
                           for x in src]

                    if self.comm.rank == 0:
                        self.model.translate(src, self.max_length)

                    elif self.comm.rank == 1:
                        ys = [y.tolist()
                              for y in self.model.translate(
                                  src, self.max_length)]
                        hypotheses.extend(ys)

                if self.comm.rank == 1:
                    bleu = bleu_score.corpus_bleu(
                        references, hypotheses,
                        smoothing_function=bleu_score.SmoothingFunction().method1)
                    reporter.report({'bleu': bleu}, self.model)
        et = time.time()

        if self.comm.rank == 1:
            print("BleuEvaluator(single)::evaluate(): "
                  "took {:.3f} [s]".format(et - bt))
            sys.stdout.flush()
        return observation
Example #32
    def evaluate(self):
        iterator = self._iterators['main']
        target = self._targets['main']

        if hasattr(iterator, 'reset'):
            iterator.reset()
            it = iterator
        else:
            it = copy.copy(iterator)

        in_values, out_values, rest_values = apply_to_iterator(
            self.predict_func, it)
        # delete unused iterators explicitly
        del in_values

        pred_labels, pred_scores = out_values
        gt_labels, = rest_values

        result = eval_multi_label_classification(
            pred_labels, pred_scores, gt_labels)

        report = {'map': result['map']}

        if self.label_names is not None:
            for l, label_name in enumerate(self.label_names):
                try:
                    report['ap/{:s}'.format(label_name)] = result['ap'][l]
                except IndexError:
                    report['ap/{:s}'.format(label_name)] = np.nan

        observation = {}
        with reporter.report_scope(observation):
            reporter.report(report, target)
        return observation
Example #33
    def evaluate(self):
        iterator = self._iterators['main']

        if self.eval_hook:
            self.eval_hook(self)

        if hasattr(iterator, 'reset'):
            iterator.reset()
            it = iterator
        else:
            it = copy.copy(iterator)

        summary = reporter_module.DictSummary()

        for batch in it:
            observation = {}
            with reporter_module.report_scope(observation):
                # read scp files
                # x: original json with loaded features
                #    will be converted to chainer variable later
                # batch only has one minibatch utterance, which is specified by batch[0]
                x = converter_kaldi(batch[0], self.reader)
                self.model.eval()
                self.model(x)
                delete_feat(x)

            summary.add(observation)

        return summary.compute_mean()
Example #34
File: evaluator.py  Project: tatHi/pyner
    def evaluate(self):
        iterator = self.get_iterator("main")
        target = self.get_target("main")

        if hasattr(iterator, "reset"):
            iterator.reset()
            it = iterator
        else:
            it = copy.copy(iterator)

        summary = reporter.DictSummary()

        for batch in it:
            observation = {}
            with reporter.report_scope(observation):
                with function.no_backprop_mode():
                    in_arrays, t_arrays = self.converter(batch, self.device)

                    p_arrays = target.predict(in_arrays)
                    _, t_tag_sentences = list(
                        zip(*self.transform_func(in_arrays[0], t_arrays)))
                    _, p_tag_sentences = list(
                        zip(*self.transform_func(in_arrays[0], p_arrays)))

                    fscore = metrics.f1_score(t_tag_sentences, p_tag_sentences)

                    reporter.report({"loss": target(in_arrays, t_arrays)},
                                    target)
                    reporter.report({"fscore": fscore}, target)

            summary.add(observation)

        return summary.compute_mean()
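The metrics.f1_score call above compares whole tag sequences; assuming it refers to seqeval's entity-level F1 (an assumption, though it matches the call signature), a tiny usage sketch:

    # Assumed to be seqeval; it scores entity spans, not individual tokens.
    from seqeval.metrics import f1_score

    y_true = [['B-PER', 'I-PER', 'O', 'B-LOC']]
    y_pred = [['B-PER', 'I-PER', 'O', 'O']]
    print(f1_score(y_true, y_pred))  # 0.667: one of two true spans recovered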
Example #35
    def evaluate(self):
        iterator = self._iterators['main']
        target = self._targets['main']
        eval_func = self.eval_func or target

        if self.eval_hook:
            self.eval_hook(self)
        it = copy.copy(iterator)
        summary = reporter_module.DictSummary()

        n_batches = min(
            len(iterator.dataset) // iterator.batch_size, self.num_iterations)
        for _ in range(n_batches):
            batch = next(it, None)
            if batch is None:
                break

            observation = {}
            with reporter_module.report_scope(observation), \
                    chainer.using_config('train', False), \
                    chainer.using_config('enable_backprop', False):
                in_arrays = self.converter(batch, self.device)
                if isinstance(in_arrays, tuple):
                    eval_func(*in_arrays)
                elif isinstance(in_arrays, dict):
                    eval_func(**in_arrays)
                else:
                    eval_func(in_arrays)

            summary.add(observation)

        return summary.compute_mean()
Example #36
    def evaluate(self):
        """Evaluates the model and returns a result dictionary.

        This method runs the evaluation loop over the validation dataset. It
        accumulates the reported values to :class:`~chainer.DictSummary` and
        returns a dictionary whose values are means computed by the summary.

        Note that this function assumes that the main iterator raises
        ``StopIteration`` or code in the evaluation loop raises an exception.
        So, if this assumption is not held, the function could be caught in
        an infinite loop.

        Users can override this method to customize the evaluation routine.

        .. note::

            This method encloses :attr:`eval_func` calls with
            :func:`function.no_backprop_mode` context, so all calculations
            using :class:`~chainer.FunctionNode`\\s inside
            :attr:`eval_func` do not make computational graphs. It is for
            reducing the memory consumption.

        Returns:
            dict: Result dictionary. This dictionary is further reported via
            :func:`~chainer.report` without specifying any observer.

        """
        iterator = self._iterators['main']
        eval_func = self.eval_func or self._targets['main']

        if self.eval_hook:
            self.eval_hook(self)

        if hasattr(iterator, 'reset'):
            iterator.reset()
            it = iterator
        else:
            it = copy.copy(iterator)

        summary = reporter_module.DictSummary()

        for batch in it:
            observation = {}
            with reporter_module.report_scope(observation):
                in_arrays = self.converter(batch, self.device)
                with function.no_backprop_mode():
                    if isinstance(in_arrays, tuple):
                        eval_func(*in_arrays)
                    elif isinstance(in_arrays, dict):
                        eval_func(**in_arrays)
                    else:
                        eval_func(in_arrays)

            summary.add(observation)

        return summary.compute_mean()
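This one is essentially the stock extensions.Evaluator.evaluate. A sketch of how such an override is typically packaged and registered (trainer, val_iter, and model are assumed to exist):

    from chainer.training import extensions

    class MyEvaluator(extensions.Evaluator):
        """Only evaluate() is overridden; construction keeps the stock
        Evaluator(iterator, target, converter=..., device=...) form."""
        # def evaluate(self): ... as above

    trainer.extend(MyEvaluator(val_iter, model, device=0),
                   trigger=(1, 'epoch'))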
Example #37
    def evaluate(self):
        """override method of extensions.Evaluator."""

        iterator = self._iterators['main']
        target = self._targets['main']
        eval_func = self.eval_func or target

        if self.eval_hook:
            self.eval_hook(self)
        it = copy.copy(iterator)
        summary = reporter_module.DictSummary()
        features = None
        max_loc = None
        pbar = tqdm(total=len(iterator.dataset))
        for batch in it:
            observation = {}
            with reporter_module.report_scope(observation):
                in_arrays = self.converter(batch, self.device)
                if isinstance(in_arrays, tuple):
                    in_vars = tuple(variable.Variable(x, volatile='off')
                                    for x in in_arrays)
                    eval_func(*in_vars)
                elif isinstance(in_arrays, dict):
                    in_vars = {key: variable.Variable(x, volatile='off')
                               for key, x in six.iteritems(in_arrays)}
                    eval_func(**in_vars)
                else:
                    in_var = variable.Variable(in_arrays, volatile='off')
                    eval_func(in_var)
            pbar.update(len(batch))

            # Get the Variable of the layer targeted for deconvolution
            layer_variable = Vutil.get_variable(
                observation[self.lastname], self.layer_rank)

            if features is None:
                features = Vutil.get_features(
                    layer_variable, self.operation)
            else:
                xp = cuda.get_array_module(features)
                features = xp.vstack((features, Vutil.get_features(
                    layer_variable, self.operation)))
            #self.add_to_confmat(self.confmat, in_vars[1].data, self.getpred(observation[self.lastname]))
            summary.add(observation)
        pbar.close()
        #print(self.confmat)
        #print(np.diag(self.confmat))
        #print(1.0 * np.diag(self.confmat).sum() / self.confmat.sum())
        return summary.compute_mean(), features
Example #38
    def evaluate(self):
        """Evaluates the model and returns a result dictionary.

        This method runs the evaluation loop over the validation dataset. It
        accumulates the reported values to :class:`~chainer.DictSummary` and
        returns a dictionary whose values are means computed by the summary.

        Users can override this method to customize the evaluation routine.

        Returns:
            dict: Result dictionary. This dictionary is further reported via
                :func:`~chainer.report` without specifying any observer.

        """
        iterator = self._iterators['main']
        target = self._targets['main']
        eval_func = self.eval_func or target

        if self.eval_hook:
            self.eval_hook(self)

        if hasattr(iterator, 'reset'):
            iterator.reset()
            it = iterator
        else:
            it = copy.copy(iterator)

        summary = reporter_module.DictSummary()

        for batch in it:
            observation = {}
            with reporter_module.report_scope(observation):
                in_arrays = self.converter(batch, self.device)
                if isinstance(in_arrays, tuple):
                    in_vars = tuple(variable.Variable(x, volatile='on')
                                    for x in in_arrays)
                    eval_func(*in_vars)
                elif isinstance(in_arrays, dict):
                    in_vars = {key: variable.Variable(x, volatile='on')
                               for key, x in six.iteritems(in_arrays)}
                    eval_func(**in_vars)
                else:
                    in_var = variable.Variable(in_arrays, volatile='on')
                    eval_func(in_var)

            summary.add(observation)

        return summary.compute_mean()
Example #39
    def evaluate(self):
        """override method of extensions.Evaluator."""

        iterator = self._iterators['main']
        target = self._targets['main']
        eval_func = self.eval_func or target

        if self.eval_hook:
            self.eval_hook(self)
        it = copy.copy(iterator)
        summary = reporter_module.DictSummary()
        predictions = []
        rankings = []
        n_categories = 0
        pbar = tqdm(total=len(it.dataset))
        self.confmat = None
        for batch in it:
            observation = {}
            with reporter_module.report_scope(observation):
                in_arrays = self.converter(batch, self.device)
                if isinstance(in_arrays, tuple):
                    in_vars = tuple(variable.Variable(x, volatile='off')
                                    for x in in_arrays)
                    eval_func(*in_vars)
                elif isinstance(in_arrays, dict):
                    in_vars = {key: variable.Variable(x, volatile='off')
                               for key, x in six.iteritems(in_arrays)}
                    eval_func(**in_vars)
                else:
                    in_var = variable.Variable(in_arrays, volatile='off')
                    eval_func(in_var)

            if n_categories == 0:
                n_categories = self.get_n_categories(observation[self.lastname])
                self.confmat = np.zeros((n_categories, n_categories), dtype=np.int32)
                #self.printloss(observation[self.lastname])
                #print(self.getpred(observation[self.lastname]))

            predictions.append(self.getpred(observation[self.lastname]))
            rankings.append(self.getranking(observation[self.lastname], in_vars[1].data))
            self.add_to_confmat(self.confmat, in_vars[1].data, self.getpred(observation[self.lastname]))
            summary.add(observation)
            pbar.update(len(batch))
        #print(self.confmat)
        #print(np.diag(self.confmat))
        #print(1.0 * np.diag(self.confmat).sum() / self.confmat.sum())
        return summary.compute_mean(), predictions, rankings
Example #40
    def evaluate(self):
        iterator = self._iterators['main']
        target = self._targets['main']

        if hasattr(iterator, 'reset'):
            iterator.reset()
            it = iterator
        else:
            it = copy.copy(iterator)

        in_values, out_values, rest_values = apply_to_iterator(
            target.predict, it)
        # delete unused iterators explicitly
        del in_values

        pred_bboxes, pred_labels, pred_scores = out_values

        if len(rest_values) == 3:
            gt_bboxes, gt_labels, gt_difficults = rest_values
        elif len(rest_values) == 2:
            gt_bboxes, gt_labels = rest_values
            gt_difficults = None

        result = eval_detection_voc(
            pred_bboxes, pred_labels, pred_scores,
            gt_bboxes, gt_labels, gt_difficults,
            use_07_metric=self.use_07_metric)

        report = {'map': result['map']}

        if self.label_names is not None:
            for l, label_name in enumerate(self.label_names):
                try:
                    report['ap/{:s}'.format(label_name)] = result['ap'][l]
                except IndexError:
                    report['ap/{:s}'.format(label_name)] = np.nan

        observation = {}
        with reporter.report_scope(observation):
            reporter.report(report, target)
        return observation
Example #41
    def evaluate(self):
        val_iter = self.get_iterator('main')
        model = self.get_target('main')

        it = copy.copy(val_iter)

        summary = reporter.DictSummary()
        res = []
        for i, batch in enumerate(it):
            observation = {}
            with reporter.report_scope(observation):
                imgs, pafs, heatmaps, ignore_mask = self.converter(batch, self.device)
                with function.no_backprop_mode():
                    x_data = imgs.astype(np.float32).transpose(0, 3, 1, 2) / 256 - 0.5

                    inferenced_pafs, inferenced_heatmaps = model(x_data)

                    loss, pafs_loss_log, heatmaps_loss_log = compute_loss(
                        inferenced_pafs, inferenced_heatmaps, pafs, heatmaps, ignore_mask)
                    observation['val/loss'] = cuda.to_cpu(loss.data)
            summary.add(observation)
        return summary.compute_mean()
Example #42
    def evaluate(self):
        iterator = self._iterators['main']
        target = self._targets['main']

        if hasattr(iterator, 'reset'):
            iterator.reset()
            it = iterator
        else:
            it = copy.copy(iterator)

        in_values, out_values, rest_values = apply_to_iterator(
            target.predict, it)
        # delete unused iterators explicitly
        del in_values

        pred_labels, = out_values
        gt_labels, = rest_values

        result = eval_semantic_segmentation(pred_labels, gt_labels)

        report = {'miou': result['miou'],
                  'pixel_accuracy': result['pixel_accuracy'],
                  'mean_class_accuracy': result['mean_class_accuracy']}

        if self.label_names is not None:
            for l, label_name in enumerate(self.label_names):
                try:
                    report['iou/{:s}'.format(label_name)] = result['iou'][l]
                    report['class_accuracy/{:s}'.format(label_name)] =\
                        result['class_accuracy'][l]
                except IndexError:
                    report['iou/{:s}'.format(label_name)] = np.nan
                    report['class_accuracy/{:s}'.format(label_name)] = np.nan

        observation = {}
        with reporter.report_scope(observation):
            reporter.report(report, target)
        return observation
Example #43
    def evaluate(self):
        bt = time.time()
        with chainer.no_backprop_mode():
            references = []
            hypotheses = []
            observation = {}
            with reporter.report_scope(observation):
                for i in range(0, len(self.test_data), self.batch):
                    src, trg = zip(*self.test_data[i:i + self.batch])
                    references.extend([[t.tolist()] for t in trg])

                    src = [chainer.dataset.to_device(self.device, x)
                           for x in src]
                    ys = [y.tolist()
                          for y in self.model.translate(src, self.max_length)]
                    hypotheses.extend(ys)

                bleu = bleu_score.corpus_bleu(
                    references, hypotheses,
                    smoothing_function=bleu_score.SmoothingFunction().method1)
                reporter.report({'bleu': bleu}, self.model)
        et = time.time()

        if self.comm is not None:
            # This evaluator is called via chainermn.MultiNodeEvaluator
            for i in range(0, self.comm.size):
                print('BleuEvaluator::evaluate(): '
                      'took {:.3f} [s]'.format(et - bt))
                sys.stdout.flush()
                self.comm.mpi_comm.Barrier()
        else:
            # This evaluator is called from a conventional
            # Chainer extension
            print('BleuEvaluator(single)::evaluate(): '
                  'took {:.3f} [s]'.format(et - bt))
            sys.stdout.flush()
        return observation
Example #44
    def evaluate(self):
        iterator = self._iterators['main']
        eval_func = self.eval_func or self._targets['main']

        if self.eval_hook:
            self.eval_hook(self)

        if hasattr(iterator, 'reset'):
            iterator.reset()
            it = iterator
        else:
            it = copy.copy(iterator)

        y_total = []
        t_total = []
        for batch in it:
            in_arrays = self.converter(batch, self.device)
            with chainer.no_backprop_mode(), chainer.using_config('train',
                                                                  False):
                y = eval_func(*in_arrays[:-1])
            t = in_arrays[-1]
            y_data = _get_1d_numpy_array(y)
            t_data = _get_1d_numpy_array(t)
            y_total.append(y_data)
            t_total.append(t_data)

        y_total = numpy.concatenate(y_total).ravel()
        t_total = numpy.concatenate(t_total).ravel()
        # metrics_value = self.metrics_fun(y_total, t_total)
        metrics = {key: metric_fun(y_total, t_total) for key, metric_fun in
                   self.metrics_fun.items()}

        observation = {}
        with reporter.report_scope(observation):
            reporter.report(metrics, self._targets['main'])
        return observation
Example #45
    def evaluate(self):
        """override method of extensions.Evaluator."""

        iterator = self._iterators['main']
        target = self._targets['main']
        eval_func = self.eval_func or target

        if self.eval_hook:
            self.eval_hook(self)
        it = copy.copy(iterator)
        summary = reporter_module.DictSummary()
        max_locs = []
        bounds = []
        n_processed = 0
        for batch in it:
            observation = {}
            with reporter_module.report_scope(observation):
                in_arrays = self.converter(batch, self.device)
                if isinstance(in_arrays, tuple):
                    in_vars = tuple(variable.Variable(x, volatile='off')
                                    for x in in_arrays)
                    eval_func(*in_vars)
                elif isinstance(in_arrays, dict):
                    in_vars = {key: variable.Variable(x, volatile='off')
                               for key, x in six.iteritems(in_arrays)}
                    eval_func(**in_vars)
                else:
                    in_var = variable.Variable(in_arrays, volatile='off')
                    eval_func(in_var)

            # Get the Variable of the layer targeted for deconvolution
            layer_variable = Vutil.get_variable(
                observation[self.lastname], self.layer_rank)

            pred = self.getpred(layer_variable)
            truth = in_vars[1].data
            xp = cuda.get_array_module(pred)
            if xp == cupy:
                pred = pred.get()
                truth = truth.get()
            indices = pred.flatten() if self.target == 'pred' else truth.flatten()

            # Get the input-layer region needed to locate the maximum value
            isfc = Vutil.has_fc_layer(layer_variable)
            if isfc:
                bounds = Vutil.get_data_bounds(layer_variable)
            else:
                bounds = Vutil.get_max_bounds(layer_variable, indices)
            # Run the deconvolution
            deconv_data = self.get_deconv(
                layer_variable, indices)

            topk = np.arange(n_processed, n_processed + len(batch)) // self.n_features

            for i, (t, p, d, b) in enumerate(zip(truth, indices, deconv_data, bounds)):
                #print(dir(d))
                # Add the mean image back to the deconvolved input and
                # convert it to an image
                img = ioutil.deprocess(d.get(), self.mean)
                # Crop to the input-layer region needed to locate the
                # maximum value, then save
                img.crop((b[0], b[2], b[1], b[3])).save(
                    os.path.join(
                        self.deconv_image_dir,
                        "{0:0>6}_{1:0>4}_{2:0>4}.png".format(n_processed + i, t, p)))

            '''max_locs.extend(self.get_max_locs(
                observation[self.lastname], self.layer_rank, indices))
            bounds.extend(self.get_max_patch_bounds(
                observation[self.lastname], self.layer_rank, indices))'''
            n_processed += len(batch)

            #self.add_to_confmat(self.confmat, in_vars[1].data, self.getpred(observation[self.lastname]))
            summary.add(observation)
        #print(self.confmat)
        #print(np.diag(self.confmat))
        #print(1.0 * np.diag(self.confmat).sum() / self.confmat.sum())
        return summary.compute_mean(), max_locs, bounds
Example #46
    def evaluate(self):
        """override method of extensions.Evaluator."""

        iterator = self._iterators['main']
        target = self._targets['main']
        eval_func = self.eval_func or target

        if self.eval_hook:
            self.eval_hook(self)
        it = copy.copy(iterator)
        summary = reporter_module.DictSummary()
        max_locs = []
        bounds = []
        n_processed = 0
        filter_idx = 0
        pbar = tqdm(total=len(iterator.dataset))
        for batch in it:
            observation = {}
            with reporter_module.report_scope(observation):
                in_arrays = self.converter(batch, self.device)
                if isinstance(in_arrays, tuple):
                    in_vars = tuple(variable.Variable(x, volatile='off')
                                    for x in in_arrays)
                    eval_func(*in_vars)
                elif isinstance(in_arrays, dict):
                    in_vars = {key: variable.Variable(x, volatile='off')
                               for key, x in six.iteritems(in_arrays)}
                    eval_func(**in_vars)
                else:
                    in_var = variable.Variable(in_arrays, volatile='off')
                    eval_func(in_var)
            pbar.update(len(batch))

            indices = np.arange(filter_idx, filter_idx + len(batch)) % self.n_features

            # Get the Variable of the layer targeted for deconvolution
            layer_variable = Vutil.get_variable(
                observation[self.lastname], self.layer_rank)

            # Get the input-layer region needed to locate the maximum value
            isfc = Vutil.has_fc_layer(layer_variable)
            if isfc:
                batch_bounds = Vutil.get_data_bounds(layer_variable)
            else:
                batch_bounds = Vutil.get_max_bounds(layer_variable, indices)

            topk = np.arange(n_processed, n_processed + len(batch)) // self.n_features
            input_data = Vutil.get_data_layer(layer_variable).data

            for k, f, d, b in zip(topk, indices, input_data, batch_bounds):
                #print(dir(d))
                # Add the mean image back to the input and convert it to an image
                img = ioutil.deprocess(d.get(), self.mean)
                # Crop to the input-layer region needed to locate the
                # maximum value, then save
                img.crop((b[0], b[2], b[1], b[3])).save(
                    os.path.join(
                        self.patch_image_dir,
                        "{0:0>4}_{1:0>2}.png".format(f, k)))

            if not isfc:
                max_locs.extend(Vutil.get_max_locs(layer_variable, indices))
                bounds.extend(batch_bounds)

            filter_idx = (filter_idx + len(batch)) % self.n_features
            n_processed += len(batch)
            #self.add_to_confmat(self.confmat, in_vars[1].data, self.getpred(observation[self.lastname]))
            summary.add(observation)
        pbar.close()
        #print(self.confmat)
        #print(np.diag(self.confmat))
        #print(1.0 * np.diag(self.confmat).sum() / self.confmat.sum())
        return summary.compute_mean(), max_locs, bounds