def eval_ss(self, data_loader, epoch=0, model_file_name=None, save_path=None):
        """Evaluate semantic segmentation over ``data_loader``.

        Optionally restores weights from ``model_file_name`` first and, when
        ``save_path`` is given, writes each input image plus colorized label
        and prediction maps there for visual inspection.

        :param data_loader: yields (inputs, labels, _) batches.
        :param epoch: epoch index; only used in the result log line.
        :param model_file_name: optional checkpoint path to load before eval.
        :param save_path: optional directory for per-sample visualizations.
        :return: score dict from ``StreamSegMetrics.get_results()``.
        """
        if model_file_name is not None:
            # Fix: log message previously read "Load model form {}".
            Tools.print("Load model from {}".format(model_file_name), txt_path=self.config.ss_save_result_txt)
            self.load_model(model_file_name)
            pass

        un_norm = MyTransform.transform_un_normalize()
        self.net.eval()
        metrics = StreamSegMetrics(self.config.ss_num_classes)
        with torch.no_grad():
            for i, (inputs, labels, _) in tqdm(enumerate(data_loader), total=len(data_loader)):
                inputs = inputs.float().cuda()
                labels = labels.long().cuda()
                outputs = self.net(inputs)
                # argmax over the class dimension -> per-pixel class ids
                preds = outputs.detach().max(dim=1)[1].cpu().numpy()
                targets = labels.cpu().numpy()

                metrics.update(targets, preds)

                if save_path:
                    for j, (input_one, label_one, pred_one) in enumerate(zip(inputs, targets, preds)):
                        # un-normalize the network input back to an image before saving
                        un_norm(input_one.cpu()).save(os.path.join(save_path, "{}_{}.JPEG".format(i, j)))
                        DataUtil.gray_to_color(np.asarray(label_one, dtype=np.uint8)).save(
                            os.path.join(save_path, "{}_{}_l.png".format(i, j)))
                        DataUtil.gray_to_color(np.asarray(pred_one, dtype=np.uint8)).save(
                            os.path.join(save_path, "{}_{}_p.png".format(i, j)))
                        pass
                    pass
                pass
            pass

        score = metrics.get_results()
        Tools.print("{} {}".format(epoch, metrics.to_str(score)), txt_path=self.config.ss_save_result_txt)
        return score
# ---- Example #2 (scraped-source separator; original text was "Пример #2" / "0") ----
    def eval_ss(self, epoch=0, model_file_name=None):
        """Evaluate semantic segmentation on ``self.data_loader_ss_val``.

        :param epoch: epoch index; only used in the result log line.
        :param model_file_name: optional checkpoint path to load before eval.
        :return: score dict from ``StreamSegMetrics.get_results()``.
        """
        if model_file_name is not None:
            # Fix: log message previously read "Load model form {}".
            Tools.print("Load model from {}".format(model_file_name),
                        txt_path=self.config.ss_save_result_txt)
            self.load_model(model_file_name)
            pass

        self.net.eval()
        metrics = StreamSegMetrics(self.config.ss_num_classes)
        with torch.no_grad():
            for i, (inputs,
                    labels) in tqdm(enumerate(self.data_loader_ss_val),
                                    total=len(self.data_loader_ss_val)):
                inputs = inputs.float().cuda()
                labels = labels.long().cuda()
                outputs = self.net(inputs)
                # argmax over the class dimension -> per-pixel class ids
                preds = outputs.detach().max(dim=1)[1].cpu().numpy()
                targets = labels.cpu().numpy()

                metrics.update(targets, preds)
                pass
            pass

        score = metrics.get_results()
        Tools.print("{} {}".format(epoch, metrics.to_str(score)),
                    txt_path=self.config.ss_save_result_txt)
        return score
    def eval_ss(self, epoch=0, model_file_name=None):
        """Evaluate segmentation at the label's original resolution.

        Unlike the batched variant, this expects batch size 1 and reads the
        ground-truth label directly from disk (``label_info_list[0]``),
        up-sampling the network output to the label's size before scoring.

        :param epoch: epoch index; only used in the result log line.
        :param model_file_name: optional checkpoint path to load before eval.
        :return: score dict from ``StreamSegMetrics.get_results()``.
        """
        if model_file_name is not None:
            # Fix: log message previously read "Load model form {}".
            Tools.print("Load model from {}".format(model_file_name), txt_path=self.config.ss_save_result_txt)
            self.load_model(model_file_name)
            pass

        self.net.eval()
        metrics = StreamSegMetrics(self.config.ss_num_classes)
        with torch.no_grad():
            for i, (inputs, labels, image_info_list, label_info_list) in tqdm(
                    enumerate(self.data_loader_ss_val), total=len(self.data_loader_ss_val)):
                assert len(image_info_list) == 1
                # PIL size is (width, height); interpolate wants (height, width) below.
                size = Image.open(label_info_list[0]).size

                inputs = inputs.float().cuda()
                outputs = self.net(inputs)
                outputs = F.interpolate(outputs, size=(size[1], size[0]), mode="bilinear", align_corners=False)

                preds = outputs.detach().max(dim=1)[1].cpu().numpy()
                # Ground truth read from file at full resolution; add batch axis.
                targets = np.expand_dims(np.asarray(Image.open(label_info_list[0])), axis=0)

                metrics.update(targets, preds)
                pass
            pass

        score = metrics.get_results()
        Tools.print("{} {}".format(epoch, metrics.to_str(score)), txt_path=self.config.ss_save_result_txt)
        return score
    def inference_crf(self, dataset, logits_path):
        """Apply DenseCRF post-processing to logits previously saved as .npy files.

        Reads per-image logits from ``{logits_path}_logit``, refines them with a
        DenseCRF over the resized RGB image, writes visualizations to
        ``{logits_path}_crf`` and raw index maps to ``{logits_path}_crf_final``,
        then prints aggregate segmentation metrics over all images.
        Work is fanned out across all CPU cores with joblib.
        """
        logit_file_path = Tools.new_dir("{}_logit".format(logits_path))
        crf_file_path = Tools.new_dir("{}_crf".format(logits_path))
        crf_final_file_path = Tools.new_dir("{}_crf_final".format(logits_path))

        postprocessor = DenseCRF()
        n_jobs = multiprocessing.cpu_count()

        def process(i):
            # Per-image worker: returns (predicted index map, ground-truth array).
            image_info, label_info = dataset.__getitem__(i)
            # label_info == 1 appears to mark "no label file": substitute an
            # all-zero mask of the image's shape — TODO confirm against dataset.
            label = Image.fromarray(np.zeros_like(np.asarray(Image.open(image_info)))).convert("L") \
                if label_info == 1 else Image.open(label_info)

            basename = os.path.basename(image_info)
            im = Image.open(image_info)
            logit = np.load(
                os.path.join(logit_file_path, basename.replace(".jpg",
                                                               ".npy")))

            # PIL size is (w, h); both size tuples below are (h, w).
            ori_size = (im.size[1], im.size[0])
            crf_size = (logit.shape[1], logit.shape[2])

            logit_tensor = torch.FloatTensor(logit)[None, ...]
            logit_tensor = self._up_to_target(logit_tensor,
                                              target_size=crf_size)
            # Softmax over classes -> per-pixel probabilities for the CRF.
            prob_one = F.softmax(logit_tensor, dim=1)[0].numpy()

            # Run CRF on the image resized to the logit resolution, then
            # up-sample the refined probabilities back to the original size.
            prob_crf = postprocessor(
                np.array(im.resize((crf_size[1], crf_size[0]))), prob_one)
            prob_crf_resize = self._up_to_target(
                torch.FloatTensor(prob_crf)[None, ...], target_size=ori_size)
            result = np.argmax(prob_crf_resize[0].numpy(), axis=0)

            # save
            im.save(os.path.join(crf_file_path, basename))
            DataUtil.gray_to_color(np.asarray(label, dtype=np.uint8)).save(
                os.path.join(crf_file_path, basename.replace(".jpg",
                                                             "_l.png")))
            DataUtil.gray_to_color(np.asarray(result, dtype=np.uint8)).save(
                os.path.join(crf_file_path, basename.replace(".jpg", ".png")))
            # Raw (uncolored) class-index map for downstream consumption.
            Image.fromarray(np.asarray(result, dtype=np.uint8)).save(
                os.path.join(crf_final_file_path,
                             basename.replace(".jpg", ".png")))

            return result, np.array(label)

        results = joblib.Parallel(n_jobs=n_jobs,
                                  verbose=10,
                                  pre_dispatch="all")([
                                      joblib.delayed(process)(i)
                                      for i in range(len(dataset))
                                  ])

        # Aggregate metrics over all (pred, target) pairs.
        metrics = StreamSegMetrics(self.config.ss_num_classes)
        for preds, targets in results:
            metrics.update(targets, preds)
        Tools.print("{}".format(metrics.to_str(metrics.get_results())))
        Tools.print()
        pass
    def eval(self, epoch=0, model_file_name=None, result_path=None):
        """Evaluate classification (mae/f1/acc) and, optionally, segmentation.

        Expects batch size 1. Classification metrics are always computed from
        ``result["class_logits"]``; segmentation metrics only when
        ``self.config.has_ss`` is set, scored at the label's original size.

        :param epoch: epoch index; only used in the log lines.
        :param model_file_name: optional checkpoint path to load before eval.
        :param result_path: optional directory for per-image visualizations.
        """
        if model_file_name is not None:
            # Fix: log message previously read "Load model form {}".
            Tools.print("Load model from {}".format(model_file_name), txt_path=self.config.save_result_txt)
            self.load_model(model_file_name)
            pass

        avg_meter = AverageMeter()
        # +1: presumably an extra background class — TODO confirm.
        ss_meter = StreamSegMetrics(self.config.num_classes + 1)
        self.net.eval()
        with torch.no_grad():
            for i, (inputs, masks, labels, image_info_list, label_info_list) in tqdm(
                    enumerate(self.data_loader_val), total=len(self.data_loader_val)):
                assert len(image_info_list) == 1
                # PIL size is (width, height); interpolate wants (height, width) below.
                size = Image.open(label_info_list[0]).size

                inputs, labels = inputs.float().cuda(), labels.numpy()
                result = self.net.module.forward_inference(inputs, has_class=self.config.has_class,
                                                           has_cam=self.config.has_cam, has_ss=self.config.has_ss)

                # Class: mae averaged separately over positive/negative labels,
                # plus micro-F1 and accuracy. `metrics` here is presumably the
                # sklearn.metrics module imported at file level — verify.
                class_out = torch.sigmoid(result["class_logits"]).detach().cpu().numpy()
                one, zero = labels == 1, labels != 1
                avg_meter.update("mae", (np.abs(class_out[one] - labels[one]).mean() +
                                         np.abs(class_out[zero] - labels[zero]).mean()) / 2)
                avg_meter.update("f1", metrics.f1_score(y_true=labels, y_pred=class_out > 0.5, average='micro'))
                avg_meter.update("acc", self._acc(net_out=class_out, labels=labels))

                # SS: score predictions against the full-resolution label file.
                if self.config.has_ss:
                    outputs = F.interpolate(result["ss"]["out_up"], size=(size[1], size[0]), mode="bilinear",
                                            align_corners=False).detach().max(dim=1)[1].cpu().numpy()
                    targets = np.expand_dims(np.asarray(Image.open(label_info_list[0])), axis=0)
                    ss_meter.update(targets, outputs)

                    if result_path is not None:
                        for image_info_one, ss_out_one, mask_one in zip(image_info_list, outputs, targets):
                            result_file = Tools.new_dir(os.path.join(result_path, os.path.basename(image_info_one)))
                            Image.open(image_info_one).save(result_file)
                            DataUtil.gray_to_color(np.asarray(
                                ss_out_one, dtype=np.uint8)).save(result_file.replace(".jpg", "_p.png"))
                            DataUtil.gray_to_color(np.asarray(
                                mask_one, dtype=np.uint8)).save(result_file.replace(".jpg", "_l.png"))
                            pass
                        pass

                    pass
                pass
            pass

        Tools.print("[E:{:3d}] val mae:{:.4f} f1:{:.4f} acc:{:.4f}".format(
            epoch, avg_meter.get_results("mae"), avg_meter.get_results("f1"),
            avg_meter.get_results("acc")), txt_path=self.config.save_result_txt)
        if self.config.has_ss:
            Tools.print("[E:{:3d}] ss {}".format(epoch, ss_meter.to_str(ss_meter.get_results())))
            pass
        pass
    def inference_ss(self, model_file_name=None, data_loader=None, save_path=None):
        """Multi-scale inference with averaging, scored at original label size.

        Expects batch size 1, where ``inputs`` is a sequence of differently
        scaled versions of the same image; their up-sampled logits are averaged
        before the argmax. Already-processed images (final PNG exists) are
        skipped, so the run is resumable.

        :param model_file_name: optional checkpoint path to load first.
        :param data_loader: yields (inputs, labels, image_info, label_info).
        :param save_path: output dir; raw index maps go to ``{save_path}_final``.
        :return: score dict from ``StreamSegMetrics.get_results()``.
        """
        if model_file_name is not None:
            # Fix: log message previously read "Load model form {}".
            Tools.print("Load model from {}".format(model_file_name), txt_path=self.config.ss_save_result_txt)
            self.load_model(model_file_name)
            pass

        final_save_path = Tools.new_dir("{}_final".format(save_path))

        self.net.eval()
        metrics = StreamSegMetrics(self.config.ss_num_classes)
        with torch.no_grad():
            for i, (inputs, labels, image_info_list, label_info_list) in tqdm(
                    enumerate(data_loader), total=len(data_loader)):
                assert len(image_info_list) == 1

                # Label (skip images whose final output already exists)
                basename = os.path.basename(image_info_list[0])
                final_name = os.path.join(final_save_path, basename.replace(".jpg", ".png"))
                size = Image.open(image_info_list[0]).size
                if os.path.exists(final_name):
                    continue

                # label_info == 1 appears to mark "no label file": use an
                # all-zero mask of the image's shape — TODO confirm.
                target_im = Image.fromarray(np.zeros_like(np.asarray(Image.open(image_info_list[0])))).convert("L") \
                    if label_info_list[0] == 1 else Image.open(label_info_list[0])
                targets = np.expand_dims(np.asarray(target_im), axis=0)

                # Prediction: average logits over all scales at original size
                outputs = 0
                for input_index, input_one in enumerate(inputs):
                    output_one = self.net(input_one.float().cuda())
                    outputs += F.interpolate(output_one, size=(size[1], size[0]),
                                             mode="bilinear", align_corners=False).detach().cpu()
                    pass
                outputs = outputs / len(inputs)
                preds = outputs.max(dim=1)[1].numpy()

                # Score
                metrics.update(targets, preds)

                if save_path:
                    Image.open(image_info_list[0]).save(os.path.join(save_path, basename))
                    DataUtil.gray_to_color(np.asarray(targets[0], dtype=np.uint8)).save(
                        os.path.join(save_path, basename.replace(".jpg", "_l.png")))
                    DataUtil.gray_to_color(np.asarray(preds[0], dtype=np.uint8)).save(
                        os.path.join(save_path, basename.replace(".jpg", ".png")))
                    Image.fromarray(np.asarray(preds[0], dtype=np.uint8)).save(final_name)
                    pass
                pass
            pass

        score = metrics.get_results()
        Tools.print("{}".format(metrics.to_str(score)), txt_path=self.config.ss_save_result_txt)
        return score
    def inference_ss_logits_single_scale(self, model_file_name=None, data_loader=None, save_path=None):
        """Single-scale inference that also dumps raw logits as .npy files.

        Expects batch size 1; only the first scale (``inputs[0]``) is used.
        Logits are up-sampled to the original image size and saved under
        ``{save_path}_logit`` (e.g. for later CRF post-processing), while
        metrics are computed against the resized ground-truth label.

        :param model_file_name: optional checkpoint path to load first.
        :param data_loader: yields (inputs, labels, image_info, label_info).
        :param save_path: base path; logits go to ``{save_path}_logit``.
        :return: score dict from ``StreamSegMetrics.get_results()``.
        """
        if model_file_name is not None:
            # Fix: log message previously read "Load model form {}".
            Tools.print("Load model from {}".format(model_file_name))
            self.load_model(model_file_name)
            pass

        logit_save_path = Tools.new_dir("{}_logit".format(save_path))

        self.net.eval()
        metrics = StreamSegMetrics(self.config.ss_num_classes)
        with torch.no_grad():
            for i, (inputs, labels, image_info_list, label_info_list) in tqdm(
                    enumerate(data_loader), total=len(data_loader)):
                assert len(image_info_list) == 1

                basename = os.path.basename(image_info_list[0])
                logit_file_path = os.path.join(logit_save_path, basename.replace(".jpg", ".npy"))
                im = Image.open(image_info_list[0])

                # PIL size is (w, h); both size tuples below are (h, w).
                ori_size = (im.size[1], im.size[0])
                # logit_size = (ori_size[0] // 4, ori_size[1] // 4)
                logit_size = ori_size

                # Label: label_info == 1 appears to mark "no label file",
                # replaced with an all-zero mask — TODO confirm.
                target_im = Image.fromarray(np.zeros_like(np.asarray(im))).convert("L") \
                    if label_info_list[0] == 1 else Image.open(label_info_list[0])
                targets = np.expand_dims(np.array(target_im.resize((logit_size[1], logit_size[0]))), axis=0)

                # Prediction at the first (single) scale, up-sampled to logit_size
                output_one = self.net(inputs[0].float().cuda())
                outputs = self._up_to_target(output_one, target_size=logit_size).detach().cpu()
                preds = outputs.max(dim=1)[1].numpy()

                # Score
                metrics.update(targets, preds)

                if save_path:
                    # Persist raw logits for downstream post-processing (e.g. CRF).
                    np.save(logit_file_path, outputs[0].numpy())
                    pass

                pass
            pass

        score = metrics.get_results()
        Tools.print("{}".format(metrics.to_str(score)))
        return score