Example #1
    def validation(self, epoch, args):
        self.model.eval()
        self.evaluator.reset()
        tbar = tqdm(self.val_loader, desc="\r")
        test_loss = 0.0

        saved_images = {}
        saved_target = {}
        saved_prediction = {}
        for idx_unseen_class in args.unseen_classes_idx_metric:
            saved_images[idx_unseen_class] = []
            saved_target[idx_unseen_class] = []
            saved_prediction[idx_unseen_class] = []

        targets, outputs = [], []
        log_file = './logs_context_step_2_GMMN.txt'
        logger = logWritter(log_file)

        for i, sample in enumerate(tbar):
            image, target, embedding = (
                sample["image"],
                sample["label"],
                sample["label_emb"],  # label embedding; unpacked but unused in this pass
            )
            if self.args.cuda:
                image, target = image.cuda(), target.cuda()
            with torch.no_grad():
                output = self.model(image)
            loss = self.criterion(output, target)
            test_loss += loss.item()
            tbar.set_description("Test loss: %.3f" % (test_loss / (i + 1)))
            # save a few qualitative examples per unseen class for TensorBoard
            for idx_unseen_class in args.unseen_classes_idx_metric:
                if ((target == idx_unseen_class).any()
                        and len(saved_images[idx_unseen_class]) < args.saved_validation_images):
                    saved_images[idx_unseen_class].append(image.clone().cpu())
                    saved_target[idx_unseen_class].append(target.clone().cpu())
                    saved_prediction[idx_unseen_class].append(output.clone().cpu())

            pred = output.data.cpu().numpy()
            target = target.cpu().numpy().astype(np.int64)
            pred = np.argmax(pred, axis=1)
            for o, t in zip(pred, target):
                outputs.append(o)
                targets.append(t)
            # Add batch sample into evaluator
            self.evaluator.add_batch(target, pred)

        config = get_config(args.config)
        (vals_cls, valu_cls, all_labels, visible_classes, visible_classes_test,
         train, val, sampler, _, cls_map, cls_map_test) = get_split(config)
        assert visible_classes_test.shape[0] == config['dis']['out_dim_cls'] - 1
        score, class_iou = scores_gzsl(targets,
                                       outputs,
                                       n_class=len(visible_classes_test),
                                       seen_cls=cls_map_test[vals_cls],
                                       unseen_cls=cls_map_test[valu_cls])

        print("Test results:")
        logger.write("Test results:")

        for k, v in score.items():
            print(k + ': ' + json.dumps(v))
            logger.write(k + ': ' + json.dumps(v))

        score["Class IoU"] = {}
        visible_classes_test = sorted(visible_classes_test)
        for i in range(len(visible_classes_test)):
            score["Class IoU"][all_labels[visible_classes_test[i]]] = class_iou[i]
        print("Class IoU: " + json.dumps(score["Class IoU"]))
        logger.write("Class IoU: " + json.dumps(score["Class IoU"]))

        print("Test finished.\n\n")
        logger.write("Test finished.\n\n")

        # Fast test during the training
        Acc, Acc_seen, Acc_unseen = self.evaluator.Pixel_Accuracy()
        (
            Acc_class,
            Acc_class_by_class,
            Acc_class_seen,
            Acc_class_unseen,
        ) = self.evaluator.Pixel_Accuracy_Class()
        (
            mIoU,
            mIoU_by_class,
            mIoU_seen,
            mIoU_unseen,
        ) = self.evaluator.Mean_Intersection_over_Union()
        (
            FWIoU,
            FWIoU_seen,
            FWIoU_unseen,
        ) = self.evaluator.Frequency_Weighted_Intersection_over_Union()
        self.writer.add_scalar("val_overall/total_loss_epoch", test_loss,
                               epoch)
        self.writer.add_scalar("val_overall/mIoU", mIoU, epoch)
        self.writer.add_scalar("val_overall/Acc", Acc, epoch)
        self.writer.add_scalar("val_overall/Acc_class", Acc_class, epoch)
        self.writer.add_scalar("val_overall/fwIoU", FWIoU, epoch)

        self.writer.add_scalar("val_seen/mIoU", mIoU_seen, epoch)
        self.writer.add_scalar("val_seen/Acc", Acc_seen, epoch)
        self.writer.add_scalar("val_seen/Acc_class", Acc_class_seen, epoch)
        self.writer.add_scalar("val_seen/fwIoU", FWIoU_seen, epoch)

        self.writer.add_scalar("val_unseen/mIoU", mIoU_unseen, epoch)
        self.writer.add_scalar("val_unseen/Acc", Acc_unseen, epoch)
        self.writer.add_scalar("val_unseen/Acc_class", Acc_class_unseen, epoch)
        self.writer.add_scalar("val_unseen/fwIoU", FWIoU_unseen, epoch)

        print("Validation:")
        print("[Epoch: %d, numImages: %5d]" %
              (epoch, i * self.args.batch_size + image.data.shape[0]))
        print(f"Loss: {test_loss:.3f}")
        print(
            f"Overall: Acc:{Acc}, Acc_class:{Acc_class}, mIoU:{mIoU}, fwIoU: {FWIoU}"
        )
        print("Seen: Acc:{}, Acc_class:{}, mIoU:{}, fwIoU: {}".format(
            Acc_seen, Acc_class_seen, mIoU_seen, FWIoU_seen))
        print("Unseen: Acc:{}, Acc_class:{}, mIoU:{}, fwIoU: {}".format(
            Acc_unseen, Acc_class_unseen, mIoU_unseen, FWIoU_unseen))

        for class_name, acc_value, mIoU_value in zip(CLASSES_NAMES,
                                                     Acc_class_by_class,
                                                     mIoU_by_class):
            self.writer.add_scalar("Acc_by_class/" + class_name, acc_value,
                                   epoch)
            self.writer.add_scalar("mIoU_by_class/" + class_name, mIoU_value,
                                   epoch)
            print(class_name, "- acc:", acc_value, " mIoU:", mIoU_value)

        new_pred = mIoU_unseen

        # Unlike Example #3, no comparison against a running best here:
        # every validation checkpoint is saved as "best".
        is_best = True
        self.best_pred = new_pred
        self.saver.save_checkpoint(
            {
                "epoch": epoch + 1,
                "state_dict": self.model.module.state_dict(),
                "optimizer": self.optimizer.state_dict(),
                "best_pred": self.best_pred,
            },
            is_best,
            generator_state={
                "epoch": epoch + 1,
                "state_dict": self.generator.state_dict(),
                "optimizer": self.optimizer.state_dict(),
                "best_pred": self.best_pred,
            },
        )

        global_step = epoch + 1
        for idx_unseen_class in args.unseen_classes_idx_metric:
            if len(saved_images[idx_unseen_class]) > 0:
                # already capped at args.saved_validation_images during collection
                nb_image = min(len(saved_images[idx_unseen_class]),
                               args.saved_validation_images)
                for i in range(nb_image):
                    self.summary.visualize_image_validation(
                        self.writer,
                        self.args.dataset,
                        saved_images[idx_unseen_class][i],
                        saved_target[idx_unseen_class][i],
                        saved_prediction[idx_unseen_class][i],
                        global_step,
                        name="validation_" + CLASSES_NAMES[idx_unseen_class] +
                        "_" + str(i),
                        nb_image=1,
                    )

        self.evaluator.reset()
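
The seen/unseen split handled by scores_gzsl above is the core of generalized zero-shot evaluation. As a rough illustration (a sketch, not the repo's actual scores_gzsl), per-class IoU can be read off a confusion matrix and summarized with the harmonic mean of the seen and unseen means:

    import numpy as np

    def gzsl_summary(confusion, seen_idx, unseen_idx):
        # Per-class IoU from an (n_class, n_class) confusion matrix, plus
        # the seen/unseen harmonic mean commonly reported for GZSL
        # segmentation. Names and layout are illustrative assumptions.
        tp = np.diag(confusion)
        denom = confusion.sum(axis=1) + confusion.sum(axis=0) - tp
        iou = tp / np.maximum(denom, 1)  # guard against empty classes
        miou_seen = iou[seen_idx].mean()
        miou_unseen = iou[unseen_idx].mean()
        harmonic = 2 * miou_seen * miou_unseen / max(miou_seen + miou_unseen, 1e-12)
        return {"mIoU_seen": miou_seen, "mIoU_unseen": miou_unseen, "hIoU": harmonic}
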
Example #2
    def validation(self, epoch, args):
        self.model.eval()
        self.evaluator.reset()
        all_target = []  # accumulated below but unused after the loop in this snippet
        all_pred = []
        tbar = tqdm(self.val_loader, desc="\r")
        test_loss = 0.0
        targets, outputs = [], []
        log_file = './logs_context_eval.txt'
        logger = logWritter(log_file)
        for i, sample in enumerate(tbar):
            image, target = sample["image"], sample["label"]
            if self.args.cuda:
                image, target = image.cuda(), target.cuda()
            with torch.no_grad():
                if args.nonlinear_last_layer:
                    output = self.model(image, image.size()[2:])
                else:
                    output = self.model(image)
            loss = self.criterion(output, target)
            test_loss += loss.item()
            tbar.set_description("Test loss: %.3f" % (test_loss / (i + 1)))
            pred = output.data.cpu().numpy()
            target = target.cpu().numpy().astype(np.int64)
            pred = np.argmax(pred, axis=1)
            for o, t in zip(pred, target):
                outputs.append(o)
                targets.append(t)
            # Add batch sample into evaluator
            self.evaluator.add_batch(target, pred)

            all_target.append(target)
            all_pred.append(pred)

        config = get_config(args.config)
        (vals_cls, valu_cls, all_labels, visible_classes, visible_classes_test,
         train, val, sampler, _, cls_map, cls_map_test) = get_split(config)
        assert visible_classes_test.shape[0] == config['dis']['out_dim_cls'] - 1
        score, class_iou = scores_gzsl(targets,
                                       outputs,
                                       n_class=len(visible_classes_test),
                                       seen_cls=cls_map_test[vals_cls],
                                       unseen_cls=cls_map_test[valu_cls])

        print("Test results:")
        logger.write("Test results:")

        for k, v in score.items():
            print(k + ': ' + json.dumps(v))
            logger.write(k + ': ' + json.dumps(v))

        score["Class IoU"] = {}
        visible_classes_test = sorted(visible_classes_test)
        for i in range(len(visible_classes_test)):
            score["Class IoU"][all_labels[visible_classes_test[i]]] = class_iou[i]
        print("Class IoU: " + json.dumps(score["Class IoU"]))
        logger.write("Class IoU: " + json.dumps(score["Class IoU"]))

        print("Test finished.\n\n")
        logger.write("Test finished.\n\n")

        # Fast test during the training
        Acc, Acc_seen, Acc_unseen = self.evaluator.Pixel_Accuracy()
        (
            Acc_class,
            Acc_class_by_class,
            Acc_class_seen,
            Acc_class_unseen,
        ) = self.evaluator.Pixel_Accuracy_Class()
        (
            mIoU,
            mIoU_by_class,
            mIoU_seen,
            mIoU_unseen,
        ) = self.evaluator.Mean_Intersection_over_Union()
        (
            FWIoU,
            FWIoU_seen,
            FWIoU_unseen,
        ) = self.evaluator.Frequency_Weighted_Intersection_over_Union()
        self.writer.add_scalar("val_overall/total_loss_epoch", test_loss,
                               epoch)
        self.writer.add_scalar("val_overall/mIoU", mIoU, epoch)
        self.writer.add_scalar("val_overall/Acc", Acc, epoch)
        self.writer.add_scalar("val_overall/Acc_class", Acc_class, epoch)
        self.writer.add_scalar("val_overall/fwIoU", FWIoU, epoch)

        self.writer.add_scalar("val_seen/mIoU", mIoU_seen, epoch)
        self.writer.add_scalar("val_seen/Acc", Acc_seen, epoch)
        self.writer.add_scalar("val_seen/Acc_class", Acc_class_seen, epoch)
        self.writer.add_scalar("val_seen/fwIoU", FWIoU_seen, epoch)

        self.writer.add_scalar("val_unseen/mIoU", mIoU_unseen, epoch)
        self.writer.add_scalar("val_unseen/Acc", Acc_unseen, epoch)
        self.writer.add_scalar("val_unseen/Acc_class", Acc_class_unseen, epoch)
        self.writer.add_scalar("val_unseen/fwIoU", FWIoU_unseen, epoch)

        print("Validation:")
        print("[Epoch: %d, numImages: %5d]" %
              (epoch, i * self.args.batch_size + image.data.shape[0]))
        print(f"Loss: {test_loss:.3f}")
        print(
            f"Overall: Acc:{Acc}, Acc_class:{Acc_class}, mIoU:{mIoU}, fwIoU: {FWIoU}"
        )
        print("Seen: Acc:{}, Acc_class:{}, mIoU:{}, fwIoU: {}".format(
            Acc_seen, Acc_class_seen, mIoU_seen, FWIoU_seen))
        print("Unseen: Acc:{}, Acc_class:{}, mIoU:{}, fwIoU: {}".format(
            Acc_unseen, Acc_class_unseen, mIoU_unseen, FWIoU_unseen))

        for class_name, acc_value, mIoU_value in zip(CLASSES_NAMES,
                                                     Acc_class_by_class,
                                                     mIoU_by_class):
            self.writer.add_scalar("Acc_by_class/" + class_name, acc_value,
                                   epoch)
            self.writer.add_scalar("mIoU_by_class/" + class_name, mIoU_value,
                                   epoch)
            print(class_name, "- acc:", acc_value, " mIoU:", mIoU_value)
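
When args.nonlinear_last_layer is set, this snippet (like Example #4 below) passes the input's spatial size to the model, presumably so the class logits are resized internally before the loss. A minimal sketch of that resize step, assuming standard bilinear upsampling (an illustration, not the repo's model code):

    import torch.nn.functional as F

    def resize_logits(logits, out_size):
        # Bilinearly upsample (N, C, h, w) class logits to out_size,
        # e.g. image.size()[2:], so they match the label resolution.
        return F.interpolate(logits, size=out_size, mode="bilinear",
                             align_corners=True)
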
Example #3
    def validation(self, epoch, args):
        class_names = CLASSES_NAMES[:20]
        self.model.eval()
        self.evaluator.reset()
        tbar = tqdm(self.val_loader, desc="\r")
        test_loss = 0.0
        torch.set_printoptions(profile="full")
        targets, outputs = [], []
        log_file = './logs_voc12_step_1.txt'
        logger = logWritter(log_file)
        for i, sample in enumerate(tbar):
            image, target = sample["image"], sample["label"]
            if self.args.cuda:
                image, target = image.cuda(), target.cuda()
            with torch.no_grad():
                output = self.model(image)
            target = resize_target(target, s=output.size()[2:]).cuda()
            loss = self.criterion(output, target)
            test_loss += loss.item()
            tbar.set_description("Test loss: %.3f" % (test_loss / (i + 1)))
            pred = output.data.cpu().numpy()
            target = target.cpu().numpy()
            pred = np.argmax(pred, axis=1)
            #             print('pred', pred[:, 100:105, 100:120])
            #             print('target', target[:, 100:105, 100:120])
            for o, t in zip(pred, target):
                outputs.append(o)
                targets.append(t)
            # Add batch sample into evaluator
            self.evaluator.add_batch(target, pred)

        config = get_config(args.config)
        (vals_cls, valu_cls, all_labels, visible_classes, visible_classes_test,
         train, val, sampler, _, cls_map, cls_map_test) = get_split(config)
        assert visible_classes_test.shape[0] == config['dis']['out_dim_cls'] - 1
        score, class_iou = scores_gzsl(targets,
                                       outputs,
                                       n_class=len(visible_classes_test),
                                       seen_cls=cls_map_test[vals_cls],
                                       unseen_cls=cls_map_test[valu_cls])

        print("Test results:")
        logger.write("Test results:")

        for k, v in score.items():
            print(k + ': ' + json.dumps(v))
            logger.write(k + ': ' + json.dumps(v))

        score["Class IoU"] = {}
        for i in range(len(visible_classes_test)):
            score["Class IoU"][all_labels[visible_classes_test[i]]] = class_iou[i]
        print("Class IoU: " + json.dumps(score["Class IoU"]))
        logger.write("Class IoU: " + json.dumps(score["Class IoU"]))

        print("Test finished.\n\n")
        logger.write("Test finished.\n\n")

        # Fast test during the training
        Acc = self.evaluator.Pixel_Accuracy()
        Acc_class, Acc_class_by_class = self.evaluator.Pixel_Accuracy_Class()
        mIoU, mIoU_by_class = self.evaluator.Mean_Intersection_over_Union()
        FWIoU = self.evaluator.Frequency_Weighted_Intersection_over_Union()
        self.writer.add_scalar("val/total_loss_epoch", test_loss, epoch)
        self.writer.add_scalar("val/mIoU", mIoU, epoch)
        self.writer.add_scalar("val/Acc", Acc, epoch)
        self.writer.add_scalar("val/Acc_class", Acc_class, epoch)
        self.writer.add_scalar("val/fwIoU", FWIoU, epoch)
        print("Validation:")
        print("[Epoch: %d, numImages: %5d]" %
              (epoch, i * self.args.batch_size + image.data.shape[0]))
        print(f"Acc:{Acc}, Acc_class:{Acc_class}, mIoU:{mIoU}, fwIoU: {FWIoU}")
        print(f"Loss: {test_loss:.3f}")

        for class_name, acc_value, mIoU_value in zip(class_names,
                                                     Acc_class_by_class,
                                                     mIoU_by_class):
            self.writer.add_scalar("Acc_by_class/" + class_name, acc_value,
                                   epoch)
            self.writer.add_scalar("mIoU_by_class/" + class_name, mIoU_value,
                                   epoch)
            print(class_name, "- acc:", acc_value, " mIoU:", mIoU_value)

        new_pred = mIoU
        is_best = False
        if new_pred > self.best_pred:
            is_best = True
            self.best_pred = new_pred
        self.saver.save_checkpoint(
            {
                "epoch": epoch + 1,
                "state_dict": self.model.module.state_dict(),
                "optimizer": self.optimizer.state_dict(),
                "best_pred": self.best_pred,
            },
            is_best,
        )
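
Every snippet funnels batches into self.evaluator through add_batch and reads the aggregate metrics after the loop. A minimal confusion-matrix evaluator in that style (a sketch under assumptions; the repo's Evaluator also exposes seen/unseen and frequency-weighted variants):

    import numpy as np

    class Evaluator:
        def __init__(self, num_class):
            self.num_class = num_class
            self.confusion = np.zeros((num_class, num_class), dtype=np.int64)

        def add_batch(self, target, pred):
            # Ignore labels outside [0, num_class), e.g. a 255 void label.
            mask = (target >= 0) & (target < self.num_class)
            idx = self.num_class * target[mask].astype(int) + pred[mask]
            self.confusion += np.bincount(
                idx, minlength=self.num_class ** 2
            ).reshape(self.num_class, self.num_class)

        def Mean_Intersection_over_Union(self):
            tp = np.diag(self.confusion)
            denom = self.confusion.sum(1) + self.confusion.sum(0) - tp
            iou = tp / np.maximum(denom, 1)
            return np.nanmean(iou), iou

        def reset(self):
            self.confusion[:] = 0
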
Example #4
    def validation(self, epoch, args):
        # class_names = CLASSES_NAMES[:21]
        class_names = [
            #             "background",  # class 0
            "aeroplane",  # class 1
            "bicycle",  # class 2
            "bird",  # class 3
            "boat",  # class 4
            "bottle",  # class 5
            "bus",  # class 6
            "car",  # class 7
            "cat",  # class 8
            "chair",  # class 9
            "cow",  # class 10
            "diningtable",  # class 11
            "dog",  # class 12
            "horse",  # class 13
            "motorbike",  # class 14
            "person",  # class 15
            "potted plant",  # class 16
            "sheep",  # class 17
            "sofa",  # class 18
            "train",  # class 19
            "tv/monitor",  # class 20
        ]
        self.model.eval()
        self.evaluator.reset()
        all_target = []  # declared but never filled or read in this snippet
        all_pred = []
        all_pred_unseen = []
        tbar = tqdm(self.val_loader, desc="\r")
        test_loss = 0.0
        targets, outputs = [], []
        log_file = './logs_test.txt'
        logger = logWritter(log_file)
        for i, sample in enumerate(tbar):
            image, target = sample["image"], sample["label"]
            if self.args.cuda:
                image, target = image.cuda(), target.cuda()
            with torch.no_grad():
                if args.nonlinear_last_layer:
                    output = self.model(image, image.size()[2:])
                else:
                    output = self.model(image)
            loss = self.criterion(output, target)
            test_loss += loss.item()
            tbar.set_description("Test loss: %.3f" % (test_loss / (i + 1)))
            pred = output.data.cpu().numpy()
            #             print('target', target.size())
            #             print('output', output.size())

            pred_unseen = pred.copy()  # copied but unused in this snippet
            target = target.cpu().numpy().astype(np.int64)
            pred = np.argmax(pred, axis=1)
            # print('target', target.shape, target.dtype)
            # print('pred', pred.shape, pred.dtype)
            for o, t in zip(pred, target):
                outputs.append(o)
                targets.append(t)

        config = get_config(args.config)
        (vals_cls, valu_cls, all_labels, visible_classes, visible_classes_test,
         train, val, sampler, _, cls_map, cls_map_test) = get_split(config)
        assert visible_classes_test.shape[0] == config['dis']['out_dim_cls'] - 1
        score, class_iou = scores_gzsl(targets,
                                       outputs,
                                       n_class=len(visible_classes_test),
                                       seen_cls=cls_map_test[vals_cls],
                                       unseen_cls=cls_map_test[valu_cls])

        print("Test results:")
        logger.write("Test results:")

        for k, v in score.items():
            print(k + ': ' + json.dumps(v))
            logger.write(k + ': ' + json.dumps(v))

        score["Class IoU"] = {}
        for i in range(len(visible_classes_test)):
            score["Class IoU"][all_labels[visible_classes_test[i]]] = class_iou[i]
        print("Class IoU: " + json.dumps(score["Class IoU"]))
        logger.write("Class IoU: " + json.dumps(score["Class IoU"]))

        print("Test finished.\n\n")
        logger.write("Test finished.\n\n")