Example 1
    def eval(self):
        self.metric.reset()
        self.model.eval()
        model = self.model

        logging.info("Start validation, Total sample: {:d}".format(
            len(self.val_loader)))
        import time
        temp = 1.7
        usingCRF = True
        output_dir = os.path.join(
            cfg.VISUAL.OUTPUT_DIR,
            'noCRF_foggy_conv9_full_dataset_comp_{}_{}_{}_{}_temp_{}_crf_{}'.
            format(cfg.MODEL.MODEL_NAME, cfg.MODEL.BACKBONE, cfg.DATASET.NAME,
                   cfg.TIME_STAMP, temp, usingCRF))
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        time_start = time.time()
        for (image, target, filename) in tqdm(self.val_loader):
            image = image.to(self.device)
            target = target.to(self.device)

            # print(image.shape)
            with torch.no_grad():
                output = model.evaluate(image)
                no_cal_output = output.clone()

                forcrf_output = output / temp

                # Refine the temperature-scaled logits with a Gaussian CRF on the raw image
                filename = filename[0]
                raw_image = cv2.imread(filename, cv2.IMREAD_COLOR).astype(
                    np.float32).transpose(2, 0, 1)
                raw_image = torch.from_numpy(raw_image).to(self.device)
                raw_image = raw_image.unsqueeze(dim=0)
                crf = GaussCRF(conf=get_default_conf(),
                               shape=image.shape[2:],
                               nclasses=len(self.classes),
                               use_gpu=True)
                crf = crf.to(self.device)
                assert image.shape == raw_image.shape
                forcrf_output = crf.forward(forcrf_output, raw_image)
                forcrf_nocali_output = crf.forward(no_cal_output, raw_image)

                outname = os.path.splitext(os.path.split(
                    filename)[-1])[0] + f'_temp_{temp}_crf_{usingCRF}.png'
                savename = os.path.join(output_dir, outname)
                # Save a comparison figure of the raw vs. temperature-scaled
                # softmax predictions next to the input image and ground truth.
                plt = giveComparisionImages(output.softmax(dim=1),
                                            (no_cal_output /
                                             temp).softmax(dim=1), raw_image,
                                            target, self.classes, savename)
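The raw-image loading and GaussCRF setup in Example 1 reappear almost verbatim in the later examples. A minimal sketch of a shared helper is shown below; the name refine_with_crf and its signature are hypothetical, and GaussCRF / get_default_conf are assumed to be importable from the same project module the snippets already rely on.

import cv2
import numpy as np
import torch
# GaussCRF and get_default_conf are assumed to come from the surrounding project,
# as in the snippets above.


def refine_with_crf(logits, image_path, n_classes, device):
    """Hypothetical helper bundling the CRF refinement used in the examples.

    `logits` is the [1, C, H, W] network output (optionally temperature-scaled);
    `image_path` is the original RGB image the logits were computed from.
    """
    # Load the raw image in the CHW float32 layout the CRF expects.
    raw_image = cv2.imread(image_path, cv2.IMREAD_COLOR).astype(
        np.float32).transpose(2, 0, 1)
    raw_image = torch.from_numpy(raw_image).to(device).unsqueeze(dim=0)

    # Build the CRF for this image size and class count (same call as above;
    # use_gpu=True mirrors the snippets).
    crf = GaussCRF(conf=get_default_conf(),
                   shape=logits.shape[2:],
                   nclasses=n_classes,
                   use_gpu=True).to(device)

    assert logits.shape[2:] == raw_image.shape[2:]
    return crf.forward(logits, raw_image)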
Example 2
    def eval(self):
        self.metric.reset()
        self.model.eval()
        model = self.model

        logging.info("Start validation, Total sample: {:d}".format(
            len(self.val_loader)))
        import time
        time_start = time.time()
        for (image, target, filename) in tqdm(self.val_loader):
            image = image.to(self.device)
            target = target.to(self.device)

            # print(image.shape)
            with torch.no_grad():
                output = model.evaluate(image)
                # output = torch.softmax(output, dim=1)

                # output /= 3

                # Refine the logits with a Gaussian CRF on the raw image
                filename = filename[0]
                # print(filename)
                raw_image = cv2.imread(filename, cv2.IMREAD_COLOR).astype(
                    np.float32).transpose(2, 0, 1)
                raw_image = torch.from_numpy(raw_image).to(self.device)
                raw_image = raw_image.unsqueeze(dim=0)
                crf = GaussCRF(conf=get_default_conf(),
                               shape=image.shape[2:],
                               nclasses=len(self.classes),
                               use_gpu=True)
                crf = crf.to(self.device)
                assert image.shape == raw_image.shape
                output = crf.forward(output, raw_image)

            # print(output.shape)
            self.metric.update(output, target)
            pixAcc, mIoU = self.metric.get()

        pixAcc, mIoU, category_iou = self.metric.get(return_category_iou=True)
        logging.info('Eval use time: {:.3f} second'.format(time.time() -
                                                           time_start))
        logging.info('End validation pixAcc: {:.3f}, mIoU: {:.3f}'.format(
            pixAcc * 100, mIoU * 100))
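The temp value that Examples 1, 3 and 4 divide the logits by is ordinary temperature scaling. A minimal, self-contained sketch of its effect on the softmax confidences follows; the logits here are made up purely for illustration.

import torch

logits = torch.tensor([[2.0, 0.5, -1.0]])   # one pixel, three classes (illustrative values)
for temp in (1.0, 1.6, 1.7):                # temperatures used in the snippets
    probs = (logits / temp).softmax(dim=1)
    # A higher temperature flattens the distribution: the argmax is unchanged,
    # but the winning probability (the model's confidence) shrinks.
    print(temp, probs.max().item())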
Example 3
def demo():
    args = parse_args()
    cfg.update_from_file(args.config_file)
    cfg.PHASE = 'test'
    cfg.ROOT_PATH = root_path
    cfg.check_and_freeze()
    default_setup(args)

    # temp=1.8
    temp = 1.6
    # usingCRF=False
    usingCRF = True
    # output folder
    output_dir = os.path.join(
        cfg.VISUAL.OUTPUT_DIR,
        'snow_1_conv_9_{}_{}_{}_{}_temp_{}_crf_{}'.format(
            cfg.MODEL.MODEL_NAME, cfg.MODEL.BACKBONE, cfg.DATASET.NAME,
            cfg.TIME_STAMP, temp, usingCRF))
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # image transform
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(cfg.DATASET.MEAN, cfg.DATASET.STD),
    ])

    model = get_segmentation_model().to(args.device)
    model.eval()

    if os.path.isdir(args.input_img):
        img_paths = [
            os.path.join(args.input_img, x) for x in os.listdir(args.input_img)
        ]
    else:
        img_paths = [args.input_img]
    for img_path in img_paths:
        image_pil = Image.open(img_path).convert('RGB')
        image = transform(image_pil).unsqueeze(0).to(args.device)
        with torch.no_grad():
            output = model.evaluate(image).detach()
            # output shape is [1, 21, H, W], resident on the CUDA device

        # import pdb; pdb.set_trace()
        print(img_path)
        if usingCRF:
            non_cali_crf_output = output.clone()
            output /= temp

            pre_crf_pred = torch.argmax(output,
                                        1).squeeze(0).cpu().data.numpy()
            pre_crf_mask = get_color_pallete(pre_crf_pred, cfg.DATASET.NAME)

            raw_image = cv2.imread(img_path, cv2.IMREAD_COLOR).astype(
                np.float32).transpose(2, 0, 1)
            raw_image = torch.from_numpy(raw_image).to(args.device)
            raw_image = raw_image.unsqueeze(dim=0)

            # output shape is [1,21,w,h]
            num_classes = output.shape[1]
            crf = GaussCRF(conf=get_default_conf(),
                           shape=image.shape[2:],
                           nclasses=num_classes,
                           use_gpu=True)
            crf = crf.to(args.device)
            assert image.shape == raw_image.shape
            output = crf.forward(output, raw_image)
            # print(output.shape)

            # Saving the image
            pred = torch.argmax(output, 1).squeeze(0).cpu().data.numpy()
            mask = get_color_pallete(pred, cfg.DATASET.NAME)
            outname = os.path.splitext(os.path.split(
                img_path)[-1])[0] + f'_temp_{temp}_crf_{usingCRF}.png'

            # Uncalibrated output passed through the CRF
            non_cali_crf_output = crf.forward(non_cali_crf_output, raw_image)
            non_cali_crf_pred = torch.argmax(non_cali_crf_output,
                                             1).squeeze(0).cpu().data.numpy()
            non_cali_crf_mask = get_color_pallete(non_cali_crf_pred,
                                                  cfg.DATASET.NAME)

            # Concatenate horizontally: [calibrated + CRF, uncalibrated + CRF, calibrated pre-CRF, RGB]
            dst = Image.new('RGB', (4 * mask.width + 9, mask.height),
                            color="white")
            dst.paste(mask, (0, 0))
            dst.paste(non_cali_crf_mask, (mask.width + 3, 0))
            dst.paste(pre_crf_mask, (2 * mask.width + 6, 0))
            dst.paste(image_pil, (3 * mask.width + 9, 0))
            dst.save(os.path.join(output_dir, outname))

        else:
            pred = torch.argmax(output, 1).squeeze(0).cpu().data.numpy()
            mask = get_color_pallete(pred, cfg.DATASET.NAME)

            # Concatenating horizontally [output, rgb]
            dst = Image.new('RGB', (mask.width + image_pil.width, mask.height))
            dst.paste(mask, (0, 0))
            dst.paste(image_pil, (mask.width, 0))
            outname = os.path.splitext(os.path.split(
                img_path)[-1])[0] + f'_temp_{temp}_crf_{usingCRF}.png'

            # mask.save(os.path.join(output_dir, outname))
            dst.save(os.path.join(output_dir, outname))
Example 4
    def eval(self):
        self.metric.reset()
        self.model.eval()
        model = self.model

        logging.info("Start validation, Total sample: {:d}".format(
            len(self.val_loader)))
        import time
        time_start = time.time()
        # if(not self.useCRF):
        bin_total = []
        bin_total_correct = []
        bin_conf_total = []
        for (image, target, filename) in tqdm(self.val_loader):
            image = image.to(self.device)
            target = target.to(self.device)

            # print(image.shape)
            with torch.no_grad():
                output = model.evaluate(image)
                output /= self.temp
                output_for_ece = output.clone()

                # Optionally refine the temperature-scaled logits with the CRF
                if self.useCRF:
                    filename = filename[0]
                    raw_image = cv2.imread(filename, cv2.IMREAD_COLOR).astype(
                        np.float32).transpose(2, 0, 1)
                    raw_image = torch.from_numpy(raw_image).to(self.device)
                    raw_image = raw_image.unsqueeze(dim=0)
                    crf = GaussCRF(conf=get_default_conf(),
                                   shape=image.shape[2:],
                                   nclasses=len(self.classes),
                                   use_gpu=True)
                    crf = crf.to(self.device)
                    # print(image.shape,raw_image.shape)
                    assert image.shape == raw_image.shape
                    output = crf.forward(output, raw_image)

            # ECE Stuff
            conf = np.max(output_for_ece.softmax(dim=1).cpu().numpy(), axis=1)
            label = torch.argmax(output_for_ece, dim=1).cpu().numpy()
            # print(conf.shape,label.shape,target.shape)
            bin_total_current, bin_total_correct_current, bin_conf_total_current = self.ece_criterion.get_collective_bins(
                conf, label,
                target.cpu().numpy())
            # import pdb; pdb.set_trace()
            bin_total.append(bin_total_current)
            bin_total_correct.append(bin_total_correct_current)
            bin_conf_total.append(bin_conf_total_current)

            # Accuracy Stuff
            self.metric.update(output, target)
            pixAcc, mIoU = self.metric.get()

        # ECE stuff
        # if(not self.useCRF):
        self.eceOperations(bin_total, bin_total_correct, bin_conf_total)

        # Accuracy stuff
        pixAcc, mIoU, category_iou = self.metric.get(return_category_iou=True)
        logging.info('Eval use time: {:.3f} second'.format(time.time() -
                                                           time_start))
        # file=open("foggy_1_conv13_VOC.txt","a")
        file = open(f"{self.postfix}.txt", "a")
        file.write("Temp={} + crf\n".format(self.temp))
        file.write('End validation pixAcc: {:.3f}, mIoU: {:.3f}'.format(
            pixAcc * 100, mIoU * 100))

        file.write("\n\n")
        file.close()

        logging.info('End validation pixAcc: {:.3f}, mIoU: {:.3f}'.format(
            pixAcc * 100, mIoU * 100))
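eceOperations itself is not shown in the snippet. Assuming the three lists collect, per batch and per confidence bin, the pixel counts, the correctly classified counts and the summed confidences, a standard expected-calibration-error aggregation over them might look like the sketch below; the function name and the exact bin layout are assumptions.

import numpy as np


def compute_ece(bin_total, bin_total_correct, bin_conf_total):
    """Hypothetical aggregation: standard ECE from per-batch, per-bin tallies."""
    # Sum the per-batch tallies into one histogram per quantity.
    total = np.sum(np.array(bin_total), axis=0).astype(np.float64)
    correct = np.sum(np.array(bin_total_correct), axis=0).astype(np.float64)
    conf_sum = np.sum(np.array(bin_conf_total), axis=0).astype(np.float64)

    nonzero = total > 0
    accuracy = correct[nonzero] / total[nonzero]      # per-bin accuracy
    confidence = conf_sum[nonzero] / total[nonzero]   # per-bin mean confidence
    weight = total[nonzero] / total.sum()             # fraction of pixels per bin

    # ECE = sum_b (n_b / N) * |acc(b) - conf(b)|
    return float(np.sum(weight * np.abs(accuracy - confidence)))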
Example 5
        return pixAcc * 100, mIoU * 100


if __name__ == '__main__':
    args = parse_args()
    cfg.update_from_file(args.config_file)
    cfg.update_from_list(args.opts)
    cfg.PHASE = 'train'
    cfg.ROOT_PATH = root_path
    cfg.check_and_freeze()

    default_setup(args)

    evaluator = Evaluator(args)

    crf = GaussCRF(conf=get_default_conf(),
                   shape=[1024, 2048],
                   nclasses=len(evaluator.classes),
                   use_gpu=True)
    crf = crf.cuda()

    # evaluator.eval(evaluator.val_loader_noisy)
    evaluator.eval(evaluator.val_loader_noisy, crf)

    # import numpy as np

    # bi_xy_std=list(map(int,(np.linspace(50, 100, 6)).astype(np.int32)))
    # bi_xy_std.append(141)
    # bi_xy_std.append(121)
    # bi_xy_std.sort()