Example 1
    def eval(self):
        self.metric.reset()
        self.model.eval()
        # Unwrap the model from DistributedDataParallel so its custom
        # evaluate() method can be called directly.
        if self.args.distributed:
            model = self.model.module
        else:
            model = self.model

        logging.info("Start validation, Total sample: {:d}".format(
            len(self.val_loader)))
        import time
        time_start = time.time()
        for i, (image, target, filename) in enumerate(self.val_loader):
            image = image.to(self.device)
            target = target.to(self.device)

            with torch.no_grad():
                output = model.evaluate(image)

            self.metric.update(output, target)
            pixAcc, mIoU = self.metric.get()
            logging.info(
                "Sample: {:d}, validation pixAcc: {:.3f}, mIoU: {:.3f}".format(
                    i + 1, pixAcc * 100, mIoU * 100))

        synchronize()
        logging.info('Eval time: {:.3f} seconds'.format(time.time() - time_start))
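
All the variants below drive the same metric protocol: reset() clears the running statistics, update(output, target) folds in one batch, and get() returns the running scores. A minimal sketch of a compatible pixel-accuracy-only metric, assuming (N, C, H, W) logits, (N, H, W) integer targets, and negative labels meaning "ignore"; the class name and ignore convention are illustrative, not taken from the source:

import torch

class PixAccOnlyMetric:
    """Hypothetical minimal metric exposing the reset/update/get protocol."""

    def __init__(self):
        self.reset()

    def reset(self):
        # Running counts of correctly predicted and of labeled pixels.
        self.correct = 0
        self.labeled = 0

    def update(self, output, target):
        # output: (N, C, H, W) logits; target: (N, H, W), ignore where < 0.
        pred = torch.argmax(output, dim=1)
        valid = target >= 0
        self.correct += ((pred == target) & valid).sum().item()
        self.labeled += valid.sum().item()

    def get(self):
        return self.correct / max(self.labeled, 1)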
Example 2
    def eval(self):
        self.metric.reset()
        self.model.eval()
        if self.args.distributed:
            model = self.model.module
        else:
            model = self.model

        logging.info("Using Val/Test img scale : {}".format(cfg.TEST.IMG_SCALE))
        logging.info("Start validation, Total sample: {:d}".format(len(self.val_loader)))
        import time
        time_start = time.time()
        pbar = tqdm(self.val_loader)
        for image, target, filename in pbar:
            image = image.to(self.device)
            target = target.to(self.device)

            assert image.shape[0] == 1, "Only batch-size 1 allowed when evaluating on test/val images"

            with torch.no_grad():
                output = mmseg_evaluate(model, image, target)

            self.metric.update(output, target)
            pixAcc, mIoU = self.metric.get()

            pbar.set_postfix_str("pixAcc: {:.3f}, mIoU: {:.3f}".format(pixAcc * 100, mIoU * 100))

        synchronize()
        pixAcc, mIoU, category_iou = self.metric.get(return_category_iou=True)
        logging.info('Eval time: {:.3f} seconds'.format(time.time() - time_start))
        logging.info('End validation pixAcc: {:.3f}, mIoU: {:.3f}'.format(pixAcc * 100, mIoU * 100))
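
The assert above only passes when the validation loader is built with batch size 1, the usual setup for whole-image or multi-scale evaluation. A self-contained sketch of a compatible loader; the dummy dataset is a stand-in for the real one, which is assumed to yield (image, target, filename) tuples:

import torch
from torch.utils.data import DataLoader, Dataset

class DummyValDataset(Dataset):
    """Stand-in for the real dataset, yielding (image, target, filename)."""

    def __len__(self):
        return 4

    def __getitem__(self, idx):
        image = torch.randn(3, 64, 64)
        target = torch.randint(0, 19, (64, 64))
        return image, target, "img_{:04d}.png".format(idx)

val_loader = DataLoader(DummyValDataset(),
                        batch_size=1,   # required by the assert above
                        shuffle=False,
                        num_workers=0)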
Example 3
    def eval(self):
        self.metric.reset()
        self.model.eval()
        if self.args.distributed:
            model = self.model.module
        else:
            model = self.model

        logging.info("Start validation, Total sample: {:d}".format(len(self.val_loader)))
        import time
        time_start = time.time()
        for i, (image, target, filename) in enumerate(self.val_loader):
            image = image.to(self.device)
            target = target.to(self.device)

            with torch.no_grad():
                output = model.evaluate(image)

            self.metric.update(output, target)
            pixAcc, mIoU = self.metric.get()
            logging.info("Sample: {:d}, validation pixAcc: {:.3f}, mIoU: {:.3f}".format(
                i + 1, pixAcc * 100, mIoU * 100))

        synchronize()
        pixAcc, mIoU, category_iou = self.metric.get(return_category_iou=True)
        logging.info('Eval time: {:.3f} seconds'.format(time.time() - time_start))
        logging.info('End validation pixAcc: {:.3f}, mIoU: {:.3f}'.format(
                pixAcc * 100, mIoU * 100))

        headers = ['class id', 'class name', 'iou']
        table = []
        for i, cls_name in enumerate(self.classes):
            table.append([cls_name, category_iou[i]])
        logging.info('Category iou: \n {}'.format(tabulate(table, headers, tablefmt='grid', showindex="always",
                                                           numalign='center', stralign='center')))
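
Note that the three headers pair with two-field rows because showindex="always" makes tabulate prepend a running row index, and when headers has one more entry than the rows, the first header labels that index column, which here serves as the class id. A self-contained check with made-up values:

from tabulate import tabulate

headers = ['class id', 'class name', 'iou']
table = [['road', 0.95], ['car', 0.87]]
print(tabulate(table, headers, tablefmt='grid', showindex="always",
               numalign='center', stralign='center'))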
Example 4
    def eval(self):
        self.metric.reset()
        self.model.eval()
        if self.args.distributed:
            model = self.model.module
        else:
            model = self.model

        logging.info("Start validation, Total sample: {:d}".format(
            len(self.val_loader)))
        import time
        time_start = time.time()
        for i, (image, target, filename) in enumerate(self.val_loader):

            image = image.to(self.device)
            target = target.to(self.device)

            with torch.no_grad():
                output = model.evaluate(image)
            # Optionally save per-image logits here (the loader uses batch size 1):
            # np.save('npy_files_voc/' + os.path.splitext(os.path.basename(filename[0]))[0],
            #         output[0].cpu().numpy())

            output = F.interpolate(output, (image.shape[2], image.shape[3]),
                                   mode='bilinear',
                                   align_corners=True)
            output = torch.argmax(output, 1)

            self.metric.update(output, target)

            pixAcc, mIoU = self.metric.get()
            logging.info(
                "Sample: {:d}, validation pixAcc: {:.3f}, mIoU: {:.3f}".format(
                    i + 1, pixAcc * 100, mIoU * 100))

        synchronize()
        pixAcc, mIoU, category_iou = self.metric.get(return_category_iou=True)
        logging.info('Eval time: {:.3f} seconds'.format(time.time() - time_start))
        logging.info('End validation pixAcc: {:.3f}, mIoU: {:.3f}'.format(
            pixAcc * 100, mIoU * 100))

        headers = ['class id', 'class name', 'iou']
        table = []
        for i, cls_name in enumerate(self.classes):
            table.append([cls_name, category_iou[i]])
        logging.info('Category iou: \n {}'.format(
            tabulate(table,
                     headers,
                     tablefmt='grid',
                     showindex="always",
                     numalign='center',
                     stralign='center')))
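
Unlike the other variants, this one up-samples the raw logits back to the input resolution and takes the argmax before calling metric.update, so the metric receives hard labels rather than logits. A quick shape check with dummy tensors:

import torch
import torch.nn.functional as F

logits = torch.randn(1, 19, 64, 128)     # (N, C, h, w) model output
image = torch.randn(1, 3, 512, 1024)     # original network input
out = F.interpolate(logits, (image.shape[2], image.shape[3]),
                    mode='bilinear', align_corners=True)
pred = torch.argmax(out, 1)              # (N, H, W) hard labels
assert pred.shape == (1, 512, 1024)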
Example 5
    def eval(self):
        self.metric.reset()
        self.model.eval()
        if self.args.distributed:
            model = self.model.module
        else:
            model = self.model

        logging.info("Start validation, Total sample: {:d}".format(
            len(self.val_loader)))
        for i, (image, target, filename) in enumerate(self.val_loader):
            image = image.to(self.device)
            target = target.to(self.device)

            with torch.no_grad():
                size = image.size()[2:]
                if size[0] < cfg.TEST.CROP_SIZE[0] and size[1] < cfg.TEST.CROP_SIZE[1]:
                    pad_height = cfg.TEST.CROP_SIZE[0] - size[0]
                    pad_width = cfg.TEST.CROP_SIZE[1] - size[1]
                    # F.pad expects (left, right, top, bottom) for 4-D input,
                    # so the width padding comes first.
                    image = F.pad(image, (0, pad_width, 0, pad_height))
                    output = model(image)[0]
                    output = output[..., :size[0], :size[1]]
                else:
                    output = model(image)[0]

            self.metric.update(output, target)
            pixAcc, mIoU = self.metric.get()
            logging.info(
                "Sample: {:d}, validation pixAcc: {:.3f}, mIoU: {:.3f}".format(
                    i + 1, pixAcc * 100, mIoU * 100))

            # TODO: optionally save color-mapped predictions
            # if self.args.save_pred:
            #     pred = torch.argmax(output, 1)
            #     pred = pred.cpu().data.numpy().squeeze(0)
            #     mask = get_color_pallete(pred, self.args.dataset)
            #     mask.save(os.path.join(outdir, os.path.splitext(filename[0])[0] + '.png'))
        synchronize()
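
For a 4-D tensor, F.pad interprets the padding tuple as (left, right, top, bottom), so the width amount comes first; the original snippet had the two amounts swapped, which the version above corrects. A quick check with dummy shapes:

import torch
import torch.nn.functional as F

x = torch.randn(1, 3, 100, 200)          # H=100, W=200
pad_height, pad_width = 28, 56
# Width padding first, then height.
y = F.pad(x, (0, pad_width, 0, pad_height))
assert y.shape == (1, 3, 128, 256)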
Example 6
    def eval(self):
        self.metric.reset()
        self.model.eval()
        if self.args.distributed:
            model = self.model.module
        else:
            model = self.model

        logging.info("Start validation, Total sample: {:d}".format(
            len(self.val_loader)))
        import time
        time_start = time.time()

        # Flattened per-pixel confidences, predicted labels, and ground truth,
        # accumulated over the whole validation set.
        tot_conf = torch.Tensor([]).reshape(-1, 1)
        tot_obj = torch.Tensor([]).reshape(-1, 1)
        tot_label_for_image = torch.Tensor([]).reshape(-1, 1)

        for i, (image, target, filename) in enumerate(self.val_loader):

            image = image.to(self.device)
            target = target.to(self.device)

            with torch.no_grad():
                output = model.evaluate(image)
            doingCali = True   # apply a pre-computed temperature
            usingCRF = True    # refine probabilities with a CRF post-processor
            if doingCali:
                # Temperature determined beforehand (an earlier run used
                # 1.6127); see Example 8 for how it can be learned.
                temp = 2.8
            else:
                temp = 1

            output = output / temp
            # With the CRF enabled, the final output is rebuilt from the
            # per-image post-processed probabilities collected below.
            if not usingCRF:
                output_post = output
            else:
                output_post = []
            output = F.softmax(output, dim=1)
            output_numpy = output.cpu().numpy()

            def get_raw_image(file_location):
                # Load in BGR, H x W x C layout.
                raw_image = cv2.imread(file_location,
                                       cv2.IMREAD_COLOR).astype(np.float32)
                mean_bgr = np.array([103.53, 116.28, 123.675])
                # Subtract the per-channel BGR mean.
                raw_image -= mean_bgr
                # Cast back to uint8 in H x W x C (the original code
                # round-tripped through C x H x W here, which was a no-op).
                raw_image = raw_image.astype(np.uint8)
                return raw_image

            for j, image_file_loc in enumerate(filename):

                prob_to_use = output_numpy[j]
                if usingCRF:
                    raw_image = get_raw_image(image_file_loc)
                    prob_post = self.postprocessor(raw_image, prob_to_use)
                    prob_to_use = prob_post
                    output_post.append(prob_post)

                prob_to_use = torch.tensor(prob_to_use)
                # Per-pixel confidence, predicted label, and flattened ground
                # truth; sel keeps only valid (non-ignored) pixels.
                labels = torch.argmax(prob_to_use, dim=0)
                conf = torch.max(prob_to_use, dim=0)[0].cpu()
                obj = labels.cpu().float()
                label_for_image = target[j].view(-1, 1).cpu().float()
                sel = (label_for_image >= 0)

                tot_conf = torch.cat(
                    [tot_conf, conf.view(-1, 1)[sel].view(-1, 1)], dim=0)
                tot_obj = torch.cat(
                    [tot_obj, obj.view(-1, 1)[sel].view(-1, 1)], dim=0)
                tot_label_for_image = torch.cat(
                    [tot_label_for_image,
                     label_for_image.view(-1, 1)[sel].view(-1, 1)], dim=0)

            if usingCRF:
                output_post = np.array(output_post)
                output_post = torch.tensor(output_post)
                output_post = output_post.to(self.device)

            self.metric.update(output_post, target)
            pixAcc, mIoU = self.metric.get()
            logging.info(
                "Sample: {:d}, validation pixAcc: {:.3f}, mIoU: {:.3f}".format(
                    i + 1, pixAcc * 100, mIoU * 100))

        print(tot_conf.shape, tot_obj.shape, tot_label_for_image.shape)
        import pickle
        ece_folder = "eceData"
        makedirs(ece_folder)

        # postfix="DLV2_UnCal"
        postfix = "Foggy_Calibrated_DLV3Plus"
        saveDir = os.path.join(ece_folder, postfix)
        makedirs(saveDir)

        # Dump confidences, predictions, and ground truth for ECE analysis.
        with open(os.path.join(saveDir, "conf.pickle"), "wb") as f:
            pickle.dump(tot_conf, f)
        with open(os.path.join(saveDir, "obj.pickle"), "wb") as f:
            pickle.dump(tot_obj, f)
        with open(os.path.join(saveDir, "gt.pickle"), "wb") as f:
            pickle.dump(tot_label_for_image, f)

        synchronize()
        pixAcc, mIoU, category_iou = self.metric.get(return_category_iou=True)
        logging.info('Eval time: {:.3f} seconds'.format(time.time() - time_start))
        logging.info('End validation pixAcc: {:.3f}, mIoU: {:.3f}'.format(
            pixAcc * 100, mIoU * 100))

        headers = ['class id', 'class name', 'iou']
        table = []
        for i, cls_name in enumerate(self.classes):
            table.append([cls_name, category_iou[i]])
        logging.info('Category iou: \n {}'.format(
            tabulate(table,
                     headers,
                     tablefmt='grid',
                     showindex="always",
                     numalign='center',
                     stralign='center')))
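
The three pickles hold flattened per-pixel confidences, predicted labels, and ground-truth labels, which is exactly what a binned expected-calibration-error (ECE) computation consumes. A minimal sketch of one common ECE formulation over equal-width confidence bins; the bin count is a free choice and the function name is ours:

import torch

def expected_calibration_error(conf, pred, gt, n_bins=15):
    """Binned ECE: sum over bins of (|B|/N) * |accuracy(B) - confidence(B)|."""
    conf = conf.view(-1)
    correct = (pred.view(-1) == gt.view(-1)).float()
    ece = torch.zeros(1)
    edges = torch.linspace(0, 1, n_bins + 1)
    for lo, hi in zip(edges[:-1], edges[1:]):
        in_bin = (conf > lo) & (conf <= hi)
        if in_bin.any():
            ece += in_bin.float().mean() * \
                (correct[in_bin].mean() - conf[in_bin].mean()).abs()
    return ece.item()

# e.g. expected_calibration_error(tot_conf, tot_obj, tot_label_for_image)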
Example 7
    def eval(self):
        self.metric.reset()
        self.model.eval()
        if self.args.distributed:
            model = self.model.module
        else:
            model = self.model

        logging.info("Start validation, Total sample: {:d}".format(
            len(self.val_loader)))
        import time
        time_start = time.time()
        widgets = [
            'Inference: ',
            Percentage(), ' ',
            Bar('#'), ' ',
            Timer(), ' ',
            ETA(), ' ',
            FileTransferSpeed()
        ]
        pbar = ProgressBar(widgets=widgets,
                           maxval=10 * len(self.val_loader)).start()

        for i, (image, target, boundary,
                filename) in enumerate(self.val_loader):
            image = image.to(self.device)
            target = target.to(self.device)
            boundary = boundary.to(self.device)

            filename = filename[0]
            with torch.no_grad():
                output, output_boundary = model.evaluate(image)

            if 'hard' in filename:
                self.metric_hard.update(output, target)
                self.count_hard += 1
            elif 'easy' in filename:
                self.metric_easy.update(output, target)
                self.count_easy += 1
            else:
                print(filename)
                continue

            self.metric.update(output, target)
            pbar.update(10 * i + 1)

        pbar.finish()
        synchronize()
        pixAcc, mIoU, category_iou, mae, mBer, category_Ber = self.metric.get(
            return_category_iou=True)
        pixAcc_e, mIoU_e, category_iou_e, mae_e, mBer_e, category_Ber_e = self.metric_easy.get(
            return_category_iou=True)
        pixAcc_h, mIoU_h, category_iou_h, mae_h, mBer_h, category_Ber_h = self.metric_hard.get(
            return_category_iou=True)

        logging.info('Eval time: {:.3f} seconds'.format(time.time() - time_start))
        logging.info(
            'End validation pixAcc: {:.2f}, mIoU: {:.2f}, mae: {:.3f}, mBer: {:.2f}'
            .format(pixAcc * 100, mIoU * 100, mae, mBer))
        logging.info(
            'End validation easy pixAcc: {:.2f}, mIoU: {:.2f}, mae: {:.3f}, mBer: {:.2f}'
            .format(pixAcc_e * 100, mIoU_e * 100, mae_e, mBer_e))
        logging.info(
            'End validation hard pixAcc: {:.2f}, mIoU: {:.2f}, mae: {:.3f}, mBer: {:.2f}'
            .format(pixAcc_h * 100, mIoU_h * 100, mae_h, mBer_h))

        headers = [
            'class id', 'class name', 'iou', 'iou_easy', 'iou_hard', 'ber',
            'ber_easy', 'ber_hard'
        ]
        table = []
        for i, cls_name in enumerate(self.classes):
            table.append([
                cls_name, category_iou[i], category_iou_e[i],
                category_iou_h[i], category_Ber[i], category_Ber_e[i],
                category_Ber_h[i]
            ])
        logging.info('Category iou: \n {}'.format(
            tabulate(table,
                     headers,
                     tablefmt='grid',
                     showindex="always",
                     numalign='center',
                     stralign='center')))
        logging.info('easy images: {}, hard images: {}'.format(
            self.count_easy, self.count_hard))
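
Besides IoU, this metric reports MAE and BER. In the mirror/glass segmentation literature this code appears to come from, BER is the balanced error rate, 100 * 0.5 * (FN/(TP+FN) + FP/(TN+FP)); that definition is an assumption here, not confirmed by the snippet. A sketch for a single binary mask:

import torch

def balanced_error_rate(pred, gt):
    """BER = 0.5 * (false-negative rate + false-positive rate), in percent."""
    pred, gt = pred.bool().view(-1), gt.bool().view(-1)
    tp = (pred & gt).sum().float()
    tn = (~pred & ~gt).sum().float()
    fp = (pred & ~gt).sum().float()
    fn = (~pred & gt).sum().float()
    return (100 * 0.5 * (fn / (tp + fn).clamp(min=1)
                         + fp / (tn + fp).clamp(min=1))).item()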
Example 8
    def eval(self):
        self.metric.reset()
        self.model.eval()
        if self.args.distributed:
            model = self.model.module
        else:
            model = self.model

        # Learnable temperature scalar, initialised to 1.5.
        one_five = torch.ones(1) * 1.5
        one_five = one_five.to(self.device)

        temp = torch.nn.Parameter(one_five)
        print(temp)

        criterion = torch.nn.CrossEntropyLoss(
            ignore_index=cfg.DATASET.IGNORE_INDEX).to(self.device)
        optimizer = torch.optim.SGD([temp], lr=1)

        logging.info("Start validation, Total sample: {:d}".format(
            len(self.val_loader)))
        import time
        time_start = time.time()

        loss_series = list()
        temp_series = list()

        for epoch in range(10):

            logging.info("Epoch Started {}".format(epoch))
            loss_epoch = 0.0

            for i, (image, target, filename) in enumerate(self.val_loader):

                optimizer.zero_grad()

                image = image.to(self.device)
                target = target.to(self.device)

                with torch.no_grad():
                    output = model.evaluate(image)

                # Scale logits by the learnable temperature; the logits were
                # produced under no_grad, so only temp receives gradients.
                output = output / temp

                loss = criterion(output, target)
                loss_epoch += loss.item()
                loss.backward()
                optimizer.step()

                logging.info("Batch {} loss for Temp Scaling : {}".format(
                    i, loss))

            logging.info("Epoch {} loss for Temp Scaling : {}".format(
                epoch, loss_epoch / (len(self.val_loader))))
            logging.info("Epoch {} Temp Scaling factor is : {}".format(
                epoch, temp.item()))

            loss_series.append(loss_epoch)
            temp_series.append(temp.item())

        print(loss_series)
        print(temp_series)

        synchronize()
        print('Final temperature: {:.4f}'.format(temp.item()))
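
Standard temperature scaling (Guo et al., 2017) fits exactly this single scalar, though usually with LBFGS rather than SGD. Once learned, the temperature divides the logits before the softmax at inference time, softening the probabilities without changing the argmax; a minimal sketch, reusing the 2.8 value from Example 6 purely for illustration:

import torch
import torch.nn.functional as F

def calibrated_probs(logits, temperature):
    # Dividing logits by T > 1 softens the softmax and reduces
    # overconfidence; predicted labels (argmax) are unchanged.
    return F.softmax(logits / temperature, dim=1)

logits = torch.randn(1, 19, 64, 64)
probs = calibrated_probs(logits, temperature=2.8)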