Example No. 1
    def eval(self):
        self.metric.reset()
        self.model.eval()
        logger.info("Start validation, Total sample: {:d}".format(
            len(self.val_loader)))
        for i, (image, target, filename) in enumerate(self.val_loader):
            image = image.to(self.device)
            target = target.to(self.device)

            with torch.no_grad():
                outputs = self.model(image)
            self.metric.update(outputs[0], target)
            pixAcc, mIoU = self.metric.get()
            logger.info(
                "Sample: {:d}, validation pixAcc: {:.3f}, mIoU: {:.3f}".format(
                    i + 1, pixAcc * 100, mIoU * 100))

            if self.args.save_pred:
                pred = torch.argmax(outputs[0], 1)
                pred = pred.cpu().data.numpy()

                predict = pred.squeeze(0)
                mask = get_color_pallete(predict, self.args.dataset)
                mask.save(
                    os.path.join(outdir,
                                 os.path.splitext(filename[0])[0] + '.png'))
        synchronize()
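
The prediction step above reduces the network output to a per-pixel class map before colorizing it. Below is a minimal, self-contained sketch of that argmax step; the tensor shapes and class count are illustrative, not taken from the repository.

import torch

# Illustrative shapes: batch of 1, 21 classes (e.g. Pascal VOC), 4x4 image.
logits = torch.randn(1, 21, 4, 4)

# argmax over the class dimension turns [N, C, H, W] logits
# into an [N, H, W] map of integer class indices.
pred = torch.argmax(logits, dim=1)
print(pred.shape, pred.dtype)  # torch.Size([1, 4, 4]) torch.int64
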
Example No. 2
def run_model(args):
    # args = parse_args()
    # reference maskrcnn-benchmark
    num_gpus = int(
        os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
    args.num_gpus = num_gpus
    args.distributed = num_gpus > 1
    if not args.no_cuda and torch.cuda.is_available():
        cudnn.benchmark = True
        args.device = "cuda"
    else:
        args.distributed = False
        args.device = "cpu"
    if args.distributed:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(backend="nccl",
                                             init_method="env://")
        synchronize()
    args.lr = args.lr * num_gpus

    logger = setup_logger("semantic_segmentation",
                          args.log_dir,
                          get_rank(),
                          filename='{}_{}_{}_log.txt'.format(
                              args.model, args.backbone, args.dataset))
    logger.info("Using {} GPUs".format(num_gpus))
    logger.info(args)

    trainer = Trainer(args)
    trainer.train()
    torch.cuda.empty_cache()
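
run_model decides whether to run distributed from the WORLD_SIZE environment variable and then initializes the process group with init_method="env://". The sketch below lists the variables that rendezvous reads; a launcher such as torchrun (or torch.distributed.launch) normally sets them for every worker, and the single-process defaults shown here are only an illustration, not part of the repository.

import os

# Variables read by init_process_group(init_method="env://").
os.environ.setdefault("WORLD_SIZE", "1")           # total number of processes
os.environ.setdefault("RANK", "0")                 # global rank of this process
os.environ.setdefault("MASTER_ADDR", "127.0.0.1")  # rendezvous address
os.environ.setdefault("MASTER_PORT", "29500")      # rendezvous port
# torchrun also exports LOCAL_RANK; torch.distributed.launch passes it as the
# --local_rank argument instead, which is what args.local_rank expects above.
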
Example No. 3
    def validation(self):
        # total_inter, total_union, total_correct, total_label = 0, 0, 0, 0
        is_best = False
        self.metric.reset()
        if self.args.distributed:
            model = self.model.module
        else:
            model = self.model
        torch.cuda.empty_cache()  # TODO check if it helps
        model.eval()
        for i, (image, target, filename) in enumerate(self.val_loader):
            image = image.to(self.device)
            target = target.to(self.device)

            with torch.no_grad():
                outputs = model(image)
            self.metric.update(outputs[0], target)
            pixAcc, mIoU = self.metric.get()
            logger.info(
                "Sample: {:d}, Validation pixAcc: {:.3f}, mIoU: {:.3f}".format(
                    i + 1, pixAcc, mIoU))

        # metric.get() reports cumulative scores, so the last pixAcc/mIoU cover the whole set
        new_pred = (pixAcc + mIoU) / 2
        if new_pred > self.best_pred:
            is_best = True
            self.best_pred = new_pred
        save_checkpoint(self.model, self.args, is_best)
        synchronize()
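
save_checkpoint is called above but not shown in these snippets. A plausible minimal sketch is given below; the attribute names (args.save_dir, args.model, args.backbone, args.dataset) and the '_best_model' suffix are assumptions, not the repository's actual implementation.

import os
import shutil
import torch

def save_checkpoint(model, args, is_best=False):
    """Save the current weights and keep a copy of the best model so far."""
    directory = os.path.expanduser(args.save_dir)            # assumed attribute
    os.makedirs(directory, exist_ok=True)
    filename = '{}_{}_{}.pth'.format(args.model, args.backbone, args.dataset)
    save_path = os.path.join(directory, filename)
    torch.save(model.state_dict(), save_path)
    if is_best:
        shutil.copyfile(save_path, save_path.replace('.pth', '_best_model.pth'))
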
Example No. 4
    def eval(self):
        #self.metric.reset()
        self.model.eval()
        if self.args.distributed:
            model = self.model.module
        else:
            model = self.model
        logger.info("Start validation, Total sample: {:d}".format(
            len(self.val_loader)))
        counter = 0
        for i, (image, target) in enumerate(self.val_loader):
            # NOTE: oimage is prepared here but never passed to the model below
            oimage = cv2.imread("../dataset/rgb/train/1.png", 1)
            oimage = cv2.cvtColor(oimage, cv2.COLOR_BGR2RGB)
            oimage = np.transpose(oimage, (2, 0, 1))
            oimage = oimage.astype(np.float32) / 255.
            oimage = oimage[np.newaxis, ...]
            oimage = torch.tensor(oimage).cuda()
            #print(oimage.shape)

            #from PIL import Image
            #oimage = Image.open("../dataset/rgb/val/1.png").convert('RGB')
            #image_transform = transforms.Compose([transforms.ToTensor()])
            #oimage = image_transform(oimage).unsqueeze(0)
            #oimage = oimage.to(self.device)
            #print(oimage.shape)

            image = image.to(self.device)
            #print(image)
            #print(type(image), image.shape)
            #target = target.to(self.device)

            with torch.no_grad():
                import time
                time_start = time.time()
                # average latency over 100 forward passes (no warm-up or
                # torch.cuda.synchronize() here, so the figure is approximate)
                for _ in range(100):
                    outputs = model(image)
                print((time.time() - time_start) / 100.)

            #self.metric.update(outputs[0], target)
            #pixAcc, mIoU = self.metric.get()
            #logger.info("Sample: {:d}, validation pixAcc: {:.3f}, mIoU: {:.3f}".format(
            #    i + 1, pixAcc * 100, mIoU * 100))

            if self.args.save_pred:
                pred = torch.argmax(outputs[0], 1)
                pred = pred.cpu().data.numpy()
                predict = pred.squeeze(0)
                #predict = cv2.resize(predict, (oimage.shape[1], oimage.shape[0]), interpolation = cv2.INTER_NEAREST)
                mask = get_color_pallete(predict, self.args.dataset)
                #cv2.imshow("image", np.array(mask, dtype=np.uint8))
                #cv2.waitKey(2000)
                cv2.imwrite(
                    os.path.join(outdir, 'test_mask_' + str(counter) + '.png'),
                    np.array(predict * 20, dtype=np.uint8))
                mask.save(os.path.join(outdir,
                                       'test_' + str(counter) + '.png'))
                counter += 1
        synchronize()
Example No. 5
    def eval(self):
        self.metric.reset()
        self.model.eval()
        if self.args.distributed:
            model = self.model.module
        else:
            model = self.model
        logger.info("Start validation, Total sample: {:d}".format(
            len(self.val_loader)))
        all_pixAcc = 0
        all_mIoU = 0
        all_IoU_0 = 0
        all_IoU_1 = 0
        all_IoU_2 = 0

        for i, (image, target, filename) in enumerate(self.val_loader):
            image = image.to(self.device)
            target = target.to(self.device)

            with torch.no_grad():
                outputs = model(image)
            self.metric.update(outputs[0], target)
            #pixAcc, mIoU = self.metric.get()
            pixAcc, mIoU, IoU_0, IoU_1, IoU_2 = self.metric.get()
            #logger.info("Sample: {:d}, validation pixAcc: {:.3f}, mIoU: {:.3f}".format(
            #    i + 1, pixAcc * 100, mIoU * 100))
            logger.info(
                "Sample: {:d}, validation pixAcc: {:.3f}, mIoU: {:.3f}, IoU_0: {:.3f}, IoU_1: {:.3f}, IoU_2: {:.3f}"
                .format(i + 1, pixAcc * 100, mIoU * 100, IoU_0 * 100,
                        IoU_1 * 100, IoU_2 * 100))
            # NOTE: metric.get() returns running (cumulative) scores, so averaging
            # these values at the end over-weights the early batches
            all_pixAcc = all_pixAcc + pixAcc
            all_mIoU = all_mIoU + mIoU
            all_IoU_0 = all_IoU_0 + IoU_0
            all_IoU_1 = all_IoU_1 + IoU_1
            all_IoU_2 = all_IoU_2 + IoU_2

            if self.args.save_pred:
                pred = torch.argmax(outputs[0], 1)
                pred = pred.cpu().data.numpy()

                predict = pred.squeeze(0)
                mask = get_color_pallete(predict, self.args.dataset)
                mask.save(
                    os.path.join(outdir,
                                 os.path.splitext(filename[0])[0] + '.png'))
        print('mean pixAcc: ', all_pixAcc / len(self.val_loader))
        print('mean mIoU: ', all_mIoU / len(self.val_loader))
        print('mean IoU_0: ', all_IoU_0 / len(self.val_loader))
        print('mean IoU_1: ', all_IoU_1 / len(self.val_loader))
        print('mean IoU_2: ', all_IoU_2 / len(self.val_loader))
        synchronize()
Example No. 6
    def eval(self):
        self.metric.reset()
        self.model.eval()
        rles = []
        images = []
        filenames = []
        if self.run_config['distributed']:
            model = self.model.module
        else:
            model = self.model
        logger.info("Start validation, Total sample: {:d}".format(
            len(self.val_loader)))
        for i, (image, filename) in enumerate(self.val_loader):
            print(i)
            image = image.to(self.device)
            # target = target.to(self.device)

            with torch.no_grad():
                outputs = model(image)
            # self.metric.update(outputs[0], target)
            # pixAcc, mIoU = self.metric.get()
            # logger.info("Sample: {:d}, validation pixAcc: {:.3f}, mIoU: {:.3f}".format(
            #     i + 1, pixAcc * 100, mIoU * 100))

            if self.run_config['save_pred']:
                pred = torch.argmax(outputs[0], 1)
                pred = pred.cpu().data.numpy()

                for predict, f_name in zip(pred, filename):
                    # predict = p.squeeze(0)
                    images.append(predict)
                    filenames.append(f_name.split('.pn')[0])

                # mask = get_color_pallete(predict, self.data_config['dataset_name'])
                # mask.save(os.path.join(run_config['path']['pred_pic'], os.path.splitext(filename[0])[0] + '.png'))
        synchronize()

        pool = Pool(8)
        try:
            # encode each predicted mask as an RLE string in parallel
            for rle in tqdm(pool.map(mask2rle, images), total=len(images)):
                rles.append(rle)
            #pool.map(process_image, mdlParams['im_paths'])  # process data_inputs iterable with pool
        finally:  # make sure worker processes are closed even if errors happen
            pool.close()
            pool.join()

        # ids = [o.split('.pn')[0] for o in filenames]
        sub_df = pd.DataFrame({'ImageId': filenames, 'EncodedPixels': rles})
        sub_df.loc[sub_df.EncodedPixels == '', 'EncodedPixels'] = '-1'
        sub_df.to_csv('submission.csv', index=False)
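
mask2rle is used above but not defined in these snippets. A common Kaggle-style run-length encoder is sketched below; the exact format (column-major order, absolute 1-indexed starts, empty string for an empty mask) is an assumption and may differ from what the competition or the repository actually expects.

import numpy as np

def mask2rle(mask):
    """Encode a binary mask as 'start length start length ...' (1-indexed,
    column-major order). Returns '' for an all-zero mask."""
    pixels = (np.asarray(mask) > 0).astype(np.uint8).flatten(order='F')
    padded = np.concatenate([[0], pixels, [0]])
    runs = np.where(padded[1:] != padded[:-1])[0] + 1   # change points, 1-indexed
    runs[1::2] -= runs[::2]                             # convert run ends to lengths
    return ' '.join(str(x) for x in runs)
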
Example No. 7
    def eval(self):
        self.metric.reset()
        self.model.eval()
        if self.args.distributed:
            model = self.model.module
        else:
            model = self.model
        logger.info("Start validation, Total sample: {:d}".format(
            len(self.val_loader)))
        fps_sum = 0.0
        for i, (image, target, filename) in enumerate(self.val_loader):
            start = time.time()
            image = image.to(self.device)
            target = target.to(self.device)

            with torch.no_grad():
                outputs = model(image)
            self.metric.update(outputs[0], target)
            pixAcc, mIoU = self.metric.get()
            end = time.time()
            # wall-clock FPS per sample; includes the host-to-device copy and metric update
            fps = 1.0 / (end - start)
            fps_sum = fps_sum + fps
            logger.info(
                "Sample: {:d}, validation pixAcc: {:.3f}, mIoU: {:.3f}, FPS: {:.3f}"
                .format(i + 1, pixAcc * 100, mIoU * 100, fps))

            if self.args.save_pred:
                pred = torch.argmax(outputs[0], 1)
                pred = pred.cpu().data.numpy()

                predict = pred.squeeze(0)
                mask = get_color_pallete(predict, self.args.dataset)
                mask.save(
                    os.path.join(outdir,
                                 os.path.splitext(filename[0])[0] + '.png'))
                # danet: runs out of GPU memory
                #if i + 1 > 302: break
        avg_fps = fps_sum / len(self.val_loader)
        logger.info("avgFPS: {:.3f}".format(avg_fps))
        synchronize()
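
The FPS above is taken with wall-clock timestamps around the whole iteration, and CUDA kernels launch asynchronously, so the reported numbers can be misleading. The sketch below shows a more careful way to time only the forward pass; the warm-up and iteration counts are illustrative, and measure_fps is a hypothetical helper, not part of the repository.

import time
import torch

def measure_fps(model, image, warmup=10, iters=100):
    """Average forward-pass FPS, synchronizing CUDA around the timed region."""
    model.eval()
    with torch.no_grad():
        for _ in range(warmup):                 # warm up cuDNN autotuning / caches
            model(image)
        if torch.cuda.is_available():
            torch.cuda.synchronize()
        start = time.time()
        for _ in range(iters):
            model(image)
        if torch.cuda.is_available():
            torch.cuda.synchronize()
    return iters / (time.time() - start)
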
Example No. 8
if __name__ == '__main__':
    args = parse_args()
    num_gpus = int(
        os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
    args.distributed = num_gpus > 1
    if not args.no_cuda and torch.cuda.is_available():
        cudnn.benchmark = True
        args.device = "cuda"
    else:
        args.distributed = False
        args.device = "cpu"
    if args.distributed:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(backend="nccl",
                                             init_method="env://")
        synchronize()

    # TODO: optim code
    args.save_pred = True
    if args.save_pred:
        outdir = '../runs/pred_pic/{}_{}_{}'.format(args.model, args.backbone,
                                                    args.dataset)
        if not os.path.exists(outdir):
            os.makedirs(outdir)

    logger = setup_logger("semantic_segmentation",
                          args.log_dir,
                          get_rank(),
                          filename='{}_{}_{}_log.txt'.format(
                              args.model, args.backbone, args.dataset),
                          mode='a+')
Example No. 9
    def eval(self):
        self.metric.reset()
        self.model.eval()
        if self.args.distributed:
            model = self.model.module
        else:
            model = self.model
        logger.info("Start validation, Total sample: {:d}".format(
            len(self.val_loader)))
        all_pixAcc = 0
        all_mIoU = 0
        all_IoU_0 = 0
        all_IoU_1 = 0
        all_IoU_2 = 0

        for i, (image, target, filename) in enumerate(self.val_loader):
            image = image.to(self.device)
            target = target.to(self.device)

            with torch.no_grad():
                #outputs = model(image)
                # this model variant additionally returns two feature arrays,
                # which are saved below as .mat files
                outputs, mat_result_512, mat_result_3 = model(image)

            mat_result_512 = mat_result_512.transpose((1, 2, 0))
            mat_result_3 = mat_result_3.transpose((1, 2, 0))
            print('mat_result_512.type:', type(mat_result_512))
            print('mat_result_512.shape:', mat_result_512.shape)
            print('mat_result_3.type:', type(mat_result_3))
            print('mat_result_3.shape:', mat_result_3.shape)

            filename_mat = os.path.splitext(filename[0])[0] + '.mat'

            print('This is:', filename_mat)
            datapath_512 = '/home/pzn/pzncode/non-local/awesome-semantic-segmentation-pytorch/runs/mat_result_512_121/' + filename_mat
            print(datapath_512)
            result_512 = {'pzn_feature_512': mat_result_512}
            scio.savemat(datapath_512, {'feature512': result_512},
                         appendmat=True,
                         do_compression=True)

            print('This is:', filename_mat)
            datapath_3 = '/home/pzn/pzncode/non-local/awesome-semantic-segmentation-pytorch/runs/mat_result_3_121/' + filename_mat
            print(datapath_3)
            result_3 = {'pzn_feature_3': mat_result_3}
            scio.savemat(datapath_3, {'feature3': result_3},
                         appendmat=True,
                         do_compression=True)

            self.metric.update(outputs[0], target)
            #pixAcc, mIoU = self.metric.get()
            pixAcc, mIoU, IoU_0, IoU_1, IoU_2 = self.metric.get()
            #logger.info("Sample: {:d}, validation pixAcc: {:.3f}, mIoU: {:.3f}".format(
            #    i + 1, pixAcc * 100, mIoU * 100))
            logger.info(
                "Sample: {:d}, validation pixAcc: {:.3f}, mIoU: {:.3f}, IoU_0: {:.3f}, IoU_1: {:.3f}, IoU_2: {:.3f}"
                .format(i + 1, pixAcc * 100, mIoU * 100, IoU_0 * 100,
                        IoU_1 * 100, IoU_2 * 100))
            all_pixAcc = all_pixAcc + pixAcc
            all_mIoU = all_mIoU + mIoU
            all_IoU_0 = all_IoU_0 + IoU_0
            all_IoU_1 = all_IoU_1 + IoU_1
            all_IoU_2 = all_IoU_2 + IoU_2

            if self.args.save_pred:
                pred = torch.argmax(outputs[0], 1)
                pred = pred.cpu().data.numpy()

                predict = pred.squeeze(0)
                mask = get_color_pallete(predict, self.args.dataset)
                mask.save(
                    os.path.join(outdir,
                                 os.path.splitext(filename[0])[0] + '.png'))
        print('mean pixAcc: ', all_pixAcc / len(self.val_loader))
        print('mean mIoU: ', all_mIoU / len(self.val_loader))
        print('mean IoU_0: ', all_IoU_0 / len(self.val_loader))
        print('mean IoU_1: ', all_IoU_1 / len(self.val_loader))
        print('mean IoU_2: ', all_IoU_2 / len(self.val_loader))
        synchronize()
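
To sanity-check the .mat files written above, they can be read back with scipy.io.loadmat. The path below is illustrative, and the field access assumes scipy's usual mapping of a nested dict to a 1x1 MATLAB-style struct array.

import scipy.io as scio

data = scio.loadmat('some_sample.mat')         # illustrative path
struct = data['feature512']                    # 1x1 struct written by savemat above
feature = struct['pzn_feature_512'][0, 0]      # recover the saved array
print(feature.shape)
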
Example No. 10
    def eval(self):
        self.metric.reset()
        self.model.eval()
        if self.args.distributed:
            model = self.model.module
        else:
            model = self.model
        logger.info("Start validation, Total sample: {:d}".format(
            len(self.val_loader)))
        name_list = []
        mIOU_list = []
        acc_list = []

        all_pixAcc = 0
        all_mIoU = 0
        all_IoU_0 = 0
        all_IoU_1 = 0
        all_IoU_2 = 0

        for i, (image, target, filename) in enumerate(self.val_loader):
            image = image.to(self.device)
            target = target.to(self.device)

            with torch.no_grad():
                outputs = model(image)
            self.metric.update(outputs[0], target)
            #pixAcc, mIoU = self.metric.get()
            pixAcc, mIoU, IoU_0, IoU_1, IoU_2 = self.metric.get()
            #logger.info("Sample: {:d}, validation pixAcc: {:.3f}, mIoU: {:.3f}".format(
            #    i + 1, pixAcc * 100, mIoU * 100))
            logger.info(
                "Sample: {:d}, validation pixAcc: {:.3f}, mIoU: {:.3f}, IoU_0: {:.3f}, IoU_1: {:.3f}, IoU_2: {:.3f}"
                .format(i + 1, pixAcc * 100, mIoU * 100, IoU_0 * 100,
                        IoU_1 * 100, IoU_2 * 100))
            all_pixAcc = all_pixAcc + pixAcc
            all_mIoU = all_mIoU + mIoU
            all_IoU_0 = all_IoU_0 + IoU_0
            all_IoU_1 = all_IoU_1 + IoU_1
            all_IoU_2 = all_IoU_2 + IoU_2

            mIOU_list.append(mIoU)
            acc_list.append(pixAcc)
            print('image_pre_i:', filename)
            name_list.append(filename[0])

            if self.args.save_pred:
                pred = torch.argmax(outputs[0], 1)
                pred = pred.cpu().data.numpy()

                predict = pred.squeeze(0)
                mask = get_color_pallete(predict, self.args.dataset)
                mask.save(
                    os.path.join(outdir,
                                 os.path.splitext(filename[0])[0] + '.png'))
        print('mean pixAcc: ', all_pixAcc / len(self.val_loader))
        print('mean mIoU: ', all_mIoU / len(self.val_loader))
        print('mean IoU_0: ', all_IoU_0 / len(self.val_loader))
        print('mean IoU_1: ', all_IoU_1 / len(self.val_loader))
        print('mean IoU_2: ', all_IoU_2 / len(self.val_loader))

        print('name_list: ', name_list)
        print('mIOU_list: ', mIOU_list)
        print('acc_list: ', acc_list)
        # one row per image with its running mIoU and pixAcc
        df = pd.DataFrame({'image_name': name_list,
                           'mIoU': mIOU_list,
                           'pixAcc': acc_list})
        df.to_csv('name_demo.csv')

        synchronize()
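
As a follow-up (not part of the original script), the per-image CSV written above can be reloaded to find the hardest samples, assuming the column names used in the DataFrame above.

import pandas as pd

df = pd.read_csv('name_demo.csv')
# lowest running mIoU first; useful for spotting images the model struggles with
print(df.sort_values('mIoU').head(10)[['image_name', 'mIoU', 'pixAcc']])
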