コード例 #1
0
ファイル: main.py プロジェクト: johnny305/deeplab-pytorch
def evaluate(model, writer, iteration, CONFIG):
    """
    Evaluation on validation set.

    Args:
        model: segmentation network returning per-class logits.
        writer: summary writer with an ``add_scalar`` method.
        iteration: global step recorded alongside the score.
        CONFIG: addict-style configuration tree (DATASET / IMAGE / SOLVER ...).

    Side effects:
        Globally disables autograd and moves ``model`` to device 0.
    """
    device = 0  # NOTE(review): hard-coded to the first CUDA device -- confirm
    torch.set_grad_enabled(False)
    model.eval()
    model.to(device)

    # Dataset: "h16" is served by the "vocaug" loader. Resolve the alias in
    # a local variable -- the original mutated CONFIG.DATASET.NAME in place,
    # silently changing the caller's shared configuration object.
    dataset_name = CONFIG.DATASET.NAME
    if dataset_name == "h16":
        dataset_name = "vocaug"
    dataset = get_dataset(dataset_name)(
        root=CONFIG.DATASET.ROOT,
        split=CONFIG.DATASET.SPLIT.VAL,
        ignore_label=CONFIG.DATASET.IGNORE_LABEL,
        mean_bgr=(CONFIG.IMAGE.MEAN.B, CONFIG.IMAGE.MEAN.G,
                  CONFIG.IMAGE.MEAN.R),
        augment=False,
    )

    # DataLoader (no shuffling: evaluation order must be deterministic)
    loader = torch.utils.data.DataLoader(
        dataset=dataset,
        batch_size=CONFIG.SOLVER.BATCH_SIZE.TEST,
        num_workers=CONFIG.DATALOADER.NUM_WORKERS,
        shuffle=False,
    )

    preds, gts = [], []
    for image_ids, images, gt_labels in loader:
        # Image
        images = images.to(device)

        # Forward propagation
        logits = model(images)

        # Pixel-wise labeling at the ground-truth resolution
        _, H, W = gt_labels.shape
        logits = F.interpolate(logits,
                               size=(H, W),
                               mode="bilinear",
                               align_corners=False)
        probs = F.softmax(logits, dim=1)
        labels = torch.argmax(probs, dim=1)

        preds += list(labels.cpu().numpy())
        gts += list(gt_labels.numpy())

    # Pixel Accuracy, Mean Accuracy, Class IoU, Mean IoU, Freq Weighted IoU
    score = scores(gts, preds, n_class=CONFIG.DATASET.N_CLASSES)
    print("MeanIoU: {:2.2f}".format(score["Mean IoU"] * 100))
    writer.add_scalar("meanIoU",
                      score["Mean IoU"] * 100,
                      global_step=iteration)
コード例 #2
0
def main(config, model_path, cuda, crf):
    """
    Evaluate a DeepLab v2 checkpoint on the CocoStuff10k test split.

    Args:
        config: path to a YAML configuration file.
        model_path: path to the ``.pth`` checkpoint; scores are written next
            to it with a ``.json`` extension.
        cuda: request GPU execution (falls back to CPU if unavailable).
        crf: apply DenseCRF post-processing to the softmax maps.
    """
    # Configuration. safe_load avoids constructing arbitrary objects from
    # YAML tags, and the context manager closes the handle the original
    # version leaked.
    with open(config) as f:
        CONFIG = Dict(yaml.safe_load(f))

    cuda = cuda and torch.cuda.is_available()
    if cuda:
        current_device = torch.cuda.current_device()
        print('Running on', torch.cuda.get_device_name(current_device))

    image_size = (
        CONFIG.IMAGE.SIZE.TEST,
        CONFIG.IMAGE.SIZE.TEST,
    )

    # Dataset
    dataset = CocoStuff10k(
        root=CONFIG.ROOT,
        split='test',
        image_size=image_size,
        scale=False,
        flip=False,
        preload=False,
    )

    # DataLoader (no shuffling: evaluation must be deterministic)
    loader = torch.utils.data.DataLoader(
        dataset=dataset,
        batch_size=CONFIG.BATCH_SIZE,
        num_workers=CONFIG.NUM_WORKERS,
        shuffle=False,
    )

    # Load the checkpoint on CPU regardless of where it was saved
    state_dict = torch.load(model_path,
                            map_location=lambda storage, loc: storage)

    # Model
    model = DeepLabV2_ResNet101_MSC(n_classes=CONFIG.N_CLASSES)
    model.load_state_dict(state_dict)
    model = nn.DataParallel(model)
    model.eval()
    if cuda:
        model.cuda()

    # Inference is read-only: disable autograd instead of the long-removed
    # ``Variable(volatile=True)`` API the original used.
    torch.set_grad_enabled(False)

    targets, outputs = [], []
    for data, target in tqdm(
            loader,
            total=len(loader),
            leave=False,
            dynamic_ncols=True,
    ):
        # Image
        data = data.cuda() if cuda else data

        # Forward propagation. F.upsample is deprecated; align_corners=True
        # reproduces the pre-0.4 bilinear behavior this code was written
        # against -- TODO confirm against reference results.
        output = model(data)
        output = F.interpolate(output, size=image_size, mode='bilinear',
                               align_corners=True)
        output = F.softmax(output, dim=1)
        output = output.data.cpu().numpy()

        # Postprocessing
        if crf:
            crf_output = np.zeros(output.shape)
            images = data.data.cpu().numpy().astype(np.uint8)
            for i, (image, prob_map) in enumerate(zip(images, output)):
                image = image.transpose(1, 2, 0)  # CHW -> HWC for the CRF
                crf_output[i] = dense_crf(image, prob_map)
            output = crf_output

        output = np.argmax(output, axis=1)
        target = target.numpy()

        for o, t in zip(output, target):
            outputs.append(o)
            targets.append(t)

    score, class_iou = scores(targets, outputs, n_class=CONFIG.N_CLASSES)

    for k, v in score.items():
        print(k, v)

    score['Class IoU'] = {}
    for i in range(CONFIG.N_CLASSES):
        score['Class IoU'][i] = class_iou[i]

    with open(model_path.replace('.pth', '.json'), 'w') as f:
        json.dump(score, f, indent=4, sort_keys=True)
コード例 #3
0
def test(config_path, model_path, cuda):
    """
    Evaluation on validation set.

    Runs the model over every validation image, stores the raw logits on
    disk (for later CRF post-processing) and writes the evaluation scores
    to ``scores.json``.

    Args:
        config_path: open file (or path) with the YAML configuration.
        model_path: path to the trained ``.pth`` checkpoint.
        cuda: run on GPU when available.
    """
    # Configuration (safe_load: config files need no arbitrary YAML tags)
    CONFIG = Dict(yaml.safe_load(config_path))
    device = get_device(cuda)
    torch.set_grad_enabled(False)

    # Dataset
    dataset = get_dataset(CONFIG.DATASET.NAME)(
        root=CONFIG.DATASET.ROOT,
        split=CONFIG.DATASET.SPLIT.VAL,
        ignore_label=CONFIG.DATASET.IGNORE_LABEL,
        mean_bgr=(CONFIG.IMAGE.MEAN.B, CONFIG.IMAGE.MEAN.G, CONFIG.IMAGE.MEAN.R),
        augment=False,
    )
    print(dataset)

    # DataLoader (no shuffling: evaluation must be deterministic)
    loader = torch.utils.data.DataLoader(
        dataset=dataset,
        batch_size=CONFIG.SOLVER.BATCH_SIZE.TEST,
        num_workers=CONFIG.DATALOADER.NUM_WORKERS,
        shuffle=False,
    )

    # Model. NOTE(review): eval() on a config value executes arbitrary code;
    # acceptable only because config files are trusted, local inputs.
    model = eval(CONFIG.MODEL.NAME)(n_classes=CONFIG.DATASET.N_CLASSES)
    state_dict = torch.load(model_path, map_location=lambda storage, loc: storage)
    model.load_state_dict(state_dict)
    model = nn.DataParallel(model)
    model.eval()
    model.to(device)

    # Path to save logits
    logit_dir = os.path.join(
        CONFIG.EXP.OUTPUT_DIR,
        "features",
        CONFIG.EXP.ID,
        CONFIG.MODEL.NAME.lower(),
        CONFIG.DATASET.SPLIT.VAL,
        "logit",
    )
    makedirs(logit_dir)
    print("Logit dst:", logit_dir)

    # Path to save scores
    save_dir = os.path.join(
        CONFIG.EXP.OUTPUT_DIR,
        "scores",
        CONFIG.EXP.ID,
        CONFIG.MODEL.NAME.lower(),
        CONFIG.DATASET.SPLIT.VAL,
    )
    makedirs(save_dir)
    save_path = os.path.join(save_dir, "scores.json")
    print("Score dst:", save_path)

    preds, gts = [], []
    for image_ids, images, gt_labels in tqdm(
        loader, total=len(loader), dynamic_ncols=True
    ):
        # Image
        images = images.to(device)

        # Forward propagation
        logits = model(images)

        # Save on disk for CRF post-processing.
        # BUGFIX: the original had a stray '"""' before this section which
        # opened an unterminated string literal and swallowed the rest of
        # the loop body (SyntaxError); removed.
        for image_id, logit in zip(image_ids, logits):
            filename = os.path.join(logit_dir, image_id + ".npy")
            np.save(filename, logit.cpu().numpy())

        # Pixel-wise labeling at the ground-truth resolution
        _, H, W = gt_labels.shape
        logits = F.interpolate(
            logits, size=(H, W), mode="bilinear", align_corners=False
        )
        probs = F.softmax(logits, dim=1)
        labels = torch.argmax(probs, dim=1)

        preds += list(labels.cpu().numpy())
        gts += list(gt_labels.numpy())

    # Pixel Accuracy, Mean Accuracy, Class IoU, Mean IoU, Freq Weighted IoU
    score = scores(gts, preds, n_class=CONFIG.DATASET.N_CLASSES)

    with open(save_path, "w") as f:
        json.dump(score, f, indent=4, sort_keys=True)


@main.command()
@click.option(
    "-c",
    "--config-path",
    type=click.File(),
    required=True,
    help="Dataset configuration file in YAML",
)
@click.option(
    "-j",
    "--n-jobs",
    type=int,
    default=multiprocessing.cpu_count(),
    show_default=True,
    help="Number of parallel jobs",
)
def crf(config_path, n_jobs):
    """
    CRF post-processing on pre-computed logits
    """
    # Configuration (safe_load: config files need no arbitrary YAML tags)
    CONFIG = Dict(yaml.safe_load(config_path))
    torch.set_grad_enabled(False)
    print("# jobs:", n_jobs)

    # Dataset (used only for images/labels; logits come from disk)
    dataset = get_dataset(CONFIG.DATASET.NAME)(
        root=CONFIG.DATASET.ROOT,
        split=CONFIG.DATASET.SPLIT.VAL,
        ignore_label=CONFIG.DATASET.IGNORE_LABEL,
        mean_bgr=(CONFIG.IMAGE.MEAN.B, CONFIG.IMAGE.MEAN.G, CONFIG.IMAGE.MEAN.R),
        augment=False,
    )
    print(dataset)

    # CRF post-processor
    postprocessor = DenseCRF(
        iter_max=CONFIG.CRF.ITER_MAX,
        pos_xy_std=CONFIG.CRF.POS_XY_STD,
        pos_w=CONFIG.CRF.POS_W,
        bi_xy_std=CONFIG.CRF.BI_XY_STD,
        bi_rgb_std=CONFIG.CRF.BI_RGB_STD,
        bi_w=CONFIG.CRF.BI_W,
    )

    # Path to logit files (written beforehand by the "test" command)
    logit_dir = os.path.join(
        CONFIG.EXP.OUTPUT_DIR,
        "features",
        CONFIG.EXP.ID,
        CONFIG.MODEL.NAME.lower(),
        CONFIG.DATASET.SPLIT.VAL,
        "logit",
    )
    print("Logit src:", logit_dir)
    if not os.path.isdir(logit_dir):
        print("Logit not found, run first: python main.py test [OPTIONS]")
        quit()

    # Path to save scores
    save_dir = os.path.join(
        CONFIG.EXP.OUTPUT_DIR,
        "scores",
        CONFIG.EXP.ID,
        CONFIG.MODEL.NAME.lower(),
        CONFIG.DATASET.SPLIT.VAL,
    )
    makedirs(save_dir)
    save_path = os.path.join(save_dir, "scores_crf.json")
    print("Score dst:", save_path)

    # Process per sample
    def process(i):
        """CRF-refine sample *i*; return (predicted label map, gt labels)."""
        image_id, image, gt_label = dataset[i]  # idiomatic indexing

        filename = os.path.join(logit_dir, image_id + ".npy")
        logit = np.load(filename)

        # Upsample the stored logits to the image resolution before the CRF
        _, H, W = image.shape
        logit = torch.FloatTensor(logit)[None, ...]
        logit = F.interpolate(logit, size=(H, W), mode="bilinear", align_corners=False)
        prob = F.softmax(logit, dim=1)[0].numpy()

        # DenseCRF expects an HWC uint8 image
        image = image.astype(np.uint8).transpose(1, 2, 0)
        prob = postprocessor(image, prob)
        label = np.argmax(prob, axis=0)

        return label, gt_label

    # CRF in multi-process; pre_dispatch="all" submits every sample up front
    results = joblib.Parallel(n_jobs=n_jobs, verbose=10, pre_dispatch="all")(
        [joblib.delayed(process)(i) for i in range(len(dataset))]
    )

    preds, gts = zip(*results)

    # Pixel Accuracy, Mean Accuracy, Class IoU, Mean IoU, Freq Weighted IoU
    score = scores(gts, preds, n_class=CONFIG.DATASET.N_CLASSES)

    with open(save_path, "w") as f:
        json.dump(score, f, indent=4, sort_keys=True)
コード例 #4
0
ファイル: main.py プロジェクト: Madave94/deeplab-pytorch
def crf(config_path, n_jobs):
    """
    CRF post-processing on pre-computed logits

    Reads the ``.npy`` logits previously written by the ``test`` command,
    refines them with DenseCRF in ``n_jobs`` parallel workers, and writes
    the evaluation scores to ``scores_crf.json``.
    """
    # Configuration (safe_load: config files need no arbitrary YAML tags)
    CONFIG = Dict(yaml.safe_load(config_path))
    torch.set_grad_enabled(False)
    print("# jobs:", n_jobs)

    # Dataset (used only for images/labels; logits come from disk)
    dataset = get_dataset(CONFIG.DATASET.NAME)(
        root=CONFIG.DATASET.ROOT,
        split=CONFIG.DATASET.SPLIT.VAL,
        ignore_label=CONFIG.DATASET.IGNORE_LABEL,
        mean_bgr=(CONFIG.IMAGE.MEAN.B, CONFIG.IMAGE.MEAN.G,
                  CONFIG.IMAGE.MEAN.R),
        augment=False,
    )
    print(dataset)

    # CRF post-processor
    postprocessor = DenseCRF(
        iter_max=CONFIG.CRF.ITER_MAX,
        pos_xy_std=CONFIG.CRF.POS_XY_STD,
        pos_w=CONFIG.CRF.POS_W,
        bi_xy_std=CONFIG.CRF.BI_XY_STD,
        bi_rgb_std=CONFIG.CRF.BI_RGB_STD,
        bi_w=CONFIG.CRF.BI_W,
    )

    # Path to logit files (written beforehand by the "test" command)
    logit_dir = os.path.join(
        CONFIG.EXP.OUTPUT_DIR,
        "features",
        CONFIG.EXP.ID,
        CONFIG.MODEL.NAME.lower(),
        CONFIG.DATASET.SPLIT.VAL,
        "logit",
    )
    print("Logit src:", logit_dir)
    if not os.path.isdir(logit_dir):
        print("Logit not found, run first: python main.py test [OPTIONS]")
        quit()

    # Path to save scores
    save_dir = os.path.join(
        CONFIG.EXP.OUTPUT_DIR,
        "scores",
        CONFIG.EXP.ID,
        CONFIG.MODEL.NAME.lower(),
        CONFIG.DATASET.SPLIT.VAL,
    )
    makedirs(save_dir)
    save_path = os.path.join(save_dir, "scores_crf.json")
    print("Score dst:", save_path)

    # Process per sample
    def process(i):
        """CRF-refine sample *i*; return (predicted label map, gt labels)."""
        image_id, image, gt_label = dataset[i]  # idiomatic indexing

        filename = os.path.join(logit_dir, image_id + ".npy")
        logit = np.load(filename)

        # Upsample the stored logits to the image resolution before the CRF
        _, H, W = image.shape
        logit = torch.FloatTensor(logit)[None, ...]
        logit = F.interpolate(logit,
                              size=(H, W),
                              mode="bilinear",
                              align_corners=False)
        prob = F.softmax(logit, dim=1)[0].numpy()

        # DenseCRF expects an HWC uint8 image
        image = image.astype(np.uint8).transpose(1, 2, 0)
        prob = postprocessor(image, prob)
        label = np.argmax(prob, axis=0)

        return label, gt_label

    # CRF in multi-process; pre_dispatch="all" submits every sample up front
    results = joblib.Parallel(n_jobs=n_jobs, verbose=10, pre_dispatch="all")(
        [joblib.delayed(process)(i) for i in range(len(dataset))])

    preds, gts = zip(*results)

    # Pixel Accuracy, Mean Accuracy, Class IoU, Mean IoU, Freq Weighted IoU
    score = scores(gts, preds, n_class=CONFIG.DATASET.N_CLASSES)

    with open(save_path, "w") as f:
        json.dump(score, f, indent=4, sort_keys=True)
コード例 #5
0
ファイル: main.py プロジェクト: Madave94/deeplab-pytorch
def test(config_path, model_path, cuda):
    """
    Evaluation on validation set

    Runs the model over every validation image, stores the raw logits on
    disk (for later CRF post-processing) and writes the evaluation scores
    to ``scores.json``.
    """
    # Configuration (safe_load: config files need no arbitrary YAML tags)
    CONFIG = Dict(yaml.safe_load(config_path))
    device = get_device(cuda)
    torch.set_grad_enabled(False)

    # Dataset
    dataset = get_dataset(CONFIG.DATASET.NAME)(
        root=CONFIG.DATASET.ROOT,
        split=CONFIG.DATASET.SPLIT.VAL,
        ignore_label=CONFIG.DATASET.IGNORE_LABEL,
        mean_bgr=(CONFIG.IMAGE.MEAN.B, CONFIG.IMAGE.MEAN.G,
                  CONFIG.IMAGE.MEAN.R),
        augment=False,
    )
    print(dataset)

    # DataLoader (no shuffling: evaluation must be deterministic)
    loader = torch.utils.data.DataLoader(
        dataset=dataset,
        batch_size=CONFIG.SOLVER.BATCH_SIZE.TEST,
        num_workers=CONFIG.DATALOADER.NUM_WORKERS,
        shuffle=False,
    )

    # Model. NOTE(review): eval() on a config value executes arbitrary code;
    # acceptable only because config files are trusted, local inputs.
    model = eval(CONFIG.MODEL.NAME)(n_classes=CONFIG.DATASET.N_CLASSES)
    state_dict = torch.load(model_path,
                            map_location=lambda storage, loc: storage)
    model.load_state_dict(state_dict)
    model = nn.DataParallel(model)
    model.eval()
    model.to(device)

    # Path to save logits
    logit_dir = os.path.join(
        CONFIG.EXP.OUTPUT_DIR,
        "features",
        CONFIG.EXP.ID,
        CONFIG.MODEL.NAME.lower(),
        CONFIG.DATASET.SPLIT.VAL,
        "logit",
    )
    makedirs(logit_dir)
    print("Logit dst:", logit_dir)

    # Path to save scores
    save_dir = os.path.join(
        CONFIG.EXP.OUTPUT_DIR,
        "scores",
        CONFIG.EXP.ID,
        CONFIG.MODEL.NAME.lower(),
        CONFIG.DATASET.SPLIT.VAL,
    )
    makedirs(save_dir)
    save_path = os.path.join(save_dir, "scores.json")
    print("Score dst:", save_path)

    preds, gts = [], []
    for image_ids, images, gt_labels in tqdm(loader,
                                             total=len(loader),
                                             dynamic_ncols=True):
        # Image
        images = images.to(device)

        # Forward propagation
        logits = model(images)

        # Save on disk for CRF post-processing
        for image_id, logit in zip(image_ids, logits):
            filename = os.path.join(logit_dir, image_id + ".npy")
            np.save(filename, logit.cpu().numpy())

        # Pixel-wise labeling at the ground-truth resolution
        _, H, W = gt_labels.shape
        logits = F.interpolate(logits,
                               size=(H, W),
                               mode="bilinear",
                               align_corners=False)
        probs = F.softmax(logits, dim=1)
        labels = torch.argmax(probs, dim=1)

        preds += list(labels.cpu().numpy())
        gts += list(gt_labels.numpy())

    # Pixel Accuracy, Mean Accuracy, Class IoU, Mean IoU, Freq Weighted IoU
    score = scores(gts, preds, n_class=CONFIG.DATASET.N_CLASSES)

    with open(save_path, "w") as f:
        json.dump(score, f, indent=4, sort_keys=True)
コード例 #6
0
		gt[gt >=128.] = 1

		preds += list(pd)
		gts += list(gt)


	##################visualization##################
	pd_vis_list = []
	for i,(im,pd) in enumerate(zip(image_list,pd_list)):
		im = cv2.imread(im).astype(np.float32)
		im = cv2.resize(im,(321,321))
		pd_vis = vis(im,pd,0.7)
		pd_vis_list.append(pd_vis)


	all_pd = np.concatenate(pd_vis_list,axis = 1)
	cv2.imwrite(save_dir + '/' + one_class_name + '.png',all_pd)

	score = scores(gts, preds, n_class=2)
	print(one_class_name)
	print('jaccard: ' + str(score['Class IoU'][1]))
	print('precision: ' + str(score['Pixel Accuracy']))
	score_list.append(score)

# Mean Jaccard index: IoU of class index 1 (the foreground class of each
# binary evaluation) averaged over all entries collected in score_list.
iou_list = [one['Class IoU'][1] for one in score_list]
print('mean jaccard: ' + str(sum(iou_list)/len(iou_list)))

# Mean pixel accuracy over the same entries (printed as "precision" here).
pre_list = [one['Pixel Accuracy'] for one in score_list]
print('mean precision: ' + str(sum(pre_list)/len(pre_list)))

コード例 #7
0
def test(config, model_path, cuda, crf):
    """
    Evaluate a checkpoint on the validation split and write the scores as
    JSON next to the checkpoint; optionally refine the softmax maps with
    DenseCRF before taking the argmax.

    Args:
        config: path to a YAML configuration file.
        model_path: path to the ``.pth`` checkpoint.
        cuda: run on GPU when available.
        crf: apply DenseCRF post-processing.
    """
    # Disable autograd globally
    torch.set_grad_enabled(False)

    # Setup. safe_load avoids arbitrary YAML tags; the context manager
    # closes the file handle the original version leaked.
    device = get_device(cuda)
    with open(config) as f:
        CONFIG = Dict(yaml.safe_load(f))

    # If the image size never changes, let cuDNN auto-tune its kernels
    if CONFIG.DATASET.WARP_IMAGE:
        torch.backends.cudnn.benchmark = True

    # Dataset 10k or 164k
    dataset = get_dataset(CONFIG.DATASET.NAME)(
        root=CONFIG.DATASET.ROOT,
        split=CONFIG.DATASET.SPLIT.VAL,
        base_size=CONFIG.IMAGE.SIZE.TEST,
        crop_size=None,
        mean=(CONFIG.IMAGE.MEAN.B, CONFIG.IMAGE.MEAN.G, CONFIG.IMAGE.MEAN.R),
        warp=CONFIG.DATASET.WARP_IMAGE,
        scale=None,
        flip=False,
    )

    # DataLoader (no shuffling: evaluation must be deterministic)
    loader = torch.utils.data.DataLoader(
        dataset=dataset,
        batch_size=CONFIG.SOLVER.BATCH_SIZE.TEST,
        num_workers=CONFIG.DATALOADER.NUM_WORKERS,
        shuffle=False,
    )

    # Model
    model = setup_model(model_path, CONFIG.DATASET.N_CLASSES, train=False)
    model.to(device)

    # CRF post-processor
    postprocessor = DenseCRF(
        iter_max=CONFIG.CRF.ITER_MAX,
        pos_xy_std=CONFIG.CRF.POS_XY_STD,
        pos_w=CONFIG.CRF.POS_W,
        bi_xy_std=CONFIG.CRF.BI_XY_STD,
        bi_rgb_std=CONFIG.CRF.BI_RGB_STD,
        bi_w=CONFIG.CRF.BI_W,
    )

    preds, gts = [], []
    for images, labels in tqdm(loader,
                               total=len(loader),
                               leave=False,
                               dynamic_ncols=True):
        # Image
        images = images.to(device)
        _, H, W = labels.shape

        # Forward propagation; upsample logits to label resolution
        logits = model(images)
        logits = F.interpolate(logits,
                               size=(H, W),
                               mode="bilinear",
                               align_corners=True)
        probs = F.softmax(logits, dim=1)
        probs = probs.data.cpu().numpy()

        # Postprocessing
        if crf:
            # images: (B,C,H,W) -> (B,H,W,C)
            images = images.data.cpu().numpy().astype(np.uint8).transpose(
                0, 2, 3, 1)
            probs = joblib.Parallel(n_jobs=-1)([
                joblib.delayed(postprocessor)(*pair)
                for pair in zip(images, probs)
            ])

        labelmaps = np.argmax(probs, axis=1)

        preds += list(labelmaps)
        gts += list(labels.numpy())

    # Pixel Accuracy, Mean Accuracy, Class IoU, Mean IoU, Freq Weighted IoU
    score = scores(gts, preds, n_class=CONFIG.DATASET.N_CLASSES)

    with open(model_path.replace(".pth", ".json"), "w") as f:
        json.dump(score, f, indent=4, sort_keys=True)
コード例 #8
0
def main(config, model_path, cuda, crf):
    """
    Evaluate a DeepLab v2 checkpoint on the validation split and write the
    scores as JSON next to the checkpoint; optionally refine the softmax
    maps with DenseCRF.

    Args:
        config: path to a YAML configuration file.
        model_path: path to the ``.pth`` checkpoint.
        cuda: request GPU execution (falls back to CPU if unavailable).
        crf: apply DenseCRF post-processing.
    """
    cuda = cuda and torch.cuda.is_available()
    device = torch.device("cuda" if cuda else "cpu")

    if cuda:
        current_device = torch.cuda.current_device()
        print("Running on", torch.cuda.get_device_name(current_device))
    else:
        print("Running on CPU")

    # Configuration. safe_load avoids arbitrary YAML tags; the context
    # manager closes the handle the original version leaked.
    with open(config) as f:
        CONFIG = Dict(yaml.safe_load(f))

    # Dataset 10k or 164k
    dataset = get_dataset(CONFIG.DATASET)(
        root=CONFIG.ROOT,
        split=CONFIG.SPLIT.VAL,
        base_size=CONFIG.IMAGE.SIZE.TEST,
        mean=(CONFIG.IMAGE.MEAN.B, CONFIG.IMAGE.MEAN.G, CONFIG.IMAGE.MEAN.R),
        warp=CONFIG.WARP_IMAGE,
        scale=None,
        flip=False,
    )

    # DataLoader (no shuffling: evaluation must be deterministic)
    loader = torch.utils.data.DataLoader(
        dataset=dataset,
        batch_size=CONFIG.BATCH_SIZE.TEST,
        num_workers=CONFIG.NUM_WORKERS,
        shuffle=False,
    )

    # Inference only: disable autograd globally
    torch.set_grad_enabled(False)

    # Model (checkpoint loaded on CPU regardless of where it was saved)
    model = DeepLabV2_ResNet101_MSC(n_classes=CONFIG.N_CLASSES)
    state_dict = torch.load(model_path,
                            map_location=lambda storage, loc: storage)
    model.load_state_dict(state_dict)
    model = nn.DataParallel(model)
    model.eval()
    model.to(device)

    targets, outputs = [], []
    for data, target in tqdm(loader,
                             total=len(loader),
                             leave=False,
                             dynamic_ncols=True):
        # Image
        data = data.to(device)

        # Forward propagation; upsample logits back to the input resolution.
        # align_corners=False is the interpolate default the original relied
        # on implicitly (with a warning); made explicit here.
        output = model(data)
        output = F.interpolate(output, size=data.shape[2:], mode="bilinear",
                               align_corners=False)
        output = F.softmax(output, dim=1)
        output = output.data.cpu().numpy()

        # Postprocessing
        if crf:
            crf_output = np.zeros(output.shape)
            images = data.data.cpu().numpy().astype(np.uint8)
            for i, (image, prob_map) in enumerate(zip(images, output)):
                image = image.transpose(1, 2, 0)  # CHW -> HWC for the CRF
                crf_output[i] = dense_crf(image, prob_map)
            output = crf_output

        output = np.argmax(output, axis=1)
        target = target.numpy()

        for o, t in zip(output, target):
            outputs.append(o)
            targets.append(t)

    score, class_iou = scores(targets, outputs, n_class=CONFIG.N_CLASSES)

    for k, v in score.items():
        print(k, v)

    score["Class IoU"] = {}
    for i in range(CONFIG.N_CLASSES):
        score["Class IoU"][i] = class_iou[i]

    with open(model_path.replace(".pth", ".json"), "w") as f:
        json.dump(score, f, indent=4, sort_keys=True)
コード例 #9
0
ファイル: main.py プロジェクト: bityangke/WSSS2020
def evaluate(model, writer, train_iter, save_class=False, save_logit=False):
    """Create the model and start the evaluation process.

    Args:
        model: segmentation network producing per-class logits.
        writer: summary writer; the final mean IoU is logged as "meanIoU".
        train_iter: global step used when logging.
        save_class: additionally save the first prediction of each batch as
            a paletted PNG.
        save_logit: additionally save raw logits as ``.npy`` files for
            later CRF post-processing.

    Relies on module-level globals: CONFIG, IMG_MEAN, colors_map.
    """
    # Configuration
    torch.set_grad_enabled(False)
    device = get_device(torch.cuda.is_available())
    model.eval()

    # Path to save logits (exist_ok replaces the manual existence check)
    logit_dir = os.path.join(
        CONFIG.EXP.SAVE_PRED,
        "logit",
    )
    os.makedirs(logit_dir, exist_ok=True)
    print("Logit dst:", logit_dir)
    # Path to save scores
    save_dir = os.path.join(
        CONFIG.EXP.SAVE_PRED,
        "scores",
    )
    os.makedirs(save_dir, exist_ok=True)
    save_path = os.path.join(save_dir, "scores_xavier.json")
    print("Score dst:", save_path)

    testloader = data.DataLoader(VOCDataSet(CONFIG.DATASET.DIRECTORY,
                                            CONFIG.DATASET.VAL_LIST_PATH,
                                            crop_size=(CONFIG.IMAGE.SIZE.TEST,
                                                       CONFIG.IMAGE.SIZE.TEST),
                                            mean=IMG_MEAN,
                                            scale=False,
                                            mirror=False),
                                 batch_size=CONFIG.SOLVER.BATCH_SIZE.TEST,
                                 shuffle=False,
                                 pin_memory=True)

    t_start = time.time()
    preds, gts = [], []
    for index, batch in enumerate(testloader):
        images, gt_labels, size, image_names = batch
        # Image
        images = images.to(device)

        # Forward propagation
        logits = model(images)
        # Save on disk for CRF post-processing
        if save_logit:
            for image_id, logit in zip(image_names, logits):
                filename = os.path.join(logit_dir, image_id + ".npy")
                np.save(filename, logit.cpu().numpy())

        # Pixel-wise labeling at the ground-truth resolution
        _, H, W = gt_labels.shape
        logits = F.interpolate(logits,
                               size=(H, W),
                               mode="bilinear",
                               align_corners=False)
        probs = F.softmax(logits, dim=1)
        labels = torch.argmax(probs, dim=1)

        preds += list(labels.cpu().numpy())
        gts += list(gt_labels.numpy())
        print("Evaluate [{}] name {}  time: {:3} s".format(
            index, image_names[0],
            time.time() - t_start),
              end='\r')
        # === save predict result in .png
        # NOTE(review): scipy.misc.toimage was removed in SciPy 1.2; this
        # branch requires an old SciPy (or a port to Pillow) -- confirm.
        if save_class:
            save_dir_class = os.path.join(
                CONFIG.EXP.SAVE_PRED,
                "classes",
            )
            os.makedirs(save_dir_class, exist_ok=True)
            scipy.misc.toimage(labels.cpu().numpy()[0],
                               cmin=0,
                               cmax=255,
                               pal=colors_map,
                               mode='P').save(
                                   os.path.join(save_dir_class,
                                                image_names[0] + '.png'))

    # Pixel Accuracy, Mean Accuracy, Class IoU, Mean IoU, Freq Weighted IoU.
    # PERF: hoisted out of the loop -- the original recomputed scores() over
    # all accumulated predictions after every batch (quadratic) although
    # only the final value is consumed below.
    score = scores(gts, preds, n_class=CONFIG.DATASET.N_CLASSES)
    """ scores()
     return {
    "Pixel Accuracy": acc,
    "Mean Accuracy": acc_cls,
    "Frequency Weighted IoU": fwavacc,
    "Mean IoU": mean_iu,
    "Class IoU": cls_iu,
    }
    """
    writer.add_scalar("meanIoU", score["Mean IoU"], train_iter)
コード例 #10
0
def main(config, excludeval, embedding, model_path, run, cuda, crf, redo,
         imagedataset, threshold):
    """Zero-/generalized zero-shot segmentation evaluation.

    Evaluates a word-embedding-conditioned DeepLab model under one of four
    protocols ('zlss'/'flss': unseen classes only; 'gzlss'/'gflss': seen +
    unseen classes), optionally with DenseCRF post-processing and a
    per-class score threshold (gzlss only). Scores are written as JSON
    next to the checkpoint.

    Relies on module-level imports/helpers: Dict, get_dataset, scores,
    scores_gzsl, dense_crf, DeepLabV2_ResNet101_MSC.
    """
    pth_extn = '.pth.tar'
    # If results already exist and neither --redo nor --threshold was given,
    # print the cached scores and exit early.
    if osp.isfile(model_path.replace(
            pth_extn, "_" + run + ".json")) and not threshold and not redo:
        print("Already Done!")
        with open(model_path.replace(pth_extn,
                                     "_" + run + ".json")) as json_file:
            data = json.load(json_file)
            for key, value in data.items():
                if not key == "Class IoU":
                    print(key, value)
        sys.exit()

    cuda = cuda and torch.cuda.is_available()
    device = torch.device("cuda" if cuda else "cpu")

    if cuda:
        current_device = torch.cuda.current_device()
        print("Running on", torch.cuda.get_device_name(current_device))
    else:
        print("Running on CPU")

    # Configuration
    # NOTE(review): yaml.load without a Loader is unsafe/deprecated, and the
    # file handle is never closed -- confirm and fix upstream.
    CONFIG = Dict(yaml.load(open(config)))

    datadir = os.path.join('data/datasets', imagedataset)
    print("Split dir: ", datadir)
    savedir = osp.dirname(model_path)
    # Epoch number recovered from the checkpoint filename
    # ("checkpoint_<epoch>.pth.tar").
    epoch = re.findall("checkpoint_(.*)\." + pth_extn[1:],
                       osp.basename(model_path))[-1]
    val = None
    visible_classes = None

    # Select the test image list and the set of classes visible at test time
    # for the chosen protocol.
    if run == 'zlss' or run == 'flss':
        # Unseen (novel) classes only.
        val = np.load(datadir + '/split/test_list.npy')
        visible_classes = np.load(datadir + '/split/novel_cls.npy')
    elif run == 'gzlss' or run == 'gflss':
        # Generalized setting: seen (+ val, unless --excludeval) plus novel.
        val = np.load(datadir + '/split/test_list.npy')
        if excludeval:
            vals_cls = np.asarray(np.load(datadir + '/split/seen_cls.npy'),
                                  dtype=int)
        else:
            vals_cls = np.asarray(np.concatenate([
                np.load(datadir + '/split/seen_cls.npy'),
                np.load(datadir + '/split/val_cls.npy')
            ]),
                                  dtype=int)
        valu_cls = np.load(datadir + '/split/novel_cls.npy')
        visible_classes = np.concatenate([vals_cls, valu_cls])
    else:
        print("invalid run ", run)
        sys.exit()

    # A score threshold only makes sense in the generalized zero-shot run.
    if threshold is not None and run != 'gzlss':
        print("invalid run for threshold", run)
        sys.exit()

    # cls_map: original label id -> contiguous index among visible classes;
    # 255 marks ignored/unmapped labels.
    cls_map = np.array([255] * 256)
    for i, n in enumerate(visible_classes):
        cls_map[n] = i

    if threshold is not None:
        savedir = osp.join(savedir, str(threshold))

    if crf is not None:
        savedir = savedir + '-crf'

    if run == 'gzlss' or run == 'gflss':

        # Secondary maps: contiguous index -> position within the novel
        # (resp. seen) subset, again with 255 as the ignore marker.
        novel_cls_map = np.array([255] * 256)
        for i, n in enumerate(list(valu_cls)):
            novel_cls_map[cls_map[n]] = i

        seen_cls_map = np.array([255] * 256)
        for i, n in enumerate(list(vals_cls)):
            seen_cls_map[cls_map[n]] = i

        if threshold is not None:

            # Per-class bias subtracted from softmax scores: seen classes are
            # penalized by `threshold` to favor unseen classes.
            # NOTE(review): np.float is removed in NumPy >= 1.24 -- confirm.
            thresholdv = np.asarray(np.zeros((visible_classes.shape[0], 1)),
                                    dtype=np.float)
            thresholdv[np.in1d(visible_classes, vals_cls), 0] = threshold
            thresholdv = torch.tensor(thresholdv).float().cuda()

    visible_classesp = np.concatenate([visible_classes, [255]])

    # Human-readable class names, indexed by original label id.
    all_labels = np.genfromtxt(datadir + '/labels_2.txt',
                               delimiter='\t',
                               usecols=1,
                               dtype='str')

    print("Visible Classes: ", visible_classes)

    # Dataset restricted to the protocol's test list.
    dataset = get_dataset(CONFIG.DATASET)(
        train=None,
        test=val,
        root=CONFIG.ROOT,
        split=CONFIG.SPLIT.TEST,
        base_size=CONFIG.IMAGE.SIZE.TEST,
        mean=(CONFIG.IMAGE.MEAN.B, CONFIG.IMAGE.MEAN.G, CONFIG.IMAGE.MEAN.R),
        warp=CONFIG.WARP_IMAGE,
        scale=None,
        flip=False,
    )

    # Class embeddings: word2vec, fasttext, or their concatenation.
    if embedding == 'word2vec':
        class_emb = pickle.load(
            open(datadir + '/word_vectors/word2vec.pkl', "rb"))
    elif embedding == 'fasttext':
        class_emb = pickle.load(
            open(datadir + '/word_vectors/fasttext.pkl', "rb"))
    elif embedding == 'fastnvec':
        class_emb = np.concatenate([
            pickle.load(open(datadir + '/word_vectors/fasttext.pkl', "rb")),
            pickle.load(open(datadir + '/word_vectors/word2vec.pkl', "rb"))
        ],
                                   axis=1)
    else:
        print("invalid emb ", embedding)
        sys.exit()

    # Keep only visible classes and L2-normalize each embedding vector.
    class_emb = class_emb[visible_classes]
    class_emb = F.normalize(torch.tensor(class_emb), p=2, dim=1).cuda()

    print("Embedding dim: ", class_emb.shape[1])
    print("# Visible Classes: ", class_emb.shape[0])

    # DataLoader (no shuffling: evaluation must be deterministic)
    loader = torch.utils.data.DataLoader(
        dataset=dataset,
        batch_size=CONFIG.BATCH_SIZE.TEST,
        num_workers=CONFIG.NUM_WORKERS,
        shuffle=False,
    )

    torch.set_grad_enabled(False)

    # Model conditioned on the class embeddings.
    model = DeepLabV2_ResNet101_MSC(class_emb.shape[1], class_emb)

    sdir = osp.join(savedir, model_path.replace(pth_extn, ""), str(epoch), run)

    state_dict = torch.load(model_path,
                            map_location=lambda storage, loc: storage)
    model = nn.DataParallel(model)
    model.load_state_dict(state_dict['state_dict'])
    model.eval()
    model.to(device)
    imgfeat = []
    targets, outputs = [], []
    for data, target, img_id in tqdm(loader,
                                     total=len(loader),
                                     leave=False,
                                     dynamic_ncols=True):
        # Image
        data = data.to(device)
        # Forward propagation; upsample logits to the input resolution
        output = model(data)
        output = F.interpolate(output,
                               size=data.shape[2:],
                               mode="bilinear",
                               align_corners=False)

        output = F.softmax(output, dim=1)
        # Bias seen-class scores downward in the thresholded gzlss setting.
        if threshold is not None:
            output = output - thresholdv.view(1, -1, 1, 1)

        # Remap ground-truth labels to contiguous visible-class indices.
        target = cls_map[target.numpy()]

        # Postprocessing: DenseCRF refinement or plain argmax.
        if crf:
            output = output.data.cpu().numpy()
            crf_output = np.zeros(output.shape)
            images = data.data.cpu().numpy().astype(np.uint8)
            for i, (image, prob_map) in enumerate(zip(images, output)):
                image = image.transpose(1, 2, 0)
                crf_output[i] = dense_crf(image, prob_map)
            output = crf_output
            output = np.argmax(output, axis=1)
        else:
            output = torch.argmax(output, dim=1).cpu().numpy()

        for o, t in zip(output, target):
            outputs.append(o)
            targets.append(t)

    # Generalized runs report seen/unseen splits via scores_gzsl.
    if run == 'gzlss' or run == 'gflss':
        score, class_iou = scores_gzsl(targets,
                                       outputs,
                                       n_class=len(visible_classes),
                                       seen_cls=cls_map[vals_cls],
                                       unseen_cls=cls_map[valu_cls])
    else:
        score, class_iou = scores(targets,
                                  outputs,
                                  n_class=len(visible_classes))

    for k, v in score.items():
        print(k, v)

    # Attach per-class IoU keyed by human-readable class name.
    score["Class IoU"] = {}
    for i in range(len(visible_classes)):
        score["Class IoU"][all_labels[visible_classes[i]]] = class_iou[i]

    # Persist scores; the threshold value is encoded in the filename.
    if threshold is not None:
        with open(
                model_path.replace(pth_extn, "_" + run + '_T' +
                                   str(threshold) + ".json"), "w") as f:
            json.dump(score, f, indent=4, sort_keys=True)
    else:
        with open(model_path.replace(pth_extn, "_" + run + ".json"), "w") as f:
            json.dump(score, f, indent=4, sort_keys=True)

    print(score["Class IoU"])
コード例 #11
0
def main(config, cuda, show):
    """Multi-scale, flip-augmented evaluation of PSPNet on PASCAL VOC 2012 val.

    For each image, predictions at several scales (and their horizontal
    flips) are averaged before the argmax; mean/class IoU scores are
    written to ``results.json``.

    Args:
        config: Path to a YAML configuration file.
        cuda: Run on GPU when available.
        show: Display prediction / ground-truth pairs with OpenCV.
    """
    # yaml.safe_load avoids arbitrary object construction, and the context
    # manager closes the handle that the original bare open() leaked.
    with open(config) as f:
        CONFIG = Dict(yaml.safe_load(f))

    cuda = cuda and torch.cuda.is_available()

    dataset = VOCSegmentation(
        root=CONFIG.DATASET_ROOT, image_set="val", dataset_name="VOC2012"
    )

    dataloader = torch.utils.data.DataLoader(
        dataset=dataset,
        batch_size=1,  #! DO NOT CHANGE
        num_workers=CONFIG.NUM_WORKERS,
        pin_memory=False,
        shuffle=False,
    )

    # Load pre-trained weights
    state_dict = torch.load(CONFIG.PYTORCH_MODEL)

    # Model
    model = PSPNet(
        n_classes=CONFIG.N_CLASSES, n_blocks=CONFIG.N_BLOCKS, pyramids=CONFIG.PYRAMIDS
    )
    model.load_state_dict(state_dict)
    model = nn.DataParallel(model)
    model.eval()
    if cuda:
        model.cuda()

    crop_size = CONFIG.IMAGE.SIZE.TEST
    targets, outputs = [], []

    for image, target in tqdm(
        dataloader, total=len(dataloader), leave=False, dynamic_ncols=True
    ):

        h, w = image.size()[2:]
        outputs_ = []

        for scale in CONFIG.SCALES:

            # Resize so the longer side becomes scale * BASE (aspect kept)
            long_side = int(scale * CONFIG.IMAGE.SIZE.BASE)
            new_h = long_side
            new_w = long_side
            if h > w:
                new_w = int(long_side * w / h)
            else:
                new_h = int(long_side * h / w)
            # F.upsample is deprecated; F.interpolate with explicit
            # align_corners=False reproduces the alias's current default
            # behavior for bilinear mode.
            image_ = F.interpolate(
                image, size=(new_h, new_w), mode="bilinear", align_corners=False
            ).data

            # Predict (w/ flipping)
            if long_side <= crop_size:
                # Padding evaluation: the whole image fits in one crop
                image_ = pad_image(image_, crop_size)
                image_ = to_var(image_, cuda)
                output = torch.cat(
                    (model(image_), flip(model(flip(image_))))  # C, H, W  # C, H, W
                )
                output = F.interpolate(
                    output, size=(crop_size,) * 2, mode="bilinear",
                    align_corners=False
                )
                # Revert to original size
                output = output[..., 0:new_h, 0:new_w]
                output = F.interpolate(
                    output, size=(h, w), mode="bilinear", align_corners=False
                )
                outputs_ += [o for o in output.data]  # 2 x [C, H, W]
            else:
                # Sliced (tiled) evaluation for images larger than the crop
                image_ = pad_image(image_, crop_size)
                output = torch.cat(
                    (
                        tile_predict(image_, model, crop_size, cuda, CONFIG.N_CLASSES),
                        flip(
                            tile_predict(
                                flip(image_), model, crop_size, cuda, CONFIG.N_CLASSES
                            )
                        ),
                    )
                )
                # Revert to original size
                output = output[..., 0:new_h, 0:new_w]
                output = F.interpolate(
                    output, size=(h, w), mode="bilinear", align_corners=False
                )
                outputs_ += [o for o in output.data]  # 2 x [C, H, W]

        # Average the 2 x #scales probability maps, then take the argmax
        output = torch.stack(outputs_, dim=0)  # 2x#scales, C, H, W
        output = torch.mean(output, dim=0)  # C, H, W
        output = torch.max(output, dim=0)[1]  # H, W
        output = output.cpu().numpy()
        target = target.squeeze(0).numpy()

        if show:
            # Side-by-side colormapped prediction / ground-truth visualization
            res_gt = np.concatenate((output, target), 1)
            mask = (res_gt >= 0)[..., None]
            res_gt[res_gt < 0] = 0
            res_gt = np.uint8(res_gt / float(CONFIG.N_CLASSES) * 255)
            res_gt = cv2.applyColorMap(res_gt, cv2.COLORMAP_JET)
            res_gt = np.uint8(res_gt * mask)
            img = np.uint8(image.numpy()[0].transpose(1, 2, 0) + dataset.mean_rgb)[
                ..., ::-1
            ]
            img_res_gt = np.concatenate((img, res_gt), 1)
            cv2.imshow("result", img_res_gt)
            cv2.waitKey(10)

        outputs.append(output)
        targets.append(target)

    score, class_iou = scores(targets, outputs, n_class=CONFIG.N_CLASSES)

    for k, v in score.items():
        print(k, v)

    score["Class IoU"] = {}
    for i in range(CONFIG.N_CLASSES):
        score["Class IoU"][i] = class_iou[i]

    with open("results.json", "w") as f:
        json.dump(score, f, indent=4, sort_keys=True)
コード例 #12
0
def main(config, model_path, cuda):
    """Evaluate DeepLabV2-ResNet101-MSC with DenseCRF post-processing.

    Runs the model over the test split, refines the softmax probability
    maps with DenseCRF, scores the argmax predictions, and writes the
    result dict to ``results.json``.

    Args:
        config: Path to a YAML configuration file.
        model_path: Path to a ``.pth`` checkpoint.
        cuda: Run on GPU when available.
    """
    # Configuration (safe_load avoids arbitrary object construction)
    with open(config) as f:
        CONFIG = yaml.safe_load(f)

    cuda = cuda and torch.cuda.is_available()

    image_size = (CONFIG['IMAGE']['SIZE']['TEST'],
                  CONFIG['IMAGE']['SIZE']['TEST'])
    n_classes = CONFIG['N_CLASSES']

    # Dataset
    dataset = get_dataset(CONFIG['DATASET'])(root=CONFIG['ROOT'],
                                             split='test',
                                             image_size=image_size,
                                             scale=False,
                                             flip=False,
                                             preload=False)

    # DataLoader
    loader = torch.utils.data.DataLoader(dataset=dataset,
                                         batch_size=CONFIG['BATCH_SIZE'],
                                         num_workers=CONFIG['NUM_WORKERS'],
                                         shuffle=False)

    state_dict = torch.load(model_path,
                            map_location=lambda storage, loc: storage)

    # Model
    model = DeepLabV2_ResNet101_MSC(n_classes=n_classes)
    model.load_state_dict(state_dict)
    model.eval()
    if cuda:
        model.cuda()

    # Inference only: replaces the removed Variable(..., volatile=True) API
    torch.set_grad_enabled(False)

    targets, outputs = [], []
    for data, target in tqdm(loader,
                             total=len(loader),
                             leave=False,
                             dynamic_ncols=True):
        # Image
        data = data.cuda() if cuda else data

        # Forward propagation; F.upsample is deprecated, F.interpolate with
        # align_corners=False matches the alias's current bilinear default
        output = model(data)
        output = F.interpolate(output, size=image_size, mode='bilinear',
                               align_corners=False)
        output = F.softmax(output, dim=1)
        output = output.data.cpu().numpy()

        # DenseCRF refinement on the softmax probability maps
        crf_output = np.zeros(output.shape)
        images = data.data.cpu().numpy().astype(np.uint8)
        for i, (image, prob_map) in enumerate(zip(images, output)):
            image = image.transpose(1, 2, 0)  # CHW -> HWC for the CRF
            crf_output[i] = dense_crf(image, prob_map)
        output = crf_output

        output = np.argmax(output, axis=1)
        target = target.numpy()

        for o, t in zip(output, target):
            outputs.append(o)
            targets.append(t)

    score, class_iou = scores(targets, outputs, n_class=n_classes)

    for k, v in score.items():
        # Was a Python 2 print statement (`print k, v`): SyntaxError on Py3
        print(k, v)

    score['Class IoU'] = {}
    for i in range(n_classes):
        score['Class IoU'][i] = class_iou[i]

    with open('results.json', 'w') as f:
        json.dump(score, f)
コード例 #13
0
ファイル: eval.py プロジェクト: SunYa0/deeplab-pytorch-crf
def main(config, model_path, cuda, crf):
    """Evaluate DeepLabV2-ResNet101-MSC, optionally refining softmax maps
    with DenseCRF run across a multiprocessing pool.

    Writes the score dict next to the checkpoint as ``<model>.json``.

    Args:
        config: Path to a YAML configuration file.
        model_path: Path to a ``.pth`` checkpoint.
        cuda: Run on GPU when available.
        crf: Apply DenseCRF post-processing.
    """
    cuda = cuda and torch.cuda.is_available()
    device = torch.device("cuda" if cuda else "cpu")

    if cuda:
        current_device = torch.cuda.current_device()
        print("Running on", torch.cuda.get_device_name(current_device))
    else:
        print("Running on CPU")

    # Configuration: safe_load, and close the handle the original leaked
    with open(config) as f:
        CONFIG = Dict(yaml.safe_load(f))

    # Dataset 10k or 164k
    dataset = get_dataset(CONFIG.DATASET)(
        root=CONFIG.ROOT,
        split=CONFIG.SPLIT.VAL,
        base_size=CONFIG.IMAGE.SIZE.TEST,
        mean=(CONFIG.IMAGE.MEAN.B, CONFIG.IMAGE.MEAN.G, CONFIG.IMAGE.MEAN.R),
        warp=CONFIG.WARP_IMAGE,
        scale=None,
        flip=False,
    )

    # DataLoader
    loader = torch.utils.data.DataLoader(
        dataset=dataset,
        batch_size=CONFIG.BATCH_SIZE.TEST,
        num_workers=CONFIG.NUM_WORKERS,
        shuffle=False,
    )

    torch.set_grad_enabled(False)

    # Model
    model = DeepLabV2_ResNet101_MSC(n_classes=CONFIG.N_CLASSES)
    state_dict = torch.load(model_path, map_location=lambda storage, loc: storage)
    model.load_state_dict(state_dict)
    model = nn.DataParallel(model)
    model.eval()
    model.to(device)

    # Create the CRF worker pool once: the original spawned a new pool on
    # every batch and close()d it without join(), leaking worker processes.
    pool = mp.Pool(mp.cpu_count()) if crf else None

    preds, gts = [], []
    try:
        for images, labels in tqdm(
            loader, total=len(loader), leave=False, dynamic_ncols=True
        ):
            # Image
            images = images.to(device)

            # Forward propagation
            logits = model(images)
            logits = F.interpolate(
                logits, size=images.shape[2:], mode="bilinear", align_corners=True
            )
            probs = F.softmax(logits, dim=1)
            probs = probs.data.cpu().numpy()

            # Postprocessing: one (image, prob_map) pair per worker
            if crf:
                # NCHW -> NHWC uint8 images for the CRF
                images = images.data.cpu().numpy().astype(np.uint8).transpose(0, 2, 3, 1)
                probs = pool.map(dense_crf_wrapper, zip(images, probs))

            preds += list(np.argmax(probs, axis=1))
            gts += list(labels.numpy())
    finally:
        # Always shut the workers down, even if evaluation raises
        if pool is not None:
            pool.close()
            pool.join()

    score = scores(gts, preds, n_class=CONFIG.N_CLASSES)

    with open(model_path.replace(".pth", ".json"), "w") as f:
        json.dump(score, f, indent=4, sort_keys=True)
コード例 #14
0
ファイル: eval.py プロジェクト: yangsenwxy/STRICT
def main(config, embedding, model_path, run, imagedataset, local_rank, resnet,
         bkg):
    """Distributed evaluation of a (generalized) zero/few-shot semantic
    segmentation model built on DeepLabV2 with word-embedding classifiers.

    Args:
        config: Path to a YAML configuration file.
        embedding: Class-embedding type: 'word2vec', 'fasttext' or 'fastnvec'.
        model_path: Path to a '.pth.tar' checkpoint.
        run: Evaluation protocol: 'zlss', 'flss', 'gzlss' or 'gflss'.
        imagedataset: Dataset folder name under data/datasets.
        local_rank: Local process rank for torch.distributed.
        resnet: Backbone variant flag; 'spnet' triggers checkpoint key renaming.
        bkg: Include the background class (id 0) among the visible classes.

    Side effects: rank 0 writes the score dict to a JSON file derived from
    the checkpoint path.
    """

    rank, world_size, device_id, device = setup(local_rank)
    print("Local rank: {} Rank: {} World Size: {} Device_id: {} Device: {}".
          format(local_rank, rank, world_size, device_id, device))
    pth_extn = '.pth.tar'

    # Configuration
    # NOTE(review): yaml.load without a Loader is deprecated/unsafe and the
    # open() handle is never closed - consider yaml.safe_load inside `with`.
    CONFIG = Dict(yaml.load(open(config)))

    datadir = os.path.join('data/datasets', imagedataset)
    print("Split dir: ", datadir)
    savedir = osp.dirname(model_path)
    # Epoch number parsed from the checkpoint filename (not used below)
    epoch = re.findall("checkpoint_(.*)\." + pth_extn[1:],
                       osp.basename(model_path))[-1]

    # Select the test image list and the set of classes visible at test time
    if run == 'zlss' or run == 'flss':
        # Non-generalized setting: evaluate on novel classes only
        val = np.load(datadir + '/split/test_list.npy')
        visible_classes = np.load(datadir + '/split/novel_cls.npy')
        if bkg:
            visible_classes = np.asarray(np.concatenate(
                [np.array([0]), visible_classes]),
                                         dtype=int)
    elif run == 'gzlss' or run == 'gflss':
        # Generalized setting: seen (+val) classes together with novel ones
        val = np.load(datadir + '/split/test_list.npy')

        vals_cls = np.asarray(np.concatenate([
            np.load(datadir + '/split/seen_cls.npy'),
            np.load(datadir + '/split/val_cls.npy')
        ]),
                              dtype=int)

        if bkg:
            vals_cls = np.asarray(np.concatenate([np.array([0]), vals_cls]),
                                  dtype=int)
        valu_cls = np.load(datadir + '/split/novel_cls.npy')
        visible_classes = np.concatenate([vals_cls, valu_cls])
    else:
        print("invalid run ", run)
        sys.exit()

    # Map original label ids -> contiguous [0, n_visible); 255 marks ignore
    cls_map = np.array([255] * 256)
    for i, n in enumerate(visible_classes):
        cls_map[n] = i

    if run == 'gzlss' or run == 'gflss':

        # Secondary maps from contiguous ids back to the unseen/seen subsets
        novel_cls_map = np.array([255] * 256)
        for i, n in enumerate(list(valu_cls)):
            novel_cls_map[cls_map[n]] = i

        seen_cls_map = np.array([255] * 256)
        for i, n in enumerate(list(vals_cls)):
            seen_cls_map[cls_map[n]] = i

    # Human-readable class names (tab-separated file, second column)
    all_labels = np.genfromtxt(datadir + '/labels_2.txt',
                               delimiter='\t',
                               usecols=1,
                               dtype='str')

    print("Visible Classes: ", visible_classes)

    # Dataset
    dataset = get_dataset(CONFIG.DATASET)(
        train=None,
        test=val,
        root=CONFIG.ROOT,
        split=CONFIG.SPLIT.TEST,
        base_size=CONFIG.IMAGE.SIZE.TEST,
        mean=(CONFIG.IMAGE.MEAN.B, CONFIG.IMAGE.MEAN.G, CONFIG.IMAGE.MEAN.R),
        warp=CONFIG.WARP_IMAGE,
        scale=None,
        flip=False,
    )

    random.seed(42)

    # Load per-class word embeddings used as classifier weights
    if embedding == 'word2vec':
        class_emb = pickle.load(
            open(datadir + '/word_vectors/word2vec.pkl', "rb"))
    elif embedding == 'fasttext':
        class_emb = pickle.load(
            open(datadir + '/word_vectors/fasttext.pkl', "rb"))
    elif embedding == 'fastnvec':
        # Concatenation of fasttext and word2vec vectors along the feature axis
        class_emb = np.concatenate([
            pickle.load(open(datadir + '/word_vectors/fasttext.pkl', "rb")),
            pickle.load(open(datadir + '/word_vectors/word2vec.pkl', "rb"))
        ],
                                   axis=1)
    else:
        print("invalid emb ", embedding)
        sys.exit()

    # Keep only visible classes and L2-normalize each embedding vector
    class_emb = class_emb[visible_classes]
    class_emb = F.normalize(torch.tensor(class_emb), p=2, dim=1).cuda()

    print("Embedding dim: ", class_emb.shape[1])
    print("# Visible Classes: ", class_emb.shape[0])

    # DataLoader
    # NOTE(review): drop_last=True silently skips the final partial batch,
    # so a few test images may be excluded from the reported scores.
    loader = torch.utils.data.DataLoader(dataset=dataset,
                                         batch_size=CONFIG.BATCH_SIZE.TEST,
                                         num_workers=CONFIG.NUM_WORKERS,
                                         shuffle=False,
                                         sampler=DistributedSampler(
                                             dataset,
                                             num_replicas=world_size,
                                             rank=rank,
                                             shuffle=False),
                                         pin_memory=True,
                                         drop_last=True)

    torch.set_grad_enabled(False)  # inference only

    # Model
    model = DeepLabV2_ResNet101_MSC(class_emb.shape[1],
                                    class_emb,
                                    resnet=resnet)

    state_dict = torch.load(model_path, map_location='cpu')
    model = DistributedDataParallel(model.to(device), device_ids=[rank])
    new_state_dict = OrderedDict()
    if resnet == 'spnet':
        # Rename checkpoint keys from the SPNet layout to this model's layout
        for k, v in state_dict['state_dict'].items():
            name = k.replace("scale", "base")  # 'scale'->base
            name = name.replace("stages.", "")
            new_state_dict[name] = v
    else:
        new_state_dict = state_dict['state_dict']
    model.load_state_dict(new_state_dict)
    del state_dict  # free checkpoint memory before evaluation

    model.eval()
    targets, outputs = [], []

    loader_iter = iter(loader)
    iterations = len(loader_iter)
    print("Iterations: {}".format(iterations))

    # NOTE(review): pbar iterates `loader` only to drive the progress bar;
    # the batches actually consumed come from next(loader_iter) below.
    pbar = tqdm(loader,
                total=iterations,
                leave=False,
                dynamic_ncols=True,
                position=rank)
    for iteration in pbar:

        data, target, img_id = next(loader_iter)
        # Image
        data = data.to(device)
        # Forward propagation
        output = model(data)
        output = F.interpolate(output,
                               size=data.shape[2:],
                               mode="bilinear",
                               align_corners=False)

        output = F.softmax(output, dim=1)
        # Remap ground-truth label ids to contiguous visible-class ids
        target = cls_map[target.numpy()]

        # Gather non-zero ranks' targets onto rank 0 via a sum-reduce;
        # rank 0 zeroes its own contribution beforehand
        remote_target = torch.tensor(target).to(device)
        if rank == 0:
            remote_target = torch.zeros_like(remote_target).to(device)

        output = torch.argmax(output, dim=1).cpu().numpy()

        remote_output = torch.tensor(output).to(device)
        if rank == 0:
            remote_output = torch.zeros_like(remote_output).to(device)

        # Every rank also keeps its own local predictions/targets
        for o, t in zip(output, target):
            outputs.append(o)
            targets.append(t)

        torch.distributed.reduce(remote_output, dst=0)
        torch.distributed.reduce(remote_target, dst=0)

        torch.distributed.barrier()

        if rank == 0:
            # NOTE(review): reduce() SUMS tensors across ranks, so with
            # world_size > 2 these are element-wise sums of several ranks'
            # label maps, not any single rank's result - confirm the
            # intended world size before trusting these scores.
            remote_output = remote_output.cpu().numpy()
            remote_target = remote_target.cpu().numpy()
            for o, t in zip(remote_output, remote_target):
                outputs.append(o)
                targets.append(t)

    if rank == 0:

        # Score with the metric matching the protocol
        if run == 'gzlss' or run == 'gflss':
            score, class_iou = scores_gzsl(targets,
                                           outputs,
                                           n_class=len(visible_classes),
                                           seen_cls=cls_map[vals_cls],
                                           unseen_cls=cls_map[valu_cls])
        else:
            score, class_iou = scores(targets,
                                      outputs,
                                      n_class=len(visible_classes))

        for k, v in score.items():
            print(k, v)

        # Attach per-class IoU keyed by human-readable class name
        score["Class IoU"] = {}
        for i in range(len(visible_classes)):
            score["Class IoU"][all_labels[visible_classes[i]]] = class_iou[i]

        name = ""
        name = model_path.replace(pth_extn, "_" + run + ".json")

        if bkg == True:
            with open(name.replace('.json', '_bkg.json'), "w") as f:
                json.dump(score, f, indent=4, sort_keys=True)
        else:
            with open(name, "w") as f:
                json.dump(score, f, indent=4, sort_keys=True)

        print(score["Class IoU"])

    return