Code example #1
import numpy as np
import torch


def prediction_result(model, image):
    # Forward pass of the student network on the GPU input.
    prediction = model.student(image.cuda())
    # Upsample the logits back to the input resolution.
    interp = torch.nn.Upsample(size=image.shape[-2:],
                               mode='bilinear',
                               align_corners=True)
    if isinstance(prediction, list):
        prediction = prediction[0]  # shape for whole: b, 19, 129, 257
    prediction = interp(prediction).cpu().data[0].numpy().transpose(
        1, 2, 0)  # (1024, 2048, 19)
    # print(prediction.shape)
    # Per-pixel argmax over the 19 classes gives a train-ID label map.
    seg_pred = np.asarray(np.argmax(prediction, axis=2),
                          dtype=np.uint8)  # (1024, 2048)
    # id2trainId / id_to_trainid are helpers defined elsewhere in the project;
    # reverse=True maps train IDs back to the original label IDs.
    seg_pred = id2trainId(seg_pred, id_to_trainid, reverse=True)
    return seg_pred
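As a rough illustration of how this function might be exercised, here is a minimal sketch. The SegWrapper class, the placeholder 1x1 convolution standing in for the real student network, and the dummy id2trainId / id_to_trainid definitions are assumptions made only to keep the sketch runnable; they are not part of the original project.

import torch

# Dummy stand-ins for helpers the function assumes to exist elsewhere.
id_to_trainid = {i: i for i in range(19)}

def id2trainId(label, id_map, reverse=False):
    return label  # identity mapping, used only for this sketch

class SegWrapper:
    # Hypothetical container mirroring the `model.student` access above.
    def __init__(self, student):
        self.student = student

model = SegWrapper(torch.nn.Conv2d(3, 19, kernel_size=1).cuda().eval())
image = torch.randn(1, 3, 1024, 2048)           # dummy Cityscapes-sized input
with torch.no_grad():
    seg_pred = prediction_result(model, image)  # (1024, 2048) uint8 label map
print(seg_pred.shape, seg_pred.dtype)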
Code example #2
        os.makedirs(save_path)
    for index, batch in tqdm(enumerate(loader)):
        image, label, size, name = batch
        print(image.shape)
        # img_np = image.squeeze().numpy().transpose(1,2,0)
        image, label = image.cuda(), label.cuda()
        # print(image.device)
        B, C, H, W = image.shape

        # Blank separator strips inserted between the stacked label maps
        # (pivot2 and pivot3 are defined but not used below).
        pivot1 = np.zeros((5, W), dtype=np.uint8)
        pivot2 = np.zeros((10, W), dtype=np.uint8)
        pivot3 = np.zeros((5, W, 3), dtype=np.uint8)

        # Ground truth: move to CPU, convert to uint8, and map train IDs
        # back to the original label IDs.
        gt = label.squeeze().cpu()
        gt = np.asarray(gt, dtype=np.uint8)
        gt = id2trainId(gt, id_to_trainid, reverse=True)

        seg_pred1 = prediction_result(model1, image)
        seg_pred2 = prediction_result(model2, image)

        # Mask out the ignore regions (label 255 in the ground truth) in both
        # predictions.
        seg_pred1[np.where(gt == 255)] = 255
        seg_pred2[np.where(gt == 255)] = 255
        # Stack ground truth and the two predictions vertically, separated by
        # blank strips, and save as a paletted PNG (palette is defined
        # elsewhere in the project).
        output = np.concatenate([gt, pivot1, seg_pred1, pivot1, seg_pred2],
                                axis=0)
        output = PILImage.fromarray(output)
        output.putpalette(palette)
        save_name = os.path.join(save_path, str(index) + '.png')
        output.save(save_name)

        # Reload the saved visualisation and the original RGB input with OpenCV.
        output = cv2.imread(save_name)
        img_np = cv2.imread(trainset.files[index]["img"], cv2.IMREAD_COLOR)
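The snippet ends immediately after both images are read back with OpenCV, so what the original code does with them is not shown. A purely hypothetical continuation would be to stack the raw frame above the colour-coded label maps and write the combined panel:

        # Hypothetical continuation, not part of the original snippet.
        if img_np.shape[1] != output.shape[1]:
            # Match widths before stacking vertically.
            img_np = cv2.resize(img_np, (output.shape[1], img_np.shape[0]))
        panel = np.concatenate([img_np, output], axis=0)  # frame above labels
        cv2.imwrite(os.path.join(save_path, str(index) + '_panel.png'), panel)

Since both arrays are loaded with cv2.imread they share the same BGR channel order and dtype, so they can be concatenated and written directly.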