Code example #1
# Assumes `torch`, `datetime`, `DataLoader`, `CityscapesDataset`, `model`
# and `device` are already imported/defined earlier in the notebook.
model_path = '../model/cityscapes_mobilenetv2/model.pth'
# model_path = '../model/cityscapes_deeplabv3p_mobilenetv2/model_tmp.pth'
param = torch.load(model_path)  # checkpoint weights saved at model_path
model.load_state_dict(param)
del param  # free the checkpoint dict to reduce memory usage

batch_size = 1

valid_dataset = CityscapesDataset(split='valid', net_type='deeplab')
valid_loader = DataLoader(valid_dataset, batch_size=batch_size, shuffle=True)

images_list = []
labels_list = []
preds_list = []

model.eval()
with torch.no_grad():
    print("Begininng first batch")
    prev = datetime.datetime.now()
    for batched in valid_loader:
        images, labels = batched
        images_np = images.numpy().transpose(0, 2, 3, 1)
        labels_np = labels.numpy()

        images, labels = images.to(device), labels.to(device)
        preds = model.tta(images, net_type='deeplab')
        preds = preds.argmax(dim=1)
        preds_np = preds.detach().cpu().numpy()

        images_list.append(images_np)
        labels_list.append(labels_np)
        preds_list.append(preds_np)
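
The loop above only gathers arrays. A minimal sketch of the follow-up step, mirroring what code example #2 below does with the same three lists (stacking the per-batch arrays and masking the ignore index, assumed to be 255 as in Cityscapes):

import numpy as np

# Stack the per-batch arrays into single arrays along the batch axis.
images = np.concatenate(images_list)
labels = np.concatenate(labels_list)
preds = np.concatenate(preds_list)

# Zero out pixels carrying the ignore index so they do not skew
# visualizations or metrics computed downstream.
ignore_pixel = labels == 255
preds[ignore_pixel] = 0
labels[ignore_pixel] = 0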
Code example #2
class Tester:
    def __init__(
            self,
            model_path='../model/deepglobe_deeplabv3_weights-cityscapes_19-outputs/model.pth',
            dataset='deepglobe',
            output_channels=19,
            split='valid',
            net_type='deeplab',
            batch_size=1,
            shuffle=True):
        """
        Initializes the tester by loading the model with the good parameters.
        :param model_path: Path to model weights
        :param dataset: dataset used amongst {'deepglobe', 'pascal', 'cityscapes'}
        :param output_channels: num of output channels of model
        :param split: split to be used amongst {'train', 'valid'}
        :param net_type: model type to be used amongst {'deeplab', 'unet'}
        :param batch_size: batch size when loading images (always 1 here)
        :param shuffle: when loading images from dataset
        """
        # NOTE: the hard-coded values below override the constructor
        # arguments above.
        model_path = '/home/sfoucher/DEV/pytorch-segmentation/model/my_pascal_unet_res18_scse/model.pth'
        dataset_dir = '/home/sfoucher/DEV/pytorch-segmentation/data/deepglobe_as_pascalvoc/VOCdevkit/VOC2012'
        output_channels = 8
        net_type = 'unet'
        print('[Tester] [Init] Initializing tester...')
        self.dataset = dataset
        self.model_path = model_path
        self.net_type = net_type  # remembered so inference methods use the matching TTA mode

        # Load model
        print('[Tester] [Init] Loading model ' + model_path + ' with ' +
              str(output_channels) + ' output channels...')

        self.device = torch.device(
            'cuda:0' if torch.cuda.is_available() else 'cpu')
        if net_type == 'unet':
            self.model = EncoderDecoderNet(output_channels=output_channels,
                                           enc_type='resnet18',
                                           dec_type='unet_scse',
                                           num_filters=8).to(self.device)
        else:
            self.model = SPPNet(output_channels=output_channels).to(
                self.device)
        param = torch.load(model_path, map_location=self.device)
        self.model.load_state_dict(param)
        del param  # free the checkpoint dict to reduce memory usage

        # Create data loader depending on dataset, split and net type
        if dataset == 'pascal':
            self.valid_dataset = PascalVocDataset(split=split,
                                                  net_type=net_type)
        elif dataset == 'cityscapes':
            self.valid_dataset = CityscapesDataset(split=split,
                                                   net_type=net_type)
        elif dataset == 'deepglobe':
            self.valid_dataset = DeepGlobeDataset(base_dir=dataset_dir,
                                                  target_size=(64, 64),
                                                  split=split,
                                                  net_type=net_type)
        else:
            raise NotImplementedError

        self.valid_loader = DataLoader(self.valid_dataset,
                                       batch_size=batch_size,
                                       shuffle=shuffle)

        print('[Tester] [Init] ...done!')
        print('[Tester] [Init] Tester created.')

    def make_demo_image(self):
        """
        Picks 4 images from dataset randomly and creates image with raw, inferred and label pictures.
        :return: null
        """
        images_list = []
        labels_list = []
        preds_list = []

        print('[Tester] [Demo] Gathering images and inferring...')
        self.model.eval()
        with torch.no_grad():
            for batched in self.valid_loader:
                images, labels = batched
                images_np = images.numpy().transpose(0, 2, 3, 1)
                labels_np = labels.numpy()

                images, labels = images.to(self.device), labels.to(self.device)
                preds = self.model.tta(images, net_type=self.net_type)
                preds = preds.argmax(dim=1)
                preds_np = preds.detach().cpu().numpy()

                images_list.append(images_np)
                labels_list.append(labels_np)
                preds_list.append(preds_np)

                if len(images_list) == 4:
                    break

        print('[Tester] [Demo] Processing results...')

        images = np.concatenate(images_list)
        labels = np.concatenate(labels_list)
        preds = np.concatenate(preds_list)

        # Ignore index
        ignore_pixel = labels == 255
        preds[ignore_pixel] = 0
        labels[ignore_pixel] = 0

        # Plot
        fig, axes = plt.subplots(4, 3, figsize=(12, 10))
        plt.tight_layout()

        axes[0, 0].set_title('input image')
        axes[0, 1].set_title('prediction')
        axes[0, 2].set_title('ground truth')

        for ax, img, lbl, pred in zip(axes, images, labels, preds):
            ax[0].imshow(
                minmax_normalize(img, norm_range=(0, 1), orig_range=(-1, 1)))
            ax[1].imshow(pred)
            ax[2].imshow(lbl)
            for a in ax:
                a.set_xticks([])
                a.set_yticks([])

        plt.savefig('eval.png')
        plt.show()
        plt.close()

    def infer_image_by_path(
            self,
            image_path='/home/ubuntu/data/Segmentation/pytorch-segmentation/test1.jpg',
            output_name='single_test_output',
            display=False):
        """
        Opens image from fs and passes it through the loaded network, then displays and saves the result.
        :param output_name: Output image name
        :param display: Display images in windows or not
        :param image_path: Path of input images
        :return: null
        """
        if self.dataset != 'deepglobe':
            print(
                '[ERROR] Inference script only available for the Deepglobe dataset.'
            )
            exit(-1)

        print('[Tester] [Single test] Opening image ' + image_path + '...')
        # Open and prepare image
        input_img = Image.open(image_path)
        if display:
            input_img.show()

        custom_img = np.array(input_img)
        custom_img = minmax_normalize(custom_img, norm_range=(-1, 1))
        custom_img = custom_img.transpose(2, 0, 1)
        custom_img = torch.from_numpy(custom_img).float().unsqueeze(0)

        print('[Tester] [Single test] Inferring image...')
        self.model.eval().to(self.device)
        with torch.no_grad():
            # Send to GPU, infer and collect
            custom_img = custom_img.to(self.device)
            preds = self.model.tta(custom_img, net_type=self.net_type)
            preds = preds.argmax(dim=1)
            preds_np = preds.detach().cpu().numpy()

        print('[Tester] [Single test] Processing result...')

        good_preds = preds_np[0]
        good_mask = Image.fromarray(good_preds.astype('uint8'), 'P')

        # Transform mask to set good indexes and palette
        good_mask = DeepGlobeDataset.index_to_palette(good_mask)

        if display:
            good_mask.show()
        good_mask.save(output_name + '_prediction.png')

        overlay = Tester.make_overlay(good_mask, input_img, 100)
        if display:
            overlay.show()
        overlay.save(output_name + '_overlay.png')
        print('[Tester] [Single test] Done.')

    def infer_image_by_name(self,
                            image_name='255876',
                            output_name='single_test_output',
                            display=True):
        """
        Opens image from fs and passes it through the loaded network, then displays and saves the result.
        :param output_name: Output image name
        :param display: Display images in windows or not
        :param image_path: Path of input images
        :return: null
        """
        if self.dataset != 'deepglobe':
            print(
                '[ERROR] Inference script only available for the Deepglobe dataset.'
            )
            exit(-1)

        print('[Tester] [Single test] Opening image ' + image_name + '...')
        # Open and prepare image
        input_img = Image.open(
            '/home/ubuntu/data/Segmentation/pytorch-segmentation/data/deepglobe_as_pascalvoc/VOCdevkit/VOC2012/JPEGImages/'
            + image_name + '.jpg')
        label = Image.open(
            '/home/ubuntu/data/Segmentation/pytorch-segmentation/data/deepglobe_as_pascalvoc/VOCdevkit/VOC2012/SegmentationClass/'
            + image_name + '.png')
        label_raw = copy.deepcopy(label)
        overlay_ground_truth = Tester.make_overlay(label_raw, input_img, 100)
        label = label.convert('P', palette=Image.WEB)

        if display:
            input_img.show(title='Input raw image')
            label.show(title='Ground truth')
            overlay_ground_truth.show(title='Overlay_ground_truth')

        custom_img = np.array(input_img)
        custom_img = minmax_normalize(custom_img, norm_range=(-1, 1))
        custom_img = custom_img.transpose(2, 0, 1)
        custom_img = torch.from_numpy(custom_img).float().unsqueeze(0)

        print('[Tester] [Single test] Inferring image...')
        self.model.eval()
        with torch.no_grad():
            # Send to GPU, infer and collect
            custom_img = custom_img.to(self.device)
            preds = self.model.tta(custom_img, net_type=self.net_type)
            preds = preds.argmax(dim=1)
            preds_np = preds.detach().cpu().numpy()

        print('[Tester] [Single test] Processing result...')

        good_preds = preds_np[0]
        good_mask = Image.fromarray(good_preds.astype('uint8'), 'P')

        # Transform mask to set good indexes and palette
        good_mask = DeepGlobeDataset.index_to_palette(good_mask)

        overlay = Tester.make_overlay(good_mask, input_img, 100)
        if display:
            good_mask.show(title='Prediction')
            overlay.show(title='Overlay')

        good_mask.save(output_name + '_prediction.png')
        overlay.save(output_name + '_overlay.png')
        overlay_ground_truth.save(output_name + '_overlay_truth.png')

        print('[Tester] [Single test] Done.')

    @staticmethod
    def make_overlay(pred_in, img_in, transparency):
        """
        Build PIL image from input img and mask overlay with given transparency.
        :param pred_in: mask input
        :param img_in: img input
        :param transparency: transparency wanted between 0..255
        :return: PIL image result
        """
        pred = copy.deepcopy(pred_in)
        img = copy.deepcopy(img_in)
        print('[Tester] [Overlay] Building overlay...')
        if transparency < 0 or transparency > 255:
            print('[ERROR] Transparency should be in range 0..255.')
            exit(-1)
        # Make the prediction mask semi-transparent
        pred = pred.convert('RGBA')
        data = pred.getdata()  # list of per-pixel RGBA tuples
        new_data = []
        for a in data:
            a = a[:3]  # keep only the RGB components
            a = a + (transparency,)  # append the requested alpha value
            new_data.append(a)
        pred.putdata(new_data)

        # Paste the translucent prediction mask onto the input image
        img.paste(pred, (0, 0), pred)
        print('[Tester] [Overlay] Done.')
        return img
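
A hedged usage sketch for the Tester class above; the checkpoint path is a placeholder, and note that the constructor currently overrides most of these arguments with hard-coded values:

if __name__ == '__main__':
    # Instantiate the tester; arguments mirror the constructor signature.
    tester = Tester(
        model_path='../model/my_pascal_unet_res18_scse/model.pth',
        dataset='deepglobe',
        output_channels=8,
        net_type='unet')
    # Build the four-image demo figure, then run one named inference.
    tester.make_demo_image()
    tester.infer_image_by_name(image_name='255876', display=False)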
Code example #3
def eval_from_model(split,
                    output_channels,
                    model_path,
                    postproc=False,
                    vis=True,
                    debug=True):

    model_path = Path(model_path)
    path, model_dir = os.path.split(
        model_path.parent)  # split the parent directory into its path and name
    device = torch.device('cuda:0' if torch.cuda.is_available() else
                          'cpu')  # work on GPU if available

    print(f'Device: {device}')

    if 'mnv2' in model_dir:
        model = SPPNet(enc_type='mobilenetv2',
                       dec_type='maspp',
                       output_channels=output_channels).to(device)
        defaults = True
    else:
        model = SPPNet(output_channels=output_channels).to(device)
        defaults = False

    if device == torch.device('cpu'):
        param = torch.load(model_path,
                           map_location='cpu')  # remap GPU-saved tensors to CPU
    else:
        param = torch.load(model_path)  # parameters saved in the checkpoint

    print(f'Parameters loaded from {model_path}')

    model.load_state_dict(param)  # load the trained weights into the model
    del param  # free the checkpoint dict to reduce memory usage

    dataset = SherbrookeDataset(
        split=split, net_type='deeplab',
        defaults=defaults)  # Sherbrooke dataset, requested split
    classes = np.arange(1, dataset.n_classes)
    img_paths = dataset.img_paths
    base_dir = dataset.base_dir
    split = dataset.split
    if len(img_paths) == 0:
        raise ValueError('Your dataset seems empty...')
    else:
        print(f'{len(img_paths)} images found in {base_dir}/{split}')

    model.eval()  # switch to evaluation mode (freezes dropout and batch-norm statistics)

    #print(f'Files containing \'{filetype}\' will be converted to \'{colortype}\' colormap and saved to:\n{output_folder}')

    valid_ious = []
    count = 0
    predicted_boxes = {}
    ground_truth_boxes = {}

    with torch.no_grad():
        # each dataset item is an (image, label) pair of torch tensors
        print('Generating predictions...')

        with tqdm(range(len(dataset))) as _tqdm:
            for i in _tqdm:
                count += 1
                image, label = dataset[i]
                img_path = dataset.img_paths[i]
                #filename = img_path.stem
                filename = img_path.name

                #if isinstance(image, tuple):  # take only the image if the label is also returned by __getitem__
                #    image = image[0]

                image = image[None]  # add a batch dimension, mimicking a DataLoader
                image = image.to(device)
                # next line reaches to tta.py --> net.py --> xception.py ...
                # output: predictions (segmentation maps)
                pred = model.tta(image, net_type='deeplab')
                # pred = model(image)
                # pred = F.interpolate(pred, size=label.shape, mode='bilinear', align_corners=True)
                # pred = pred.argmax(dim=1)
                pred = pred.detach().cpu().numpy()
                label = label.numpy()

                # take the only prediction in this single-image batch
                pred = pred[0]

                pred = softmax_from_feat_map(pred)

                # take channel corresponding to softmax scores in class 1. Reduces array to 2D
                pred = pred[1, :, :]

                # a 4:1 aspect ratio means the top crop was applied; undo it
                if pred.shape[1] / pred.shape[0] == 4:
                    pred = topcrop(pred, reverse=True)
                    label = topcrop(label, reverse=True)

                if debug:
                    print(
                        f'Prediction shape after evaluation: {pred.shape}\nLabel shape: {label.shape}'
                    )

                if defaults:
                    # set all pixels in pred corresponding to an ignore pixel in label to 0
                    pred[label == dataset.ignore_index] = 0

                #perc = round(len(np.unique(pred)) * 0.5)  # find index at median
                #val_at_perc = np.unique(pred)[perc]
                val_at_perc = 0.0002  # fixed activation threshold
                #print(
                #    f'Value at median in prediction is: {val_at_perc}')

                pred_masked = np.where(pred >= val_at_perc, pred,
                                       np.nan)  # NaN out sub-threshold scores
                pred_binary = threshold(
                    pred.copy(), value=val_at_perc
                )  # set values under the threshold to 0, else to 1: binary array
                bbox_list, scores_list = contour_proc(pred_binary, pred_masked)

                #add key to predicted_boxes: {'filename': {'boxes':bbox_list, 'scores':scores_list}}
                predicted_boxes.update(
                    {filename: {
                        "boxes": bbox_list,
                        "scores": scores_list
                    }})

                #pred = filter_by_activation(pred, percentile=90)
                #pred = threshold(pred)

                bbox_list_lbl, _ = contour_proc(label, label.copy())

                #add key to predicted_boxes: {'filename': {'boxes':bbox_list, 'scores':scores_list}}
                ground_truth_boxes.update({filename: bbox_list_lbl})

                if debug:
                    print(f'Label unique values: {np.unique(label)}')

                _tqdm.set_postfix(OrderedDict(last_image=f'{filename}'))

    with open('predicted_boxes_GSV.json', 'w') as json_file:
        json.dump(predicted_boxes, json_file, sort_keys=True)

    with open('ground_truth_boxes_GSV.json', 'w') as json_file:
        json.dump(ground_truth_boxes, json_file, sort_keys=True)
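
A small sketch, assuming the two JSON files written above, of how they might be read back before computing detection metrics (the metric computation itself is outside this example):

import json

with open('predicted_boxes_GSV.json') as f:
    predicted_boxes = json.load(f)  # {filename: {'boxes': [...], 'scores': [...]}}

with open('ground_truth_boxes_GSV.json') as f:
    ground_truth_boxes = json.load(f)  # {filename: [boxes]}

print(f'{len(predicted_boxes)} files with predictions, '
      f'{len(ground_truth_boxes)} files with ground truth')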
Code example #4
def eval_from_model(split,
                    output_channels,
                    model_path,
                    postproc=False,
                    vis=True,
                    debug=True,
                    mean_AP=False):

    model_path = Path(model_path)
    path, model_dir = os.path.split(
        model_path.parent)  # split the parent directory into its path and name
    device = torch.device('cuda:0' if torch.cuda.is_available() else
                          'cpu')  # work on GPU if available

    print(f'Device: {device}')

    if 'mnv2' in model_dir:
        model = SPPNet(enc_type='mobilenetv2',
                       dec_type='maspp',
                       output_channels=output_channels).to(device)
        defects = True
    else:
        model = SPPNet(output_channels=output_channels).to(device)
        defects = False

    if device == torch.device('cpu'):
        param = torch.load(model_path,
                           map_location='cpu')  # remap GPU-saved tensors to CPU
    else:
        param = torch.load(model_path)  # parameters saved in the checkpoint

    print(f'Parameters loaded from {model_path}')

    model.load_state_dict(param)  # load the trained weights into the model
    del param  # free the checkpoint dict to reduce memory usage

    dataset = SherbrookeDataset(
        split=split, net_type='deeplab',
        defects=defects)  # Sherbrooke dataset, requested split
    classes = np.arange(1, dataset.n_classes)
    img_paths = dataset.img_paths
    base_dir = dataset.base_dir
    split = dataset.split
    if len(img_paths) == 0:
        raise ValueError('Your dataset seems empty...')
    else:
        print(f'{len(img_paths)} images found in {base_dir}/{split}')

    model.eval()  # switch to evaluation mode (freezes dropout and batch-norm statistics)

    #print(f'Files containing \'{filetype}\' will be converted to \'{colortype}\' colormap and saved to:\n{output_folder}')

    valid_ious = []
    count = 0
    predicted_boxes = {}
    ground_truth_boxes = {}

    with torch.no_grad():
        # each dataset item is an (image, label) pair of torch tensors
        print('Generating predictions...')

        with tqdm(range(len(dataset))) as _tqdm:
            for i in _tqdm:
                count += 1
                if count % 10 == 0:
                    print(f'Evaluation progress: {count}/{len(img_paths)}')
                image, label = dataset[i]
                img_path = dataset.img_paths[i]
                orig_image = np.array(Image.open(img_path))
                filename = img_path.name

                #if isinstance(image, tuple):  # take only the image if the label is also returned by __getitem__
                #    image = image[0]

                image = image[None]  # add a batch dimension, mimicking a DataLoader
                image = image.to(device)
                # next line reaches to tta.py --> net.py --> xception.py ...
                # output: predictions (segmentation maps)
                pred = model.tta(image, net_type='deeplab')
                # pred = model(image)
                # pred = F.interpolate(pred, size=label.shape, mode='bilinear', align_corners=True)

                # take the only prediction in this single-image batch
                pred = pred[0]

                # class scores sit in dim 0 now that the batch dim is gone
                softmax = torch.nn.Softmax(dim=0)
                pred = softmax(pred)
                #pred = softmax_from_feat_map(pred)

                pred = pred.detach().cpu().numpy()
                label = label.numpy()

                if pred.shape[1] / pred.shape[0] == 2:
                    pred, label = dataset.postprocess(pred, label)

                if mean_AP and not postproc:
                    raise Exception(
                        'the postproc argument of eval_from_model must be True when mean_AP is True'
                    )
                elif postproc:
                    # take channel corresponding to softmax scores in channel 1 (class 1). Reduces array to 2D
                    pred = pred[1, :, :]

                    if dataset.defects:
                        # set all pixels in pred corresponding to an ignore pixel in label to 0
                        pred[label == dataset.ignore_index] = 0

                    if mean_AP:
                        val_at_perc = 0.0002  # fixed activation threshold
                        # print(
                        #    f'Value at median in prediction is: {val_at_perc}')

                        # copy the array, replacing all values under the threshold with NaN
                        pred_masked = np.where(pred >= val_at_perc, pred,
                                               np.nan)

                        #create copy of pred array and set all values above threshold to 1 and under to 0
                        pred_binary = threshold(pred.copy(), value=val_at_perc)

                        # extract bounding boxes and confidence scores from the binary mask
                        bbox_list, scores_list = contour_proc(
                            pred_binary, pred_masked)

                        # add key to predicted_boxes: {'filename': {'boxes':bbox_list, 'scores':scores_list}}
                        predicted_boxes.update({
                            filename: {
                                "boxes": bbox_list,
                                "scores": scores_list
                            }
                        })

                        # pred = filter_by_activation(pred, percentile=90)
                        # pred = threshold(pred)

                        bbox_list_lbl, _ = contour_proc(label, label.copy())

                        # add key to predicted_boxes: {'filename': {'boxes':bbox_list, 'scores':scores_list}}
                        ground_truth_boxes.update({filename: bbox_list_lbl})

                    pred = filter_by_activation(pred, percentile=90)

                else:
                    pred = np.argmax(pred, axis=0)

                if debug:
                    print(f'Label unique values: {np.unique(label)}')

                # print(np.unique(pred))
                if output_channels == 19:
                    # merge every class other than 0 (background) and 1 (sidewalk)
                    for cls in range(2, 19):
                        pred[pred == cls] = 0  # convert these classes to the background value

                if dataset.split == 'val':
                    # compute IoU against the ground truth
                    iou = compute_iou_batch(pred, label, classes)
                    print(f'IoU for {filename}: {iou}')
                    valid_ious.append(iou)

                # output_dir is needed by both the vis and bootstrap branches
                output_dir = Path(
                    f'../data/output/{model_dir}/{split}/{os.path.split(img_path.parent)[1]}'
                )
                output_dir.mkdir(parents=True, exist_ok=True)

                if vis:
                    folder = output_dir.joinpath('figures')
                    folder.mkdir(parents=True, exist_ok=True)
                    label[label == 255] = 0
                    conf_overlay = np.add(label, pred * 2)
                    print(np.unique(conf_overlay))
                    confus_overlay = vis_segmentation(conf_overlay, img_path)
                    confus_overlay.save(
                        folder.joinpath(f'{filename}_overlay.jpg'))

                elif dataset.split == 'bootstrap':
                    # convert the value 1 to 8, for bootstrapping
                    pred = encode_mask(pred)

                    pred_pil = Image.fromarray(pred.astype(np.uint8))
                    img_pil = Image.open(img_path)
                    if pred_pil.size != img_pil.size:
                        pred_pil = pred_pil.resize(
                            (img_pil.size[0], img_pil.size[1]), Image.NEAREST)

                    pred_pil.save(
                        output_dir.joinpath(f'{filename}_gtFine_labelIds.png'))
                    #save_colormap(pred[0], savename, output_dir, filetype, colortype=colortype)
                else:
                    raise NotImplementedError

                _tqdm.set_postfix(OrderedDict(last_image=f'{filename}'))

    if mean_AP:
        with open('predicted_boxes_GSV.json', 'w') as json_file:
            json.dump(predicted_boxes, json_file, sort_keys=True)

        with open('ground_truth_boxes_GSV.json', 'w') as json_file:
            json.dump(ground_truth_boxes, json_file, sort_keys=True)

    if dataset.split == 'val':
        valid_iou = np.nanmean(valid_ious)
        print(f'mean valid iou: {valid_iou}')
        #print(f'Confusion matrix: \n{conf_mat}')

    if vis:
        print(f'Files were saved to {output_dir.parent}')
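
A hedged invocation sketch for eval_from_model; the checkpoint path is a placeholder and must point at an existing model (a parent directory name containing 'mnv2' selects the MobileNetV2 variant, as shown above):

if __name__ == '__main__':
    eval_from_model(split='val',
                    output_channels=19,
                    model_path='../model/cityscapes_deeplab_v3p/model.pth',
                    postproc=True,
                    vis=True,
                    debug=False,
                    mean_AP=True)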