# Example 1
def evaluate(args, model, image_list, seg_classes, device):
    """Run segmentation inference over *image_list* and save one mask per image.

    Each input image is resized to ``args.im_size``, pushed through *model*,
    and the arg-max label map is resized back to the original resolution and
    written as a PNG under ``args.savedir``.
    """
    target_size = tuple(args.im_size)

    # Pascal masks are written as paletted PNGs; build the palette once up front.
    if args.dataset == 'pascal':
        from utilities.color_map import VOCColormap
        cmap = VOCColormap().get_color_map_voc()
    else:
        cmap = None

    model.eval()
    inter_meter = AverageMeter()
    union_meter = AverageMeter()

    miou_class = MIOU(num_classes=seg_classes)

    for _, image_path in tqdm(enumerate(image_list)):
        rgb = Image.open(image_path).convert('RGB')
        width, height = rgb.size

        # Preprocess, add batch dim, run the network, then drop the batch dim.
        tensor = data_transform(rgb, target_size).unsqueeze(0).to(device)
        prediction = model(tensor).squeeze(0)
        # Arg-max over the class axis gives the per-pixel label map.
        label_map = prediction.max(0)[1].byte().to(device='cpu').numpy()

        if args.dataset == 'city':
            # Cityscapes uses different IDs for training and evaluation,
            # so map train IDs back to the official label IDs.
            label_map = relabel(label_map)

        # Back to a PIL image at the original resolution (nearest keeps labels crisp).
        mask = Image.fromarray(label_map).resize((width, height), Image.NEAREST)

        # Pascal accepts colored (paletted) segmentation masks.
        if args.dataset == 'pascal':
            mask.putpalette(cmap)

        # Derive the output file name from the input, swapping the extension to png.
        file_name = image_path.split('/')[-1]
        extension = image_path.split('.')[-1]
        save_path = '{}/{}'.format(args.savedir, file_name.replace(extension, 'png'))
        mask.save(save_path)
# Example 2
import torch
from utilities.print_utils import *
import os
from PIL import Image
from utilities.color_map import VOCColormap
import glob
from torchvision.transforms import functional as F
from transforms.classification.data_transforms import MEAN, STD
from tqdm import tqdm
from argparse import ArgumentParser

# Flat VOC color palette (one RGB triple per class) used to colorize masks.
COLOR_MAP = VOCColormap().get_color_map()
# Extensions accepted as input images — presumably used to filter files when
# scanning a directory; the consuming code is not visible in this chunk.
IMAGE_EXTENSIONS = ['.jpg', '.png', '.jpeg']

def data_transform(img, im_size):
    """Resize *img* to *im_size*, convert it to a tensor, and normalize.

    Normalization uses the project-wide MEAN/STD statistics; the returned
    tensor is ready to be batched and fed to the model.
    """
    resized = img.resize(im_size, Image.BILINEAR)
    # to_tensor scales pixel values into [0, 1] before normalization.
    return F.normalize(F.to_tensor(resized), MEAN, STD)

def run_segmentation(args, model, image_list, device):
    im_size = tuple(args.im_size)

    model.eval()
    with torch.no_grad():
        for imgName in tqdm(image_list):
            img = Image.open(imgName).convert('RGB')
            img_clone = img.copy()
            w, h = img.size

            img = data_transform(img, im_size)
# Example 3
import argparse
import numpy as np
import torch
import torchvision
from utilities.print_utils import *
from model.detection.ssd import ssd
import os
from model.detection.box_predictor import BoxPredictor
from PIL import Image
import cv2
from utilities.color_map import VOCColormap
import glob


# Per-class RGB tuples for drawing detections. The comprehension replaces a
# manual append loop and avoids leaking the loop variable (`cmap`) into the
# module namespace; int() guards against non-int palette components.
COLOR_MAP = [
    (int(r), int(g), int(b))
    for r, g, b in VOCColormap().get_color_map()
]

# NOTE(review): despite the name, this is a cv2 font *face* constant,
# not a size — it is presumably passed as the fontFace argument to cv2 text calls.
FONT_SIZE = cv2.FONT_HERSHEY_SIMPLEX
LABEL_COLOR = [255, 255, 255]    # white text for box labels (BGR/RGB-agnostic)
TEXT_THICKNESS = 2               # stroke thickness for label text
RECT_BORDER_THICKNESS = 3        # stroke thickness for bounding-box rectangles


def main(args):
    if args.im_size in [300, 512]:
        from model.detection.ssd_config import get_config
        cfg = get_config(args.im_size)
    else:
        print_error_message('{} image size not supported'.format(args.im_size))