def create_dataset(mode='G2C'):
    """Build the source/target/validation datasets for domain adaptation.

    :param mode: adaptation direction; one of 'G2C' (GTAV -> Cityscapes)
        or 'S2C' (SYNTHIA -> Cityscapes).  Only 'G2C' is implemented.
    :return: tuple ``(source_dataset, target_dataset, val_dataset)``
    :raises ValueError: if ``mode`` is not a supported mode.
    :raises NotImplementedError: if ``mode`` is supported but not wired up.
    """
    support_mode = ['G2C', 'S2C']
    # Validate with explicit raises rather than `assert`: asserts vanish
    # under `python -O`, and `x in seq` is the idiomatic spelling of
    # `seq.__contains__(x)`.
    if mode not in support_mode:
        raise ValueError(f'mode must be one of {support_mode}, got {mode!r}')
    if mode != 'G2C':
        # Previously 'S2C' fell through to an UnboundLocalError at the
        # return statement; fail with a clear message instead.
        raise NotImplementedError(f'mode {mode!r} is not implemented yet')

    # Source domain: labeled GTAV training split (capped at max_iter).
    source_dataset = GTAVDataset(
        root=g2c_config['source_data_dir'],
        list_path=osp.join(g2c_config['source_list_dir'], 'train.txt'),
        max_iters=g2c_config['max_iter'],
        crop_size=tuple(g2c_config['source_input_size']),
        mean=tuple(g2c_config['mean']),
        std=tuple(g2c_config['std']),
        set='train',
        ignore_label=g2c_config['ignore_label'])

    # Target domain: Cityscapes training split (same max_iter cap).
    target_dataset = CityscapesDataset(
        root=g2c_config['target_data_dir'],
        list_path=osp.join(g2c_config['target_list_dir'], 'train.txt'),
        max_iters=g2c_config['max_iter'],
        crop_size=tuple(g2c_config['target_input_size']),
        mean=tuple(g2c_config['mean']),
        std=tuple(g2c_config['std']),
        set='train',
        ignore_label=g2c_config['ignore_label'])

    # Validation: Cityscapes val split (no max_iters cap).
    val_dataset = CityscapesDataset(
        root=g2c_config['target_data_dir'],
        list_path=osp.join(g2c_config['target_list_dir'], 'val.txt'),
        crop_size=tuple(g2c_config['target_input_size']),
        mean=tuple(g2c_config['mean']),
        std=tuple(g2c_config['std']),
        set='val',
        ignore_label=g2c_config['ignore_label'])

    return source_dataset, target_dataset, val_dataset
# --- Ejemplo n.º 2 (0) ---
    def __init__(
            self,
            model_path='../model/deepglobe_deeplabv3_weights-cityscapes_19-outputs/model.pth',
            dataset='deepglobe',
            output_channels=19,
            split='valid',
            net_type='deeplab',
            batch_size=1,
            shuffle=True,
            fp16=True):
        """Load a segmentation model and build a validation data loader.

        :param model_path: path to the serialized model weights (.pth)
        :param dataset: dataset used amongst {'deepglobe', 'pascal', 'cityscapes'}
        :param output_channels: num of output channels of model
        :param split: split to be used amongst {'train', 'valid'}
        :param net_type: model type to be used amongst {'deeplab', 'unet'}
        :param batch_size: batch size when loading images
        :param shuffle: when loading images from dataset
        :param fp16: run the model in half precision (previously hard-coded
            to True; the default preserves that behavior)
        :raises NotImplementedError: for an unknown ``dataset`` name
        """
        print('[Score Calculator] Initializing calculator...')
        self.dataset = dataset
        self.model_path = model_path
        self.net_type = net_type

        self.fp16 = fp16

        # Load model
        print('[Score Calculator] Loading model ' + model_path + ' with ' +
              str(output_channels) + ' output channels...')

        self.device = torch.device(
            'cuda:0' if torch.cuda.is_available() else 'cpu')
        self.model = SPPNet(output_channels=output_channels).to(self.device)
        param = torch.load(model_path)
        self.model.load_state_dict(param)
        del param  # free the state dict once copied into the model

        # Create data loader depending on dataset, split and net type.
        # Class ids start at 1 (id 0 presumably background/void — confirm
        # against the dataset implementations).
        if dataset == 'pascal':
            self.valid_dataset = PascalVocDataset(split=split,
                                                  net_type=net_type)
            self.classes = np.arange(1, 22)
        elif dataset == 'cityscapes':
            self.valid_dataset = CityscapesDataset(split=split,
                                                   net_type=net_type)
            self.classes = np.arange(1, 20)
        elif dataset == 'deepglobe':
            self.valid_dataset = DeepGlobeDataset(split=split,
                                                  net_type=net_type)
            self.classes = np.arange(1, 8)
        else:
            raise NotImplementedError

        self.valid_loader = DataLoader(self.valid_dataset,
                                       batch_size=batch_size,
                                       shuffle=shuffle)

        # Optionally convert the model to fp16; BN_convert_float keeps the
        # BatchNorm layers in fp32 while the rest of the network is half.
        if self.fp16:
            from utils.apex.apex.fp16_utils.fp16util import BN_convert_float
            self.model = BN_convert_float(self.model.half())
            print('[Score Calculator] fp16 applied')

        print('[Score Calculator] ...done!')
        print('[Score Calculator] Calculator created.')
    def __init__(
            self,
            model_path='../model/deepglobe_deeplabv3_weights-cityscapes_19-outputs/model.pth',
            dataset='deepglobe',
            output_channels=19,
            split='valid',
            net_type='deeplab',
            batch_size=1,
            shuffle=True):
        """Set up the tester: load the model weights and build a data loader.

        :param model_path: path to the serialized model weights
        :param dataset: one of 'deepglobe', 'pascal' or 'cityscapes'
        :param output_channels: number of output channels of the model
        :param split: dataset split to use ('train' or 'valid')
        :param net_type: model architecture ('deeplab' or 'unet')
        :param batch_size: loader batch size (always 1 here)
        :param shuffle: whether the loader shuffles the images
        :raises NotImplementedError: for an unknown ``dataset`` name
        """
        print('[Tester] [Init] Initializing tester...')
        self.dataset = dataset
        self.model_path = model_path

        # Load model
        print('[Tester] [Init] Loading model ' + model_path + ' with ' +
              str(output_channels) + ' output channels...')

        use_cuda = torch.cuda.is_available()
        self.device = torch.device('cuda:0' if use_cuda else 'cpu')
        self.model = SPPNet(output_channels=output_channels).to(self.device)
        state_dict = torch.load(model_path)
        self.model.load_state_dict(state_dict)
        del state_dict

        # Pick the dataset class matching the requested dataset name.
        dataset_factories = {
            'pascal': PascalVocDataset,
            'cityscapes': CityscapesDataset,
            'deepglobe': DeepGlobeDataset,
        }
        if dataset not in dataset_factories:
            raise NotImplementedError
        self.valid_dataset = dataset_factories[dataset](split=split,
                                                        net_type=net_type)

        self.valid_loader = DataLoader(self.valid_dataset,
                                       batch_size=batch_size,
                                       shuffle=shuffle)

        print('[Tester] [Init] ...done!')
        print('[Tester] [Init] Tester created.')
def create_dataset(dataset_name='Cityscapes', set='train'):
    """Create a dataset for the given dataset name and split.

    NOTE: the parameter name ``set`` shadows the builtin; it is kept
    unchanged for backward compatibility with existing callers.

    :param dataset_name: one of 'Cityscapes', 'PASCAL_VOC', 'KITTI', 'BDD'
        (only 'Cityscapes' is implemented).
    :param set: split name, one of 'train', 'val', 'test'.
    :return: the constructed dataset.
    :raises ValueError: on an unsupported dataset name or split.
    :raises NotImplementedError: for supported-but-unimplemented datasets.
    """
    support_dataset = ['Cityscapes', 'PASCAL_VOC', 'KITTI', 'BDD']
    support_set = ['train', 'val', 'test']
    # Explicit raises instead of asserts (asserts vanish under -O), and
    # `x in seq` instead of `seq.__contains__(x)`.
    if dataset_name not in support_dataset:
        raise ValueError(
            f'dataset_name must be one of {support_dataset}, got {dataset_name!r}')
    if set not in support_set:
        raise ValueError(f'set must be one of {support_set}, got {set!r}')
    if dataset_name != 'Cityscapes':
        # Previously e.g. 'KITTI' fell through to an UnboundLocalError at
        # the return statement.
        raise NotImplementedError(f'{dataset_name!r} is not implemented yet')

    dataset = CityscapesDataset(root=cityscapes_config['data_dir'],
                                list_path=osp.join(cityscapes_config['list_dir'], set + '.txt'),
                                crop_size=tuple(cityscapes_config['crop_size']),
                                mean=tuple(cityscapes_config['mean']),
                                std=tuple(cityscapes_config['std']), set=set,
                                ignore_label=cityscapes_config['ignore_label'])

    return dataset
# --- Ejemplo n.º 5 (0) ---
# Select GPU 0 when CUDA is available, otherwise fall back to CPU.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
#model = SPPNet(output_channels=19).to(device)
# MobileNetV2 encoder with a 'maspp' decoder and 19 output channels.
model = SPPNet(output_channels=19, enc_type='mobilenetv2',
               dec_type='maspp').to(device)
# Print a layer-by-layer summary for a 3x128x256 input.
summary(model, (3, 128, 256))
model_path = '../model/cityscapes_deeplab_v3_plus/model.pth'
#model_path = '../model/cityscapes_mobilenetv2/model.pth'
#model_path = '../model/cityscapes_deeplabv3p_mobilenetv2/model_tmp.pth'
# Load the trained weights, then drop the state dict to free memory.
param = torch.load(model_path)
model.load_state_dict(param)
del param

batch_size = 1

valid_dataset = CityscapesDataset(split='valid', net_type='deeplab')
valid_loader = DataLoader(valid_dataset, batch_size=batch_size, shuffle=True)

images_list = []
labels_list = []
preds_list = []

# Inference only: switch to eval mode and disable autograd bookkeeping.
model.eval()
with torch.no_grad():
    print("Begininng first batch")
    prev = datetime.datetime.now()
    # NOTE(review): fragment is truncated here — the loop body continues
    # beyond this chunk.
    for batched in valid_loader:
        images, labels = batched
        # NCHW -> NHWC layout for numpy-side processing.
        images_np = images.numpy().transpose(0, 2, 3, 1)
        labels_np = labels.numpy()
# --- Ejemplo n.º 6 (0) ---
from models.net import SPPNet
from dataset.cityscapes import CityscapesDataset
from utils.preprocess import minmax_normalize


# Select GPU 0 when CUDA is available, otherwise fall back to CPU.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
model = SPPNet(output_channels=19).to(device)
model_path = '../model/cityscapes_deeplab_v3_plus/model.pth'
# Load the trained weights, then drop the state dict to free memory.
param = torch.load(model_path)
model.load_state_dict(param)
del param

batch_size = 1

valid_dataset = CityscapesDataset(split='valid', preprocess='deeplab')
valid_loader = DataLoader(valid_dataset, batch_size=batch_size, shuffle=True)

images_list = []
labels_list = []
preds_list = []

# Inference only: switch to eval mode and disable autograd bookkeeping.
model.eval()
with torch.no_grad():
    # NOTE(review): fragment is truncated below — the loop body continues
    # beyond this chunk.
    for batched in valid_loader:
        images, labels = batched
        # NCHW -> NHWC layout for numpy-side processing.
        images_np = images.numpy().transpose(0, 2, 3, 1)
        # Keep only the first 1024 rows / 2048 cols of the label map
        # (presumably the full Cityscapes resolution — confirm).
        labels_np = labels.numpy()[:, :1024, :2048]

        images, labels = images.to(device), labels.to(device)
        preds = model(images)
# --- Ejemplo n.º 7 (0) ---
    # NOTE(review): fragment starts inside an if/else whose header (model
    # type selection) is outside this chunk.
    model = EncoderDecoderNet(**net_config)
else:
    net_type = 'deeplab'
    model = SPPNet(**net_config)
model.to(device)
model.update_bn_eps()

# Load the trained weights, then drop the state dict to free memory.
param = torch.load(model_path)
model.load_state_dict(param)
del param

model.eval()

batch_size = 1
# Multi-scale factors (presumably used for test-time augmentation inside
# predict() — confirm).
scales = [0.25, 0.75, 1, 1.25]
valid_dataset = CityscapesDataset(split='valid', net_type=net_type)
valid_loader = DataLoader(valid_dataset, batch_size=batch_size, shuffle=True)

if vis_flag:
    images_list = []
    labels_list = []
    preds_list = []

    # Collect only the first 4 batches for visualization.
    with torch.no_grad():
        for batched in valid_loader:
            images_np, labels_np, preds_np, names = predict(batched)
            images_list.append(images_np)
            labels_list.append(labels_np)
            preds_list.append(preds_np)
            if len(images_list) == 4:
                break
# NOTE(review): fragment starts mid if/else and is truncated at the end
# (the final `if` has no body in this chunk).
else:
    net_type = 'deeplab'
    model = SPPNet(**net_config)
model.to(device)
model.update_bn_eps()

# Load the trained weights, then drop the state dict to free memory.
param = torch.load(model_path)
model.load_state_dict(param)
del param

model.eval()

batch_size = 1
scales = [0.25, 0.75, 1, 1.25]
# Cityscapes-format loader pointed at a KITTI data directory; shuffling is
# off so output order matches the input listing.
valid_dataset = CityscapesDataset(split='test',
                                  kitti="G:/3D_detector/data",
                                  net_type=net_type)
valid_loader = DataLoader(valid_dataset, batch_size=batch_size, shuffle=False)

if vis_flag:
    images_list = []
    labels_list = []
    preds_list = []

    with torch.no_grad():
        for batched in valid_loader:
            images_np, labels_np, preds_np, names = predict(batched)
            images_list.append(images_np)
            labels_list.append(labels_np)
            preds_list.append(preds_np)
            if len(images_list) == 4:
# --- Ejemplo n.º 9 (0) ---
from models.net import SPPNet
from dataset.cityscapes import CityscapesDataset
from utils.preprocess import minmax_normalize

# Select GPU index 4 when CUDA is available, otherwise run on CPU.
device = torch.device('cuda:4' if torch.cuda.is_available() else 'cpu')
model = SPPNet(output_channels=19).to(device)
model_path = '../model/cityscapes_deeplab_v3_plus/model.pth'
# Load the trained weights, then drop the state dict to free memory.
param = torch.load(model_path)
model.load_state_dict(param)
del param

batch_size = 1

valid_dataset = CityscapesDataset(base_dir='../data/cityscapes/',
                                  split='val',
                                  net_type='deeplab')
valid_loader = DataLoader(valid_dataset, batch_size=batch_size, shuffle=True)

# images_list = []
# labels_list = []
# preds_list = []

# Inference only: switch to eval mode and disable autograd bookkeeping.
model.eval()
with torch.no_grad():
    # NOTE(review): fragment is truncated below — the loop body continues
    # beyond this chunk.  Batches are dicts keyed by 'img' / 'img_path'.
    for i, batched in enumerate(valid_loader):
        # image, label, image_path = batched['img'], batched['lbl'], batched['img_path']
        image, image_path = batched['img'], batched['img_path']

        # Strip the extension from the first (and only) path in the batch.
        image_name, _ = os.path.splitext(image_path[0])
        # NCHW -> NHWC layout for numpy-side processing.
        image_np = image.numpy().transpose(0, 2, 3, 1)
# --- Ejemplo n.º 10 (0) ---
    # NOTE(review): fragment starts inside an if/else whose header (model
    # type selection) is outside this chunk.
    net_type = 'deeplab'
    model = SPPNet(**net_config)
model.to(device)
model.update_bn_eps()

# map_location='cpu' lets GPU-trained weights load on a CPU-only machine.
param = torch.load(model_path, map_location='cpu')
model.load_state_dict(param)
del param

model.eval()

batch_size = 1
# Multi-scale factors (presumably used for test-time augmentation inside
# predict() — confirm).
scales = [0.25, 0.75, 1, 1.25]

valid_dataset = CityscapesDataset(split='eval',
                                  net_type=net_type,
                                  base_dir='../data/kyoto')
valid_loader = DataLoader(valid_dataset, batch_size=batch_size, shuffle=True)

# NOTE(review): the conditional expression binds looser than `+`, so the
# whole `str(modelname) + '_tta'` is the tta_flag branch — looks intended,
# but confirm.  Also: mkdir without exist_ok=True raises FileExistsError
# if the directory already exists (e.g. on a second run).
output_dir = Path('../output/kyoto_eval/') / (str(modelname) + '_tta'
                                              if tta_flag else modelname)
output_dir.mkdir(parents=True)

with torch.no_grad():
    for batched in tqdm(valid_loader):
        # First element of predict()'s result is unused here.
        _, preds_np, names = predict(batched)
        # Map predicted ids to class ids and save one PNG per image.
        preds_np = id2cls_func(preds_np).astype(np.uint8)

        for name, pred in zip(names, preds_np):
            Image.fromarray(pred).save(output_dir / f'{name}.png')