# Shared imports for the evaluation snippets below; the standard-library and
# third-party imports are the usual ones for this kind of FCN evaluation code.
# The remaining helpers (get_model, get_fcn_dataset, to_tensor_raw, fast_hist,
# result_stats, fmt_array, make_variable, save_seg_results, IoU, recall,
# sklearnScores, saveImg) come from the project's own modules, so their exact
# import paths depend on the repository layout.
import os
import json

import numpy as np
import torch
import torchvision
from torchvision import transforms
from torch.autograd import Variable
from PIL import Image
from tqdm import tqdm


# Example 1
def main(path, dataset, datadir, model, gpu, num_cls):
    os.environ['CUDA_VISIBLE_DEVICES'] = gpu
    net = get_model(model, num_cls=num_cls, weights_init=path)
    net.eval()
    ds = get_fcn_dataset(dataset, datadir, split='val',
                         transform=net.transform, target_transform=to_tensor_raw)
    classes = ds.classes
    loader = torch.utils.data.DataLoader(ds, num_workers=8)

    intersections = np.zeros(num_cls)
    unions = np.zeros(num_cls)

    errs = []
    hist = np.zeros((num_cls, num_cls))
    if len(loader) == 0:
        print('Empty data loader')
        return
    iterations = tqdm(enumerate(loader))
    for im_i, (im, label) in iterations:
        im = Variable(im.cuda())
        score = net(im).data
        _, preds = torch.max(score, 1)
        hist += fast_hist(label.numpy().flatten(),
                          preds.cpu().numpy().flatten(),
                          num_cls)
        acc_overall, acc_percls, iu, fwIU = result_stats(hist)
        iterations.set_postfix({'mIoU': ' {:0.2f}  fwIoU: {:0.2f} pixel acc: {:0.2f} per cls acc: {:0.2f}'.format(
            np.nanmean(iu), fwIU, acc_overall, np.nanmean(acc_percls))})
    print()
    print(','.join(classes))
    print(fmt_array(iu))
    print(np.nanmean(iu), fwIU, acc_overall, np.nanmean(acc_percls))
    print()
    print('Errors:', errs)
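
# The examples in this file all lean on a handful of project helpers that are
# not shown here. Below is a minimal sketch of typical implementations,
# assuming the usual confusion-matrix-based FCN evaluation; the real project
# versions may differ (e.g. in scaling or epsilon handling), so the names carry
# a _sketch suffix to mark them as hypothetical.
def to_tensor_raw_sketch(im):
    # Convert a PIL label image to a LongTensor without rescaling to [0, 1].
    return torch.from_numpy(np.array(im, dtype=np.int64))

def fast_hist_sketch(a, b, n):
    # Accumulate an n x n confusion matrix from flattened label / prediction arrays.
    k = (a >= 0) & (a < n)
    return np.bincount(n * a[k].astype(int) + b[k], minlength=n ** 2).reshape(n, n)

def result_stats_sketch(hist):
    # Derive overall pixel accuracy, per-class accuracy, per-class IoU and
    # frequency-weighted IoU from the accumulated confusion matrix.
    acc_overall = np.diag(hist).sum() / hist.sum() * 100
    acc_percls = np.diag(hist) / (hist.sum(1) + 1e-8) * 100
    iu = np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist) + 1e-8) * 100
    freq = hist.sum(1) / hist.sum()
    fwIU = (freq[freq > 0] * iu[freq > 0]).sum()
    return acc_overall, acc_percls, iu, fwIU
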
# Example 2
def main(path, dataset, datadir, model, gpu, num_cls):
    os.environ['CUDA_VISIBLE_DEVICES'] = gpu
    net = get_model(model, num_cls=num_cls, weights_init=path)
    net.eval()
    ds = get_fcn_dataset(dataset,
                         datadir,
                         split='val',
                         transform=net.transform,
                         target_transform=to_tensor_raw)
    classes = ds.classes
    loader = torch.utils.data.DataLoader(ds, num_workers=8)

    intersections = np.zeros(num_cls)
    unions = np.zeros(num_cls)

    errs = []
    hist = np.zeros((num_cls, num_cls))
    if len(loader) == 0:
        print('Empty data loader')
        return
    iterations = tqdm(enumerate(loader))
    for im_i, (im, label) in iterations:
        im = Variable(im.cuda())
        score = net(im).data
        _, preds = torch.max(score, 1)
        # Colorize the prediction and save it as a PNG; the 481 x 868 output
        # resolution is hardcoded for this dataset's validation images.
        p = preds.cpu().numpy().reshape(481, 868).astype(np.int32)
        image = Image.fromarray(p)
        image = image.convert('RGB')
        datas = image.getdata()
        newdata = []
        for item in datas:
            if item[0] == 0:
                newdata.append((151, 126, 171))
            elif item[0] == 1:
                newdata.append((232, 250, 80))
            elif item[0] == 2:
                newdata.append((55, 181, 57))
            elif item[0] == 3:
                newdata.append((187, 70, 156))
            else:
                newdata.append((0, 0, 0))
        image.putdata(newdata)
        os.makedirs('./image', exist_ok=True)  # ensure the output directory exists
        image.save('./image/' + ds.ids[im_i] + '.png')
        hist += fast_hist(label.numpy().flatten(),
                          preds.cpu().numpy().flatten(), num_cls)
        acc_overall, acc_percls, iu, fwIU = result_stats(hist)
        iterations.set_postfix({
            'mIoU':
            ' {:0.2f}  fwIoU: {:0.2f} pixel acc: {:0.2f} per cls acc: {:0.2f}'.
            format(np.nanmean(iu), fwIU, acc_overall, np.nanmean(acc_percls))
        })
    print('Errors:', errs)
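
# The per-pixel colorization loop in the example above can be replaced by a
# numpy palette lookup, which is much faster for full-resolution predictions.
# A minimal sketch, reusing the same four class colors and the black fallback
# from the loop above (colorize_sketch and _PALETTE are hypothetical names,
# not part of the project):
_PALETTE = np.array([(151, 126, 171),
                     (232, 250, 80),
                     (55, 181, 57),
                     (187, 70, 156),
                     (0, 0, 0)], dtype=np.uint8)

def colorize_sketch(pred):
    # Map an (H, W) array of class indices to an RGB PIL image; any index
    # outside 0-3 falls back to black, matching the original loop.
    idx = np.where((pred >= 0) & (pred < 4), pred, 4)
    return Image.fromarray(_PALETTE[idx])
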
# Example 3
def main(path, dataset, datadir, model, gpu, num_cls):
    os.environ['CUDA_VISIBLE_DEVICES'] = gpu

    #net = get_model('CalibratorNet', model=model, num_cls=num_cls, weights_init=path,cali_model = 'resnet_9blocks',pretrained=False)
    net = get_model(model, num_cls=num_cls, weights_init=path)
    net.load_state_dict(torch.load(path))
    net.eval()
    ds = get_fcn_dataset(dataset,
                         datadir,
                         split='val',
                         transform=net.transform,
                         target_transform=to_tensor_raw)
    #ds = get_fcn_dataset(dataset, datadir, split='val',
    #                     transform=torchvision.transforms.ToTensor(), target_transform=to_tensor_raw)
    classes = ds.classes
    loader = torch.utils.data.DataLoader(ds, num_workers=8)

    intersections = np.zeros(num_cls)
    unions = np.zeros(num_cls)

    errs = []
    hist = np.zeros((num_cls, num_cls))
    if len(loader) == 0:
        print('Empty data loader')
        return
    iterations = tqdm(enumerate(loader))
    count = 0
    res = []
    with torch.no_grad():
        for im_i, (im, label) in iterations:
            im = Variable(im.cuda())

            if count > 25:
                break
            source_out = torch.argmax(net(im).data, dim=1)

            res.append((im, source_out, label, count))
            #save_seg_results(max_ori_score,count)
            #save_seg_results(im,max_score,max_ori_score,count)
            '''
            _, preds = torch.max(score, 1)
            hist += fast_hist(label.numpy().flatten(),
                              preds.cpu().numpy().flatten(),
                              num_cls)
            acc_overall, acc_percls, iu, fwIU = result_stats(hist)
            iterations.set_postfix({'mIoU': ' {:0.2f}  fwIoU: {:0.2f} pixel acc: {:0.2f} per cls acc: {:0.2f}'.format(
                np.nanmean(iu), fwIU, acc_overall, np.nanmean(acc_percls))})
            '''
            count += 1
    with torch.no_grad():
        for r in res:
            im, score, label, id = r
            save_seg_results(im, score, label, id)
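
# Note on the example above: res holds up to 26 image/prediction/label triples
# on the GPU until the saving pass runs. If GPU memory is tight, moving the
# tensors to the CPU before appending (e.g. im.cpu(), source_out.cpu()) is a
# safe variant.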
# Example 4
def main(path, dataset, datadir, model, gpu, num_cls, batch_size, loadsize, finesize):
	os.environ['CUDA_VISIBLE_DEVICES'] = gpu
	loadSize=loadsize
	fineSize=finesize
	net = get_model(model, num_cls=num_cls, weights_init=path)
	
	str_ids = gpu.split(',')
	gpu_ids = []
	for str_id in str_ids:
		id = int(str_id)
		if id >= 0:
			gpu_ids.append(id)
	
	# set gpu ids
	if len(gpu_ids) > 0:
		torch.cuda.set_device(gpu_ids[0])
		assert (torch.cuda.is_available())
		net.to(gpu_ids[0])
		net = torch.nn.DataParallel(net, gpu_ids)
	
	net.eval()
	
	if loadSize is not None and fineSize is not None:
		print("Loading Center Crop DataLoader Transform")
		data_transform = torchvision.transforms.Compose([transforms.Resize([int(loadSize), int(int(fineSize) * 1.8)], interpolation=Image.BICUBIC),
		                                                 net.module.transform.transforms[0], net.module.transform.transforms[1]])
		
		target_transform = torchvision.transforms.Compose([transforms.Resize([int(loadSize), int(int(fineSize) * 1.8)], interpolation=Image.NEAREST),
			 transforms.Lambda(lambda img: to_tensor_raw(img))])
	
	else:
		data_transform = net.module.transform
		target_transform = torchvision.transforms.Compose([transforms.Lambda(lambda img: to_tensor_raw(img))])
	
	ds = get_fcn_dataset(dataset, datadir, num_cls=num_cls, split='val', transform=data_transform, target_transform=target_transform)
	classes = ds.classes
	
	loader = torch.utils.data.DataLoader(ds, num_workers=16, batch_size=batch_size)

	errs = []
	hist = np.zeros((num_cls, num_cls))
	if len(loader) == 0:
		print('Empty data loader')
		return
	iterations = tqdm(enumerate(loader))
	for im_i, (im, label) in iterations:
		if im_i == 0:
			print(im.size())
			print(label.size())
		
		if im_i > 32:
			break
		
		im = Variable(im.cuda())
		score = net(im).data
		_, preds = torch.max(score, 1)
		hist += fast_hist(label.numpy().flatten(), preds.cpu().numpy().flatten(), num_cls)
		acc_overall, acc_percls, iu, fwIU = result_stats(hist)
		iterations.set_postfix({'mIoU': ' {:0.2f}  fwIoU: {:0.2f} pixel acc: {:0.2f} per cls acc: {:0.2f}'.format(np.nanmean(iu), fwIU, acc_overall,
		                                                                                                          np.nanmean(acc_percls))})
	print()
	
	synthia_metric_iu = 0
	
	# line = ""
	for index, cls_name in enumerate(classes):
		print(cls_name, " {:0.1f}".format(iu[index]))
		if cls_name not in ('terrain', 'truck', 'train'):
			synthia_metric_iu += iu[index]
			# line += " {:0.1f} &".format(iu[index])
			
	# variable "line" is used for adding format results into latex grids
	# print(line)
	
	print(np.nanmean(iu), fwIU, acc_overall, np.nanmean(acc_percls))
	print("16 Class-Wise mIOU is {}".format(synthia_metric_iu / 16))
	print('Errors:', errs)
	
	cur_path = path.split('/')[-1]
	parent_path = path.replace(cur_path, '')
	results_dict_path = os.path.join(parent_path, 'result.json')
	results_dict = {}
	results_dict[cur_path] = [np.nanmean(iu), synthia_metric_iu / 16]
	
	if not os.path.exists(results_dict_path):
		with open(results_dict_path, 'w') as fp:
			json.dump(results_dict, fp)
	else:
		with open(results_dict_path, 'r') as fp:
			exist_dict = json.load(fp)
		
		with open(results_dict_path, 'w') as fp:
			exist_dict.update(results_dict)
			json.dump(exist_dict, fp)
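
# The 16-class score in the example above follows the common SYNTHIA ->
# Cityscapes protocol: SYNTHIA has no terrain, truck or train annotations, so
# those three Cityscapes classes are excluded before averaging. A minimal,
# hypothetical helper capturing the same idea:
SYNTHIA_EXCLUDED = ('terrain', 'truck', 'train')

def synthia_miou_sketch(iu, classes, excluded=SYNTHIA_EXCLUDED):
    # Average the per-class IoU over the classes SYNTHIA actually annotates.
    kept = [iu[i] for i, c in enumerate(classes) if c not in excluded]
    return float(np.nanmean(kept))
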
# Example 5
def main(path, dataset, data_type, datadir, model, num_cls, mode):
    net = get_model(model, num_cls=num_cls)
    net.load_state_dict(torch.load(path))
    net.eval()
    ds = get_fcn_dataset(dataset,
                         data_type,
                         os.path.join(datadir, dataset),
                         split=mode)
    classes = ds.num_cls
    collate_fn = torch.utils.data.dataloader.default_collate

    loader = torch.utils.data.DataLoader(ds,
                                         num_workers=0,
                                         batch_size=16,
                                         shuffle=False,
                                         pin_memory=True,
                                         collate_fn=collate_fn)

    intersections = np.zeros(num_cls)
    unions = np.zeros(num_cls)

    ious = list()
    recalls = list()
    precisions = list()
    fscores = list()

    errs = []
    hist = np.zeros((num_cls, num_cls))

    if len(loader) == 0:
        print('Empty data loader')
        return
    iterations = tqdm(iter(loader))

    folderPath = '/'.join(
        path.split('/')[:-1]) + '/' + path.split('/')[-1].split('.')[0]

    os.makedirs(folderPath + '_worst_10', exist_ok=True)
    os.makedirs(folderPath + '_best_10', exist_ok=True)

    for i, (im, label) in enumerate(iterations):

        im = make_variable(im, requires_grad=False)
        label = make_variable(label, requires_grad=False)
        p = net(im)
        score = p

        iou = IoU(p, label)
        rc = recall(p, label)
        pr, rc, fs, _ = sklearnScores(p, label)

        ious.append(iou.item())

        recalls.append(rc)
        precisions.append(pr)
        fscores.append(fs)

        print("iou: ", np.mean(ious))
        print("recalls: ", np.mean(recalls))
        print("precision: ", np.mean(precisions))
        print("f1: ", np.mean(fscores))

    # Max, Min 10
    mx = list(np.argsort(ious)[-10:])
    mn = list(np.argsort(ious)[:10])

    iterations = tqdm(iter(loader))
    for i, (im, label) in enumerate(iterations):

        if i in mx:

            im = make_variable(im, requires_grad=False)
            label = make_variable(label, requires_grad=False)
            p = net(im)
            score = p

            saveImg(im, label, score,
                    folderPath + '_best_10' + "/img_" + str(i) + ".png")

        if i in mn:

            im = make_variable(im, requires_grad=False)
            label = make_variable(label, requires_grad=False)
            p = net(im)
            score = p

            saveImg(im, label, score,
                    folderPath + '_worst_10' + "/img_" + str(i) + ".png")

    print("=" * 100 + "\niou: ", np.mean(ious))
# Example 6
def main(path, dataset, datadir, model, gpu, num_cls):
    os.environ['CUDA_VISIBLE_DEVICES'] = gpu

    net = get_model('CalibratorNet',
                    model=model,
                    num_cls=num_cls,
                    weights_init=path,
                    cali_model='resnet_9blocks',
                    task='segmentation')
    #net = get_model(model, num_cls=num_cls, weights_init=path)
    #net.load_state_dict(torch.load(path))

    net.eval()

    transform = net.src_net.module.transform if hasattr(
        net.src_net, 'module') else net.src_net.transform

    ds = get_fcn_dataset(dataset,
                         datadir,
                         split='val',
                         transform=transform,
                         target_transform=to_tensor_raw)
    #ds = get_fcn_dataset(dataset, datadir, split='val',
    #                     transform=torchvision.transforms.ToTensor(), target_transform=to_tensor_raw)
    classes = ds.classes
    loader = torch.utils.data.DataLoader(ds, num_workers=8)

    intersections = np.zeros(num_cls)
    unions = np.zeros(num_cls)

    errs = []
    hist = np.zeros((num_cls, num_cls))
    if len(loader) == 0:
        print('Empty data loader')
        return
    iterations = tqdm(enumerate(loader))
    count = 0
    res = []

    with torch.no_grad():
        for im_i, (im, label) in iterations:

            im = Variable(im.cuda())
            pert = net.calibrator_T(im)
            #pert = torch.clamp(pert,0,0)
            score = net.src_net(torch.clamp(im + pert, -3, 3)).data

            max_score = torch.argmax(score, dim=1)
            #max_ori_score = torch.argmax(net(im).data,dim=1)

            #res.append((max_score,count))
            #save_seg_results(max_ori_score,count)
            #save_seg_results(im,max_score,max_ori_score,count)

            _, preds = torch.max(score, 1)
            hist += fast_hist(label.numpy().flatten(),
                              preds.cpu().numpy().flatten(), num_cls)
            acc_overall, acc_percls, iu, fwIU = result_stats(hist)
            iterations.set_postfix({
                'mIoU':
                ' {:0.2f}  fwIoU: {:0.2f} pixel acc: {:0.2f} per cls acc: {:0.2f}'
                .format(np.nanmean(iu), fwIU, acc_overall,
                        np.nanmean(acc_percls))
            })

            count += 1

    print()
    print(','.join(classes))
    print(fmt_array(iu))
    print(np.nanmean(iu), fwIU, acc_overall, np.nanmean(acc_percls))
    print()
    print('Errors:', errs)
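
# These main() variants are meant to be driven from the command line; the
# original project may wire them up with its own CLI mechanism. A minimal
# argparse sketch for the six-argument signature shared by most of the examples,
# assuming flag names that simply mirror the parameters (in a real script only
# one main would be defined):
if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser(description='Evaluate an FCN checkpoint.')
    parser.add_argument('--path', required=True, help='path to the model weights')
    parser.add_argument('--dataset', required=True)
    parser.add_argument('--datadir', required=True)
    parser.add_argument('--model', required=True)
    parser.add_argument('--gpu', default='0')
    parser.add_argument('--num_cls', type=int, required=True)
    args = parser.parse_args()
    main(args.path, args.dataset, args.datadir, args.model, args.gpu,
         args.num_cls)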