Example No. 1
def validate_sintel(sess, framework, dataset, path):
    tflearn.is_training(False, session=sess)
    validationImg1, validationImg2, validationFlow = zip(*dataset)
    validationSize = len(dataset)
    batchEpe = []
    c = 0
    for j in tqdm(range(0, validationSize, batchSize)):
        if j + batchSize > validationSize:
            # Skip the final partial batch instead of re-scoring the previous one.
            break
        batchImg1 = [sintel.load(p) for p in validationImg1[j: j + batchSize]]
        batchImg2 = [sintel.load(p) for p in validationImg2[j: j + batchSize]]
        batchFlow = [sintel.load(p) for p in validationFlow[j: j + batchSize]]

        batchEpe.append(sess.run(framework.epe, framework.feed_dict(
            img1=batchImg1,
            img2=batchImg2,
            flow=batchFlow
        )))

        # Visualize every fifth batch, sub-sampled to every fourth example below.
        if (j // batchSize) % 5 == 0:
            batchPred = sess.run(framework.flow, framework.feed_dict(
                img1=batchImg1,
                img2=batchImg2,
                flow=batchFlow
            ))
            batchImg1 = batchImg1[::4]
            batchImg2 = batchImg2[::4]
            batchFlow = batchFlow[::4]
            batchPred = batchPred[::4]
            visualization.plot(batchImg1, batchImg2, batchFlow, batchPred, path, c)
            c += len(batchImg1)
    mean_epe = np.mean(batchEpe)
    return float(mean_epe)
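Here `framework.epe` is the average endpoint error computed inside the model graph. Purely as a point of reference, a minimal NumPy sketch of the same metric (the names below are illustrative and not part of the framework) could be:

import numpy as np

def endpoint_error(pred, gt):
    # Average endpoint error: mean Euclidean distance between predicted
    # and ground-truth flow vectors, taken over all pixels.
    return float(np.mean(np.sqrt(np.sum((pred - gt) ** 2, axis=-1))))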
Example No. 2
def predict(pipe, prefix, batch_size = 8, resize = None):

	sintel_resize = (448, 1024) if resize is None else resize
	sintel_dataset = sintel.list_data()
	prefix = prefix + '_sintel'
	if not os.path.exists(prefix):
		os.mkdir(prefix)
	
	flo = sintel.Flo(1024, 436)

	for div in ('test',):
		for k, dataset in sintel_dataset[div].items():
			if k == 'clean':
				continue
			output_folder = os.path.join(prefix, k)
			if not os.path.exists(output_folder):
				os.mkdir(output_folder)
			img1, img2 = [[sintel.load(p) for p in data] for data in list(zip(*dataset))[:2]]
			for result, entry in zip(pipe.predict(img1, img2, batch_size = 1, resize = sintel_resize), dataset):
				flow, occ_mask, warped = result
				img1_path = entry[0]  # path string for frame 1 of this entry
				fname = os.path.basename(img1_path)
				seq = os.path.basename(os.path.dirname(img1_path))
				seq_output_folder = os.path.join(output_folder, seq)
				if not os.path.exists(seq_output_folder):
					os.mkdir(seq_output_folder)
				flo.save(flow, os.path.join(seq_output_folder, fname.replace('.png', '.flo')))

	'''
	KITTI 2012: 
	Submission instructions: For the optical flow benchmark, all flow fields of the test set must be provided in the root directory of a zip file using the file format described in the readme.txt (16 bit color png) and the file name convention of the ground truth (000000_10.png, ... , 000194_10.png).

	KITTI 2015:
	Submission instructions: Provide a zip file which contains the 'disp_0' directory (stereo), the 'flow' directory (flow), or the 'disp_0', 'disp_1' and 'flow' directories (scene flow) in its root folder. Use the file format and naming described in the readme.txt (000000_10.png,...,000199_10.png). 
	'''

	kitti_resize = (512, 1152) if resize is None else resize
	kitti_dataset = kitti.read_dataset_testing(resize = kitti_resize)
	prefix = prefix.replace('sintel', 'kitti')
	if not os.path.exists(prefix):
		os.mkdir(prefix)

	for k, dataset in kitti_dataset.items():
		output_folder = os.path.join(prefix, k)
		if not os.path.exists(output_folder):
			os.mkdir(output_folder)

		img1 = kitti_dataset[k]['image_0']
		img2 = kitti_dataset[k]['image_1']
		cnt = 0
		for result in pipe.predict(img1, img2, batch_size = 1, resize = kitti_resize):
			flow, occ_mask, warped = result
			out_name = os.path.join(output_folder, '%06d_10.png' % cnt)
			cnt = cnt + 1

			# KITTI flow PNG: 16-bit, 3-channel. cv2.imwrite expects BGR, so channel 2 (R)
			# holds the first flow component, channel 1 (G) the second, and channel 0 (B)
			# keeps the value 1 from np.ones as the "valid" flag.
			# Components are stored as uint16 via flow * 64 + 2**15, i.e. 64 * (flow + 512).
			pred = np.ones((flow.shape[0], flow.shape[1], 3)).astype(np.uint16)
			pred[:, :, 2] = (64.0 * (flow[:, :, 0] + 512)).astype(np.uint16)
			pred[:, :, 1] = (64.0 * (flow[:, :, 1] + 512)).astype(np.uint16)
			cv2.imwrite(out_name, pred)
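The quoted KITTI submission instructions above describe the expected 16-bit PNG format, and the loop writes files accordingly. As a sanity check, the encoding can be inverted when reading a file back; the sketch below is an illustration only, assuming OpenCV returns the 16-bit channels unchanged in BGR order, and is not part of the pipeline:

import cv2
import numpy as np

def read_kitti_flow_png(path):
    # Read the 16-bit PNG unchanged; OpenCV returns channels in BGR order.
    png = cv2.imread(path, cv2.IMREAD_UNCHANGED).astype(np.float64)
    flow = (png[:, :, 2:0:-1] - 2 ** 15) / 64.0   # R, G -> flow components
    valid = png[:, :, 0] > 0                      # B channel is the valid flag
    return flow, valid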
			
Example No. 3
def predict(pipe, prefix, batch_size=8):
    sintel_dataset = sintel.list_data(sintel.sintel_path)
    if not os.path.exists(prefix):
        os.mkdir(prefix)

    flo = sintel.Flo(1024, 436)

    for div in ('test', ):
        for k, dataset in sintel_dataset[div].items():
            output_folder = os.path.join(prefix, k)
            if not os.path.exists(output_folder):
                os.mkdir(output_folder)

            img1, img2 = [[sintel.load(p) for p in data]
                          for data in zip(*dataset)]
            for flow, entry in zip(
                    pipe.predict(img1, img2, batch_size=batch_size), dataset):
                img1_path = entry[0]  # path string for frame 1 of this entry
                fname = os.path.basename(img1_path).replace('.png', '.flo')
                seq = os.path.basename(os.path.dirname(img1_path))
                seq_output_folder = os.path.join(output_folder, seq)
                if not os.path.exists(seq_output_folder):
                    os.mkdir(seq_output_folder)
                flo.save(flow, os.path.join(seq_output_folder, fname))
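`sintel.Flo(1024, 436)` is the project's helper for the Middlebury/Sintel `.flo` container. For reference, a stand-alone writer for that layout (little-endian float32 magic number 202021.25, then int32 width and height, then interleaved float32 flow values) might look like the sketch below; the helper's actual interface may differ:

import numpy as np

def write_flo(flow, path):
    # flow: H x W x 2 float array, written in the Middlebury .flo layout.
    h, w = flow.shape[:2]
    with open(path, 'wb') as f:
        np.array([202021.25], dtype=np.float32).tofile(f)  # magic number ("PIEH")
        np.array([w, h], dtype=np.int32).tofile(f)         # width, then height
        flow.astype(np.float32).tofile(f)                  # row-major, interleaved components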
Example No. 4
# ======== Run validation if requested ========

def validate():
	validation_result = {}
	for dataset_name in validation_datasets:
		validation_result[dataset_name] = pipe.validate(*validation_datasets[dataset_name], batch_size = args.batch)
	return validation_result

if args.valid:
	log = logger.FileLog(os.path.join(repoRoot, 'logs', 'val', '{}.val.log'.format(run_id)), screen=True)
	
	# sintel
	sintel_dataset = sintel.list_data()
	for div in ('training2', 'training'):
		for k, dataset in sintel_dataset[div].items():
			img1, img2, flow, mask = [[sintel.load(p) for p in data] for data in zip(*dataset)]
			val_epe = pipe.validate(img1, img2, flow, mask, batch_size=args.batch, resize = infer_resize)
			log.log('steps={}, sintel.{}.{}:epe={}'.format(steps, div, k, val_epe))
			sys.stdout.flush()
	
	# kitti
	read_resize = (370, 1224) # if infer_resize is None else infer_resize
	for kitti_version in ('2012', '2015'):
		dataset = kitti.read_dataset(editions = kitti_version, parts = 'mixed', resize = read_resize)
		val_epe = pipe.validate(dataset['image_0'], dataset['image_1'], dataset['flow'], dataset['occ'], batch_size=args.batch, resize = infer_resize, return_type = 'epe')	
		log.log('steps={}, kitti.{}:epe={}'.format(steps, kitti_version, val_epe))
		sys.stdout.flush()
		val_epe = pipe.validate(dataset['image_0'], dataset['image_1'], dataset['flow'], dataset['occ'], batch_size=args.batch, resize = infer_resize, return_type = 'kitti')	
		log.log('steps={}, kitti.{}:kitti={}'.format(steps, kitti_version, val_epe))
		sys.stdout.flush()
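The second `pipe.validate` call with `return_type = 'kitti'` presumably reports the KITTI outlier rate rather than plain EPE. For reference, the standard KITTI criterion flags a pixel as an outlier when its endpoint error exceeds both 3 px and 5% of the ground-truth flow magnitude; a rough NumPy sketch (not the pipeline's implementation) is:

import numpy as np

def kitti_outlier_rate(pred, gt, valid):
    # Outlier: endpoint error > 3 px AND > 5% of the ground-truth flow magnitude.
    epe = np.sqrt(np.sum((pred - gt) ** 2, axis=-1))
    mag = np.sqrt(np.sum(gt ** 2, axis=-1))
    outlier = (epe > 3.0) & (epe > 0.05 * mag)
    return float(np.mean(outlier[valid > 0]))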
Example No. 5
    validationSize = len(validationSet)

    validationImg1, validationImg2, validationFlow = zip(
        *[fetch_data(i) for i in validationSet])
elif train_cfg.dataset.value == 'sintel':
    print('loading sintel dataset ...')
    subsets = train_cfg.subsets.value
    print(subsets[0], subsets[1])
    trainImg1 = []
    trainImg2 = []
    trainFlow = []
    sintel_dataset = sintel.list_data(sintel.sintel_path)
    for k, dataset in sintel_dataset[subsets[0]].items():
        if train_cfg.fast.get(False):
            dataset = dataset[:8]
        img1, img2, flow = [[sintel.load(p) for p in data]
                            for data in zip(*dataset)]
        trainImg1.extend(img1)
        trainImg2.extend(img2)
        trainFlow.extend(flow)
    trainSize = len(trainImg1)

    validationImg1 = []
    validationImg2 = []
    validationFlow = []
    for k, dataset in sintel_dataset[subsets[1]].items():
        if train_cfg.fast.get(False):
            dataset = dataset[:8]
        img1, img2, flow = [[sintel.load(p) for p in data]
                            for data in zip(*dataset)]
        validationImg1.extend(img1)