Example No. 1
def debug(exp, bench=None, net=None):
	#Compare binned ground-truth poses on 'car' against the network's predictions
	if bench is None:
		bench = pbench.PoseBenchmark(azimuthOnly=False, classes=['car'])
	preds = get_predictions(exp, bench, net=net, debugMode=True)
	gtPose = bench.giveTestPoses('car')
	gtAz, pdAz = [], []
	gtEl, pdEl = [], []
	testPreds  = []
	for i in range(len(gtPose)):
		#Quantize the ground-truth azimuth and elevation into bins
		az, el, _ = gtPose[i]
		az, _ = pep.format_label(az, exp.dPrms_, bins=exp.dPrms_.azBins)
		el, _ = pep.format_label(el, exp.dPrms_, bins=exp.dPrms_.elBins)
		gtAz.append(az)
		gtEl.append(el)
		pdFloat, pdBins = preds[i]
		testPreds.append(pdFloat)
		paz, pel, _ = pdBins
		pdAz.append(paz)
		pdEl.append(pel)
	gtAz = np.array(gtAz)
	pdAz = np.array(pdAz)
	gtEl = np.array(gtEl)
	pdEl = np.array(pdEl)
	errs = bench.evaluatePredictions('car', testPreds)
	#Median angular error in degrees
	print (180 * np.median(errs) / np.pi)
	return np.array(gtPose), np.array(testPreds), gtAz, pdAz
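The value printed above is the median of the per-instance angular errors, converted from radians to degrees. A minimal, self-contained sketch of that conversion (the error values below are made up for illustration):

# Illustration only, not part of the original codebase
import numpy as np

errs_rad = np.array([0.05, 0.12, 0.30, 0.08])   # hypothetical per-instance errors (radians)
print(180 * np.median(errs_rad) / np.pi)        # median error in degrees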
Example No. 2
def save_evaluation(exp, numIter, bench=None, forceWrite=False):
	resFile = get_result_filename(exp, numIter)	
	#Check if result file exists
	if osp.exists(resFile) and not forceWrite:
		print ('%s exists' % resFile)
		return
	#Get the benchmark object
	print ('Loading Benchmark Object')
	if bench is None:
		bench         = pbench.PoseBenchmark(classes=PASCAL_CLS)
	#Make the net
	net = make_deploy_net(exp, numIter)
	#Start evaluation
	print ('Starting evaluation')
	res = edict()
	mds = []
	for i, cls in enumerate(PASCAL_CLS):
		res[cls] = edict()
		res[cls]['pd']  = get_predictions(exp, bench, className=cls, net=net)
		res[cls]['gt']  = bench.giveTestPoses(cls)
		res[cls]['err'] = bench.evaluatePredictions(cls, res[cls]['pd']) 
		mds.append(180 * np.median(res[cls]['err']) / np.pi)
		print ('Median error on %s is %f degrees' % (cls, mds[i]))
		res[cls]['imn'], res[cls]['bbox'] = bench.giveTestInstances(cls)
	mds = np.array(mds)
	print ('MEAN OF PER-CLASS MEDIAN ERRORS %f' % np.mean(mds))
	pickle.dump(res, open(resFile, 'wb'))
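The pickled result file stores, per class, the predictions ('pd'), the ground-truth poses ('gt'), the per-instance errors ('err'), and the test instances ('imn', 'bbox'). A hedged sketch of loading such a file back and recomputing the per-class median errors (the file name is a placeholder; the field names follow the code above):

# Illustration only, not part of the original codebase
import pickle
import numpy as np

with open('some_result_file.pkl', 'rb') as fid:   # placeholder path
	res = pickle.load(fid)
for cls in res.keys():
	medDeg = 180 * np.median(res[cls]['err']) / np.pi
	print('%s: median error %.2f deg' % (cls, medDeg))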
Example No. 3
def save_nn_results(cls='car', bench=None):
	dat = get_data_dict('train')
	fullKeys = dat.keys()
	idKeys   = [osp.split(k)[1] for k in fullKeys]
	trainFiles, trainLbs = get_cls_set_files('train', cls=cls)
	if bench is None:
		bench = pbench.PoseBenchmark(classes=[cls])
	imNames, bbox = bench.giveTestInstances(cls)
	#NOTE: the original ends with a bare `dat` expression, which is a no-op;
	#the function looks unfinished, so the loaded data dict is returned instead
	return dat
Example No. 4
def stupid_debug(exp, bench=None):
	#Sanity check: quantize the ground-truth poses into bins and back, then
	#measure the error introduced by the binning alone
	if bench is None:
		bench = pbench.PoseBenchmark(azimuthOnly=True, classes=['car'])
	gtPose = bench.giveTestPoses('car')
	pdPose = []
	for i in range(len(gtPose)):
		a, e, _ = gtPose[i]
		aBin,_ = pep.format_label(a, exp.dPrms_, bins=exp.dPrms_.azBins)
		eBin,_ = pep.format_label(e, exp.dPrms_, bins=exp.dPrms_.elBins)
		az    = pep.unformat_label(aBin, None,
						exp.dPrms_, bins=exp.dPrms_.azBins)
		el    = pep.unformat_label(eBin, None, 
						exp.dPrms_, bins=exp.dPrms_.elBins)
		pdPose.append([az, el, 0])
	errs = bench.evaluatePredictions('car', pdPose)
	#Median quantization error (radians)
	print (np.median(errs))
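stupid_debug measures only the error introduced by quantizing the ground-truth angles into bins and mapping them back, so it gives a lower bound on what any bin-based predictor can achieve. format_label/unformat_label are not shown in this listing; the self-contained sketch below reproduces the same round trip with simple uniform bins over [0, 2π):

# Illustration only, not part of the original codebase
import numpy as np

nBins   = 21
edges   = np.linspace(0, 2 * np.pi, nBins + 1)
centers = 0.5 * (edges[:-1] + edges[1:])

angles    = np.random.uniform(0, 2 * np.pi, 1000)
binIdx    = np.clip(np.digitize(angles, edges) - 1, 0, nBins - 1)
recovered = centers[binIdx]
quantErr  = np.abs(angles - recovered)
print('median quantization error: %.2f deg' % (180 * np.median(quantErr) / np.pi))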
Example No. 5
def find_test_keys(cls='car', bench=None, dat=None):
	if dat is None:
		dat = get_data_dict('test')
	fullKeys = list(dat.keys())
	idKeys   = [osp.split(k)[1] for k in fullKeys]
	testKeys = []
	if bench is None:
		bench = pbench.PoseBenchmark(classes=[cls])
	imNames, bbox = bench.giveTestInstances(cls)	
	for nm in imNames:
		#Strip the directory and the 4-character extension to get the PASCAL id
		_, pascalid = osp.split(nm)
		pascalid = pascalid[0:-4]
		idx = idKeys.index(pascalid)
		#Stop for inspection if an image has more than one annotated instance
		if len(dat[fullKeys[idx]].coords) > 1:
			pdb.set_trace()
		testKeys.append(fullKeys[idx])
	return testKeys
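Each benchmark image is matched to its entry in the data dict by its PASCAL id, i.e. the file name with the directory and the 4-character extension removed. A tiny sketch of that extraction (the path below is made up):

# Illustration only, not part of the original codebase
import os.path as osp

nm = '/some/dir/JPEGImages/2008_000123.jpg'   # hypothetical image path
_, pascalid = osp.split(nm)
pascalid = pascalid[0:-4]                     # drop '.jpg'
print(pascalid)                               # -> 2008_000123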
Example No. 6
def save_imdata():
	bench = pbench.PoseBenchmark(classes=PASCAL_CLS)
	count = 0
	dName = '/data0/pulkitag/data_sets/pascal3d/imCrop/test/im%d.jpg'
	testList = []
	for cls in PASCAL_CLS:
		print (cls)	
		imNames, bbox = bench.giveTestInstances(cls)	
		ims = get_imdata(imNames, bbox, svMode=True)	
		for i in range(ims.shape[0]):
			svName = dName % count
			scm.imsave(svName, ims[i])
			testList.append([imNames[i], bbox[i], svName])
			count += 1
	outFile = 'pose-files/pascal_test_data.pkl'
	pickle.dump({'testList': testList}, open(outFile, 'wb'))
Example No. 7
def debug_evaluate_data(exp, classes=['car'], isPlot=False):
	bench         = pbench.PoseBenchmark(classes=classes)
	imNames, bbox = bench.giveTestInstances(classes[0])
	ims = get_imdata(imNames, bbox, exp) 	 	
	if isPlot:
		plt.ion()
		fig = plt.figure()
		ax  = fig.add_subplot(111)
		for i in range(ims.shape[0]):
			#Convert CHW to HWC, flip BGR to RGB and undo the mean subtraction
			im = ims[i].transpose((1,2,0))
			im = im[:,:,(2,1,0)] + 128
			ax.imshow(im.astype(np.uint8))
			plt.show()
			plt.draw()
			ip = raw_input()
			if ip == 'q':
				return 
			plt.cla()
	return ims
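The plotting loop assumes the crops come back in the channel-first, BGR, mean-subtracted layout that Caffe-style data layers typically produce, and undoes each of those steps before display. A self-contained sketch of just that conversion (random data stands in for a real crop, and the +128 assumes a per-channel mean of roughly 128 was subtracted):

# Illustration only, not part of the original codebase
import numpy as np

im_chw = np.random.randint(-128, 128, size=(3, 64, 64)).astype(np.float32)
im = im_chw.transpose((1, 2, 0))    # CHW -> HWC
im = im[:, :, (2, 1, 0)] + 128      # BGR -> RGB, add the mean back
im = im.astype(np.uint8)
print(im.shape, im.dtype)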
Example No. 8
def eval_accuracy_nn(bench=None, netName='caffe_pose_fc5', classes=['car'], visMatches=False):
	modErr = []	
	if bench is None:
		bench = pbench.PoseBenchmark(classes=classes)
	#Train data
	trainDat   = transform_dict('train')
	keyList    = trainDat.keys()
	#result data
	resDat     = pickle.load(open('pascal_results/pascal_results_%s.pkl' % netName, 'rb'))
	resImList  = [l[0] for l in resDat['testInfo']]
	resBBox    = [l[1] for l in resDat['testInfo']]
	resKeys    = resDat['nnKeys']
	imNames, bbox = bench.giveTestInstances(classes[0])
	gtPoses       = bench.giveTestPoses(classes[0])
	preds = []
	if visMatches:
		plt.ion()
		fig = plt.figure()
		ax  = []
		count = 1
		for i in range(3):
			for j in range(2):
				ax.append(fig.add_subplot(2,3, count))
				count += 1
	
	exampleCount = 0	
	for nm, bb in zip(imNames, bbox):
		#nm is the name of the image for which we want to find the pose
		idx = [i for i,l in enumerate(resImList) if l == nm]
		if len(idx) > 1:
			#Several result entries share this image; disambiguate by bounding box
			for dd in idx:
				resBox  = resBBox[dd]
				isFound = match_bbox(resBox, bb)
				if isFound:
					idx = [dd]
					break
			if not isFound:
				pdb.set_trace()
		assert len(idx)==1
		idx = idx[0]
		#The 1-NN
		if visMatches:
			dirName  = osp.join(cfg.pths.pascal.dataDr, 'imCrop',
			                    'imSz256_pad36_hash', 'imSz256_pad36_hash')
			nnImNames = resKeys[idx]
			ax[0].imshow(get_imdata([nm], [bb], svMode=True)[0])
			for vv, visname in enumerate(nnImNames[0:5]):
				im = scm.imread(osp.join(dirName, visname))
				ax[vv+1].imshow(im)
			plt.show()
			plt.draw()
			ip = raw_input()
			if ip =='q':
				return
		#Use the pose of the nearest training neighbor as the prediction
		key = resKeys[idx][0]
		_, pred = trainDat[key]
		#print (gtPoses[exampleCount])
		modErr.append(find_theta_diff(pred[0], gtPoses[exampleCount][0], 'mod180'))
		exampleCount += 1
		#Append a dummy third pose component expected by the benchmark
		pred = pred + (0.,)
		preds.append(pred)
	errs   = bench.evaluatePredictions(classes[0], preds)
	modErr = np.array(modErr)
	#Median errors in degrees: the mod-180 azimuth error and the benchmark error
	mdModErr = 180 * np.median(modErr) / np.pi
	mdErr    = 180 * np.median(errs) / np.pi
	return mdModErr, mdErr
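find_theta_diff is not shown in this listing. One plausible reading of its 'mod180' mode is an angular difference in which poses that differ by a multiple of 180 degrees count as identical (useful when front/back flips should be ignored); a self-contained stand-in under that assumption:

# Illustration only, not part of the original codebase
import numpy as np

def theta_diff_mod180(a, b):
	d = np.abs(a - b) % np.pi     # fold the difference into [0, pi)
	return min(d, np.pi - d)      # distance on the half-circle, in radians

print(180 * theta_diff_mod180(0.1, np.pi + 0.2) / np.pi)   # ~5.7 degrees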
Example No. 9
def get_car_bench_obj():
	bench = pbench.PoseBenchmark(classes=['car'])
	return bench
Example No. 10
def save_evaluation_multiple_iters(exp):
	bench         = pbench.PoseBenchmark(classes=PASCAL_CLS)
	#Evaluate snapshots saved every 4k iterations, from 8k to 56k
	numIter = range(8000, 60000, 4000)
	for n in numIter:
		save_evaluation(exp, n, bench=bench)
Example No. 11
def save_evaluation_multiple():
	bench         = pbench.PoseBenchmark(classes=PASCAL_CLS)
	for num in range(7):
		exp, numIter = get_exp(num)
		save_evaluation(exp, numIter, bench=bench)