Example 1
def detect_wand(x2ds_data, x2ds_splits, mats, thresh=20. / 2000., x3d_threshold=1000000.):
	Ps = np.array([m[2] / np.linalg.norm(m[2][0, :3]) for m in mats], dtype=np.float32)  # projection matrices, normalised so ||P[0,:3]|| == 1
	wand_x3ds = np.array([[160, 0, 0], [0, 0, 0], [-80, 0, 0], [0, 0, -120], [0, 0, -240]], dtype=np.float32)  # reference 5-marker T-wand geometry (mm); must be compatible with label_T_wand
	x2ds_labels = -np.ones(x2ds_data.shape[0], dtype=np.int32)
	ISCV.label_T_wand(x2ds_data, x2ds_splits, x2ds_labels, 2.0, 0.5, 0.01, 0.07)
	x2ds_labels2 = x2ds_labels.copy()
	count = np.sum(x2ds_labels2 != -1) / 5  # number of cameras in which the full wand was labelled
	if count < 3: return None, None, None  # need the wand in at least three cameras
	x3ds, x3ds_labels, E_x2ds_single, x2ds_single_labels = Recon.solve_x3ds(x2ds_data, x2ds_splits, x2ds_labels2, Ps)
	count = ISCV.project_and_clean(x3ds, Ps, x2ds_data, x2ds_splits, x2ds_labels, x2ds_labels2, thresh ** 2, thresh ** 2, x3d_threshold)
	if count < 3: return None, None, None
	x3ds, x3ds_labels, E_x2ds_single, x2ds_single_labels = Recon.solve_x3ds(x2ds_data, x2ds_splits, x2ds_labels2, Ps)
	assert np.all(x3ds_labels == [0, 1, 2, 3, 4]), 'ERROR: Labels do not match'  # all five wand points must have been reconstructed
	assert np.max(x3ds ** 2) < 1e9, 'ERROR: Values out of bounds ' + repr(x3ds)
	mat = rigid_align_points(wand_x3ds, x3ds)
	x3ds = np.dot(wand_x3ds, mat[:3, :3].T) + mat[:, 3]
	return x3ds, x3ds_labels, x2ds_labels2
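
The helper rigid_align_points is not included in these excerpts. Below is a minimal self-contained sketch of a typical implementation (Kabsch alignment via SVD), under the assumption that it returns a 3x4 [R|t] matrix mapping its first argument onto its second, which is how the call site above applies it.

import numpy as np

def rigid_align_points_sketch(A, B):
	# Hypothetical stand-in for rigid_align_points (an assumption, not the library's code).
	# Returns a 3x4 [R|t] such that np.dot(A, R.T) + t approximates B, matching the way
	# detect_wand applies the result: np.dot(wand_x3ds, mat[:3, :3].T) + mat[:, 3].
	A = np.asarray(A, dtype=np.float32)
	B = np.asarray(B, dtype=np.float32)
	ca, cb = A.mean(axis=0), B.mean(axis=0)
	H = np.dot((A - ca).T, (B - cb))                  # 3x3 cross-covariance of centred points
	U, S, Vt = np.linalg.svd(H)
	D = np.diag([1., 1., np.sign(np.linalg.det(np.dot(Vt.T, U.T)))])
	R = np.dot(Vt.T, np.dot(D, U.T))                  # rotation, with reflection correction
	t = cb - np.dot(R, ca)
	return np.hstack([R, t[:, None]]).astype(np.float32)
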
Example 2
                    img[int(r.sy - dy):int(r.sy + dy),
                        int(r.sx - dx):int(r.sx + dx), 0] = 128
    else:
        pts0 = pts1 = []
    return (pts0, pts1)


def tighten_calibration(
    (x3s, x3s_labels), (x2s, x2s_splits, x2s_labels), mats):
    x3s_original = x3s.copy()
    x2s_labels_original = x2s_labels.copy()
    for it in range(10):
        x2d_threshold = 0.08  # - it * 0.04/50.
        Ps = np.array([m[2] / (m[0][0, 0]) for m in mats], dtype=np.float32)
        u2s, _ = Calibrate.undistort_dets(x2s, x2s_splits, mats)
        x3s, x3s_labels, E, x2d_labels = Recon.solve_x3ds(
            u2s, x2s_splits, x2s_labels_original, Ps, True)
        clouds = ISCV.HashCloud2DList(u2s, x2s_splits, x2d_threshold)
        sc, x2s_labels, _ = Label.project_assign(clouds, x3s, x3s_labels, Ps,
                                                 x2d_threshold)
        print 'it', it, sc
        tiara_xis = np.where(x3s_labels < len(VICON_tiara_x3ds))[0]  # 3D points that belong to the tiara
        tiara_lis = x3s_labels[tiara_xis]
        tiara_true = VICON_tiara_x3ds[tiara_lis] + [0, 1000, 0]
        tiara_xs = x3s[tiara_xis]  # reconstructed tiara positions
        # now solve the tiara into place by finding a rigid transform
        RT, inliers = Calibrate.rigid_align_points_inliers(tiara_xs,
                                                           tiara_true,
                                                           scale=True)
        x3s = np.dot(x3s, RT[:3, :3].T) + RT[:, 3]
        x3s[tiara_xis] = tiara_true
        singles = np.where([x in list(x2d_labels) for x in x2s_labels])[0]
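
A recurring convention in all of these examples: the splits arrays (x2s_splits / x2ds_splits) hold per-camera boundary offsets into the flat detection array, with one extra trailing entry, so camera ci owns x2ds_data[splits[ci]:splits[ci+1]]. A minimal self-contained illustration with made-up values:

import numpy as np

# Made-up detections: camera 0 saw 2 points, camera 1 saw 3, camera 2 saw none.
x2ds_data = np.array([[0.10, 0.20], [0.30, 0.10],
                      [0.50, 0.50], [0.60, 0.40], [0.70, 0.30]], dtype=np.float32)
x2ds_splits = np.array([0, 2, 5, 5], dtype=np.int32)  # length == number of cameras + 1
per_camera = [x2ds_data[c0:c1] for c0, c1 in zip(x2ds_splits[:-1], x2ds_splits[1:])]
assert [len(x) for x in per_camera] == [2, 3, 0]  # detections per camera
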
Example 3
def solve_skeleton_from_2d(x2ds,
                           splits,
                           labels,
                           effectorLabels,
                           Ps,
                           skelDict,
                           effectorData,
                           rootMat,
                           outerIts=5):
    """
	Given a posed skeleton and some labelled 2d points, solve the skeleton to better fit the points.
	
	Args:
		x2ds (float[][2]): 2d Detections from all cameras
		splits (int[]): list of camera indices
		labels (int[]): Assigned labels of the x2ds
		effectorLabels (?): For each effector, which label it depends on.
			Joints may be effected by a number of labellings.
		Ps (float[][3][4]): Projection matrices of the cameras.
		skelDict (GskelDict): The Skeleton to process
		effectorData (?): What's this?
		rootMat (float[3][4]): reference frame of the Skeleton.
		outerIts (int): IK Iterations to solve the skeleton. Default = 5.
		
	Returns:
		float[][3]: (x3ds) - the resulting 3D reconstructions.
		int[]: (x3d_labels) - the labels for the 3D points.
		??: (E[singles]) - Equations describing 2D detections not born of the 3D yet.
		int[] (x2d_labels) - labels for the 2D contributions.
		
	Requires:
		Recon.solve_x3ds
		
	"""
    x3ds, x3d_labels, E, x2d_labels = Recon.solve_x3ds(x2ds, splits, labels,
                                                       Ps)

    # effectorLabels tells, for each effector, which label it depends on
    # effectorLabels[ei] = li
    # given a list of labels, collect all the effectors that depend on those labels; and then find the reordering of the
    # original labels (which may include duplicates) that matches the effectors.
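    # Worked example with made-up numbers: if effectorLabels = [7, 3, 3, 9] and
    # x3d_labels = [3, 9], then lbl3_inv maps label 3 -> 0 and label 9 -> 1, so
    # tmp3 = [-1, 0, 0, 1]; ae3 = [1, 2, 3] lists the effectors whose label was
    # reconstructed, and tmp3[ae3] = [0, 0, 1] picks the matching rows of x3ds
    # (duplicates allowed, since several effectors may share one label).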

    numLabels = np.max(effectorLabels) + 1

    lbl3_inv = -np.ones(numLabels + 1, dtype=np.int32)
    lbl3_inv[x3d_labels] = range(len(x3d_labels))
    tmp3 = lbl3_inv[effectorLabels]
    ae3 = np.array(np.where(tmp3 != -1)[0], dtype=np.int32)
    tmp3 = tmp3[ae3]

    lbl2_inv = -np.ones(numLabels + 1, dtype=np.int32)
    lbl2_inv[x2d_labels] = range(len(x2d_labels))
    tmp2 = lbl2_inv[effectorLabels]
    ae2 = np.array(np.where(tmp2 != -1)[0], dtype=np.int32)
    tmp2 = tmp2[ae2]
    #
    solveIK1Ray(skelDict,
                effectorData,
                x3ds.take(tmp3, axis=0),
                ae3,
                E.take(tmp2, axis=0),
                ae2,
                outerIts=outerIts,
                rootMat=rootMat)
    return x3ds, x3d_labels, E, x2d_labels
Example 4
def generate_wand_correspondences(wand_frames, mats2, camera_solved, rigid_filter=True, error_thresholds=None, x3d_threshold=1000000.):
	"""
	Args:
		wand_frames
		mats2
		camera_solved
		rigid_filter = True
		error_thresholds = None
		
	Returns:
		x2s_cameras
		x3s_cameras
		frames_cameras
		num_kept_frames
		
	Requires:
		ISCV.undistort_points
		ISCV.label_T_wand
		Recon.solve_x3ds
		ISCV.project_and_clean
		
	"""

	def get_order(labels):
		"""
		Return the x2d index of the five points of the T Wand
		
		Args:
			labels (int[]): per-detection wand labels for one camera (0..4, or -1 if unlabelled).
			
		Returns:
			int[5]: "order" label indexes
			
		"""
		try:
			l = list(labels)
			order = [l.index(x) for x in xrange(5)]
			return order
		except ValueError:  # a label in 0..4 is missing for this camera
			return None
	
	numCameras = len(mats2)
	Ps2 = np.array([m[2]/np.linalg.norm(m[2][0,:3]) for m in mats2],dtype=np.float32)
	x2ds_frames = []
	x2ds_labels_frames = []
	x2ds_splits_frames = []
	x3ds_frames = []
	# TODO wand geo should be passed in? must be compatible with the label_T_wand
	wand_x3ds = np.array([[160,0,0],[0,0,0],[-80,0,0],[0,0,-120],[0,0,-240]],dtype=np.float32)
	thresh = (20./2000.)**2 if error_thresholds is None else error_thresholds**2 # projection must be close to be included for intersection
	num_kept_frames = 0
	for fi,(x2ds_raw_data,x2ds_splits) in enumerate(wand_frames): # intersect over all frames with current solved cameras
		x2ds_data,_ = undistort_dets(x2ds_raw_data, x2ds_splits, mats2)
		x2ds_labels = -np.ones(x2ds_data.shape[0],dtype=np.int32)
		ISCV.label_T_wand(x2ds_data, x2ds_splits, x2ds_labels, 2.0, 0.5, 0.01, 0.07)
		x2ds_labels2 = x2ds_labels.copy()
		for cs,c0,c1 in zip(camera_solved,x2ds_splits[:-1],x2ds_splits[1:]): # remove labels for unsolved cameras
			if not cs: x2ds_labels2[c0:c1] = -1
		count = np.sum(x2ds_labels2 != -1)/5
		if count >= 3: # only use points seen in three solved cameras
			x3ds, x3ds_labels, E_x2ds_single, x2ds_single_labels = Recon.solve_x3ds(x2ds_data, x2ds_splits, x2ds_labels2, Ps2)
			count = ISCV.project_and_clean(x3ds, Ps2, x2ds_data, x2ds_splits, x2ds_labels, x2ds_labels2, thresh, thresh, x3d_threshold)
			if count < 3: continue
			x3ds, x3ds_labels, E_x2ds_single, x2ds_single_labels = Recon.solve_x3ds(x2ds_data, x2ds_splits, x2ds_labels2, Ps2)
			#if not np.all(x3ds_labels == [0,1,2,3,4]): print 'ERROR'; continue # skip if somehow not all points seen
			#if np.max(x3ds**2) > 1e9: print 'ERROR oh oh',x3ds; continue
			if rigid_filter: # enforce x3ds must be a rigid transform of the wand
				mat = rigid_align_points(wand_x3ds, x3ds)
				x3ds = np.dot(wand_x3ds,mat[:3,:3].T) + mat[:,3]
			for cs,c0,c1 in zip(camera_solved,x2ds_splits[:-1],x2ds_splits[1:]): #copy 'cleaned' labels for solved cameras to avoid bad data
				if cs: x2ds_labels[c0:c1] = x2ds_labels2[c0:c1]
			x2ds_frames.append(x2ds_raw_data)
			x2ds_splits_frames.append(x2ds_splits)
			x2ds_labels_frames.append(x2ds_labels) # CBD not x2ds_labels2, otherwise we can't add cameras!
			x3ds_frames.append(x3ds)
			num_kept_frames+=1

	# TODO collapse this into the code above and clean up
	x2s_cameras,x3s_cameras,frames_cameras = [],[],[]
	for ci in xrange(numCameras):
		orders = [get_order(xlf[xsf[ci]:xsf[ci+1]]) for xlf,xsf in zip(x2ds_labels_frames,x2ds_splits_frames)]
		which_frames = np.where([o is not None for o in orders])[0]
		if len(which_frames) == 0:
			x2s,x3s = np.zeros((0,2),dtype=np.float32),np.zeros((0,3),dtype=np.float32)
		else:
			x2s = np.vstack([x2ds_frames[fi][x2ds_splits_frames[fi][ci]:x2ds_splits_frames[fi][ci+1]][orders[fi]] for fi in which_frames])
			x3s = np.vstack([x3ds_frames[fi] for fi in which_frames])
		x2s_cameras.append(x2s)
		x3s_cameras.append(x3s)
		frames_cameras.append(which_frames)

	return x2s_cameras,x3s_cameras,frames_cameras,num_kept_frames
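
One hypothetical follow-up, not shown in the source: the per-camera correspondences could be used to re-fit each solved camera, reusing the cv2_solve_camera_from_3d and makeMat helpers that appear in Example 5. The combination and the minimum-frame count below are assumptions.

# Hypothetical sketch (an assumption): refine each solved camera from its accumulated wand correspondences.
x2s_cameras, x3s_cameras, frames_cameras, num_kept_frames = generate_wand_correspondences(wand_frames, mats2, camera_solved)
for ci in xrange(len(mats2)):
	if not camera_solved[ci] or len(frames_cameras[ci]) < 10: continue  # minimum frame count is an assumption
	cv2_mat = cv2_solve_camera_from_3d(x3s_cameras[ci], x2s_cameras[ci])  # same helper and return convention as in Example 5
	mats2[ci] = makeMat(cv2_mat[0], cv2_mat[1], mats2[ci][5])  # rebuild the camera mat, as in Example 5
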
Example 5
def boot_cameras_from_wand(wand_frames, cameras_info, lo_focal_threshold=0.5, hi_focal_threshold=4.0, cv_2d_threshold=0.02):
	"""
	Attempt to boot position of cameras from 2d data containing a wand. This is assumed to be 5 marker T wand.
	
	TODO: Generalise size of wand to allow 120mm, 240mm, 780mm etc variations. Also use actual measurements of wand.

	Args:
		wand_frames
		cameras_info
		lo_focal_threshold=0.5
		hi_focal_threshold=4.0
		cv_2d_threshold=0.02
		
	Returns:
		Mat[]: "mats2" - list of GRIP Camera Mats of solved or uninitalised cameras.
		bool[]: "camera_solved" flag to show which cameras have been solved in this process.
		
	Requires:
		ISCV.label_T_wand
		Recon.solve_x3ds
		np.linalg.norm
		
	"""
	
	numCameras = len(cameras_info)
	numFrames = len(wand_frames)
	camera_solved = [False]*numCameras
	# use the wand to boot the first camera
	x2ds_data,x2ds_splits = wand_frames[0]
	x2ds_labels = -np.ones(x2ds_data.shape[0], dtype=np.int32)
	ISCV.label_T_wand(x2ds_data, x2ds_splits, x2ds_labels, 2.0, 0.5, 0.01, 0.07)
	first_x3ds = np.array([[160, 0, 0],[0, 0, 0],[-80, 0, 0],[0, 0, -120],[0, 0, -240]], dtype=np.float32)
	mats2 = [None]*numCameras
	first_good_cameras = [None]*numCameras
	for ci,(c0,c1) in enumerate(zip(x2ds_splits[:-1], x2ds_splits[1:])):
		x2ds = x2ds_data[c0:c1]
		labels = x2ds_labels[c0:c1]
		try:
			order = [list(labels).index(x) for x in range(5)]
		except ValueError:  # the full wand was not labelled in this camera
			mats2[ci] = makeUninitialisedMat(ci, cameras_info[ci])
			camera_solved[ci] = False
			continue
		print ('found wand in camera',ci)
		first_good_cameras[ci] = x2ds[order]
		cv2_mat = cv2_solve_camera_from_3d(first_x3ds, x2ds[order])
		rms = cv2_mat[2]
		mats2[ci] = makeMat(cv2_mat[0], cv2_mat[1], cameras_info[ci])
		camera_solved[ci] = True
		if mats2[ci][0][0,0] < lo_focal_threshold or mats2[ci][0][0,0] > hi_focal_threshold or rms > cv_2d_threshold:
			print ('resetting bad camera',ci,'with focal',mats2[ci][0][0,0],'and error',rms)
			mats2[ci] = makeUninitialisedMat(ci,cameras_info[ci])
			camera_solved[ci] = False
	Ps2 = np.array([m[2]/m[0][0,0] for m in mats2],dtype=np.float32)
	x2ds_labels2 = x2ds_labels.copy()
	for ci in xrange(numCameras): # remove unsolved cameras
		if not camera_solved[ci]: x2ds_labels2[x2ds_splits[ci]:x2ds_splits[ci+1]] = -1
	x3ds_ret, x3ds_labels, E_x2ds_single, x2ds_single_labels = Recon.solve_x3ds(x2ds_data, x2ds_splits, x2ds_labels2, Ps2)

	print (x3ds_ret,first_x3ds) # all points should be within 2.5mm of 'true'
	assert(np.allclose(x3ds_ret, first_x3ds, 0.0, 2.5))

	# so, we booted some cameras and they reconstruct the wand in the correct place.
	# unfortunately, there is still an ambiguity: something like the Necker cube (two different ways we can perceive the wand).
	# as soon as the wand moves, we can resolve this
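	# (each camera that sees the moved wand is re-solved from both wand poses at once, which
	#  selects the single consistent interpretation; solved cameras that cannot contribute a
	#  second, sufficiently different view are reset below)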
	for mfi in xrange(40,numFrames,20):
		print (mfi)
		x2ds_data,x2ds_splits = wand_frames[mfi]
		x2ds_labels = -np.ones(x2ds_data.shape[0],dtype=np.int32)
		ISCV.label_T_wand(x2ds_data, x2ds_splits, x2ds_labels, 2.0, 0.5, 0.01, 0.07)
		solved_cameras = np.where(camera_solved)[0]
		good_cameras = []
		second_good_cameras = [None]*numCameras
		print (solved_cameras)
		for ci in solved_cameras:
			c0,c1 = x2ds_splits[ci:ci+2]
			x2ds = x2ds_data[c0:c1]
			labels = x2ds_labels[c0:c1]
			try:
				order = [list(labels).index(x) for x in range(5)]
			except ValueError:  # wand not fully labelled in this camera
				continue
			diff = x2ds[order] - first_good_cameras[ci]
			if np.linalg.norm(diff) < 0.02*len(diff): continue # must have moved 'enough'
			good_cameras.append(ci)
			second_good_cameras[ci] = x2ds[order]
		print (good_cameras)
		if len(good_cameras) >= 3: # this is the good frame...
			x2ds_labels2 = x2ds_labels.copy()
			for ci in xrange(numCameras): # remove unsolved cameras
				if not ci in good_cameras: x2ds_labels2[x2ds_splits[ci]:x2ds_splits[ci+1]] = -1
			second_x3ds, second_x3ds_labels, _,_ = Recon.solve_x3ds(x2ds_data, x2ds_splits, x2ds_labels2, Ps2)
			for ci in solved_cameras:
				if ci not in good_cameras:
					print ('resetting bad camera',ci)
					mats2[ci] = makeUninitialisedMat(ci,mats2[ci][5])
					camera_solved[ci] = False
			for ci in good_cameras:
				cv2_mat = cv2_solve_camera_from_3d(np.concatenate((first_x3ds,second_x3ds)), np.concatenate((first_good_cameras[ci],second_good_cameras[ci])))
				rms = cv2_mat[2]
				print (ci,rms)
				mats2[ci] = makeMat(cv2_mat[0],cv2_mat[1],mats2[ci][5])
				camera_solved[ci] = True
			break # finished
	return mats2, camera_solved