def barndoorRmsToPxr(top=0.0, bottom=0.0, left=0.0, right=0.0, mode='expand', time=1.0):
    # this function converts the barndoor attributes of an RMS light into parameters on a Pxr light filter
    # note: each input angle should be greater than 0 and less than 90 degrees
    # also, the barn filter must be selected in the Katana scene graph before running this function
    # this function changes the values of the selected nodes / scene graph locations
    min_angle = 1.0
    top = min_angle if top < min_angle else (90.0 - min_angle if top > (90.0 - min_angle) else top)
    bottom = min_angle if bottom < min_angle else (90.0 - min_angle if bottom > (90.0 - min_angle) else bottom)
    left = min_angle if left < min_angle else (90.0 - min_angle if left > (90.0 - min_angle) else left)
    right = min_angle if right < min_angle else (90.0 - min_angle if right > (90.0 - min_angle) else right)
    max_angle = max([top, bottom, left, right])

    # get scene graph locations of the selected light filters
    locations = kcf.getSelectedLocations()
    nodes = kcf.locationsToNodes(locations)
    for (l, n) in nodes.iteritems():
        if not n:
            continue

        # get parent light matrix
        light_location = os.path.dirname(l)
        lgt_matrix = tf.list_to_matrix(kcf.getWorldXform(light_location)[0])
        scale, shear, angles, trans, persp = tf.decompose_matrix(lgt_matrix)
        if scale[0] <= 0 or scale[1] <= 0 or scale[2] <= 0:
            print 'Error: ' + os.path.basename(light_location) + ' has zero scale, ignored!'
            continue

        # calculate the distance to the light so that the angle between the light and filter meets the max angle
        dist_to_light_x = math.tan(max_angle / 180.0 * math.pi) * scale[0]
        dist_to_light_y = math.tan(max_angle / 180.0 * math.pi) * scale[1]
        if mode == 'expand':
            dist_to_light = max(dist_to_light_x, dist_to_light_y)
        else:
            dist_to_light = min(dist_to_light_x, dist_to_light_y)
        # dist_to_light = 1  # debug override disabled: it discarded the mode-dependent distance computed above

        # calculate the refined shape of the barn door
        top_edge = (dist_to_light / math.tan(top / 180.0 * math.pi) - scale[1]) / scale[1]
        bottom_edge = (dist_to_light / math.tan(bottom / 180.0 * math.pi) - scale[1]) / scale[1]
        left_edge = (dist_to_light / math.tan(left / 180.0 * math.pi) - scale[0]) / scale[0]
        right_edge = (dist_to_light / math.tan(right / 180.0 * math.pi) - scale[0]) / scale[0]

        # set the parameters on the selected light filters
        # a light filter is a group, so get its children and set the transform parameters
        light_create_nodes = [i for i in n.getChildren() if i.getType().lower() == 'lightcreate']
        if not light_create_nodes:
            print 'Error: failed to find lightCreate node in ' + os.path.basename(l) + "'s children, ignored!"
            continue
        param_value_dict = {'transform.translate.x': [0, time],
                            'transform.translate.y': [0, time],
                            'transform.translate.z': [-dist_to_light, time]}
        kcf.setParameters(param_value_dict, light_create_nodes[0])

        # set the refined edges of the barn door
        light_filter_nodes = [i for i in n.getChildren() if i.getType().lower() == 'material']
        if not light_filter_nodes:  # was mistakenly re-checking light_create_nodes
            print 'Error: failed to find lightFilter material in ' + os.path.basename(l) + "'s children, ignored!"
            continue
        param_value_dict = {'top.value': [top_edge, time], 'bottom.value': [bottom_edge, time],
                            'left.value': [left_edge, time], 'right.value': [right_edge, time]}
        kcf.setParameters(param_value_dict, light_filter_nodes[0])
def align_centroids(centroids1, centroids2):
    """Compute alignment matrix for centroids2 into centroids1 coordinates.

       Arguments:
         centroids1: Nx3 array in Z,Y,X order
         centroids2: Mx3 array in Z,Y,X order

       Returns:
         M: 4x4 transformation matrix
         angles: decomposed rotation vector from M (in radians)
    """
    pc1 = centroids2pointcloud(centroids_zx_swap(centroids1))
    pc2 = centroids2pointcloud(centroids_zx_swap(centroids2))
    results = pcl.registration.icp_nl(pc2, pc1)
    if not results[0]:
        raise ValueError("point-cloud registration did not converge")
    M = results[1]
    parts = decompose_matrix(M.T)
    angles = parts[2]

    # sanity check that our transform is using the same math as pcl did
    nuc1 = centroids_zx_swap(centroids1)
    nuc2 = centroids_zx_swap(transform_centroids(M, centroids2))
    diff = nuc2 - results[2]
    assert diff.min() <= 0.00001, 'Our transform differs from PCL by %s minimum.' % diff.min()
    assert diff.max() <= 0.001, 'Our transform differs from PCL by %s maximum.' % diff.max()
    return M, angles
def hmat_to_trans_rot(hmat):
    '''
    Converts a 4x4 homogeneous rigid transformation matrix to a
    translation and a quaternion rotation.
    '''
    _scale, _shear, angles, trans, _persp = transformations.decompose_matrix(hmat)
    rot = transformations.quaternion_from_euler(*angles)
    return trans, rot
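A minimal usage sketch (assuming `transformations` is Christoph Gohlke's transformations.py, which provides `compose_matrix` alongside `decompose_matrix`): build a rigid transform, then recover its translation and quaternion.

import numpy as np
import transformations

# rotate 90 degrees about Z, then translate
hmat = transformations.compose_matrix(angles=[0, 0, np.pi / 2],
                                      translate=[1.0, 2.0, 3.0])
trans, rot = hmat_to_trans_rot(hmat)
print(trans)  # approximately [1. 2. 3.]
print(rot)    # unit quaternion equivalent to the 90 degree Z rotation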
def georef(ply, camCoords, output):
    print "Inside georef", os.getcwd()
    header = ""
    camImgCoords = readply(camCoords)
    print "camImgCoords", camImgCoords
    grCord = np.genfromtxt("GroundCoordinates.txt", usecols=(0, 1, 2))
    camGrCoords = np.matrix(grCord, dtype=np.float64, copy=False)
    print "camGrCoords", camGrCoords
    mdlCoords = readply(ply, 12, 2 * numcam)
    print "mdlCoords", mdlCoords
    Tr = trans.superimposition_matrix(camImgCoords.T, camGrCoords.T, True)
    print "Transformation", Tr
    print trans.decompose_matrix(Tr)
    grCoords = applyTransform(Tr, mdlCoords.T)
    print "grCoords", grCoords
    tempclr = grCoords[:, :-1]
    writeply("", tempclr, output)
def _decompose(self):
    """
    returns tuple of:
      scale : vector of 3 scaling factors
      shear : list of shear factors for x-y, x-z, y-z
      angles : list of euler angles about sxyz
      translate : vector of 3
      perspective : perspective partition
    """
    return xform.decompose_matrix(self.matrix)
def vertical_plane_of_markers(id_first, rows, cols, spacing, origin_pose):
    xforms = [
        origin_pose @ xf.compose_matrix(
            None, None,
            [0, -math.pi / 2, 0],
            [0, (c - (cols - 1) / 2) * spacing, r * spacing])
        for r in range(rows) for c in range(cols)
    ]
    decompositions = [xf.decompose_matrix(xform) for xform in xforms]
    return [[
        id_first + idx,
        d[3][0], d[3][1], d[3][2],
        d[2][0], d[2][1], d[2][2]
    ] for idx, d in enumerate(decompositions)]
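A short usage sketch with made-up values (assumes `xf` is the transformations module and that the function above is in scope): generate a 2x3 grid of marker poses anchored at a translated origin.

import math
import transformations as xf

origin_pose = xf.translation_matrix([0.5, 0.0, 1.0])  # hypothetical plane origin
markers = vertical_plane_of_markers(id_first=10, rows=2, cols=3,
                                    spacing=0.2, origin_pose=origin_pose)
for m in markers:
    # each entry is [marker_id, x, y, z, roll, pitch, yaw]
    print(m)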
def georef(ply, camCoords, output):
    print "Inside georef", os.getcwd()
    header = ""
    camImgCoords, head1 = readply(camCoords, 0)
    print "camImgCoords", camImgCoords
    grCord = np.genfromtxt("GroundCoordinates.txt", usecols=(0, 1, 2))
    camGrCoords = np.matrix(grCord, dtype=np.float64, copy=False)
    print "camGrCoords", camGrCoords
    mdlCoords, header = readply(ply, 13)
    print "mdlCoords", mdlCoords
    Tr = trans.superimposition_matrix(camImgCoords.T, camGrCoords.T, True)
    print "Transformation", Tr
    print trans.decompose_matrix(Tr)
    grCoords = applyTransform(Tr, mdlCoords.T)
    print "grCoords", grCoords
    colorCoords, head3 = readcolorply(ply, 13)
    print "color", colorCoords
    tempclr = np.hstack((grCoords[:, :-1], colorCoords))
    print "tempclr", tempclr
    print "header", head3
    writeply("", tempclr, output)
def eliminateAffineScaling(params):
    # adapt parameters to image spacing
    params = n.array(params)
    oldMatrix = n.diag((1., 1., 1., 1.))
    oldMatrix[:3, :3] = params[:9].reshape((3, 3))
    # print oldMatrix
    decomp = transformations.decompose_matrix(oldMatrix)
    # print decomp[0]
    newMatrix = transformations.compose_matrix(scale=n.ones(3), shear=decomp[1], angles=decomp[2])
    newParams = newMatrix[:3, :3].flatten()
    newTrans = params[9:] * n.array([decomp[0][0], 1., 1.])
    newParams = n.concatenate([newParams, newTrans], 0)
    return newParams
def recenter(cam_dict):
    src = [[], [], [], []]  # current camera locations
    dst = [[], [], [], []]  # original camera locations
    for image in proj.image_list:
        if image.name in cam_dict:
            newned = cam_dict[image.name]['ned']
        else:
            newned, ypr, quat = image.get_camera_pose()
        src[0].append(newned[0])
        src[1].append(newned[1])
        src[2].append(newned[2])
        src[3].append(1.0)
        origned, ypr, quat = image.get_camera_pose()
        dst[0].append(origned[0])
        dst[1].append(origned[1])
        dst[2].append(origned[2])
        dst[3].append(1.0)
        print "%s %s" % (origned, newned)
    Aff3D = transformations.superimposition_matrix(src, dst, scale=True)
    print "Aff3D:\n", Aff3D
    scale, shear, angles, trans, persp = transformations.decompose_matrix(Aff3D)
    R = transformations.euler_matrix(*angles)
    print "R:\n", R

    # rotate, translate, scale the group of camera positions to best
    # align with original locations
    update_cams = Aff3D.dot(np.array(src))
    print update_cams[:3]
    for i, p in enumerate(update_cams.T):
        key = proj.image_list[i].name
        if not key in cam_dict:
            cam_dict[key] = {}
        ned = [p[0], p[1], p[2]]
        print "ned:", ned
        cam_dict[key]['ned'] = ned
        if False:
            # adjust the camera projection matrix (rvec) to rotate by the
            # amount of the affine transformation as well (this should now
            # be better accounted for in solvePnP2()
            rvec = cam_dict[key]['rvec']
            tvec = cam_dict[key]['tvec']
            Rcam, jac = cv2.Rodrigues(rvec)
            print "Rcam:\n", Rcam
            Rcam_new = R[:3, :3].dot(Rcam)
            print "Rcam_new:\n", Rcam_new
            rvec, jac = cv2.Rodrigues(Rcam_new)
            cam_dict[key]['rvec'] = rvec
            tvec = -np.matrix(Rcam_new) * np.matrix(ned).T
            cam_dict[key]['tvec'] = tvec
def align_centroids(centroids1, centroids2, maxiters=50):
    """Compute alignment matrix for centroids2 into centroids1 coordinates.

       Arguments:
         centroids1: Nx3 array in Z,Y,X order
         centroids2: Mx3 array in Z,Y,X order

       Returns:
         M: 4x4 transformation matrix
         angles: decomposed rotation vector from M (in radians)
    """
    def make_pc_poly(a):
        points = vtk.vtkPoints()
        verts = vtk.vtkCellArray()
        for i in range(a.shape[0]):
            verts.InsertNextCell(1)
            verts.InsertCellPoint(points.InsertNextPoint(a[i, :]))
        poly = vtk.vtkPolyData()
        poly.SetPoints(points)
        poly.SetVerts(verts)
        return poly

    def do_icp(src, tgt):
        icp = vtk.vtkIterativeClosestPointTransform()
        icp.SetSource(src)
        icp.SetTarget(tgt)
        icp.GetLandmarkTransform().SetModeToRigidBody()
        icp.SetMaximumNumberOfIterations(maxiters)
        icp.StartByMatchingCentroidsOn()
        icp.Modified()
        icp.Update()
        M = icp.GetMatrix()
        return np.array([[M.GetElement(i, j) for j in range(4)] for i in range(4)],
                        dtype=np.float64)

    pc1 = make_pc_poly(centroids_zx_swap(centroids1).astype(np.float32))
    pc2 = make_pc_poly(centroids_zx_swap(centroids2).astype(np.float32))
    M = do_icp(pc2, pc1)
    parts = decompose_matrix(M)
    angles = parts[2]
    return M, angles
def get_transformation(transformation):
    transformation_matrix_str = transformation[
        transformation.find('(') + 1:transformation.rfind(')')]
    transformation_matrix = extract_matrix_from_str(transformation_matrix_str)
    decomposed_matrix = decompose_matrix(transformation_matrix)
    scale = decomposed_matrix[0]
    rotation = decomposed_matrix[2]
    translation = decomposed_matrix[3]
    transformation_spec = {
        'x': translation[0],
        'y': translation[1],
        'z': translation[2],
        'r0': rotation[0],
        'r1': rotation[1],
        'r2': rotation[2],
        'sx': scale[0],
        'sy': scale[1],
        'sz': scale[2],
    }
    return transformation_spec
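For illustration, a minimal sketch of the decompose/compose round trip that the spec above relies on; it uses plain transformations.py calls, and the dict keys simply mirror get_transformation (the numbers are arbitrary).

import numpy as np
from transformations import compose_matrix, decompose_matrix

M = compose_matrix(scale=[2.0, 2.0, 2.0],
                   angles=[0.0, 0.0, np.deg2rad(30)],
                   translate=[10.0, -5.0, 0.0])
scale, shear, angles, translate, persp = decompose_matrix(M)
spec = {'x': translate[0], 'y': translate[1], 'z': translate[2],
        'r0': angles[0], 'r1': angles[1], 'r2': angles[2],
        'sx': scale[0], 'sy': scale[1], 'sz': scale[2]}
print(spec)  # recovers the scale, rotation, and translation used above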
def transform_cams(A, cam_dict):
    # construct an array of camera positions
    src = [[], [], [], []]
    for image in proj.image_list:
        new = cam_dict[image.name]['ned']
        src[0].append(new[0])
        src[1].append(new[1])
        src[2].append(new[2])
        src[3].append(1.0)

    # extract the rotational portion of the affine matrix
    scale, shear, angles, trans, persp = transformations.decompose_matrix(A)
    R = transformations.euler_matrix(*angles)
    # print "R:\n", R

    # full transform the camera ned positions to best align with
    # original locations
    update_cams = A.dot(np.array(src))
    # print update_cams[:3]
    for i, p in enumerate(update_cams.T):
        key = proj.image_list[i].name
        if not key in cam_dict:
            cam_dict[key] = {}
        ned = [p[0], p[1], p[2]]
        # print "ned:", ned
        cam_dict[key]['ned'] = ned

        # adjust the camera projection matrix (rvec) to rotate by the
        # amount of the affine transformation as well
        rvec = cam_dict[key]['rvec']
        tvec = cam_dict[key]['tvec']
        Rcam, jac = cv2.Rodrigues(rvec)
        # print "Rcam:\n", Rcam
        Rcam_new = R[:3, :3].dot(Rcam)
        # print "Rcam_new:\n", Rcam_new
        rvec, jac = cv2.Rodrigues(Rcam_new)
        cam_dict[key]['rvec'] = rvec
        tvec = -np.matrix(Rcam_new) * np.matrix(ned).T
        cam_dict[key]['tvec'] = tvec
def save(self, *largs):
    global args
    data = {}
    data['args'] = args
    data['sheets'] = {}
    for s in self.sheet_widgets:
        data['sheets'][s] = []
        for p in self.sheet_widgets[s]:
            print p
            if p.part is not None:
                print str(p.part.name) + " self.center" + str(p.center) + \
                    " startcentre" + str(p.startcentre) + " pos=" + str(p.pos) + \
                    " startpos=" + str(p.startpos)
                rec = {}
                rec['name'] = str(p.part.name)
                rec['translate'] = (p.pos[0] - p.startpos[0], p.pos[1] - p.startpos[1])
                m = p.get_window_matrix(x=p.center[0], y=p.center[1])
                # print m
                m2 = numpy.matrix(m.tolist())
                # print m2
                d = transformations.decompose_matrix(m2)
                # print "rotate="+str(d[2][2]/math.pi*180)
                # print "translate="+str(d[3])
                # print "perspective="+str(d[4])
                # print "position="+ str( [d[4][0]/d[4][3], d[4][1]/d[4][3] ])
                # print "kivy_translate="+str(rec['translate'])
                # rec['rotate'] = math.atan2(m[4], m[0])/math.pi*180
                rec['rotate'] = d[2][2] / math.pi * 180
                rec['startcentre'] = [0, 0]
                # rec['startpos'] = p.startpos
                rec['startpos'] = [d[4][0] / d[4][3], d[4][1] / d[4][3]]
                if p.deleted == 0:
                    data['sheets'][s].append(rec)
            # print p.get_window_matrix(0,0)
    h = open('layout_file', 'w')
    json.dump(data, h)
def __init__(self, T):
    if (type(T) == str):
        Tfile = T
        # Load transformation
        Tfis = open(Tfile, 'r')
        lines = []
        lines = Tfis.readlines()
        self.scale = float(lines[0])
        self.Ss = tf.scale_matrix(self.scale)
        quat_line = lines[1].split(" ")
        self.quat = tf.unit_vector(np.array([float(quat_line[3]),
                                             float(quat_line[0]),
                                             float(quat_line[1]),
                                             float(quat_line[2])]))
        self.Hs = tf.quaternion_matrix(self.quat)
        trans_line = lines[2].split(" ")
        self.Ts = np.array([float(trans_line[0]),
                            float(trans_line[1]),
                            float(trans_line[2])])
        Tfis.close()
        self.Rs = self.Hs.copy()[:3, :3]
        self.Hs[:3, 3] = self.Ts[:3]
        self.Hs = self.Ss.dot(self.Hs)  # to add again
    elif (type(T) == np.ndarray):
        self.Hs = T
        scale, shear, angles, trans, persp = tf.decompose_matrix(T)
        self.quat = tf.quaternion_from_euler(angles[0], angles[1], angles[2])
        self.Rs = tf.quaternion_matrix(self.quat)
        self.scale = scale[0]
        self.Ts = trans / self.scale

    print "Loaded Ground Truth Transformation: "
    print self.Hs
def save(self, *largs):
    global args
    data = {}
    data["args"] = args
    data["sheets"] = {}
    for s in self.sheet_widgets:
        data["sheets"][s] = []
        for p in self.sheet_widgets[s]:
            print p
            if p.part is not None:
                print p.part.name + " self.center" + str(p.center) + " startcentre" + str(
                    p.startcentre) + " pos=" + str(p.pos) + " startpos=" + str(p.startpos)
                rec = {}
                rec["name"] = p.part.name
                rec["translate"] = (p.pos[0] - p.startpos[0], p.pos[1] - p.startpos[1])
                m = p.get_window_matrix(x=p.center[0], y=p.center[1])
                # print m
                m2 = numpy.matrix(m.tolist())
                # print m2
                d = transformations.decompose_matrix(m2)
                # print "rotate="+str(d[2][2]/math.pi*180)
                # print "translate="+str(d[3])
                # print "perspective="+str(d[4])
                # print "position="+ str( [d[4][0]/d[4][3], d[4][1]/d[4][3] ])
                # print "kivy_translate="+str(rec['translate'])
                # rec['rotate'] = math.atan2(m[4], m[0])/math.pi*180
                rec["rotate"] = d[2][2] / math.pi * 180
                rec["startcentre"] = [0, 0]
                # rec['startpos'] = p.startpos
                rec["startpos"] = [d[4][0] / d[4][3], d[4][1] / d[4][3]]
                if p.deleted == 0:
                    data["sheets"][s].append(rec)
            # print p.get_window_matrix(0,0)
    h = open("layout_file", "w")
    pickle.dump(data, h)
def asString(self):
    # nb: currently we assume no scales, skews, etc
    q = xform.quaternion_from_matrix(self.matrix)
    o = xform.decompose_matrix(self.matrix)[3]
    return "o %g %g %g %s" % \
        (o[0], o[1], o[2], Quaternion(q).asString())
def __init__(self, ia_Tfile, icp_Tfile, force_icp_unit_scale=True):
    # Load IA transformation
    Tfis = open(ia_Tfile, 'r')
    lines = Tfis.readlines()
    format = len(lines)
    Tfis.seek(0)  # reset file pointer

    if format == 5:
        """If the transformation was saved as
           -----IA-------------
           scale
           H (4x4) - = [S*R|S*T]
        """
        self.Hs_ia = np.genfromtxt(Tfis, skip_header=1, usecols={0, 1, 2, 3})
        Tfis.close()
        Tfis = open(ia_Tfile, 'r')
        self.scale_ia = np.genfromtxt(Tfis, skip_footer=4, usecols={0})
        Tfis.close()
        self.Rs_ia = self.Hs_ia[:3, :3] * (1.0 / self.scale_ia)
        self.Ts_ia = self.Hs_ia[:3, 3] * (1.0 / self.scale_ia)

    if format == 4:
        """If the transformation was saved as
           -----IA-------------
           H (4x4) - = [S*R|S*T]
        """
        self.Hs_ia = np.genfromtxt(Tfis, usecols={0, 1, 2, 3})
        Tfis.close()
        scale, shear, angles, trans, persp = tf.decompose_matrix(self.Hs_ia)
        self.scale_ia = scale[0]  # assuming isotropic scaling
        self.Rs_ia = self.Hs_ia[:3, :3] * (1.0 / self.scale_ia)
        self.Ts_ia = self.Hs_ia[:3, 3] * (1.0 / self.scale_ia)

    # Load ICP transformation
    Tfis = open(icp_Tfile, 'r')
    self.Hs_icp = np.genfromtxt(Tfis, usecols={0, 1, 2, 3})
    Tfis.close()
    if np.isnan(np.sum(self.Hs_icp)):
        self.Hs_icp = np.identity(4)

    if force_icp_unit_scale:
        print "ICP assuming unit scale"
        self.scale_icp = 1.0
    else:
        scale, shear, angles, trans, persp = tf.decompose_matrix(self.Hs_icp)
        self.scale_icp = scale[0]  # assuming isotropic scaling

    self.Hs_icp = self.Hs_icp.dot(self.Hs_ia)
    self.Rs_icp = self.Hs_icp[:3, :3] * (1.0 / (self.scale_ia * self.scale_icp))
    self.Ts_icp = self.Hs_icp[:3, 3] * (1.0 / (self.scale_ia * self.scale_icp))

    # scale, shear, angles, trans, persp = tf.decompose_matrix(self.Hs_ia)
    # self.Rs_ia = tf.euler_matrix(*angles)
    # scale, shear, angles, trans, persp = tf.decompose_matrix(self.Hs_icp.dot(self.Hs_ia))
    # self.Rs_icp = tf.euler_matrix(*angles)
    # import pdb; pdb.set_trace()

    print "Loaded IA Transformation: "
    print self.Hs_ia
    print "Loaded ICP Transformation: "
    print self.Hs_icp
    print "Loaded IA-ICP Scales: "
    print self.scale_ia, self.scale_icp
def main(): SHOW_AXES = True SHOW_SCENE_AXES = True SHOW_COIL_AXES = True SHOW_SKIN = True SHOW_BRAIN = True SHOW_COIL = True SHOW_MARKERS = True TRANSF_COIL = True SHOW_PLANE = False SELECT_LANDMARKS = 'scalp' # 'all', 'mri' 'scalp' SAVE_ID = False AFFINE_IMG = True NO_SCALE = True SCREENSHOT = False reorder = [0, 2, 1] flipx = [True, False, False] # reorder = [0, 1, 2] # flipx = [False, False, False] # default folder and subject # subj = 's03' subj = 'EEGTA04' id_extra = False # 8, 9, 10, 12, False # data_dir = os.environ['OneDriveConsumer'] + '\\data\\nexstim_coord\\' data_dir = r'P:\tms_eeg\mTMS\projects\2019 EEG-based target automatization\Analysis\EEG electrode transformation' # filenames # coil_file = data_dir + 'magstim_fig8_coil.stl' coil_file = os.environ[ 'OneDrive'] + '\\data\\nexstim_coord\\magstim_fig8_coil.stl' if id_extra: coord_file = data_dir + 'ppM1_eximia_%s_%d.txt' % (subj, id_extra) else: coord_file = nav_dir + 'ppM1_eximia_%s.txt' % subj # img_file = data_subj + subj + '.nii' img_file = data_dir + 'mri\\ppM1_%s\\ppM1_%s.nii' % (subj, subj) brain_file = simnibs_dir + "wm.stl" skin_file = simnibs_dir + "skin.stl" if id_extra: output_file = nav_dir + 'transf_mat_%s_%d' % (subj, id_extra) else: output_file = nav_dir + 'transf_mat_%s' % subj coords = lc.load_nexstim(coord_file) # red, green, blue, maroon (dark red), # olive (shitty green), teal (petrol blue), yellow, orange col = [[1., 0., 0.], [0., 1., 0.], [0., 0., 1.], [1., .0, 1.], [.5, .5, 0.], [0., .5, .5], [1., 1., 0.], [1., .4, .0]] # extract image header shape and affine transformation from original nifti file imagedata = nb.squeeze_image(nb.load(img_file)) imagedata = nb.as_closest_canonical(imagedata) imagedata.update_header() pix_dim = imagedata.header.get_zooms() img_shape = imagedata.header.get_data_shape() print("Pixel size: \n") print(pix_dim) print("\nImage shape: \n") print(img_shape) affine_aux = imagedata.affine.copy() if NO_SCALE: scale, shear, angs, trans, persp = tf.decompose_matrix( imagedata.affine) affine_aux = tf.compose_matrix(scale=None, shear=shear, angles=angs, translate=trans, perspective=persp) if AFFINE_IMG: affine = affine_aux # if NO_SCALE: # scale, shear, angs, trans, persp = tf.decompose_matrix(imagedata.affine) # affine = tf.compose_matrix(scale=None, shear=shear, angles=angs, translate=trans, perspective=persp) else: affine = np.identity(4) # affine_I = np.identity(4) # create a camera, render window and renderer camera = vtk.vtkCamera() camera.SetPosition(0, 1000, 0) camera.SetFocalPoint(0, 0, 0) camera.SetViewUp(0, 0, 1) camera.ComputeViewPlaneNormal() camera.Azimuth(90.0) camera.Elevation(10.0) ren = vtk.vtkRenderer() ren.SetActiveCamera(camera) ren.ResetCamera() camera.Dolly(1.5) ren_win = vtk.vtkRenderWindow() ren_win.AddRenderer(ren) ren_win.SetSize(800, 800) # create a renderwindowinteractor iren = vtk.vtkRenderWindowInteractor() iren.SetRenderWindow(ren_win) if SELECT_LANDMARKS == 'mri': # MRI landmarks coord_mri = [['Nose/Nasion'], ['Left ear'], ['Right ear'], ['Coil Loc'], ['EF max']] pts_ref = [1, 2, 3, 7, 10] elif SELECT_LANDMARKS == 'all': # all coords coord_mri = [['Nose/Nasion'], ['Left ear'], ['Right ear'], ['Nose/Nasion'], ['Left ear'], ['Right ear'], ['Coil Loc'], ['EF max']] pts_ref = [1, 2, 3, 5, 4, 6, 7, 10] elif SELECT_LANDMARKS == 'scalp': # scalp landmarks coord_mri = [['Nose/Nasion'], ['Left ear'], ['Right ear'], ['Coil Loc'], ['EF max']] hdr_mri = [ 'Nose/Nasion', 'Left ear', 'Right ear', 'Coil Loc', 'EF max' ] pts_ref = [5, 4, 6, 7, 10] coords_np = 
np.zeros([len(pts_ref), 3]) for n, pts_id in enumerate(pts_ref): # to keep in the MRI space use the identity as the affine # coord_aux = n2m.coord_change(coords[pts_id][1:], img_shape, affine_I, flipx, reorder) # affine_trans = affine_I.copy() # affine_trans = affine.copy() # affine_trans[:3, -1] = affine[:3, -1] coord_aux = n2m.coord_change(coords[pts_id][1:], img_shape, affine, flipx, reorder) coords_np[n, :] = coord_aux [coord_mri[n].append(s) for s in coord_aux] if SHOW_MARKERS: marker_actor = add_marker(coord_aux, ren, col[n]) if id_extra: # compare coil locations in experiments with 8, 9, 10 and 12 mm shifts # MRI Nexstim space: 8, 9, 10, 12 mm coil locations # coord_others = [[122.2, 198.8, 99.7], # [121.1, 200.4, 100.1], # [120.5, 200.7, 98.2], # [117.7, 202.9, 96.6]] if AFFINE_IMG: # World space: 8, 9, 10, 12 mm coil locations coord_others = [ [-42.60270233154297, 28.266497802734378, 81.02450256347657], [-41.50270233154296, 28.66649780273437, 82.62450256347657], [-40.90270233154297, 26.766497802734378, 82.92450256347655], [-38.10270233154297, 25.16649780273437, 85.12450256347657] ] else: # MRI space reordered and flipped: 8, 9, 10, 12 mm coil locations coord_others = [[27.8, 99.7, 198.8], [28.9, 100.1, 200.4], [29.5, 98.2, 200.7], [32.3, 96.6, 202.9]] col_others = [[1., 0., 0.], [0., 1., 0.], [0., 0., 1.], [0., 0., 0.]] for n, c in enumerate(coord_others): marker_actor = add_marker(c, ren, col_others[n]) print('\nOriginal coordinates from Nexstim: \n') [print(s) for s in coords] print('\nMRI coordinates flipped and reordered: \n') [print(s) for s in coords_np] print('\nTransformed coordinates to MRI space: \n') [print(s) for s in coord_mri] # coil location, normal vector and direction vector coil_loc = coord_mri[-2][1:] coil_norm = coords[8][1:] coil_dir = coords[9][1:] # creating the coil coordinate system by adding a point in the direction of each given coil vector # the additional vector is just the cross product from coil direction and coil normal vectors # origin of the coordinate system is the coil location given by Nexstim # the vec_length is to allow line creation with visible length in VTK scene vec_length = 75 p1 = coords[7][1:] p2 = [x + vec_length * y for x, y in zip(p1, coil_norm)] p2_norm = n2m.coord_change(p2, img_shape, affine, flipx, reorder) p2 = [x + vec_length * y for x, y in zip(p1, coil_dir)] p2_dir = n2m.coord_change(p2, img_shape, affine, flipx, reorder) coil_face = np.cross(coil_norm, coil_dir) p2 = [x - vec_length * y for x, y in zip(p1, coil_face.tolist())] p2_face = n2m.coord_change(p2, img_shape, affine, flipx, reorder) # Coil face unit vector (X) u1 = np.asarray(p2_face) - np.asarray(coil_loc) u1_n = u1 / np.linalg.norm(u1) # Coil direction unit vector (Y) u2 = np.asarray(p2_dir) - np.asarray(coil_loc) u2_n = u2 / np.linalg.norm(u2) # Coil normal unit vector (Z) u3 = np.asarray(p2_norm) - np.asarray(coil_loc) u3_n = u3 / np.linalg.norm(u3) transf_matrix = np.identity(4) if TRANSF_COIL: transf_matrix[:3, 0] = u1_n transf_matrix[:3, 1] = u2_n transf_matrix[:3, 2] = u3_n transf_matrix[:3, 3] = coil_loc[:] # the absolute value of the determinant indicates the scaling factor # the sign of the determinant indicates how it affects the orientation: if positive maintain the # original orientation and if negative inverts all the orientations (flip the object inside-out)' # the negative determinant is what makes objects in VTK scene to become black print('Transformation matrix: \n', transf_matrix, '\n') print('Determinant: ', np.linalg.det(transf_matrix)) if 
SAVE_ID: coord_dict = { 'm_affine': transf_matrix, 'coords_labels': hdr_mri, 'coords': coords_np } io.savemat(output_file + '.mat', coord_dict) hdr_names = ';'.join( ['m' + str(i) + str(j) for i in range(1, 5) for j in range(1, 5)]) np.savetxt(output_file + '.txt', transf_matrix.reshape([1, 16]), delimiter=';', header=hdr_names) if SHOW_BRAIN: if AFFINE_IMG: brain_actor = load_stl(brain_file, ren, colour=[0., 1., 1.], opacity=1.) else: # to visualize brain in MRI space brain_actor = load_stl(brain_file, ren, colour=[0., 1., 1.], opacity=1., user_matrix=np.linalg.inv(affine_aux)) if SHOW_SKIN: if AFFINE_IMG: skin_actor = load_stl(skin_file, ren, colour="SkinColor", opacity=.4) else: # to visualize skin in MRI space skin_actor = load_stl(skin_file, ren, colour="SkinColor", opacity=.4, user_matrix=np.linalg.inv(affine_aux)) if SHOW_COIL: # reposition STL object prior to transformation matrix # [translation_x, translation_y, translation_z, rotation_x, rotation_y, rotation_z] # old translation when using Y as normal vector # repos = [0., -6., 0., 0., -90., 90.] # Translate coil loc coordinate to coil bottom # repos = [0., 0., 5.5, 0., 0., 180.] repos = [0., 0., 0., 0., 0., 180.] act_coil = load_stl(coil_file, ren, replace=repos, user_matrix=transf_matrix, opacity=.3) if SHOW_PLANE: act_plane = add_plane(ren, user_matrix=transf_matrix) # Add axes to scene origin if SHOW_AXES: add_line(ren, [0, 0, 0], [150, 0, 0], color=[1.0, 0.0, 0.0]) add_line(ren, [0, 0, 0], [0, 150, 0], color=[0.0, 1.0, 0.0]) add_line(ren, [0, 0, 0], [0, 0, 150], color=[0.0, 0.0, 1.0]) # Add axes to object origin if SHOW_COIL_AXES: add_line(ren, coil_loc, p2_norm, color=[.0, .0, 1.0]) add_line(ren, coil_loc, p2_dir, color=[.0, 1.0, .0]) add_line(ren, coil_loc, p2_face, color=[1.0, .0, .0]) # Add interactive axes to scene if SHOW_SCENE_AXES: axes = vtk.vtkAxesActor() widget = vtk.vtkOrientationMarkerWidget() widget.SetOutlineColor(0.9300, 0.5700, 0.1300) widget.SetOrientationMarker(axes) widget.SetInteractor(iren) # widget.SetViewport(0.0, 0.0, 0.4, 0.4) widget.SetEnabled(1) widget.InteractiveOn() if SCREENSHOT: # screenshot of VTK scene w2if = vtk.vtkWindowToImageFilter() w2if.SetInput(ren_win) w2if.Update() writer = vtk.vtkPNGWriter() writer.SetFileName("screenshot.png") writer.SetInput(w2if.GetOutput()) writer.Write() # Enable user interface interactor # ren_win.Render() ren.ResetCameraClippingRange() iren.Initialize() iren.Start()
def ICP(self, i1, i2):
    if i1 == i2:
        return []

    # create a new copy of i2.coord_list
    coord_list2 = np.array(i2.coord_list, copy=True).T
    # print coord_list2

    # make homogeneous
    newcol = np.ones((1, coord_list2.shape[1]))
    # print newcol
    # print coord_list2.shape, newcol.shape
    coord_list2 = np.vstack((coord_list2, newcol))

    done = False
    while not done:
        # find a pairing for the closest points.  If the closest point
        # is already taken, then if the distance is less, the old
        # pairing is dropped and the new one accepted.  If the distance
        # is greater than a previous pairing, the new pairing is
        # skipped
        i1.icp_index = np.zeros(len(i1.coord_list), dtype=int)
        i1.icp_index.fill(-1)
        i1.icp_dist = np.zeros(len(i1.coord_list))
        i1.icp_dist.fill(np.inf)  # a big number
        for i in range(coord_list2.shape[1]):
            c2 = coord_list2[:3, i]
            (dist, index) = i1.kdtree.query(c2, k=1)
            if dist < i1.icp_dist[index]:
                i1.icp_dist[index] = dist
                i1.icp_index[index] = i
        pairs = []
        for i, index in enumerate(i1.icp_index):
            if index >= 0:
                c1 = i1.coord_list[i]
                c2 = coord_list2[:3, index]
                # print "c1=%s c2=%s" % (c1, c2)
                pairs.append([c1, c2])

        do_plot = False
        if do_plot:
            # This can be plotted in gnuplot with:
            # plot "c1.txt", "c2.txt", "vector.txt" u 1:2:($3-$1):($4-$2) title "pairs" with vectors
            f = open('c1.txt', 'w')
            for c1 in i1.coord_list:
                f.write("%.3f %.3f %.3f\n" % (c1[1], c1[0], -c1[2]))
            f.close()
            f = open('c2.txt', 'w')
            for i in range(coord_list2.shape[1]):
                c2 = coord_list2[:3, i]
                f.write("%.3f %.3f %.3f\n" % (c2[1], c2[0], -c2[2]))
            f.close()
            f = open('vector.txt', 'w')
            for pair in pairs:
                c1 = pair[0]
                c2 = pair[1]
                f.write("%.3f %.3f %.3f %.3f %.3f %.3f\n" %
                        (c2[1], c2[0], -c2[2], c1[1], c1[0], -c1[2]))
            f.close()

        # find the affine transform matrix that brings the paired
        # points together
        # print "icp pairs =", len(pairs)
        v0 = np.zeros((3, len(pairs)))
        v1 = np.zeros((3, len(pairs)))
        weights = np.zeros(len(pairs))
        weights.fill(1.0)
        for i, pair in enumerate(pairs):
            v0[:, i] = pair[0]
            v1[:, i] = pair[1]
        # print "v0\n", v0
        # print "v1\n", v1
        # print "weights\n", weights
        M = transformations.affine_matrix_from_points(v1, v0, shear=False, scale=False)
        # M = transformations.affine_matrix_from_points_weighted(v0, v1, weights, shear=False, scale=False)
        # print M

        scale, shear, angles, trans, persp = transformations.decompose_matrix(M)
        # print "scale=", scale
        # print "shear=", shear
        # print "angles=", angles
        # print "trans=", trans
        # print "persp=", persp

        coord_list2 = np.dot(M, coord_list2)
        coord_list2 /= coord_list2[3]
        # print coord_list2

        rot = np.linalg.norm(angles)
        dist = np.linalg.norm(trans)
        print "rot=%.6f dist=%.6f" % (rot, dist)
        if rot < 0.001 and dist < 0.001:
            done = True
            a = raw_input("Press Enter to continue...")
def dimensionsOLD(gltf, node, parentMatrix=transformations.scale_matrix(1)):
    # pprint(node)
    T = node.translation or [0, 0, 0]
    R = node.rotation or [0, 0, 0, 1]
    S = node.scale or [1, 1, 1]
    # thisNodeMatrix = composeMatrixFromTRS(T,R,S)
    R = transformations.euler_from_quaternion(R, 'sxyz')
    thisNodeMatrix = node.matrix or transformations.compose_matrix(S, None, R, T)
    thisNodeMatrix = np.array(thisNodeMatrix)
    thisNodeMatrix.shape = (4, 4)
    newMatrix = np.dot(thisNodeMatrix, parentMatrix)
    if node.mesh != None:
        accessor = gltf.accessors[gltf.meshes[node.mesh].primitives[0].attributes.POSITION]
        possiblities = [
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ]
        testMin = accessor.min
        testMax = accessor.max
        for test in possiblities:
            test = [
                testMin[0] if test[0] == 0 else testMax[0],
                testMin[1] if test[1] == 0 else testMax[1],
                testMin[2] if test[2] == 0 else testMax[2],
            ]
            print("=======", node.name)
            print('before', test)
            test = np.matmul(test + [1], newMatrix)
            scale, shear, angles, trans, persp = transformations.decompose_matrix(newMatrix)
            print("by matrix", np.multiply(angles, 57))
            print('after', test)
            myMin[0] = test[0] if test[0] < myMin[0] else myMin[0]
            myMin[1] = test[1] if test[1] < myMin[1] else myMin[1]
            myMin[2] = test[2] if test[2] < myMin[2] else myMin[2]
            myMax[0] = test[0] if test[0] > myMax[0] else myMax[0]
            myMax[1] = test[1] if test[1] > myMax[1] else myMax[1]
            myMax[2] = test[2] if test[2] > myMax[2] else myMax[2]
    if node.children == None or len(node.children) == 0:
        # pprint(thisNodeMatrix)
        # print(node.mesh)
        return
    for child in node.children:
        dimensionsOLD(gltf, gltf.nodes[child], newMatrix)
refit_group_orientations = False
if refit_group_orientations:
    src_list = []
    dst_list = []
    # only consider images that are in the main group
    for i in groups[0]:
        image = proj.image_list[i]
        ned, ypr, quat = image.get_camera_pose(opt=True)
        src_list.append(ned)
        ned, ypr, quat = image.get_camera_pose()
        dst_list.append(ned)
    A = get_recenter_affine(src_list, dst_list)

    # extract the rotation matrix (R) from the affine transform
    scale, shear, angles, trans, persp = transformations.decompose_matrix(A)
    print(' scale:', scale)
    print(' shear:', shear)
    print(' angles:', angles)
    print(' translate:', trans)
    print(' perspective:', persp)
    R = transformations.euler_matrix(*angles)
    print("R:\n{}".format(R))

    # update the optimized camera locations based on best fit
    camera_list = []
    # load optimized poses
    for i, image in enumerate(proj.image_list):
        if i in groups[0]:
            ned, ypr, quat = image.get_camera_pose(opt=True)
        else:
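The pattern used above (and in transform_cams earlier) is to strip scale, shear, and translation from a fitted affine and keep only its rotation. A minimal, self-contained sketch of that step with made-up numbers:

import transformations

# hypothetical affine fit: small uniform scale + rotation about Z + translation
A = transformations.compose_matrix(scale=[1.02, 1.02, 1.02],
                                   angles=[0.0, 0.0, 0.05],
                                   translate=[3.0, -1.0, 0.5])
scale, shear, angles, trans, persp = transformations.decompose_matrix(A)
R = transformations.euler_matrix(*angles)   # pure rotation, 4x4
print(R[:3, :3])                            # ~Rz(0.05 rad); scale and translation removed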
def controls_3d(self, dx, dy, zooming_one_shot=False):
    """ Orbiting the camera is implemented the following way:

    - the rotation is split into a rotation around the *world* Z axis
      (controlled by the horizontal mouse motion along X) and a rotation
      around the *X* axis of the camera (pitch) *shifted to the focal
      origin* (the world origin for now). This is controlled by the
      vertical motion of the mouse (Y axis).

    - as a result, the resulting transformation of the camera in the
      world frame C' is:
        C' = (T · Rx · T⁻¹ · (Rz · C)⁻¹)⁻¹
      where:
        - C is the original camera transformation in the world frame,
        - Rz is the rotation along the Z axis (in the world frame)
        - T is the translation camera -> world (ie, the inverse of the
          translation part of C
        - Rx is the rotation around X in the (translated) camera frame
    """
    CAMERA_TRANSLATION_FACTOR = 0.01
    CAMERA_ROTATION_FACTOR = 0.01

    if not (self.is_rotating or self.is_panning or self.is_zooming):
        return

    current_pos = self.current_cam.transformation[:3, 3].copy()
    distance = numpy.linalg.norm(self.focal_point - current_pos)

    if self.is_rotating:
        rotation_camera_x = dy * CAMERA_ROTATION_FACTOR
        rotation_world_z = dx * CAMERA_ROTATION_FACTOR
        world_z_rotation = transformations.euler_matrix(0, 0, rotation_world_z)
        cam_x_rotation = transformations.euler_matrix(rotation_camera_x, 0, 0)

        after_world_z_rotation = numpy.dot(world_z_rotation, self.current_cam.transformation)

        inverse_transformation = transformations.inverse_matrix(after_world_z_rotation)

        translation = transformations.translation_matrix(
            transformations.decompose_matrix(inverse_transformation)[3])
        inverse_translation = transformations.inverse_matrix(translation)

        new_inverse = numpy.dot(inverse_translation, inverse_transformation)
        new_inverse = numpy.dot(cam_x_rotation, new_inverse)
        new_inverse = numpy.dot(translation, new_inverse)

        self.current_cam.transformation = transformations.inverse_matrix(new_inverse).astype(numpy.float32)

    if self.is_panning:
        tx = -dx * CAMERA_TRANSLATION_FACTOR * distance
        ty = dy * CAMERA_TRANSLATION_FACTOR * distance
        cam_transform = transformations.translation_matrix((tx, ty, 0)).astype(numpy.float32)
        self.current_cam.transformation = numpy.dot(self.current_cam.transformation, cam_transform)

    if self.is_zooming:
        tz = dy * CAMERA_TRANSLATION_FACTOR * distance
        cam_transform = transformations.translation_matrix((0, 0, tz)).astype(numpy.float32)
        self.current_cam.transformation = numpy.dot(self.current_cam.transformation, cam_transform)
        if zooming_one_shot:
            self.is_zooming = False

    self.update_view_camera()
def drawit_pygame(screen, myfont, i, x_star, x_star_cov, M_star, toplot_star, x_input, M_input, toplot_input, x_true, toplot_true, pusher_loc, contact_normal, contact_point, has_contact, has_apriltag, saveto, titletext, saveformat, plotconfig, sub, M_star_3d, M_input_3d, shape, shape_type, shape_polygon_3d, star_color, index, legend): label = mat['label'] radius = mat['probe_radius'] startdate = mat['startdate'] shape_id = mat['shape_id'] offset = mat['offset'] textsurface = myfont.render('%d' % i, True, BLACK) screen.blit(textsurface, (0, 0)) shift = 0 shiftx = 750 filled_polygon(screen, [[shiftx, 0 + shift], [40 + shiftx, 0 + shift], [40 + shiftx, 40 + shift], [0 + shiftx, 40 + shift]], GREY) aapolygon(screen, [[0 + shiftx, 0 + shift], [40 + shiftx, 0 + shift], [40 + shiftx, 40 + shift], [0 + shiftx, 40 + shift]], BLACK) textsurface = myfont.render('Groundtruth', True, BLACK) screen.blit(textsurface, (50 + shiftx, shift)) shift = 40 filled_polygon(screen, [[0 + shiftx, 0 + shift], [40 + shiftx, 0 + shift], [40 + shiftx, 40 + shift], [0 + shiftx, 40 + shift]], GREEN) aapolygon(screen, [[0 + shiftx, 0 + shift], [40 + shiftx, 0 + shift], [40 + shiftx, 40 + shift], [0 + shiftx, 40 + shift]], BLACK) textsurface = myfont.render('Input', True, BLACK) screen.blit(textsurface, (50 + shiftx, shift)) shift = 80 + index * 40 filled_polygon(screen, [[0 + shiftx, 0 + shift], [40 + shiftx, 0 + shift], [40 + shiftx, 40 + shift], [0 + shiftx, 40 + shift]], star_color) aapolygon(screen, [[0 + shiftx, 0 + shift], [40 + shiftx, 0 + shift], [40 + shiftx, 40 + shift], [0 + shiftx, 40 + shift]], BLACK) textsurface = myfont.render(legend, True, BLACK) screen.blit(textsurface, (50 + shiftx, shift)) # plot groundtruth shape and pose if toplot_true: T = matrix_from_xyzrpy([x_true[i][0], x_true[i][1], 0], [0, 0, x_true[i][2]]) if shape_type == 'poly' or shape_type == 'polyapprox': shape_polygon_3d_world = np.dot(T, shape_polygon_3d.T) #pygame.draw.polygon(screen, BLACK, topixel(shape_polygon_3d_world.T[:,0:2], offset).tolist(), 5) filled_polygon(screen, topixel(shape_polygon_3d_world.T[:, 0:2], offset), GREY) aapolygon(screen, topixel(shape_polygon_3d_world.T[:, 0:2], offset), BLACK) #obj = mpatches.Polygon(shape_polygon_3d_world.T[:,0:2], closed=True, linewidth=2, linestyle='dashed', fill=False) elif shape_type == 'ellip': scale, shear, angles, trans, persp = tfm.decompose_matrix(T) #obj = mpatches.Ellipse(trans[0:2], shape[0]*2, shape[1]*2, angle=angles[2]/np.pi*180.0, fill=False, linewidth=1, linestyle='solid') #ax1.add_patch(obj) if toplot_star: if len(M_star.shape) == 3: M_star_i = npa(M_star[i]) M_star_3d = np.hstack( (np.array(M_star_i), np.zeros( (len(M_star_i), 1)), np.ones((len(M_star_i), 1)))) # plot input shape and pose if toplot_input: T = matrix_from_xyzrpy([x_input[i][0], x_input[i][1], 0], [0, 0, x_input[i][2]]) if shape_type == 'poly' or shape_type == 'polyapprox' or shape_type == 'ellip': M_input_3d_world = np.dot(T, M_input_3d.T) filled_polygon(screen, topixel(M_input_3d_world.T[:, 0:2], offset), GREEN) aapolygon(screen, topixel(M_input_3d_world.T[:, 0:2], offset), BLACK) #obj = mpatches.Polygon(M_input_3d_world.T[:,0:2], closed=True, linewidth=1, linestyle='solid', fill=False) #ax1.plot(M_input_3d_world.T[:,0:1], M_input_3d_world.T[:,1:2], 'go') elif shape_type == 'ellip': scale, shear, angles, trans, persp = tfm.decompose_matrix(T) #obj = mpatches.Ellipse(trans[0:2], shape[0]*2, shape[1]*2, angle=angles[2], fill=False, linewidth=1, linestyle='solid') #ax1.add_patch(obj) # plot estimated shape 
and pose if toplot_star: T = matrix_from_xyzrpy([x_star[i][0], x_star[i][1], 0], [0, 0, x_star[i][2]]) if shape_type == 'poly' or shape_type == 'polyapprox' or shape_type == 'ellip': M_star_3d_world = np.dot(T, M_star_3d.T) #obj = mpatches.Polygon(M_star_3d_world.T[:,0:2], closed=True, linewidth=1, linestyle='solid', fill=False) #ax1.plot(M_star_3d_world.T[:,0:1], M_star_3d_world.T[:,1:2], 'ro') filled_polygon(screen, topixel(M_star_3d_world.T[:, 0:2], offset), star_color) aapolygon(screen, topixel(M_star_3d_world.T[:, 0:2], offset), BLACK) elif shape_type == 'ellip': scale, shear, angles, trans, persp = tfm.decompose_matrix(T) #obj = mpatches.Ellipse(trans[0:2], shape[0]*2, shape[1]*2, angle=angles[2], fill=False, linewidth=1, linestyle='solid') #ax1.add_patch(obj) #return # plot the covariance of pose #~ if x_star_cov is not None: #~ plot_cov_ellipse(npa(x_star_cov[i][0:2][:,0:2]), npa(x_star[i][0:2]), ax = ax1, facecolor = (1,1,153/255.0,0.5)) #~ plot_cov_fan(x_star_cov[i][2][2], npa(x_star[i][0:3]), npa(x_true[i][0:3]), ax = ax1) #~ #~ plot_pos(x_true[i][0:3], ax = ax1) #~ #~ plot_pos(x_input[i][0:3], ax = ax1, color = 'green') for side in [0, 1]: # left(0) / right(1) # plot probe #circle = mpatches.Circle(pusher_loc[side][i], radius = radius) #ax1.add_patch(circle) center_pix = topixel([pusher_loc[side][i]], offset) pygame.gfxdraw.filled_circle(screen, int(center_pix[0][0]), int(center_pix[0][1]), int(scaletopixel(radius)), BLUE) pygame.gfxdraw.aacircle(screen, int(center_pix[0][0]), int(center_pix[0][1]), int(scaletopixel(radius)), BLACK) #~ if has_contact[side][i]: # plot contact point #~ ax1.plot(contact_point[side][i][0], contact_point[side][i][1], 'k*') #~ #~ # plot normal line_pix = topixel([ contact_point[side][i], (npa(contact_point[side][i]) + npa(contact_normal[side][i]) * 0.01).tolist() ], offset) pygame.gfxdraw.line(screen, line_pix[0][0], line_pix[0][1], line_pix[1][0], line_pix[1][1], GREEN) #~ #ax1.arrow(d[i,0], d[i,1], turned_norm[i][0]*0.01, turned_norm[i][1]*0.01, head_width=0.001, head_length=0.01, fc='r', ec='r') #~ #ax1.arrow(d[i,0], d[i,1], vicon_norm[i][0]*0.01, vicon_norm[i][1]*0.01, head_width=0.001, head_length=0.01, fc='g', ec='g') #~ # plot no apriltag if not has_apriltag[i]: #ax1.text(offset[0]-0.1, offset[1]-0.1, 'No apriltag') textsurface = myfont.render('No apriltag', True, BLACK) screen.blit(textsurface, (0, 40))
def displayResult(x_star, x_star_cov, M_star, toplot_star, x_input, M_input, toplot_input, d, radius, saveto, titletext, saveformat, plotconfig, shape_id, offset, sub, turned_norm, vicon_norm, label=''): # 0. convert from matlab col-based to python row-based if toplot_star: if len(x_star) == 3: x_star = npa(x_star).T else: x_star = npa(x_star) if toplot_input: if len(x_input) == 3: x_input = npa(x_input).T else: x_input = npa(x_input) if toplot_star: if len(M_star.shape) == 2: M_star = npa(M_star).T M_star_3d = np.hstack((np.array(M_star), np.zeros( (len(M_star), 1)), np.ones((len(M_star), 1)))) if toplot_input: if len(M_input.shape) == 2: M_input = npa(M_input) M_input_3d = np.hstack( (np.array(M_input), np.zeros( (len(M_input), 1)), np.ones((len(M_input), 1)))) # 1. convert groundtruth to list of [x,y,theta] true_x = [] d = d.T length = d.shape[0] for i in xrange(d.shape[0]): #import pdb; pdb.set_trace() matlabq = d[i, 10:13].tolist() + [d[i, 9].tolist()] true_x.append([ d[i][6], d[i][7], tfm.euler_from_quaternion(matlabq, axes='sxyz')[2] ]) true_x = np.array(true_x) # 2. load shape #### add the object as polygon shape_db = ShapeDB() shape = shape_db.shape_db[shape_id][ 'shape'] # shape of the objects presented as polygon. shape_type = shape_db.shape_db[shape_id]['shape_type'] if shape_type == 'poly': shape_polygon_3d = np.hstack((np.array(shape), np.zeros( (len(shape), 1)), np.ones((len(shape), 1)))) elif shape_type == 'ellip': shape = shape[0] elif shape_type == 'polyapprox': shape_polygon_3d = np.hstack( (np.array(shape[0]), np.zeros( (len(shape[0]), 1)), np.ones((len(shape[0]), 1)))) # 3. loop through trajectory and plot the shape length = len(x_star) if toplot_star else len(x_input) for i in xrange(0, length, sub): if i % 100 == 0: print 'display result', i fig1 = plt.figure() ax1 = fig1.add_subplot(111, aspect='equal') ax1.set_xlim(npa([-0.09, 0.09]) * 1.3 + offset[0]) ax1.set_ylim(npa([-0.09, 0.09]) * 1.3 + offset[1]) has_apriltag = d[i][25] > 0.5 # plot groundtruth shape and pose T = matrix_from_xyzrpy([true_x[i][0], true_x[i][1], 0], [0, 0, true_x[i][2]]) if shape_type == 'poly' or shape_type == 'polyapprox': shape_polygon_3d_world = np.dot(T, shape_polygon_3d.T) obj = mpatches.Polygon(shape_polygon_3d_world.T[:, 0:2], closed=True, linewidth=2, linestyle='dashed', fill=False) elif shape_type == 'ellip': scale, shear, angles, trans, persp = tfm.decompose_matrix(T) obj = mpatches.Ellipse(trans[0:2], shape[0] * 2, shape[1] * 2, angle=angles[2] / np.pi * 180.0, fill=False, linewidth=1, linestyle='solid') ax1.add_patch(obj) #fig1.savefig('rect1.png', dpi=200, bbox_inches='tight') if toplot_star: if len(M_star.shape) == 3: M_star_i = npa(M_star[i]) M_star_3d = np.hstack( (np.array(M_star_i), np.zeros( (len(M_star_i), 1)), np.ones((len(M_star_i), 1)))) # plot input shape and pose if toplot_input: T = matrix_from_xyzrpy([x_input[i][0], x_input[i][1], 0], [0, 0, x_input[i][2]]) if shape_type == 'poly' or shape_type == 'polyapprox' or shape_type == 'ellip': M_input_3d_world = np.dot(T, M_input_3d.T) obj = mpatches.Polygon(M_input_3d_world.T[:, 0:2], closed=True, linewidth=1, linestyle='solid', fill=False) ax1.plot(M_input_3d_world.T[:, 0:1], M_input_3d_world.T[:, 1:2], 'go') elif shape_type == 'ellip': scale, shear, angles, trans, persp = tfm.decompose_matrix(T) obj = mpatches.Ellipse(trans[0:2], shape[0] * 2, shape[1] * 2, angle=angles[2], fill=False, linewidth=1, linestyle='solid') ax1.add_patch(obj) # plot estimated shape and pose if toplot_star: T = matrix_from_xyzrpy([x_star[i][0], 
x_star[i][1], 0], [0, 0, x_star[i][2]]) if shape_type == 'poly' or shape_type == 'polyapprox' or shape_type == 'ellip': M_star_3d_world = np.dot(T, M_star_3d.T) obj = mpatches.Polygon(M_star_3d_world.T[:, 0:2], closed=True, linewidth=1, linestyle='solid', fill=False) ax1.plot(M_star_3d_world.T[:, 0:1], M_star_3d_world.T[:, 1:2], 'ro') elif shape_type == 'ellip': scale, shear, angles, trans, persp = tfm.decompose_matrix(T) obj = mpatches.Ellipse(trans[0:2], shape[0] * 2, shape[1] * 2, angle=angles[2], fill=False, linewidth=1, linestyle='solid') ax1.add_patch(obj) # plot the covariance of pose if x_star_cov is not None: plot_cov_ellipse(npa(x_star_cov[i][0:2][:, 0:2]), npa(x_star[i][0:2]), ax=ax1, facecolor=(1, 1, 153 / 255.0, 0.5)) plot_cov_fan(x_star_cov[i][2][2], npa(x_star[i][0:3]), npa(true_x[i][0:3]), ax=ax1) plot_pos(true_x[i][0:3], ax=ax1) # no axes ax1.set_axis_off() # plot contact point ax1.plot(d[i, 0], d[i, 1], 'k*') # plot probe circle = mpatches.Circle((d[i, 13:15]), radius=radius) ax1.add_patch(circle) if not has_apriltag: ax1.text(offset[0] - 0.1, offset[1] - 0.1, 'No apriltag') # plot normal #ax1.arrow(d[i,0], d[i,1], d[i,0]+d[i,3]*0.0001, d[i,1]+d[i,4]*0.0001, head_width=0.01, head_length=0.01, fc='k', ec='k') ax1.arrow(d[i, 0], d[i, 1], d[i, 3] * 0.01, d[i, 4] * 0.01, head_width=0.001, head_length=0.01, fc='g', ec='g') #ax1.arrow(d[i,0], d[i,1], turned_norm[i][0]*0.01, turned_norm[i][1]*0.01, head_width=0.001, head_length=0.01, fc='r', ec='r') #ax1.arrow(d[i,0], d[i,1], vicon_norm[i][0]*0.01, vicon_norm[i][1]*0.01, head_width=0.001, head_length=0.01, fc='g', ec='g') fig1.savefig('%s%07d.png' % (saveto, i), dpi=200, bbox_inches='tight') plt.close(fig1)
def main(): SHOW_AXES = True AFFINE_IMG = True NO_SCALE = True n_tracts = 240 # n_tracts = 24 # n_threads = 2*psutil.cpu_count() img_shift = 256 # 255 data_dir = os.environ.get('OneDrive') + r'\data\dti_navigation\baran\anat_reg_improve_20200609' data_dir = data_dir.encode('utf-8') # FOD_path = 'Baran_FOD.nii' # trk_path = os.path.join(data_dir, FOD_path) # data_dir = b'C:\Users\deoliv1\OneDrive\data\dti' stl_path = b'wm_orig_smooth_world.stl' brain_path = os.path.join(data_dir, stl_path) # data_dir = b'C:\Users\deoliv1\OneDrive\data\dti' stl_path = b'wm_2.stl' brain_simnibs_path = os.path.join(data_dir, stl_path) stl_path = b'wm.stl' brain_inv_path = os.path.join(data_dir, stl_path) nii_path = b'Baran_FOD.nii' trk_path = os.path.join(data_dir, nii_path) nii_path = b'Baran_T1_inFODspace.nii' img_path = os.path.join(data_dir, nii_path) nii_path = b'Baran_trekkerACTlabels_inFODspace.nii' act_path = os.path.join(data_dir, nii_path) stl_path = b'magstim_fig8_coil.stl' coil_path = os.path.join(data_dir, stl_path) imagedata = nb.squeeze_image(nb.load(img_path.decode('utf-8'))) imagedata = nb.as_closest_canonical(imagedata) imagedata.update_header() pix_dim = imagedata.header.get_zooms() img_shape = imagedata.header.get_data_shape() act_data = nb.squeeze_image(nb.load(act_path.decode('utf-8'))) act_data = nb.as_closest_canonical(act_data) act_data.update_header() act_data_arr = act_data.get_fdata() # print(imagedata.header) print("pix_dim: {}, img_shape: {}".format(pix_dim, img_shape)) if AFFINE_IMG: affine = imagedata.affine if NO_SCALE: scale, shear, angs, trans, persp = tf.decompose_matrix(imagedata.affine) affine = tf.compose_matrix(scale=None, shear=shear, angles=angs, translate=trans, perspective=persp) else: affine = np.identity(4) print("affine: {0}\n".format(affine)) # Create a rendering window and renderer ren = vtk.vtkRenderer() ren_win = vtk.vtkRenderWindow() ren_win.AddRenderer(ren) ren_win.SetSize(800, 800) # Create a renderwindowinteractor iren = vtk.vtkRenderWindowInteractor() iren.SetRenderWindow(ren_win) minFODamp = np.arange(0.01, 0.11, 0.01) dataSupportExponent = np.arange(0.1, 1.1, 0.1) # COMBINATION 1 # tracker = minFODamp(0.01) # tracker = dataSupportExponent(0.1) # COMBINATION "n" # tracker = minFODamp(0.01 * n) # tracker = dataSupportExponent(0.1 * n) start_time = time.time() trekker_cfg = {'seed_max': 1, 'step_size': 0.1, 'min_fod': 0.1, 'probe_quality': 3, 'max_interval': 1, 'min_radius_curv': 0.8, 'probe_length': 0.4, 'write_interval': 50, 'numb_threads': '', 'max_lenth': 200, 'min_lenth': 20, 'max_sampling_step': 100} tracker = Trekker.initialize(trk_path) tracker, n_threads = dti.set_trekker_parameters(tracker, trekker_cfg) duration = time.time() - start_time print("Initialize Trekker: {:.2f} ms".format(1e3*duration)) repos = [0., -img_shift, 0., 0., 0., 0.] # brain_actor = load_stl(brain_inv_path, ren, opacity=1., colour=[1.0, 1.0, 1.0], replace=repos, user_matrix=np.identity(4)) # the one always been used brain_actor = load_stl(brain_simnibs_path, ren, opacity=1., colour=[1.0, 1.0, 1.0], replace=repos, user_matrix=np.linalg.inv(affine)) # bds = brain_actor.GetBounds() # print("Y length: {} --- Bounds: {}".format(bds[3] - bds[2], bds)) # invesalius surface # repos = [0., 0., 0., 0., 0., 0.] # brain_actor = load_stl(brain_inv_path, ren, opacity=.5, colour=[1.0, .5, .5], replace=repos, user_matrix=np.identity(4)) # repos = [0., 0., 0., 0., 0., 0.] 
# brain_actor_mri = load_stl(brain_path, ren, opacity=.1, colour=[0.0, 1.0, 0.0], replace=repos, user_matrix=np.linalg.inv(affine)) # bds = brain_actor_mri.GetBounds() # print("Y length: {} --- Bounds: {}".format(bds[3] - bds[2], bds)) # repos = [0., 256., 0., 0., 0., 0.] # brain_inv_actor = load_stl(brain_inv_path, ren, colour="SkinColor", opacity=0.5, replace=repos, user_matrix=np.linalg.inv(affine)) # brain_inv_actor = load_stl(brain_inv_path, ren, colour="SkinColor", opacity=.6, replace=repos) # bds = brain_inv_actor.GetBounds() # print("Reposed: Y length: {} --- Bounds: {}".format(bds[3] - bds[2], bds)) # Add axes to scene origin if SHOW_AXES: add_line(ren, [0, 0, 0], [150, 0, 0], color=[1.0, 0.0, 0.0]) add_line(ren, [0, 0, 0], [0, 150, 0], color=[0.0, 1.0, 0.0]) add_line(ren, [0, 0, 0], [0, 0, 150], color=[0.0, 0.0, 1.0]) # Show tracks repos_trk = [0., -img_shift, 0., 0., 0., 0.] # repos_trk = [0., 0., 0., 0., 0., 0.] matrix_vtk = vtk.vtkMatrix4x4() trans = np.identity(4) trans[1, -1] = repos_trk[1] final_matrix = np.linalg.inv(affine) @ trans print("final_matrix: {}".format(final_matrix)) for row in range(0, 4): for col in range(0, 4): matrix_vtk.SetElement(row, col, final_matrix[row, col]) root = vtk.vtkMultiBlockDataSet() # for i in range(10): # seed = np.array([[-8.49, -8.39, 2.5]]) # seed = np.array([[27.53, -77.37, 46.42]]) # from the invesalius exported fiducial markers you have to multiply the Y coordinate by -1 to # transform to the regular 3D invesalius space where coil location is saved fids_inv = np.array([[168.300, -126.600, 97.000], [9.000, -120.300, 93.700], [90.100, -33.500, 150.000]]) for n in range(3): fids_actor = add_marker(fids_inv[n, :], ren, [1., 0., 0.], radius=2) seed = np.array([[-25.66, -30.07, 54.91]]) coil_pos = [40.17, 152.28, 235.78, -18.22, -25.27, 64.99] m_coil = coil_transform_pos(coil_pos) repos = [0., 0., 0., 0., 0., 90.] 
coil_actor = load_stl(coil_path, ren, opacity=.6, replace=repos, colour=[1., 1., 1.], user_matrix=m_coil) # coil_actor = load_stl(coil_path, ren, opacity=.6, replace=repos, colour=[1., 1., 1.]) # create coil vectors vec_length = 75 print(m_coil.shape) p1 = m_coil[:-1, -1] print(p1) coil_dir = m_coil[:-1, 0] coil_face = m_coil[:-1, 1] p2_face = p1 + vec_length * coil_face p2_dir = p1 + vec_length * coil_dir coil_norm = np.cross(coil_dir, coil_face) p2_norm = p1 - vec_length * coil_norm add_line(ren, p1, p2_dir, color=[1.0, .0, .0]) add_line(ren, p1, p2_face, color=[.0, 1.0, .0]) add_line(ren, p1, p2_norm, color=[.0, .0, 1.0]) colours = [[1., 0., 0.], [0., 1., 0.], [0., 0., 1.], [1., .0, 1.], [.5, .5, 0.], [0., .5, .5], [1., 1., 0.], [1., .4, .0]] marker_actor = add_marker(p1, ren, colours[0], radius=1) # p1_change = n2m.coord_change(p1) p1_change = p1.copy() p1_change[1] = -p1_change[1] # p1_change[1] += img_shift marker_actor2 = add_marker(p1_change, ren, colours[1], radius=1) offset = 40 coil_norm = coil_norm/np.linalg.norm(coil_norm) coord_offset_nav = p1 - offset * coil_norm marker_actor_seed_nav = add_marker(coord_offset_nav, ren, colours[3], radius=1) coord_offset_mri = coord_offset_nav.copy() coord_offset_mri[1] += img_shift marker_actor_seed_nav = add_marker(coord_offset_mri, ren, colours[3], radius=1) coord_mri_label = [int(s) for s in coord_offset_mri] print("offset MRI: {}, and label: {}".format(coord_mri_label, act_data_arr[tuple(coord_mri_label)])) offset_list = 10 + np.arange(0, 31, 3) coord_offset_list = p1 - np.outer(offset_list, coil_norm) coord_offset_list += [0, img_shift, 0] coord_offset_list = coord_offset_list.astype(int).tolist() # for pt in coord_offset_list: # print(pt) # if act_data_arr[tuple(pt)] == 2: # cl = colours[5] # else: # cl = colours[4] # _ = add_marker(pt, ren, cl) x = np.arange(-4, 5, 2) y = np.arange(-4, 5, 2) z = 10 + np.arange(0, 31, 3) xv, yv, zv = np.meshgrid(x, y, - z) coord_grid = np.array([xv, yv, zv]) start_time = time.time() for p in range(coord_grid.shape[1]): for n in range(coord_grid.shape[2]): for m in range(coord_grid.shape[3]): pt = coord_grid[:, p, n, m] pt = np.append(pt, 1) pt_tr = m_coil @ pt[:, np.newaxis] pt_tr = np.squeeze(pt_tr[:3]).astype(int) + [0, img_shift, 0] pt_tr = tuple(pt_tr.tolist()) if act_data_arr[pt_tr] == 2: cl = colours[6] elif act_data_arr[pt_tr] == 1: cl = colours[7] else: cl = [1., 1., 1.] 
# print(act_data_arr[pt_tr]) _ = add_marker(pt_tr, ren, cl, radius=1) duration = time.time() - start_time print("Compute coil grid: {:.2f} ms".format(1e3*duration)) start_time = time.time() # create grid of points grid_number = x.shape[0]*y.shape[0]*z.shape[0] coord_grid = coord_grid.reshape([3, grid_number]).T # sort grid from distance to the origin/coil center coord_list = coord_grid[np.argsort(np.linalg.norm(coord_grid, axis=1)), :] # make the coordinates homogeneous coord_list_w = np.append(coord_list.T, np.ones([1, grid_number]), axis=0) # apply the coil transformation matrix coord_list_w_tr = m_coil @ coord_list_w # convert to int so coordinates can be used as indices in the MRI image space coord_list_w_tr = coord_list_w_tr[:3, :].T.astype(int) + np.array([[0, img_shift, 0]]) # extract the first occurrence of a specific label from the MRI image labs = act_data_arr[coord_list_w_tr[..., 0], coord_list_w_tr[..., 1], coord_list_w_tr[..., 2]] lab_first = np.argmax(labs == 1) if labs[lab_first] == 1: pt_found = coord_list_w_tr[lab_first, :] _ = add_marker(pt_found, ren, [0., 0., 1.], radius=1) # convert coordinate back to invesalius 3D space pt_found_inv = pt_found - np.array([0., img_shift, 0.]) # convert to world coordinate space to use as seed for fiber tracking pt_found_tr = np.append(pt_found, 1)[np.newaxis, :].T pt_found_tr = affine @ pt_found_tr pt_found_tr = pt_found_tr[:3, 0, np.newaxis].T duration = time.time() - start_time print("Compute coil grid fast: {:.2f} ms".format(1e3*duration)) # create tracts count_tracts = 0 start_time_all = time.time() # uncertain_params = list(zip(dataSupportExponent, minFODamp)) for n in range(0, round(n_tracts/n_threads)): # branch = dti.multi_block(tracker, seed, n_threads) # branch = dti.multi_block(tracker, pt_found_tr, n_threads) # rescale n so that there is no 0 opacity tracts n_param = (n % 10) + 1 branch = dti.multi_block_uncertainty(tracker, pt_found_tr, n_threads, n_param) count_tracts += branch.GetNumberOfBlocks() # start_time = time.time() # root = dti.tracts_root(out_list, root, n) root.SetBlock(n, branch) # duration = time.time() - start_time # print("Compute root {}: {:.2f} ms".format(n, 1e3*duration)) duration = time.time() - start_time_all print("Compute multi {}: {:.2f} ms".format(n, 1e3*duration)) print("Number computed tracts {}".format(count_tracts)) print("Number computed branches {}".format(root.GetNumberOfBlocks())) start_time = time.time() tracts_actor = dti.compute_actor(root, matrix_vtk) duration = time.time() - start_time print("Compute actor: {:.2f} ms".format(1e3*duration)) # Assign actor to the renderer # ren.AddActor(brain_actor) # ren.AddActor(brain_inv_actor) # ren.AddActor(coil_actor) start_time = time.time() ren.AddActor(tracts_actor) duration = time.time() - start_time print("Add actor: {:.2f} ms".format(1e3*duration)) # ren.AddActor(brain_actor_mri) planex, planey, planez = raw_image(act_path, ren) planex.SetInteractor(iren) planex.On() planey.SetInteractor(iren) planey.On() planez.SetInteractor(iren) planez.On() _ = add_marker(np.squeeze(seed).tolist(), ren, [0., 1., 0.], radius=1) _ = add_marker(np.squeeze(pt_found_tr).tolist(), ren, [1., 0., 0.], radius=1) _ = add_marker(pt_found_inv, ren, [1., 1., 0.], radius=1) # Enable user interface interactor iren.Initialize() ren_win.Render() iren.Start()
def main(): """ Visualize Freesurfer, SimNIBS headreco, and Nexstim coil locations in the scanner coordinate system. """ SHOW_AXES = True SHOW_SCENE_AXES = True SHOW_COIL_AXES = True SHOW_SKIN = True SHOW_BRAIN = True SHOW_FREESURFER = True SHOW_COIL = True SHOW_MARKERS = True TRANSF_COIL = True SHOW_PLANE = False SELECT_LANDMARKS = 'scalp' # 'all', 'mri' 'scalp' SAVE_ID = False AFFINE_IMG = True NO_SCALE = True SCREENSHOT = False reorder = [0, 2, 1] flipx = [True, False, False] # reorder = [0, 1, 2] # flipx = [False, False, False] # default folder and subject # subj = 's03' subj = 'S5' id_extra = False # 8, 9, 10, 12, False data_dir = os.environ['OneDrive'] + r'\data\nexstim_coord' # data_dir = 'P:\\tms_eeg\\mTMS\\projects\\lateral ppTMS M1\\E-fields\\' # data_subj = data_dir + subj + '\\' simnibs_dir = data_dir + r'\simnibs\m2m_ppM1_{}_nc'.format(subj) fs_dir = data_dir + r'\freesurfer\ppM1_{}'.format(subj) if id_extra: nav_dir = data_dir + r'\nav_coordinates\ppM1_{}_{}'.format( subj, id_extra) else: nav_dir = data_dir + r'\nav_coordinates\ppM1_{}'.format(subj) # filenames # coil_file = data_dir + 'magstim_fig8_coil.stl' coil_file = os.environ[ 'OneDrive'] + r'\data\nexstim_coord\magstim_fig8_coil.stl' if id_extra: coord_file = nav_dir + r'\ppM1_eximia_{}_{}.txt'.format(subj, id_extra) else: coord_file = nav_dir + r'\ppM1_eximia_{}.txt'.format(subj) # img_file = data_subj + subj + '.nii' img_file = data_dir + r'\mri\ppM1_{}\ppM1_{}.nii'.format(subj, subj) brain_file = simnibs_dir + r'\wm.stl' skin_file = simnibs_dir + r'\skin.stl' fs_file = fs_dir + r'\lh.pial.stl' fs_t1 = fs_dir + r'\mri\T1.mgz' if id_extra: output_file = nav_dir + r'\transf_mat_{}_{}'.format(subj, id_extra) else: output_file = nav_dir + r'\transf_mat_{}'.format(subj) coords = lc.load_nexstim(coord_file) # red, green, blue, maroon (dark red), # olive (shitty green), teal (petrol blue), yellow, orange col = [[1., 0., 0.], [0., 1., 0.], [0., 0., 1.], [1., .0, 1.], [.5, .5, 0.], [0., .5, .5], [1., 1., 0.], [1., .4, .0]] # extract image header shape and affine transformation from original nifti file imagedata = nb.squeeze_image(nb.load(img_file)) imagedata = nb.as_closest_canonical(imagedata) imagedata.update_header() pix_dim = imagedata.header.get_zooms() img_shape = imagedata.header.get_data_shape() print("Pixel size: \n") print(pix_dim) print("\nImage shape: \n") print(img_shape) if AFFINE_IMG: affine = imagedata.affine if NO_SCALE: scale, shear, angs, trans, persp = tf.decompose_matrix( imagedata.affine) affine = tf.compose_matrix(scale=None, shear=shear, angles=angs, translate=trans, perspective=persp) print("\nAffine: \n") print(affine) else: affine = np.identity(4) # affine_I = np.identity(4) # create a camera, render window and renderer camera = vtk.vtkCamera() camera.SetPosition(0, 1000, 0) camera.SetFocalPoint(0, 0, 0) camera.SetViewUp(0, 0, 1) camera.ComputeViewPlaneNormal() camera.Azimuth(90.0) camera.Elevation(10.0) ren = vtk.vtkRenderer() ren.SetActiveCamera(camera) ren.ResetCamera() camera.Dolly(1.5) ren_win = vtk.vtkRenderWindow() ren_win.AddRenderer(ren) ren_win.SetSize(800, 800) # create a renderwindowinteractor iren = vtk.vtkRenderWindowInteractor() iren.SetRenderWindow(ren_win) if SELECT_LANDMARKS == 'mri': # MRI landmarks coord_mri = [['Nose/Nasion'], ['Left ear'], ['Right ear'], ['Coil Loc'], ['EF max']] pts_ref = [1, 2, 3, 7, 10] elif SELECT_LANDMARKS == 'all': # all coords coord_mri = [['Nose/Nasion'], ['Left ear'], ['Right ear'], ['Nose/Nasion'], ['Left ear'], ['Right ear'], ['Coil Loc'], ['EF 
max']] pts_ref = [1, 2, 3, 5, 4, 6, 7, 10] elif SELECT_LANDMARKS == 'scalp': # scalp landmarks coord_mri = [['Nose/Nasion'], ['Left ear'], ['Right ear'], ['Coil Loc'], ['EF max']] hdr_mri = [ 'Nose/Nasion', 'Left ear', 'Right ear', 'Coil Loc', 'EF max' ] pts_ref = [5, 4, 6, 7, 10] coords_np = np.zeros([len(pts_ref), 3]) for n, pts_id in enumerate(pts_ref): # to keep in the MRI space use the identity as the affine # coord_aux = n2m.coord_change(coords[pts_id][1:], img_shape, affine_I, flipx, reorder) # affine_trans = affine_I.copy() # affine_trans = affine.copy() # affine_trans[:3, -1] = affine[:3, -1] coord_aux = n2m.coord_change(coords[pts_id][1:], img_shape, affine, flipx, reorder) coords_np[n, :] = coord_aux [coord_mri[n].append(s) for s in coord_aux] if SHOW_MARKERS: marker_actor = add_marker(coord_aux, ren, col[n]) print('\nOriginal coordinates from Nexstim: \n') [print(s) for s in coords] print('\nTransformed coordinates to MRI space: \n') [print(s) for s in coord_mri] # coil location, normal vector and direction vector coil_loc = coord_mri[-2][1:] coil_norm = coords[8][1:] coil_dir = coords[9][1:] # creating the coil coordinate system by adding a point in the direction of each given coil vector # the additional vector is just the cross product from coil direction and coil normal vectors # origin of the coordinate system is the coil location given by Nexstim # the vec_length is to allow line creation with visible length in VTK scene vec_length = 75 p1 = coords[7][1:] p2 = [x + vec_length * y for x, y in zip(p1, coil_norm)] p2_norm = n2m.coord_change(p2, img_shape, affine, flipx, reorder) p2 = [x + vec_length * y for x, y in zip(p1, coil_dir)] p2_dir = n2m.coord_change(p2, img_shape, affine, flipx, reorder) coil_face = np.cross(coil_norm, coil_dir) p2 = [x - vec_length * y for x, y in zip(p1, coil_face.tolist())] p2_face = n2m.coord_change(p2, img_shape, affine, flipx, reorder) # Coil face unit vector (X) u1 = np.asarray(p2_face) - np.asarray(coil_loc) u1_n = u1 / np.linalg.norm(u1) # Coil direction unit vector (Y) u2 = np.asarray(p2_dir) - np.asarray(coil_loc) u2_n = u2 / np.linalg.norm(u2) # Coil normal unit vector (Z) u3 = np.asarray(p2_norm) - np.asarray(coil_loc) u3_n = u3 / np.linalg.norm(u3) transf_matrix = np.identity(4) if TRANSF_COIL: transf_matrix[:3, 0] = u1_n transf_matrix[:3, 1] = u2_n transf_matrix[:3, 2] = u3_n transf_matrix[:3, 3] = coil_loc[:] # the absolute value of the determinant indicates the scaling factor # the sign of the determinant indicates how it affects the orientation: if positive maintain the # original orientation and if negative inverts all the orientations (flip the object inside-out)' # the negative determinant is what makes objects in VTK scene to become # black print('Transformation matrix: \n', transf_matrix, '\n') print('Determinant: ', np.linalg.det(transf_matrix)) if SAVE_ID: coord_dict = { 'm_affine': transf_matrix, 'coords_labels': hdr_mri, 'coords': coords_np } io.savemat(output_file + '.mat', coord_dict) hdr_names = ';'.join( ['m' + str(i) + str(j) for i in range(1, 5) for j in range(1, 5)]) np.savetxt(output_file + '.txt', transf_matrix.reshape([1, 16]), delimiter=';', header=hdr_names) if SHOW_BRAIN: # brain_actor = load_stl(brain_file, ren, colour=[0., 1., 1.], opacity=0.7, user_matrix=np.linalg.inv(affine)) brain_actor = load_stl(brain_file, ren, colour=[0., 1., 1.], opacity=1.) 
if SHOW_SKIN: # skin_actor = load_stl(skin_file, ren, opacity=0.5, user_matrix=np.linalg.inv(affine)) skin_actor = load_stl(skin_file, ren, colour="SkinColor", opacity=.4) if SHOW_FREESURFER: img = fsio.MGHImage.load(fs_t1) #print("MGH Header: ", img) #print("MGH data: ", img.header['Pxyz_c']) # skin_actor = load_stl(skin_file, ren, opacity=0.5, user_matrix=np.linalg.inv(affine)) trans_fs = np.identity(4) trans_fs[:3, -1] = img.header['Pxyz_c'] fs_actor = load_stl(fs_file, ren, colour=[1., 0., 1.], opacity=0.5, user_matrix=trans_fs) if SHOW_COIL: # reposition STL object prior to transformation matrix # [translation_x, translation_y, translation_z, rotation_x, rotation_y, rotation_z] # old translation when using Y as normal vector # repos = [0., -6., 0., 0., -90., 90.] # Translate coil loc coordinate to coil bottom # repos = [0., 0., 5.5, 0., 0., 180.] repos = [0., 0., 0., 0., 0., 180.] act_coil = load_stl(coil_file, ren, replace=repos, user_matrix=transf_matrix, opacity=.3) if SHOW_PLANE: act_plane = add_plane(ren, user_matrix=transf_matrix) # Add axes to scene origin if SHOW_AXES: add_line(ren, [0, 0, 0], [150, 0, 0], color=[1.0, 0.0, 0.0]) add_line(ren, [0, 0, 0], [0, 150, 0], color=[0.0, 1.0, 0.0]) add_line(ren, [0, 0, 0], [0, 0, 150], color=[0.0, 0.0, 1.0]) # Add axes to object origin if SHOW_COIL_AXES: add_line(ren, coil_loc, p2_norm, color=[.0, .0, 1.0]) add_line(ren, coil_loc, p2_dir, color=[.0, 1.0, .0]) add_line(ren, coil_loc, p2_face, color=[1.0, .0, .0]) # Add interactive axes to scene if SHOW_SCENE_AXES: axes = vtk.vtkAxesActor() widget = vtk.vtkOrientationMarkerWidget() widget.SetOutlineColor(0.9300, 0.5700, 0.1300) widget.SetOrientationMarker(axes) widget.SetInteractor(iren) # widget.SetViewport(0.0, 0.0, 0.4, 0.4) widget.SetEnabled(1) widget.InteractiveOn() if SCREENSHOT: # screenshot of VTK scene w2if = vtk.vtkWindowToImageFilter() w2if.SetInput(ren_win) w2if.Update() writer = vtk.vtkPNGWriter() writer.SetFileName("screenshot.png") writer.SetInput(w2if.GetOutput()) writer.Write() # Enable user interface interactor # ren_win.Render() ren.ResetCameraClippingRange() iren.Initialize() iren.Start()
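# The transformation matrix assembled above is simply the coil's orthonormal basis packed
# column-wise into a homogeneous matrix with the coil location as origin. A generic,
# self-contained sketch of that construction (placeholder names, not tied to the Nexstim
# data handled above):
import numpy as np

def basis_to_matrix(x_axis, y_axis, z_axis, origin):
    """Pack three direction vectors (normalised here) and an origin into a 4x4 transform."""
    m = np.identity(4)
    m[:3, 0] = np.asarray(x_axis) / np.linalg.norm(x_axis)
    m[:3, 1] = np.asarray(y_axis) / np.linalg.norm(y_axis)
    m[:3, 2] = np.asarray(z_axis) / np.linalg.norm(z_axis)
    m[:3, 3] = origin
    # as noted above, a negative determinant flips the object inside-out and makes
    # VTK actors render black, so the handedness of the basis is worth checking
    return m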
# over the original ... trusting the original group gps solution as # our best absolute truth for positioning the system in world # coordinates. src_list = [] dst_list = [] for image in proj.image_list: if image.feature_count >= 25: # only consider images that are in the fitted set ned, ypr, quat = image.get_camera_pose_sba() src_list.append(ned) ned, ypr, quat = image.get_camera_pose() dst_list.append(ned) A = get_recenter_affine(src_list, dst_list) # extract the rotation matrix (R) from the affine transform scale, shear, angles, trans, persp = transformations.decompose_matrix(A) R = transformations.euler_matrix(*angles) print "R:\n", R # update the sba camera locations based on best fit camera_list = [] # load current sba poses for image in proj.image_list: ned, ypr, quat = image.get_camera_pose_sba() camera_list.append( ned ) # refit new_cams = transform_points(A, camera_list) # update sba poses. FIXME: do we need to update orientation here as # well? Somewhere we worked out the code, but it may not matter all # that much ... except for later manually computing mean projection # error.
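# transform_points() above is the project's own helper; a plausible equivalent that applies
# the 4x4 recentering affine A to a list of NED points would be (an assumption, not the
# project's actual implementation):
import numpy as np

def transform_points_sketch(A, points):
    pts = np.asarray(points, dtype=float)                 # (N, 3) camera positions
    pts_h = np.hstack([pts, np.ones((pts.shape[0], 1))])  # homogeneous (N, 4)
    return np.dot(A, pts_h.T).T[:, :3]                    # mapped back to (N, 3)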
def pose_from_point_cloud(img1, img2): left, right = s3d.load_images(img1) left2, right2 = s3d.load_images(img2) matched_features_1, matched_features_2 = s3d.find_n_best_feature_points( left, left2, 300, 40, False) features_left_1 = np.array([(f.pt[0], f.pt[1]) for f in matched_features_1]) features_left_2 = np.array([(f.pt[0], f.pt[1]) for f in matched_features_2]) #print(features_left_1, features_left_2) disparity_1 = s3d.compute_disparity(left, right, False) disparity_2 = s3d.compute_disparity(left2, right2, False) points_3d_1, points_3d_2 = matched_points_from_point_cloud( disparity_1, features_left_1, disparity_2, features_left_2 ) # MAKE ONE THAT DOES BOTH IMAGES AT SAME TIME, SINCE LOTS OF THE POINTS DO NOT GET DONE DUES TO DISPARITY FOR SOME REASON, SO THE LEFTOVER ONES ARENT MATCHED #print(np.transpose(points_3d_1)) #formatted_points_1 = formatted_points_2 = [] #for p in points_3d_1: _, transformation, _ = cv2.estimateAffine3D(points_3d_1, points_3d_2, ransacThreshold=0.97) #RESHAPE POINT ARRAYS #print(points_3d_1, points_3d_2) #affine3d = trans.affine_matrix_from_points(np.transpose(points_3d_1), np.transpose(points_3d_2), False, False) #print(transformation, affine3d) transformation = np.append(transformation, [[0.0, 0.0, 0.0, 1.0]], axis=0) #print(transformation, affine3d) scale, shear, angles, translation, perspective = trans.decompose_matrix( transformation) r = angles t = translation """ h_mat, _ = cv2.findHomography(points_3d_1, points_3d_2, method=cv2.RANSAC) _, r, t, _ = cv2.decomposeHomographyMat(h_mat, camera_matrix) print("r, t:", r, t) global prev_r, prev_t if len(prev_r) > 0 and len(prev_t) > 0: best_r = r[0] for i in r[1:]: #print(i) if abs(prev_r[0][0] - i[0][0]) < abs(prev_r[0][0] - best_r[0][0]): best_r = i best_t = t[0] for i in t[1:]: if abs(prev_t[0][0] - i[0][0]) < abs(prev_t[0][0] - best_t[0][0]): best_t = i else: best_r = r[0] best_t = t[0] for i in r: if i[0][0] > 0.99: best_r = i for i in t: if i[2] > 0: best_t = i prev_r = best_r prev_t = best_t""" return r, t
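# cv2.estimateAffine3D returns a 3x4 [R|t]; padding it with [0, 0, 0, 1] (as above) lets
# transformations.decompose_matrix split it into scale, shear, Euler angles and translation.
# A small round-trip sanity check of that decomposition on a known rigid transform:
import numpy as np
import transformations as trans

R_true = trans.euler_matrix(0.1, -0.2, 0.3)                 # known rotation (static xyz)
T_true = trans.translation_matrix([1.0, 2.0, 3.0])          # known translation
M = np.dot(T_true, R_true)                                  # rigid 4x4
scale, shear, angles, translation, perspective = trans.decompose_matrix(M)
assert np.allclose(trans.euler_matrix(*angles), R_true)     # rotation recovered
assert np.allclose(translation, [1.0, 2.0, 3.0])            # translation recovered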
def drawit(i, x_star, x_star_cov, M_star, toplot_star, x_input, M_input, toplot_input, x_true, pusher_loc, contact_normal, contact_point, has_contact, has_apriltag, saveto, titletext, saveformat, plotconfig, sub, M_star_3d, M_input_3d, shape, shape_type, shape_polygon_3d): label = mat['label'] radius = mat['probe_radius'] startdate = mat['startdate'] shape_id = mat['shape_id'] offset = mat['offset'] if i % 100 == 0: print '\ndisplay result', i else: sys.stdout.write('.') sys.stdout.flush() fig1 = plt.figure() ax1 = fig1.add_subplot(111, aspect='equal') ax1.set_xlim(npa([-0.09, 0.09]) * 1.3 + offset[0]) ax1.set_ylim(npa([-0.09, 0.09]) * 1.3 + offset[1]) # plot groundtruth shape and pose T = matrix_from_xyzrpy([x_true[i][0], x_true[i][1], 0], [0, 0, x_true[i][2]]) if shape_type == 'poly' or shape_type == 'polyapprox': shape_polygon_3d_world = np.dot(T, shape_polygon_3d.T) obj = mpatches.Polygon(shape_polygon_3d_world.T[:, 0:2], closed=True, linewidth=2, linestyle='dashed', fill=False) elif shape_type == 'ellip': scale, shear, angles, trans, persp = tfm.decompose_matrix(T) obj = mpatches.Ellipse(trans[0:2], shape[0] * 2, shape[1] * 2, angle=angles[2] / np.pi * 180.0, fill=False, linewidth=1, linestyle='solid') ax1.add_patch(obj) #fig1.savefig('rect1.png', dpi=200, bbox_inches='tight') if toplot_star: if len(M_star.shape) == 3: M_star_i = npa(M_star[i]) M_star_3d = np.hstack( (np.array(M_star_i), np.zeros( (len(M_star_i), 1)), np.ones((len(M_star_i), 1)))) # plot input shape and pose if toplot_input: T = matrix_from_xyzrpy([x_input[i][0], x_input[i][1], 0], [0, 0, x_input[i][2]]) if shape_type == 'poly' or shape_type == 'polyapprox' or shape_type == 'ellip': M_input_3d_world = np.dot(T, M_input_3d.T) obj = mpatches.Polygon(M_input_3d_world.T[:, 0:2], closed=True, linewidth=1, linestyle='solid', fill=False) ax1.plot(M_input_3d_world.T[:, 0:1], M_input_3d_world.T[:, 1:2], 'go') elif shape_type == 'ellip': scale, shear, angles, trans, persp = tfm.decompose_matrix(T) obj = mpatches.Ellipse(trans[0:2], shape[0] * 2, shape[1] * 2, angle=angles[2], fill=False, linewidth=1, linestyle='solid') ax1.add_patch(obj) # plot estimated shape and pose if toplot_star: T = matrix_from_xyzrpy([x_star[i][0], x_star[i][1], 0], [0, 0, x_star[i][2]]) if shape_type == 'poly' or shape_type == 'polyapprox' or shape_type == 'ellip': M_star_3d_world = np.dot(T, M_star_3d.T) obj = mpatches.Polygon(M_star_3d_world.T[:, 0:2], closed=True, linewidth=1, linestyle='solid', fill=False) ax1.plot(M_star_3d_world.T[:, 0:1], M_star_3d_world.T[:, 1:2], 'ro') elif shape_type == 'ellip': scale, shear, angles, trans, persp = tfm.decompose_matrix(T) obj = mpatches.Ellipse(trans[0:2], shape[0] * 2, shape[1] * 2, angle=angles[2], fill=False, linewidth=1, linestyle='solid') ax1.add_patch(obj) # plot the covariance of pose if x_star_cov is not None: plot_cov_ellipse(npa(x_star_cov[i][0:2][:, 0:2]), npa(x_star[i][0:2]), ax=ax1, facecolor=(1, 1, 153 / 255.0, 0.5)) plot_cov_fan(x_star_cov[i][2][2], npa(x_star[i][0:3]), npa(x_true[i][0:3]), ax=ax1) plot_pos(x_true[i][0:3], ax=ax1) plot_pos(x_input[i][0:3], ax=ax1, color='green') for side in [0, 1]: # left(0) / right(1) # plot probe circle = mpatches.Circle(pusher_loc[side][i], radius=radius) ax1.add_patch(circle) if has_contact[side][i]: # plot contact point ax1.plot(contact_point[side][i][0], contact_point[side][i][1], 'k*') # plot normal ax1.arrow(contact_point[side][i][0], contact_point[side][i][1], contact_normal[side][i][0] * 0.01, contact_normal[side][i][1] * 0.01, head_width=0.001, 
head_length=0.01, fc='g', ec='g') #ax1.arrow(d[i,0], d[i,1], turned_norm[i][0]*0.01, turned_norm[i][1]*0.01, head_width=0.001, head_length=0.01, fc='r', ec='r') #ax1.arrow(d[i,0], d[i,1], vicon_norm[i][0]*0.01, vicon_norm[i][1]*0.01, head_width=0.001, head_length=0.01, fc='g', ec='g') # plot no apriltag if not has_apriltag[i]: ax1.text(offset[0] - 0.1, offset[1] - 0.1, 'No apriltag') # no axes ax1.set_axis_off() fig1.savefig('%s%07d.png' % (saveto, i), dpi=200, bbox_inches='tight') plt.close(fig1)
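# matplotlib's Ellipse patch expects its angle in degrees, which is why the groundtruth
# branch above converts the decomposed z rotation with angles[2] / np.pi * 180.0. A small
# helper sketch of that pattern (generic names, not part of the plotting code above):
import numpy as np
import matplotlib.patches as mpatches
import transformations as tfm

def ellipse_from_pose(T, semi_x, semi_y, **kwargs):
    """Build an Ellipse patch whose centre and in-plane rotation come from the 4x4 pose T."""
    scale, shear, angles, trans, persp = tfm.decompose_matrix(T)
    return mpatches.Ellipse(trans[0:2], semi_x * 2, semi_y * 2,
                            angle=np.rad2deg(angles[2]), **kwargs)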
def main(): SHOW_AXES = True SHOW_SCENE_AXES = True SHOW_COIL_AXES = True SHOW_SKIN = True SHOW_BRAIN = True SHOW_COIL = True SHOW_MARKERS = True TRANSF_COIL = True SHOW_PLANE = False SELECT_LANDMARKS = 'scalp' # 'all', 'mri' 'scalp' SAVE_ID = True AFFINE_IMG = True NO_SCALE = True SCREENSHOT = False SHOW_OTHER = False reorder = [0, 2, 1] flipx = [True, False, False] # reorder = [0, 1, 2] # flipx = [False, False, False] # default folder and subject # for Bert image use the translation in the base_affine (fall-back) subj_list = ['VictorSouza', 'JaakkoNieminen', 'AinoTervo', 'JuusoKorhonen', 'BaranAydogan', 'AR', 'Bert'] subj = 0 data_dir = os.environ.get('OneDrive') + r'\vh\eventos\sf 2019\mri_science_factory\{}'.format(subj_list[subj]) # filenames img_file = data_dir + r'\{}.nii'.format(subj_list[subj]) brain_file = data_dir + r'\gm.stl' skin_file = data_dir + r'\gm_sn.stl' if subj == 3: other_file = data_dir + r'\gm.ply' elif subj == 4: other_file = data_dir + r'\tracks.vtp' elif subj == 6: other_file = data_dir + r'\gm.ply' else: other_file = data_dir + r'\gm.stl' # coords = lc.load_nexstim(coord_file) # red, green, blue, maroon (dark red), # olive (shitty green), teal (petrol blue), yellow, orange col = [[1., 0., 0.], [0., 1., 0.], [0., 0., 1.], [1., .0, 1.], [.5, .5, 0.], [0., .5, .5], [1., 1., 0.], [1., .4, .0]] # extract image header shape and affine transformation from original nifti file imagedata = nb.squeeze_image(nb.load(img_file)) imagedata = nb.as_closest_canonical(imagedata) imagedata.update_header() pix_dim = imagedata.header.get_zooms() img_shape = imagedata.header.get_data_shape() print("Pixel size: \n") print(pix_dim) print("\nImage shape: \n") print(img_shape) print("\nSform: \n") print(imagedata.get_qform(coded=True)) print("\nQform: \n") print(imagedata.get_sform(coded=True)) print("\nFall-back: \n") print(imagedata.header.get_base_affine()) scale_back, shear_back, angs_back, trans_back, persp_back = tf.decompose_matrix(imagedata.header.get_base_affine()) if AFFINE_IMG: affine = imagedata.affine # affine = imagedata.header.get_base_affine() if NO_SCALE: scale, shear, angs, trans, persp = tf.decompose_matrix(affine) affine = tf.compose_matrix(scale=None, shear=shear, angles=angs, translate=trans, perspective=persp) else: affine = np.identity(4) # affine_I = np.identity(4) # create a camera, render window and renderer camera = vtk.vtkCamera() camera.SetPosition(0, 1000, 0) camera.SetFocalPoint(0, 0, 0) camera.SetViewUp(0, 0, 1) camera.ComputeViewPlaneNormal() camera.Azimuth(90.0) camera.Elevation(10.0) ren = vtk.vtkRenderer() ren.SetActiveCamera(camera) ren.ResetCamera() ren.SetUseDepthPeeling(1) ren.SetOcclusionRatio(0.1) ren.SetMaximumNumberOfPeels(100) camera.Dolly(1.5) ren_win = vtk.vtkRenderWindow() ren_win.AddRenderer(ren) ren_win.SetSize(800, 800) ren_win.SetMultiSamples(0) ren_win.SetAlphaBitPlanes(1) # create a renderwindowinteractor iren = vtk.vtkRenderWindowInteractor() iren.SetRenderWindow(ren_win) # if SELECT_LANDMARKS == 'mri': # # MRI landmarks # coord_mri = [['Nose/Nasion'], ['Left ear'], ['Right ear'], ['Coil Loc'], ['EF max']] # pts_ref = [1, 2, 3, 7, 10] # elif SELECT_LANDMARKS == 'all': # # all coords # coord_mri = [['Nose/Nasion'], ['Left ear'], ['Right ear'], ['Nose/Nasion'], ['Left ear'], ['Right ear'], # ['Coil Loc'], ['EF max']] # pts_ref = [1, 2, 3, 5, 4, 6, 7, 10] # elif SELECT_LANDMARKS == 'scalp': # # scalp landmarks # coord_mri = [['Nose/Nasion'], ['Left ear'], ['Right ear'], ['Coil Loc'], ['EF max']] # hdr_mri = ['Nose/Nasion', 'Left ear', 
'Right ear', 'Coil Loc', 'EF max'] # pts_ref = [5, 4, 6, 7, 10] # # coords_np = np.zeros([len(pts_ref), 3]) # for n, pts_id in enumerate(pts_ref): # # to keep in the MRI space use the identity as the affine # # coord_aux = n2m.coord_change(coords[pts_id][1:], img_shape, affine_I, flipx, reorder) # # affine_trans = affine_I.copy() # # affine_trans = affine.copy() # # affine_trans[:3, -1] = affine[:3, -1] # coord_aux = n2m.coord_change(coords[pts_id][1:], img_shape, affine, flipx, reorder) # coords_np[n, :] = coord_aux # [coord_mri[n].append(s) for s in coord_aux] # if SHOW_MARKERS: # marker_actor = add_marker(coord_aux, ren, col[n]) # # print('\nOriginal coordinates from Nexstim: \n') # [print(s) for s in coords] # print('\nTransformed coordinates to MRI space: \n') # [print(s) for s in coord_mri] # # # coil location, normal vector and direction vector # coil_loc = coord_mri[-2][1:] # coil_norm = coords[8][1:] # coil_dir = coords[9][1:] # # # creating the coil coordinate system by adding a point in the direction of each given coil vector # # the additional vector is just the cross product from coil direction and coil normal vectors # # origin of the coordinate system is the coil location given by Nexstim # # the vec_length is to allow line creation with visible length in VTK scene # vec_length = 75 # p1 = coords[7][1:] # p2 = [x + vec_length * y for x, y in zip(p1, coil_norm)] # p2_norm = n2m.coord_change(p2, img_shape, affine, flipx, reorder) # # p2 = [x + vec_length * y for x, y in zip(p1, coil_dir)] # p2_dir = n2m.coord_change(p2, img_shape, affine, flipx, reorder) # # coil_face = np.cross(coil_norm, coil_dir) # p2 = [x - vec_length * y for x, y in zip(p1, coil_face.tolist())] # p2_face = n2m.coord_change(p2, img_shape, affine, flipx, reorder) # Coil face unit vector (X) # u1 = np.asarray(p2_face) - np.asarray(coil_loc) # u1_n = u1 / np.linalg.norm(u1) # # Coil direction unit vector (Y) # u2 = np.asarray(p2_dir) - np.asarray(coil_loc) # u2_n = u2 / np.linalg.norm(u2) # # Coil normal unit vector (Z) # u3 = np.asarray(p2_norm) - np.asarray(coil_loc) # u3_n = u3 / np.linalg.norm(u3) # # transf_matrix = np.identity(4) # if TRANSF_COIL: # transf_matrix[:3, 0] = u1_n # transf_matrix[:3, 1] = u2_n # transf_matrix[:3, 2] = u3_n # transf_matrix[:3, 3] = coil_loc[:] # the absolute value of the determinant indicates the scaling factor # the sign of the determinant indicates how it affects the orientation: if positive maintain the # original orientation and if negative inverts all the orientations (flip the object inside-out)' # the negative determinant is what makes objects in VTK scene to become black # print('Transformation matrix: \n', transf_matrix, '\n') # print('Determinant: ', np.linalg.det(transf_matrix)) # if SAVE_ID: # coord_dict = {'m_affine': transf_matrix, 'coords_labels': hdr_mri, 'coords': coords_np} # io.savemat(output_file + '.mat', coord_dict) # hdr_names = ';'.join(['m' + str(i) + str(j) for i in range(1, 5) for j in range(1, 5)]) # np.savetxt(output_file + '.txt', transf_matrix.reshape([1, 16]), delimiter=';', header=hdr_names) if SHOW_BRAIN: # brain_actor = load_stl(brain_file, ren, colour=[0., 1., 1.], opacity=0.7, user_matrix=np.linalg.inv(affine)) affine_orig = np.identity(4) # affine_orig = affine.copy() # affine_orig[0, 3] = affine_orig[0, 3] + pix_dim[0]*img_shape[0] # affine_orig[1, 3] = affine_orig[1, 3] + pix_dim[1]*img_shape[1] # affine_orig[0, 3] = affine_orig[0, 3] + pix_dim[0]*img_shape[0] # affine_orig[0, 3] = affine_orig[0, 3] - 5 # this partially works for DTI 
Baran # modified close to correct [-75.99139404 123.88291931 - 148.19839478] # fall-back [87.50042766 - 127.5 - 127.5] # affine_orig[0, 3] = -trans_back[0] # affine_orig[1, 3] = -trans_back[1] # this works for the bert image # affine_orig[0, 3] = -127 # affine_orig[1, 3] = 127 # affine_orig[2, 3] = -127 # affine_orig[:3, :3] = affine[:3, :3] # affine_orig[1, 3] = -affine_orig[1, 3]+27.5 # victorsouza # affine_orig[1, 3] = -affine_orig[1, 3]+97.5 # affine_orig[1, 3] = -affine_orig[1, 3] print('Affine original: \n', affine) scale, shear, angs, trans, persp = tf.decompose_matrix(affine) print('Angles: \n', np.rad2deg(angs)) print('Translation: \n', trans) print('Affine modified: \n', affine_orig) scale, shear, angs, trans, persp = tf.decompose_matrix(affine_orig) print('Angles: \n', np.rad2deg(angs)) print('Translation: \n', trans) # colour=[0., 1., 1.], brain_actor, brain_mesh = load_stl(brain_file, ren, replace=True, colour=[1., 0., 0.], opacity=.3, user_matrix=affine_orig) # print('Actor origin: \n', brain_actor.GetPosition()) if SHOW_SKIN: # skin_actor = load_stl(skin_file, ren, opacity=0.5, user_matrix=np.linalg.inv(affine)) # affine[0, 3] = affine[0, 3] + pix_dim[0] * img_shape[0] # this is working # affine[0, 3] = affine[0, 3] + 8. affine[1, 3] = affine[1, 3] + pix_dim[1] * img_shape[1] # affine[2, 3] = affine[2, 3] + pix_dim[2] * img_shape[2] affine_inv = np.linalg.inv(affine) # affine_inv[:3, 3] = -affine[:3, 3] # affine_inv[2, 3] = -affine_inv[2, 3] skin_actor, skin_mesh = load_stl(skin_file, ren, colour="SkinColor", opacity=1., user_matrix=affine_inv) # skin_actor, skin_mesh = load_stl(skin_file, ren, colour="SkinColor", opacity=1.) skino_actor, skino_mesh = load_stl(skin_file, ren, colour=[1., 0., 0.], opacity=1.) if SHOW_OTHER: # skin_actor = load_stl(skin_file, ren, opacity=0.5, user_matrix=np.linalg.inv(affine)) affine[1, 3] = affine[1, 3] + pix_dim[1] * img_shape[1] affine_inv = np.linalg.inv(affine) # affine_inv[:3, 3] = -affine[:3, 3] affine_inv[1, 3] = affine_inv[1, 3] # other_actor, other_mesh = load_stl(other_file, ren, opacity=1., user_matrix=affine_inv) # other_actor, other_mesh = load_stl(other_file, ren, opacity=1.) # if SHOW_COIL: # # reposition STL object prior to transformation matrix # # [translation_x, translation_y, translation_z, rotation_x, rotation_y, rotation_z] # # old translation when using Y as normal vector # # repos = [0., -6., 0., 0., -90., 90.] # # Translate coil loc coordinate to coil bottom # # repos = [0., 0., 5.5, 0., 0., 180.] # repos = [0., 0., 0., 0., 0., 180.] 
# act_coil = load_stl(coil_file, ren, replace=repos, user_matrix=transf_matrix, opacity=.3) # # if SHOW_PLANE: # act_plane = add_plane(ren, user_matrix=transf_matrix) # Add axes to scene origin if SHOW_AXES: add_line(ren, [0, 0, 0], [150, 0, 0], color=[1.0, 0.0, 0.0]) add_line(ren, [0, 0, 0], [0, 150, 0], color=[0.0, 1.0, 0.0]) add_line(ren, [0, 0, 0], [0, 0, 150], color=[0.0, 0.0, 1.0]) # Add axes to object origin # if SHOW_COIL_AXES: # add_line(ren, coil_loc, p2_norm, color=[.0, .0, 1.0]) # add_line(ren, coil_loc, p2_dir, color=[.0, 1.0, .0]) # add_line(ren, coil_loc, p2_face, color=[1.0, .0, .0]) # Add interactive axes to scene if SHOW_SCENE_AXES: axes = vtk.vtkAxesActor() widget = vtk.vtkOrientationMarkerWidget() widget.SetOutlineColor(0.9300, 0.5700, 0.1300) widget.SetOrientationMarker(axes) widget.SetInteractor(iren) # widget.SetViewport(0.0, 0.0, 0.4, 0.4) widget.SetEnabled(1) widget.InteractiveOn() # # if SCREENSHOT: # # screenshot of VTK scene # w2if = vtk.vtkWindowToImageFilter() # w2if.SetInput(ren_win) # w2if.Update() # # writer = vtk.vtkPNGWriter() # writer.SetFileName("screenshot.png") # writer.SetInput(w2if.GetOutput()) # writer.Write() # Enable user interface interactor # ren_win.Render() ren.ResetCameraClippingRange() iren.Initialize() iren.Start()
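# Both main() variants above drop the voxel scaling from the NIfTI affine by decomposing it
# and recomposing without the scale term (the NO_SCALE branch). The same operation as a
# standalone helper, assuming any 4x4 affine A:
import transformations as tf

def remove_scale(A):
    """Return A with scale factors dropped but shear, rotation and translation kept."""
    scale, shear, angles, translate, perspective = tf.decompose_matrix(A)
    return tf.compose_matrix(scale=None, shear=shear, angles=angles,
                             translate=translate, perspective=perspective)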
def __init__(self, T): if ( type(T) == str): print "Loading Transformation from file: " + T Tfile = T #Load transformation Tfis = open(Tfile, 'r') lines = [] lines = Tfis.readlines() format = len(lines) Tfis.seek(0) #reset file pointer if not (format==3 or format==4 or format==5) : raise ValueError("Wrong number of lines in file") # import code; code.interact(local=locals()) if format == 3: """Handles processing a ground truth transfomation File saved using vxl format x' = s *(Rx + T) scale quaternion translation """ print("Reading format 3") self.scale = float(lines[0]) self.Ss = tf.scale_matrix(self.scale) quat_line = lines[1].split(" ") self.quat = tf.unit_vector(np.array([float(quat_line[3]), float(quat_line[0]), float(quat_line[1]), float(quat_line[2])])) self.Hs = tf.quaternion_matrix(self.quat) trans_line = lines[2].split(" ") self.Ts = np.array([float(trans_line[0]), float(trans_line[1]), float(trans_line[2])]) Tfis.close() self.Rs = self.Hs.copy()[:3, :3] self.Hs[:3, 3] = self.Ts[:3] self.Hs = self.Ss.dot(self.Hs) # to add again if format == 4 : """If the transformation was saved as: H (4x4) - = [S*R|S*T] """ print("Reading format 4") self.Hs = np.genfromtxt(Tfile, usecols={0, 1, 2, 3}) Tfis.close() scale, shear, angles, trans, persp = tf.decompose_matrix(self.Hs) self.scale = scale[0] # assuming isotropic scaling self.Rs = self.Hs[:3, :3] * (1.0 / self.scale) self.Ts = self.Hs[:3, 3] * (1.0 / self.scale) self.quat = tf.quaternion_from_euler(angles[0], angles[1], angles[2]) if format==5: """If the transformation was saved as: scale H (4x4) - = [S*R|S*T] """ print("Reading format 5") self.Hs = np.genfromtxt(Tfis, skip_header=1, usecols={0, 1, 2, 3}) Tfis.close() Tfis = open(Tfile, 'r') self.scale = np.genfromtxt(Tfis, skip_footer=4, usecols={0}) Tfis.close() self.Rs = self.Hs[:3, :3] * (1.0 / self.scale) self.Ts = self.Hs[:3, 3] * (1.0 / self.scale) scale, shear, angles, trans, persp = tf.decompose_matrix(self.Hs) self.quat = tf.quaternion_from_euler(angles[0], angles[1], angles[2]) print "Debugging translation:" print self.Ts print trans/self.scale elif (type(T) == np.ndarray): print "Loading Transformation array" self.Hs = T scale, shear, angles, trans, persp = tf.decompose_matrix(T) self.scale =scale[0] self.Rs = self.Hs[:3, :3] * (1.0 / self.scale) self.Ts = self.Hs[:3, 3] * (1.0 / self.scale) self.quat = tf.quaternion_from_euler(angles[0], angles[1], angles[2]) print "Debugging translation:" print self.Ts print trans/self.scale # self.Rs = tf.quaternion_matrix(self.quat) # self.Ts = trans / self.scale print self.Hs
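# Formats 4 and 5 above store H = [s*R | s*T]; the loader recovers s with decompose_matrix
# and divides it out of the rotation and translation blocks. A round-trip sketch of that
# convention with made-up values (not taken from any of the files read above):
import numpy as np
import transformations as tf

s = 2.5
R = tf.euler_matrix(0.2, -0.1, 0.4)[:3, :3]
T = np.array([1.0, -2.0, 0.5])
H = np.identity(4)
H[:3, :3] = s * R
H[:3, 3] = s * T
scale, shear, angles, trans, persp = tf.decompose_matrix(H)
assert np.isclose(scale[0], s)                   # isotropic scale recovered
assert np.allclose(H[:3, :3] / scale[0], R)      # rotation recovered, matching Rs above
assert np.allclose(H[:3, 3] / scale[0], T)       # translation recovered, matching Ts above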
print "A_1:", m.A_1 ef_data = [] for s in sense_array: ef = m.map(s) norm = np.linalg.norm(ef) #ef /= norm ef_data.append(ef) #print 's:', s, 'ef:', ef ef_array = np.array(ef_data) affine = transformations.affine_matrix_from_points(sense_array.T, ef_array.T) print "affine ef:" np.set_printoptions(precision=10, suppress=True) print affine scale, shear, angles, translate, perspective = transformations.decompose_matrix( affine) print ' scale:', scale print ' shear:', shear print ' angles:', angles print ' trans:', translate print ' persp:', perspective cal_dir = os.path.join(args.cal, imu_sn) if not os.path.exists(cal_dir): os.makedirs(cal_dir) cal_file = os.path.join(cal_dir, "imucal.json") cal = imucal.Calibration() cal.load(cal_file) cal.mag_affine = affine cal.save(cal_file)
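# affine_matrix_from_points() expects (ndims, npoints) arrays, hence the .T above; it
# returns the 4x4 affine mapping the first point set onto the second, which is then stored
# as cal.mag_affine. A synthetic self-check sketch of that fit (made-up data, not the
# sensor samples used above):
import numpy as np
import transformations

np.random.seed(0)
src = np.random.rand(100, 3)                        # pretend raw sensor samples
A_true = transformations.compose_matrix(scale=[1.1, 0.9, 1.0],
                                        angles=[0.05, -0.02, 0.1],
                                        translate=[0.3, -0.1, 0.2])
src_h = np.vstack([src.T, np.ones(100)])
dst = np.dot(A_true, src_h)[:3].T                   # points mapped by the known affine
A_fit = transformations.affine_matrix_from_points(src.T, dst.T)
assert np.allclose(np.dot(A_fit, src_h)[:3].T, dst, atol=1e-6)   # fit reproduces the mapping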
import numpy as np
import math
import transformations
from points import *

NOM = NOM.T
NOM_A = NOM_A.T
MEAS = MEAS.T
MEAS_A = MEAS_A.T

print "\nNOMINAL\n\n", NOM, "\n\nMEASURED\n\n", MEAS, "\n"
print "\nNOMINAL_A\n\n", NOM_A, "\n\nMEASURED_A\n\n", MEAS_A, "\n"

# Rotation matrix may be pre- or post-multiplied (changing between a right-handed system and a left-handed system).
R = transformations.superimposition_matrix(MEAS, NOM, usesvd=True)
scale, shear, angles, translate, perspective = transformations.decompose_matrix(R)
#R = transformations.inverse_matrix(R)
print "Rotation matrix\n\n", R, "\n"
#rot = R.T

# one rotation matrix per measured point, built column by column from MEAS_A
p1 = transformations.euler_matrix(MEAS_A[0,0]/180*np.pi, MEAS_A[1,0]/180*np.pi, MEAS_A[2,0]/180*np.pi, axes='sxyz')
p2 = transformations.euler_matrix(MEAS_A[0,1]/180*np.pi, MEAS_A[1,1]/180*np.pi, MEAS_A[2,1]/180*np.pi, axes='sxyz')
p3 = transformations.euler_matrix(MEAS_A[0,2]/180*np.pi, MEAS_A[1,2]/180*np.pi, MEAS_A[2,2]/180*np.pi, axes='sxyz')
p4 = transformations.euler_matrix(MEAS_A[0,3]/180*np.pi, MEAS_A[1,3]/180*np.pi, MEAS_A[2,3]/180*np.pi, axes='sxyz')

t1 = transformations.translation_matrix(MEAS[0:,0])
t2 = transformations.translation_matrix(MEAS[0:,1])
t3 = transformations.translation_matrix(MEAS[0:,2])
t4 = transformations.translation_matrix(MEAS[0:,3])

#print "p1\n",p1,"\n","p2\n",p2,"\n","p3\n",p3,"\n","p4\n",p4,"\n"
#print "t1\n",t1,"\n","t2\n",t2,"\n","t3\n",t3,"\n","t4\n",t4,"\n"

p1n = np.dot(t1, p1)
p2n = np.dot(t2, p2)
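# superimposition_matrix() above returns the rigid transform that best maps MEAS onto NOM;
# with scaling disabled (the default) decompose_matrix should report unit scale and zero
# shear. A tiny self-check with made-up points (3 x N arrays, as the function expects):
import numpy as np
import transformations

np.random.seed(1)
pts = np.random.rand(3, 10)
M_true = transformations.euler_matrix(0.3, -0.2, 0.1)
M_true[:3, 3] = [5.0, -2.0, 1.0]                       # add a translation
pts_moved = np.dot(M_true, np.vstack([pts, np.ones(10)]))[:3]
M_fit = transformations.superimposition_matrix(pts_moved, pts, usesvd=True)
assert np.allclose(np.dot(M_fit, M_true), np.identity(4), atol=1e-6)   # fit inverts M_true
scale, shear, angles, translate, perspective = transformations.decompose_matrix(M_fit)
assert np.allclose(scale, 1.0) and np.allclose(shear, 0.0)             # rigid: no scale/shear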
def controls_3d(self, dx, dy, zooming_one_shot=False): CAMERA_TRANSLATION_FACTOR = 0.01 CAMERA_ROTATION_FACTOR = 0.01 if not (self.is_rotating or self.is_panning or self.is_zooming): return current_pos = self.current_cam.transformation[:3, 3].copy() distance = numpy.linalg.norm(self.focal_point - current_pos) if self.is_rotating: """ Orbiting the camera is implemented the following way: - the rotation is split into a rotation around the *world* Z axis (controlled by the horizontal mouse motion along X) and a rotation around the *X* axis of the camera (pitch) *shifted to the focal origin* (the world origin for now). This is controlled by the vertical motion of the mouse (Y axis). - as a result, the resulting transformation of the camera in the world frame C' is: C' = (T · Rx · T⁻¹ · (Rz · C)⁻¹)⁻¹ where: - C is the original camera transformation in the world frame, - Rz is the rotation along the Z axis (in the world frame) - T is the translation camera -> world (ie, the inverse of the translation part of C - Rx is the rotation around X in the (translated) camera frame """ rotation_camera_x = dy * CAMERA_ROTATION_FACTOR rotation_world_z = dx * CAMERA_ROTATION_FACTOR world_z_rotation = transformations.euler_matrix( 0, 0, rotation_world_z) cam_x_rotation = transformations.euler_matrix( rotation_camera_x, 0, 0) after_world_z_rotation = numpy.dot(world_z_rotation, self.current_cam.transformation) inverse_transformation = transformations.inverse_matrix( after_world_z_rotation) translation = transformations.translation_matrix( transformations.decompose_matrix(inverse_transformation)[3]) inverse_translation = transformations.inverse_matrix(translation) new_inverse = numpy.dot(inverse_translation, inverse_transformation) new_inverse = numpy.dot(cam_x_rotation, new_inverse) new_inverse = numpy.dot(translation, new_inverse) self.current_cam.transformation = transformations.inverse_matrix( new_inverse).astype(numpy.float32) if self.is_panning: tx = -dx * CAMERA_TRANSLATION_FACTOR * distance ty = dy * CAMERA_TRANSLATION_FACTOR * distance cam_transform = transformations.translation_matrix( (tx, ty, 0)).astype(numpy.float32) self.current_cam.transformation = numpy.dot( self.current_cam.transformation, cam_transform) if self.is_zooming: tz = dy * CAMERA_TRANSLATION_FACTOR * distance cam_transform = transformations.translation_matrix( (0, 0, tz)).astype(numpy.float32) self.current_cam.transformation = numpy.dot( self.current_cam.transformation, cam_transform) if zooming_one_shot: self.is_zooming = False self.update_view_camera()
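# The orbit update above implements C' = (T · Rx · T⁻¹ · (Rz · C)⁻¹)⁻¹ step by step; the same
# formula condensed into one helper (a sketch assuming, as above, that the focal point is
# the world origin):
import numpy
import transformations

def orbit_camera(C, yaw_world_z, pitch_camera_x):
    """Rotate camera transform C around world Z, then pitch it about its own X axis."""
    Rz = transformations.euler_matrix(0, 0, yaw_world_z)
    Rx = transformations.euler_matrix(pitch_camera_x, 0, 0)
    inverse = transformations.inverse_matrix(numpy.dot(Rz, C))
    T = transformations.translation_matrix(transformations.decompose_matrix(inverse)[3])
    T_inv = transformations.inverse_matrix(T)
    return transformations.inverse_matrix(
        numpy.dot(T, numpy.dot(Rx, numpy.dot(T_inv, inverse))))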
def getTranslation(self):
    """Return the (x, y, z) translation component of self.matrix."""
    xlate = xform.decompose_matrix(self.matrix)[3]
    return (xlate[0], xlate[1], xlate[2])
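# decompose_matrix() returns (scale, shear, angles, translate, perspective), so index [3]
# above picks out the translation. A quick standalone check of that ordering:
import numpy as np
import transformations as xform

M = xform.translation_matrix([1.0, 2.0, 3.0])
assert np.allclose(xform.decompose_matrix(M)[3], [1.0, 2.0, 3.0])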