def discretefiber_reduced(params_in):
    """Map one seed-spot spec to the unique quaternions on its fiber.

    ``params_in`` holds the three hkl components followed by the angles
    of the spot's center of mass; the crystallographic inputs (B matrix,
    chi, Laue symmetry, fiber divisions) are read from the module-level
    ``paramMP`` dict (multiprocessing shared-parameter convention).
    """
    b_mat = paramMP['bMat']
    chi_val = paramMP['chi']
    laue_sym = paramMP['csym']
    ndiv = paramMP['fiber_ndiv']

    # first three entries are the hkl; the remainder are the spot angles
    hkl_col = params_in[:3].reshape(3, 1)
    gvec_s = xfcapi.anglesToGVec(
        np.atleast_2d(params_in[3:]), chi=chi_val,
    ).T

    fiber_quats = rot.discreteFiber(
        hkl_col, gvec_s,
        B=b_mat, ndiv=ndiv, invert=False, csym=laue_sym,
    )[0]
    return mutil.uniqueVectors(fiber_quats)
def xy_of_angs(self, angs):
    """Return detector-frame Cartesian coords of vstacked (tth, eta) pairs.

    Thin wrapper: transforms.anglesToGVec followed by projection onto
    the detector plane.
    """
    # angles -> diffraction vectors in the lab frame
    gvec_lab = xf.anglesToGVec(
        angs, self.bVec, self.eVec, rMat_s=None, rMat_c=None
    )
    # project onto the panel; sample and crystal rotations are identity
    return xf.gvecToDetectorXY(
        gvec_lab, self.rMat, I3, I3,
        self.tVec, self.tVec_s, self.tVec_c,
        distortion=self.distortion,
        beamVec=self.bVec, etaVec=self.eVec,
    )
def angles_to_cart(self, tth_eta):
    """Convert (tth, eta) rows to Cartesian detector coordinates.

    TODO: distortion is not applied here.
    """
    # identity sample/crystal frames with zero translations
    eye3 = ct.identity_3x3
    zero3 = ct.zeros_3
    # append a zero omega column to each (tth, eta) pair
    full_angs = np.hstack([tth_eta, np.zeros((len(tth_eta), 1))])
    gvecs = anglesToGVec(full_angs, bHat_l=self.bvec, eHat_l=self.evec)
    return gvecToDetectorXY(
        gvecs, self.rmat, eye3, eye3,
        self.tvec, zero3, zero3,
        beamVec=self.bvec,
    )
def angles_to_cart(self, tth_eta):
    """Map stacked (tth, eta) pairs onto the detector plane.

    TODO: distortion is not handled.
    """
    identity = ct.identity_3x3
    origin = ct.zeros_3
    n_pts = len(tth_eta)
    # omega is fixed at zero for every point
    angles = np.hstack([tth_eta, np.zeros((n_pts, 1))])
    xy_det = gvecToDetectorXY(
        anglesToGVec(angles, bHat_l=self.bvec, eHat_l=self.evec),
        self.rmat, identity, identity,
        self.tvec, origin, origin,
        beamVec=self.bvec)
    return xy_det
def xy_of_angs(self, angs):
    """Cartesian detector coordinates for vstacked (tth, eta) pairs.

    Wrapper for transforms.anglesToGVec plus detector projection,
    with distortion applied.
    """
    lab_gvecs = xf.anglesToGVec(angs, self.bVec, self.eVec,
                                rMat_s=None, rMat_c=None)
    xy_coords = xf.gvecToDetectorXY(lab_gvecs,
                                    self.rMat, I3, I3,
                                    self.tVec, self.tVec_s, self.tVec_c,
                                    distortion=self.distortion,
                                    beamVec=self.bVec,
                                    etaVec=self.eVec)
    return xy_coords
def make_powder_rings(self, pd, merge_hkls=False, delta_tth=None,
                      delta_eta=10., eta_period=None,
                      rmat_s=ct.identity_3x3, tvec_s=ct.zeros_3,
                      tvec_c=ct.zeros_3, full_output=False):
    """
    Generate powder-ring patch centers that fall on this panel.

    Parameters
    ----------
    pd : PlaneData or array_like
        PlaneData object, or a flat list of 2theta values in DEGREES
        (in which case delta_tth is required).
    merge_hkls : bool, optional
        If True (PlaneData input only), merge overlapping hkl ranges.
    delta_tth : float, optional
        Patch width in 2theta, degrees.
    delta_eta : float, optional
        Patch width in eta, degrees (default 10).
    eta_period : (float, float), optional
        Azimuthal period in radians; defaults to (-pi, pi).
    rmat_s, tvec_s, tvec_c : array_like, optional
        Sample rotation and sample/crystal translation vectors.
    full_output : bool, optional
        If True, also return the per-ring index map and the eta grid.

    Returns
    -------
    valid_ang, valid_xy : lists (one entry per ring) of the (tth, eta)
    patch centers fully on the panel and their detector xy coords;
    plus (map_indices, eta) when full_output is True.
    """
    # in case you want to give it tth angles directly
    if hasattr(pd, '__len__'):
        tth = np.array(pd).flatten()
        if delta_tth is None:
            raise RuntimeError(
                "If supplying a 2theta list as first arg, " +
                "must supply a delta_tth")
        # patch corners [ll, ul, ur, lr] plus center, as (d_tth, d_eta)
        # offsets in radians
        sector_vertices = np.tile(
            0.5 * np.radians([
                -delta_tth, -delta_eta,
                -delta_tth, delta_eta,
                delta_tth, delta_eta,
                delta_tth, -delta_eta,
                0.0, 0.0
            ]), (len(tth), 1))
    else:
        # Okay, we have a PlaneData object
        pd = PlaneData.makeNew(pd)  # make a copy to munge
        if delta_tth is not None:
            pd.tThWidth = np.radians(delta_tth)
        else:
            delta_tth = np.degrees(pd.tThWidth)

        # conversions, meh...
        del_eta = np.radians(delta_eta)

        # do merging if asked
        if merge_hkls:
            _, tth_ranges = pd.getMergedRanges()
            tth = np.array([0.5 * sum(i) for i in tth_ranges])
        else:
            tth_ranges = pd.getTThRanges()
            tth = pd.getTTh()
        # per-ring (lo, hi) offsets of the tth window about each center
        tth_pm = tth_ranges - np.tile(tth, (2, 1)).T
        sector_vertices = np.vstack([[
            i[0], -del_eta,
            i[0], del_eta,
            i[1], del_eta,
            i[1], -del_eta,
            0.0, 0.0
        ] for i in tth_pm])

    # for generating rings: eta bin centers over one full period
    if eta_period is None:
        eta_period = (-np.pi, np.pi)
    neta = int(360. / float(delta_eta))
    eta = mapAngle(
        np.radians(delta_eta * (np.linspace(0, neta - 1, num=neta) + 0.5))
        + eta_period[0], eta_period)

    # one (3, neta) stack of (tth, eta, ome=0) angles per ring
    angs = [
        np.vstack([i * np.ones(neta), eta, np.zeros(neta)])
        for i in tth
    ]

    # need xy coords and pixel sizes
    valid_ang = []
    valid_xy = []
    map_indices = []
    npp = 5  # [ll, ul, ur, lr, center]
    for i_ring in range(len(angs)):
        # expand angles to patch vertices
        these_angs = angs[i_ring].T
        patch_vertices = (np.tile(these_angs[:, :2], (1, npp))
                          + np.tile(sector_vertices[i_ring], (neta, 1))
                          ).reshape(npp * neta, 2)

        # duplicate ome array so each vertex carries its omega
        ome_dupl = np.tile(these_angs[:, 2],
                           (npp, 1)).T.reshape(npp * neta, 1)

        # find vertices that all fall on the panel
        gVec_ring_l = anglesToGVec(np.hstack([patch_vertices, ome_dupl]),
                                   bHat_l=self.bvec)
        all_xy = gvecToDetectorXY(gVec_ring_l,
                                  self.rmat, rmat_s, ct.identity_3x3,
                                  self.tvec, tvec_s, tvec_c,
                                  beamVec=self.bvec)
        _, on_panel = self.clip_to_panel(all_xy)

        # all vertices must be on...
        patch_is_on = np.all(on_panel.reshape(neta, npp), axis=1)
        patch_xys = all_xy.reshape(neta, 5, 2)[patch_is_on]
        idx = np.where(patch_is_on)[0]
        valid_ang.append(these_angs[patch_is_on, :2])
        # index -1 is the patch center vertex
        valid_xy.append(patch_xys[:, -1, :].squeeze())
        map_indices.append(idx)
        pass
    # ??? is this option necessary?
    if full_output:
        return valid_ang, valid_xy, map_indices, eta
    else:
        return valid_ang, valid_xy
def generate_orientation_fibers(eta_ome, chi, threshold, seed_hkl_ids,
                                fiber_ndiv, filt_stdev=0.8):
    """
    From ome-eta maps and hklid spec, generate list of quaternions
    from fibers.

    Spots are segmented from each seed map with a Laplacian-of-Gaussian
    filter + threshold + connected-component labeling; each spot's
    center of mass seeds a discrete orientation fiber.

    Parameters
    ----------
    eta_ome : object
        eta-omega maps; provides dataStore, omegas, etas, omeEdges,
        etaEdges, iHKLList and planeData.
    chi : float
        Goniometer chi passed to anglesToGVec (radians — TODO confirm).
    threshold : float
        Intensity threshold applied to the filtered maps.
    seed_hkl_ids : array_like of int
        Indices into eta_ome.iHKLList selecting seed hkls.
    fiber_ndiv : int
        Number of discrete orientations per fiber.
    filt_stdev : float, optional
        Std dev for the gaussian_laplace blob filter (default 0.8).

    Returns
    -------
    ndarray, shape (4, n)
        Candidate orientation quaternions, stacked column-wise.
    """
    # seed_hkl_ids must be consistent with this...
    pd_hkl_ids = eta_ome.iHKLList[seed_hkl_ids]

    # grab angular grid info from maps (assumes uniform spacing)
    del_ome = eta_ome.omegas[1] - eta_ome.omegas[0]
    del_eta = eta_ome.etas[1] - eta_ome.etas[0]

    # labeling mask: 4-connectivity in the 2-d map
    structureNDI_label = ndimage.generate_binary_structure(2, 1)

    # crystallography data from the pd object
    pd = eta_ome.planeData
    tTh = pd.getTTh()
    bMat = pd.latVecOps['B']
    csym = pd.getLaueGroup()

    ############################################
    #    Labeling of spots from seed hkls      #
    ############################################

    qfib = []
    labels = []
    numSpots = []
    coms = []
    for i in seed_hkl_ids:
        # First apply filter (negated LoG => blobs become maxima)
        # NOTE(review): ndimage.filters is a deprecated namespace in
        # modern SciPy; ndimage.gaussian_laplace is the current spelling
        this_map_f = -ndimage.filters.gaussian_laplace(
            eta_ome.dataStore[i], filt_stdev)

        labels_t, numSpots_t = ndimage.label(
            this_map_f > threshold,
            structureNDI_label
            )
        # center of mass of each labeled spot, in (row=ome, col=eta)
        # fractional-pixel coordinates
        coms_t = np.atleast_2d(
            ndimage.center_of_mass(
                this_map_f,
                labels=labels_t,
                index=np.arange(1, np.amax(labels_t)+1)
                )
            )
        labels.append(labels_t)
        numSpots.append(numSpots_t)
        coms.append(coms_t)
        pass

    ############################################
    #  Generate discrete fibers from labels    #
    ############################################

    for i in range(len(pd_hkl_ids)):
        ii = 0
        # worst-case allocation; trimmed to the filled columns below
        qfib_tmp = np.empty((4, fiber_ndiv*numSpots[i]))
        for ispot in range(numSpots[i]):
            # skip spots whose center of mass is undefined
            if not np.isnan(coms[i][ispot][0]):
                # map fractional pixel coords back to angular centers
                ome_c = eta_ome.omeEdges[0] \
                    + (0.5 + coms[i][ispot][0])*del_ome
                eta_c = eta_ome.etaEdges[0] \
                    + (0.5 + coms[i][ispot][1])*del_eta

                # gVec_s = xrdutil.makeMeasuredScatteringVectors(
                #     tTh[pd_hkl_ids[i]], eta_c, ome_c
                #     )
                gVec_s = xfcapi.anglesToGVec(
                    np.atleast_2d(
                        [tTh[pd_hkl_ids[i]], eta_c, ome_c]
                    ),
                    chi=chi
                ).T

                # discrete fiber through this hkl/g-vector pair,
                # deduplicated under the Laue symmetry
                tmp = mutil.uniqueVectors(
                    rot.discreteFiber(
                        pd.hkls[:, pd_hkl_ids[i]].reshape(3, 1),
                        gVec_s,
                        B=bMat,
                        ndiv=fiber_ndiv,
                        invert=False,
                        csym=csym
                        )[0]
                    )
                jj = ii + tmp.shape[1]
                qfib_tmp[:, ii:jj] = tmp
                ii += tmp.shape[1]
                pass
            pass
        qfib.append(qfib_tmp[:, :ii])
        pass
    return np.hstack(qfib)
def pull_spots(self, plane_data, grain_params, imgser_dict,
               tth_tol=0.25, eta_tol=1., ome_tol=1.,
               npdiv=2, threshold=10,
               eta_ranges=[(-np.pi, np.pi), ],
               ome_period=(-np.pi, np.pi),
               dirname='results', filename=None, output_format='text',
               save_spot_list=False,
               quiet=True, check_only=False, interp='nearest'):
    """
    Extract reflection info from a rotation series encoded as an
    OmegaImageseries object.

    For each detector panel, simulated reflections are projected onto
    the panel; patches falling fully on it are evaluated frame-by-frame
    against `threshold` and their peak positions/intensities measured
    (when `check_only` is True, only a signal-presence check is done).

    Returns
    -------
    compl : list of bool
        Per-reflection flag: patch contained signal above threshold.
    output : dict
        Per-detector list of patch results
        [peak_id, hkl_id, hkl, sum_int, max_int, pred_angs, meas_angs,
        meas_xy], or (ii, jj, frame_indices) tuples when check_only.

    Notes
    -----
    `save_spot_list` is currently unused.
    """
    # grain parameters
    rMat_c = makeRotMatOfExpMap(grain_params[:3])
    tVec_c = grain_params[3:6]

    # grab omega ranges from first imageseries
    #
    # WARNING: all imageseries AND all wedges within are assumed to have
    # the same omega values; put in a check that they are all the same???
    # FIX: dict.keys() is a view in py3 and not indexable; take the
    # first key via an iterator instead
    oims0 = imgser_dict[next(iter(imgser_dict))]
    ome_ranges = [np.radians([i['ostart'], i['ostop']])
                  for i in oims0.omegawedges.wedges]

    # delta omega in DEGREES grabbed from first imageseries in the dict
    delta_ome = oims0.omega[0, 1] - oims0.omega[0, 0]

    # make omega grid for frame expansion around reference frame
    # in DEGREES
    ndiv_ome, ome_del = make_tolerance_grid(
        delta_ome, ome_tol, 1, adjust_window=True,
    )

    # generate structuring element for connected component labeling
    if ndiv_ome == 1:
        label_struct = ndimage.generate_binary_structure(2, 2)
    else:
        label_struct = ndimage.generate_binary_structure(3, 3)

    # simulate rotation series
    sim_results = self.simulate_rotation_series(
        plane_data, [grain_params, ],
        eta_ranges=eta_ranges,
        ome_ranges=ome_ranges,
        ome_period=ome_period)

    # patch vertex generator (global for instrument)
    tol_vec = 0.5*np.radians(
        [-tth_tol, -eta_tol,
         -tth_tol, eta_tol,
         tth_tol, eta_tol,
         tth_tol, -eta_tol])

    # prepare output if requested
    if filename is not None and output_format.lower() == 'hdf5':
        this_filename = os.path.join(dirname, filename)
        writer = io.GrainDataWriter_h5(
            os.path.join(dirname, filename),
            self.write_config(), grain_params)

    # =====================================================================
    # LOOP OVER PANELS
    # =====================================================================
    iRefl = 0
    compl = []
    output = dict.fromkeys(self.detectors)
    for detector_id in self.detectors:
        # initialize text-based output writer
        if filename is not None and output_format.lower() == 'text':
            output_dir = os.path.join(dirname, detector_id)
            if not os.path.exists(output_dir):
                os.makedirs(output_dir)
            this_filename = os.path.join(output_dir, filename)
            writer = io.PatchDataWriter(this_filename)

        # grab panel
        panel = self.detectors[detector_id]
        instr_cfg = panel.config_dict(self.chi, self.tvec)
        native_area = panel.pixel_area  # pixel ref area

        # pull out the OmegaImageSeries for this panel from input dict
        ome_imgser = imgser_dict[detector_id]

        # extract simulation results
        sim_results_p = sim_results[detector_id]
        hkl_ids = sim_results_p[0][0]
        hkls_p = sim_results_p[1][0]
        ang_centers = sim_results_p[2][0]
        xy_centers = sim_results_p[3][0]
        ang_pixel_size = sim_results_p[4][0]

        # now verify that full patch falls on detector...
        # ???: strictly necessary?
        #
        # patch vertex array from sim
        nangs = len(ang_centers)
        patch_vertices = (
            np.tile(ang_centers[:, :2], (1, 4)) +
            np.tile(tol_vec, (nangs, 1))
        ).reshape(4*nangs, 2)
        ome_dupl = np.tile(
            ang_centers[:, 2], (4, 1)
        ).T.reshape(len(patch_vertices), 1)

        # find vertices that all fall on the panel
        det_xy, _ = xrdutil._project_on_detector_plane(
            np.hstack([patch_vertices, ome_dupl]),
            panel.rmat, rMat_c, self.chi,
            panel.tvec, tVec_c, self.tvec,
            panel.distortion)
        _, on_panel = panel.clip_to_panel(det_xy, buffer_edges=True)

        # all vertices must be on...
        patch_is_on = np.all(on_panel.reshape(nangs, 4), axis=1)
        patch_xys = det_xy.reshape(nangs, 4, 2)[patch_is_on]

        # re-filter...
        hkl_ids = hkl_ids[patch_is_on]
        hkls_p = hkls_p[patch_is_on, :]
        ang_centers = ang_centers[patch_is_on, :]
        xy_centers = xy_centers[patch_is_on, :]
        ang_pixel_size = ang_pixel_size[patch_is_on, :]

        # TODO: add polygon testing right here!
        # done <JVB 06/21/16>
        if check_only:
            patch_output = []
            for i_pt, angs in enumerate(ang_centers):
                # the evaluation omegas;
                # expand about the central value using tol vector
                ome_eval = np.degrees(angs[2]) + ome_del

                # ...vectorize the omega_to_frame function to avoid loop?
                frame_indices = [
                    ome_imgser.omega_to_frame(ome)[0] for ome in ome_eval
                ]
                if -1 in frame_indices:
                    if not quiet:
                        msg = """
                        window for (%d%d%d) falls outside omega range
                        """ % tuple(hkls_p[i_pt, :])
                        print(msg)
                    continue
                else:
                    these_vertices = patch_xys[i_pt]
                    ijs = panel.cartToPixel(these_vertices)
                    ii, jj = polygon(ijs[:, 0], ijs[:, 1])
                    contains_signal = False
                    for i_frame in frame_indices:
                        contains_signal = contains_signal or np.any(
                            ome_imgser[i_frame][ii, jj] > threshold
                        )
                    compl.append(contains_signal)
                    patch_output.append((ii, jj, frame_indices))
        else:
            # make the tth,eta patches for interpolation
            patches = xrdutil.make_reflection_patches(
                instr_cfg,
                ang_centers[:, :2], ang_pixel_size,
                omega=ang_centers[:, 2],
                tth_tol=tth_tol, eta_tol=eta_tol,
                rMat_c=rMat_c, tVec_c=tVec_c,
                distortion=panel.distortion,
                npdiv=npdiv, quiet=True,
                beamVec=self.beam_vector)

            # GRAND LOOP over reflections for this panel
            patch_output = []
            for i_pt, patch in enumerate(patches):

                # strip relevant objects out of current patch
                vtx_angs, vtx_xy, conn, areas, xy_eval, ijs = patch

                prows, pcols = areas.shape
                nrm_fac = areas/float(native_area)
                nrm_fac = nrm_fac / np.min(nrm_fac)

                # grab hkl info
                hkl = hkls_p[i_pt, :]
                hkl_id = hkl_ids[i_pt]

                # edge arrays
                tth_edges = vtx_angs[0][0, :]
                delta_tth = tth_edges[1] - tth_edges[0]
                eta_edges = vtx_angs[1][:, 0]
                delta_eta = eta_edges[1] - eta_edges[0]

                # need to reshape eval pts for interpolation
                xy_eval = np.vstack([xy_eval[0].flatten(),
                                     xy_eval[1].flatten()]).T

                # the evaluation omegas;
                # expand about the central value using tol vector
                ome_eval = np.degrees(ang_centers[i_pt, 2]) + ome_del

                # ???: vectorize the omega_to_frame function to avoid loop?
                frame_indices = [
                    ome_imgser.omega_to_frame(ome)[0] for ome in ome_eval
                ]
                if -1 in frame_indices:
                    if not quiet:
                        msg = """
                        window for (%d%d%d) falls outside omega range
                        """ % tuple(hkl)
                        print(msg)
                    continue
                else:
                    # initialize spot data parameters
                    # !!! maybe change these to nan to not f**k up writer
                    peak_id = -999
                    sum_int = None
                    max_int = None
                    meas_angs = None
                    meas_xy = None

                    # quick check for intensity
                    contains_signal = False
                    patch_data_raw = []
                    for i_frame in frame_indices:
                        tmp = ome_imgser[i_frame][ijs[0], ijs[1]]
                        contains_signal = contains_signal or np.any(
                            tmp > threshold
                        )
                        patch_data_raw.append(tmp)
                        pass
                    patch_data_raw = np.stack(patch_data_raw, axis=0)
                    compl.append(contains_signal)

                    if contains_signal:
                        # initialize patch data array for intensities
                        if interp.lower() == 'bilinear':
                            patch_data = np.zeros(
                                (len(frame_indices), prows, pcols))
                            for i, i_frame in enumerate(frame_indices):
                                patch_data[i] = \
                                    panel.interpolate_bilinear(
                                        xy_eval,
                                        ome_imgser[i_frame],
                                        pad_with_nans=False
                                    ).reshape(prows, pcols)  # * nrm_fac
                        elif interp.lower() == 'nearest':
                            patch_data = patch_data_raw  # * nrm_fac
                        else:
                            msg = "interpolation option " + \
                                "'%s' not understood"
                            # FIX: `raise(RuntimeError, msg)` raised a
                            # tuple (TypeError in py3); instantiate the
                            # exception properly
                            raise RuntimeError(msg % interp)

                        # now have interpolated patch data...
                        labels, num_peaks = ndimage.label(
                            patch_data > threshold, structure=label_struct
                        )
                        slabels = np.arange(1, num_peaks + 1)

                        if num_peaks > 0:
                            peak_id = iRefl
                            coms = np.array(
                                ndimage.center_of_mass(
                                    patch_data,
                                    labels=labels,
                                    index=slabels
                                )
                            )
                            if num_peaks > 1:
                                # keep the peak closest to patch center
                                center = np.r_[patch_data.shape]*0.5
                                center_t = np.tile(center, (num_peaks, 1))
                                com_diff = coms - center_t
                                closest_peak_idx = np.argmin(
                                    np.sum(com_diff**2, axis=1)
                                )
                            else:
                                closest_peak_idx = 0
                                pass  # end multipeak conditional
                            coms = coms[closest_peak_idx]
                            # meas_omes = \
                            #     ome_edges[0] + (0.5 + coms[0])*delta_ome
                            meas_omes = \
                                ome_eval[0] + coms[0]*delta_ome
                            meas_angs = np.hstack(
                                [tth_edges[0] + (0.5 + coms[2])*delta_tth,
                                 eta_edges[0] + (0.5 + coms[1])*delta_eta,
                                 mapAngle(
                                     np.radians(meas_omes), ome_period
                                 )
                                 ]
                            )

                            # intensities
                            #   - summed is 'integrated' over interpolated
                            #     data
                            #   - max is max of raw input data
                            sum_int = np.sum(
                                patch_data[
                                    labels == slabels[closest_peak_idx]
                                ]
                            )
                            max_int = np.max(
                                patch_data_raw[
                                    labels == slabels[closest_peak_idx]
                                ]
                            )
                            # ???: Should this only use labeled pixels?
                            # Those are segmented from interpolated data,
                            # not raw; likely ok in most cases.

                            # need MEASURED xy coords
                            gvec_c = anglesToGVec(
                                meas_angs,
                                chi=self.chi,
                                rMat_c=rMat_c,
                                bHat_l=self.beam_vector)
                            rMat_s = makeOscillRotMat(
                                [self.chi, meas_angs[2]]
                            )
                            meas_xy = gvecToDetectorXY(
                                gvec_c,
                                panel.rmat, rMat_s, rMat_c,
                                panel.tvec, self.tvec, tVec_c,
                                beamVec=self.beam_vector)
                            if panel.distortion is not None:
                                # FIXME: distortion handling
                                meas_xy = panel.distortion[0](
                                    np.atleast_2d(meas_xy),
                                    panel.distortion[1],
                                    invert=True).flatten()
                                pass
                            # FIXME: why is this suddenly necessary???
                            meas_xy = meas_xy.squeeze()
                            pass  # end num_peaks > 0
                    else:
                        patch_data = patch_data_raw
                        pass  # end contains_signal

                    # write output
                    if filename is not None:
                        if output_format.lower() == 'text':
                            writer.dump_patch(
                                peak_id, hkl_id, hkl, sum_int, max_int,
                                ang_centers[i_pt], meas_angs,
                                xy_centers[i_pt], meas_xy)
                        elif output_format.lower() == 'hdf5':
                            xyc_arr = xy_eval.reshape(
                                prows, pcols, 2
                            ).transpose(2, 0, 1)
                            writer.dump_patch(
                                detector_id, iRefl, peak_id, hkl_id, hkl,
                                tth_edges, eta_edges, np.radians(ome_eval),
                                xyc_arr, ijs, frame_indices, patch_data,
                                ang_centers[i_pt], xy_centers[i_pt],
                                meas_angs, meas_xy)
                        pass  # end conditional on write output
                    pass  # end conditional on check only

                    patch_output.append([
                        peak_id, hkl_id, hkl, sum_int, max_int,
                        ang_centers[i_pt], meas_angs, meas_xy,
                    ])
                    iRefl += 1
                pass  # end patch conditional
            pass  # end patch loop
        output[detector_id] = patch_output
        if filename is not None and output_format.lower() == 'text':
            writer.close()
        pass  # end detector loop
    if filename is not None and output_format.lower() == 'hdf5':
        writer.close()
    return compl, output
# --- build a regular (eta, tth) angular grid about the average 2theta ---
# NOTE(review): tth_avg/tth_lo/tth_hi/tth_size/eta_size and detector `d`
# come from earlier in the script; tth0/tth_range are in degrees here.
tth0 = np.degrees(tth_avg)
eta0 = 0.
tth_range = np.degrees(tth_hi - tth_lo)
eta_range = 360.  # full azimuthal sweep
ntth = int(tth_range/tth_size)
neta = int(eta_range/eta_size)
# bin-center vectors centered on (tth0, eta0)
# NOTE(review): the trailing "- 1" shifts the grid by one bin relative to
# a symmetric centering — confirm this offset is intended
tth_vec = tth_size*(np.arange(ntth) - 0.5*ntth - 1) + tth0
eta_vec = eta_size*(np.arange(neta) - 0.5*neta - 1) + eta0
angpts = np.meshgrid(eta_vec, tth_vec, indexing='ij')
# angles (radians) -> g-vectors -> detector xy; identity sample/crystal
# rotations and zero translations
gpts = xfc.anglesToGVec(
    np.vstack([
        np.radians(angpts[1].flatten()),
        np.radians(angpts[0].flatten()),
        np.zeros(neta*ntth)
    ]).T, bHat_l=d.bvec)
xypts = xfc.gvecToDetectorXY(
    gpts, d.rmat, np.eye(3), np.eye(3),
    d.tvec, np.zeros(3), np.zeros(3),
    beamVec=d.bvec)
# resample the average frame onto the polar grid; NaNs mark points that
# fell off the panel
img2 = d.interpolate_bilinear(xypts, average_frame).reshape(neta, ntth)
img3 = copy.deepcopy(img2)
borders = np.isnan(img2)
img2[borders] = 0.
img3[borders] = 0.
# NOTE(review): presumably meant to make img3 strictly positive (e.g. for
# log display), but adding min+1 does not guarantee that when min < 0;
# `img3 -= np.min(img3) - 1` may have been intended — confirm
img3 += np.min(img3) + 1
def make_powder_rings(
        self, pd, merge_hkls=False, delta_tth=None, delta_eta=10.,
        eta_period=None, rmat_s=ct.identity_3x3, tvec_s=ct.zeros_3,
        tvec_c=ct.zeros_3, full_output=False):
    """
    Generate powder-ring patch centers that fall on this panel.

    Parameters
    ----------
    pd : PlaneData or array_like
        PlaneData object, or a flat list of 2theta values in DEGREES
        (in which case delta_tth is required).
    merge_hkls : bool, optional
        If True (PlaneData input only), merge overlapping hkl ranges.
    delta_tth : float, optional
        Patch width in 2theta, degrees.
    delta_eta : float, optional
        Patch width in eta, degrees (default 10).
    eta_period : (float, float), optional
        Azimuthal period in radians; defaults to (-pi, pi).
    rmat_s, tvec_s, tvec_c : array_like, optional
        Sample rotation and sample/crystal translation vectors.
    full_output : bool, optional
        If True, also return the per-ring index map and the eta grid.

    Returns
    -------
    valid_ang, valid_xy : lists (one entry per ring) of the (tth, eta)
    patch centers fully on the panel and their detector xy coords;
    plus (map_indices, eta) when full_output is True.
    """
    # in case you want to give it tth angles directly
    if hasattr(pd, '__len__'):
        tth = np.array(pd).flatten()
        if delta_tth is None:
            raise RuntimeError(
                "If supplying a 2theta list as first arg, " +
                "must supply a delta_tth")
        # patch corners [ll, ul, ur, lr] plus center, as (d_tth, d_eta)
        # offsets in radians
        sector_vertices = np.tile(
            0.5*np.radians([-delta_tth, -delta_eta,
                            -delta_tth, delta_eta,
                            delta_tth, delta_eta,
                            delta_tth, -delta_eta,
                            0.0, 0.0]),
            (len(tth), 1)
            )
    else:
        # Okay, we have a PlaneData object
        pd = PlaneData.makeNew(pd)  # make a copy to munge
        if delta_tth is not None:
            pd.tThWidth = np.radians(delta_tth)
        else:
            delta_tth = np.degrees(pd.tThWidth)

        # conversions, meh...
        del_eta = np.radians(delta_eta)

        # do merging if asked
        if merge_hkls:
            _, tth_ranges = pd.getMergedRanges()
            tth = np.array([0.5*sum(i) for i in tth_ranges])
        else:
            tth_ranges = pd.getTThRanges()
            tth = pd.getTTh()
        # per-ring (lo, hi) offsets of the tth window about each center
        tth_pm = tth_ranges - np.tile(tth, (2, 1)).T
        sector_vertices = np.vstack(
            [[i[0], -del_eta,
              i[0], del_eta,
              i[1], del_eta,
              i[1], -del_eta,
              0.0, 0.0]
             for i in tth_pm])

    # for generating rings: eta bin centers over one full period
    if eta_period is None:
        eta_period = (-np.pi, np.pi)
    neta = int(360./float(delta_eta))
    eta = mapAngle(
        np.radians(delta_eta*(np.linspace(0, neta - 1, num=neta) + 0.5))
        + eta_period[0], eta_period
        )

    # one (3, neta) stack of (tth, eta, ome=0) angles per ring
    angs = [np.vstack([i*np.ones(neta), eta, np.zeros(neta)])
            for i in tth]

    # need xy coords and pixel sizes
    valid_ang = []
    valid_xy = []
    map_indices = []
    npp = 5  # [ll, ul, ur, lr, center]
    for i_ring in range(len(angs)):
        # expand angles to patch vertices
        these_angs = angs[i_ring].T
        patch_vertices = (
            np.tile(these_angs[:, :2], (1, npp))
            + np.tile(sector_vertices[i_ring], (neta, 1))
        ).reshape(npp*neta, 2)

        # duplicate ome array so each vertex carries its omega
        ome_dupl = np.tile(
            these_angs[:, 2], (npp, 1)
        ).T.reshape(npp*neta, 1)

        # find vertices that all fall on the panel
        gVec_ring_l = anglesToGVec(
            np.hstack([patch_vertices, ome_dupl]),
            bHat_l=self.bvec)
        all_xy = gvecToDetectorXY(
            gVec_ring_l,
            self.rmat, rmat_s, ct.identity_3x3,
            self.tvec, tvec_s, tvec_c,
            beamVec=self.bvec)
        _, on_panel = self.clip_to_panel(all_xy)

        # all vertices must be on...
        patch_is_on = np.all(on_panel.reshape(neta, npp), axis=1)
        patch_xys = all_xy.reshape(neta, 5, 2)[patch_is_on]
        idx = np.where(patch_is_on)[0]
        valid_ang.append(these_angs[patch_is_on, :2])
        # index -1 is the patch center vertex
        valid_xy.append(patch_xys[:, -1, :].squeeze())
        map_indices.append(idx)
        pass
    # ??? is this option necessary?
    if full_output:
        return valid_ang, valid_xy, map_indices, eta
    else:
        return valid_ang, valid_xy
def pull_spots(self, plane_data, grain_params, imgser_dict,
               tth_tol=0.25, eta_tol=1., ome_tol=1.,
               npdiv=2, threshold=10,
               eta_ranges=[(-np.pi, np.pi), ],
               ome_period=(-np.pi, np.pi),
               dirname='results', filename=None, output_format='text',
               save_spot_list=False,
               quiet=True, check_only=False, interp='nearest'):
    """
    Extract reflection info from a rotation series encoded as an
    OmegaImageseries object.

    For each detector panel, simulated reflections are projected onto
    the panel; patches falling fully on it are evaluated frame-by-frame
    against `threshold` and their peak positions/intensities measured
    (when `check_only` is True, only a signal-presence check is done).

    Returns
    -------
    compl : list of bool
        Per-reflection flag: patch contained signal above threshold.
    output : dict
        Per-detector list of patch results
        [peak_id, hkl_id, hkl, sum_int, max_int, pred_angs, meas_angs,
        meas_xy], or (ii, jj, frame_indices) tuples when check_only.

    Notes
    -----
    `save_spot_list` is currently unused.
    """
    # grain parameters
    rMat_c = makeRotMatOfExpMap(grain_params[:3])
    tVec_c = grain_params[3:6]

    # grab omega ranges from first imageseries
    #
    # WARNING: all imageseries AND all wedges within are assumed to have
    # the same omega values; put in a check that they are all the same???
    # FIX: dict.keys() is a view in py3 and not indexable; take the
    # first key via an iterator instead
    oims0 = imgser_dict[next(iter(imgser_dict))]
    ome_ranges = [
        np.radians([i['ostart'], i['ostop']])
        for i in oims0.omegawedges.wedges
    ]

    # delta omega in DEGREES grabbed from first imageseries in the dict
    delta_ome = oims0.omega[0, 1] - oims0.omega[0, 0]

    # make omega grid for frame expansion around reference frame
    # in DEGREES
    ndiv_ome, ome_del = make_tolerance_grid(
        delta_ome, ome_tol, 1, adjust_window=True,
    )

    # generate structuring element for connected component labeling
    if ndiv_ome == 1:
        label_struct = ndimage.generate_binary_structure(2, 2)
    else:
        label_struct = ndimage.generate_binary_structure(3, 3)

    # simulate rotation series
    sim_results = self.simulate_rotation_series(
        plane_data, [grain_params, ],
        eta_ranges=eta_ranges,
        ome_ranges=ome_ranges,
        ome_period=ome_period)

    # patch vertex generator (global for instrument)
    tol_vec = 0.5 * np.radians([
        -tth_tol, -eta_tol,
        -tth_tol, eta_tol,
        tth_tol, eta_tol,
        tth_tol, -eta_tol
    ])

    # prepare output if requested
    if filename is not None and output_format.lower() == 'hdf5':
        this_filename = os.path.join(dirname, filename)
        writer = io.GrainDataWriter_h5(
            os.path.join(dirname, filename),
            self.write_config(), grain_params)

    # =====================================================================
    # LOOP OVER PANELS
    # =====================================================================
    iRefl = 0
    compl = []
    output = dict.fromkeys(self.detectors)
    for detector_id in self.detectors:
        # initialize text-based output writer
        if filename is not None and output_format.lower() == 'text':
            output_dir = os.path.join(dirname, detector_id)
            if not os.path.exists(output_dir):
                os.makedirs(output_dir)
            this_filename = os.path.join(output_dir, filename)
            writer = io.PatchDataWriter(this_filename)

        # grab panel
        panel = self.detectors[detector_id]
        instr_cfg = panel.config_dict(self.chi, self.tvec)
        native_area = panel.pixel_area  # pixel ref area

        # pull out the OmegaImageSeries for this panel from input dict
        ome_imgser = imgser_dict[detector_id]

        # extract simulation results
        sim_results_p = sim_results[detector_id]
        hkl_ids = sim_results_p[0][0]
        hkls_p = sim_results_p[1][0]
        ang_centers = sim_results_p[2][0]
        xy_centers = sim_results_p[3][0]
        ang_pixel_size = sim_results_p[4][0]

        # now verify that full patch falls on detector...
        # ???: strictly necessary?
        #
        # patch vertex array from sim
        nangs = len(ang_centers)
        patch_vertices = (
            np.tile(ang_centers[:, :2], (1, 4)) +
            np.tile(tol_vec, (nangs, 1))
        ).reshape(4 * nangs, 2)
        ome_dupl = np.tile(
            ang_centers[:, 2], (4, 1)
        ).T.reshape(len(patch_vertices), 1)

        # find vertices that all fall on the panel
        det_xy, _ = xrdutil._project_on_detector_plane(
            np.hstack([patch_vertices, ome_dupl]),
            panel.rmat, rMat_c, self.chi,
            panel.tvec, tVec_c, self.tvec,
            panel.distortion)
        _, on_panel = panel.clip_to_panel(det_xy, buffer_edges=True)

        # all vertices must be on...
        patch_is_on = np.all(on_panel.reshape(nangs, 4), axis=1)
        patch_xys = det_xy.reshape(nangs, 4, 2)[patch_is_on]

        # re-filter...
        hkl_ids = hkl_ids[patch_is_on]
        hkls_p = hkls_p[patch_is_on, :]
        ang_centers = ang_centers[patch_is_on, :]
        xy_centers = xy_centers[patch_is_on, :]
        ang_pixel_size = ang_pixel_size[patch_is_on, :]

        # TODO: add polygon testing right here!
        # done <JVB 06/21/16>
        if check_only:
            patch_output = []
            for i_pt, angs in enumerate(ang_centers):
                # the evaluation omegas;
                # expand about the central value using tol vector
                ome_eval = np.degrees(angs[2]) + ome_del

                # ...vectorize the omega_to_frame function to avoid loop?
                frame_indices = [
                    ome_imgser.omega_to_frame(ome)[0] for ome in ome_eval
                ]
                if -1 in frame_indices:
                    if not quiet:
                        msg = """
                        window for (%d%d%d) falls outside omega range
                        """ % tuple(hkls_p[i_pt, :])
                        print(msg)
                    continue
                else:
                    these_vertices = patch_xys[i_pt]
                    ijs = panel.cartToPixel(these_vertices)
                    ii, jj = polygon(ijs[:, 0], ijs[:, 1])
                    contains_signal = False
                    for i_frame in frame_indices:
                        contains_signal = contains_signal or np.any(
                            ome_imgser[i_frame][ii, jj] > threshold)
                    compl.append(contains_signal)
                    patch_output.append((ii, jj, frame_indices))
        else:
            # make the tth,eta patches for interpolation
            patches = xrdutil.make_reflection_patches(
                instr_cfg,
                ang_centers[:, :2], ang_pixel_size,
                omega=ang_centers[:, 2],
                tth_tol=tth_tol, eta_tol=eta_tol,
                rMat_c=rMat_c, tVec_c=tVec_c,
                distortion=panel.distortion,
                npdiv=npdiv, quiet=True,
                beamVec=self.beam_vector)

            # GRAND LOOP over reflections for this panel
            patch_output = []
            for i_pt, patch in enumerate(patches):

                # strip relevant objects out of current patch
                vtx_angs, vtx_xy, conn, areas, xy_eval, ijs = patch

                prows, pcols = areas.shape
                nrm_fac = areas / float(native_area)
                nrm_fac = nrm_fac / np.min(nrm_fac)

                # grab hkl info
                hkl = hkls_p[i_pt, :]
                hkl_id = hkl_ids[i_pt]

                # edge arrays
                tth_edges = vtx_angs[0][0, :]
                delta_tth = tth_edges[1] - tth_edges[0]
                eta_edges = vtx_angs[1][:, 0]
                delta_eta = eta_edges[1] - eta_edges[0]

                # need to reshape eval pts for interpolation
                xy_eval = np.vstack(
                    [xy_eval[0].flatten(), xy_eval[1].flatten()]).T

                # the evaluation omegas;
                # expand about the central value using tol vector
                ome_eval = np.degrees(ang_centers[i_pt, 2]) + ome_del

                # ???: vectorize the omega_to_frame function to avoid loop?
                frame_indices = [
                    ome_imgser.omega_to_frame(ome)[0] for ome in ome_eval
                ]
                if -1 in frame_indices:
                    if not quiet:
                        msg = """
                        window for (%d%d%d) falls outside omega range
                        """ % tuple(hkl)
                        print(msg)
                    continue
                else:
                    # initialize spot data parameters
                    # !!! maybe change these to nan to not f**k up writer
                    peak_id = -999
                    sum_int = None
                    max_int = None
                    meas_angs = None
                    meas_xy = None

                    # quick check for intensity
                    contains_signal = False
                    patch_data_raw = []
                    for i_frame in frame_indices:
                        tmp = ome_imgser[i_frame][ijs[0], ijs[1]]
                        contains_signal = contains_signal or np.any(
                            tmp > threshold)
                        patch_data_raw.append(tmp)
                        pass
                    patch_data_raw = np.stack(patch_data_raw, axis=0)
                    compl.append(contains_signal)

                    if contains_signal:
                        # initialize patch data array for intensities
                        if interp.lower() == 'bilinear':
                            patch_data = np.zeros(
                                (len(frame_indices), prows, pcols))
                            for i, i_frame in enumerate(frame_indices):
                                patch_data[i] = \
                                    panel.interpolate_bilinear(
                                        xy_eval,
                                        ome_imgser[i_frame],
                                        pad_with_nans=False
                                    ).reshape(prows, pcols)  # * nrm_fac
                        elif interp.lower() == 'nearest':
                            patch_data = patch_data_raw  # * nrm_fac
                        else:
                            msg = "interpolation option " + \
                                "'%s' not understood"
                            # FIX: `raise (RuntimeError, msg)` raised a
                            # tuple (TypeError in py3); instantiate the
                            # exception properly
                            raise RuntimeError(msg % interp)

                        # now have interpolated patch data...
                        labels, num_peaks = ndimage.label(
                            patch_data > threshold,
                            structure=label_struct)
                        slabels = np.arange(1, num_peaks + 1)

                        if num_peaks > 0:
                            peak_id = iRefl
                            coms = np.array(
                                ndimage.center_of_mass(
                                    patch_data,
                                    labels=labels,
                                    index=slabels))
                            if num_peaks > 1:
                                # keep the peak closest to patch center
                                center = np.r_[patch_data.shape] * 0.5
                                center_t = np.tile(center, (num_peaks, 1))
                                com_diff = coms - center_t
                                closest_peak_idx = np.argmin(
                                    np.sum(com_diff**2, axis=1))
                            else:
                                closest_peak_idx = 0
                                pass  # end multipeak conditional
                            coms = coms[closest_peak_idx]
                            # meas_omes = \
                            #     ome_edges[0] + (0.5 + coms[0])*delta_ome
                            meas_omes = \
                                ome_eval[0] + coms[0]*delta_ome
                            meas_angs = np.hstack([
                                tth_edges[0] + (0.5 + coms[2]) * delta_tth,
                                eta_edges[0] + (0.5 + coms[1]) * delta_eta,
                                mapAngle(np.radians(meas_omes), ome_period)
                            ])

                            # intensities
                            #   - summed is 'integrated' over interpolated
                            #     data
                            #   - max is max of raw input data
                            sum_int = np.sum(patch_data[
                                labels == slabels[closest_peak_idx]])
                            max_int = np.max(patch_data_raw[
                                labels == slabels[closest_peak_idx]])
                            # ???: Should this only use labeled pixels?
                            # Those are segmented from interpolated data,
                            # not raw; likely ok in most cases.

                            # need MEASURED xy coords
                            gvec_c = anglesToGVec(meas_angs,
                                                  chi=self.chi,
                                                  rMat_c=rMat_c,
                                                  bHat_l=self.beam_vector)
                            rMat_s = makeOscillRotMat(
                                [self.chi, meas_angs[2]])
                            meas_xy = gvecToDetectorXY(
                                gvec_c,
                                panel.rmat, rMat_s, rMat_c,
                                panel.tvec, self.tvec, tVec_c,
                                beamVec=self.beam_vector)
                            if panel.distortion is not None:
                                # FIXME: distortion handling
                                meas_xy = panel.distortion[0](
                                    np.atleast_2d(meas_xy),
                                    panel.distortion[1],
                                    invert=True).flatten()
                                pass
                            # FIXME: why is this suddenly necessary???
                            meas_xy = meas_xy.squeeze()
                            pass  # end num_peaks > 0
                    else:
                        patch_data = patch_data_raw
                        pass  # end contains_signal

                    # write output
                    if filename is not None:
                        if output_format.lower() == 'text':
                            writer.dump_patch(peak_id, hkl_id, hkl,
                                              sum_int, max_int,
                                              ang_centers[i_pt], meas_angs,
                                              xy_centers[i_pt], meas_xy)
                        elif output_format.lower() == 'hdf5':
                            xyc_arr = xy_eval.reshape(
                                prows, pcols, 2).transpose(2, 0, 1)
                            writer.dump_patch(
                                detector_id, iRefl, peak_id, hkl_id, hkl,
                                tth_edges, eta_edges, np.radians(ome_eval),
                                xyc_arr, ijs, frame_indices, patch_data,
                                ang_centers[i_pt], xy_centers[i_pt],
                                meas_angs, meas_xy)
                        pass  # end conditional on write output
                    pass  # end conditional on check only

                    patch_output.append([
                        peak_id, hkl_id, hkl, sum_int, max_int,
                        ang_centers[i_pt], meas_angs, meas_xy,
                    ])
                    iRefl += 1
                pass  # end patch conditional
            pass  # end patch loop
        output[detector_id] = patch_output
        if filename is not None and output_format.lower() == 'text':
            writer.close()
        pass  # end detector loop
    if filename is not None and output_format.lower() == 'hdf5':
        writer.close()
    return compl, output
# --- build a regular (eta, tth) angular grid about the average 2theta ---
# NOTE(review): tth_avg/tth_lo/tth_hi/tth_size/eta_size and detector `d`
# come from earlier in the script; tth0/tth_range are in degrees here.
tth0 = np.degrees(tth_avg)
eta0 = 0.
tth_range = np.degrees(tth_hi - tth_lo)
eta_range = 360.  # full azimuthal sweep
ntth = int(tth_range / tth_size)
neta = int(eta_range / eta_size)
# bin-center vectors centered on (tth0, eta0)
# NOTE(review): the trailing "- 1" shifts the grid by one bin relative to
# a symmetric centering — confirm this offset is intended
tth_vec = tth_size * (np.arange(ntth) - 0.5 * ntth - 1) + tth0
eta_vec = eta_size * (np.arange(neta) - 0.5 * neta - 1) + eta0
angpts = np.meshgrid(eta_vec, tth_vec, indexing='ij')
# angles (radians) -> g-vectors -> detector xy; identity sample/crystal
# rotations and zero translations
gpts = xfc.anglesToGVec(np.vstack([
    np.radians(angpts[1].flatten()),
    np.radians(angpts[0].flatten()),
    np.zeros(neta * ntth)
]).T, bHat_l=d.bvec)
xypts = xfc.gvecToDetectorXY(gpts,
                             d.rmat, np.eye(3), np.eye(3),
                             d.tvec, np.zeros(3), np.zeros(3),
                             beamVec=d.bvec)
# resample the average frame onto the polar grid; NaNs mark points that
# fell off the panel
img2 = d.interpolate_bilinear(xypts, average_frame).reshape(neta, ntth)
img3 = copy.deepcopy(img2)
borders = np.isnan(img2)