def simulate_diffractions(grain_params, experiment, controller):
    """actual forward simulation of the diffraction

    Builds a bit-packed uint8 image stack and writes the simulated
    diffraction spots for every grain into it via _write_pixels.
    """
    # bit-pack along the row axis: 8 row-pixels per byte
    stack_shape = (experiment.nframes,
                   experiment.ncols,
                   ((experiment.nrows - 1) // 8) + 1)
    image_stack = np.zeros(stack_shape, dtype=np.uint8)

    n_grains = len(grain_params)
    task_name = 'simulate diffractions'

    # hoist loop-invariant experiment quantities to locals
    project = xrdutil._project_on_detector_plane
    rmat_d = experiment.rMat_d
    chi = experiment.chi
    tvec_d = experiment.tVec_d
    tvec_s = experiment.tVec_s
    distortion = experiment.distortion

    # full azimuthal circle; omega restricted to the experiment's ranges
    eta_range = [(-np.pi, np.pi), ]
    ome_range = experiment.ome_range
    ome_period = (-np.pi, np.pi)

    full_hkls = xrdutil._fetch_hkls_from_planedata(experiment.plane_data)
    bmat = experiment.plane_data.latVecOps['B']
    wavelength = experiment.plane_data.wavelength

    controller.start(task_name, n_grains)
    for grain_id, gparams in enumerate(grain_params):
        # grain parameter layout: [0:3] exp-map, [3:6] translation,
        # [6:12] inverse stretch tensor (MV notation)
        rmat_c = xfcapi.makeRotMatOfExpMap(gparams[0:3])
        tvec_c = np.ascontiguousarray(gparams[3:6])
        vinv_s = np.ascontiguousarray(gparams[6:12])

        # all feasible (tth, eta, ome) solutions for this grain
        ang_list = np.vstack(
            xfcapi.oscillAnglesOfHKLs(full_hkls[:, 1:], chi, rmat_c,
                                      bmat, wavelength, vInv=vinv_s)
        )

        # hkls not needed here
        all_angs, _ = xrdutil._filter_hkls_eta_ome(full_hkls, ang_list,
                                                   eta_range, ome_range)
        all_angs[:, 2] = xfcapi.mapAngle(all_angs[:, 2], ome_period)

        det_xy, _ = project(all_angs, rmat_d, rmat_c, chi,
                            tvec_d, tvec_c, tvec_s, distortion)
        _write_pixels(det_xy, all_angs[:, 2], image_stack, experiment.base,
                      experiment.inv_deltas, experiment.clip_vals)

        controller.update(grain_id + 1)
    controller.finish(task_name)
    return image_stack
def simulate_diffractions(grain_params, experiment, controller): """actual forward simulation of the diffraction""" # use a packed array for the image_stack array_dims = (experiment.nframes, experiment.ncols, ((experiment.nrows - 1)//8) + 1) image_stack = np.zeros(array_dims, dtype=np.uint8) count = len(grain_params) subprocess = 'simulate diffractions' _project = xrdutil._project_on_detector_plane rD = experiment.rMat_d chi = experiment.chi tD = experiment.tVec_d tS = experiment.tVec_s distortion = experiment.distortion eta_range = [(-np.pi, np.pi), ] ome_range = experiment.ome_range ome_period = (-np.pi, np.pi) full_hkls = xrdutil._fetch_hkls_from_planedata(experiment.plane_data) bMat = experiment.plane_data.latVecOps['B'] wlen = experiment.plane_data.wavelength controller.start(subprocess, count) for i in range(count): rC = xfcapi.makeRotMatOfExpMap(grain_params[i][0:3]) tC = np.ascontiguousarray(grain_params[i][3:6]) vInv_s = np.ascontiguousarray(grain_params[i][6:12]) ang_list = np.vstack(xfcapi.oscillAnglesOfHKLs(full_hkls[:, 1:], chi, rC, bMat, wlen, vInv=vInv_s)) # hkls not needed here all_angs, _ = xrdutil._filter_hkls_eta_ome(full_hkls, ang_list, eta_range, ome_range) all_angs[:, 2] = xfcapi.mapAngle(all_angs[:, 2], ome_period) det_xy, _ = _project(all_angs, rD, rC, chi, tD, tC, tS, distortion) _write_pixels(det_xy, all_angs[:, 2], image_stack, experiment.base, experiment.inv_deltas, experiment.clip_vals) controller.update(i+1) controller.finish(subprocess) return image_stack
def __init__(self, filename, instr_cfg, grain_params, use_attr=False):
    """Open (or adopt) an HDF5 file and lay out instrument/grain groups.

    `filename` may be an already-open h5py.File, otherwise a path stem to
    which ".hdf5" is appended. Grain quantities are stored either as HDF5
    attributes (use_attr=True) or as datasets.
    """
    # accept an open file handle or create a fresh one
    if isinstance(filename, h5py.File):
        self.fid = filename
    else:
        self.fid = h5py.File(filename + ".hdf5", "w")

    # shallow copy so the caller's config dict is not touched
    icfg = dict(instr_cfg)

    # add instrument groups and attributes
    self.instr_grp = self.fid.create_group('instrument')
    unwrap_dict_to_h5(self.instr_grp, icfg, asattr=use_attr)

    # grain quantities derived from the 12-parameter grain vector
    self.grain_grp = self.fid.create_group('grain')
    rmat_c = makeRotMatOfExpMap(grain_params[:3])
    tvec_c = np.array(grain_params[3:6]).flatten()
    vinv_s = np.array(grain_params[6:]).flatten()
    vmat_s = np.linalg.inv(mutil.vecMVToSymm(vinv_s))

    grain_items = (('rmat_c', rmat_c),
                   ('tvec_c', tvec_c),
                   ('inv(V)_s', vinv_s),
                   ('vmat_s', vmat_s))
    if use_attr:    # attribute version
        for key, val in grain_items:
            self.grain_grp.attrs.create(key, val)
    else:    # dataset version
        for key, val in grain_items:
            self.grain_grp.create_dataset(key, data=val)

    # one reflection-data subgroup per detector
    self.data_grp = self.fid.create_group('reflection_data')
    for det_key in self.instr_grp['detectors'].keys():
        self.data_grp.create_group(det_key)
def sxcal_obj_func(plist_fit, plist_full, param_flags,
                   dfuncs, dparam_flags, ndparams,
                   instr, xyo_det, hkls_idx,
                   bmat, vinv_s, ome_period,
                   bvec, evec,
                   sim_only=False, return_value_flag=None):
    """Objective function for single-crystal instrument calibration.

    Fills the refined subset `plist_fit` into `plist_full` (per the stacked
    boolean flags), simulates (x, y, omega) for each panel, and returns
    either the simulation (sim_only=True) or residuals against the measured
    `xyo_det`.

    return_value_flag: None -> residual vector; 1 -> sum(|residuals|);
    2 -> DOF-normalized chi-square.

    FIX: the `return_value_flag == 2` branch previously recomputed
    `nu_fac = 1 / (npts_tot - len(plist_fit) - 1.)` unconditionally,
    overwriting the zero-denominator guard right above it and allowing a
    ZeroDivisionError; the redundant assignment is removed.
    """
    # stack flags and force bool repr
    refine_flags = np.array(np.hstack([param_flags, dparam_flags]),
                            dtype=bool)

    # fill out full parameter list
    # !!! no scaling for now
    plist_full[refine_flags] = plist_fit

    # instrument quantities
    wavelength = plist_full[0]
    chi = plist_full[1]
    tvec_s = plist_full[2:5]

    # calibration crystal quantities
    rmat_c = xfcapi.makeRotMatOfExpMap(plist_full[5:8])
    tvec_c = plist_full[8:11]

    # right now just stuck on the end and assumed
    # to all be the same length... FIX THIS
    dparams_all = plist_full[-len(dparam_flags):]

    xy_unwarped = {}
    meas_omes = {}
    calc_omes = {}
    calc_xy = {}

    ii = 11  # offset to start of panels...
    jj = 0
    npts_tot = 0
    for det_key, panel in instr.detectors.iteritems():
        xy_unwarped[det_key] = xyo_det[det_key][:, :2]
        npts_tot += len(xyo_det[det_key])
        dfunc = dfuncs[det_key]
        len_these_dps = ndparams[det_key]
        if dfunc is not None:    # do unwarping
            dparams = dparams_all[jj:jj + len_these_dps]
            jj += len_these_dps
            xy_unwarped[det_key] = dfunc(xy_unwarped[det_key], dparams)
        meas_omes[det_key] = xyo_det[det_key][:, 2]

        # get these panel params for convenience
        gparams = plist_full[ii:ii + 6]

        rmat_d = xfcapi.makeDetectorRotMat(gparams[:3])
        tvec_d = gparams[3:].reshape(3, 1)

        # transform G-vectors:
        # 1) convert inv. stretch tensor from MV notation in to 3x3
        # 2) take reciprocal lattice vectors from CRYSTAL to SAMPLE frame
        # 3) apply stretch tensor
        # 4) normalize reciprocal lattice vectors in SAMPLE frame
        # 5) transform unit reciprocal lattice vetors back to CRYSAL frame
        gvec_c = np.dot(bmat, hkls_idx[det_key].T)
        vmat_s = mutil.vecMVToSymm(vinv_s)
        ghat_s = mutil.unitVector(np.dot(vmat_s, np.dot(rmat_c, gvec_c)))
        ghat_c = np.dot(rmat_c.T, ghat_s)

        match_omes, calc_omes_tmp = fitting.matchOmegas(
            xyo_det[det_key], hkls_idx[det_key].T,
            chi, rmat_c, bmat, wavelength,
            vInv=vinv_s, beamVec=bvec, etaVec=evec,
            omePeriod=ome_period)

        rmat_s_arr = xfcapi.makeOscillRotMatArray(
            chi, np.ascontiguousarray(calc_omes_tmp))
        calc_xy_tmp = xfcapi.gvecToDetectorXYArray(
            ghat_c.T, rmat_d, rmat_s_arr, rmat_c, tvec_d, tvec_s, tvec_c)
        if np.any(np.isnan(calc_xy_tmp)):
            print("infeasible parameters: "
                  + "may want to scale back finite difference step size")

        calc_omes[det_key] = calc_omes_tmp
        calc_xy[det_key] = calc_xy_tmp

        ii += 6

    # return values
    if sim_only:
        retval = {}
        for det_key in calc_xy.keys():
            # ??? calc_xy is always 2-d
            retval[det_key] = np.vstack(
                [calc_xy[det_key].T, calc_omes[det_key]]).T
    else:
        meas_xy_all = []
        calc_xy_all = []
        meas_omes_all = []
        calc_omes_all = []
        for det_key in xy_unwarped.keys():
            meas_xy_all.append(xy_unwarped[det_key])
            calc_xy_all.append(calc_xy[det_key])
            meas_omes_all.append(meas_omes[det_key])
            calc_omes_all.append(calc_omes[det_key])
        meas_xy_all = np.vstack(meas_xy_all)
        calc_xy_all = np.vstack(calc_xy_all)
        meas_omes_all = np.hstack(meas_omes_all)
        calc_omes_all = np.hstack(calc_omes_all)

        diff_vecs_xy = calc_xy_all - meas_xy_all
        diff_ome = xfcapi.angularDifference(calc_omes_all, meas_omes_all)
        retval = np.hstack([diff_vecs_xy,
                            diff_ome.reshape(npts_tot, 1)]).flatten()
        if return_value_flag == 1:
            # scalar sum of absolute residuals
            retval = sum(abs(retval))
        elif return_value_flag == 2:
            # DOF-normalized chi-square; guard degenerate denominator
            denom = npts_tot - len(plist_fit) - 1.
            if denom != 0:
                nu_fac = 1. / denom
            else:
                nu_fac = 1.
            retval = nu_fac * sum(retval**2)
    return retval
# ad-hoc consistency checks: compare pure-python xf against the C API
# (xfcapi) implementations; each print reports relative agreement vs epsf.
# NOTE(review): `vec`, `vHat1`, `epsf` are defined earlier in the file,
# outside this span.
vHat2 = xfcapi.unitRowVector(vec)
print "unitVector results match: ", np.linalg.norm(vHat1.T-vHat2)/np.linalg.norm(vHat1) < epsf

# detector tilt rotation
tAng = np.array([0.0011546340766314521, -0.0040527538387122993, -0.0026221336905160211])
rMat1 = xf.makeDetectorRotMat(tAng)
rMat2 = xfcapi.makeDetectorRotMat(tAng)
print "makeDetectorRotMat results match: ", np.linalg.norm(rMat1-rMat2)/np.linalg.norm(rMat1) < epsf

# oscillation (chi, omega) rotation
oAng = np.array([-0.0011591608938627839, 0.0011546340766314521])
rMat1 = xf.makeOscillRotMat(oAng)
rMat2 = xfcapi.makeOscillRotMat(oAng)
print "makeOscillRotMat results match: ", np.linalg.norm(rMat1-rMat2)/np.linalg.norm(rMat1) < epsf

# exponential-map rotation
eMap = np.array([0.66931818, -0.98578066, 0.73593251])
rMat1 = xf.makeRotMatOfExpMap(eMap)
rMat2 = xfcapi.makeRotMatOfExpMap(eMap)
print "makeRotMatOfExpMap results match: ", np.linalg.norm(rMat1-rMat2)/np.linalg.norm(rMat1) < epsf

# 180-degree rotation about an axis
axis = np.array([0.66931818, -0.98578066, 0.73593251])
rMat1 = xf.makeBinaryRotMat(axis)
rMat2 = xfcapi.makeBinaryRotMat(axis)
print "makeBinaryRotMat results match: ", np.linalg.norm(rMat1-rMat2)/np.linalg.norm(rMat1) < epsf

# eta reference frame from beam + eta vectors
bHat = np.array([0.0, 0.0, -1.0])
eta = np.array([1.0, 0.0, 0.0])
rMat1 = xf.makeEtaFrameRotMat(bHat, eta)
rMat2 = xfcapi.makeEtaFrameRotMat(bHat, eta)
print "makeEtaFrameRotMat results match: ", np.linalg.norm(rMat1-rMat2)/np.linalg.norm(rMat1) < epsf

# random angles for the angular-range checks that follow
angles = np.array([random.uniform(-np.pi, np.pi), random.uniform(-np.pi, np.pi), random.uniform(-np.pi, np.pi)])
aMin = np.array([random.uniform(-np.pi, np.pi), random.uniform(-np.pi, np.pi)])
def objFuncFitGrain(gFit, gFull, gFlag,
                    detectorParams,
                    xyo_det, hkls_idx, bMat, wavelength,
                    bVec, eVec,
                    dFunc, dParams,
                    omePeriod,
                    simOnly=False, returnScalarValue=returnScalarValue):
    """Grain-fit objective: residuals of predicted vs. measured spots.

    gFull[0]  = expMap_c[0]
    gFull[1]  = expMap_c[1]
    gFull[2]  = expMap_c[2]
    gFull[3]  = tVec_c[0]
    gFull[4]  = tVec_c[1]
    gFull[5]  = tVec_c[2]
    gFull[6]  = vInv_MV[0]
    gFull[7]  = vInv_MV[1]
    gFull[8]  = vInv_MV[2]
    gFull[9]  = vInv_MV[3]
    gFull[10] = vInv_MV[4]
    gFull[11] = vInv_MV[5]

    detectorParams[0]  = tiltAngles[0]
    detectorParams[1]  = tiltAngles[1]
    detectorParams[2]  = tiltAngles[2]
    detectorParams[3]  = tVec_d[0]
    detectorParams[4]  = tVec_d[1]
    detectorParams[5]  = tVec_d[2]
    detectorParams[6]  = chi
    detectorParams[7]  = tVec_s[0]
    detectorParams[8]  = tVec_s[1]
    detectorParams[9]  = tVec_s[2]

    Returns simulated [x, y, ome] rows when simOnly, else per-spot residual
    norms (or their scalar sum when returnScalarValue is truthy).
    NOTE(review): the default `returnScalarValue=returnScalarValue` binds a
    module-level name at def time — verify that is intentional.
    """
    npts = len(xyo_det)

    # scatter the refined subset into the full grain-parameter vector
    # (mutates gFull in place)
    gFull[gFlag] = gFit

    # remove distortion from the measured xy coordinates
    xy_unwarped = dFunc(xyo_det[:, :2], dParams)

    # detector/sample frames from the fixed detector parameters
    rMat_d = xfcapi.makeDetectorRotMat(detectorParams[:3])
    tVec_d = detectorParams[3:6].reshape(3, 1)
    chi = detectorParams[6]
    tVec_s = detectorParams[7:10].reshape(3, 1)

    # crystal frame from the (updated) grain parameters
    rMat_c = xfcapi.makeRotMatOfExpMap(gFull[:3])
    tVec_c = gFull[3:6].reshape(3, 1)
    vInv_s = gFull[6:]
    vMat_s = mutil.vecMVToSymm(vInv_s)  # NOTE: Inverse of V from F = V * R

    gVec_c = np.dot(bMat, hkls_idx)  # gVecs with magnitudes in CRYSTAL frame
    gVec_s = np.dot(vMat_s, np.dot(rMat_c, gVec_c))  # stretched gVecs in SAMPLE frame
    gHat_c = mutil.unitVector(
        np.dot(rMat_c.T, gVec_s))  # unit reciprocal lattice vectors in CRYSTAL frame

    # solve omegas consistent with the measured spots
    match_omes, calc_omes = matchOmegas(xyo_det, hkls_idx,
                                        chi, rMat_c, bMat, wavelength,
                                        vInv=vInv_s, beamVec=bVec,
                                        etaVec=eVec, omePeriod=omePeriod)

    # project each reflection back onto the detector
    calc_xy = np.zeros((npts, 2))
    for i in range(npts):
        rMat_s = xfcapi.makeOscillRotMat([chi, calc_omes[i]])
        calc_xy[i, :] = xfcapi.gvecToDetectorXY(gHat_c[:, i],
                                                rMat_d, rMat_s, rMat_c,
                                                tVec_d, tVec_s, tVec_c,
                                                beamVec=bVec).flatten()
        pass
    if np.any(np.isnan(calc_xy)):
        # some reflections missed the detector plane entirely
        print "infeasible pFull"

    # return values
    if simOnly:
        retval = np.hstack([calc_xy, calc_omes.reshape(npts, 1)])
    else:
        diff_vecs_xy = calc_xy - xy_unwarped[:, :2]
        diff_ome = xf.angularDifference(calc_omes, xyo_det[:, 2])
        retval = mutil.rowNorm(
            np.hstack([diff_vecs_xy,
                       diff_ome.reshape(npts, 1)])).flatten()
        if returnScalarValue:
            retval = sum(retval)
    return retval
def pull_spots(self, plane_data, grain_params,
               imgser_dict,
               tth_tol=0.25, eta_tol=1., ome_tol=1.,
               npdiv=2, threshold=10,
               eta_ranges=[(-np.pi, np.pi), ],
               ome_period=(-np.pi, np.pi),
               dirname='results', filename=None, output_format='text',
               save_spot_list=False,
               quiet=True, check_only=False,
               interp='nearest'):
    """
    Extract reflection info from a rotation series encoded as an
    OmegaImageseries object.

    Parameters (selected):
        plane_data     : crystallographic plane data for the simulation
        grain_params   : 12-element grain parameter vector
        imgser_dict    : dict of OmegaImageseries keyed by detector id
        tth_tol, eta_tol, ome_tol : patch half-window tolerances (degrees)
        threshold      : intensity threshold for signal/peak detection
        check_only     : if True, only report whether patches contain signal
        interp         : 'nearest' or 'bilinear' patch interpolation

    Returns:
        (compl, output) -- per-reflection boolean signal flags, and a dict
        keyed by detector id of per-patch result lists.

    NOTE(review): mutable default `eta_ranges=[...]` is never mutated here,
    but consider the None-sentinel pattern.
    """
    # grain parameters
    rMat_c = makeRotMatOfExpMap(grain_params[:3])
    tVec_c = grain_params[3:6]

    # grab omega ranges from first imageseries
    #
    # WARNING: all imageseries AND all wedges within are assumed to have
    # the same omega values; put in a check that they are all the same???
    oims0 = imgser_dict[imgser_dict.keys()[0]]
    ome_ranges = [np.radians([i['ostart'], i['ostop']])
                  for i in oims0.omegawedges.wedges]

    # delta omega in DEGREES grabbed from first imageseries in the dict
    delta_ome = oims0.omega[0, 1] - oims0.omega[0, 0]

    # make omega grid for frame expansion around reference frame
    # in DEGREES
    ndiv_ome, ome_del = make_tolerance_grid(
        delta_ome, ome_tol, 1, adjust_window=True,
    )

    # generate structuring element for connected component labeling
    # (2-d if the omega window is a single frame, else 3-d)
    if ndiv_ome == 1:
        label_struct = ndimage.generate_binary_structure(2, 2)
    else:
        label_struct = ndimage.generate_binary_structure(3, 3)

    # simulate rotation series
    sim_results = self.simulate_rotation_series(
        plane_data, [grain_params, ],
        eta_ranges=eta_ranges,
        ome_ranges=ome_ranges,
        ome_period=ome_period)

    # patch vertex generator (global for instrument)
    # corner offsets in (tth, eta) for the 4 patch vertices
    tol_vec = 0.5*np.radians(
        [-tth_tol, -eta_tol,
         -tth_tol, eta_tol,
         tth_tol, eta_tol,
         tth_tol, -eta_tol])

    # prepare output if requested
    if filename is not None and output_format.lower() == 'hdf5':
        this_filename = os.path.join(dirname, filename)
        writer = io.GrainDataWriter_h5(
            os.path.join(dirname, filename),
            self.write_config(), grain_params)

    # =====================================================================
    # LOOP OVER PANELS
    # =====================================================================
    iRefl = 0
    compl = []
    output = dict.fromkeys(self.detectors)
    for detector_id in self.detectors:
        # initialize text-based output writer
        if filename is not None and output_format.lower() == 'text':
            output_dir = os.path.join(dirname, detector_id)
            if not os.path.exists(output_dir):
                os.makedirs(output_dir)
            this_filename = os.path.join(output_dir, filename)
            writer = io.PatchDataWriter(this_filename)

        # grab panel
        panel = self.detectors[detector_id]
        instr_cfg = panel.config_dict(self.chi, self.tvec)
        native_area = panel.pixel_area  # pixel ref area

        # pull out the OmegaImageSeries for this panel from input dict
        ome_imgser = imgser_dict[detector_id]

        # extract simulation results
        sim_results_p = sim_results[detector_id]
        hkl_ids = sim_results_p[0][0]
        hkls_p = sim_results_p[1][0]
        ang_centers = sim_results_p[2][0]
        xy_centers = sim_results_p[3][0]
        ang_pixel_size = sim_results_p[4][0]

        # now verify that full patch falls on detector...
        # ???: strictly necessary?
        #
        # patch vertex array from sim
        nangs = len(ang_centers)
        patch_vertices = (
            np.tile(ang_centers[:, :2], (1, 4)) +
            np.tile(tol_vec, (nangs, 1))
        ).reshape(4*nangs, 2)
        ome_dupl = np.tile(
            ang_centers[:, 2], (4, 1)
        ).T.reshape(len(patch_vertices), 1)

        # find vertices that all fall on the panel
        det_xy, _ = xrdutil._project_on_detector_plane(
            np.hstack([patch_vertices, ome_dupl]),
            panel.rmat, rMat_c, self.chi,
            panel.tvec, tVec_c, self.tvec,
            panel.distortion)
        _, on_panel = panel.clip_to_panel(det_xy, buffer_edges=True)

        # all vertices must be on...
        patch_is_on = np.all(on_panel.reshape(nangs, 4), axis=1)
        patch_xys = det_xy.reshape(nangs, 4, 2)[patch_is_on]

        # re-filter...
        hkl_ids = hkl_ids[patch_is_on]
        hkls_p = hkls_p[patch_is_on, :]
        ang_centers = ang_centers[patch_is_on, :]
        xy_centers = xy_centers[patch_is_on, :]
        ang_pixel_size = ang_pixel_size[patch_is_on, :]

        # TODO: add polygon testing right here!
        # done <JVB 06/21/16>
        if check_only:
            # fast path: only test whether each patch window holds signal
            patch_output = []
            for i_pt, angs in enumerate(ang_centers):
                # the evaluation omegas;
                # expand about the central value using tol vector
                ome_eval = np.degrees(angs[2]) + ome_del

                # ...vectorize the omega_to_frame function to avoid loop?
                frame_indices = [
                    ome_imgser.omega_to_frame(ome)[0] for ome in ome_eval
                ]
                if -1 in frame_indices:
                    if not quiet:
                        msg = """
                        window for (%d%d%d) falls outside omega range
                        """ % tuple(hkls_p[i_pt, :])
                        print(msg)
                    continue
                else:
                    these_vertices = patch_xys[i_pt]
                    ijs = panel.cartToPixel(these_vertices)
                    ii, jj = polygon(ijs[:, 0], ijs[:, 1])
                    contains_signal = False
                    for i_frame in frame_indices:
                        contains_signal = contains_signal or np.any(
                            ome_imgser[i_frame][ii, jj] > threshold
                        )
                    compl.append(contains_signal)
                    patch_output.append((ii, jj, frame_indices))
        else:
            # make the tth,eta patches for interpolation
            patches = xrdutil.make_reflection_patches(
                instr_cfg,
                ang_centers[:, :2], ang_pixel_size,
                omega=ang_centers[:, 2],
                tth_tol=tth_tol, eta_tol=eta_tol,
                rMat_c=rMat_c, tVec_c=tVec_c,
                distortion=panel.distortion,
                npdiv=npdiv, quiet=True,
                beamVec=self.beam_vector)

            # GRAND LOOP over reflections for this panel
            patch_output = []
            for i_pt, patch in enumerate(patches):

                # strip relevant objects out of current patch
                vtx_angs, vtx_xy, conn, areas, xy_eval, ijs = patch

                prows, pcols = areas.shape
                nrm_fac = areas/float(native_area)
                nrm_fac = nrm_fac/np.min(nrm_fac)

                # grab hkl info
                hkl = hkls_p[i_pt, :]
                hkl_id = hkl_ids[i_pt]

                # edge arrays
                tth_edges = vtx_angs[0][0, :]
                delta_tth = tth_edges[1] - tth_edges[0]
                eta_edges = vtx_angs[1][:, 0]
                delta_eta = eta_edges[1] - eta_edges[0]

                # need to reshape eval pts for interpolation
                xy_eval = np.vstack([xy_eval[0].flatten(),
                                     xy_eval[1].flatten()]).T

                # the evaluation omegas;
                # expand about the central value using tol vector
                ome_eval = np.degrees(ang_centers[i_pt, 2]) + ome_del

                # ???: vectorize the omega_to_frame function to avoid loop?
                frame_indices = [
                    ome_imgser.omega_to_frame(ome)[0] for ome in ome_eval
                ]
                if -1 in frame_indices:
                    if not quiet:
                        msg = """
                        window for (%d%d%d) falls outside omega range
                        """ % tuple(hkl)
                        print(msg)
                    continue
                else:
                    # initialize spot data parameters
                    # !!! maybe change these to nan to not f**k up writer
                    peak_id = -999
                    sum_int = None
                    max_int = None
                    meas_angs = None
                    meas_xy = None

                    # quick check for intensity
                    contains_signal = False
                    patch_data_raw = []
                    for i_frame in frame_indices:
                        tmp = ome_imgser[i_frame][ijs[0], ijs[1]]
                        contains_signal = contains_signal or np.any(
                            tmp > threshold
                        )
                        patch_data_raw.append(tmp)
                        pass
                    patch_data_raw = np.stack(patch_data_raw, axis=0)
                    compl.append(contains_signal)

                    if contains_signal:
                        # initialize patch data array for intensities
                        if interp.lower() == 'bilinear':
                            patch_data = np.zeros(
                                (len(frame_indices), prows, pcols))
                            for i, i_frame in enumerate(frame_indices):
                                patch_data[i] = \
                                    panel.interpolate_bilinear(
                                        xy_eval,
                                        ome_imgser[i_frame],
                                        pad_with_nans=False
                                    ).reshape(prows, pcols)  # * nrm_fac
                        elif interp.lower() == 'nearest':
                            patch_data = patch_data_raw  # * nrm_fac
                        else:
                            msg = "interpolation option " + \
                                "'%s' not understood"
                            # NOTE(review): Python-2-style raise of a
                            # tuple; would not raise properly on Python 3
                            raise(RuntimeError, msg % interp)

                        # now have interpolated patch data...
                        labels, num_peaks = ndimage.label(
                            patch_data > threshold, structure=label_struct
                        )
                        slabels = np.arange(1, num_peaks + 1)

                        if num_peaks > 0:
                            peak_id = iRefl
                            coms = np.array(
                                ndimage.center_of_mass(
                                    patch_data,
                                    labels=labels,
                                    index=slabels
                                )
                            )
                            if num_peaks > 1:
                                # keep the peak closest to patch center
                                center = np.r_[patch_data.shape]*0.5
                                center_t = np.tile(center, (num_peaks, 1))
                                com_diff = coms - center_t
                                closest_peak_idx = np.argmin(
                                    np.sum(com_diff**2, axis=1)
                                )
                            else:
                                closest_peak_idx = 0
                                pass  # end multipeak conditional
                            coms = coms[closest_peak_idx]
                            # meas_omes = \
                            #     ome_edges[0] + (0.5 + coms[0])*delta_ome
                            meas_omes = \
                                ome_eval[0] + coms[0]*delta_ome
                            meas_angs = np.hstack(
                                [tth_edges[0] + (0.5 + coms[2])*delta_tth,
                                 eta_edges[0] + (0.5 + coms[1])*delta_eta,
                                 mapAngle(
                                     np.radians(meas_omes), ome_period
                                 )]
                            )

                            # intensities
                            #   - summed is 'integrated' over interpolated
                            #     data
                            #   - max is max of raw input data
                            sum_int = np.sum(
                                patch_data[
                                    labels == slabels[closest_peak_idx]
                                ]
                            )
                            max_int = np.max(
                                patch_data_raw[
                                    labels == slabels[closest_peak_idx]
                                ]
                            )
                            # ???: Should this only use labeled pixels?
                            # Those are segmented from interpolated data,
                            # not raw; likely ok in most cases.

                            # need MEASURED xy coords
                            gvec_c = anglesToGVec(
                                meas_angs,
                                chi=self.chi,
                                rMat_c=rMat_c,
                                bHat_l=self.beam_vector)
                            rMat_s = makeOscillRotMat(
                                [self.chi, meas_angs[2]]
                            )
                            meas_xy = gvecToDetectorXY(
                                gvec_c,
                                panel.rmat, rMat_s, rMat_c,
                                panel.tvec, self.tvec, tVec_c,
                                beamVec=self.beam_vector)
                            if panel.distortion is not None:
                                # FIXME: distortion handling
                                meas_xy = panel.distortion[0](
                                    np.atleast_2d(meas_xy),
                                    panel.distortion[1],
                                    invert=True).flatten()
                                pass
                            # FIXME: why is this suddenly necessary???
                            meas_xy = meas_xy.squeeze()
                            pass  # end num_peaks > 0
                    else:
                        patch_data = patch_data_raw
                        pass  # end contains_signal

                    # write output
                    if filename is not None:
                        if output_format.lower() == 'text':
                            writer.dump_patch(
                                peak_id, hkl_id, hkl, sum_int, max_int,
                                ang_centers[i_pt], meas_angs,
                                xy_centers[i_pt], meas_xy)
                        elif output_format.lower() == 'hdf5':
                            xyc_arr = xy_eval.reshape(
                                prows, pcols, 2
                            ).transpose(2, 0, 1)
                            writer.dump_patch(
                                detector_id, iRefl, peak_id, hkl_id, hkl,
                                tth_edges, eta_edges, np.radians(ome_eval),
                                xyc_arr, ijs, frame_indices,
                                patch_data, ang_centers[i_pt],
                                xy_centers[i_pt],
                                meas_angs, meas_xy)
                        pass  # end conditional on write output
                    pass  # end conditional on check only

                    patch_output.append([
                        peak_id, hkl_id, hkl,
                        sum_int, max_int,
                        ang_centers[i_pt], meas_angs, meas_xy,
                    ])
                    iRefl += 1
                pass  # end patch conditional
            pass  # end patch loop
        output[detector_id] = patch_output
        if filename is not None and output_format.lower() == 'text':
            writer.close()
        pass  # end detector loop
    if filename is not None and output_format.lower() == 'hdf5':
        writer.close()
    return compl, output
def simulate_rotation_series(self, plane_data, grain_param_list,
                             eta_ranges=None,
                             ome_ranges=None,
                             ome_period=(-np.pi, np.pi),
                             chi=0., tVec_s=ct.zeros_3,
                             wavelength=None):
    """Simulate a rotation series for a list of grains on this panel.

    Parameters
    ----------
    plane_data : PlaneData
        crystallographic plane data (supplies B matrix and wavelength).
    grain_param_list : sequence of array-like
        per-grain 12-parameter vectors ([:3] exp-map, [3:6] translation,
        [6:] inverse stretch in MV notation).
    eta_ranges, ome_ranges : list of (min, max) or None
        angular acceptance windows in radians; None means the full circle
        [(-pi, pi)].  (FIX: previously mutable list defaults.)
    ome_period : (float, float)
        omega mapping period in radians.
    chi, tVec_s : sample tilt and translation.
    wavelength : float or None
        if given, overrides/validates the plane_data wavelength.

    Returns
    -------
    tuple of lists (one entry per grain):
        valid_ids, valid_hkls, valid_angs, valid_xys, ang_pixel_size
    """
    # avoid mutable default arguments; None selects the full circle
    if eta_ranges is None:
        eta_ranges = [(-np.pi, np.pi), ]
    if ome_ranges is None:
        ome_ranges = [(-np.pi, np.pi), ]

    # grab B-matrix from plane data
    bMat = plane_data.latVecOps['B']

    # reconcile wavelength
    #   * added sanity check on exclusions here; possible to
    #   * make some reflections invalid (NaN)
    if wavelength is None:
        wavelength = plane_data.wavelength
    else:
        if plane_data.wavelength != wavelength:
            plane_data.wavelength = ct.keVToAngstrom(wavelength)
        assert not np.any(np.isnan(plane_data.getTTh())),\
            "plane data exclusions incompatible with wavelength"

    # vstacked G-vector id, h, k, l
    full_hkls = xrdutil._fetch_hkls_from_planedata(plane_data)

    """ LOOP OVER GRAINS """
    valid_ids = []
    valid_hkls = []
    valid_angs = []
    valid_xys = []
    ang_pixel_size = []
    for gparm in grain_param_list:

        # make useful parameters
        rMat_c = makeRotMatOfExpMap(gparm[:3])
        tVec_c = gparm[3:6]
        vInv_s = gparm[6:]

        # All possible bragg conditions as vstacked [tth, eta, ome]
        # for each omega solution
        angList = np.vstack(
            oscillAnglesOfHKLs(
                full_hkls[:, 1:], chi,
                rMat_c, bMat, wavelength,
                vInv=vInv_s,
            )
        )

        # filter by eta and omega ranges
        # ??? get eta range from detector?
        allAngs, allHKLs = xrdutil._filter_hkls_eta_ome(
            full_hkls, angList, eta_ranges, ome_ranges
        )
        allAngs[:, 2] = mapAngle(allAngs[:, 2], ome_period)

        # find points that fall on the panel
        det_xy, rMat_s = xrdutil._project_on_detector_plane(
            allAngs,
            self.rmat, rMat_c, chi,
            self.tvec, tVec_c, tVec_s,
            self.distortion)
        xys_p, on_panel = self.clip_to_panel(det_xy)
        valid_xys.append(xys_p)

        # grab hkls and gvec ids for this panel
        valid_hkls.append(allHKLs[on_panel, 1:])
        valid_ids.append(allHKLs[on_panel, 0])

        # reflection angles (voxel centers) and pixel size in (tth, eta)
        valid_angs.append(allAngs[on_panel, :])
        ang_pixel_size.append(self.angularPixelSize(xys_p))
    return valid_ids, valid_hkls, valid_angs, valid_xys, ang_pixel_size
def objFuncFitGrain(gFit, gFull, gFlag,
                    instrument,
                    reflections_dict,
                    bMat, wavelength,
                    omePeriod,
                    simOnly=False,
                    return_value_flag=return_value_flag):
    """Multi-panel grain-fit objective function.

    gFull[0]  = expMap_c[0]
    gFull[1]  = expMap_c[1]
    gFull[2]  = expMap_c[2]
    gFull[3]  = tVec_c[0]
    gFull[4]  = tVec_c[1]
    gFull[5]  = tVec_c[2]
    gFull[6]  = vInv_MV[0]
    gFull[7]  = vInv_MV[1]
    gFull[8]  = vInv_MV[2]
    gFull[9]  = vInv_MV[3]
    gFull[10] = vInv_MV[4]
    gFull[11] = vInv_MV[5]

    OLD CALL
    objFuncFitGrain(gFit, gFull, gFlag,
                    detectorParams,
                    xyo_det, hkls_idx, bMat, wavelength,
                    bVec, eVec,
                    dFunc, dParams,
                    omePeriod,
                    simOnly=False, return_value_flag=return_value_flag)

    FIX: the NaN feasibility check now tests the stacked `calc_xy_all`
    rather than the loop variable `calc_xy`, which held only the last
    panel's values (and a stale value if the last panel was skipped).
    """
    bVec = instrument.beam_vector
    eVec = instrument.eta_vector

    # fill out parameters
    gFull[gFlag] = gFit

    # map parameters to functional arrays
    rMat_c = xfcapi.makeRotMatOfExpMap(gFull[:3])
    tVec_c = gFull[3:6].reshape(3, 1)
    vInv_s = gFull[6:]
    vMat_s = mutil.vecMVToSymm(vInv_s)  # NOTE: Inverse of V from F = V * R

    # loop over instrument panels
    # CAVEAT: keeping track of key ordering in the "detectors" attribute of
    # instrument here because I am not sure if instatiating them using
    # dict.fromkeys() preserves the same order if using iteration...
    # <JVB 2017-10-31>
    calc_omes_dict = dict.fromkeys(instrument.detectors, [])
    calc_xy_dict = dict.fromkeys(instrument.detectors)
    meas_xyo_all = []
    det_keys_ordered = []
    for det_key, panel in instrument.detectors.iteritems():
        det_keys_ordered.append(det_key)

        rMat_d, tVec_d, chi, tVec_s = extract_detector_transformation(
            instrument.detector_parameters[det_key])

        results = reflections_dict[det_key]
        if len(results) == 0:
            continue

        """
        extract data from results list fields:
          refl_id, gvec_id, hkl, sum_int, max_int, pred_ang, meas_ang, meas_xy

        or array from spots tables:
          0:5    ID    PID    H    K    L
          5:7    sum(int)    max(int)
          7:10   pred tth    pred eta    pred ome
          10:13  meas tth    meas eta    meas ome
          13:15  pred X    pred Y
          15:17  meas X    meas Y
        """
        if isinstance(results, list):
            # WARNING: hkls and derived vectors below must be columnwise;
            # strictly necessary??? change affected APIs instead?
            # <JVB 2017-03-26>
            hkls = np.atleast_2d(
                np.vstack([x[2] for x in results])
            ).T

            meas_xyo = np.atleast_2d(
                np.vstack([np.r_[x[7], x[6][-1]] for x in results])
            )
        elif isinstance(results, np.ndarray):
            hkls = np.atleast_2d(results[:, 2:5]).T
            meas_xyo = np.atleast_2d(results[:, [15, 16, 12]])

        # FIXME: distortion handling must change to class-based
        if panel.distortion is not None:
            meas_omes = meas_xyo[:, 2]
            xy_unwarped = panel.distortion[0](
                meas_xyo[:, :2], panel.distortion[1])
            meas_xyo = np.vstack([xy_unwarped.T, meas_omes]).T

        # append to meas_omes
        meas_xyo_all.append(meas_xyo)

        # G-vectors:
        #   1. calculate full g-vector components in CRYSTAL frame from B
        #   2. rotate into SAMPLE frame and apply stretch
        #   3. rotate back into CRYSTAL frame and normalize to unit magnitude
        # IDEA: make a function for this sequence of operations with option
        # for choosing ouput frame (i.e. CRYSTAL vs SAMPLE vs LAB)
        gVec_c = np.dot(bMat, hkls)
        gVec_s = np.dot(vMat_s, np.dot(rMat_c, gVec_c))
        gHat_c = mutil.unitVector(np.dot(rMat_c.T, gVec_s))

        # !!!: check that this operates on UNWARPED xy
        match_omes, calc_omes = matchOmegas(
            meas_xyo, hkls, chi, rMat_c, bMat, wavelength,
            vInv=vInv_s, beamVec=bVec, etaVec=eVec,
            omePeriod=omePeriod)

        # append to omes dict
        calc_omes_dict[det_key] = calc_omes

        # TODO: try Numba implementations
        rMat_s = xfcapi.makeOscillRotMatArray(chi, calc_omes)
        calc_xy = xfcapi.gvecToDetectorXYArray(gHat_c.T,
                                               rMat_d, rMat_s, rMat_c,
                                               tVec_d, tVec_s, tVec_c,
                                               beamVec=bVec)

        # append to xy dict
        calc_xy_dict[det_key] = calc_xy

    # stack results to concatenated arrays
    calc_omes_all = np.hstack([calc_omes_dict[k] for k in det_keys_ordered])
    tmp = []
    for k in det_keys_ordered:
        if calc_xy_dict[k] is not None:
            tmp.append(calc_xy_dict[k])
    calc_xy_all = np.vstack(tmp)
    meas_xyo_all = np.vstack(meas_xyo_all)

    npts = len(meas_xyo_all)
    # FIX: test the full stacked array, not just the last panel's calc_xy
    if np.any(np.isnan(calc_xy_all)):
        raise RuntimeError(
            "infeasible pFull: may want to scale "
            + "back finite difference step size")

    # return values
    if simOnly:
        # return simulated values
        if return_value_flag in [None, 1]:
            retval = np.hstack([calc_xy_all, calc_omes_all.reshape(npts, 1)])
        else:
            rd = dict.fromkeys(det_keys_ordered)
            for det_key in det_keys_ordered:
                rd[det_key] = {'calc_xy': calc_xy_dict[det_key],
                               'calc_omes': calc_omes_dict[det_key]}
            retval = rd
    else:
        # return residual vector
        # IDEA: try angles instead of xys?
        diff_vecs_xy = calc_xy_all - meas_xyo_all[:, :2]
        diff_ome = xf.angularDifference(calc_omes_all, meas_xyo_all[:, 2])
        retval = np.hstack([diff_vecs_xy,
                            diff_ome.reshape(npts, 1)]).flatten()
        if return_value_flag == 1:
            # return scalar sum of squared residuals
            retval = sum(abs(retval))
        elif return_value_flag == 2:
            # return DOF-normalized chisq
            # TODO: check this calculation
            denom = 3*npts - len(gFit) - 1.
            if denom != 0:
                nu_fac = 1. / denom
            else:
                nu_fac = 1.
            retval = nu_fac * sum(retval**2)
    return retval
def objFuncFitGrain(gFit, gFull, gFlag,
                    instrument,
                    reflections_dict,
                    bMat, wavelength,
                    omePeriod,
                    simOnly=False,
                    return_value_flag=return_value_flag):
    """Multi-panel grain-fit objective function (panel-attribute variant).

    gFull[0]  = expMap_c[0]
    gFull[1]  = expMap_c[1]
    gFull[2]  = expMap_c[2]
    gFull[3]  = tVec_c[0]
    gFull[4]  = tVec_c[1]
    gFull[5]  = tVec_c[2]
    gFull[6]  = vInv_MV[0]
    gFull[7]  = vInv_MV[1]
    gFull[8]  = vInv_MV[2]
    gFull[9]  = vInv_MV[3]
    gFull[10] = vInv_MV[4]
    gFull[11] = vInv_MV[5]

    OLD CALL
    objFuncFitGrain(gFit, gFull, gFlag,
                    detectorParams,
                    xyo_det, hkls_idx, bMat, wavelength,
                    bVec, eVec,
                    dFunc, dParams,
                    omePeriod,
                    simOnly=False, return_value_flag=return_value_flag)

    FIX: the NaN feasibility check now tests the stacked `calc_xy_all`
    rather than the loop variable `calc_xy`, which held only the last
    panel's values (and a stale value if the last panel was skipped).
    """
    bVec = instrument.beam_vector
    eVec = instrument.eta_vector

    # fill out parameters
    gFull[gFlag] = gFit

    # map parameters to functional arrays
    rMat_c = xfcapi.makeRotMatOfExpMap(gFull[:3])
    tVec_c = gFull[3:6].reshape(3, 1)
    vInv_s = gFull[6:]
    vMat_s = mutil.vecMVToSymm(vInv_s)  # NOTE: Inverse of V from F = V * R

    # loop over instrument panels
    # CAVEAT: keeping track of key ordering in the "detectors" attribute of
    # instrument here because I am not sure if instatiating them using
    # dict.fromkeys() preserves the same order if using iteration...
    # <JVB 2017-10-31>
    calc_omes_dict = dict.fromkeys(instrument.detectors, [])
    calc_xy_dict = dict.fromkeys(instrument.detectors)
    meas_xyo_all = []
    det_keys_ordered = []
    for det_key, panel in instrument.detectors.iteritems():
        det_keys_ordered.append(det_key)

        # extract transformation quantities
        rMat_d = instrument.detectors[det_key].rmat
        tVec_d = instrument.detectors[det_key].tvec
        chi = instrument.chi
        tVec_s = instrument.tvec

        results = reflections_dict[det_key]
        if len(results) == 0:
            continue

        """
        extract data from results list fields:
          refl_id, gvec_id, hkl, sum_int, max_int, pred_ang, meas_ang, meas_xy
        """
        # WARNING: hkls and derived vectors below must be columnwise;
        # strictly necessary??? change affected APIs instead?
        # <JVB 2017-03-26>
        hkls = np.atleast_2d(np.vstack([x[2] for x in results])).T

        meas_xyo = np.atleast_2d(
            np.vstack([np.r_[x[7], x[6][-1]] for x in results]))

        # FIXME: distortion handling must change to class-based
        if panel.distortion is not None:
            meas_omes = meas_xyo[:, 2]
            xy_unwarped = panel.distortion[0](meas_xyo[:, :2],
                                              panel.distortion[1])
            meas_xyo = np.vstack([xy_unwarped.T, meas_omes]).T

        # append to meas_omes
        meas_xyo_all.append(meas_xyo)

        # G-vectors:
        #   1. calculate full g-vector components in CRYSTAL frame from B
        #   2. rotate into SAMPLE frame and apply stretch
        #   3. rotate back into CRYSTAL frame and normalize to unit magnitude
        # IDEA: make a function for this sequence of operations with option
        # for choosing ouput frame (i.e. CRYSTAL vs SAMPLE vs LAB)
        gVec_c = np.dot(bMat, hkls)
        gVec_s = np.dot(vMat_s, np.dot(rMat_c, gVec_c))
        gHat_c = mutil.unitVector(np.dot(rMat_c.T, gVec_s))

        # !!!: check that this operates on UNWARPED xy
        match_omes, calc_omes = matchOmegas(meas_xyo, hkls,
                                            chi, rMat_c, bMat, wavelength,
                                            vInv=vInv_s, beamVec=bVec,
                                            etaVec=eVec, omePeriod=omePeriod)

        # append to omes dict
        calc_omes_dict[det_key] = calc_omes

        # TODO: try Numba implementations
        rMat_s = xfcapi.makeOscillRotMatArray(chi, calc_omes)
        calc_xy = xfcapi.gvecToDetectorXYArray(gHat_c.T,
                                               rMat_d, rMat_s, rMat_c,
                                               tVec_d, tVec_s, tVec_c,
                                               beamVec=bVec)

        # append to xy dict
        calc_xy_dict[det_key] = calc_xy

    # stack results to concatenated arrays
    calc_omes_all = np.hstack([calc_omes_dict[k] for k in det_keys_ordered])
    tmp = []
    for k in det_keys_ordered:
        if calc_xy_dict[k] is not None:
            tmp.append(calc_xy_dict[k])
    calc_xy_all = np.vstack(tmp)
    meas_xyo_all = np.vstack(meas_xyo_all)

    npts = len(meas_xyo_all)
    # FIX: test the full stacked array, not just the last panel's calc_xy
    if np.any(np.isnan(calc_xy_all)):
        raise RuntimeError("infeasible pFull: may want to scale "
                           + "back finite difference step size")

    # return values
    if simOnly:
        # return simulated values
        if return_value_flag in [None, 1]:
            retval = np.hstack([calc_xy_all, calc_omes_all.reshape(npts, 1)])
        else:
            rd = dict.fromkeys(det_keys_ordered)
            for det_key in det_keys_ordered:
                rd[det_key] = {
                    'calc_xy': calc_xy_dict[det_key],
                    'calc_omes': calc_omes_dict[det_key]
                }
            retval = rd
    else:
        # return residual vector
        # IDEA: try angles instead of xys?
        diff_vecs_xy = calc_xy_all - meas_xyo_all[:, :2]
        diff_ome = xf.angularDifference(calc_omes_all, meas_xyo_all[:, 2])
        retval = np.hstack([diff_vecs_xy,
                            diff_ome.reshape(npts, 1)]).flatten()
        if return_value_flag == 1:
            # return scalar sum of squared residuals
            retval = sum(abs(retval))
        elif return_value_flag == 2:
            # return DOF-normalized chisq
            # TODO: check this calculation
            denom = 3 * npts - len(gFit) - 1.
            if denom != 0:
                nu_fac = 1. / denom
            else:
                nu_fac = 1.
            retval = nu_fac * sum(retval**2)
    return retval
def simulate_laue_pattern(self, crystal_data,
                          minEnergy=5., maxEnergy=35.,
                          rmat_s=None, tvec_s=None,
                          grain_params=None,
                          beam_vec=None):
    """
    Simulate a Laue diffraction pattern on this detector panel.

    Parameters
    ----------
    crystal_data : PlaneData or 2-sequence
        Either a PlaneData instance, or a pair ``(hkls, bmat)`` giving the
        hkl column array and the B matrix explicitly.
    minEnergy, maxEnergy : float or sequence of float, optional
        Beam energy cutoffs in keV; pass equal-length sequences to specify
        multiple energy bands.
    rmat_s : (3, 3) array_like, optional
        Sample frame rotation matrix; identity if None.
    tvec_s : (3,) array_like, optional
        Sample frame translation vector; zeros if None.
    grain_params : (n, 12) array_like, optional
        Rows of [exp_map (3), tvec_c (3), vinv (6)]; defaults to a single
        unrotated, unstrained grain at the origin.
    beam_vec : (3,) array_like, optional
        Beam propagation vector; module default if None.

    Returns
    -------
    xy_det, hkls_in, angles, dspacing, energy : ndarray
        Per-grain result arrays, NaN-padded where reflections miss the
        panel or fall outside the energy band(s).
    """
    if isinstance(crystal_data, PlaneData):
        plane_data = crystal_data
        # grab the expanded list of hkls from plane_data
        hkls = np.hstack(plane_data.getSymHKLs())
        # and the unit plane normals (G-vectors) in CRYSTAL FRAME
        gvec_c = np.dot(plane_data.latVecOps['B'], hkls)
    elif len(crystal_data) == 2:
        # !!! should clean this up
        hkls = np.array(crystal_data[0])
        bmat = crystal_data[1]
        gvec_c = np.dot(bmat, hkls)
    else:
        # FIX: was `raise(RuntimeError, '...')` — raising a tuple is a
        # TypeError on Python 3 (exceptions must derive from BaseException)
        raise RuntimeError('argument list not understood')
    nhkls_tot = hkls.shape[1]

    # parse energy ranges
    # TODO: allow for spectrum parsing
    multipleEnergyRanges = False
    if hasattr(maxEnergy, '__len__'):
        assert len(maxEnergy) == len(minEnergy), \
            'energy cutoff ranges must have the same length'
        multipleEnergyRanges = True
        lmin = []
        lmax = []
        for i in range(len(maxEnergy)):
            # wavelength cutoffs are inverted w.r.t. the energy cutoffs
            lmin.append(ct.keVToAngstrom(maxEnergy[i]))
            lmax.append(ct.keVToAngstrom(minEnergy[i]))
    else:
        lmin = ct.keVToAngstrom(maxEnergy)
        lmax = ct.keVToAngstrom(minEnergy)

    # parse grain parameters kwarg
    if grain_params is None:
        grain_params = np.atleast_2d(
            np.hstack([np.zeros(6), ct.identity_6x1])
        )
    n_grains = len(grain_params)

    # sample rotation
    if rmat_s is None:
        rmat_s = ct.identity_3x3

    # dummy translation vector... make input
    if tvec_s is None:
        tvec_s = ct.zeros_3

    # beam vector
    if beam_vec is None:
        beam_vec = ct.beam_vec

    # =========================================================================
    # LOOP OVER GRAINS
    # =========================================================================

    # pre-allocate output arrays
    xy_det = np.nan*np.ones((n_grains, nhkls_tot, 2))
    hkls_in = np.nan*np.ones((n_grains, 3, nhkls_tot))
    angles = np.nan*np.ones((n_grains, nhkls_tot, 2))
    dspacing = np.nan*np.ones((n_grains, nhkls_tot))
    energy = np.nan*np.ones((n_grains, nhkls_tot))
    for iG, gp in enumerate(grain_params):
        rmat_c = makeRotMatOfExpMap(gp[:3])
        tvec_c = gp[3:6].reshape(3, 1)
        vInv_s = mutil.vecMVToSymm(gp[6:].reshape(6, 1))

        # stretch them: V^(-1) * R * Gc
        gvec_s_str = np.dot(vInv_s, np.dot(rmat_c, gvec_c))
        ghat_c_str = mutil.unitVector(np.dot(rmat_c.T, gvec_s_str))

        # project
        dpts = gvecToDetectorXY(ghat_c_str.T,
                                self.rmat, rmat_s, rmat_c,
                                self.tvec, tvec_s, tvec_c,
                                beamVec=beam_vec)

        # check intersections with detector plane
        canIntersect = ~np.isnan(dpts[:, 0])
        npts_in = sum(canIntersect)

        if np.any(canIntersect):
            dpts = dpts[canIntersect, :].reshape(npts_in, 2)
            dhkl = hkls[:, canIntersect].reshape(3, npts_in)

            # back to angles
            tth_eta, gvec_l = detectorXYToGvec(
                dpts,
                self.rmat, rmat_s,
                self.tvec, tvec_s, tvec_c,
                beamVec=beam_vec)
            tth_eta = np.vstack(tth_eta).T

            # warp measured points
            if self.distortion is not None:
                if len(self.distortion) == 2:
                    dpts = self.distortion[0](
                        dpts, self.distortion[1],
                        invert=True)
                else:
                    # FIX: was `raise(RuntimeError, "...")` (tuple raise)
                    raise RuntimeError(
                        "something is wrong with the distortion")

            # plane spacings and energies
            dsp = 1. / rowNorm(gvec_s_str[:, canIntersect].T)
            wlen = 2*dsp*np.sin(0.5*tth_eta[:, 0])  # Bragg's law

            # clip to detector panel
            _, on_panel = self.clip_to_panel(dpts, buffer_edges=True)

            if multipleEnergyRanges:
                validEnergy = np.zeros(len(wlen), dtype=bool)
                for i in range(len(lmin)):
                    in_energy_range = np.logical_and(
                        wlen >= lmin[i],
                        wlen <= lmax[i])
                    validEnergy = validEnergy | in_energy_range
            else:
                validEnergy = np.logical_and(wlen >= lmin, wlen <= lmax)

            # index for valid reflections
            keepers = np.where(np.logical_and(on_panel, validEnergy))[0]

            # assign output arrays
            xy_det[iG][keepers, :] = dpts[keepers, :]
            hkls_in[iG][:, keepers] = dhkl[:, keepers]
            angles[iG][keepers, :] = tth_eta[keepers, :]
            dspacing[iG, keepers] = dsp[keepers]
            # keV <-> Angstrom conversion is an involution (E = hc/lambda),
            # so the same helper maps wavelength back to energy
            energy[iG, keepers] = ct.keVToAngstrom(wlen[keepers])
    return xy_det, hkls_in, angles, dspacing, energy
def simulate_rotation_series(self, plane_data, grain_param_list,
                             eta_ranges=None,
                             ome_ranges=None,
                             ome_period=(-np.pi, np.pi),
                             chi=0., tVec_s=ct.zeros_3,
                             wavelength=None):
    """
    Simulate a rotation (omega) series on this panel for a list of grains.

    Parameters
    ----------
    plane_data : PlaneData
        Reflection table; supplies the B matrix and (optionally) wavelength.
    grain_param_list : iterable of array_like
        Each entry is a 12-vector [exp_map (3), tVec_c (3), vInv (6)].
    eta_ranges, ome_ranges : list of (min, max) tuples, optional
        Angular acceptance windows in radians; None means the full circle.
        (FIX: previously mutable list defaults — replaced with None
        sentinels; behavior is unchanged.)
    ome_period : tuple, optional
        Omega mapping period in radians.
    chi : float, optional
        Sample canting angle.
    tVec_s : array_like, optional
        Sample frame translation vector.
    wavelength : float, optional
        Beam energy in keV; None uses plane_data.wavelength as-is.

    Returns
    -------
    valid_ids, valid_hkls, valid_angs, valid_xys, ang_pixel_size : lists
        One entry per grain, filtered to reflections landing on the panel.
    """
    if eta_ranges is None:
        eta_ranges = [(-np.pi, np.pi), ]
    if ome_ranges is None:
        ome_ranges = [(-np.pi, np.pi), ]

    # grab B-matrix from plane data
    bMat = plane_data.latVecOps['B']

    # reconcile wavelength
    #   * added sanity check on exclusions here; possible to
    #   * make some reflections invalid (NaN)
    if wavelength is None:
        wavelength = plane_data.wavelength
    else:
        if plane_data.wavelength != wavelength:
            plane_data.wavelength = ct.keVToAngstrom(wavelength)
        assert not np.any(np.isnan(plane_data.getTTh())),\
            "plane data exclusions incompatible with wavelength"

    # vstacked G-vector id, h, k, l
    full_hkls = xrdutil._fetch_hkls_from_planedata(plane_data)

    """ LOOP OVER GRAINS """
    valid_ids = []
    valid_hkls = []
    valid_angs = []
    valid_xys = []
    ang_pixel_size = []
    for gparm in grain_param_list:

        # make useful parameters
        rMat_c = makeRotMatOfExpMap(gparm[:3])
        tVec_c = gparm[3:6]
        vInv_s = gparm[6:]

        # All possible bragg conditions as vstacked [tth, eta, ome]
        # for each omega solution
        angList = np.vstack(
            oscillAnglesOfHKLs(
                full_hkls[:, 1:], chi,
                rMat_c, bMat, wavelength,
                vInv=vInv_s,
            )
        )

        # filter by eta and omega ranges
        # ??? get eta range from detector?
        allAngs, allHKLs = xrdutil._filter_hkls_eta_ome(
            full_hkls, angList, eta_ranges, ome_ranges
        )
        allAngs[:, 2] = mapAngle(allAngs[:, 2], ome_period)

        # find points that fall on the panel
        det_xy, rMat_s = xrdutil._project_on_detector_plane(
            allAngs,
            self.rmat, rMat_c, chi,
            self.tvec, tVec_c, tVec_s,
            self.distortion)
        xys_p, on_panel = self.clip_to_panel(det_xy)
        valid_xys.append(xys_p)

        # grab hkls and gvec ids for this panel
        valid_hkls.append(allHKLs[on_panel, 1:])
        valid_ids.append(allHKLs[on_panel, 0])

        # reflection angles (voxel centers) and pixel size in (tth, eta)
        valid_angs.append(allAngs[on_panel, :])
        ang_pixel_size.append(self.angularPixelSize(xys_p))
    return valid_ids, valid_hkls, valid_angs, valid_xys, ang_pixel_size
def rmat(self):
    """Rotation matrix built from the exponential-map parameters in
    ``self.tilt`` (detector tilt)."""
    tilt_expmap = self.tilt
    return makeRotMatOfExpMap(tilt_expmap)
def objFuncFitGrain(gFit, gFull, gFlag,
                    detectorParams,
                    xyo_det, hkls_idx, bMat, wavelength,
                    bVec, eVec,
                    dFunc, dParams,
                    omePeriod,
                    simOnly=False, return_value_flag=return_value_flag):
    """
    Objective function for single-detector grain parameter fitting.

    Splices the fitted subset `gFit` (selected by boolean mask `gFlag`)
    into the full 12-parameter grain vector `gFull`, predicts spot
    positions, and returns either the simulated (x, y, ome) values
    (`simOnly=True`) or a residual against the measured `xyo_det`.

    Parameter layout:

    gFull[0]  = expMap_c[0]
    gFull[1]  = expMap_c[1]
    gFull[2]  = expMap_c[2]
    gFull[3]  = tVec_c[0]
    gFull[4]  = tVec_c[1]
    gFull[5]  = tVec_c[2]
    gFull[6]  = vInv_MV[0]
    gFull[7]  = vInv_MV[1]
    gFull[8]  = vInv_MV[2]
    gFull[9]  = vInv_MV[3]
    gFull[10] = vInv_MV[4]
    gFull[11] = vInv_MV[5]

    detectorParams[0]  = tiltAngles[0]
    detectorParams[1]  = tiltAngles[1]
    detectorParams[2]  = tiltAngles[2]
    detectorParams[3]  = tVec_d[0]
    detectorParams[4]  = tVec_d[1]
    detectorParams[5]  = tVec_d[2]
    detectorParams[6]  = chi
    detectorParams[7]  = tVec_s[0]
    detectorParams[8]  = tVec_s[1]
    detectorParams[9]  = tVec_s[2]
    """
    npts = len(xyo_det)

    # plug fitted subset back into the full grain-parameter vector
    # (NB: mutates the caller's gFull in place)
    gFull[gFlag] = gFit

    # remove detector distortion from the measured xy
    xy_unwarped = dFunc(xyo_det[:, :2], dParams)

    # detector quantities
    rMat_d = xfcapi.makeDetectorRotMat(detectorParams[:3])
    tVec_d = detectorParams[3:6].reshape(3, 1)
    chi = detectorParams[6]
    tVec_s = detectorParams[7:10].reshape(3, 1)

    # grain quantities
    rMat_c = xfcapi.makeRotMatOfExpMap(gFull[:3])
    tVec_c = gFull[3:6].reshape(3, 1)
    vInv_s = gFull[6:]
    vMat_s = mutil.vecMVToSymm(vInv_s)  # NOTE: Inverse of V from F = V * R

    gVec_c = np.dot(bMat, hkls_idx)  # gVecs with magnitudes in CRYSTAL frame
    gVec_s = np.dot(vMat_s, np.dot(rMat_c, gVec_c))  # stretched gVecs in SAMPLE frame
    gHat_c = mutil.unitVector(np.dot(
        rMat_c.T, gVec_s))  # unit reciprocal lattice vectors in CRYSTAL frame

    match_omes, calc_omes = matchOmegas(
        xyo_det, hkls_idx, chi, rMat_c, bMat, wavelength,
        vInv=vInv_s, beamVec=bVec, etaVec=eVec,
        omePeriod=omePeriod)

    rMat_s = xfcapi.makeOscillRotMatArray(chi, calc_omes)
    calc_xy = xfcapi.gvecToDetectorXYArray(gHat_c.T,
                                           rMat_d, rMat_s, rMat_c,
                                           tVec_d, tVec_s, tVec_c,
                                           beamVec=bVec)
    if np.any(np.isnan(calc_xy)):
        # FIX: was a Python-2-only `print` statement; the parenthesized
        # form behaves identically on Python 2 and is valid on Python 3
        print("infeasible pFull")

    # return values
    if simOnly:
        retval = np.hstack([calc_xy, calc_omes.reshape(npts, 1)])
    else:
        diff_vecs_xy = calc_xy - xy_unwarped[:, :2]
        diff_ome = xf.angularDifference(calc_omes, xyo_det[:, 2])
        retval = np.hstack([diff_vecs_xy,
                            diff_ome.reshape(npts, 1)
                            ]).flatten()
        if return_value_flag == 1:
            # scalar sum of absolute residuals
            retval = sum(abs(retval))
        elif return_value_flag == 2:
            # DOF-normalized, magnitude-weighted sum of squared residuals
            denom = npts - len(gFit) - 1.
            if denom != 0:
                nu_fac = 1. / denom
            else:
                nu_fac = 1.
            retval = nu_fac * sum(retval**2 / abs(
                np.hstack([calc_xy,
                           calc_omes.reshape(npts, 1)
                           ]).flatten()))
    return retval
def pull_spots(self, plane_data, grain_params,
               imgser_dict,
               tth_tol=0.25, eta_tol=1., ome_tol=1.,
               npdiv=2, threshold=10,
               eta_ranges=[(-np.pi, np.pi), ],
               ome_period=(-np.pi, np.pi),
               dirname='results', filename=None, output_format='text',
               save_spot_list=False,
               quiet=True, check_only=False,
               interp='nearest'):
    """
    Extract reflection info for one grain from a rotation series encoded
    as OmegaImageseries objects (one per detector panel).

    For each simulated reflection a (tth, eta, ome) patch is built; the
    corresponding pixels are pulled from the image series, connected
    components above `threshold` are labeled, and the peak closest to the
    patch center is reduced to measured angles/xy.

    Parameters
    ----------
    plane_data : PlaneData
        Reflection data fed to self.simulate_rotation_series.
    grain_params : array_like
        12-vector [exp_map (3), tVec_c (3), vInv (6)].
    imgser_dict : dict
        Maps detector_id -> OmegaImageSeries.
    tth_tol, eta_tol : float
        Patch half-widths; converted with np.radians below, so DEGREES.
    ome_tol : float
        Omega window half-width, DEGREES (delta_ome is in degrees).
    npdiv : int
        Pixel subdivision factor passed to make_reflection_patches.
    threshold : scalar
        Intensity threshold for signal detection / labeling.
    eta_ranges, ome_period : angular windows/period in radians.
    dirname, filename, output_format : output location/format
        ('text' or 'hdf5'); no files written when filename is None.
    save_spot_list : bool
        NOTE(review): unused in this implementation.
    quiet : bool
        Suppress out-of-range warnings.
    check_only : bool
        Only test patches for signal; skip fitting and writing.
    interp : str
        'nearest' or 'bilinear' patch intensity sampling.

    Returns
    -------
    compl : list of bool
        Per-patch flag: any pixel above `threshold` in the patch window.
    output : dict
        detector_id -> list of per-reflection records
        [peak_id, hkl_id, hkl, sum_int, max_int, pred_angs, meas_angs,
        meas_xy], or (ii, jj, frame_indices) tuples when check_only=True.
    """
    # grain parameters
    rMat_c = makeRotMatOfExpMap(grain_params[:3])
    tVec_c = grain_params[3:6]

    # grab omega ranges from first imageseries
    #
    # WARNING: all imageseries AND all wedges within are assumed to have
    # the same omega values; put in a check that they are all the same???
    #
    # NOTE(review): `dict.keys()[0]` is Python-2-only; keys() is a view and
    # not indexable on Python 3 — confirm before porting
    oims0 = imgser_dict[imgser_dict.keys()[0]]
    ome_ranges = [np.radians([i['ostart'], i['ostop']])
                  for i in oims0.omegawedges.wedges]

    # delta omega in DEGREES grabbed from first imageseries in the dict
    delta_ome = oims0.omega[0, 1] - oims0.omega[0, 0]

    # make omega grid for frame expansion around reference frame
    # in DEGREES
    ndiv_ome, ome_del = make_tolerance_grid(
        delta_ome, ome_tol, 1, adjust_window=True,
    )

    # generate structuring element for connected component labeling
    # (2-D when only one omega frame, 3-D when spanning frames)
    if ndiv_ome == 1:
        label_struct = ndimage.generate_binary_structure(2, 2)
    else:
        label_struct = ndimage.generate_binary_structure(3, 3)

    # simulate rotation series
    sim_results = self.simulate_rotation_series(
        plane_data, [grain_params, ],
        eta_ranges=eta_ranges,
        ome_ranges=ome_ranges,
        ome_period=ome_period)

    # patch vertex generator (global for instrument);
    # offsets to the 4 corners of a (tth, eta) tolerance box
    tol_vec = 0.5*np.radians(
        [-tth_tol, -eta_tol,
         -tth_tol, eta_tol,
         tth_tol, eta_tol,
         tth_tol, -eta_tol])

    # prepare output if requested
    if filename is not None and output_format.lower() == 'hdf5':
        this_filename = os.path.join(dirname, filename)
        writer = io.GrainDataWriter_h5(
            os.path.join(dirname, filename),
            self.write_config(), grain_params)

    # =====================================================================
    # LOOP OVER PANELS
    # =====================================================================
    iRefl = 0
    compl = []
    output = dict.fromkeys(self.detectors)
    for detector_id in self.detectors:
        # initialize text-based output writer
        if filename is not None and output_format.lower() == 'text':
            output_dir = os.path.join(
                dirname, detector_id
            )
            if not os.path.exists(output_dir):
                os.makedirs(output_dir)
            this_filename = os.path.join(
                output_dir, filename
            )
            writer = io.PatchDataWriter(this_filename)

        # grab panel
        panel = self.detectors[detector_id]
        instr_cfg = panel.config_dict(self.chi, self.tvec)
        native_area = panel.pixel_area  # pixel ref area

        # pull out the OmegaImageSeries for this panel from input dict
        ome_imgser = imgser_dict[detector_id]

        # extract simulation results
        sim_results_p = sim_results[detector_id]
        hkl_ids = sim_results_p[0][0]
        hkls_p = sim_results_p[1][0]
        ang_centers = sim_results_p[2][0]
        xy_centers = sim_results_p[3][0]
        ang_pixel_size = sim_results_p[4][0]

        # now verify that full patch falls on detector...
        # ???: strictly necessary?
        #
        # patch vertex array from sim
        nangs = len(ang_centers)
        patch_vertices = (
            np.tile(ang_centers[:, :2], (1, 4)) +
            np.tile(tol_vec, (nangs, 1))
        ).reshape(4*nangs, 2)
        ome_dupl = np.tile(
            ang_centers[:, 2], (4, 1)
        ).T.reshape(len(patch_vertices), 1)

        # find vertices that all fall on the panel
        det_xy, _ = xrdutil._project_on_detector_plane(
            np.hstack([patch_vertices, ome_dupl]),
            panel.rmat, rMat_c, self.chi,
            panel.tvec, tVec_c, self.tvec,
            panel.distortion)
        _, on_panel = panel.clip_to_panel(det_xy, buffer_edges=True)

        # all vertices must be on...
        patch_is_on = np.all(on_panel.reshape(nangs, 4), axis=1)
        patch_xys = det_xy.reshape(nangs, 4, 2)[patch_is_on]

        # re-filter...
        hkl_ids = hkl_ids[patch_is_on]
        hkls_p = hkls_p[patch_is_on, :]
        ang_centers = ang_centers[patch_is_on, :]
        xy_centers = xy_centers[patch_is_on, :]
        ang_pixel_size = ang_pixel_size[patch_is_on, :]

        # TODO: add polygon testing right here!
        # done <JVB 06/21/16>
        if check_only:
            patch_output = []
            for i_pt, angs in enumerate(ang_centers):
                # the evaluation omegas;
                # expand about the central value using tol vector
                ome_eval = np.degrees(angs[2]) + ome_del

                # ...vectorize the omega_to_frame function to avoid loop?
                frame_indices = [
                    ome_imgser.omega_to_frame(ome)[0] for ome in ome_eval
                ]
                if -1 in frame_indices:
                    if not quiet:
                        msg = """ window for (%d%d%d) falls outside omega range """ % tuple(hkls_p[i_pt, :])
                        print(msg)
                    continue
                else:
                    these_vertices = patch_xys[i_pt]
                    ijs = panel.cartToPixel(these_vertices)
                    ii, jj = polygon(ijs[:, 0], ijs[:, 1])
                    contains_signal = False
                    for i_frame in frame_indices:
                        contains_signal = contains_signal or np.any(
                            ome_imgser[i_frame][ii, jj] > threshold
                        )
                    compl.append(contains_signal)
                    patch_output.append((ii, jj, frame_indices))
        else:
            # make the tth,eta patches for interpolation
            patches = xrdutil.make_reflection_patches(
                instr_cfg, ang_centers[:, :2], ang_pixel_size,
                omega=ang_centers[:, 2],
                tth_tol=tth_tol, eta_tol=eta_tol,
                rMat_c=rMat_c, tVec_c=tVec_c,
                distortion=panel.distortion,
                npdiv=npdiv, quiet=True,
                beamVec=self.beam_vector)

            # GRAND LOOP over reflections for this panel
            patch_output = []
            for i_pt, patch in enumerate(patches):

                # strip relevant objects out of current patch
                vtx_angs, vtx_xy, conn, areas, xy_eval, ijs = patch

                prows, pcols = areas.shape
                nrm_fac = areas/float(native_area)
                nrm_fac = nrm_fac / np.min(nrm_fac)

                # grab hkl info
                hkl = hkls_p[i_pt, :]
                hkl_id = hkl_ids[i_pt]

                # edge arrays
                tth_edges = vtx_angs[0][0, :]
                delta_tth = tth_edges[1] - tth_edges[0]
                eta_edges = vtx_angs[1][:, 0]
                delta_eta = eta_edges[1] - eta_edges[0]

                # need to reshape eval pts for interpolation
                xy_eval = np.vstack([xy_eval[0].flatten(),
                                     xy_eval[1].flatten()]).T

                # the evaluation omegas;
                # expand about the central value using tol vector
                ome_eval = np.degrees(ang_centers[i_pt, 2]) + ome_del

                # ???: vectorize the omega_to_frame function to avoid loop?
                frame_indices = [
                    ome_imgser.omega_to_frame(ome)[0] for ome in ome_eval
                ]
                if -1 in frame_indices:
                    if not quiet:
                        msg = """ window for (%d%d%d) falls outside omega range """ % tuple(hkl)
                        print(msg)
                    continue
                else:
                    # initialize spot data parameters
                    # !!! maybe change these to nan to not f**k up writer
                    peak_id = -999
                    sum_int = None
                    max_int = None
                    meas_angs = None
                    meas_xy = None

                    # quick check for intensity
                    contains_signal = False
                    patch_data_raw = []
                    for i_frame in frame_indices:
                        tmp = ome_imgser[i_frame][ijs[0], ijs[1]]
                        contains_signal = contains_signal or np.any(
                            tmp > threshold
                        )
                        patch_data_raw.append(tmp)
                        pass
                    patch_data_raw = np.stack(patch_data_raw, axis=0)
                    compl.append(contains_signal)

                    if contains_signal:
                        # initialize patch data array for intensities
                        if interp.lower() == 'bilinear':
                            patch_data = np.zeros(
                                (len(frame_indices), prows, pcols))
                            for i, i_frame in enumerate(frame_indices):
                                patch_data[i] = \
                                    panel.interpolate_bilinear(
                                        xy_eval,
                                        ome_imgser[i_frame],
                                        pad_with_nans=False
                                    ).reshape(prows, pcols)  # * nrm_fac
                        elif interp.lower() == 'nearest':
                            patch_data = patch_data_raw  # * nrm_fac
                        else:
                            msg = "interpolation option " + \
                                "'%s' not understood"
                            # NOTE(review): raising a tuple is Python-2-only;
                            # on Python 3 this is a TypeError, not a
                            # RuntimeError — confirm before porting
                            raise(RuntimeError, msg % interp)

                        # now have interpolated patch data...
                        labels, num_peaks = ndimage.label(
                            patch_data > threshold, structure=label_struct
                        )
                        slabels = np.arange(1, num_peaks + 1)
                        if num_peaks > 0:
                            peak_id = iRefl
                            coms = np.array(
                                ndimage.center_of_mass(
                                    patch_data,
                                    labels=labels,
                                    index=slabels
                                )
                            )
                            if num_peaks > 1:
                                # keep only the peak closest to the patch
                                # center (expected reflection position)
                                center = np.r_[patch_data.shape]*0.5
                                center_t = np.tile(center, (num_peaks, 1))
                                com_diff = coms - center_t
                                closest_peak_idx = np.argmin(
                                    np.sum(com_diff**2, axis=1)
                                )
                            else:
                                closest_peak_idx = 0
                                pass  # end multipeak conditional
                            coms = coms[closest_peak_idx]
                            # meas_omes = \
                            #     ome_edges[0] + (0.5 + coms[0])*delta_ome
                            meas_omes = \
                                ome_eval[0] + coms[0]*delta_ome
                            meas_angs = np.hstack(
                                [tth_edges[0] + (0.5 + coms[2])*delta_tth,
                                 eta_edges[0] + (0.5 + coms[1])*delta_eta,
                                 mapAngle(
                                     np.radians(meas_omes), ome_period
                                 )
                                 ]
                            )

                            # intensities
                            #   - summed is 'integrated' over interpolated
                            #     data
                            #   - max is max of raw input data
                            sum_int = np.sum(
                                patch_data[
                                    labels == slabels[closest_peak_idx]
                                ]
                            )
                            max_int = np.max(
                                patch_data_raw[
                                    labels == slabels[closest_peak_idx]
                                ]
                            )
                            # ???: Should this only use labeled pixels?
                            # Those are segmented from interpolated data,
                            # not raw; likely ok in most cases.

                            # need MEASURED xy coords
                            gvec_c = anglesToGVec(
                                meas_angs,
                                chi=self.chi,
                                rMat_c=rMat_c,
                                bHat_l=self.beam_vector)
                            rMat_s = makeOscillRotMat(
                                [self.chi, meas_angs[2]]
                            )
                            meas_xy = gvecToDetectorXY(
                                gvec_c,
                                panel.rmat, rMat_s, rMat_c,
                                panel.tvec, self.tvec, tVec_c,
                                beamVec=self.beam_vector)
                            if panel.distortion is not None:
                                # FIXME: distortion handling
                                meas_xy = panel.distortion[0](
                                    np.atleast_2d(meas_xy),
                                    panel.distortion[1],
                                    invert=True).flatten()
                                pass
                            # FIXME: why is this suddenly necessary???
                            meas_xy = meas_xy.squeeze()
                            pass  # end num_peaks > 0
                    else:
                        patch_data = patch_data_raw
                        pass  # end contains_signal

                    # write output
                    if filename is not None:
                        if output_format.lower() == 'text':
                            writer.dump_patch(
                                peak_id, hkl_id, hkl, sum_int, max_int,
                                ang_centers[i_pt], meas_angs,
                                xy_centers[i_pt], meas_xy)
                        elif output_format.lower() == 'hdf5':
                            xyc_arr = xy_eval.reshape(
                                prows, pcols, 2
                            ).transpose(2, 0, 1)
                            writer.dump_patch(
                                detector_id, iRefl, peak_id, hkl_id, hkl,
                                tth_edges, eta_edges, np.radians(ome_eval),
                                xyc_arr, ijs, frame_indices, patch_data,
                                ang_centers[i_pt], xy_centers[i_pt],
                                meas_angs, meas_xy)
                        pass  # end conditional on write output
                    pass  # end conditional on check only
                    patch_output.append([
                        peak_id, hkl_id, hkl, sum_int, max_int,
                        ang_centers[i_pt], meas_angs, meas_xy,
                    ])
                    iRefl += 1
                pass  # end patch conditional
            pass  # end patch loop
        output[detector_id] = patch_output
        if filename is not None and output_format.lower() == 'text':
            writer.close()
        pass  # end detector loop
    if filename is not None and output_format.lower() == 'hdf5':
        writer.close()

    return compl, output
def simulate_laue_pattern(self, crystal_data,
                          minEnergy=5., maxEnergy=35.,
                          rmat_s=None, tvec_s=None,
                          grain_params=None,
                          beam_vec=None):
    """
    Simulate a Laue diffraction pattern on this detector panel.

    NOTE(review): this file contains a second, near-identical copy of this
    method elsewhere; candidates for consolidation.

    Parameters
    ----------
    crystal_data : PlaneData or 2-sequence
        Either a PlaneData instance, or a pair ``(hkls, bmat)``.
    minEnergy, maxEnergy : float or sequence of float, optional
        Beam energy cutoffs in keV; equal-length sequences give multiple
        energy bands.
    rmat_s, tvec_s : array_like, optional
        Sample rotation / translation; identity / zeros if None.
    grain_params : (n, 12) array_like, optional
        Rows of [exp_map (3), tvec_c (3), vinv (6)]; defaults to a single
        unrotated, unstrained grain at the origin.
    beam_vec : (3,) array_like, optional
        Beam propagation vector; module default if None.

    Returns
    -------
    xy_det, hkls_in, angles, dspacing, energy : ndarray
        Per-grain arrays, NaN-padded where reflections miss the panel or
        fall outside the energy band(s).
    """
    if isinstance(crystal_data, PlaneData):
        plane_data = crystal_data
        # grab the expanded list of hkls from plane_data
        hkls = np.hstack(plane_data.getSymHKLs())
        # and the unit plane normals (G-vectors) in CRYSTAL FRAME
        gvec_c = np.dot(plane_data.latVecOps['B'], hkls)
    elif len(crystal_data) == 2:
        # !!! should clean this up
        hkls = np.array(crystal_data[0])
        bmat = crystal_data[1]
        gvec_c = np.dot(bmat, hkls)
    else:
        # NOTE(review): raising a tuple is Python-2-only; on Python 3 this
        # is a TypeError, not a RuntimeError — confirm before porting
        raise (RuntimeError, 'argument list not understood')
    nhkls_tot = hkls.shape[1]

    # parse energy ranges
    # TODO: allow for spectrum parsing
    multipleEnergyRanges = False
    if hasattr(maxEnergy, '__len__'):
        assert len(maxEnergy) == len(minEnergy), \
            'energy cutoff ranges must have the same length'
        multipleEnergyRanges = True
        lmin = []
        lmax = []
        for i in range(len(maxEnergy)):
            # wavelength cutoffs are inverted w.r.t. the energy cutoffs
            lmin.append(ct.keVToAngstrom(maxEnergy[i]))
            lmax.append(ct.keVToAngstrom(minEnergy[i]))
    else:
        lmin = ct.keVToAngstrom(maxEnergy)
        lmax = ct.keVToAngstrom(minEnergy)

    # parse grain parameters kwarg
    if grain_params is None:
        grain_params = np.atleast_2d(
            np.hstack([np.zeros(6), ct.identity_6x1]))
    n_grains = len(grain_params)

    # sample rotation
    if rmat_s is None:
        rmat_s = ct.identity_3x3

    # dummy translation vector... make input
    if tvec_s is None:
        tvec_s = ct.zeros_3

    # beam vector
    if beam_vec is None:
        beam_vec = ct.beam_vec

    # =========================================================================
    # LOOP OVER GRAINS
    # =========================================================================

    # pre-allocate output arrays
    xy_det = np.nan * np.ones((n_grains, nhkls_tot, 2))
    hkls_in = np.nan * np.ones((n_grains, 3, nhkls_tot))
    angles = np.nan * np.ones((n_grains, nhkls_tot, 2))
    dspacing = np.nan * np.ones((n_grains, nhkls_tot))
    energy = np.nan * np.ones((n_grains, nhkls_tot))
    for iG, gp in enumerate(grain_params):
        rmat_c = makeRotMatOfExpMap(gp[:3])
        tvec_c = gp[3:6].reshape(3, 1)
        vInv_s = mutil.vecMVToSymm(gp[6:].reshape(6, 1))

        # stretch them: V^(-1) * R * Gc
        gvec_s_str = np.dot(vInv_s, np.dot(rmat_c, gvec_c))
        ghat_c_str = mutil.unitVector(np.dot(rmat_c.T, gvec_s_str))

        # project
        dpts = gvecToDetectorXY(ghat_c_str.T,
                                self.rmat, rmat_s, rmat_c,
                                self.tvec, tvec_s, tvec_c,
                                beamVec=beam_vec)

        # check intersections with detector plane
        canIntersect = ~np.isnan(dpts[:, 0])
        npts_in = sum(canIntersect)

        if np.any(canIntersect):
            dpts = dpts[canIntersect, :].reshape(npts_in, 2)
            dhkl = hkls[:, canIntersect].reshape(3, npts_in)

            # back to angles
            tth_eta, gvec_l = detectorXYToGvec(dpts,
                                               self.rmat, rmat_s,
                                               self.tvec, tvec_s, tvec_c,
                                               beamVec=beam_vec)
            tth_eta = np.vstack(tth_eta).T

            # warp measured points
            if self.distortion is not None:
                if len(self.distortion) == 2:
                    dpts = self.distortion[0](dpts, self.distortion[1],
                                              invert=True)
                else:
                    # NOTE(review): tuple raise — see note above
                    raise (RuntimeError,
                           "something is wrong with the distortion")

            # plane spacings and energies
            dsp = 1. / rowNorm(gvec_s_str[:, canIntersect].T)
            # Bragg's law: lambda = 2 d sin(theta)
            wlen = 2 * dsp * np.sin(0.5 * tth_eta[:, 0])

            # clip to detector panel
            _, on_panel = self.clip_to_panel(dpts, buffer_edges=True)

            if multipleEnergyRanges:
                validEnergy = np.zeros(len(wlen), dtype=bool)
                for i in range(len(lmin)):
                    in_energy_range = np.logical_and(
                        wlen >= lmin[i],
                        wlen <= lmax[i])
                    validEnergy = validEnergy | in_energy_range
                pass
            else:
                validEnergy = np.logical_and(wlen >= lmin, wlen <= lmax)
                pass

            # index for valid reflections
            keepers = np.where(np.logical_and(on_panel, validEnergy))[0]

            # assign output arrays
            xy_det[iG][keepers, :] = dpts[keepers, :]
            hkls_in[iG][:, keepers] = dhkl[:, keepers]
            angles[iG][keepers, :] = tth_eta[keepers, :]
            dspacing[iG, keepers] = dsp[keepers]
            # keV <-> Angstrom conversion is an involution (E = hc/lambda)
            energy[iG, keepers] = ct.keVToAngstrom(wlen[keepers])
            pass    # close conditional on valids
        pass    # close loop on grains
    return xy_det, hkls_in, angles, dspacing, energy
# ---------------------------------------------------------------------------
# Consistency checks: compare the pure-python transforms (xf) against the
# C-accelerated versions (xfcapi). Each check prints True when the relative
# Frobenius-norm difference is below epsf.
#
# FIX: converted Python-2-only `print` statements to the function-call form
# so this script also parses on Python 3.
# ---------------------------------------------------------------------------
tAng = np.array([0.0011546340766314521,
                 -0.0040527538387122993,
                 -0.0026221336905160211])
rMat1 = xf.makeDetectorRotMat(tAng)
rMat2 = xfcapi.makeDetectorRotMat(tAng)
print("makeDetectorRotMat results match: ",
      np.linalg.norm(rMat1 - rMat2)/np.linalg.norm(rMat1) < epsf)

oAng = np.array([-0.0011591608938627839, 0.0011546340766314521])
rMat1 = xf.makeOscillRotMat(oAng)
rMat2 = xfcapi.makeOscillRotMat(oAng)
print("makeOscillRotMat results match: ",
      np.linalg.norm(rMat1 - rMat2)/np.linalg.norm(rMat1) < epsf)

eMap = np.array([0.66931818, -0.98578066, 0.73593251])
rMat1 = xf.makeRotMatOfExpMap(eMap)
rMat2 = xfcapi.makeRotMatOfExpMap(eMap)
print("makeRotMatOfExpMap results match: ",
      np.linalg.norm(rMat1 - rMat2)/np.linalg.norm(rMat1) < epsf)

axis = np.array([0.66931818, -0.98578066, 0.73593251])
rMat1 = xf.makeBinaryRotMat(axis)
rMat2 = xfcapi.makeBinaryRotMat(axis)
print("makeBinaryRotMat results match: ",
      np.linalg.norm(rMat1 - rMat2)/np.linalg.norm(rMat1) < epsf)

bHat = np.array([0.0, 0.0, -1.0])
eta = np.array([1.0, 0.0, 0.0])
rMat1 = xf.makeEtaFrameRotMat(bHat, eta)
rMat2 = xfcapi.makeEtaFrameRotMat(bHat, eta)
print("makeEtaFrameRotMat results match: ",
      np.linalg.norm(rMat1 - rMat2)/np.linalg.norm(rMat1) < epsf)