def simulate_diffractions(grain_params):
    """Simulate diffraction spots for each grain and paint them into a
    boolean image stack.

    Relies on module-level state (pd, detector_params, panel_dims,
    pixel_size, ome_range, ome_period, nframes, nrows, ncols and the
    x/y/omega bin edges).
    """
    pbar = ProgressBar(
        widgets=['simulate_diffractions', Percentage(), Bar()],
        maxval=len(grain_params)).start()
    image_stack = np.zeros((nframes, nrows, ncols), dtype=bool)
    for i in range(len(grain_params)):
        sim_results = xrdutil.simulateGVecs(pd,
                                            detector_params,
                                            grain_params[i],
                                            panel_dims=panel_dims,
                                            pixel_pitch=pixel_size,
                                            ome_range=ome_range,
                                            ome_period=ome_period,
                                            distortion=None)
        valid_ids, valid_hkl, valid_ang, valid_xy, ang_ps = sim_results

        # map predicted spot positions and omegas onto pixel/frame indices
        j_pix = gridutil.cellIndices(x_col_edges, valid_xy[:, 0])
        i_pix = gridutil.cellIndices(y_row_edges, valid_xy[:, 1])
        k_frame = gridutil.cellIndices(ome_edges, valid_ang[:, 2])

        # assign intensity
        for j, k in enumerate(k_frame):
            image_stack[k][i_pix[j], j_pix[j]] = True
        pbar.update(i + 1)
    pbar.finish()
    # np.save('gold_cubes.npy', image_stack)
    return image_stack
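# Illustrative sketch (not from the original source): gridutil.cellIndices is
# used above to map continuous coordinates onto the index of the bin defined
# by a monotonic edge array.  Assuming simple half-open bins, a NumPy-only
# stand-in could look like this; it is a sketch of the assumed behaviour, not
# a drop-in replacement for the library routine.
import numpy as np

def _cell_indices_sketch(edges, values):
    """Return, for each value, the index of the half-open bin it falls in."""
    return np.searchsorted(np.asarray(edges), values, side='right') - 1

# e.g. _cell_indices_sketch(ome_edges, valid_ang[:, 2]) would play the role of
# k_frame in simulate_diffractions above.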
def _evaluate_diffraction_angles(exp_maps):
    panel_dims_expanded = [(-10, -10), (10, 10)]
    pbar = ProgressBar(
        widgets=['evaluate diffraction angles', Percentage(), Bar()],
        maxval=n_grains).start()
    all_angles = []
    for i in range(n_grains):
        gparams = np.hstack([exp_maps[i, :].flatten(), ref_gparams])
        sim_results = xrdutil.simulateGVecs(pd,
                                            detector_params,
                                            gparams,
                                            panel_dims=panel_dims_expanded,
                                            pixel_pitch=pixel_size,
                                            ome_range=ome_range,
                                            ome_period=ome_period,
                                            distortion=None)
        all_angles.append(sim_results[2])
        pbar.update(i + 1)
    pbar.finish()
    return all_angles
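# Note (added for clarity, not in the original source): simulateGVecs returns
# (valid_ids, valid_hkl, valid_ang, valid_xy, ang_ps) -- see the unpacking in
# simulate_diffractions above -- so sim_results[2] collected here is the array
# of simulated diffraction angles, whose third column is omega.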
def evaluate_diffraction_angles(experiment, controller=None):
    """Use simulateGVecs to generate the diffraction angles for each grain.

    Returns a list containing one array of angles per grain.

    experiment -- a bag of experiment values, including the grain specs
                  and other required parameters.
    controller -- progress-reporting object exposing start/update/finish.
    """
    # extract required data from experiment
    exp_maps = experiment.exp_maps
    plane_data = experiment.plane_data
    detector_params = experiment.detector_params
    pixel_size = experiment.pixel_size
    ome_range = experiment.ome_range
    ome_period = experiment.ome_period

    panel_dims_expanded = [(-10, -10), (10, 10)]
    subprocess = 'evaluate diffraction angles'
    controller.start(subprocess, len(exp_maps))
    all_angles = []
    ref_gparams = np.array([0., 0., 0., 1., 1., 1., 0., 0., 0.])
    for i, exp_map in enumerate(exp_maps):
        gparams = np.hstack([exp_map, ref_gparams])
        sim_results = xrdutil.simulateGVecs(plane_data,
                                            detector_params,
                                            gparams,
                                            panel_dims=panel_dims_expanded,
                                            pixel_pitch=pixel_size,
                                            ome_range=ome_range,
                                            ome_period=ome_period,
                                            distortion=None)
        all_angles.append(sim_results[2])
        controller.update(i + 1)
    controller.finish(subprocess)

    return all_angles
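# Illustrative sketch (not from the original source): the controller argument
# above only needs start(name, count), update(done) and finish(name).  A
# minimal console-printing controller satisfying that interface might look
# like this; the class name is hypothetical.
class PrintProgressController:
    def start(self, name, count):
        self._name, self._count = name, count
        print('%s: 0/%d' % (name, count))

    def update(self, done):
        print('%s: %d/%d' % (self._name, done, self._count))

    def finish(self, name):
        print('%s: done' % name)

# usage sketch:
#   all_angles = evaluate_diffraction_angles(experiment,
#                                            controller=PrintProgressController())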
def find_orientations(cfg, hkls=None, clean=False, profile=False):
    """
    Takes a config dict as input, generally a yml document

    NOTE: single cfg instance, not iterator!
    """
    # ...make this an attribute in cfg?
    analysis_id = '%s_%s' % (
        cfg.analysis_name.strip().replace(' ', '-'),
        cfg.material.active.strip().replace(' ', '-'),
        )

    # grab planeData object
    matl = cPickle.load(open('materials.cpl', 'r'))
    md = dict(zip([matl[i].name for i in range(len(matl))], matl))
    pd = md[cfg.material.active].planeData

    # make image_series
    image_series = cfg.image_series.omegaseries

    # need instrument cfg later on down...
    instr_cfg = get_instrument_parameters(cfg)
    detector_params = np.hstack([
        instr_cfg['detector']['transform']['tilt_angles'],
        instr_cfg['detector']['transform']['t_vec_d'],
        instr_cfg['oscillation_stage']['chi'],
        instr_cfg['oscillation_stage']['t_vec_s'],
        ])
    rdim = cfg.instrument.detector.pixels.size[0] \
        * cfg.instrument.detector.pixels.rows
    cdim = cfg.instrument.detector.pixels.size[1] \
        * cfg.instrument.detector.pixels.columns
    panel_dims = ((-0.5*cdim, -0.5*rdim),
                  (0.5*cdim, 0.5*rdim),
                  )

    # UGH! hard-coded distortion...
    if instr_cfg['detector']['distortion']['function_name'] == 'GE_41RT':
        distortion = (dFuncs.GE_41RT,
                      instr_cfg['detector']['distortion']['parameters'],
                      )
    else:
        distortion = None

    min_compl = cfg.find_orientations.clustering.completeness

    # start logger
    logger.info("beginning analysis '%s'", cfg.analysis_name)

    # load the eta_ome orientation maps
    eta_ome = load_eta_ome_maps(cfg, pd, image_series, hkls=hkls, clean=clean)

    ome_range = (np.min(eta_ome.omeEdges),
                 np.max(eta_ome.omeEdges))

    try:
        # are we searching the full grid of orientation space?
        qgrid_f = cfg.find_orientations.use_quaternion_grid
        quats = np.load(qgrid_f)
        logger.info("Using %s for full quaternion search", qgrid_f)
        hkl_ids = None
    except (IOError, ValueError, AttributeError):
        # or doing a seeded search?
        logger.info("Defaulting to seeded search")
        hkl_seeds = cfg.find_orientations.seed_search.hkl_seeds
        hkl_ids = [
            eta_ome.planeData.hklDataList[i]['hklID'] for i in hkl_seeds
            ]
        hklseedstr = ', '.join(
            [str(i) for i in eta_ome.planeData.hkls.T[hkl_seeds]]
            )
        logger.info(
            "Seeding search using hkls from %s: %s",
            cfg.find_orientations.orientation_maps.file,
            hklseedstr
            )
        quats = generate_orientation_fibers(
            eta_ome,
            detector_params[6],
            cfg.find_orientations.threshold,
            cfg.find_orientations.seed_search.hkl_seeds,
            cfg.find_orientations.seed_search.fiber_ndiv,
            ncpus=cfg.multiprocessing,
            )
        if save_as_ascii:
            np.savetxt(
                os.path.join(cfg.working_dir, 'trial_orientations.dat'),
                quats.T,
                fmt="%.18e", delimiter="\t"
                )
        # close conditional on grid search

    # generate the completion maps
    logger.info("Running paintgrid on %d trial orientations", quats.shape[1])
    if profile:
        logger.info("Profiling mode active, forcing ncpus to 1")
        ncpus = 1
    else:
        ncpus = cfg.multiprocessing
        logger.info(
            "%d of %d available processors requested", ncpus, mp.cpu_count()
            )
    compl = idx.paintGrid(
        quats,
        eta_ome,
        etaRange=np.radians(cfg.find_orientations.eta.range),
        omeTol=np.radians(cfg.find_orientations.omega.tolerance),
        etaTol=np.radians(cfg.find_orientations.eta.tolerance),
        omePeriod=np.radians(cfg.find_orientations.omega.period),
        threshold=cfg.find_orientations.threshold,
        doMultiProc=ncpus > 1,
        nCPUs=ncpus
        )

    if save_as_ascii:
        np.savetxt(os.path.join(cfg.working_dir, 'completeness.dat'), compl)
    else:
        np.save(
            os.path.join(
                cfg.working_dir,
                'scored_orientations_%s.npy' % analysis_id
                ),
            np.vstack([quats, compl])
            )

    ##########################################################
    ##  Simulate N random grains to get neighborhood size   ##
    ##########################################################
    if hkl_ids is not None:
        ngrains = 100
        rand_q = mutil.unitVector(np.random.randn(4, ngrains))
        rand_e = np.tile(2.*np.arccos(rand_q[0, :]), (3, 1)) \
            * mutil.unitVector(rand_q[1:, :])
        refl_per_grain = np.zeros(ngrains)
        num_seed_refls = np.zeros(ngrains)
        print('fo: hklids = ', hkl_ids)
        for i in range(ngrains):
            grain_params = np.hstack([rand_e[:, i],
                                      xf.zeroVec.flatten(),
                                      xf.vInv_ref.flatten()
                                      ])
            sim_results = simulateGVecs(
                pd,
                detector_params,
                grain_params,
                ome_range=(ome_range,),
                ome_period=(ome_range[0], ome_range[0] + 2*np.pi),
                eta_range=np.radians(cfg.find_orientations.eta.range),
                panel_dims=panel_dims,
                pixel_pitch=cfg.instrument.detector.pixels.size,
                distortion=distortion,
                )
            refl_per_grain[i] = len(sim_results[0])
            # lines below fix bug when sim_results[0] is empty
            if refl_per_grain[i] > 0:
                num_seed_refls[i] = np.sum(
                    [sum(sim_results[0] == hkl_id) for hkl_id in hkl_ids]
                    )
            else:
                num_seed_refls[i] = 0

        # min_samples = 2
        min_samples = max(
            int(np.floor(0.5*min_compl*min(num_seed_refls))),
            2
            )
        mean_rpg = int(np.round(np.average(refl_per_grain)))
    else:
        min_samples = 1
        mean_rpg = 1

    logger.info("mean number of reflections per grain is %d", mean_rpg)
    logger.info("neighborhood size estimate is %d points", min_samples)

    # cluster analysis to identify orientation blobs, the final output:
    qbar, cl = run_cluster(compl, quats, pd.getQSym(), cfg,
                           min_samples=min_samples)

    analysis_id = '%s_%s' % (
        cfg.analysis_name.strip().replace(' ', '-'),
        cfg.material.active.strip().replace(' ', '-'),
        )
    np.savetxt(
        os.path.join(
            cfg.working_dir,
            'accepted_orientations_%s.dat' % analysis_id
            ),
        qbar.T,
        fmt="%.18e", delimiter="\t"
        )
    return
# tail of a Lorentzian point-spread filter helper; its def line is not part
# of this excerpt (xv, yv and gamma come from the enclosing function)
r = np.sqrt(xv**2. + yv**2.)
lorentzianFilter = gamma**2 / (r**2 + gamma**2)
lorentzianFilter = lorentzianFilter / lorentzianFilter.sum()
return lorentzianFilter

# %%
# Calculate intercepts for diffraction events from grains
pixel_data = []
for ii in np.arange(grain_params_list.shape[0]):
    print("processing grain %d..." % ii)
    simg = simulateGVecs(plane_data, detector_params,
                         grain_params_list[ii, 3:15],
                         distortion=None)
    valid_ids, valid_hkl, valid_ang, valid_xy, ang_ps = simg
    # ax.plot(valid_xy[:, 0], valid_xy[:, 1], 'b.', ms=2)
    this_frame = sp.sparse.coo_matrix((nrows, ncols), dtype=np.uint16)
    frame_indices = cellIndices(ome_edges, np.degrees(valid_ang[:, 2]))
    i_row = cellIndices(row_edges, valid_xy[:, 1])
    j_col = cellIndices(col_edges, valid_xy[:, 0])
    pixel_data.append(np.vstack([i_row, j_col, frame_indices]))

pixd = np.hstack(pixel_data)
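# Illustrative sketch (not from the original source): each column of pixd is
# one (row, col, frame) triple for a simulated diffraction spot.  Assuming
# nframes, nrows and ncols are defined as in the functions above and that
# every index landed on the grid, a boolean image stack analogous to
# simulate_diffractions could be built directly from pixd:
image_stack = np.zeros((nframes, nrows, ncols), dtype=bool)
image_stack[pixd[2], pixd[0], pixd[1]] = True  # frame, row, col per column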