def run_fit_grains(self, config):
    self.fit_grains_results = None
    min_samples, mean_rpg = create_clustering_parameters(config,
                                                         self.ome_maps)

    # Add fit_grains config
    dialog_config = HexrdConfig().indexing_config['fit_grains']
    config.set('fitgrains:npdiv', dialog_config['npdiv'])
    config.set('fitgrains:refit', dialog_config['refit'])
    config.set('fitgrains:threshold', dialog_config['threshold'])
    config.set('fitgrains:tth_max', dialog_config['tth_max'])
    config.set('fitgrains:tolerance:tth', dialog_config['tth_tolerances'])
    config.set('fitgrains:tolerance:eta', dialog_config['eta_tolerances'])
    config.set('fitgrains:tolerance:omega', dialog_config['omega_tolerances'])

    kwargs = {
        'compl': self.completeness,
        'qfib': self.qfib,
        'qsym': config.material.plane_data.getQSym(),
        'cfg': config,
        'min_samples': min_samples,
        'compl_thresh': config.find_orientations.clustering.completeness,
        'radius': config.find_orientations.clustering.radius,
    }
    self.update_progress_text('Running clustering')
    qbar, cl = run_cluster(**kwargs)

    # Generate grains table
    num_grains = qbar.shape[1]
    if num_grains == 0:
        print('Fit Grains Complete - no grains were found')
        return

    shape = (num_grains, 21)
    grains_table = np.empty(shape)
    gw = instrument.GrainDataWriter(array=grains_table)
    for gid, q in enumerate(qbar.T):
        phi = 2 * np.arccos(q[0])
        n = xfcapi.unitRowVector(q[1:])
        grain_params = np.hstack([phi * n, const.zeros_3, const.identity_6x1])
        gw.dump_grain(gid, 1., 0., grain_params)
    gw.close()

    self.update_progress_text(
        f'Found {num_grains} grains. Running fit optimization.')
    self.fit_grains_results = fit_grains(config, grains_table,
                                         write_spots_files=False)
    print('Fit Grains Complete')
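# --- Illustrative sketch (not hexrd API): how the seeding loop above turns a
# unit quaternion into the 12 grain-fit parameters. Assumes hexrd's parameter
# layout of [exponential map (3), centroid (3), inverse stretch (6)]; the
# helper name below is hypothetical.
import numpy as np

def quat_to_seed_params(q):
    """Seed grain parameters from a scalar-first unit quaternion."""
    q = np.asarray(q, dtype=float)
    phi = 2.0 * np.arccos(np.clip(q[0], -1.0, 1.0))  # rotation angle
    axis = q[1:]
    norm = np.linalg.norm(axis)
    # arbitrary axis if q is the identity quaternion (phi == 0)
    n = axis / norm if norm > 0 else np.array([0.0, 0.0, 1.0])
    zeros_3 = np.zeros(3)                              # centroid at the origin
    identity_6x1 = np.array([1., 1., 1., 0., 0., 0.])  # identity inverse stretch
    return np.hstack([phi * n, zeros_3, identity_6x1])

# e.g. a 45-degree rotation about the z axis:
print(quat_to_seed_params([np.cos(np.pi / 8), 0.0, 0.0, np.sin(np.pi / 8)]))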
def run_fit_grains(self):
    cfg = create_indexing_config()
    write_spots = HexrdConfig().indexing_config.get('_write_spots', False)
    num_grains = self.grains_table.shape[0]
    self.update_progress_text(f'Running fit grains on {num_grains} grains')
    kwargs = {
        'cfg': cfg,
        'grains_table': self.grains_table,
        'write_spots_files': write_spots,
    }
    self.fit_grains_results = fit_grains(**kwargs)
    print('Fit Grains Complete')

    # If we wrote out the spots, let's write out the grains.out file too
    if write_spots:
        write_fit_grains_results(self.fit_grains_results, cfg)
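# --- Illustrative sketch: reading back the grains.out file written above.
# The 21-column table shape is taken from these snippets (the tests slice
# columns 3:15 as the grain parameters); the per-column meanings are
# assumptions based on hexrd's conventions.
import numpy as np

grains = np.loadtxt('grains.out', ndmin=2)
grain_ids = grains[:, 0].astype(int)   # grain ID
completeness = grains[:, 1]            # fraction of predicted spots observed
chisq = grains[:, 2]                   # fit residual
grain_params = grains[:, 3:15]         # expmap (3) + centroid (3) + inv(V) (6)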
def test_fit_grains(single_ge_include_path, test_config, grains_file_path,
                    grains_reference_file_path):
    os.chdir(str(single_ge_include_path))
    grains_table = np.loadtxt(grains_reference_file_path, ndmin=2)
    ref_grain_params = grains_table[:, 3:15]
    gresults = fit_grains(test_config, grains_table, show_progress=False,
                          ids_to_refine=None, write_spots_files=False)
    cresult = compare_grain_fits(np.vstack([i[-1] for i in gresults]),
                                 ref_grain_params,
                                 mtol=1.e-4, ctol=1.e-3, vtol=1.e-4)
    assert cresult
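# --- Illustrative reimplementation (not hexrd's compare_grain_fits) of the
# tolerance check the test above relies on: misorientation within mtol
# (radians), centroids within ctol, inverse-stretch components within vtol.
# Crystal symmetry is ignored here for brevity; the real routine would
# account for it.
import numpy as np

def expmap_to_rmat(w):
    """Rotation matrix from an exponential-map vector (Rodrigues formula)."""
    theta = np.linalg.norm(w)
    if theta < 1e-12:
        return np.eye(3)
    k = w / theta
    K = np.array([[0, -k[2], k[1]], [k[2], 0, -k[0]], [-k[1], k[0], 0]])
    return np.eye(3) + np.sin(theta) * K + (1 - np.cos(theta)) * K @ K

def grain_fits_close(fit, ref, mtol=1e-4, ctol=1e-3, vtol=1e-4):
    """True if every fitted 12-parameter row matches its reference row."""
    ok = True
    for p, r in zip(np.atleast_2d(fit), np.atleast_2d(ref)):
        dR = expmap_to_rmat(p[:3]).T @ expmap_to_rmat(r[:3])
        mis = np.arccos(np.clip((np.trace(dR) - 1) / 2, -1.0, 1.0))
        ok &= mis <= mtol                                # misorientation
        ok &= np.all(np.abs(p[3:6] - r[3:6]) <= ctol)    # centroid
        ok &= np.all(np.abs(p[6:12] - r[6:12]) <= vtol)  # inverse stretch
    return bool(ok)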
def run_fit_grains(self, config):
    min_samples, mean_rpg = create_clustering_parameters(config,
                                                         self.ome_maps)

    kwargs = {
        'compl': self.completeness,
        'qfib': self.qfib,
        'qsym': config.material.plane_data.getQSym(),
        'cfg': config,
        'min_samples': min_samples,
        'compl_thresh': config.find_orientations.clustering.completeness,
        'radius': config.find_orientations.clustering.radius,
    }
    self.update_progress_text('Running clustering')
    qbar, cl = run_cluster(**kwargs)

    # Generate grains table
    num_grains = qbar.shape[1]
    if num_grains == 0:
        QMessageBox.warning(self.parent, 'No Grains',
                            'Clustering found no grains')
        return

    shape = (num_grains, 21)
    grains_table = np.empty(shape)
    gw = instrument.GrainDataWriter(array=grains_table)
    for gid, q in enumerate(qbar.T):
        phi = 2 * np.arccos(q[0])
        n = xfcapi.unitRowVector(q[1:])
        grain_params = np.hstack([phi * n, const.zeros_3, const.identity_6x1])
        gw.dump_grain(gid, 1., 0., grain_params)
    gw.close()

    self.update_progress_text(
        f'Found {num_grains} grains. Running fit optimization.')
    self.fit_grains_results = fit_grains(config, grains_table,
                                         write_spots_files=False)
    print('Fit Grains Complete')
def execute(args, parser):
    import logging
    import os
    import sys

    import yaml

    from hexrd import config
    from hexrd.fitgrains import fit_grains

    # load the configuration settings
    cfgs = config.open(args.yml)

    # configure logging to the console:
    log_level = logging.DEBUG if args.debug else logging.INFO
    if args.quiet:
        log_level = logging.ERROR
    logger = logging.getLogger('hexrd')
    logger.setLevel(log_level)
    ch = logging.StreamHandler()
    ch.setLevel(logging.CRITICAL if args.quiet else log_level)
    cf = logging.Formatter('%(asctime)s - %(message)s', '%y-%m-%d %H:%M:%S')
    ch.setFormatter(cf)
    logger.addHandler(ch)

    # ...make this an attribute in cfg?
    analysis_id = '%s_%s' % (
        cfgs[0].analysis_name.strip().replace(' ', '-'),
        cfgs[0].material.active.strip().replace(' ', '-'),
    )

    # if find-orientations has not already been run, do so:
    quats_f = os.path.join(cfgs[0].working_dir,
                           'accepted_orientations_%s.dat' % analysis_id)
    if not os.path.exists(quats_f):
        logger.info("Missing %s, running find-orientations", quats_f)
        logger.removeHandler(ch)
        from . import findorientations
        findorientations.execute(args, parser)
        logger.addHandler(ch)

    logger.info('=== begin fit-grains ===')

    clobber = args.force or args.clean
    for cfg in cfgs:
        # prepare the analysis directory
        if os.path.exists(cfg.analysis_dir) and not clobber:
            logger.error(
                'Analysis "%s" at %s already exists.'
                ' Change yml file or specify "force"',
                cfg.analysis_name, cfg.analysis_dir)
            sys.exit()
        if not os.path.exists(cfg.analysis_dir):
            os.makedirs(cfg.analysis_dir)

        logger.info('*** begin analysis "%s" ***', cfg.analysis_name)

        # configure logging to file for this particular analysis
        logfile = os.path.join(cfg.working_dir, cfg.analysis_name,
                               'fit-grains.log')
        fh = logging.FileHandler(logfile, mode='w')
        fh.setLevel(log_level)
        ff = logging.Formatter('%(asctime)s - %(name)s - %(message)s',
                               '%m-%d %H:%M:%S')
        fh.setFormatter(ff)
        logger.info("logging to %s", logfile)
        logger.addHandler(fh)

        if args.profile:
            import cProfile as profile
            import pstats
            from io import StringIO
            pr = profile.Profile()
            pr.enable()

        # process the data
        gid_list = None
        if args.grains is not None:
            gid_list = [int(i) for i in args.grains.split(',')]

        fit_grains(
            cfg,
            force=args.force,
            clean=args.clean,
            show_progress=not args.quiet,
            ids_to_refine=gid_list,
        )

        if args.profile:
            pr.disable()
            s = StringIO()
            ps = pstats.Stats(pr, stream=s).sort_stats('cumulative')
            ps.print_stats(50)
            logger.info('%s', s.getvalue())

        # stop logging for this particular analysis
        fh.flush()
        fh.close()
        logger.removeHandler(fh)
        logger.info('*** end analysis "%s" ***', cfg.analysis_name)

    logger.info('=== end fit-grains ===')

    # stop logging to the console
    ch.flush()
    ch.close()
    logger.removeHandler(ch)
def execute(args, parser):
    import logging
    import os
    import sys

    import yaml

    from hexrd import config
    from hexrd.fitgrains import fit_grains

    # load the configuration settings
    cfgs = config.open(args.yml)

    # configure logging to the console:
    log_level = logging.DEBUG if args.debug else logging.INFO
    if args.quiet:
        log_level = logging.ERROR
    logger = logging.getLogger('hexrd')
    logger.setLevel(log_level)
    ch = logging.StreamHandler()
    ch.setLevel(logging.CRITICAL if args.quiet else log_level)
    cf = logging.Formatter('%(asctime)s - %(message)s', '%y-%m-%d %H:%M:%S')
    ch.setFormatter(cf)
    logger.addHandler(ch)

    # ...make this an attribute in cfg?
    analysis_id = '%s_%s' % (
        cfgs[0].analysis_name.strip().replace(' ', '-'),
        cfgs[0].material.active.strip().replace(' ', '-'),
    )

    # if find-orientations has not already been run, do so:
    quats_f = os.path.join(
        cfgs[0].working_dir,
        'accepted_orientations_%s.dat' % analysis_id
    )
    if not os.path.exists(quats_f):
        logger.info("Missing %s, running find-orientations", quats_f)
        logger.removeHandler(ch)
        from . import findorientations
        findorientations.execute(args, parser)
        logger.addHandler(ch)

    logger.info('=== begin fit-grains ===')

    clobber = args.force or args.clean
    for cfg in cfgs:
        # prepare the analysis directory
        if os.path.exists(cfg.analysis_dir) and not clobber:
            logger.error(
                'Analysis "%s" at %s already exists.'
                ' Change yml file or specify "force"',
                cfg.analysis_name, cfg.analysis_dir
            )
            sys.exit()
        if not os.path.exists(cfg.analysis_dir):
            os.makedirs(cfg.analysis_dir)

        logger.info('*** begin analysis "%s" ***', cfg.analysis_name)

        # configure logging to file for this particular analysis
        logfile = os.path.join(
            cfg.working_dir, cfg.analysis_name, 'fit-grains.log'
        )
        fh = logging.FileHandler(logfile, mode='w')
        fh.setLevel(log_level)
        ff = logging.Formatter(
            '%(asctime)s - %(name)s - %(message)s', '%m-%d %H:%M:%S'
        )
        fh.setFormatter(ff)
        logger.info("logging to %s", logfile)
        logger.addHandler(fh)

        if args.profile:
            import cProfile as profile
            import pstats
            from io import StringIO
            pr = profile.Profile()
            pr.enable()

        # process the data
        if args.grains is not None:
            args.grains = [int(i) for i in args.grains.split(',')]

        fit_grains(
            cfg,
            force=args.force,
            clean=args.clean,
            show_progress=not args.quiet,
            ids_to_refine=args.grains,
        )

        if args.profile:
            pr.disable()
            s = StringIO()
            ps = pstats.Stats(pr, stream=s).sort_stats('cumulative')
            ps.print_stats(50)
            logger.info('%s', s.getvalue())

        # stop logging for this particular analysis
        fh.flush()
        fh.close()
        logger.removeHandler(fh)
        logger.info('*** end analysis "%s" ***', cfg.analysis_name)

    logger.info('=== end fit-grains ===')

    # stop logging to the console
    ch.flush()
    ch.close()
    logger.removeHandler(ch)
def execute(args, parser):
    import logging
    import os
    import sys

    import yaml

    from hexrd import config
    from hexrd.fitgrains import fit_grains

    # load the configuration settings
    cfgs = config.open(args.yml)

    # if find-orientations has not already been run, do so:
    quats_f = os.path.join(cfgs[0].working_dir, 'accepted_orientations.dat')
    if not os.path.exists(quats_f):
        from . import findorientations
        findorientations.execute(args, parser)

    # configure logging to the console:
    log_level = logging.DEBUG if args.debug else logging.INFO
    if args.quiet:
        log_level = logging.ERROR
    logger = logging.getLogger('hexrd')
    logger.setLevel(log_level)
    ch = logging.StreamHandler()
    ch.setLevel(logging.CRITICAL if args.quiet else log_level)
    cf = logging.Formatter('%(asctime)s - %(message)s', '%y-%m-%d %H:%M:%S')
    ch.setFormatter(cf)
    logger.addHandler(ch)

    logger.info('=== begin fit-grains ===')

    for cfg in config.open(args.yml):
        # prepare the analysis directory
        if os.path.exists(cfg.analysis_dir) and not args.force:
            logger.error(
                'Analysis "%s" at %s already exists.'
                ' Change yml file or specify "force"',
                cfg.analysis_name, cfg.analysis_dir
            )
            sys.exit()
        if not os.path.exists(cfg.analysis_dir):
            os.makedirs(cfg.analysis_dir)

        logger.info('*** begin analysis "%s" ***', cfg.analysis_name)

        # configure logging to file for this particular analysis
        logfile = os.path.join(
            cfg.working_dir, cfg.analysis_name, 'fit-grains.log'
        )
        fh = logging.FileHandler(logfile, mode='w')
        fh.setLevel(log_level)
        ff = logging.Formatter(
            '%(asctime)s - %(name)s - %(message)s', '%m-%d %H:%M:%S'
        )
        fh.setFormatter(ff)
        logger.info("logging to %s", logfile)
        logger.addHandler(fh)

        # process the data
        fit_grains(cfg, force=args.force, show_progress=not args.quiet)

        # stop logging for this particular analysis
        fh.flush()
        fh.close()
        logger.removeHandler(fh)
        logger.info('*** end analysis "%s" ***', cfg.analysis_name)

    logger.info('=== end fit-grains ===')

    # stop logging to the console
    ch.flush()
    ch.close()
    logger.removeHandler(ch)
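# --- Distilled sketch of the console + per-analysis file logging pattern the
# execute() variants above all share. The helper names are local to this
# sketch, not hexrd API.
import logging

def console_handler(quiet=False, debug=False):
    """Console handler matching the formats used by the CLI above."""
    level = logging.DEBUG if debug else logging.INFO
    if quiet:
        level = logging.ERROR
    ch = logging.StreamHandler()
    ch.setLevel(logging.CRITICAL if quiet else level)
    ch.setFormatter(logging.Formatter('%(asctime)s - %(message)s',
                                      '%y-%m-%d %H:%M:%S'))
    return ch

def file_handler(logfile, level=logging.INFO):
    """Per-analysis file handler; one is attached and removed per run."""
    fh = logging.FileHandler(logfile, mode='w')
    fh.setLevel(level)
    fh.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(message)s',
                                      '%m-%d %H:%M:%S'))
    return fh

# usage: attach the console handler once, then a file handler per analysis
logger = logging.getLogger('hexrd')
logger.setLevel(logging.INFO)
logger.addHandler(console_handler())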
for o, val in opts:
    if o == "-p":
        prof_dict["profile"] = True
    elif o == "-n":
        prof_dict["use_nvtx"] = True
    elif o == "-c":
        try:
            max_grains = int(val)
        except ValueError:
            print("invalid grain count")
            usage()
            sys.exit(2)
    else:
        assert False, "unhandled option '{0}'".format(o)

if len(args) < 1:
    usage()
    sys.exit(2)

cfg_filename = args[0]

with profiling(**prof_dict):
    start = time.time()

    print("Using cfg file '%s'" % cfg_filename)
    config = config.open(cfg_filename)[0]
    config._cfg['multiproc'] = 1  # force sequential run

    target.fit_grains(config, force=True, grains=range(max_grains))

    elapsed = time.time() - start
    print("\nTotal processing time %.2f seconds" % elapsed)
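# --- Assumed shape of the profiling() context manager used above (its real
# implementation is not shown in this excerpt). A minimal cProfile-based
# stand-in that accepts the same keywords; use_nvtx is accepted but ignored
# in this sketch.
import cProfile
import pstats
from contextlib import contextmanager

@contextmanager
def profiling(profile=False, use_nvtx=False):
    if not profile:
        yield
        return
    pr = cProfile.Profile()
    pr.enable()
    try:
        yield
    finally:
        pr.disable()
        # report the hottest 25 calls by cumulative time
        pstats.Stats(pr).sort_stats('cumulative').print_stats(25)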
def execute(args, parser):
    # load the configuration settings
    cfgs = config.open(args.yml)

    # configure logging to the console:
    log_level = logging.DEBUG if args.debug else logging.INFO
    if args.quiet:
        log_level = logging.ERROR
    logger = logging.getLogger('hexrd')
    logger.setLevel(log_level)
    ch = logging.StreamHandler()
    ch.setLevel(logging.CRITICAL if args.quiet else log_level)
    cf = logging.Formatter('%(asctime)s - %(message)s', '%y-%m-%d %H:%M:%S')
    ch.setFormatter(cf)
    logger.addHandler(ch)

    # if find-orientations has not already been run, do so:
    quats_f = os.path.join(
        cfgs[0].working_dir,
        'accepted_orientations_%s.dat' % cfgs[0].analysis_id)
    if os.path.exists(quats_f):
        try:
            qbar = np.loadtxt(quats_f).T
        except IOError:
            raise RuntimeError(
                "error loading indexing results '%s'" % quats_f)
    else:
        logger.info("Missing %s, running find-orientations", quats_f)
        logger.removeHandler(ch)
        from hexrd.findorientations import find_orientations
        results = find_orientations(cfgs[0])
        qbar = results['qbar']
        logger.addHandler(ch)

    logger.info('=== begin fit-grains ===')

    clobber = args.force or args.clean
    for cfg in cfgs:
        # prepare the analysis directory
        if os.path.exists(cfg.analysis_dir) and not clobber:
            logger.error(
                'Analysis "%s" at %s already exists.'
                ' Change yml file or specify "force"',
                cfg.analysis_name, cfg.analysis_dir)
            sys.exit()

        # make output directories
        instr = cfg.instrument.hedm
        if not os.path.exists(cfg.analysis_dir):
            os.makedirs(cfg.analysis_dir)
            for det_key in instr.detectors:
                os.mkdir(os.path.join(cfg.analysis_dir, det_key))
        else:
            # make sure panel dirs exist under analysis dir
            for det_key in instr.detectors:
                if not os.path.exists(os.path.join(cfg.analysis_dir, det_key)):
                    os.mkdir(os.path.join(cfg.analysis_dir, det_key))

        logger.info('*** begin analysis "%s" ***', cfg.analysis_name)

        # configure logging to file for this particular analysis
        logfile = os.path.join(cfg.working_dir, cfg.analysis_name,
                               'fit-grains.log')
        fh = logging.FileHandler(logfile, mode='w')
        fh.setLevel(log_level)
        ff = logging.Formatter('%(asctime)s - %(name)s - %(message)s',
                               '%m-%d %H:%M:%S')
        fh.setFormatter(ff)
        logger.info("logging to %s", logfile)
        logger.addHandler(fh)

        if args.profile:
            import cProfile as profile
            import pstats
            from io import StringIO
            pr = profile.Profile()
            pr.enable()

        grains_filename = os.path.join(cfg.analysis_dir, 'grains.out')

        # some conditions for arg handling
        existing_analysis = os.path.exists(grains_filename)
        new_with_estimate = not existing_analysis \
            and cfg.fit_grains.estimate is not None
        new_without_estimate = not existing_analysis \
            and cfg.fit_grains.estimate is None
        force_with_estimate = args.force \
            and cfg.fit_grains.estimate is not None
        force_without_estimate = args.force and cfg.fit_grains.estimate is None

        # handle args
        if args.clean or force_without_estimate or new_without_estimate:
            # need accepted orientations from indexing in this case
            if args.clean:
                logger.info(
                    "'clean' specified; ignoring estimate and using default")
            elif force_without_estimate:
                logger.info(
                    "'force' option specified, but no initial estimate; "
                    "using default")
            try:
                gw = instrument.GrainDataWriter(grains_filename)
                for i_g, q in enumerate(qbar.T):
                    phi = 2 * np.arccos(q[0])
                    n = xfcapi.unitRowVector(q[1:])
                    grain_params = np.hstack(
                        [phi * n, cnst.zeros_3, cnst.identity_6x1])
                    gw.dump_grain(int(i_g), 1., 0., grain_params)
                gw.close()
            except IOError:
                raise RuntimeError(
                    "indexing results '%s' not found!"
                    % ('accepted_orientations_' + cfg.analysis_id + '.dat'))
        elif force_with_estimate or new_with_estimate:
            grains_filename = cfg.fit_grains.estimate
        elif existing_analysis and not (args.clean or args.force):
            raise RuntimeError(
                "fit results '%s' exist, " % grains_filename
                + "but --clean or --force options not specified")

        grains_table = np.loadtxt(grains_filename, ndmin=2)

        # process the data
        gid_list = None
        if args.grains is not None:
            gid_list = [int(i) for i in args.grains.split(',')]

        cfg.fit_grains.qbar = qbar
        fit_results = fit_grains(
            cfg,
            grains_table,
            show_progress=not args.quiet,
            ids_to_refine=gid_list,
        )

        if args.profile:
            pr.disable()
            s = StringIO()
            ps = pstats.Stats(pr, stream=s).sort_stats('cumulative')
            ps.print_stats(50)
            logger.info('%s', s.getvalue())

        # stop logging for this particular analysis
        fh.flush()
        fh.close()
        logger.removeHandler(fh)
        logger.info('*** end analysis "%s" ***', cfg.analysis_name)
        write_results(fit_results, cfg, grains_filename)

    logger.info('=== end fit-grains ===')

    # stop logging to the console
    ch.flush()
    ch.close()
    logger.removeHandler(ch)
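# --- Distilled sketch of the seed-table selection branches in execute()
# above (illustrative only; the function and its arguments are local to this
# sketch, not hexrd API).
def seed_source(existing, estimate, force, clean):
    """Decide where the initial grains table comes from."""
    if clean or ((force or not existing) and estimate is None):
        return 'indexing'   # rebuild seed table from accepted orientations
    if (force or not existing) and estimate is not None:
        return 'estimate'   # use the file named by cfg.fit_grains.estimate
    # existing results and neither --clean nor --force:
    raise RuntimeError("fit results exist; pass --clean or --force")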
help="stretch threshold", type=float, default=1.e-4) args = parser.parse_args() cfg_file = args.cfg_file gt_ref = args.gt_ref mtol = args.misorientation ctol = args.centroid vtol = args.stretch # load the config object cfg = config.open(cfg_file)[0] grains_table = np.loadtxt(gt_ref, ndmin=2) ref_grain_params = grains_table[:, 3:15] gresults = fit_grains(cfg, grains_table, show_progress=False, ids_to_refine=None, write_spots_files=False) cresult = compare_grain_fits(np.vstack([i[-1] for i in gresults]), ref_grain_params, mtol=mtol, ctol=ctol, vtol=vtol) if cresult: print("test passed") else: print("test failed")