Example #1
def get_data(cfg, show_progress=False, force=False, clean=False):
    # TODO: this should be refactored somehow to avoid initialize_experiment
    # and avoid using the old reader. Also, the detector is not used here.
    pd, reader, detector = initialize_experiment(cfg)
    reader = get_frames(reader, cfg, show_progress, force, clean)

    instrument_cfg = get_instrument_parameters(cfg)
    detector_params = get_detector_parameters(instrument_cfg)
    saturation_level = get_saturation_level(instrument_cfg)
    distortion = get_distortion_correction(instrument_cfg)
    set_planedata_exclusions(cfg, detector, pd)
    pkwargs = {
        'distortion': distortion,
        'omega_start': cfg.image_series.omega.start,
        'omega_step': cfg.image_series.omega.step,
        'omega_stop': cfg.image_series.omega.stop,
        'eta_range': np.radians(cfg.find_orientations.eta.range),
        'omega_period': np.radians(cfg.find_orientations.omega.period),
        'tth_tol': cfg.fit_grains.tolerance.tth,
        'eta_tol': cfg.fit_grains.tolerance.eta,
        'omega_tol': cfg.fit_grains.tolerance.omega,
        'panel_buffer': cfg.fit_grains.panel_buffer,
        'nrows': instrument_cfg['detector']['pixels']['rows'],
        'ncols': instrument_cfg['detector']['pixels']['columns'],
        'pixel_pitch': instrument_cfg['detector']['pixels']['size'],
        'npdiv': cfg.fit_grains.npdiv,
        'threshold': cfg.fit_grains.threshold,
        'spots_stem': os.path.join(cfg.analysis_dir, 'spots_%05d.out'),
        'plane_data': pd,
        'detector_params': detector_params,
        'saturation_level': saturation_level
    }
    return reader, pkwargs
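A quick sketch of how the omega entries in pkwargs map frames to rotation
angles: one bin per frame from omega_start to omega_stop in steps of
omega_step. The numeric values below are hypothetical, standing in for
cfg.image_series.omega.*:

import numpy as np

# Hypothetical scan parameters standing in for cfg.image_series.omega.*
omega_start, omega_step, omega_stop = 0.0, 0.25, 360.0

# One omega bin per frame; frame i covers [edges[i], edges[i+1])
n_frames = int(round((omega_stop - omega_start) / omega_step))
edges = omega_start + omega_step * np.arange(n_frames + 1)
print(n_frames, edges[0], edges[-1])  # 1440 0.0 360.0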
Example #3
    def load_data(self):
        '''
            Read the config file and load appropriate GE2
            frames.
        '''
        cfg    = self.cfg
        logger = self.logger
        # process the data
        pd, reader, detector = initialize_experiment(cfg)
        n_frames = reader.getNFrames()
        logger.info("reading %d frames of data, storing values > %.1f", n_frames, cfg.fit_grains.threshold)
        frame_list = []
        for i in range(n_frames):
            frame = reader.read()
            frame_list.append(frame)

        frame_list = np.array(frame_list)
        frame_list[frame_list < cfg.fit_grains.threshold] = 0
        int_scale_factor = float(2**14)/float(np.amax(frame_list))
        frame_list = frame_list*int_scale_factor
        write_image('slice.png', frame_list[100, 100:400, 1350:1650], vmin=0)

        self.ge_data          = frame_list
        self.int_scale_factor = int_scale_factor

        return frame_list
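The threshold-then-rescale step is the heart of this method; here is the
same logic on a synthetic stack, self-contained (the threshold value is
hypothetical):

import numpy as np

# Synthetic frame stack standing in for the GE2 frames read above
frames = np.random.poisson(5.0, size=(4, 64, 64)).astype(float)
threshold = 10.0

frames[frames < threshold] = 0                 # suppress low-intensity noise
scale = float(2**14) / float(np.amax(frames))  # rescale peak intensity to ~16k
frames *= scale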
Example #4
def find_orientations(cfg, hkls=None, profile=False):
    """Takes a config dict as input, generally a yml document"""

    # a goofy call, could be replaced with two more targeted calls
    pd, reader, detector = initialize_experiment(cfg)

    logger.info("beginning analysis '%s'", cfg.analysis_name)

    # load the eta_ome orientation maps
    eta_ome = load_eta_ome_maps(cfg, pd, reader, detector, hkls)

    try:
        # are we searching the full grid of orientation space?
        qgrid_f = cfg.find_orientations.use_quaternion_grid
        quats = np.loadtxt(qgrid_f).T
        logger.info("Using %s for full quaternion search", qgrid_f)
    except (IOError, ValueError):
        # or doing a seeded search?
        logger.info("Defaulting to seeded search")
        hkl_seeds = cfg.find_orientations.seed_search.hkl_seeds
        hklseedstr = ', '.join(
            [str(i) for i in eta_ome.planeData.hkls.T[hkl_seeds]])
        logger.info("Seeding search using hkls from %s: %s",
                    cfg.find_orientations.orientation_maps.file, hklseedstr)
        quats = generate_orientation_fibers(
            eta_ome, cfg.find_orientations.threshold,
            cfg.find_orientations.seed_search.hkl_seeds,
            cfg.find_orientations.seed_search.fiber_ndiv)
        np.savetxt(os.path.join(cfg.working_dir, 'trial_orientations.dat'),
                   quats.T,
                   fmt="%.18e",
                   delimiter="\t")

    # generate the completion maps
    logger.info("Running paintgrid on %d trial orientations", (quats.shape[1]))
    if profile:
        logger.info("Profiling mode active, forcing ncpus to 1")
        ncpus = 1
    else:
        ncpus = cfg.multiprocessing
        logger.info("%d of %d available processors requested", ncpus,
                    mp.cpu_count())
    compl = idx.paintGrid(
        quats,
        eta_ome,
        etaRange=np.radians(cfg.find_orientations.eta.range),
        omeTol=np.radians(cfg.find_orientations.omega.tolerance),
        etaTol=np.radians(cfg.find_orientations.eta.tolerance),
        omePeriod=np.radians(cfg.find_orientations.omega.period),
        threshold=cfg.find_orientations.threshold,
        doMultiProc=ncpus > 1,
        nCPUs=ncpus)
    np.savetxt(os.path.join(cfg.working_dir, 'completeness.dat'), compl)

    # cluster analysis to identify orientation blobs, the final output:
    qbar, cl = run_cluster(compl, quats, pd.getQSym(), cfg)
    np.savetxt(os.path.join(cfg.working_dir, 'accepted_orientations.dat'),
               qbar.T,
               fmt="%.18e",
               delimiter="\t")
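The try/except above is control flow: a missing or malformed
use_quaternion_grid file drops the run into the seeded search. A minimal
sketch of that fallback pattern (the helper name is hypothetical):

import numpy as np

def load_quaternion_grid(path):
    """Return (4, n) trial quaternions from a grid file, or None to seed."""
    try:
        return np.loadtxt(path).T
    except (IOError, ValueError):
        return None

quats = load_quaternion_grid('no_such_grid.dat')
print(quats is None)  # True -> fall back to the seeded search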
Example #5
    def load_data(self):
        '''
            Read the config file and load appropriate GE2
            frames.
        '''
        cfg = self.cfg
        logger = self.logger
        omega_start = self.omega_start

        # process the data
        pd, reader, detector = initialize_experiment(cfg)
        n_frames = reader.getNFrames()
        logger.info("Reading %d frames of data, storing values > %.1f",
                    n_frames,
                    cfg.get('pre_processing')['ge_reader_threshold'])
        # Loop over all frames and save them in a 3D array
        frame_list = []
        for i in range(n_frames):
            frame = reader.read()
            #omega = reader.getFrameOmega()
            frame_list.append(frame)
        # Turn the frame array into a Numpy array
        frame_list = np.array(frame_list)
        # Remove low intensity noise
        frame_list[
            frame_list < cfg.get('pre_processing')['ge_reader_threshold']] = 0
        # Scale the intensity to 16k
        int_scale_factor = float(2**14) / float(np.amax(frame_list))
        frame_list = frame_list * int_scale_factor
        if cfg.get('pre_processing')['print_diag_images']:
            # Flatten along omega and write the frame array to an image
            write_image('slice.png', np.amax(frame_list, axis=0), vmin=0)
        # Split the frame array into chunks for multiprocessing
        num_cores = cfg.multiprocessing
        # np.array_split below needs an integer number of chunks
        num_cores = int(np.round(
            np.min([num_cores,
                    np.round(np.shape(frame_list)[0] / 60.)])))

        frame_list_split = np.array_split(frame_list, num_cores, axis=0)
        ge_data_ang_red = ()
        omega_start.append(0)
        for array_piece in frame_list_split:
            ge_data_ang_red = ge_data_ang_red + (array_piece, )
            omega_start.append(np.shape(array_piece)[0])

        omega_start.pop()
        omega_start = np.cumsum(omega_start)
        logger.info("Finished reading frames")

        logger.info("Split data for parallel processing at omega = %s",
                    omega_start)

        self.ge_data = frame_list
        self.int_scale_factor = int_scale_factor
        self.ge_data_ang_red = ge_data_ang_red
        self.omega_start = omega_start
        self.input_data_shape = np.shape(frame_list)

        return frame_list
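The append/pop/cumsum bookkeeping above just records the starting frame
index of each chunk; an equivalent, self-contained sketch:

import numpy as np

frames = np.zeros((100, 8, 8))   # stand-in for the frame stack
chunks = np.array_split(frames, 3, axis=0)

# Starting frame index of each chunk = cumulative sum of preceding sizes
sizes = [c.shape[0] for c in chunks]
offsets = np.cumsum([0] + sizes[:-1])
print(sizes, offsets)            # [34, 33, 33] [ 0 34 67]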
Example #6
    def load_data(self):
        '''
            Read the config file and load appropriate GE2
            frames.
        '''
        cfg          = self.cfg
        logger       = self.logger
        omega_start  = self.omega_start

        # process the data
        pd, reader, detector = initialize_experiment(cfg)
        n_frames = reader.getNFrames()
        logger.info("Reading %d frames of data, storing values > %.1f",
                    n_frames, cfg.get('pre_processing')['ge_reader_threshold'])
        # Loop over all frames and save them in a 3D array
        frame_list = []
        for i in range(n_frames):
            frame = reader.read()
            #omega = reader.getFrameOmega()
            frame_list.append(frame)
        # Turn the frame array into a Numpy array
        frame_list = np.array(frame_list)
        # Remove low intensity noise
        frame_list[frame_list < cfg.get('pre_processing')['ge_reader_threshold']] = 0
        # Scale the intensity to 16k
        int_scale_factor = float(2**14)/float(np.amax(frame_list))
        frame_list = frame_list*int_scale_factor
        if cfg.get('pre_processing')['print_diag_images']:
            # Flatten along omega and write the frame array to an image
            write_image('slice.png', np.amax(frame_list, axis=0), vmin=0)
        # Split the frame array into chunks for multiprocessing
        num_cores = cfg.multiprocessing
        # np.array_split below needs an integer number of chunks
        num_cores = int(np.round(np.min([num_cores, np.round(np.shape(frame_list)[0]/60.)])))

        frame_list_split = np.array_split(frame_list, num_cores, axis=0)
        ge_data_ang_red = ()
        omega_start.append(0)
        for array_piece in frame_list_split:
            ge_data_ang_red = ge_data_ang_red + (array_piece,)
            omega_start.append(np.shape(array_piece)[0])

        omega_start.pop()
        omega_start = np.cumsum(omega_start)
        logger.info("Finished reading frames")

        logger.info("Split data for parallel processing at omega = %s", omega_start)

        self.ge_data          = frame_list
        self.int_scale_factor = int_scale_factor
        self.ge_data_ang_red  = ge_data_ang_red
        self.omega_start      = omega_start
        self.input_data_shape = np.shape(frame_list)

        return frame_list
Example #8
File: fitgrains.py  Project: praxes/hexrd
def get_data(cfg, show_progress=False, force=False, clean=False):
    # TODO: this should be refactored somehow to avoid initialize_experiment
    # and avoid using the old reader. Also, the detector is not used here.
    pd, reader, detector = initialize_experiment(cfg)
    if cfg.fit_grains.fit_only:
        reader = None
    else:
        reader = get_frames(reader, cfg, show_progress, force, clean)

    instrument_cfg = get_instrument_parameters(cfg)
    detector_params = get_detector_parameters(instrument_cfg)
    saturation_level = get_saturation_level(instrument_cfg)
    distortion = get_distortion_correction(instrument_cfg)
    set_planedata_exclusions(cfg, detector, pd)
    # HANDLE OMEGA STOP
    if cfg.image_series.omega.stop is None:
        assert cfg.image_series.images.stop is not None, \
            "Must specify stop point, either in omega or image"
        omega_stop = cfg.image_series.omega.start + \
            cfg.image_series.omega.step*cfg.image_series.images.stop
    else:
        omega_stop = cfg.image_series.omega.stop
    pkwargs = {
        'detector_params': detector_params,
        'distortion': distortion,
        'eta_range': np.radians(cfg.find_orientations.eta.range),
        'eta_tol': cfg.fit_grains.tolerance.eta,
        'fit_only': cfg.fit_grains.fit_only,
        'ncols': instrument_cfg['detector']['pixels']['columns'],
        'npdiv': cfg.fit_grains.npdiv,
        'nrows': instrument_cfg['detector']['pixels']['rows'],
        'omega_period': np.radians(cfg.find_orientations.omega.period),
        'omega_start': cfg.image_series.omega.start,
        'omega_step': cfg.image_series.omega.step,
        'omega_stop': omega_stop,
        'omega_tol': cfg.fit_grains.tolerance.omega,
        'overlap_table': os.path.join(cfg.analysis_dir, 'overlap_table.npz'),
        'output_hdf5': cfg.fit_grains.output_hdf5,
        'panel_buffer': cfg.fit_grains.panel_buffer,
        'pixel_pitch': instrument_cfg['detector']['pixels']['size'],
        'plane_data': pd,
        'refit_tol': cfg.fit_grains.refit,
        'saturation_level': saturation_level,
        'spots_stem': os.path.join(cfg.analysis_dir, 'spots_%05d.out'),
        'threshold': cfg.fit_grains.threshold,
        'tth_tol': cfg.fit_grains.tolerance.tth,
    }
    return reader, pkwargs
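The HANDLE OMEGA STOP branch derives the stop angle from the image count
when omega.stop is absent; the same rule as a small standalone function
(the function name is hypothetical):

def derive_omega_stop(omega_start, omega_step, omega_stop, images_stop):
    """Mirror of the HANDLE OMEGA STOP branch above."""
    if omega_stop is None:
        assert images_stop is not None, \
            "Must specify stop point, either in omega or image"
        return omega_start + omega_step * images_stop
    return omega_stop

print(derive_omega_stop(0.0, 0.25, None, 1440))  # -> 360.0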
Example #9
    def __init__(self, config, logger, datafile):
        self.cfg = config  # heXRD config object
        self.logger = logger  # logger
        self.ms_datafile = datafile  # microstructural data file
        self.ms_grid = []  # (N, 3) array of X, Y, Z positions in microns
        self.ms_material_ids = []  # (N) Array of material present at X, Y, Z
        self.ms_quaternions = []  # (N, 4) Orientation at X, Y, Z in quaternions
        self.ms_lat_strains = []  # (N, 6) Lattice strain at X, Y, Z
        self.synth_angles = []  # Two-theta, eta, omega from virtual diffraction
        self.calc_xyo = []  # X, Y, projected on the detector and omega

        # Initialize detector and reader from the experiment. Really only detector is needed.
        pd, reader, detector = initialize_experiment(config)
        self.detector = detector
        self.reader = reader
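The quaternion field stored here is an (N, 4) array of unit quaternions;
a small sketch of building such an array (shapes follow the attribute
comments above, the data is random):

import numpy as np

N = 10
q = np.random.randn(N, 4)
q /= np.linalg.norm(q, axis=1, keepdims=True)  # one unit quaternion per row
print(q.shape, np.allclose(np.linalg.norm(q, axis=1), 1.0))  # (10, 4) True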
Example #10
def execute(args, parser):
    import logging
    import os
    import sys

    import yaml

    from hexrd import config


    # load the configuration settings
    cfgs = config.open(args.yml)

    # configure logging to the console:
    log_level = logging.DEBUG if args.debug else logging.INFO
    logger = logging.getLogger('hexrd')
    logger.setLevel(log_level)
    ch = logging.StreamHandler()
    ch.setLevel(log_level)
    cf = logging.Formatter('%(asctime)s - %(message)s', '%y-%m-%d %H:%M:%S')
    ch.setFormatter(cf)
    logger.addHandler(ch)

    logger.info('=== begin cake-data ===')

    for cfg in cfgs:
        logger.info('*** begin caking for analysis "%s" ***', cfg.analysis_name)

        # configure logging to file for this particular analysis
        logfile = os.path.join(
            cfg.working_dir,
            'cake-data.log'
            )
        fh = logging.FileHandler(logfile, mode='w')
        fh.setLevel(log_level)
        ff = logging.Formatter(
                '%(asctime)s - %(name)s - %(message)s',
                '%m-%d %H:%M:%S'
                )
        fh.setFormatter(ff)
        logger.info("logging to %s", logfile)
        logger.addHandler(fh)

        # process the data
        pd, reader, detector = initialize_experiment(cfg)
        # Load frames, generate "max over all" frame and bin the data
        show_progress = True
        reader = process_cake(reader, detector, cfg, show_progress)

        # stop logging for this particular analysis
        fh.flush()
        fh.close()
        logger.removeHandler(fh)

        logger.info('*** end caking for analysis "%s" ***', cfg.analysis_name)

    logger.info('=== end cake-data ===')
    # stop logging to the console
    ch.flush()
    ch.close()
    logger.removeHandler(ch)
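The add/flush/close/remove cycle on the file handler is what keeps each
analysis writing to its own log; the pattern in isolation:

import logging

logger = logging.getLogger('demo')
logger.setLevel(logging.INFO)

for name in ('run_a', 'run_b'):
    fh = logging.FileHandler('%s.log' % name, mode='w')
    fh.setFormatter(logging.Formatter('%(asctime)s - %(message)s'))
    logger.addHandler(fh)
    logger.info('processing %s', name)  # captured only by this run's file
    fh.flush()
    fh.close()
    logger.removeHandler(fh)            # detach before the next analysis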
Example #11
def find_orientations(cfg, hkls=None, clean=False, profile=False):
    """
    Takes a config dict as input, generally a yml document

    NOTE: single cfg instance, not iterator!
    """

    # ...make this an attribute in cfg?
    analysis_id = '%s_%s' % (
        cfg.analysis_name.strip().replace(' ', '-'),
        cfg.material.active.strip().replace(' ', '-')
    )

    # a goofy call, could be replaced with two more targeted calls
    pd, reader, detector = initialize_experiment(cfg)

    # need instrument cfg later on down...
    instr_cfg = get_instrument_parameters(cfg)
    detector_params = np.hstack([
        instr_cfg['detector']['transform']['tilt_angles'],
        instr_cfg['detector']['transform']['t_vec_d'],
        instr_cfg['oscillation_stage']['chi'],
        instr_cfg['oscillation_stage']['t_vec_s'],
        ])
    rdim = cfg.instrument.detector.pixels.size[0]*cfg.instrument.detector.pixels.rows
    cdim = cfg.instrument.detector.pixels.size[1]*cfg.instrument.detector.pixels.columns
    panel_dims = ((-0.5*cdim, -0.5*rdim),
                  ( 0.5*cdim,  0.5*rdim),
                  )
    # UGH! hard-coded distortion...
    if instr_cfg['detector']['distortion']['function_name'] == 'GE_41RT':
        distortion = (dFuncs.GE_41RT,
                      instr_cfg['detector']['distortion']['parameters'],
                      )
    else:
        distortion = None

    # start logger
    logger.info("beginning analysis '%s'", cfg.analysis_name)

    # load the eta_ome orientation maps
    eta_ome = load_eta_ome_maps(cfg, pd, reader, detector, hkls=hkls, clean=clean)

    ome_range = (np.min(eta_ome.omeEdges),
                 np.max(eta_ome.omeEdges)
                 )
    try:
        # are we searching the full grid of orientation space?
        qgrid_f = cfg.find_orientations.use_quaternion_grid
        quats = np.load(qgrid_f).T
        logger.info("Using %s for full quaternion search", qgrid_f)
        hkl_ids = None
    except (IOError, ValueError, AttributeError):
        # or doing a seeded search?
        logger.info("Defaulting to seeded search")
        hkl_seeds = cfg.find_orientations.seed_search.hkl_seeds
        hkl_ids = [eta_ome.planeData.hklDataList[i]['hklID'] for i in hkl_seeds]
        hklseedstr = ', '.join(
            [str(i) for i in eta_ome.planeData.hkls.T[hkl_seeds]]
            )
        logger.info(
            "Seeding search using hkls from %s: %s",
            cfg.find_orientations.orientation_maps.file,
            hklseedstr
            )
        quats = generate_orientation_fibers(
            eta_ome,
            detector_params[6],
            cfg.find_orientations.threshold,
            cfg.find_orientations.seed_search.hkl_seeds,
            cfg.find_orientations.seed_search.fiber_ndiv,
            ncpus=cfg.multiprocessing
            )
        if save_as_ascii:
            np.savetxt(
                os.path.join(cfg.working_dir, 'trial_orientations.dat'),
                quats.T,
                fmt="%.18e",
                delimiter="\t"
                )
            pass
        pass # close conditional on grid search

    # generate the completion maps
    logger.info("Running paintgrid on %d trial orientations", quats.shape[1])
    if profile:
        logger.info("Profiling mode active, forcing ncpus to 1")
        ncpus = 1
    else:
        ncpus = cfg.multiprocessing
        logger.info(
            "%d of %d available processors requested", ncpus, mp.cpu_count()
            )
    compl = idx.paintGrid(
        quats,
        eta_ome,
        etaRange=np.radians(cfg.find_orientations.eta.range),
        omeTol=np.radians(cfg.find_orientations.omega.tolerance),
        etaTol=np.radians(cfg.find_orientations.eta.tolerance),
        omePeriod=np.radians(cfg.find_orientations.omega.period),
        threshold=cfg.find_orientations.threshold,
        doMultiProc=ncpus > 1,
        nCPUs=ncpus
        )

    if save_as_ascii:
        np.savetxt(os.path.join(cfg.working_dir, 'completeness.dat'), compl)
    else:
        np.save(
            os.path.join(cfg.working_dir,
                         'scored_orientations_%s.npy' % analysis_id),
            np.vstack([quats, compl])
        )

    ##########################################################
    ##   Simulate N random grains to get neighborhood size  ##
    ##########################################################
    if hkl_ids is not None:
        ngrains = 100
        rand_q = mutil.unitVector(np.random.randn(4, ngrains))
        rand_e = np.tile(2.*np.arccos(rand_q[0, :]), (3, 1)) \
          * mutil.unitVector(rand_q[1:, :])
        refl_per_grain = np.zeros(ngrains)
        num_seed_refls = np.zeros(ngrains)
        for i in range(ngrains):
            grain_params = np.hstack([rand_e[:, i],
                                      xf.zeroVec.flatten(),
                                      xf.vInv_ref.flatten()
                                      ])

            eta_range = np.radians(cfg.find_orientations.eta.range)
            pixel_pitch = cfg.instrument.detector.pixels.size
            sim_results = xrdutil.simulateGVecs(
                pd,
                detector_params,
                grain_params,
                ome_range=(ome_range,),
                ome_period=(ome_range[0], ome_range[0]+2*np.pi),
                eta_range=eta_range,
                panel_dims=panel_dims,
                pixel_pitch=pixel_pitch,
                distortion=distortion,
            )
            refl_per_grain[i] = len(sim_results[0])
            num_seed_refls[i] = np.sum([sum(sim_results[0] == hkl_id) for hkl_id in hkl_ids])
            pass

        cfg_completeness = cfg.find_orientations.clustering.completeness
        min_samples = max(np.floor(cfg_completeness*np.average(num_seed_refls)), 2)
        mean_rpg = int(np.round(np.average(refl_per_grain)))
    else:
        min_samples = 1
        mean_rpg = 1

    logger.info("mean number of reflections per grain is %d", mean_rpg)
    logger.info("neighborhood size estimate is %d points", min_samples)

    # cluster analysis to identify orientation blobs, the final output:
    qbar, cl = run_cluster(compl, quats, pd.getQSym(), cfg, min_samples=min_samples)

    np.savetxt(
        os.path.join(cfg.working_dir, 'accepted_orientations_%s.dat' % analysis_id),
        qbar.T,
        fmt="%.18e",
        delimiter="\t"
        )
    return
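The neighborhood-size estimate boils down to one line: the average number
of seed reflections per simulated grain, scaled by the clustering
completeness threshold and floored at 2. A numeric sketch with
hypothetical counts:

import numpy as np

# Hypothetical per-grain seed-reflection counts from the simulation above
num_seed_refls = np.array([24, 30, 27, 21])
completeness = 0.85  # stand-in for cfg.find_orientations.clustering.completeness

min_samples = max(int(np.floor(completeness * np.average(num_seed_refls))), 2)
print(min_samples)  # -> 21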
Example #12
def find_orientations(cfg, hkls=None):
    """Takes a config dict as input, generally a yml document"""

    # a goofy call, could be replaced with two more targeted calls
    pd, reader, detector = initialize_experiment(cfg)

    logger.info("beginning analysis '%s'", cfg.analysis_name)

    # load the eta_ome orientation maps
    eta_ome = load_eta_ome_maps(cfg, pd, reader, detector, hkls)

    try:
        # are we searching the full grid of orientation space?
        qgrid_f = cfg.find_orientations.use_quaternion_grid
        quats = np.loadtxt(qgrid_f)
        logger.info("Using %s for full quaternion search", qgrid_f)
    except (IOError, ValueError):
        # or doing a seeded search?
        logger.info("Defaulting to seeded search")
        hkl_seeds = cfg.find_orientations.seed_search.hkl_seeds
        hklseedstr = ', '.join(
            [str(i) for i in eta_ome.planeData.hkls.T[hkl_seeds]]
            )
        logger.info(
            "Seeding search using hkls from %s: %s",
            cfg.find_orientations.orientation_maps.file,
            hklseedstr
            )
        quats = generate_orientation_fibers(
            eta_ome,
            cfg.find_orientations.threshold,
            cfg.find_orientations.seed_search.hkl_seeds,
            cfg.find_orientations.seed_search.fiber_ndiv
            )
        np.savetxt(
            os.path.join(cfg.working_dir, 'trial_orientations.dat'),
            quats.T,
            fmt="%.18e",
            delimiter="\t"
            )

    # generate the completion maps
    logger.info("Running paintgrid on %d trial orientations", (quats.shape[1]))
    ncpus = cfg.multiprocessing
    compl = idx.paintGrid(
        quats,
        eta_ome,
        etaRange=np.radians(cfg.find_orientations.eta.range),
        omeTol=np.radians(cfg.find_orientations.omega.tolerance),
        etaTol=np.radians(cfg.find_orientations.eta.tolerance),
        omePeriod=np.radians(cfg.find_orientations.omega.period),
        threshold=cfg.find_orientations.threshold,
        doMultiProc=ncpus > 1,
        nCPUs=ncpus
        )
    np.savetxt(os.path.join(cfg.working_dir, 'completeness.dat'), compl)

    # cluster analysis to identify orientation blobs, the final output:
    qbar, cl = run_cluster(compl, quats, pd.getQSym(), cfg)
    np.savetxt(
        os.path.join(cfg.working_dir, 'accepted_orientations.dat'),
        qbar.T,
        fmt="%.18e",
        delimiter="\t"
        )

    # do the peak extraction now?
    if cfg.find_orientations.extract_measured_g_vectors:
        raise NotImplementedError('TODO: implement extract gvecs')
Example #13
def execute(args, parser):
    import logging
    import os
    import sys

    import yaml

    from hexrd import config
    from hexrd.cacheframes import cache_frames


    # load the configuration settings
    cfgs = config.open(args.yml)

    # configure logging to the console:
    log_level = logging.DEBUG if args.debug else logging.INFO
    if args.quiet:
        log_level = logging.ERROR
    logger = logging.getLogger('hexrd')
    logger.setLevel(log_level)
    ch = logging.StreamHandler()
    ch.setLevel(logging.CRITICAL if args.quiet else log_level)
    cf = logging.Formatter('%(asctime)s - %(message)s', '%y-%m-%d %H:%M:%S')
    ch.setFormatter(cf)
    logger.addHandler(ch)

    logger.info('=== begin cache-frames ===')

    for cfg in cfgs:
        logger.info('*** begin caching for analysis "%s" ***', cfg.analysis_name)

        # configure logging to file for this particular analysis
        logfile = os.path.join(
            cfg.working_dir,
            cfg.analysis_name,
            'cache-frames.log'
            )
        fh = logging.FileHandler(logfile, mode='w')
        fh.setLevel(log_level)
        ff = logging.Formatter(
                '%(asctime)s - %(name)s - %(message)s',
                '%m-%d %H:%M:%S'
                )
        fh.setFormatter(ff)
        logger.info("logging to %s", logfile)
        logger.addHandler(fh)

        # process the data
        pd, reader, detector = initialize_experiment(cfg)
        cache_frames(reader, cfg, show_progress=not args.quiet)

        # stop logging for this particular analysis
        fh.flush()
        fh.close()
        logger.removeHandler(fh)

        logger.info('*** end caching for analysis "%s" ***', cfg.analysis_name)

    logger.info('=== end cache-frames ===')
    # stop logging to the console
    ch.flush()
    ch.close()
    logger.removeHandler(ch)
Example #14
gScl  = np.array([1., 1., 1.,
                  1., 1., 1.,
                  1., 1., 1., 0.01, 0.01, 0.01])

"""
####### INPUT GOES HERE
"""
# def pull_spots_block(cfg_filename, blockID, pd, reader, detector):
if __name__ == "__main__":
    cfg_filename = sys.argv[1]
    blockID      = int(sys.argv[2])
    gp_fileroot  = sys.argv[3]

    print("Using cfg file '%s'" % cfg_filename)

    pd, reader, detector = coreutil.initialize_experiment(cfg_filename)

    parser = SafeConfigParser()
    parser.read(cfg_filename)

    # output for eta-ome maps as pickles
    working_dir   = parser.get('base', 'working_dir')
    analysis_name = parser.get('base', 'analysis_name')

    restrict_eta = parser.getfloat('paint_grid', 'restrict_eta')
    omepd_str    = parser.get('paint_grid', 'ome_period')
    ome_period   = tuple(d2r*np.array(omepd_str.split(','), dtype=float))

    threshold      = parser.getfloat('pull_spots', 'threshold')
    det_origin_str = parser.get('pull_spots', 'det_origin')
    det_origin     = np.array(det_origin_str.split(','), dtype=float)
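Everything in this snippet is driven by an INI file read through
SafeConfigParser; a minimal, self-contained sketch of that read pattern
(section and option names taken from the code above, values hypothetical):

import configparser  # stdlib successor of the SafeConfigParser used above

parser = configparser.ConfigParser()
parser.read_string("""
[paint_grid]
restrict_eta = 5.0
ome_period = -180.0, 180.0
""")
restrict_eta = parser.getfloat('paint_grid', 'restrict_eta')
ome_period = [float(x) for x in parser.get('paint_grid', 'ome_period').split(',')]
print(restrict_eta, ome_period)  # 5.0 [-180.0, 180.0]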