from xpdan.pipelines.main import *  # noqa: F403, F401
from bluesky.callbacks.broker import LiveImage
from shed.translation import ToEventStream
from xpdtools.pipelines.extra import z_score

# Z score visualization
z_score_plot = ToEventStream(z_score, ('z_score',)).starsink(
    LiveImage('z_score',
              cmap='viridis',
              window_title='z score',
              limit_func=lambda im: (-2, 2)),
    stream_name='z score vis')
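
# Usage sketch (illustrative): replay a saved run through the graph so the
# z-score LiveImage above updates.  Assumptions: `raw_source` is the head
# Stream exported by the star import from xpdan.pipelines.main, documents
# are fed in as (name, document) pairs, and a databroker configuration
# named 'xpd' exists.
if __name__ == '__main__':
    from databroker import Broker

    db = Broker.named('xpd')
    for name_doc_pair in db[-1].documents(fill=True):
        raw_source.emit(name_doc_pair)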
    poni1=.1024 * 2,
    poni2=.1024 * 2,
    rot1=0,
    rot2=0,
    rot3=0,
)
raw_foreground_dark.emit(0.0)
raw_background_dark.emit(0.0)
raw_background.emit(0.0)
ave_ff.map(lambda x: ((ff / x) - 1) * 100).map(np.nan_to_num).sink(
    LiveImage(
        "image",
        cmap="viridis",
        limit_func=lambda x: (
            np.nanpercentile(x, .1),
            np.nanpercentile(x, 99.9),
        ),
        # norm=SymLogNorm(.1),
        window_title="percent off",
    ).update)
(mean_array.map(np.nan_to_num).sink(
    LiveImage(
        "image",
        cmap="viridis",
        window_title="predicted flat field",
        limit_func=lambda x: (
            np.nanpercentile(x, .1),
            np.nanpercentile(x, 99.9),
        ),
    ).update))
raw_foreground.map(np.nan_to_num).sink(
proj = tomopy.normalize(proj, flat, dark)
rot_center = tomopy.find_center(proj, theta, init=290, ind=0, tol=0.5)

backend = "thread"
# backend = 'dask'
# from dask.distributed import Client
# c = Client()

center = Stream(stream_name="center")
proj_node = Stream(stream_name="projection")
theta_node = Stream(stream_name="theta")

install_qt_kicker()
li = LiveImage("hi", cmap="viridis")
li2 = LiveImage("hi", cmap="viridis")

x = Stream()
th = Stream()
th_dim = Stream()
x_dim = Stream()
th_extents = Stream()
x_extents = Stream()
qoi = Stream()
# ns = tomo_prep(
#     x.scatter(backend=backend),
#     th.scatter(backend=backend),
#     th_dim.scatter(backend=backend),
#     x_dim.scatter(backend=backend),
def conf_main_pipeline(db, save_dir, *,
                       write_to_disk=False,
                       vis=True,
                       polarization_factor=.99,
                       image_data_key='pe1_image',
                       mask_setting='default',
                       mask_kwargs=None,
                       pdf_config=None,
                       verbose=False,
                       calibration_md_folder='../xpdConfig/'):
    """Total data processing pipeline for XPD

    Parameters
    ----------
    db: databroker.broker.Broker instance
        The databroker holding the data
    save_dir: str
        The folder in which to save the data
    write_to_disk: bool, optional
        If True write files to disk, defaults to False
    vis: bool, optional
        If True visualize the data, defaults to True
    polarization_factor : float, optional
        Polarization correction factor, ranging from -1 (vertical) to +1
        (horizontal). Defaults to 0.99. Set to None for no correction.
    mask_setting : str, optional
        If 'default' reuse the mask created for the first image, if 'auto'
        mask all images, if None use no mask. Defaults to 'default'
    mask_kwargs : dict, optional
        Dictionary of options for the automasking functionality. Defaults
        to the options defined by ``an_glbl.auto_mask_dict``. Please refer
        to the documentation for more details
    image_data_key: str, optional
        The key for the image data, defaults to `pe1_image`
    pdf_config: dict, optional
        Configuration for making PDFs, see pdfgetx3 docs. Defaults to
        ``dict(dataformat='QA', qmaxinst=28, qmax=22)``
    verbose: bool, optional
        If True print many outcomes from the pipeline, for debugging use
        only, defaults to False
    calibration_md_folder: str
        Path to where the calibration is stored for xpdAcq

    Returns
    -------
    raw_source: Stream
        The source for the graph

    See also
    --------
    xpdan.tools.mask_img
    """
    _pdf_config = dict(dataformat='QA', qmaxinst=28, qmax=22)
    if pdf_config is None:
        pdf_config = _pdf_config.copy()
    else:
        # dict.update returns None, so update a copy and rebind
        merged_pdf_config = _pdf_config.copy()
        merged_pdf_config.update(pdf_config)
        pdf_config = merged_pdf_config
    if mask_kwargs is None:
        mask_kwargs = {}
    print('start pipeline configuration')
    light_template = os.path.join(save_dir, base_template)
    raw_source = Stream(stream_name='Raw Data')  # raw data
    source = es.fill_events(db, raw_source)  # filled raw data

    if_not_dark_stream = es.filter(lambda x: not if_dark(x),
                                   source,
                                   input_info={0: ((), 0)},
                                   document_name='start',
                                   stream_name='If not dark',
                                   full_event=True)
    if_not_dark_stream.sink(star(StartStopCallback()))
    eventify_raw_start = es.Eventify(if_not_dark_stream,
                                     stream_name='eventify raw start')
    h_timestamp_stream = es.map(_timestampstr,
                                if_not_dark_stream,
                                input_info={0: 'time'},
                                output_info=[('human_timestamp',
                                              {'dtype': 'str'})],
                                full_event=True,
                                stream_name='human timestamp')
    # only the primary stream
    if_not_dark_stream_primary = es.filter(
        lambda x: x[0]['name'] == 'primary',
        if_not_dark_stream,
        document_name='descriptor',
        stream_name='Primary')
    dark_query = es.Query(db,
                          if_not_dark_stream,
                          query_function=query_dark,
                          query_decider=temporal_prox,
                          stream_name='Query for FG Dark')
    dark_query_results = es.QueryUnpacker(db, dark_query,
                                          stream_name='Unpack FG Dark')
    # Do the dark subtraction
    zlid = es.zip_latest(if_not_dark_stream_primary,
                         dark_query_results,
                         stream_name='Combine darks and lights')
    dark_sub_fg = es.map(sub,
                         zlid,
                         input_info={0: (image_data_key, 0),
                                     1: (image_data_key, 1)},
                         output_info=[('img', {'dtype': 'array',
                                               'source': 'testing'})],
                         md=dict(stream_name='Dark Subtracted Foreground',
                                 analysis_stage='dark_sub'))

    # BACKGROUND PROCESSING
    # Query for background
    bg_query_stream = es.Query(db,
                               if_not_dark_stream,
                               query_function=query_background,
                               query_decider=temporal_prox,
                               stream_name='Query for Background')
    # Decide if there is background data
    if_background_stream = es.filter(if_query_results,
                                     bg_query_stream,
                                     full_event=True,
                                     input_info={'n_hdrs': (('n_hdrs',), 0)},
                                     document_name='start',
                                     stream_name='If background')
    # if there is background data, do background subtraction
    bg_stream = es.QueryUnpacker(db, if_background_stream,
                                 stream_name='Unpack background')
    bg_dark_stream = es.QueryUnpacker(
        db,
        es.Query(db,
                 bg_stream,
                 query_function=query_dark,
                 query_decider=temporal_prox,
                 stream_name='Query for BG Dark'),
        stream_name='Unpack background dark')
    # Perform dark subtraction on everything
    dark_sub_bg = es.map(sub,
                         es.zip_latest(bg_stream,
                                       bg_dark_stream,
                                       stream_name='Combine bg and bg dark'),
                         input_info={0: (image_data_key, 0),
                                     1: (image_data_key, 1)},
                         output_info=[('img', {'dtype': 'array',
                                               'source': 'testing'})],
                         stream_name='Dark Subtracted Background')
    # bundle the backgrounds into one stream
    bg_bundle = es.BundleSingleStream(dark_sub_bg, bg_query_stream,
                                      name='Background Bundle')
    # sum the backgrounds
    summed_bg = es.accumulate(dstar(add_img),
                              bg_bundle,
                              start=dstar(pull_array),
                              state_key='img1',
                              input_info={'img2': 'img'},
                              output_info=[('img', {'dtype': 'array',
                                                    'source': 'testing'})])
    count_bg = es.accumulate(event_count,
                             bg_bundle,
                             start=1,
                             state_key='count',
                             output_info=[('count', {'dtype': 'int',
                                                     'source': 'testing'})])
    ave_bg = es.map(truediv,
                    es.zip(summed_bg, count_bg),
                    input_info={0: ('img', 0), 1: ('count', 1)},
                    output_info=[('img', {'dtype': 'array',
                                          'source': 'testing'})],
                    stream_name='Average Background')
    # combine the fg with the averaged bg
    fg_bg = es.zip_latest(dark_sub_fg,
                          ave_bg,
                          stream_name='Combine fg with bg')
    # subtract the background images
    fg_sub_bg = es.map(sub,
                       fg_bg,
                       input_info={0: ('img', 0), 1: ('img', 1)},
                       output_info=[('img', {'dtype': 'array',
                                             'source': 'testing'})],
                       stream_name='Background Corrected Foreground')
    # else do nothing
    eventify_nhdrs = es.Eventify(bg_query_stream, 'n_hdrs',
                                 output_info=[('n_hdrs', {})])
    zldb = es.zip_latest(dark_sub_fg, eventify_nhdrs)
    if_not_background_stream = es.filter(
        lambda x: not if_query_results(x),
        zldb,
        input_info={'x': (('data', 'n_hdrs'), 1)},
        stream_name='If not background',
        full_event=True)
    if_not_background_split_stream = es.split(if_not_background_stream, 2)
    # union of the background and not-background branches
    foreground_stream = fg_sub_bg.union(
        if_not_background_split_stream.split_streams[0])
    foreground_stream.stream_name = 'Pull from either bgsub or not sub'

    # CALIBRATION PROCESSING
    # if calibration send to calibration runner
    zlfi = es.zip_latest(foreground_stream,
                         es.zip(if_not_dark_stream, eventify_raw_start),
                         clear_on_lossless_stop=True)
    if_calibration_stream = es.filter(if_calibration,
                                      zlfi,
                                      input_info={0: ((), 1)},
                                      full_event=True,
                                      document_name='start',
                                      stream_name='If calibration')
    # the detector and calibrant are under 'detector' and 'dSpacing'
    calibration_stream = es.map(
        img_calibration,
        if_calibration_stream,
        input_info={'img': (('data', 'img'), 0),
                    'wavelength': (('data', 'bt_wavelength'), 2),
                    'calibrant': (('data', 'dSpacing'), 2),
                    'detector': (('data', 'detector'), 2)},
        output_info=[('calibration',
                      {'dtype': 'object',
                       'source': 'workflow',
                       'instance': 'pyFAI.calibration.'
                                   'Calibration'}),
                     ('geo',
                      {'dtype': 'object',
                       'source': 'workflow',
                       'instance': 'pyFAI.azimuthalIntegrator'
                                   '.AzimuthalIntegrator'})],
        stream_name='Run Calibration',
        md={'analysis_stage': 'calib'},
        full_event=True)
    # write calibration info into the xpdAcq sacred place
    es.map(_save_calib_param,
           es.zip(calibration_stream, h_timestamp_stream),
           calib_yml_fp=os.path.join(calibration_md_folder,
                                     'xpdAcq_calib_info.yml'),
           input_info={'calib_c': (('data', 'calibration'), 0),
                       'timestr': (('data', 'human_timestamp'), 1)},
           output_info=[('calib_config_dict', {'dtype': 'dict'})])
    # else get the calibration from the start header
    if_not_calibration_stream = es.filter(if_not_calibration,
                                          if_not_dark_stream,
                                          input_info={0: ((), 0)},
                                          document_name='start',
                                          full_event=True,
                                          stream_name='If not calibration')
    cal_md_stream = es.Eventify(if_not_calibration_stream,
                                'calibration_md',
                                output_info=[('calibration_md',
                                              {'dtype': 'dict',
                                               'source': 'workflow'})],
                                stream_name='Eventify Calibration')
    loaded_cal_stream = es.map(load_geo,
                               cal_md_stream,
                               input_info={'cal_params': 'calibration_md'},
                               output_info=[('geo',
                                             {'dtype': 'object',
                                              'source': 'workflow',
                                              'instance':
                                                  'pyFAI.azimuthalIntegrator'
                                                  '.AzimuthalIntegrator'})],
                               stream_name='Load geometry')
    # union the calibration branches
    loaded_calibration_stream = loaded_cal_stream.union(calibration_stream)
    loaded_calibration_stream.stream_name = ('Pull from either md or '
                                             'run calibration')
    # send calibration and corrected images to the main workflow
    # polarization correction
    # SPLIT INTO TWO NODES
    zlfl = es.zip_latest(foreground_stream,
                         loaded_calibration_stream,
                         stream_name='Combine FG and Calibration',
                         clear_on_lossless_stop=True)
    p_corrected_stream = es.map(polarization_correction,
                                zlfl,
                                input_info={'img': ('img', 0),
                                            'geo': ('geo', 1)},
                                output_info=[('img', {'dtype': 'array',
                                                      'source': 'testing'})],
                                polarization_factor=polarization_factor,
                                stream_name='Polarization corrected img')
    # generate masks
    if mask_setting is None:
        zlfc = es.zip_latest(es.filter(lambda x: x == 1,
                                       p_corrected_stream,
                                       input_info={0: 'seq_num'},
                                       full_event=True),
                             loaded_calibration_stream,
                             clear_on_lossless_stop=True)
        mask_stream = es.map(lambda x: np.ones(x.shape, dtype=bool),
                             zlfc,
                             input_info={'x': ('img', 0)},
                             output_info=[('mask', {'dtype': 'array',
                                                    'source': 'testing'})],
                             stream_name='dummy mask',
                             md=dict(analysis_stage='mask'))
    else:
        if mask_setting == 'default':
            # note that this could become a much fancier filter,
            # e.g. make a mask every 5th image
            zlfc = es.zip_latest(es.filter(lambda x: x == 1,
                                           p_corrected_stream,
                                           input_info={0: 'seq_num'},
                                           full_event=True),
                                 loaded_calibration_stream,
                                 clear_on_lossless_stop=True)
        else:
            zlfc = es.zip_latest(p_corrected_stream,
                                 loaded_calibration_stream,
                                 clear_on_lossless_stop=True)
        zlfc_ds = es.zip_latest(zlfc, if_not_dark_stream,
                                clear_on_lossless_stop=True)
        if_setup_stream = es.filter(lambda sn: sn == 'Setup',
                                    zlfc_ds,
                                    input_info={0: (('sample_name',), 2)},
                                    document_name='start',
                                    full_event=True,
                                    stream_name='Is Setup Mask')
        blank_mask_stream = es.map(lambda x: np.ones(x.shape, dtype=bool),
                                   if_setup_stream,
                                   input_info={'x': ('img', 0)},
                                   output_info=[('mask',
                                                 {'dtype': 'array',
                                                  'source': 'testing'})],
                                   stream_name='dummy setup mask',
                                   md=dict(analysis_stage='mask'))
        if_not_setup_stream = es.filter(
            lambda doc: doc.get('sample_name') != 'Setup',
            zlfc_ds,
            input_info={0: ((), 2)},
            document_name='start',
            full_event=True,
            stream_name='Is Not Setup Mask')
        not_setup_mask_stream = es.map(mask_img,
                                       if_not_setup_stream,
                                       input_info={'img': ('img', 0),
                                                   'geo': ('geo', 1)},
                                       output_info=[('mask',
                                                     {'dtype': 'array',
                                                      'source': 'testing'})],
                                       **mask_kwargs,
                                       stream_name='Mask',
                                       md=dict(analysis_stage='mask'))
        mask_stream = not_setup_mask_stream.union(blank_mask_stream)
        mask_stream.stream_name = 'If Setup pull Dummy Mask, else Mask'

    # generate binner stream
    zlmc = es.zip_latest(mask_stream,
                         loaded_calibration_stream,
                         clear_on_lossless_stop=True)
    binner_stream = es.map(generate_binner,
                           zlmc,
                           input_info={'geo': ('geo', 1),
                                       'mask': ('mask', 0)},
                           output_info=[('binner', {'dtype': 'function',
                                                    'source': 'testing'})],
                           img_shape=(2048, 2048),
                           stream_name='Binners')
    zlpb = es.zip_latest(p_corrected_stream, binner_stream,
                         clear_on_lossless_stop=True)
    iq_stream = es.map(integrate,
                       zlpb,
                       input_info={'img': ('img', 0),
                                   'binner': ('binner', 1)},
                       output_info=[('q', {'dtype': 'array',
                                           'source': 'testing'}),
                                    ('iq', {'dtype': 'array',
                                            'source': 'testing'})],
                       stream_name='I(Q)',
                       md=dict(analysis_stage='iq_q'))
    iq_rs_zl = es.zip_latest(iq_stream, eventify_raw_start)
    # convert to tth
    tth_stream = es.map(
        lambda q, wavelength: np.rad2deg(q_to_twotheta(q, wavelength)),
        iq_rs_zl,
        input_info={'q': ('q', 0),
                    'wavelength': ('bt_wavelength', 1)},
        output_info=[('tth', {'dtype': 'array', 'units': 'degrees'})])
    tth_iq_stream = es.map(lambda **x: (x['tth'], x['iq']),
                           es.zip(tth_stream, iq_stream),
                           input_info={'tth': ('tth', 0),
                                       'iq': ('iq', 1)},
                           output_info=[('tth', {'dtype': 'array',
                                                 'source': 'testing'}),
                                        ('iq', {'dtype': 'array',
                                                'source': 'testing'})],
                           stream_name='Combine tth and iq',
                           md=dict(analysis_stage='iq_tth'))
    fq_stream = es.map(fq_getter,
                       iq_rs_zl,
                       input_info={0: ('q', 0),
                                   1: ('iq', 0),
                                   'composition': ('composition_string', 1)},
                       output_info=[('q', {'dtype': 'array'}),
                                    ('fq', {'dtype': 'array'}),
                                    ('config', {'dtype': 'dict'})],
                       dataformat='QA',
                       qmaxinst=28,
                       qmax=22,
                       md=dict(analysis_stage='fq'))
    pdf_stream = es.map(pdf_getter,
                        iq_rs_zl,
                        input_info={0: ('q', 0),
                                    1: ('iq', 0),
                                    'composition': ('composition_string',
                                                    1)},
                        output_info=[('r', {'dtype': 'array'}),
                                     ('pdf', {'dtype': 'array'}),
                                     ('config', {'dtype': 'dict'})],
                        **pdf_config,
                        md=dict(analysis_stage='pdf'))
    if vis:
        foreground_stream.sink(
            star(LiveImage('img',
                           window_title='Dark Subtracted Image',
                           cmap='viridis')))
        zlpm = es.zip_latest(p_corrected_stream, mask_stream,
                             clear_on_lossless_stop=True)
        masked_img = es.map(overlay_mask,
                            zlpm,
                            input_info={'img': (('data', 'img'), 0),
                                        'mask': (('data', 'mask'), 1)},
                            full_event=True,
                            output_info=[('overlay_mask',
                                          {'dtype': 'array'})])
        masked_img.sink(
            star(LiveImage('overlay_mask',
                           window_title='Dark/Background/'
                                        'Polarization Corrected '
                                        'Image with Mask',
                           cmap='viridis',
                           limit_func=lambda im: (np.nanpercentile(im, 1),
                                                  np.nanpercentile(im, 99))
                           # norm=LogNorm()
                           )))
        iq_stream.sink(
            star(LiveWaterfall('q', 'iq',
                               units=('Q (A^-1)', 'Arb'),
                               window_title='I(Q)')))
        tth_iq_stream.sink(
            star(LiveWaterfall('tth', 'iq',
                               units=('tth', 'Arb'),
                               window_title='I(tth)')))
        fq_stream.sink(
            star(LiveWaterfall('q', 'fq',
                               units=('Q (A^-1)', 'F(Q)'),
                               window_title='F(Q)')))
        pdf_stream.sink(
            star(LiveWaterfall('r', 'pdf',
                               units=('r (A)', 'G(r) A^-2'),
                               window_title='G(r)')))
    if write_to_disk:
        eventify_raw_descriptor = es.Eventify(
            if_not_dark_stream,
            stream_name='eventify raw descriptor',
            document='descriptor')
        exts = ['.tiff', '', '_Q.chi', '_tth.chi', '.gr', '.poni']
        eventify_input_streams = [dark_sub_fg, mask_stream, iq_stream,
                                  tth_iq_stream, pdf_stream,
                                  calibration_stream]
        input_infos = [
            {'data': ('img', 1),
             'file': ('filename', 0)},
            {'mask': ('mask', 1),
             'filename': ('filename', 0)},
            {'tth': ('q', 1),
             'intensity': ('iq', 1),
             'output_name': ('filename', 0)},
            {'tth': ('tth', 1),
             'intensity': ('iq', 1),
             'output_name': ('filename', 0)},
            {'r': ('r', 1),
             'pdf': ('pdf', 1),
             'filename': ('filename', 0),
             'config': ('config', 1)},
            {'calibration': ('calibration', 1),
             'filename': ('filename', 0)}
        ]
        saver_kwargs = [{}, {},
                        {'q_or_2theta': 'Q', 'ext': ''},
                        {'q_or_2theta': '2theta', 'ext': ''},
                        {}, {}]
        eventifies = [
            es.Eventify(s, stream_name='eventify {}'.format(s.stream_name))
            for s in eventify_input_streams
        ]
        mega_render = [
            es.map(
                render_and_clean,
                es.zip_latest(
                    es.zip(
                        h_timestamp_stream,  # human readable event timestamp
                        if_not_dark_stream,  # raw events
                        stream_name='mega_render zip'),
                    eventify_raw_start,
                    eventify_raw_descriptor,
                    analysed_eventify),
                string=light_template,
                input_info={
                    'human_timestamp': (('data', 'human_timestamp'), 0),
                    'raw_event': ((), 1),
                    'raw_start': (('data',), 2),
                    'raw_descriptor': (('data',), 3),
                    'analyzed_start': (('data',), 4)
                },
                ext=ext,
                full_event=True,
                output_info=[('filename', {'dtype': 'str'})],
                stream_name='mega render '
                            '{}'.format(analysed_eventify.stream_name))
            for ext, analysed_eventify in zip(exts, eventifies)
        ]
        streams_to_be_saved = [dark_sub_fg, mask_stream, iq_stream,
                               tth_iq_stream, pdf_stream,
                               calibration_stream]
        save_callables = [tifffile.imsave, fit2d_save, save_output,
                          save_output, pdf_saver, poni_saver]
        md_render = es.map(render_and_clean,
                           eventify_raw_start,
                           string=light_template,
                           input_info={'raw_start': (('data',), 0)},
                           output_info=[('filename', {'dtype': 'str'})],
                           ext='.yml',
                           full_event=True,
                           stream_name='MD render')
        make_dirs = [
            es.map(lambda x: os.makedirs(os.path.split(x)[0],
                                         exist_ok=True),
                   cs,
                   input_info={0: 'filename'},
                   output_info=[('filename', {'dtype': 'str'})],
                   stream_name='Make dirs {}'.format(cs.stream_name))
            for cs in mega_render
        ]
        [
            es.map(writer_templater,
                   es.zip_latest(
                       es.zip(s2, s1,
                              stream_name='zip render and data',
                              zip_type='truncate'),
                       made_dir,
                       stream_name='zl dirs and render and data'),
                   input_info=ii,
                   output_info=[('final_filename', {'dtype': 'str'})],
                   stream_name='Write {}'.format(s1.stream_name),
                   **kwargs)
            for s1, s2, made_dir, ii, writer_templater, kwargs in zip(
                streams_to_be_saved,
                mega_render,
                # zipping with make_dirs prevents a race condition between
                # creating the dirs and writing the files
                make_dirs,
                input_infos,
                save_callables,
                saver_kwargs)
        ]
        es.map(dump_yml,
               es.zip(eventify_raw_start, md_render),
               input_info={0: (('data', 'filename'), 1),
                           1: (('data',), 0)},
               full_event=True,
               stream_name='dump yaml')
    if verbose:
        # if_calibration_stream.sink(pprint)
        # dark_sub_fg.sink(pprint)
        # eventify_raw_start.sink(pprint)
        # raw_source.sink(pprint)
        # if_not_dark_stream.sink(pprint)
        # zlid.sink(pprint)
        # bg_query_stream.sink(pprint)
        # if_not_calibration_stream.sink(pprint)
        # if_not_background_stream.sink(pprint)
        # if_background_stream.sink(pprint)
        # fg_sub_bg.sink(pprint)
        # if_not_background_split_stream.split_streams[0].sink(pprint)
        # cal_md_stream.sink(pprint)
        # loaded_calibration_stream.sink(pprint)
        # foreground_stream.sink(pprint)
        # zlfl.sink(pprint)
        # p_corrected_stream.sink(pprint)
        # zlmc.sink(pprint)
        # binner_stream.sink(pprint)
        # zlpb.sink(pprint)
        # iq_stream.sink(pprint)
        # pdf_stream.sink(pprint)
        # mask_stream.sink(pprint)
        if write_to_disk:
            md_render.sink(pprint)
            [
                es.zip(cs, streams_to_be_s,
                       zip_type='truncate',
                       stream_name='zip_print').sink(
                    star(PrinterCallback()))
                for cs, streams_to_be_s in zip(mega_render,
                                               streams_to_be_saved)
            ]
    print('Finish pipeline configuration')
    return raw_source
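
# Usage sketch (illustrative): wire up the pipeline and replay a stored run
# through it.  Assumptions: a databroker configuration named 'xpd' exists,
# and documents are pushed to the head node as (name, document) pairs, as
# es.fill_events expects.
if __name__ == '__main__':
    from databroker import Broker

    db = Broker.named('xpd')
    raw_source = conf_main_pipeline(db, '/tmp/analysed_data',
                                    write_to_disk=False,
                                    vis=True)
    for name_doc_pair in db[-1].documents():
        raw_source.emit(name_doc_pair)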
from bluesky.callbacks.broker import LiveImage
from shed.translation import ToEventStream
from xpdan.pipelines.main import *
from xpdview.callbacks import LiveWaterfall

# Visualization
# background corrected img
ToEventStream(bg_corrected_img, ('image',)).starsink(
    LiveImage('image',
              window_title='Background_corrected_img',
              cmap='viridis'))

# polarization corrected img with the mask overlaid
ToEventStream(
    pol_corrected_img.combine_latest(mask).starmap(overlay_mask),
    ('image',)).starsink(
        LiveImage('image',
                  window_title='final img',
                  limit_func=lambda im: (np.nanpercentile(im, 2.5),
                                         np.nanpercentile(im, 97.5)),
                  cmap='viridis'))

# integrated intensities
iq_em = ToEventStream(mean.combine_latest(q, emit_on=0),
                      ('iq', 'q')).starsink(
    LiveWaterfall('q', 'iq',
                  units=('1/A', 'Intensity'),
                  window_title='{} vs {}'.format('iq', 'q')),
    stream_name='{} {} vis'.format('q', 'iq'))
from bluesky.examples import *
from bluesky.broker_examples import *
from bluesky.callbacks import LiveTable
from bluesky.callbacks.broker import LiveImage
from bluesky.tests.utils import setup_test_run_engine
from matplotlib import pyplot as plt
from xray_vision.backend.mpl.cross_section_2d import CrossSection
import numpy as np
import filestore.api as fsapi
import time as ttime
from filestore.handlers import NpyHandler

fsapi.register_handler('npy', NpyHandler)


def stepscan(det, motor):
    # one run with one event per motor position
    yield Msg('open_run')
    for i in np.linspace(-5, 5, 75):
        yield Msg('create')
        yield Msg('set', motor, i)
        yield Msg('trigger', det)
        yield Msg('read', motor)
        yield Msg('read', det)
        yield Msg('save')
    yield Msg('close_run')


ic = LiveImage('det_2d')
table_callback = LiveTable(fields=[motor.name, det_2d.name])
RE = setup_test_run_engine()
RE(stepscan(det_2d, motor),
   subs={'event': ic, 'all': table_callback},
   beamline_id='c08i')
def __init__(self, db, save_dir, *,
             vis=True,
             write_to_disk=True,
             polarization_factor=.99,
             image_data_key='pe1_image',
             mask_kwargs=None,
             fq_config=None,
             pdf_config=None,
             calibration_md_folder='../xpdConfig/',
             mask_setting='default',
             analysis_setting='full'):
    self.vis = vis
    self.write_to_disk = write_to_disk
    self.analysis_setting = analysis_setting
    self.mask_setting = mask_setting
    if mask_kwargs is None:
        mask_kwargs = {}
    _pdf_config = dict(dataformat='QA', qmaxinst=28, qmax=22)
    _fq_config = dict(dataformat='QA', qmaxinst=26, qmax=25)
    if pdf_config is None:
        pdf_config = _pdf_config.copy()
    else:
        pdf_config2 = _pdf_config.copy()
        pdf_config2.update(pdf_config)
        pdf_config = pdf_config2
    if fq_config is None:
        fq_config = _fq_config.copy()
    else:
        fq_config2 = _fq_config.copy()
        fq_config2.update(fq_config)
        fq_config = fq_config2
    self.image_data_key = image_data_key
    self.mask_kwargs = mask_kwargs
    self.calibration_md_folder = calibration_md_folder
    self.pdf_kwargs = pdf_config
    self.fq_kwargs = fq_config
    self.polarization_factor = polarization_factor
    self.db = db
    self.save_dir = save_dir
    self.light_template = os.path.join(self.save_dir, base_template)
    if self.vis:
        self.vis_callbacks = {
            'dark_sub_iq': LiveImage(
                'img',
                window_title='Dark Subtracted Image',
                cmap='viridis'),
            'masked_img': LiveImage(
                'overlay_mask',
                window_title='Dark/Background/'
                             'Polarization Corrected '
                             'Image with Mask',
                cmap='viridis',
                limit_func=lambda im: (np.nanpercentile(im, 1),
                                       np.nanpercentile(im, 99))
                # norm=LogNorm()
            ),
            'iq': LiveWaterfall('q', 'iq',
                                units=('Q (A^-1)', 'Arb'),
                                window_title='I(Q)'),
            'itth': LiveWaterfall('tth', 'iq',
                                  units=('tth', 'Arb'),
                                  window_title='I(tth)'),
            'fq': LiveWaterfall('q', 'fq',
                                units=('Q (A^-1)', 'F(Q)'),
                                window_title='F(Q)'),
            'pdf': LiveWaterfall('r', 'pdf',
                                 units=('r (A)', 'G(r) A^-2'),
                                 window_title='G(r)'),
            'zscore': LiveImage('img',
                                window_title='Z Score Image',
                                cmap='viridis'),
        }
    self.start_doc = None
    self.descriptor_doc = None
    self.mask = None
    self.composition = None
    self.wavelength = None
    self.dark_img = None
    self.background_img = None
    self.is_calibration = None
    self.detector = None
    self.calibrant = None
    self.descs = None
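
# Usage sketch (illustrative): constructing the callback defined above.
# The enclosing class name is not shown in this excerpt; `AnalysisCallback`
# below is a hypothetical stand-in, and the 'xpd' databroker configuration
# is an assumption.
if __name__ == '__main__':
    from databroker import Broker

    db = Broker.named('xpd')
    cb = AnalysisCallback(
        db, '/tmp/analysis',
        mask_setting='auto',       # re-mask every image
        pdf_config=dict(qmax=20),  # user values merge over the defaults
    )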
def conf_save_tiff_pipeline(db, save_dir, *,
                            write_to_disk=False,
                            vis=True,
                            image_data_key='pe1_image'):
    """Tiff-saving data processing pipeline for XPD

    Parameters
    ----------
    db: databroker.broker.Broker instance
        The databroker holding the data
    save_dir: str
        The folder in which to save the data
    write_to_disk: bool, optional
        If True write files to disk, defaults to False
    vis: bool, optional
        If True visualize the data, defaults to True
    image_data_key: str, optional
        The key for the image data, defaults to `pe1_image`

    Returns
    -------
    raw_source: Stream
        The source for the graph

    See also
    --------
    xpdan.tools.mask_img
    """
    print('start pipeline configuration')
    light_template = os.path.join(save_dir, base_template)
    raw_source = Stream(stream_name='Raw Data')  # raw data
    source = es.fill_events(db, raw_source)  # filled raw data

    # DARK PROCESSING
    # if not dark do dark subtraction
    if_not_dark_stream = es.filter(lambda x: not if_dark(x),
                                   source,
                                   input_info={0: ((), 0)},
                                   document_name='start',
                                   stream_name='If not dark',
                                   full_event=True)
    eventify_raw_start = es.Eventify(if_not_dark_stream,
                                     stream_name='eventify raw start')
    h_timestamp_stream = es.map(_timestampstr,
                                if_not_dark_stream,
                                input_info={0: 'time'},
                                output_info=[('human_timestamp',
                                              {'dtype': 'str'})],
                                full_event=True,
                                stream_name='human timestamp')
    # only the primary stream
    if_not_dark_stream_primary = es.filter(
        lambda x: x[0]['name'] == 'primary',
        if_not_dark_stream,
        document_name='descriptor',
        stream_name='Primary')
    dark_query = es.Query(db,
                          if_not_dark_stream,
                          query_function=query_dark,
                          query_decider=temporal_prox,
                          stream_name='Query for FG Dark')
    dark_query_results = es.QueryUnpacker(db, dark_query,
                                          stream_name='Unpack FG Dark')
    # Do the dark subtraction
    zlid = es.zip_latest(if_not_dark_stream_primary,
                         dark_query_results,
                         stream_name='Combine darks and lights')
    dark_sub_fg = es.map(sub,
                         zlid,
                         input_info={0: (image_data_key, 0),
                                     1: (image_data_key, 1)},
                         output_info=[('img', {'dtype': 'array',
                                               'source': 'testing'})],
                         md=dict(stream_name='Dark Subtracted Foreground',
                                 analysis_stage='dark_sub'))
    if vis:
        dark_sub_fg.sink(star(LiveImage('img')))
    if write_to_disk:
        eventify_raw_descriptor = es.Eventify(
            if_not_dark_stream,
            stream_name='eventify raw descriptor',
            document='descriptor')
        exts = ['.tiff']
        eventify_input_streams = [dark_sub_fg]
        input_infos = [
            {'data': ('img', 0),
             'file': ('filename', 1)},
        ]
        saver_kwargs = [{}]
        eventifies = [
            es.Eventify(s, stream_name='eventify {}'.format(s.stream_name))
            for s in eventify_input_streams
        ]
        mega_render = [
            es.map(
                render_and_clean,
                es.zip_latest(
                    es.zip(
                        h_timestamp_stream,  # human readable event timestamp
                        if_not_dark_stream,  # raw events
                        stream_name='mega_render zip'),
                    eventify_raw_start,
                    eventify_raw_descriptor,
                    analysed_eventify),
                string=light_template,
                input_info={
                    'human_timestamp': (('data', 'human_timestamp'), 0),
                    'raw_event': ((), 1),
                    'raw_start': (('data',), 2),
                    'raw_descriptor': (('data',), 3),
                    'analyzed_start': (('data',), 4)
                },
                ext=ext,
                full_event=True,
                output_info=[('filename', {'dtype': 'str'})],
                stream_name='mega render '
                            '{}'.format(analysed_eventify.stream_name))
            for ext, analysed_eventify in zip(exts, eventifies)
        ]
        streams_to_be_saved = [dark_sub_fg]
        save_callables = [tifffile.imsave]
        md_render = es.map(render_and_clean,
                           eventify_raw_start,
                           string=light_template,
                           input_info={'raw_start': (('data',), 0)},
                           output_info=[('filename', {'dtype': 'str'})],
                           ext='.yml',
                           full_event=True,
                           stream_name='MD render')
        make_dirs = [
            es.map(lambda x: os.makedirs(os.path.split(x)[0],
                                         exist_ok=True),
                   cs,
                   input_info={0: 'filename'},
                   output_info=[('filename', {'dtype': 'str'})],
                   stream_name='Make dirs {}'.format(cs.stream_name))
            for cs in mega_render
        ]
        # hold references to the writer nodes so they are not garbage
        # collected
        _s = set()
        _s.update([
            es.map(writer_templater,
                   es.zip_latest(
                       es.zip(s1, s2,
                              stream_name='zip render and data',
                              zip_type='truncate'),
                       made_dir,
                       stream_name='zl dirs and render and data'),
                   input_info=ii,
                   output_info=[('final_filename', {'dtype': 'str'})],
                   stream_name='Write {}'.format(s1.stream_name),
                   **kwargs)
            for s1, s2, made_dir, ii, writer_templater, kwargs in zip(
                streams_to_be_saved,
                mega_render,
                # zipping with make_dirs prevents a race condition between
                # creating the dirs and writing the files
                make_dirs,
                input_infos,
                save_callables,
                saver_kwargs)
        ])
        _s.add(
            es.map(dump_yml,
                   es.zip(eventify_raw_start, md_render),
                   input_info={0: (('data', 'filename'), 1),
                               1: (('data',), 0)},
                   full_event=True,
                   stream_name='dump yaml'))
    return raw_source
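
# Usage sketch (illustrative): configure the tiff-saving pipeline and
# replay the most recent run through it.  Assumes a databroker
# configuration named 'xpd' and (name, document) pairs fed to the head
# node, as es.fill_events expects.
if __name__ == '__main__':
    from databroker import Broker

    db = Broker.named('xpd')
    raw_source = conf_save_tiff_pipeline(db, '/tmp/tiffs',
                                         write_to_disk=True)
    for name_doc_pair in db[-1].documents():
        raw_source.emit(name_doc_pair)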