import numpy as np
from datetime import datetime

import bluesky.plan_stubs as bps
import bluesky.plans as bp
from suitcase import csv, tiff_series

# RE, db, sw_det, en, and quick_view are assumed to be provided by the
# beamline profile startup.


def enscansw(seconds, enstart, enstop, steps, samplename='enscan',
             sampleid=''):
    # TODO: do it more generally
    # yield from bps.mv(sw_det.setexp, seconds)
    yield from bps.mv(sw_det.waxs.cam.acquire_time, seconds)
    yield from bps.mv(sw_det.saxs.cam.acquire_time, seconds)
    md = RE.md
    md['sample'] = samplename
    md['sampleid'] = sampleid
    first_scan_id = None
    dt = datetime.now()
    formatted_date = dt.strftime('%Y-%m-%d')
    for i, pos in enumerate(np.linspace(enstart, enstop, steps)):
        yield from bps.mv(en, pos)
        uid = (yield from bp.count([sw_det], md=md))
        hdr = db[uid]
        quick_view(hdr)
        if i == 0:
            first_scan_id = hdr.start['scan_id']
            dt = datetime.fromtimestamp(hdr.start['time'])
            formatted_date = dt.strftime('%Y-%m-%d')
        tiff_series.export(hdr.documents(fill=True),
                           file_prefix=('{start[institution]}/'
                                        '{start[user]}/'
                                        '{start[project]}/'
                                        f'{formatted_date}/'
                                        f'{first_scan_id}-'
                                        '{start[scan_id]}-'
                                        '{start[sample]}-'
                                        f'{pos:.2f}eV-'),
                           directory='Z:/images/users/')
        csv.export(hdr.documents(stream_name='baseline'),
                   file_prefix=('{institution}/'
                                '{user}/'
                                '{project}/'
                                f'{formatted_date}/'
                                f'{first_scan_id}-'
                                '{scan_id}-'
                                '{sample}-'
                                f'{pos:.2f}eV-'),
                   directory='Z:/images/users/')
        csv.export(
            hdr.documents(stream_name='Izero Mesh Drain Current_monitor'),
            file_prefix=('{institution}/'
                         '{user}/'
                         '{project}/'
                         f'{formatted_date}/'
                         f'{first_scan_id}-'
                         '{scan_id}-'
                         '{sample}-'
                         f'{pos:.2f}eV-'),
            directory='Z:/images/users/')
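# A minimal usage sketch, assuming a running bluesky RunEngine ``RE`` and the
# profile globals above; the sample name and id are illustrative. This would
# take 2-second exposures at 20 energies from 270 eV to 300 eV:
#
#     RE(enscansw(2, 270, 300, 20, samplename='mysample', sampleid='S001'))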
def snapsw(seconds, samplename='', sampleid='', num_images=1, dark=0):
    # TODO: do it more generally
    # yield from bps.mv(sw_det.setexp, seconds)
    yield from bps.mv(sw_det.waxs.cam.acquire_time, seconds)
    yield from bps.mv(sw_det.saxs.cam.acquire_time, seconds)
    yield from bps.mv(sw_det.waxs.cam.shutter_close_delay, 200)
    yield from bps.mv(sw_det.saxs.cam.shutter_close_delay, 200)
    yield from bps.mv(sw_det.waxs.cam.shutter_open_delay, 200)
    yield from bps.mv(sw_det.saxs.cam.shutter_open_delay, 200)
    if dark:
        yield from bps.mv(sw_det.saxs.cam.shutter_mode, 0)
        if samplename == '':
            samplename = 'dark'
    else:
        yield from bps.mv(sw_det.saxs.cam.shutter_mode, 2)
        if samplename == '':
            samplename = 'snap'
    md = RE.md
    md['sample'] = samplename
    md['sampleid'] = sampleid
    md['exptime'] = seconds
    uid = (yield from bp.count([sw_det], num=num_images, md=md))
    hdr = db[uid]
    quick_view(hdr)
    dt = datetime.fromtimestamp(hdr.start['time'])
    formatted_date = dt.strftime('%Y-%m-%d')
    energy = hdr.table(stream_name='baseline')['Beamline Energy_energy'][1]
    tiff_series.export(hdr.documents(fill=True),
                       file_prefix=('{start[institution]}/'
                                    '{start[user]}/'
                                    '{start[project]}/'
                                    f'{formatted_date}/'
                                    '{start[scan_id]}-'
                                    '{start[sample]}-'
                                    f'{energy:.2f}eV-'),
                       directory='Z:/images/users/')
    csv.export(hdr.documents(stream_name='baseline'),
               file_prefix=('{institution}/'
                            '{user}/'
                            '{project}/'
                            f'{formatted_date}/'
                            '{scan_id}-'
                            '{sample}-'
                            f'{energy:.2f}eV-'),
               directory='Z:/images/users/')
    csv.export(hdr.documents(stream_name='Izero Mesh Drain Current_monitor'),
               file_prefix=('{institution}/'
                            '{user}/'
                            '{project}/'
                            f'{formatted_date}/'
                            '{scan_id}-'
                            '{sample}-'
                            f'{energy:.2f}eV-'),
               directory='Z:/images/users/')
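# A minimal usage sketch, again assuming the profile globals; the exposure
# time and sample name are illustrative. A dark frame followed by a matching
# light exposure:
#
#     RE(snapsw(10, dark=1))                 # shutter not actuated -> dark
#     RE(snapsw(10, samplename='mysample'))  # shutter opens per frame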
import json
import tempfile

import pandas
from pandas.testing import assert_frame_equal

from bluesky.plans import count
from suitcase.csv import export

# ``RE``, ``hw``, ``tmp_path``, ``example_data``, and ``file_prefix_list``
# are pytest fixtures; ``create_expected`` and ``generate_csv`` are assumed
# to be helpers defined alongside these tests.


def test_export(tmp_path, example_data):
    '''Runs a test of ``export`` using the ``example_data`` fixture.

    .. note::

        Because of the ``example_data`` fixture this actually runs multiple
        tests, each with a different combination of detectors and event
        types. See ``suitcase.utils.tests.conftest`` for more info.
    '''
    collector = example_data(ignore=[])
    expected_dict = create_expected(collector)
    artifacts = export(collector, tmp_path, file_prefix='')

    if 'stream_data' in artifacts:
        for filename in artifacts['stream_data']:
            streamname = str(filename).split('/')[-1].split('.')[0]
            actual = pandas.read_csv(filename)
            expected = expected_dict[streamname][list(actual.columns.values)]
            pandas.testing.assert_frame_equal(actual, expected)
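# For context: ``example_data`` is a parametrized fixture, so the single test
# above fans out into many pytest cases. A minimal sketch of the mechanism
# (the param values and ``_make_documents`` helper are illustrative, not the
# real conftest):
#
#     import pytest
#
#     @pytest.fixture(params=['event', 'bulk_events', 'event_page'])
#     def example_data(request):
#         def _example_data(ignore=None):
#             return _make_documents(request.param, ignore=ignore or [])
#         return _example_data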
def test_export_events(RE, hw):
    '''Test that ``suitcase.csv.export`` works on events.
    '''
    collector = []

    def collect(name, doc):
        collector.append((name, doc))

    RE.subscribe(collect)
    RE(count([hw.det], 5))

    with tempfile.NamedTemporaryFile(mode='w') as f:
        # We don't actually need f itself, just a filepath to template on.
        meta, *csvs = export(collector, f.name)
    csv, = csvs

    docs = (doc for name, doc in collector)
    start, descriptor, *events, stop = docs

    expected = {}
    expected_dict = {'data': {'det': [], 'seq_num': []}, 'time': []}
    for event in events:
        expected_dict['data']['det'].append(event['data']['det'])
        expected_dict['data']['seq_num'].append(event['seq_num'])
        expected_dict['time'].append(event['time'])
    expected['events'] = pandas.DataFrame(expected_dict['data'],
                                          index=expected_dict['time'])
    expected['events'].index.name = 'time'

    with open(meta) as f:
        actual = json.load(f)

    # JSON round-trips tuples as lists, so convert the hint dimensions back
    # to tuples before the equality asserts below.
    new_dimensions = []
    for dims in actual['start']['hints']['dimensions']:
        new_dims = []
        for dim in dims:
            if isinstance(dim, list):
                new_dims.append(tuple(dim))
            else:
                new_dims.append(dim)
        new_dimensions.append(tuple(new_dims))
    actual['start']['hints']['dimensions'] = new_dimensions

    expected.update({'start': start,
                     'stop': stop,
                     'descriptors': {'primary': [descriptor]}})
    actual['events'] = pandas.read_csv(csv, index_col=0)

    assert actual.keys() == expected.keys()
    assert actual['start'] == expected['start']
    assert actual['descriptors'] == expected['descriptors']
    assert actual['stop'] == expected['stop']
    assert_frame_equal(expected['events'], actual['events'])
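# The ``collector`` pattern above is the generic bluesky callback interface:
# every document is delivered to subscribers as a ``(name, doc)`` pair, so a
# plain function works as a subscriber. A standalone sketch using ophyd's
# simulated detector:
#
#     from bluesky import RunEngine
#     from bluesky.plans import count
#     from ophyd.sim import det
#
#     RE = RunEngine({})
#     docs = []
#     RE.subscribe(lambda name, doc: docs.append((name, doc)))
#     RE(count([det], 3))
#     # docs now holds the start, descriptor, three event, and stop documents.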
def test_export(RE, hw):
    collector = []

    def collect(name, doc):
        collector.append((name, doc))

    RE.subscribe(collect)
    RE(count([hw.det], 5))

    with tempfile.NamedTemporaryFile(mode='w') as f:
        # We don't actually need f itself, just a filepath to template on.
        generate_csv(f)
        filepaths = export(collector, f.name)
        print(filepaths)
def test_file_prefix_formatting(file_prefix_list, example_data, tmp_path):
    '''Runs a test of the ``file_prefix`` formatting.

    .. note::

        Because of the ``file_prefix_list`` and ``example_data`` fixtures
        this actually runs multiple tests, each with a different combination
        of file prefixes, detectors, and event types. See
        ``suitcase.utils.conftest`` for more info.
    '''
    collector = example_data()
    file_prefix = file_prefix_list()
    artifacts = export(collector, tmp_path, file_prefix=file_prefix)

    for name, doc in collector:
        if name == 'start':
            templated_file_prefix = file_prefix.format(
                start=doc).partition('-')[0]
            break

    if artifacts:
        unique_actual = set(str(artifact).split('/')[-1].partition('-')[0]
                            for artifact in artifacts['stream_data'])
        assert unique_actual == set([templated_file_prefix])
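# ``file_prefix`` templating is plain ``str.format`` over the start document,
# which is why the test formats with ``start=doc``. A hypothetical
# illustration:
#
#     start = {'scan_id': 42, 'sample': 'mysample'}
#     '{start[scan_id]}-{start[sample]}-'.format(start=start)
#     # -> '42-mysample-'
#
# The test keeps only the text before the first '-' so it can compare
# prefixes without knowing the stream-specific suffixes.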