def _name_templator(collector, file_prefix): events_list = [] descriptors = {} for name, doc in collector: if name == 'start': start = doc elif name == 'descriptor': descriptors[doc['uid']] = doc elif name == 'event_page': for event in event_model.unpack_event_page(doc): templated_file_prefix = file_prefix.format( start=start, descriptor=descriptors[doc['descriptor']], event=event) events_list.append(templated_file_prefix.partition('-')[0]) elif name == 'bulk_events': for key, events in doc.items(): for event in events: templated_file_prefix = file_prefix.format( start=start, descriptor=descriptors[event['descriptor']], event=event) events_list.append( templated_file_prefix.partition('-')[0]) elif name == 'event': templated_file_prefix = file_prefix.format( start=start, descriptor=descriptors[doc['descriptor']], event=doc) events_list.append(templated_file_prefix.partition('-')[0]) return events_list
def run_list_to_dict(embedded_run_list):
    """Convert a run from the mongo database to a dictionary."""
    # The first entry carries the header; everything after it is
    # event/datum pages.
    header = embedded_run_list[0][1]
    run_dict = {
        'start': header['start'][0],
        'stop': header['stop'][0],
        'descriptor': header.get('descriptors', []),
        'resource': header.get('resources', []),
        'event': [],
        'datum': [],
    }
    for name, doc in embedded_run_list[1:]:
        if name == 'event':
            run_dict['event'].extend(event_model.unpack_event_page(doc))
        elif name == 'datum':
            run_dict['datum'].extend(event_model.unpack_datum_page(doc))
    return run_dict
def collector(name, doc):
    # Normalize pages to singles before collecting. A page arriving here
    # is expected to hold exactly one document (the tuple-unpack enforces
    # that).
    if name == 'event_page':
        doc, = event_model.unpack_event_page(doc)
        name = 'event'
    elif name == 'datum_page':
        doc, = event_model.unpack_datum_page(doc)
        name = 'datum'
    collected.append((name, doc))
def __call__(self, name, doc):
    """Route documents into the table: one column per event."""
    if name == 'event_page':
        # Fan the page out into individual events.
        for event in unpack_event_page(doc):
            self.__call__('event', event)
    elif name == 'event':
        column = doc['seq_num'] - 1
        # Rows follow the alphabetical order of the data keys.
        values = [v for _, v in sorted(doc['data'].items())]
        for row, value in enumerate(values):
            self.setItem(row, column, QStandardItem(str(value)))
        self.setVerticalHeaderLabels(doc['data'].keys())
def event_page(self, doc):
    """Fill every event in the page and write the data back in place."""
    fill = self.event  # bind once; called per row in the hot loop
    filled = [fill(event_doc)
              for event_doc in event_model.unpack_event_page(doc)]
    # Repack and copy the data back so the original doc is modified in
    # place, as we do with 'event'.
    doc['data'] = event_model.pack_event_page(*filled)['data']
    return doc
def event_page(self, doc):
    # Unpack an EventPage into Events and do the actual insert inside
    # the `event` method. (This is the opposite of what DocumentRouter
    # does by default.)
    insert = self.event  # avoid attribute lookup in hot loop
    filled_events = [insert(event_doc)
                     for event_doc in event_model.unpack_event_page(doc)]
def __call__(self, name, doc):
    """Populate the table from 'event' docs; pages are expanded first."""
    if name == 'event':
        col = doc['seq_num'] - 1
        # One row per data key, in sorted-key order.
        sorted_values = (v for _, v in sorted(doc['data'].items()))
        for row, v in enumerate(sorted_values):
            self.setItem(row, col, QStandardItem(str(v)))
        self.setVerticalHeaderLabels(doc['data'].keys())
    elif name == 'event_page':
        for event in unpack_event_page(doc):
            self.__call__('event', event)
def event_page(self, doc):
    """Convert an 'event_page' doc to 'event' docs for processing.

    Parameters
    ----------
    doc : dict
        EventPage document
    """
    for single_event in event_model.unpack_event_page(doc):
        self.event(single_event)
def normalize(gen):
    """Convert any pages in ``gen`` into single documents."""
    for name, doc in gen:
        if name == "event_page":
            yield from (("event", e)
                        for e in event_model.unpack_event_page(doc))
        elif name == "datum_page":
            yield from (("datum", d)
                        for d in event_model.unpack_datum_page(doc))
        else:
            # Non-page documents pass through untouched.
            yield name, doc
def normalize(gen):
    """Convert any pages in ``gen`` into single documents."""
    for name, doc in gen:
        # Pass non-page documents straight through.
        if name not in ('event_page', 'datum_page'):
            yield name, doc
        elif name == 'event_page':
            for event in event_model.unpack_event_page(doc):
                yield 'event', event
        else:
            for datum in event_model.unpack_datum_page(doc):
                yield 'datum', datum
def event_page(self, doc):
    """Cache the page's events and refresh the plot.

    Pages whose data lack a 'q' or 'mean' key are ignored.

    BUGFIX: the original wrote ``for doc in unpack_event_page(doc)``,
    shadowing the ``doc`` parameter with each unpacked event; the loop
    variable is renamed so the page document is not clobbered.
    """
    data = doc["data"]
    if "q" not in data or "mean" not in data:
        return
    # NOTE(review): this caches events across runs blindly!
    for event in unpack_event_page(doc):
        self._event_cache.append(event)
    if self.update_plot:
        self.redraw_plot()
    print(f"Currently has {len(self._event_cache)} datasets")
def page_to_list(name, page):
    """Convert an event/datum page into a list of [name, doc] pairs.

    Unknown names yield an empty list.
    """
    if name == 'event_page':
        return [['event', event]
                for event in event_model.unpack_event_page(page)]
    if name == 'datum_page':
        return [['datum', datum]
                for datum in event_model.unpack_datum_page(page)]
    return []
def flatten_event_page_gen(gen):
    """Convert an event_page generator into an event generator.

    Parameters
    ----------
    gen : generator

    Returns
    -------
    event_generator : generator
    """
    for event_page in gen:
        for event in event_model.unpack_event_page(event_page):
            yield event
def test_round_trip_event_page_with_empty_data():
    # A page whose data/timestamps/filled mappings are empty must survive
    # unpacking into three events and repacking unchanged.
    page = {
        'time': [1, 2, 3],
        'seq_num': [1, 2, 3],
        'uid': ['a', 'b', 'c'],
        'descriptor': 'd',
        'data': {},
        'timestamps': {},
        'filled': {},
    }
    unpacked = list(event_model.unpack_event_page(page))
    assert len(unpacked) == 3
    assert event_model.pack_event_page(*unpacked) == page
def export_subtracted_tiff_series(name, doc, export_dir, my_sample_name, subtractor):
    """Dark-subtract a document and export its grid-scan events as TIFFs.

    Parameters
    ----------
    name : str
        Document name ('start', 'event_page', ...).
    doc : dict
        The document; it is passed through ``subtractor`` before use.
    export_dir : str or Path
        Root directory; a per-sample sub-folder is created under it.
    my_sample_name : str
        Name of the per-sample sub-folder.
    subtractor : callable
        ``(name, doc) -> (name, doc)`` dark-subtraction filter.

    Returns
    -------
    list
        Filenames written; empty for non-'event_page' documents.

    Cleanup: removed large swaths of commented-out code and the unused
    ``out = []`` initializer from the original.
    """
    print(f"export_subtracted_tiff_series name: {name}")
    file_written_list = []
    export_dir_path = Path(export_dir) / Path(my_sample_name)
    export_dir_path.mkdir(parents=True, exist_ok=True)
    name, doc = subtractor(name, doc)
    if name == "event_page":
        for event_doc in unpack_event_page(doc):
            # Only grid-scan events carry a "Grid_Y" key; skip the rest.
            if "Grid_Y" not in event_doc["data"]:
                continue
            out = {
                "image": event_doc["data"]["pe1c_image"],
                "center_Grid_X": (
                    event_doc["data"]["start_Grid_X"]
                    + event_doc["data"]["stop_Grid_X"]
                ) / 2,
                **{
                    k: event_doc["data"][k]
                    for k in (
                        "start_Grid_X",
                        "stop_Grid_X",
                        "Grid_Y",
                        "pe1c_stats1_total",
                    )
                },
            }
            this_filename = my_filename(out, my_sample_name, event_doc["seq_num"])
            file_written_list.append(this_filename)
            print("\nwheee " + str(this_filename))
            imsave(
                str(export_dir_path / this_filename),
                data=out["image"].astype("int32"),
            )
    return file_written_list
def pages_to_list(pages):
    """Convert event/datum pages into lists of [name, doc] pairs."""
    doc_list = []
    for pair in pages:
        kind, page = pair[0], pair[1]
        if kind == 'event_page':
            for event in event_model.unpack_event_page(page):
                doc_list.append(['event', event])
        if kind == 'datum_page':
            for datum in event_model.unpack_datum_page(page):
                doc_list.append(['datum', datum])
    return doc_list
def create_expected(collector):
    '''collects the run data into a `pandas.dataframe` for comparison tests.'''
    stream_names = {}
    events_by_stream = {}
    expected = {}

    def stash(event):
        # Bucket each event under its stream's name.
        stream = stream_names[event['descriptor']]
        events_by_stream.setdefault(stream, []).append(event)

    for name, doc in collector:
        if name == 'descriptor':
            stream_names[doc['uid']] = doc.get('name')
        elif name == 'event':
            stash(doc)
        elif name == 'bulk_events':
            for events in doc.values():
                for event in events:
                    stash(event)
        elif name == 'event_page':
            for event in event_model.unpack_event_page(doc):
                stash(event)

    for stream, event_list in events_by_stream.items():
        columns = {}
        for event in event_list:
            for field in event['data']:
                # Only scalar or 1-D values are tabulated.
                if numpy.asarray(event['data'][field]).ndim in (1, 0):
                    if 'seq_num' not in columns:
                        columns['seq_num'] = []
                        columns['time'] = []
                    columns.setdefault(field, []).append(event['data'][field])
            if columns:
                columns['seq_num'].append(event['seq_num'])
                columns['time'].append(event['time'])
        if columns:
            expected[stream] = pandas.DataFrame(columns)
    return expected
def create_expected(collector, stack_images):
    # Map descriptor uid -> stream name, then bucket events per stream.
    names_by_uid = {}
    events_by_stream = {}

    def bucket(event):
        stream = names_by_uid[event['descriptor']]
        events_by_stream.setdefault(stream, []).append(event)

    for name, doc in collector:
        if name == 'descriptor':
            names_by_uid[doc['uid']] = doc.get('name')
        elif name == 'event':
            bucket(doc)
        elif name == 'bulk_events':
            for events in doc.values():
                for event in events:
                    bucket(event)
        elif name == 'event_page':
            for event in event_model.unpack_event_page(doc):
                bucket(event)

    # NOTE(review): mirrors the original — expected_dict is rebuilt each
    # iteration, so only the LAST stream's expectation is returned.
    for stream_name, event_list in events_by_stream.items():
        if not stack_images:
            expected_dict = {
                stream_name: numpy.ones((10, 10)),
                'baseline': numpy.ones((10, 10)),
            }
        elif len(event_list) == 1:
            expected_dict = {
                stream_name: numpy.ones((10, 10)),
                'baseline': numpy.ones((2, 10, 10)),
            }
        else:
            expected_dict = {
                stream_name: numpy.ones((len(event_list), 10, 10)),
                'baseline': numpy.ones((3, 10, 10)),
            }
    return expected_dict
def event_page(self, doc):
    """Dispatch each event in the page to ``self.event``; return the page."""
    unpacked = event_model.unpack_event_page(doc)
    for single_event in unpacked:
        self.event(single_event)
    return doc
def event_page(self, doc):
    # Delegate page handling to the per-event hook.
    for single in event_model.unpack_event_page(doc):
        self.event(single)
def event_page(self, doc):
    # Unpack the page and route every event through self.event.
    for ev in unpack_event_page(doc):
        self.event(ev)
def event_page(self, doc):
    # Resolve a filename for every event contained in the page.
    for single_event in event_model.unpack_event_page(doc):
        self.get_filename_for_event(event_doc=single_event)
def test_round_trip_pagination():
    """Events and datum documents must survive pack -> unpack unchanged."""
    # Compose a minimal run: one descriptor, one resource, three datums,
    # three events referencing those datums.
    run_bundle = event_model.compose_run()
    desc_bundle = run_bundle.compose_descriptor(data_keys={
        'motor': {
            'shape': [],
            'dtype': 'number',
            'source': '...'
        },
        'image': {
            'shape': [512, 512],
            'dtype': 'number',
            'source': '...',
            'external': 'FILESTORE:'
        }
    }, name='primary')
    res_bundle = run_bundle.compose_resource(spec='TIFF',
                                             root='/tmp',
                                             resource_path='stack.tiff',
                                             resource_kwargs={})
    datum_doc1 = res_bundle.compose_datum(datum_kwargs={'slice': 5})
    datum_doc2 = res_bundle.compose_datum(datum_kwargs={'slice': 10})
    datum_doc3 = res_bundle.compose_datum(datum_kwargs={'slice': 15})
    event_doc1 = desc_bundle.compose_event(data={
        'motor': 0,
        'image': datum_doc1['datum_id']
    }, timestamps={
        'motor': 0,
        'image': 0
    }, filled={'image': False}, seq_num=1)
    event_doc2 = desc_bundle.compose_event(data={
        'motor': 1,
        'image': datum_doc2['datum_id']
    }, timestamps={
        'motor': 0,
        'image': 0
    }, filled={'image': False}, seq_num=1)
    event_doc3 = desc_bundle.compose_event(data={
        'motor': 2,
        'image': datum_doc3['datum_id']
    }, timestamps={
        'motor': 0,
        'image': 0
    }, filled={'image': False}, seq_num=1)
    # Round trip single event -> event_page -> event.
    expected = event_doc1
    actual, = event_model.unpack_event_page(
        event_model.pack_event_page(expected))
    assert actual == expected
    # Round trip two events -> event_page -> events.
    expected = [event_doc1, event_doc2]
    actual = list(
        event_model.unpack_event_page(event_model.pack_event_page(*expected)))
    assert actual == expected
    # Round trip three events -> event_page -> events.
    expected = [event_doc1, event_doc2, event_doc3]
    actual = list(
        event_model.unpack_event_page(event_model.pack_event_page(*expected)))
    assert actual == expected
    # Round trip on docs that don't have a filled key
    unfilled_doc1 = event_doc1
    unfilled_doc1.pop('filled')
    unfilled_doc2 = event_doc2
    unfilled_doc2.pop('filled')
    unfilled_doc3 = event_doc3
    unfilled_doc3.pop('filled')
    expected = [unfilled_doc1, unfilled_doc2, unfilled_doc3]
    actual = list(
        event_model.unpack_event_page(event_model.pack_event_page(*expected)))
    # Pages always carry a 'filled' key; strip it before comparing against
    # the unfilled originals.
    for doc in actual:
        doc.pop('filled')
    assert actual == expected
    # Round trip one datum -> datum_page -> datum.
    expected = datum_doc1
    actual, = event_model.unpack_datum_page(
        event_model.pack_datum_page(expected))
    assert actual == expected
    # Round trip two datum -> datum_page -> datum.
    expected = [datum_doc1, datum_doc2]
    actual = list(
        event_model.unpack_datum_page(event_model.pack_datum_page(*expected)))
    assert actual == expected
    # Round trip three datum -> datum_page -> datum.
    expected = [datum_doc1, datum_doc2, datum_doc3]
    actual = list(
        event_model.unpack_datum_page(event_model.pack_datum_page(*expected)))
    assert actual == expected
    # Check edge case where datum_kwargs are empty.
    datum_doc1 = res_bundle.compose_datum(datum_kwargs={})
    datum_doc2 = res_bundle.compose_datum(datum_kwargs={})
    datum_doc3 = res_bundle.compose_datum(datum_kwargs={})
    # Round trip one datum -> datum_page -> datum.
    expected = datum_doc1
    actual, = event_model.unpack_datum_page(
        event_model.pack_datum_page(expected))
    assert actual == expected
    # Round trip two datum -> datum_page -> datum.
    expected = [datum_doc1, datum_doc2]
    actual = list(
        event_model.unpack_datum_page(event_model.pack_datum_page(*expected)))
    assert actual == expected
    # Round trip three datum -> datum_page -> datum.
    expected = [datum_doc1, datum_doc2, datum_doc3]
    actual = list(
        event_model.unpack_datum_page(event_model.pack_datum_page(*expected)))
    assert actual == expected
# Replay a run from the databroker over ZMQ, expanding each event 5x and
# injecting a synthetic 'extra' column.
zmq_publisher = Publisher(
    f"{args.zmq_host}:{args.zmq_publish_port}",
    prefix=args.zmq_publish_prefix.encode()
)
extra = count()
for name, doc in db[args.run_id].documents():
    print(f"trying to emit {name}")
    doc = dict(doc)  # shallow copy so the cached document isn't mutated
    if name == "descriptor":
        # Advertise the synthetic 'extra' column added to every event below.
        doc["data_keys"]["extra"] = {
            "dtype": "number",
            "source": "computed",
            "units": "arb",
            "shape": [],
        }
        # BUGFIX: this branch now belongs to the single if/elif chain. The
        # original used a separate `if` followed by an independent
        # if/elif/else, so descriptors were published here AND again by the
        # trailing `else` — i.e. emitted twice.
        zmq_publisher("descriptor", doc)
    elif name == "event_page":
        # Emit each unpacked event 5 times with fresh uid/seq_num values.
        for ev in unpack_event_page(doc):
            for j in range(5):
                new_seq = next(extra)
                ev["seq_num"] = new_seq
                ev["uid"] = str(uuid.uuid4())
                ev["data"]["extra"] = 5
                ev["timestamps"]["extra"] = 0
                zmq_publisher("event", ev)
    elif name == "datum_page":
        for datum in unpack_datum_page(doc):
            zmq_publisher("datum", datum)
    else:
        zmq_publisher(name, doc)