def test_data_key():
    """Filtering run headers by data_key, optionally bounded by start_time."""
    early_uid = insert_run_start(time=100.0, scan_id=1, owner="nedbrainard",
                                 beamline_id="example", uid=str(uuid.uuid4()))
    late_uid = insert_run_start(time=200.0, scan_id=2, owner="nedbrainard",
                                beamline_id="example", uid=str(uuid.uuid4()))
    early_rs, = find_run_starts(uid=early_uid)
    late_rs, = find_run_starts(uid=late_uid)
    # Both runs share the same two data keys.
    shared_keys = {name: {"source": "_", "dtype": "number"}
                   for name in ("fork", "spoon")}
    insert_descriptor(run_start=early_uid, data_keys=shared_keys,
                      time=100.0, uid=str(uuid.uuid4()))
    insert_descriptor(run_start=late_uid, data_keys=shared_keys,
                      time=200.0, uid=str(uuid.uuid4()))
    everything = db(data_key="fork")
    recent_only = db(data_key="fork", start_time=150)
    assert len(everything) == 2
    assert len(recent_only) == 1
    # Only the later run starts after t=150.
    assert recent_only[0]["start"]["uid"] == str(late_rs.uid)
def test_document_funcs_for_smoke():
    """Smoke test: every document's repr/str/html hooks run without raising.

    Uses the module-level ``run_start_uid``, ``descriptor_uid`` and
    ``run_stop_uid`` fixtures; the only failure mode is an exception from
    one of the rendering methods.
    """
    # NOTE: the original ``global run_start_uid, descriptor_uid`` was a
    # misleading no-op — these names are only read here, never assigned
    # (and run_stop_uid was read without any global declaration anyway).
    # TODO this next line will break once NSLS-II/metadatastore#142 is merged
    run_start, = find_run_starts(uid=run_start_uid)
    descriptors = list(find_event_descriptors(uid=descriptor_uid))
    run_stop, = find_run_stops(uid=run_stop_uid)
    documents = [run_start, run_stop]
    documents.extend(descriptors)
    attrs = ['__repr__', '__str__', '_repr_html_']
    # Invoke each rendering method on each document; success == no exception.
    for doc, attr in product(documents, attrs):
        getattr(doc, attr)()
def test_data_key():
    """find_headers filters on data_key, optionally bounded by start_time."""
    rs1_uid = insert_run_start(time=100., scan_id=1, owner='nedbrainard',
                               beamline_id='example',
                               beamline_config=insert_beamline_config(
                                   {}, time=0.))
    rs2_uid = insert_run_start(time=200., scan_id=2, owner='nedbrainard',
                               beamline_id='example',
                               beamline_config=insert_beamline_config(
                                   {}, time=0.))
    rs1, = find_run_starts(uid=rs1_uid)
    rs2, = find_run_starts(uid=rs2_uid)
    data_keys = {'fork': {'source': '_', 'dtype': 'number'},
                 'spoon': {'source': '_', 'dtype': 'number'}}
    # The descriptor uid returned here was never used; the dead
    # ``evd1_uid`` local has been dropped.
    insert_event_descriptor(run_start=rs1_uid, data_keys=data_keys,
                            time=100.)
    insert_event_descriptor(run_start=rs2_uid, data_keys=data_keys,
                            time=200.)
    result1 = db.find_headers(data_key='fork')
    result2 = db.find_headers(data_key='fork', start_time=150)
    assert_equal(len(result1), 2)
    assert_equal(len(result2), 1)
    # Only the run started at t=200 passes the start_time=150 filter.
    actual = result2[0].run_start_uid
    assert_equal(actual, str(rs2.uid))
def test_data_key():
    """Headers are discoverable by data_key, with an optional time bound."""
    first_uid = insert_run_start(time=100., scan_id=1, owner='nedbrainard',
                                 beamline_id='example',
                                 uid=str(uuid.uuid4()))
    second_uid = insert_run_start(time=200., scan_id=2, owner='nedbrainard',
                                  beamline_id='example',
                                  uid=str(uuid.uuid4()))
    first_rs, = find_run_starts(uid=first_uid)
    second_rs, = find_run_starts(uid=second_uid)
    # Identical key set for both runs.
    keys = {name: {'source': '_', 'dtype': 'number'}
            for name in ('fork', 'spoon')}
    for start_uid, timestamp in ((first_uid, 100.), (second_uid, 200.)):
        insert_descriptor(run_start=start_uid, data_keys=keys,
                          time=timestamp, uid=str(uuid.uuid4()))
    all_hits = db(data_key='fork')
    late_hits = db(data_key='fork', start_time=150)
    assert len(all_hits) == 2
    assert len(late_hits) == 1
    # The surviving hit must be the run started at t=200.
    assert late_hits[0]['start']['uid'] == str(second_rs.uid)
def __getitem__(cls, key):
    """Look up run Header(s) by scan_id, uid, recency slice, or a list of keys.

    Dispatches on the type of ``key``:

    - ``slice``    -> negative slice into past runs (``[-5:]`` etc.);
                      returns a ``Headers`` collection.
    - ``int > -1`` -> treated as a scan_id; returns a single ``Header``.
    - ``int < 0``  -> the Nth most recent run; returns a single ``Header``.
    - ``str``      -> a full or partial uid; returns a single ``Header``.
    - other iterable -> recurses on each element; returns ``Headers``.

    Raises ValueError for malformed keys or no/ambiguous match, and
    IndexError when asking further back than there are runs.
    """
    if isinstance(key, slice):
        # Interpret key as a slice into previous scans.
        # Both endpoints must be negative: -1 is the most recent run.
        if key.start is not None and key.start > -1:
            raise ValueError("Slices must be negative. The most recent "
                             "run is referred to as -1.")
        if key.stop is not None and key.stop > -1:
            raise ValueError("Slices must be negative. The most recent "
                             "run is referred to as -1.")
        if key.stop is not None:
            stop = -key.stop
        else:
            stop = None
        # An open start (e.g. [:-5]) would require fetching every run ever.
        if key.start is None:
            raise ValueError("Cannot slice infinitely into the past; "
                             "the result could become too large.")
        start = -key.start
        # Fetch the last `start` runs, then re-slice in positive index space.
        result = list(find_last(start))[stop::key.step]
        header = Headers([Header.from_run_start(h) for h in result])
    elif isinstance(key, int):
        if key > -1:
            # Interpret key as a scan_id.
            gen = find_run_starts(scan_id=key)
            try:
                result = next(gen)  # most recent match
            except StopIteration:
                raise ValueError("No such run found.")
            header = Header.from_run_start(result)
        else:
            # Interpret key as the Nth last scan: consume the generator
            # -key times so `result` ends up holding the Nth-last run.
            gen = find_last(-key)
            for i in range(-key):
                try:
                    result = next(gen)
                except StopIteration:
                    # i successful next() calls means only i runs exist.
                    raise IndexError(
                        "There are only {0} runs.".format(i))
            header = Header.from_run_start(result)
    elif isinstance(key, six.string_types):
        # Interpret key as a uid (or the first several characters of one).
        # First try searching as if we have the full uid.
        results = list(find_run_starts(uid=key))
        if len(results) == 0:
            # No dice? Try searching as if we have a partial uid,
            # matching any uid that starts with `key`.
            gen = find_run_starts(uid={'$regex': '{0}.*'.format(key)})
            results = list(gen)
        if len(results) < 1:
            raise ValueError("No such run found.")
        if len(results) > 1:
            raise ValueError("That partial uid matches multiple runs. "
                             "Provide more characters.")
        result, = results
        header = Header.from_run_start(result)
    elif isinstance(key, Iterable):
        # Interpret key as a list of several keys. If it is a string
        # we will never get this far (the str branch above catches it).
        return Headers([cls.__getitem__(k) for k in key])
    else:
        raise ValueError("Must give an integer scan ID like [6], a slice "
                         "into past scans like [-5], [-5:], or [-5:-9:2], "
                         "a list like [1, 7, 13], or a (partial) uid "
                         "like ['a23jslk'].")
    return header
def find_headers(cls, **kwargs):
    """Given search criteria, find Headers describing runs.

    This function returns a list of dictionary-like objects encapsulating
    the metadata for a run -- start time, instruments used, and so on.
    In addition to the Parameters below, advanced users can specify
    arbitrary queries that are passed through to mongodb.

    Parameters
    ----------
    start_time : time-like, optional
        Include Headers for runs started after this time. Valid
        "time-like" representations are:
            - float timestamps (seconds since 1970), such as time.time()
            - '2015'
            - '2015-01'
            - '2015-01-30'
            - '2015-03-30 03:00:00'
            - Python datetime objects, such as datetime.datetime.now()
    stop_time : time-like, optional
        Include Headers for runs started before this time. See
        `start_time` above for examples.
    beamline_id : str, optional
        String identifier for a specific beamline
    project : str, optional
        Project name
    owner : str, optional
        The username of the logged-in user when the scan was performed
    scan_id : int, optional
        Integer scan identifier
    uid : str, optional
        Globally unique id string provided to metadatastore
    _id : str or ObjectId, optional
        The unique id generated by mongo
    data_key : str, optional
        The alias (e.g., 'motor1') or PV identifier of data source

    Returns
    -------
    data : list
        Header objects

    Examples
    --------
    >>> find_headers(start_time='2015-03-05', stop_time='2015-03-10')
    >>> find_headers(data_key='motor1')
    >>> find_headers(data_key='motor1', start_time='2015-03-05')
    """
    data_key = kwargs.pop('data_key', None)
    run_start = find_run_starts(**kwargs)
    if data_key is not None:
        # Keep only runs with at least one descriptor declaring data_key.
        node_name = 'data_keys.{0}'.format(data_key)
        query = {node_name: {'$exists': True}}
        descriptors = []
        for rs in run_start:
            descriptors.extend(find_event_descriptors(run_start=rs,
                                                      **query))
        # Deduplicate run starts while preserving first-seen order.
        # A set gives O(1) membership checks; the original deque was
        # O(n) per check, making this loop quadratic overall.
        seen_uids = set()
        result = []
        for descriptor in descriptors:
            rs = descriptor.run_start
            if rs.uid not in seen_uids:
                seen_uids.add(rs.uid)
                result.append(rs)
        run_start = result
    return Headers([Header.from_run_start(rs) for rs in run_start])