def __init__(self, name="scan", parent=None, scan_info=None):
    """Create a scan node, reserving the next run number in Redis.

    name -- base name of the scan; the node name becomes "<name>_<run_number>".
    parent -- optional container whose node becomes the tree root for this scan.
    scan_info -- accepted for interface compatibility; not used here.
    """
    self.__path = None
    self.nodes = dict()
    self.root_node = None if parent is None else parent.node
    cache = client.get_cache(db=1)
    counter_key = "%s_last_run_number" % name
    if parent:
        # Counter is scoped to the parent node's hash when there is one.
        run_number = cache.hincrby(self.root_node.db_name(), counter_key, 1)
    else:
        run_number = cache.incrby(counter_key, 1)
    self.__name = '%s_%d' % (name, run_number)
    self.node = _create_node(self.__name, "scan", parent=self.root_node)
def __init__(self, node_type, name, parent=None, connection=None,
             create=False, **keys):
    """Bind the Redis structures backing a data node.

    When *create* is True the identity fields are persisted and the node
    is attached to *parent*; otherwise an existing node is merely wrapped.
    """
    if connection is None:
        connection = client.get_cache(db=1)
    if parent:
        db_name = '%s:%s' % (parent.db_name, name)
    else:
        db_name = name
    self._data = Struct(db_name, connection=connection)
    self._info = HashObjSetting('%s_info' % db_name, connection=connection)
    self.db_connection = connection
    if create:
        self._data.name = name
        self._data.db_name = db_name
        self._data.node_type = node_type
        if parent:
            self._data.parent = parent.db_name
            parent.add_children(self)
        # Freshly created nodes get a TTL guard; wrapped ones do not.
        self._ttl_setter = _TTL_setter(self.db_name)
    else:
        self._ttl_setter = None
def _get_or_create_node(name, node_type=None, parent=None, connection=None):
    """Return the node *name* under *parent*, creating it when absent."""
    connection = client.get_cache(db=1) if connection is None else connection
    existing = DataNode.exists(name, parent, connection)
    if existing:
        return get_node(existing, connection=connection)
    return _create_node(name, node_type, parent, connection)
def set_ttl(self):
    """Refresh the expiration of this node's Redis keys, then recurse
    up through the parents so the whole chain stays alive."""
    ttl = DataNode.default_time_to_live
    conn = client.get_cache(db=1)
    conn.expire(self.db_name(), ttl)
    self._children.ttl(ttl)
    self._info.ttl(ttl)
    parent = self.parent()
    if parent:
        parent.set_ttl()
def _create_node(name, node_type=None, parent=None, connection=None, **keys):
    """Instantiate and persist a brand new node of *node_type*."""
    conn = client.get_cache(db=1) if connection is None else connection
    return _get_node_object(node_type, name, parent, conn, create=True, **keys)
def get_node(name, node_type=None, parent=None, connection=None):
    """Wrap an existing node; return None when it has been deleted."""
    if connection is None:
        connection = client.get_cache(db=1)
    data = Struct(name, connection=connection)
    if node_type is None:
        # Fall back to the type stored in Redis.
        node_type = data.node_type
        if node_type is None:
            # node has been deleted
            return None
    return _get_node_object(node_type, name, parent, connection)
def get_node(db_name, connection=None):
    """Rebuild a node object from its Redis key; None if it was deleted."""
    connection = client.get_cache(db=1) if connection is None else connection
    data = Struct(db_name, connection=connection)
    if data.name is None:
        # node has been deleted
        return None
    return _get_node_object(data.node_type, db_name, None, connection)
def _get_or_create_node(name, node_type=None, parent=None, connection=None,
                        **keys):
    """Fetch the node if already present in Redis, otherwise create it,
    forwarding any extra keyword arguments to the constructor."""
    if connection is None:
        connection = client.get_cache(db=1)
    db_name = DataNode.exists(name, parent, connection)
    if db_name:
        return get_node(db_name, connection=connection)
    return _create_node(name, node_type, parent, connection, **keys)
def __init__(self, node_type, name, parent=None, connection=None,
             create=False):
    """Bind (or create) the Redis structures backing a data node:
    a Struct for identity, a queue of children and an info hash."""
    if connection is None:
        connection = client.get_cache(db=1)
    if parent:
        db_name = '%s:%s' % (parent.db_name(), name)
    else:
        db_name = name
    self._data = Struct(db_name, connection=connection)
    self._children = QueueSetting('%s_children_list' % db_name,
                                  connection=connection)
    self._info = HashObjSetting('%s_info' % db_name, connection=connection)
    self.db_connection = connection
    if create:
        # Persist identity only for freshly created nodes.
        self._data.name = name
        self._data.db_name = db_name
        self._data.node_type = node_type
        if parent:
            self._data.parent = parent.db_name()
            parent.add_children(self)
def _set_ttl(db_names):
    """Arm the expiration timer on every given Redis key in one
    pipelined round trip."""
    ttl = DataNode.default_time_to_live
    pipe = client.get_cache(db=1).pipeline()
    for db_name in db_names:
        pipe.expire(db_name, ttl)
    pipe.execute()
def exists(name, parent=None, connection=None):
    """Return the fully qualified Redis key of node *name* (prefixed with
    the parent's db_name when a parent is given) if it exists, else None."""
    if connection is None:
        connection = client.get_cache(db=1)
    if parent:
        full_name = '%s:%s' % (parent.db_name, name)
    else:
        full_name = name
    if connection.exists(full_name):
        return full_name
    return None
def _walk_children(parent, index=0):
    # Debug helper: recursively print the data-node tree (Python 2 print
    # statement).  Each line shows the node's Redis key, its display name
    # and the key's remaining TTL, indented by *index* spaces to reflect depth.
    print ' ' * index, parent.db_name, parent.name(), client.get_cache(db=1).ttl(parent.db_name)
    for child in parent.children():
        _walk_children(child, index=index + 1)
def __init__(self, chain, name=None, parent=None, scan_info=None,
             writer=None, data_watch_callback=None):
    """
    This class publishes data and triggers the writer if any.

    chain -- acquisition chain you want to use for this scan.
    name -- scan name; if None the default name "scan" is used.
    parent -- the parent is the root node of the data tree.
        Usually the parent is a Container like a session, sample,
        experiment...  i.e: parent = Container('eh3')
    scan_info -- should be the scan parameters as a dict
    writer -- is the final file writer (hdf5, csv, spec file...)
    data_watch_callback -- a callback which can follow the data status
        of the scan.  This callback is usually used to display the
        scan status.  The callback will get:
            - data_event : a dict with Acq(Device/Master) as key and a
              set of signals as values
            - nodes : a dict with Acq(Device/Master) as key and the
              associated data node as value
            - info : dictionary which contains the current scan state...
        If the callback is a class and has a method **on_state**, it
        will be called on each scan transition state.  The return of
        this method will activate/deactivate the calling of the
        callback during this stage.
    """
    if parent is None:
        self.root_node = None
    else:
        if isinstance(parent, DataNodeContainer):
            self.root_node = parent
        else:
            raise ValueError(
                "parent must be a DataNodeContainer object, or None")
    self._nodes = dict()
    self._writer = writer
    name = name if name else "scan"
    # Run numbers are monotonically increasing counters kept in Redis,
    # scoped to the parent node's hash when there is a parent.
    if parent:
        key = self.root_node.db_name
        run_number = client.get_cache(db=1).hincrby(
            key, "%s_last_run_number" % name, 1)
    else:
        run_number = client.get_cache(db=1).incrby(
            "%s_last_run_number" % name, 1)
    self.__name = '%s_%d' % (name, run_number)
    self._node = _create_node(self.__name, "scan", parent=self.root_node)
    if scan_info is not None:
        # Enrich the caller-provided scan_info with identity/timing data
        # and publish it on the node's info hash.
        scan_info['scan_nb'] = run_number
        scan_info['start_time'] = self._node._data.start_time
        scan_info['start_time_str'] = self._node._data.start_time_str
        scan_info['start_time_stamp'] = self._node._data.start_time_stamp
        self._node._info.update(dict(scan_info))
    self._data_watch_callback = data_watch_callback
    self._data_events = dict()
    if data_watch_callback is not None:
        if not callable(data_watch_callback):
            raise TypeError("data_watch_callback needs to be callable")
        data_watch_callback_event = gevent.event.Event()
        data_watch_callback_done = gevent.event.Event()

        def trig(*args):
            data_watch_callback_event.set()

        self._data_watch_running = False
        # NOTE(review): weakref.proxy(self, trig) registers *trig* as the
        # dead-reference callback, i.e. the watcher task is woken when the
        # scan object is garbage collected -- confirm this is intended.
        self._data_watch_task = gevent.spawn(Scan._data_watch,
                                             weakref.proxy(self, trig),
                                             data_watch_callback_event,
                                             data_watch_callback_done)
        self._data_watch_callback_event = data_watch_callback_event
        self._data_watch_callback_done = data_watch_callback_done
    else:
        self._data_watch_task = None
    self._acq_chain = chain
    self._scan_info = scan_info if scan_info is not None else dict()
    self._scan_info['node_name'] = self._node.db_name
    self._state = self.IDLE_STATE
def exists(name, parent=None, connection=None):
    """Return the fully qualified Redis key of node *name* (prefixed with
    the parent's db_name() when a parent is given) if it exists, else None."""
    if connection is None:
        connection = client.get_cache(db=1)
    if parent:
        full_name = '%s:%s' % (parent.db_name(), name)
    else:
        full_name = name
    if connection.exists(full_name):
        return full_name
    return None
def _create_node(name, node_type=None, parent=None, connection=None):
    """Instantiate and persist a brand new node of *node_type*."""
    conn = client.get_cache(db=1) if connection is None else connection
    return _get_node_object(node_type, name, parent, conn, create=True)
def _walk_children(parent, index=0):
    # Debug helper: recursively print the data-node tree (Python 2 print
    # statement).  Each line shows the node's Redis key, its display name
    # and the key's remaining TTL, indented by *index* spaces to reflect depth.
    print ' ' * index, parent.db_name(), parent.name(), client.get_cache(db=1).ttl(parent.db_name())
    for child in parent.children():
        _walk_children(child, index=index + 1)