Example #1
    def __init__(self, worker_handlers):
        self.worker = Worker(worker_handlers)
        self.next_rid = 0
        self.queue = Notifier([])
        self.queue_modified = asyncio.Event()
        self.timed = Notifier(dict())
        self.timed_modified = asyncio.Event()
Example #2
File: log.py Project: cntnly/artiq
class LogBuffer:
    def __init__(self, depth):
        self.depth = depth
        self.data = Notifier([])

    def log(self, level, source, time, message):
        if len(self.data.read) >= self.depth:
            del self.data[0]
        self.data.append((level, source, time, message))
Example #3
class LogBuffer:
    def __init__(self, depth):
        self.depth = depth
        self.data = Notifier([])

    def log(self, level, source, time, message):
        if len(self.data.read) >= self.depth:
            del self.data[0]
        self.data.append((level, source, time, message))
Example #4
class Log:
    def __init__(self, depth):
        self.depth = depth
        self.data = Notifier([])

    def log(self, rid, message):
        if len(self.data.read) >= self.depth:
            del self.data[0]
        self.data.append((rid, message))
    log.worker_pass_rid = True
Example #5
    def __init__(self, repo_backend, worker_handlers):
        self.repo_backend = repo_backend
        self.worker_handlers = worker_handlers

        self.cur_rev = self.repo_backend.get_head_rev()
        self.repo_backend.request_rev(self.cur_rev)
        self.explist = Notifier(dict())
        self._scanning = False

        self.status = Notifier({"scanning": False, "cur_rev": self.cur_rev})
Example #6
class SimpleHistory:
    def __init__(self, depth):
        self.depth = depth
        self.history = Notifier([])

    def set(self, timestamp, name, value):
        if len(self.history.read) >= self.depth:
            del self.history[0]
        self.history.append((timestamp, name, value))

    def delete(self, timestamp, name):
        if len(self.history.read) >= self.depth:
            del self.history[0]
        self.history.append((timestamp, name))
Example #7
    def __init__(self, ddb):
        self._broadcaster = Notifier(dict())
        self.local = dict()
        self.archive = dict()

        self.ddb = ddb
        self._broadcaster.publish = ddb.update
Example #8
def main():
    args = get_argparser().parse_args()
    init_logger(args)
    if os.name == "nt":
        loop = asyncio.ProactorEventLoop()
        asyncio.set_event_loop(loop)
    else:
        loop = asyncio.get_event_loop()
    atexit.register(lambda: loop.close())

    ddb = FlatFileDB(args.ddb)
    pdb = FlatFileDB(args.pdb)
    rtr = Notifier(dict())
    log = Log(1000)

    if args.git:
        repo_backend = GitBackend(args.repository)
    else:
        repo_backend = FilesystemBackend(args.repository)
    repository = Repository(repo_backend, log.log)
    atexit.register(repository.close)
    repository.scan_async()

    worker_handlers = {
        "get_device": ddb.get,
        "get_parameter": pdb.get,
        "set_parameter": pdb.set,
        "update_rt_results": lambda mod: process_mod(rtr, mod),
        "log": log.log
    }
    scheduler = Scheduler(get_last_rid() + 1, worker_handlers, repo_backend)
    worker_handlers["scheduler_submit"] = scheduler.submit
    scheduler.start()
    atexit.register(lambda: loop.run_until_complete(scheduler.stop()))

    server_control = Server({
        "master_ddb": ddb,
        "master_pdb": pdb,
        "master_schedule": scheduler,
        "master_repository": repository
    })
    loop.run_until_complete(server_control.start(
        args.bind, args.port_control))
    atexit.register(lambda: loop.run_until_complete(server_control.stop()))

    server_notify = Publisher({
        "schedule": scheduler.notifier,
        "devices": ddb.data,
        "parameters": pdb.data,
        "rt_results": rtr,
        "explist": repository.explist,
        "log": log.data
    })
    loop.run_until_complete(server_notify.start(
        args.bind, args.port_notify))
    atexit.register(lambda: loop.run_until_complete(server_notify.stop()))

    loop.run_forever()
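The Notifier objects handed to the Publisher above ("schedule", "devices", "parameters", "rt_results", "explist", "log") are what remote front-ends mirror over the notify port. A minimal client-side sketch, assuming the artiq.protocols.sync_struct Subscriber of the same vintage as these examples (later releases moved it to sipyco.sync_struct); the function name and timing are illustrative, not upstream code:

import asyncio
from artiq.protocols.sync_struct import Subscriber

async def watch_schedule(host, port_notify):
    # target_builder: receives the initial structure sent by the Publisher
    # and returns the local object that later mods are applied to.
    schedule = None
    def init_schedule(x):
        nonlocal schedule
        schedule = x
        return schedule

    subscriber = Subscriber("schedule", init_schedule)
    await subscriber.connect(host, port_notify)
    await asyncio.sleep(5)   # let a few mods arrive
    print(schedule)          # local mirror, kept in sync by the Subscriber
    await subscriber.close()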
Example #9
    def __init__(self, backend, log_fn):
        self.backend = backend
        self.log_fn = log_fn

        self.cur_rev = self.backend.get_head_rev()
        self.backend.request_rev(self.cur_rev)
        self.explist = Notifier(dict())

        self._scanning = False
Example #10
    def __init__(self, persist_file, autosave_period=30):
        self.persist_file = persist_file
        self.autosave_period = autosave_period

        try:
            file_data = pyon.load_file(self.persist_file)
        except FileNotFoundError:
            file_data = dict()
        self.data = Notifier({k: (True, v) for k, v in file_data.items()})
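In this snippet the (True, v) tuples presumably flag entries that should be written back to persist_file. A save step matching that layout, sketched here with pyon.store_file (this method is not part of the original snippet):

    def save(self):
        # write back only the entries flagged as persistent, dropping the flag
        data = {k: v for k, (persist, v) in self.data.read.items() if persist}
        pyon.store_file(self.persist_file, data)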
Example #11
    def __init__(self, ridc, worker_handlers, experiment_db):
        self.notifier = Notifier(dict())

        self._pipelines = dict()
        self._worker_handlers = worker_handlers
        self._experiment_db = experiment_db
        self._terminated = False

        self._ridc = ridc
        self._deleter = Deleter(self._pipelines)
Example #12
    def __init__(self, next_rid, worker_handlers, repo_backend):
        self.notifier = Notifier(dict())

        self._pipelines = dict()
        self._worker_handlers = worker_handlers
        self._repo_backend = repo_backend
        self._terminated = False

        self._ridc = RIDCounter(next_rid)
        self._deleter = Deleter(self._pipelines)
Example #13
    def __init__(self, filename, default_data=None):
        self.filename = filename
        try:
            data = pyon.load_file(self.filename)
        except FileNotFoundError:
            if default_data is None:
                raise
            else:
                data = default_data
        self.data = Notifier(data)
        self.hooks = []
Example #14
    def __init__(self, filename):
        self.filename = filename
        self.data = Notifier(pyon.load_file(self.filename))
        self.hooks = []
Example #15
    def __init__(self, backing_file):
        self.backing_file = backing_file
        self.data = Notifier(pyon.load_file(self.backing_file))
Example #16
    def __init__(self, ddb):
        self.broadcast = Notifier(dict())
        self.local = dict()

        self.ddb = ddb
        self.broadcast.publish = ddb.update
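In Examples #7 and #16 the broadcaster's publish attribute is rebound to ddb.update, so every mutation of the Notifier is delivered to that callable as a mod dictionary rather than to network subscribers. The hook is easy to observe in isolation; a small sketch (the print_mod callback is hypothetical, the mods are the same dictionaries that process_mod in Example #8 applies):

from artiq.protocols.sync_struct import Notifier

def print_mod(mod):
    # every structural change arrives here as a mod dictionary
    print(mod)

n = Notifier(dict())
n.publish = print_mod
n["x"] = 42    # emits a setitem mod
del n["x"]     # emits a delitem mod
print(n.read)  # the plain backing structure, now {} again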
Example #17
class Scheduler:
    def __init__(self, worker_handlers, run_cb):
        self.run_cb = run_cb
        self.worker = Worker(worker_handlers)
        self.next_rid = 0
        self.queue = Notifier([])
        self.queue_modified = asyncio.Event()
        self.timed = Notifier(dict())
        self.timed_modified = asyncio.Event()

    def new_rid(self):
        r = self.next_rid
        self.next_rid += 1
        return r

    def new_trid(self):
        trids = set(range(len(self.timed.read) + 1))
        trids -= set(self.timed.read.keys())
        return next(iter(trids))

    @asyncio.coroutine
    def start(self):
        self.task = asyncio.Task(self._schedule())
        yield from self.worker.create_process()

    @asyncio.coroutine
    def stop(self):
        self.task.cancel()
        yield from asyncio.wait([self.task])
        del self.task
        yield from self.worker.end_process()

    def run_queued(self, run_params, timeout):
        rid = self.new_rid()
        self.queue.append((rid, run_params, timeout))
        self.queue_modified.set()
        return rid

    def cancel_queued(self, rid):
        idx = next(idx for idx, (qrid, _, _) in enumerate(self.queue.read)
                   if qrid == rid)
        if idx == 0:
            # Cannot cancel when already running
            raise NotImplementedError
        del self.queue[idx]

    def run_timed(self, run_params, timeout, next_run):
        if next_run is None:
            next_run = time()
        trid = self.new_trid()
        self.timed[trid] = next_run, run_params, timeout
        self.timed_modified.set()
        return trid

    def cancel_timed(self, trid):
        del self.timed[trid]

    @asyncio.coroutine
    def _run(self, rid, run_params, timeout):
        self.run_cb(rid, run_params)
        try:
            yield from self.worker.run(run_params, timeout)
        except Exception as e:
            print("RID {} failed:".format(rid))
            print(e)
        else:
            print("RID {} completed successfully".format(rid))

    @asyncio.coroutine
    def _run_timed(self):
        while True:
            min_next_run = None
            min_trid = None
            for trid, params in self.timed.read.items():
                if min_next_run is None or params[0] < min_next_run:
                    min_next_run = params[0]
                    min_trid = trid

            now = time()

            if min_next_run is None:
                return None
            min_next_run -= now
            if min_next_run > 0:
                return min_next_run

            next_run, run_params, timeout = self.timed.read[min_trid]
            del self.timed[min_trid]

            rid = self.new_rid()
            self.queue.insert(0, (rid, run_params, timeout))
            yield from self._run(rid, run_params, timeout)
            del self.queue[0]

    @asyncio.coroutine
    def _schedule(self):
        while True:
            next_timed = yield from self._run_timed()
            if self.queue.read:
                rid, run_params, timeout = self.queue.read[0]
                yield from self._run(rid, run_params, timeout)
                del self.queue[0]
            else:
                self.queue_modified.clear()
                self.timed_modified.clear()
                t1 = asyncio.Task(self.queue_modified.wait())
                t2 = asyncio.Task(self.timed_modified.wait())
                try:
                    done, pend = yield from asyncio.wait(
                        [t1, t2],
                        timeout=next_timed,
                        return_when=asyncio.FIRST_COMPLETED)
                except:
                    t1.cancel()
                    t2.cancel()
                    raise
                for t in pend:
                    t.cancel()
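Example #17 uses the pre-Python-3.5 coroutine style (@asyncio.coroutine with yield from), which has since been removed from the standard library. On current Python the start/stop pair would use native coroutines; a sketch of the equivalent, not the upstream code:

    async def start(self):
        # run the scheduling loop as a background task
        self.task = asyncio.ensure_future(self._schedule())
        await self.worker.create_process()

    async def stop(self):
        self.task.cancel()
        await asyncio.wait([self.task])
        del self.task
        await self.worker.end_process()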
Example #18
    def __init__(self, depth):
        self.depth = depth
        self.data = Notifier([])
Example #19
    def __init__(self, backing_file):
        self.backing_file = backing_file
        self.data = Notifier(device_db_from_file(self.backing_file))
Example #20
    def __init__(self, realtime_results):
        self.realtime_data = Notifier({x: [] for x in realtime_results})
        self.data = Notifier(dict())
Example #21
    def __init__(self):

        self.args = args = get_argparser().parse_args()
        init_logger(args)

        self.config = load_config(args, "_server")
        self.lasers = self.config["lasers"].keys()

        for laser in self.lasers:
            self.config["lasers"][laser]["lock_ready"] = False

        # connect to hardware
        self.wlm = WLM(args.simulation)
        self.osas = OSAs(self.config["osas"], args.simulation)

        self.exp_min = self.wlm.get_exposure_min()
        self.exp_max = self.wlm.get_exposure_max()
        self.num_ccds = self.wlm.get_num_ccds()

        if self.config["switch"]["type"] == "internal":
            self.switch = self.wlm.get_switch()
        elif self.config["switch"]["type"] == "leoni":
            self.switch = LeoniSwitch(
                self.config["switch"]["ip"], args.simulation)
        else:
            raise ValueError("Unrecognised switch type: {}".format(
                self.config["switch"]["type"]))

        # measurement queue, processed by self.measurement_task
        self.measurement_ids = task_id_generator()
        self.measurements_queued = asyncio.Event()
        self.queue = []

        self.wake_locks = {laser: asyncio.Event() for laser in self.lasers}

        # schedule initial frequency/osa readings for all lasers
        self.measurements_queued.set()
        for laser in self.lasers:
            self.queue.append({
                "laser": laser,
                "priority": 0,
                "expiry": time.time(),
                "id": next(self.measurement_ids),
                "get_osa_trace": True,
                "done": asyncio.Event()
            })

        # "notify" interface
        self.laser_db = Notifier(self.config["lasers"])
        self.freq_db = Notifier({laser: {
            "freq": None,
            "status": WLMMeasurementStatus.ERROR,
            "timestamp": 0
        } for laser in self.lasers})
        self.osa_db = Notifier({laser: {
            "trace": None,
            "timestamp": 0
        } for laser in self.lasers})

        self.server_notify = Publisher({
            "laser_db": self.laser_db,  # laser settings
            "freq_db": self.freq_db,  # most recent frequency measurements
            "osa_db": self.osa_db  # most recent osa traces
        })

        # "control" interface
        self.control_interface = ControlInterface(self)
        self.server_control = RPCServer({"control": self.control_interface},
                                        allow_parallel=True)

        self.running = False
Example #22
    def __init__(self):
        self.rt = Notifier(dict())
        self.nrt = dict()
        self.store = set()
Example #23
File: log.py Project: cntnly/artiq
    def __init__(self, depth):
        self.depth = depth
        self.data = Notifier([])
Example #24
    def __init__(self):
        self.groups = Notifier(dict())
        self.current_group = "default"
Example #25
    def __init__(self, depth):
        self.depth = depth
        self.history = Notifier([])