Example #1
    def run(self):
        self.provider = StaticProvider(__name__)

        self.fieldNames = SharedPV(initial=NTScalar('as').wrap(
            {'value': ['pid%02x' % i for i in range(31)]}),
                                   handler=DefaultPVHandler(self))

        # 'i' (integer) or 'f' (float)
        self.fieldTypes = SharedPV(initial=NTScalar('aB').wrap(
            {'value': [ord('i')] * 31}),
                                   handler=DefaultPVHandler(self))

        self.fieldMask = SharedPV(initial=NTScalar('I').wrap({'value':
                                                              0x8000}),
                                  handler=DefaultPVHandler(self))

        self.payload = SharedPV(initial=Value(Type([]), {}),
                                handler=DefaultPVHandler(self))

        print('Hosting {:}HPS:FIELDMASK'.format(self.prefix))
        self.provider.add(self.prefix + 'HPS:FIELDNAMES', self.fieldNames)
        self.provider.add(self.prefix + 'HPS:FIELDTYPES', self.fieldTypes)
        self.provider.add(self.prefix + 'HPS:FIELDMASK', self.fieldMask)
        self.provider.add(self.prefix + 'PAYLOAD', self.payload)
        self.update()

        try:
            Server.forever(providers=[self.provider])
        except KeyboardInterrupt:
            print('Server exited')
Example #2
 def start(self):
     L.info("Starting %s Model Service.", self.name)
     pva_server = PVAServer(providers=[{
         f"SIMULACRUM:SYS0:1:{self.name}:LIVE:TWISS":
         self.live_twiss_pv,
         f"SIMULACRUM:SYS0:1:{self.name}:DESIGN:TWISS":
         self.design_twiss_pv,
         f"SIMULACRUM:SYS0:1:{self.name}:LIVE:RMAT":
         self.live_rmat_pv,
         f"SIMULACRUM:SYS0:1:{self.name}:DESIGN:RMAT":
         self.design_rmat_pv,
     }])
     try:
         zmq_task = self.loop.create_task(self.recv())
         pva_refresh_task = self.loop.create_task(self.refresh_pva_table())
         broadcast_task = self.loop.create_task(
             self.broadcast_model_changes())
         jitter_task = self.loop.create_task(self.add_jitter())
         self.loop.run_forever()
     except KeyboardInterrupt:
         L.info("Shutting down Model Service.")
         zmq_task.cancel()
         pva_refresh_task.cancel()
         broadcast_task.cancel()
         jitter_task.cancel()  # also cancel the jitter task so it is not orphaned
         pva_server.stop()
     finally:
         self.loop.close()
         L.info("Model Service shutdown complete.")
Example #3
 def do_init(self):
     super().do_init()
     if self._pva_server is None:
         self.log.info("Starting PVA server")
         self._provider = DynamicProvider("PvaServerComms", self)
         self._pva_server = Server(providers=[self._provider])
         self.log.info("Started PVA server")
Example #4
def main(args):
    provider = StaticProvider('mailbox')  # 'mailbox' is an arbitrary name

    pvs = []  # we must keep a reference in order to keep the Handler from being collected
    for name in args.name:
        pv = SharedPV(initial=types['int'], handler=MailboxHandler())

        provider.add(name, pv)
        pvs.append(pv)

    Server.forever(providers=[provider])

    print('Done')
Example #5
def quickRPCServer(provider, prefix, target,
                   maxsize=20,
                   workers=1,
                   useenv=True, conf=None, isolate=False):
    """Run an RPC server in the current thread

    With workers=1 (the default), calls are handled sequentially in the current thread.
    With workers>1, calls are handled concurrently by a pool of worker threads.
    Requires NTURI style argument encoding.

    :param str provider: A provider name.  Must be unique in this process.
    :param str prefix: PV name prefix.  Along with method names, must be globally unique.
    :param target: The object which is exporting methods.  (use the :func:`rpc` decorator)
    :param int maxsize: Number of pending RPC calls to be queued.
    :param int workers: Number of worker threads (default 1)
    :param useenv: Passed to :class:`~p4p.server.Server`
    :param conf: Passed to :class:`~p4p.server.Server`
    :param isolate: Passed to :class:`~p4p.server.Server`
    """
    from p4p.server import Server
    import time
    queue = ThreadedWorkQueue(maxsize=maxsize, workers=workers)
    provider = NTURIDispatcher(queue, target=target, prefix=prefix, name=provider)
    server = Server(providers=[provider], useenv=useenv, conf=conf, isolate=isolate)
    with server, queue:
        while True:
            time.sleep(10.0)
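
For orientation, a minimal server-side sketch of how quickRPCServer is meant to be driven via the rpc decorator mentioned in its docstring; the Summer class, the 'demo:' prefix, and the provider name are illustrative assumptions, not part of the example above:

from p4p.nt import NTScalar
from p4p.rpc import rpc

class Summer(object):
    @rpc(NTScalar('d'))           # reply is wrapped as an NTScalar double
    def add(self, lhs, rhs):      # exposed as PV 'demo:add', NTURI-encoded args
        return float(lhs) + float(rhs)

# Blocks the current thread, dispatching RPC calls to Summer.add()
quickRPCServer(provider='demo-rpc', prefix='demo:', target=Summer())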
Example #6
 def server(self):
     server = Server(providers=[self.provider, self.rpc_provider])
     with server, self.queue:
         try:
             while True:
                 time.sleep(100)
         except KeyboardInterrupt:
             pass
Example #7
def quickRPCServer(provider,
                   prefix,
                   target,
                   maxsize=20,
                   workers=1,
                   useenv=True,
                   conf=None):
    """Run an RPC server in the current thread

    With workers=1 (the default), calls are handled sequentially in the current thread.
    With workers>1, calls are handled concurrently by a pool of worker threads.
    Requires NTURI style argument encoding.

    :param str provider: A provider name.  Must be unique in this process.
    :param str prefix: PV name prefix.  Along with method names, must be globally unique.
    :param target: The object which is exporting methods.  (use the :func:`rpc` decorator)
    :param int maxsize: Number of pending RPC calls to be queued.
    :param int workers: Number of worker threads (default 1)
    :param useenv: Passed to :class:`~p4p.server.Server`
    :param conf: Passed to :class:`~p4p.server.Server`
    """
    from p4p.server import Server, installProvider, removeProvider
    queue = WorkQueue(maxsize=maxsize)
    installProvider(provider,
                    NTURIDispatcher(queue, target=target, prefix=prefix))
    try:
        threads = []
        server = Server(providers=provider, useenv=useenv, conf=conf)
        try:
            for n in range(1, workers):
                T = Thread(name='%s Worker %d' % (provider, n),
                           target=queue.handle)
                threads.append(T)
                T.start()
            # handle calls in the current thread until KeyboardInterrupt
            queue.handle()
        finally:
            try:
                for T in threads:
                    queue.interrupt()
                    T.join()
            finally:
                # we really need to do this or the process will hang on exit
                server.stop()
    finally:
        removeProvider(provider)
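
A hedged client-side counterpart to the two quickRPCServer variants above, using p4p's rpcproxy helper; the proxy class name and the 'demo:' prefix are assumptions and must match whatever prefix the server was started with:

from p4p.client.thread import Context
from p4p.rpc import rpcproxy, rpccall

@rpcproxy
class SummerProxy(object):
    @rpccall('%sadd')             # '%s' is filled in from the format= argument
    def add(lhs='d', rhs='d'):    # declares NTURI argument names and types
        pass

ctxt = Context('pva')
proxy = SummerProxy(context=ctxt, format='demo:')
print(proxy.add(1, 2))            # invokes PV 'demo:add'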
Example #8
 def start(self):
     L.info("Starting Model Service.")
     pva_server = PVAServer(providers=[{
         "BMAD:SYS0:1:FULL_MACHINE:LIVE:TWISS":
         self.live_twiss_pv,
         "BMAD:SYS0:1:FULL_MACHINE:DESIGN:TWISS":
         self.design_twiss_pv
     }])
     zmq_task = self.loop.create_task(self.recv())
     pva_refresh_task = self.loop.create_task(self.refresh_pva_table())
     broadcast_task = self.loop.create_task(self.broadcast_model_changes())
     try:
         self.loop.run_until_complete(zmq_task)
     except KeyboardInterrupt:
         zmq_task.cancel()
         pva_refresh_task.cancel()
         broadcast_task.cancel()
         pva_server.stop()
Example #9
def main(args):
    lvl = logging.getLevelName(args.log_level)
    if isinstance(lvl, str):
        raise ValueError(
            "Bad level name, must be e.g. ERROR, WARN, INFO, DEBUG")

    logging.basicConfig(level=lvl)

    Q = WorkQueue(maxsize=5)

    GM = 'minimasar.gather.' + args.gather
    _log.debug('Import gatherer "%s"', GM)
    GM = import_module(GM)
    gather = GM.Gatherer(queue=Q)

    _log.debug('Open DB "%s"', args.db)
    db = connect(args.db)

    _log.info("Install provider")
    M = Service(db, gather=gather.gather)

    _log.info("Prepare server")
    S = Server(providers=[
        # provide MASAR style calls through a single PV (args.name)
        MASARDispatcher(Q, target=M, name='masar', channels=[args.name]),
        # provide NTRUI style calls, one PV per method, with a common prefix (args.name+':')
        NTURIDispatcher(Q, target=M, name='masaruri', prefix=args.name + ':'),
    ])

    with S:
        _log.info("Run server")

        try:
            Q.handle()
        except KeyboardInterrupt:
            pass

        _log.info("Stop")
    _log.info("Done")

    db.close()
Example #10
    def run(self):
        self.provider = StaticProvider(__name__)

        self._pv = []
        self._msgHeader = 0

        def addPV(label, cmd):
            pv = SharedPV(initial=NTScalar('I').wrap(0),
                          handler=PVHandler(cmd))
            name = self._name + ':' + label
            print('Registering {:}'.format(name))
            self.provider.add(name, pv)
            self._pv.append(pv)

        addPV('GroupL0Reset'    , self.l0Reset)
        addPV('GroupL0Enable'   , self.l0Enable)
        addPV('GroupL0Disable'  , self.l0Disable)
        addPV('GroupMsgInsert'  , self.msgInsert)
        addPV('PART:0:Master'   , self.master)
        addPV('PART:0:MsgHeader', self.msgHeader)

        with Server(providers=[self.provider]):
            while True:
                time.sleep(1)
Example #11
 def start_server(self) -> None:
     """
     Starts the server and runs until KeyboardInterrupt.
     """
     print("Starting Server...")
     Server.forever(providers=[providers])  # 'providers' is assumed to be defined at module scope
Example #12
    def start_server(self):

        Server.forever(providers=[self.providers])
Example #13
import cothread

from p4p.nt import NTScalar
from p4p.server import Server, StaticProvider
from p4p.server.cothread import SharedPV

class MoveHandler(object):
    """Simulated slow motor; enclosing context reconstructed for this excerpt."""
    def __init__(self):
        self.pos = 0.0
        self.busy = False

    def put(self, pv, op):
        if self.busy:  # reconstructed guard, paired with the finally below
            op.done(error='Move in progress')
            return
        self.busy = True
        try:
            initial = self.pos
            final = op.value()
            delta = abs(final-initial)
            op.info("Moving %s -> %s"%(initial, final))

            while delta>=1.0:
                op.info("Moving %s"%delta)
                delta -= 1.0
                cothread.Sleep(1.0) # move at 1 step per second

            self.pos = final
            op.done()
        finally:
            self.busy = False

pv = SharedPV(nt=NTScalar('d'),
              initial=0.0,
              handler=MoveHandler())

provider = StaticProvider('move') # 'move' is an arbitrary name
provider.add("foo", pv)

with Server(providers=[provider]):
    print('Running')
    try:
        cothread.WaitForQuit()
    except KeyboardInterrupt:
        pass

print('Done')
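
A small client-side sketch (not part of the original example) that exercises the MoveHandler above; it assumes the server is reachable and that 'foo' is the registered PV name:

from p4p.client.thread import Context

ctxt = Context('pva')
# wait=True blocks until the handler calls op.done(); the timeout is raised
# above the default 5 s because the simulated move runs at 1 step per second.
ctxt.put('foo', 10.0, wait=True, timeout=15.0)
print('position now', ctxt.get('foo'))
ctxt.close()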
Example #14
class PvaServerComms(builtin.controllers.ServerComms):
    """A class for communication between pva client and server"""
    def __init__(self, mri: builtin.controllers.AMri) -> None:
        super().__init__(mri)
        self._pva_server = None
        self._provider = None
        self._published: Set[str] = set()
        self._pvs: Dict[str, Dict[Optional[str], SharedPV]] = {}
        # Hooks
        self.register_hooked(ProcessPublishHook, self.publish)

    # Need camelCase as called by p4p Server
    # noinspection PyPep8Naming
    def testChannel(self, channel_name: str) -> bool:
        if channel_name in self._published:
            # Someone is asking for a Block
            return True
        elif "." in channel_name:
            # Someone is asking for the field of a Block
            mri, field = channel_name.rsplit(".", 1)
            return mri in self._published
        else:
            # We don't have it
            return False

    # Need camelCase as called by p4p Server
    # noinspection PyPep8Naming
    def makeChannel(self, channel_name: str, src: str) -> SharedPV:
        # Need to spawn as we take a lock here and in process
        return cothread.CallbackResult(self._make_channel,
                                       channel_name,
                                       src,
                                       callback_timeout=1.0)

    def _make_channel(self, channel_name: str, src: str) -> SharedPV:
        self.log.debug(f"Making PV {channel_name} for {src}")
        if channel_name in self._published:
            # Someone is asking for a Block
            mri = channel_name
            field = None
        elif "." in channel_name:
            # Someone is asking for the field of a Block
            mri, field = channel_name.rsplit(".", 1)
        else:
            raise NameError("Bad channel %s" % channel_name)
        with self._lock:
            pvs = self._pvs.setdefault(mri, {})
            try:
                pv = pvs[field]
            except KeyError:
                assert self.process, "No attached process"
                controller = self.process.get_controller(mri)
                handler = BlockHandler(controller, field)
                # We want any client passing a pvRequest field() to ONLY receive
                # that field. The default behaviour of p4p is to send a masked
                # version of the full structure. The mapperMode option allows us
                # to tell p4p to send a slice instead
                # https://github.com/mdavidsaver/pvDataCPP/blob/master/src/copy/pv/createRequest.h#L76
                pv = SharedPV(handler=handler, options={"mapperMode": "Slice"})
                pvs[field] = pv
            return pv

    def do_init(self):
        super().do_init()
        if self._pva_server is None:
            self.log.info("Starting PVA server")
            self._provider = DynamicProvider("PvaServerComms", self)
            self._pva_server = Server(providers=[self._provider])
            self.log.info("Started PVA server")

    def do_disable(self):
        super().do_disable()
        if self._pva_server is not None:
            self.log.info("Stopping PVA server")
            # Stop the server
            self._pva_server.stop()
            # Disconnect everyone
            self.disconnect_pv_clients(list(self._pvs))
            # Get rid of the server reference so we can't stop again
            self._pva_server = None
            self.log.info("Stopped PVA server")

    @add_call_types
    def publish(self, published: APublished) -> None:
        self._published = set(published)
        if self._pva_server:
            with self._lock:
                mris = [mri for mri in self._pvs if mri not in published]
                # Delete blocks we no longer have
                self.disconnect_pv_clients(mris)

    def disconnect_pv_clients(self, mris: List[str]) -> None:
        """Disconnect anyone listening to any of the given mris"""
        for mri in mris:
            for pv in self._pvs.pop(mri, {}).values():
                # Close pv with force destroy on, this will call
                # onLastDisconnect
                pv.close(destroy=True, sync=True, timeout=1.0)
Example #15
from p4p.nt import NTScalar
from p4p.server import Server
from p4p.server.thread import SharedPV

pv1 = SharedPV(nt=NTScalar('d'), initial=0.0)
pv2 = SharedPV(nt=NTScalar('d'), initial=4.2)
pv3 = SharedPV(nt=NTScalar('d'), initial=24.2346692)


@pv1.put
#@pv2.put
#@pv3.put
def handleput(pv, op):
    print(
        f"You changed my value to: {op.value().raw['value']}, I used to be: {pv.current().raw['value']}"
    )
    pv.post(op.value())
    op.done()


print("Server running....")
Server.forever(providers=[{
    'p4p:pv1': pv1,
    'p4p:pv2': pv2,
    'p4p:pv3': pv3,
}])
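
For completeness, a minimal client sketch (an assumption, not part of the example above) showing the put handler being triggered over the network:

from p4p.client.thread import Context

ctxt = Context('pva')
print(ctxt.get('p4p:pv1'))  # reads the initial value, 0.0
ctxt.put('p4p:pv1', 5.0)    # server prints the old/new values via handleput
ctxt.close()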
Example #16
    P.add_argument('pvname')
    P.add_argument('-g',
                   '--gray',
                   action='store_const',
                   const=True,
                   default=True)
    P.add_argument('-C',
                   '--color',
                   action='store_const',
                   const=False,
                   dest='gray')
    P.add_argument('-d',
                   '--debug',
                   action='store_const',
                   const=logging.DEBUG,
                   default=logging.INFO)
    return P.parse_args()


args = getargs()

logging.basicConfig(level=args.debug)

pv = SharedPV(nt=NTNDArray(), initial=face(gray=args.gray))

print('serving pv:', args.pvname)

Server.forever(providers=[{
    args.pvname: pv,
}])
Example #17
def main():
    global pvdb
    pvdb = {}  # start with empty dictionary
    global prefix
    prefix = ''
    global provider

    parser = argparse.ArgumentParser(prog=sys.argv[0],
                                     description='host PVs for KCU')
    parser.add_argument('-i',
                        '--interval',
                        type=int,
                        help='PV update interval',
                        default=10)
    parser.add_argument('-H',
                        '--hsd',
                        action='store_true',
                        help='HSD node',
                        default=False)
    args = parser.parse_args()

    # Set base
    base = pr.Root(name='KCUr', description='')

    coreMap = rogue.hardware.axi.AxiMemMap('/dev/datadev_0')

    base.add(Top(memBase=coreMap))

    # Start the system
    base.start(
        pollEn=False,
        initRead=False,
        zmqPort=None,
    )

    kcu = base.KCU

    if args.hsd:
        kcu.I2cBus.selectDevice('QSFP0')
        print(kcu.I2cBus.QSFP0.getRxPwr())
    else:
        print(kcu.TDetTiming.getClkRates())
        print(kcu.TDetSemi.getRTT())

    provider = StaticProvider(__name__)

    pvstats = PVStats(
        'DAQ:LAB2:' + socket.gethostname().replace('-', '_').upper(), kcu,
        args.hsd)

    # process PVA transactions
    updatePeriod = args.interval
    with Server(providers=[provider]):
        try:
            pvstats.init()
            while True:
                prev = time.perf_counter()
                pvstats.update()
                curr = time.perf_counter()
                delta = prev + updatePeriod - curr
                #                print('Delta {:.2f}  Update {:.2f}  curr {:.2f}  prev {:.2f}'.format(delta,curr-prev,curr,prev))
                if delta > 0:
                    time.sleep(delta)
        except KeyboardInterrupt:
            pass
Example #18
@addpv.rpc
def adder(pv, op):
    name = op.value().query.name

    with pvs_lock:
        if name in pvs:
            op.done(error="PV already exists")
            return
        # mailbox PV as in Example #4; enclosing context reconstructed
        pv = SharedPV(initial=types['int'], handler=MailboxHandler())
        provider.add(name, pv)
        pvs[name] = pv
        names = list(pvs)  # makes a copy to ensure consistency outside lock

    _log.info("Added mailbox %s", name)
    listpv.post(names)
    op.done()


@delpv.rpc
def remover(pv, op):
    name = op.value().query.name

    with pvs_lock:
        if name not in pvs:
            op.done(error="PV doesn't exist")
            return
        pv = pvs.pop(name)
        provider.remove(name)
        names = list(pvs)  # makes a copy to ensure consistency outside lock

    _log.info("Removed mailbox %s", name)
    listpv.post(names)

    op.done()


Server.forever(providers=[provider])

print('Done')
Example #19
def main():
    global pvdb
    pvdb = {}  # start with empty dictionary
    global prefix
    prefix = ''

    parser = argparse.ArgumentParser(prog=sys.argv[0],
                                     description='host PVs for XPM')

    parser.add_argument('-P',
                        required=True,
                        help='e.g. DAQ:LAB2:XPM:1',
                        metavar='PREFIX')
    parser.add_argument('-v',
                        '--verbose',
                        action='store_true',
                        help='be verbose')
    parser.add_argument('--ip', type=str, required=True, help="IP address")
    parser.add_argument(
        '--db',
        type=str,
        default=None,
        help=
        "save/restore db, for example [https://pswww.slac.stanford.edu/ws-auth/devconfigdb/ws/,configDB,LAB2,PROD]"
    )
    parser.add_argument('-I', action='store_true', help='initialize Cu timing')
    parser.add_argument('-L', action='store_true', help='bypass AMC Locks')
    parser.add_argument('-F',
                        type=float,
                        default=1.076923e-6,
                        help='fiducial period (sec)')
    parser.add_argument('-C',
                        type=int,
                        default=200,
                        help='clocks per fiducial')

    args = parser.parse_args()
    if args.verbose:
        #        logging.basicConfig(level=logging.DEBUG)
        setVerbose(True)

    # Set base
    base = pr.Root(name='AMCc', description='')

    base.add(Top(
        name='XPM',
        ipAddr=args.ip,
        fidPrescale=args.C,
    ))

    # Start the system
    base.start(
        #        pollEn   = False,
        #        initRead = False,
        #        zmqPort  = None,
    )

    xpm = base.XPM
    app = base.XPM.XpmApp

    # Print the AxiVersion Summary
    xpm.AxiVersion.printStatus()

    provider = StaticProvider(__name__)

    lock = Lock()

    pvstats = PVStats(provider, lock, args.P, xpm, args.F)
    #    pvctrls = PVCtrls(provider, lock, name=args.P, ip=args.ip, xpm=xpm, stats=pvstats._groups, handle=pvstats.handle, db=args.db, cuInit=True)
    pvctrls = PVCtrls(provider,
                      lock,
                      name=args.P,
                      ip=args.ip,
                      xpm=xpm,
                      stats=pvstats._groups,
                      handle=pvstats.handle,
                      db=args.db,
                      cuInit=args.I,
                      fidPrescale=args.C,
                      fidPeriod=args.F * 1.e9)
    pvxtpg = None

    # process PVA transactions
    updatePeriod = 1.0
    cycle = 0
    with Server(providers=[provider]):
        try:
            if pvxtpg is not None:
                pvxtpg.init()
            pvstats.init()
            while True:
                prev = time.perf_counter()
                pvstats.update(cycle)
                pvctrls.update(cycle)
                #  We have to delay the startup of some classes
                if cycle == 5:
                    pvxtpg = PVXTpg(provider,
                                    lock,
                                    args.P,
                                    xpm,
                                    xpm.mmcmParms,
                                    cuMode='xtpg'
                                    in xpm.AxiVersion.ImageName.get(),
                                    bypassLock=args.L)
                    pvxtpg.init()
                elif cycle < 5:
                    print('pvxtpg in %d' % (5 - cycle))
                if pvxtpg is not None:
                    pvxtpg.update()
                curr = time.perf_counter()
                delta = prev + updatePeriod - curr
                #                print('Delta {:.2f}  Update {:.2f}  curr {:.2f}  prev {:.2f}'.format(delta,curr-prev,curr,prev))
                if delta > 0:
                    time.sleep(delta)
                cycle += 1
        except KeyboardInterrupt:
            pass
Example #20
            if not self.pv.isOpen():
                _log.info("Open %s", self.count)
                self.pv.open(NT.wrap(self.count))

            else:
                _log.info("Tick %s", self.count)
                self.pv.post(NT.wrap(self.count))
            self.count += 1

    def onLastDisconnect(self, pv):
        _log.info("Last client disconnects")
        # mark in-active, but don't immediately close()
        self.active = False

    def put(self, pv, op):
        # force counter value
        self.count = op.value().value
        op.done()


pv = SharedPV(handler=LazyCounter())

with Server(providers=[{'foo': pv}]):
    print('Running')
    try:
        cothread.WaitForQuit()
    except KeyboardInterrupt:
        pass

print('Done')
Example #21
def main():
    global pvdb
    pvdb = {}     # start with empty dictionary
    global prefix
    prefix = ''

    parser = argparse.ArgumentParser(prog=sys.argv[0], description='host PVs for XPM')

    parser.add_argument('-P', required=True, help='e.g. DAQ:LAB2:XPM:1', metavar='PREFIX')
    parser.add_argument('-v', '--verbose', action='store_true', help='be verbose')
    parser.add_argument('--ip', type=str, required=True, help="IP address" )
    parser.add_argument('--db', type=str, default=None, help="save/restore db, for example [https://pswww.slac.stanford.edu/ws-auth/devconfigdb/ws/,configDB,LAB2,PROD]")

    args = parser.parse_args()
    if args.verbose:
#        logging.basicConfig(level=logging.DEBUG)
        setVerbose(True)

    # Set base
    base = pr.Root(name='AMCc',description='') 

    base.add(Top(
        name   = 'XPM',
        ipAddr = args.ip
    ))
    
    # Start the system
    base.start(
        pollEn   = False,
        initRead = False,
        zmqPort  = None,
    )

    xpm = base.XPM
    app = base.XPM.XpmApp

    # Print the AxiVersion Summary
    xpm.AxiVersion.printStatus()

    provider = StaticProvider(__name__)

    lock = Lock()

    pvstats = PVStats(provider, lock, args.P, xpm)
    pvctrls = PVCtrls(provider, lock, name=args.P, ip=args.ip, xpm=xpm, stats=pvstats._groups, db=args.db)
    pvxtpg  = PVXTpg(provider, lock, args.P, xpm, xpm.mmcmParms, cuMode='xtpg' in xpm.AxiVersion.ImageName.get())

    # process PVA transactions
    updatePeriod = 1.0
    with Server(providers=[provider]):
        try:
            if pvxtpg is not None:
                pvxtpg .init()
            pvstats.init()
            while True:
                prev = time.perf_counter()
                if pvxtpg is not None:
                    pvxtpg .update()
                pvstats.update()
                pvctrls.update()
                curr  = time.perf_counter()
                delta = prev+updatePeriod-curr
#                print('Delta {:.2f}  Update {:.2f}  curr {:.2f}  prev {:.2f}'.format(delta,curr-prev,curr,prev))
                if delta>0:
                    time.sleep(delta)
        except KeyboardInterrupt:
            pass
Example #22
rmat_cols = [('ELEMENT_NAME', 's'),  # leading entries reconstructed from the row-building code below
             ('R11', 'd'), ('R12', 'd'), ('R13', 'd'), ('R14', 'd'),
             ('R15', 'd'), ('R16', 'd'), ('R21', 'd'), ('R22', 'd'),
             ('R23', 'd'), ('R24', 'd'), ('R25', 'd'), ('R26', 'd'),
             ('R31', 'd'), ('R32', 'd'), ('R33', 'd'),
             ('R34', 'd'), ('R35', 'd'), ('R36', 'd'), ('R41', 'd'),
             ('R42', 'd'), ('R43', 'd'), ('R44', 'd'), ('R45', 'd'),
             ('R46', 'd'), ('R51', 'd'), ('R52', 'd'), ('R53', 'd'),
             ('R54', 'd'), ('R55', 'd'), ('R56', 'd'), ('R61', 'd'),
             ('R62', 'd'), ('R63', 'd'), ('R64', 'd'), ('R65', 'd'),
             ('R66', 'd')]
rmat_table = NTTable(rmat_cols)
rows = [{key: rmat_data['value'][key][i]
         for key, _ in rmat_cols}
        for i in range(0, len(rmat_data['value']['ELEMENT_NAME']))]
rmat_vals = rmat_table.wrap(rows)
rmat_pv = SharedPV(nt=rmat_table, initial=rmat_vals)


@twiss_pv.rpc
def twiss_request_handler(pv, op):
    op.done(twiss_vals)


@rmat_pv.rpc
def rmat_request_handler(pv, op):
    op.done(rmat_vals)


print("Starting Model Service Test Server!")
Server.forever(providers=[{
    'MODEL:TWISS:EXTANT:FULLMACHINE': twiss_pv,
    'MODEL:TWISS:DESIGN:FULLMACHINE': twiss_pv,
    'MODEL:RMATS:EXTANT:FULLMACHINE': rmat_pv,
    'MODEL:RMATS:DESIGN:FULLMACHINE': rmat_pv
}])
Example #23
 def forever(self):
     Server.forever(providers=[self.provider])
Example #24
    def main(self):
        cli = Context()

        pvs = {}
        # table of detected "features"
        self.features = pvs[args.output + 'features'] = SharedPV(
            nt=NTTable(columns=[
                ('X', 'd'),
                ('Y', 'd'),
                ('W', 'd'),
                ('H', 'd'),
                ('idx', 'd'),
            ]),
            initial=[])
        # output image (example)
        self.imgOut = pvs[args.output + 'img'] = SharedPV(nt=NTNDArray(),
                                                          initial=np.zeros(
                                                              (0, 0),
                                                              dtype='u1'))
        # display execution time
        self.execTime = pvs[args.output + 'etime'] = SharedPV(
            nt=NTScalar('d', display=True),
            initial={
                'value': 0.0,
                'display.units': 's',
            })
        # background threshold level
        bg = pvs[args.output + 'bg'] = SharedPV(nt=NTScalar('I', display=True),
                                                initial={
                                                    'value': self.bgLvl,
                                                    'display.units': 'px',
                                                })

        @bg.put
        def set_bg(pv, op):
            self.bgLvl = max(1, int(op.value()))
            pv.post(self.bgLvl)
            op.done()

        # image flattening mode
        imode = pvs[args.output + 'imode'] = SharedPV(
            nt=NTEnum(), initial={'choices': [e.name for e in ImageMode]})

        @imode.put
        def set_imode(pv, op):
            self.imode = ImageMode(op.value())
            pv.post(self.imode)
            op.done()

        # separately publish info of largest feature
        self.X = pvs[args.output + 'x'] = SharedPV(nt=NTScalar('d'),
                                                   initial=0.0)
        self.Y = pvs[args.output + 'y'] = SharedPV(nt=NTScalar('d'),
                                                   initial=0.0)
        self.W = pvs[args.output + 'w'] = SharedPV(nt=NTScalar('d'),
                                                   initial=0.0)
        self.H = pvs[args.output + 'h'] = SharedPV(nt=NTScalar('d'),
                                                   initial=0.0)

        print("Output PVs", list(pvs.keys()))

        # subscribe to input image PV and run local server
        with cli.monitor(self.args.input,
                         self.on_image,
                         request='record[pipeline=true,queueSize=2]'), Server(
                             providers=[pvs]):
            # park while work happens in other tasks
            done = threading.Event()
            signal.signal(signal.SIGINT, lambda x, y: done.set())
            done.wait()
Example #25
def getargs():
    from argparse import ArgumentParser
    P = ArgumentParser()
    P.add_argument('video', help='A V4L2 device (e.g. /dev/video0)')
    P.add_argument('pvname')
    return P

args = getargs().parse_args()

pv = SharedPV(nt=NTNDArray(),
              initial=numpy.zeros((0,0), dtype='u1'))
provider = StaticProvider('capture')
provider.add(args.pvname, pv)

# open the capture device, and run the Server
with open(args.video, 'r+b', 0) as F, Server(providers=[provider]):
    caps = v4l.query_capabilities(F.fileno())

    print('capabilities', caps)

    if 'VIDEO_CAPTURE' not in caps['capabilities']:
        print("Not a capture device")
        sys.exit(1)

    idx = -1
    for fmt in v4l.list_formats(F.fileno()):
        print('Supported:', fmt)
        if fmt['pixelformat'] in color._mangle:
            idx = fmt['index']
            # don't break, use last.
            # this assumes gray scale is listed first
Example #26
 def run(self):
     with Server(providers=[self.provider]) as self.server:
         for data in self.read_timetool():
             for name, value in data.items():
                 self.pvs[name].post(value)