Code Example #1
File: control_cc.py  Project: blazetopher/pyon
def main():
    parser = argparse.ArgumentParser(description="CC Control script")
    parser.add_argument("pidfile", help="pidfile to use. If not specified, uses the first one found.")
    parser.add_argument("command", help="command to send to the container agent", choices=IContainerAgent.names())
    parser.add_argument("commandargs", metavar="arg", nargs="*", help="arguments to the command being sent")

    opts = parser.parse_args()

    pidfile = opts.pidfile
    if not pidfile:
        raise Exception("No pidfile specified")

    parms = {}
    with open(pidfile, "r") as pf:
        parms = msgpack.loads(pf.read())

    assert parms, "No content in pidfile"

    node, ioloop = make_node(parms["messaging"])
    cc = RPCClient(node=node, name=(parms["container-xp"], parms["container-agent"]))

    # make a manual call - this is to avoid having to have the IonObject for the call
    methdefs = [x[1] for x in IContainerAgent.namesAndDescriptions() if x[0] == opts.command]
    assert len(methdefs) == 1

    arg_names = methdefs[0].positional  # ('name', 'module', 'cls', 'config')
    msg_args = msgpack.dumps(
        dict(zip(arg_names, opts.commandargs))
    )  # ('name', <usrinp1>, 'cls', <usrinp2>) -> { 'name' : <usrinp1>, 'cls': <usrinp2> }
    retval = cc.request(msg_args, op=opts.command)

    print "Returned", retval
    node.client.close()
Code Example #2
File: control_cc.py  Project: klawande-cci/scioncc
def main():
    parser = argparse.ArgumentParser(description="ScionCC Control")
    parser.add_argument(
        "pidfile",
        help="pidfile to use. If not specified, uses the first one found.")
    parser.add_argument("command",
                        help="command to send to the container agent",
                        choices=IContainerAgent.names())
    parser.add_argument("commandargs",
                        metavar="arg",
                        nargs="*",
                        help="arguments to the command being sent")

    opts = parser.parse_args()

    pidfile = opts.pidfile
    if not pidfile:
        raise Exception("No pidfile specified")

    parms = {}
    with open(pidfile, 'r') as pf:
        parms = msgpack.loads(pf.read())

    assert parms, "No content in pidfile"

    bootstrap_pyon()

    node, ioloop = make_node(parms['messaging'])
    node.setup_interceptors(CFG.interceptor)
    cc = ContainerAgentClient(node=node,
                              to_name=(parms['container-xp'],
                                       parms['container-agent']))

    # make a manual call - this is to avoid having to have the IonObject for the call
    methdefs = [
        x[1] for x in IContainerAgent.namesAndDescriptions()
        if x[0] == opts.command
    ]
    assert len(methdefs) == 1

    arg_names = methdefs[0].positional  # ('name', 'module', 'cls', 'config')
    msg_args = dict(
        zip(arg_names, opts.commandargs)
    )  # ('name', <usrinp1>, 'cls', <usrinp2>) -> { 'name' : <usrinp1>, 'cls': <usrinp2> }
    retval = cc.request(msg_args, op=opts.command)

    # special case: status
    if opts.command == "status":
        statstr = retval
        print "Status:", statstr

        if statstr != "RUNNING":
            node.client.close()
            sys.exit(2)
    else:
        print "Returned", retval

    node.client.close()
Code Example #3
File: exchange.py  Project: swarbhanu/pyon
    def start(self):
        log.debug("ExchangeManager.start")

        total_count = 0

        def handle_failure(name, node):
            log.warn("Node %s could not be started", name)
            node.ready.set()        # let it fall out below

        # Establish connection(s) to broker
        for name, cfgkey in CFG.container.messaging.server.iteritems():
            if not cfgkey:
                continue

            if cfgkey not in CFG.server:
                raise ExchangeManagerError("Config key %s (name: %s) (from CFG.container.messaging.server) not in CFG.server" % (cfgkey, name))

            total_count += 1
            log.debug("Starting connection: %s", name)

            # start it with a zero timeout so it comes right back to us
            try:
                node, ioloop = messaging.make_node(CFG.server[cfgkey], name, 0)

                # install a finished handler directly on the ioloop just for this startup period
                fail_handle = lambda _: handle_failure(name, node)
                ioloop.link(fail_handle)

                # wait for the node ready event, with a large timeout just in case
                node_ready = node.ready.wait(timeout=15)

                # remove the finished handler, we don't care about it here
                ioloop.unlink(fail_handle)

                # only add to our list if we started successfully
                if not node.running:
                    ioloop.kill()      # make sure ioloop dead
                else:
                    self._nodes[name]   = node
                    self._ioloops[name] = ioloop

            except socket.error as e:
                log.warn("Could not start connection %s due to socket error, continuing", name)

        fail_count = total_count - len(self._nodes)
        if fail_count > 0 or total_count == 0:
            if fail_count == total_count:
                raise ExchangeManagerError("No node connection was able to start (%d nodes attempted, %d nodes failed)" % (total_count, fail_count))

            log.warn("Some nodes could not be started, ignoring for now")   # @TODO change when ready

        self._transport = AMQPTransport.get_instance()

        # load interceptors into each
        map(lambda x: x.setup_interceptors(CFG.interceptor), self._nodes.itervalues())

        log.debug("Started %d connections (%s)", len(self._nodes), ",".join(self._nodes.iterkeys()))
Code Example #4
File: cc.py  Project: wfrench/pyon
    def start(self):
        log.debug("Container starting...")

        # Check if this UNIX process already runs a Container.
        self.pidfile = "cc-pid-%d" % os.getpid()
        if os.path.exists(self.pidfile):
            raise Exception("Container.on_start(): Container is a singleton per UNIX process. Existing pid file found: %s" % self.pidfile)

        # write out a PID file containing our agent messaging name
        with open(self.pidfile, 'w') as f:
            from pyon.core.bootstrap import get_sys_name
            pid_contents = {'messaging': dict(CFG.server.amqp),
                            'container-agent': self.name,
                            'container-xp': get_sys_name() }
            f.write(msgpack.dumps(pid_contents))
            atexit.register(self._cleanup_pid)

        # set up abnormal termination handler for this container
        def handl(signum, frame):
            try:
                self._cleanup_pid()     # cleanup the pidfile first
                self.quit()             # now try to quit - will not error on second cleanup pidfile call
            finally:
                signal.signal(signal.SIGTERM, self._normal_signal)
                os.kill(os.getpid(), signal.SIGTERM)
        self._normal_signal = signal.signal(signal.SIGTERM, handl)

        self.datastore_manager.start()

        # Instantiate Directory and self-register
        self.directory = Directory(self.datastore_manager)
        self.directory.register("/Containers", self.id, cc_agent=self.name)

        # Create other repositories to make sure they are there and clean if needed
        self.datastore_manager.get_datastore("resources", DataStore.DS_PROFILE.RESOURCES)
        self.datastore_manager.get_datastore("objects", DataStore.DS_PROFILE.OBJECTS)
        self.state_repository = StateRepository(self.datastore_manager)
        self.event_repository = EventRepository(self.datastore_manager)

        # Start ExchangeManager. In particular establish broker connection
        self.ex_manager.start()

        # TODO: Move this in ExchangeManager - but there is an error
        self.node, self.ioloop = messaging.make_node() # TODO: shortcut hack

        self.proc_manager.start()

        self.app_manager.start()

        # Start the CC-Agent API
        rsvc = ProcessRPCServer(node=self.node, name=self.name, service=self, process=self)

        # Start an ION process with the right kind of endpoint factory
        proc = self.proc_manager.proc_sup.spawn((CFG.cc.proctype or 'green', None), listener=rsvc)
        self.proc_manager.proc_sup.ensure_ready(proc)

        log.info("Container started, OK.")
Code Example #5
File: exchange.py  Project: ooici-dm/pyon
    def start(self):
        log.debug("ExchangeManager starting ...")

        # Establish connection to broker
        # @TODO: raise error if sux
        node, ioloop = messaging.make_node()

        self._transport = AMQPTransport.get_instance()
        self._client = self._get_channel(node)

        # Declare root exchange
        #self.default_xs.ensure_exists(self._get_channel())
        return node, ioloop
Code Example #6
File: cc.py  Project: blazetopher/pyon
    def start(self):
        log.debug("Container starting...")

        # Check if this UNIX process already runs a Container.
        self.pidfile = "cc-pid-%d" % os.getpid()
        if os.path.exists(self.pidfile):
            raise Exception("Container.on_start(): Container is a singleton per UNIX process. Existing pid file found: %s" % self.pidfile)

        # write out a PID file containing our agent messaging name
        with open(self.pidfile, 'w') as f:
            from pyon.core.bootstrap import sys_name
            pid_contents = {'messaging': dict(CFG.server.amqp),
                            'container-agent': self.name,
                            'container-xp': sys_name }
            f.write(msgpack.dumps(pid_contents))
            atexit.register(self._cleanup_pid)

        # set up abnormal termination handler for this container
        def handl(signum, frame):
            try:
                self._cleanup_pid()     # cleanup the pidfile first
                self.quit()             # now try to quit - will not error on second cleanup pidfile call
            finally:
                signal.signal(signal.SIGTERM, self._normal_signal)
                os.kill(os.getpid(), signal.SIGTERM)
        self._normal_signal = signal.signal(signal.SIGTERM, handl)


        # Start ExchangeManager. In particular establish broker connection
        self.ex_manager.start()

        # TODO: Move this in ExchangeManager - but there is an error
        self.node, self.ioloop = messaging.make_node() # TODO: shortcut hack


        # Instantiate Directory singleton and self-register
        # TODO: At this point, there is no special config override
        self.directory = Directory()
        self.directory.register("/Containers", self.id, cc_agent=self.name)

        self.proc_manager.start()

        self.app_manager.start()

        # Start the CC-Agent API
        rsvc = ProcessRPCServer(node=self.node, name=self.name, service=self, process=self)

        # Start an ION process with the right kind of endpoint factory
        self.proc_manager.proc_sup.spawn((CFG.cc.proctype or 'green', None), listener=rsvc)
        rsvc.get_ready_event().wait(timeout=10)   # @TODO: no hardcode
        log.info("Container started, OK.")
Code Example #7
File: processblame_plugin.py  Project: pkediyal/pyon
    def begin(self):
        from interface.services.cei.iprocess_dispatcher_service import ProcessDispatcherServiceClient
        from pyon.net.messaging import make_node
        from pyon.core import bootstrap
        from pyon.public import CFG

        self.base_pids = []
        self.rpc_timeout = 2
        self._procs_by_test = {}
        if not bootstrap.pyon_initialized:
            bootstrap.bootstrap_pyon()
        self.node, self.ioloop = make_node()
        self.node.setup_interceptors(CFG.interceptor)
        self.pd_cli = ProcessDispatcherServiceClient(node=self.node)
Code Example #8
File: test_messaging.py  Project: jamie-cyber1/pyon
    def test_make_node(self, gevmock):
        connection_params = { 'username': sentinel.username,
                              'password': sentinel.password,
                              'host': str(sentinel.host),
                              'vhost': sentinel.vhost,
                              'port': 2111 }

        # make a mocked method for PyonSelectConnection to be patched in - we need a way of simulating the on_connection_open callback
        cm = Mock()
        def select_connection(params, cb):
            cb(cm)
            return sentinel.connection

        with patch('pyon.net.messaging.PyonSelectConnection', new=select_connection):
            node, ilp = make_node(connection_params, name=sentinel.name)

        self.assertEquals(ilp, sentinel.ioloop_process)
        gevmock.assert_called_once_with(ioloop, sentinel.connection, name=sentinel.name)
Code Example #9
File: control_cc.py  Project: scion-network/scioncc
def main():
    parser = argparse.ArgumentParser(description="ScionCC Control")
    parser.add_argument("pidfile", help="pidfile to use. If not specified, uses the first one found.")
    parser.add_argument("command", help="command to send to the container agent", choices=IContainerAgent.names())
    parser.add_argument("commandargs", metavar="arg", nargs="*", help="arguments to the command being sent")

    opts = parser.parse_args()

    pidfile = opts.pidfile
    if not pidfile:
        raise Exception("No pidfile specified")

    parms = {}
    with open(pidfile, 'r') as pf:
        parms = msgpack.loads(pf.read())

    assert parms, "No content in pidfile"

    bootstrap_pyon()

    node, ioloop = make_node(parms['messaging'])
    node.setup_interceptors(CFG.interceptor)
    cc = ContainerAgentClient(node=node, to_name=(parms['container-xp'], parms['container-agent']))

    # make a manual call - this is to avoid having to have the IonObject for the call
    methdefs = [x[1] for x in IContainerAgent.namesAndDescriptions() if x[0] == opts.command]
    assert len(methdefs) == 1

    arg_names = methdefs[0].positional                                  # ('name', 'module', 'cls', 'config')
    msg_args = dict(zip(arg_names, opts.commandargs))    # ('name', <usrinp1>, 'cls', <usrinp2>) -> { 'name' : <usrinp1>, 'cls': <usrinp2> }
    retval = cc.request(msg_args, op=opts.command)

    # special case: status
    if opts.command == "status":
        statstr = retval
        print "Status:", statstr

        if statstr != "RUNNING":
            node.client.close()
            sys.exit(2)
    else:
        print "Returned", retval

    node.client.close()
Code Example #10
File: connection.py  Project: nimbusproject/ceiclient
    def __init__(self, broker, username, password, vhost='/',
            sysname=None, timeout=None, port=5672, ssl=False):

        try:
            from pyon.net.messaging import make_node
            from pyon.net.endpoint import RPCClient
            from pyon.util.containers import get_default_sysname
            import pyon.core.exception as pyonexception
        except ImportError:
            raise CeiClientError("Pyon isn't available in your environment")

        self.pyonexception = pyonexception
        self.RPCClient = RPCClient

        self.connection_params = {
            'host': broker,
            'username': username,
            'password': password,
            'vhost': vhost,
            'port': port
        }
        self.timeout = timeout

        self.sysname = sysname or get_default_sysname()

        node, ioloop = make_node(connection_params=self.connection_params,
                timeout=self.timeout)

        interceptor_config = {
            'interceptors': {
                'encode': {
                    'class': 'pyon.core.interceptor.encode.EncodeInterceptor'
                }
            },
            'stack': {
                'message_incoming': ['encode'],
                'message_outgoing': ['encode']
            }
        }
        node.setup_interceptors(interceptor_config)

        self.pyon_node = node
        self.pyon_ioloop = ioloop
Code Example #11
File: processblame_plugin.py  Project: j2project/pyon
    def begin(self):
        from interface.services.cei.iprocess_dispatcher_service import ProcessDispatcherServiceClient
        from pyon.net.messaging import make_node
        from pyon.core import bootstrap
        from pyon.public import CFG

        self.rpc_timeout = 2
        self.base_pids = []
        self._procs_by_test = {}
        if not bootstrap.pyon_initialized:
            bootstrap.bootstrap_pyon()
        self.node, self.ioloop = make_node()
        self.node.setup_interceptors(CFG.interceptor)
        self.pd_cli = ProcessDispatcherServiceClient(node=self.node)
        # Set base_pids once
        from pyon.core.exception import Timeout
        try:
            self.base_pids = [proc.process_id for proc in self.pd_cli.list_processes(timeout=20)]
        except Timeout:
            pass
Code Example #12
File: request_server.py  Project: swarbhanu/pyon
"""
Run the request_client example once this is running.
"""
from pyon.net import messaging
from pyon.net import channel

if __name__ == '__main__':
    node, ioloop_process = messaging.make_node()
    ch = node.channel(channel.Bidirectional)
    ch.bind(('amq.direct', 'server_x'))
    ch.listen()
    connected_ch = ch.accept()
    data = connected_ch.recv()
    print 'Message recvd: ', data
    connected_ch.send('hola')


Code Example #13
File: _rpcc.py  Project: newbrough/pyon
#!/usr/bin/env python

from pyon.net.endpoint import RPCClient
from pyon.net.messaging import make_node
from interface.services.examples.hello.ihello_service import IHelloService

"""
import gevent

from interface.services.ibank_service import IBankService
from interface.services.idatastore_service import IDatastoreService

node,iowat=make_node()

bank = RPCClient(node=node, name="bank", iface=IBankService)
data = RPCClient(node=node, name="datastore", iface=IDatastoreService)
"""

n, io = make_node()

hello = RPCClient(node=n, name=("qq", "hello"), iface=IHelloService)
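Usage note (not part of the original script): RPCClient builds proxy methods from the supplied interface, so, assuming the example IHelloService defines a hello(text) operation, the client above could be exercised and the node shut down roughly like this:

# Hypothetical follow-up, assuming IHelloService defines hello(text).
print "Reply:", hello.hello("hi there")
n.stop_node()
io.join(timeout=5)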
Code Example #14
    def afterTest(self, test):
        from pyon.net.transport import NameTrio, TransportError
        from pyon.net.channel import RecvChannel
        import os
        import sys

        # need a connection to node to get queue stats
        from pyon.net.messaging import make_node
        node, ioloop = make_node()

        os.environ.pop('QUEUE_BLAME')
        tid = test.id()

        # grab raw data from database
        obj_ids = self.ds.list_objects()
        objs = self.ds.read_doc_mult(obj_ids)

        for x in objs:
            queue = x['queue_name']

            if 'accesses' in self.queues_by_test[tid][queue]:
                self.queues_by_test[tid][queue]['accesses'] += 1
            else:
                # grab intel from channel
                ch = node.channel(RecvChannel)
                ch._recv_name = NameTrio(queue.split('.')[0], queue)

                try:
                    msgs, consumers = ch.get_stats()
                    exists = True
                    #print >>sys.stderr, "LOG ME", queue, msgs, consumers
                except TransportError:
                    msgs = 0
                    consumers = 0
                    exists = False
                finally:
                    ch.close()

                self.queues_by_test[tid][queue] = {
                    'exists': exists,
                    'msgs': msgs,
                    'consumers': consumers,
                    'accesses': 1
                }

        # must also check all the queues from previous tests, to capture bleed
        bleed_queues = set()
        for test, testqueues in self.queues_by_test.iteritems():
            if test != tid:
                map(bleed_queues.add, testqueues.iterkeys())

        # don't test anything we already just tested
        bleed_queues.difference_update(self.queues_by_test[tid].iterkeys())

        for queue in bleed_queues:
            ch = node.channel(RecvChannel)
            ch._recv_name = NameTrio(queue.split('.')[0], queue)

            try:
                msgs, consumers = ch.get_stats()
                exists = True
            except TransportError:
                msgs = 0
                consumers = 0
                exists = False

            # drain the queue!
            if exists and msgs > 0 and consumers == 0:
                print >> sys.stderr, "DRAIN QUEUE:", queue
                ch.start_consume()
                for x in xrange(msgs):
                    m, h, d = ch.recv()
                    print >> sys.stderr, h
                    ch.ack(d)

            ch.close()

            self.queues_by_test[tid][queue] = {
                'exists': exists,
                'msgs': msgs,
                'consumers': consumers,
                'accesses': 0
            }  # 0 is special here, indicates a bleed check

        # empty the database for next test use
        self.ds.delete_datastore()
        self.ds.create_datastore(create_indexes=False)

        node.stop_node()
        ioloop.join(timeout=5)
Code Example #15
File: request_client.py  Project: ateranishi/pyon
"""
Run the request_server example first.
"""
from pyon.net import messaging
from pyon.net import channel

if __name__ == '__main__':
    node, ioloop_process = messaging.make_node()
    ch = node.channel(channel.BidirectionalClient)
    ch.connect(('amq.direct', 'server_x'))
    ch.send('hello')
    data = ch.recv()
    print 'Message recvd: ', data


Code Example #16
File: _rpcc.py  Project: swarbhanu/pyon
#!/usr/bin/env python

from pyon.net.endpoint import RPCClient
from pyon.net.messaging import make_node
from interface.services.examples.hello.ihello_service import IHelloService
"""
import gevent

from interface.services.ibank_service import IBankService
from interface.services.idatastore_service import IDatastoreService

node,iowat=make_node()

bank = RPCClient(node=node, name="bank", iface=IBankService)
data = RPCClient(node=node, name="datastore", iface=IDatastoreService)
"""

n, io = make_node()

hello = RPCClient(node=n, name=('qq', 'hello'), iface=IHelloService)
Code Example #17
File: exchange.py  Project: scion-network/scioncc
    def start(self):
        log.debug("ExchangeManager.start")

        total_count = 0

        def handle_failure(name, node, priv):
            log.warn("Node %s (privileged: %s) could not be started", priv,
                     name)
            node.ready.set()  # let it fall out below

        # read broker config to get nodes to connect to
        brokers = []
        for broker_name, broker_cfg in CFG.get_safe(
                'exchange.exchange_brokers').iteritems():
            cfg_key = broker_cfg.get('server', None)
            if not cfg_key:
                continue

            brokers.append((broker_name, cfg_key, False))

            priv_key = broker_cfg.get('server_priv', None)
            if priv_key is not None:
                brokers.append((broker_name, priv_key, True))

        # connect to all known brokers
        for b in brokers:
            broker_name, cfgkey, is_priv = b

            if cfgkey not in CFG.server:
                raise ExchangeManagerError(
                    "Config key %s (name: %s) (from CFG.container.messaging.server) not in CFG.server"
                    % (cfgkey, broker_name))

            total_count += 1
            log.debug("Starting connection: %s", broker_name)

            try:
                cfg_params = CFG.server[cfgkey]

                if cfg_params['type'] == 'local':
                    node, ioloop = messaging.make_local_node(
                        0, self.container.local_router)
                else:
                    # start it with a zero timeout so it comes right back to us
                    node, ioloop = messaging.make_node(cfg_params, broker_name,
                                                       0)

                # install a finished handler directly on the ioloop just for this startup period
                fail_handle = lambda _: handle_failure(broker_name, node,
                                                       is_priv)
                ioloop.link(fail_handle)

                # wait for the node ready event, with a large timeout just in case
                node_ready = node.ready.wait(timeout=15)

                # remove the finished handler, we don't care about it here
                ioloop.unlink(fail_handle)

                # only add to our list if we started successfully
                if not node.running:
                    ioloop.kill()  # make sure ioloop dead
                else:
                    if is_priv:
                        self._priv_nodes[broker_name] = node
                        self._priv_ioloops[broker_name] = ioloop
                    else:
                        self._nodes[broker_name] = node
                        self._ioloops[broker_name] = ioloop

            except socket.error as e:
                log.warn(
                    "Could not start connection %s due to socket error, continuing",
                    broker_name)

        fail_count = total_count - len(self._nodes) - len(self._priv_nodes)
        if fail_count > 0 or total_count == 0:
            if fail_count == total_count:
                raise ExchangeManagerError(
                    "No node connection was able to start (%d nodes attempted, %d nodes failed)"
                    % (total_count, fail_count))

            log.warn("Some nodes could not be started, ignoring for now"
                     )  # @TODO change when ready

        # load interceptors into each
        map(lambda x: x.setup_interceptors(CFG.interceptor),
            self._nodes.itervalues())
        map(lambda x: x.setup_interceptors(CFG.interceptor),
            self._priv_nodes.itervalues())

        # prepare privileged transports
        for name in self._nodes:
            node = self._priv_nodes.get(name, self._nodes[name])
            transport = self.get_transport(node)
            transport.lock = True  # prevent any attempt to close
            transport.add_on_close_callback(
                lambda *a, **kw: self._privileged_transport_closed(
                    name, *a, **kw))
            self._priv_transports[name] = transport

        # create default Exchange Space
        self.default_xs = self._create_root_xs()

        log.debug("Started %d connections (%s)",
                  len(self._nodes) + len(self._priv_nodes),
                  ",".join(self._nodes.keys() + self._priv_nodes.keys()))
Code Example #18
File: _sub.py  Project: ateranishi/pyon
#!/usr/bin/env python

from pyon.net.endpoint import Subscriber
from pyon.net.messaging import make_node
import gevent

node,iowat=make_node()

def msg_recv(msg):
    print "\n\n========================================\n\nHASSAN SAYS: %s\n\n========================================\n\n" % str(msg)

sub=Subscriber(node=node, name="hassan", callback=msg_recv)
meh=gevent.spawn(sub.listen)

gevent.joinall([meh])
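For reference, a matching sender for this subscriber, sketched under the assumption that pyon.net.endpoint also provides a Publisher whose constructor mirrors the Subscriber above and which exposes a publish(msg) method (this sketch is not taken from any of the listed projects):

#!/usr/bin/env python

# Hypothetical companion publisher; Publisher(node=..., name=...) and
# publish(msg) are assumed to parallel the Subscriber API shown above.
from pyon.net.endpoint import Publisher
from pyon.net.messaging import make_node

node, iowat = make_node()

pub = Publisher(node=node, name="hassan")
pub.publish("hello from the publisher")

node.stop_node()
iowat.join(timeout=5)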
Code Example #19
File: exchange.py  Project: mkl-/scioncc
    def start(self):
        log.debug("ExchangeManager.start")

        total_count = 0

        def handle_failure(name, node, priv):
            log.warn("Node %s (privileged: %s) could not be started", priv, name)
            node.ready.set()  # let it fall out below

        # read broker config to get nodes to connect to
        brokers = []
        for broker_name, broker_cfg in CFG.get_safe("exchange.exchange_brokers").iteritems():
            cfg_key = broker_cfg.get("server", None)
            if not cfg_key:
                continue

            brokers.append((broker_name, cfg_key, False))

            priv_key = broker_cfg.get("server_priv", None)
            if priv_key is not None:
                brokers.append((broker_name, priv_key, True))

        # connect to all known brokers
        for b in brokers:
            broker_name, cfgkey, is_priv = b

            if cfgkey not in CFG.server:
                raise ExchangeManagerError(
                    "Config key %s (name: %s) (from CFG.container.messaging.server) not in CFG.server"
                    % (cfgkey, broker_name)
                )

            total_count += 1
            log.debug("Starting connection: %s", broker_name)

            try:
                cfg_params = CFG.server[cfgkey]

                if cfg_params["type"] == "local":
                    node, ioloop = messaging.make_local_node(0, self.container.local_router)
                else:
                    # start it with a zero timeout so it comes right back to us
                    node, ioloop = messaging.make_node(cfg_params, broker_name, 0)

                # install a finished handler directly on the ioloop just for this startup period
                fail_handle = lambda _: handle_failure(broker_name, node, is_priv)
                ioloop.link(fail_handle)

                # wait for the node ready event, with a large timeout just in case
                node_ready = node.ready.wait(timeout=15)

                # remove the finished handler, we don't care about it here
                ioloop.unlink(fail_handle)

                # only add to our list if we started successfully
                if not node.running:
                    ioloop.kill()  # make sure ioloop dead
                else:
                    if is_priv:
                        self._priv_nodes[broker_name] = node
                        self._priv_ioloops[broker_name] = ioloop
                    else:
                        self._nodes[broker_name] = node
                        self._ioloops[broker_name] = ioloop

            except socket.error as e:
                log.warn("Could not start connection %s due to socket error, continuing", broker_name)

        fail_count = total_count - len(self._nodes) - len(self._priv_nodes)
        if fail_count > 0 or total_count == 0:
            if fail_count == total_count:
                raise ExchangeManagerError(
                    "No node connection was able to start (%d nodes attempted, %d nodes failed)"
                    % (total_count, fail_count)
                )

            log.warn("Some nodes could not be started, ignoring for now")  # @TODO change when ready

        # load interceptors into each
        map(lambda x: x.setup_interceptors(CFG.interceptor), self._nodes.itervalues())
        map(lambda x: x.setup_interceptors(CFG.interceptor), self._priv_nodes.itervalues())

        # prepare privileged transports
        for name in self._nodes:
            node = self._priv_nodes.get(name, self._nodes[name])
            transport = self.get_transport(node)
            transport.lock = True  # prevent any attempt to close
            transport.add_on_close_callback(lambda *a, **kw: self._privileged_transport_closed(name, *a, **kw))
            self._priv_transports[name] = transport

        # create default Exchange Space
        self.default_xs = self._create_root_xs()

        log.debug(
            "Started %d connections (%s)",
            len(self._nodes) + len(self._priv_nodes),
            ",".join(self._nodes.keys() + self._priv_nodes.keys()),
        )
Code Example #20
#!/usr/bin/env python

from pyon.net.endpoint import Subscriber
from pyon.net.messaging import make_node
import gevent
import time

node, iowat = make_node()


def msg_recv(msg, h):
    global counter
    counter += 1


sub = Subscriber(node=node, name="hassan", callback=msg_recv)

counter = 0
st = time.time()


def tick():
    global counter, st
    while True:
        time.sleep(2)
        ct = time.time()
        elapsed_s = ct - st

        mps = counter / elapsed_s

        print counter, "messages, per sec:", mps
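The excerpt above stops before anything is started; a minimal completion, following the gevent.spawn/joinall pattern of the earlier _sub.py example, might read:

# Assumed completion (not in the excerpt): run the listener and the
# rate-printing loop concurrently and wait on both greenlets.
gevent.joinall([gevent.spawn(sub.listen), gevent.spawn(tick)])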
Code Example #21
File: queueblame_plugin.py  Project: swarbhanu/pyon
    def afterTest(self, test):
        from pyon.net.transport import NameTrio, TransportError
        from pyon.net.channel import RecvChannel
        import os
        import sys

        # need a connection to node to get queue stats
        from pyon.net.messaging import make_node
        node, ioloop = make_node()

        os.environ.pop('QUEUE_BLAME')
        tid = test.id()

        # grab raw data from database
        obj_ids = self.ds.list_objects()
        objs = self.ds.read_doc_mult(obj_ids)

        for x in objs:
            queue = x['queue_name']

            if 'accesses' in self.queues_by_test[tid][queue]:
                self.queues_by_test[tid][queue]['accesses'] += 1
            else:
                # grab intel from channel
                ch = node.channel(RecvChannel)
                ch._recv_name = NameTrio(queue.split('.')[0], queue)

                try:
                    msgs, consumers = ch.get_stats()
                    exists = True
                    #print >>sys.stderr, "LOG ME", queue, msgs, consumers
                except TransportError:
                    msgs = 0
                    consumers = 0
                    exists = False
                finally:
                    ch.close()

                self.queues_by_test[tid][queue] = { 'exists': exists,
                                                    'msgs': msgs,
                                                    'consumers' : consumers,
                                                    'accesses' : 1 }

        # must also check all the queues from previous tests, to capture bleed
        bleed_queues = set()
        for test, testqueues in self.queues_by_test.iteritems():
            if test != tid:
                map(bleed_queues.add, testqueues.iterkeys())

        # don't test anything we already just tested
        bleed_queues.difference_update(self.queues_by_test[tid].iterkeys())

        for queue in bleed_queues:
            ch = node.channel(RecvChannel)
            ch._recv_name = NameTrio(queue.split('.')[0], queue)

            try:
                msgs, consumers = ch.get_stats()
                exists = True
            except TransportError:
                msgs = 0
                consumers = 0
                exists = False

            # drain the queue!
            if exists and msgs > 0 and consumers == 0:
                print >>sys.stderr, "DRAIN QUEUE:", queue
                ch.start_consume()
                for x in xrange(msgs):
                    m, h, d = ch.recv()
                    print >>sys.stderr, h
                    ch.ack(d)

            ch.close()


            self.queues_by_test[tid][queue] = { 'exists': exists,
                                                'msgs': msgs,
                                                'consumers': consumers,
                                                'accesses' : 0 }        # 0 is special here, indicates a bleed check

        # empty the database for next test use
        self.ds.delete_datastore()
        self.ds.create_datastore(create_indexes=False)

        node.stop_node()
        ioloop.join(timeout=5)