Example No. 1
    def __init__(self, *args, **kwargs):
        BaseContainerAgent.__init__(self, *args, **kwargs)

        self._is_started = False

        self._capabilities = []

        # set container id and cc_agent name (as they are set in base class call)
        self.id = get_default_container_id()
        self.name = "cc_agent_%s" % self.id

        Container.instance = self

        from pyon.core import bootstrap
        bootstrap.container_instance = self

        log.debug("Container (sysname=%s) initializing ..." % bootstrap.get_sys_name())

        # DatastoreManager - controls access to Datastores (both mock and couch backed)
        self.datastore_manager = DatastoreManager()

        self.datastore_manager.start()
        self._capabilities.append("DATASTORE_MANAGER")

        # Keep track of the overrides from the command-line, so they can trump app/rel file data
        self.spawn_args = kwargs

        # Instantiate Directory and self-register
        # Has the additional side effect of either
        # bootstrapping the configuration into the
        # directory or reading the configuration,
        # depending on the value of the auto_bootstrap setting
        self.directory = Directory()

        # Create this Container's specific ExchangeManager instance
        self.ex_manager = ExchangeManager(self)

        # Create this Container's specific ProcManager instance
        self.proc_manager = ProcManager(self)

        # Create this Container's specific AppManager instance
        self.app_manager = AppManager(self)

        # File System - Interface to the OS File System, using correct path names and setups
        self.file_system = FileSystem(CFG)

        # Governance Controller - manages the governance related interceptors
        self.governance_controller = GovernanceController(self)

        # sFlow manager - controls sFlow stat emission
        self.sflow_manager = SFlowManager(self)

        # Coordinates the container start
        self._status = "INIT"

        # protection for when the container itself is used as a Process for clients
        self.container = self

        log.debug("Container initialized, OK.")
Example No. 2
    def __init__(self, *args, **kwargs):
        BaseContainerAgent.__init__(self, *args, **kwargs)

        self._is_started = False

        # set id and name (as they are set in base class call)
        self.id = string.replace('%s_%d' % (os.uname()[1], os.getpid()), ".",
                                 "_")
        self.name = "cc_agent_%s" % self.id

        Container.instance = self

        # TODO: Bug: Replacing the CFG instance does not work because references to it are already public. Update CFG directly.
        dict_merge(CFG, kwargs, inplace=True)
        from pyon.core import bootstrap
        bootstrap.container_instance = self
        bootstrap.assert_configuration(CFG)
        log.debug("Container (sysname=%s) initializing ..." %
                  bootstrap.get_sys_name())

        # Keep track of the overrides from the command-line, so they can trump app/rel file data
        self.spawn_args = kwargs

        # Load object and service registry etc.
        bootstrap_pyon()

        # Create this Container's specific ExchangeManager instance
        self.ex_manager = ExchangeManager(self)

        # Create this Container's specific ProcManager instance
        self.proc_manager = ProcManager(self)

        # Create this Container's specific AppManager instance
        self.app_manager = AppManager(self)

        # DatastoreManager - controls access to Datastores (both mock and couch backed)
        self.datastore_manager = DatastoreManager()

        # File System - Interface to the OS File System, using correct path names and setups
        self.file_system = FileSystem(CFG)

        # Governance Controller - manages the governance related interceptors
        self.governance_controller = GovernanceController(self)

        # sFlow manager - controls sFlow stat emission
        self.sflow_manager = SFlowManager(self)

        # Coordinates the container start
        self._is_started = False
        self._capabilities = []
        self._status = "INIT"

        # protection for when the container itself is used as a Process for clients
        self.container = self

        log.debug("Container initialized, OK.")
Example No. 3
    def setUp(self):
        self.ex_manager = ExchangeManager(Mock())
        self.ex_manager._transport  = Mock(BaseTransport)

        # mock out _client, which is a property, to return a sentinel
        propmock = Mock()
        propmock.__get__ = Mock(return_value=sentinel.client)
        patcher = patch.object(ExchangeManager, '_client', propmock)
        patcher.start()
        self.addCleanup(patcher.stop)
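As a follow-up, a hedged sketch of how a test in this class could rely on the patched property: because _client now returns sentinel.client, any transport call routed through the manager should receive that sentinel as its client argument. The test name below is hypothetical; create_xs and declare_exchange_impl are the calls exercised in the other examples in this listing.

    def test_transport_gets_patched_client(self):
        # hypothetical test: declare an exchange space and check that the
        # client handed to the transport is the patched sentinel
        self.ex_manager.create_xs('somexs')
        args, kwargs = self.ex_manager._transport.declare_exchange_impl.call_args
        self.assertEquals(args[0], sentinel.client)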
Example No. 4
File: cc.py Project: wfrench/pyon
    def __init__(self, *args, **kwargs):
        BaseContainerAgent.__init__(self, *args, **kwargs)

        # set id and name (as they are set in base class call)
        self.id = string.replace('%s_%d' % (os.uname()[1], os.getpid()), ".", "_")
        self.name = "cc_agent_%s" % self.id

        Container.instance = self

        # TODO: Bug: Replacing the CFG instance does not work because references to it are already public. Update CFG directly.
        dict_merge(CFG, kwargs)
        from pyon.core import bootstrap
        bootstrap.sys_name = CFG.system.name or bootstrap.sys_name
        log.debug("Container (sysname=%s) initializing ..." % bootstrap.sys_name)

        # Keep track of the overrides from the command-line, so they can trump app/rel file data
        self.spawn_args = DictModifier(CFG, kwargs)

        # Load object and service registry etc.
        bootstrap_pyon()

        # Create this Container's specific ExchangeManager instance
        self.ex_manager = ExchangeManager(self)

        # Create this Container's specific ProcManager instance
        self.proc_manager = ProcManager(self)

        # Create this Container's specific AppManager instance
        self.app_manager = AppManager(self)

        # DatastoreManager - controls access to Datastores (both mock and couch backed)
        self.datastore_manager = DatastoreManager()
        
        log.debug("Container initialized, OK.")
Example No. 5
File: cc.py Project: ooici-dm/pyon
    def __init__(self, *args, **kwargs):
        BaseContainerAgent.__init__(self, *args, **kwargs)

        self._is_started = False

        # set id and name (as they are set in base class call)
        self.id = string.replace('%s_%d' % (os.uname()[1], os.getpid()), ".", "_")
        self.name = "cc_agent_%s" % self.id

        Container.instance = self

        # TODO: Bug: Replacing the CFG instance does not work because references to it are already public. Update CFG directly.
        dict_merge(CFG, kwargs, inplace=True)
        from pyon.core import bootstrap
        bootstrap.container_instance = self
        bootstrap.assert_configuration(CFG)
        log.debug("Container (sysname=%s) initializing ..." % bootstrap.get_sys_name())

        # Keep track of the overrides from the command-line, so they can trump app/rel file data
        self.spawn_args = kwargs

        # Load object and service registry etc.
        bootstrap_pyon()

        # Create this Container's specific ExchangeManager instance
        self.ex_manager = ExchangeManager(self)

        # Create this Container's specific ProcManager instance
        self.proc_manager = ProcManager(self)

        # Create this Container's specific AppManager instance
        self.app_manager = AppManager(self)

        # DatastoreManager - controls access to Datastores (both mock and couch backed)
        self.datastore_manager = DatastoreManager()

        # File System - Interface to the OS File System, using correct path names and setups
        self.file_system = FileSystem(CFG)

        # Governance Controller - manages the governance related interceptors
        self.governance_controller = GovernanceController(self)

        # sFlow manager - controls sFlow stat emission
        self.sflow_manager = SFlowManager(self)

        # Coordinates the container start
        self._is_started = False
        self._capabilities = []
        self._status = "INIT"

        # protection for when the container itself is used as a Process for clients
        self.container = self

        log.debug("Container initialized, OK.")
Example No. 6
    def begin(self):
        self._active_queues = set()
        self._test_changes = {}
        self._queues_declared = []  # ordered list of queues declared
        self._queues = defaultdict(list)  # queue name -> list of accesses

        from pyon.ion.exchange import ExchangeManager
        from pyon.util.containers import DotDict
        from pyon.core.bootstrap import CFG
        from mock import Mock

        containermock = Mock()
        containermock.resource_registry.find_resources.return_value = ([],
                                                                       None)

        self.ex_manager = ExchangeManager(
            containermock)  # needs to be able to setattr
        self.ex_manager._nodes['priviledged'] = DotDict(client=DotDict(
            parameters=DotDict(
                host=CFG.get_safe('server.amqp.host', 'localhost'))))
Example No. 7
File: cc.py Project: swarbhanu/pyon
    def __init__(self, *args, **kwargs):
        BaseContainerAgent.__init__(self, *args, **kwargs)

        self._is_started = False
        # set container id and cc_agent name (as they are set in base class call)
        self.id = get_default_container_id()
        self.name = "cc_agent_%s" % self.id
        self._capabilities = []

        from pyon.core import bootstrap
        bootstrap.container_instance = self
        Container.instance = self

        log.debug("Container (sysname=%s) initializing ..." % bootstrap.get_sys_name())

        # Keep track of the overrides from the command-line, so they can trump app/rel file data
        self.spawn_args = kwargs

        # DatastoreManager - controls access to Datastores (both mock and couch backed)
        self.datastore_manager = DatastoreManager()

        # TODO: Do not start a capability here. Symmetric start/stop
        self.datastore_manager.start()
        self._capabilities.append("DATASTORE_MANAGER")

        # Instantiate Directory
        self.directory = Directory()

        # Create this Container's specific ExchangeManager instance
        self.ex_manager = ExchangeManager(self)

        # Create this Container's specific ProcManager instance
        self.proc_manager = ProcManager(self)

        # Create this Container's specific AppManager instance
        self.app_manager = AppManager(self)

        # File System - Interface to the OS File System, using correct path names and setups
        self.file_system = FileSystem(CFG)

        # Governance Controller - manages the governance related interceptors
        self.governance_controller = GovernanceController(self)

        # sFlow manager - controls sFlow stat emission
        self.sflow_manager = SFlowManager(self)

        # Coordinates the container start
        self._status = "INIT"

        # protection for when the container itself is used as a Process for clients
        self.container = self

        log.debug("Container initialized, OK.")
Example No. 8
    def setUp(self):
        self.ex_manager = ExchangeManager(Mock())
        self.pt = Mock(spec=BaseTransport)
        self.ex_manager.get_transport = Mock(return_value=self.pt)

        # set up some nodes
        self.ex_manager._nodes = {'primary': Mock(), 'priviledged': Mock()}

        # patch for setUp and test
        self.patch_cfg('pyon.ion.exchange.CFG', {'container':{'exchange':{'auto_register':False}}, 'messaging':{'server':{}}})

        # start ex manager
        self.ex_manager.start()
Example No. 9
    def setUp(self):
        self.ex_manager = ExchangeManager(Mock())
        self.pt = Mock(spec=BaseTransport)
        self.ex_manager.get_transport = Mock(return_value=self.pt)

        # set up some nodes
        self.ex_manager._nodes = {'primary': Mock(), 'priviledged': Mock()}

        # patch for setUp and test
        self.patch_cfg(
            'pyon.ion.exchange.CFG', {
                'container': {
                    'exchange': {
                        'auto_register': False
                    }
                },
                'messaging': {
                    'server': {}
                }
            })

        # start ex manager
        self.ex_manager.start()
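For context, the nested dict handed to patch_cfg mirrors the dotted paths that code elsewhere in this listing reads via CFG.get_safe (e.g. container.sflow.enabled). A standalone sketch of that dotted lookup, using a hypothetical get_safe helper rather than the pyon implementation:

def get_safe(cfg, dotted_path, default=None):
    # walk a nested dict along a dotted path, returning default when a key is absent
    cur = cfg
    for key in dotted_path.split('.'):
        if not isinstance(cur, dict) or key not in cur:
            return default
        cur = cur[key]
    return cur

cfg = {'container': {'exchange': {'auto_register': False}}, 'messaging': {'server': {}}}
assert get_safe(cfg, 'container.exchange.auto_register') is False
assert get_safe(cfg, 'container.sflow.enabled', False) is False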
Example No. 10
    def begin(self):
        self._active_queues = set()
        self._test_changes = {}
        self._queues_declared = []          # ordered list of queues declared
        self._queues = defaultdict(list)    # queue name -> list of accesses

        from pyon.ion.exchange import ExchangeManager
        from pyon.util.containers import DotDict
        from pyon.core.bootstrap import CFG
        from mock import Mock

        containermock = Mock()
        containermock.resource_registry.find_resources.return_value = ([], None)

        self.ex_manager = ExchangeManager(containermock)      # needs to be able to setattr
        self.ex_manager._nodes['priviledged'] = DotDict(client=DotDict(parameters=DotDict(host=CFG.get_safe('server.amqp.host', 'localhost'))))
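A standalone illustration of why the nested DotDict works here: attribute access drills down to the configured host, which is what code reading node.client.parameters.host would see (assuming only the DotDict import shown above; the host value is illustrative).

node = DotDict(client=DotDict(parameters=DotDict(host='localhost')))
assert node.client.parameters.host == 'localhost'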
Example No. 11
    def setUp(self):
        self.ex_manager = ExchangeManager(Mock())
        self.ex_manager._transport  = Mock(BaseTransport)
        self.ex_manager._client     = Mock()
Example No. 12
File: cc.py Project: pkediyal/pyon
class Container(BaseContainerAgent):
    """
    The Capability Container. Its purpose is to spawn/monitor processes and services
    that do the bulk of the work in the ION system. It also manages connections to the Exchange
    and the various forms of datastores in the systems.
    """

    # Singleton static variables
    #node        = None
    id          = None
    name        = None
    pidfile     = None
    instance    = None

    def __init__(self, *args, **kwargs):
        BaseContainerAgent.__init__(self, *args, **kwargs)

        self._is_started = False
        # set container id and cc_agent name (as they are set in base class call)
        self.id = get_default_container_id()
        self.name = "cc_agent_%s" % self.id
        self._capabilities = []

        bootstrap.container_instance = self
        Container.instance = self

        log.debug("Container (sysname=%s) initializing ..." % bootstrap.get_sys_name())

        # Keep track of the overrides from the command-line, so they can trump app/rel file data
        self.spawn_args = kwargs

        # DatastoreManager - controls access to Datastores (both mock and couch backed)
        self.datastore_manager = DatastoreManager()

        # TODO: Do not start a capability here. Symmetric start/stop
        self.datastore_manager.start()
        self._capabilities.append("DATASTORE_MANAGER")

        # Instantiate Directory
        self.directory = Directory()

        # internal router
        self.local_router = None

        # Create this Container's specific ExchangeManager instance
        self.ex_manager = ExchangeManager(self)

        # Create this Container's specific ProcManager instance
        self.proc_manager = ProcManager(self)

        # Create this Container's specific AppManager instance
        self.app_manager = AppManager(self)

        # File System - Interface to the OS File System, using correct path names and setups
        self.file_system = FileSystem(CFG)

        # Governance Controller - manages the governance related interceptors
        self.governance_controller = GovernanceController(self)

        # sFlow manager - controls sFlow stat emission
        self.sflow_manager = SFlowManager(self)

        # Coordinates the container start
        self._status = "INIT"

        # protection for when the container itself is used as a Process for clients
        self.container = self

        # publisher, initialized in start()
        self.event_pub = None

        # context-local storage
        self.context = LocalContextMixin()

        log.debug("Container initialized, OK.")

    def start(self):
        log.debug("Container starting...")
        if self._is_started:
            raise ContainerError("Container already started")

        # Check if this UNIX process already runs a Container.
        self.pidfile = "cc-pid-%d" % os.getpid()
        if os.path.exists(self.pidfile):
            raise ContainerError("Container.on_start(): Container is a singleton per UNIX process. Existing pid file found: %s" % self.pidfile)

        # write out a PID file containing our agent messaging name
        with open(self.pidfile, 'w') as f:
            pid_contents = {'messaging': dict(CFG.server.amqp),
                            'container-agent': self.name,
                            'container-xp': bootstrap.get_sys_name()}
            f.write(msgpack.dumps(pid_contents))
            atexit.register(self._cleanup_pid)
            self._capabilities.append("PID_FILE")

        # set up abnormal termination handler for this container
        def handl(signum, frame):
            try:
                self._cleanup_pid()     # cleanup the pidfile first
                self.quit()             # now try to quit - will not error on second cleanup pidfile call
            finally:
                signal.signal(signal.SIGTERM, self._normal_signal)
                os.kill(os.getpid(), signal.SIGTERM)
        self._normal_signal = signal.signal(signal.SIGTERM, handl)

        # set up greenlet debugging signal handler
        gevent.signal(signal.SIGUSR2, self._handle_sigusr2)

        self.datastore_manager.start()
        self._capabilities.append("DATASTORE_MANAGER")

        self._capabilities.append("DIRECTORY")

        # Event repository
        self.event_repository = EventRepository()
        self.event_pub = EventPublisher()
        self._capabilities.append("EVENT_REPOSITORY")

        # Local resource registry
        self.resource_registry = ResourceRegistry()
        self._capabilities.append("RESOURCE_REGISTRY")

        # Persistent objects
        self.datastore_manager.get_datastore("objects", DataStore.DS_PROFILE.OBJECTS)

        # State repository
        self.state_repository = StateRepository()
        self._capabilities.append("STATE_REPOSITORY")

        # internal router for local transports
        self.local_router = LocalRouter(bootstrap.get_sys_name())
        self.local_router.start()
        self.local_router.ready.wait(timeout=2)
        self._capabilities.append("LOCAL_ROUTER")

        # Start ExchangeManager, which starts the node (broker connection)
        self.ex_manager.start()
        self._capabilities.append("EXCHANGE_MANAGER")

        self.proc_manager.start()
        self._capabilities.append("PROC_MANAGER")

        self.app_manager.start()
        self._capabilities.append("APP_MANAGER")

        self.governance_controller.start()
        self._capabilities.append("GOVERNANCE_CONTROLLER")

        if CFG.get_safe('container.sflow.enabled', False):
            self.sflow_manager.start()
            self._capabilities.append("SFLOW_MANAGER")

        # Start the CC-Agent API
        rsvc = ProcessRPCServer(node=self.node, from_name=self.name, service=self, process=self)

        cleanup = lambda _: self.proc_manager._cleanup_method(self.name, rsvc)

        # Start an ION process with the right kind of endpoint factory
        proc = self.proc_manager.proc_sup.spawn(name=self.name, listeners=[rsvc], service=self, cleanup_method=cleanup)
        self.proc_manager.proc_sup.ensure_ready(proc)
        proc.start_listeners()
        self._capabilities.append("CONTAINER_AGENT")

        self.event_pub.publish_event(event_type="ContainerLifecycleEvent",
                                     origin=self.id, origin_type="CapabilityContainer",
                                     sub_type="START",
                                     state=ContainerStateEnum.START)

        self._is_started    = True
        self._status        = "RUNNING"

        log.info("Container (%s) started, OK." , self.id)

    def _handle_sigusr2(self):#, signum, frame):
        """
        Handles SIGUSR2, prints debugging greenlet information.
        """
        gls = GreenletLeak.get_greenlets()

        allgls = []

        for gl in gls:
            status = GreenletLeak.format_greenlet(gl)

            # build formatted output:
            # Greenlet at 0xdeadbeef
            #     self: <EndpointUnit at 0x1ffcceef>
            #     func: bound, EndpointUnit.some_func

            status[0].insert(0, "%s at %s:" % (gl.__class__.__name__, hex(id(gl))))
            # indent anything in status a second time
            prefmt = [s.replace("\t", "\t\t") for s in status[0]]
            prefmt.append("traceback:")

            for line in status[1]:
                for subline in line.split("\n")[0:2]:
                    prefmt.append(subline)

            glstr = "\n\t".join(prefmt)

            allgls.append(glstr)

        # print it out!
        print >>sys.stderr, "\n\n".join(allgls)
        with open("gls-%s" % os.getpid(), "w") as f:
            f.write("\n\n".join(allgls))


    @property
    def node(self):
        """
        Returns the active/default Node that should be used for most communication in the system.

        Defers to exchange manager, but only if it has been started, otherwise returns None.
        """
        if "EXCHANGE_MANAGER" in self._capabilities:
            return self.ex_manager.default_node

        return None

    @contextmanager
    def _push_status(self, new_status):
        """
        Temporarily sets the internal status flag.
        Use this as a decorator or in a with-statement before calling a temporary status changing
        method, like start_rel_from_url.
        """
        curstatus = self._status
        self._status = new_status
        try:
            yield
        finally:
            self._status = curstatus

    def serve_forever(self):
        """ Run the container until killed. """
        log.debug("In Container.serve_forever")

        if not self.proc_manager.proc_sup.running:
            self.start()

        # serve forever short-circuits if immediate is on and children len is ok
        num_procs = len(self.proc_manager.proc_sup.children)
        immediate = CFG.system.get('immediate', False)
        if not (immediate and num_procs == 1):  # only spawned greenlet is the CC-Agent

            # print a warning just in case
            if immediate and num_procs != 1:
                log.warn("CFG.system.immediate=True but number of spawned processes is not 1 (%d)", num_procs)

            try:
                # This just waits in this Greenlet for all child processes to complete,
                # which is triggered somewhere else.
                self.proc_manager.proc_sup.join_children()
            except (KeyboardInterrupt, SystemExit) as ex:
                log.info('Received a kill signal, shutting down the container.')

                if hasattr(self, 'gl_parent_watch') and self.gl_parent_watch is not None:
                    self.gl_parent_watch.kill()

            except:
                log.exception('Unhandled error! Forcing container shutdown')
        else:
            log.debug("Container.serve_forever short-circuiting due to CFG.system.immediate")

        self.proc_manager.proc_sup.shutdown(CFG.cc.timeout.shutdown)

    def status(self):
        """
        Returns the internal status.
        """
        return self._status

    def _cleanup_pid(self):
        if self.pidfile:
            log.debug("Cleanup pidfile: %s", self.pidfile)
            try:
                os.remove(self.pidfile)
            except Exception, e:
                log.warn("Pidfile could not be deleted: %s" % str(e))
            self.pidfile = None
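Taken together, __init__, start(), status() and serve_forever() suggest the lifecycle a launcher would drive. A minimal hypothetical driver sketch, not taken from the pyon sources and assuming an environment in which the container can actually start:

# hypothetical launcher sketch: construct, start, then block until killed
container = Container()
container.start()              # acquires capabilities and writes the pid file
print container.status()       # "RUNNING" once start() completes
container.serve_forever()      # joins child processes until a kill signal arrives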
Example No. 13
class TestManagementAPI(PyonTestCase):
    def setUp(self):
        self.ex_manager = ExchangeManager(Mock())
        self.ex_manager._nodes = MagicMock()
        self.ex_manager._nodes.get.return_value.client.parameters.host = "testhost"  # stringifies so don't use sentinel

        self.ex_manager._ems_client = Mock()

    def test__get_management_url(self):
        url = self.ex_manager._get_management_url()

        self.assertEquals(url, "http://*****:*****@patch('pyon.ion.exchange.json')
    @patch('pyon.ion.exchange.requests')
    def test__call_management(self, reqmock, jsonmock):
        content = self.ex_manager._call_management(sentinel.url)

        self.assertEquals(content, jsonmock.loads.return_value)
        reqmock.get.assert_called_once_with(sentinel.url,
                                            auth=('user', 'pass'),
                                            data=None)

    @patch('pyon.ion.exchange.json')
    @patch('pyon.ion.exchange.requests')
    def test__call_management_delete(self, reqmock, jsonmock):
        content = self.ex_manager._call_management_delete(sentinel.url)

        self.assertEquals(content, jsonmock.loads.return_value)
        reqmock.delete.assert_called_once_with(sentinel.url,
                                               auth=('user', 'pass'),
                                               data=None)

    @patch('pyon.ion.exchange.json')
    @patch('pyon.ion.exchange.requests')
    def test__make_management_call(self, reqmock, jsonmock):
        content = self.ex_manager._make_management_call(sentinel.url,
                                                        method="scoop")

        reqmock.scoop.assert_called_once_with(sentinel.url,
                                              auth=('user', 'pass'),
                                              data=None)

    def test__make_management_call_delegates_to_ems(self):
        self.ex_manager._ems_available = Mock(return_value=True)

        content = self.ex_manager._make_management_call(
            sentinel.url, method=sentinel.anymeth)

        self.ex_manager._ems_client.call_management.assert_called_once_with(
            sentinel.url, sentinel.anymeth, headers=None)

    def test__make_management_call_raises_exceptions(self):
        rmock = Mock()
        rmock.return_value.raise_for_status.side_effect = requests.exceptions.Timeout

        with patch('pyon.ion.exchange.requests.get', rmock):
            self.assertRaises(exception.Timeout,
                              self.ex_manager._make_management_call,
                              sentinel.url,
                              use_ems=False)

    def test_list_queues_does_filtering(self):
        self.ex_manager._list_queues = Mock(return_value=[{
            'name': 'a_1'
        }, {
            'name': 'a_2'
        }, {
            'name': 'b_1'
        }, {
            'name': 'b_2'
        }])

        self.assertEquals(len(self.ex_manager.list_queues("a_")), 2)
        self.assertEquals(len(self.ex_manager.list_queues("b_")), 2)
        self.assertEquals(len(self.ex_manager.list_queues("_")), 4)
        self.assertEquals(len(self.ex_manager.list_queues("_1")), 2)
        self.assertEquals(len(self.ex_manager.list_queues("_2")), 2)

    def test_list_bindings_does_filtering(self):
        self.ex_manager._list_bindings = Mock(return_value=[
            {
                'source': 'ex_1',
                'destination': 'qq',
                'routing_key': '',
                'properties_key': '',
                'destination_type': 'queue'
            },
            {
                'source': 'ex_2',
                'destination': 'qa',
                'routing_key': '',
                'properties_key': '',
                'destination_type': 'queue'
            },
            {
                'source': 'ex_1',
                'destination': 'aq',
                'routing_key': '',
                'properties_key': '',
                'destination_type': 'queue'
            },
            {
                'source': 'ex_2',
                'destination': 'za',
                'routing_key': '',
                'properties_key': '',
                'destination_type': 'queue'
            },
        ])

        self.assertEquals(len(self.ex_manager.list_bindings(exchange="ex_1")),
                          2)
        self.assertEquals(len(self.ex_manager.list_bindings(exchange="ex_2")),
                          2)
        self.assertEquals(len(self.ex_manager.list_bindings(exchange="ex_")),
                          4)
        self.assertEquals(len(self.ex_manager.list_bindings(queue="qq")), 1)
        self.assertEquals(len(self.ex_manager.list_bindings(queue="a")), 3)
        self.assertEquals(len(self.ex_manager.list_bindings(queue="q")), 3)
Example No. 14
class TestExchangeObjects(IonUnitTestCase):
    def setUp(self):
        self.ex_manager = ExchangeManager(Mock())
        self.ex_manager._transport  = Mock(BaseTransport)
        self.ex_manager._client     = Mock()
        # all exchange level operations are patched out via the _transport

    def test_exchange_by_name(self):
        # defaults: Root XS, no XNs
        self.assertIn(ION_ROOT_XS, self.ex_manager.xs_by_name)
        self.assertIn(self.ex_manager.default_xs, self.ex_manager.xs_by_name.itervalues())
        self.assertEquals(len(self.ex_manager.xn_by_name), 0)

        # create another XS
        xs = self.ex_manager.create_xs('exchange')
        self.assertIn('exchange', self.ex_manager.xs_by_name)
        self.assertIn(xs, self.ex_manager.xs_by_name.values())
        self.assertEquals(len(self.ex_manager.xn_by_name), 0)

        # now create some XNs underneath default exchange
        xn1 = self.ex_manager.create_xn_process('xn1')
        self.assertEquals(xn1._xs, self.ex_manager.default_xs)
        self.assertIn('xn1', self.ex_manager.xn_by_name)
        self.assertIn(xn1, self.ex_manager.xn_by_name.values())
        self.assertEquals(xn1, self.ex_manager.xn_by_name['xn1'])
        self.assertIsInstance(xn1, ExchangeNameProcess)

        self.assertEquals({ION_ROOT_XS:[xn1]}, self.ex_manager.xn_by_xs)

        xn2 = self.ex_manager.create_xn_service('xn2')
        self.assertIn('xn2', self.ex_manager.xn_by_name)
        self.assertIn(xn2, self.ex_manager.xn_by_xs[ION_ROOT_XS])
        self.assertEquals(xn2.xn_type, 'XN_SERVICE')

        # create one under our second xn3
        xn3 = self.ex_manager.create_xn_queue('xn3', xs)
        self.assertIn('xn3', self.ex_manager.xn_by_name)
        self.assertIn(xn3, self.ex_manager.xn_by_xs['exchange'])
        self.assertNotIn(xn3, self.ex_manager.xn_by_xs[ION_ROOT_XS])

    def test_create_xs(self):
        xs      = self.ex_manager.create_xs(sentinel.xs)
        exstr   = '%s.ion.xs.%s' % (get_sys_name(), str(sentinel.xs))     # what we expect the exchange property to return

        self.assertEquals(xs._exchange, sentinel.xs)
        self.assertEquals(xs.exchange, exstr)
        self.assertEquals(xs.queue, None)
        self.assertEquals(xs.binding, None)

        self.assertEquals(xs._xs_exchange_type, 'topic')
        self.assertEquals(xs._xs_durable, False)
        self.assertEquals(xs._xs_auto_delete, True)

        # should be in our map too
        self.assertIn(sentinel.xs, self.ex_manager.xs_by_name)
        self.assertEquals(self.ex_manager.xs_by_name[sentinel.xs], xs)

        # should've tried to declare
        self.ex_manager._transport.declare_exchange_impl.assert_called_once_with(self.ex_manager._client, exstr, auto_delete=True, durable=False, exchange_type='topic')

    def test_create_xs_with_params(self):
        xs      = self.ex_manager.create_xs(sentinel.xs, exchange_type=sentinel.ex_type, durable=True)
        exstr   = '%s.ion.xs.%s' % (get_sys_name(), str(sentinel.xs))     # what we expect the exchange property to return

        self.assertEquals(xs._xs_durable, True)
        self.assertEquals(xs._xs_exchange_type, sentinel.ex_type)

        # declaration?
        self.ex_manager._transport.declare_exchange_impl.assert_called_once_with(self.ex_manager._client, exstr, auto_delete=True, durable=True, exchange_type=sentinel.ex_type)

    def test_delete_xs(self):
        # need an XS first
        xs      = self.ex_manager.create_xs(sentinel.delete_me)
        exstr   = '%s.ion.xs.%s' % (get_sys_name(), str(sentinel.delete_me))     # what we expect the exchange property to return

        self.assertIn(sentinel.delete_me, self.ex_manager.xs_by_name)

        self.ex_manager.delete_xs(xs)

        self.assertNotIn(sentinel.delete_me, self.ex_manager.xs_by_name)

        # call to broker
        self.ex_manager._transport.delete_exchange_impl.assert_called_once_with(self.ex_manager._client, exstr)

    def test_delete_xs_without_creating_it_first(self):
        xsmock = Mock(ExchangeSpace)
        xsmock._exchange = sentinel.fake

        self.assertRaises(KeyError, self.ex_manager.delete_xs, xsmock)

    def test_create_xp(self):
        xp      = self.ex_manager.create_xp(sentinel.xp)
        exstr   = "%s.ion.xs.%s.xp.%s" % (get_sys_name(), self.ex_manager.default_xs._exchange, str(sentinel.xp))

        self.assertEquals(xp._exchange, sentinel.xp)
        self.assertEquals(xp._xs, self.ex_manager.default_xs)
        self.assertEquals(xp._xptype, 'ttree')
        self.assertEquals(xp._queue, None)
        self.assertEquals(xp._binding, None)

        self.assertEquals(xp.exchange, exstr)

        # declaration
        self.ex_manager._transport.declare_exchange_impl.assert_called_once_with(self.ex_manager._client, exstr, auto_delete=True, durable=False, exchange_type='topic')

    def test_create_xp_with_params(self):
        xp = self.ex_manager.create_xp(sentinel.xp, xptype=sentinel.xptype)
        self.assertEquals(xp._xptype, sentinel.xptype)

    def test_create_xp_with_different_xs(self):
        xs = self.ex_manager.create_xs(sentinel.xs)
        xs_exstr = '%s.ion.xs.%s' % (get_sys_name(), str(sentinel.xs))     # what we expect the exchange property to return

        xp = self.ex_manager.create_xp(sentinel.xp, xs)
        xp_exstr = '%s.xp.%s' % (xs_exstr, str(sentinel.xp))

        # check mappings
        self.assertIn(sentinel.xp, self.ex_manager.xn_by_name)
        self.assertIn(xp, self.ex_manager.xn_by_xs[sentinel.xs])

        self.assertEquals(xp.exchange, xp_exstr)

    def test_delete_xp(self):
        xp      = self.ex_manager.create_xp(sentinel.xp)
        exstr   = "%s.ion.xs.%s.xp.%s" % (get_sys_name(), self.ex_manager.default_xs._exchange, str(sentinel.xp))

        self.assertIn(sentinel.xp, self.ex_manager.xn_by_name)

        self.ex_manager.delete_xp(xp)

        self.assertNotIn(sentinel.xp, self.ex_manager.xn_by_name)

        # deletion
        self.ex_manager._transport.delete_exchange_impl.assert_called_once_with(self.ex_manager._client, exstr)

    def test_delete_xp_without_creating_it_first(self):
        xpmock = Mock(ExchangePoint)
        xpmock._exchange = sentinel.delete_me

        self.assertRaises(KeyError, self.ex_manager.delete_xp, xpmock)

    def test__create_xn_unknown_type(self):
        self.assertRaises(StandardError, self.ex_manager._create_xn, sentinel.unknown)

    def test_create_xn_service(self):
        xn      = self.ex_manager.create_xn_service('servicename')
        qstr    = '%s.%s' % (xn.exchange, 'servicename')        # what we expect the queue name to look like

        self.assertIsInstance(xn, ExchangeName)
        self.assertIsInstance(xn, ExchangeNameService)

        # exclusive attrs to XN
        self.assertEquals(xn._xs, self.ex_manager.default_xs)
        self.assertEquals(xn._xn_auto_delete, ExchangeNameService._xn_auto_delete)
        self.assertEquals(xn._xn_durable, ExchangeNameService._xn_durable)
        self.assertEquals(xn.xn_type, 'XN_SERVICE')

        # underlying attrs
        self.assertEquals(xn._exchange, None)
        self.assertEquals(xn._queue, 'servicename')
        self.assertEquals(xn._binding, None)

        # top level props
        self.assertEquals(xn.exchange, self.ex_manager.default_xs.exchange)
        self.assertEquals(xn.queue, qstr)
        self.assertEquals(xn.binding, 'servicename')

        # should be in mapping
        self.assertIn('servicename', self.ex_manager.xn_by_name)
        self.assertIn(xn, self.ex_manager.xn_by_xs[ION_ROOT_XS])

        # declaration
        self.ex_manager._transport.declare_queue_impl.assert_called_once_with(self.ex_manager._client, qstr, durable=ExchangeNameService._xn_durable, auto_delete=ExchangeNameService._xn_auto_delete)

    def test_create_xn_process(self):
        xn = self.ex_manager.create_xn_process('procname')

        self.assertIsInstance(xn, ExchangeName)
        self.assertIsInstance(xn, ExchangeNameProcess)

    def test_create_xn_queue(self):
        xn = self.ex_manager.create_xn_queue('queuename')

        self.assertIsInstance(xn, ExchangeName)
        self.assertIsInstance(xn, ExchangeNameQueue)

    def test_create_xn_with_different_xs(self):
        xs = self.ex_manager.create_xs(sentinel.xs)
        xs_exstr = '%s.ion.xs.%s' % (get_sys_name(), str(sentinel.xs))     # what we expect the exchange property to return

        xn      = self.ex_manager.create_xn_service('servicename', xs)
        qstr    = '%s.%s' % (xn.exchange, 'servicename')        # what we expect the queue name to look like

        # check mappings
        self.assertIn('servicename', self.ex_manager.xn_by_name)
        self.assertIn(xn, self.ex_manager.xn_by_xs[sentinel.xs])

        self.assertEquals(xn.queue, qstr)

    def test_delete_xn(self):
        xn      = self.ex_manager.create_xn_process('procname')
        qstr    = '%s.%s' % (xn.exchange, 'procname')

        self.assertIn('procname', self.ex_manager.xn_by_name)

        self.ex_manager.delete_xn(xn)

        self.assertNotIn('procname', self.ex_manager.xn_by_name)

        # call to broker
        self.ex_manager._transport.delete_queue_impl.assert_called_once_with(self.ex_manager._client, qstr)

    def test_xn_setup_listener(self):
        xn      = self.ex_manager.create_xn_service('servicename')
        qstr    = '%s.%s' % (xn.exchange, 'servicename')        # what we expect the queue name to look like

        xn.setup_listener(sentinel.binding, None)

        self.ex_manager._transport.bind_impl.assert_called_once_with(self.ex_manager._client, xn.exchange, qstr, sentinel.binding)

    def test_xn_bind(self):
        xn      = self.ex_manager.create_xn_service('servicename')

        xn.bind(sentinel.bind)

        self.ex_manager._transport.bind_impl.assert_called_once_with(self.ex_manager._client, xn.exchange, xn.queue, sentinel.bind)

    def test_xn_unbind(self):
        xn      = self.ex_manager.create_xn_service('servicename')

        xn.unbind(sentinel.bind)

        self.ex_manager._transport.unbind_impl.assert_called_once_with(self.ex_manager._client, xn.exchange, xn.queue, sentinel.bind)
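The assertions in this test case encode a consistent naming scheme for exchange spaces, exchange points and service queues. A small standalone sketch of those conventions (string formatting only; the sysname value is an assumption for illustration):

sys_name    = 'mysys'                                 # assumed sysname for illustration
xs_exchange = '%s.ion.xs.%s' % (sys_name, 'myxs')     # exchange space exchange name
xp_exchange = '%s.xp.%s' % (xs_exchange, 'myxp')      # exchange point under that XS
queue_name  = '%s.%s' % (xs_exchange, 'servicename')  # service queue under the XS
assert xs_exchange == 'mysys.ion.xs.myxs'
assert xp_exchange == 'mysys.ion.xs.myxs.xp.myxp'
assert queue_name  == 'mysys.ion.xs.myxs.servicename'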
Example No. 15
class TestManagementAPI(PyonTestCase):
    def setUp(self):
        self.ex_manager = ExchangeManager(Mock())
        self.ex_manager._nodes = MagicMock()
        self.ex_manager._nodes.get.return_value.client.parameters.host = "testhost" # stringifies so don't use sentinel

        self.ex_manager._ems_client = Mock()

    def test__get_management_url(self):
        url = self.ex_manager._get_management_url()

        self.assertEquals(url, "http://*****:*****@patch('pyon.ion.exchange.json')
    @patch('pyon.ion.exchange.requests')
    def test__call_management(self, reqmock, jsonmock):
        content = self.ex_manager._call_management(sentinel.url)

        self.assertEquals(content, jsonmock.loads.return_value)
        reqmock.get.assert_called_once_with(sentinel.url, auth=('user', 'pass'), data=None)

    @patch('pyon.ion.exchange.json')
    @patch('pyon.ion.exchange.requests')
    def test__call_management_delete(self, reqmock, jsonmock):
        content = self.ex_manager._call_management_delete(sentinel.url)

        self.assertEquals(content, jsonmock.loads.return_value)
        reqmock.delete.assert_called_once_with(sentinel.url, auth=('user', 'pass'), data=None)

    @patch('pyon.ion.exchange.json')
    @patch('pyon.ion.exchange.requests')
    def test__make_management_call(self, reqmock, jsonmock):
        content = self.ex_manager._make_management_call(sentinel.url, method="scoop")

        reqmock.scoop.assert_called_once_with(sentinel.url, auth=('user', 'pass'), data=None)

    def test__make_management_call_delegates_to_ems(self):
        self.ex_manager._ems_available = Mock(return_value=True)

        content = self.ex_manager._make_management_call(sentinel.url, method=sentinel.anymeth)

        self.ex_manager._ems_client.call_management.assert_called_once_with(sentinel.url, sentinel.anymeth, headers=None)

    def test__make_management_call_raises_exceptions(self):
        rmock = Mock()
        rmock.return_value.raise_for_status.side_effect = requests.exceptions.Timeout

        with patch('pyon.ion.exchange.requests.get', rmock):
            self.assertRaises(exception.Timeout, self.ex_manager._make_management_call, sentinel.url, use_ems=False)

    def test_list_queues_does_filtering(self):
        self.ex_manager._list_queues = Mock(return_value=[{'name':'a_1'}, {'name':'a_2'}, {'name':'b_1'}, {'name':'b_2'}])

        self.assertEquals(len(self.ex_manager.list_queues("a_")), 2)
        self.assertEquals(len(self.ex_manager.list_queues("b_")), 2)
        self.assertEquals(len(self.ex_manager.list_queues("_")), 4)
        self.assertEquals(len(self.ex_manager.list_queues("_1")), 2)
        self.assertEquals(len(self.ex_manager.list_queues("_2")), 2)

    def test_list_bindings_does_filtering(self):
        self.ex_manager._list_bindings = Mock(return_value=[{'source':'ex_1', 'destination':'qq', 'routing_key':'', 'properties_key':'', 'destination_type':'queue'},
                                                            {'source':'ex_2', 'destination':'qa', 'routing_key':'', 'properties_key':'', 'destination_type':'queue'},
                                                            {'source':'ex_1', 'destination':'aq', 'routing_key':'', 'properties_key':'', 'destination_type':'queue'},
                                                            {'source':'ex_2', 'destination':'za', 'routing_key':'', 'properties_key':'', 'destination_type':'queue'},])

        self.assertEquals(len(self.ex_manager.list_bindings(exchange="ex_1")), 2)
        self.assertEquals(len(self.ex_manager.list_bindings(exchange="ex_2")), 2)
        self.assertEquals(len(self.ex_manager.list_bindings(exchange="ex_")), 4)
        self.assertEquals(len(self.ex_manager.list_bindings(queue="qq")), 1)
        self.assertEquals(len(self.ex_manager.list_bindings(queue="a")), 3)
        self.assertEquals(len(self.ex_manager.list_bindings(queue="q")), 3)
Example No. 16
class Container(BaseContainerAgent):
    """
    The Capability Container. Its purpose is to spawn/monitor processes and services
    that do the bulk of the work in the ION system. It also manages connections to the Exchange
    and the various forms of datastores in the systems.
    """

    # Singleton static variables
    #node        = None
    id          = None
    name        = None
    pidfile     = None
    instance    = None

    def __init__(self, *args, **kwargs):
        BaseContainerAgent.__init__(self, *args, **kwargs)

        self._is_started = False

        self._capabilities = []

        # set container id and cc_agent name (as they are set in base class call)
        self.id = get_default_container_id()
        self.name = "cc_agent_%s" % self.id

        Container.instance = self

        from pyon.core import bootstrap
        bootstrap.container_instance = self

        log.debug("Container (sysname=%s) initializing ..." % bootstrap.get_sys_name())

        # DatastoreManager - controls access to Datastores (both mock and couch backed)
        self.datastore_manager = DatastoreManager()

        self.datastore_manager.start()
        self._capabilities.append("DATASTORE_MANAGER")

        # Keep track of the overrides from the command-line, so they can trump app/rel file data
        self.spawn_args = kwargs

        # Instantiate Directory and self-register
        # Has the additional side effect of either
        # bootstrapping the configuration into the
        # directory or reading the configuration,
        # depending on the value of the auto_bootstrap setting
        self.directory = Directory()

        # Create this Container's specific ExchangeManager instance
        self.ex_manager = ExchangeManager(self)

        # Create this Container's specific ProcManager instance
        self.proc_manager = ProcManager(self)

        # Create this Container's specific AppManager instance
        self.app_manager = AppManager(self)

        # File System - Interface to the OS File System, using correct path names and setups
        self.file_system = FileSystem(CFG)

        # Governance Controller - manages the governance related interceptors
        self.governance_controller = GovernanceController(self)

        # sFlow manager - controls sFlow stat emission
        self.sflow_manager = SFlowManager(self)

        # Coordinates the container start
        self._status = "INIT"

        # protection for when the container itself is used as a Process for clients
        self.container = self

        log.debug("Container initialized, OK.")

    def start(self):
        log.debug("Container starting...")
        if self._is_started:
            raise ContainerError("Container already started")

        # Check if this UNIX process already runs a Container.
        self.pidfile = "cc-pid-%d" % os.getpid()
        if os.path.exists(self.pidfile):
            raise ContainerError("Container.on_start(): Container is a singleton per UNIX process. Existing pid file found: %s" % self.pidfile)

        # write out a PID file containing our agent messaging name
        with open(self.pidfile, 'w') as f:
            pid_contents = {'messaging': dict(CFG.server.amqp),
                            'container-agent': self.name,
                            'container-xp': bootstrap.get_sys_name() }
            f.write(msgpack.dumps(pid_contents))
            atexit.register(self._cleanup_pid)
            self._capabilities.append("PID_FILE")

        # set up abnormal termination handler for this container
        def handl(signum, frame):
            try:
                self._cleanup_pid()     # cleanup the pidfile first
                self.quit()             # now try to quit - will not error on second cleanup pidfile call
            finally:
                signal.signal(signal.SIGTERM, self._normal_signal)
                os.kill(os.getpid(), signal.SIGTERM)
        self._normal_signal = signal.signal(signal.SIGTERM, handl)

        self.datastore_manager.start()
        self._capabilities.append("DATASTORE_MANAGER")

        # Self-register with Directory
        self.directory.register("/Containers", self.id, cc_agent=self.name)
        self.directory.register("/Containers/%s" % self.id, "Processes")
        self._capabilities.append("DIRECTORY")

        # Event repository
        self.event_repository = EventRepository()
        self.event_pub = EventPublisher()

        self._capabilities.append("EVENT_REPOSITORY")

        # Local resource registry
        self.resource_registry = ResourceRegistry()
        self._capabilities.append("RESOURCE_REGISTRY")

        # Persistent objects
        self.datastore_manager.get_datastore("objects", DataStore.DS_PROFILE.OBJECTS)

        # State repository
        self.state_repository = StateRepository()
        self._capabilities.append("STATE_REPOSITORY")

        # Start ExchangeManager, which starts the node (broker connection)
        self.ex_manager.start()
        self._capabilities.append("EXCHANGE_MANAGER")

        self.proc_manager.start()
        self._capabilities.append("PROC_MANAGER")

        self.app_manager.start()
        self._capabilities.append("APP_MANAGER")

        self.governance_controller.start()
        self._capabilities.append("GOVERNANCE_CONTROLLER")

        if CFG.container.get('sflow', {}).get('enabled', False):
            self.sflow_manager.start()
            self._capabilities.append("SFLOW_MANAGER")

        # Start the CC-Agent API
        rsvc = ProcessRPCServer(node=self.node, from_name=self.name, service=self, process=self)

        # Start an ION process with the right kind of endpoint factory
        proc = self.proc_manager.proc_sup.spawn(name=self.name, listeners=[rsvc], service=self)
        self.proc_manager.proc_sup.ensure_ready(proc)
        self._capabilities.append("CONTAINER_AGENT")

        self.event_pub.publish_event(event_type="ContainerLifecycleEvent",
                                     origin=self.id, origin_type="CapabilityContainer",
                                     sub_type="START",
                                     state=ContainerStateEnum.START)

        self._is_started    = True
        self._status        = "RUNNING"

        log.info("Container started, OK.")

    @property
    def node(self):
        """
        Returns the active/default Node that should be used for most communication in the system.

        Defers to exchange manager, but only if it has been started, otherwise returns None.
        """
        if "EXCHANGE_MANAGER" in self._capabilities:
            return self.ex_manager.default_node

        return None

    @contextmanager
    def _push_status(self, new_status):
        """
        Temporarily sets the internal status flag.
        Use this as a decorator or in a with-statement before calling a temporary status changing
        method, like start_rel_from_url.
        """
        curstatus = self._status
        self._status = new_status
        try:
            yield
        finally:
            self._status = curstatus

    def serve_forever(self):
        """ Run the container until killed. """
        log.debug("In Container.serve_forever")
        
        if not self.proc_manager.proc_sup.running:
            self.start()

        # serve forever short-circuits if immediate is on and children len is ok
        num_procs = len(self.proc_manager.proc_sup.children)
        immediate = CFG.system.get('immediate', False)
        if not (immediate and num_procs == 1):  # only spawned greenlet is the CC-Agent

            # print a warning just in case
            if immediate and num_procs != 1:
                log.warn("CFG.system.immediate=True but number of spawned processes is not 1 (%d)", num_procs)

            try:
                # This just waits in this Greenlet for all child processes to complete,
                # which is triggered somewhere else.
                self.proc_manager.proc_sup.join_children()
            except (KeyboardInterrupt, SystemExit) as ex:
                log.info('Received a kill signal, shutting down the container.')
                watch_parent = CFG.system.get('watch_parent', None)
                if watch_parent:
                    watch_parent.kill()
            except:
                log.exception('Unhandled error! Forcing container shutdown')
        else:
            log.debug("Container.serve_forever short-circuiting due to CFG.system.immediate")

        self.proc_manager.proc_sup.shutdown(CFG.cc.timeout.shutdown)

    def status(self):
        """
        Returns the internal status.
        """
        return self._status
            
    def _cleanup_pid(self):
        if self.pidfile:
            log.debug("Cleanup pidfile: %s", self.pidfile)
            try:
                os.remove(self.pidfile)
            except Exception, e:
                log.warn("Pidfile could not be deleted: %s" % str(e))
            self.pidfile = None
Example No. 17
class TestExchangeObjects(PyonTestCase):
    def setUp(self):
        self.ex_manager = ExchangeManager(Mock())
        self.ex_manager._transport  = Mock(BaseTransport)
        self.ex_manager._client     = Mock()
        # all exchange level operations are patched out via the _transport

    def test_exchange_by_name(self):
        # defaults: Root XS, no XNs
        self.assertIn(ION_ROOT_XS, self.ex_manager.xs_by_name)
        self.assertIn(self.ex_manager.default_xs, self.ex_manager.xs_by_name.itervalues())
        self.assertEquals(len(self.ex_manager.xn_by_name), 0)

        # create another XS
        xs = self.ex_manager.create_xs('exchange')
        self.assertIn('exchange', self.ex_manager.xs_by_name)
        self.assertIn(xs, self.ex_manager.xs_by_name.values())
        self.assertEquals(len(self.ex_manager.xn_by_name), 0)

        # now create some XNs underneath default exchange
        xn1 = self.ex_manager.create_xn_process('xn1')
        self.assertEquals(xn1._xs, self.ex_manager.default_xs)
        self.assertIn('xn1', self.ex_manager.xn_by_name)
        self.assertIn(xn1, self.ex_manager.xn_by_name.values())
        self.assertEquals(xn1, self.ex_manager.xn_by_name['xn1'])
        self.assertIsInstance(xn1, ExchangeNameProcess)

        self.assertEquals({ION_ROOT_XS:[xn1]}, self.ex_manager.xn_by_xs)

        xn2 = self.ex_manager.create_xn_service('xn2')
        self.assertIn('xn2', self.ex_manager.xn_by_name)
        self.assertIn(xn2, self.ex_manager.xn_by_xs[ION_ROOT_XS])
        self.assertEquals(xn2.xn_type, 'XN_SERVICE')

        # create one under our second xn3
        xn3 = self.ex_manager.create_xn_queue('xn3', xs)
        self.assertIn('xn3', self.ex_manager.xn_by_name)
        self.assertIn(xn3, self.ex_manager.xn_by_xs['exchange'])
        self.assertNotIn(xn3, self.ex_manager.xn_by_xs[ION_ROOT_XS])

    def test_create_xs(self):
        xs      = self.ex_manager.create_xs(sentinel.xs)
        exstr   = '%s.ion.xs.%s' % (get_sys_name(), str(sentinel.xs))     # what we expect the exchange property to return

        self.assertEquals(xs._exchange, sentinel.xs)
        self.assertEquals(xs.exchange, exstr)
        self.assertEquals(xs.queue, None)
        self.assertEquals(xs.binding, None)

        self.assertEquals(xs._xs_exchange_type, 'topic')
        self.assertEquals(xs._xs_durable, False)
        self.assertEquals(xs._xs_auto_delete, True)

        # should be in our map too
        self.assertIn(sentinel.xs, self.ex_manager.xs_by_name)
        self.assertEquals(self.ex_manager.xs_by_name[sentinel.xs], xs)

        # should've tried to declare
        self.ex_manager._transport.declare_exchange_impl.assert_called_with(self.ex_manager._client, exstr, auto_delete=True, durable=False, exchange_type='topic')

    def test_create_xs_with_params(self):
        xs      = self.ex_manager.create_xs(sentinel.xs, exchange_type=sentinel.ex_type, durable=True)
        exstr   = '%s.ion.xs.%s' % (get_sys_name(), str(sentinel.xs))     # what we expect the exchange property to return

        self.assertEquals(xs._xs_durable, True)
        self.assertEquals(xs._xs_exchange_type, sentinel.ex_type)

        # declaration?
        self.ex_manager._transport.declare_exchange_impl.assert_called_with(self.ex_manager._client, exstr, auto_delete=True, durable=True, exchange_type=sentinel.ex_type)

    def test_delete_xs(self):
        # need an XS first
        xs      = self.ex_manager.create_xs(sentinel.delete_me)
        exstr   = '%s.ion.xs.%s' % (get_sys_name(), str(sentinel.delete_me))     # what we expect the exchange property to return

        self.assertIn(sentinel.delete_me, self.ex_manager.xs_by_name)

        self.ex_manager.delete_xs(xs)

        self.assertNotIn(sentinel.delete_me, self.ex_manager.xs_by_name)

        # call to broker
        self.ex_manager._transport.delete_exchange_impl.assert_called_once_with(self.ex_manager._client, exstr)

    def test_delete_xs_without_creating_it_first(self):
        xsmock = Mock(ExchangeSpace)
        xsmock._exchange = sentinel.fake

        self.assertRaises(KeyError, self.ex_manager.delete_xs, xsmock)

    def test_create_xp(self):
        xp      = self.ex_manager.create_xp(sentinel.xp)
        exstr   = "%s.ion.xs.%s.xp.%s" % (get_sys_name(), self.ex_manager.default_xs._exchange, str(sentinel.xp))

        self.assertEquals(xp._exchange, sentinel.xp)
        self.assertEquals(xp._xs, self.ex_manager.default_xs)
        self.assertEquals(xp._xptype, 'ttree')
        self.assertEquals(xp._queue, None)
        self.assertEquals(xp._binding, None)

        self.assertEquals(xp.exchange, exstr)

        # declaration
        self.ex_manager._transport.declare_exchange_impl.assert_called_with(self.ex_manager._client, exstr, auto_delete=True, durable=False, exchange_type='topic')

    def test_create_xp_with_params(self):
        xp = self.ex_manager.create_xp(sentinel.xp, xptype=sentinel.xptype)
        self.assertEquals(xp._xptype, sentinel.xptype)

    def test_create_xp_with_different_xs(self):
        xs = self.ex_manager.create_xs(sentinel.xs)
        xs_exstr = '%s.ion.xs.%s' % (get_sys_name(), str(sentinel.xs))     # what we expect the exchange property to return

        xp = self.ex_manager.create_xp(sentinel.xp, xs)
        xp_exstr = '%s.xp.%s' % (xs_exstr, str(sentinel.xp))

        # check mappings
        self.assertIn(sentinel.xp, self.ex_manager.xn_by_name)
        self.assertIn(xp, self.ex_manager.xn_by_xs[sentinel.xs])

        self.assertEquals(xp.exchange, xp_exstr)

    def test_delete_xp(self):
        xp      = self.ex_manager.create_xp(sentinel.xp)
        exstr   = "%s.ion.xs.%s.xp.%s" % (get_sys_name(), self.ex_manager.default_xs._exchange, str(sentinel.xp))

        self.assertIn(sentinel.xp, self.ex_manager.xn_by_name)

        self.ex_manager.delete_xp(xp)

        self.assertNotIn(sentinel.xp, self.ex_manager.xn_by_name)

        # deletion
        self.ex_manager._transport.delete_exchange_impl.assert_called_once_with(self.ex_manager._client, exstr)

    def test_delete_xp_without_creating_it_first(self):
        xpmock = Mock(ExchangePoint)
        xpmock._exchange = sentinel.delete_me

        self.assertRaises(KeyError, self.ex_manager.delete_xp, xpmock)

    def test__create_xn_unknown_type(self):
        self.assertRaises(StandardError, self.ex_manager._create_xn, sentinel.unknown)

    def test_create_xn_service(self):
        xn      = self.ex_manager.create_xn_service('servicename')
        qstr    = '%s.%s' % (xn.exchange, 'servicename')        # what we expect the queue name to look like

        self.assertIsInstance(xn, ExchangeName)
        self.assertIsInstance(xn, ExchangeNameService)

        # exclusive attrs to XN
        self.assertEquals(xn._xs, self.ex_manager.default_xs)
        self.assertEquals(xn._xn_auto_delete, ExchangeNameService._xn_auto_delete)
        self.assertEquals(xn._xn_durable, ExchangeNameService._xn_durable)
        self.assertEquals(xn.xn_type, 'XN_SERVICE')

        # underlying attrs
        self.assertEquals(xn._exchange, None)
        self.assertEquals(xn._queue, 'servicename')
        self.assertEquals(xn._binding, None)

        # top level props
        self.assertEquals(xn.exchange, self.ex_manager.default_xs.exchange)
        self.assertEquals(xn.queue, qstr)
        self.assertEquals(xn.binding, 'servicename')

        # should be in mapping
        self.assertIn('servicename', self.ex_manager.xn_by_name)
        self.assertIn(xn, self.ex_manager.xn_by_xs[ION_ROOT_XS])

        # declaration
        self.ex_manager._transport.declare_queue_impl.assert_called_once_with(self.ex_manager._client, qstr, durable=ExchangeNameService._xn_durable, auto_delete=ExchangeNameService._xn_auto_delete)

    def test_create_xn_process(self):
        xn = self.ex_manager.create_xn_process('procname')

        self.assertIsInstance(xn, ExchangeName)
        self.assertIsInstance(xn, ExchangeNameProcess)

    def test_create_xn_queue(self):
        xn = self.ex_manager.create_xn_queue('queuename')

        self.assertIsInstance(xn, ExchangeName)
        self.assertIsInstance(xn, ExchangeNameQueue)

    def test_create_xn_with_different_xs(self):
        xs = self.ex_manager.create_xs(sentinel.xs)
        xs_exstr = '%s.ion.xs.%s' % (get_sys_name(), str(sentinel.xs))     # what we expect the exchange property to return

        xn      = self.ex_manager.create_xn_service('servicename', xs)
        qstr    = '%s.%s' % (xn.exchange, 'servicename')        # what we expect the queue name to look like

        # check mappings
        self.assertIn('servicename', self.ex_manager.xn_by_name)
        self.assertIn(xn, self.ex_manager.xn_by_xs[sentinel.xs])

        self.assertEquals(xn.queue, qstr)

    def test_delete_xn(self):
        xn      = self.ex_manager.create_xn_process('procname')
        qstr    = '%s.%s' % (xn.exchange, 'procname')

        self.assertIn('procname', self.ex_manager.xn_by_name)

        self.ex_manager.delete_xn(xn)

        self.assertNotIn('procname', self.ex_manager.xn_by_name)

        # call to broker
        self.ex_manager._transport.delete_queue_impl.assert_called_once_with(self.ex_manager._client, qstr)

    def test_xn_setup_listener(self):
        xn      = self.ex_manager.create_xn_service('servicename')
        qstr    = '%s.%s' % (xn.exchange, 'servicename')        # what we expect the queue name to look like

        xn.setup_listener(sentinel.binding, None)

        self.ex_manager._transport.bind_impl.assert_called_once_with(self.ex_manager._client, xn.exchange, qstr, sentinel.binding)

    def test_xn_bind(self):
        xn      = self.ex_manager.create_xn_service('servicename')

        xn.bind(sentinel.bind)

        self.ex_manager._transport.bind_impl.assert_called_once_with(self.ex_manager._client, xn.exchange, xn.queue, sentinel.bind)

    def test_xn_unbind(self):
        xn      = self.ex_manager.create_xn_service('servicename')

        xn.unbind(sentinel.bind)

        self.ex_manager._transport.unbind_impl.assert_called_once_with(self.ex_manager._client, xn.exchange, xn.queue, sentinel.bind)
Example n. 18
0
@patch('pyon.ion.exchange.messaging')   # class-level patch implied by the mockmessaging argument each test receives
class TestExchangeManager(PyonTestCase):

    def setUp(self):
        self.container = Mock()
        self.ex_manager = ExchangeManager(self.container)
        self.ex_manager.get_transport = Mock()

    def test_verify_service(self, mockmessaging):
        PyonTestCase.test_verify_service(self)

    @patch.dict('pyon.ion.exchange.CFG', container=_make_container_cfg())
    def test_start_with_no_connections(self, mockmessaging):
        self.assertRaises(ExchangeManagerError, self.ex_manager.start)

    @patch.dict('pyon.ion.exchange.CFG', server={'amqp':dict_amqp, 'postgresql':CFG.server.postgresql}, container=_make_container_cfg(primary='amqp'))
    def test_start_with_one_connection(self, mockmessaging):
        mockmessaging.make_node.return_value = (Mock(), Mock())     # node, ioloop
        self.ex_manager.start()

        mockmessaging.make_node.assert_called_once_with(dict_amqp, 'primary', 0)
        self.assertIn('primary', self.ex_manager._nodes)
        self.assertIn('primary', self.ex_manager._ioloops)
        self.assertEquals(self.ex_manager._nodes['primary'], mockmessaging.make_node.return_value[0])
        self.assertEquals(self.ex_manager._ioloops['primary'], mockmessaging.make_node.return_value[1])

    @patch.dict('pyon.ion.exchange.CFG', server={'amqp':dict_amqp, 'amqp_again':dict_amqp_again, 'postgresql':CFG.server.postgresql}, container=_make_container_cfg(primary='amqp', secondary='amqp_again'))
    def test_start_with_multi_connections(self, mockmessaging):
        mockmessaging.make_node.return_value = (Mock(), Mock())     # node, ioloop
        self.ex_manager.start()

        mockmessaging.make_node.assert_has_calls([call(dict_amqp, 'primary', 0), call(dict_amqp_again, 'secondary', 0)])

        self.assertIn('primary', self.ex_manager._nodes)
        self.assertIn('primary', self.ex_manager._ioloops)
        self.assertEquals(self.ex_manager._nodes['primary'], mockmessaging.make_node.return_value[0])
        self.assertEquals(self.ex_manager._ioloops['primary'], mockmessaging.make_node.return_value[1])

        self.assertIn('secondary', self.ex_manager._nodes)
        self.assertIn('secondary', self.ex_manager._ioloops)
        self.assertEquals(self.ex_manager._nodes['secondary'], mockmessaging.make_node.return_value[0])
        self.assertEquals(self.ex_manager._ioloops['secondary'], mockmessaging.make_node.return_value[1])

    @patch.dict('pyon.ion.exchange.CFG', server={}, container=_make_container_cfg(primary='idontexist'))
    def test_start_with_non_existing_connection_in_server(self, mockmessaging):
        mockmessaging.make_node.return_value = (Mock(), Mock())     # node, ioloop

        self.assertRaises(ExchangeManagerError, self.ex_manager.start)
        self.assertFalse(mockmessaging.make_node.called)

    @patch.dict('pyon.ion.exchange.CFG', server={'amqp':dict_amqp, 'amqp_fail':dict_amqp_fail, 'postgresql':CFG.server.postgresql}, container=_make_container_cfg(primary='amqp', secondary='amqp_fail'))
    def test_start_with_working_and_failing_connection(self, mockmessaging):

        # set up return values - first is amqp (Working) second is amqp_fail (not working)
        nodemock = Mock()
        nodemock.running = False
        iomock = Mock()
        def ret_vals(conf, name, timeout):
            if name == 'secondary':
                return (nodemock, iomock)
            return (Mock(), Mock())

        mockmessaging.make_node.side_effect = ret_vals

        self.ex_manager.start()

        self.assertEquals(len(self.ex_manager._nodes), 1)
        iomock.kill.assert_called_once_with()

    @patch.dict('pyon.ion.exchange.CFG', server={'amqp_fail':dict_amqp_fail, 'postgresql':CFG.server.postgresql}, container=_make_container_cfg(primary='amqp_fail'))
    def test_start_with_only_failing_connections(self, mockmessaging):
        nodemock = Mock()
        nodemock.running = False
        iomock = Mock()

        mockmessaging.make_node.return_value = (nodemock, iomock)

        self.assertRaises(ExchangeManagerError, self.ex_manager.start)
        iomock.kill.assert_called_once_with()

    @patch.dict('pyon.ion.exchange.CFG', server={'amqp':dict_amqp, 'postgresql':CFG.server.postgresql}, container=_make_container_cfg(primary='amqp'))
    def test_start_stop(self, mockmessaging):
        nodemock = Mock()
        iomock = Mock()
        mockmessaging.make_node.return_value = (nodemock, iomock)

        self.ex_manager.start()
        self.ex_manager.stop()

        nodemock.stop_node.assert_called_once_with()
        iomock.kill.assert_called_once_with()

    def test_default_node_no_connections(self, mockmessaging):
        self.assertIsNone(self.ex_manager.default_node)

    @patch.dict('pyon.ion.exchange.CFG', server={'amqp_not_default':dict_amqp_not_default, 'postgresql':CFG.server.postgresql}, container=_make_container_cfg(secondary='amqp_not_default'))
    def test_default_node_no_default_name(self, mockmessaging):
        nodemock = Mock()
        mockmessaging.make_node.return_value = (nodemock, Mock())     # node, ioloop

        self.ex_manager.start()

        self.assertEquals(self.ex_manager.default_node, nodemock)

    @patch.dict('pyon.ion.exchange.CFG', server={'amqp':dict_amqp, 'amqp_again':dict_amqp_again, 'postgresql':CFG.server.postgresql}, container=_make_container_cfg(primary='amqp', secondary='amqp_again'))
    def test_default_node(self, mockmessaging):

        # set up return values - amqp returns this named version, amqp_again does not
        nodemock = Mock()
        iomock = Mock()
        def ret_vals(conf, name, timeout):
            if name == 'primary':
                return (nodemock, iomock)
            return (Mock(), Mock())

        mockmessaging.make_node.side_effect = ret_vals
        self.ex_manager.start()

        self.assertEquals(self.ex_manager.default_node, nodemock)
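
These tests temporarily rewrite the module-level CFG with mock's patch.dict, which swaps the given keys in for the duration of one test and restores the original contents afterwards. A minimal, self-contained sketch of that behavior (a plain dict stands in for pyon.ion.exchange.CFG; all names are illustrative):

from mock import patch

CFG = {'server': {}}                          # stand-in for pyon.ion.exchange.CFG

@patch.dict(CFG, server={'amqp': {'host': 'localhost'}})
def run_with_patched_cfg():
    # inside the decorated call the patched keys are visible
    assert CFG['server'] == {'amqp': {'host': 'localhost'}}

run_with_patched_cfg()
assert CFG['server'] == {}                    # original contents restored afterwards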
Example n. 19
0
File: cc.py Project: pkediyal/pyon
class Container(BaseContainerAgent):
    """
    The Capability Container. Its purpose is to spawn/monitor processes and services
    that do the bulk of the work in the ION system. It also manages connections to the Exchange
    and the various forms of datastores in the systems.
    """

    # Singleton static variables
    #node        = None
    id = None
    name = None
    pidfile = None
    instance = None

    def __init__(self, *args, **kwargs):
        BaseContainerAgent.__init__(self, *args, **kwargs)

        self._is_started = False
        # set container id and cc_agent name (as they are set in base class call)
        self.id = get_default_container_id()
        self.name = "cc_agent_%s" % self.id
        self._capabilities = []

        bootstrap.container_instance = self
        Container.instance = self

        log.debug("Container (sysname=%s) initializing ..." %
                  bootstrap.get_sys_name())

        # Keep track of the overrides from the command-line, so they can trump app/rel file data
        self.spawn_args = kwargs

        # DatastoreManager - controls access to Datastores (both mock and couch backed)
        self.datastore_manager = DatastoreManager()

        # TODO: Do not start a capability here. Symmetric start/stop
        self.datastore_manager.start()
        self._capabilities.append("DATASTORE_MANAGER")

        # Instantiate Directory
        self.directory = Directory()

        # internal router
        self.local_router = None

        # Create this Container's specific ExchangeManager instance
        self.ex_manager = ExchangeManager(self)

        # Create this Container's specific ProcManager instance
        self.proc_manager = ProcManager(self)

        # Create this Container's specific AppManager instance
        self.app_manager = AppManager(self)

        # File System - Interface to the OS File System, using correct path names and setups
        self.file_system = FileSystem(CFG)

        # Governance Controller - manages the governance related interceptors
        self.governance_controller = GovernanceController(self)

        # sFlow manager - controls sFlow stat emission
        self.sflow_manager = SFlowManager(self)

        # Coordinates the container start
        self._status = "INIT"

        # protection for when the container itself is used as a Process for clients
        self.container = self

        # publisher, initialized in start()
        self.event_pub = None

        # context-local storage
        self.context = LocalContextMixin()

        log.debug("Container initialized, OK.")

    def start(self):
        log.debug("Container starting...")
        if self._is_started:
            raise ContainerError("Container already started")

        # Check if this UNIX process already runs a Container.
        self.pidfile = "cc-pid-%d" % os.getpid()
        if os.path.exists(self.pidfile):
            raise ContainerError(
                "Container.on_start(): Container is a singleton per UNIX process. Existing pid file found: %s"
                % self.pidfile)

        # write out a PID file containing our agent messaging name
        with open(self.pidfile, 'w') as f:
            pid_contents = {
                'messaging': dict(CFG.server.amqp),
                'container-agent': self.name,
                'container-xp': bootstrap.get_sys_name()
            }
            f.write(msgpack.dumps(pid_contents))
            atexit.register(self._cleanup_pid)
            self._capabilities.append("PID_FILE")

        # set up abnormal termination handler for this container
        def handl(signum, frame):
            try:
                self._cleanup_pid()  # cleanup the pidfile first
                self.quit(
                )  # now try to quit - will not error on second cleanup pidfile call
            finally:
                signal.signal(signal.SIGTERM, self._normal_signal)
                os.kill(os.getpid(), signal.SIGTERM)

        self._normal_signal = signal.signal(signal.SIGTERM, handl)

        # set up greenlet debugging signal handler
        gevent.signal(signal.SIGUSR2, self._handle_sigusr2)

        self.datastore_manager.start()
        self._capabilities.append("DATASTORE_MANAGER")

        self._capabilities.append("DIRECTORY")

        # Event repository
        self.event_repository = EventRepository()
        self.event_pub = EventPublisher()
        self._capabilities.append("EVENT_REPOSITORY")

        # Local resource registry
        self.resource_registry = ResourceRegistry()
        self._capabilities.append("RESOURCE_REGISTRY")

        # Persistent objects
        self.datastore_manager.get_datastore("objects",
                                             DataStore.DS_PROFILE.OBJECTS)

        # State repository
        self.state_repository = StateRepository()
        self._capabilities.append("STATE_REPOSITORY")

        # internal router for local transports
        self.local_router = LocalRouter(bootstrap.get_sys_name())
        self.local_router.start()
        self.local_router.ready.wait(timeout=2)
        self._capabilities.append("LOCAL_ROUTER")

        # Start ExchangeManager, which starts the node (broker connection)
        self.ex_manager.start()
        self._capabilities.append("EXCHANGE_MANAGER")

        self.proc_manager.start()
        self._capabilities.append("PROC_MANAGER")

        self.app_manager.start()
        self._capabilities.append("APP_MANAGER")

        self.governance_controller.start()
        self._capabilities.append("GOVERNANCE_CONTROLLER")

        if CFG.get_safe('container.sflow.enabled', False):
            self.sflow_manager.start()
            self._capabilities.append("SFLOW_MANAGER")

        # Start the CC-Agent API
        rsvc = ProcessRPCServer(node=self.node,
                                from_name=self.name,
                                service=self,
                                process=self)

        cleanup = lambda _: self.proc_manager._cleanup_method(self.name, rsvc)

        # Start an ION process with the right kind of endpoint factory
        proc = self.proc_manager.proc_sup.spawn(name=self.name,
                                                listeners=[rsvc],
                                                service=self,
                                                cleanup_method=cleanup)
        self.proc_manager.proc_sup.ensure_ready(proc)
        proc.start_listeners()
        self._capabilities.append("CONTAINER_AGENT")

        self.event_pub.publish_event(event_type="ContainerLifecycleEvent",
                                     origin=self.id,
                                     origin_type="CapabilityContainer",
                                     sub_type="START",
                                     state=ContainerStateEnum.START)

        self._is_started = True
        self._status = "RUNNING"

        log.info("Container (%s) started, OK.", self.id)

    def _handle_sigusr2(self):  #, signum, frame):
        """
        Handles SIGUSR2, prints debugging greenlet information.
        """
        gls = GreenletLeak.get_greenlets()

        allgls = []

        for gl in gls:
            status = GreenletLeak.format_greenlet(gl)

            # build formatted output:
            # Greenlet at 0xdeadbeef
            #     self: <EndpointUnit at 0x1ffcceef>
            #     func: bound, EndpointUnit.some_func

            status[0].insert(
                0, "%s at %s:" % (gl.__class__.__name__, hex(id(gl))))
            # indent anything in status a second time
            prefmt = [s.replace("\t", "\t\t") for s in status[0]]
            prefmt.append("traceback:")

            for line in status[1]:
                for subline in line.split("\n")[0:2]:
                    prefmt.append(subline)

            glstr = "\n\t".join(prefmt)

            allgls.append(glstr)

        # print it out!
        print >> sys.stderr, "\n\n".join(allgls)
        with open("gls-%s" % os.getpid(), "w") as f:
            f.write("\n\n".join(allgls))
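
    # To trigger the dump above from a shell (hypothetical example):
    #     kill -USR2 <container-pid>
    # The formatted greenlet report is written to stderr and to a "gls-<pid>" file.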

    @property
    def node(self):
        """
        Returns the active/default Node that should be used for most communication in the system.

        Defers to exchange manager, but only if it has been started, otherwise returns None.
        """
        if "EXCHANGE_MANAGER" in self._capabilities:
            return self.ex_manager.default_node

        return None

    @contextmanager
    def _push_status(self, new_status):
        """
        Temporarily sets the internal status flag.
        Use this as a decorator or in a with-statement before calling a temporary status changing
        method, like start_rel_from_url.
        """
        curstatus = self._status
        self._status = new_status
        try:
            yield
        finally:
            self._status = curstatus
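
    # Hypothetical usage sketch (rel_url is illustrative): report a temporary
    # status while a long-running operation executes, then restore the old one.
    #
    #     with self._push_status("START_REL"):
    #         self.start_rel_from_url(rel_url)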

    def serve_forever(self):
        """ Run the container until killed. """
        log.debug("In Container.serve_forever")

        if not self.proc_manager.proc_sup.running:
            self.start()

        # serve forever short-circuits if immediate is on and children len is ok
        num_procs = len(self.proc_manager.proc_sup.children)
        immediate = CFG.system.get('immediate', False)
        if not (immediate
                and num_procs == 1):  # only spawned greenlet is the CC-Agent

            # print a warning just in case
            if immediate and num_procs != 1:
                log.warn(
                    "CFG.system.immediate=True but number of spawned processes is not 1 (%d)",
                    num_procs)

            try:
                # This just waits in this Greenlet for all child processes to complete,
                # which is triggered somewhere else.
                self.proc_manager.proc_sup.join_children()
            except (KeyboardInterrupt, SystemExit) as ex:
                log.info(
                    'Received a kill signal, shutting down the container.')

                if hasattr(self, 'gl_parent_watch'
                           ) and self.gl_parent_watch is not None:
                    self.gl_parent_watch.kill()

            except:
                log.exception('Unhandled error! Forcing container shutdown')
        else:
            log.debug(
                "Container.serve_forever short-circuiting due to CFG.system.immediate"
            )

        self.proc_manager.proc_sup.shutdown(CFG.cc.timeout.shutdown)

    def status(self):
        """
        Returns the internal status.
        """
        return self._status

    def _cleanup_pid(self):
        if self.pidfile:
            log.debug("Cleanup pidfile: %s", self.pidfile)
            try:
                os.remove(self.pidfile)
            except Exception, e:
                log.warn("Pidfile could not be deleted: %s" % str(e))
            self.pidfile = None
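
The pidfile handling in start() above is what enforces "one container per UNIX process": refuse to start if a pidfile already exists, write one otherwise, and clean it up on exit. A stripped-down sketch of that guard, with json standing in for msgpack and all names chosen for illustration:

import atexit
import json
import os

def acquire_pidfile(path, payload):
    # refuse to start if another container already wrote a pidfile for this process
    if os.path.exists(path):
        raise RuntimeError("existing pid file found: %s" % path)
    with open(path, "w") as f:
        f.write(json.dumps(payload))
    # best-effort cleanup on normal interpreter exit
    atexit.register(lambda: os.path.exists(path) and os.remove(path))

acquire_pidfile("cc-pid-%d" % os.getpid(), {"container-agent": "cc_agent_example"})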
Example n. 20
0
class QueueBlame(Plugin):
    name = 'queueblame'

    def __init__(self):
        Plugin.__init__(self)
        import uuid
        self.ds_name = "queueblame-%s" % str(uuid.uuid4())[0:6]

        self.queues_by_test = defaultdict(lambda: defaultdict(dict))

    def options(self, parser, env):
        super(QueueBlame, self).options(parser, env=env)

        parser.add_option('--queueblame-purge', action='store_true', dest='queueblame_purge', help='Purge queues with leftover messages and remove all bindings')

    def configure(self, options, conf):
        """Configure the plugin and system, based on selected options."""
        super(QueueBlame, self).configure(options, conf)

        self._queueblame_purge      = options.queueblame_purge

    def begin(self):
        self._active_queues = set()
        self._test_changes = {}
        self._queues_declared = []          # ordered list of queues declared
        self._queues = defaultdict(list)    # queue name -> list of accesses

        from pyon.ion.exchange import ExchangeManager
        from pyon.util.containers import DotDict
        from pyon.core.bootstrap import CFG
        from mock import Mock

        containermock = Mock()
        containermock.resource_registry.find_resources.return_value = ([], None)

        self.ex_manager = ExchangeManager(containermock)      # needs to be able to setattr
        self.ex_manager._nodes['priviledged'] = DotDict(client=DotDict(parameters=DotDict(host=CFG.get_safe('server.amqp.host', 'localhost'))))

    def finalize(self, result):
        pass

    def beforeTest(self, test):
        self._pre_defs = self.ex_manager.get_definitions()

        import os
        os.environ['QUEUE_BLAME'] = str(test.id())

    def afterTest(self, test):
        import os
        from pyon.core.bootstrap import get_sys_name        # can't guarantee exclusive access

        #os.environ.pop('QUEUE_BLAME')
        tid = test.id()

        post_defs = self.ex_manager.get_definitions()

        # diff the defs
        pre_queues = {str(x['name']) for x in self._pre_defs['queues']}
        post_queues = {str(x['name']) for x in post_defs['queues']}

        pre_exchanges = {str(x['name']) for x in self._pre_defs['exchanges']}
        post_exchanges = {str(x['name']) for x in post_defs['exchanges']}

        pre_binds = { (x['source'], x['destination'], x['routing_key']) for x in self._pre_defs['bindings'] if x['destination_type'] == 'queue' }
        post_binds = { (x['source'], x['destination'], x['routing_key']) for x in post_defs['bindings'] if x['destination_type'] == 'queue' }

        queue_diff_add      = post_queues.difference(pre_queues)
        exchange_diff_add   = post_exchanges.difference(pre_exchanges)
        binds_diff_add      = post_binds.difference(pre_binds)

        queue_diff_sub      = pre_queues.difference(post_queues)
        exchange_diff_sub   = pre_exchanges.difference(post_exchanges)
        binds_diff_sub      = pre_binds.difference(post_binds)

        # maintain active queue set
        map(self._active_queues.add, queue_diff_add)
        map(self._active_queues.discard, queue_diff_sub)

        # maintain changelog for tests
        self._test_changes[tid] = (queue_diff_add, queue_diff_sub, exchange_diff_add, exchange_diff_sub, binds_diff_add, binds_diff_sub)

        # add any new leftover queues to the list
        for q in queue_diff_add:
            if not q in self._queues_declared:
                self._queues_declared.append(q)

        # get stats about each leftover queue and record the access

        raw_queues_list = self.ex_manager._list_queues()
        raw_queues = { str(x['name']) : x for x in raw_queues_list }

        for q in self._queues_declared:

            # detect if queue has been deleted (and not readded)
            if len(self._queues[q]) > 0 and isinstance(self._queues[q][-1], str) and not q in queue_diff_add:
                continue

            # did we just delete it this test? add the sentinel
            if q in queue_diff_sub:
                self._queues[q].append(tid)
                continue

            # record the test, # messages on it, + bindings on the queue, - bindings on the queue
            self._queues[q].append( (tid,
                                     str(raw_queues[q]['messages']),
                                     [x for x in binds_diff_add if str(x[1]) == str(q)],
                                     [x for x in binds_diff_sub if str(x[1]) == str(q)]))

            # are we supposed to purge it / kill bindings?
            if self._queueblame_purge and raw_queues[q]['messages'] > 0:

                # remove bindings via API
                binds = self.ex_manager.list_bindings_for_queue(q)
                for bind in binds:
                    self.ex_manager.delete_binding_tuple(bind)

                    # add to list of removed bindings for report
                    rem_binds = self._queues[q][-1][3]
                    rem_binds.append(tuple(bind[0:2] + (bind[2] + " (PURGED)",)))

                # purge
                self.ex_manager.purge_queue(q)

    def report(self, stream):
        table = []
        for q in self._queues_declared:

            qd = self._queues[q]

            # first rows are:
            # queue     + test          # messages
            #             +B exchange   binding
            table.append([q, "+", qd[0][0], qd[0][1]])

            for bind in qd[0][2]:
                table.append(["", "", "    +B ex: %s key: %s" % (bind[0], bind[2]), ""])

            # add rest of accesses
            #             test          # messages
            #             +B exchange   binding
            #             -B exchange   binding
            for qdd in qd[1:]:
                if isinstance(qdd, str):
                    table.append(["", "-", qdd, ""])
                else:
                    table.append(["", "", qdd[0], qdd[1]])

                    for bind in qdd[2]:
                        table.append(["", "", "    +B ex: %s key: %s" % (bind[0], bind[2]), ""])
                    for bind in qdd[3]:
                        table.append(["", "", "    -B ex: %s key: %s" % (bind[0], bind[2]), ""])

        # header
        table.insert(0, ['Queue', '', 'Test', '# Msg'])

        # get widths
        widths = [max([len(row[x]) for row in table]) for x in xrange(len(table[0]))]
        fmt_out = [" ".join([x.ljust(widths[i]) for i, x in enumerate(row)]) for row in table]

        # insert col separation row
        fmt_out.insert(1, " ".join([''.ljust(widths[i], '=') for i in xrange(len(widths))]))

        # write this all to sstream
        stream.write("Queue blame report (purge: %s)\n" % (self._queueblame_purge))

        stream.write("\n".join(fmt_out))
        stream.write("\n")
Example n. 21
0
File: cc.py Project: pkediyal/pyon
    def __init__(self, *args, **kwargs):
        BaseContainerAgent.__init__(self, *args, **kwargs)

        self._is_started = False
        # set container id and cc_agent name (as they are set in base class call)
        self.id = get_default_container_id()
        self.name = "cc_agent_%s" % self.id
        self._capabilities = []

        bootstrap.container_instance = self
        Container.instance = self

        log.debug("Container (sysname=%s) initializing ..." %
                  bootstrap.get_sys_name())

        # Keep track of the overrides from the command-line, so they can trump app/rel file data
        self.spawn_args = kwargs

        # DatastoreManager - controls access to Datastores (both mock and couch backed)
        self.datastore_manager = DatastoreManager()

        # TODO: Do not start a capability here. Symmetric start/stop
        self.datastore_manager.start()
        self._capabilities.append("DATASTORE_MANAGER")

        # Instantiate Directory
        self.directory = Directory()

        # internal router
        self.local_router = None

        # Create this Container's specific ExchangeManager instance
        self.ex_manager = ExchangeManager(self)

        # Create this Container's specific ProcManager instance
        self.proc_manager = ProcManager(self)

        # Create this Container's specific AppManager instance
        self.app_manager = AppManager(self)

        # File System - Interface to the OS File System, using correct path names and setups
        self.file_system = FileSystem(CFG)

        # Governance Controller - manages the governance related interceptors
        self.governance_controller = GovernanceController(self)

        # sFlow manager - controls sFlow stat emission
        self.sflow_manager = SFlowManager(self)

        # Coordinates the container start
        self._status = "INIT"

        # protection for when the container itself is used as a Process for clients
        self.container = self

        # publisher, initialized in start()
        self.event_pub = None

        # context-local storage
        self.context = LocalContextMixin()

        log.debug("Container initialized, OK.")
Example n. 22
0
    def setUp(self):
        self.container = Mock()
        self.ex_manager = ExchangeManager(self.container)
        self.ex_manager.get_transport = Mock()
Example n. 23
0
File: cc.py Project: ooici-dm/pyon
class Container(BaseContainerAgent):
    """
    The Capability Container. Its purpose is to spawn/monitor processes and services
    that do the bulk of the work in the ION system.
    """

    # Singleton static variables
    node        = None
    id          = None
    name        = None
    pidfile     = None
    instance    = None

    def __init__(self, *args, **kwargs):
        BaseContainerAgent.__init__(self, *args, **kwargs)

        self._is_started = False

        # set id and name (as they are set in base class call)
        self.id = string.replace('%s_%d' % (os.uname()[1], os.getpid()), ".", "_")
        self.name = "cc_agent_%s" % self.id

        Container.instance = self

        # TODO: Bug: Replacing CFG instance not work because references are already public. Update directly
        dict_merge(CFG, kwargs, inplace=True)
        from pyon.core import bootstrap
        bootstrap.container_instance = self
        bootstrap.assert_configuration(CFG)
        log.debug("Container (sysname=%s) initializing ..." % bootstrap.get_sys_name())

        # Keep track of the overrides from the command-line, so they can trump app/rel file data
        self.spawn_args = kwargs

        # Load object and service registry etc.
        bootstrap_pyon()

        # Create this Container's specific ExchangeManager instance
        self.ex_manager = ExchangeManager(self)

        # Create this Container's specific ProcManager instance
        self.proc_manager = ProcManager(self)

        # Create this Container's specific AppManager instance
        self.app_manager = AppManager(self)

        # DatastoreManager - controls access to Datastores (both mock and couch backed)
        self.datastore_manager = DatastoreManager()

        # File System - Interface to the OS File System, using correct path names and setups
        self.file_system = FileSystem(CFG)

        # Governance Controller - manages the governance related interceptors
        self.governance_controller = GovernanceController(self)

        # sFlow manager - controls sFlow stat emission
        self.sflow_manager = SFlowManager(self)

        # Coordinates the container start
        self._is_started = False
        self._capabilities = []
        self._status = "INIT"

        # protection for when the container itself is used as a Process for clients
        self.container = self

        log.debug("Container initialized, OK.")


    def start(self):
        log.debug("Container starting...")
        if self._is_started:
            raise ContainerError("Container already started")

        # Check if this UNIX process already runs a Container.
        self.pidfile = "cc-pid-%d" % os.getpid()
        if os.path.exists(self.pidfile):
            raise ContainerError("Container.on_start(): Container is a singleton per UNIX process. Existing pid file found: %s" % self.pidfile)

        # write out a PID file containing our agent messaging name
        with open(self.pidfile, 'w') as f:
            pid_contents = {'messaging': dict(CFG.server.amqp),
                            'container-agent': self.name,
                            'container-xp': bootstrap.get_sys_name() }
            f.write(msgpack.dumps(pid_contents))
            atexit.register(self._cleanup_pid)
            self._capabilities.append("PID_FILE")

        # set up abnormal termination handler for this container
        def handl(signum, frame):
            try:
                self._cleanup_pid()     # cleanup the pidfile first
                self.quit()             # now try to quit - will not error on second cleanup pidfile call
            finally:
                signal.signal(signal.SIGTERM, self._normal_signal)
                os.kill(os.getpid(), signal.SIGTERM)
        self._normal_signal = signal.signal(signal.SIGTERM, handl)

        self._capabilities.append("EXCHANGE_CONNECTION")

        self.datastore_manager.start()
        self._capabilities.append("DATASTORE_MANAGER")

        # Instantiate Directory and self-register
        self.directory = Directory()
        self.directory.register("/Containers", self.id, cc_agent=self.name)
        self.directory.register("/Containers/%s" % self.id, "Processes")
        self._capabilities.append("DIRECTORY")

        # Event repository
        self.event_repository = EventRepository()
        self.event_pub = EventPublisher()

        self._capabilities.append("EVENT_REPOSITORY")

        # Local resource registry
        self.resource_registry = ResourceRegistry()
        self._capabilities.append("RESOURCE_REGISTRY")

        # Persistent objects
        self.datastore_manager.get_datastore("objects", DataStore.DS_PROFILE.OBJECTS)

        # State repository
        self.state_repository = StateRepository()
        self._capabilities.append("STATE_REPOSITORY")

        # Start ExchangeManager, which starts the node (broker connection)
        self.node, self.ioloop = self.ex_manager.start()
        self._capabilities.append("EXCHANGE_MANAGER")

        self.proc_manager.start()
        self._capabilities.append("PROC_MANAGER")

        self.app_manager.start()
        self._capabilities.append("APP_MANAGER")

        self.governance_controller.start()
        self._capabilities.append("GOVERNANCE_CONTROLLER")

        if CFG.container.get('sflow', {}).get('enabled', False):
            self.sflow_manager.start()
            self._capabilities.append("SFLOW_MANAGER")

        # Start the CC-Agent API
        rsvc = ProcessRPCServer(node=self.node, from_name=self.name, service=self, process=self)

        # Start an ION process with the right kind of endpoint factory
        proc = self.proc_manager.proc_sup.spawn((CFG.cc.proctype or 'green', None), listener=rsvc)
        self.proc_manager.proc_sup.ensure_ready(proc)
        self._capabilities.append("CONTAINER_AGENT")

        self.event_pub.publish_event(event_type="ContainerLifecycleEvent",
                                     origin=self.id, origin_type="CapabilityContainer",
                                     sub_type="START",
                                     state=ContainerStateEnum.START)

        self._is_started    = True
        self._status        = "RUNNING"

        log.info("Container started, OK.")

    @contextmanager
    def _push_status(self, new_status):
        """
        Temporarily sets the internal status flag.
        Use this as a decorator or in a with-statement before calling a temporary status changing
        method, like start_rel_from_url.
        """
        curstatus = self._status
        self._status = new_status
        try:
            yield
        finally:
            self._status = curstatus

    def serve_forever(self):
        """ Run the container until killed. """
        log.debug("In Container.serve_forever")
        
        if not self.proc_manager.proc_sup.running:
            self.start()

        # serve forever short-circuits if immediate is on and children len is ok
        num_procs = len(self.proc_manager.proc_sup.children)
        immediate = CFG.system.get('immediate', False)
        if not (immediate and num_procs == 1):  # only spawned greenlet is the CC-Agent

            # print a warning just in case
            if immediate and num_procs != 1:
                log.warn("CFG.system.immediate=True but number of spawned processes is not 1 (%d)", num_procs)

            try:
                # This just waits in this Greenlet for all child processes to complete,
                # which is triggered somewhere else.
                self.proc_manager.proc_sup.join_children()
            except (KeyboardInterrupt, SystemExit) as ex:
                log.info('Received a kill signal, shutting down the container.')
                watch_parent = CFG.system.get('watch_parent', None)
                if watch_parent:
                    watch_parent.kill()
            except:
                log.exception('Unhandled error! Forcing container shutdown')
        else:
            log.debug("Container.serve_forever short-circuiting due to CFG.system.immediate")

        self.proc_manager.proc_sup.shutdown(CFG.cc.timeout.shutdown)

    def status(self):
        """
        Returns the internal status.
        """
        return self._status
            
    def _cleanup_pid(self):
        if self.pidfile:
            log.debug("Cleanup pidfile: %s", self.pidfile)
            try:
                os.remove(self.pidfile)
            except Exception, e:
                log.warn("Pidfile could not be deleted: %s" % str(e))
            self.pidfile = None
Example n. 24
0
class TestExchangeObjects(PyonTestCase):
    def setUp(self):
        self.ex_manager = ExchangeManager(Mock())
        self.pt = Mock(spec=BaseTransport)
        self.ex_manager.get_transport = Mock(return_value=self.pt)

        # set up some nodes
        self.ex_manager._nodes = {'primary': Mock(), 'priviledged': Mock()}

        # patch for setUp and test
        self.patch_cfg('pyon.ion.exchange.CFG', {'container':{'exchange':{'auto_register':False}}, 'messaging':{'server':{}}})

        # start ex manager
        self.ex_manager.start()

    def test_exchange_by_name(self):
        # defaults: Root XS, no XNs
        self.assertIn(ION_ROOT_XS, self.ex_manager.xs_by_name)
        self.assertIn(self.ex_manager.default_xs, self.ex_manager.xs_by_name.itervalues())
        self.assertEquals(len(self.ex_manager.xn_by_name), 0)

        # create another XS
        xs = self.ex_manager.create_xs('exchange')
        self.assertIn('exchange', self.ex_manager.xs_by_name)
        self.assertIn(xs, self.ex_manager.xs_by_name.values())
        self.assertEquals(len(self.ex_manager.xn_by_name), 0)

        # now create some XNs underneath default exchange
        xn1 = self.ex_manager.create_xn_process('xn1')
        self.assertEquals(xn1._xs, self.ex_manager.default_xs)
        self.assertIn('xn1', self.ex_manager.xn_by_name)
        self.assertIn(xn1, self.ex_manager.xn_by_name.values())
        self.assertEquals(xn1, self.ex_manager.xn_by_name['xn1'])
        self.assertIsInstance(xn1, ExchangeNameProcess)

        self.assertEquals({ION_ROOT_XS:[xn1]}, self.ex_manager.xn_by_xs)

        xn2 = self.ex_manager.create_xn_service('xn2')
        self.assertIn('xn2', self.ex_manager.xn_by_name)
        self.assertIn(xn2, self.ex_manager.xn_by_xs[ION_ROOT_XS])
        self.assertEquals(xn2.xn_type, 'XN_SERVICE')

        # create one under our second xn3
        xn3 = self.ex_manager.create_xn_queue('xn3', xs)
        self.assertIn('xn3', self.ex_manager.xn_by_name)
        self.assertIn(xn3, self.ex_manager.xn_by_xs['exchange'])
        self.assertNotIn(xn3, self.ex_manager.xn_by_xs[ION_ROOT_XS])

    def test_create_xs(self):
        xs      = self.ex_manager.create_xs(sentinel.xs)
        exstr   = '%s.ion.xs.%s' % (get_sys_name(), str(sentinel.xs))     # what we expect the exchange property to return

        self.assertEquals(xs._exchange, sentinel.xs)
        self.assertEquals(xs.exchange, exstr)
        self.assertEquals(xs.queue, None)
        self.assertEquals(xs.binding, None)

        self.assertEquals(xs._xs_exchange_type, 'topic')
        self.assertEquals(xs._xs_durable, False)
        self.assertEquals(xs._xs_auto_delete, True)

        # should be in our map too
        self.assertIn(sentinel.xs, self.ex_manager.xs_by_name)
        self.assertEquals(self.ex_manager.xs_by_name[sentinel.xs], xs)

        # should've tried to declare
        self.pt.declare_exchange_impl.assert_called_with(exstr, auto_delete=True, durable=False, exchange_type='topic')

    def test_create_xs_with_params(self):
        xs      = self.ex_manager.create_xs(sentinel.xs, exchange_type=sentinel.ex_type, durable=True)
        exstr   = '%s.ion.xs.%s' % (get_sys_name(), str(sentinel.xs))     # what we expect the exchange property to return

        self.assertEquals(xs._xs_durable, True)
        self.assertEquals(xs._xs_exchange_type, sentinel.ex_type)

        # declaration?
        self.pt.declare_exchange_impl.assert_called_with(exstr, auto_delete=True, durable=True, exchange_type=sentinel.ex_type)

    def test_delete_xs(self):
        # need an XS first
        xs      = self.ex_manager.create_xs(sentinel.delete_me)
        exstr   = '%s.ion.xs.%s' % (get_sys_name(), str(sentinel.delete_me))     # what we expect the exchange property to return

        self.assertIn(sentinel.delete_me, self.ex_manager.xs_by_name)

        self.ex_manager.delete_xs(xs)

        self.assertNotIn(sentinel.delete_me, self.ex_manager.xs_by_name)

        # call to broker
        self.pt.delete_exchange_impl.assert_called_once_with(exstr)

    def test_create_xp(self):
        xp      = self.ex_manager.create_xp(sentinel.xp)
        exstr   = "%s.ion.xs.%s.xp.%s" % (get_sys_name(), self.ex_manager.default_xs._exchange, str(sentinel.xp))

        self.assertEquals(xp._exchange, sentinel.xp)
        self.assertEquals(xp._xs, self.ex_manager.default_xs)
        self.assertEquals(xp._xptype, 'ttree')
        self.assertEquals(xp._queue, None)
        self.assertEquals(xp._binding, None)

        self.assertEquals(xp.exchange, exstr)

        # declaration
        self.pt.declare_exchange_impl.assert_called_with(exstr)

    def test_create_xp_with_params(self):
        xp = self.ex_manager.create_xp(sentinel.xp, xptype=sentinel.xptype)
        self.assertEquals(xp._xptype, sentinel.xptype)

    def test_create_xp_with_different_xs(self):
        xs = self.ex_manager.create_xs(sentinel.xs)
        xs_exstr = '%s.ion.xs.%s' % (get_sys_name(), str(sentinel.xs))     # what we expect the exchange property to return

        xp = self.ex_manager.create_xp(sentinel.xp, xs)
        xp_exstr = '%s.xp.%s' % (xs_exstr, str(sentinel.xp))

        # check mappings
        self.assertIn(sentinel.xp, self.ex_manager.xn_by_name)
        self.assertIn(xp, self.ex_manager.xn_by_xs[sentinel.xs])

        self.assertEquals(xp.exchange, xp_exstr)

    def test_delete_xp(self):
        xp      = self.ex_manager.create_xp(sentinel.xp)
        exstr   = "%s.ion.xs.%s.xp.%s" % (get_sys_name(), self.ex_manager.default_xs._exchange, str(sentinel.xp))

        self.assertIn(sentinel.xp, self.ex_manager.xn_by_name)

        self.ex_manager.delete_xp(xp)

        self.assertNotIn(sentinel.xp, self.ex_manager.xn_by_name)

        # deletion
        self.pt.delete_exchange_impl.assert_called_once_with(exstr)

    def test__create_xn_unknown_type(self):
        self.assertRaises(StandardError, self.ex_manager._create_xn, sentinel.unknown)

    def test_create_xn_service(self):
        xn      = self.ex_manager.create_xn_service('servicename')
        qstr    = '%s.%s' % (xn.exchange, 'servicename')        # what we expect the queue name to look like

        self.assertIsInstance(xn, ExchangeName)
        self.assertIsInstance(xn, ExchangeNameService)

        # exclusive attrs to XN
        self.assertEquals(xn._xs, self.ex_manager.default_xs)
        self.assertEquals(xn._xn_auto_delete, ExchangeNameService._xn_auto_delete)
        self.assertEquals(xn._xn_durable, ExchangeNameService._xn_durable)
        self.assertEquals(xn.xn_type, 'XN_SERVICE')

        # underlying attrs
        self.assertEquals(xn._exchange, None)
        self.assertEquals(xn._queue, 'servicename')
        self.assertEquals(xn._binding, None)

        # top level props
        self.assertEquals(xn.exchange, self.ex_manager.default_xs.exchange)
        self.assertEquals(xn.queue, qstr)
        self.assertEquals(xn.binding, 'servicename')

        # should be in mapping
        self.assertIn('servicename', self.ex_manager.xn_by_name)
        self.assertIn(xn, self.ex_manager.xn_by_xs[ION_ROOT_XS])

        # declaration
        self.pt.declare_queue_impl.assert_called_once_with(qstr, durable=ExchangeNameService._xn_durable, auto_delete=ExchangeNameService._xn_auto_delete)

    def test_create_xn_process(self):
        xn = self.ex_manager.create_xn_process('procname')

        self.assertIsInstance(xn, ExchangeName)
        self.assertIsInstance(xn, ExchangeNameProcess)

    def test_create_xn_queue(self):
        xn = self.ex_manager.create_xn_queue('queuename')

        self.assertIsInstance(xn, ExchangeName)
        self.assertIsInstance(xn, ExchangeNameQueue)

    def test_create_xn_with_different_xs(self):
        xs = self.ex_manager.create_xs(sentinel.xs)
        xs_exstr = '%s.ion.xs.%s' % (get_sys_name(), str(sentinel.xs))     # what we expect the exchange property to return

        xn      = self.ex_manager.create_xn_service('servicename', xs)
        qstr    = '%s.%s' % (xn.exchange, 'servicename')        # what we expect the queue name to look like

        # check mappings
        self.assertIn('servicename', self.ex_manager.xn_by_name)
        self.assertIn(xn, self.ex_manager.xn_by_xs[sentinel.xs])

        self.assertEquals(xn.queue, qstr)

    def test_delete_xn(self):
        xn      = self.ex_manager.create_xn_process('procname')
        qstr    = '%s.%s' % (xn.exchange, 'procname')

        self.assertIn('procname', self.ex_manager.xn_by_name)

        self.ex_manager.delete_xn(xn)

        self.assertNotIn('procname', self.ex_manager.xn_by_name)

        # call to broker
        self.pt.delete_queue_impl.assert_called_once_with(qstr)

    def test_xn_setup_listener(self):
        xn      = self.ex_manager.create_xn_service('servicename')
        qstr    = '%s.%s' % (xn.exchange, 'servicename')        # what we expect the queue name to look like

        xn.setup_listener(sentinel.binding, None)

        self.pt.bind_impl.assert_called_once_with(xn.exchange, qstr, sentinel.binding)

    def test_xn_bind(self):
        xn      = self.ex_manager.create_xn_service('servicename')

        xn.bind(sentinel.bind)

        self.pt.bind_impl.assert_called_once_with(xn.exchange, xn.queue, sentinel.bind)

    def test_xn_unbind(self):
        xn      = self.ex_manager.create_xn_service('servicename')

        xn.unbind(sentinel.bind)

        self.pt.unbind_impl.assert_called_once_with(xn.exchange, xn.queue, sentinel.bind)
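
This variant stubs the transport with Mock(spec=BaseTransport), so the fake only accepts attributes that the real transport interface defines and any typo in a test surfaces as an AttributeError. A self-contained sketch of that spec behavior (FakeTransport below is an illustrative stand-in, not pyon's BaseTransport):

from mock import Mock

class FakeTransport(object):
    def declare_exchange_impl(self, exchange, **kwargs):
        pass

pt = Mock(spec=FakeTransport)
pt.declare_exchange_impl('sys.ion.xs.demo', durable=False)
pt.declare_exchange_impl.assert_called_once_with('sys.ion.xs.demo', durable=False)

try:
    pt.no_such_method()
except AttributeError:
    pass   # spec'd mocks reject attributes the real class does not define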
Example n. 25
0
    def setUp(self):
        self.ex_manager = ExchangeManager(Mock())
        self.ex_manager._transport  = Mock(BaseTransport)
        self.ex_manager._client     = Mock()
Example n. 26
0
class Container(BaseContainerAgent):
    """
    The Capability Container. Its purpose is to spawn/monitor processes and services
    that do the bulk of the work in the ION system.
    """

    node        = None
    id          = None
    name        = None
    pidfile     = None
    instance    = None

    def __init__(self, *args, **kwargs):
        BaseContainerAgent.__init__(self, *args, **kwargs)

        # set id and name (as they are set in base class call)
        self.id = string.replace('%s_%d' % (os.uname()[1], os.getpid()), ".", "_")
        self.name = "cc_agent_%s" % self.id

        Container.instance = self

        # TODO: Bug: Replacing CFG instance not work because references are already public. Update directly
        dict_merge(CFG, kwargs)
        from pyon.core import bootstrap
        bootstrap.sys_name = CFG.system.name or bootstrap.sys_name
        log.debug("Container (sysname=%s) initializing ..." % bootstrap.sys_name)

        # Keep track of the overrides from the command-line, so they can trump app/rel file data
        self.spawn_args = DictModifier(CFG, kwargs)

        # Load object and service registry
        bootstrap_pyon()

        # Create this Container's specific ExchangeManager instance
        self.ex_manager = ExchangeManager(self)

        # Create this Container's specific ProcManager instance
        self.proc_manager = ProcManager(self)

        # Create this Container's specific AppManager instance
        self.app_manager = AppManager(self)
        
        log.debug("Container initialized, OK.")


    def start(self):
        log.debug("Container starting...")

        # Check if this UNIX process already runs a Container.
        self.pidfile = "cc-pid-%d" % os.getpid()
        if os.path.exists(self.pidfile):
            raise Exception("Container.on_start(): Container is a singleton per UNIX process. Existing pid file found: %s" % self.pidfile)

        # write out a PID file containing our agent messaging name
        with open(self.pidfile, 'w') as f:
            from pyon.core.bootstrap import sys_name
            pid_contents = {'messaging': dict(CFG.server.amqp),
                            'container-agent': self.name,
                            'container-xp': sys_name }
            f.write(msgpack.dumps(pid_contents))
            atexit.register(self._cleanup_pid)

        # set up abnormal termination handler for this container
        def handl(signum, frame):
            try:
                self._cleanup_pid()     # cleanup the pidfile first
                self.quit()             # now try to quit - will not error on second cleanup pidfile call
            finally:
                signal.signal(signal.SIGTERM, self._normal_signal)
                os.kill(os.getpid(), signal.SIGTERM)
        self._normal_signal = signal.signal(signal.SIGTERM, handl)


        # Start ExchangeManager. In particular establish broker connection
        self.ex_manager.start()

        # TODO: Move this in ExchangeManager - but there is an error
        self.node, self.ioloop = messaging.make_node() # TODO: shortcut hack


        # Instantiate Directory singleton and self-register
        # TODO: At this point, there is no special config override
        self.directory = Directory()
        self.directory.register("/Containers", self.id, cc_agent=self.name)

        self.proc_manager.start()

        self.app_manager.start()

        # Start the CC-Agent API
        rsvc = ProcessRPCServer(node=self.node, name=self.name, service=self, process=self)

        # Start an ION process with the right kind of endpoint factory
        self.proc_manager.proc_sup.spawn((CFG.cc.proctype or 'green', None), listener=rsvc)
        rsvc.get_ready_event().wait(timeout=10)   # @TODO: no hardcode
        log.info("Container started, OK.")

    def serve_forever(self):
        """ Run the container until killed. """
        log.debug("In Container.serve_forever")
        
        if not self.proc_manager.proc_sup.running:
            self.start()
            
        try:
            # This just waits in this Greenlet for all child processes to complete,
            # which is triggered somewhere else.
            self.proc_manager.proc_sup.join_children()
        except (KeyboardInterrupt, SystemExit) as ex:
            log.info('Received a kill signal, shutting down the container.')
        except:
            log.exception('Unhandled error! Forcing container shutdown')

        self.proc_manager.proc_sup.shutdown(CFG.cc.timeout.shutdown)
            
    def _cleanup_pid(self):
        if self.pidfile:
            log.debug("Cleanup pidfile: %s", self.pidfile)
            try:
                os.remove(self.pidfile)
            except Exception, e:
                log.warn("Pidfile could not be deleted: %s" % str(e))
            self.pidfile = None
Example n. 27
0
@patch('pyon.ion.exchange.messaging')   # class-level patch implied by the mockmessaging argument each test receives
class TestExchangeManager(PyonTestCase):
    def setUp(self):
        self.container = Mock()
        self.ex_manager = ExchangeManager(self.container)
        self.ex_manager._get_channel = Mock()

    def test_verify_service(self, mockmessaging):
        PyonTestCase.test_verify_service(self)

    @patch.dict('pyon.ion.exchange.CFG', container=_make_server_cfg())
    def test_start_with_no_connections(self, mockmessaging):
        self.assertRaises(ExchangeManagerError, self.ex_manager.start)

    @patch.dict('pyon.ion.exchange.CFG', server={'amqp':sentinel.amqp}, container=_make_server_cfg(primary='amqp'))
    def test_start_with_one_connection(self, mockmessaging):
        mockmessaging.make_node.return_value = (Mock(), Mock())     # node, ioloop
        self.ex_manager.start()

        mockmessaging.make_node.assert_called_once_with(sentinel.amqp, 'primary', 0)
        self.assertIn('primary', self.ex_manager._nodes)
        self.assertIn('primary', self.ex_manager._ioloops)
        self.assertEquals(self.ex_manager._nodes['primary'], mockmessaging.make_node.return_value[0])
        self.assertEquals(self.ex_manager._ioloops['primary'], mockmessaging.make_node.return_value[1])

    @patch.dict('pyon.ion.exchange.CFG', server={'amqp':sentinel.amqp, 'amqp_again':sentinel.amqp_again}, container=_make_server_cfg(primary='amqp', secondary='amqp_again'))
    def test_start_with_multi_connections(self, mockmessaging):
        mockmessaging.make_node.return_value = (Mock(), Mock())     # node, ioloop
        self.ex_manager.start()

        mockmessaging.make_node.assert_has_calls([call(sentinel.amqp, 'primary', 0), call(sentinel.amqp_again, 'secondary', 0)])

        self.assertIn('primary', self.ex_manager._nodes)
        self.assertIn('primary', self.ex_manager._ioloops)
        self.assertEquals(self.ex_manager._nodes['primary'], mockmessaging.make_node.return_value[0])
        self.assertEquals(self.ex_manager._ioloops['primary'], mockmessaging.make_node.return_value[1])

        self.assertIn('secondary', self.ex_manager._nodes)
        self.assertIn('secondary', self.ex_manager._ioloops)
        self.assertEquals(self.ex_manager._nodes['secondary'], mockmessaging.make_node.return_value[0])
        self.assertEquals(self.ex_manager._ioloops['secondary'], mockmessaging.make_node.return_value[1])

    @patch.dict('pyon.ion.exchange.CFG', server={}, container=_make_server_cfg(primary='idontexist'))
    def test_start_with_non_existing_connection_in_server(self, mockmessaging):
        mockmessaging.make_node.return_value = (Mock(), Mock())     # node, ioloop

        self.assertRaises(ExchangeManagerError, self.ex_manager.start)
        self.assertFalse(mockmessaging.make_node.called)

    @patch.dict('pyon.ion.exchange.CFG', server={'amqp':sentinel.amqp, 'amqp_fail':sentinel.amqp_fail}, container=_make_server_cfg(primary='amqp', secondary='amqp_fail'))
    def test_start_with_working_and_failing_connection(self, mockmessaging):

        # set up return values - first is amqp (working), second is amqp_fail (not working)
        nodemock = Mock()
        nodemock.running = False
        iomock = Mock()
        def ret_vals(conf, name, timeout):
            if name == 'secondary':
                return (nodemock, iomock)
            return (Mock(), Mock())

        mockmessaging.make_node.side_effect = ret_vals

        self.ex_manager.start()

        self.assertEquals(len(self.ex_manager._nodes), 1)
        iomock.kill.assert_called_once_with()

    @patch.dict('pyon.ion.exchange.CFG', server={'amqp_fail':sentinel.amqp_fail}, container=_make_server_cfg(primary='amqp_fail'))
    def test_start_with_only_failing_connections(self, mockmessaging):
        nodemock = Mock()
        nodemock.running = False
        iomock = Mock()

        mockmessaging.make_node.return_value = (nodemock, iomock)

        self.assertRaises(ExchangeManagerError, self.ex_manager.start)
        iomock.kill.assert_called_once_with()

    @patch.dict('pyon.ion.exchange.CFG', server={'amqp':sentinel.amqp}, container=_make_server_cfg(primary='amqp'))
    def test_start_stop(self, mockmessaging):
        nodemock = Mock()
        iomock = Mock()
        mockmessaging.make_node.return_value = (nodemock, iomock)

        self.ex_manager.start()
        self.ex_manager.stop()

        nodemock.stop_node.assert_called_once_with()
        iomock.kill.assert_called_once_with()

    def test_default_node_no_connections(self, mockmessaging):
        self.assertIsNone(self.ex_manager.default_node)

    @patch.dict('pyon.ion.exchange.CFG', server={'amqp_not_default':sentinel.amqp_not_default}, container=_make_server_cfg(secondary='amqp_not_default'))
    def test_default_node_no_default_name(self, mockmessaging):
        nodemock = Mock()
        mockmessaging.make_node.return_value = (nodemock, Mock())     # node, ioloop

        self.ex_manager.start()

        self.assertEquals(self.ex_manager.default_node, nodemock)

    @patch.dict('pyon.ion.exchange.CFG', server={'amqp':sentinel.amqp, 'amqp_again':sentinel.amqp_again}, container=_make_server_cfg(primary='amqp', secondary='amqp_again'))
    def test_default_node(self, mockmessaging):

        # set up return values - amqp returns this named version, amqp_again does not
        nodemock = Mock()
        iomock = Mock()
        def ret_vals(conf, name, timeout):
            if name == 'primary':
                return (nodemock, iomock)
            return (Mock(), Mock())

        mockmessaging.make_node.side_effect = ret_vals
        self.ex_manager.start()

        self.assertEquals(self.ex_manager.default_node, nodemock)
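
The mockmessaging argument on every test above implies a class-level patch of pyon.ion.exchange.messaging that is not included in this snippet. The _make_server_cfg helper is also not shown; a plausible sketch, assuming it only builds the messaging.server name mapping that ExchangeManager.start reads from CFG.container (the exact nesting is an assumption):

# Hypothetical helper: maps logical node names (primary, secondary, ...) to
# keys of CFG.server; the nested-dict shape is assumed, not taken from pyon.
def _make_server_cfg(**kwargs):
    return {'messaging': {'server': dict(kwargs)}}
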
Example n. 28
0
    def setUp(self):
        self.container = Mock()
        self.ex_manager = ExchangeManager(self.container)
        self.ex_manager.get_transport = Mock()
Example n. 29
0
    def setUp(self):
        self.container = Mock()
        self.ex_manager = ExchangeManager(self.container)
        self.ex_manager._get_channel = Mock()
Example n. 30
0
    def setUp(self):
        self.ex_manager = ExchangeManager(Mock())
        self.ex_manager._nodes = MagicMock()
        self.ex_manager._nodes.get.return_value.client.parameters.host = "testhost" # stringifies so don't use sentinel

        self.ex_manager._ems_client = Mock()
Example n. 31
0
File: cc.py Project: dstuebe/pyon
class Container(BaseContainerAgent):
    """
    The Capability Container. Its purpose is to spawn/monitor processes and services
    that do the bulk of the work in the ION system.
    """

    # Singleton static variables
    node        = None
    id          = None
    name        = None
    pidfile     = None
    instance    = None

    def __init__(self, *args, **kwargs):
        BaseContainerAgent.__init__(self, *args, **kwargs)

        self._is_started = False

        # set id and name (as they are set in base class call)
        self.id = string.replace('%s_%d' % (os.uname()[1], os.getpid()), ".", "_")
        self.name = "cc_agent_%s" % self.id

        Container.instance = self

        # TODO: Bug: Replacing the CFG instance does not work because references are already public. Update directly
        dict_merge(CFG, kwargs, inplace=True)
        from pyon.core import bootstrap
        bootstrap.container_instance = self
        bootstrap.assert_configuration(CFG)
        bootstrap.sys_name = CFG.system.name or bootstrap.sys_name
        log.debug("Container (sysname=%s) initializing ..." % bootstrap.sys_name)

        # Keep track of the overrides from the command-line, so they can trump app/rel file data
        self.spawn_args = kwargs

        # Load object and service registry etc.
        bootstrap_pyon()

        # Create this Container's specific ExchangeManager instance
        self.ex_manager = ExchangeManager(self)

        # Create this Container's specific ProcManager instance
        self.proc_manager = ProcManager(self)

        # Create this Container's specific AppManager instance
        self.app_manager = AppManager(self)

        # DatastoreManager - controls access to Datastores (both mock and couch backed)
        self.datastore_manager = DatastoreManager()

        # File System - Interface to the OS File System, using correct path names and setups
        self.file_system = FileSystem(CFG)

        # Coordinates the container start
        self._is_started = False
        self._capabilities = []

        log.debug("Container initialized, OK.")


    def start(self):
        log.debug("Container starting...")
        if self._is_started:
            raise ContainerError("Container already started")

        # Check if this UNIX process already runs a Container.
        self.pidfile = "cc-pid-%d" % os.getpid()
        if os.path.exists(self.pidfile):
            raise ContainerError("Container.on_start(): Container is a singleton per UNIX process. Existing pid file found: %s" % self.pidfile)

        # write out a PID file containing our agent messaging name
        with open(self.pidfile, 'w') as f:
            from pyon.core.bootstrap import get_sys_name
            pid_contents = {'messaging': dict(CFG.server.amqp),
                            'container-agent': self.name,
                            'container-xp': get_sys_name() }
            f.write(msgpack.dumps(pid_contents))
            atexit.register(self._cleanup_pid)
            self._capabilities.append("PID_FILE")

        # set up abnormal termination handler for this container
        def handl(signum, frame):
            try:
                self._cleanup_pid()     # cleanup the pidfile first
                self.quit()             # now try to quit - will not error on second cleanup pidfile call
            finally:
                signal.signal(signal.SIGTERM, self._normal_signal)
                os.kill(os.getpid(), signal.SIGTERM)
        self._normal_signal = signal.signal(signal.SIGTERM, handl)

        self._capabilities.append("EXCHANGE_CONNECTION")

        self.datastore_manager.start()
        self._capabilities.append("DATASTORE_MANAGER")

        # Instantiate Directory and self-register
        self.directory = Directory()
        self.directory.register("/Containers", self.id, cc_agent=self.name)
        self._capabilities.append("DIRECTORY")

        # Create other repositories to make sure they are there and clean if needed
        self.datastore_manager.get_datastore("resources", DataStore.DS_PROFILE.RESOURCES)

        self.datastore_manager.get_datastore("objects", DataStore.DS_PROFILE.OBJECTS)

        self.state_repository = StateRepository()
        self._capabilities.append("STATE_REPOSITORY")

        self.event_repository = EventRepository()
        self._capabilities.append("EVENT_REPOSITORY")

        # Start ExchangeManager. In particular establish broker connection
        self.ex_manager.start()

        # TODO: Move this into ExchangeManager - but there is an error
        self.node, self.ioloop = messaging.make_node() # TODO: shortcut hack
        self._capabilities.append("EXCHANGE_MANAGER")

        self.proc_manager.start()
        self._capabilities.append("PROC_MANAGER")

        self.app_manager.start()
        self._capabilities.append("APP_MANAGER")

        # Start the CC-Agent API
        rsvc = ProcessRPCServer(node=self.node, name=self.name, service=self, process=self)

        # Start an ION process with the right kind of endpoint factory
        proc = self.proc_manager.proc_sup.spawn((CFG.cc.proctype or 'green', None), listener=rsvc)
        self.proc_manager.proc_sup.ensure_ready(proc)
        self._capabilities.append("CONTAINER_AGENT")

        self._is_started = True

        log.info("Container started, OK.")

    def serve_forever(self):
        """ Run the container until killed. """
        log.debug("In Container.serve_forever")
        
        if not self.proc_manager.proc_sup.running:
            self.start()
            
        try:
            # This just waits in this Greenlet for all child processes to complete,
            # which is triggered somewhere else.
            self.proc_manager.proc_sup.join_children()
        except (KeyboardInterrupt, SystemExit) as ex:
            log.info('Received a kill signal, shutting down the container.')
        except:
            log.exception('Unhandled error! Forcing container shutdown')

        self.proc_manager.proc_sup.shutdown(CFG.cc.timeout.shutdown)
            
    def _cleanup_pid(self):
        if self.pidfile:
            log.debug("Cleanup pidfile: %s", self.pidfile)
            try:
                os.remove(self.pidfile)
            except Exception as e:
                log.warn("Pidfile could not be deleted: %s" % str(e))
            self.pidfile = None
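
Since start() above serializes the pid file with msgpack, an external tool could read it back roughly as follows (a sketch; the file name pattern and keys come from the code above, while the helper itself is hypothetical):

# Illustrative reader for the "cc-pid-<pid>" file written by Container.start().
import msgpack

def read_container_pidfile(path):
    with open(path, 'rb') as f:
        info = msgpack.loads(f.read())
    # keys written in start(): 'messaging', 'container-agent', 'container-xp'
    return info['container-agent'], info['container-xp']
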
Example n. 32
0
class QueueBlame(Plugin):
    name = 'queueblame'

    def __init__(self):
        Plugin.__init__(self)
        import uuid
        self.ds_name = "queueblame-%s" % str(uuid.uuid4())[0:6]

        self.queues_by_test = defaultdict(lambda: defaultdict(dict))

    def options(self, parser, env):
        super(QueueBlame, self).options(parser, env=env)

        parser.add_option(
            '--queueblame-purge',
            action='store_true',
            dest='queueblame_purge',
            help='Purge queues with leftover messages and remove all bindings')

    def configure(self, options, conf):
        """Configure the plugin and system, based on selected options."""
        super(QueueBlame, self).configure(options, conf)

        self._queueblame_purge = options.queueblame_purge

    def begin(self):
        self._active_queues = set()
        self._test_changes = {}
        self._queues_declared = []  # ordered list of queues declared
        self._queues = defaultdict(list)  # queue name -> list of accesses

        from pyon.ion.exchange import ExchangeManager
        from pyon.util.containers import DotDict
        from pyon.core.bootstrap import CFG
        from mock import Mock

        containermock = Mock()
        containermock.resource_registry.find_resources.return_value = ([],
                                                                       None)

        self.ex_manager = ExchangeManager(
            containermock)  # needs to be able to setattr
        self.ex_manager._nodes['priviledged'] = DotDict(client=DotDict(
            parameters=DotDict(
                host=CFG.get_safe('server.amqp.host', 'localhost'))))

    def finalize(self, result):
        pass

    def beforeTest(self, test):
        self._pre_defs = self.ex_manager.get_definitions()

        import os
        os.environ['QUEUE_BLAME'] = str(test.id())

    def afterTest(self, test):
        import os
        from pyon.core.bootstrap import get_sys_name  # can't guarantee exclusive access

        #os.environ.pop('QUEUE_BLAME')
        tid = test.id()

        post_defs = self.ex_manager.get_definitions()

        # diff the defs
        pre_queues = {str(x['name']) for x in self._pre_defs['queues']}
        post_queues = {str(x['name']) for x in post_defs['queues']}

        pre_exchanges = {str(x['name']) for x in self._pre_defs['exchanges']}
        post_exchanges = {str(x['name']) for x in post_defs['exchanges']}

        pre_binds = {(x['source'], x['destination'], x['routing_key'])
                     for x in self._pre_defs['bindings']
                     if x['destination_type'] == 'queue'}
        post_binds = {(x['source'], x['destination'], x['routing_key'])
                      for x in post_defs['bindings']
                      if x['destination_type'] == 'queue'}

        queue_diff_add = post_queues.difference(pre_queues)
        exchange_diff_add = post_exchanges.difference(pre_exchanges)
        binds_diff_add = post_binds.difference(pre_binds)

        queue_diff_sub = pre_queues.difference(post_queues)
        exchange_diff_sub = pre_exchanges.difference(post_exchanges)
        binds_diff_sub = pre_binds.difference(post_binds)

        # maintain active queue set
        map(self._active_queues.add, queue_diff_add)
        map(self._active_queues.discard, queue_diff_sub)

        # maintain changelog for tests
        self._test_changes[tid] = (queue_diff_add, queue_diff_sub,
                                   exchange_diff_add, exchange_diff_sub,
                                   binds_diff_add, binds_diff_sub)

        # add any new leftover queues to the list
        for q in queue_diff_add:
            if q not in self._queues_declared:
                self._queues_declared.append(q)

        # get stats about each leftover queue and record the access

        raw_queues_list = self.ex_manager._list_queues()
        raw_queues = {str(x['name']): x for x in raw_queues_list}

        for q in self._queues_declared:

            # detect if queue has been deleted (and not readded)
            if len(self._queues[q]) > 0 and isinstance(
                    self._queues[q][-1], str) and q not in queue_diff_add:
                continue

            # did we just delete it this test? add the sentinel
            if q in queue_diff_sub:
                self._queues[q].append(tid)
                continue

            # record the test, # messages on it, + bindings on the queue, - bindings on the queue
            self._queues[q].append(
                (tid, str(raw_queues[q]['messages']),
                 [x for x in binds_diff_add if str(x[1]) == str(q)
                  ], [x for x in binds_diff_sub if str(x[1]) == str(q)]))

            # are we supposed to purge it / kill bindings?
            if self._queueblame_purge and raw_queues[q]['messages'] > 0:

                # remove bindings via API
                binds = self.ex_manager.list_bindings_for_queue(q)
                for bind in binds:
                    self.ex_manager.delete_binding_tuple(bind)

                    # add to list of removed bindings for report
                    rem_binds = self._queues[q][-1][3]
                    rem_binds.append(
                        tuple(bind[0:2] + (bind[2] + " (PURGED)", )))

                # purge
                self.ex_manager.purge_queue(q)

    def report(self, stream):
        table = []
        for q in self._queues_declared:

            qd = self._queues[q]

            # first rows are:
            # queue     + test          # messages
            #             +B exchange   binding
            table.append([q, "+", qd[0][0], qd[0][1]])

            for bind in qd[0][2]:
                table.append(
                    ["", "",
                     "    +B ex: %s key: %s" % (bind[0], bind[2]), ""])

            # add rest of accesses
            #             test          # messages
            #             +B exchange   binding
            #             -B exchange   binding
            for qdd in qd[1:]:
                if isinstance(qdd, str):
                    table.append(["", "-", qdd, ""])
                else:
                    table.append(["", "", qdd[0], qdd[1]])

                    for bind in qdd[2]:
                        table.append([
                            "", "",
                            "    +B ex: %s key: %s" % (bind[0], bind[2]), ""
                        ])
                    for bind in qdd[3]:
                        table.append([
                            "", "",
                            "    -B ex: %s key: %s" % (bind[0], bind[2]), ""
                        ])

        # header
        table.insert(0, ['Queue', '', 'Test', '# Msg'])

        # get widths
        widths = [
            max([len(row[x]) for row in table]) for x in xrange(len(table[0]))
        ]
        fmt_out = [
            " ".join([x.ljust(widths[i]) for i, x in enumerate(row)])
            for row in table
        ]

        # insert col separation row
        fmt_out.insert(
            1,
            " ".join([''.ljust(widths[i], '=') for i in xrange(len(widths))]))

        # write this all to the stream
        stream.write("Queue blame report (purge: %s)\n" %
                     (self._queueblame_purge))

        stream.write("\n".join(fmt_out))
        stream.write("\n")
Example n. 33
0
    def setUp(self):
        self.ex_manager = ExchangeManager(Mock())
        self.ex_manager._nodes = MagicMock()
        self.ex_manager._nodes.get.return_value.client.parameters.host = "testhost"  # stringifies so don't use sentinel

        self.ex_manager._ems_client = Mock()
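
Given the setUp above, a hedged illustration of the kind of assertion it enables (the test body is hypothetical, not taken from pyon):

    def test_node_host_is_stubbed(self):
        # Any lookup through _nodes sees the "testhost" value stubbed in setUp.
        node = self.ex_manager._nodes.get('primary')
        self.assertEquals(node.client.parameters.host, "testhost")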