def subscription_callback(self, message, headers):
        #The message body should only contain the event description for now and a standard header: "ION Event SMS"...

        """
        This callback is given to all the event subscribers that this user wants notifications for.
        If this callback gets called the user in this processor should get an email
        """

        log.debug("UserEventProcessor.subscription_callback(): message=" + str(message))
        log.debug("event type = " + str(message._get_type()))
        log.debug('type of message: %s' % type(message))

        time_stamp = str( datetime.fromtimestamp(time.mktime(time.gmtime(float(message.ts_created)/1000))))

        event = message.type_
        origin = message.origin
        description = message.description
        log.info("description: %s" % str(description))


        # build the email from the event content
        msg_body = "Description: %s" % description + '\r\n'

        msg_subject = "(SysName: " + get_sys_name() + ") ION event " + event + " from " + origin
        msg_sender = ION_NOTIFICATION_EMAIL_ADDRESS

        msg = MIMEText(msg_body)
        msg['Subject'] = msg_subject
        msg['From'] = msg_sender
        msg['To'] = self.msg_recipient
        log.debug("UserEventProcessor.subscription_callback(): sending email to %s"\
        %self.msg_recipient)
        self.smtp_client.sendmail(msg_sender, self.msg_recipient, msg.as_string())
Exemplo n.º 2
0
    def post_datastore(self, config):
        """
        Bootstrap step: detect a prior bootstrap, else stamp the system.

        Probes the datastore for a secret cookie document named after the
        sysname; if it exists the system was already initialized and we bail
        out, otherwise a fresh cookie (container id + timestamp) is written.
        """
        cookie_name = get_sys_name() + ".ION_INIT"

        # EAFP probe: an existing cookie means bootstrap already happened.
        already_initialized = True
        try:
            res = self.clients.datastore.read_doc(cookie_name)
        except iex.NotFound:
            already_initialized = False
        if already_initialized:
            log.error("System %s already initialized: %s" % (get_sys_name(), res))
            return

        # First bootstrap: record the secret cookie for later detection.
        import time
        cookie = dict(container=self.container.id, time=time.time())
        cid, _ = self.clients.datastore.create_doc(cookie, cookie_name)
Exemplo n.º 3
0
    def post_datastore(self, config):
        """
        Bootstrap step: detect a prior bootstrap, else stamp the system.

        Looks in the datastore for the secret ".ION_INIT" cookie; when found,
        logs that the system is already initialized and returns. Otherwise a
        new cookie recording the container id and current time is created.
        """
        cookie_name = get_sys_name() + ".ION_INIT"
        try:
            res = self.clients.datastore.read_doc(cookie_name)
        except iex.NotFound:
            # No cookie yet -- this is the first bootstrap; stamp it below.
            pass
        else:
            log.error("System %s already initialized: %s" %
                      (get_sys_name(), res))
            return

        # Now set the secret cookie.
        import time
        cookie = dict(container=self.container.id, time=time.time())
        cid, _ = self.clients.datastore.create_doc(cookie, cookie_name)
def send_email(message, msg_recipient, smtp_client):
    '''
    A common method to send email with formatting

    @param message              Event
    @param msg_recipient        str
    @param smtp_client          fake or real smtp client object

    '''

    time_stamp = message.ts_created
    event = message.type_
    origin = message.origin
    description = message.description

    #------------------------------------------------------------------------------------
    # build the email from the event content
    #------------------------------------------------------------------------------------

    # str.join is portable across Python 2 and 3 (the previously used
    # string.join function was removed in Python 3).
    msg_body = "\r\n".join(("Event: %s," % event,
                            "",
                            "Originator: %s," % origin,
                            "",
                            "Description: %s," % description,
                            "",
                            "Time stamp: %s," % time_stamp,
                            "",
                            "You received this notification from ION because you asked to be "
                            "notified about this event from this source. ",
                            "To modify or remove notifications about this event, "
                            "please access My Notifications Settings in the ION Web UI.",
                            "Do not reply to this email.  This email address is not monitored "
                            "and the emails will not be read."))
    msg_subject = "(SysName: " + get_sys_name() + ") ION event " + event + " from " + origin

    #------------------------------------------------------------------------------------
    # the 'from' email address for notification emails
    #------------------------------------------------------------------------------------

    # NOTE(review): this literal looks scrubbed/anonymized -- confirm the real
    # notification address before relying on the From: header.
    ION_NOTIFICATION_EMAIL_ADDRESS = '*****@*****.**'

    msg_sender = ION_NOTIFICATION_EMAIL_ADDRESS

    msg = MIMEText(msg_body)
    msg['Subject'] = msg_subject
    msg['From'] = msg_sender
    msg['To'] = msg_recipient
    log.debug("UserEventProcessor.subscription_callback(): sending email to %s"
              % msg_recipient)

    # The SMTP envelope sender comes from config; the From: header above
    # keeps the notification address.
    smtp_sender = CFG.get_safe('server.smtp.sender')

    smtp_client.sendmail(smtp_sender, msg_recipient, msg.as_string())
Exemplo n.º 5
0
    def subscription_callback(self, message, headers):
        """
        This callback is given to all the event subscribers that this user
        wants notifications for. If this callback gets called the user in
        this processor should get an email.

        @param message  the event object (type_, origin, description, ts_created)
        @param headers  transport headers (unused)
        """

        log.debug("UserEventProcessor.subscription_callback(): message=" +
                  str(message))
        log.debug("event type = " + str(message._get_type()))
        log.debug('type of message: %s' % type(message))

        # ts_created is in milliseconds since the epoch; render as UTC text.
        time_stamp = str(
            datetime.fromtimestamp(
                time.mktime(time.gmtime(float(message.ts_created) / 1000))))

        event = message.type_
        origin = message.origin
        description = message.description

        # build the email from the event content.
        # str.join is portable to Python 3 (string.join was removed there).
        msg_body = "\r\n".join(("Event: %s" % event,
                                "",
                                "Originator: %s" % origin,
                                "",
                                "Description: %s" % description,
                                "",
                                "Time stamp: %s" % time_stamp,
                                "",
                                "You received this notification from ION because you asked to be "
                                "notified about this event from this source. ",
                                "To modify or remove notifications about this event, "
                                "please access My Notifications Settings in the ION Web UI.",
                                "Do not reply to this email.  This email address is not monitored "
                                "and the emails will not be read."))
        msg_subject = ("(SysName: " + get_sys_name() + ") ION event " +
                       event + " from " + origin)
        msg_sender = ION_NOTIFICATION_EMAIL_ADDRESS

        # Recipient comes from the notification resource's delivery config.
        msg_recipient = self.notification._res_obj.delivery_config.delivery[
            'email']

        msg = MIMEText(msg_body)
        msg['Subject'] = msg_subject
        msg['From'] = msg_sender
        msg['To'] = msg_recipient
        log.debug("UserEventProcessor.subscription_callback(): sending email to %s"
                  % msg_recipient)

        self.smtp_client.sendmail(self.smtp_sender, msg_recipient,
                                  msg.as_string())
    def format_and_send_email(self, events_for_message=None, user_id=None, smtp_client=None):
        """
        Compose the batch notification email for one user and send it.

        The events are converted into a MIME message, stamped with the
        system-wide subject and the user's contact address, then handed to
        send_batch_email() for delivery.

        @param events_for_message list
        @param user_id str
        """
        log.debug("The user, %s, will get the following events in his batch notification email: %s",
                  user_id, str(events_for_message))

        email_msg = convert_events_to_email_message(events_for_message, self.clients.resource_registry)
        email_msg["Subject"] = "(SysName: " + get_sys_name() + ") ION event "
        email_msg["To"] = self.user_info[user_id]['user_contact'].email
        self.send_batch_email(email_msg, smtp_client)
    def subscription_callback(self, message, headers):
        """
        This callback is given to all the event subscribers that this user
        wants notifications for. If this callback gets called the user in
        this processor should get an email.

        @param message  the event object (type_, origin, description, ts_created)
        @param headers  transport headers (unused)
        """

        log.debug("UserEventProcessor.subscription_callback(): message=" + str(message))
        log.debug("event type = " + str(message._get_type()))
        log.debug('type of message: %s' % type(message))

        # ts_created is in milliseconds since the epoch; render as UTC text.
        time_stamp = str( datetime.fromtimestamp(time.mktime(time.gmtime(float(message.ts_created)/1000))))

        event = message.type_
        origin = message.origin
        description = message.description

        # build the email from the event content.
        # str.join is portable to Python 3 (string.join was removed there).
        msg_body = "\r\n".join(("Event: %s" % event,
                                "",
                                "Originator: %s" % origin,
                                "",
                                "Description: %s" % description,
                                "",
                                "Time stamp: %s" % time_stamp,
                                "",
                                "You received this notification from ION because you asked to be "
                                "notified about this event from this source. ",
                                "To modify or remove notifications about this event, "
                                "please access My Notifications Settings in the ION Web UI.",
                                "Do not reply to this email.  This email address is not monitored "
                                "and the emails will not be read."))
        msg_subject = "(SysName: " + get_sys_name() + ") ION event " + event + " from " + origin
        msg_sender = ION_NOTIFICATION_EMAIL_ADDRESS

        # Recipient comes from the notification resource's delivery config.
        msg_recipient = self.notification._res_obj.delivery_config.delivery['email']

        msg = MIMEText(msg_body)
        msg['Subject'] = msg_subject
        msg['From'] = msg_sender
        msg['To'] = msg_recipient
        log.debug("UserEventProcessor.subscription_callback(): sending email to %s"
                  % msg_recipient)

        self.smtp_client.sendmail(self.smtp_sender, msg_recipient, msg.as_string())
    def format_and_send_email(self, events_for_message = None, user_id = None, smtp_client = None):
        """
        Format the message for a particular user containing information about
        the events he is to be notified about, then send it as one batch email.

        @param events_for_message list
        @param user_id str
        @param smtp_client fake or real smtp client object
        """

        message = str(events_for_message)
        log.debug("The user, %s, will get the following events in his batch notification email: %s", user_id, message)

        msg_body = ''

        # enumerate replaces the manual 1-based counter
        for count, event in enumerate(events_for_message, 1):

            ts_created = _convert_to_human_readable(event.ts_created)

            # str.join is portable to Python 3; string.join's default
            # separator was a single space, preserved here.
            msg_body += " ".join(("\r\n",
                                  "Event %s: %s" % (count, event),
                                  "",
                                  "Originator: %s" % event.origin,
                                  "",
                                  # parenthesized so the "Not provided" fallback
                                  # actually applies to an empty description
                                  # ('%' binds tighter than 'or')
                                  "Description: %s" % (event.description or "Not provided"),
                                  "",
                                  "ts_created: %s" % ts_created,
                                  "\r\n",
                                  # adjacent literals concatenate into one element
                                  "------------------------"
                                  "\r\n"))

        msg_body += "You received this notification from ION because you asked to be " +\
                    "notified about this event from this source. " +\
                    "To modify or remove notifications about this event, " +\
                    "please access My Notifications Settings in the ION Web UI. " +\
                    "Do not reply to this email.  This email address is not monitored " +\
                    "and the emails will not be read. \r\n "

        log.debug("The email has the following message body: %s", msg_body)

        msg_subject = "(SysName: " + get_sys_name() + ") ION event "

        self.send_batch_email(msg_body=msg_body,
                              msg_subject=msg_subject,
                              msg_recipient=self.user_info[user_id]['user_contact'].email,
                              smtp_client=smtp_client)
    def format_and_send_email(self, events_for_message = None, user_id = None, smtp_client = None):
        """
        Format the message for a particular user containing information about
        the events he is to be notified about, then send it as one batch email.

        @param events_for_message list
        @param user_id str
        @param smtp_client fake or real smtp client object
        """

        message = str(events_for_message)
        log.debug("The user, %s, will get the following events in his batch notification email: %s", user_id, message)

        msg_body = ''

        # enumerate replaces the manual 1-based counter
        for count, event in enumerate(events_for_message, 1):

            ts_created = _convert_to_human_readable(event.ts_created)

            # str.join is portable to Python 3; string.join's default
            # separator was a single space, preserved here.
            msg_body += " ".join(("\r\n",
                                  "Event %s: %s" % (count, event),
                                  "",
                                  "Originator: %s" % event.origin,
                                  "",
                                  # parenthesized so the "Not provided" fallback
                                  # actually applies to an empty description
                                  # ('%' binds tighter than 'or')
                                  "Description: %s" % (event.description or "Not provided"),
                                  "",
                                  "ts_created: %s" % ts_created,
                                  "\r\n",
                                  # adjacent literals concatenate into one element
                                  "------------------------"
                                  "\r\n"))

        msg_body += "You received this notification from ION because you asked to be " +\
                    "notified about this event from this source. " +\
                    "To modify or remove notifications about this event, " +\
                    "please access My Notifications Settings in the ION Web UI. " +\
                    "Do not reply to this email.  This email address is not monitored " +\
                    "and the emails will not be read. \r\n "

        log.debug("The email has the following message body: %s", msg_body)

        msg_subject = "(SysName: " + get_sys_name() + ") ION event "

        self.send_batch_email(msg_body=msg_body,
                              msg_subject=msg_subject,
                              msg_recipient=self.user_info[user_id]['user_contact'].email,
                              smtp_client=smtp_client)
Exemplo n.º 10
0
    def on_start(self):
        """
        Launch the last-update cache pipeline: register the worker process
        definition, subscribe to the science-data exchange, and create a
        configurable number of cache-transform workers.
        """
        super(CacheLauncher, self).on_start()
        tms_cli = TransformManagementServiceClient()
        pubsub_cli = PubsubManagementServiceClient()
        pd_cli = ProcessDispatcherServiceClient()
        dname = CACHE_DATASTORE_NAME
        # Worker count is configurable; defaults to 2.
        number_of_workers = self.CFG.get_safe('process.number_of_workers', 2)

        # Register the worker process definition with the process dispatcher.
        proc_def = ProcessDefinition(
            name='last_update_worker_process',
            description=
            'Worker process for caching the last update from a stream')
        proc_def.executable['module'] = 'ion.processes.data.last_update_cache'
        proc_def.executable['class'] = 'LastUpdateCache'
        proc_def_id = pd_cli.create_process_definition(
            process_definition=proc_def)

        # core_xps.science_data must look like "<exchange_space>.<exchange_point>";
        # the exchange point is prefixed with the sysname.
        xs_dot_xp = CFG.core_xps.science_data
        try:
            self.XS, xp_base = xs_dot_xp.split('.')
            self.XP = '.'.join([get_sys_name(), xp_base])
        except ValueError:
            raise StandardError(
                'Invalid CFG for core_xps.science_data: "%s"; must have "xs.xp" structure'
                % xs_dot_xp)

        # One shared subscription feeds all workers.
        subscription_id = pubsub_cli.create_subscription(
            query=ExchangeQuery(), exchange_name='last_update_cache')

        config = {
            'couch_storage': {
                'datastore_name': dname,
                'datastore_profile': 'SCIDATA'
            }
        }

        for i in xrange(number_of_workers):

            transform_id = tms_cli.create_transform(
                name='last_update_cache%d' % i,
                description=
                'last_update that compiles an aggregate of metadata',
                in_subscription_id=subscription_id,
                process_definition_id=proc_def_id,
                configuration=config)

        # NOTE(review): only the transform created in the last loop iteration
        # is activated here -- confirm whether every worker should be activated.
        tms_cli.activate_transform(transform_id=transform_id)
Exemplo n.º 11
0
    def format_and_send_email(self,
                              events_for_message=None,
                              user_id=None,
                              smtp_client=None):
        """
        Build and dispatch one batch-notification email for a single user.

        The events are converted into a MIME message, stamped with the
        system-wide subject and the user's contact address, then handed to
        send_batch_email() for delivery.

        @param events_for_message list
        @param user_id str
        """
        events_as_text = str(events_for_message)
        log.debug(
            "The user, %s, will get the following events in his batch notification email: %s",
            user_id, events_as_text)

        recipient = self.user_info[user_id]['user_contact'].email
        msg = convert_events_to_email_message(
            events_for_message, self.clients.resource_registry)
        msg["Subject"] = "(SysName: " + get_sys_name() + ") ION event "
        msg["To"] = recipient
        self.send_batch_email(msg, smtp_client)
Exemplo n.º 12
0
    def on_start(self):
        """
        Launch the last-update cache pipeline: register the worker process
        definition, subscribe to the science-data exchange, and create a
        configurable number of cache-transform workers.
        """
        super(CacheLauncher,self).on_start()
        tms_cli = TransformManagementServiceClient()
        pubsub_cli = PubsubManagementServiceClient()
        pd_cli = ProcessDispatcherServiceClient()
        dname = CACHE_DATASTORE_NAME
        # Worker count is configurable; defaults to 2.
        number_of_workers = self.CFG.get_safe('process.number_of_workers', 2)

        # Register the worker process definition with the process dispatcher.
        proc_def = ProcessDefinition(name='last_update_worker_process',description='Worker process for caching the last update from a stream')
        proc_def.executable['module'] = 'ion.processes.data.last_update_cache'
        proc_def.executable['class'] = 'LastUpdateCache'
        proc_def_id = pd_cli.create_process_definition(process_definition=proc_def)

        # core_xps.science_data must look like "<exchange_space>.<exchange_point>";
        # the exchange point is prefixed with the sysname.
        xs_dot_xp = CFG.core_xps.science_data
        try:
            self.XS, xp_base = xs_dot_xp.split('.')
            self.XP = '.'.join([get_sys_name(), xp_base])
        except ValueError:
            raise StandardError('Invalid CFG for core_xps.science_data: "%s"; must have "xs.xp" structure' % xs_dot_xp)

        # One shared subscription feeds all workers.
        subscription_id = pubsub_cli.create_subscription(query=ExchangeQuery(), exchange_name='last_update_cache')

        config = {
            'couch_storage' : {
                'datastore_name' : dname,
                'datastore_profile' : 'SCIDATA'
            }
        }

        for i in xrange(number_of_workers):

            transform_id = tms_cli.create_transform(
                name='last_update_cache%d' % i,
                description='last_update that compiles an aggregate of metadata',
                in_subscription_id=subscription_id,
                process_definition_id=proc_def_id,
                configuration=config
            )


        # NOTE(review): only the transform created in the last loop iteration
        # is activated here -- confirm whether every worker should be activated.
        tms_cli.activate_transform(transform_id=transform_id)
    def format_and_send_email(self, events_for_message, user_name):
        '''
        Format the message for a particular user containing information about
        the events he is to be notified about, then send it as one batch email.

        @param events_for_message  list of events to report
        @param user_name           str key into the event processor's user_info
        '''

        message = str(events_for_message)
        log.info("The user, %s, will get the following events in his batch notification email: %s" % (user_name, message))

        msg_body = ''
        # enumerate replaces the manual 1-based counter
        for count, event in enumerate(events_for_message, 1):
            # build the email from the event content.
            # str.join is portable to Python 3; string.join's default
            # separator was a single space, preserved here.
            msg_body += " ".join(("\r\n",
                                  "Event %s: %s" % (count, event),
                                  "",
                                  "Originator: %s" % event.origin,
                                  "",
                                  "Description: %s" % event.description,
                                  "",
                                  "Event time stamp: %s" % event.ts_created,
                                  "\r\n",
                                  # adjacent literals concatenate into one element
                                  "------------------------"
                                  "\r\n"))

        msg_body += "You received this notification from ION because you asked to be " + \
                    "notified about this event from this source. " + \
                    "To modify or remove notifications about this event, " + \
                    "please access My Notifications Settings in the ION Web UI. " + \
                    "Do not reply to this email.  This email address is not monitored " + \
                    "and the emails will not be read. \r\n "

        log.debug("The email has the following message body: %s" % msg_body)

        msg_subject = "(SysName: " + get_sys_name() + ") ION event "

        self.send_batch_email(msg_body=msg_body,
                              msg_subject=msg_subject,
                              msg_recipient=self.event_processor.user_info[user_name]['user_contact'].email,
                              smtp_client=self.smtp_client)
Exemplo n.º 14
0
    def subscription_callback(self, *args, **kwargs):
        """
        Callback given to all the event subscribers this user registered;
        when an event fires, email the user about it.

        args[0] is the event object (origin, type, description, ts_created).
        Connection/send failures are logged as warnings and swallowed so a
        broken SMTP server never kills the subscriber.
        """
        log.debug("UserEventProcessor.subscription_callback(): args[0]=" + str(args[0]))
        log.debug("event type = " + str(args[0]._get_type()))

        origin = args[0].origin
        event = str(args[0]._get_type())
        description = args[0].description
        # ts_created is in milliseconds since the epoch; render as UTC text.
        time_stamp = str( datetime.fromtimestamp(time.mktime(time.gmtime(float(args[0].ts_created)/1000))))

        # build the email from the event content.
        # str.join is portable to Python 3 (string.join was removed there).
        BODY = "\r\n".join(("Event: %s" %  event,
                            "",
                            "Originator: %s" %  origin,
                            "",
                            "Description: %s" %  description,
                            "",
                            "Time stamp: %s" %  time_stamp,
                            "",
                            "You received this notification from ION because you asked to be notified about this event from this source. ",
                            "To modify or remove notifications about this event, please access My Notifications Settings in the ION Web UI.",
                            "Do not reply to this email.  This email address is not monitored and the emails will not be read."))
        SUBJECT = "(SysName: " + get_sys_name() + ") ION event " + event + " from " + origin
        FROM = ION_NOTIFICATION_EMAIL_ADDRESS
        TO = self.user_email_addr
        msg = MIMEText(BODY)
        msg['Subject'] = SUBJECT
        msg['From'] = FROM
        msg['To'] = TO
        log.debug("UserEventProcessor.subscription_callback(): sending email to %s via %s" %(TO, self.smtp_server))
        try:
            smtp_client = smtplib.SMTP(self.smtp_server)
        except Exception as ex:
            # report the server we actually tried (previously this logged the
            # ION_SMTP_SERVER constant, not the configured self.smtp_server)
            log.warning("UserEventProcessor.subscription_callback(): failed to connect to SMTP server %s <%s>" %(self.smtp_server, ex))
            return
        try:
            smtp_client.sendmail(FROM, TO, msg.as_string())
        except Exception as ex:
            log.warning("UserEventProcessor.subscription_callback(): failed to send email to %s <%s>" %(TO, ex))
Exemplo n.º 15
0
    def report(self, stream):
        """ all tests have completed but --with-pycc has not yet stopped external container.
            request that containers log statistics now

        Writes a short status line to *stream*; on failure falls back to
        logging the local accumulators directly.
        """

        # initialize pyon so we can get system name
        from pyon.core import bootstrap
        if not bootstrap.pyon_initialized:
            bootstrap.bootstrap_pyon()
        from pyon.public import get_sys_name, CFG

        # make request: bin/pycc --sysname mgmt -x ion.processes.test.manage_system.ReportStats
        # 'with' guarantees the /dev/null handle is closed (it leaked before)
        with open('/dev/null', 'w') as null:
            cmd = ['bin/pycc', '--sysname', get_sys_name(), '-x', 'ion.processes.test.manage_system.ReportStats' ]
            status = subprocess.call(cmd, stdout=null, stderr=null)
        if status==0:
            stream.write('container statistics: a report request has been sent\n')
            time.sleep(5) # give time to handle before container shutdown begins
        else:
            stream.write('container statistics: failed to send report request (logging anyway -- who needs a container?)\n')
            from ooi.timer import get_accumulators
            for a in get_accumulators().values():
                a.log()
Exemplo n.º 16
0
    def subscription_callback(self, message, headers):
        """
        Event-subscriber callback for this user's notifications.

        Builds a one-line email from the event's description and sends it to
        the configured recipient through the processor's SMTP client.

        @param message  the event object (provides type_, origin, description)
        @param headers  transport headers (unused)
        """
        # The message body should only contain the event description for now
        # and a standard header: "ION Event SMS"...
        log.debug("UserEventProcessor.subscription_callback(): message=" +
                  str(message))
        log.debug("event type = " + str(message._get_type()))
        log.debug('type of message: %s' % type(message))

        event = message.type_
        origin = message.origin
        description = message.description
        log.info("description: %s" % str(description))

        # build the email from the event content
        # (the time_stamp conversion previously done here was dead code --
        # its result was never used in this body)
        msg_body = "Description: %s" % description + '\r\n'

        msg_subject = ("(SysName: " + get_sys_name() + ") ION event " +
                       event + " from " + origin)
        msg_sender = ION_NOTIFICATION_EMAIL_ADDRESS

        msg = MIMEText(msg_body)
        msg['Subject'] = msg_subject
        msg['From'] = msg_sender
        msg['To'] = self.msg_recipient
        log.debug("UserEventProcessor.subscription_callback(): sending email to %s"
                  % self.msg_recipient)
        self.smtp_client.sendmail(msg_sender, self.msg_recipient,
                                  msg.as_string())
Exemplo n.º 17
0
    def begin(self):
        """Called before any tests are collected or run. Use this to
        perform any setup needed before testing begins.

        Spawns a child pycc container sharing this process's sysname,
        installs signal handlers for shutdown and readiness, waits for the
        child to signal SIGUSR1, dumps its datastore state, and enables CEI
        launch mode for the tests. On any failure the container is shut down
        and the exception re-raised.
        """
        try:

            from pyon.public import get_sys_name
            sysname = get_sys_name()

            # Force datastore loader to use the same sysname
            from ion.processes.bootstrap.datastore_loader import DatastoreLoader
            DatastoreLoader.clear_datastore(prefix=sysname)

            def die(signum, frame):
                # For whatever reason, the parent doesn't die some times
                # when getting KeyboardInterrupt.  Hence this signal
                # handler.

                # Signal is pass through. The child pycc gets
                # its own KeyboardInterrupt and will shut down accordingly.
                debug.write('Received Keyboard Interrupt. Exiting now.\n')
                os._exit(9)

            signal.signal(signal.SIGINT, die)

            def no_zombie(signum, frame):
                # Debug to figure out who's dying
                debug.write('SIGCHLD received\n')
                stack = []
                while frame:
                    stack.append(frame)
                    frame = frame.f_back
                stack.reverse()
                for frame in stack:
                    debug.write('Frame %s in %s at line %s\n' %
                                (frame.f_code.co_name,
                                 frame.f_code.co_filename, frame.f_lineno))
                debug.write(
                    'Child is dead...Clean up now so there is no zombie\n')
                # Reap the child and decode its wait() status word.
                (pid, status) = os.wait()
                exitstatus, signum = status & 0xff, (status & 0xff00) >> 8
                debug.write(
                    'Child pid %d with exit status %d and signum %d\n' %
                    (pid, exitstatus, signum))

            # Could be dangerous.  Comment this out.
            # signal.signal(signal.SIGCHLD, no_zombie)

            def container_started_cb(signum, frame):
                """Callback when child pycc service is ready"""
                self.container_started = True

            # The child pycc sends SIGUSR1 once it is up; the handler flips
            # the flag polled below.
            signal.signal(signal.SIGUSR1, container_started_cb)

            # Make sure the pycc process has the same sysname as the nose
            ccargs = [
                'bin/pycc', '--noshell', '-sp',
                '--system.name=%s' % sysname,
                '--logcfg=res/config/logging.pycc.yml',
                '--rel=%s' % self.rel, 'system.force_clean=False'
            ]
            debug.write('Starting cc process: %s\n' % ' '.join(ccargs))
            newenv = os.environ.copy()
            po = subprocess.Popen(ccargs, env=newenv, close_fds=True)
            self.ccs.append(po)

            # Wait for container to be ready (busy-wait until the SIGUSR1
            # handler above sets the flag)
            while not self.container_started:
                time.sleep(0.2)
            debug.write('Child container is ready...\n')

            # Dump datastore
            DatastoreLoader.dump_datastore(path='res/dd')
            debug.write('Dump child container state to file...\n')

            # Enable CEI mode for the tests
            os.environ['CEI_LAUNCH_TEST'] = '1'

            debug.write('Start nose tests now...\n')
        except Exception as e:
            self.container_shutdown()
            raise e
Exemplo n.º 18
0
    def begin(self):
        """Called before any tests are collected or run. Use this to
        perform any setup needed before testing begins.

        Bootstraps pyon, best-effort cleans this sysname's RabbitMQ exchanges
        and queues, clears the datastore, spawns a child pycc container with
        the same sysname, waits for its SIGUSR1 readiness signal, dumps and
        re-clears the datastore, and enables CEI launch mode. On any failure
        the container is shut down and the exception re-raised.
        """
        # Make sure we initialize pyon before anything in this plugin executes
        from pyon.core import bootstrap
        if not bootstrap.pyon_initialized:
            bootstrap.bootstrap_pyon()

        try:
            from pyon.public import get_sys_name, CFG
            self.sysname = get_sys_name()

            # Clean exchanges and system queues out there
            try:
                rmh = RabbitManagementHelper(make_parser(), '-H %s -P 55672 -u %s -p %s -V %s'
                        % (CFG.server.amqp.host, CFG.server.amqp.username,
                        CFG.server.amqp.password, CFG.server.amqp.vhost))
                exchanges = rmh.list_names('exchanges')
                deleted = rmh.delete_names_with_prefix('exchange', exchanges, self.sysname)
                debug.write('Deleted exchanges:\n%s \n' % '\n'.join(deleted))
                queues = rmh.list_names('queues')
                deleted = rmh.delete_names_with_prefix('queue', queues, self.sysname)
                debug.write('Deleted queues:\n%s \n' % '\n'.join(deleted))
            except Exception as e:
                # Broker cleanup is best-effort; proceed even if it fails.
                pass

            # Force datastore loader to use the same sysname
            from pyon.datastore.datastore_admin import DatastoreAdmin
            self.datastore_admin = DatastoreAdmin(config=CFG)

            self.datastore_admin.clear_datastore(prefix=self.sysname)

            def die(signum, frame):
                # For whatever reason, the parent doesn't die some times
                # when getting KeyboardInterrupt.  Hence this signal
                # handler.

                # Signal is pass through. The child pycc gets
                # its own KeyboardInterrupt and will shut down accordingly.
                debug.write('Received Keyboard Interrupt. Exiting now.\n')
                os._exit(9)

            signal.signal(signal.SIGINT, die)

            def no_zombie(signum, frame):
                # Debug to figure out who's dying
                debug.write('SIGCHLD received\n')
                stack = []
                while frame:
                    stack.append(frame)
                    frame =frame.f_back
                stack.reverse()
                for frame in stack:
                    debug.write('Frame %s in %s at line %s\n' %
                            (frame.f_code.co_name,
                                frame.f_code.co_filename, frame.f_lineno))
                debug.write('Child is dead...Clean up now so there is no zombie\n')
                # Reap the child and decode its wait() status word.
                (pid, status) = os.wait()
                exitstatus, signum = status & 0xff, (status & 0xff00) >> 8
                debug.write('Child pid %d with exit status %d and signum %d\n' % (pid, exitstatus, signum))
            # Could be dangerous.  Comment this out.
            # signal.signal(signal.SIGCHLD, no_zombie)

            def container_started_cb(signum, frame):
                """Callback when child pycc service is ready"""
                self.container_started = True

            # The child pycc sends SIGUSR1 once it is up; the handler flips
            # the flag polled below.
            signal.signal(signal.SIGUSR1, container_started_cb)

            # Make sure the pycc process has the same sysname as the nose
            ccargs = ['bin/pycc', '--noshell', '-sp', '--sysname=%s' % self.sysname,
                    '--logcfg=res/config/logging.pycc.yml',
                    '--rel=%s' % self.rel,
                    "--config={'system': {'auto_bootstrap': True}}"]
            debug.write('Starting cc process: %s\n' % ' '.join(ccargs))
            newenv = os.environ.copy()
            po = subprocess.Popen(ccargs, env=newenv, close_fds=True)
            self.ccs.append(po)

            # Wait for container to be ready (busy-wait until the SIGUSR1
            # handler above sets the flag)
            while not self.container_started:
                time.sleep(0.2)
            debug.write('Child container is ready...\n')

            # Dump datastore
            self.datastore_admin.dump_datastore(path='res/dd', compact=True)
            debug.write('Dump child container state to file...\n')

            # Clean again to make sure the first nosetest starts on a clean
            # slate
            self.datastore_admin.clear_datastore(prefix=self.sysname)
            # Enable CEI mode for the tests
            os.environ['CEI_LAUNCH_TEST'] = '1'

            debug.write('Start nose tests now...\n')
        except Exception as e:
            self.container_shutdown()
            raise e
def convert_events_to_email_message(events=None, rr_client=None):
    """
    Render a list of event objects into a single MIMEText email message.

    @param events     list of event objects (each read for type_, origin,
                      origin_type, ts_created and description attributes)
    @param rr_client  resource registry client; its read() is used to resolve
                      each event origin into a resource for a readable label
    @retval MIMEText  message with body and Subject set; 'From'/'To' headers
                      are deliberately left for the caller to fill in
    @throws BadRequest if no events are supplied
    """
    if events is None:
        events = []

    if not events:
        raise BadRequest("Tried to convert events to email, but none were supplied")

    web_ui_url = CFG.get_safe('system.web_ui_url', None)
    log.debug("found CFG.system.web_ui_url = %s" % web_ui_url)
    if web_ui_url is None:
        web_ui_txt = ""
    else:
        web_ui_txt = ": %s" % web_ui_url

    msg_body = ""

    resource_human_readable = "<uninitialized string>"
    for idx, event in enumerate(events, 1):

        ts_created = convert_timestamp_to_human_readable(event.ts_created)

        # build human readable resource string; fall back to the raw origin id
        # when the resource is no longer present in the resource registry
        resource_human_readable = "'%s' with ID='%s' (not found)" % (event.origin_type, event.origin)
        try:
            resource = rr_client.read(event.origin)
            resource_human_readable = "%s '%s'" % (type(resource).__name__, resource.name)
        except NotFound:
            pass

        if 1 == len(events):
            eventtitle = "Type"
        else:
            eventtitle = "%s Type" % idx

        # BUGFIX: '%' binds tighter than 'or', so the previous
        # `"Description: %s" % get_event_summary(event) or event.description or ...`
        # always produced a non-empty string and the fallbacks never applied.
        # Compute the fallback chain first, then format it.
        description = get_event_summary(event) or event.description or "Not provided"

        msg_body += string.join(("\r\n",
                                 "Event %s: %s" %  (eventtitle, event.type_),
                                 "",
                                 "Resource: %s" %  resource_human_readable,
                                 "",
                                 "Date & Time: %s" %  ts_created,
                                 "",
                                 "Description: %s" % description,
                                 "\r\n",
                                 "------------------------",
                                 "\r\n",
                                 ),         # necessary!
                                 "\r\n")


    msg_body += ("\r\n\r\nAutomated alert from the OOI ION system (%s). " % get_sys_name()) +\
                "This notification was received based on " +\
                "your current subscription settings for this event type from this resource. To unsubscribe " +\
                "from notifications of this event type, please access the actions menu for the resource " +\
                ("listed above in the ION interface%s.  \r\n\r\n" % web_ui_txt) +\
                "Do not reply to this email.  This email address is not monitored and the emails will not be read.\r\n"


    #log.debug("The email has the following message body: %s", msg_body)

    msg_subject = ""
    if 1 == len(events):
        msg_subject += "ION event " + events[0].type_ + " from " + resource_human_readable
    else:
        msg_subject += "summary of %s ION events" % len(events)

    msg = MIMEText(msg_body)
    msg['Subject'] = msg_subject
    #    msg['From'] = smtp_sender
    #    msg['To'] = msg_recipient

    return msg
    def on_init(self):
        """
        Initialize the High Availability agent from CFG and stored state.

        Wires up, in order: the Process Dispatcher list, the managed process
        definition, the service registration, the scaling policy (restored
        from persisted state when available, otherwise from CFG), the process
        control plane and the HA core. Optionally attaches a dashi messaging
        handler. Bails out early (with an error log) when the
        HighAvailabilityCore dependency is not importable.
        """
        if not HighAvailabilityCore:
            msg = "HighAvailabilityCore isn't available. Use autolaunch.cfg buildout"
            log.error(msg)
            return

        cfg = self.CFG.get_safe("highavailability")

        # use default PD name as the sole PD if none are provided in config
        self.pds = self.CFG.get_safe("highavailability.process_dispatchers",
                                     [ProcessDispatcherService.name])
        # exactly one PD is supported; a longer (or empty) list is a config error
        if not len(self.pds) == 1:
            raise Exception(
                "HA Service doesn't support multiple Process Dispatchers")

        self.process_definition_id, self.process_definition = self._get_process_definition(
        )

        self.process_configuration = self.CFG.get_safe(
            "highavailability.process_configuration")
        aggregator_config = _get_aggregator_config(self.CFG)

        self.service_id, self.service_name = self._register_service()
        # event used by HAProcessControl (below) to wake the policy loop
        self.policy_event = Event()

        # prefer a previously persisted policy over the static CFG one, so a
        # restarted agent resumes with the policy it was last running
        stored_policy = self._stored_policy
        if stored_policy != {}:
            policy_name = stored_policy.get('name')
            policy_parameters = stored_policy.get('parameters')
            self._validate_policy_name(policy_name)
            self.policy_name = policy_name.lower()
            self.policy_parameters = policy_parameters
        else:

            policy_name = self.CFG.get_safe("highavailability.policy.name")
            self._validate_policy_name(policy_name)
            self.policy_name = policy_name.lower()
            self.policy_parameters = self.CFG.get_safe(
                "highavailability.policy.parameters")

        self.policy_interval = self.CFG.get_safe(
            "highavailability.policy.interval", DEFAULT_INTERVAL)

        self.logprefix = "HA Agent (%s): " % self.service_name

        # control plane talks to the single PD; policy_event.set is the
        # callback that triggers a policy re-evaluation
        self.control = HAProcessControl(self.pds[0],
                                        self.container.resource_registry,
                                        self.service_id,
                                        self.policy_event.set,
                                        logprefix=self.logprefix)

        self.core = HighAvailabilityCore(
            cfg,
            self.control,
            self.pds,
            self.policy_name,
            process_definition_id=self.process_definition_id,
            parameters=self.policy_parameters,
            process_configuration=self.process_configuration,
            aggregator_config=aggregator_config,
            name=self.service_name)

        # optional dashi (AMQP RPC) interface; disabled by default
        dashi_messaging = self.CFG.get_safe("highavailability.dashi_messaging",
                                            False)
        if dashi_messaging:

            dashi_name = self.CFG.get_safe("highavailability.dashi_name")
            if not dashi_name:
                raise Exception("dashi_name unknown")
            dashi_uri = self.CFG.get_safe("highavailability.dashi_uri")
            if not dashi_uri:
                # no explicit URI: build one from the broker credentials
                rabbit_host = self.CFG.get_safe("server.amqp.host")
                rabbit_user = self.CFG.get_safe("server.amqp.username")
                rabbit_pass = self.CFG.get_safe("server.amqp.password")

                if not (rabbit_host and rabbit_user and rabbit_pass):
                    raise Exception("cannot form dashi URI")

                dashi_uri = "amqp://%s:%s@%s/" % (rabbit_user, rabbit_pass,
                                                  rabbit_host)
            dashi_exchange = self.CFG.get_safe(
                "highavailability.dashi_exchange")
            if not dashi_exchange:
                dashi_exchange = get_sys_name()

            self.dashi_handler = HADashiHandler(self, dashi_name, dashi_uri,
                                                dashi_exchange)

        else:
            self.dashi_handler = None
Exemplo n.º 21
0
def send_email(message, msg_recipient, smtp_client):
    '''
    A common method to send email with formatting

    Builds a plain-text notification email from an event object and sends it
    through the supplied SMTP client. If the first send fails (e.g. a stale
    connection), a fresh SMTP client is created and the send retried once.

    @param message              Event
    @param msg_recipient        str
    @param smtp_client          fake or real smtp client object
    '''

    log.debug("Got type of event to notify on: %s", message.type_)

    # Get the different attributes from the event message
    event = message.type_
    origin = message.origin
    description = message.description or "Not provided for this event"
    event_obj_as_string = str(message)
    ts_created = _convert_to_human_readable(message.ts_created)

    #------------------------------------------------------------------------------------
    # build the email from the event content
    #------------------------------------------------------------------------------------

    msg_body = string.join(("Event type: %s," %  event,
                            "",
                            "Originator: %s," %  origin,
                            "",
                            "Description: %s," % description,
                            "",
                            "ts_created: %s," %  ts_created,
                            "",
                            "Event object as a dictionary: %s," %  event_obj_as_string,
                            "",
                            "You received this notification from ION because you asked to be "\
                            "notified about this event from this source. ",
                            "To modify or remove notifications about this event, "\
                            "please access My Notifications Settings in the ION Web UI.",
                            "Do not reply to this email.  This email address is not monitored "\
                            "and the emails will not be read."),
        "\r\n")
    msg_subject = "(SysName: " + get_sys_name() + ") ION event " + event + " from " + origin

    log.debug("msg_body::: %s", msg_body)

    #------------------------------------------------------------------------------------
    # the 'from' email address for notification emails
    #------------------------------------------------------------------------------------

    ION_NOTIFICATION_EMAIL_ADDRESS = '*****@*****.**'
    smtp_sender = CFG.get_safe('server.smtp.sender', ION_NOTIFICATION_EMAIL_ADDRESS)

    msg = MIMEText(msg_body)
    msg['Subject'] = msg_subject
    msg['From'] = smtp_sender
    msg['To'] = msg_recipient
    log.debug("UNS sending email from %s to %s for event type: %s", smtp_sender,msg_recipient, message.type_)
    log.debug("UNS using the smtp client: %s", smtp_client)

    try:
        smtp_client.sendmail(smtp_sender, [msg_recipient], msg.as_string())
    except Exception:
        # BUGFIX: was a bare 'except:', which also swallowed KeyboardInterrupt/
        # SystemExit and hid the original failure. Narrowed to Exception and
        # logged; can be due to a broken connection, so reconnect and retry once.
        log.warn("UNS failed to send email, reconnecting SMTP client and retrying", exc_info=True)
        smtp_client = setting_up_smtp_client()
        log.debug("Connect again...message received after ehlo exchange: %s", str(smtp_client.ehlo()))
        smtp_client.sendmail(smtp_sender, [msg_recipient], msg.as_string())
Exemplo n.º 22
0
    def on_restart(self, process, config, **kwargs):
        """
        Handles bootstrapping of system restart for exchange resources and broker state.

        - Ensures ExchangePoint and ExchangeSpace resources in system have a properly
          declared AMQP exchange
        - Ensures ExchangeName resources in system have a properly declared queue
        - Logs all exchanges/queues it didn't understand
        - Purges all service queues as long as no consumers are attached, or can be
          overridden with force=True on pycc command line
        """
        ex_manager = process.container.ex_manager
        # bypass the Exchange Management Service while reconciling broker
        # state directly; the original flag is restored at the end
        old_use_ems = ex_manager.use_ems
        ex_manager.use_ems = False
        sys_name = get_sys_name()

        # get list of queues from broker with full props that have to do with our sysname
        all_queues = ex_manager._list_queues()
        queues = {
            q['name']: q
            for q in all_queues if q['name'].startswith(sys_name)
        }

        # get list of exchanges from broker with full props
        all_exchanges = ex_manager._list_exchanges()
        exchanges = {
            e['name']: e
            for e in all_exchanges if e['name'].startswith(sys_name)
        }

        # now get list of XOs from RR
        xs_objs, _ = process.container.resource_registry.find_resources(
            RT.ExchangeSpace)
        xp_objs, _ = process.container.resource_registry.find_resources(
            RT.ExchangePoint)
        xn_objs, _ = process.container.resource_registry.find_resources(
            RT.ExchangeName)

        # map ExchangePoint id -> owning ExchangeSpace id via associations
        xs_by_xp = {}
        assocs = process.container.resource_registry.find_associations(
            predicate=PRED.hasExchangePoint, id_only=False)
        for assoc in assocs:
            if assoc.st == RT.ExchangeSpace and assoc.ot == RT.ExchangePoint:
                xs_by_xp[assoc.o] = assoc.s

        #
        # VERIFY XSs have a declared exchange
        #
        # rem_exchanges starts as every broker exchange for this sysname;
        # each recognized exchange is removed, leaving only unknowns
        rem_exchanges = set(exchanges)

        xs_by_id = {}
        for rrxs in xs_objs:
            xs = ExchangeSpace(ex_manager, ex_manager._priviledged_transport,
                               rrxs.name)
            xs_by_id[rrxs._id] = xs

            if xs.exchange in rem_exchanges:
                rem_exchanges.remove(xs.exchange)
            else:
                log.warn(
                    "BootstrapExchange restart: RR XS %s, id: %s NOT FOUND in exchanges",
                    rrxs.name, rrxs._id)

        for rrxp in xp_objs:
            xs_id = xs_by_xp.get(rrxp._id, None)
            if not xs_id or xs_id not in xs_by_id:
                log.warn("Inconsistent!! XS for XP %s not found", rrxp.name)
                continue
            xs = xs_by_id[xs_id]
            xp = ExchangePoint(ex_manager, ex_manager._priviledged_transport,
                               rrxp.name, xs)

            if xp.exchange in rem_exchanges:
                rem_exchanges.remove(xp.exchange)
            else:
                log.warn(
                    "BootstrapExchange restart: RR XP %s, id %s NOT FOUND in exchanges",
                    rrxp.name, rrxp._id)

        # TODO: WARNING this is based on hardcoded names
        # events and main service exchange should be left
        if sys_name in rem_exchanges:
            rem_exchanges.remove(sys_name)
        else:
            log.warn("BootstrapExchange restart: no main service exchange %s",
                     sys_name)

        event_ex = "%s.pyon.events" % sys_name
        if event_ex in rem_exchanges:
            rem_exchanges.remove(event_ex)
        else:
            log.warn("BootstrapExchange restart: no events exchange %s",
                     event_ex)

        # what is left?
        for exchange in rem_exchanges:
            log.warn(
                "BootstrapExchange restart: unknown exchange on broker %s",
                exchange)

        #
        # VERIFY XNs have a declared queue
        #
        # same subtractive approach as exchanges, now for queues
        rem_queues = set(queues)

        for rrxn in xn_objs:
            # can instantiate ExchangeNames, don't need specific types

            # @TODO: most queue types have a name instead of anon
            """
            # @TODO: except queue type, which needs to be fixed to record declared name type
            if rrxn.xn_type == "QUEUE":
                log.info("TODO: queue type XNs, %s", rrxn.name)
                continue
            """

            exchange_space_list, assoc_list = process.container.resource_registry.find_subjects(
                RT.ExchangeSpace, PRED.hasExchangeName, rrxn._id)
            if not len(exchange_space_list) == 1:
                raise StandardError(
                    "Association from ExchangeSpace to ExchangeName %s does not exist"
                    % rrxn._id)

            rrxs = exchange_space_list[0]

            xs = ExchangeSpace(ex_manager, ex_manager._priviledged_transport,
                               rrxs.name)
            xn = ExchangeName(ex_manager, ex_manager._priviledged_transport,
                              rrxn.name, xs)

            if xn.queue in rem_queues:
                rem_queues.remove(xn.queue)
            else:
                log.warn(
                    "BootstrapExchange restart: RR XN %s, type %s NOT FOUND in queues",
                    xn.queue, xn.xn_type)

        # get list of service name possibilities
        svc_objs, _ = process.container.resource_registry.find_resources(
            RT.ServiceDefinition)
        svc_names = [s.name for s in svc_objs]

        # PROCESS QUEUES + SERVICE QUEUES- not yet represented by resource
        proc_queues = set()
        svc_queues = set()

        for queue in list(rem_queues):

            # PROCESS QUEUES: proc manager spawned
            # pattern "<sysname>.<containerid>.<pid>"
            pieces = queue.split(".")
            if len(pieces) > 2 and pieces[-1].isdigit():
                proc_queues.add(queue)
                rem_queues.remove(queue)
                continue

            # SERVICE QUEUES
            # pattern "<sysname>.<service name>"
            if len(pieces) == 2:
                if pieces[-1] in svc_names:
                    svc_queues.add(queue)
                    rem_queues.remove(queue)

            # @TODO: PD-spawned process queues
            # pattern "<sysname>.<service_name><hex>"

        # leftover queues now
        for queue in rem_queues:
            log.warn("Unknown queue: %s", queue)

        #
        # EMPTY SERVICE QUEUES
        #
        # purge only when no consumers are attached, unless force=True
        for queue in svc_queues:
            if int(queues[queue]['consumers']
                   ) > 0 and not process.CFG.get_safe('force', False):
                log.warn(
                    "Refusing to empty service queue %s with consumers (%s), specify force=True to override",
                    queue, queues[queue]['consumers'])
            else:
                ex_manager.purge_queue(queue)
                log.info("Purged service queue %s of %s messages", queue,
                         queues[queue]['messages'])

        # restore EMS usage flag saved at the top of this method
        ex_manager.use_ems = old_use_ems
Exemplo n.º 23
0
    def begin(self):
        """Called before any tests are collected or run. Use this to
        perform any setup needed before testing begins.

        Bootstraps pyon, cleans broker/datastore state for this sysname,
        installs signal handlers, launches a child pycc container with the
        same sysname, waits for its SIGUSR1 ready signal, dumps its datastore
        state, then clears the datastore again so tests start clean.
        """
        # Make sure we initialize pyon before anything in this plugin executes
        from pyon.core import bootstrap
        if not bootstrap.pyon_initialized:
            bootstrap.bootstrap_pyon()

        try:
            from pyon.public import get_sys_name, CFG
            self.sysname = get_sys_name()

            # Clean exchanges and system queues out there
            try:
                connect_str = '-H %s -P 55672 -u %s -p %s -V %s' % (CFG.server.amqp.host,
                                                                    CFG.server.amqp.username,
                                                                    CFG.server.amqp.password,
                                                                    CFG.server.amqp.vhost)

                deleted_exchanges, deleted_queues = clean_by_sysname(connect_str, self.sysname)

                debug.write('Deleted exchanges:\n%s \n' % '\n'.join(deleted_exchanges))
                debug.write('Deleted queues:\n%s \n' % '\n'.join(deleted_queues))

            except Exception as e:
                # best-effort broker cleanup: failures here are deliberately
                # non-fatal so tests can still proceed
                pass

            # Force datastore loader to use the same sysname
            from pyon.datastore.datastore_admin import DatastoreAdmin
            self.datastore_admin = DatastoreAdmin(config=CFG)

            self.datastore_admin.clear_datastore(prefix=self.sysname)

            def die(signum, frame):
                # For whatever reason, the parent doesn't die some times
                # when getting KeyboardInterrupt.  Hence this signal
                # handler.

                # Signal is pass through. The child pycc gets
                # its own KeyboardInterrupt and will shut down accordingly.
                debug.write('Received Keyboard Interrupt. Exiting now.\n')
                os._exit(9)

            signal.signal(signal.SIGINT, die)

            def no_zombie(signum, frame):
                # Debug to figure out who's dying
                debug.write('SIGCHLD received\n')
                stack = []
                while frame:
                    stack.append(frame)
                    frame =frame.f_back
                stack.reverse()
                for frame in stack:
                    debug.write('Frame %s in %s at line %s\n' %
                            (frame.f_code.co_name,
                                frame.f_code.co_filename, frame.f_lineno))
                debug.write('Child is dead...Clean up now so there is no zombie\n')
                (pid, status) = os.wait()
                exitstatus, signum = status & 0xff, (status & 0xff00) >> 8
                debug.write('Child pid %d with exit status %d and signum %d\n' % (pid, exitstatus, signum))
            # Could be dangerous.  Comment this out.
            # signal.signal(signal.SIGCHLD, no_zombie)

            def container_started_cb(signum, frame):
                """Callback when child pycc service is ready"""
                self.container_started = True

            # child pycc signals readiness with SIGUSR1 (see '-sp' flag below)
            signal.signal(signal.SIGUSR1, container_started_cb)

            # Make sure the pycc process has the same sysname as the nose
            ccargs = ['bin/pycc', '-o', '--noshell', '-sp', '--sysname=%s' % self.sysname,
                    '--logcfg=res/config/logging.pycc.yml',
                    '--rel=%s' % self.rel,
                    "--config={'system': {'auto_bootstrap': True}}"]
            debug.write('Starting cc process: %s\n' % ' '.join(ccargs))
            newenv = os.environ.copy()
            po = subprocess.Popen(ccargs, env=newenv, close_fds=True)
            self.ccs.append(po)

            # Wait for container to be ready
            # NOTE(review): busy-waits with no timeout; hangs forever if the
            # child never signals — confirm whether a timeout is wanted
            while not self.container_started:
                time.sleep(0.2)
            debug.write('Child container is ready...\n')

            # Dump datastore
            self.datastore_admin.dump_datastore(path='res/dd', compact=True)
            debug.write('Dump child container state to file...\n')

            # Clean again to make sure the first nosetest starts on a clean
            # slate
            self.datastore_admin.clear_datastore(prefix=self.sysname)
            # Set PYCC env var in case CEI needs to skip tests in pycc mode
            os.environ['PYCC_MODE'] = '1'
            # Enable CEI mode for the tests
            os.environ['CEI_LAUNCH_TEST'] = '1'

            debug.write('Start nose tests now...\n')
        except Exception as e:
            # make sure the child container is torn down before propagating
            self.container_shutdown()
            raise e
Exemplo n.º 24
0
def convert_events_to_email_message(events=None,
                                    notifications_map=None,
                                    rr_client=None):
    """
    Render a list of event objects into a single MIMEText email message.

    @param events             list of event objects (read for _id, type_,
                              origin, origin_type, ts_created, description)
    @param notifications_map  mapping used by _get_notification_name to label
                              the notification request that matched each event
    @param rr_client          resource registry client used to resolve event
                              origins into resources for readable labels
    @retval MIMEText          message with body and Subject set; 'From'/'To'
                              headers are deliberately left for the caller
    @throws BadRequest if no events are supplied
    """
    if events is None: events = []

    # map event origins to resource objects to provide additional context in the email
    event_origin_to_resource_map = {}

    if not events:
        raise BadRequest(
            "Tried to convert events to email, but none were supplied")

    web_ui_url = CFG.get_safe('system.web_ui_url', None)
    log.debug("found CFG.system.web_ui_url = %s" % web_ui_url)
    if web_ui_url is None:
        web_ui_txt = ""
    else:
        web_ui_txt = ": %s" % web_ui_url

    msg_body = ""

    # collect all the resources from the RR in one call
    # BUGFIX: previously called with events=None, rr_client=None (literal
    # Nones), so the map was always empty and every email said "(not found)"
    event_origin_to_resource_map = _collect_resources_from_event_origins(
        events=events, rr_client=rr_client)

    resource_human_readable = "<uninitialized string>"
    for idx, event in enumerate(events, 1):

        ts_created = convert_timestamp_to_human_readable(event.ts_created)

        # build human readable resource string; fall back to the raw origin
        # id when the resource was not found in the registry
        resource_human_readable = "'%s' with ID='%s' (not found)" % (
            event.origin_type, event.origin)
        notification_name = _get_notification_name(
            event_id=event._id, notifications_map=notifications_map)

        resource = ''
        # pull the resource from the map if the origin id was found
        if event.origin in event_origin_to_resource_map:
            resource = event_origin_to_resource_map[event.origin]
            resource_human_readable = "%s '%s'" % (type(resource).__name__,
                                                   resource.name)

        if 1 == len(events):
            eventtitle = "Type"
        else:
            eventtitle = "%s Type" % idx

        # BUGFIX: '%' binds tighter than 'or', so the fallback chain after
        # get_event_summary(event) never applied; compute it first
        description = (get_event_summary(event) or event.description
                       or "Not provided")

        msg_body += string.join(
            (
                "\r\n",
                "Event %s: %s" % (eventtitle, event.type_),
                "",
                "Notification Request Name: %s" % notification_name,
                "",
                "Resource: %s" % resource_human_readable,
                "",
                "Date & Time: %s" % ts_created,
                "",
                "Description: %s" % description,
                "\r\n",
                "------------------------",
                "\r\n",
            ),  # necessary!
            "\r\n")


    msg_body += ("\r\n\r\nAutomated alert from the OOI ION system (%s). " % get_sys_name()) +\
                "This notification was received based on " +\
                "your current subscription settings for this event type from this resource. To unsubscribe " +\
                "from notifications of this event type, please access the actions menu for the resource " +\
                ("listed above in the ION interface%s.  \r\n\r\n" % web_ui_txt) +\
                "Do not reply to this email.  This email address is not monitored and the emails will not be read.\r\n"

    #log.debug("The email has the following message body: %s", msg_body)

    msg_subject = ""
    if 1 == len(events):
        msg_subject += "ION event " + events[
            0].type_ + " from " + resource_human_readable
    else:
        msg_subject += "summary of %s ION events" % len(events)

    msg = MIMEText(msg_body)
    msg['Subject'] = msg_subject
    #    msg['From'] = smtp_sender
    #    msg['To'] = msg_recipient

    return msg
    def on_restart(self, process, config, **kwargs):
        """
        Handles bootstrapping of system restart for exchange resources and broker state.

        - Ensures ExchangePoint and ExchangeSpace resources in system have a properly
          declared AMQP exchange
        - Ensures ExchangeName resources in system have a properly declared queue
        - Logs all exchanges/queues it didn't understand
        - Purges all service queues as long as no consumers are attached, or can be
          overridden with force=True on pycc command line
        """
        ex_manager = process.container.ex_manager
        # bypass the Exchange Management Service while reconciling broker
        # state directly; the original flag is restored at the end
        old_use_ems = ex_manager.use_ems
        ex_manager.use_ems = False
        sys_name = get_sys_name()

        # get list of queues from broker with full props that have to do with our sysname
        all_queues = ex_manager._list_queues()
        queues = {q["name"]: q for q in all_queues if q["name"].startswith(sys_name)}

        # get list of exchanges from broker with full props
        all_exchanges = ex_manager._list_exchanges()
        exchanges = {e["name"]: e for e in all_exchanges if e["name"].startswith(sys_name)}

        # now get list of XOs from RR
        xs_objs, _ = process.container.resource_registry.find_resources(RT.ExchangeSpace)
        xp_objs, _ = process.container.resource_registry.find_resources(RT.ExchangePoint)
        xn_objs, _ = process.container.resource_registry.find_resources(RT.ExchangeName)

        # map ExchangePoint id -> owning ExchangeSpace id via associations
        xs_by_xp = {}
        assocs = process.container.resource_registry.find_associations(predicate=PRED.hasExchangePoint, id_only=False)
        for assoc in assocs:
            if assoc.st == RT.ExchangeSpace and assoc.ot == RT.ExchangePoint:
                xs_by_xp[assoc.o] = assoc.s

        #
        # VERIFY XSs have a declared exchange
        #
        # rem_exchanges starts as every broker exchange for this sysname;
        # each recognized exchange is removed, leaving only unknowns
        rem_exchanges = set(exchanges)

        xs_by_id = {}
        for rrxs in xs_objs:
            xs = ExchangeSpace(ex_manager, ex_manager._priviledged_transport, rrxs.name)
            xs_by_id[rrxs._id] = xs

            if xs.exchange in rem_exchanges:
                rem_exchanges.remove(xs.exchange)
            else:
                log.warn("BootstrapExchange restart: RR XS %s, id: %s NOT FOUND in exchanges", rrxs.name, rrxs._id)

        for rrxp in xp_objs:
            xs_id = xs_by_xp.get(rrxp._id, None)
            if not xs_id or xs_id not in xs_by_id:
                log.warn("Inconsistent!! XS for XP %s not found", rrxp.name)
                continue
            xs = xs_by_id[xs_id]
            xp = ExchangePoint(ex_manager, ex_manager._priviledged_transport, rrxp.name, xs)

            if xp.exchange in rem_exchanges:
                rem_exchanges.remove(xp.exchange)
            else:
                log.warn("BootstrapExchange restart: RR XP %s, id %s NOT FOUND in exchanges", rrxp.name, rrxp._id)

        # TODO: WARNING this is based on hardcoded names
        # events and main service exchange should be left
        if sys_name in rem_exchanges:
            rem_exchanges.remove(sys_name)
        else:
            log.warn("BootstrapExchange restart: no main service exchange %s", sys_name)

        event_ex = "%s.pyon.events" % sys_name
        if event_ex in rem_exchanges:
            rem_exchanges.remove(event_ex)
        else:
            log.warn("BootstrapExchange restart: no events exchange %s", event_ex)

        # what is left?
        for exchange in rem_exchanges:
            log.warn("BootstrapExchange restart: unknown exchange on broker %s", exchange)

        #
        # VERIFY XNs have a declared queue
        #
        # same subtractive approach as exchanges, now for queues
        rem_queues = set(queues)

        for rrxn in xn_objs:
            # can instantiate ExchangeNames, don't need specific types

            # @TODO: most queue types have a name instead of anon
            """
            # @TODO: except queue type, which needs to be fixed to record declared name type
            if rrxn.xn_type == "QUEUE":
                log.info("TODO: queue type XNs, %s", rrxn.name)
                continue
            """

            exchange_space_list, assoc_list = process.container.resource_registry.find_subjects(
                RT.ExchangeSpace, PRED.hasExchangeName, rrxn._id
            )
            if not len(exchange_space_list) == 1:
                raise StandardError("Association from ExchangeSpace to ExchangeName %s does not exist" % rrxn._id)

            rrxs = exchange_space_list[0]

            xs = ExchangeSpace(ex_manager, ex_manager._priviledged_transport, rrxs.name)
            xn = ExchangeName(ex_manager, ex_manager._priviledged_transport, rrxn.name, xs)

            if xn.queue in rem_queues:
                rem_queues.remove(xn.queue)
            else:
                log.warn("BootstrapExchange restart: RR XN %s, type %s NOT FOUND in queues", xn.queue, xn.xn_type)

        # get list of service name possibilities
        svc_objs, _ = process.container.resource_registry.find_resources(RT.ServiceDefinition)
        svc_names = [s.name for s in svc_objs]

        # PROCESS QUEUES + SERVICE QUEUES- not yet represented by resource
        proc_queues = set()
        svc_queues = set()

        for queue in list(rem_queues):

            # PROCESS QUEUES: proc manager spawned
            # pattern "<sysname>.<containerid>.<pid>"
            pieces = queue.split(".")
            if len(pieces) > 2 and pieces[-1].isdigit():
                proc_queues.add(queue)
                rem_queues.remove(queue)
                continue

            # SERVICE QUEUES
            # pattern "<sysname>.<service name>"
            if len(pieces) == 2:
                if pieces[-1] in svc_names:
                    svc_queues.add(queue)
                    rem_queues.remove(queue)

            # @TODO: PD-spawned process queues
            # pattern "<sysname>.<service_name><hex>"

        # leftover queues now
        for queue in rem_queues:
            log.warn("Unknown queue: %s", queue)

        #
        # EMPTY SERVICE QUEUES
        #
        # purge only when no consumers are attached, unless force=True
        for queue in svc_queues:
            if int(queues[queue]["consumers"]) > 0 and not process.CFG.get_safe("force", False):
                log.warn(
                    "Refusing to empty service queue %s with consumers (%s), specify force=True to override",
                    queue,
                    queues[queue]["consumers"],
                )
            else:
                ex_manager.purge_queue(queue)
                log.info("Purged service queue %s of %s messages", queue, queues[queue]["messages"])

        # restore EMS usage flag saved at the top of this method
        ex_manager.use_ems = old_use_ems
Exemplo n.º 26
0
 def _get_dir_entry(self, scenario):
     """Build a directory entry for *scenario*: current ION timestamp, the
     scenario name, and the system name."""
     entry = {
         "ts": get_ion_ts(),
         "name": scenario,
         "sys_name": get_sys_name(),
     }
     return entry
Exemplo n.º 27
0
    def begin(self):
        """Called before any tests are collected or run. Use this to
        perform any setup needed before testing begins.

        Spawns a child pycc capability container sharing this plugin's sysname,
        waits (via SIGUSR1) for it to come up, dumps its datastore state to
        res/dd, and switches the test run into CEI launch mode.
        """
        try:

            from pyon.public import get_sys_name
            self.sysname = get_sys_name()

            # Force datastore loader to use the same sysname
            from ion.processes.bootstrap.datastore_loader import DatastoreLoader
            DatastoreLoader.clear_datastore(prefix=self.sysname)

            def die(signum, frame):
                # For whatever reason, the parent doesn't die some times
                # when getting KeyboardInterrupt.  Hence this signal
                # handler.

                # Signal is pass through. The child pycc gets
                # its own KeyboardInterrupt and will shut down accordingly.
                debug.write('Received Keyboard Interrupt. Exiting now.\n')
                os._exit(9)

            signal.signal(signal.SIGINT, die)

            def no_zombie(signum, frame):
                # Debug to figure out who's dying
                debug.write('SIGCHLD received\n')
                stack = []
                # Walk up the call chain so the full stack can be dumped below.
                while frame:
                    stack.append(frame)
                    frame =frame.f_back
                stack.reverse()
                for frame in stack:
                    debug.write('Frame %s in %s at line %s\n' %
                            (frame.f_code.co_name,
                                frame.f_code.co_filename, frame.f_lineno))
                debug.write('Child is dead...Clean up now so there is no zombie\n')
                # Reap the child and decode the 16-bit wait() status word.
                (pid, status) = os.wait()
                exitstatus, signum = status & 0xff, (status & 0xff00) >> 8
                debug.write('Child pid %d with exit status %d and signum %d\n' % (pid, exitstatus, signum))
            # Could be dangerous.  Comment this out.
            # signal.signal(signal.SIGCHLD, no_zombie)

            def container_started_cb(signum, frame):
                """Callback when child pycc service is ready"""
                self.container_started = True

            # The child pycc signals readiness with SIGUSR1 (see '-sp' flag below).
            signal.signal(signal.SIGUSR1, container_started_cb)

            # Make sure the pycc process has the same sysname as the nose
            ccargs = ['bin/pycc', '--noshell', '-sp', '--sysname=%s' %
                    self.sysname,
                    '--logcfg=res/config/logging.pycc.yml',
                    '--rel=%s' % self.rel,
                    "--config={'system': {'force_clean': False, 'auto_bootstrap': True}}"]
            debug.write('Starting cc process: %s\n' % ' '.join(ccargs))
            newenv = os.environ.copy()
            po = subprocess.Popen(ccargs, env=newenv, close_fds=True)
            self.ccs.append(po)

            # Wait for container to be ready
            # (busy-wait on the flag flipped by the SIGUSR1 handler above;
            # NOTE(review): there is no timeout, so a child that never signals
            # hangs this loop forever — confirm that is acceptable)
            while not self.container_started:
                time.sleep(0.2)
            debug.write('Child container is ready...\n')

            # Dump datastore
            DatastoreLoader.dump_datastore(path='res/dd')
            debug.write('Dump child container state to file...\n')

            # Enable CEI mode for the tests
            os.environ['CEI_LAUNCH_TEST'] = '1'

            debug.write('Start nose tests now...\n')
        except Exception as e:
            # Best-effort teardown of any spawned container before propagating.
            # NOTE(review): 're-raise e' resets the traceback on Python 2; a
            # bare 'raise' would preserve it (behavior change, so left as-is).
            self.container_shutdown()
            raise e
Exemplo n.º 28
0
    def on_init(self):
        """Configure the HA agent from CFG and construct its core machinery.

        Resolves the policy (from previously stored state if available,
        otherwise from CFG), registers the HA service, and builds the
        HAProcessControl / HighAvailabilityCore pair. Optionally wires up a
        dashi messaging handler when highavailability.dashi_messaging is set.
        """
        # Guard: the HA core library is an optional dependency.
        if not HighAvailabilityCore:
            msg = "HighAvailabilityCore isn't available. Use autolaunch.cfg buildout"
            log.error(msg)
            return

        cfg = self.CFG.get_safe("highavailability")

        # use default PD name as the sole PD if none are provided in config
        self.pds = self.CFG.get_safe("highavailability.process_dispatchers",
            [ProcessDispatcherService.name])
        if not len(self.pds) == 1:
            raise Exception("HA Service doesn't support multiple Process Dispatchers")

        self.process_definition_id, self.process_definition = self._get_process_definition()

        self.process_configuration = self.CFG.get_safe("highavailability.process_configuration")
        aggregator_config = _get_aggregator_config(self.CFG)

        self.service_id, self.service_name = self._register_service()
        # Event used by HAProcessControl (below) to trigger policy re-evaluation.
        self.policy_event = Event()

        # Prefer a policy persisted from a previous run over the CFG-supplied one.
        # NOTE(review): if _stored_policy could ever return None this branch
        # would be taken and crash on .get() — confirm it always returns a dict.
        stored_policy = self._stored_policy
        if stored_policy != {}:
            policy_name = stored_policy.get('name')
            policy_parameters = stored_policy.get('parameters')
            self._validate_policy_name(policy_name)
            self.policy_name = policy_name.lower()
            self.policy_parameters = policy_parameters
        else:

            policy_name = self.CFG.get_safe("highavailability.policy.name")
            self._validate_policy_name(policy_name)
            self.policy_name = policy_name.lower()
            self.policy_parameters = self.CFG.get_safe("highavailability.policy.parameters")

        # How often (seconds) the policy loop re-evaluates, default DEFAULT_INTERVAL.
        self.policy_interval = self.CFG.get_safe("highavailability.policy.interval",
                DEFAULT_INTERVAL)

        self.logprefix = "HA Agent (%s): " % self.service_name

        # Process control watches the (single) PD and pokes policy_event on change.
        self.control = HAProcessControl(self.pds[0],
            self.container.resource_registry, self.service_id,
            self.policy_event.set, logprefix=self.logprefix)

        self.core = HighAvailabilityCore(cfg, self.control,
                self.pds, self.policy_name, process_definition_id=self.process_definition_id,
                parameters=self.policy_parameters,
                process_configuration=self.process_configuration,
                aggregator_config=aggregator_config, name=self.service_name)

        # Optional dashi (out-of-band AMQP RPC) endpoint for HA commands.
        dashi_messaging = self.CFG.get_safe("highavailability.dashi_messaging", False)
        if dashi_messaging:

            dashi_name = self.CFG.get_safe("highavailability.dashi_name")
            if not dashi_name:
                raise Exception("dashi_name unknown")
            dashi_uri = self.CFG.get_safe("highavailability.dashi_uri")
            if not dashi_uri:
                # Fall back to building an AMQP URI from the broker credentials.
                rabbit_host = self.CFG.get_safe("server.amqp.host")
                rabbit_user = self.CFG.get_safe("server.amqp.username")
                rabbit_pass = self.CFG.get_safe("server.amqp.password")

                if not (rabbit_host and rabbit_user and rabbit_pass):
                    raise Exception("cannot form dashi URI")

                dashi_uri = "amqp://%s:%s@%s/" % (rabbit_user, rabbit_pass,
                                                  rabbit_host)
            dashi_exchange = self.CFG.get_safe("highavailability.dashi_exchange")
            if not dashi_exchange:
                dashi_exchange = get_sys_name()

            self.dashi_handler = HADashiHandler(self, dashi_name, dashi_uri, dashi_exchange)

        else:
            self.dashi_handler = None
Exemplo n.º 29
0
    def on_restart(self, process, config, **kwargs):
        """
        Handles bootstrapping of system restart for exchange resources and broker state.

        - Ensures ExchangePoint and ExchangeSpace resources in system have a properly
          declared AMQP exchange
        - Ensures ExchangeName resources in system have a properly declared queue
        - Logs all exchanges/queues it didn't understand
        - Purges all service queues as long as no consumers are attached, or can be
          overridden with force=True on pycc command line

        @param process  the bootstrap process (provides container, RR, ex_manager)
        @param config   bootstrap config (unused here)

        NOTE(review): the purge loop at the bottom actually purges any service
        queue with messages > 0 and never checks consumers or a force flag —
        the last docstring bullet does not match this implementation; confirm
        which is intended.
        """
        rr = process.container.resource_registry
        ex_manager = process.container.ex_manager
        sys_name = get_sys_name()

        # get list of queues from broker with full props that have to do with our sysname
        all_queues = ex_manager._list_queues()
        queues = {q['name']: q for q in all_queues if q['name'].startswith(sys_name)}

        # get list of exchanges from broker with full props
        all_exchanges = ex_manager._list_exchanges()
        exchanges = {e['name']: e for e in all_exchanges if e['name'].startswith(sys_name)}

        # now get list of XOs from RR
        xs_objs, _ = rr.find_resources(RT.ExchangeSpace)
        xp_objs, _ = rr.find_resources(RT.ExchangePoint)
        xn_objs, _ = rr.find_resources(RT.ExchangeName)

        # map ExchangePoint id -> owning ExchangeSpace id via RR associations
        xs_by_xp = {}
        assocs = rr.find_associations(predicate=PRED.hasExchangePoint, id_only=False)
        for assoc in assocs:
            if assoc.st == RT.ExchangeSpace and assoc.ot == RT.ExchangePoint:
                xs_by_xp[assoc.o] = assoc.s

        sys_xs_name = CFG.get_safe("exchange.core.system_xs", DEFAULT_SYSTEM_XS)
        sys_node_name, sys_node = ex_manager._get_node_for_xs(sys_xs_name)

        #
        # VERIFY XSs have a declared exchange
        #
        # rem_exchanges is whittled down as each RR resource accounts for a
        # broker exchange; whatever remains at the end is unknown.
        rem_exchanges = set(exchanges)

        xs_by_id = {}
        for rrxs in xs_objs:
            xs = ExchangeSpace(ex_manager, ex_manager._get_priv_transport(sys_node_name), sys_node, rrxs.name)
            xs_by_id[rrxs._id] = xs

            if xs.exchange in rem_exchanges:
                rem_exchanges.remove(xs.exchange)
            else:
                log.warn("BootstrapExchange restart: RR XS %s, id=%s NOT FOUND in exchanges", rrxs.name, rrxs._id)

        for rrxp in xp_objs:
            xs_id = xs_by_xp.get(rrxp._id, None)
            if not xs_id or xs_id not in xs_by_id:
                log.warn("Inconsistent!! XS for XP %s not found", rrxp.name)
                continue
            xs = xs_by_id[xs_id]
            xp = ExchangePoint(ex_manager, ex_manager._get_priv_transport(sys_node_name), sys_node, rrxp.name, xs)

            if xp.exchange in rem_exchanges:
                rem_exchanges.remove(xp.exchange)
            else:
                log.warn("BootstrapExchange restart: RR XP %s, id=%s NOT FOUND in exchanges", rrxp.name, rrxp._id)

        # # events and main service exchange should be left
        system_rpc_ex = "%s.%s" % (sys_name, sys_xs_name)
        event_ex = "%s.%s.%s" % (sys_name, sys_xs_name, CFG.get_safe("exchange.core.events", DEFAULT_EVENTS_XP))
        data_ex = "%s.%s.%s" % (sys_name, sys_xs_name, CFG.get_safe("exchange.core.data_streams", "data"))

        if system_rpc_ex in rem_exchanges:
            rem_exchanges.remove(system_rpc_ex)
        if event_ex in rem_exchanges:
            rem_exchanges.remove(event_ex)
        if data_ex in rem_exchanges:
            rem_exchanges.remove(data_ex)

        # what is left?
        for exchange in rem_exchanges:
            log.warn("BootstrapExchange restart: unknown exchange on broker %s", exchange)

        #
        # VERIFY XNs have a declared queue
        #
        # Same whittling strategy as rem_exchanges, but for queues.
        rem_queues = set(queues)

        for rrxn in xn_objs:
            # can instantiate ExchangeNames, don't need specific types

            # @TODO: most queue types have a name instead of anon
            """
            # @TODO: except queue type, which needs to be fixed to record declared name type
            if rrxn.xn_type == "QUEUE":
                log.info("TODO: queue type XNs, %s", rrxn.name)
                continue
            """

            exchange_space_list, _ = rr.find_subjects(RT.ExchangeSpace, PRED.hasExchangeName, rrxn._id)
            if not len(exchange_space_list) == 1:
                # NOTE(review): StandardError is Python 2 only; gone in Python 3.
                raise StandardError("Association from ExchangeSpace to ExchangeName %s does not exist" % rrxn._id)

            rrxs = exchange_space_list[0]

            xs = ExchangeSpace(ex_manager, ex_manager._get_priv_transport(sys_node_name), sys_node, rrxs.name)
            xn = ExchangeName(ex_manager, ex_manager._get_priv_transport(sys_node_name), sys_node, rrxn.name, xs)

            if xn.queue in rem_queues:
                rem_queues.remove(xn.queue)
            else:
                log.warn("BootstrapExchange restart: RR XN %s, type %s NOT FOUND in queues", xn.queue, xn.xn_type)

        # get list of service name possibilities
        svc_objs, _ = rr.find_resources(RT.ServiceDefinition)
        svc_names = [s.name for s in svc_objs]

        proc_objs, _ = rr.find_resources(RT.Process, id_only=False)
        current_proc_names = [p.name for p in proc_objs]
        cont_objs, _ = rr.find_resources(RT.CapabilityContainer, id_only=False)
        current_containers = [c.name for c in cont_objs]

        from pyon.ion.event import local_event_queues

        # PROCESS QUEUES + SERVICE QUEUES - not yet represented by resource
        proc_queues = set()
        svc_queues = set()
        event_queues = set()

        # Classify each remaining queue by its name pattern; iterate over a
        # copy because rem_queues is mutated inside the loop.
        for queue in list(rem_queues):
            pieces = queue.split(".")

            # EVENT QUEUES
            if queue.startswith(event_ex) and pieces[-1] in local_event_queues:
                event_queues.add(queue)
                rem_queues.remove(queue)
                continue

            # CC AGENT QUEUES
            # pieces[-1][9:] strips the "cc_agent_" prefix (9 chars)
            if pieces[-1].startswith("cc_agent_") and pieces[-1][9:] in current_containers:
                proc_queues.add(queue)
                rem_queues.remove(queue)
                continue

            # PROCESS QUEUES: proc manager spawned
            # pattern "<sysname>.<root_xs>.<containerid>.<pid>"
            if len(pieces) > 3 and pieces[-1].isdigit():
                if "%s.%s" % (pieces[-2], pieces[-1]) in current_proc_names:
                    proc_queues.add(queue)
                    rem_queues.remove(queue)
                continue

            # SERVICE QUEUES
            # pattern "<sysname>.<root_xs>.<service name>"
            if len(pieces) == 3:
                if pieces[-1] in svc_names:
                    svc_queues.add(queue)
                    rem_queues.remove(queue)

        # EMPTY LEFTOVER QUEUES - they are unaccounted for

        for qn in rem_queues:
            # only delete if nothing is consuming from it
            if int(queues[qn]['consumers']) == 0:
                ex_manager.delete_queue(qn)
                log.debug("Deleted unused queue: %s (%s messages)", qn, queues[qn]['messages'])

        #
        # EMPTY SERVICE QUEUES
        #
        for queue in svc_queues:
            if int(queues[queue]['messages']) > 0:
                ex_manager.purge_queue(queue)
                log.info("Purged service queue %s (%s messages)", queue, queues[queue]['messages'])
Exemplo n.º 30
0
def send_email(message, msg_recipient, smtp_client):
    '''
    A common method to send email with formatting.

    Builds a plain-text notification body from the event's attributes and
    sends it via the supplied SMTP client. If the first sendmail attempt
    fails (typically a dropped connection), a fresh client is created and
    the send is retried once; a second failure propagates to the caller.

    @param message              Event
    @param msg_recipient        str
    @param smtp_client          fake or real smtp client object

    @raises Exception           whatever the retried sendmail raises
    '''

    log.debug("Got type of event to notify on: %s", message.type_)

    # Get the different attributes from the event message
    event = message.type_
    origin = message.origin
    description = message.description or "Not provided for this event"
    event_obj_as_string = str(message)
    ts_created = _convert_to_human_readable(message.ts_created)

    #------------------------------------------------------------------------------------
    # build the email from the event content
    #------------------------------------------------------------------------------------

    # str.join replaces the Python-2-only string.join() helper; output is identical.
    msg_body = "\r\n".join(("Event type: %s," %  event,
                            "",
                            "Originator: %s," %  origin,
                            "",
                            "Description: %s," % description,
                            "",
                            "ts_created: %s," %  ts_created,
                            "",
                            "Event object as a dictionary: %s," %  event_obj_as_string,
                            "",
                            "You received this notification from ION because you asked to be "
                            "notified about this event from this source. ",
                            "To modify or remove notifications about this event, "
                            "please access My Notifications Settings in the ION Web UI.",
                            "Do not reply to this email.  This email address is not monitored "
                            "and the emails will not be read."))
    msg_subject = "(SysName: " + get_sys_name(
    ) + ") ION event " + event + " from " + origin

    log.debug("msg_body::: %s", msg_body)

    #------------------------------------------------------------------------------------
    # the 'from' email address for notification emails
    #------------------------------------------------------------------------------------

    ION_NOTIFICATION_EMAIL_ADDRESS = '*****@*****.**'
    smtp_sender = CFG.get_safe('server.smtp.sender',
                               ION_NOTIFICATION_EMAIL_ADDRESS)

    msg = MIMEText(msg_body)
    msg['Subject'] = msg_subject
    msg['From'] = smtp_sender
    msg['To'] = msg_recipient
    log.debug("UNS sending email from %s to %s for event type: %s",
              smtp_sender, msg_recipient, message.type_)
    log.debug("UNS using the smtp client: %s", smtp_client)

    try:
        smtp_client.sendmail(smtp_sender, [msg_recipient], msg.as_string())
    except Exception:
        # Narrowed from a bare ``except:`` which also swallowed SystemExit and
        # KeyboardInterrupt. Failure is usually a broken connection, so build
        # a fresh client and retry once.
        smtp_client = setting_up_smtp_client()
        log.debug("Connect again...message received after ehlo exchange: %s",
                  str(smtp_client.ehlo()))
        smtp_client.sendmail(smtp_sender, [msg_recipient], msg.as_string())
    def on_init(self):
        """Read HA configuration, build the HA core, and start the policy loop.

        Registers the HA service, constructs a HighAvailabilityCore driven by
        the configured policy, schedules periodic policy application, and
        optionally attaches a dashi messaging handler.
        """
        # The HA core library is optional; without it this agent is inert.
        if not HighAvailabilityCore:
            log.error("HighAvailabilityCore isn't available. Use autolaunch.cfg buildout")
            return
        log.debug("HighAvailabilityCore Pyon on_init")

        # Resolve the policy implementation from configuration.
        policy_name = self.CFG.get_safe("highavailability.policy.name")
        if policy_name is None:
            raise Exception("HA service requires a policy name at CFG.highavailability.policy.name")
        policy_key = policy_name.lower()
        if policy_key not in policy.policy_map:
            raise Exception("HA Service doesn't support '%s' policy" % policy_name)
        self.policy = policy.policy_map[policy_key]

        policy_parameters = self.CFG.get_safe("highavailability.policy.parameters")
        self.policy_interval = self.CFG.get_safe("highavailability.policy.interval",
                DEFAULT_INTERVAL)

        ha_cfg = self.CFG.get_safe("highavailability")

        # use default PD name as the sole PD if none are provided in config
        self.pds = self.CFG.get_safe("highavailability.process_dispatchers",
            [ProcessDispatcherService.name])

        self.process_definition_id = self.CFG.get_safe("highavailability.process_definition_id")
        self.process_configuration = self.CFG.get_safe("highavailability.process_configuration")
        aggregator_config = self.CFG.get_safe("highavailability.aggregator")

        self.service_id = self._register_service()

        # TODO: Allow other core class?
        self.core = HighAvailabilityCore(
            ha_cfg, ProcessDispatcherSimpleAPIClient, self.pds, self.policy,
            process_definition_id=self.process_definition_id,
            parameters=policy_parameters,
            process_configuration=self.process_configuration,
            aggregator_config=aggregator_config,
            pd_client_kwargs={'container': self.container,
                              'service_id': self.service_id})

        # Re-apply the policy on a fixed interval.
        self.policy_thread = looping_call(self.policy_interval, self.core.apply_policy)

        # Optional dashi (out-of-band AMQP RPC) endpoint.
        if self.CFG.get_safe("highavailability.dashi_messaging", False):
            dashi_name = self.CFG.get_safe("highavailability.dashi_name")
            if not dashi_name:
                raise Exception("dashi_name unknown")
            dashi_uri = self.CFG.get_safe("highavailability.dashi_uri")
            if not dashi_uri:
                # Fall back to assembling an AMQP URI from broker credentials.
                rabbit_host = self.CFG.get_safe("server.amqp.host")
                rabbit_user = self.CFG.get_safe("server.amqp.username")
                rabbit_pass = self.CFG.get_safe("server.amqp.password")
                if not (rabbit_host and rabbit_user and rabbit_pass):
                    raise Exception("cannot form dashi URI")
                dashi_uri = "amqp://%s:%s@%s/" % (rabbit_user, rabbit_pass,
                                                  rabbit_host)
            dashi_exchange = (self.CFG.get_safe("highavailability.dashi_exchange")
                              or get_sys_name())
            self.dashi_handler = HADashiHandler(self, dashi_name, dashi_uri, dashi_exchange)
        else:
            self.dashi_handler = None
Exemplo n.º 32
0
    def on_restart(self, process, config, **kwargs):
        """
        Handles bootstrapping of system restart for exchange resources and broker state.

        - Ensures ExchangePoint and ExchangeSpace resources in system have a properly
          declared AMQP exchange
        - Ensures ExchangeName resources in system have a properly declared queue
        - Logs all exchanges/queues it didn't understand
        - Purges all service queues as long as no consumers are attached, or can be
          overridden with force=True on pycc command line

        @param process  the bootstrap process (provides container, RR, ex_manager)
        @param config   bootstrap config (unused here)

        NOTE(review): the purge loop at the bottom purges any service queue
        with messages > 0 without checking consumers or a force flag — that
        last docstring bullet does not match this implementation; confirm
        which is intended.
        """
        rr = process.container.resource_registry
        ex_manager = process.container.ex_manager
        sys_name = get_sys_name()

        # get list of queues from broker with full props that have to do with our sysname
        all_queues = ex_manager._list_queues()
        queues = {
            q['name']: q
            for q in all_queues if q['name'].startswith(sys_name)
        }

        # get list of exchanges from broker with full props
        all_exchanges = ex_manager._list_exchanges()
        exchanges = {
            e['name']: e
            for e in all_exchanges if e['name'].startswith(sys_name)
        }

        # now get list of XOs from RR
        xs_objs, _ = rr.find_resources(RT.ExchangeSpace)
        xp_objs, _ = rr.find_resources(RT.ExchangePoint)
        xn_objs, _ = rr.find_resources(RT.ExchangeName)

        # map ExchangePoint id -> owning ExchangeSpace id via RR associations
        xs_by_xp = {}
        assocs = rr.find_associations(predicate=PRED.hasExchangePoint,
                                      id_only=False)
        for assoc in assocs:
            if assoc.st == RT.ExchangeSpace and assoc.ot == RT.ExchangePoint:
                xs_by_xp[assoc.o] = assoc.s

        sys_xs_name = CFG.get_safe("exchange.core.system_xs",
                                   DEFAULT_SYSTEM_XS)
        sys_node_name, sys_node = ex_manager._get_node_for_xs(sys_xs_name)

        #
        # VERIFY XSs have a declared exchange
        #
        # rem_exchanges is whittled down as each RR resource accounts for a
        # broker exchange; whatever remains at the end is unknown.
        rem_exchanges = set(exchanges)

        xs_by_id = {}
        for rrxs in xs_objs:
            xs = ExchangeSpace(ex_manager,
                               ex_manager._get_priv_transport(sys_node_name),
                               sys_node, rrxs.name)
            xs_by_id[rrxs._id] = xs

            if xs.exchange in rem_exchanges:
                rem_exchanges.remove(xs.exchange)
            else:
                log.warn(
                    "BootstrapExchange restart: RR XS %s, id=%s NOT FOUND in exchanges",
                    rrxs.name, rrxs._id)

        for rrxp in xp_objs:
            xs_id = xs_by_xp.get(rrxp._id, None)
            if not xs_id or xs_id not in xs_by_id:
                log.warn("Inconsistent!! XS for XP %s not found", rrxp.name)
                continue
            xs = xs_by_id[xs_id]
            xp = ExchangePoint(ex_manager,
                               ex_manager._get_priv_transport(sys_node_name),
                               sys_node, rrxp.name, xs)

            if xp.exchange in rem_exchanges:
                rem_exchanges.remove(xp.exchange)
            else:
                log.warn(
                    "BootstrapExchange restart: RR XP %s, id=%s NOT FOUND in exchanges",
                    rrxp.name, rrxp._id)

        # # events and main service exchange should be left
        system_rpc_ex = "%s.%s" % (sys_name, sys_xs_name)
        event_ex = "%s.%s.%s" % (sys_name, sys_xs_name,
                                 CFG.get_safe("exchange.core.events",
                                              DEFAULT_EVENTS_XP))
        data_ex = "%s.%s.%s" % (sys_name, sys_xs_name,
                                CFG.get_safe("exchange.core.data_streams",
                                             "data"))

        if system_rpc_ex in rem_exchanges:
            rem_exchanges.remove(system_rpc_ex)
        if event_ex in rem_exchanges:
            rem_exchanges.remove(event_ex)
        if data_ex in rem_exchanges:
            rem_exchanges.remove(data_ex)

        # what is left?
        for exchange in rem_exchanges:
            log.warn(
                "BootstrapExchange restart: unknown exchange on broker %s",
                exchange)

        #
        # VERIFY XNs have a declared queue
        #
        # Same whittling strategy as rem_exchanges, but for queues.
        rem_queues = set(queues)

        for rrxn in xn_objs:
            # can instantiate ExchangeNames, don't need specific types

            # @TODO: most queue types have a name instead of anon
            """
            # @TODO: except queue type, which needs to be fixed to record declared name type
            if rrxn.xn_type == "QUEUE":
                log.info("TODO: queue type XNs, %s", rrxn.name)
                continue
            """

            exchange_space_list, _ = rr.find_subjects(RT.ExchangeSpace,
                                                      PRED.hasExchangeName,
                                                      rrxn._id)
            if not len(exchange_space_list) == 1:
                # NOTE(review): StandardError is Python 2 only; gone in Python 3.
                raise StandardError(
                    "Association from ExchangeSpace to ExchangeName %s does not exist"
                    % rrxn._id)

            rrxs = exchange_space_list[0]

            xs = ExchangeSpace(ex_manager,
                               ex_manager._get_priv_transport(sys_node_name),
                               sys_node, rrxs.name)
            xn = ExchangeName(ex_manager,
                              ex_manager._get_priv_transport(sys_node_name),
                              sys_node, rrxn.name, xs)

            if xn.queue in rem_queues:
                rem_queues.remove(xn.queue)
            else:
                log.warn(
                    "BootstrapExchange restart: RR XN %s, type %s NOT FOUND in queues",
                    xn.queue, xn.xn_type)

        # get list of service name possibilities
        svc_objs, _ = rr.find_resources(RT.ServiceDefinition)
        svc_names = [s.name for s in svc_objs]

        proc_objs, _ = rr.find_resources(RT.Process, id_only=False)
        current_proc_names = [p.name for p in proc_objs]
        cont_objs, _ = rr.find_resources(RT.CapabilityContainer, id_only=False)
        current_containers = [c.name for c in cont_objs]

        from pyon.ion.event import local_event_queues

        # PROCESS QUEUES + SERVICE QUEUES - not yet represented by resource
        proc_queues = set()
        svc_queues = set()
        event_queues = set()

        # Classify each remaining queue by its name pattern; iterate over a
        # copy because rem_queues is mutated inside the loop.
        for queue in list(rem_queues):
            pieces = queue.split(".")

            # EVENT QUEUES
            if queue.startswith(event_ex) and pieces[-1] in local_event_queues:
                event_queues.add(queue)
                rem_queues.remove(queue)
                continue

            # CC AGENT QUEUES
            # pieces[-1][9:] strips the "cc_agent_" prefix (9 chars)
            if pieces[-1].startswith(
                    "cc_agent_") and pieces[-1][9:] in current_containers:
                proc_queues.add(queue)
                rem_queues.remove(queue)
                continue

            # PROCESS QUEUES: proc manager spawned
            # pattern "<sysname>.<root_xs>.<containerid>.<pid>"
            if len(pieces) > 3 and pieces[-1].isdigit():
                if "%s.%s" % (pieces[-2], pieces[-1]) in current_proc_names:
                    proc_queues.add(queue)
                    rem_queues.remove(queue)
                continue

            # SERVICE QUEUES
            # pattern "<sysname>.<root_xs>.<service name>"
            if len(pieces) == 3:
                if pieces[-1] in svc_names:
                    svc_queues.add(queue)
                    rem_queues.remove(queue)

            # LOCAL RPC QUEUES
            # pattern "<sysname>.<root_xs>.rpc_<uuid>"
            # NOTE(review): no 'continue' after the SERVICE branch above — a
            # queue matching both a service name and the rpc_ prefix would hit
            # a double remove (KeyError). Presumably names never overlap;
            # confirm.
            if len(pieces) == 3:
                if pieces[-1].startswith("rpc_"):
                    rem_queues.remove(queue)

        # EMPTY LEFTOVER QUEUES - they are unaccounted for
        # TODO - current container used queues, e.g. process_dispatcher

        for qn in rem_queues:
            # only delete if nothing is consuming from it
            if int(queues[qn]['consumers']) == 0:
                ex_manager.delete_queue(qn)
                log.debug("Deleted unused queue: %s (%s messages)", qn,
                          queues[qn]['messages'])

        #
        # EMPTY SERVICE QUEUES
        #
        for queue in svc_queues:
            if int(queues[queue]['messages']) > 0:
                ex_manager.purge_queue(queue)
                log.info("Purged service queue %s (%s messages)", queue,
                         queues[queue]['messages'])