Example #1
    def _start_container(self):
        # hack to force queue auto delete on for int tests
        self._turn_on_queue_auto_delete()
        self._patch_out_diediedie()
        self._patch_out_fail_fast_kill()

        bootstrap.testing_fast = True

        if os.environ.get('CEI_LAUNCH_TEST', None):
            # Let's force clean again.  The static initializer is causing
            # issues
            self._force_clean()
            self._patch_out_start_rel()
            from pyon.datastore.datastore_admin import DatastoreAdmin
            da = DatastoreAdmin(config=CFG)
            da.load_datastore('res/dd')
        else:
            # We cannot live without pre-initialized datastores and resource objects
            pre_initialize_ion()

        # hack to force_clean on filesystem
        try:
            CFG['container']['filesystem']['force_clean'] = True
        except KeyError:
            CFG['container']['filesystem'] = {}
            CFG['container']['filesystem']['force_clean'] = True

        self.container = None
        self.addCleanup(self._stop_container)
        self.container = Container()
        self.container.start()

        bootstrap.testing_fast = False
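
In the CEI_LAUNCH_TEST branch above, the test container pre-loads a previously dumped datastore from res/dd through DatastoreAdmin instead of re-initializing everything. A minimal standalone sketch of that dump/clear/load round trip, using only calls that appear in these examples (illustrative, not part of the original code; it assumes pyon has already been bootstrapped so that CFG and get_sys_name() are usable and a datastore is reachable):

# Illustrative sketch: the DatastoreAdmin round trip these tests rely on.
from pyon.core import bootstrap
if not bootstrap.pyon_initialized:
    bootstrap.bootstrap_pyon()

from pyon.public import get_sys_name, CFG
from pyon.datastore.datastore_admin import DatastoreAdmin

da = DatastoreAdmin(config=CFG)
da.dump_datastore(path='res/dd', compact=True)    # snapshot the current datastores
da.clear_datastore(prefix=get_sys_name())         # wipe everything under this sysname
da.load_datastore('res/dd')                       # restore the snapshot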
Example #2
def main():

    usage = \
    """
    %prog [options] prefix
    """
    description = "Use this program to clear databases that match a given prefix"
    parser = OptionParser(usage=usage, description=description)
    parser.add_option("-P", "--port", dest="db_port", default=None, help="Port number for db", action="store", type=int, metavar="PORT")
    parser.add_option("-H", "--host", dest="db_host", default='localhost', help="The host name or ip address of the db server", action="store", type=str, metavar="HOST")
    parser.add_option("-u", "--username", dest="db_uname", default=None, help="Username for the db server", action="store", type=str, metavar="UNAME")
    parser.add_option("-p", "--password", dest="db_pword", default=None, help="Password for the db server", action="store", type=str, metavar="PWORD")
    parser.add_option("-s", "--sysname", dest="sysname", default=None, help="The sysname prefix to clear databases", action="store", type=str, metavar="SYSNAME")
    parser.add_option("-t", "--store_type", dest="db_type", default="postgresql", help="Datastore type", action="store", type=str, metavar="DSTYPE")
    parser.add_option("-v", "--verbose", help="More verbose output", action="store_true")
    parser.add_option("-d", "--dump", dest="dump_path", default=None, help="Dump sysname datastores to path", action="store", type=str, metavar="DPATH")
    parser.add_option("-l", "--load", dest="load_path", default=None, help="Load dumped datastore from path", action="store", type=str, metavar="LPATH")

    (options, args) = parser.parse_args()

    from pyon.core import log as logutil
    logutil.configure_logging(logutil.DEFAULT_LOGGING_PATHS)

    if options.dump_path:
        config = create_config(options.db_host, options.db_port, options.db_uname, options.db_pword)
        sysname = options.sysname or "scion"
        log.info("dumping %s datastores to %s", sysname, options.dump_path)
        from pyon.datastore.datastore_admin import DatastoreAdmin
        datastore_admin = DatastoreAdmin(config=config, sysname=sysname)
        datastore_admin.dump_datastore(path=options.dump_path)
    elif options.load_path:
        config = create_config(options.db_host, options.db_port, options.db_uname, options.db_pword)
        sysname = options.sysname or "scion"
        log.info("loading %s datastores from dumped content in %ss", sysname, options.dump_path)
        from pyon.datastore.datastore_admin import DatastoreAdmin
        datastore_admin = DatastoreAdmin(config=config, sysname=sysname)
        datastore_admin.load_datastore(path=options.load_path)
    else:
        if len(args) == 0:
            log.error("Error: no prefix argument specified")
            parser.print_help()
            sys.exit()

        if len(args) != 1:
            log.error("Error: You can not specify multiple prefixes. Received args: %s", str(args))
            parser.print_help()
            sys.exit()

        prefix = args[0]

        if prefix == "":
            log.error("Error: You can not give the empty string as a prefix!")
            parser.print_help()
            sys.exit()

        config = create_config(options.db_host, options.db_port, options.db_uname, options.db_pword, options.db_type)
        _clear_db(config, prefix=prefix, sysname=options.sysname, verbose=bool(options.verbose))
Example #3
def main():

    usage = \
    """
    %prog [options] prefix
    """
    description = \
    """Use this program to clear databases in couch that match a given prefix
    """
    parser = OptionParser(usage=usage, description=description)
    parser.add_option("-P", "--port", dest="couch_port", default=None, help="Port number for couch", action="store", type=int, metavar="PORT")
    parser.add_option("-H", "--host", dest="couch_host", default='localhost', help="The host name or ip address of the couch server", action="store", type=str, metavar="HOST")
    parser.add_option("-u", "--username", dest="couch_uname", default=None, help="Username for the couch server", action="store", type=str, metavar="UNAME")
    parser.add_option("-p", "--password", dest="couch_pword", default=None, help="Password for the couch server", action="store", type=str, metavar="PWORD")
    parser.add_option("-s", "--sysname", dest="sysname", default=None, help="The sysname prefix to clear in couch", action="store", type=str, metavar="SYSNAME")
    parser.add_option("-t", "--store_type", dest="couch_type", default="postgresql", help="Datastore type", action="store", type=str, metavar="DSTYPE")
    parser.add_option("-v", "--verbose", help="More verbose output", action="store_true")
    parser.add_option("-d", "--dump", dest="dump_path", default=None, help="Dump sysname datastores to path", action="store", type=str, metavar="DPATH")
    parser.add_option("-l", "--load", dest="load_path", default=None, help="Load dumped datastore from path", action="store", type=str, metavar="LPATH")

    (options, args) = parser.parse_args()

    if options.dump_path:
        config = create_config(options.couch_host, options.couch_port, options.couch_uname, options.couch_pword)
        sysname = options.sysname or "mine"
        print "clear_couch: dumping", sysname, "datastores to", options.dump_path
        from pyon.datastore.datastore_admin import DatastoreAdmin
        datastore_admin = DatastoreAdmin(config=config, sysname=sysname)
        datastore_admin.dump_datastore(path=options.dump_path)
    elif options.load_path:
        config = create_config(options.couch_host, options.couch_port, options.couch_uname, options.couch_pword)
        sysname = options.sysname or "mine"
        print "clear_couch: loading", sysname, "datastores from dumped content in", options.dump_path
        from pyon.datastore.datastore_admin import DatastoreAdmin
        datastore_admin = DatastoreAdmin(config=config, sysname=sysname)
        datastore_admin.load_datastore(path=options.load_path)
    else:
        if len(args) == 0:
            print 'clear_couch: Error: no prefix argument specified'
            parser.print_help()
            sys.exit()

        if len(args) != 1:
            print 'clear_couch: Error: You can not specify multiple prefixes. Received args: %s' % str(args)
            parser.print_help()
            sys.exit()

        prefix = args[0]

        if prefix == '':
            print 'clear_couch: Error: You can not give the empty string as a prefix!'
            parser.print_help()
            sys.exit()

        config = create_config(options.couch_host, options.couch_port, options.couch_uname, options.couch_pword, options.couch_type)
        _clear_db(config, prefix=prefix, sysname=options.sysname, verbose=bool(options.verbose))
Example #4
    def begin(self):
        """Called before any tests are collected or run. Use this to
        perform any setup needed before testing begins.
        """
        # Make sure we initialize pyon before anything in this plugin executes
        from pyon.core import bootstrap
        if not bootstrap.pyon_initialized:
            bootstrap.bootstrap_pyon()

        try:
            from pyon.public import get_sys_name, CFG
            self.sysname = get_sys_name()

            # Clean exchanges and system queues out there
            try:
                connect_str = '-H %s -P 55672 -u %s -p %s -V %s' % (CFG.server.amqp.host,
                                                                    CFG.server.amqp.username,
                                                                    CFG.server.amqp.password,
                                                                    CFG.server.amqp.vhost)

                deleted_exchanges, deleted_queues = clean_by_sysname(connect_str, self.sysname)

                debug.write('Deleted exchanges:\n%s \n' % '\n'.join(deleted_exchanges))
                debug.write('Deleted queues:\n%s \n' % '\n'.join(deleted_queues))

            except Exception as e:
                pass

            # Force datastore loader to use the same sysname
            from pyon.datastore.datastore_admin import DatastoreAdmin
            self.datastore_admin = DatastoreAdmin(config=CFG)

            self.datastore_admin.clear_datastore(prefix=self.sysname)

            def die(signum, frame):
                # For whatever reason, the parent doesn't die some times
                # when getting KeyboardInterrupt.  Hence this signal
                # handler.

                # Signal is pass through. The child pycc gets
                # its own KeyboardInterrupt and will shut down accordingly.
                debug.write('Received Keyboard Interrupt. Exiting now.\n')
                os._exit(9)

            signal.signal(signal.SIGINT, die)

            def no_zombie(signum, frame):
                # Debug to figure out who's dying
                debug.write('SIGCHLD received\n')
                stack = []
                while frame:
                    stack.append(frame)
                    frame = frame.f_back
                stack.reverse()
                for frame in stack:
                    debug.write('Frame %s in %s at line %s\n' %
                            (frame.f_code.co_name,
                                frame.f_code.co_filename, frame.f_lineno))
                debug.write('Child is dead...Clean up now so there is no zombie\n')
                (pid, status) = os.wait()
                exitstatus, signum = status & 0xff, (status & 0xff00) >> 8
                debug.write('Child pid %d with exit status %d and signum %d\n' % (pid, exitstatus, signum))
            # Could be dangerous.  Comment this out.
            # signal.signal(signal.SIGCHLD, no_zombie)

            def container_started_cb(signum, frame):
                """Callback when child pycc service is ready"""
                self.container_started = True

            signal.signal(signal.SIGUSR1, container_started_cb)

            # Make sure the pycc process has the same sysname as the nose
            ccargs = ['bin/pycc', '-o', '--noshell', '-sp', '--sysname=%s' % self.sysname,
                    '--logcfg=res/config/logging.pycc.yml',
                    '--rel=%s' % self.rel,
                    "--config={'system': {'auto_bootstrap': True}}"]
            debug.write('Starting cc process: %s\n' % ' '.join(ccargs))
            newenv = os.environ.copy()
            po = subprocess.Popen(ccargs, env=newenv, close_fds=True)
            self.ccs.append(po)

            # Wait for container to be ready
            while not self.container_started:
                time.sleep(0.2)
            debug.write('Child container is ready...\n')

            # Dump datastore
            self.datastore_admin.dump_datastore(path='res/dd', compact=True)
            debug.write('Dump child container state to file...\n')

            # Clean again to make sure the first nosetest starts on a clean
            # slate
            self.datastore_admin.clear_datastore(prefix=self.sysname)
            # Set PYCC env var in case CEI needs to skip tests in pycc mode
            os.environ['PYCC_MODE'] = '1'
            # Enable CEI mode for the tests
            os.environ['CEI_LAUNCH_TEST'] = '1'

            debug.write('Start nose tests now...\n')
        except Exception as e:
            self.container_shutdown()
            raise e
Example #5
class PYCC(Plugin):
    name = 'pycc'

    def __init__(self):
        Plugin.__init__(self)
        self.ccs = []
        self.container_started = False
        self.blames = {'scidata':[], 'state':[], 'directory':[], 'events':[],
                'resources':[], 'objects':[]}
        self.last_blame = {}
        self.sysname = None

    def options(self, parser, env):
        """Register command line options"""
        super(PYCC, self).options(parser, env=env)
        parser.add_option('--pycc-rel', type='string', dest='pycc_rel',
                help='Rel file path, res/deploy/r2deploy.yml by default',
                default='res/deploy/r2deploy.yml')

    def configure(self, options, conf):
        """Configure the plugin and system, based on selected options."""
        super(PYCC, self).configure(options, conf)
        if self.enabled:
            self.rel = options.pycc_rel

    def begin(self):
        """Called before any tests are collected or run. Use this to
        perform any setup needed before testing begins.
        """
        # Make sure we initialize pyon before anything in this plugin executes
        from pyon.core import bootstrap
        if not bootstrap.pyon_initialized:
            bootstrap.bootstrap_pyon()

        try:
            from pyon.public import get_sys_name, CFG
            self.sysname = get_sys_name()

            # Clean exchanges and system queues out there
            try:
                connect_str = '-H %s -P 55672 -u %s -p %s -V %s' % (CFG.server.amqp.host,
                                                                    CFG.server.amqp.username,
                                                                    CFG.server.amqp.password,
                                                                    CFG.server.amqp.vhost)

                deleted_exchanges, deleted_queues = clean_by_sysname(connect_str, self.sysname)

                debug.write('Deleted exchanges:\n%s \n' % '\n'.join(deleted_exchanges))
                debug.write('Deleted queues:\n%s \n' % '\n'.join(deleted_queues))

            except Exception as e:
                pass

            # Force datastore loader to use the same sysname
            from pyon.datastore.datastore_admin import DatastoreAdmin
            self.datastore_admin = DatastoreAdmin(config=CFG)

            self.datastore_admin.clear_datastore(prefix=self.sysname)

            def die(signum, frame):
                # For whatever reason, the parent doesn't die some times
                # when getting KeyboardInterrupt.  Hence this signal
                # handler.

                # Signal is pass through. The child pycc gets
                # its own KeyboardInterrupt and will shut down accordingly.
                debug.write('Received Keyboard Interrupt. Exiting now.\n')
                os._exit(9)

            signal.signal(signal.SIGINT, die)

            def no_zombie(signum, frame):
                # Debug to figure out who's dying
                debug.write('SIGCHLD received\n')
                stack = []
                while frame:
                    stack.append(frame)
                    frame = frame.f_back
                stack.reverse()
                for frame in stack:
                    debug.write('Frame %s in %s at line %s\n' %
                            (frame.f_code.co_name,
                                frame.f_code.co_filename, frame.f_lineno))
                debug.write('Child is dead...Clean up now so there is no zombie\n')
                (pid, status) = os.wait()
                exitstatus, signum = status & 0xff, (status & 0xff00) >> 8
                debug.write('Child pid %d with exit status %d and signum %d\n' % (pid, exitstatus, signum))
            # Could be dangerous.  Comment this out.
            # signal.signal(signal.SIGCHLD, no_zombie)

            def container_started_cb(signum, frame):
                """Callback when child pycc service is ready"""
                self.container_started = True

            signal.signal(signal.SIGUSR1, container_started_cb)

            # Make sure the pycc process has the same sysname as the nose
            ccargs = ['bin/pycc', '-o', '--noshell', '-sp', '--sysname=%s' % self.sysname,
                    '--logcfg=res/config/logging.pycc.yml',
                    '--rel=%s' % self.rel,
                    "--config={'system': {'auto_bootstrap': True}}"]
            debug.write('Starting cc process: %s\n' % ' '.join(ccargs))
            newenv = os.environ.copy()
            po = subprocess.Popen(ccargs, env=newenv, close_fds=True)
            self.ccs.append(po)

            # Wait for container to be ready
            while not self.container_started:
                time.sleep(0.2)
            debug.write('Child container is ready...\n')

            # Dump datastore
            self.datastore_admin.dump_datastore(path='res/dd', compact=True)
            debug.write('Dump child container state to file...\n')

            # Clean again to make sure the first nosetest starts on a clean
            # slate
            self.datastore_admin.clear_datastore(prefix=self.sysname)
            # Set PYCC env var in case CEI needs to skip tests in pycc mode
            os.environ['PYCC_MODE'] = '1'
            # Enable CEI mode for the tests
            os.environ['CEI_LAUNCH_TEST'] = '1'

            debug.write('Start nose tests now...\n')
        except Exception as e:
            self.container_shutdown()
            raise e

    def finalize(self, result):
        """Called after all report output, including output from all
        plugins, has been sent to the stream. Use this to print final
        test results or perform final cleanup. Return None to allow
        other plugins to continue printing, or any other value to stop
        them.
        """
        self.container_shutdown()
        self.datastore_admin.clear_datastore(prefix=self.sysname)
        import subprocess
        subprocess.call(['rm', '-rf', 'res/dd'])

    def container_shutdown(self):
        debug.write('Shut down cc process\n')
        for cc in self.ccs:
            pid = cc.pid
            debug.write('\tClosing container with pid:%d\n' % pid)
            os.kill(pid, signal.SIGINT)
            os.waitpid(pid, 0)

    def beforeTest(self, test):
        os.environ['BLAME'] = test.id()

    def afterTest(self, test):
        blame = self.datastore_admin.get_blame_objects()
        # Having a hard time detecting skips.  Since skipped tests don't
        # clean we should not save duplicate blames...
        if blame != self.last_blame:
            for key in blame.keys():
                self.blames[key].extend(blame[key])
        self.last_blame = blame

    def report(self, stream):
        stream.write('Blame Report on leftover objects in couch db\n')
        stream.write('='* 20 + '\n')
        for key, value in self.blames.items():
            if value != []:
                stream.write(key + ':\n')
                stream.write('-'*20 + ':\n')
                last_blame = None
                for item in value:
                    blame = item['blame_']
                    if blame != last_blame:
                        stream.write(item['blame_'] + ':\n')
                    stream.write('\t' + str(item) + '\n')
                    last_blame = blame
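
The PYCC class above is a complete nose plugin, but the examples do not show how it gets picked up. The usual route is a setuptools entry point in the nose.plugins.0.10 group, after which nose derives the enabling switch from the plugin's name attribute. A sketch under those standard nose conventions (the distribution and module names below, pycc-nose-plugin and pycc_plugin, are assumptions, not taken from the examples):

# Illustrative setup.py fragment for registering a nose plugin such as PYCC.
# 'pycc_plugin' is an assumed module name that holds the PYCC class.
from setuptools import setup

setup(
    name='pycc-nose-plugin',
    version='0.1',
    py_modules=['pycc_plugin'],
    install_requires=['nose'],
    entry_points={
        'nose.plugins.0.10': [
            'pycc = pycc_plugin:PYCC',
        ],
    },
)

Once installed, a plugin named 'pycc' would typically be activated with nosetests --with-pycc, and the rel file can be overridden with the --pycc-rel option registered in options() above.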
Example #6
def main():

    usage = \
    """
    %prog [options] prefix
    """
    description = "Use this program to clear databases that match a given prefix"
    parser = OptionParser(usage=usage, description=description)
    parser.add_option("-P",
                      "--port",
                      dest="db_port",
                      default=None,
                      help="Port number for db",
                      action="store",
                      type=int,
                      metavar="PORT")
    parser.add_option("-H",
                      "--host",
                      dest="db_host",
                      default='localhost',
                      help="The host name or ip address of the db server",
                      action="store",
                      type=str,
                      metavar="HOST")
    parser.add_option("-u",
                      "--username",
                      dest="db_uname",
                      default=None,
                      help="Username for the db server",
                      action="store",
                      type=str,
                      metavar="UNAME")
    parser.add_option("-p",
                      "--password",
                      dest="db_pword",
                      default=None,
                      help="Password for the db server",
                      action="store",
                      type=str,
                      metavar="PWORD")
    parser.add_option("-s",
                      "--sysname",
                      dest="sysname",
                      default=None,
                      help="The sysname prefix to clear databases",
                      action="store",
                      type=str,
                      metavar="SYSNAME")
    parser.add_option("-t",
                      "--store_type",
                      dest="db_type",
                      default="postgresql",
                      help="Datastore type",
                      action="store",
                      type=str,
                      metavar="DSTYPE")
    parser.add_option("-v",
                      "--verbose",
                      help="More verbose output",
                      action="store_true")
    parser.add_option("-d",
                      "--dump",
                      dest="dump_path",
                      default=None,
                      help="Dump sysname datastores to path",
                      action="store",
                      type=str,
                      metavar="DPATH")
    parser.add_option("-l",
                      "--load",
                      dest="load_path",
                      default=None,
                      help="Load dumped datastore from path",
                      action="store",
                      type=str,
                      metavar="LPATH")

    (options, args) = parser.parse_args()

    from pyon.core import log as logutil
    logutil.configure_logging(logutil.DEFAULT_LOGGING_PATHS)

    if options.dump_path:
        config = create_config(options.db_host, options.db_port,
                               options.db_uname, options.db_pword)
        sysname = options.sysname or "scion"
        log.info("dumping %s datastores to %s", sysname, options.dump_path)
        from pyon.datastore.datastore_admin import DatastoreAdmin
        datastore_admin = DatastoreAdmin(config=config, sysname=sysname)
        datastore_admin.dump_datastore(path=options.dump_path)
    elif options.load_path:
        config = create_config(options.db_host, options.db_port,
                               options.db_uname, options.db_pword)
        sysname = options.sysname or "scion"
        log.info("loading %s datastores from dumped content in %ss", sysname,
                 options.dump_path)
        from pyon.datastore.datastore_admin import DatastoreAdmin
        datastore_admin = DatastoreAdmin(config=config, sysname=sysname)
        datastore_admin.load_datastore(path=options.load_path)
    else:
        if len(args) == 0:
            log.error("Error: no prefix argument specified")
            parser.print_help()
            sys.exit()

        if len(args) != 1:
            log.error(
                "Error: You can not specify multiple prefixes. Received args: %s",
                str(args))
            parser.print_help()
            sys.exit()

        prefix = args[0]

        if prefix == "":
            log.error("Error: You can not give the empty string as a prefix!")
            parser.print_help()
            sys.exit()

        config = create_config(options.db_host, options.db_port,
                               options.db_uname, options.db_pword,
                               options.db_type)
        _clear_db(config,
                  prefix=prefix,
                  sysname=options.sysname,
                  verbose=bool(options.verbose))
Example #7
def main():

    usage = \
    """
    %prog [options] prefix
    """
    description = \
    """Use this program to clear databases in couch that match a given prefix
    """
    parser = OptionParser(usage=usage, description=description)
    parser.add_option("-p",
                      "--port",
                      dest="couch_port",
                      default=5984,
                      help="Port number for couch",
                      action="store",
                      type=int,
                      metavar="PORT")
    parser.add_option("-H",
                      "--host",
                      dest="couch_host",
                      default='localhost',
                      help="The host name or ip address of the couch server",
                      action="store",
                      type=str,
                      metavar="HOST")
    parser.add_option("-u",
                      "--username",
                      dest="couch_uname",
                      default=None,
                      help="Username for the couch server",
                      action="store",
                      type=str,
                      metavar="UNAME")
    parser.add_option("-n",
                      "--password",
                      dest="couch_pword",
                      default=None,
                      help="Password for the couch server",
                      action="store",
                      type=str,
                      metavar="PWORD")
    parser.add_option("-s",
                      "--sysname",
                      dest="sysname",
                      default=None,
                      help="The sysname prefix to clear in couch",
                      action="store",
                      type=str,
                      metavar="SYSNAME")
    parser.add_option("-v",
                      "--verbose",
                      help="More verbose output",
                      action="store_true")
    parser.add_option("-d",
                      "--dump",
                      dest="dump_path",
                      default=None,
                      help="Dump sysname datastores to path",
                      action="store",
                      type=str,
                      metavar="DPATH")
    parser.add_option("-l",
                      "--load",
                      dest="load_path",
                      default=None,
                      help="Load dumped datastore from path",
                      action="store",
                      type=str,
                      metavar="LPATH")

    (options, args) = parser.parse_args()

    if options.dump_path:
        config = create_config(options.couch_host, options.couch_port,
                               options.couch_uname, options.couch_pword)
        sysname = options.sysname or "mine"
        print "clear_couch: dumping", sysname, "datastores to", options.dump_path
        from pyon.datastore.datastore_admin import DatastoreAdmin
        datastore_admin = DatastoreAdmin(config=config, sysname=sysname)
        datastore_admin.dump_datastore(path=options.dump_path, compact=True)
    elif options.load_path:
        config = create_config(options.couch_host, options.couch_port,
                               options.couch_uname, options.couch_pword)
        sysname = options.sysname or "mine"
        print "clear_couch: loading", sysname, "datastores from dumped content in", options.dump_path
        from pyon.datastore.datastore_admin import DatastoreAdmin
        datastore_admin = DatastoreAdmin(config=config, sysname=sysname)
        datastore_admin.load_datastore(path=options.load_path)
    else:
        if len(args) == 0:
            print 'clear_couch: Error: no prefix argument specified'
            parser.print_help()
            sys.exit()

        if len(args) != 1:
            print 'clear_couch: Error: You can not specify multiple prefixes. Received args: %s' % str(
                args)
            parser.print_help()
            sys.exit()

        prefix = args[0]

        if prefix == '':
            print 'clear_couch: Error: You can not give the empty string as a prefix!'
            parser.print_help()
            sys.exit()

        _clear_couch(options.couch_host,
                     options.couch_port,
                     options.couch_uname,
                     options.couch_pword,
                     prefix=prefix,
                     verbose=bool(options.verbose))
Example #8
    def begin(self):
        """Called before any tests are collected or run. Use this to
        perform any setup needed before testing begins.
        """
        # Make sure we initialize pyon before anything in this plugin executes
        from pyon.core import bootstrap
        if not bootstrap.pyon_initialized:
            bootstrap.bootstrap_pyon()

        try:
            from pyon.public import get_sys_name, CFG
            self.sysname = get_sys_name()

            # Clean exchanges and system queues out there
            try:
                rmh = RabbitManagementHelper(make_parser(), '-H %s -P 55672 -u %s -p %s -V %s'
                        % (CFG.server.amqp.host, CFG.server.amqp.username,
                        CFG.server.amqp.password, CFG.server.amqp.vhost))
                exchanges = rmh.list_names('exchanges')
                deleted = rmh.delete_names_with_prefix('exchange', exchanges, self.sysname)
                debug.write('Deleted exchanges:\n%s \n' % '\n'.join(deleted))
                queues = rmh.list_names('queues')
                deleted = rmh.delete_names_with_prefix('queue', queues, self.sysname)
                debug.write('Deleted queues:\n%s \n' % '\n'.join(deleted))
            except Exception as e:
                pass

            # Force datastore loader to use the same sysname
            from pyon.datastore.datastore_admin import DatastoreAdmin
            self.datastore_admin = DatastoreAdmin(config=CFG)

            self.datastore_admin.clear_datastore(prefix=self.sysname)

            def die(signum, frame):
                # For whatever reason, the parent doesn't die some times
                # when getting KeyboardInterrupt.  Hence this signal
                # handler.

                # Signal is pass through. The child pycc gets
                # its own KeyboardInterrupt and will shut down accordingly.
                debug.write('Received Keyboard Interrupt. Exiting now.\n')
                os._exit(9)

            signal.signal(signal.SIGINT, die)

            def no_zombie(signum, frame):
                # Debug to figure out who's dying
                debug.write('SIGCHLD received\n')
                stack = []
                while frame:
                    stack.append(frame)
                    frame = frame.f_back
                stack.reverse()
                for frame in stack:
                    debug.write('Frame %s in %s at line %s\n' %
                            (frame.f_code.co_name,
                                frame.f_code.co_filename, frame.f_lineno))
                debug.write('Child is dead...Clean up now so there is no zombie\n')
                (pid, status) = os.wait()
                exitstatus, signum = status & 0xff, (status & 0xff00) >> 8
                debug.write('Child pid %d with exit status %d and signum %d\n' % (pid, exitstatus, signum))
            # Could be dangerous.  Comment this out.
            # signal.signal(signal.SIGCHLD, no_zombie)

            def container_started_cb(signum, frame):
                """Callback when child pycc service is ready"""
                self.container_started = True

            signal.signal(signal.SIGUSR1, container_started_cb)

            # Make sure the pycc process has the same sysname as the nose
            ccargs = ['bin/pycc', '--noshell', '-sp', '--sysname=%s' % self.sysname,
                    '--logcfg=res/config/logging.pycc.yml',
                    '--rel=%s' % self.rel,
                    "--config={'system': {'auto_bootstrap': True}}"]
            debug.write('Starting cc process: %s\n' % ' '.join(ccargs))
            newenv = os.environ.copy()
            po = subprocess.Popen(ccargs, env=newenv, close_fds=True)
            self.ccs.append(po)

            # Wait for container to be ready
            while not self.container_started:
                time.sleep(0.2)
            debug.write('Child container is ready...\n')

            # Dump datastore
            self.datastore_admin.dump_datastore(path='res/dd', compact=True)
            debug.write('Dump child container state to file...\n')

            # Clean again to make sure the first nosetest starts on a clean
            # slate
            self.datastore_admin.clear_datastore(prefix=self.sysname)
            # Enable CEI mode for the tests
            os.environ['CEI_LAUNCH_TEST'] = '1'

            debug.write('Start nose tests now...\n')
        except Exception as e:
            self.container_shutdown()
            raise e
Example #9
class PYCC(Plugin):
    name = 'pycc'

    def __init__(self):
        Plugin.__init__(self)
        self.ccs = []
        self.container_started = False
        self.blames = {'scidata':[], 'state':[], 'directory':[], 'events':[],
                'resources':[], 'objects':[]}
        self.last_blame = {}
        self.sysname = None

    def options(self, parser, env):
        """Register command line options"""
        super(PYCC, self).options(parser, env=env)
        parser.add_option('--pycc-rel', type='string', dest='pycc_rel',
                help='Rel file path, res/deploy/r2deploy.yml by default',
                default='res/deploy/r2deploy.yml')

    def configure(self, options, conf):
        """Configure the plugin and system, based on selected options."""
        super(PYCC, self).configure(options, conf)
        if self.enabled:
            self.rel = options.pycc_rel

    def begin(self):
        """Called before any tests are collected or run. Use this to
        perform any setup needed before testing begins.
        """
        # Make sure we initialize pyon before anything in this plugin executes
        from pyon.core import bootstrap
        if not bootstrap.pyon_initialized:
            bootstrap.bootstrap_pyon()

        try:
            from pyon.public import get_sys_name, CFG
            self.sysname = get_sys_name()

            # Clean exchanges and system queues out there
            try:
                rmh = RabbitManagementHelper(make_parser(), '-H %s -P 55672 -u %s -p %s -V %s'
                        % (CFG.server.amqp.host, CFG.server.amqp.username,
                        CFG.server.amqp.password, CFG.server.amqp.vhost))
                exchanges = rmh.list_names('exchanges')
                deleted = rmh.delete_names_with_prefix('exchange', exchanges, self.sysname)
                debug.write('Deleted exchanges:\n%s \n' % '\n'.join(deleted))
                queues = rmh.list_names('queues')
                deleted = rmh.delete_names_with_prefix('queue', queues, self.sysname)
                debug.write('Deleted queues:\n%s \n' % '\n'.join(deleted))
            except Exception as e:
                pass

            # Force datastore loader to use the same sysname
            from pyon.datastore.datastore_admin import DatastoreAdmin
            self.datastore_admin = DatastoreAdmin(config=CFG)

            self.datastore_admin.clear_datastore(prefix=self.sysname)

            def die(signum, frame):
                # For whatever reason, the parent doesn't die some times
                # when getting KeyboardInterrupt.  Hence this signal
                # handler.

                # Signal is pass through. The child pycc gets
                # its own KeyboardInterrupt and will shut down accordingly.
                debug.write('Received Keyboard Interrupt. Exiting now.\n')
                os._exit(9)

            signal.signal(signal.SIGINT, die)

            def no_zombie(signum, frame):
                # Debug to figure out who's dying
                debug.write('SIGCHLD received\n')
                stack = []
                while frame:
                    stack.append(frame)
                    frame = frame.f_back
                stack.reverse()
                for frame in stack:
                    debug.write('Frame %s in %s at line %s\n' %
                            (frame.f_code.co_name,
                                frame.f_code.co_filename, frame.f_lineno))
                debug.write('Child is dead...Clean up now so there is no zombie\n')
                (pid, status) = os.wait()
                exitstatus, signum = status & 0xff, (status & 0xff00) >> 8
                debug.write('Child pid %d with exit status %d and signum %d\n' % (pid, exitstatus, signum))
            # Could be dangerous.  Comment this out.
            # signal.signal(signal.SIGCHLD, no_zombie)

            def container_started_cb(signum, frame):
                """Callback when child pycc service is ready"""
                self.container_started = True

            signal.signal(signal.SIGUSR1, container_started_cb)

            # Make sure the pycc process has the same sysname as the nose
            ccargs = ['bin/pycc', '--noshell', '-sp', '--sysname=%s' % self.sysname,
                    '--logcfg=res/config/logging.pycc.yml',
                    '--rel=%s' % self.rel,
                    "--config={'system': {'auto_bootstrap': True}}"]
            debug.write('Starting cc process: %s\n' % ' '.join(ccargs))
            newenv = os.environ.copy()
            po = subprocess.Popen(ccargs, env=newenv, close_fds=True)
            self.ccs.append(po)

            # Wait for container to be ready
            while not self.container_started:
                time.sleep(0.2)
            debug.write('Child container is ready...\n')

            # Dump datastore
            self.datastore_admin.dump_datastore(path='res/dd', compact=True)
            debug.write('Dump child container state to file...\n')

            # Clean again to make sure the first nosetest starts on a clean
            # slate
            self.datastore_admin.clear_datastore(prefix=self.sysname)
            # Enable CEI mode for the tests
            os.environ['CEI_LAUNCH_TEST'] = '1'

            debug.write('Start nose tests now...\n')
        except Exception as e:
            self.container_shutdown()
            raise e

    def finalize(self, result):
        """Called after all report output, including output from all
        plugins, has been sent to the stream. Use this to print final
        test results or perform final cleanup. Return None to allow
        other plugins to continue printing, or any other value to stop
        them.
        """
        self.container_shutdown()
        self.datastore_admin.clear_datastore(prefix=self.sysname)
        import subprocess
        subprocess.call(['rm', '-rf', 'res/dd'])

    def container_shutdown(self):
        debug.write('Shut down cc process\n')
        for cc in self.ccs:
            debug.write('\tClosing container with pid:%d\n' % cc.pid)
            os.kill(cc.pid, signal.SIGINT)

    def beforeTest(self, test):
        os.environ['BLAME'] = test.id()

    def afterTest(self, test):
        blame = self.datastore_admin.get_blame_objects()
        # Having a hard time detecting skips.  Since skipped tests don't
        # clean we should not save duplicate blames...
        if blame != self.last_blame:
            for key in blame.keys():
                self.blames[key].extend(blame[key])
        self.last_blame = blame

    def report(self, stream):
        stream.write('Blame Report on leftover objects in couch db\n')
        stream.write('='* 20 + '\n')
        for key, value in self.blames.items():
            if value != []:
                stream.write(key + ':\n')
                stream.write('-'*20 + ':\n')
                last_blame = None
                for item in value:
                    blame = item['blame_']
                    if blame != last_blame:
                        stream.write(item['blame_'] + ':\n')
                    stream.write('\t' + str(item) + '\n')
                    last_blame = blame