    def setUp(self):
        # Save existing RAM instance and force a rebuild.
        self.orig_ram = RAM._RAM
        RAM._RAM = None
        RAM.configure('')

        self.user = getpass.getuser()
        self.node = platform.node()
        self.name = self.node.replace('.', '_')
        self.python = find_python()
        self.cluster = None

        if sys.platform == 'win32' or self.user not in SSH_USERS:
            self.skip_ssh = True
        else:
            self.skip_ssh = False

        self.machines = []
        self.machines.append({'hostname': self.node, 'python': self.python})

        # Ensure we aren't held up by local host load problems.
        for allocator in RAM.list_allocators():
            if allocator.name == 'LocalHost':
                self.local = allocator
                self.local.max_load = 10
                break
        else:
            raise RuntimeError('No LocalHost allocator!?')
    def config_ram(self, filename):
        """
        Configure the :class:`ResourceAllocationManager` instance from `filename`.
        Used to define resources needed for model execution.
        """
        self._logger.debug('config_ram %r', filename)
        from openmdao.main.resource import ResourceAllocationManager
        ResourceAllocationManager.configure(filename)
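For reference, the file handed to config_ram / ResourceAllocationManager.configure
is an INI-style resource configuration. A minimal sketch, with section names and
values chosen only for illustration (they mirror the format exercised by the
test_configure example later in this listing):

[LocalHost]
max_load: 100

[Local2]
classname: openmdao.main.resource.LocalAllocator
authkey: PublicKey
allow_shell: False
total_cpus: 42
max_load: 200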
            "--dmz-host",
            hostname,
            "--poll-delay",
            "1",
            "--ssh",
            " ".join(ssh),
            "--scp",
            " ".join(scp),
        )
        out = open("rje.stdout", "w")
        proc = subprocess.Popen(args, stdout=out, stderr=subprocess.STDOUT)
    finally:
        os.chdir(orig_dir)

    heartbeat = "%s=%s" % (os.path.join(_DMZ_ROOT, root), "heartbeat")
    for retry in range(20):
        time.sleep(0.5)
        if os.path.exists(heartbeat) and os.path.getsize(heartbeat) > 0:
            return proc
    raise RuntimeError("server startup timeout")


if __name__ == "__main__":
    sys.argv.append("--cover-package=nas_access.")
    sys.argv.append("--cover-erase")

    # Avoid having any user-defined resources causing problems during testing.
    RAM.configure("")

    nose.runmodule()
def main():  # pragma no cover
    """
    OpenMDAO factory service process.

    Usage: python objserverfactory.py [--allow-public][--allow-shell][--hosts=filename][--types=filename][--users=filename][--address=address][--port=number][--prefix=name][--tunnel][--resources=filename][--log-host=hostname][--log-port=number][--log-prefix=string]

    --allow-public:
        Allows access by anyone from any allowed host. Use with care!

    --allow-shell:
        Allows access to :meth:`execute_command` and :meth:`load_model`.
        Use with care!

    --hosts: string
        Filename for allowed hosts specification. Default ``hosts.allow``.
        Ignored if '--users' is specified.
        The file should contain IPv4 host addresses, IPv4 domain addresses,
        or hostnames, one per line. Blank lines are ignored, and '#' marks the
        start of a comment which continues to the end of the line.
        For security reasons this file must be accessible only by the user
        running this server.

    --types: string
        Filename for allowed types specification.
        If not specified then allow types listed by
        :meth:`factorymanager.get_available_types`.
        The file should contain one type name per line.

    --users: string
        Filename for allowed users specification.
        Ignored if '--allow-public' is specified.
        Default is ``~/.ssh/authorized_keys``, other files should be of the
        same format: each line has ``key-type public-key-data user@host``,
        where `user` is the username on `host`. `host` will be translated to an
        IPv4 address and included in the allowed hosts list.
        Note that this ``user@host`` form is not necessarily enforced by
        programs which generate keys.
        For security reasons this file must be accessible only by the user
        running this server.

    --address: string
        IPv4 address, hostname, or pipe name.
        Default is the host's default IPv4 address.

    --port: int
        Server port (default of 0 implies next available port).
        Note that ports below 1024 typically require special privileges.
        If port is negative, then a local pipe is used for communication.

    --prefix: string
        Prefix for configuration and stdout/stderr files (default ``server``).

    --tunnel:
        Report host IP address but listen for connections from a local
        SSH tunnel.

    --resources: string
        Filename for resource configuration. If not specified then the
        default of ``~/.openmdao/resources.cfg`` will be used.

    --log-host: string
        Hostname to send remote log messages to.

    --log-port: int
        Port on `log-host` to send remote log messages to.

    --log-prefix: string
        Prefix to apply to remote log messages. Default is ``pid@host``.

    If ``prefix.key`` exists, it is read for an authorization key string.
    Otherwise, public key authorization and encryption is used.

    Allowed hosts *must* be specified if `port` is >= 0. Only allowed hosts
    may connect to the server.

    Once initialized ``prefix.cfg`` is written with address, port, and
    public key information.
    """
    parser = optparse.OptionParser()
    parser.add_option('--address',
                      action='store',
                      type='str',
                      help='Network address to serve.')
    parser.add_option('--allow-public',
                      action='store_true',
                      default=False,
                      help='Allows access by any user, use with care!')
    parser.add_option('--allow-shell',
                      action='store_true',
                      default=False,
                      help='Allows potential shell access, use with care!')
    parser.add_option('--hosts',
                      action='store',
                      type='str',
                      default='hosts.allow',
                      help='Filename for allowed hosts')
    parser.add_option('--types',
                      action='store',
                      type='str',
                      help='Filename for allowed types')
    parser.add_option('--users',
                      action='store',
                      type='str',
                      default='~/.ssh/authorized_keys',
                      help='Filename for allowed users')
    parser.add_option('--port',
                      action='store',
                      type='int',
                      default=0,
                      help='Server port (0 implies next available port)')
    parser.add_option('--prefix',
                      action='store',
                      default='server',
                      help='Prefix for config and stdout/stderr files')
    parser.add_option('--tunnel',
                      action='store_true',
                      default=False,
                      help='Report host IP address but listen for connections'
                      ' from a local SSH tunnel')
    parser.add_option('--resources',
                      action='store',
                      type='str',
                      default=None,
                      help='Filename for resource configuration')
    parser.add_option('--log-host',
                      action='store',
                      type='str',
                      default=None,
                      help='hostname for remote log messages')
    parser.add_option('--log-port',
                      action='store',
                      type='int',
                      default=None,
                      help='port for remote log messages')
    parser.add_option('--log-prefix',
                      action='store',
                      type='str',
                      default=None,
                      help='prefix for remote log messages')

    options, arguments = parser.parse_args()
    if arguments:
        parser.print_help()
        sys.exit(1)

    logger = logging.getLogger()
    logger.setLevel(logging.DEBUG)
    if options.log_host and options.log_port:
        install_remote_handler(options.log_host, int(options.log_port),
                               options.log_prefix)

    server_key = options.prefix + '.key'
    server_cfg = options.prefix + '.cfg'
    global _SERVER_CFG
    _SERVER_CFG = server_cfg

    # Get authkey.
    authkey = 'PublicKey'
    try:
        with open(server_key, 'r') as inp:
            authkey = inp.readline().strip()
        os.remove(server_key)
    except IOError:
        pass

    if options.allow_shell:
        msg = 'Shell access is ALLOWED'
        logger.warning(msg)
        print msg

    allowed_users = None
    allowed_hosts = None

    # Get allowed_users.
    if options.allow_public:
        allowed_users = None
        msg = 'Public access is ALLOWED'
        logger.warning(msg)
        print msg

        if options.port >= 0:
            # Get allowed_hosts.
            if os.path.exists(options.hosts):
                try:
                    allowed_hosts = read_allowed_hosts(options.hosts)
                except Exception as exc:
                    msg = "Can't read allowed hosts file %r: %s" \
                          % (options.hosts, exc)
                    logger.error(msg)
                    print msg
                    sys.exit(1)
            else:
                msg = 'Allowed hosts file %r does not exist.' % options.hosts
                logger.error(msg)
                print msg
                sys.exit(1)

            if not allowed_hosts:
                msg = 'No hosts in allowed hosts file %r.' % options.hosts
                logger.error(msg)
                print msg
                sys.exit(1)
    else:
        if os.path.exists(options.users):
            try:
                allowed_users = read_authorized_keys(options.users, logger)
            except Exception as exc:
                msg = "Can't read allowed users file %r: %s" \
                      % (options.users, exc)
                logger.error(msg)
                print msg
                sys.exit(1)
        else:
            msg = 'Allowed users file %r does not exist.' % options.users
            logger.error(msg)
            print msg
            sys.exit(1)

        if not allowed_users:
            msg = 'No users in allowed users file %r.' % options.users
            logger.error(msg)
            print msg
            sys.exit(1)

    # Get allowed_types.
    allowed_types = None
    if options.types:
        if os.path.exists(options.types):
            allowed_types = []
            with open(options.types, 'r') as inp:
                line = inp.readline()
                while line:
                    line = line.strip()
                    if line:
                        allowed_types.append(line)
                    line = inp.readline()
        else:
            msg = 'Allowed types file %r does not exist.' % options.types
            logger.error(msg)
            print msg
            sys.exit(1)

    # Optionally configure resources.
    if options.resources:
        # Import here to avoid import loop.
        from openmdao.main.resource import ResourceAllocationManager as RAM
        RAM.configure(options.resources)

    # Get address and create manager.
    if options.port >= 0:
        if options.address:  # Specify IPv4/hostname.
            address = (options.address, options.port)
        else:
            address = (platform.node(), options.port)
    else:
        if options.address:  # Specify pipename.
            address = options.address
        else:
            address = None

    logger.info('Starting FactoryManager %s %r', address, keytype(authkey))
    current_process().authkey = authkey
    bind_address = ('127.0.0.1', options.port) if options.tunnel else address
    manager = _FactoryManager(bind_address,
                              authkey,
                              name='Factory',
                              allowed_hosts=allowed_hosts,
                              allowed_users=allowed_users,
                              allow_tunneling=options.tunnel)

    # Set defaults for created ObjServerFactories.
    # There isn't a good method to propagate these through the manager.
    ObjServerFactory._address = address
    ObjServerFactory._allow_shell = options.allow_shell
    ObjServerFactory._allowed_types = allowed_types
    ObjServerFactory._allow_tunneling = options.tunnel

    # Get server, retry if specified address is in use.
    server = None
    retries = 0
    while server is None:
        try:
            server = manager.get_server()
        except socket.error as exc:
            if str(exc).find('Address already in use') >= 0:
                if retries < 10:
                    msg = 'Address %s in use, retrying...' % (address, )
                    logger.debug(msg)
                    print msg
                    time.sleep(5)
                    retries += 1
                else:
                    msg = 'Address %s in use, too many retries.' % (address, )
                    logger.error(msg)
                    print msg
                    sys.exit(1)
            else:
                raise

    # Record configuration.
    real_ip = None if address is None else address[0]
    write_server_config(server, _SERVER_CFG, real_ip)
    msg = 'Serving on %s' % (server.address, )
    logger.info(msg)
    print msg
    sys.stdout.flush()

    # And away we go...
    signal.signal(signal.SIGTERM, _sigterm_handler)
    try:
        server.serve_forever()
    finally:
        _cleanup()
    sys.exit(0)
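For orientation, a hedged example of launching this factory service per the
Usage line above (the port number and filenames are assumptions, not taken from
the original source):

    python objserverfactory.py --port=1835 --hosts=hosts.allow --prefix=server

Once running, the server writes ``server.cfg`` (per the ``--prefix`` default)
with address, port, and public key information, as described in the docstring.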
Example #7
    def test_bad_host(self):
        logging.debug('')
        logging.debug('test_bad_host')

        if self.skip_ssh:
            logging.debug('    requires ssh, skipping')
            return

        self.machines.append({'hostname': 'xyzzy', 'python': self.python})
        self.cluster = ClusterAllocator(self.name, self.machines)
        self.assertEqual(len(self.cluster), len(self.machines) - 1)

    def test_bad_python(self):
        logging.debug('')
        logging.debug('test_bad_python')

        if self.skip_ssh:
            logging.debug('    requires ssh, skipping')
            return

        self.machines = [{'hostname': self.node, 'python': 'no-such-python'}]
        self.cluster = ClusterAllocator(self.name, self.machines)
        self.assertEqual(len(self.cluster), 0)


if __name__ == '__main__':
    # Avoid any user-defined resources from causing issues.
    ResourceAllocationManager.configure('')
    sys.argv.append('--cover-package=openmdao.main')
    sys.argv.append('--cover-erase')
    nose.runmodule()
Example #8
def main(): # pragma no cover
    """
    Runs the RJE server.

    Usage: python rje.py [--allocator=name][--dmz-host=name][--poll-delay=secs][--resources=filename]

    --allocator: string
        Allocator to provide remote access to. Default ``PBS``.

    --dmz-host: string
        DMZ file server to use. Default ``dmzfs1``.

    --poll-delay: int
        Maximum seconds between checks for new client activity. Default 60.

    --resources: string
        Filename for resource configuration. If not specified then the
        default of ``~/.openmdao/resources.cfg`` will be used.
    """

    parser = optparse.OptionParser()
    parser.add_option('--allocator', action='store', type='str', default='PBS',
                      help='Allocator to provide remote access to')
    parser.add_option('--dmz-host', action='store', type='str', default='dmzfs1',
                      help='DMZ file server to use')
    parser.add_option('--poll-delay', action='store', type='int', default=60,
                      help='Max seconds between checks for new client activity')
    parser.add_option('--resources', action='store', type='str', default=None,
                      help='Filename for resource configuration')
    parser.add_option('--ssh', action='store', type='str', default=None,
                      help='ssh command (used during testing)')
    parser.add_option('--scp', action='store', type='str', default=None,
                      help='scp command (used during testing)')

    options, arguments = parser.parse_args()
    if arguments:
        parser.print_help()
        sys.exit(1)

    logger = logging.getLogger()
    logger.setLevel(logging.DEBUG)

    # Configure ssh and scp.
    if options.ssh:
        configure_ssh(options.ssh.split())
    if options.scp:
        configure_scp(options.scp.split())

    # Optionally configure resources.
    if options.resources is not None:
        RAM.configure(options.resources)

    # Get allocator to wrap.
    try:
        allocator = RAM.get_allocator(options.allocator)
    except ValueError:
        msg = "Can't find allocator %r" % options.allocator
        print msg
        logger.error(msg)
        sys.exit(1)

    dmz_host = options.dmz_host
    poll_delay = options.poll_delay

    # Initialize DMZ protocol.
    server_init(dmz_host, logger)
    global _DMZ_HOST
    _DMZ_HOST = dmz_host
    msg = 'RJE server ready'
    print msg
    logger.info(msg)

    # Setup for cleanup by this process only.
    global _RJE_PID
    _RJE_PID = os.getpid()
    signal.signal(signal.SIGTERM, _sigterm_handler)

    # And away we go...
    wrappers = {}
    try:
        delay = 1  # Start with high polling rate.
        while True:
            conn_info, removed = server_accept(dmz_host, poll_delay, logger)
            for client in removed:
                wrapper = wrappers.pop(client, None)
                if wrapper is not None:
                    wrapper.shutdown()
            if conn_info is None:
                server_heartbeat(dmz_host, poll_delay, logger)
                delay = min(delay + 1, poll_delay)  # Back-off.
                time.sleep(delay)
            else:
                client, connection = conn_info
                wrapper = AllocatorWrapper(allocator, client, connection)
                handler = threading.Thread(name='%s_handler' % client,
                                           target=wrapper.process_requests)
                handler.daemon = True
                handler.start()
                wrappers[client] = wrapper
                delay = 1  # Reset.
    except KeyboardInterrupt:
        pass
    finally:
        _cleanup()
    sys.exit(0)
Example #9
        comp.sub_group.s1d = ['lkjhlk', '654', '#$%^']
        self.assertEqual(comp.get('sub_group.s1d'), ['lkjhlk', '654', '#$%^'])
        trait = comp.sub_group.get_trait('s1d')
        self.assertEqual(trait.desc, '1D string array')
        comp.pre_delete()

    def test_units(self):
        logging.debug('')
        logging.debug('test_units')

        self.assertFalse(analysis_server.have_translation('FroBoz'))
        self.assertTrue(analysis_server.have_translation('Btu/hr'))

        self.assertEqual(analysis_server.get_translation('Btu/hr'), 'Btu/h')
        self.assertEqual(analysis_server.get_translation('FroBoz'), 'FroBoz')

        analysis_server.set_translation('FroBoz', 'degF')
        self.assertTrue(analysis_server.have_translation('FroBoz'))
        self.assertEqual(analysis_server.get_translation('FroBoz'), 'degF')


if __name__ == '__main__':
    sys.argv.append('--cover-package=analysis_server')
    sys.argv.append('--cover-erase')

    # Avoid having any user-defined resources causing problems during testing.
    RAM.configure('')

    nose.runmodule()

Example #10
def run_openmdao_suite(argv=None):
    """This function is exported as a script that is runnable as part of
    an OpenMDAO virtual environment as openmdao test.
    
    This function wraps nosetests, so any valid nose args should also
    work here.
    """
    if argv is None:
        argv = sys.argv

    #Add any default packages/directories to search for tests to tlist.
    tlist = _get_openmdao_packages()

    break_check = ['--help', '-h', '--all']

    covpkg = False  # if True, --cover-package was specified by the user

    # check for args not starting with '-'
    args = argv
    for i, arg in enumerate(args):
        if arg.startswith('--cover-package'):
            covpkg = True
        if (i > 0 and not arg.startswith('-')) or arg in break_check:
            break
    else:  # no non '-' args, so assume they want to run the whole test suite
        args.append('--all')

    args.append('--exe')  # by default, nose will skip any .py files that are
    # executable. --exe prevents this behavior

    # Clobber cached eggsaver data in case Python environment has changed.
    base = os.path.expanduser(os.path.join('~', '.openmdao'))
    path = os.path.join(base, 'eggsaver.dat')
    if os.path.exists(path):
        os.remove(path)

    # Avoid having any user-defined resources causing problems during testing.
    ResourceAllocationManager.configure('')

    if '--with-coverage' in args:
        args.append('--cover-erase')
        if '--all' in args and not covpkg:
            for pkg in tlist:
                opt = '--cover-package=%s' % pkg
                if opt not in args:
                    args.append(opt)

            # Better coverage if we clobber credential data.
            path = os.path.join(base, 'keys')
            if os.path.exists(path):
                os.remove(path)

    # this tells it to enable the console in the environment so that
    # the logger will print output to stdout. This helps greatly when
    # debugging openmdao scripts running in separate processes.
    if '--enable_console' in args:
        args.remove('--enable_console')
        os.environ['OPENMDAO_ENABLE_CONSOLE'] = '1'

    if '--all' in args:
        args.remove('--all')
        args.extend(tlist)

    # some libs we use call multiprocessing.cpu_count() on import, which can
    # raise NotImplementedError, so try to monkeypatch it here to return 1 if
    # that's the case
    try:
        import multiprocessing
        multiprocessing.cpu_count()
    except ImportError:
        pass
    except NotImplementedError:
        multiprocessing.cpu_count = lambda: 1

    nose.run_exit(argv=args)
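Because run_openmdao_suite simply wraps nosetests, it can be driven with the
same arguments handled above; a hedged usage sketch (the package name is an
assumption, any valid nose target works):

    openmdao test --all --with-coverage
    openmdao test openmdao.main -v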
Example #11
    def test_configure(self):
        logging.debug('')
        logging.debug('test_configure')

        # Reconfigure.
        with open('resources.cfg', 'w') as out:
            out.write("""
[LocalHost]
max_load: 100
""")
        local = RAM.get_allocator('LocalHost')
        max_load = local.max_load
        try:
            self.assertTrue(max_load < 100)
            RAM.configure('resources.cfg')
            self.assertEqual(local.max_load, 100)
            local.max_load = max_load
        finally:
            os.remove('resources.cfg')

        # Add another local.
        with open('resources.cfg', 'w') as out:
            out.write("""
[Local2]
classname: openmdao.main.resource.LocalAllocator
authkey: PublicKey
allow_shell: False
total_cpus: 42
max_load: 200
""")
        try:
            RAM.configure('resources.cfg')
            local2 = RAM.get_allocator('Local2')
            self.assertEqual(local2.factory._authkey, 'PublicKey')
            self.assertEqual(local2.factory._allow_shell, False)
            self.assertEqual(local2.total_cpus, 42)
            self.assertEqual(local2.max_load, 200)
            self.assertEqual(local2.host, socket.gethostname())
            self.assertTrue(local2.pid > 0)
            RAM.remove_allocator('Local2')
        finally:
            os.remove('resources.cfg')

        # Bad local total_cpus.
        with open('resources.cfg', 'w') as out:
            out.write("""
[Local2]
classname: openmdao.main.resource.LocalAllocator
total_cpus: 0
""")
        try:
            assert_raises(self, "RAM.configure('resources.cfg')", globals(),
                          locals(), ValueError,
                          'Local2: total_cpus must be > 0, got 0')
        finally:
            os.remove('resources.cfg')

        # Bad local max_load.
        with open('resources.cfg', 'w') as out:
            out.write("""
[Local2]
classname: openmdao.main.resource.LocalAllocator
max_load: 0
""")
        try:
            assert_raises(self, "RAM.configure('resources.cfg')", globals(),
                          locals(), ValueError,
                          'Local2: max_load must be > 0, got 0')
        finally:
            os.remove('resources.cfg')

        # Bad module.
        with open('resources.cfg', 'w') as out:
            out.write("""
[BadModule]
classname: no-such-module.Allocator
max_load: 100
""")
        try:
            assert_raises(
                self, "RAM.configure('resources.cfg')", globals(), locals(),
                RuntimeError, "RAM configure BadModule: can't import"
                " 'no-such-module'")
        finally:
            os.remove('resources.cfg')

        # Bad class.
        with open('resources.cfg', 'w') as out:
            out.write("""
[BadClass]
classname: openmdao.main.resource.NoSuchAllocator
max_load: 100
""")
        try:
            assert_raises(
                self, "RAM.configure('resources.cfg')", globals(), locals(),
                RuntimeError, "RAM configure BadClass: no class"
                " 'NoSuchAllocator' in openmdao.main.resource")
        finally:
            os.remove('resources.cfg')

        # Add, insert, get, remove.
        local3 = LocalAllocator('Local3')
        local4 = LocalAllocator('Local4', total_cpus=4)
        RAM.add_allocator(local3)
        try:
            allocator_names = \
                [allocator.name for allocator in RAM.list_allocators()]
            self.assertEqual(allocator_names, ['LocalHost', 'Local3'])
            self.assertTrue(RAM.get_allocator('Local3') is local3)
            self.assertTrue(RAM.get_allocator(1) is local3)
            RAM.insert_allocator(0, local4)
            try:
                allocator_names = \
                    [allocator.name for allocator in RAM.list_allocators()]
                self.assertEqual(allocator_names,
                                 ['Local4', 'LocalHost', 'Local3'])
            finally:
                RAM.remove_allocator('Local4')
        finally:
            RAM.remove_allocator(1)

        assert_raises(self, "RAM.get_allocator('Local3')", globals(), locals(),
                      ValueError, "allocator 'Local3' not found")

        assert_raises(self, "RAM.remove_allocator('Local3')", globals(),
                      locals(), ValueError, "allocator 'Local3' not found")

        assert_raises(self, "LocalAllocator('BadLoad', max_load=-2)",
                      globals(), locals(), ValueError,
                      "BadLoad: max_load must be > 0, got -2")
Example #12
def main():  # pragma no cover
    """
    Runs the RJE server.

    Usage: python rje.py [--allocator=name][--dmz-host=name][--poll-delay=secs][--resources=filename]

    --allocator: string
        Allocator to provide remote access to. Default ``PBS``.

    --dmz-host: string
        DMZ file server to use. Default ``dmzfs1``.

    --poll-delay: int
        Maximum seconds between checks for new client activity. Default 60.

    --resources: string
        Filename for resource configuration. If not specified then the
        default of ``~/.openmdao/resources.cfg`` will be used.
    """

    parser = optparse.OptionParser()
    parser.add_option('--allocator',
                      action='store',
                      type='str',
                      default='PBS',
                      help='Allocator to provide remote access to')
    parser.add_option('--dmz-host',
                      action='store',
                      type='str',
                      default='dmzfs1',
                      help='DMZ file server to use')
    parser.add_option(
        '--poll-delay',
        action='store',
        type='int',
        default=60,
        help='Max seconds between checks for new client activity')
    parser.add_option('--resources',
                      action='store',
                      type='str',
                      default=None,
                      help='Filename for resource configuration')
    parser.add_option('--ssh',
                      action='store',
                      type='str',
                      default=None,
                      help='ssh command (used during testing)')
    parser.add_option('--scp',
                      action='store',
                      type='str',
                      default=None,
                      help='scp command (used during testing)')

    options, arguments = parser.parse_args()
    if arguments:
        parser.print_help()
        sys.exit(1)

    logger = logging.getLogger()
    logger.setLevel(logging.DEBUG)

    # Configure ssh and scp.
    if options.ssh:
        configure_ssh(options.ssh.split())
    if options.scp:
        configure_scp(options.scp.split())

    # Optionally configure resources.
    if options.resources is not None:
        RAM.configure(options.resources)

    # Get allocator to wrap.
    try:
        allocator = RAM.get_allocator(options.allocator)
    except ValueError:
        msg = "Can't find allocator %r" % options.allocator
        print msg
        logger.error(msg)
        sys.exit(1)

    dmz_host = options.dmz_host
    poll_delay = options.poll_delay

    # Initialize DMZ protocol.
    server_init(dmz_host, logger)
    global _DMZ_HOST
    _DMZ_HOST = dmz_host
    msg = 'RJE server ready'
    print msg
    logger.info(msg)

    # Setup for cleanup by this process only.
    global _RJE_PID
    _RJE_PID = os.getpid()
    signal.signal(signal.SIGTERM, _sigterm_handler)

    # And away we go...
    wrappers = {}
    try:
        delay = 1  # Start with high polling rate.
        while True:
            conn_info, removed = server_accept(dmz_host, poll_delay, logger)
            for client in removed:
                wrapper = wrappers.pop(client, None)
                if wrapper is not None:
                    wrapper.shutdown()
            if conn_info is None:
                server_heartbeat(dmz_host, poll_delay, logger)
                delay = min(delay + 1, poll_delay)  # Back-off.
                time.sleep(delay)
            else:
                client, connection = conn_info
                wrapper = AllocatorWrapper(allocator, client, connection)
                handler = threading.Thread(name='%s_handler' % client,
                                           target=wrapper.process_requests)
                handler.daemon = True
                handler.start()
                wrappers[client] = wrapper
                delay = 1  # Reset.
    except KeyboardInterrupt:
        pass
    finally:
        _cleanup()
    sys.exit(0)
Example #13
def run_openmdao_suite(argv=None):
    """This function is exported as a script that is runnable as part of
    an OpenMDAO virtual environment as openmdao test.
    
    This function wraps nosetests, so any valid nose args should also
    work here.
    """
    if argv is None:
        argv = sys.argv

    #Add any default packages/directories to search for tests to tlist.
    tlist = _get_openmdao_packages()

    break_check = ['--help', '-h', '--all']

    covpkg = False  # if True, --cover-package was specified by the user

    # check for args not starting with '-'
    args = argv[:]
    for i, arg in enumerate(args):
        if arg.startswith('--cover-package'):
            covpkg = True
        if (i > 0 and not arg.startswith('-')) or arg in break_check:
            break
    else:  # no non '-' args, so assume they want to run the default test suite
        # in a release install, default is the set of tests specified in release_tests.cfg
        if not is_dev_install() or '--small' in args:
            if '--small' in args:
                args.remove('--small')
            args.extend([
                '-c',
                os.path.join(os.path.dirname(__file__), 'release_tests.cfg')
            ])
        else:  # in a dev install, default is all tests
            args.append('--all')

    args.append('--exe')  # by default, nose will skip any .py files that are
    # executable. --exe prevents this behavior

    # Clobber cached data in case Python environment has changed.
    base = os.path.expanduser(os.path.join('~', '.openmdao'))
    for name in ('eggsaver.dat', 'fileanalyzer.dat'):
        path = os.path.join(base, name)
        if os.path.exists(path):
            os.remove(path)

    # Avoid having any user-defined resources causing problems during testing.
    ResourceAllocationManager.configure('')

    if '--with-coverage' in args:
        args.append('--cover-erase')
        if '--all' in args and not covpkg:
            for pkg in tlist:
                opt = '--cover-package=%s' % pkg
                if opt not in args:
                    args.append(opt)

            # Better coverage if we clobber credential data.
            path = os.path.join(base, 'keys')
            if os.path.exists(path):
                os.remove(path)

    # this tells it to enable the console in the environment so that
    # the logger will print output to stdout. This helps greatly when
    # debugging openmdao scripts running in separate processes.
    if '--enable_console' in args:
        args.remove('--enable_console')
        os.environ['OPENMDAO_ENABLE_CONSOLE'] = '1'

    if '--all' in args:
        args.remove('--all')
        args.extend(tlist)

    if '--plugins' in args:
        args.remove('--plugins')
        from openmdao.main.plugin import plugin_install, _get_plugin_parser
        argv = ['install', '--all']
        parser = _get_plugin_parser()
        options, argz = parser.parse_known_args(argv)
        plugin_install(parser, options, argz)

    # The default action should be to run the GUI functional tests.
    # The 'win32' test here is to allow easily changing the default for Windows
    # where testing still has occasional problems not terminating on EC2.
    if sys.platform == 'win32':
        do_gui_tests = False
    else:
        do_gui_tests = True

    # run GUI functional tests, overriding default action
    if '--gui' in args:
        args.remove('--gui')
        do_gui_tests = True

    # skip GUI functional tests, overriding default action
    if '--skip-gui' in args:
        args.remove('--skip-gui')
        do_gui_tests = False

    if not do_gui_tests:
        os.environ['OPENMDAO_SKIP_GUI'] = '1'

    # some libs we use call multiprocessing.cpu_count() on import, which can
    # raise NotImplementedError, so try to monkeypatch it here to return 1 if
    # that's the case
    try:
        import multiprocessing
        multiprocessing.cpu_count()
    except ImportError:
        pass
    except NotImplementedError:
        multiprocessing.cpu_count = lambda: 1


#    _trace_atexit()
    nose.run_exit(argv=args)
Example #14
        self.assertEqual(comp.sub_group.s1d, ['froboz', 'xyzzy'])
        comp.sub_group.s1d = ['lkjhlk', '654', '#$%^']
        self.assertEqual(comp.get('sub_group.s1d'), ['lkjhlk', '654', '#$%^'])
        trait = comp.sub_group.get_trait('s1d')
        self.assertEqual(trait.desc, '1D string array')
        comp.pre_delete()

    def test_units(self):
        logging.debug('')
        logging.debug('test_units')

        self.assertFalse(analysis_server.have_translation('FroBoz'))
        self.assertTrue(analysis_server.have_translation('Btu/hr'))

        self.assertEqual(analysis_server.get_translation('Btu/hr'), 'Btu/h')
        self.assertEqual(analysis_server.get_translation('FroBoz'), 'FroBoz')

        analysis_server.set_translation('FroBoz', 'degF')
        self.assertTrue(analysis_server.have_translation('FroBoz'))
        self.assertEqual(analysis_server.get_translation('FroBoz'), 'degF')


if __name__ == '__main__':
    sys.argv.append('--cover-package=analysis_server')
    sys.argv.append('--cover-erase')

    # Avoid having any user-defined resources causing problems during testing.
    RAM.configure('')

    nose.runmodule()
Example #15
    def test_bad_host(self):
        logging.debug('')
        logging.debug('test_bad_host')

        if self.skip_ssh:
            logging.debug('    requires ssh, skipping')
            return

        self.machines.append({'hostname':'xyzzy', 'python':self.python})
        self.cluster = ClusterAllocator(self.name, self.machines)
        self.assertEqual(len(self.cluster), len(self.machines)-1)

    def test_bad_python(self):
        logging.debug('')
        logging.debug('test_bad_python')

        if self.skip_ssh:
            logging.debug('    requires ssh, skipping')
            return

        self.machines = [{'hostname':self.node, 'python':'no-such-python'}]
        self.cluster = ClusterAllocator(self.name, self.machines)
        self.assertEqual(len(self.cluster), 0)


if __name__ == '__main__':
    # Avoid any user-defined resources from causing issues.
    ResourceAllocationManager.configure('')
    sys.argv.append('--cover-package=openmdao.main')
    sys.argv.append('--cover-erase')
    nose.runmodule()

Example #16
def run_openmdao_suite(argv=None):
    """This function is exported as a script that is runnable as part of
    an OpenMDAO virtual environment as openmdao test.
    
    This function wraps nosetests, so any valid nose args should also
    work here.
    """
    if argv is None:
        argv = sys.argv

    #Add any default packages/directories to search for tests to tlist.
    tlist = _get_openmdao_packages()
    
    break_check = ['--help', '-h', '--all']
    
    covpkg = False # if True, --cover-package was specified by the user
    
    # check for args not starting with '-'
    args = argv
    for i, arg in enumerate(args):
        if arg.startswith('--cover-package'):
            covpkg = True
        if (i>0 and not arg.startswith('-')) or arg in break_check:
            break
    else:  # no non '-' args, so assume they want to run the whole test suite
        args.append('--all')
        
    args.append('--exe') # by default, nose will skip any .py files that are
                         # executable. --exe prevents this behavior
    
    # Clobber cached eggsaver data in case Python environment has changed.
    base = os.path.expanduser(os.path.join('~', '.openmdao'))
    path = os.path.join(base, 'eggsaver.dat')
    if os.path.exists(path):
        os.remove(path)

    # Avoid having any user-defined resources causing problems during testing.
    ResourceAllocationManager.configure('')

    if '--with-coverage' in args:
        args.append('--cover-erase')
        if '--all' in args and not covpkg:
            for pkg in tlist:
                opt = '--cover-package=%s' % pkg
                if opt not in args:
                    args.append(opt)

            # Better coverage if we clobber credential data.
            path = os.path.join(base, 'keys')
            if os.path.exists(path):
                os.remove(path)

    # this tells it to enable the console in the environment so that
    # the logger will print output to stdout. This helps greatly when 
    # debugging openmdao scripts running in separate processes.
    if '--enable_console' in args:
        args.remove('--enable_console')
        os.environ['OPENMDAO_ENABLE_CONSOLE'] = '1'

    if '--all' in args:
        args.remove('--all')
        args.extend(tlist)
        
    # some libs we use call multiprocessing.cpu_count() on import, which can
    # raise NotImplementedError, so try to monkeypatch it here to return 1 if
    # that's the case
    try:
        import multiprocessing
        multiprocessing.cpu_count()
    except ImportError:
        pass
    except NotImplementedError:
        multiprocessing.cpu_count = lambda: 1
    
    nose.run_exit(argv=args)
Example #17
    def test_configure(self):
        logging.debug('')
        logging.debug('test_configure')

        # Reconfigure.
        with open('resources.cfg', 'w') as out:
            out.write("""
[LocalHost]
max_load: 100
""")
        local = RAM.get_allocator('LocalHost')
        max_load = local.max_load
        try:
            self.assertTrue(max_load < 100)
            RAM.configure('resources.cfg')
            self.assertEqual(local.max_load, 100)
            local.max_load = max_load
        finally:
            os.remove('resources.cfg')

        # Add another local.
        with open('resources.cfg', 'w') as out:
            out.write("""
[Local2]
classname: openmdao.main.resource.LocalAllocator
authkey: PublicKey
allow_shell: False
total_cpus: 42
max_load: 200
""")
        try:
            RAM.configure('resources.cfg')
            local2 = RAM.get_allocator('Local2')
            self.assertEqual(local2.factory._authkey, 'PublicKey')
            self.assertEqual(local2.factory._allow_shell, False)
            self.assertEqual(local2.total_cpus, 42)
            self.assertEqual(local2.max_load, 200)
            self.assertEqual(local2.host, socket.gethostname())
            self.assertTrue(local2.pid > 0)
            RAM.remove_allocator('Local2')
        finally:
            os.remove('resources.cfg')

        # Bad local total_cpus.
        with open('resources.cfg', 'w') as out:
            out.write("""
[Local2]
classname: openmdao.main.resource.LocalAllocator
total_cpus: 0
""")
        try:
            assert_raises(self, "RAM.configure('resources.cfg')",
                          globals(), locals(), ValueError,
                          'Local2: total_cpus must be > 0, got 0')
        finally:
            os.remove('resources.cfg')

        # Bad local max_load.
        with open('resources.cfg', 'w') as out:
            out.write("""
[Local2]
classname: openmdao.main.resource.LocalAllocator
max_load: 0
""")
        try:
            assert_raises(self, "RAM.configure('resources.cfg')",
                          globals(), locals(), ValueError,
                          'Local2: max_load must be > 0, got 0')
        finally:
            os.remove('resources.cfg')

        # Bad module.
        with open('resources.cfg', 'w') as out:
            out.write("""
[BadModule]
classname: no-such-module.Allocator
max_load: 100
""")
        try:
            assert_raises(self, "RAM.configure('resources.cfg')",
                          globals(), locals(), RuntimeError,
                          "RAM configure BadModule: can't import"
                          " 'no-such-module'")
        finally:
            os.remove('resources.cfg')

        # Bad class.
        with open('resources.cfg', 'w') as out:
            out.write("""
[BadClass]
classname: openmdao.main.resource.NoSuchAllocator
max_load: 100
""")
        try:
            assert_raises(self, "RAM.configure('resources.cfg')",
                          globals(), locals(), RuntimeError,
                          "RAM configure BadClass: no class"
                          " 'NoSuchAllocator' in openmdao.main.resource")
        finally:
            os.remove('resources.cfg')

        # Add, insert, get, remove.
        local3 = LocalAllocator('Local3')
        local4 = LocalAllocator('Local4', total_cpus=4)
        RAM.add_allocator(local3)
        try:
            allocator_names = \
                [allocator.name for allocator in RAM.list_allocators()]
            self.assertEqual(allocator_names, ['LocalHost', 'Local3'])
            self.assertTrue(RAM.get_allocator('Local3') is local3)
            self.assertTrue(RAM.get_allocator(1) is local3)
            RAM.insert_allocator(0, local4)
            try:
                allocator_names = \
                    [allocator.name for allocator in RAM.list_allocators()]
                self.assertEqual(allocator_names,
                                 ['Local4', 'LocalHost', 'Local3'])
            finally:
                RAM.remove_allocator('Local4')
        finally:
            RAM.remove_allocator(1)

        assert_raises(self, "RAM.get_allocator('Local3')",
                      globals(), locals(), ValueError,
                      "allocator 'Local3' not found")

        assert_raises(self, "RAM.remove_allocator('Local3')",
                      globals(), locals(), ValueError,
                      "allocator 'Local3' not found")

        assert_raises(self, "LocalAllocator('BadLoad', max_load=-2)",
                      globals(), locals(), ValueError,
                      "BadLoad: max_load must be > 0, got -2")
Example #18
def main():  # pragma no cover
    """
    OpenMDAO factory service process.

    Usage: python objserverfactory.py [--allow-public][--allow-shell][--hosts=filename][--types=filename][--users=filename][--address=address][--port=number][--prefix=name][--tunnel][--resources=filename][--log-host=hostname][--log-port=number][--log-prefix=string]

    --allow-public:
        Allows access by anyone from any allowed host. Use with care!

    --allow-shell:
        Allows access to :meth:`execute_command` and :meth:`load_model`.
        Use with care!

    --hosts: string
        Filename for allowed hosts specification. Default ``hosts.allow``.
        Ignored if '--users' is specified.
        The file should contain IPv4 host addresses, IPv4 domain addresses,
        or hostnames, one per line. Blank lines are ignored, and '#' marks the
        start of a comment which continues to the end of the line.
        For security reasons this file must be accessible only by the user
        running this server.

    --types: string
        Filename for allowed types specification.
        If not specified then allow types listed by
        :meth:`factorymanager.get_available_types`.
        The file should contain one type name per line.

    --users: string
        Filename for allowed users specification.
        Ignored if '--allow-public' is specified.
        Default is ``~/.ssh/authorized_keys``, other files should be of the
        same format: each line has ``key-type public-key-data user@host``,
        where `user` is the username on `host`. `host` will be translated to an
        IPv4 address and included in the allowed hosts list.
        Note that this ``user@host`` form is not necessarily enforced by
        programs which generate keys.
        For security reasons this file must be accessible only by the user
        running this server.

    --address: string
        IPv4 address, hostname, or pipe name.
        Default is the host's default IPv4 address.

    --port: int
        Server port (default of 0 implies next available port).
        Note that ports below 1024 typically require special privileges.
        If port is negative, then a local pipe is used for communication.

    --prefix: string
        Prefix for configuration and stdout/stderr files (default ``server``).

    --tunnel:
        Report host IP address but listen for connections from a local
        SSH tunnel.

    --resources: string
        Filename for resource configuration. If not specified then the
        default of ``~/.openmdao/resources.cfg`` will be used.

    --log-host: string
        Hostname to send remote log messages to.

    --log-port: int
        Port on `log-host` to send remote log messages to.

    --log-prefix: string
        Prefix to apply to remote log messages. Default is ``pid@host``.

    If ``prefix.key`` exists, it is read for an authorization key string.
    Otherwise public key authorization and encryption is used.

    Allowed hosts *must* be specified if `port` is >= 0. Only allowed hosts
    may connect to the server.

    Once initialized ``prefix.cfg`` is written with address, port, and
    public key information.
    """
    parser = optparse.OptionParser()
    parser.add_option('--address', action='store', type='str',
                      help='Network address to serve.')
    parser.add_option('--allow-public', action='store_true', default=False,
                      help='Allows access by any user, use with care!')
    parser.add_option('--allow-shell', action='store_true', default=False,
                      help='Allows potential shell access, use with care!')
    parser.add_option('--hosts', action='store', type='str',
                      default='hosts.allow', help='Filename for allowed hosts')
    parser.add_option('--types', action='store', type='str',
                      help='Filename for allowed types')
    parser.add_option('--users', action='store', type='str',
                      default='~/.ssh/authorized_keys',
                      help='Filename for allowed users')
    parser.add_option('--port', action='store', type='int', default=0,
                      help='Server port (0 implies next available port)')
    parser.add_option('--prefix', action='store', default='server',
                      help='Prefix for config and stdout/stderr files')
    parser.add_option('--tunnel', action='store_true', default=False,
                      help='Report host IP address but listen for connections'
                           ' from a local SSH tunnel')
    parser.add_option('--resources', action='store', type='str',
                      default=None, help='Filename for resource configuration')
    parser.add_option('--log-host', action='store', type='str',
                      default=None, help='hostname for remote log messages')
    parser.add_option('--log-port', action='store', type='int',
                      default=None, help='port for remote log messages')
    parser.add_option('--log-prefix', action='store', type='str',
                      default=None, help='prefix for remote log messages')

    options, arguments = parser.parse_args()
    if arguments:
        parser.print_help()
        sys.exit(1)

    logger = logging.getLogger()
    logger.setLevel(logging.DEBUG)
    if options.log_host and options.log_port:
        install_remote_handler(options.log_host, int(options.log_port),
                               options.log_prefix)

    server_key = options.prefix+'.key'
    server_cfg = options.prefix+'.cfg'
    global _SERVER_CFG
    _SERVER_CFG = server_cfg

    # Get authkey.
    authkey = 'PublicKey'
    try:
        with open(server_key, 'r') as inp:
            authkey = inp.readline().strip()
        os.remove(server_key)
    except IOError:
        pass

    if options.allow_shell:
        msg = 'Shell access is ALLOWED'
        logger.warning(msg)
        print msg

    allowed_users = None
    allowed_hosts = None

    # Get allowed_users.
    if options.allow_public:
        allowed_users = None
        msg = 'Public access is ALLOWED'
        logger.warning(msg)
        print msg

        if options.port >= 0:
            # Get allowed_hosts.
            if os.path.exists(options.hosts):
                try:
                    allowed_hosts = read_allowed_hosts(options.hosts)
                except Exception as exc:
                    msg = "Can't read allowed hosts file %r: %s" \
                          % (options.hosts, exc)
                    logger.error(msg)
                    print msg
                    sys.exit(1)
            else:
                msg = 'Allowed hosts file %r does not exist.' % options.hosts
                logger.error(msg)
                print msg
                sys.exit(1)

            if not allowed_hosts:
                msg = 'No hosts in allowed hosts file %r.' % options.hosts
                logger.error(msg)
                print msg
                sys.exit(1)
    else:
        if os.path.exists(options.users):
            try:
                allowed_users = read_authorized_keys(options.users, logger)
            except Exception as exc:
                msg = "Can't read allowed users file %r: %s" \
                      % (options.users, exc)
                logger.error(msg)
                print msg
                sys.exit(1)
        else:
            msg = 'Allowed users file %r does not exist.' % options.users
            logger.error(msg)
            print msg
            sys.exit(1)

        if not allowed_users:
            msg = 'No users in allowed users file %r.' % options.users
            logger.error(msg)
            print msg
            sys.exit(1)

    # Get allowed_types.
    allowed_types = None
    if options.types:
        if os.path.exists(options.types):
            allowed_types = []
            with open(options.types, 'r') as inp:
                line = inp.readline()
                while line:
                    line = line.strip()
                    if line:
                        allowed_types.append(line)
                    line = inp.readline()
        else:
            msg = 'Allowed types file %r does not exist.' % options.types
            logger.error(msg)
            print msg
            sys.exit(1)

    # Optionally configure resources.
    if options.resources:
        # Import here to avoid import loop.
        from openmdao.main.resource import ResourceAllocationManager as RAM
        RAM.configure(options.resources)

    # Get address and create manager.
    if options.port >= 0:
        if options.address:  # Specify IPv4/hostname.
            address = (options.address, options.port)
        else:
            address = (platform.node(), options.port)
    else:
        if options.address:  # Specify pipename.
            address = options.address
        else:
            address = None

    logger.info('Starting FactoryManager %s %r', address, keytype(authkey))
    current_process().authkey = authkey
    bind_address = ('127.0.0.1', options.port) if options.tunnel else address
    manager = _FactoryManager(bind_address, authkey, name='Factory',
                              allowed_hosts=allowed_hosts,
                              allowed_users=allowed_users,
                              allow_tunneling=options.tunnel)

    # Set defaults for created ObjServerFactories.
    # There isn't a good method to propagate these through the manager.
    ObjServerFactory._address = address
    ObjServerFactory._allow_shell = options.allow_shell
    ObjServerFactory._allowed_types = allowed_types
    ObjServerFactory._allow_tunneling = options.tunnel

    # Get server, retry if specified address is in use.
    server = None
    retries = 0
    while server is None:
        try:
            server = manager.get_server()
        except socket.error as exc:
            if str(exc).find('Address already in use') >= 0:
                if retries < 10:
                    msg = 'Address %s in use, retrying...' % (address,)
                    logger.debug(msg)
                    print msg
                    time.sleep(5)
                    retries += 1
                else:
                    msg = 'Address %s in use, too many retries.' % (address,)
                    logger.error(msg)
                    print msg
                    sys.exit(1)
            else:
                raise

    # Record configuration.
    real_ip = None if address is None else address[0]
    write_server_config(server, _SERVER_CFG, real_ip)
    msg = 'Serving on %s' % (server.address,)
    logger.info(msg)
    print msg
    sys.stdout.flush()

    # And away we go...
    signal.signal(signal.SIGTERM, _sigterm_handler)
    try:
        server.serve_forever()
    finally:
        _cleanup()
    sys.exit(0)
Example #19
def run_openmdao_suite(argv=None):
    """This function is exported as a script that is runnable as part of
    an OpenMDAO virtual environment as openmdao test.
    
    This function wraps nosetests, so any valid nose args should also
    work here.
    """
    if argv is None:
        argv = sys.argv

    #Add any default packages/directories to search for tests to tlist.
    tlist = _get_openmdao_packages()
    
    break_check = ['--help', '-h', '--all']
    
    covpkg = False # if True, --cover-package was specified by the user
    
    # check for args not starting with '-'
    args = argv[:]
    for i, arg in enumerate(args):
        if arg.startswith('--cover-package'):
            covpkg = True
        if (i>0 and not arg.startswith('-')) or arg in break_check:
            break
    else:  # no non '-' args, so assume they want to run the default test suite
        # in a release install, default is the set of tests specified in release_tests.cfg
        if not is_dev_install() or '--small' in args:
            if '--small' in args:
                args.remove('--small')
            args.extend(['-c', os.path.join(os.path.dirname(__file__), 'release_tests.cfg')])
        else: # in a dev install, default is all tests
            args.append('--all') 
        
    args.append('--exe') # by default, nose will skip any .py files that are
                         # executable. --exe prevents this behavior
    
    # Clobber cached data in case Python environment has changed.
    base = os.path.expanduser(os.path.join('~', '.openmdao'))
    for name in ('eggsaver.dat', 'fileanalyzer.dat'):
        path = os.path.join(base, name)
        if os.path.exists(path):
            os.remove(path)

    # Avoid having any user-defined resources causing problems during testing.
    ResourceAllocationManager.configure('')

    if '--with-coverage' in args:
        args.append('--cover-erase')
        if '--all' in args and not covpkg:
            for pkg in tlist:
                opt = '--cover-package=%s' % pkg
                if opt not in args:
                    args.append(opt)

            # Better coverage if we clobber credential data.
            path = os.path.join(base, 'keys')
            if os.path.exists(path):
                os.remove(path)

    # this tells it to enable the console in the environment so that
    # the logger will print output to stdout. This helps greatly when 
    # debugging openmdao scripts running in separate processes.
    if '--enable_console' in args:
        args.remove('--enable_console')
        os.environ['OPENMDAO_ENABLE_CONSOLE'] = '1'
        
    if '--all' in args:
        args.remove('--all')
        args.extend(tlist)
    
    if '--plugins' in args:
        args.remove('--plugins')
        from openmdao.main.plugin import plugin_install, _get_plugin_parser
        argv = ['install', '--all']
        parser = _get_plugin_parser()
        options, argz = parser.parse_known_args(argv) 
        plugin_install(parser, options, argz)

    # The default action should be to run the GUI functional tests.
    # The 'win32' test here is to allow easily changing the default for Windows
    # where testing still has occasional problems not terminating on EC2.
    if sys.platform == 'win32':
        do_gui_tests = False
    else:
        do_gui_tests = True

    # run GUI functional tests, overriding default action
    if '--gui' in args:
        args.remove('--gui')
        do_gui_tests = True

    # skip GUI functional tests, overriding default action
    if '--skip-gui' in args:
        args.remove('--skip-gui')
        do_gui_tests = False

    if not do_gui_tests:
        os.environ['OPENMDAO_SKIP_GUI'] = '1'

    # some libs we use call multiprocessing.cpu_count() on import, which can
    # raise NotImplementedError, so try to monkeypatch it here to return 1 if
    # that's the case
    try:
        import multiprocessing
        multiprocessing.cpu_count()
    except ImportError:
        pass
    except NotImplementedError:
        multiprocessing.cpu_count = lambda: 1
    
#    _trace_atexit()
    nose.run_exit(argv=args)