def setUp(self):
        self.user = getpass.getuser()
        self.node = platform.node()
        self.name = self.node.replace('.', '_')
        self.python = find_python()
        self.cluster = None

        if sys.platform == 'win32' or self.user not in SSH_USERS:
            self.skip_ssh = True
        else:
            self.skip_ssh = False

        self.machines = []
        if self.node.startswith('gxterm'):
            # User environment assumed OK on this GRC cluster front-end.
            for i in range(1, 55):
                self.machines.append({'hostname': 'gx%02d' % i,
                                      'python': self.python})
        else:
            self.machines.append({'hostname': self.node,
                                  'python': self.python})

        # Ensure we aren't held up by local host load problems.
        for allocator in ResourceAllocationManager.list_allocators():
            if allocator.name == 'LocalHost':
                self.local = allocator
                self.local.max_load = 10
                break
        else:
            raise RuntimeError('No LocalHost allocator!?')
def setup_cluster(encrypted=True):
    """ Use openmdao.testing.cluster.init_cluster, but fix 'max_load'. """
    name = init_cluster(encrypted, allow_shell=True)
    for allocator in ResourceAllocationManager.list_allocators():
        if allocator.name == 'LocalHost':
            allocator.max_load = 1.
    return name
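
A minimal usage sketch (a hypothetical helper, not from the source): it relies only on setup_cluster above and the ResourceAllocationManager allocate/release calls exercised in the tests below.

def exercise_cluster(encrypted=True):
    """ Hypothetical sketch: allocate and release one server from the cluster. """
    name = setup_cluster(encrypted)
    if name is None:
        return  # No cluster or fallback allocator could be configured.
    # Target the named allocator through the resource description, then
    # make sure the server is released even if the check fails.
    server, info = ResourceAllocationManager.allocate({'allocator': name})
    try:
        assert server is not None
    finally:
        ResourceAllocationManager.release(server)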
    def setUp(self):
        # Save existing RAM instance and force a rebuild.
        self.orig_ram = RAM._RAM
        RAM._RAM = None
        RAM.configure('')

        self.user = getpass.getuser()
        self.node = platform.node()
        self.name = self.node.replace('.', '_')
        self.python = find_python()
        self.cluster = None

        if sys.platform == 'win32' or self.user not in SSH_USERS:
            self.skip_ssh = True
        else:
            self.skip_ssh = False

        self.machines = []
        self.machines.append({'hostname': self.node,
                              'python': self.python})

        # Ensure we aren't held up by local host load problems.
        for allocator in RAM.list_allocators():
            if allocator.name == 'LocalHost':
                self.local = allocator
                self.local.max_load = 10
                break
        else:
            raise RuntimeError('No LocalHost allocator!?')
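
A matching tearDown sketch (hypothetical, not from the source) that mirrors the save-and-rebuild pattern above by restoring the saved RAM instance:

    def tearDown(self):
        # Hypothetical counterpart to setUp(): restore the RAM instance that
        # was saved before forcing the rebuild, so later tests see the
        # original allocator configuration.
        RAM._RAM = self.orig_ram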
def init_cluster(encrypted=True, clean_dir=True, allow_shell=False):
    """
    If not already done, initializes the ResourceAllocationManager and
    adds a cluster using encrypted or unencrypted communication.
    Returns the name of the configured cluster.
    """
    authkey = 'PublicKey' if encrypted else 'AuthKey'
    allocators = ResourceAllocationManager.list_allocators()

    if len(allocators) == 1:
        local = ResourceAllocationManager.get_allocator(0)
        if local.max_load < 10:  # First time we've been called.
            # Ensure we aren't held up by local host load problems.
            local.max_load = 10

            if clean_dir:
                # Remove any local allocator-created directories.
                for path in glob.glob('Sim-*'):
                    shutil.rmtree(path, onerror=onerror)

    node = platform.node()
    name = '%s_%s' % (node.replace('.', '_'), authkey)
    for allocator in allocators:
        if allocator.name == name:
            return name  # Don't add multiple copies.

    machines = []
    python = sys.executable

    if node.startswith('gxterm'):
        # User environment assumed OK on this GRC cluster front-end.
        # Using less than full machine (55 nodes) to allow multiple
        # cluster testing without hitting limit on open files (sockets).
        for i in range(20):
            machines.append({'hostname': 'gx%02d' % i, 'python': python})
    elif local_ssh_available():
        machines.append({'hostname': node, 'python': python})

    if machines:
        cluster = ClusterAllocator(name, machines, authkey, allow_shell)
        ResourceAllocationManager.insert_allocator(0, cluster)
        return name
    elif not encrypted:
        # Create a LocalAllocator so we have *something*.
        name = 'LocalUnencrypted'
        for allocator in allocators:
            if allocator.name == name:
                return name  # Don't add multiple copies.
        local = LocalAllocator(name, authkey=authkey, allow_shell=allow_shell)
        ResourceAllocationManager.insert_allocator(0, local)
        return name
    return None
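
A small companion sketch (hypothetical helper, not part of the source): undo init_cluster by removing the allocator it inserted at index 0, so later tests start from a clean allocator list.

def shutdown_cluster(name):
    """ Hypothetical helper: remove the allocator added by init_cluster(). """
    if name is None:
        return
    for allocator in ResourceAllocationManager.list_allocators():
        if allocator.name == name:
            ResourceAllocationManager.remove_allocator(name)
            break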
    def setup(self, replicate=True):
        """
        Setup to begin new run.

        replicate: bool
             If True, then replicate the model and save to an egg file
             first (for concurrent evaluation).
        """
        self._cleanup(remove_egg=replicate)

        if not self.sequential:
            if replicate or self._egg_file is None:
                # Save model to egg.
                # Must do this before creating any locks or queues.
                self._replicants += 1
                version = 'replicant.%d' % (self._replicants)

                # If only local host will be used, we can skip determining
                # distributions required by the egg.
                allocators = RAM.list_allocators()
                need_reqs = False
                if not self.ignore_egg_requirements:
                    for allocator in allocators:
                        if not isinstance(allocator, LocalAllocator):
                            need_reqs = True
                            break

                driver = self.parent.driver
                self.parent.add('driver', Driver())  # this driver will execute the workflow once
                self.parent.driver.workflow = self.workflow
                try:
                    #egg_info = self.model.save_to_egg(self.model.name, version)
                    # FIXME: what name should we give to the egg?
                    egg_info = self.parent.save_to_egg(self.name, version,
                                                       need_requirements=need_reqs)
                finally:
                    self.parent.driver = driver

                self._egg_file = egg_info[0]
                self._egg_required_distributions = egg_info[1]
                self._egg_orphan_modules = [name for name, path in egg_info[2]]

        self._iter = self.get_case_iterator()
        self._seqno = 0
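
A hedged illustration of the replicate flag (the cid instance below is a hypothetical stand-in for a driver exposing the setup() method above): calling setup(replicate=False) on an unchanged model reuses the previously saved egg instead of writing a new one.

# Hypothetical sketch; 'cid' stands for a driver instance with setup() above.
cid.sequential = False         # request concurrent (non-sequential) evaluation
cid.setup()                    # replicate=True: saves a fresh 'replicant.N' egg
first_egg = cid._egg_file

cid.setup(replicate=False)     # model unchanged: the existing egg is reused
assert cid._egg_file == first_egg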
    def setUp(self):
        self.user = getpass.getuser()
        self.node = platform.node()
        self.name = self.node.replace('.', '_')
        self.python = find_python()
        self.cluster = None

        if sys.platform == 'win32' or self.user not in SSH_USERS:
            self.skip_ssh = True
        else:
            self.skip_ssh = False

        self.machines = []
        self.machines.append({'hostname': self.node, 'python': self.python})

        # Ensure we aren't held up by local host load problems.
        for allocator in ResourceAllocationManager.list_allocators():
            if allocator.name == 'LocalHost':
                self.local = allocator
                self.local.max_load = 10
                break
        else:
            raise RuntimeError('No LocalHost allocator!?')
    def _setup(self):
        """ Setup to begin new run. """
        # if params have changed we need to setup systems again
        if self.workflow._system is None:
            obj = self
            while obj.parent is not None:
                obj = obj.parent
            obj._setup()

        if not self.sequential:
            # Save model to egg.
            # Must do this before creating any locks or queues.
            self._replicants += 1
            version = 'replicant.%d' % (self._replicants)

            # If only local host will be used, we can skip determining
            # distributions required by the egg.
            allocators = RAM.list_allocators()
            need_reqs = False
            if not self.ignore_egg_requirements:
                for allocator in allocators:
                    if not isinstance(allocator, LocalAllocator):
                        need_reqs = True
                        break

            # Replicate and mutate model to run our workflow once.
            # Originally this was done in-place, but that 'invalidated'
            # various workflow quantities.
            replicant = self.parent.copy()
            workflow = replicant.get(self.name + '.workflow')
            driver = replicant.add('driver', Driver())
            workflow.parent = driver
            workflow.scope = None
            replicant.driver.workflow = workflow
            egg_info = replicant.save_to_egg(self.name,
                                             version,
                                             need_requirements=need_reqs)
            replicant = workflow = driver = None  # Release objects.
            gc.collect()  # Collect/compact before possible fork.

            self._egg_file = egg_info[0]
            self._egg_required_distributions = egg_info[1]
            self._egg_orphan_modules = [name for name, path in egg_info[2]]

        inp_paths = []
        inp_values = []
        for path, param in self.get_parameters().items():
            if isinstance(path, tuple):
                path = path[0]  # Use first target of ParameterGroup.
            path = make_legal_path(path)
            value = self.get('case_inputs.' + path)
            for target in param.targets:
                inp_paths.append(target)
                inp_values.append(value)

        outputs = self.get_responses().keys()
        extra_outputs = self.workflow._rec_outputs

        length = len(inp_values[0]) if inp_values else 0
        cases = []
        for i in range(length):
            inputs = []
            for j in range(len(inp_paths)):
                inputs.append((inp_paths[j], inp_values[j][i]))
            cases.append(
                _Case(i,
                      inputs,
                      outputs,
                      extra_outputs,
                      parent_uuid=self._case_uuid))
        self.init_responses(length)

        self._iter = iter(cases)
        self._abort_exc = None
    def _setup(self):
        """ Setup to begin new run. """
        if not self.sequential:
            # Save model to egg.
            # Must do this before creating any locks or queues.
            self._replicants += 1
            version = 'replicant.%d' % (self._replicants)

            # If only local host will be used, we can skip determining
            # distributions required by the egg.
            allocators = RAM.list_allocators()
            need_reqs = False
            if not self.ignore_egg_requirements:
                for allocator in allocators:
                    if not isinstance(allocator, LocalAllocator):
                        need_reqs = True
                        break

            # Replicate and mutate model to run our workflow once.
            # Originally this was done in-place, but that 'invalidated'
            # various workflow quantities.
            replicant = self.parent.copy()
            workflow = replicant.get(self.name+'.workflow')
            driver = replicant.add('driver', Driver())
            workflow.parent = driver
            workflow.scope = None
            replicant.driver.workflow = workflow
            egg_info = replicant.save_to_egg(self.name, version,
                                             need_requirements=need_reqs)
            replicant = workflow = driver = None  # Release objects.
            gc.collect()  # Collect/compact before possible fork.

            self._egg_file = egg_info[0]
            self._egg_required_distributions = egg_info[1]
            self._egg_orphan_modules = [name for name, path in egg_info[2]]

        inp_paths = []
        inp_values = []
        for path, param in self.get_parameters().items():
            if isinstance(path, tuple):
                path = path[0]  # Use first target of ParameterGroup.
            path = make_legal_path(path)
            value = self.get('case_inputs.'+path)
            for target in param.targets:
                inp_paths.append(target)
                inp_values.append(value)

        outputs = self.get_responses().keys()
        extra_outputs = self.workflow._rec_outputs

        length = len(inp_values[0]) if inp_values else 0
        cases = []
        for i in range(length):
            inputs = []
            for j in range(len(inp_paths)):
                inputs.append((inp_paths[j], inp_values[j][i]))
            cases.append(_Case(i, inputs, outputs, extra_outputs,
                               parent_uuid=self._case_uuid))
        self.init_responses(length)

        self._iter = iter(cases)
        self._abort_exc = None
    def test_configure(self):
        logging.debug('')
        logging.debug('test_configure')

        # Reconfigure.
        with open('resources.cfg', 'w') as out:
            out.write("""
[LocalHost]
max_load: 100
""")
        local = RAM.get_allocator('LocalHost')
        max_load = local.max_load
        try:
            self.assertTrue(max_load < 100)
            RAM.configure('resources.cfg')
            self.assertEqual(local.max_load, 100)
            local.max_load = max_load
        finally:
            os.remove('resources.cfg')

        # Add another local.
        with open('resources.cfg', 'w') as out:
            out.write("""
[Local2]
classname: openmdao.main.resource.LocalAllocator
authkey: PublicKey
allow_shell: False
total_cpus: 42
max_load: 200
""")
        try:
            RAM.configure('resources.cfg')
            local2 = RAM.get_allocator('Local2')
            self.assertEqual(local2.factory._authkey, 'PublicKey')
            self.assertEqual(local2.factory._allow_shell, False)
            self.assertEqual(local2.total_cpus, 42)
            self.assertEqual(local2.max_load, 200)
            self.assertEqual(local2.host, socket.gethostname())
            self.assertTrue(local2.pid > 0)
            RAM.remove_allocator('Local2')
        finally:
            os.remove('resources.cfg')

        # Bad local total_cpus.
        with open('resources.cfg', 'w') as out:
            out.write("""
[Local2]
classname: openmdao.main.resource.LocalAllocator
total_cpus: 0
""")
        try:
            assert_raises(self, "RAM.configure('resources.cfg')",
                          globals(), locals(), ValueError,
                          'Local2: total_cpus must be > 0, got 0')
        finally:
            os.remove('resources.cfg')

        # Bad local max_load.
        with open('resources.cfg', 'w') as out:
            out.write("""
[Local2]
classname: openmdao.main.resource.LocalAllocator
max_load: 0
""")
        try:
            assert_raises(self, "RAM.configure('resources.cfg')",
                          globals(), locals(), ValueError,
                          'Local2: max_load must be > 0, got 0')
        finally:
            os.remove('resources.cfg')

        # Bad module.
        with open('resources.cfg', 'w') as out:
            out.write("""
[BadModule]
classname: no-such-module.Allocator
max_load: 100
""")
        try:
            assert_raises(self, "RAM.configure('resources.cfg')",
                          globals(), locals(), RuntimeError,
                          "RAM configure BadModule: can't import"
                          " 'no-such-module'")
        finally:
            os.remove('resources.cfg')

        # Bad class.
        with open('resources.cfg', 'w') as out:
            out.write("""
[BadClass]
classname: openmdao.main.resource.NoSuchAllocator
max_load: 100
""")
        try:
            assert_raises(self, "RAM.configure('resources.cfg')",
                          globals(), locals(), RuntimeError,
                          "RAM configure BadClass: no class"
                          " 'NoSuchAllocator' in openmdao.main.resource")
        finally:
            os.remove('resources.cfg')

        # Add, insert, get, remove.
        local3 = LocalAllocator('Local3')
        local4 = LocalAllocator('Local4', total_cpus=4)
        RAM.add_allocator(local3)
        try:
            allocator_names = \
                [allocator.name for allocator in RAM.list_allocators()]
            self.assertEqual(allocator_names, ['LocalHost', 'Local3'])
            self.assertTrue(RAM.get_allocator('Local3') is local3)
            self.assertTrue(RAM.get_allocator(1) is local3)
            RAM.insert_allocator(0, local4)
            try:
                allocator_names = \
                    [allocator.name for allocator in RAM.list_allocators()]
                self.assertEqual(allocator_names,
                                 ['Local4', 'LocalHost', 'Local3'])
            finally:
                RAM.remove_allocator('Local4')
        finally:
            RAM.remove_allocator(1)

        assert_raises(self, "RAM.get_allocator('Local3')",
                      globals(), locals(), ValueError,
                      "allocator 'Local3' not found")

        assert_raises(self, "RAM.remove_allocator('Local3')",
                      globals(), locals(), ValueError,
                      "allocator 'Local3' not found")

        assert_raises(self, "LocalAllocator('BadLoad', max_load=-2)",
                      globals(), locals(), ValueError,
                      "BadLoad: max_load must be > 0, got -2")
    def test_remote(self):
        logging.debug('')
        logging.debug('test_remote')

        # Start remote server.
        server_dir = 'Factory'
        if os.path.exists(server_dir):
            shutil.rmtree(server_dir, onerror=onerror)
        os.mkdir(server_dir)
        os.chdir(server_dir)
        try:
            server, server_cfg = start_server()
            cfg = read_server_config(server_cfg)
            factory = None
            try:
                factory = connect(cfg['address'], cfg['port'],
                                  pubkey=cfg['key'])
                prefix = RAM._make_prefix(factory.host)
                remote = '%s_LocalHost' % prefix

                # Show no remotes currently in RAM.
                allocator_names = \
                    [allocator.name for allocator in RAM.list_allocators()]
                logging.debug('%s', allocator_names)
                self.assertFalse(remote in allocator_names)

                # Add remote server's allocator.
                RAM.add_remotes(factory)
                allocator_names = \
                    [allocator.name for allocator in RAM.list_allocators()]
                logging.debug('%s', allocator_names)
                self.assertTrue(remote in allocator_names)
                self.assertFalse(RAM.get_allocator(remote) is RAM.list_allocators()[0])
                self.assertTrue(RAM.get_allocator(remote) is RAM.list_allocators()[1])

                # Max servers.
                max_servers = RAM.max_servers(dict(allocator=remote))
                self.assertTrue(max_servers >= 0)  # Avoid host load issues.

                remote_alloc = RAM.get_allocator(remote)

                max_servers, info = \
                    remote_alloc.max_servers(dict(localhost=True))
                self.assertEqual(max_servers, 0)
                self.assertEqual(info, dict(localhost='requested local host'))

                max_servers, info = \
                    remote_alloc.max_servers(dict(allocator='LocalHost'))
                self.assertEqual(max_servers, 0)
                self.assertEqual(info, dict(allocator='wrong allocator'))

                estimate, info = \
                    remote_alloc.time_estimate(dict(allocator='LocalHost'))
                self.assertEqual(estimate, -2)
                self.assertEqual(info, dict(allocator='wrong allocator'))

                # Allocate, release.
                remote_server, info = RAM.allocate(dict(allocator=remote))
                RAM.release(remote_server)

                # Remove remote allocators.
                allocator_names = \
                    [allocator.name for allocator in RAM.list_allocators()]
                for name in allocator_names:
                    if name.startswith(prefix):
                        RAM.remove_allocator(name)
                allocator_names = \
                    [allocator.name for allocator in RAM.list_allocators()]
                logging.debug('%s', allocator_names)
                self.assertFalse(remote in allocator_names)

            finally:
                if factory is not None:
                    factory.cleanup()
                server.terminate(timeout=10)
        finally:
            os.chdir('..')
            shutil.rmtree(server_dir, onerror=onerror)

        # Access local RAM in manner it would be accessed in the server.
        self.assertEqual(RAM._get_instance().get_total_allocators(), 1)
        self.assertTrue(RAM._get_instance().get_allocator_proxy(0)
                        is RAM.list_allocators()[0])