Example #1
    def test_class_instance_member(self):
        class Foo(object):
            def foo(self, x):
                return random.random()+x

        r = all_global_vars(Foo())
        assert len(r.keys()) == 1 and \
            r.has_key('random') and inspect.ismodule(r['random'])
Example #2
    def test_class_instance_member(self):
        class Foo(object):
            def foo(self, x):
                return random.random() + x

        r = all_global_vars(Foo())
        assert len(list(r.keys())) == 1 and \
            'random' in r and inspect.ismodule(r['random'])
Example #3
    def test_class_member(self):
        class Foo(object):
            def foo(self, x):
                return random.random() + x

        r = all_global_vars(Foo)
        assert len(r.keys()) == 1 and \
            r.has_key('random') and inspect.ismodule(r['random'])
Example #4
    def test_class_member_calls_func(self):
        class Foo(object):
            def foo(self, x):
                return func1(x)

        r = all_global_vars(Foo)
        assert len(list(r.keys())) == 2 and \
            'random' in r and inspect.ismodule(r['random']) and \
            'func1' in r and inspect.isfunction(r['func1'])
Example #5
    def test_class_member_calls_func(self):
        class Foo(object):
            def foo(self, x):
                return func1(x)

        r = all_global_vars(Foo)
        assert len(r.keys()) == 2 and \
            r.has_key('random') and inspect.ismodule(r['random']) and \
            r.has_key('func1') and inspect.isfunction(r['func1'])
Example #6
    def test_class_in_func(self):
        class Foo(object):
            def foo(self, x):
                return random.random() + x

        def bar(x):
            return Foo().foo(x)

        r = all_global_vars(bar)
        assert len(list(r.keys())) == 2 and \
            'random' in r and inspect.ismodule(r['random'])
Example #7
    def test_class_in_func(self):
        class Foo(object):
            def foo(self, x):
                return random.random()+x

        def bar(x):
            return Foo().foo(x)

        r = all_global_vars(bar)
        assert len(r.keys()) == 2 and \
            r.has_key('random') and inspect.ismodule(r['random'])
Example #8
    def test_class_child(self):
        class Foo(object):
            def foo(self, x):
                return random.random() + x

        class Bar(Foo):
            def foo(self, x):
                return super(Bar, self).foo(x)

        r = all_global_vars(Bar)
        assert len(list(r.keys())) == 3 and \
            'random' in r and inspect.ismodule(r['random']) and \
            'Foo' in r and inspect.isclass(r['Foo']) and \
            'Bar' in r and inspect.isclass(r['Bar'])
Example #9
    def test_class_child(self):
        class Foo(object):
            def foo(self, x):
                return random.random()+x

        class Bar(Foo):
            def foo(self, x):
                return super(Bar, self).foo(x)

        r = all_global_vars(Bar)
        assert len(r.keys()) == 3 and \
            r.has_key('random') and inspect.ismodule(r['random']) and \
            r.has_key('Foo') and inspect.isclass(r['Foo']) and \
            r.has_key('Bar') and inspect.isclass(r['Bar'])
Example #10
    def test_func_imported(self):
        r = all_global_vars(func2)
        assert len(list(r.keys())) == 1 and \
            'Integral' in r and inspect.isclass(r['Integral'])
Example #11
    def test_func(self):
        r = all_global_vars(func1)
        assert 'random' in r and inspect.ismodule(r['random'])
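The tests above (and the remaining ones below) exercise all_global_vars, whose implementation is not shown on this page; the spawn() method in the next example also calls it to gather each target's globals before shipping them to the spawned processes. As a reading aid only, here is a minimal sketch of the behaviour the assertions appear to assume; the name all_global_vars_sketch and the co_names-based recursion are my assumptions, and the sketch skips cases the real function must also handle (for example the closure variables and base classes checked in test_class_in_func and test_class_child):

import inspect

def all_global_vars_sketch(obj, _seen=None):
    # Sketch only: collect the globals referenced by a function, class, or
    # instance by walking the co_names of the underlying code objects.
    if _seen is None:
        _seen = set()
    if inspect.isfunction(obj):
        funcs = [obj]
    else:
        cls = obj if inspect.isclass(obj) else type(obj)
        funcs = [f for _, f in inspect.getmembers(cls, inspect.isfunction)]
    result = {}
    for f in funcs:
        for name in f.__code__.co_names:
            if name in _seen or name not in f.__globals__:
                continue
            _seen.add(name)
            val = f.__globals__[name]
            result[name] = val
            # Recurse into globally defined functions and classes so that
            # indirectly referenced globals (e.g. 'random' via func1) are
            # picked up as well:
            if inspect.isfunction(val) or inspect.isclass(val):
                result.update(all_global_vars_sketch(val, _seen))
    return result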
Example #12
    def spawn(self, part_map):
        """
        Spawn MPI processes for, and execute, each of the managed targets.

        Parameters
        ----------
        part_map : dict
            Maps GPU ID to list of target MPI ranks.
        """

        if self._is_parent:

            # The number of GPUs over which the targets are partitioned may not
            # exceed the actual number of supported devices:
            n_part_gpus = len(part_map.keys())
            n_avail_gpus = 0
            drv.init()
            for i in xrange(drv.Device.count()):

                # MPS requires Tesla/Quadro GPUs with compute capability 3.5 or greater:
                if mps_avail:
                    d = drv.Device(i)
                    if d.compute_capability() >= (3, 5) and \
                       re.search('Tesla|Quadro', d.name()):
                        n_avail_gpus += 1
                else:
                    n_avail_gpus += 1
            if n_part_gpus > n_avail_gpus:
                raise RuntimeError('partition size (%s) exceeds '
                                   'number of available GPUs (%s)' % \
                                   (n_part_gpus, n_avail_gpus))
                
            # Start MPS control daemons (this assumes that the available GPUs
            # are numbered consecutively from 0 onwards - as are the elements of
            # part_map.keys()):
            if self._mps_man:
                self._mps_man.start()
                self.log_info('starting MPS')

            # Find the path to the mpi_backend.py script (which should be in
            # the same directory as this module):
            import neurokernel.mpi
            parent_dir = os.path.dirname(neurokernel.mpi.__file__)
            mpi_backend_path = os.path.join(parent_dir, 'mpi_backend.py')

            # Check that the union of the ranks in the partition corresponds
            # exactly to the ranks of the targets added to the manager:
            n_targets = len(self._targets.keys())
            if set(self._targets.keys()) != \
               set([t for t in itertools.chain.from_iterable(part_map.values())]):
                raise ValueError('partition must contain all target ranks')

            # Invert mapping of GPUs to MPI ranks:
            rank_to_gpu_map = {rank:gpu for gpu in part_map.keys() for rank in part_map[gpu]}

            # Set MPS pipe directory:
            info = MPI.Info.Create()
            if self._mps_man:
                mps_dir = self._mps_man.get_mps_dir(self._mps_man.get_mps_ctrl_proc())
                info.Set('env', 'CUDA_MPS_PIPE_DIRECTORY=%s' % mps_dir)

            # Spawn processes:
            self._intercomm = MPI.COMM_SELF.Spawn(sys.executable,
                                                  args=[mpi_backend_path],
                                                  maxprocs=n_targets,
                                                  info=info)

            # First, transmit twiggy logging emitters to spawned processes so
            # that they can configure their logging facilities:
            for i in self._targets.keys():
                self._intercomm.send(twiggy.emitters, i)

            # Next, serialize the routing table ONCE and then transmit it to all
            # of the child nodes:
            self._intercomm.bcast(self.routing_table, root=MPI.ROOT)

            # Transmit class to instantiate, globals required by the class, and
            # the constructor arguments; the backend will wait to receive
            # them and then start running the targets on the appropriate nodes.
            req = MPI.Request()
            r_list = []
            for i in self._targets.keys():
                target_globals = all_global_vars(self._targets[i])

                # Serializing atexit with dill appears to fail in virtualenvs
                # sometimes if atexit._exithandlers contains an unserializable function:
                if 'atexit' in target_globals:
                    del target_globals['atexit']
                data = (self._targets[i], target_globals, self._kwargs[i])
                r_list.append(self._intercomm.isend(data, i))

                # Need to clobber data to prevent all_global_vars from
                # including it in its output:
                del data
            req.Waitall(r_list)
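Two of the steps above, checking that the partition covers every target rank and inverting part_map, are plain dict and set manipulations that can be tried in isolation. The values below are arbitrary and only illustrate the shape of part_map described in the docstring:

import itertools

# Example partition: GPU 0 runs ranks 0 and 1, GPU 1 runs ranks 2 and 3.
part_map = {0: [0, 1], 1: [2, 3]}
target_ranks = {0, 1, 2, 3}

# The union of the rank lists must equal the set of ranks of the added targets:
assert set(itertools.chain.from_iterable(part_map.values())) == target_ranks

# Invert the mapping so that each rank can look up its assigned GPU:
rank_to_gpu_map = {rank: gpu for gpu, ranks in part_map.items() for rank in ranks}
print(rank_to_gpu_map)  # {0: 0, 1: 0, 2: 1, 3: 1}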
Example #13
    def test_func_imported(self):
        r = all_global_vars(func2)
        assert len(r.keys()) == 1 and \
            r.has_key('Integral') and inspect.isclass(r['Integral'])
Example #14
    def test_func(self):
        r = all_global_vars(func1)
        assert r.has_key('random') and inspect.ismodule(r['random'])
Example #15
    def spawn(self, part_map):
        """
        Spawn MPI processes for, and execute, each of the managed targets.

        Parameters
        ----------
        part_map : dict
            Maps GPU ID to list of target MPI ranks.
        """

        if self._is_parent:

            # The number of GPUs over which the targets are partitioned may not
            # exceed the actual number of supported devices:
            n_part_gpus = len(part_map.keys())
            n_avail_gpus = 0
            drv.init()
            for i in xrange(drv.Device.count()):

                # MPS requires Tesla/Quadro GPUs with compute capability 3.5 or greater:
                if mps_avail:
                    d = drv.Device(i)
                    if d.compute_capability() >= (3, 5) and \
                       re.search('Tesla|Quadro', d.name()):
                        n_avail_gpus += 1
                else:
                    n_avail_gpus += 1
            if n_part_gpus > n_avail_gpus:
                raise RuntimeError('partition size (%s) exceeds '
                                   'number of available GPUs (%s)' % \
                                   (n_part_gpus, n_avail_gpus))

            # Start MPS control daemons (this assumes that the available GPUs
            # are numbered consecutively from 0 onwards - as are the elements of
            # part_map.keys()):
            if self._mps_man:
                self._mps_man.start()
                self.log_info('starting MPS')

            # Find the path to the mpi_backend.py script (which should be in
            # the same directory as this module):
            import neurokernel.mpi
            parent_dir = os.path.dirname(neurokernel.mpi.__file__)
            mpi_backend_path = os.path.join(parent_dir, 'mpi_backend.py')

            # Check that the union of the ranks in the partition corresponds
            # exactly to the ranks of the targets added to the manager:
            n_targets = len(self._targets.keys())
            if set(self._targets.keys()) != \
               set([t for t in itertools.chain.from_iterable(part_map.values())]):
                raise ValueError('partition must contain all target ranks')

            # Invert mapping of GPUs to MPI ranks:
            rank_to_gpu_map = {
                rank: gpu
                for gpu in part_map.keys() for rank in part_map[gpu]
            }

            # Set MPS pipe directory:
            info = MPI.Info.Create()
            if self._mps_man:
                mps_dir = self._mps_man.get_mps_dir(
                    self._mps_man.get_mps_ctrl_proc())
                info.Set('env', 'CUDA_MPS_PIPE_DIRECTORY=%s' % mps_dir)

            # Spawn processes:
            self._intercomm = MPI.COMM_SELF.Spawn(sys.executable,
                                                  args=[mpi_backend_path],
                                                  maxprocs=n_targets,
                                                  info=info)

            # First, transmit twiggy logging emitters to spawned processes so
            # that they can configure their logging facilities:
            for i in self._targets.keys():
                self._intercomm.send(twiggy.emitters, i)

            # Next, serialize the routing table ONCE and then transmit it to all
            # of the child nodes:
            self._intercomm.bcast(self.routing_table, root=MPI.ROOT)

            # Transmit class to instantiate, globals required by the class, and
            # the constructor arguments; the backend will wait to receive
            # them and then start running the targets on the appropriate nodes.
            req = MPI.Request()
            r_list = []
            for i in self._targets.keys():
                target_globals = all_global_vars(self._targets[i])

                # Serializing atexit with dill appears to fail in virtualenvs
                # sometimes if atexit._exithandlers contains an unserializable function:
                if 'atexit' in target_globals:
                    del target_globals['atexit']
                data = (self._targets[i], target_globals, self._kwargs[i])
                r_list.append(self._intercomm.isend(data, i))

                # Need to clobber data to prevent all_global_vars from
                # including it in its output:
                del data
            req.Waitall(r_list)
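The MPI plumbing in spawn(), dynamic process spawning, a single broadcast of shared state, then per-rank sends over the intercommunicator, follows the usual mpi4py pattern. A stripped-down two-file sketch of that pattern (file names and payloads are illustrative, not taken from the project) looks like this:

# parent.py -- spawn two workers, broadcast shared state, send per-rank data.
import sys
from mpi4py import MPI

intercomm = MPI.COMM_SELF.Spawn(sys.executable, args=['child.py'], maxprocs=2)
intercomm.bcast({'shared': 'state'}, root=MPI.ROOT)  # MPI.ROOT marks the sender
for rank in range(2):
    intercomm.send('payload for rank %d' % rank, dest=rank)
intercomm.Disconnect()

# child.py -- each spawned process receives the broadcast and its own payload.
from mpi4py import MPI

parent = MPI.Comm.Get_parent()
shared = parent.bcast(None, root=0)   # root=0 is the parent's rank in its group
payload = parent.recv(source=0)
parent.Disconnect()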