Example 1
def test_init():
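    # initialized(), initialize() and finalized() are helper functions defined by
    # the surrounding package; the assertions check that they agree with mpi4py's
    # own view of the MPI state.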
    if not initialized():
        initialize()
    from mpi4py import MPI

    assert initialized() == True
    assert MPI.Is_initialized() == True
    assert finalized() == False
    assert MPI.Is_finalized() == False
Example 2
def myexit():

    # Tell children to end
    if is_master_process(): MPI_done()

    # TODO: after telling the children to exit, it may take a while for them to finish.

    global out
    if out is not None:
        out.close()  # closing this tells the subprocess to stop

    if not MPI.Is_finalized():
        MPI.Finalize()
Example 3
    def fetch_minibatch_data(self,
                             mode,
                             pop=False,
                             fetch_meta_data=False,
                             data_shape=None,
                             channels="last"):
        # Return a dictionary object with keys 'image', 'label', and others as needed
        # self._queueloaders['train'].fetch_data(keyword_label).dim() as an example

        # Don't try to pull data if MPI is finalized, since I/O has been shut down:
        if MPI.Is_finalized():
            return None

        if self._count[mode] != 0:
            if self._warning:
                print(
                    "Calling fetch_minibatch_data without calling prepare_next. This will not give new data."
                )
                print(
                    "To quiet this wanring, call prepare_next before fetch_minibatch_data or call queueloader.no_warnings()"
                )

        if pop:
            # This function will pop the data
            while self._queueloaders[mode].is_reading():
                time.sleep(0.01)
            self._queueloaders[mode].pop_current_data()
        else:
            if self._warning:
                print(
                    "Calling fetch_minibatch_data with pop = False.  This will give you the same data as last time."
                )
                print("To quiet this warning, call queueloader.no_warnings()")

        self._queueloaders[mode].next(store_entries=fetch_meta_data,
                                      store_event_ids=fetch_meta_data)
        this_data = {}

        for key in self._data_keys[mode]:
            this_data[key] = self._queueloaders[mode].fetch_data(
                self._data_keys[mode][key]).data(shape=data_shape,
                                                 channels=channels)
            # this_data[key] = numpy.reshape(this_data[key], self._dims[mode][key])

        if fetch_meta_data:
            this_data['entries'] = self._queueloaders[mode].fetch_entries()
            this_data['event_ids'] = self._queueloaders[mode].fetch_event_ids()

        self._count[mode] += 1
        return this_data
Example 4
    def onexit():
        if not MPI.Is_initialized() or MPI.Is_finalized():
            return

        # Get the current exception (if any)
        exc = excepthook.exception

        # If we are exiting normally then call MPI_Finalize
        if (MPI.COMM_WORLD.size == 1 or exc is None
                or isinstance(exc, KeyboardInterrupt)
                or (isinstance(exc, SystemExit) and exc.code == 0)):
            MPI.Finalize()
        # Otherwise forcefully abort
        else:
            MPI.COMM_WORLD.Abort(1)
Example 5
    def coordinate_next_batch_indexes(self, mode, comm, root_rank=0):
        '''
        This function is a little naive: the root rank determines the next batch
        indexes and then scatters them evenly across the entire communicator
        that is passed in.
        '''

        # Don't coordinate if MPI is finalized:
        if MPI.Is_finalized():
            return None

        comm_size = comm.Get_size()

        if comm.Get_rank() == root_rank:
            set_entries = self.get_next_batch_indexes(
                mode, self._minibatch_size[mode])

        # Create a buffer for the data to scatter
        # Which is just a reference to the numpy array
        sendbuff = None
        if comm.Get_rank() == root_rank:
            sendbuff = set_entries

        local_size = int(self._minibatch_size[mode] / comm_size)

        if (self._minibatch_size[mode] % comm_size != 0):
            print(
                'You have requested to scatter {} image(s) over {} ranks, but the minibatch size is not evenly divisible by the number of ranks.'
                .format(self._minibatch_size[mode], comm_size))
            raise Exception(
                "Please change either the minibatch size or the number of ranks to scatter to."
            )

        # The recvbuff must be properly sized:
        recvbuff = numpy.empty((local_size), dtype=numpy.int32)

        # Scatter works on numpy arrays, and note the automatic lookup of MPI
        # datatypes from numpy dtypes.  If you are getting garbage, undefined, or
        # NaN values, the dtype is a good place to start investigating.
        comm.Scatter(sendbuff, recvbuff, root=root_rank)

        #print (self._rank, self._local_rank, 'done Scatterv!')

        return recvbuff
Example 6
    def exit(self, status=0):
        """Exit the mpi4py processor with the given status.

        @keyword status:    The program exit status.
        @type status:       int
        """

        # Execution on the slave.
        if MPI.COMM_WORLD.rank != 0:
            # Catch sys.exit being called on an executing slave.
            if self.in_main_loop:
                raise Exception('sys.exit unexpectedly called on slave!')

            # Catch sys.exit
            else:
                sys.stderr.write('\n')
                sys.stderr.write('***********************************************\n')
                sys.stderr.write('\n')
                sys.stderr.write('Warning: sys.exit called before the mpi4py main loop\n')
                sys.stderr.write('\n')
                sys.stderr.write('***********************************************\n')
                sys.stderr.write('\n')
                MPI.COMM_WORLD.Abort()

        # Execution on the master.
        else:
            # Slave clean up.
            if MPI.Is_initialized() and not MPI.Is_finalized() and MPI.COMM_WORLD.rank == 0:
                # Send the exit command to all slaves.
                self._broadcast_command(Exit_command())

                # Dump all results.
                self._ditch_all_results()

            # Exit the program with the given status.
            sys.exit(status)
Example 7
import os
from mpi4py import rc

assert rc.initialize is True
assert rc.finalize is None
assert rc.thread_level == 'multiple'

os.environ['MPI4PY_RC_INITIALIZE'] = 'false'
os.environ['MPI4PY_RC_FINALIZE'] = 'off'
os.environ['MPI4PY_RC_THREAD_LEVEL'] = 'single'

# importing mpi4py.MPI picks up the MPI4PY_RC_* environment variables set above;
# the assertions below check MPI's state and the updated rc values
from mpi4py import MPI
assert not MPI.Is_initialized()
assert not MPI.Is_finalized()

assert rc.initialize is False
assert rc.finalize is False
assert rc.thread_level == 'single'
Example 8
    def test_finalized_mpi4py(self):
        # MPI is finalized automatically by mpi4py, but only just before the
        # Python process terminates, so it must not be finalized yet.
        self.assertFalse(mpi.Is_finalized())
Example 9
    def testIsFinalized(self):
        # Is_finalized() must return a plain bool, and MPI is still active here.
        flag = MPI.Is_finalized()
        self.assertTrue(type(flag) is bool)
        self.assertFalse(flag)
Example 10
def cleanup():
    global init_by_devito
    # Finalize MPI only if Devito itself initialized it and it is still active.
    if init_by_devito and MPI.Is_initialized() and not MPI.Is_finalized():
        MPI.Finalize()
Example 11
# mpi4py.rc(initialize=False)
# or set the attribute directly, as below

# the initialization and runtime configuration options must be set
# before importing the MPI module
from mpi4py import rc
# the default of rc.initialize is True and rc.finalize is None
# change rc.initialize to False
# when rc.finalize is None it follows rc.initialize, so it becomes False too
rc.initialize = False

from mpi4py import MPI

print('Before calling Init(), MPI.Is_initialized: %s' % MPI.Is_initialized())
print('Before calling Init(), MPI.Is_finalized: %s' % MPI.Is_finalized())

# now we have to call Init explicitly
MPI.Init()
print('After calling Init(), MPI.Is_initialized: %s' % MPI.Is_initialized())
print('After calling Init(), MPI.Is_finalized: %s' % MPI.Is_finalized())

comm = MPI.COMM_WORLD
rank = comm.rank

print('I am Process %d...' % rank)

# have to call Finalize explicitly to terminate MPI
MPI.Finalize()
print('After calling Finalize(), MPI.Is_initialized: %s' % MPI.Is_initialized())
print('After calling Finalize(), MPI.Is_finalized: %s' % MPI.Is_finalized())
Example 12
def finalize_process_group():
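    # Guard against double finalization: MPI_Finalize may only be called once.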
    if not MPI.Is_finalized():
        MPI.Finalize()
Example 13
def finalize():
    if not MPI.Is_finalized():
        MPI.Finalize()
Example 14
def INNT():

    # Initialize MPI and print whether it has been initialized / finalized
    MPI.Init()
    print(MPI.Is_initialized())
    print(MPI.Is_finalized())

    # Get Parameters
    generation, dataset, mutationChance, param, groupSize = getParameters()

    # Initialize the fitness values
    fitnessParent = -1    # the fitness of the parent
    fitnessChild = -1     # the fitness of the child
    networkFitness = -1   # the fitness of the network
    genBestFitness = -1   # the best fitness of the generation

    # Initialize the classes
    net, ga, com, pd = initClasses(param, MPI, groupSize, networkFitness)

    # Get the logger
    # filename = 'output{}.log'.format(pd.rank)
    filename = 'output.log'
    logger = logging.getLogger()
    handler = logging.FileHandler(filename)
    handler.setLevel(logging.DEBUG)
    formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    logger.setLevel(logging.DEBUG)

    # Split the communicator into islands of groupSize consecutive ranks
    subGroup = pd.rank // groupSize
    subComm = MPI.COMM_WORLD.Split(subGroup, pd.rank)

    # Initialize the networks: one random network at every processor.
    # Each island is initialized with its own speciality.
    data = net.initNetwork()

    # Islands differ in their activation function;
    # there will be at least two subgroups.
    if pd.subGroup == 0:
        data['activation'] = 'sigmoid'
    elif pd.subGroup == 1:
        data['activation'] = 'elu'
    else:
        data['activation'] = 'selu'

    # Start running GA (Genetic Algorithm) generation
    for g in range(generation):

        if genBestFitness < 100:

            # GET PARENT FITNESS/ACCURACY
            # Every processor trains and evaluates the accuracy/fitness of the parent network
            fitnessParent = ga.getFitness(data, dataset)
            print('loop_1 done', g, pd.rank)

            # BREED THE CHILD
            # This is done using MPI Isend:
            # get the parent via a non-blocking exchange
            child = ga.breeding(pd.rank, g, data, mutationChance,
                                pd.intraIslandExchange(data, subComm))
            MPI.COMM_WORLD.Barrier()

            # GET CHILD'S FITNESS/ACCURACY
            # Every processor trains and evaluates the accuracy/fitness of the child network
            fitnessChild = ga.getFitness(child, dataset)
            '''
            If the network fitness has improved over the previous generation,
            then pass on the features/hyperparameters: keep the better of the
            two (parent or child) for the next generation.  The comparison is
            between the previous value at the processor and the newly computed
            value.
            '''
            networkFitness, data = com.networkData(fitnessParent, fitnessChild,
                                                   data, child)
            '''
            Compare the fitness of the best networks of all the families:
            compare the fitness of the network data held by all the processors
            in the communicator, get the best fitness of the generation, kill
            the poorest performers, and randomly re-initialize them to keep the
            population size constant.
            '''
            genBestFitness, data = com.genFitness(data, param, MPI, groupSize)
            # print(genBestFitness, data)

            logger.debug(
                'generation=%d, Rank=%d, processid=%s, group=ID%d, subRank=%d, parent=%s, child=%s, parentFitness=%0.4f, childFitness=%0.4f, networkFitness=%0.4f, genBestFitness=%0.4f',
                g, pd.rank, socket.gethostname(), pd.subGroup,
                subComm.Get_rank(), data, child, fitnessParent, fitnessChild,
                networkFitness, genBestFitness)
            '''
            Do an inter-island exchange every 5 generations:
            every rank sends its data to the previous rank.
            '''
            if g % 5 == 0:
                pd.interIslandExchange(data, subComm)
            print('loop_6 done', pd.rank)
            MPI.COMM_WORLD.Barrier()

        else:
            # Broadcast the best results to all the processors
            pd.broadcast(data, pd.rank)
            print('best fitness achieved')
            # And halt the generation loop; MPI is finalized once after the loop
            break

    MPI.Finalize()
Example 15
def DNNT():

    # Initialize MPI and print whether it has been initialized / finalized
    MPI.Init()
    print(MPI.Is_initialized())
    print(MPI.Is_finalized())

    # Get Parameters
    generation, dataset, mutationChance, param = getParameters()

    # Initialize the fitness values
    fitnessParent = -1    # the fitness of the parent
    fitnessChild = -1     # the fitness of the child
    networkFitness = -1   # the fitness of the network
    genBestFitness = -1   # the best fitness of the generation

    # Initialize the classes
    net, ga, com, pd = initClasses(param, MPI, networkFitness)

    # Get the logger
    # filename = 'output{}.log'.format(pd.rank)
    filename = 'output.log'
    logger = logging.getLogger()
    handler = logging.FileHandler(filename)
    handler.setLevel(logging.DEBUG)
    formatter = logging.Formatter("%(asctime)s - %(levelname)s - %(message)s")
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    logger.setLevel(logging.DEBUG)

    # initialize the networks
    # one random network at every processor
    data = net.initNetwork()

    # Start running GA (Genetic Algorithm) generation
    for g in range(generation):

        if genBestFitness < 100:

            # GET PARENT FITNESS/ACCURACY
            # Every processor trains and evaluates the accuracy/fitness of the parent network
            fitnessParent = ga.getFitness(data, dataset)

            # BREED THE CHILD
            # This is done using MPI Isend:
            # get the parent via a non-blocking exchange
            child = ga.breeding(data, mutationChance,
                                pd.nonBlockingExchange(data))
            MPI.COMM_WORLD.Barrier()

            # GET CHILD'S FITNESS/ACCURACY
            # Every processor trains and evaluates the accuracy/fitness of the child network
            fitnessChild = ga.getFitness(child, dataset)
            '''
            If the network fitness has improved over the previous generation,
            then pass on the features/hyperparameters: keep the better of the
            two (parent or child) for the next generation.  The comparison is
            between the previous value at the processor and the newly computed
            value.
            '''
            networkFitness, data = com.networkData(fitnessParent, fitnessChild,
                                                   data, child)

            logger.debug(
                'generation=%d, Rank=%d, processid=%s, parent=%s, child=%s, '
                'parentFitness=%0.4f, childFitness=%0.4f, networkFitness=%0.4f',
                g, pd.rank, socket.gethostname(), data, child, fitnessParent,
                fitnessChild, networkFitness)
            '''
            Compare the fitness of the best networks of all the families:
            compare the fitness of the network data held by all the processors
            in the communicator, get the best fitness of the generation, kill
            the poorest performers, and randomly re-initialize them to keep the
            population size constant.
            '''
            genBestFitness, data = com.genFitness(data, param, MPI)
            print(genBestFitness, data)

        else:
            # Broadcast the best results to all the processors
            pd.broadcast(data, pd.rank)
            print('best fitness achieved')
            # And halt the generation loop; MPI is finalized once after the loop
            break

    MPI.Finalize()
Example 16
def finalize():
    if not MPI.Is_finalized():
        # print("finalizing...")
        MPI.Finalize()