Example #1
 def finalize(self):
     """
     End being parallel
     """
     if self.is_parallel is True:
         import pypar
         pypar.finalize()
Example #2
def do_run(pdb, i, cur, db, mutationList):

    if mutationList != "ALA":
        mfile = Core.Data.MutationListFile(filename=mutationList, create=True)
        mfile.removeDuplicates(autoUpdate=False)
        mutList = mfile.mutantList()
        if isRoot(myid):
            print mfile.numberOfMutants()
    else:
        mutList = Core.Data.CreateScanList(pdbFile=i,
                                           mutation='ALA',
                                           skipResidueTypes=['ALA', 'GLY'])

    results = DeltaStability(
        inputFile=i,
        mutationList=mutList,
        configurationFile='/home/satnam/proteinDesignTool.conf',
        workingDirectory=os.getcwd(),
        outputDirectory=os.getcwd())

    # Results are written to the results_<pdb>_<mutationList> table, and only by the root processor
    if isRoot(myid):
        cur.execute(
            "create table if not exists results_%s_%s(mutation VARCHAR(20), score FLOAT);"
            % (pdb, os.path.split(mutationList)[1]))
        for mutant in range(results.stabilityResults.numberOfRows()):
            cur.execute(
                "insert into results_%s_%s (mutation, score) VALUES (%s%s%s, %s%s%s);"
                % (pdb, os.path.split(mutationList)[1], '"',
                   results.stabilityResults[mutant][0], '"', '"',
                   results.stabilityResults[mutant][-1], '"'))
        print "Calculated %s stability and results added to database" % (pdb)

    pypar.finalize()
Example #3
 def finalize(self):
     """
     End being parallel
     """
     if self.is_parallel is True:
         import pypar
         pypar.finalize()
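Note: the guarded finalize() above only works if something earlier set is_parallel. A minimal sketch of the corresponding start-up logic, assuming a simple wrapper class (the class name and constructor are illustrative, not taken from the original project):

class ParallelHelper(object):

    def __init__(self):
        # Importing pypar initialises the MPI environment as a side effect,
        # so only do it when an MPI build is actually available.
        try:
            import pypar
            self.is_parallel = pypar.size() > 1
        except ImportError:
            self.is_parallel = False

    def finalize(self):
        """
        End being parallel
        """
        if self.is_parallel is True:
            import pypar
            pypar.finalize()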
Example #4
def run():
    """
    Run the process, handling any parallelisation.
    """
    
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("-c", "--config",
                        help="Configuration file",
                        type=str)
    parser.add_argument("-i", "--inputfile",
                        help="Input DEM file (ascii format)",
                        type=str)
    parser.add_argument("-o", "--output", 
                        help="Output path",
                        type=str)
    parser.add_argument("-v", "--verbose", 
                        help=("Verbose output (not available when invoking"
                                "parallel run)") )
                                
    args = parser.parse_args() 
                          
    logfile = 'topomult.log'
    loglevel = 'INFO'
    
    if args.verbose:
        verbose = args.verbose
    else:
        verbose = False

    if args.config:
        cfg = ConfigParser.ConfigParser()
        cfg.read(args.config)

        input_file = cfg.get('Input', 'Filename')
        output_path = cfg.get('Output', 'Path')
        logfile = cfg.get('Logging', 'LogFile')
        loglevel = cfg.get('Logging', 'LogLevel')
        verbose = cfg.get('Logging', 'Verbose')
        
    if args.inputfile:
        input_file = args.inputfile

    if args.output:
        output_path = args.output
    
    attemptParallel()
    if pp.size() > 1 and pp.rank() > 0:
        logfile += '-' + str(pp.rank())
        verbose = False  # to stop output to console

    flStartLog(logfile, loglevel, verbose)
    
    pp.barrier()
    work(input_file, output_path,
             ['n','s','e','w','ne','nw','se','sw'])
    pp.barrier()
    
    pp.finalize()
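attemptParallel(), flStartLog() and work() are helpers from the surrounding project and are not shown in this example. A minimal sketch of what attemptParallel() is assumed to do, namely bind pypar to the module-level name pp and fall back to a serial stand-in when MPI is unavailable (the stub class is illustrative):

def attemptParallel():
    """Bind pypar to the global name pp, or a serial stand-in if it is missing."""
    global pp
    try:
        import pypar as pp
    except ImportError:
        class _SerialStub(object):
            # Just enough of the pypar interface for a single-process run.
            def size(self):
                return 1

            def rank(self):
                return 0

            def barrier(self):
                pass

            def finalize(self):
                pass

        pp = _SerialStub()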
Example #5
def abnormalexit(reason):
    """this tells each worker node to exit, then kills the server process.
       this should only be called by the server node"""
    print 'abnormal exit'
    print reason
    sendtoall(('Die', 0))
    pypar.barrier()
    pypar.finalize()
    sys.exit(2)
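sendtoall() broadcasts the ('Die', 0) tuple to every worker; the matching worker-side handling of that message appears in Example #31 further down. A compressed sketch of such a worker loop, using only the message format shown here:

import sys
import pypar

def worker_wait_loop():
    while True:
        code, msg = pypar.receive(0)   # instructions always come from the server (rank 0)
        if code == 'Die':              # abnormal exit requested by the server
            break
    pypar.barrier()                    # mirror the server's barrier before shutdown
    pypar.finalize()
    sys.exit(2)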
Example #6
    def runMapReduce(self):
        if self.MPI_myid == 0:
            self.result = self.master()
        else:
            self.slave()

        pypar.finalize()
        logging.debug('[PROCESS %d]: MPI environment finalized.'%(self.MPI_myid, ))
        return
Example #7
def run_client():
    '''
    Run the client process and report its MPI rank.
    '''
    # Identification
    myid = pypar.rank() # id of this process
    nproc = pypar.size() # number of processors

    print "I am client", myid
    pypar.finalize()
Example #8
def run_client():
    '''
    Run the client process and report its MPI rank.
    '''
    # Identification
    myid = pypar.rank()  # id of this process
    nproc = pypar.size()  # number of processors

    print "I am client", myid
    pypar.finalize()
Example #9
    def runMapReduce(self):
        if self.MPI_myid == 0:
            self.result = self.master()
        else:
            self.slave()

        pypar.finalize()
        logging.debug('[PROCESS %d]: MPI environment finalized.' %
                      (self.MPI_myid, ))
        return
Example #10
def _mpi_end_embarrass():
    global _mpi_initialized
    if _mpi_initialized:
        import pypar
        print(pypar.rank() + 1, " of ", pypar.size(), ": BARRIER")
        pypar.barrier()
        print(pypar.rank() + 1, " of ", pypar.size(), ": FINALIZE")
        pypar.finalize()
        _mpi_initialized = False
    else:
        print("Non-MPI run : Exit without MPI_Finalize")
Example #11
 def run(self):
     if self.myid == 0:
         self.work.masterBeforeWork()
         self.master()
         self.work.masterAfterWork()
     else:
         self.work.slaveBeforeWork()
         self.slave()
         self.work.slaveAfterWork()
     
     pypar.finalize()
     if self.myid != 0:
         sys.exit()
Example #12
    def run(self):
        if self.myid == 0:
            self.work.masterBeforeWork()
            self.master()
            self.work.masterAfterWork()
        else:
            self.work.slaveBeforeWork()
            self.slave()
            self.work.slaveAfterWork()

        pypar.finalize()
        if self.myid != 0:
            sys.exit()
Example #13
def two_example():
    txt = ["yes", "no", "when", "what the", "a", "5ive!"]

    rank = pypar.rank()
    size = pypar.size()

    print
    print "I am processor %d of %d. " % (rank, size)
    for i, ele in enumerate(txt):
        if i % size == rank:
            print "i" + str(i) + " P" + str(rank) + " len " + str(len(ele)) + " for " + ele

    pypar.finalize()
Example #14
def two_example():
    txt = ["yes", "no", "when", "what the", "a", "5ive!"]

    rank = pypar.rank()
    size = pypar.size()

    print
    print "I am processor %d of %d. " % (rank, size)
    for i, ele in enumerate(txt):
        if i % size == rank:
            print "i" + str(i) + " P" + str(rank) + " len " + str(
                len(ele)) + " for " + ele

    pypar.finalize()
Example #15
def main():
    
#    #=========================================================================
#    #==============    Electronic TISE   =====================================
#    #=========================================================================
#    #Get parameters.
#    m_max, nu_max, mu_max, R_grid, beta, theta = el_tise_problem_parameters()
#   
#    #Do calculations.
#    electronic_BO.save_electronic_eigenstates(m_max, nu_max, mu_max, R_grid, beta, theta)
#
#    #=========================================================================   
#    #==============    Electronic TDSE   =====================================
#    #=========================================================================
#    #Get parameters.
#    filename, m, q, E_lim = el_tdse_problem_parameters()
#    
#    #Do calculations.
#    tdse = tdse_electron.TDSE_length_z(filename = filename)
#    tdse.BO_dipole_couplings(m, q, E_lim)
#
#    #=========================================================================   
#    #==============    Vibrational TISE   ====================================
    #=========================================================================
    #Get problem parameters.
    filename_el, nr_kept, xmin, xmax, xsize, order = vib_problem_parameters()
    
    #Do calculations.
    vibrational_BO.save_all_eigenstates(filename_el, nr_kept, 
	xmin, xmax, xsize, order)
    
    #=========================================================================   
    #==============    Vibrational TDSE   ====================================
    #=========================================================================
    #Get problem parameters.
    filename_el, nr_kept, xmin, xmax, xsize, order = vib_problem_parameters()
    
    #Do calculations.
    vibrational_BO.save_all_couplings(filename_el, nr_kept, 
    	xmin, xmax, xsize, order)
    
    #Calculate couplings for the eigenfunction basis.
    #vibrational_BO.save_eigenfunction_couplings(filename_el, nr_kept, 
    #	xmin, xmax, xsize, order)
    
    #=========================================================================
   
    pypar.finalize()
Example #16
def main():

    # Parse command line

    options, args = cookbook.doc_optparse.parse( __doc__ )

    #try:
    pos_fname, neg_fname, out_dir = args
    align_count, mapping = rp.mapping.alignment_mapping_from_file( file( options.mapping ) )
    #except:
    #    cookbook.doc_optparse.exit()

    try:
        run( open( pos_fname ), open( neg_fname ), out_dir, options.format, align_count, mapping )
    finally:
        pypar.finalize()
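The try/finally wrapper guarantees that pypar.finalize() runs even if run() raises, so a failed job does not leave MPI processes hanging. The same pattern in isolation (do_parallel_work is a placeholder for any function that communicates via pypar):

import pypar

try:
    do_parallel_work()
finally:
    pypar.finalize()   # always shut MPI down, even on error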
Example #17
    def __init__(self, aWorkList):

        self.WORKTAG = 1
        self.DIETAG = 2

        self.MPI_myid = pypar.rank()
        self.MPI_numproc = pypar.size()
        self.MPI_node = pypar.get_processor_name()

        self.works = aWorkList
        self.numWorks = len(self.works)

        self.reduceFunction = None
        self.mapFunction = None
        self.result = None

        if self.MPI_numproc < 2:
            pypar.finalize()
            if self.MPI_myid == 0:
                raise Exception('ERROR: Number of processors must be at least 2.')
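The master()/slave() methods that actually use WORKTAG and DIETAG are not part of this snippet. A minimal sketch of how a master typically drives a work list with these two tags (illustrative only; it uses a static round-robin rather than the dynamic load balancing a real master/slave implementation would do):

import pypar

def simple_master(works, WORKTAG=1, DIETAG=2):
    numproc = pypar.size()
    numworkers = numproc - 1
    # Hand the work items out cyclically, tagged as work.
    for i, item in enumerate(works):
        pypar.send(item, destination=1 + i % numworkers, tag=WORKTAG)
    # Collect one result per work item, from whichever worker answers first.
    results = [pypar.receive(pypar.any_source) for _ in works]
    # Tell every worker to stop.
    for worker in range(1, numproc):
        pypar.send(None, destination=worker, tag=DIETAG)
    return results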
Example #18
    def __init__(self, aWorkList):  
       
        self.WORKTAG = 1
        self.DIETAG =  2
        
        self.MPI_myid =    pypar.rank()
        self.MPI_numproc = pypar.size()
        self.MPI_node =    pypar.get_processor_name()

        self.works = aWorkList
        self.numWorks = len(self.works)

        self.reduceFunction = None
        self.mapFunction = None
        self.result = None
   
        if self.MPI_numproc < 2:
            pypar.finalize()
            if self.MPI_myid == 0:
                raise Exception('ERROR: Number of processors must be at least 2.')
Example #19
def start(initializer=None, initargs=(), maxtasks=None):
    global Pool
    try:
        # make pypar available
        global pp
        import pypar as pp

        if pp.size() > 1:
            Pool = PyparPool
            if pp.rank() > 0:
                worker(initializer, initargs, maxtasks)
        else:
            # fallback to multiprocessing
            print 'Using multiprocessing'
            pp.finalize()
            import multiprocessing as mp
            Pool = mp.Pool

    except ImportError:  # no pypar
        return
Example #20
def start(initializer=None, initargs=(), maxtasks=None):
    global Pool
    try:
        # make pypar available
        global pp
        import pypar as pp

        if pp.size() > 1:
            Pool = PyparPool
            if pp.rank() > 0:
                worker(initializer, initargs, maxtasks)
        else:
            # fallback to multiprocessing
            print "Using multiprocessing"
            pp.finalize()
            import multiprocessing as mp

            Pool = mp.Pool

    except ImportError:  # no pypar
        return
Example #21
def do_run(pdb, i, cur, db, mutationList):
    
    if mutationList != "ALA":
        mfile = Core.Data.MutationListFile(filename=mutationList,create=True)
        mfile.removeDuplicates(autoUpdate=False)
        mutList = mfile.mutantList()
        if isRoot(myid):
            print mfile.numberOfMutants()
    else:
        mutList = Core.Data.CreateScanList(pdbFile=i, mutation='ALA', skipResidueTypes=['ALA', 'GLY'])
    
    results = DeltaStability(inputFile = i, mutationList = mutList, configurationFile='/home/satnam/proteinDesignTool.conf', workingDirectory = os.getcwd(), outputDirectory = os.getcwd())
    
    # Results are written to the results_<pdb>_<mutationList> table, and only by the root processor
    if isRoot(myid):
        cur.execute("create table if not exists results_%s_%s(mutation VARCHAR(20), score FLOAT);" % (pdb,os.path.split(mutationList)[1]))
        for mutant in range(results.stabilityResults.numberOfRows()):
            cur.execute("insert into results_%s_%s (mutation, score) VALUES (%s%s%s, %s%s%s);" % (pdb,os.path.split(mutationList)[1], '"', results.stabilityResults[mutant][0], '"', '"', results.stabilityResults[mutant][-1],'"'))
        print "Calculated %s stability and results added to database" % (pdb)
            
    pypar.finalize()
Example #22
def main():

    # Ensure all Processors are ready
    pypar.barrier()
    print "Processor %d is ready" % (myid)

    # Connect to MySQL db
    db = MySQLdb.connect(host="localhost",
                         user="******",
                         passwd="samsung",
                         db="sat")
    cur = db.cursor()

    # Option parser from wrapper script
    parser = optparse.OptionParser()
    # PDB
    parser.add_option("-p",
                      "--pdb",
                      help="Choose all or a pdb id",
                      dest="pdb",
                      default="all")
    # PDB directory
    parser.add_option("-d", "--dir", help="i", dest="i", default="all")

    parser.add_option("-m",
                      "--mutationList",
                      help="Location of mutation list file",
                      dest="m",
                      default="ALA")

    (opts, args) = parser.parse_args()

    # Run calculations
    do_run(opts.pdb, opts.i, cur, db, opts.m)

    # Finalize and exit
    pypar.finalize()
Example #23
def main():
    
    # Ensure all Processors are ready
    pypar.barrier()
    print "Processor %d is ready" % (myid)
    
    # Connect to MySQL db
    db = MySQLdb.connect(host="localhost", 
                         user = "******", 
                         passwd = "samsung", 
                         db = "sat")
    cur = db.cursor()


    # Option parser from wrapper script
    parser = optparse.OptionParser()
    # PDB
    parser.add_option("-p", "--pdb", 
                      help="Choose all or a pdb id", 
                      dest="pdb", default ="all")
    # PDB directory
    parser.add_option("-d", "--dir", 
                      help="i", 
                      dest="i", default ="all")

    parser.add_option("-m", "--mutationList", 
                      help="Location of mutation list file", 
                      dest="m", default="ALA")
    
    (opts, args) = parser.parse_args()
    
    # Run calculations
    do_run(opts.pdb, opts.i, cur, db, opts.m)

    # Finalize and exit
    pypar.finalize()
Example #24
def run_multiple_windfields(scenario,
                            windfield_directory=None,
                            hazard_output_folder=None,
                            dircomment=None,
                            echo=False,
                            verbose=True):
    """Run volcanic ash impact model for multiple wind fields.

    The wind fields are assumed to be in the subfolder specified by windfield_directory,
    to have the extension *.profile and to follow the format used with scenarios.

    This function makes use of Open MPI and Pypar to execute in parallel but can also run sequentially.
    """

    try:
        import pypar
    except:
        P = 1
        p = 0
        processor_name = os.uname()[1]

        print 'Pypar could not be imported. Running sequentially on node %s' % processor_name,
    else:
        time.sleep(1)
        P = pypar.size()
        p = pypar.rank()
        processor_name = pypar.get_processor_name()

        print 'Processor %d initialised on node %s' % (p, processor_name)

        pypar.barrier()


    if p == 0:

        # Put logs along with the results
        logdir = os.path.join(hazard_output_folder, 'logs')
        makedir(logdir)

        header('Hazard modelling using multiple wind fields')
        print '*  Wind profiles obtained from: %s' % windfield_directory
        print '*  Scenario results stored in:  %s' %  hazard_output_folder
        print '*  Log files:'

        t_start = time.time()

        # Communicate hazard output directory name to all nodes to ensure they have exactly the same time stamp.
        for i in range(P):
            pypar.send((hazard_output_folder), i)
    else:
        # Receive correctly timestamped output directory names
        hazard_output_folder = pypar.receive(0)
        logdir = os.path.join(hazard_output_folder, 'logs')


    try:
        name = os.path.splitext(scenario)[0]
    except:
        name = 'run'


    # Wait until log dir has been created
    pypar.barrier()

    params = get_scenario_parameters(scenario)

    # Start processes staggered to avoid race conditions for disk access (otherwise it is slow to get started)
    time.sleep(2*p)

    # Logging
    s = 'Proc %i' % p
    print '     %s -' % string.ljust(s, 8),
    AIM_logfile = os.path.join(logdir, 'P%i.log' % p)
    start_logging(filename=AIM_logfile, echo=False)

    # Get cracking
    basename, _ = os.path.splitext(scenario)
    count_local = 0
    count_all = 0
    for i, file in enumerate(os.listdir(windfield_directory)):

        count_all += 1

        # Distribute jobs cyclically to processors
        if i%P == p:

            if not file.endswith('.profile'):
                continue

            count_local += 1

            windfield = '%s/%s' % (windfield_directory, file)
            windname, _ = os.path.splitext(file)
            header('Computing event %i on processor %i using wind field: %s' % (i, p, windfield))



            if dircomment is None:
                dircomment = params['eruption_comment']

            # Override or create parameters derived from native Fall3d wind field
            params['wind_profile'] = windfield
            params['wind_altitudes'] = get_layers_from_windfield(windfield) # FIXME: Try to comment this out.
            params['Meteorological_model'] = 'profile'

            if hazard_output_folder is None:
                hazard_output_folder = basename + '_hazard_outputs'

            if p == 0:
                print 'Storing multiple outputs in directory: %s' % hazard_output_folder

            # Run scenario
            aim = _run_scenario(params,
                                timestamp_output=True,
                                dircomment=dircomment + '_run%i_proc%i' % (i, p))

            # Make sure folder is present and can be shared by group
            makedir(hazard_output_folder)
            s = 'chmod -R g+w %s' % hazard_output_folder
            run(s)

            # Copy result file to output folder
            result_file = aim.scenario_name + '.res.nc'
            newname = aim.scenario_name + '.%s.res.nc' % windname # Name after wind file
            s = 'cp %s/%s %s/%s' % (aim.output_dir, result_file, hazard_output_folder, newname)
            run(s)

            # Create projectionfile in hazard output
            if i == 0:
                s = 'cp %s %s/%s' % (aim.projection_file, hazard_output_folder, 'HazardMaps.res.prj')
                run(s)

            # Clean up outputs from this scenario
            print 'P%i: Cleaning up %s' % (p, aim.output_dir)
            s = '/bin/rm -rf %s' % aim.output_dir
            run(s)

    print 'Processor %i done %i windfields' % (p, count_local)
    print 'Outputs available in directory: %s' % hazard_output_folder

    pypar.barrier()
    if p == 0:
        print 'Parallel simulation finished %i windfields in %i seconds' % (count_all, time.time() - t_start)


    pypar.finalize()
Example #25
                frame_a = np.array(
                    Image.open(os.path.join(vidpath,
                                            tif_files[frame_pair[0]])))
                frame_b = np.array(
                    Image.open(os.path.join(vidpath,
                                            tif_files[frame_pair[1]])))

                # Code below simulates a task running
                u, v = PIVCompute(frame_a,
                                  frame_b,
                                  window_size=window_size,
                                  overlap=overlap)
                print "Received work frame pair " + str(
                    frame_pair) + " u origin value is " + str(u[0, 0])

                # package up into work array
                work_array = np.zeros((2, u.shape[0], u.shape[1]))
                work_array[0, :, :] = u
                work_array[1, :, :] = v

                result_array = work_array.copy()

                pp.send(result_array,
                        destination=MASTER_PROCESS,
                        tag=work_index)
        #### while
    #### if worker

    pp.finalize()
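Only the worker side is shown in this example; the master that hands out frame pairs and gathers the (u, v) fields is missing. A minimal sketch of the master-side receive, reusing the tag-as-work-index convention from the fragment (pp and the result array layout come from the fragment above; num_work_items is an assumed counter):

# Hypothetical master-side loop collecting one result per dispatched frame pair.
for _ in range(num_work_items):
    result_array, status = pp.receive(source=pp.any_source,
                                      tag=pp.any_tag,
                                      return_status=True)
    work_index = status.tag          # the worker echoed the work index as the tag
    u = result_array[0, :, :]
    v = result_array[1, :, :]
    # ... store u and v for frame pair number work_index ...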
Example #26
    from pypar_balancer import PyparWork, PyparBalancer

    NUM_NODES = pp.size()

    if NUM_NODES > 1:
        HAVE_MPI = 1  # we have pypar, and we're running with more than one node

    if DEBUG:
        if MY_RANK == 0:
            if HAVE_PYPAR and HAVE_MPI:
                print "Running full MPI"
            elif HAVE_PYPAR:
                print "MPI available, but not enough nodes for master/slave"

    if HAVE_PYPAR and not HAVE_MPI:
        pp.finalize()  # not enough nodes to actually run master/slave... shut down MPI now.

except:
    if DEBUG:
        import traceback
        traceback.print_exc()
        if HAVE_PYPAR and HAVE_MPI:
            print "Running full MPI"
        elif HAVE_PYPAR:
            print "MPI available, but not enough nodes for master/slave"
        else:
            print "No MPI."

if HAVE_MPI:

    class GenericMPI(PyparWork):
Example #27
            if status.tag == DIE_TAG:
                continue_working = False
            
            # not being put to sleep, load in videos of interest and compute
            else:
                frame_pair, status = pp.receive(source=MASTER_PROCESS, tag=pp.any_tag, 
                    return_status=True)
                work_index = status.tag
                

                frame_a = np.array(Image.open(os.path.join(vidpath, tif_files[frame_pair[0]])));
                frame_b = np.array(Image.open(os.path.join(vidpath, tif_files[frame_pair[1]])));
                
                # Code below simulates a task running
                u, v = PIVCompute(frame_a, frame_b, window_size = window_size, overlap = overlap)
                print  "Received work frame pair " + str(frame_pair) + " u origin value is " + str(u[0,0])                 
                
                # package up into work array
                work_array = np.zeros((2,u.shape[0], u.shape[1]))
                work_array[0,:,:] = u
                work_array[1,:,:] = v

                result_array = work_array.copy()

                pp.send(result_array, destination=MASTER_PROCESS, tag=work_index)
        #### while
    #### if worker

    pp.finalize()

Example #28
def propagate_graphene_pulse(Nx=20, Ny=20, frame_num=10, magnetic_B=None):
    """
    In the Lanczos exponent exp(E*t/hbar), E is used in eV.
    """
    ham = envtb.ldos.hamiltonian.HamiltonianGraphene(Nx, Ny)

    Nall = 250

    w, v = ham.sorted_eigenvalue_problem(k=Nall, sigma=0.0)

    '''
        Store eigenvalue_problem
    '''
    fout = open('eigenvalue_problem.out', 'w')
    for i in xrange(Nall):
        fout.writelines(`w[i]`+'   '+`v[:,i].tolist()`+'\n')


    ''' Make vector potential'''

    A_pot = envtb.time_propagator.vector_potential.SinSqEnvelopePulse(
        amplitude_E0=laser_amp, frequency=laser_freq, Nc=Nc, cep=CEP, direction=direct)

    import pypar

    proc = pypar.size()                                # Number of processes as specified by mpirun
    myid = pypar.rank()                                # Id of this process (myid in [0, proc-1])
    node = pypar.get_processor_name()                  # Host name on which current process is running

    Nthread = Nall / proc

    N_range = range(myid * Nthread, (myid + 1) * Nthread, 10)

    for Nstate in N_range:

        wf_out = open('wave_functions_%(Nstate)d.out' % vars(), 'w')
        expansion_out = open('expansion_%(Nstate)d.out' % vars(), 'w')
        coords_out = open('coords_current_%(Nstate)d.out' % vars(), 'w')
        dipole_out = open('dipole_%(Nstate)d.out' % vars(), 'w')

        dt_new = dt
        NK_new = NK
        time_counter = 0.0

        '''initialize wave function
        create wave function from file (WaveFunction(coords=ham.coords).wave_function_from_file),
        wave function from eigenstate (WaveFunction(vec=v[:, Nstate],coords=ham.coords)) or
        create Gaussian wave packet (GaussianWavePacket(coords=ham.coords, ic=ic, p0=[0.0, 1.5], sigma=7.))
        '''
        #wf_final = envtb.time_propagator.wave_function.WaveFunction(coords=ham.coords)
        #time_counter = wf_final.wave_function_from_file('wave_functions_0.out')
        wf_final = envtb.time_propagator.wave_function.WaveFunction(vec=v[:, Nstate],coords=ham.coords)
        ##ic = Nx/2 * Ny + Ny/2
        ##wf_final = envtb.time_propagator.wave_function.GaussianWavePacket(
        ##        ham.coords, ic, p0=[0.0, 1.5], sigma=7.)
        #maxel = max(wf_final.wf1d)

        wf_final.save_wave_function_data(wf_out, time_counter)

        import time

        '''main loop'''
        for i in xrange(frame_num):

            #print 'frame %(i)d' % vars()
            time_counter += dt_new

            st = time.time()
            ham2 = ham.apply_vector_potential(A_pot(time_counter))
            #print 'efficiency ham2', time.time() - st

            #print 'time', time_counter, 'A', A_pot(time)
            st = time.time()
            wf_init = wf_final
            wf_final, dt_new, NK_new = propagate_wave_function(
                  wf_init, ham2, NK=NK_new, dt=dt_new, maxel=None,
                  regime='TSC', alpha=0.7)
                  #file_out = directory+'f%03d_2d.png' % i)
            #print 'efficiency lanz', time.time() - st

            if np.mod(i,10) == 0:
                  wf_final.save_wave_function_data(wf_out, time_counter)
                  wf_final.save_wave_function_expansion(expansion_out, v)
                  wf_final.save_coords_current(coords_out, A_pot(time_counter))


        wf_out.close()
        expansion_out.close()
        coords_out.close()
        dipole_out.close()

    pypar.finalize()

    return None
Example #29
def propagate_graphene_pulse(Nx=20, Ny=20, frame_num=10, magnetic_B=None):
    """
    In the Lanczos exponent exp(E*t/hbar), E is used in eV.
    """
    ham = envtb.ldos.hamiltonian.HamiltonianGraphene(Nx, Ny)

    Nall = 250

    w, v = ham.sorted_eigenvalue_problem(k=Nall, sigma=0.0)
    '''
        Store eigenvalue_problem
    '''
    fout = open('eigenvalue_problem.out', 'w')
    for i in xrange(Nall):
        fout.writelines( ` w[i] ` + '   ' + ` v[:, i].tolist() ` + '\n')
    ''' Make vector potential'''

    A_pot = envtb.time_propagator.vector_potential.SinSqEnvelopePulse(
        amplitude_E0=laser_amp,
        frequency=laser_freq,
        Nc=Nc,
        cep=CEP,
        direction=direct)

    import pypar

    proc = pypar.size()  # Number of processes as specified by mpirun
    myid = pypar.rank()  # Id of this process (myid in [0, proc-1])
    node = pypar.get_processor_name()  # Host name on which current process is running

    Nthread = Nall / proc

    N_range = range(myid * Nthread, (myid + 1) * Nthread, 10)

    for Nstate in N_range:

        wf_out = open('wave_functions_%(Nstate)d.out' % vars(), 'w')
        expansion_out = open('expansion_%(Nstate)d.out' % vars(), 'w')
        coords_out = open('coords_current_%(Nstate)d.out' % vars(), 'w')
        dipole_out = open('dipole_%(Nstate)d.out' % vars(), 'w')

        dt_new = dt
        NK_new = NK
        time_counter = 0.0
        '''initialize wave function
        create wave function from file (WaveFunction(coords=ham.coords).wave_function_from_file),
        wave function from eigenstate (WaveFunction(vec=v[:, Nstate],coords=ham.coords)) or
        create Gaussian wave packet (GaussianWavePacket(coords=ham.coords, ic=ic, p0=[0.0, 1.5], sigma=7.))
        '''
        #wf_final = envtb.time_propagator.wave_function.WaveFunction(coords=ham.coords)
        #time_counter = wf_final.wave_function_from_file('wave_functions_0.out')
        wf_final = envtb.time_propagator.wave_function.WaveFunction(
            vec=v[:, Nstate], coords=ham.coords)
        ##ic = Nx/2 * Ny + Ny/2
        ##wf_final = envtb.time_propagator.wave_function.GaussianWavePacket(
        ##        ham.coords, ic, p0=[0.0, 1.5], sigma=7.)
        #maxel = max(wf_final.wf1d)

        wf_final.save_wave_function_data(wf_out, time_counter)

        import time
        '''main loop'''
        for i in xrange(frame_num):

            #print 'frame %(i)d' % vars()
            time_counter += dt_new

            st = time.time()
            ham2 = ham.apply_vector_potential(A_pot(time_counter))
            #print 'efficiency ham2', time.time() - st

            #print 'time', time_counter, 'A', A_pot(time)
            st = time.time()
            wf_init = wf_final
            wf_final, dt_new, NK_new = propagate_wave_function(wf_init,
                                                               ham2,
                                                               NK=NK_new,
                                                               dt=dt_new,
                                                               maxel=None,
                                                               regime='TSC',
                                                               alpha=0.7)
            #file_out = directory+'f%03d_2d.png' % i)
            #print 'efficiency lanz', time.time() - st

            if np.mod(i, 10) == 0:
                wf_final.save_wave_function_data(wf_out, time_counter)
                wf_final.save_wave_function_expansion(expansion_out, v)
                wf_final.save_coords_current(coords_out, A_pot(time_counter))

        wf_out.close()
        expansion_out.close()
        coords_out.close()
        dipole_out.close()

    pypar.finalize()

    return None
Example #30
            result = 'X'+result
            pypar.send(result, destination=0, tag=WORKTAG)
            print '[SLAVE %d]: sent result "%s" to node %d'\
            %(MPI_myid, result, 0)



if __name__ == '__main__':
    MPI_myid =    pypar.rank()
    MPI_numproc = pypar.size()
    MPI_node =    pypar.get_processor_name()

    workList = ('_dummy_', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j')
    numWorks = len(workList) - 1

    #FIXME, better control here
    if MPI_numproc > numWorks or MPI_numproc < 2:
        pypar.finalize()
        if MPI_myid == 0:
              print 'ERROR: Number of processors must be in the interval [2,%d].'%numWorks
              sys.exit(-1)

    if MPI_myid == 0:
        master()
    else:
        slave()

    pypar.finalize()
    print 'MPI environment finalized.'
    
Example #31
def main():
    #--------------------#
    # server code
    #--------------------#
    if rank == 0:
        print 'server running on ', procname

        opts = task(sys.argv)

        opts.printruninfo()

        sendtoall(('Start', sys.argv))
        server = serverdata(opts)

        #set up the collector and generator
        start = time.time()

        collector = resultcollector(server)
        end = time.time()
        print end-start
        
        jobs = jobgenerator(server)

        numjobsreceived = 0
        #begin distributing work
        for proc in xrange(1, min(numnodes, jobs.numjobs+1)):
            job = jobs.next(proc)
            pypar.send(('job',job), proc, tag=OUT)
        while numjobsreceived < jobs.jobindex:#while any job is still running
            #wait for any node to send a result
            msg, status = pypar.receive(pypar.any_source, return_status=True, tag=RETURN)
            numjobsreceived += 1
            proc, response = msg

            if jobs.hasnext(proc):#see if there is more work to be done
                job = jobs.next(proc)
                pypar.send(('job',job), proc, tag=OUT)#send it to the node that just completed

            #combine the results *after* sending the new job
            #(this way the worker can proceed while the results are being combined)
            collector.collect(response)


        #all jobs collected, kill the workers
        sendtoall(('Done', 0))

        #finish up the computation
        collector.finish()
        
    #--------------------#    
    # worker code
    #--------------------#
    else:
        while True:
            start = time.time()
            (code, msg), status = pypar.receive(0, return_status=True, tag=OUT)
            end = time.time()
            print 'waiting', end-start
            if code == 'Done':#all work is done
                opts.printruninfo()
                break
            elif code == 'Die':#abnormal exit
                break
            elif code == 'Start':
                opts = task(msg)
                sys.stdout = open(opts.logprefix+'%02d.log'%rank, 'w') #logfile
                print 'client', rank, 'running on', procname                
            else:
                start = time.time()
                jobnum, job = msg
                print jobnum
                result = opts.dojob(job)#do the job
                end = time.time()
                print 'working',msg[0], end-start

                start = time.time()
                pypar.send((rank, (jobnum, result)), 0, tag=RETURN)#return the result to the server
                end = time.time()
                print 'sending', end-start

    #------------------#
    #end of parallel code
    pypar.barrier()
    pypar.finalize()
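OUT and RETURN are message tags defined elsewhere in this script and are not shown in the example; a minimal sketch of the assumed definitions (the concrete values are illustrative, any two distinct tags work):

OUT = 1      # server -> worker messages: ('Start', argv), ('job', ...), ('Done', 0), ('Die', reason)
RETURN = 2   # worker -> server messages: (rank, (jobnum, result))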
Example #32
        res = p_dot_all(v, v)
        #import time
        #time.sleep(p.rank()*2+1)
        print p.rank(), res

    if False:
        s = 0
        for i in xrange(100):
            r = p.rank()
            r = broadcast(r)
            s += (r + 1)
            p.barrier()
        print "%d %d" % (p.rank(), s)

    if False:
        m = None
        v = None
        if root():
            m = eye_matrix(3000)
            v = range(3000)
        r = p_mv(m, v)
        if root():
            print r

    if root():
        end = p.time()
        total = end - start
        print "total time: %.14f" % total

    p.finalize()
Example #33
        x[0,:]=changebase(i)
        x[1,:]=getNextState(x[0,:])
        tmp=run(x)
        if tmp in data:
            data[tmp]+=1
        else:
            data[tmp]=1
    print 'time of '+str(samplesize)+' calculations '+ str((time.time() - start_time)/60)+' minutes'
    print data
main()

# For parallel usage on a cluster or multi core computer uncomment below
# and comment main() above.
# Must have pypar installed; uses a "stepping" of 100, which means the job is
# split up into batches of 100 across the processors
"""
#Initialise
t = pypar.time()
P = pypar.size()
p = pypar.rank()
processor_name = pypar.get_processor_name()
# Block stepping
stepping = 100
samplesize = int(end) - int(start)
B = samplesize/stepping +10 # Number of blocks

print 'Processor %d initialised on node %s' % (p, processor_name)
assert P > 1, 'Must have at least one slave'
assert B > P - 1, 'Must have more work packets than slaves'

    #test_lock(Nmpi,fields[:2],'xy')
    #test_lock(Nmpi,fields[:2],'xyz')

    # three fields
    #test_lock(Nmpi,fields[:3])
    #test_lock(Nmpi,fields[:3],'x')
    #test_lock(Nmpi,fields[:3],'xy')
    #test_lock(Nmpi,fields[:3],'xyz')

    Nmpi = (2,2,2)

    # one field
    #test_lock(Nmpi,fields[:1])
    #test_lock(Nmpi,fields[:1],'x')
    #test_lock(Nmpi,fields[:1],'xy')
    #test_lock(Nmpi,fields[:1],'xyz')

    # two fields
    #test_lock(Nmpi,fields[:2])
    #test_lock(Nmpi,fields[:2],'x')
    #test_lock(Nmpi,fields[:2],'xy')
    #test_lock(Nmpi,fields[:2],'xyz')

    # three fields
    #test_lock(Nmpi,fields[:3])
    #test_lock(Nmpi,fields[:3],'x')
    #test_lock(Nmpi,fields[:3],'xy')
    #test_lock(Nmpi,fields[:3],'xyz')

    mpi.finalize()
Example #35
 def finalize(self):
    pypar.finalize()
Example #36
def main():
    EMAN.appinit(sys.argv)
    if sys.argv[-1].startswith("usefs="):
        sys.argv = sys.argv[:-1]  # remove the runpar fileserver info

    (options, rawimage, refmap) = parse_command_line()

    sffile = options.sffile
    verbose = options.verbose
    shrink = options.shrink
    mask = options.mask
    first = options.first
    last = options.last
    scorefunc = options.scorefunc

    projfile = options.projection
    output_ptcls = options.update_rawimage
    cmplstfile = options.cmplstfile
    ortlstfile = options.ortlstfile
    startSym = options.startSym
    endSym = options.endSym

    if not options.nocmdlog:
        pid = EMAN.LOGbegin(sys.argv)
        EMAN.LOGInfile(pid, rawimage)
        EMAN.LOGInfile(pid, refmap)
        if projfile:
            EMAN.LOGOutfile(pid, projfile)
        if output_ptcls:
            EMAN.LOGOutfile(pid, output_ptcls)
        if cmplstfile:
            EMAN.LOGOutfile(pid, cmplstfile)
        if ortlstfile:
            EMAN.LOGOutfile(pid, ortlstfile)

    ptcls = []
    if not (mpi or pypar) or ((mpi and mpi.rank == 0) or (pypar and pypar.rank() == 0)):
        ptcls = EMAN.image2list(rawimage)
        ptcls = ptcls[first:last]

        print "Read %d particle parameters" % (len(ptcls))
        # ptcls = ptcls[0:10]

    if mpi and mpi.size > 1:
        ptcls = mpi.bcast(ptcls)
        print "rank=%d\t%d particles" % (mpi.rank, len(ptcls))
    elif pypar and pypar.size() > 1:
        ptcls = pypar.broadcast(ptcls)
        print "rank=%d\t%d particles" % (pypar.rank(), len(ptcls))

    if sffile:
        sf = EMAN.XYData()
        sf.readFile(sffile)
        sf.logy()

    if not mpi or ((mpi and mpi.rank == 0) or (pypar and pypar.rank() == 0)):
        if cmplstfile and projfile:
            if output_ptcls:
                raw_tmp = output_ptcls
            else:
                raw_tmp = rawimage
            raw_tmp = rawimage
            fp = open("tmp-" + cmplstfile, "w")
            fp.write("#LST\n")
            for i in range(len(ptcls)):
                fp.write("%d\t%s\n" % (first + i, projfile))
                fp.write("%d\t%s\n" % (first + i, raw_tmp))
            fp.close()
        if (mpi and mpi.size > 1 and mpi.rank == 0) or (pypar and pypar.size() > 1 and pypar.rank() == 0):
            total_recv = 0
            if output_ptcls:
                total_recv += len(ptcls)
            if projfile:
                total_recv += len(ptcls)
            for r in range(total_recv):
                # print "before recv from %d" % (r)
                if mpi:
                    msg, status = mpi.recv()
                else:
                    msg = pypar.receive(r)
                    # print "after recv from %d" % (r)
                    # print msg, status
                d = emdata_load(msg[0])
                fname = msg[1]
                index = msg[2]
                d.writeImage(fname, index)
                print "wrtie %s %d" % (fname, index)
            if options.ortlstfile:
                solutions = []
                for r in range(1, mpi.size):
                    msg, status = mpi.recv(source=r, tag=r)
                    solutions += msg

                def ptcl_cmp(x, y):
                    eq = cmp(x[0], y[0])
                    if not eq:
                        return cmp(x[1], y[1])
                    else:
                        return eq

                solutions.sort(ptcl_cmp)
    if (not mpi or (mpi and ((mpi.size > 1 and mpi.rank > 0) or mpi.size == 1))) or (
        not pypar or (pypar and ((pypar.size() > 1 and pypar.rank() > 0) or pypar.size() == 1))
    ):
        map3d = EMAN.EMData()
        map3d.readImage(refmap, -1)
        map3d.normalize()
        if shrink > 1:
            map3d.meanShrink(shrink)
        map3d.realFilter(0, 0)  # threshold, remove negative pixels

        imgsize = map3d.ySize()

        img = EMAN.EMData()

        ctffilter = EMAN.EMData()
        ctffilter.setSize(imgsize + 2, imgsize, 1)
        ctffilter.setComplex(1)
        ctffilter.setRI(1)

        if (mpi and mpi.size > 1) or (pypar and pypar.size() > 1):
            ptclset = range(mpi.rank - 1, len(ptcls), mpi.size - 1)
        else:
            ptclset = range(0, len(ptcls))

        if mpi:
            print "Process %d/%d: %d/%d particles" % (mpi.rank, mpi.size, len(ptclset), len(ptcls))

        solutions = []
        for i in ptclset:
            ptcl = ptcls[i]
            e = EMAN.Euler(ptcl[2], ptcl[3], ptcl[4])
            dx = ptcl[5] - imgsize / 2
            dy = ptcl[6] - imgsize / 2
            print "%d\talt,az,phi=%8g,%8g,%8g\tx,y=%8g,%8g" % (
                i + first,
                e.alt() * 180 / pi,
                e.az() * 180 / pi,
                e.phi() * 180 / pi,
                dx,
                dy,
            ),

            img.readImage(ptcl[0], ptcl[1])
            img.setTAlign(-dx, -dy, 0)
            img.setRAlign(0, 0, 0)
            img.rotateAndTranslate()  # now img is centered
            img.applyMask(int(mask - max(abs(dx), abs(dy))), 6, 0, 0, 0)
            if img.hasCTF():
                fft = img.doFFT()

                ctfparm = img.getCTF()
                ctffilter.setCTF(ctfparm)
                if options.phasecorrected:
                    if sffile:
                        ctffilter.ctfMap(64, sf)  # Wiener filter with 1/CTF (no sign) correction
                else:
                    if sffile:
                        ctffilter.ctfMap(32, sf)  # Wiener filter with 1/CTF (including sign) correction
                    else:
                        ctffilter.ctfMap(2, EMAN.XYData())  # flip phase

                fft.mult(ctffilter)
                img2 = fft.doIFT()  # now img2 is the CTF-corrected raw image

                img.gimmeFFT()
                del fft
            else:
                img2 = img

            img2.normalize()
            if shrink > 1:
                img2.meanShrink(shrink)
            # if sffile:
            # 	snrcurve = img2.ctfCurve(9, sf)	# absolute SNR
            # else:
            # 	snrcurve = img2.ctfCurve(3, EMAN.XYData())		# relative SNR

            e.setSym(startSym)
            maxscore = -1e30  # the larger the better
            scores = []
            for s in range(e.getMaxSymEl()):
                ef = e.SymN(s)
                # proj = map3d.project3d(ef.alt(), ef.az(), ef.phi(), -6)		# Wen's direct 2D accumulation projection
                proj = map3d.project3d(
                    ef.alt(), ef.az(), ef.phi(), -1
                )  # Pawel's fast projection, ~3 times faster than mode -6 with 216^3
                # don't use mode -4, it modifies its own data
                # proj2 = proj
                proj2 = proj.matchFilter(img2)
                proj2.applyMask(int(mask - max(abs(dx), abs(dy))), 6, 0, 0, 0)
                if scorefunc == "ncccmp":
                    score = proj2.ncccmp(img2)
                elif scorefunc == "lcmp":
                    score = -proj2.lcmp(img2)[0]
                elif scorefunc == "pcmp":
                    score = -proj2.pcmp(img2)
                elif scorefunc == "fsccmp":
                    score = proj2.fscmp(img2, [])
                elif scorefunc == "wfsccmp":
                    score = proj2.fscmp(img2, snrcurve)
                if score > maxscore:
                    maxscore = score
                    best_proj = proj2
                    best_ef = ef
                    best_s = s
                scores.append(score)
                # proj2.writeImage("proj-debug.img",s)
                # print "\tsym %2d/%2d: euler=%8g,%8g,%8g\tscore=%12.7g\tbest=%2d euler=%8g,%8g,%8g score=%12.7g\n" % \
                # 		   (s,60,ef.alt()*180/pi,ef.az()*180/pi,ef.phi()*180/pi,score,best_s,best_ef.alt()*180/pi,best_ef.az()*180/pi,best_ef.phi()*180/pi,maxscore)
            scores = Numeric.array(scores)
            print "\tbest=%2d euler=%8g,%8g,%8g max score=%12.7g\tmean=%12.7g\tmedian=%12.7g\tmin=%12.7g\n" % (
                best_s,
                best_ef.alt() * 180 / pi,
                best_ef.az() * 180 / pi,
                best_ef.phi() * 180 / pi,
                maxscore,
                MLab.mean(scores),
                MLab.median(scores),
                MLab.min(scores),
            )
            if projfile:
                best_proj.setTAlign(dx, dy, 0)
                best_proj.setRAlign(0, 0, 0)
                best_proj.rotateAndTranslate()

                best_proj.set_center_x(ptcl[5])
                best_proj.set_center_y(ptcl[6])
                best_proj.setRAlign(best_ef)
                # print "before proj send from %d" % (mpi.rank)

                if mpi and mpi.size > 1:
                    mpi.send((emdata_dump(best_proj), projfile, i + first), 0)
                elif pypar and pypar.size() > 1:
                    pypar.send((emdata_dump(best_proj), projfile, i + first), 0)
                # print "after proj send from %d" % (mpi.rank)
                else:
                    best_proj.writeImage(projfile, i + first)

            img2.setTAlign(0, 0, 0)
            img2.setRAlign(best_ef)
            img2.setNImg(1)
            # print "before raw send from %d" % (mpi.rank)
            if output_ptcls:
                if mpi and mpi.size > 1:
                    mpi.send((emdata_dump(img2), output_ptcls, i + first), 0)
                elif pypar and pypar.size() > 1:
                    pypar.send((emdata_dump(img2), output_ptcls, i + first), 0)
                # print "after raw send from %d" % (mpi.rank)
                else:
                    img2.writeImage(output_ptcls, i + first)

            solutions.append((ptcl[0], ptcl[1], best_ef.alt(), best_ef.az(), best_ef.phi(), ptcl[5], ptcl[6]))
        if mpi and (mpi.size > 1 and mpi.rank > 0):
            mpi.send(solutions, 0, tag=mpi.rank)

    if mpi:
        mpi.barrier()
    elif pypar:
        pypar.barrier()
    if mpi:
        mpi.finalize()
    elif pypar:
        pypar.finalize()

    if options.cmplstfile:
        os.rename("tmp-" + cmplstfile, cmplstfile)
    if options.ortlstfile:
        lFile = open(options.ortlstfile, "w")
        lFile.write("#LST\n")
        for i in solutions:
            lFile.write(
                "%d\t%s\t%g\t%g\t%g\t%g\t%g\n"
                % (i[1], i[0], i[2] * 180.0 / pi, i[3] * 180.0 / pi, i[4] * 180.0 / pi, i[5], i[6])
            )
        lFile.close()

    if not options.nocmdlog:
        EMAN.LOGend()
Example #37
def run_multiple_windfields(scenario,
                            windfield_directory=None,
                            hazard_output_folder=None,
                            dircomment=None,
                            echo=False,
                            verbose=True):
    """Run volcanic ash impact model for multiple wind fields.

    The wind fields are assumed to be in the subfolder specified by windfield_directory,
    to have the extension *.profile and to follow the format used with scenarios.

    This function makes use of Open MPI and Pypar to execute in parallel but can also run sequentially.
    """

    try:
        import pypar
    except:
        P = 1
        p = 0
        processor_name = os.uname()[1]

        print 'Pypar could not be imported. Running sequentially on node %s' % processor_name,
    else:
        time.sleep(1)
        P = pypar.size()
        p = pypar.rank()
        processor_name = pypar.get_processor_name()

        print 'Processor %d initialised on node %s' % (p, processor_name)

        pypar.barrier()

    if p == 0:

        # Put logs along with the results
        logdir = os.path.join(hazard_output_folder, 'logs')
        makedir(logdir)

        header('Hazard modelling using multiple wind fields')
        print '*  Wind profiles obtained from: %s' % windfield_directory
        print '*  Scenario results stored in:  %s' % hazard_output_folder
        print '*  Log files:'

        t_start = time.time()

        # Communicate hazard output directory name to all nodes to ensure they have exactly the same time stamp.
        for i in range(P):
            pypar.send((hazard_output_folder), i)
    else:
        # Receive correctly timestamped output directory names
        hazard_output_folder = pypar.receive(0)
        logdir = os.path.join(hazard_output_folder, 'logs')

    try:
        name = os.path.splitext(scenario)[0]
    except:
        name = 'run'

    # Wait until log dir has been created
    pypar.barrier()

    params = get_scenario_parameters(scenario)

    # Start processes staggered to avoid race conditions for disk access (otherwise it is slow to get started)
    time.sleep(2 * p)

    # Logging
    s = 'Proc %i' % p
    print '     %s -' % string.ljust(s, 8),
    AIM_logfile = os.path.join(logdir, 'P%i.log' % p)
    start_logging(filename=AIM_logfile, echo=False)

    # Get cracking
    basename, _ = os.path.splitext(scenario)
    count_local = 0
    count_all = 0
    for i, file in enumerate(os.listdir(windfield_directory)):

        count_all += 1

        # Distribute jobs cyclically to processors
        if i % P == p:

            if not file.endswith('.profile'):
                continue

            count_local += 1

            windfield = '%s/%s' % (windfield_directory, file)
            windname, _ = os.path.splitext(file)
            header('Computing event %i on processor %i using wind field: %s' %
                   (i, p, windfield))

            if dircomment is None:
                dircomment = params['eruption_comment']

            # Override or create parameters derived from native Fall3d wind field
            params['wind_profile'] = windfield
            params['wind_altitudes'] = get_layers_from_windfield(
                windfield)  # FIXME: Try to comment this out.
            params['Meteorological_model'] = 'profile'

            if hazard_output_folder is None:
                hazard_output_folder = basename + '_hazard_outputs'

            if p == 0:
                print 'Storing multiple outputs in directory: %s' % hazard_output_folder

            # Run scenario
            aim = _run_scenario(params,
                                timestamp_output=True,
                                dircomment=dircomment + '_run%i_proc%i' %
                                (i, p))

            # Make sure folder is present and can be shared by group
            makedir(hazard_output_folder)
            s = 'chmod -R g+w %s' % hazard_output_folder
            run(s)

            # Copy result file to output folder
            result_file = aim.scenario_name + '.res.nc'
            newname = aim.scenario_name + '.%s.res.nc' % windname  # Name after wind file
            s = 'cp %s/%s %s/%s' % (aim.output_dir, result_file,
                                    hazard_output_folder, newname)
            run(s)

            # Create projectionfile in hazard output
            if i == 0:
                s = 'cp %s %s/%s' % (aim.projection_file, hazard_output_folder,
                                     'HazardMaps.res.prj')
                run(s)

            # Clean up outputs from this scenario
            print 'P%i: Cleaning up %s' % (p, aim.output_dir)
            s = '/bin/rm -rf %s' % aim.output_dir
            run(s)

    print 'Processor %i done %i windfields' % (p, count_local)
    print 'Outputs available in directory: %s' % hazard_output_folder

    pypar.barrier()
    if p == 0:
        print 'Parallel simulation finished %i windfields in %i seconds' % (
            count_all, time.time() - t_start)

    pypar.finalize()
Example #38
import pypar                                       # Import module and initialise MPI 

proc = pypar.size()                                # Number of processes as specified by mpirun
myid = pypar.rank()                                # Id of this process (myid in [0, proc-1])
node = pypar.get_processor_name()                  # Host name on which current process is running

print 'I am proc %d of %d on node %s' %(myid, proc, node)

if myid == 0:                                      # Actions for process 0:
  msg = 'P0'  
  pypar.send(msg, destination=1)                   # Send message to proces 1 (right hand neighbour)
  msg = pypar.receive(source=proc-1)               # Receive message from last process
      
  print 'Processor 0 received message "%s" from processor %d' %(msg, proc-1)

else:                                              # Actions for all other processes:

  source = myid-1                                  # Source is the process to the left
  destination = (myid+1)%proc                      # Destination is process to the right
                                                   # wrapped so that last processor will 
                                                   # send back to process 0
  
  msg = pypar.receive(source)                      # Receive message from source 
  msg = msg + 'P' + str(myid)                      # Update message     
  pypar.send(msg, destination)                     # Send message to destination   

pypar.finalize()                                   # Stop MPI 
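A usage note for the ring example above, assuming the snippet is saved as ring.py (the file name is illustrative):

# Launch one process per MPI rank, e.g.:
#   mpirun -np 4 python ring.py
# Process 0 starts the message 'P0'; every other process appends 'P<rank>' and
# forwards it, so with 4 processes rank 0 finally receives 'P0P1P2P3' from rank 3.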
Example #39
		print p.rank(), res

	if False:
		s = 0
		for i in xrange(100):
			r = p.rank()
			r = broadcast(r)
			s += (r + 1)
			p.barrier()
		print "%d %d" % ( p.rank(), s )

	if False:
		m = None
		v = None
		if root():
			m = eye_matrix(3000)
			v = range(3000)
		r = p_mv(m,v)
		if root():
			print r

	if root():
		end = p.time()
		total = end - start
		print "total time: %.14f" % total
			
	p.finalize()
		
		
		
Example #40
    from pypar_balancer import PyparWork, PyparBalancer

    NUM_NODES=pp.size()
    
    if NUM_NODES > 1:
        HAVE_MPI=1  # we have pypar, and we're running with more than one node
        
    if DEBUG:
        if MY_RANK==0:
            if HAVE_PYPAR and HAVE_MPI:
                print "Running full MPI"
            elif HAVE_PYPAR:
                print "MPI available, but not enough nodes for master/slave"

    if HAVE_PYPAR and not HAVE_MPI:
        pp.finalize() # not enough nodes to actually run master/slave... shut down MPI now.
        
except:
    if DEBUG:
        import traceback
        traceback.print_exc()
        if HAVE_PYPAR and HAVE_MPI:
            print "Running full MPI"
        elif HAVE_PYPAR:
            print "MPI available, but not enough nodes for master/slave"
        else:
            print "No MPI."
        
if HAVE_MPI:
    class GenericMPI (PyparWork):
    
Example #41
def finalize():
    pypar.finalize()
Example #42
  mr2 = mr.copy()
  mr2.reduce(output)
  fp.close()
  mr2.destroy()

# stats to screen
# include stats on number of nonzeroes per row

if me == 0:
  print order,"rows in matrix"
  print ntotal,"nonzeroes in matrix"

mr.reduce(nonzero)
mr.collate()
mr.reduce(degree)
mr.collate()
mr.reduce(histo)
mr.gather(1)
mr.sort_keys(ncompare)
total = 0
mr.map_kv(mr,stats)
if me == 0: print order-total,"rows with 0 nonzeroes"

if me == 0:
  print "%g secs to generate matrix on %d procs in %d iterations" % \
        (tstop-tstart,nprocs,niterate)

mr.destroy()
  
pypar.finalize()
Example #43
        value= defaultValue;
    return value;



if __name__=='__main__':
    
    dsetname= "oxMini20_v2";
    if len(sys.argv)>1: dsetname= sys.argv[1];
    configFn= "../src/ui/web/config/config.cfg";
    if len(sys.argv)>2: configFn= sys.argv[2];
    
    config= ConfigParser.ConfigParser();
    config.read( configFn );
    
    RootSIFT= getOptional( lambda: config.getboolean(dsetname, 'RootSIFT'), True );

    clstFn= os.path.expanduser( config.get(dsetname, 'clstFn') );
    trainFilesPrefix= os.path.expanduser( config.get(dsetname, 'trainFilesPrefix') );
    pntsFn= trainFilesPrefix + "descs.e3bin";
    
    vocSize= getOptional( lambda: config.getint(dsetname, 'vocSize'), 100 );
    clusterNumIteration = getOptional( lambda: config.getint(dsetname, 'clusterNumIteration'), 30 );
    seed= 43;
  
    compute_clusters(clstFn, pntsFn, vocSize,
                     clusterNumIteration, approx=True, seed= seed,
                     featureWrapper= ("hell" if RootSIFT else None) );
    
    mpi.finalize();