Example #1
    def processorName(self):
        if Environment.isParallel:
            import pypar
            return pypar.get_processor_name()
        else:
            return "localhost"
Example #2
    def __init__(self):
        self.proc = pypar.size()
        self.myid = pypar.rank()
        self.node = pypar.get_processor_name()
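Note: pypar initialises MPI the first time it is imported (see the comment in Example #16 below), so fragments like Example #2 that call pypar.size() without an import assume the module was already imported at file level.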
Example #3
   def __init__(self, LookPos, LookDir, LookYaw, WindowRows = 40, WindowCols = 40):
      self.LookPos = np.array(LookPos)
      self.LookDir = np.array(LookDir)
      self.Yaw = LookYaw
      self.WindowRows = WindowRows
      self.WindowCols = WindowCols
      rhop = np.linalg.norm(np.array([LookDir[0],LookDir[1]]))
      self.__Lon = math.atan2(LookDir[1], LookDir[0])
      self.__Lat = math.atan2(LookDir[2],rhop)
      self.start = time.time()
      
      # initialize the MPI
      self.numproc = pypar.size()
      self.myid =    pypar.rank()
      self.node =    pypar.get_processor_name()
      
      if self.myid != self.numproc - 1:
         self.Rows = self.WindowRows/self.numproc
         self.RowEnd = self.WindowRows/self.numproc * (self.myid+1) - 1
      else:
         self.Rows = self.WindowRows/self.numproc + self.WindowRows%self.numproc
         self.RowEnd = self.WindowRows

      self.RowStart = self.WindowRows/self.numproc * self.myid
      self.Window = np.zeros(shape = (self.Rows, self.WindowCols))
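The constructor above block-partitions WindowRows across the MPI processes, with the last rank absorbing the remainder. The same arithmetic as a standalone sketch (hypothetical helper name; // makes the integer division explicit):

def block_partition(nrows, nprocs, rank):
    # each rank owns `base` consecutive rows, starting at `start`;
    # the last rank also takes the nrows % nprocs leftover rows
    base = nrows // nprocs
    start = base * rank
    nlocal = base + (nrows % nprocs if rank == nprocs - 1 else 0)
    return start, nlocal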
Example #4
    def __init__(self, is_parallel=True):
        """
        Use is_parallel=False to disable parallelism, e.g. when running
        several scenarios.
        """

        if is_parallel:
            try:
                import pypar
            except ImportError:
                self._not_parallel()
            else:
                if pypar.size() >= 2:
                    self.rank = pypar.rank()
                    self.size = pypar.size()
                    self.node = pypar.get_processor_name()
                    self.is_parallel = True
                    self.file_tag = FILE_TAG_DELIMITER + str(self.rank)
                    self.log_file_tag = FILE_TAG_DELIMITER + str(self.rank)
                else:
                    self._not_parallel()
        else:
            self._not_parallel()

        # Some constants to identify messages
        self.load_event_set = 0
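The _not_parallel() fallback is not shown in this snippet; a plausible sketch, mirroring the attributes set in the parallel branch (socket.gethostname() is an assumption, not taken from the source):

    def _not_parallel(self):
        # hypothetical serial fallback
        import socket
        self.rank = 0
        self.size = 1
        self.node = socket.gethostname()
        self.is_parallel = False
        self.file_tag = ''
        self.log_file_tag = ''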
Example #6
    def __init__(self, work, debug=False):
        self.numprocs = pypar.size()           # Number of processes as specified by mpirun
        self.myid = pypar.rank()               # Id of this process (myid in [0, numprocs-1])
        self.node = pypar.get_processor_name() # Host name on which current process is running
        self.debug = debug
        self.work = work

        # Added by Ole Nielsen, 15 May 2008
        if self.numprocs < 2:
            msg = 'PyparBalancer must run on at least 2 processes'
            msg += ' for the Master Slave paradigm to make sense.'
            raise Exception, msg

        
        self.work.uplink(self, self.myid, self.numprocs, self.node)
        
        self.numworks = self.work.getNumWorkItems()
        print "PyparBalancer initialised on proc %d of %d on node %s" %(self.myid, self.numprocs, self.node)
Example #7
    def __init__(self, aWorkList):

        self.WORKTAG = 1
        self.DIETAG = 2

        self.MPI_myid = pypar.rank()
        self.MPI_numproc = pypar.size()
        self.MPI_node = pypar.get_processor_name()

        self.works = aWorkList
        self.numWorks = len(self.works)

        self.reduceFunction = None
        self.mapFunction = None
        self.result = None

        if self.MPI_numproc < 2:
            pypar.finalize()
            if self.MPI_myid == 0:
                raise Exception, 'ERROR: Number of processors must be at least 2.'
Example #10
def use_w90_example(Ny=30, Nx=30, magnetic_B=None):
    

    #pypar.finalize()  
    # Ny: number of atoms in slice is 2*(Ny+1)
    ham_w90 = define_zigzag_ribbon_w90(
        "../../exampledata/02_graphene_3rdnn/graphene3rdnnlist.dat", 
        Ny, Nx, magnetic_B=magnetic_B)
    
    ham = envtb.ldos.hamiltonian.HamiltonianFromW90(ham_w90, Nx)
    #print ham.mtot
    """
    i0 = ham.Nx / 2
    j0 = ham.Ny / 2
    ic = (i0 - 1) * ham.Ny + (j0-1)
   
    potential = envtb.ldos.potential.Potential2DFromFunction(
        lambda x: 0.01 * (ham.coords[ic][1] - x[1])**2 + 0.01 * \
                 (ham.coords[ic][0] - x[0])**2)
    
    ham2 = ham.apply_potential(potential)
    envtb.ldos.plotter.Plotter().plot_potential(ham2, ham)
    plt.axes().set_aspect('equal')
    plt.show()
    """
    import pypar
    
    proc = pypar.size()
    myid = pypar.rank()
    node = pypar.get_processor_name()
    print('I am proc %d of %d on node %s' % (myid, proc, node))
    
    local_dos = envtb.ldos.local_density.LocalDensityOfStates(ham)
    
    envtb.ldos.plotter.Plotter().plot_density(local_dos(0.7), ham.coords)
    plt.title('E = 0.7')
    plt.axes().set_aspect('equal')
    plt.show()
    
    return None
Example #12
    def __init__(self):
        """
        Set up parallel processing if pypar is available and more than
        one process is running; otherwise fall back to serial execution.
        """

        try:
            import pypar  # pylint: disable=W0404
        except ImportError:
            self._not_parallel()
        else:
            if pypar.size() >= 2:
                self.rank = pypar.rank()
                self.size = pypar.size()
                self.node = pypar.get_processor_name()
                self.is_parallel = True
                self.file_tag = str(self.rank)
                self.log_file_tag = str(self.rank)

                # Ensure a clean MPI exit
                atexit.register(pypar.finalize)
            else:
                self._not_parallel()
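Registering pypar.finalize with atexit, rather than calling it at the end of the script, guarantees a clean MPI shutdown even if the program exits early or raises; Examples #23 and #24 use the same pattern.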
Example #14
def propagate_graphene_pulse(Nx=20, Ny=20, frame_num=10, magnetic_B=None):
    """
    Since in lanczos in the exponent exp(E*t/hbar) we are using E in eV
    """
    ham = envtb.ldos.hamiltonian.HamiltonianGraphene(Nx, Ny)

    Nall = 250

    w, v = ham.sorted_eigenvalue_problem(k=Nall, sigma=0.0)
    '''
        Store eigenvalue_problem
    '''
    fout = open('eigenvalue_problem.out', 'w')
    for i in xrange(Nall):
        fout.write(repr(w[i]) + '   ' + repr(v[:, i].tolist()) + '\n')
    fout.close()
    ''' Make vector potential'''

    A_pot = envtb.time_propagator.vector_potential.SinSqEnvelopePulse(
        amplitude_E0=laser_amp,
        frequency=laser_freq,
        Nc=Nc,
        cep=CEP,
        direction=direct)

    import pypar

    proc = pypar.size()                # Number of processes as specified by mpirun
    myid = pypar.rank()                # Id of this process (myid in [0, proc-1])
    node = pypar.get_processor_name()  # Host name on which current process is running

    Nthread = Nall / proc

    N_range = range(myid * Nthread, (myid + 1) * Nthread, 10)

    for Nstate in N_range:

        wf_out = open('wave_functions_%(Nstate)d.out' % vars(), 'w')
        expansion_out = open('expansion_%(Nstate)d.out' % vars(), 'w')
        coords_out = open('coords_current_%(Nstate)d.out' % vars(), 'w')
        dipole_out = open('dipole_%(Nstate)d.out' % vars(), 'w')

        dt_new = dt
        NK_new = NK
        time_counter = 0.0
        '''initialize wave function
        create wave function from file (WaveFunction(coords=ham.coords).wave_function_from_file),
        wave function from eigenstate (WaveFunction(vec=v[:, Nstate],coords=ham.coords)) or
        create Gaussian wave packet (GaussianWavePacket(coords=ham.coords, ic=ic, p0=[0.0, 1.5], sigma=7.))
        '''
        #wf_final = envtb.time_propagator.wave_function.WaveFunction(coords=ham.coords)
        #time_counter = wf_final.wave_function_from_file('wave_functions_0.out')
        wf_final = envtb.time_propagator.wave_function.WaveFunction(
            vec=v[:, Nstate], coords=ham.coords)
        ##ic = Nx/2 * Ny + Ny/2
        ##wf_final = envtb.time_propagator.wave_function.GaussianWavePacket(
        ##        ham.coords, ic, p0=[0.0, 1.5], sigma=7.)
        #maxel = max(wf_final.wf1d)

        wf_final.save_wave_function_data(wf_out, time_counter)

        import time
        '''main loop'''
        for i in xrange(frame_num):

            #print 'frame %(i)d' % vars()
            time_counter += dt_new

            st = time.time()
            ham2 = ham.apply_vector_potential(A_pot(time_counter))
            #print 'efficiency ham2', time.time() - st

            #print 'time', time_counter, 'A', A_pot(time)
            st = time.time()
            wf_init = wf_final
            wf_final, dt_new, NK_new = propagate_wave_function(wf_init,
                                                               ham2,
                                                               NK=NK_new,
                                                               dt=dt_new,
                                                               maxel=None,
                                                               regime='TSC',
                                                               alpha=0.7)
            #file_out = directory+'f%03d_2d.png' % i)
            #print 'efficiency lanz', time.time() - st

            if np.mod(i, 10) == 0:
                wf_final.save_wave_function_data(wf_out, time_counter)
                wf_final.save_wave_function_expansion(expansion_out, v)
                wf_final.save_coords_current(coords_out, A_pot(time_counter))

        wf_out.close()
        expansion_out.close()
        coords_out.close()
        dipole_out.close()

    pypar.finalize()

    return None
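Note that Nthread = Nall / proc truncates under Python 2 integer division, so when Nall is not a multiple of the process count the trailing eigenstates are never assigned to any rank. A hedged alternative is cyclic assignment, which covers every 10th state for any process count (the stride of 10 mirrors the step used above, though the exact states visited differ from the block version):

    N_range = range(myid * 10, Nall, proc * 10)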
Example #15
        if (status.tag == DIETAG):
            print '[SLAVE %d]: received termination from node %d'\
                             %(MPI_myid, 0)
            return
        else:
            result = 'X'+result
            pypar.send(result, destination=0, tag=WORKTAG)
            print '[SLAVE %d]: sent result "%s" to node %d'\
            %(MPI_myid, result, 0)



if __name__ == '__main__':
    MPI_myid =    pypar.rank()
    MPI_numproc = pypar.size()
    MPI_node =    pypar.get_processor_name()

    workList = ('_dummy_', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j')
    numWorks = len(workList) - 1

    if MPI_numproc > numWorks or MPI_numproc < 2:
        pypar.finalize()
        if MPI_myid == 0:
            print 'ERROR: Number of processors must be in the interval [2,%d].' % numWorks
        sys.exit(-1)

    if MPI_myid == 0:
        master()
    else:
        slave()
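The matching master() is not shown in this snippet. A minimal sketch of the dispatch side, assuming pypar's any_source constant and the WORKTAG/DIETAG/workList names from the __main__ block above:

def master():
    numprocs = pypar.size()
    # hand one work item to each slave
    for worker in range(1, numprocs):
        pypar.send(workList[worker], destination=worker, tag=WORKTAG)
    # collect one result per slave
    results = []
    for _ in range(1, numprocs):
        result, status = pypar.receive(pypar.any_source, tag=WORKTAG,
                                       return_status=True)
        results.append(result)
    # tell every slave to shut down
    for worker in range(1, numprocs):
        pypar.send('', destination=worker, tag=DIETAG)
    return results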
Example #16
import pypar                                       # Import module and initialise MPI 

proc = pypar.size()                                # Number of processes as specified by mpirun
myid = pypar.rank()                                # Id of this process (myid in [0, proc-1])
node = pypar.get_processor_name()                  # Host name on which current process is running

print 'I am proc %d of %d on node %s' %(myid, proc, node)

if myid == 0:                                      # Actions for process 0:
  msg = 'P0'  
  pypar.send(msg, destination=1)                   # Send message to process 1 (right hand neighbour)
  msg = pypar.receive(source=proc-1)               # Receive message from last process
      
  print 'Processor 0 received message "%s" from processor %d' %(msg, proc-1)

else:                                              # Actions for all other processes:

  source = myid-1                                  # Source is the process to the left
  destination = (myid+1)%proc                      # Destination is process to the right,
                                                   # wrapped so that the last process
                                                   # sends back to process 0
  
  msg = pypar.receive(source)                      # Receive message from source 
  msg = msg + 'P' + str(myid)                      # Update message     
  pypar.send(msg, destination)                     # Send message to destination   

pypar.finalize()                                   # Stop MPI 
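To run the ring demo above (assuming pypar is installed and the file is saved as, say, ring.py):

    mpirun -np 4 python ring.py

With four processes, process 0 should finally report the message "P0P1P2P3" accumulated around the ring; the order of the identification lines is nondeterministic.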
Example #17
def run_multiple_windfields(scenario,
                            windfield_directory=None,
                            hazard_output_folder=None,
                            dircomment=None,
                            echo=False,
                            verbose=True):
    """Run volcanic ash impact model for multiple wind fields.

    The wind fields are assumed to be in the subfolder specified by windfield_directory,
    have the extension *.profile, and follow the format used with scenarios.

    This function makes use of Open MPI and Pypar to execute in parallel but can also run sequentially.
    """

    try:
        import pypar
    except ImportError:
        P = 1
        p = 0
        processor_name = os.uname()[1]

        print 'Pypar could not be imported. Running sequentially on node %s' % processor_name,
    else:
        time.sleep(1)
        P = pypar.size()
        p = pypar.rank()
        processor_name = pypar.get_processor_name()

        print 'Processor %d initialised on node %s' % (p, processor_name)

        pypar.barrier()

    if p == 0:

        # Put logs along with the results
        logdir = os.path.join(hazard_output_folder, 'logs')
        makedir(logdir)

        header('Hazard modelling using multiple wind fields')
        print '*  Wind profiles obtained from: %s' % windfield_directory
        print '*  Scenario results stored in:  %s' % hazard_output_folder
        print '*  Log files:'

        t_start = time.time()

        # Communicate hazard output directory name to all nodes to ensure they have exactly the same time stamp.
        for i in range(1, P):
            pypar.send(hazard_output_folder, i)
    else:
        # Receive correctly timestamped output directory names
        hazard_output_folder = pypar.receive(0)
        logdir = os.path.join(hazard_output_folder, 'logs')

    try:
        name = os.path.splitext(scenario)[0]
    except:
        name = 'run'

    # Wait until log dir has been created
    if P > 1:
        pypar.barrier()

    params = get_scenario_parameters(scenario)

    # Start processes staggered to avoid race conditions for disk access (otherwise it is slow to get started)
    time.sleep(2 * p)

    # Logging
    s = 'Proc %i' % p
    print '     %s -' % string.ljust(s, 8),
    AIM_logfile = os.path.join(logdir, 'P%i.log' % p)
    start_logging(filename=AIM_logfile, echo=False)

    # Get cracking
    basename, _ = os.path.splitext(scenario)
    count_local = 0
    count_all = 0
    for i, file in enumerate(os.listdir(windfield_directory)):

        count_all += 1

        # Distribute jobs cyclically to processors
        if i % P == p:

            if not file.endswith('.profile'):
                continue

            count_local += 1

            windfield = '%s/%s' % (windfield_directory, file)
            windname, _ = os.path.splitext(file)
            header('Computing event %i on processor %i using wind field: %s' %
                   (i, p, windfield))

            if dircomment is None:
                dircomment = params['eruption_comment']

            # Override or create parameters derived from native Fall3d wind field
            params['wind_profile'] = windfield
            params['wind_altitudes'] = get_layers_from_windfield(
                windfield)  # FIXME: Try to comment this out.
            params['Meteorological_model'] = 'profile'

            if hazard_output_folder is None:
                hazard_output_folder = basename + '_hazard_outputs'

            if p == 0:
                print 'Storing multiple outputs in directory: %s' % hazard_output_folder

            # Run scenario
            aim = _run_scenario(params,
                                timestamp_output=True,
                                dircomment=dircomment + '_run%i_proc%i' %
                                (i, p))

            # Make sure folder is present and can be shared by group
            makedir(hazard_output_folder)
            s = 'chmod -R g+w %s' % hazard_output_folder
            run(s)

            # Copy result file to output folder
            result_file = aim.scenario_name + '.res.nc'
            newname = aim.scenario_name + '.%s.res.nc' % windname  # Name after wind file
            s = 'cp %s/%s %s/%s' % (aim.output_dir, result_file,
                                    hazard_output_folder, newname)
            run(s)

            # Create projectionfile in hazard output
            if i == 0:
                s = 'cp %s %s/%s' % (aim.projection_file, hazard_output_folder,
                                     'HazardMaps.res.prj')
                run(s)

            # Clean up outputs from this scenario
            print 'P%i: Cleaning up %s' % (p, aim.output_dir)
            s = '/bin/rm -rf %s' % aim.output_dir
            run(s)

    print 'Processor %i done %i windfields' % (p, count_local)
    print 'Outputs available in directory: %s' % hazard_output_folder

    if P > 1:
        pypar.barrier()
    if p == 0:
        print 'Parallel simulation finished %i windfields in %i seconds' % (
            count_all, time.time() - t_start)

    if P > 1:
        pypar.finalize()
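The i % P == p test is the core of the cyclic (round-robin) work distribution used here; stripped of the application detail it is simply (handle and jobs are hypothetical names):

for i, job in enumerate(jobs):
    if i % P == p:  # rank p takes jobs p, p+P, p+2P, ...
        handle(job)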
Example #19
import openpiv.scaling
import cv2
import time
import multiprocessing
import os
import sys
import ctypes
import scipy.io as sio
from PIL import Image
import pypar as pp
import warnings

num_processors = pp.size()
rank = pp.rank()
node = pp.get_processor_name()

# MPI Constants
MASTER_PROCESS = 0
WORK_TAG = 1
DIE_TAG = 2

DeltaT = 1  # frames to skip
window_size = 24
overlap = 12


def PIVCompute(frame_a, frame_b, window_size=24, overlap=12):

    tmpu, tmpv, sig2noise = openpiv.pyprocess.piv(frame_a,
                                                  frame_b,
Example #22
    """this tells each worker node to exit, then kills the server process.
       this should only be called by the server node"""
    print 'abnormal exit'
    print reason
    sendtoall(('Die', 0))
    pypar.barrier()
    pypar.finalize()
    sys.exit(2)



############

rank = pypar.rank()

procname = pypar.get_processor_name()

#these are message tags
#"OUT" messages go from the server to workers
#"RETURN" messages go from the workers to the server
#the value of these variables should remain constant!
OUT = 0
RETURN = 1


 

class invalidatedgenes:
    def __init__(self, numworkers, numgenes):
        self.workerindices = []
        self.invalidatedlist = []
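The sendtoall helper used by the abort handler above is not shown; a plausible sketch, assuming the server is rank 0 and workers listen on the OUT tag:

def sendtoall(msg):
    # broadcast the same message to every worker process
    for worker in range(1, pypar.size()):
        pypar.send(msg, destination=worker, tag=OUT)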
Example #23
            Utility.enable_debugging_msgs(None)
        args_to_remove.append(arg)
    elif arg.startswith('--disableC'):
        disable_c = True
        args_to_remove.append(arg)
currdir = os.getcwd()
# We need to remove these arguments from the list before they get to unittest
#  or it will complain.
for arg in args_to_remove:
    sys.argv.remove(arg)

try:
    import pypar
    os.chdir(currdir)
    HAVE_PYPAR = True
    num_procs = pypar.size()
    my_rank = pypar.rank()
    my_host = pypar.get_processor_name()
    import atexit
    atexit.register(pypar.finalize)
except ImportError:
    os.chdir(currdir)
    HAVE_PYPAR = False
    num_procs = 1
    my_rank = 0
    import socket
    my_host = socket.gethostname()
logger.debug('Node %i is on host %s.' % (my_rank, my_host))

if my_rank == 0 and not os.path.isdir(_TEMP_DIR):
    os.mkdir(_TEMP_DIR)
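The os.chdir(currdir) calls in both branches exist because, with some versions of pypar and on some systems, importing pypar changes the current working directory; Example #24 states this explicitly in its comments.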
Example #24

#--------------------------------------------------------------
# Main program
#
MAXI  = 10         # Number of blocks 
MAXM  = 500000     # Largest block 
BLOCK = MAXM/MAXI  # Block size 

repeats = 10
msgid = 0
vanilla = 0 #Select vanilla mode (slower but general)

numprocs = pypar.size()
myid = pypar.rank()
processor_name = pypar.get_processor_name()

if myid == 0:
  # Main process - Create message, pass on, verify correctness and log timing
  #
  print "MAXM = %d, number of processors = %d" %(MAXM, numprocs)
  print "Measurements are repeated %d times for reliability" %repeats

if numprocs < 2:
  print "Program needs at least two processors - aborting\n"
  pypar.abort()
   
pypar.barrier() #Synchronize all before timing   
print "I am process %d on %s" %(myid,processor_name)

"""
import logging
logging.basicConfig()
logger = logging.getLogger('RunInParallel')

import cPickle, os, sys, traceback

# With some versions of pypar and on some systems, importing pypar changes
# the current working directory. We save it here so we can change back.
currdir = os.getcwd()
try:
    import pypar
    HAVE_PYPAR = True
    num_procs = pypar.size()
    my_rank = pypar.rank()
    my_host = pypar.get_processor_name()
    import atexit
    atexit.register(pypar.finalize)
except ImportError:
    HAVE_PYPAR = False
    num_procs = 1
    my_rank = 0
    import socket
    my_host = socket.gethostname()
os.chdir(currdir)
logger.debug('Node %i is on host %s.' % (my_rank, my_host))

class Statement:
    """
    Class for sending Python statements to workers.
    """