    if me == 0: print "ERROR: a,b,c,d must sum to 1"
    sys.exit()
if fraction >= 1.0:
    if me == 0: print "ERROR: fraction must be < 1"
    sys.exit()

random.seed(seed + me)
order = 1 << nlevels
mr = mrmpi()

# loop until desired number of unique nonzero entries

pypar.barrier()
tstart = pypar.time()

niterate = 0
ntotal = (1 << nlevels) * nnonzero
nremain = ntotal
while nremain:
    niterate += 1
    ngenerate = nremain / nprocs
    if me < nremain % nprocs: ngenerate += 1
    mr.map(nprocs, generate, None, 1)
    nunique = mr.collate()
    if nunique == ntotal: break
    mr.reduce(cull)
    nremain = ntotal - nunique

pypar.barrier()
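# A minimal sketch (not the original file's code) of the "cull" reduce
# callback used above. mrmpi's Python reduce callbacks receive
# (key, mvalue, mr), where mvalue is the list of values collated under key;
# re-adding each key exactly once discards duplicate generated entries, so
# only unique nonzeros remain to be counted.

def cull(key, mvalue, mr):
    mr.add(key, None)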
        % (args.year, args.year, args.month)))
else:
    input_files = sorted(glob.glob(
        '/panfs/scratch3/vol7/reidpr/wp-access/raw/*/*/pagecounts*gz'))

output_directory_string = '/panfs/scratch3/vol3/gfairchild/wikipedia/index/%s/%s/%s'  # YYYY, MM, DD
index_schema = '../data/index_schema.sql'

mr = mrmpi()
mr.verbosity(1)
mr.timer(1)

# get start time
pypar.barrier()
time_start = pypar.time()

# do actual work
mr.map(len(input_files), process_file)

# get stop time
pypar.barrier()
time_stop = pypar.time()

# clean up
mr.destroy()

# output stats
if pypar.rank() == 0:
    print('time to process %d files on %d procs: %g (secs)'
          % (len(input_files), pypar.size(), time_stop - time_start))
    np.arange(map_config['min_lat'], map_config['max_lat'], 5))
basemap1.m.drawparallels(
    np.arange(map_config['min_lon'], map_config['max_lon'], 5))
plt.colorbar(label='log10(Smoothed rate per cell)')
plt.legend()
figname = smoother_filename[:-4] + '_smoothed_rates_map.png'
plt.savefig(figname)

# Set up parallel execution
proc = pypar.size()  # Number of processors as specified by mpirun
myid = pypar.rank()  # Id of this process (myid in [0, proc-1])
node = pypar.get_processor_name()  # Host name on which current process is running
print 'I am proc %d of %d on node %s' % (myid, proc, node)
t0 = pypar.time()

parser = CsvCatalogueParser(catalogue_filename)  # From .csv to hmtk

# Read and process the catalogue content in a variable called "catalogue"
catalogue = parser.read_file(start_year=1965, end_year=2016)

# How many events are in the catalogue?
print "The catalogue contains %d events" % catalogue.get_number_events()

# What is the geographical extent of the catalogue?
bbox = catalogue.get_bounding_box()
print "Catalogue ranges from %.4f E to %.4f E Longitude and %.4f N to %.4f N Latitude\n" % bbox

catalogue.sort_catalogue_chronologically()
catalogue.data['magnitude']
#print elsize
noelem  = [0] * MAXI
bytes   = [0] * MAXI
avgtime = [0.0] * MAXI
mintime = [1000000.0] * MAXI
maxtime = [-1000000.0] * MAXI

if myid == 0:
    # Determine timer overhead
    cpuOH = 1.0
    for k in range(repeats):  # Repeat to get reliable timings
        t1 = pypar.time()
        t2 = pypar.time()
        if t2 - t1 < cpuOH: cpuOH = t2 - t1
    print "Timing overhead is %f seconds.\n" % cpuOH

# Pass msg circularly
for k in range(repeats):
    if myid == 0:
        print "Run %d of %d" % (k + 1, repeats)
    for i in range(MAXI):
        m = BLOCK * i + 1
        noelem[i] = m
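# A hedged sketch of the circular message pass being timed above (the loop
# body itself is not shown in this fragment): rank 0 starts the ring and
# subtracts the measured timer overhead cpuOH; every other rank forwards the
# message to its right-hand neighbour. "msg" is a stand-in name.

def time_ring(msg, myid, numproc, cpuOH):
    if myid == 0:
        t1 = pypar.time()
        pypar.send(msg, 1)
        msg = pypar.receive(numproc - 1)
        return pypar.time() - t1 - cpuOH  # round-trip time, overhead removed
    else:
        msg = pypar.receive(myid - 1)
        pypar.send(msg, (myid + 1) % numproc)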
        return 1
    return 0

def convert_to_dict(itask, key, value, mr):
    """
    Add each date/number of accesses to a Python dict for simpler operation.
    """
    timestamp_counts[key] = value

mr = mrmpi()
mr.verbosity(1)
mr.timer(1)

# get start time
pypar.barrier()
time_start = pypar.time()

# do actual work
mr.map(len(months), process_file)
mr.collate()
mr.reduce(total)

# get stop time
pypar.barrier()
time_stop = pypar.time()

# gather all results on a single processor, sort, and output
mr.gather(1)
#mr.sort_keys(compare)
timestamp_counts = dict()
mr.map_mr(mr, convert_to_dict)
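# With all (timestamp, count) pairs copied into timestamp_counts above, the
# ordering can now happen in plain Python rather than via mr.sort_keys; a
# hypothetical follow-up sketch (only rank 0 holds the data after gather(1)):

if pypar.rank() == 0:
    for timestamp in sorted(timestamp_counts):
        print '%s\t%d' % (timestamp, timestamp_counts[timestamp])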
# main program

nprocs = pypar.size()
me = pypar.rank()

if len(sys.argv) < 2:
    print "Syntax: wordfreq.py file1 file2 ..."
    sys.exit()
files = sys.argv[1:]

mr = mrmpi()

pypar.barrier()
tstart = pypar.time()

nwords = mr.map(len(files), fileread)
mr.collate()
nunique = mr.reduce(sum)

pypar.barrier()
tstop = pypar.time()

mr.sort_values(ncompare)
count = [0, 10, 0]
mr.map_kv(mr, output)

mr.gather(1)
mr.sort_values(ncompare)
count = [0, 10, 1]
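# A sketch, under mrmpi's Python calling conventions, of the callbacks the
# driver above refers to (the real fileread/sum/ncompare/output are defined
# elsewhere in wordfreq.py): map callbacks receive (itask, mr), reduce
# callbacks receive (key, mvalue, mr), and compare callbacks receive the two
# values to order.

def fileread(itask, mr):
    # emit one (word, None) pair per word in the itask-th file
    for word in open(files[itask]).read().split():
        mr.add(word, None)

def sum(key, mvalue, mr):
    # a word's count is the number of values collated under it
    # (shadows the builtin sum, matching the driver's callback name)
    mr.add(key, len(mvalue))

def ncompare(v1, v2):
    # order by descending count
    if v1 > v2: return -1
    if v1 < v2: return 1
    return 0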
        x[0,:] = samples[i,:]
        #x = samples[i,:]
        x[1,:] = getNextState(x[0,:])
        #x = np.vstack((x, getNextState(x)))
        tmp = run(x)
        if tmp in data:
            data[tmp] += 1
        else:
            data[tmp] = 1
        #data.append(run(x))
    print 'time of ' + str(np.shape(samples)[0]) + ' calculations ' + str((time.time() - start) / 60) + ' minutes'
    print data

#main()

# Initialise
t = pypar.time()
P = pypar.size()
p = pypar.rank()
processor_name = pypar.get_processor_name()

# Block stepping
stepping = 100  # Number of blocks

#print end, start
samplesize = int(end) - int(start)
print 'samplesize = ', samplesize
print 1. * samplesize / stepping
B = samplesize / stepping + 10  # Number of blocks
print 'Processor %d initialised on node %s' % (p, processor_name)

assert P > 1, 'Must have at least one slave'
kmax = 2**15  # Maximal number of iterations (=number of colors)
M = N = 700   # width = height = N
B = 24        # Number of blocks (first dim)

# Region in complex plane [-2:2]
real_min = -2.0
real_max = 1.0
imag_min = -1.5
imag_max = 1.5

# MPI controls
work_tag = 0
result_tag = 1

# Initialise
t = pypar.time()
P = pypar.size()
p = pypar.rank()
processor_name = pypar.get_processor_name()

print 'Processor %d initialised on node %s' % (p, processor_name)

assert P > 1, 'Must have at least one slave'
assert B > P - 1, 'Must have more work packets than slaves'

A = numpy.zeros((M, N), dtype='i')

if p == 0:
    # Create work pool (B blocks)
    # using balanced work partitioning
    workpool = []
    for i in range(B):
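        # Assumed completion (the fragment breaks off here): block i covers
        # the balanced row interval [Mlo, Mhi).
        Mlo, Mhi = pypar.balance(M, B, i)
        workpool.append((Mlo, Mhi))

# A hedged sketch of the slave side these work/result tags support (a
# simplified stand-in, not the file's exact code): each slave repeatedly
# fetches a block from the master, computes it, and sends the result back.
# None is assumed as the stop signal, and calc_block is a hypothetical
# per-block Mandelbrot computation.

def slave_loop(calc_block):
    while True:
        block = pypar.receive(0, tag=work_tag)
        if block is None:
            break  # no more work packets
        pypar.send(calc_block(block), 0, tag=result_tag)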
from mandelbrot import calculate_region, balance
from mandelplot import plot
import pypar

# User definable parameters
kmax = 2**15  # Maximal number of iterations (=number of colors)
M = N = 700   # width = height = N (200 or 700)

# Region in complex plane
real_min = -2.0
real_max = 1.0
imag_min = -1.5
imag_max = 1.5

# Initialise
t = pypar.time()
P = pypar.size()
p = pypar.rank()
processor_name = pypar.get_processor_name()

print 'Processor %d initialised on node %s' % (p, processor_name)

# Balanced work partitioning (row wise)
Mlo, Mhi = pypar.balance(M, P, p)
print 'p%d: [%d, %d], Interval length=%d' % (p, Mlo, Mhi, Mhi - Mlo)

# Parallel computation
A = calculate_region(real_min, real_max, imag_min, imag_max, kmax,
                     M, N, Mlo=Mlo, Mhi=Mhi)
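# pypar.balance(M, P, p) returns the half-open row interval [Mlo, Mhi) that
# processor p owns when M rows are split as evenly as possible over P
# processors. A sketch of one common way to compute such a partition (an
# illustration only, not necessarily pypar's exact implementation):

def balance_rows(N, P, p):
    L, K = divmod(N, P)   # base strip length, number of one-row leftovers
    if p < K:
        lo = p * (L + 1)  # the first K strips each take one extra row
        return lo, lo + L + 1
    lo = K * (L + 1) + (p - K) * L
    return lo, lo + L

# e.g. balance_rows(700, 3, 0) == (0, 234); strips differ by at most one row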
from mandelbrot import calculate_region_cyclic
from mandelplot import plot
import pypar

# User definable parameters
kmax = 2**15  # Maximal number of iterations (=number of colors)
M = N = 700   # width = height = N (200 or 700)

# Region in complex plane [-2:2]
real_min = -2.0
real_max = 1.0
imag_min = -1.5
imag_max = 1.5

# Initialise
t = pypar.time()
P = pypar.size()
p = pypar.rank()
processor_name = pypar.get_processor_name()
print 'Processor %d initialised on node %s' % (p, processor_name)

# Parallel computation
A = calculate_region_cyclic(real_min, real_max, imag_min, imag_max,
                            kmax, M, N, p, P)

print 'Processor %d: time = %.2f' % (p, pypar.time() - t)

# Communication phase
if p == 0:
    for d in range(1, P):
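        # Assumed completion (the fragment breaks off here): with cyclic
        # partitioning each processor filled its own rows of A and left the
        # rest zero, so rank 0 can merge the pieces by elementwise addition
        # while every other rank sends its array to rank 0.
        A = A + pypar.receive(d)
else:
    pypar.send(A, 0)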
def empty_matrix(m, n):
    res = [[None for i in xrange(n)] for i in xrange(m)]
    return res

def eye_matrix(m):
    res = [[0.0 for i in xrange(m)] for i in xrange(m)]
    for i in xrange(m):
        res[i][i] = 1.0
    return res

if __name__ == '__main__':
    if root():
        start = p.time()

    if False:
        data = p.rank()
        data = broadcast(data)
        print data

    if False:
        vec = range(p.size())
        data = scatter(vec)
        print data, p.rank()

    if False:
        data = p.rank()
        vec = gather(data)
        if root():
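            # Assumed completion (the fragment breaks off here): printing the
            # gathered vector on the root process mirrors the pattern of the
            # broadcast and scatter blocks above.
            print vec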