def mpfhfft(nproc, *args, **kwargs):
    '''
    Subdivide, along receive channels, the work of fhfft() among nproc
    processes to Hadamard-decode and Fourier transform the WaveformSet
    stored in infile into a WaveformSet file that will be written to
    outfile.

    The positional and keyword arguments are passed to fhfft(). Any
    'stride', 'start', 'lock', or 'event' kwargs will be overridden by
    internally generated values.
    '''
    if nproc == 1:
        # For a single process, don't spawn
        fhfft(*args, **kwargs)
        return

    # Add the stride to the kwargs
    kwargs['stride'] = nproc

    # Create a multiprocessing lock and event to serialize output access
    kwargs['lock'] = multiprocessing.Lock()
    kwargs['event'] = multiprocessing.Event()

    # Spawn the desired processes to perform FHFFT
    with process.ProcessPool() as pool:
        for i in range(nproc):
            # Give each process a meaningful name
            procname = process.procname(i)
            # Note the starting index for this processor
            kwargs['start'] = i
            pool.addtask(target=fhfft, name=procname,
                    args=args, kwargs=kwargs)
        pool.start()
        pool.wait()
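# A minimal standalone sketch of the start/stride partitioning used by
# mpfhfft(), assuming fhfft() walks receive channels as
# range(start, nchan, stride). The names below (nchan, _stride_demo) are
# illustrative and not part of the module.
def _stride_demo(nchan=10, nproc=3):
    for rank in range(nproc):
        # Each rank claims channels rank, rank + nproc, rank + 2*nproc, ...
        claimed = list(range(rank, nchan, nproc))
        print(f'rank {rank} handles channels {claimed}')

# Running _stride_demo() shows that the per-rank channel sets are disjoint
# and jointly cover every channel exactly once, so only output writes need
# to be serialized with the lock and event.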
def mpchanlists(infiles, nproc=1):
    '''
    Subdivide the infiles lists among nproc processors, and merge the
    dictionaries of results.
    '''
    # Don't spawn more processes than files
    nproc = min(nproc, len(infiles))

    # Don't fork for a single input file
    if nproc < 2: return getchanlist(infiles)

    # Create a Queue to collect results
    queue = multiprocessing.Queue()

    # Accumulate results here
    chanlist = {}

    # Spawn the desired processes to collect header information
    with process.ProcessPool() as pool:
        for i in range(nproc):
            # Assign a meaningful process name
            procname = process.procname(i)
            # Stride the input files
            args = (infiles[i::nproc], queue)
            pool.addtask(target=getchanlist, name=procname, args=args)

        pool.start()

        # Wait for all processes to respond
        responses = 0
        while responses < nproc:
            try:
                results = queue.get(timeout=0.1)
                responses += 1
            except pyqueue.Empty:
                pass
            else:
                for chan, rec in results.items():
                    if chan in chanlist:
                        raise ValueError('Channel index collision')
                    chanlist[chan] = rec

        # Allow all processes to finish
        pool.wait()

    return chanlist
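# Self-contained sketch of the queue-based merge performed by
# mpchanlists(), with a hypothetical stand-in for getchanlist(); only the
# standard multiprocessing module is assumed.
import multiprocessing

def _chanlist_stub(files, queue):
    # Pretend each file contributes one record keyed by its name
    queue.put({f: len(f) for f in files})

def _merge_demo(infiles, nproc=2):
    queue = multiprocessing.Queue()
    procs = [multiprocessing.Process(target=_chanlist_stub,
            args=(infiles[i::nproc], queue)) for i in range(nproc)]
    for p in procs: p.start()
    merged = {}
    # Drain the queue before joining to avoid blocking on full pipes
    for _ in range(nproc): merged.update(queue.get())
    for p in procs: p.join()
    return merged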
def main(argv=None):
    if argv is None: argv = sys.argv[1:]
    progname = sys.argv[0]

    # Default values
    nproc = process.preferred_process_count()
    chunk, stdev, pad, bgval = 8, 8, 24, 0.

    optlist, args = getopt.getopt(argv, 'p:c:g:b:h')

    # Parse the options list
    for opt in optlist:
        if opt[0] == '-p': nproc = int(opt[1])
        elif opt[0] == '-c': chunk = int(opt[1])
        elif opt[0] == '-b': bgval = float(opt[1])
        elif opt[0] == '-g':
            kstr = opt[1].split(',')
            pad = int(kstr[0])
            stdev = float(kstr[1])
        else:
            usage(progname)
            return 128

    # The input and output files must be specified
    if len(args) < 2:
        usage(progname)
        return 128

    # Grab the shape of the input file and the number of slices
    infile = mio.Slicer(args[0])
    # The output file must be created and truncated
    outfile = mio.Slicer(args[1], infile.shape, infile.dtype, True)

    try:
        with process.ProcessPool() as pool:
            for n in range(nproc):
                # Build a per-task argument tuple without shadowing args
                targs = (args[0], args[1], stdev, pad, bgval, n, nproc, chunk)
                pool.addtask(target=filtblks, args=targs)
            pool.start()
            pool.wait()
    except:
        # On any failure, truncate the output before re-raising
        outfile._backer.truncate(0)
        raise

    return 0
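# A hypothetical invocation of the driver above (the script name is
# illustrative; the flags follow the getopt string 'p:c:g:b:h'):
#
#   python filter.py -p 4 -c 8 -g 24,8.0 -b 0.0 input.mat output.mat
#
# This would filter input.mat into output.mat using 4 processes and
# 8-slice chunks, with a 24-sample pad, a Gaussian standard deviation of
# 8 and a zero background value.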
print('Trying %s for %s CUDA program' % (evalscat, defprogname), file=sys.stderr)

# Grab the list of GPUs to use
gpus = [int(s.strip()) for s in sys.argv[1].split(',')]
ngpus = len(gpus)

# Grab the frequency and the names of the element location and sensitivity files
freq, elemlocs, elemsens = sys.argv[2:5]

# Grab the remaining files and pull out each file's associated facet index
scatfiles = [(f, facetindex(f)) for f in sys.argv[5:]]

# The only positional argument for subprocesses is the executable name
args = [evalscat]

# Process files in groups corresponding to common facet indices
for fidx in set(f[1] for f in scatfiles):
    # Grab all files that share the current facet
    facetfiles = [f[0] for f in scatfiles if f[1] == fidx]

    with process.ProcessPool() as pool:
        for i, g in enumerate(gpus):
            kwargs = {}
            # In the "args" argument, pass the device ID, frequency, source
            # facet index, and the element location and sensitivity files
            kwargs['args'] = [g, freq, fidx, elemlocs, elemsens]
            # In the "inputs" argument, pass a share of the inputs
            kwargs['inputs'] = facetfiles[i::ngpus]
            pool.addtask(target=procexec, args=args, kwargs=kwargs)
        pool.start()
        pool.wait()
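# Sketch of the facet grouping above, using a hypothetical facetindex()
# that pulls a trailing integer from names like 'scat.facet3.dat'; the
# real function may parse names differently.
import re

def _facetindex_demo(fname):
    m = re.search(r'facet(\d+)', fname)
    return int(m.group(1)) if m else -1

_files = ['a.facet0.dat', 'b.facet1.dat', 'c.facet0.dat']
_pairs = [(f, _facetindex_demo(f)) for f in _files]
for _fidx in set(p[1] for p in _pairs):
    # Each facet group would be striped round-robin across the GPUs
    print(_fidx, [p[0] for p in _pairs if p[1] == _fidx])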
def main(argv=None):
    if argv is None: argv = sys.argv[1:]
    progname = sys.argv[0]

    # Default values
    random = True
    nproc = process.preferred_process_count()
    chunk = 8

    optlist, args = getopt.getopt(argv, 'p:nd:s:c:g:h')

    # Extra arguments are added as kwargs
    kwargs = {}

    # Parse the options list
    for opt in optlist:
        if opt[0] == '-n': random = False
        elif opt[0] == '-p': nproc = int(opt[1])
        elif opt[0] == '-d': kwargs['scatden'] = float(opt[1])
        elif opt[0] == '-s': kwargs['scatsd'] = float(opt[1])
        elif opt[0] == '-c': chunk = int(opt[1])
        elif opt[0] == '-g':
            kstr = opt[1].split(',')
            kwargs['smoothp'] = [int(kstr[0]), float(kstr[1])]
        else:
            usage(progname)
            return 128

    # The segmentation file, parameter file and three output files must be given
    if len(args) < 5:
        usage(progname)
        return 128

    # Read the tissue parameters
    pmat = np.loadtxt(args[1])
    # Split the parameters into sound speed, attenuation and density
    params = [p.tolist() for p in [pmat[:, :2], pmat[:, 2:4], pmat[:, 4:6]]]

    # Eliminate the standard deviation if random scatterers are not desired
    if not random:
        params = [[[p[0], None] for p in pv] for pv in params]

    # Grab the shape of the segmentation file and the number of slices
    segfile = mio.Slicer(args[0])

    # The output files need to be created and truncated
    outputs = args[2:]
    outfiles = [mio.Slicer(o, segfile.shape, segfile.dtype, True)
            for o in outputs]

    try:
        with process.ProcessPool() as pool:
            for n in range(nproc):
                # Build a per-task argument tuple without shadowing args
                targs = (args[0], outputs, params, n, nproc, chunk)
                pool.addtask(target=mapblks, args=targs, kwargs=kwargs)
            pool.start()
            pool.wait()
    except:
        # On any failure, truncate every output before re-raising
        for f in outfiles: f._backer.truncate(0)
        raise

    return 0
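# Illustrative view of the parameter split above: each row of the
# parameter file holds (mean, stdev) pairs for sound speed, attenuation
# and density. The numbers below are made up for demonstration.
import numpy as np

_pmat = np.array([[1540., 10., 0.5, 0.05, 1.0, 0.02]])
_params = [p.tolist() for p in (_pmat[:, :2], _pmat[:, 2:4], _pmat[:, 4:6])]
# _params == [[[1540.0, 10.0]], [[0.5, 0.05]], [[1.0, 0.02]]]
# With the -n flag, each stdev would be replaced by None to disable
# random scatterers.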
def finddelays(nproc=1, *args, **kwargs):
    '''
    Distribute, among nproc processes, delay analysis for waveforms using
    calcdelays(). All *args and **kwargs are passed to calcdelays on each
    participating process. This function explicitly sets the "queue",
    "rank", "grpsize", and "delaycache" arguments of calcdelays, so *args
    and **kwargs should not contain these values.

    The delaycache argument is built from an optional file specified in
    cachefile, which should be a map from transmit-receive pair (t, r) to
    a precomputed delay, loadable with habis.formats.loadkeymat.
    '''
    forbidden = {'queue', 'rank', 'grpsize', 'delaycache'}
    forbidden.intersection_update(kwargs)
    if forbidden:
        raise TypeError(f"Forbidden argument '{next(iter(forbidden))}'")

    cachefile = kwargs.pop('cachefile', None)

    # Try to read an existing delay map
    if cachefile is not None:
        try: kwargs['delaycache'] = loadkeymat(cachefile)
        except (KeyError, ValueError, IOError): pass

    # Create a result queue and a dictionary to accumulate results
    queue = multiprocessing.Queue(nproc)
    delays = {}

    # Extend the kwargs to include the result queue
    kwargs['queue'] = queue

    # Extend the kwargs to include the group size
    kwargs['grpsize'] = nproc

    # Keep track of waveform statistics
    stats = defaultdict(int)

    # Spawn the desired processes to perform the cross-correlation
    with process.ProcessPool() as pool:
        for i in range(nproc):
            # Pick a useful process name
            procname = process.procname(i)
            # Add the group rank to the kwargs; addtask copies kwargs,
            # so each rank keeps its own value
            kwargs['rank'] = i
            pool.addtask(target=calcdelays, name=procname,
                    args=args, kwargs=kwargs)

        pool.start()

        # Wait for all processes to respond
        responses, deadpool = 0, False
        while responses < nproc:
            try:
                results = queue.get(timeout=0.1)
            except pyqueue.Empty:
                # Loosely join to watch for a dead pool
                pool.wait(timeout=0.1, limit=1)
                if not pool.unjoined:
                    # Note a dead pool, give the read one more try
                    if deadpool: break
                    else: deadpool = True
            else:
                delays.update(results[0])
                for k, v in results[1].items():
                    if v: stats[k] += v
                responses += 1

        if responses != nproc:
            print(f'WARNING: Proceeding with {responses} of {nproc} '
                    'subprocess results. A subprocess may have died.')

        pool.wait()

    if stats:
        print(f'For file {os.path.basename(args[0])} '
                f'({len(delays)} identified times):')
        for k, v in sorted(stats.items()):
            if v:
                wfn = 'waveforms' if v > 1 else 'waveform'
                print(f'  {v} {k} {wfn}')

    if len(delays) and cachefile:
        # Save the computed delays, if desired
        try: savez_keymat(cachefile, delays)
        except (ValueError, IOError): pass

    return delays
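# Standalone sketch of the drain-with-liveness-check loop in finddelays():
# poll the result queue with a short timeout and, once no live workers
# remain, allow one final read before giving up. Plain multiprocessing
# stands in for the process.ProcessPool wrapper.
import multiprocessing, queue as pyqueue

def _drain(q, procs):
    results, deadpool = [], False
    while len(results) < len(procs):
        try:
            results.append(q.get(timeout=0.1))
        except pyqueue.Empty:
            if not any(p.is_alive() for p in procs):
                # The extra read chance catches results queued just
                # before the last worker exited
                if deadpool: break
                deadpool = True
    return results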