def realSpaceCorrFromRandoms(paramsToUpdate, valsToUpdate): """ @brief compute the real space correlation between a true data map and random map, for the nMaps pairs of maps @param paramsToUpdate: the parameters to change for each pair of maps that we are correlating (dict) @param valsToUpdate: the numbers to insert into the paramsToUpdate values (list) """ # establish communication queues for different processes tasks = mp.JoinableQueue() results = mp.Queue() # store the system time sys_time = time.time() # start a worker for each cpu available num_workers = mp.cpu_count() print 'Creating %d workers' % num_workers workers = [ xcorrParallel.worker(tasks, results) for i in xrange(num_workers) ] for w in workers: w.start() num_jobs = len(valsToUpdate) # the number of correlations to do # read the base parameter file params = flipperDict.flipperDict() try: params.readFromFile("RealSpaceCorr.dict") except: raise # enqueue the tasks, which is equal to number of maps we are correlating for i in xrange(num_jobs): tasks.put(xcorrParallel.realCorrTask(params, valsToUpdate[i], paramsToUpdate)) # Add a poison pill for each worker for i in xrange(num_workers): tasks.put(None) # wait for all of the tasks to finish tasks.join() # summarize the results dataDir = './output/' xcorrUtils.summarizeRealSpaceResults(dataDir, baseTime=sys_time) return 0
import healpy
import flipper.flipperDict as flipperDict
import matplotlib.pyplot as plt
from pixell import curvedsky, enmap, utils, lensing, sharp
import numpy as np
import get_cmb_powerspectra

# read the run configuration (directory and filenames of the websky maps)
p = flipperDict.flipperDict()
p.read_from_file('lenswebsky.dict')

# toggle for the expensive map -> alm -> cl pipeline
do_all = False

if do_all:
    # load the lensing convergence (kappa) map and transform to alm space
    kappa_map_hp = healpy.read_map(p['websky_dir'] + p['kappa_map_name'])
    kappa_alms = healpy.sphtfunc.map2alm(kappa_map_hp)

    # same for the ISW map
    isw_map_hp = healpy.read_map(p['websky_dir'] + p['isw_map_name'])
    isw_alms = healpy.sphtfunc.map2alm(isw_map_hp)

    # cross-spectrum of ISW x kappa, plus the two auto-spectra
    cl_cross = healpy.alm2cl(isw_alms, kappa_alms)
    cl_kappa = healpy.alm2cl(kappa_alms)
    cl_isw = healpy.alm2cl(isw_alms)

    # BUGFIX: ells depends on cl_cross, so it must live inside this branch;
    # previously it ran unconditionally and raised a NameError whenever
    # do_all was False (the default)
    ells = np.arange(len(cl_cross))

if False:
    # disabled: theory-spectra comparison, kept for reference
    # NOTE(review): dead code guarded by `if False` -- confirm whether it can
    # be removed or should be promoted to a proper flag like do_all
    cmb_powers = get_cmb_powerspectra.websky_cmb_spectra(return_lensing=True)
    cmb_powers_scal = cmb_powers['unlensed_scalar']
def fourierSpaceCorrFromRandoms(paramsToUpdate, valsToUpdate, nProcs, corrType="cross"): """ @brief compute the real space correlation between a true data map and random map, for the nMaps pairs of maps @param paramsToUpdate: the parameters to change for each pair of maps that we are correlating (dict) @param valsToUpdate: the numbers to insert into the paramsToUpdate values (list) @param nProcs: the number of processes to use (int) @keyword corrType: type of spectrum to compute ('cross' or 'auto') """ num_jobs = len(valsToUpdate) # the number of correlations to do # read the base parameter file params = flipperDict.flipperDict() try: params.readFromFile("global.dict") except: raise originalStdOut = sys.stdout # might not have this package try: bar = initializeProgressBar(num_jobs) except: pass def worker(job_queue, results_queue): """ @brief worker function for multiprocessing """ # pull tasks until there are none left while True: # dequeue the next job next_task = job_queue.get() # task=None means this worker is finished if next_task is None: # make sure we tell the queue we finished the task job_queue.task_done() break else: # tasks are tuples of params oldParams, num = next_task # try to update the progress bar try: bar.update(num + 1) except: pass initialPath = os.getcwd() # do the work # make new directory and cd there if not os.path.exists("tmp_%d" % num): os.makedirs("tmp_%d" % num) os.chdir("tmp_%d" % num) # update the parameters newParams = xcorrUtils.updateParams(paramsToUpdate, oldParams, valsToUpdate[num]) # hide output from the speck(Cross) output sys.stdout = open(os.devnull, "w") # compute the fourier space correlation, given the parameters if corrType == "cross": err = runSpeckCross(newParams) if corrType == "auto": err = runSpeck(newParams) # go back to old directory and delete temporary directory os.chdir(initialPath) if os.path.exists("tmp_%d" % num): os.system("rm -rf ./tmp_%d" % num) # restore stdout to original value sys.stdout = 
originalStdOut # store the results results_queue.put((num)) # make sure we tell the queue we finished the task job_queue.task_done() return 0 # store the system time sys_time = time.time() # establish communication queues that contain all jobs job_numbers = mp.JoinableQueue() results = mp.Queue() # create a process for each cpu available or up to the limit specified by user if nProcs <= mp.cpu_count(): num_workers = nProcs else: num_workers = mp.cpu_count() print "Creating %d workers" % num_workers procs = [mp.Process(target=worker, args=(job_numbers, results)) for i in xrange(num_workers)] # start the processes for proc in procs: proc.start() # enqueue the positions for i in xrange(num_jobs): job_numbers.put((params, i)) # Add a poison pill (=None) for each worker for i in xrange(num_workers): job_numbers.put(None) # wait for all of the jobs to finish job_numbers.join() return 0