def calc_gradPlikPdet(self, iter, key):
    """ Caches the det term for iter via MC sims, together with the data one, for maximal //isation.

    Variant where the det (mean-field) term is built from get_MFresp times the
    previous-iteration plm, so only the data likelihood gradient needs a cg-inversion.

    iter : iteration index (must be > 0; outputs are cached under label iter - 1).
    key  : 'p' (lensing potential) or 'o' (curl potential).
    Returns 0 in all cases; results are communicated through the on-disk qlm cache.
    """
    assert key.lower() in ['p', 'o'], key  # potential or curl potential.
    # Cache file names; gradients computed at iteration 'iter' are labelled iter - 1.
    fname_likterm = self.lib_dir + '/qlm_grad%slik_it%03d.npy' % (
        key.upper(), iter - 1)
    fname_detterm = self.lib_dir + '/qlm_grad%sdet_it%03d.npy' % (
        key.upper(), iter - 1)
    assert iter > 0, iter
    # Nothing to do if both terms are already cached:
    if os.path.exists(fname_likterm) and os.path.exists(fname_detterm):
        return 0
    assert self.is_previous_iter_done(iter, key)
    # Identical MF here
    # Det term = mean-field response * previous-iteration plm
    # (NOTE(review): presumably a linear-response approximation to the MC mean field -- confirm).
    self.cache_qlm(
        fname_detterm,
        self.load_qlm(
            self.get_MFresp(key.lower()) * self.get_Plm(iter - 1,
                                                        key.lower())))
    # Point the covariance object to the current deflection field and its inverse:
    self.cov.set_ffi(self.load_f(iter - 1, key),
                     self.load_finv(iter - 1, key))
    mchain = fs.qcinv.multigrid.multigrid_chain(
        self.opfilt,
        self.type,
        self.chain_descr,
        self.cov,
        no_deglensing=self.nodeglensing)
    # FIXME : The solution input is not working properly sometimes. We give it up for now.
    # FIXME don't manage to find the right d0 to input for a given sol ?!!
    # Starting point for the cg-solver; multiplied by soltn_cond
    # (zeroes the guess when soltn_cond is 0/False).
    soltn = self.load_soltn(iter, key).copy() * self.soltn_cond
    self.opfilt._type = self.type
    mchain.solve(soltn, self.get_datmaps(), finiop='MLIK')
    # Cache the filtered TEB solution under label iter - 1:
    self.cache_TEBmap(soltn, iter - 1, key)
    # soltn = self.opfilt.MLIK2BINV(soltn,self.cov,self.get_datmaps())
    # grad = - ql.get_qlms(self.type, self.cov.lib_skyalm, soltn, self.cov.cls, self.lib_qlm,
    #                      use_Pool=self.use_Pool, f=self.cov.f)[{'p': 0, 'o': 1}[key.lower()]]
    TQUMlik = self.opfilt.soltn2TQUMlik(soltn, self.cov)
    ResTQUMlik = self.Mlik2ResTQUMlik(TQUMlik, iter, key)
    # Likelihood gradient from the filtered maps and the residual maps;
    # element 0 of the returned pair is the 'p' part, element 1 the 'o' (curl) part.
    grad = -ql.get_qlms_wl(self.type,
                           self.cov.lib_skyalm,
                           TQUMlik,
                           ResTQUMlik,
                           self.lib_qlm,
                           use_Pool=self.use_Pool,
                           f=self.load_f(iter - 1, key))[{
                               'p': 0,
                               'o': 1
                           }[key.lower()]]
    self.cache_qlm(fname_likterm, grad, pbs_rank=self.PBSRANK)
    # It does not help to cache both grad_O and grad_P as they do not follow the trajectory in plm space.
    return 0
def calc_gradPlikPdet(self, iter, key, callback='default_callback'): """ Caches the det term for iter via MC sims, together with the data one, for maximal //isation. """ assert key.lower() in ['p', 'o'], key # potential or curl potential. fname_detterm = self.lib_dir + '/qlm_grad%sdet_it%03d.npy' % ( key.upper(), iter - 1) fname_likterm = self.lib_dir + '/qlm_grad%slik_it%03d.npy' % ( key.upper(), iter - 1) if os.path.exists(fname_detterm) and os.path.exists(fname_likterm): return 0 assert self.is_previous_iter_done(iter, key) pix_pha, cmb_pha = self.build_pha(iter) if self.PBSRANK == 0 and not os.path.exists(self.lib_dir + '/mf_it%03d' % (iter - 1)): os.makedirs(self.lib_dir + '/mf_it%03d' % (iter - 1)) self.barrier() # Caching gradients for the mc_sims_mf sims , plus the dat map. # The gradient of the det term is the data averaged lik term, with the opposite sign. jobs = [] try: self.load_qlm(fname_likterm) except: jobs.append(-1) # data map for idx in range(self.nsims): # sims if not os.path.exists(self.lib_dir + '/mf_it%03d/g%s_%04d.npy' % (iter - 1, key.lower(), idx)): jobs.append(idx) else: try: # just checking if file is OK. self.load_qlm(self.lib_dir + '/mf_it%03d/g%s_%04d.npy' % (iter - 1, key.lower(), idx)) except: jobs.append(idx) self.opfilt._type = self.type # By setting the chain outside the main loop we avoid potential MPI barriers # in degrading the lib_alm libraries: mchain = fs.qcinv.multigrid.multigrid_chain( self.opfilt, self.type, self.chain_descr, self.cov, no_deglensing=self.nodeglensing) for i in range(self.PBSRANK, len(jobs), self.PBSSIZE): idx = jobs[i] print "rank %s, doing mc det. 
gradients idx %s, job %s in %s at iter level %s:" \ % (self.PBSRANK, idx, i, len(jobs), iter) ti = time.time() if idx >= 0: # sim grad_fname = self.lib_dir + '/mf_it%03d/g%s_%04d.npy' % ( iter - 1, key.lower(), idx) self.cov.set_ffi(self.load_f(iter - 1, key), self.load_finv(iter - 1, key)) MFest = ql.MFestimator(self.cov, self.opfilt, mchain, self.lib_qlm, pix_pha=pix_pha, cmb_pha=cmb_pha, use_Pool=self.use_Pool) grad = MFest.get_MFqlms(self.type, self.MFkey, idx)[{ 'p': 0, 'o': 1 }[key.lower()]] if self.subtract_phi0: isofilt = self.cov.turn2isofilt() chain_descr_iso = fs.qcinv.chain_samples.get_isomgchain( self.cov.lib_skyalm.ellmax, self.cov.lib_datalm.shape, iter_max=self.maxiter) mchain_iso = fs.qcinv.multigrid.multigrid_chain( self.opfilt, self.type, chain_descr_iso, isofilt, no_deglensing=self.nodeglensing) MFest = ql.MFestimator(isofilt, self.opfilt, mchain_iso, self.lib_qlm, pix_pha=pix_pha, cmb_pha=cmb_pha, use_Pool=self.use_Pool) grad -= MFest.get_MFqlms(self.type, self.MFkey, idx)[{ 'p': 0, 'o': 1 }[key.lower()]] self.cache_qlm(grad_fname, grad, pbs_rank=self.PBSRANK) else: # This is the data. # FIXME : The solution input is not working properly sometimes. We give it up for now. # FIXME don't manage to find the right d0 to input for a given sol ?!! self.cov.set_ffi(self.load_f(iter - 1, key), self.load_finv(iter - 1, key)) soltn = self.load_soltn(iter, key).copy() * self.soltn_cond mchain.solve(soltn, self.get_datmaps(), finiop='MLIK') self.cache_TEBmap(soltn, iter - 1, key) TQUMlik = self.opfilt.soltn2TQUMlik(soltn, self.cov) ResTQUMlik = self.Mlik2ResTQUMlik(TQUMlik, iter, key) grad = -ql.get_qlms_wl(self.type, self.cov.lib_skyalm, TQUMlik, ResTQUMlik, self.lib_qlm, use_Pool=self.use_Pool, f=self.load_f(iter - 1, key))[{ 'p': 0, 'o': 1 }[key.lower()]] self.cache_qlm(fname_likterm, grad, pbs_rank=self.PBSRANK) print "%s it. 
%s sim %s, rank %s cg status " % (key.lower(), iter, idx, self.PBSRANK) # It does not help to cache both grad_O and grad_P as they do not follow the trajectory in plm space. # Saves some info about current iteration : if idx == -1: # Saves some info about iteration times etc. with open(self.lib_dir + '/cghistories/history_dat.txt', 'a') as file: file.write('%04d %.3f \n' % (iter, time.time() - ti)) file.close() else: with open( self.lib_dir + '/cghistories/history_sim%04d.txt' % idx, 'a') as file: file.write('%04d %.3f \n' % (iter, time.time() - ti)) file.close() self.barrier() if self.PBSRANK == 0: # Collecting terms and caching det term. # We also cache arrays formed from independent sims for tests. print "rank 0, collecting mc det. %s gradients :" % key.lower() det_term = np.zeros(self.lib_qlm.alm_size, dtype=complex) for i in range(self.nsims): fname = self.lib_dir + '/mf_it%03d/g%s_%04d.npy' % ( iter - 1, key.lower(), i) det_term = (det_term * i + self.load_qlm(fname)) / (i + 1.) self.cache_qlm(fname_detterm, det_term, pbs_rank=0) det_term *= 0. fname_detterm1 = fname_detterm.replace('.npy', 'MF1.npy') assert 'MF1' in fname_detterm1 for i in np.arange(self.nsims)[0::2]: fname = self.lib_dir + '/mf_it%03d/g%s_%04d.npy' % ( iter - 1, key.lower(), i) det_term = (det_term * i + self.load_qlm(fname)) / (i + 1.) self.cache_qlm(fname_detterm1, det_term, pbs_rank=0) det_term *= 0. fname_detterm2 = fname_detterm.replace('.npy', 'MF2.npy') assert 'MF2' in fname_detterm2 for i in np.arange(self.nsims)[1::2]: fname = self.lib_dir + '/mf_it%03d/g%s_%04d.npy' % ( iter - 1, key.lower(), i) det_term = (det_term * i + self.load_qlm(fname)) / (i + 1.) 
self.cache_qlm(fname_detterm2, det_term, pbs_rank=0) # Erase some temp files if requested to do so : if self.tidy > 1: # We erase as well the gradient determinant term that were stored on disk : files_to_remove = \ [self.lib_dir + '/mf_it%03d/g%s_%04d.npy' % (iter - 1, key.lower(), i) for i in range(self.nsims)] print 'rank %s removing %s maps in ' % ( self.PBSRANK, len(files_to_remove) ), self.lib_dir + '/mf_it%03d/' % (iter - 1) for file in files_to_remove: os.remove(file) self.barrier()