Example 1
    def read_c_ell(self, filename, comm=None):
        '''
        Read in c_ell file and populate c_ell attribute.

        Parameters
        ----------
        filename : str
            Absolute path to spectra file (the ".hdf5" extension is appended).
        comm : MPI communicator, optional
            If provided, broadcast after load.
        '''

        if comm is None:
            comm = utils.FakeMPIComm()

        if comm.Get_rank() == 0:
            self.c_ell = {}

            with h5py.File(filename + '.hdf5', 'r') as f:
                ells = f['lensed_scalar/ells'][()]
                c_ell = f['lensed_scalar/c_ell'][()]
                self.c_ell['lensed_scalar'] = {}
                self.c_ell['lensed_scalar']['ells'] = ells
                self.c_ell['lensed_scalar']['c_ell'] = c_ell

                ells = f['unlensed_scalar/ells'][()]
                c_ell = f['unlensed_scalar/c_ell'][()]
                self.c_ell['unlensed_scalar'] = {}
                self.c_ell['unlensed_scalar']['ells'] = ells
                self.c_ell['unlensed_scalar']['c_ell'] = c_ell
        else:
            self.c_ell = None

        self.c_ell = utils.bcast(self.c_ell, comm)
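A minimal sketch of the "load on rank 0, then broadcast" pattern used above, assuming mpi4py; utils.bcast presumably wraps comm.bcast so that the single-rank FakeMPIComm is also accepted. Note that read_c_ell appends the ".hdf5" extension itself.

    import numpy as np
    from mpi4py import MPI

    comm = MPI.COMM_WORLD

    if comm.Get_rank() == 0:
        # Stands in for the HDF5 read done on the root rank.
        data = {'lensed_scalar': {'ells': np.arange(2, 100)}}
    else:
        data = None

    # After the broadcast every rank holds the same dictionary.
    data = comm.bcast(data, root=0)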
Example 2
    def compute_estimate_batch(self,
                               alm_loader,
                               alm_files,
                               comm=None,
                               verbose=False,
                               **kwargs):
        '''
        Compute fNL estimates for a collection of maps in parallel using MPI.

        Arguments
        ---------
        alm_loader : callable
            Function that returns alms on the calling rank, given a filename as
            its first argument.
        alm_files : array_like
            List of alm files to load.
        comm : MPI communicator, optional
        verbose : bool, optional
            Print progress.
        kwargs : dict, optional
            Optional keyword arguments passed to "compute_estimate".        

        Returns
        -------
        estimates : (nalm_files) array, None
            Estimates for each input file, in the same order as "alm_files".
        '''

        if comm is None:
            comm = utils.FakeMPIComm()

        estimates = np.zeros(len(alm_files))

        # Split alm_file loop over ranks.
        for aidx in range(comm.Get_rank(), len(alm_files), comm.Get_size()):

            alm_file = alm_files[aidx]
            if verbose:
                print('rank {:3}: loading {}'.format(comm.Get_rank(),
                                                     alm_file))
            alm = alm_loader(alm_file)

            estimate = self.compute_estimate(alm, **kwargs)
            if verbose:
                print('rank {:3}: estimate : {}'.format(
                    comm.Get_rank(), estimate))

            estimates[aidx] = estimate

        return utils.allreduce_array(estimates, comm)
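The loop above distributes files over ranks in a strided (round-robin) fashion and relies on a sum-allreduce to reassemble the per-file results on every rank. A standalone sketch of that pattern, assuming mpi4py (utils.allreduce_array presumably performs the equivalent of comm.Allreduce with MPI.SUM):

    import numpy as np
    from mpi4py import MPI

    comm = MPI.COMM_WORLD
    nfiles = 10
    estimates = np.zeros(nfiles)

    # Each rank only fills its strided subset; other entries stay zero.
    for aidx in range(comm.Get_rank(), nfiles, comm.Get_size()):
        estimates[aidx] = float(aidx)  # Stands in for compute_estimate.

    # Summing over ranks reassembles the full array on every rank.
    out = np.zeros_like(estimates)
    comm.Allreduce(estimates, out, op=MPI.SUM)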
Example 3
    def add_reduced_bispectrum_from_file(self, filename, comm=None):
        '''
        Load reduced bispectrum and add to internal list
        of reduced bispectra.

        Parameters
        ----------
        filename : str
            Absolute path to file.
        comm : MPI communicator, optional
            If provided, broadcast after load.
        '''

        if comm is None:
            comm = utils.FakeMPIComm()

        if comm.Get_rank() == 0:
            rb = ReducedBispectrum.init_from_file(filename)
        else:
            rb = None

        rb = utils.bcast(rb, comm)
        self.red_bispectra.append(rb)
Example 4
    def compute_fisher_isotropic(self,
                                 icov_ell,
                                 return_matrix=False,
                                 fsky=1,
                                 comm=None):
        '''
        Return Fisher information assuming that inverse noise + signal
        covariance is diagonal in harmonic space.
        
        Arguments
        ---------
        icov_ell : (npol, npol, nell) or (npol, nell) array
            Inverse covariance matrix diagonal in ell. Unlike "icov" this 
            should be: 1 / (S_ell + (b^{-1} N b^{-1})_ell), so no beam in 
            the numerator.
        return_matrix : bool, optional
            If set, also return nfact x nfact Fisher matrix.       
        fsky : float or (npol,) array, optional
            Fraction of sky observed, allowed to vary between polarizations.
        comm : MPI communicator, optional        

        Returns
        -------
        fisher : float, None
            Fisher information.
        fisher_nxn : (nfact, nfact) array, None
            nfact x nfact Fisher matrix (only if return_matrix is set).
        '''

        if comm is None:
            comm = utils.FakeMPIComm()

        red_bisp = self.red_bispectra[0]
        f_i_ell, rule, weights = self._init_reduced_bispectrum(red_bisp)
        f_ell_i = np.ascontiguousarray(np.transpose(f_i_ell, (2, 1, 0)))
        del f_i_ell
        f_ell_i *= np.atleast_1d(fsky**(1 / 6))[np.newaxis, :, np.newaxis]

        sqrt_icov_ell = mat_utils.matpow(icov_ell, 0.5)
        sqrt_icov_ell = np.ascontiguousarray(
            np.transpose(sqrt_icov_ell, (2, 0, 1)), dtype=self.dtype)

        nrule = rule.shape[0]
        fisher_nxn = np.zeros((nrule, nrule), dtype=self.dtype)

        thetas_per_rank = np.array_split(self.thetas,
                                         comm.Get_size())[comm.Get_rank()]
        ct_weights_per_rank = np.array_split(self.theta_weights,
                                             comm.Get_size())[comm.Get_rank()]

        fisher_core.fisher_nxn(sqrt_icov_ell, f_ell_i, thetas_per_rank,
                               ct_weights_per_rank, rule, weights, fisher_nxn)

        fisher_nxn = utils.allreduce_array(fisher_nxn, comm)
        fisher_nxn = np.triu(fisher_nxn, 1).T + np.triu(fisher_nxn)
        fisher = np.sum(fisher_nxn)

        if return_matrix:
            return fisher, fisher_nxn

        return fisher
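Two details of the method above, stated with some hedging: the fsky**(1 / 6) factor presumably reflects that six factors of f_ell_i enter the quadratic-in-bispectrum Fisher sum, so the combined scaling is the usual overall factor of fsky; and the np.triu step assumes fisher_core.fisher_nxn only fills the upper triangle, which the following pure-NumPy sketch mirrors into a full symmetric matrix:

    import numpy as np

    # Example upper-triangular matrix standing in for the raw fisher_nxn.
    upper = np.triu(np.arange(1., 10.).reshape(3, 3))

    # Mirror the strict upper triangle into the lower triangle.
    full = np.triu(upper, 1).T + np.triu(upper)
    assert np.allclose(full, full.T)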
Example 5
    def step_batch(self,
                   alm_loader,
                   alm_files,
                   comm=None,
                   verbose=False,
                   **kwargs):
        '''
        Add iterations to <grad T (C^-1 a) C^-1 grad T(C^-1 a)^*> and 
        <grad T (C^-1 a)> Monte Carlo estimates by loading and processing several 
        alms in parallel using MPI.

        Arguments
        ---------
        alm_loader : callable
            Function that returns alms on the calling rank, given a filename as
            its first argument.
        alm_files : array_like
            List of alm files to load.
        comm : MPI communicator, optional
        verbose : bool, optional
            Print progress.
        kwargs : dict, optional
            Optional keyword arguments passed to "_step".        
        '''

        if comm is None:
            comm = utils.FakeMPIComm()

        # Monte carlo quantities local to rank.
        mc_idx_loc = 0
        mc_gt_sq_loc = None
        mc_gt_loc = None

        # Split alm_file loop over ranks
        for alm_file in alm_files[comm.Get_rank()::comm.Get_size()]:

            if verbose:
                print('rank {:3}: loading {}'.format(comm.Get_rank(),
                                                     alm_file))
            alm = alm_loader(alm_file)
            if verbose:
                print('rank {:3}: done loading'.format(comm.Get_rank()))
            grad_t = self._step(alm, **kwargs)

            if mc_gt_loc is None:
                mc_gt_loc = grad_t
            else:
                mc_gt_loc += grad_t

            mc_gt_sq = utils.contract_almxblm(
                grad_t, self.icov(self.beam(np.conj(grad_t))))

            if mc_gt_sq_loc is None:
                mc_gt_sq_loc = mc_gt_sq
            else:
                mc_gt_sq_loc += mc_gt_sq

            mc_idx_loc += 1

        # To allow the allreduce below when the number of ranks exceeds the
        # number of alm files: idle ranks need placeholders of the right
        # shape and dtype.
        shape, dtype = utils.bcast_array_meta(mc_gt_loc, comm, root=0)
        if mc_gt_loc is None:
            mc_gt_loc = np.zeros(shape, dtype=dtype)
        if mc_gt_sq_loc is None:
            mc_gt_sq_loc = 0.
        if mc_idx_loc is None:
            mc_idx_loc = 0

        mc_gt = utils.allreduce_array(mc_gt_loc, comm)
        mc_gt_sq = utils.allreduce(mc_gt_sq_loc, comm)
        mc_idx = utils.allreduce(mc_idx_loc, comm)

        # All ranks get to update the internal mc variables themselves.
        # Note: __mc_gt and __mc_gt_sq are presumably the name-mangled
        # attributes backing the mc_gt and mc_gt_sq properties; adding to
        # them directly avoids re-triggering the property setters.
        if self.mc_gt is None:
            self.mc_gt = mc_gt
        else:
            self.__mc_gt += mc_gt

        if self.mc_gt_sq is None:
            self.mc_gt_sq = mc_gt_sq
        else:
            self.__mc_gt_sq += mc_gt_sq

        self.mc_idx += mc_idx
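Ranks that receive no alm files still have to participate in the allreduce, which is why the shape and dtype of mc_gt_loc are broadcast first. A standalone sketch of that idle-rank handling, assuming mpi4py (utils.bcast_array_meta presumably broadcasts exactly this metadata from the root rank):

    import numpy as np
    from mpi4py import MPI

    comm = MPI.COMM_WORLD

    # Only rank 0 has data; other ranks play the role of idle ranks.
    local = np.arange(4, dtype=complex) if comm.Get_rank() == 0 else None

    # Broadcast (shape, dtype) from root so idle ranks can allocate zeros.
    shape, dtype = comm.bcast(
        None if local is None else (local.shape, local.dtype), root=0)
    if local is None:
        local = np.zeros(shape, dtype=dtype)

    total = np.zeros_like(local)
    comm.Allreduce(local, total, op=MPI.SUM)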
Example 6
    def test_utils_reduce_fake(self):

        obj = 2
        obj_out = utils.reduce(obj, comm=utils.FakeMPIComm())

        self.assertEqual(obj, obj_out)
Example 7
    def test_utils_reduce_array_fake(self):

        arr = np.random.randn(100).reshape((5, 20)).astype(complex)
        arr_out = utils.reduce_array(arr, comm=utils.FakeMPIComm())

        np.testing.assert_array_equal(arr, arr_out)
Example 8
    def test_utils_fakempicomm(self):

        comm = utils.FakeMPIComm()
        self.assertEqual(comm.Get_rank(), 0)
        self.assertEqual(comm.Get_size(), 1)
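The tests above only rely on FakeMPIComm reporting rank 0 of a size-one communicator. A minimal stand-in consistent with that behaviour (a sketch, not the actual utils implementation) could look like:

    class FakeMPIComm:
        '''Single-process stand-in for an MPI communicator.'''

        def Get_rank(self):
            # Always the root rank.
            return 0

        def Get_size(self):
            # A single "process".
            return 1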