Example #1
    def __init__(self,
                 cmb_type='LensedUnabberatedCMB',
                 dobeam=True,
                 add_foregrounds=True,
                 apply_window=True,
                 max_cached=1,
                 model="act_mr3",
                 apply_rotation=False,
                 alpha_map=None,
                 eulers=None,
                 camb_unlensed_file=None,
                 camb_lensed_file=None,
                 lmax=8000):
        """
        model: The name of an implemented soapack datamodel
        eulers            : rotate alm by euler angles (psi, theta, phi) (i.e (0,15,0) ->  maps are rotated by 15 deg in theta) 
        """
        super(GaussGen, self).__init__(cmb_type=cmb_type, dobeam=dobeam, add_foregrounds=add_foregrounds,
                                       apply_window=apply_window, max_cached=max_cached, model=model,
                                       apply_rotation=apply_rotation, alpha_map=alpha_map)
        self.cmb_types = ['UnlensedCMB', 'LensedUnabberatedCMB']
        self.signal_path = None
        self.lmax = lmax
        assert (cmb_type in self.cmb_types)

        # Fall back to the bundled CAMB spectra when no file paths are given.
        if camb_unlensed_file is None:
            camb_unlensed_file = os.path.join(
                actsim_root, '../inputParams/cosmo2017_10K_acc3_scalCls.dat')
        if camb_lensed_file is None:
            camb_lensed_file = os.path.join(
                actsim_root, '../inputParams/cosmo2017_10K_acc3_lensedCls.dat')
        self.camb_unlensed_file = camb_unlensed_file
        self.camb_lensed_file = camb_lensed_file

        self.ps_unlen = powspec.read_camb_scalar(self.camb_unlensed_file)[0]
        self.ps_len = powspec.read_spectrum(self.camb_lensed_file)
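Not part of the original snippet: powspec.read_camb_scalar returns a pair, the scalar CMB spectra and the lensing potential spectrum, which is why only element [0] is kept above (Example #2 below unpacks both). A minimal standalone sketch of the same readers, with placeholder file paths:

# Hedged sketch; file paths are placeholders for the inputParams files used above.
from pixell import powspec

ps_scalar, ps_phi = powspec.read_camb_scalar("inputParams/cosmo2017_10K_acc3_scalCls.dat")
ps_lensed = powspec.read_spectrum("inputParams/cosmo2017_10K_acc3_lensedCls.dat")
# ps_scalar holds the unlensed scalar Cls, ps_phi the lensing potential spectrum,
# and ps_lensed the lensed Cls that the class stores as self.ps_len.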
Example #2
def get_lens_result(res=1., lmax=400, dtype=np.float64, seed=1):
    shape, wcs = enmap.fullsky_geometry(res=np.deg2rad(res))
    shape = (3,) + shape
    # ells = np.arange(lmax)
    ps_cmb,ps_lens = powspec.read_camb_scalar(DATA_PREFIX+"test_scalCls.dat")
    ps_lensinput = np.zeros((4,4,ps_cmb.shape[-1]))
    ps_lensinput[0,0] = ps_lens
    ps_lensinput[1:,1:] = ps_cmb
    lensed = lensing.rand_map(shape, wcs, ps_lensinput, lmax=lmax, maplmax=None, dtype=dtype,
                              seed=seed, phi_seed=None, oversample=2.0, spin=[0, 2], output="lu",
                              geodesic=True, verbose=False, delta_theta=None)
    return lensed
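The function above assumes numpy and pixell are already imported and that DATA_PREFIX points at the test-data directory (left as an external setting here). A hedged usage sketch, assuming that output="lu" makes lensing.rand_map return the lensed and unlensed maps in that order:

# Assumed imports for the example above; DATA_PREFIX must be defined separately.
import numpy as np
from pixell import enmap, powspec, lensing

# Quick low-resolution run; with output="lu" the return value unpacks into two maps.
lensed, unlensed = get_lens_result(res=2., lmax=200, seed=1)
print(lensed.shape, unlensed.shape)  # each is a (3, ny, nx) T,Q,U enmap on the full-sky geometry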
Example #3
    def __init__(self, cuda, ngpu, nbatch, norm_info_file, pixgan_state_file, tuner_state_file,
                 clkk_spec_file, cmb_spec_file, transfer_1dspec_file, transfer_2dspec_file, ir_spectra_index_file, radio_spectra_index_file, taper_width, nprocess=1, xgrid_file=None,
                 weight_file=None, output_dir=None):
        ## fixed full sky geometry
        self.shape = (21600, 43200)
        _, self.wcs = enmap.fullsky_geometry(res=0.5 * utils.arcmin)
        self.template = enmap.zeros(self.shape, self.wcs)
        self.stamp_shape = (5, 128, 128)
        self.nbatch = nbatch
        self.taper_width = taper_width
        self.fg_compts = ["kappa", "ksz", "tsz", "ir_pts", "rad_pts"]
        self.freqs = [30, 90, 148, 219, 277, 350]

        Ny, Nx = self.shape
        ny, nx = self.stamp_shape[-2:]
        num_ybatch = int(np.ceil((Ny - self.taper_width) / (ny - self.taper_width)))
        num_xbatch = int(np.ceil((Nx - self.taper_width) / (nx - self.taper_width)))
        self.num_batch = (num_ybatch, num_xbatch)

        Ny_pad, Nx_pad = num_ybatch * ny, num_xbatch * nx
        self.shape_padded = (Ny_pad, Nx_pad)

        self.lmax = 10000
        self.output_dir = output_dir
        if self.output_dir is None:
            self.output_dir = os.path.join(os.getcwd(), "output")

        self.nprocess = nprocess
        self.cuda = cuda
        self.ngpu = 0 if not self.cuda else ngpu
        if torch.cuda.is_available() and not cuda:
            print("[WARNING] You have a CUDA device. You probably want to run with CUDA enabled")
        self.Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
        self.device = torch.device("cuda" if cuda else "cpu")

        self.norm_info_file = norm_info_file
        self.normalizer = transforms.SehgalDataNormalizerScaledLogZShrink(self.norm_info_file, channel_idxes=["kappa"])
        self.unnormalizer = transforms.SehgalDataUnnormalizerScaledLogZShrink(self.norm_info_file)

        ## network specific infos
        STanh = cnn.ScaledTanh(15., 2. / 15.)
        nconv_fc = 64
        kernal_size = 4
        stride = 2
        padding = 1
        output_padding = 0
        dropout_rate = 0

        ## prepare input specs
        self.clkk_spec = np.load(clkk_spec_file)
        self.cmb_spec = powspec.read_camb_scalar(cmb_spec_file)

        ## transfer
        self.transf_1dspec = np.load(transfer_1dspec_file)
        self.transf_2dspec = load_data(transfer_2dspec_file)

        LF = cnn.LinearFeature(4, 4)
        nconv_layer_gen = 4
        nthresh_layer_gen = 3
        self.pixgan_generator = model.UNET_Generator(
            self.stamp_shape, nconv_layer=nconv_layer_gen, nconv_fc=nconv_fc, ngpu=ngpu,
            kernal_size=kernal_size, stride=stride, padding=padding, output_padding=output_padding,
            normalize=True, activation=[LF, STanh], nin_channel=1, nout_channel=4,
            nthresh_layer=nthresh_layer_gen, dropout_rate=dropout_rate).to(device=self.device)
        print(f"Loading {pixgan_state_file}")
        self.pixgan_generator.load_state_dict(torch.load(pixgan_state_file, map_location=self.device))

        ## tuner layer
        LF = cnn.LinearFeature(5, 5, bias=True)
        nconv_layer_gen = 5
        nthresh_layer_gen = 0
        self.forse_generator = model.VAEGAN_Generator(
            self.stamp_shape, nconv_layer=nconv_layer_gen, nconv_fc=nconv_fc, ngpu=ngpu,
            kernal_size=kernal_size, stride=stride, padding=padding, output_padding=output_padding,
            normalize=True, activation=[LF, STanh], nin_channel=5, nout_channel=5,
            nthresh_layer=nthresh_layer_gen, dropout_rate=dropout_rate).to(device=self.device)
        print(f"Loading {tuner_state_file}")
        self.forse_generator.load_state_dict(torch.load(tuner_state_file, map_location=self.device))

        self.pixgan_generator.eval()
        self.forse_generator.eval()

        ## load the xgrid later
        self.xgrid_file = xgrid_file
        self.xgrid = None
        self.xscales = None
        ## load weight later
        self.weight_file = weight_file
        self.weight = None

        self.taper = None
        self.jysr2thermo = None

        self.spectral_indxes = {}
        #self.spectral_indxes["rad_pts"] = {"mean":-0.81, "std":0.11}
        #self.spectral_indxes["ir_pts"] = {"mean":3.02, "std":0.17}
        self.spectral_indxes["rad_pts"] = np.load(radio_spectra_index_file)
        self.spectral_indxes["ir_pts"] = np.load(ir_spectra_index_file)