def __init__(self, accountID, top_config):
    self.accountID = accountID
    self.top_config = top_config
    # Database handle and per-account correlation helper.
    self.db = db_handler.DBHandler()
    self.corr = correlation.Correlation(accountID)
    # Look up the stored WebSim credentials and log in.
    account = self.db.get_websim_account(accountID)
    self.ws = websim.WebSim(login=account[0], password=account[1])
    self.ws.authorise()
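# A hedged usage sketch: only __init__ is shown, so the enclosing class
# name (AccountSession) and the config value are hypothetical stand-ins.
config = {}                           # stand-in for the real top_config
session = AccountSession(1, config)   # hypothetical class name; runs __init__
print(session.accountID)              # session.ws is now an authorised client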
def setUp(self):
    cosmo_multi = cosmology.MultiEpoch(0.0, 5.0, cosmo_dict=c_dict)
    lens_dist = kernel.dNdzMagLim(z_min=0.0, z_max=2.0, a=2, z0=0.3, b=2)
    source_dist = kernel.dNdzGaussian(z_min=0.0, z_max=2.0,
                                      z0=1.0, sigma_z=0.2)
    lens_window = kernel.WindowFunctionGalaxy(
        lens_dist, cosmo_multi_epoch=cosmo_multi)
    source_window = kernel.WindowFunctionConvergence(
        source_dist, cosmo_multi_epoch=cosmo_multi)
    kern = kernel.Kernel(0.001 * 0.001 * deg_to_rad,
                         1.0 * 100.0 * deg_to_rad,
                         window_function_a=lens_window,
                         window_function_b=source_window,
                         cosmo_multi_epoch=cosmo_multi)
    zheng = hod.HODZheng(hod_dict)
    cosmo_single = cosmology.SingleEpoch(0.0, cosmo_dict=c_dict)
    h = halo.Halo(input_hod=zheng, cosmo_single_epoch=cosmo_single)
    self.corr = correlation.Correlation(0.001, 1.0,
                                        input_kernel=kern,
                                        input_halo=h,
                                        power_spec='power_mm')
    self.theta_array = numpy.logspace(-3, 0, 4) * deg_to_rad
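# A companion smoke test, sketched under the assumption that the object
# exposes compute_correlation() (used elsewhere in this codebase) and a
# per-angle correlation(theta) accessor; adapt the accessor to the real API.
def test_correlation_is_finite(self):
    self.corr.compute_correlation()
    for theta in self.theta_array:
        self.assertTrue(numpy.isfinite(self.corr.correlation(theta)))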
import torch

import correlation

B, C, H, W = 1, 1, 32, 32
a = torch.randint(1, 4, (B, C, H, W), dtype=torch.float32).cuda()
b = torch.randint_like(a, 1, 4)  # inherits a's dtype and device, no .cuda() needed
print(a.dtype)
print(a.shape, b.shape)
print(a.device, b.device)
corr = correlation.Correlation(pad_size=4, kernel_size=1,
                               max_displacement=4, stride1=1,
                               stride2=1, corr_multiply=1).cuda()
c = corr(a, b)
print(c.shape)
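# Expected output shape, assuming this package follows the FlowNet
# correlation convention of one output channel per sampled displacement on
# a (2*max_displacement/stride2 + 1)^2 grid at the input resolution:
d = 2 * 4 // 1 + 1         # max_displacement=4, stride2=1 -> 9
print((B, d * d, H, W))    # expected: (1, 81, 32, 32)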
import numpy as np

lens_dist_low = kernel.dNdzInterpolation(p_z[:25, 0], p_z[:25, 1])
# Alternative: kernel.dNdzGaussian(0.0, 5.0, 3.1, 0.05)  (0.1 and 0.5 also tried)
lens_window_low = kernel.WindowFunctionGalaxy(lens_dist_low, cosmo_multi)
con_kernel_low = kernel.Kernel(ktheta_min=0.001 * 0.001 * deg_to_rad,
                               ktheta_max=100.0 * 1.0 * deg_to_rad,
                               window_function_a=lens_window_low,
                               window_function_b=lens_window_low,
                               cosmo_multi_epoch=cosmo_multi)

V = comoving_volume(3., 0.0356402381150449, 0.5)  # previously (3., 0.01745, 0.5)
gal_dens, dens_err = galaxy_density(
    '/users/bhernandez/thesis/work/galdens_u.txt', V)

corr_low = correlation.Correlation(theta_min_deg=0.01, theta_max_deg=2.0,
                                   input_kernel=con_kernel_low,
                                   input_halo=halo_model_low)
                                   # power_spec='power_gg' disabled
corr_low.compute_correlation()

# Measured w(theta); earlier runs read the equivalent Wtheta/Wcovar files
# from /vol/fohlen11/fohlen11_1/bhernandez/data/corr/ (udropouts and
# magnitude-bin variants).
data = np.loadtxt('/users/bhernandez/thesis/work/'
                  'Wtheta_23.0t24.5_individual_weights_udropouts_density')
# data = np.log10(data)
density = data[-1, 1]  # last row carries the galaxy density
data = data[5:-7, :]   # trim the smallest and largest angular bins
Wtheta = data[:, 1]
theta = data[:, 0]

covariance_tmp = np.loadtxt(
    '/users/bhernandez/thesis/work/'
    'Wcovar_23.0t24.5_individual_weights_udropouts_density')
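# A natural next step with these arrays: a chi-square comparison of the
# model against the measurement. This is a sketch; the theta_array and
# wtheta_array attribute names on corr_low are assumptions, and the
# covariance is assumed to need the same bin trim as the data.
model = np.interp(theta,
                  corr_low.theta_array / deg_to_rad,  # assumed attribute
                  corr_low.wtheta_array)              # assumed attribute
resid = Wtheta - model
inv_cov = np.linalg.inv(covariance_tmp[5:-7, 5:-7])   # match trimmed bins
chi2 = resid @ inv_cov @ resid
print(chi2)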
### The kernel integral limits run from k_min*theta_min to k_max*theta_max,
### where k_min and k_max are set in the code as 0.001 and 100.0
### respectively.
con_kernel = kernel.Kernel(ktheta_min=0.001 * 0.001 * deg_to_rad,
                           ktheta_max=100.0 * 1.0 * deg_to_rad,
                           window_function_a=lens_window,
                           window_function_b=source_window,
                           cosmo_multi_epoch=cosmo_multi)
con_kernel.write('test_kernel.ascii')

### Finally we define and run our correlation function, writing the results
### out to test_corr.ascii. Correlation does the job of defining the k-space
### integral for a given theta. It also takes responsibility for setting the
### halo model object's redshift to that of the peak kernel redshift, and it
### conveniently allows both the kernel and halo model cosmologies to be set
### through the set_cosmology method. Note that, like the kernel module,
### cosmology takes its input in radians.
corr = correlation.Correlation(theta_min_deg=0.001,
                               theta_max_deg=1.0,
                               input_kernel=con_kernel,
                               input_halo=halo_model,
                               power_spec='power_gm')
corr.compute_correlation()
corr.write('test_corr.ascii')

### And done. To make this a proper magnification correlation, though, the
### user will have to multiply the output wtheta by 2.
### If you want to make this a script that could be MCMCed, create all of
### the objects as shown here and then, in the MCMC loop, call
### corr.set_cosmology and corr.set_hod (in this case) to change the
### cosmology/HOD and recompute.
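### A minimal sketch of that MCMC pattern. propose_params and
### log_likelihood are hypothetical placeholders, and the exact signatures
### of set_cosmology/set_hod are assumptions; only the call pattern
### (set, then recompute) comes from the notes above.
n_steps = 1000                                 # hypothetical chain length
for step in range(n_steps):
    trial_cosmo, trial_hod = propose_params()  # hypothetical proposal
    corr.set_cosmology(trial_cosmo)            # update both cosmologies
    corr.set_hod(trial_hod)                    # update the HOD
    corr.compute_correlation()                 # recompute w(theta)
    accept = log_likelihood(corr)              # hypothetical comparison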
def __init__(self, args, use_batch_norm=True, div_flow=20):
    r"""FlowNet2 C module. Check out the FlowNet2 paper for more details
    https://arxiv.org/abs/1612.01925

    Args:
        args (obj): Network initialization arguments.
        use_batch_norm (bool): Use batch norm or not. Default is True.
        div_flow (int): Flow division factor. Default is 20.
    """
    super(FlowNetC, self).__init__()
    self.use_batch_norm = use_batch_norm
    self.div_flow = div_flow
    self.conv1 = conv(self.use_batch_norm, 3, 64, kernel_size=7, stride=2)
    self.conv2 = conv(self.use_batch_norm, 64, 128, kernel_size=5, stride=2)
    self.conv3 = conv(self.use_batch_norm, 128, 256, kernel_size=5, stride=2)
    self.conv_redir = conv(self.use_batch_norm, 256, 32, kernel_size=1,
                           stride=1)
    self.args = args
    # if args.fp16:
    #     self.corr = nn.Sequential(
    #         tofp32(),
    #         correlation.Correlation(pad_size=20, kernel_size=1,
    #                                 max_displacement=20, stride1=1,
    #                                 stride2=2, corr_multiply=1),
    #         tofp16())
    # else:
    self.corr = correlation.Correlation(pad_size=20, kernel_size=1,
                                        max_displacement=20, stride1=1,
                                        stride2=2, corr_multiply=1)
    self.corr_activation = nn.LeakyReLU(0.1, inplace=True)
    # 473 = 441 correlation channels + 32 from conv_redir.
    self.conv3_1 = conv(self.use_batch_norm, 473, 256)
    self.conv4 = conv(self.use_batch_norm, 256, 512, stride=2)
    self.conv4_1 = conv(self.use_batch_norm, 512, 512)
    self.conv5 = conv(self.use_batch_norm, 512, 512, stride=2)
    self.conv5_1 = conv(self.use_batch_norm, 512, 512)
    self.conv6 = conv(self.use_batch_norm, 512, 1024, stride=2)
    self.conv6_1 = conv(self.use_batch_norm, 1024, 1024)

    self.deconv5 = deconv(1024, 512)
    self.deconv4 = deconv(1026, 256)
    self.deconv3 = deconv(770, 128)
    self.deconv2 = deconv(386, 64)

    self.predict_flow6 = predict_flow(1024)
    self.predict_flow5 = predict_flow(1026)
    self.predict_flow4 = predict_flow(770)
    self.predict_flow3 = predict_flow(386)
    self.predict_flow2 = predict_flow(194)

    self.upsampled_flow6_to_5 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=True)
    self.upsampled_flow5_to_4 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=True)
    self.upsampled_flow4_to_3 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=True)
    self.upsampled_flow3_to_2 = nn.ConvTranspose2d(2, 2, 4, 2, 1, bias=True)

    for m in self.modules():
        if isinstance(m, nn.Conv2d):
            if m.bias is not None:
                init.uniform_(m.bias)
            init.xavier_uniform_(m.weight)
        if isinstance(m, nn.ConvTranspose2d):
            if m.bias is not None:
                init.uniform_(m.bias)
            init.xavier_uniform_(m.weight)
            # init_deconv_bilinear(m.weight)

    self.upsample1 = nn.Upsample(scale_factor=4, mode='bilinear',
                                 align_corners=False)
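# A quick check of the channel arithmetic feeding conv3_1, assuming the
# correlation layer enumerates displacements on a
# (2*max_displacement/stride2 + 1)^2 grid, as in the FlowNet2 reference
# implementation:
max_displacement, stride2 = 20, 2
corr_channels = (2 * max_displacement // stride2 + 1) ** 2  # 21 * 21 = 441
redir_channels = 32  # output of conv_redir
assert corr_channels + redir_channels == 473  # the conv3_1 input width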
import db_handler
import websim
import correlation

c = correlation.Correlation(1)
# c.request_several(['f838811ba41a421ea2a993fac447440b',
#                    'e86d7948449449c9b9264fd45a872914',
#                    '2abd00ab916042df9addc0bbb92645e8',
#                    '23203453305048599977d15dbd1c44dd',
#                    'd8ac4db69c7b43288ec1ccdab4c45542',
#                    '4ebc11aa196c4a128a55dd187db317a3',
#                    '95c55b460029421bb047231f3994d478',
#                    '3a47b524f5774c5eaab0afbc21bf79f9',
#                    '4d9f4c43a95841e383afc143e646b647',
#                    'f22261104c1d42f58d7bd04bcdd667fa'])
print(c.correlation(49, 51, False))