Example #1
def test_alpha_method_fft():
    pixels = 64
    phys = PhysicalModel(pixels=pixels, method="fft")
    phys_analytic = AnalyticalPhysicalModel(pixels=pixels, image_fov=7)
    phys2 = PhysicalModel(pixels=pixels, method="conv2d")

    # test with a random (noise-like) kappa map
    kappa = tf.random.uniform(shape=[1, pixels, pixels, 1])
    alphax, alphay = phys.deflection_angle(kappa)
    alphax2, alphay2 = phys2.deflection_angle(kappa)

    # assert np.allclose(alphax, alphax2, atol=1e-4)
    # assert np.allclose(alphay, alphay2, atol=1e-4)

    # test with an analytical kappa profile
    kappa = phys_analytic.kappa_field(2, 0.4, 0, 0.1, 0.5)
    alphax, alphay = phys.deflection_angle(kappa)

    alphax2, alphay2 = phys2.deflection_angle(kappa)
    #
    # assert np.allclose(alphax, alphax2, atol=1e-4)
    # assert np.allclose(alphay, alphay2, atol=1e-4)
    im1 = phys_analytic.lens_source_func_given_alpha(
        tf.concat([alphax, alphay], axis=-1))
    im2 = phys_analytic.lens_source_func_given_alpha(
        tf.concat([alphax2, alphay2], axis=-1))
    return alphax, alphax2, im1, im2
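The allclose assertions are left commented out above. A minimal sketch (assuming only the censai API already used in this example; the helper name and the reduction to a single scalar are illustrative) of how the agreement between the two methods could be measured rather than asserted:

import tensorflow as tf
from censai import PhysicalModel, AnalyticalPhysicalModel

def max_deflection_discrepancy(pixels=64):
    # largest absolute difference between FFT-based and conv2d-based deflection angles
    phys_fft = PhysicalModel(pixels=pixels, method="fft")
    phys_conv = PhysicalModel(pixels=pixels, method="conv2d")
    kappa = AnalyticalPhysicalModel(pixels=pixels, image_fov=7).kappa_field(2, 0.4, 0, 0.1, 0.5)
    ax1, ay1 = phys_fft.deflection_angle(kappa)
    ax2, ay2 = phys_conv.deflection_angle(kappa)
    dx = tf.reduce_max(tf.abs(ax1 - ax2))
    dy = tf.reduce_max(tf.abs(ay1 - ay2))
    return float(tf.maximum(dx, dy))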
Example #2
def test_lagrange_multiplier_for_lens_intensity():
    phys = PhysicalModel(pixels=128)
    phys_a = AnalyticalPhysicalModel(pixels=128)
    kappa = phys_a.kappa_field(2.0, e=0.2)
    x = np.linspace(-1, 1, 128) * phys.src_fov / 2
    xx, yy = np.meshgrid(x, x)
    rho = xx**2 + yy**2
    source = tf.math.exp(-0.5 * rho / 0.5**2)[tf.newaxis, ..., tf.newaxis]
    source = tf.cast(source, tf.float32)

    y_true = phys.forward(source, kappa)
    y_pred = phys.forward(0.001 * source,
                          kappa)  # rescaled source, as if it were expressed in different units
    lam_lagrange = tf.reduce_sum(y_true * y_pred, axis=(
        1, 2, 3)) / tf.reduce_sum(y_pred**2, axis=(1, 2, 3))
    lam_tests = tf.squeeze(
        tf.cast(tf.linspace(lam_lagrange / 10, lam_lagrange * 10, 1000),
                tf.float32))[..., tf.newaxis, tf.newaxis, tf.newaxis]
    log_likelihood_best = 0.5 * tf.reduce_mean(
        (lam_lagrange * y_pred - y_true)**2 / phys.noise_rms**2,
        axis=(1, 2, 3))
    log_likelihood_test = 0.5 * tf.reduce_mean(
        (lam_tests * y_pred - y_true)**2 / phys.noise_rms**2, axis=(1, 2, 3))
    return log_likelihood_test, log_likelihood_best, tf.squeeze(
        lam_tests), lam_lagrange
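The closed form used for lam_lagrange above is just the least-squares optimum of a scalar amplitude; a one-line derivation (standard calculus, independent of censai):

    d/dλ Σ_i (λ ŷ_i − y_i)² = 2 Σ_i ŷ_i (λ ŷ_i − y_i) = 0  ⟹  λ* = Σ_i y_i ŷ_i / Σ_i ŷ_i²

so the scanned log_likelihood_test curve should attain its minimum at lam_lagrange.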
Example #3
def test_analytical_lensing():
    phys = AnalyticalPhysicalModel()
    source = tf.random.normal([1, 256, 256, 1])
    params = [1., 0.1, 0., 0.1, -0.1, 0.01, 3.14]
    im = phys.lens_source(source, *params)  # lens an arbitrary pixelated source (result unused below)

    im = phys.lens_source_func(e=0.6)  # lens the model's built-in analytical source profile

    kap = phys.kappa_field(e=0.2)  # smoke-test the kappa field; result unused
    return im.numpy()[0, ..., 0]
Example #4
def test_lens_source_conv2():
    pixels = 64
    src_pixels = 32
    phys = PhysicalModel(pixels=pixels,
                         src_pixels=src_pixels,
                         kappa_fov=16,
                         image_fov=16)
    phys_analytic = AnalyticalPhysicalModel(pixels=pixels, image_fov=16)
    source = tf.random.normal([1, src_pixels, src_pixels, 1])
    kappa = phys_analytic.kappa_field(7, 0.1, 0, 0, 0)
    lens = phys.lens_source(source, kappa)
    return lens
Example #5
def test_interpolated_kappa():
    import tensorflow_addons as tfa
    phys = PhysicalModel(pixels=128,
                         src_pixels=32,
                         image_fov=7.68,
                         kappa_fov=5)
    phys_a = AnalyticalPhysicalModel(pixels=128, image_fov=7.68)
    kappa = phys_a.kappa_field(r_ein=2., e=0.2)
    kappa += phys_a.kappa_field(r_ein=1., x0=2., y0=2.)
    true_lens = phys.lens_source_func(kappa, w=0.2)
    true_kappa = kappa

    # Test interpolation of alpha angles on a finer grid
    # phys = PhysicalModel(pixels=128, src_pixels=32, kappa_pixels=32)
    phys_a = AnalyticalPhysicalModel(pixels=32, image_fov=7.68)
    kappa = phys_a.kappa_field(r_ein=2., e=0.2)
    kappa += phys_a.kappa_field(r_ein=1., x0=2., y0=2.)

    # kappa2 = phys_a.kappa_field(r_ein=2., e=0.2)
    # kappa2 += phys_a.kappa_field(r_ein=1., x0=2., y0=2.)
    #
    # kappa = tf.concat([kappa, kappa2], axis=1)

    # Test interpolated kappa lens
    x = np.linspace(-1, 1, 128) * phys.kappa_fov / 2
    x, y = np.meshgrid(x, x)
    x = tf.constant(x[np.newaxis, ..., np.newaxis], tf.float32)
    y = tf.constant(y[np.newaxis, ..., np.newaxis], tf.float32)
    dx = phys.kappa_fov / (32 - 1)  # grid spacing of the 32-pixel kappa map
    xmin = -0.5 * phys.kappa_fov
    ymin = -0.5 * phys.kappa_fov
    i_coord = (x - xmin) / dx  # fractional pixel (column) coordinate
    j_coord = (y - ymin) / dx  # fractional pixel (row) coordinate
    wrap = tf.concat([i_coord, j_coord], axis=-1)  # (x, y) warp grid for the resampler
    # test_kappa1 = tfa.image.resampler(kappa, wrap)  # bilinear interpolation of source on wrap grid
    # test_lens1 = phys.lens_source_func(test_kappa1, w=0.2)
    phys2 = PhysicalModel(pixels=128,
                          kappa_pixels=32,
                          method="fft",
                          image_fov=7.68,
                          kappa_fov=5)
    test_lens1 = phys2.lens_source_func(kappa, w=0.2)

    # Test interpolated alpha angles lens
    phys2 = PhysicalModel(pixels=32,
                          src_pixels=32,
                          image_fov=7.68,
                          kappa_fov=5)
    alpha1, alpha2 = phys2.deflection_angle(kappa)
    alpha = tf.concat([alpha1, alpha2], axis=-1)
    alpha = tfa.image.resampler(alpha, wrap)
    test_lens2 = phys.lens_source_func_given_alpha(alpha, w=0.2)

    return true_lens, test_lens1, test_lens2
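A minimal, self-contained sketch (shapes are illustrative) of the pixel-coordinate convention tfa.image.resampler expects, which the warp grid above relies on: warp[..., 0] is the column (x) index and warp[..., 1] the row (y) index, interpolated bilinearly between grid points.

import tensorflow as tf
import tensorflow_addons as tfa

data = tf.reshape(tf.range(16, dtype=tf.float32), [1, 4, 4, 1])
warp = tf.constant([[[[1.5, 0.0]]]], dtype=tf.float32)  # halfway between columns 1 and 2, on row 0
print(tfa.image.resampler(data, warp))  # bilinear interpolation -> 1.5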
Example #6
def test_lens_func_given_alpha():
    phys = PhysicalModel(pixels=128)
    phys_a = AnalyticalPhysicalModel(pixels=128)
    alpha = phys_a.analytical_deflection_angles(x0=0.5,
                                                y0=0.5,
                                                e=0.4,
                                                phi=0.,
                                                r_ein=1.)
    lens_true = phys_a.lens_source_func(x0=0.5,
                                        y0=0.5,
                                        e=0.4,
                                        phi=0.,
                                        r_ein=1.,
                                        xs=0.5,
                                        ys=0.5)
    lens_pred = phys_a.lens_source_func_given_alpha(alpha, xs=0.5, ys=0.5)
    lens_pred2 = phys.lens_source_func_given_alpha(alpha, xs=0.5, ys=0.5)
    fig = raytracer_residual_plot(alpha[0], alpha[0], lens_true[0],
                                  lens_pred2[0])
Example #7
def distributed_strategy(args):
    tf.random.set_seed(args.seed)
    np.random.seed(args.seed)

    model = os.path.join(os.getenv('CENSAI_PATH'), "models", args.model)
    files = glob.glob(
        os.path.join(os.getenv('CENSAI_PATH'), "data", args.train_dataset,
                     "*.tfrecords"))
    files = tf.data.Dataset.from_tensor_slices(files)
    train_dataset = files.interleave(lambda x: tf.data.TFRecordDataset(
        x, compression_type=args.compression_type).shuffle(len(files)),
                                     block_length=1,
                                     num_parallel_calls=tf.data.AUTOTUNE)
    # Read off global parameters from first example in dataset
    for physical_params in train_dataset.map(decode_physical_model_info):
        break
    train_dataset = train_dataset.map(decode_results).shuffle(
        buffer_size=args.buffer_size)

    files = glob.glob(
        os.path.join(os.getenv('CENSAI_PATH'), "data", args.val_dataset,
                     "*.tfrecords"))
    files = tf.data.Dataset.from_tensor_slices(files)
    val_dataset = files.interleave(lambda x: tf.data.TFRecordDataset(
        x, compression_type=args.compression_type).shuffle(len(files)),
                                   block_length=1,
                                   num_parallel_calls=tf.data.AUTOTUNE)
    val_dataset = val_dataset.map(decode_results).shuffle(
        buffer_size=args.buffer_size)

    files = glob.glob(
        os.path.join(os.getenv('CENSAI_PATH'), "data", args.test_dataset,
                     "*.tfrecords"))
    files = tf.data.Dataset.from_tensor_slices(files)
    test_dataset = files.interleave(lambda x: tf.data.TFRecordDataset(
        x, compression_type=args.compression_type).shuffle(len(files)),
                                    block_length=1,
                                    num_parallel_calls=tf.data.AUTOTUNE)
    test_dataset = test_dataset.map(decode_results).shuffle(
        buffer_size=args.buffer_size)

    ps_lens = PowerSpectrum(bins=args.lens_coherence_bins,
                            pixels=physical_params["pixels"].numpy())
    ps_source = PowerSpectrum(bins=args.source_coherence_bins,
                              pixels=physical_params["src pixels"].numpy())
    ps_kappa = PowerSpectrum(bins=args.kappa_coherence_bins,
                             pixels=physical_params["kappa pixels"].numpy())

    phys = PhysicalModel(
        pixels=physical_params["pixels"].numpy(),
        kappa_pixels=physical_params["kappa pixels"].numpy(),
        src_pixels=physical_params["src pixels"].numpy(),
        image_fov=physical_params["image fov"].numpy(),
        kappa_fov=physical_params["kappa fov"].numpy(),
        src_fov=physical_params["source fov"].numpy(),
        method="fft",
    )

    phys_sie = AnalyticalPhysicalModel(
        pixels=physical_params["pixels"].numpy(),
        image_fov=physical_params["image fov"].numpy(),
        src_fov=physical_params["source fov"].numpy())

    with open(os.path.join(model, "unet_hparams.json")) as f:
        unet_params = json.load(f)
    unet_params["kernel_l2_amp"] = args.l2_amp
    unet = Model(**unet_params)
    ckpt = tf.train.Checkpoint(net=unet)
    checkpoint_manager = tf.train.CheckpointManager(ckpt, model, 1)
    checkpoint_manager.checkpoint.restore(
        checkpoint_manager.latest_checkpoint).expect_partial()
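    # expect_partial() silences warnings about checkpoint values (e.g. optimizer slots) not restored here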
    with open(os.path.join(model, "rim_hparams.json")) as f:
        rim_params = json.load(f)
    rim = RIM(phys, unet, **rim_params)

    dataset_names = [args.train_dataset, args.val_dataset, args.test_dataset]
    dataset_shapes = [args.train_size, args.val_size, args.test_size]
    model_name = os.path.split(model)[-1]

    # from censai.utils import nulltape
    # def call_with_mask(self, lensed_image, noise_rms, psf, mask, outer_tape=nulltape):
    #     """
    #     Used in training. Return linked kappa and source maps.
    #     """
    #     batch_size = lensed_image.shape[0]
    #     source, kappa, source_grad, kappa_grad, states = self.initial_states(batch_size)  # initiate all tensors to 0
    #     source, kappa, states = self.time_step(lensed_image, source, kappa, source_grad, kappa_grad,
    #                                            states)  # Use lens to make an initial guess with Unet
    #     source_series = tf.TensorArray(DTYPE, size=self.steps)
    #     kappa_series = tf.TensorArray(DTYPE, size=self.steps)
    #     chi_squared_series = tf.TensorArray(DTYPE, size=self.steps)
    #     # record initial guess
    #     source_series = source_series.write(index=0, value=source)
    #     kappa_series = kappa_series.write(index=0, value=kappa)
    #     # Main optimization loop
    #     for current_step in tf.range(self.steps - 1):
    #         with outer_tape.stop_recording():
    #             with tf.GradientTape() as g:
    #                 g.watch(source)
    #                 g.watch(kappa)
    #                 y_pred = self.physical_model.forward(self.source_link(source), self.kappa_link(kappa), psf)
    #                 flux_term = tf.square(
    #                     tf.reduce_sum(y_pred, axis=(1, 2, 3)) - tf.reduce_sum(lensed_image, axis=(1, 2, 3)))
    #                 log_likelihood = 0.5 * tf.reduce_sum(
    #                     tf.square(y_pred - mask * lensed_image) / noise_rms[:, None, None, None] ** 2, axis=(1, 2, 3))
    #                 cost = tf.reduce_mean(log_likelihood + self.flux_lagrange_multiplier * flux_term)
    #             source_grad, kappa_grad = g.gradient(cost, [source, kappa])
    #             source_grad, kappa_grad = self.grad_update(source_grad, kappa_grad, current_step)
    #         source, kappa, states = self.time_step(lensed_image, source, kappa, source_grad, kappa_grad, states)
    #         source_series = source_series.write(index=current_step + 1, value=source)
    #         kappa_series = kappa_series.write(index=current_step + 1, value=kappa)
    #         chi_squared_series = chi_squared_series.write(index=current_step,
    #                                                       value=log_likelihood / self.pixels ** 2)  # renormalize chi squared here
    #     # last step score
    #     log_likelihood = self.physical_model.log_likelihood(y_true=lensed_image, source=self.source_link(source),
    #                                                         kappa=self.kappa_link(kappa), psf=psf, noise_rms=noise_rms)
    #     chi_squared_series = chi_squared_series.write(index=self.steps - 1, value=log_likelihood)
    #     return source_series.stack(), kappa_series.stack(), chi_squared_series.stack()

    with h5py.File(
            os.path.join(
                os.getenv("CENSAI_PATH"), "results", args.experiment_name +
                "_" + model_name + f"_{THIS_WORKER:02d}.h5"), 'w') as hf:
        for i, dataset in enumerate([train_dataset, val_dataset,
                                     test_dataset]):
            g = hf.create_group(dataset_names[i])
            data_len = dataset_shapes[i] // N_WORKERS
            g.create_dataset(name="lens",
                             shape=[data_len, phys.pixels, phys.pixels, 1],
                             dtype=np.float32)
            g.create_dataset(name="psf",
                             shape=[
                                 data_len, physical_params['psf pixels'],
                                 physical_params['psf pixels'], 1
                             ],
                             dtype=np.float32)
            g.create_dataset(name="psf_fwhm",
                             shape=[data_len],
                             dtype=np.float32)
            g.create_dataset(name="noise_rms",
                             shape=[data_len],
                             dtype=np.float32)
            g.create_dataset(
                name="source",
                shape=[data_len, phys.src_pixels, phys.src_pixels, 1],
                dtype=np.float32)
            g.create_dataset(
                name="kappa",
                shape=[data_len, phys.kappa_pixels, phys.kappa_pixels, 1],
                dtype=np.float32)
            g.create_dataset(name="lens_pred",
                             shape=[data_len, phys.pixels, phys.pixels, 1],
                             dtype=np.float32)
            g.create_dataset(name="lens_pred_reoptimized",
                             shape=[data_len, phys.pixels, phys.pixels, 1],
                             dtype=np.float32)
            g.create_dataset(name="source_pred",
                             shape=[
                                 data_len, rim.steps, phys.src_pixels,
                                 phys.src_pixels, 1
                             ],
                             dtype=np.float32)
            g.create_dataset(
                name="source_pred_reoptimized",
                shape=[data_len, phys.src_pixels, phys.src_pixels, 1],
                dtype=np.float32)
            g.create_dataset(name="kappa_pred",
                             shape=[
                                 data_len, rim.steps, phys.kappa_pixels,
                                 phys.kappa_pixels, 1
                             ],
                             dtype=np.float32)
            g.create_dataset(
                name="kappa_pred_reoptimized",
                shape=[data_len, phys.kappa_pixels, phys.kappa_pixels, 1],
                dtype=np.float32)
            g.create_dataset(name="chi_squared",
                             shape=[data_len, rim.steps],
                             dtype=np.float32)
            g.create_dataset(name="chi_squared_reoptimized",
                             shape=[data_len],
                             dtype=np.float32)
            g.create_dataset(name="chi_squared_reoptimized_series",
                             shape=[data_len, args.re_optimize_steps],
                             dtype=np.float32)
            g.create_dataset(name="source_optim_mse",
                             shape=[data_len],
                             dtype=np.float32)
            g.create_dataset(name="source_optim_mse_series",
                             shape=[data_len, args.re_optimize_steps],
                             dtype=np.float32)
            g.create_dataset(name="kappa_optim_mse",
                             shape=[data_len],
                             dtype=np.float32)
            g.create_dataset(name="kappa_optim_mse_series",
                             shape=[data_len, args.re_optimize_steps],
                             dtype=np.float32)
            g.create_dataset(name="lens_coherence_spectrum",
                             shape=[data_len, args.lens_coherence_bins],
                             dtype=np.float32)
            g.create_dataset(name="source_coherence_spectrum",
                             shape=[data_len, args.source_coherence_bins],
                             dtype=np.float32)
            g.create_dataset(name="lens_coherence_spectrum2",
                             shape=[data_len, args.lens_coherence_bins],
                             dtype=np.float32)
            g.create_dataset(name="lens_coherence_spectrum_reoptimized",
                             shape=[data_len, args.lens_coherence_bins],
                             dtype=np.float32)
            g.create_dataset(name="source_coherence_spectrum2",
                             shape=[data_len, args.source_coherence_bins],
                             dtype=np.float32)
            g.create_dataset(name="source_coherence_spectrum_reoptimized",
                             shape=[data_len, args.source_coherence_bins],
                             dtype=np.float32)
            g.create_dataset(name="kappa_coherence_spectrum",
                             shape=[data_len, args.kappa_coherence_bins],
                             dtype=np.float32)
            g.create_dataset(name="kappa_coherence_spectrum_reoptimized",
                             shape=[data_len, args.kappa_coherence_bins],
                             dtype=np.float32)
            g.create_dataset(name="lens_frequencies",
                             shape=[args.lens_coherence_bins],
                             dtype=np.float32)
            g.create_dataset(name="source_frequencies",
                             shape=[args.source_coherence_bins],
                             dtype=np.float32)
            g.create_dataset(name="kappa_frequencies",
                             shape=[args.kappa_coherence_bins],
                             dtype=np.float32)
            g.create_dataset(name="kappa_fov", shape=[1], dtype=np.float32)
            g.create_dataset(name="source_fov", shape=[1], dtype=np.float32)
            g.create_dataset(name="lens_fov", shape=[1], dtype=np.float32)
            dataset = dataset.skip(data_len * (THIS_WORKER - 1)).take(data_len)
            for batch, (lens, source, kappa, noise_rms, psf,
                        fwhm) in enumerate(
                            dataset.batch(1).prefetch(
                                tf.data.experimental.AUTOTUNE)):
                checkpoint_manager.checkpoint.restore(
                    checkpoint_manager.latest_checkpoint).expect_partial(
                    )  # reset model weights
                # Compute predictions for kappa and source
                source_pred, kappa_pred, chi_squared = rim.predict(
                    lens, noise_rms, psf)
                lens_pred = phys.forward(source_pred[-1], kappa_pred[-1], psf)
                # Re-optimize weights of the model
                STEPS = args.re_optimize_steps
                learning_rate_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
                    initial_learning_rate=args.learning_rate,
                    decay_rate=args.decay_rate,
                    decay_steps=args.decay_steps,
                    staircase=args.staircase)
                optim = tf.keras.optimizers.RMSprop(
                    learning_rate=learning_rate_schedule)

                chi_squared_series = tf.TensorArray(DTYPE, size=STEPS)
                source_mse = tf.TensorArray(DTYPE, size=STEPS)
                kappa_mse = tf.TensorArray(DTYPE, size=STEPS)
                best = chi_squared[-1, 0]
                # best = abs(2*chi_squared[-1, 0] - 1)
                # best_chisq = 2*chi_squared[-1, 0]
                source_best = source_pred[-1]
                kappa_best = kappa_pred[-1]
                # initialize the best-MSE trackers so they are defined even if the loop breaks on its first pass
                source_mse_best = tf.reduce_mean(
                    (source_best - rim.source_inverse_link(source))**2)
                kappa_mse_best = tf.reduce_mean(
                    (kappa_best - rim.kappa_inverse_link(kappa))**2)
                # source_mean = source_pred[-1]
                # kappa_mean = rim.kappa_link(kappa_pred[-1])
                # source_std = tf.zeros_like(source_mean)
                # kappa_std = tf.zeros_like(kappa_mean)
                # counter = 0
                for current_step in tqdm(range(STEPS)):
                    with tf.GradientTape() as tape:
                        tape.watch(unet.trainable_variables)
                        # s, k, chi_sq = call_with_mask(rim, lens, noise_rms, psf, mask, tape)
                        s, k, chi_sq = rim.call(lens,
                                                noise_rms,
                                                psf,
                                                outer_tape=tape)
                        cost = tf.reduce_mean(chi_sq)  # mean over time steps
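                        # unet.losses holds the kernel L2 penalties (amplitude set above via args.l2_amp)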
                        cost += tf.reduce_sum(rim.unet.losses)

                    log_likelihood = chi_sq[-1]
                    chi_squared_series = chi_squared_series.write(
                        index=current_step, value=log_likelihood)
                    source_o = s[-1]
                    kappa_o = k[-1]
                    source_mse = source_mse.write(
                        index=current_step,
                        value=tf.reduce_mean(
                            (source_o - rim.source_inverse_link(source))**2))
                    kappa_mse = kappa_mse.write(
                        index=current_step,
                        value=tf.reduce_mean(
                            (kappa_o - rim.kappa_inverse_link(kappa))**2))
                    if chi_sq[-1, 0] < args.converged_chisq:
                        source_best = rim.source_link(source_o)
                        kappa_best = rim.kappa_link(kappa_o)
                        best = chi_sq[-1, 0]
                        source_mse_best = tf.reduce_mean(
                            (source_best - rim.source_inverse_link(source))**2)
                        kappa_mse_best = tf.reduce_mean(
                            (kappa_best - rim.kappa_inverse_link(kappa))**2)
                        break
                    if chi_sq[-1, 0] < best:
                        source_best = rim.source_link(source_o)
                        kappa_best = rim.kappa_link(kappa_o)
                        best = chi_sq[-1, 0]
                        source_mse_best = tf.reduce_mean(
                            (source_best - rim.source_inverse_link(source))**2)
                        kappa_mse_best = tf.reduce_mean(
                            (kappa_best - rim.kappa_inverse_link(kappa))**2)
                    # if counter > 0:
                    #     # Welford's online algorithm
                    #     # source
                    #     delta = source_o - source_mean
                    #     source_mean = (counter * source_mean + (counter + 1) * source_o)/(counter + 1)
                    #     delta2 = source_o - source_mean
                    #     source_std += delta * delta2
                    #     # kappa
                    #     delta = rim.kappa_link(kappa_o) - kappa_mean
                    #     kappa_mean = (counter * kappa_mean + (counter + 1) * rim.kappa_link(kappa_o)) / (counter + 1)
                    #     delta2 = rim.kappa_link(kappa_o) - kappa_mean
                    #     kappa_std += delta * delta2
                    # if best_chisq < args.converged_chisq:
                    #     counter += 1
                    #     if counter == args.window:
                    #         break
                    # if 2*chi_sq[-1, 0] < best_chisq:
                    #     best_chisq = 2*chi_sq[-1, 0]
                    # if abs(2*chi_sq[-1, 0] - 1) < best:
                    #     source_best = rim.source_link(source_o)
                    #     kappa_best = rim.kappa_link(kappa_o)
                    #     best = abs(2 * chi_squared[-1, 0] - 1)
                    #     source_mse_best = tf.reduce_mean((source_best - rim.source_inverse_link(source)) ** 2)
                    #     kappa_mse_best = tf.reduce_mean((kappa_best - rim.kappa_inverse_link(kappa)) ** 2)

                    grads = tape.gradient(cost, unet.trainable_variables)
                    optim.apply_gradients(zip(grads, unet.trainable_variables))

                source_o = source_best
                kappa_o = kappa_best
                y_pred = phys.forward(source_o, kappa_o, psf)
                chi_sq_series = tf.transpose(chi_squared_series.stack(),
                                             perm=[1, 0])
                source_mse = source_mse.stack()[None, ...]
                kappa_mse = kappa_mse.stack()[None, ...]
                # kappa_std /= float(args.window)
                # source_std /= float(args.window)

                # Compute Power spectrum of converged predictions
                _ps_lens = ps_lens.cross_correlation_coefficient(
                    lens[..., 0], lens_pred[..., 0])
                _ps_lens3 = ps_lens.cross_correlation_coefficient(
                    lens[..., 0], y_pred[..., 0])
                _ps_kappa = ps_kappa.cross_correlation_coefficient(
                    log_10(kappa)[..., 0],
                    log_10(kappa_pred[-1])[..., 0])
                _ps_kappa2 = ps_kappa.cross_correlation_coefficient(
                    log_10(kappa)[..., 0], log_10(kappa_o[..., 0]))
                _ps_source = ps_source.cross_correlation_coefficient(
                    source[..., 0], source_pred[-1][..., 0])
                _ps_source3 = ps_source.cross_correlation_coefficient(
                    source[..., 0], source_o[..., 0])

                # save results
                g["lens"][batch] = lens.numpy().astype(np.float32)
                g["psf"][batch] = psf.numpy().astype(np.float32)
                g["psf_fwhm"][batch] = fwhm.numpy().astype(np.float32)
                g["noise_rms"][batch] = noise_rms.numpy().astype(np.float32)
                g["source"][batch] = source.numpy().astype(np.float32)
                g["kappa"][batch] = kappa.numpy().astype(np.float32)
                g["lens_pred"][batch] = lens_pred.numpy().astype(np.float32)
                g["lens_pred_reoptimized"][batch] = y_pred.numpy().astype(
                    np.float32)
                g["source_pred"][batch] = tf.transpose(
                    source_pred,
                    perm=(1, 0, 2, 3, 4)).numpy().astype(np.float32)
                g["source_pred_reoptimized"][batch] = source_o.numpy().astype(
                    np.float32)
                g["kappa_pred"][batch] = tf.transpose(
                    kappa_pred,
                    perm=(1, 0, 2, 3, 4)).numpy().astype(np.float32)
                g["kappa_pred_reoptimized"][batch] = kappa_o.numpy().astype(
                    np.float32)
                g["chi_squared"][batch] = tf.transpose(
                    chi_squared).numpy().astype(np.float32)
                g["chi_squared_reoptimized"][batch] = best.numpy().astype(
                    np.float32)
                g["chi_squared_reoptimized_series"][
                    batch] = chi_sq_series.numpy().astype(np.float32)
                g["source_optim_mse"][batch] = source_mse_best.numpy().astype(
                    np.float32)
                g["source_optim_mse_series"][batch] = source_mse.numpy(
                ).astype(np.float32)
                g["kappa_optim_mse"][batch] = kappa_mse_best.numpy().astype(
                    np.float32)
                g["kappa_optim_mse_series"][batch] = kappa_mse.numpy().astype(
                    np.float32)
                g["lens_coherence_spectrum"][batch] = _ps_lens
                g["lens_coherence_spectrum_reoptimized"][batch] = _ps_lens3
                g["source_coherence_spectrum"][batch] = _ps_source
                g["source_coherence_spectrum_reoptimized"][batch] = _ps_source3
                g["lens_coherence_spectrum"][batch] = _ps_lens
                g["lens_coherence_spectrum"][batch] = _ps_lens
                g["kappa_coherence_spectrum"][batch] = _ps_kappa
                g["kappa_coherence_spectrum_reoptimized"][batch] = _ps_kappa2

                if batch == 0:
                    _, f = np.histogram(np.fft.fftfreq(
                        phys.pixels)[:phys.pixels // 2],
                                        bins=ps_lens.bins)
                    f = (f[:-1] + f[1:]) / 2
                    g["lens_frequencies"][:] = f
                    _, f = np.histogram(np.fft.fftfreq(
                        phys.src_pixels)[:phys.src_pixels // 2],
                                        bins=ps_source.bins)
                    f = (f[:-1] + f[1:]) / 2
                    g["source_frequencies"][:] = f
                    _, f = np.histogram(np.fft.fftfreq(
                        phys.kappa_pixels)[:phys.kappa_pixels // 2],
                                        bins=ps_kappa.bins)
                    f = (f[:-1] + f[1:]) / 2
                    g["kappa_frequencies"][:] = f
                    g["kappa_fov"][0] = phys.kappa_fov
                    g["source_fov"][0] = phys.src_fov

        # Create SIE test
        g = hf.create_group('SIE_test')
        data_len = args.sie_size // N_WORKERS
        sie_dataset = test_dataset.skip(data_len *
                                        (THIS_WORKER - 1)).take(data_len)
        g.create_dataset(name="lens",
                         shape=[data_len, phys.pixels, phys.pixels, 1],
                         dtype=np.float32)
        g.create_dataset(name="psf",
                         shape=[
                             data_len, physical_params['psf pixels'],
                             physical_params['psf pixels'], 1
                         ],
                         dtype=np.float32)
        g.create_dataset(name="psf_fwhm", shape=[data_len], dtype=np.float32)
        g.create_dataset(name="noise_rms", shape=[data_len], dtype=np.float32)
        g.create_dataset(name="source",
                         shape=[data_len, phys.src_pixels, phys.src_pixels, 1],
                         dtype=np.float32)
        g.create_dataset(
            name="kappa",
            shape=[data_len, phys.kappa_pixels, phys.kappa_pixels, 1],
            dtype=np.float32)
        g.create_dataset(name="lens_pred",
                         shape=[data_len, phys.pixels, phys.pixels, 1],
                         dtype=np.float32)
        g.create_dataset(name="lens_pred2",
                         shape=[data_len, phys.pixels, phys.pixels, 1],
                         dtype=np.float32)
        g.create_dataset(
            name="source_pred",
            shape=[data_len, rim.steps, phys.src_pixels, phys.src_pixels, 1],
            dtype=np.float32)
        g.create_dataset(name="kappa_pred",
                         shape=[
                             data_len, rim.steps, phys.kappa_pixels,
                             phys.kappa_pixels, 1
                         ],
                         dtype=np.float32)
        g.create_dataset(name="chi_squared",
                         shape=[data_len, rim.steps],
                         dtype=np.float32)
        g.create_dataset(name="lens_coherence_spectrum",
                         shape=[data_len, args.lens_coherence_bins],
                         dtype=np.float32)
        g.create_dataset(name="source_coherence_spectrum",
                         shape=[data_len, args.source_coherence_bins],
                         dtype=np.float32)
        g.create_dataset(name="lens_coherence_spectrum2",
                         shape=[data_len, args.lens_coherence_bins],
                         dtype=np.float32)
        g.create_dataset(name="source_coherence_spectrum2",
                         shape=[data_len, args.source_coherence_bins],
                         dtype=np.float32)
        g.create_dataset(name="kappa_coherence_spectrum",
                         shape=[data_len, args.kappa_coherence_bins],
                         dtype=np.float32)
        g.create_dataset(name="lens_frequencies",
                         shape=[args.lens_coherence_bins],
                         dtype=np.float32)
        g.create_dataset(name="source_frequencies",
                         shape=[args.source_coherence_bins],
                         dtype=np.float32)
        g.create_dataset(name="kappa_frequencies",
                         shape=[args.kappa_coherence_bins],
                         dtype=np.float32)
        g.create_dataset(name="einstein_radius",
                         shape=[data_len],
                         dtype=np.float32)
        g.create_dataset(name="position",
                         shape=[data_len, 2],
                         dtype=np.float32)
        g.create_dataset(name="orientation",
                         shape=[data_len],
                         dtype=np.float32)
        g.create_dataset(name="ellipticity",
                         shape=[data_len],
                         dtype=np.float32)
        g.create_dataset(name="kappa_fov", shape=[1], dtype=np.float32)
        g.create_dataset(name="source_fov", shape=[1], dtype=np.float32)
        g.create_dataset(name="lens_fov", shape=[1], dtype=np.float32)

        for batch, (_, source, _, noise_rms, psf, fwhm) in enumerate(
                sie_dataset.take(data_len).batch(args.batch_size).prefetch(
                    tf.data.experimental.AUTOTUNE)):
            batch_size = source.shape[0]
            # Create some SIE kappa maps
            _r = tf.random.uniform(shape=[batch_size, 1, 1, 1],
                                   minval=0,
                                   maxval=args.max_shift)
            _theta = tf.random.uniform(shape=[batch_size, 1, 1, 1],
                                       minval=-np.pi,
                                       maxval=np.pi)
            x0 = _r * tf.math.cos(_theta)
            y0 = _r * tf.math.sin(_theta)
            ellipticity = tf.random.uniform(shape=[batch_size, 1, 1, 1],
                                            minval=0.,
                                            maxval=args.max_ellipticity)
            phi = tf.random.uniform(shape=[batch_size, 1, 1, 1],
                                    minval=-np.pi,
                                    maxval=np.pi)
            einstein_radius = tf.random.uniform(shape=[batch_size, 1, 1, 1],
                                                minval=args.min_theta_e,
                                                maxval=args.max_theta_e)
            kappa = phys_sie.kappa_field(x0=x0,
                                         y0=y0,
                                         e=ellipticity,
                                         phi=phi,
                                         r_ein=einstein_radius)
            lens = phys.noisy_forward(source,
                                      kappa,
                                      noise_rms=noise_rms,
                                      psf=psf)

            # Compute predictions for kappa and source
            source_pred, kappa_pred, chi_squared = rim.predict(
                lens, noise_rms, psf)
            lens_pred = phys.forward(source_pred[-1], kappa_pred[-1], psf)
            # Compute Power spectrum of converged predictions
            _ps_lens = ps_lens.cross_correlation_coefficient(
                lens[..., 0], lens_pred[..., 0])
            _ps_kappa = ps_kappa.cross_correlation_coefficient(
                log_10(kappa)[..., 0],
                log_10(kappa_pred[-1])[..., 0])
            _ps_source = ps_source.cross_correlation_coefficient(
                source[..., 0], source_pred[-1][..., 0])

            # save results
            i_begin = batch * args.batch_size
            i_end = i_begin + batch_size
            g["lens"][i_begin:i_end] = lens.numpy().astype(np.float32)
            g["psf"][i_begin:i_end] = psf.numpy().astype(np.float32)
            g["psf_fwhm"][i_begin:i_end] = fwhm.numpy().astype(np.float32)
            g["noise_rms"][i_begin:i_end] = noise_rms.numpy().astype(
                np.float32)
            g["source"][i_begin:i_end] = source.numpy().astype(np.float32)
            g["kappa"][i_begin:i_end] = kappa.numpy().astype(np.float32)
            g["lens_pred"][i_begin:i_end] = lens_pred.numpy().astype(
                np.float32)
            g["source_pred"][i_begin:i_end] = tf.transpose(
                source_pred, perm=(1, 0, 2, 3, 4)).numpy().astype(np.float32)
            g["kappa_pred"][i_begin:i_end] = tf.transpose(
                kappa_pred, perm=(1, 0, 2, 3, 4)).numpy().astype(np.float32)
            g["chi_squared"][i_begin:i_end] = 2 * tf.transpose(
                chi_squared).numpy().astype(np.float32)
            g["lens_coherence_spectrum"][i_begin:i_end] = _ps_lens.numpy(
            ).astype(np.float32)
            g["source_coherence_spectrum"][i_begin:i_end] = _ps_source.numpy(
            ).astype(np.float32)
            g["kappa_coherence_spectrum"][i_begin:i_end] = _ps_kappa.numpy(
            ).astype(np.float32)
            g["einstein_radius"][
                i_begin:i_end] = einstein_radius[:, 0, 0,
                                                 0].numpy().astype(np.float32)
            g["position"][i_begin:i_end] = tf.stack(
                [x0[:, 0, 0, 0], y0[:, 0, 0, 0]],
                axis=1).numpy().astype(np.float32)
            g["ellipticity"][i_begin:i_end] = ellipticity[:, 0, 0,
                                                          0].numpy().astype(
                                                              np.float32)
            g["orientation"][i_begin:i_end] = phi[:, 0, 0,
                                                  0].numpy().astype(np.float32)

            if batch == 0:
                _, f = np.histogram(np.fft.fftfreq(phys.pixels)[:phys.pixels //
                                                                2],
                                    bins=ps_lens.bins)
                f = (f[:-1] + f[1:]) / 2
                g["lens_frequencies"][:] = f
                _, f = np.histogram(np.fft.fftfreq(
                    phys.src_pixels)[:phys.src_pixels // 2],
                                    bins=ps_source.bins)
                f = (f[:-1] + f[1:]) / 2
                g["source_frequencies"][:] = f
                _, f = np.histogram(np.fft.fftfreq(
                    phys.kappa_pixels)[:phys.kappa_pixels // 2],
                                    bins=ps_kappa.bins)
                f = (f[:-1] + f[1:]) / 2
                g["kappa_frequencies"][:] = f
                g["kappa_fov"][0] = phys.kappa_fov
                g["source_fov"][0] = phys.src_fov
Example #8
    def convolve_with_psf(self, images, psf):  # signature inferred from the commented-out call below; excerpt begins mid-method
        # images are expected here with the batch already moved to the channel axis, i.e. [1, H, W, batch]
        psf = tf.transpose(psf, perm=[1, 2, 0, 3])  # put different psf on "in channels" dimension
        convolved_images = tf.nn.depthwise_conv2d(images, psf, strides=[1, 1, 1, 1], padding="SAME", data_format="NHWC")
        convolved_images = tf.transpose(convolved_images, perm=[3, 1, 2, 0]) # put channels back to batch dimension
        return convolved_images
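A minimal, self-contained sketch (shapes are illustrative) of the per-example PSF trick used above: moving the batch onto the channel axis lets tf.nn.depthwise_conv2d apply a different kernel to every image in the batch.

import tensorflow as tf

B, H, W, P = 2, 32, 32, 9
images = tf.random.normal([B, H, W, 1])
psf = tf.random.normal([B, P, P, 1])

x = tf.transpose(images, perm=[3, 1, 2, 0])  # [1, H, W, B]: batch -> channels
k = tf.transpose(psf, perm=[1, 2, 0, 3])     # [P, P, B, 1]: one kernel per channel
y = tf.nn.depthwise_conv2d(x, k, strides=[1, 1, 1, 1], padding="SAME")
y = tf.transpose(y, perm=[3, 1, 2, 0])       # back to [B, H, W, 1]
print(y.shape)  # (2, 32, 32, 1)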


if __name__ == '__main__':
    phys = PhysicalModel(128)
    from censai import AnalyticalPhysicalModel
    # kappa = AnalyticalPhysicalModel(64).kappa_field(r_ein=np.array([1., 2.])[:, None, None, None])
    # psf = phys.psf_models(np.array([0.4, 0.12]))
    import matplotlib.pyplot as plt
    # x = tf.random.normal(shape=(2, 64, 64, 1))
    # y = phys.noisy_forward(x, kappa, np.array([0.01, 0.04]), psf)
    # # out = phys.convolve_with_psf(x, psf)
    # fig, (ax1, ax2) = plt.subplots(1, 2)
    # ax1.imshow(y[0, ..., 0])
    # ax1.set_title("0.4")
    # ax2.imshow(y[1, ..., 0])
    # ax2.set_title("0.12")
    # # print(out.shape)
    # plt.show()
    # print(psf.numpy().sum(axis=(1, 2, 3)))
    kappa = AnalyticalPhysicalModel(128).kappa_field(r_ein=1.5, e=0.4)
    jacobian = phys.jacobian(kappa)
    jac_det = tf.linalg.det(jacobian)
    plt.imshow(jac_det[0], cmap="seismic", extent=[-7.69/2, 7.69/2]*2)
    plt.colorbar()
    contour = plt.contour(jac_det[0], levels=[0], cmap="gray", extent=[-7.69/2, 7.69/2]*2)  # critical curves: det(A) = 0
    plt.show()