Example No. 1
def test_wav_op():
    wav = 'dirac'
    levels = 3
    shape = (128, )
    op = linear_operators.db_wavelets(wav, levels, shape)
    inp = np.random.normal(0, 10., shape)
    out = op.dir_op(inp)
    forward_operator(op, inp, out)
    inp = np.random.normal(0, 10., out.shape)
    out = op.adj_op(inp)
    adjoint_operator(op, inp, out)
    buff1 = op.adj_op(op.dir_op(out))
    assert np.allclose(out, buff1, 1e-6)
    wav = 'db1'
    levels = 3
    shape = (128, )
    op = linear_operators.db_wavelets(wav, levels, shape)
    inp = np.random.normal(0, 10., shape)
    out = op.dir_op(inp)
    forward_operator(op, inp, out)
    inp = np.random.normal(0, 10., out.shape)
    out = op.adj_op(inp)
    adjoint_operator(op, inp, out)
    buff1 = op.adj_op(op.dir_op(out))
    assert np.allclose(out, buff1, 1e-6)
    wav = ['db1', 'db2', 'dirac']
    levels = 3
    shape = (128, )
    op = linear_operators.dictionary(wav, levels, shape)
    inp = np.random.normal(0, 10., shape)
    out = op.dir_op(inp)
    forward_operator(op, inp, out)
    inp = np.random.normal(0, 10., out.shape)
    out = op.adj_op(inp)
    adjoint_operator(op, inp, out)
    buff1 = op.adj_op(op.dir_op(out))
    assert np.allclose(out, buff1, 1e-6)
    wav = 'db2'
    levels = 3
    shape = (128, 128)
    op = linear_operators.db_wavelets(wav, levels, shape)
    inp = np.random.normal(0, 10., shape)
    out = op.dir_op(inp)
    forward_operator(op, inp, out)
    inp = np.random.normal(0, 10., out.shape)
    out = op.adj_op(inp)
    adjoint_operator(op, inp, out)
    buff1 = op.adj_op(op.dir_op(out))
    assert np.allclose(out, buff1, 1e-6)
    wav = ['db1', 'db2', 'dirac']
    levels = 3
    shape = (128, 128)
    op = linear_operators.dictionary(wav, levels, shape)
    inp = np.random.normal(0, 10., shape)
    out = op.dir_op(inp)
    forward_operator(op, inp, out)
    inp = np.random.normal(0, 10., out.shape)
    out = op.adj_op(inp)
    adjoint_operator(op, inp, out)
    buff1 = op.adj_op(op.dir_op(out))
    assert np.allclose(out, buff1, 1e-6)
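The helpers forward_operator and adjoint_operator are not shown in this example. A minimal sketch of the dot-product (adjoint consistency) test such helpers typically perform, written against a plain NumPy matrix so it runs on its own:

import numpy as np

def dot_product_test(dir_op, adj_op, in_shape, out_shape, rtol=1e-10):
    # Adjoint consistency: <A x, y> must equal <x, A^T y> for random x, y.
    x = np.random.normal(0, 1., in_shape)
    y = np.random.normal(0, 1., out_shape)
    lhs = np.vdot(dir_op(x), y)
    rhs = np.vdot(x, adj_op(y))
    assert np.isclose(lhs, rhs, rtol=rtol)

# Stand-in operator: an explicit matrix, so the sketch is self-contained.
A = np.random.normal(0, 1., (64, 128))
dot_product_test(lambda v: A @ v, lambda v: A.T @ v, (128, ), (64, ))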
Example No. 2
def test_l1_constrained():
    options = {
        'tol': 1e-5,
        'iter': 5000,
        'update_iter': 50,
        'record_iters': False
    }
    ISNR = 40.
    sigma = 10**(-ISNR / 20.)
    size = 1024
    epsilon = np.sqrt(size + 2. * np.sqrt(size)) * sigma
    x = np.linspace(0, 1 * np.pi, size)

    W = np.ones((size, ))

    y = W * x + np.random.normal(0, sigma, size)

    p = prox_operators.l2_ball(epsilon, y,
                               linear_operators.diag_matrix_operator(W))

    wav = ['db1', 'db4']
    levels = 6
    shape = (size, )
    psi = linear_operators.dictionary(wav, levels, shape)

    h = prox_operators.l1_norm(np.max(np.abs(psi.dir_op(y))) * 1e-3, psi)
    h.beta = 1.
    f = prox_operators.real_prox()
    z, diag = primal_dual.FBPD(y, options, None, f, h, p)
    assert (np.linalg.norm(z - W * y) < epsilon * 1.05)
    assert (diag['max_iter'] < 500)
    # testing warm start
    z1, diag1 = primal_dual.FBPD_warm_start(z, diag['y'], diag['z'], diag['w'],
                                            options, None, f, h, p)
    assert (diag1['max_iter'] < diag['max_iter'])
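The l2_ball proximal operator used here enforces the data-fidelity constraint ||W x - y||_2 <= epsilon. For the identity weighting in this example it reduces to a projection onto an l2 ball; a minimal self-contained sketch of that projection (the general weighted case is more involved):

import numpy as np

def project_l2_ball(x, y, epsilon):
    # Projection onto {v : ||v - y||_2 <= epsilon}: rescale the residual
    # onto the ball boundary whenever x lies outside it.
    residual = x - y
    norm = np.linalg.norm(residual)
    if norm <= epsilon:
        return x
    return y + epsilon * residual / norm

# Points outside the ball land exactly on its boundary.
y = np.zeros(8)
x = 5. * np.ones(8)
assert np.isclose(np.linalg.norm(project_l2_ball(x, y, 1.) - y), 1.)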
Example No. 3
def test_l1_unconstrained():
    options = {
        'tol': 1e-5,
        'iter': 500,
        'update_iter': 50,
        'record_iters': False
    }
    ISNR = 20.
    sigma = 10**(-ISNR / 20.)
    size = 1024
    epsilon = np.sqrt(size + 2. * np.sqrt(size)) * sigma
    x = np.linspace(0, 1 * np.pi, size)

    W = np.ones((size, ))

    y = W * x + np.random.normal(0, sigma, size)

    g = grad_operators.l2_norm(sigma, y,
                               linear_operators.diag_matrix_operator(W))

    wav = ['db1', 'db4']
    levels = 6
    shape = (size, )
    psi = linear_operators.dictionary(wav, levels, shape)

    h = prox_operators.l1_norm(np.max(np.abs(psi.dir_op(y))) * 5e-3, psi)
    h.beta = 1.
    f = prox_operators.real_prox()
    z, diag = primal_dual.FBPD(y, options, g, f, h)
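grad_operators.l2_norm supplies the smooth data-fidelity term for FBPD. Assuming it represents f(x) = ||W x - y||^2 / (2 sigma^2) with a diagonal weighting W, a minimal sketch of its gradient, checked against a central finite difference:

import numpy as np

def l2_norm_gradient(x, y, W, sigma):
    # Gradient of ||W x - y||^2 / (2 sigma^2) for elementwise weights W.
    return W * (W * x - y) / sigma**2

x = np.random.normal(0, 1., 16)
y = np.random.normal(0, 1., 16)
W = np.ones(16)
sigma = 0.1
f = lambda v: np.sum(np.abs(W * v - y)**2) / (2. * sigma**2)
eps = 1e-6
e0 = np.zeros(16)
e0[0] = eps
numeric = (f(x + e0) - f(x - e0)) / (2. * eps)
assert np.isclose(l2_norm_gradient(x, y, W, sigma)[0], numeric, rtol=1e-4, atol=1e-3)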
Example No. 4
def uncertainty_quantification(
    x_sol,
    data,
    sigma,
    weights,
    wav,
    levels,
    gamma,
    options={
        'alpha': 0.99,
        "top": 1e3,
        "bottom": 0,
        "region_size": 16,
        "iters": 10,
        "tol": 1e-3
    }):
    psi = linear_operators.dictionary(wav, levels, x_sol.shape)
    W = weights
    obj = lambda data_sol, data_mask, wav_sol, wav_mask: gamma * np.sum(
        np.abs(wav_sol + wav_mask)) + np.sum(
            np.abs(W * (data_sol + data_mask) - W * data)**2) / (2 * sigma**2)
    bound = obj(x_sol, 0, psi.dir_op(x_sol), 0) + float(len(
        np.ravel(x_sol))) + np.sqrt(
            float(len(np.ravel(x_sol))) * 16. * np.log(3. / options['alpha']))
    print(obj(x_sol, 0, psi.dir_op(x_sol), 0))
    print(
        np.sqrt(
            float(len(np.ravel(x_sol))) * 16. * np.log(3. / options['alpha'])))
    phi = linear_operators.identity()
    return map_uncertainty.create_local_credible_interval_fast(
        x_sol, phi, psi, options['region_size'], obj, bound, options['iters'],
        options['tol'], options['bottom'], options['top'])
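The bound computed above has the form f(x_sol) + N + sqrt(16 N log(3 / alpha)) for N pixels, which matches the shape of an approximate highest-posterior-density threshold; a minimal sketch of just that threshold, assuming the objective value at the reconstruction is already known:

import numpy as np

def hpd_threshold(objective_at_sol, n_pixels, alpha=0.99):
    # Approximate credible-region bound of the same form as `bound` above.
    return objective_at_sol + n_pixels + np.sqrt(16. * n_pixels * np.log(3. / alpha))

# e.g. a 128 x 128 image whose objective evaluates to 1e4 at the solution
print(hpd_threshold(1e4, 128 * 128, alpha=0.99))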
Example No. 5
def separation_solver(
        algo,
        image,
        sigma,
        weights,
        wav1=["dirac"],
        wav2=["fourier"],
        levels=6,
        gamma=1,
        beta=1e-3,
        options={
            'tol': 1e-5,
            'iter': 5000,
            'update_iter': 50,
            'record_iters': False,
            "positivity": False
        },
        warm_start=None,
        background=None,
        axes=None):
    """
    Chooses algorithm and solver
    """
    logger.info("Image shape %s", image.shape)
    logger.info("Using wavelets %s with %s levels", wav1, levels)
    logger.info("Using wavelets %s with %s levels", wav2, levels)
    logger.info(
        "Using an estimated noise level of %s (weighted image units, i.e. Jy/Beam)",
        sigma)
    psi1 = linear_operators.dictionary(wav1, levels, image.shape, axes)
    psi2 = linear_operators.dictionary(wav2, levels, image.shape, axes)
    data = image * weights
    starting_data = data * weights
    if (warm_start is not None):
        logger.info("Using warm start.")
        starting_data = warm_start
    if algo == algorithm.l1_constrained:
        logger.info("Signal separation using constrained l1 regularization")
        return clearskies.core.l1_constrained_separation_solver(
            data, starting_data, sigma, weights, psi1, psi2, gamma, beta,
            options)
    if algo == algorithm.tv_constrained:
        logger.info("Signal separation using constrained tv regularization")
        return clearskies.core.tv_constrained_separation_solver(
            data, starting_data, sigma, weights, psi1, gamma, beta, options)
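A hypothetical invocation of separation_solver, with the argument names taken from the signature above and the algorithm enum assumed to expose the members it is compared against; the structure of the returned value is not shown in this excerpt:

import numpy as np

# Hypothetical usage sketch; `separation_solver` and `algorithm` are the names
# used in the snippet above, not verified against documentation.
image = np.random.normal(0., 1., (128, 128))
weights = np.ones((128, 128))
result = separation_solver(algorithm.l1_constrained, image, sigma=1e-2,
                           weights=weights, wav1=["dirac"], wav2=["fourier"],
                           levels=4, gamma=1., beta=1e-3)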
Example No. 6
def test_constrained():
    input_file = "data/lmc.fits"
    x_true = open_fits(input_file)
    x_true = x_true[:128, :128]
    options = {
        'tol': 1e-5,
        'iter': 5000,
        'update_iter': 50,
        'record_iters': False,
        'real': False,
        'positivity': False
    }
    ISNR = 30.
    sigma = 10**(-ISNR / 20.) * np.sqrt(
        np.sum(np.abs(x_true)**2) / (x_true.shape[0] * x_true.shape[1]))
    width, height = x_true.shape

    W = np.ones(x_true.shape)

    y = W * x_true + np.random.normal(0, sigma, x_true.shape)

    wav = ["dirac", "db1", "db2", "db3", "db4", "db5", "db6", "db7", "db8"]
    levels = 2
    psi = linear_operators.dictionary(wav, levels, y.shape, None)
    data = y
    warm_start = data
    z, diag = solver.solver(solver.algorithm.l1_constrained, y, sigma, W, wav,
                            levels, 1e-2, options)
    z_expected, diag_expected = core.l1_constrained_solver(
        data, warm_start, sigma, W, psi, 1e-2, options)

    SNR = np.log10(
        np.sqrt(np.sum(np.abs(x_true)**2)) /
        np.sqrt(np.sum(np.abs(x_true - z)**2))) * 20.
    assert (SNR > ISNR)
    size = z.shape[0] * z.shape[1]
    assert (np.linalg.norm(W * z - y) <
            np.sqrt(size + 2 * np.sqrt(size)) * sigma * 1.1)
    assert (np.all(np.isclose(z, z_expected)))
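The quality check above uses the usual SNR-in-dB formula, 20 log10(||x_true|| / ||x_true - z||); as a standalone helper:

import numpy as np

def snr_db(x_true, x_est):
    # SNR in dB, matching the expression used in the assertion above.
    return 20. * np.log10(np.linalg.norm(x_true) / np.linalg.norm(x_true - x_est))

x_true = np.ones(100)
x_noisy = x_true + np.random.normal(0., 0.01, 100)
print(snr_db(x_true, x_noisy))  # roughly 40 dB for 1% noise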
Example No. 7
size = 1024
epsilon = np.sqrt(size + 2. * np.sqrt(size)) * sigma
x = np.linspace(0, 1 * np.pi, size)

W = np.ones((size, ))

y = W * x + np.random.normal(0, sigma, size)

p = prox_operators.l2_ball(epsilon, y,
                           linear_operators.diag_matrix_operator(W))
p.beta = 1.

wav = ['db1', 'db4']
levels = 6
shape = (size, )
psi = linear_operators.dictionary(wav, levels, shape)

h = prox_operators.l1_norm(np.max(np.abs(psi.dir_op(y))) * 1e-3, psi)
h.beta = 1.
f = prox_operators.real_prox()
z, diag = primal_dual.FBPD(y, options, None, f, h, p, None)

plt.plot(np.real(y))
plt.plot(np.real(x))
plt.plot(np.real(z))
plt.legend(['data', 'true', 'fit'])
SNR = np.log10(
    np.sqrt(np.sum(np.abs(x)**2)) / np.sqrt(np.sum(np.abs(x - z)**2))) * 20.
plt.title('SNR = ' + str(SNR))
plt.savefig(output_dir + '1d_constrained_example.png')
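The l1_norm proximal operator weights wavelet coefficients; for an orthogonal transform its action reduces to soft-thresholding in the coefficient domain. A minimal sketch of that elementwise operation (the analysis prox for a general dictionary is more involved):

import numpy as np

def soft_threshold(coeffs, gamma):
    # Proximal operator of gamma * ||.||_1, applied elementwise.
    return np.sign(coeffs) * np.maximum(np.abs(coeffs) - gamma, 0.)

coeffs = np.array([-2.0, -0.5, 0.0, 0.3, 1.5])
print(soft_threshold(coeffs, 1.0))  # small coefficients shrink to zero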
Example No. 8
def solver(
        algo,
        image,
        sigma,
        weights,
        wav=["dirac"],
        levels=6,
        beta=1e-3,
        options={
            'tol': 1e-5,
            'iter': 5000,
            'update_iter': 50,
            'record_iters': False,
            "positivity": False
        },
        warm_start=None,
        background=None,
        axes=None):
    """
    Chooses algorithm and solver
    """
    logger.info("Image shape %s", image.shape)
    logger.info("Using wavelets %s with %s levels", wav, levels)
    logger.info(
        "Using an estimated noise level of %s (weighted image units, i.e. Jy/Beam)",
        sigma)
    psi = linear_operators.dictionary(wav, levels, image.shape, axes)
    data = image * weights
    starting_data = data * weights
    if (warm_start is not None):
        logger.info("Using warm start.")
        starting_data = warm_start
    if algo == algorithm.l1_constrained:
        logger.info("Denosing using constrained l1 regularization")
        return clearskies.core.l1_constrained_solver(data, starting_data,
                                                     sigma, weights, psi, beta,
                                                     options)
    if algo == algorithm.l1_unconstrained:
        logger.info("Denosing using unconstrained l1 regularization")
        return clearskies.core.l1_unconstrained_solver(data, starting_data,
                                                       sigma, weights, psi,
                                                       beta, options)
    if algo == algorithm.l2_unconstrained:
        logger.info("Denosing using unconstrained l2 regularization")
        return clearskies.core.l2_unconstrained_solver(data, starting_data,
                                                       sigma, weights, psi,
                                                       beta, options)
    if algo == algorithm.l1_poisson_constrained:
        logger.info(
            "Denoising using constrained l1 regularization with Poisson constraint"
        )
        logger.info(
            "Ignoring Sigma since noise level is determined by the data")
        if (background is not None):
            logger.info("Using background.")
        else:
            background = image * 0
        return clearskies.core.l1_poissonian_constrained_solver(
            data, starting_data, len(np.ravel(image[image > 0])), weights,
            background, psi, beta, options)
    if algo == algorithm.l1_poisson_unconstrained:
        logger.info(
            "Denoising using unconstrained l1 regularization with Poisson likelihood"
        )
        logger.info(
            "Ignoring Sigma since noise level is determined by the data")
        if (background is not None):
            logger.info("Using background.")
        else:
            background = image * 0
        return clearskies.core.l1_poissonian_unconstrained_solver(
            data, starting_data, weights, background, psi, beta, options)
    if algo == algorithm.tv_constrained:
        logger.info("Denosing using constrained tv regularization")
        return clearskies.core.tv_constrained_solver(data, starting_data,
                                                     sigma, weights, beta,
                                                     options)
    if algo == algorithm.tv_unconstrained:
        logger.info("Denosing using unconstrained tv regularization")
        return clearskies.core.tv_unconstrained_solver(data, starting_data,
                                                       sigma, weights, beta,
                                                       options)
    raise ValueError("Algorithm not recognized.")
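A hypothetical call into the dispatcher above, using only names that appear in the snippet; the return value is whatever the selected clearskies solver returns and is not documented in this excerpt:

import numpy as np

# Hypothetical usage sketch; `solver` and `algorithm` come from the snippet above.
image = np.abs(np.random.normal(0., 1., (128, 128)))
weights = np.ones((128, 128))
result = solver(algorithm.l1_unconstrained, image, sigma=1e-2, weights=weights,
                wav=["dirac", "db4"], levels=4, beta=1e-3)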