Example 1
def calculate_coverage_of_difmap_model(original_dfm_model,
                                       bootstrapped_dfm_models_for_sample,
                                       alpha=0.68):
    from utils import hdi_of_mcmc
    original_comps = import_difmap_model(original_dfm_model)
    true_params = dict()
    boot_params = dict()
    coverage_params = dict()
    for i, comp in enumerate(original_comps):
        true_params[i] = dict()
        boot_params[i] = dict()
        coverage_params[i] = dict()
        for j, parname in enumerate(comp.parnames):
            true_params[i].update({parname: comp.p[j]})
            boot_params[i].update({parname: list()})
            coverage_params[i].update({parname: list()})
    for k, bootstrapped_dfm_models in enumerate(
            bootstrapped_dfm_models_for_sample):
        # Fresh per-sample container: ``dict.copy()`` is shallow and would share
        # the underlying lists across samples, skewing the per-sample HDIs
        boot_params_ = {i: {parname: list() for parname in d}
                        for i, d in boot_params.items()}
        for bootstrapped_dfm_model in bootstrapped_dfm_models:
            comps = import_difmap_model(bootstrapped_dfm_model)
            for i, comp in enumerate(comps):
                for j, parname in enumerate(comp.parnames):
                    # Keep both the per-sample and the cumulative collections
                    boot_params_[i][parname].append(comp.p[j])
                    boot_params[i][parname].append(comp.p[j])
        for i, comp in enumerate(comps):
            for j, parname in enumerate(comp.parnames):
                low, high = hdi_of_mcmc(boot_params_[i][parname])
                coverage_params[i][parname].append(
                    low < true_params[i][parname] < high)
    return true_params, boot_params, coverage_params
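
A minimal follow-up sketch (not part of the original function): the boolean lists returned in coverage_params can be reduced to empirical coverage fractions per parameter, which is what the coverage analysis ultimately reports.

def summarize_coverage(coverage_params):
    # Fraction of samples whose bootstrap HDI covered the true parameter value
    fractions = dict()
    for i, par_dict in coverage_params.items():
        fractions[i] = {parname: sum(flags) / float(len(flags)) if flags else None
                        for parname, flags in par_dict.items()}
    return fractions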
Example 2
def bootstrap_uvfits_with_difmap_model(
        uv_fits_path,
        dfm_model_path,
        nonparametric=False,
        use_kde=False,
        use_v=False,
        n_boot=100,
        stokes='I',
        boot_dir=None,
        recenter=True,
        pairs=False,
        niter=100,
        bootstrapped_uv_fits=None,
        additional_noise=None,
        boot_mdl_outname_base="bootstrapped_model"):
    dfm_model_dir, dfm_model_fname = os.path.split(dfm_model_path)
    comps = import_difmap_model(dfm_model_fname, dfm_model_dir)
    if boot_dir is None:
        boot_dir = os.getcwd()
    if bootstrapped_uv_fits is None:
        uvdata = UVData(uv_fits_path)
        model = Model(stokes=stokes)
        model.add_components(*comps)
        boot = CleanBootstrap([model],
                              uvdata,
                              additional_noise=additional_noise)
        os.chdir(boot_dir)
        boot.run(nonparametric=nonparametric,
                 use_kde=use_kde,
                 recenter=recenter,
                 use_v=use_v,
                 n=n_boot,
                 pairs=pairs)
        bootstrapped_uv_fits = sorted(
            glob.glob(os.path.join(boot_dir, 'bootstrapped_data*.fits')))
    for j, bootstrapped_fits in enumerate(bootstrapped_uv_fits):
        modelfit_difmap(bootstrapped_fits,
                        dfm_model_fname,
                        '{}_{}.mdl'.format(boot_mdl_outname_base, j),
                        path=boot_dir,
                        mdl_path=dfm_model_dir,
                        out_path=boot_dir,
                        niter=niter)
    booted_mdl_paths = glob.glob(
        os.path.join(boot_dir, '{}*'.format(boot_mdl_outname_base)))

    # Remove bootstrapped uv-fits files and difmap log/command files
    for file_ in bootstrapped_uv_fits:
        os.unlink(file_)
    logs = glob.glob(os.path.join(boot_dir, "*.log*"))
    for file_ in logs:
        os.unlink(file_)
    comms = glob.glob(os.path.join(boot_dir, "*commands*"))
    for file_ in comms:
        os.unlink(file_)

    return booted_mdl_paths
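
A hypothetical usage sketch (file names and directories below are placeholders, not taken from the original code):

booted_mdl_paths = bootstrap_uvfits_with_difmap_model(
    '/data/source.uvf', '/data/source.mdl',
    n_boot=100, boot_dir='/tmp/boot', niter=100)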
Example 3
def score(uv_fits_path, mdl_path, stokes='I'):
    """
    Returns rms of the model on the given uv-data for the specified Stokes
    parameter.
    
    :param uv_fits_path: 
        Path to uv-fits file.
    :param mdl_path: 
        Path to difmap model text file or FITS-file with CLEAN model.
    :param stokes: (optional)
        Stokes parameter string. ``I``, ``RR`` or ``LL`` currently supported.
        (default: ``I``)
    :return: 
        Per-point rms between the given data and the model evaluated at the
        given data points.
    """
    if stokes not in ('I', 'RR', 'LL'):
        raise Exception("Only stokes (I, RR, LL) supported!")
    uvdata = UVData(uv_fits_path)
    uvdata_model = UVData(uv_fits_path)
    try:
        model = create_model_from_fits_file(mdl_path)
    except IOError:
        dfm_mdl_dir, dfm_mdl_fname = os.path.split(mdl_path)
        comps = import_difmap_model(dfm_mdl_fname, dfm_mdl_dir)
        model = Model(stokes=stokes)
        model.add_components(*comps)
    uvdata_model.substitute([model])
    uvdata_diff = uvdata - uvdata_model
    if stokes == 'I':
        i_diff = 0.5 * (uvdata_diff.uvdata_weight_masked[..., 0] +
                        uvdata_diff.uvdata_weight_masked[..., 1])
    elif stokes == 'RR':
        i_diff = uvdata_diff.uvdata_weight_masked[..., 0]
    elif stokes == 'LL':
        i_diff = uvdata_diff.uvdata_weight_masked[..., 1]
    else:
        raise Exception("Only stokes (I, RR, LL) supported!")
    # 2 means that Re & Im are counted independently
    factor = 2 * np.count_nonzero(i_diff)
    # factor = np.count_nonzero(~uvdata_diff.uvdata_weight_masked.mask[:, :, :2])
    # squared_diff = uvdata_diff.uvdata_weight_masked[:, :, :2] * \
    #                uvdata_diff.uvdata_weight_masked[:, :, :2].conj()
    squared_diff = i_diff * i_diff.conj()
    return np.sqrt(float(np.sum(squared_diff)) / factor)
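
A hypothetical usage sketch (paths are placeholders):

rms = score('/data/source.uvf', '/data/source.mdl', stokes='I')
print("Per-point rms = {}".format(rms))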
Example 4
        os.makedirs(data_dir)
    # download_mojave_uv_fits(source, [epoch], download_dir=data_dir)
    path_to_script = '/home/ilya/github/vlbi_errors/difmap/final_clean_nw'

    uv_fits_fname = mojave_uv_fits_fname(source, 'u', epoch)
    uv_fits_path = os.path.join(data_dir, uv_fits_fname)
    # get_mojave_mdl_file('/home/ilya/Dropbox/papers/boot/new_pics/mojave_mod_first/asu.tsv',
    #                     source, epoch_, outfile='initial.mdl', outdir=data_dir)
    uvdata = UVData(uv_fits_path)
    # modelfit_difmap(uv_fits_fname, 'initial.mdl',
    #                 'initial.mdl', niter=300,
    #                 path=data_dir, mdl_path=data_dir,
    #                 out_path=data_dir)
    original_model_path = os.path.join(data_dir, 'initial.mdl')
    from spydiff import import_difmap_model, clean_difmap
    comps = import_difmap_model(original_model_path)
    from automodel import plot_clean_image_and_components

    # clean_difmap(uv_fits_path, os.path.join(data_dir, 'cc.fits'), 'I',
    #              (1024, 0.1), path=data_dir, path_to_script=path_to_script,
    #              outpath=data_dir)
    from from_fits import create_clean_image_from_fits_file
    ccimage = create_clean_image_from_fits_file(
        os.path.join(data_dir, 'cc.fits'))
    plot_clean_image_and_components(ccimage,
                                    comps,
                                    outname=os.path.join(
                                        data_dir, "model_image.png"))
    # # LC for CLEAN model
    # cv_means_cc, train_means_cc =\
Example 5
    elif uvdata_sc._check_stokes_present("LL"):
        stokes = "LL"
    else:
        raise Exception("Data contain no supported Stokes parameter (RR or LL)")

    uvdata_raw = UVData(os.path.join(data_dir, original_raw_uvf))
    uvdata_template = UVData(os.path.join(data_dir, original_raw_uvf))

    sc_data = uvdata_sc.hdu.data
    raw_data = uvdata_raw.hdu.data

    # Find gains products
    corrections = uvdata_raw.uvdata / uvdata_sc.uvdata

    # Create artificial raw data with known sky model and given corrections
    original_dfm_model = import_difmap_model(
        os.path.join(data_dir, "{}.mod".format(epoch)))

    # modelfit_difmap("myselfcaled.uvf", "{}.mod".format(epoch), "artificial.mdl", niter=200, stokes=stokes,
    #                 path=data_dir, mdl_path=data_dir, out_path=data_dir, show_difmap_output=True)
    # new_dfm_model = import_difmap_model("artificial.mdl", data_dir)
    # print([cg.p for cg in new_dfm_model])
    # print([cg.p for cg in original_dfm_model])
    # Create template model file
    # export_difmap_model([cg], os.path.join(data_dir, "template.mdl"), uvdata_template.frequency/10**9)
    # Modelfit artificial self-calibrated data

    # cg = CGComponent(0.5, 0, 0, 0.5)
    # model = Model(stokes=stokes)
    # model.add_components(*new_dfm_model)
    # model.add_components(ccmodel)
Example 6
from utils import hdi_of_mcmc


path_to_script = '/home/ilya/Dropbox/Zhenya/to_ilya/clean/final_clean_nw'
data_dir = '/home/ilya/sandbox/modelfit/2models'
uv_fname = '1226+023.q1.2009_08_16.uvp'
mdl_fname = '1226+023.q1.2009_08_16.mdl'
outname = 'boot_uv'
n = 300


if __name__ == '__main__':

    uvdata = UVData(os.path.join(data_dir, uv_fname))
    model = Model(stokes='I')
    comps = import_difmap_model(mdl_fname, data_dir)
    model.add_components(*comps)
    boot = CleanBootstrap([model], uvdata)
    curdir = os.getcwd()
    os.chdir(data_dir)
    boot.run(n=n, nonparametric=True, outname=[outname, '.fits'])
    os.chdir(curdir)

    # # Radplot uv-data and model
    # comps = import_difmap_model(mdl_fname, data_dir)
    # uvdata.uvplot(style='a&p')
    # uvdata.substitute([model])
    # uvdata.uvplot(style='a&p', sym='.r')

    # # Radplot residuals
    # uvdata_ = create_uvdata_from_fits_file(os.path.join(data_dir, uv_fname_cc))
Example 7
        art_fits_fname = 'art_{}_{}.fits'.format(freq, i)
        art_fits_path = os.path.join(data_dir, art_fits_fname)
        uvdata.save(art_fits_path)

        # Here we should sample from the MCMC posterior; for now, fit with difmap
        modelfit_difmap(art_fits_fname,
                        'initial.mdl',
                        'out_{}_{}.mdl'.format(freq, i),
                        niter=100,
                        path=data_dir,
                        mdl_path=data_dir,
                        out_path=data_dir)

    params = list()
    for i in range(1, 101):
        comps = import_difmap_model('out_{}_{}.mdl'.format(freq, i), data_dir)
        params.append([
            comps[0].p[0], comps[0].p[2], comps[1].p[0], comps[1].p[2],
            comps[2].p[0], comps[2].p[2]
        ])
    params = np.array(params)

    label_size = 16
    import matplotlib
    matplotlib.rcParams['xtick.labelsize'] = label_size
    matplotlib.rcParams['ytick.labelsize'] = label_size
    matplotlib.rcParams['axes.titlesize'] = label_size
    matplotlib.rcParams['axes.labelsize'] = label_size
    matplotlib.rcParams['font.size'] = label_size
    matplotlib.rcParams['legend.fontsize'] = label_size
Example 8
def coverage_of_model(original_uv_fits,
                      original_mdl_file,
                      outdir=None,
                      n_cov=100,
                      n_boot=300,
                      mapsize=(1024, 0.1),
                      path_to_script=None):
    """
    Conduct coverage analysis of uv-data & model

    :param original_uv_fits:
        Self-calibrated uv-fits file.
    :param original_mdl_file:
        Difmap txt-file with model.
    :param outdir:
        Output directory to store results.
    :param n_cov:
        Number of samples to create.
    """
    # Create sample of 100 uv-fits data & models
    sample_uv_fits_paths, sample_model_paths = create_sample(original_uv_fits,
                                                             original_mdl_file,
                                                             outdir=outdir,
                                                             n_sample=n_cov)

    # For each sample uv-fits & model find 1) conventional errors & 2) bootstrap
    # errors
    for j, (sample_uv_fits_path, sample_mdl_path) in enumerate(
            zip(sample_uv_fits_paths, sample_model_paths)):
        # ``os.path.split`` returns (directory, filename)
        dir, sample_uv_fits = os.path.split(sample_uv_fits_path)
        dir, sample_mdl_file = os.path.split(sample_mdl_path)
        try:
            comps = import_difmap_model(sample_mdl_file, dir)
        except ValueError:
            print('Problem importing difmap model')
            continue
        model = Model(stokes='I')
        model.add_components(*comps)

        # Find errors by using Fomalont way
        # 1. Clean uv-data
        clean_difmap(sample_uv_fits,
                     'sample_cc_{}.fits'.format(j),
                     'I',
                     mapsize,
                     path=dir,
                     path_to_script=path_to_script,
                     outpath=dir)
        # 2. Get beam
        ccimage = create_clean_image_from_fits_file(
            os.path.join(dir, 'sample_cc_{}.fits'.format(j)))
        beam = ccimage.beam_image

        # 3. Subtract components convolved with beam
        ccimage.substract_model(model)

        # Find errors by using Lee way
        # a) fit uv-data and find model
        # b) CLEAN uv-data
        # c) substract model from CLEAN image
        # d) find errors
        pass

        # Find errors by using bootstrap
        # FT model to uv-plane
        uvdata = UVData(sample_uv_fits_path)
        try:
            boot = CleanBootstrap([model], uvdata)
        # If uv-data contains only one Stokes parameter (e.g. `0838+133`)
        except IndexError:
            print('Problem bootstrapping')
            continue
        curdir = os.getcwd()
        os.chdir(dir)
        boot.run(n=n_boot, nonparametric=True, outname=[outname, '.fits'])
        os.chdir(curdir)

        booted_uv_paths = sorted(
            glob.glob(os.path.join(data_dir, outname + "*")))
        # Modelfit bootstrapped uvdata
        for booted_uv_path in booted_uv_paths:
            path, booted_uv_file = os.path.split(booted_uv_path)
            i = booted_uv_file.split('_')[-1].split('.')[0]
            modelfit_difmap(booted_uv_file,
                            dfm_model_fname,
                            dfm_model_fname + '_' + i,
                            path=path,
                            mdl_path=data_dir,
                            out_path=data_dir)

        # Get params of initial model used for bootstrap
        comps = import_difmap_model(dfm_model_fname, data_dir)
        comps_params0 = {i: [] for i in range(len(comps))}
        for i, comp in enumerate(comps):
            comps_params0[i].extend(list(comp.p))

        # Load bootstrap models
        booted_mdl_paths = glob.glob(
            os.path.join(data_dir, dfm_model_fname + "_*"))
        comps_params = {i: [] for i in range(len(comps))}
        for booted_mdl_path in booted_mdl_paths:
            path, booted_mdl_file = os.path.split(booted_mdl_path)
            comps = import_difmap_model(booted_mdl_file, path)
            for i, comp in enumerate(comps):
                comps_params[i].extend(list(comp.p))

        # Save 68% (1 sigma) intervals
        for i, comp in enumerate(comps):
            errors_fname = '68_{}_{}_comp{}.txt'.format(source, last_epoch, i)
            fn = open(os.path.join(data_dir, errors_fname), 'w')
            print "Component #{}".format(i + 1)
            for j in range(len(comp)):
                low, high, mean, median = hdi_of_mcmc(np.array(
                    comps_params[i]).reshape((n_boot, len(comp))).T[j],
                                                      cred_mass=0.68,
                                                      return_mean_median=True)
                fn.write("{} {} {} {} {}".format(comp.p[j], low, high, mean,
                                                 median))
                fn.write("\n")
            fn.close()

    # For source in sources with component close to core
    # 1. Find residuals or estimate noise
    # 2. N times add resampled residuals (or just gaussian noise) to model and
    # create N new datasets
    # 3. Fit them using difmap.
    # 4. Find errors using Fomalont, Yee and using bootstrap. Check coverage.
    base_dir = '/home/ilya/vlbi_errors/model_cov'
    n_boot = 300
    outname = 'boot_uv'
    names = [
        'source', 'id', 'trash', 'epoch', 'flux', 'r', 'pa', 'bmaj', 'e', 'bpa'
    ]
    df = pd.read_table(os.path.join(base_dir, 'asu.tsv'),
                       sep=';',
                       header=None,
                       names=names,
                       dtype={key: str
                              for key in names},
                       index_col=False)

    # Now for all sources get the latest epoch and create directory for analysis
    for source in df['source'].unique():
        epochs = df.loc[df['source'] == source]['epoch']
        last_epoch_ = list(epochs)[-1]
        last_epoch = last_epoch_.replace('-', '_')
        data_dir = os.path.join(base_dir, source, last_epoch)
        if not os.path.exists(data_dir):
            os.makedirs(data_dir)
        try:
            download_mojave_uv_fits(source,
                                    epochs=[last_epoch],
                                    bands=['u'],
                                    download_dir=data_dir)
        except:
            open(
                'problem_download_from_mojave_{}_{}'.format(
                    source, last_epoch), 'a').close()
            continue
        uv_fits_fname = mojave_uv_fits_fname(source, 'u', last_epoch)

        # Create instance of Model and bootstrap uv-data
        dfm_model_fname = 'dfmp_original_model.mdl'
        fn = open(os.path.join(data_dir, dfm_model_fname), 'w')
        model_df = df.loc[np.logical_and(df['source'] == source,
                                         df['epoch'] == last_epoch_)]
        for (flux, r, pa, bmaj, e, bpa) in np.asarray(
                model_df[['flux', 'r', 'pa', 'bmaj', 'e', 'bpa']]):
            print(flux, r, pa, bmaj, e, bpa)
            if not r.strip(' '):
                r = '0.0'
            if not pa.strip(' '):
                pa = '0.0'

            if not bmaj.strip(' '):
                bmaj = '0.0'
            if not e.strip(' '):
                e = "1.0"

            if np.isnan(float(bpa)):
                bpa = "0.0"
            else:
                bpa = bpa + 'v'

            if bmaj == '0.0':
                type_ = 0
                bpa = "0.0"
            else:
                bmaj = bmaj + 'v'
                type_ = 1
            fn.write("{}v {}v {}v {} {} {} {} {} {}".format(
                flux, r, pa, bmaj, e, bpa, type_, "0", "0\n"))
        fn.close()
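
For reference, each line written above should follow the usual difmap model-file column order (flux, radius, theta, major axis, axial ratio, phi, component type, frequency, spectral index), with a trailing 'v' marking a parameter as variable during fitting. An illustrative line with made-up values:

# Flux(Jy)  Radius(mas)  Theta(deg)  Major(mas)  Ratio  Phi(deg)  T  Freq  SpecIndex
# 1.25v     0.53v        -24.3v      0.35v       1.0    0.0       1  0     0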
Example 9
import os
from uv_data import UVData
from model import Model
from spydiff import import_difmap_model
from bootstrap import CleanBootstrap

data_dir = '/home/ilya/code/vlbi_errors/tests/ft'
uv_fits = '1308+326.U1.2009_08_28.UV_CAL'
uvdata = UVData(os.path.join(data_dir, uv_fits))
model = Model(stokes='I')
comps = import_difmap_model('1308+326.U1.2009_08_28.mdl', data_dir)
model.add_components(*comps)
boot = CleanBootstrap([model], uvdata)
fig = boot.data.uvplot()
boot.model_data.uvplot(fig=fig, color='r')
# boot.find_outliers_in_residuals()
# boot.find_residuals_centers(split_scans=False)
# boot.fit_residuals_kde(split_scans=False, combine_scans=False,
#                        recenter=True)
Example 10
def fit_model_with_nestle(uv_fits, model_file, components_priors, outdir=None,
                          stokes='I', **nestle_kwargs):
    """
    :param uv_fits:
        Path to uv-fits file with self-calibrated visibilities.
    :param model_file:
        Path to file with difmap model.
    :param components_priors:
        Components' prior PPFs. The component closest to the phase center goes
        first. Iterable of dicts with parameter names as keys and
        ``(callable, args, kwargs)`` tuples as values, where ``args`` &
        ``kwargs`` are additional arguments to the callable. Each callable is
        called as ``callable(u, *args, **kwargs)`` and should behave like a
        distribution's ``ppf``.

        Example of prior on single component:
            {'flux': (scipy.stats.uniform.ppf, [0., 10.], dict(),),
             'bmaj': (scipy.stats.uniform.ppf, [0, 5.], dict(),),
             'e': (scipy.stats.beta.ppf, [alpha, beta], dict(),)}
        The first key results in calling ``scipy.stats.uniform.ppf(u, 0, 10)``
        to draw the prior value of the ``flux`` parameter.
    :param stokes: (optional)
        Stokes parameter string. (default: ``I``)
    :param outdir: (optional)
        Directory to output results. If ``None`` then use cwd. (default:
        ``None``)
    :param nestle_kwargs: (optional)
        Any arguments passed to ``nestle.sample`` function.

    :return:
        Results of the ``nestle.sample`` run on that model.
    """
    if outdir is None:
        outdir = os.getcwd()

    mdl_file = model_file
    uv_data = UVData(uv_fits)
    mdl_dir, mdl_fname = os.path.split(mdl_file)
    comps = import_difmap_model(mdl_fname, mdl_dir)

    # Sort components by distance from phase center
    comps = sorted(comps, key=lambda x: np.sqrt(x.p[1]**2 + x.p[2]**2))

    ppfs = list()
    labels = list()
    for component_prior in components_priors:
        for comp_name in ('flux', 'x', 'y', 'bmaj', 'e', 'bpa'):
            try:
                ppfs.append(_function_wrapper(*component_prior[comp_name]))
                labels.append(comp_name)
            except KeyError:
                pass

    for ppf in ppfs:
        print(ppf.args)

    hypercube = hypercube_partial(ppfs)

    # Create model
    mdl = Model(stokes=stokes)
    # Add components to model
    mdl.add_components(*comps)
    loglike = LnLikelihood(uv_data, mdl)
    time0 = time.time()
    result = nestle.sample(loglikelihood=loglike, prior_transform=hypercube,
                           ndim=mdl.size, npoints=50, method='multi',
                           callback=nestle.print_progress, **nestle_kwargs)
    print("Time spent : {}".format(time.time()-time0))
    samples = nestle.resample_equal(result.samples, result.weights)
    # Save re-weighted samples from posterior to specified ``outdir``
    # directory
    np.savetxt(os.path.join(outdir, 'samples.txt'), samples)
    fig = corner.corner(samples, show_titles=True, labels=labels,
                        quantiles=[0.16, 0.5, 0.84], title_fmt='.3f')
    # Save corner plot os samples from posterior to specified ``outdir``
    # directory
    fig.savefig(os.path.join(outdir, "corner.png"), bbox_inches='tight',
                dpi=200)

    return result
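
A hypothetical usage sketch following the prior format described in the docstring (paths, ranges and the two-component layout are placeholders):

import scipy.stats

components_priors = [
    # Core component: flux in [0, 10] Jy, position in [-1, 1] mas, size in [0, 5] mas
    {'flux': (scipy.stats.uniform.ppf, [0., 10.], dict()),
     'x': (scipy.stats.uniform.ppf, [-1., 2.], dict()),
     'y': (scipy.stats.uniform.ppf, [-1., 2.], dict()),
     'bmaj': (scipy.stats.uniform.ppf, [0., 5.], dict())},
    # Jet component: flux in [0, 3] Jy, position in [-5, 5] mas, size in [0, 10] mas
    {'flux': (scipy.stats.uniform.ppf, [0., 3.], dict()),
     'x': (scipy.stats.uniform.ppf, [-5., 10.], dict()),
     'y': (scipy.stats.uniform.ppf, [-5., 10.], dict()),
     'bmaj': (scipy.stats.uniform.ppf, [0., 10.], dict())}]

result = fit_model_with_nestle('/data/source.uvf', '/data/source.mdl',
                               components_priors, outdir='/tmp')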
Example 11
# matplotlib.rcParams['text.latex.preview'] = True
# matplotlib.rcParams['font.family'] = 'serif'
# matplotlib.rcParams['font.serif'] = 'cm'

import matplotlib.pyplot as plt

base_dir = "/home/ilya/Dropbox/papers/boot/new_pics/corner/new/parametric/1807+698/"
mcmc_samples = os.path.join(base_dir, "samples_of_mcmc.txt")
# (60000, 50)
mcmc = np.loadtxt(mcmc_samples)
mcmc = mcmc[::10, :]

booted_mdl_paths = glob.glob(os.path.join(base_dir, "mdl_booted_*"))
boot_samples = list()
for booted_mdl in booted_mdl_paths:
    comps = import_difmap_model(booted_mdl)
    comps = sorted(comps, key=lambda x: np.hypot(x.p[1], x.p[2]))
    params = list()
    for comp in comps:
        params.extend(list(comp.p))
    boot_samples.append(params)

boot = np.atleast_2d(boot_samples)

cred_mass = 0.68
count = 0
param_n = 2
ratios = list()
distances = list()
fluxes = list()
boot_stds = list()
Example 12
File: mcmc.py Project: akutkin/SACA
# #                bounds=[(0., 2), (None, None), (None, None), (0., +np.inf),
# #                        (0., 1.), (None, None),
# #                        (0., 2), (None, None), (None, None), (0., 5),
# #                        (0., 1), (None, None), (None, None), (0., 20)])
# fit = minimize(lambda p: -lnlik(p), mdl.p, method='L-BFGS-B',
#                options={'maxiter': 30000, 'maxfev': 1000000, 'xtol': 0.00001,
#                         'ftol': 0.00001, 'approx_grad': True},
#                bounds=[(0., 5), (None, None), (None, None), (0., +np.inf),
#                        (0., 5), (None, None), (None, None), (0., +np.inf)])
# if fit['success']:
#     print "Succesful fit!"
#     p_ml = fit['x']
#     print p_ml

# Create several components
comps = import_difmap_model('0235+164.q1.2008_09_02_delta_fitted.mdl',
                            data_dir)
eg1 = comps[0]
# cg2 = comps[1]
cg2 = comps[1]
cg3 = comps[3]
cg4 = comps[2]
eg1.add_prior(flux=(
    sp.stats.uniform.logpdf,
    [0., 5.],
    dict(),
),
              bmaj=(
                  sp.stats.uniform.logpdf,
                  [0, 1.],
                  dict(),
              ),
Example 13
    #     errors = find_2D_position_errors_using_chi2(os.path.join(data_dir, mdl_file),
    #                                                 os.path.join(data_dir, uvfits_file),
    #                                                 stokes=stokes,
    #                                                 show_difmap_output=False)
    #     with open(os.path.join(data_dir, "errors_{}.pkl".format(epoch)), "wb") as fo:
    #         pickle.dump(errors, fo)
    # # Or just load already calculated
    # else:
    with open(os.path.join(data_dir, "errors_{}.pkl".format(epoch)),
              "rb") as fo:
        errors = pickle.load(fo)
    # Make dummy elliptical components for plotting errors
    error_comps = convert_2D_position_errors_to_ell_components(
        os.path.join(data_dir, mdl_file), errors, include_shfit=False)

    comps = import_difmap_model(os.path.join(data_dir, mdl_file))
    ccimage = create_clean_image_from_fits_file(
        os.path.join(data_dir, ccfits_file))
    beam = ccimage.beam
    npixels_beam = np.pi * beam[0] * beam[1] / (4 * np.log(2) * pixsize_mas**2)
    std = find_image_std(ccimage.image, beam_npixels=npixels_beam)
    blc, trc = find_bbox(ccimage.image,
                         level=4 * std,
                         min_maxintensity_mjyperbeam=6 * std,
                         min_area_pix=4 * npixels_beam,
                         delta=10)
    fig, axes = plt.subplots(1, 1, figsize=(10, 15))
    fig = iplot(ccimage.image,
                x=ccimage.x,
                y=ccimage.y,
                min_abs_level=3 * std,
Example 14
import os
import numpy as np
from mcmc_difmap_model import fit_model_with_mcmc
from uv_data import UVData
from spydiff import import_difmap_model, modelfit_difmap
from model import Model


data_dir = '/home/ilya/code/vlbi_errors/bin_c1/'
uv_fits = '0235+164.c1.2008_09_02.uvf_difmap'
mdl_file = '0235+164.c1.2008_09_02.mdl'

uvdata = UVData(os.path.join(data_dir, uv_fits))

original_comps = import_difmap_model(mdl_file, data_dir)
lnpost, sampler = fit_model_with_mcmc(os.path.join(data_dir, uv_fits),
                                      os.path.join(data_dir, mdl_file),
                                      samples_file='samples_of_mcmc.txt',
                                      outdir='/home/ilya/code/vlbi_errors/bin_c1/')
samples = sampler.flatchain[::10, :]

# Create a sample of models with parameters from posterior distribution
models = list()
for i, s in enumerate(samples[np.random.randint(len(samples), size=100)]):
    model = Model(stokes='I')
    j = 0
    for orig_comp in original_comps:
        comp = orig_comp.__class__(*(s[j: j + orig_comp.size]))
        model.add_component(comp)
        j += orig_comp.size
    models.append(model)
Example 15
def plot_comps(components_to_plot,
               samples,
               original_dfm_mdl,
               title_fontsize=12,
               label_fontsize=12,
               outdir=None,
               outfname=None,
               fig=None,
               limits=None):
    if outfname is None:
        outfname = str(components_to_plot) + '_corner.pdf'
    if outdir is None:
        outdir = os.getcwd()

    n_samples, n_dim = np.shape(samples)
    indxs = np.zeros(n_dim)
    components_to_plot = sorted(components_to_plot)

    # Load difmap model
    mdl_dir, mdl_fname = os.path.split(original_dfm_mdl)
    comps = import_difmap_model(mdl_fname, mdl_dir)
    # Sort components by distance from phase center
    comps = sorted(comps, key=lambda x: np.sqrt(x.p[1]**2 + x.p[2]**2))

    # Construct labels for corner and truth values (of difmap models)
    labels = list()
    truths = list()
    j = 0
    for i, comp in enumerate(comps):
        if i in components_to_plot:
            truths.extend(comp.p)
            indxs[j:j + comp.size] = np.ones(comp.size)
            if isinstance(comp, EGComponent):
                if comp.size == 6:
                    labels.extend([
                        r'$flux$', r'$x$', r'$y$', r'$bmaj$', r'$e$', r'$bpa$'
                    ])
                elif comp.size == 4:
                    labels.extend([r'$flux$', r'$x$', r'$y$', r'$bmaj$'])
                elif comp.size == 3:
                    labels.extend([r'$flux$', r'$x$', r'$y$'])
                else:
                    raise Exception("Gauss component should have size 4 or 6!")
            elif isinstance(comp, DeltaComponent):
                labels.extend([r'$flux$', r'$x$', r'$y$'])
            else:
                raise Exception("Unknown type of component!")
        j += comp.size

    samples = samples[:, np.array(indxs, dtype=bool)]

    ndim = len(truths)
    if fig is None:
        fig, axes = plt.subplots(nrows=ndim, ncols=ndim)
        fig.set_size_inches(14.5, 14.5)

    fig = corner.corner(
        samples,
        fig=fig,
        labels=labels,
        truths=truths,
        hist_kwargs={
            'normed': True,
            'histtype': 'step',
            'stacked': True,
            'ls': 'dashdot'
        },
        smooth=0.5,
        # show_titles=True,
        title_kwargs={'fontsize': title_fontsize},
        # quantiles=[0.16, 0.5, 0.84],
        label_kwargs={'fontsize': label_fontsize},
        title_fmt=".4f",
        max_n_ticks=4,
        plot_datapoints=False,
        plot_contours=True,
        levels=[0.68, 0.95],
        bins=20,
        range=limits,
        fill_contours=True,
        color='green')
    # fig.tight_layout()
    # fig.savefig(os.path.join(outdir, outfname), bbox_inches='tight',
    #             dpi=200, format='pdf')
    return fig
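
A hypothetical usage sketch (paths are placeholders): plot the joint posterior of the first two components from a saved sample file.

samples = np.loadtxt('/data/samples_of_mcmc.txt')
fig = plot_comps([0, 1], samples, '/data/original.mdl',
                 outdir='/tmp', outfname='comps_01_corner.pdf')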
Example 16
def fit_model_with_mcmc(uv_fits,
                        mdl_file,
                        outdir=None,
                        nburnin_1=100,
                        nburnin_2=300,
                        nproduction=500,
                        nwalkers=50,
                        samples_file=None,
                        stokes='I',
                        use_weights=False):

    # Initialize ``UVData`` instance
    uvdata = UVData(uv_fits)

    # Load difmap model
    mdl_dir, mdl_fname = os.path.split(mdl_file)
    comps = import_difmap_model(mdl_fname, mdl_dir)
    # Sort components by distance from phase center
    comps = sorted(comps, key=lambda x: np.sqrt(x.p[1]**2 + x.p[2]**2))

    # Cycle over components, add priors and calculate the std of the initial
    # walker positions: 3% of flux for flux, 1% of size for position, 3% of
    # size for size, 0.01 for e, 0.01 for bpa
    p0_dict = dict()
    for comp in comps:
        print(comp)
        if isinstance(comp, EGComponent):
            flux_high = 2 * comp.p[0]
            try:
                bmaj_high = 4 * comp.p[3]
            except IndexError:
                pass
            if comp.size == 6:
                comp.add_prior(flux=(
                    sp.stats.uniform.logpdf,
                    [0., flux_high],
                    dict(),
                ),
                               bmaj=(
                                   sp.stats.uniform.logpdf,
                                   [0, bmaj_high],
                                   dict(),
                               ),
                               e=(
                                   sp.stats.uniform.logpdf,
                                   [0, 1.],
                                   dict(),
                               ),
                               bpa=(
                                   sp.stats.uniform.logpdf,
                                   [0, np.pi],
                                   dict(),
                               ))
                p0_dict[comp] = [
                    0.03 * comp.p[0], 0.01 * comp.p[3], 0.01 * comp.p[3],
                    0.03 * comp.p[3], 0.01, 0.01
                ]
            elif comp.size == 4:
                flux_high = 2 * comp.p[0]
                bmaj_high = 4 * comp.p[3]
                comp.add_prior(flux=(
                    sp.stats.uniform.logpdf,
                    [0., flux_high],
                    dict(),
                ),
                               bmaj=(
                                   sp.stats.uniform.logpdf,
                                   [0, bmaj_high],
                                   dict(),
                               ))
                p0_dict[comp] = [
                    0.03 * comp.p[0], 0.01 * comp.p[3], 0.01 * comp.p[3],
                    0.03 * comp.p[3]
                ]
            elif comp.size == 3:
                flux_high = 2 * comp.p[0]
                comp.add_prior(flux=(
                    sp.stats.uniform.logpdf,
                    [0., flux_high],
                    dict(),
                ))
                p0_dict[comp] = [0.03 * comp.p[0], 0.01, 0.01]
            else:
                raise Exception("Gauss component should have size 4 or 6!")
        elif isinstance(comp, DeltaComponent):
            flux_high = 5 * comp.p[0]
            comp.add_prior(flux=(
                sp.stats.uniform.logpdf,
                [0., flux_high],
                dict(),
            ))
            p0_dict[comp] = [0.03 * comp.p[0], 0.01, 0.01]
        else:
            raise Exception("Unknown type of component!")

    # Construct labels for corner and truth values (of difmap models)
    labels = list()
    truths = list()
    for comp in comps:
        truths.extend(comp.p)
        if isinstance(comp, EGComponent):
            if comp.size == 6:
                labels.extend(
                    [r'$flux$', r'$x$', r'$y$', r'$bmaj$', r'$e$', r'$bpa$'])
            elif comp.size == 4:
                labels.extend([r'$flux$', r'$x$', r'$y$', r'$bmaj$'])
            elif comp.size == 3:
                labels.extend([r'$flux$', r'$x$', r'$y$'])
            else:
                raise Exception("Gauss component should have size 4 or 6!")
        elif isinstance(comp, DeltaComponent):
            labels.extend([r'$flux$', r'$x$', r'$y$'])
        else:
            raise Exception("Unknown type of component!")

    # Create model
    mdl = Model(stokes=stokes)
    # Add components to model
    mdl.add_components(*comps)
    # Create posterior for data & model
    lnpost = LnPost(uvdata, mdl, use_weights=use_weights)
    ndim = mdl.size

    # Initialize sampler
    sampler = emcee.EnsembleSampler(nwalkers, ndim, lnpost)

    # Initialize pool of walkers
    p_std = list()
    for comp in comps:
        p_std.extend(p0_dict[comp])
    print "Initial std of parameters: {}".format(p_std)
    p0 = emcee.utils.sample_ball(mdl.p, p_std, size=nwalkers)
    print p0[0]

    # Run initial burnin
    pos, prob, state = sampler.run_mcmc(p0, nburnin_1)
    print "Acceptance fraction for initial burning: ", sampler.acceptance_fraction
    sampler.reset()
    # Run second burning
    pos, lnp, _ = sampler.run_mcmc(pos, nburnin_2)
    print "Acceptance fraction for second burning: ", sampler.acceptance_fraction
    sampler.reset()
    pos, lnp, _ = sampler.run_mcmc(pos, nproduction)
    print "Acceptance fraction for production: ", sampler.acceptance_fraction

    # # Plot corner
    # fig, axes = plt.subplots(nrows=ndim, ncols=ndim)
    # fig.set_size_inches(14.5, 14.5)

    # # Choose fontsize
    # if len(comps) <= 2:
    #     fontsize = 16
    # elif 2 < len(comps) <= 4:
    #     fontsize = 13
    # else:
    #     fontsize = 11

    # if plot_corner:
    #     corner.corner(sampler.flatchain[::10, :], fig=fig, labels=labels,
    #                   truths=truths, show_titles=True,
    #                   title_kwargs={'fontsize': fontsize},
    #                   quantiles=[0.16, 0.5, 0.84],
    #                   label_kwargs={'fontsize': fontsize}, title_fmt=".3f")
    #     fig.savefig(os.path.join(outdir, 'corner.png'), bbox_inches='tight',
    #                 dpi=200)
    if not samples_file:
        samples_file = 'mcmc_samples.txt'
    print "Saving thinned samples to {} file...".format(samples_file)
    np.savetxt(os.path.join(outdir, samples_file), sampler.flatchain[::, :])
    return sampler, labels, truths
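
A hypothetical usage sketch (paths are placeholders):

sampler, labels, truths = fit_model_with_mcmc('/data/source.uvf',
                                              '/data/source.mdl',
                                              outdir='/tmp', nwalkers=50,
                                              samples_file='samples_of_mcmc.txt')
thinned_samples = sampler.flatchain[::10, :]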
Example 17
def fit_model_with_ptmcmc(uv_fits, mdl_file, outdir=None, nburnin=1000,
                          nproduction=10000, nwalkers=50,
                          samples_file=None, stokes='I', use_weights=False,
                          ntemps=10, thin=10):

    # Initialize ``UVData`` instance
    uvdata = UVData(uv_fits)

    # Load difmap model
    mdl_dir, mdl_fname = os.path.split(mdl_file)
    comps = import_difmap_model(mdl_fname, mdl_dir)
    # Sort components by distance from phase center
    comps = sorted(comps, key=lambda x: np.sqrt(x.p[1]**2 + x.p[2]**2))

    # Cycle over components, add priors and calculate the std of the initial
    # walker positions: 3% of flux for flux, 1% of size for position, 3% of
    # size for size, 0.01 for e, 0.01 for bpa
    p0_dict = dict()
    for comp in comps:
        print(comp)
        if isinstance(comp, EGComponent):
            # flux_high = 2 * comp.p[0]
            flux_high = 10. * comp.p[0]
            # bmaj_high = 4 * comp.p[3]
            bmaj_high = 4.
            if comp.size == 6:
                comp.add_prior(flux=(sp.stats.uniform.logpdf, [0., flux_high], dict(),),
                               bmaj=(sp.stats.uniform.logpdf, [0, bmaj_high], dict(),),
                               e=(sp.stats.uniform.logpdf, [0, 1.], dict(),),
                               bpa=(sp.stats.uniform.logpdf, [0, np.pi], dict(),))
                p0_dict[comp] = [0.03 * comp.p[0],
                                 0.01 * comp.p[3],
                                 0.01 * comp.p[3],
                                 0.03 * comp.p[3],
                                 0.01,
                                 0.01]
            elif comp.size == 4:
                # flux_high = 2 * comp.p[0]
                flux_high = 10. * comp.p[0]
                # bmaj_high = 4 * comp.p[3]
                bmaj_high = 4.
                comp.add_prior(flux=(sp.stats.uniform.logpdf, [0., flux_high], dict(),),
                               bmaj=(sp.stats.uniform.logpdf, [0, bmaj_high], dict(),))
                p0_dict[comp] = [0.03 * comp.p[0],
                                 0.01 * comp.p[3],
                                 0.01 * comp.p[3],
                                 0.03 * comp.p[3]]
            else:
                raise Exception("Gauss component should have size 4 or 6!")
        elif isinstance(comp, DeltaComponent):
            flux_high = 5 * comp.p[0]
            comp.add_prior(flux=(sp.stats.uniform.logpdf, [0., flux_high], dict(),))
            p0_dict[comp] = [0.03 * comp.p[0],
                             0.01,
                             0.01]
        else:
            raise Exception("Unknown type of component!")

    # Construct labels for corner and truth values (of difmap models)
    labels = list()
    truths = list()
    for comp in comps:
        truths.extend(comp.p)
        if isinstance(comp, EGComponent):
            if comp.size == 6:
                labels.extend([r'$flux$', r'$x$', r'$y$', r'$bmaj$', r'$e$', r'$bpa$'])
            elif comp.size == 4:
                labels.extend([r'$flux$', r'$x$', r'$y$', r'$bmaj$'])
            else:
                raise Exception("Gauss component should have size 4 or 6!")
        elif isinstance(comp, DeltaComponent):
            labels.extend([r'$flux$', r'$x$', r'$y$'])
        else:
            raise Exception("Unknown type of component!")

    # Create model
    mdl = Model(stokes=stokes)
    # Add components to model
    mdl.add_components(*comps)

    # Create likelihood for data & model
    lnlik = LnLikelihood(uvdata, mdl, use_weights=use_weights,)
    lnpr = LnPrior(mdl)
    ndim = mdl.size

    # Initialize pool of walkers
    p_std = list()
    for comp in comps:
        p_std.extend(p0_dict[comp])
    print "Initial std of parameters: {}".format(p_std)
    p0 = emcee.utils.sample_ball(mdl.p, p_std,
                                 size=ntemps*nwalkers).reshape((ntemps,
                                                                nwalkers, ndim))
    betas = np.exp(np.linspace(0, -(ntemps - 1) * 0.5 * np.log(2), ntemps))
    # Initialize sampler
    ptsampler = emcee.PTSampler(ntemps, nwalkers, ndim, lnlik, lnpr,
                                betas=betas)

    # Burning in
    print "Burnin"
    for p, lnprob, lnlike in ptsampler.sample(p0, iterations=nburnin):
        pass
    print "Acceptance fraction for initial burning: ", ptsampler.acceptance_fraction
    ptsampler.reset()

    print "Production"
    for p, lnprob, lnlike in ptsampler.sample(p, lnprob0=lnprob, lnlike0=lnlike,
                                              iterations=nproduction,
                                              thin=thin):
        pass
    print "Acceptance fraction for production: ", ptsampler.acceptance_fraction

    # Plot corner
    fig, axes = plt.subplots(nrows=ndim, ncols=ndim)
    fig.set_size_inches(14.5, 14.5)

    # Choose fontsize
    if len(comps) <= 2:
        fontsize = 16
    elif 2 < len(comps) <= 4:
        fontsize = 13
    else:
        fontsize = 11

    # Use zero-temperature chain
    samples = ptsampler.flatchain[0, :, :]

    corner.corner(samples, fig=fig, labels=labels,
                  truths=truths, show_titles=True,
                  title_kwargs={'fontsize': fontsize},
                  quantiles=[0.16, 0.5, 0.84],
                  label_kwargs={'fontsize': fontsize}, title_fmt=".3f")
    fig.savefig(os.path.join(outdir, 'corner_mcmc_x.png'), bbox_inches='tight',
                dpi=200)
    if not samples_file:
        samples_file = 'mcmc_samples.txt'
    print "Saving thinned samples to {} file...".format(samples_file)
    np.savetxt(samples_file, samples)
    return ptsampler
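
For reference, the betas ladder built above is geometric with common ratio 1/sqrt(2), i.e. each successive temperature is a factor sqrt(2) hotter. A quick self-contained check (a sketch, not from the original code):

import numpy as np

ntemps = 10
betas = np.exp(np.linspace(0, -(ntemps - 1) * 0.5 * np.log(2), ntemps))
# Consecutive ratios are all 1/sqrt(2)
assert np.allclose(betas[1:] / betas[:-1], 1.0 / np.sqrt(2.0))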
Example 18
# fig = iplot(original_selfcaled_image.image,
#             x=original_selfcaled_image.x, y=original_selfcaled_image.y,
#             abs_levels=[2*std], colors_mask=None, color_clim=None, blc=blc, trc=trc,
#             beam=beam, close=False, show_beam=True, show=True,
#             cmap='viridis', contour_color='red', fig=fig)
#
#
# fig.savefig(os.path.join(data_dir, "original_selfcaled.png"), dpi=300, bbox_inches="tight")
# plt.close()


# Find gains products
corrections = uvdata_raw.uvdata/uvdata_sc.uvdata

# Create artificial raw data with known sky model and given corrections
original_dfm_model = import_difmap_model(os.path.join(data_dir, "2019_08_27.mod"))

modelfit_difmap("myselfcaled.uvf", "2019_08_27.mod", "artificial.mdl", niter=100, stokes='I',
                path=data_dir, mdl_path=data_dir, out_path=data_dir, show_difmap_output=True)
new_dfm_model = import_difmap_model("artificial.mdl", data_dir)
print([cg.p for cg in new_dfm_model])
print([cg.p for cg in original_dfm_model])


# cg = CGComponent(0.5, 0, 0, 0.5)
model = Model(stokes="I")
model.add_components(*new_dfm_model)
noise = uvdata_template.noise(use_V=True)

params = list()
Example 19
path_to_script = '/home/ilya/Dropbox/Zhenya/to_ilya/clean/final_clean_nw'

# Colors used
colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']

# Workflow for one source
source = '0945+408'
epoch = '2007_04_18'
band = 'u'
# TODO: Standardize this
image_fname = 'original_cc.fits'
uv_fname_cc = '0945+408.u.2007_04_18.uvf'
uv_fname_uv = '0945+408.u.2007_04_18.uvf'
dfm_model_fname = 'dfmp_original_model.mdl'

comps = import_difmap_model(dfm_model_fname, base_path)
model_uv = Model(stokes='I')
model_uv.add_components(*comps)
uvdata = UVData(os.path.join(base_path, uv_fname_uv))
uvdata_m = UVData(os.path.join(base_path, uv_fname_uv))
uvdata_m.substitute([model_uv])
uvdata_r = uvdata - uvdata_m

# Plot uv-data
label_size = 12
matplotlib.rcParams['xtick.labelsize'] = label_size
matplotlib.rcParams['ytick.labelsize'] = label_size
uvdata.uvplot(style='re&im', freq_average=True)
matplotlib.pyplot.show()
matplotlib.pyplot.savefig('/home/ilya/sandbox/heteroboot/uvdata_original.png',
                          bbox_inches='tight', dpi=400)
Example 20
eg1 = EGComponent(5., 0, 0, 0.15, 0.33, 0.2)
eg2 = EGComponent(2.5, 1, 1, 0.5, 0.5, 0.)
model = Model(stokes='I')
model.add_components(eg1, eg2)
# model.add_components(eg1, eg2)
uvdata_c = copy.deepcopy(uvdata)
uvdata_c.substitute([model])
uvdata_c.noise_add(noise)
uvdata_c.save(os.path.join(data_dir, 'fake.fits'), rewrite=True)
modelfit_difmap('fake.fits',
                'mod_c2_2ee.mdl',
                'out_2c.mdl',
                path=data_dir,
                mdl_path=data_dir,
                out_path=data_dir,
                niter=100)
comps = import_difmap_model('out_2c.mdl', data_dir)
print([comp.p for comp in comps])
model_fitted = Model(stokes='I')
model_fitted.add_components(*comps)
uvdata_mf = copy.deepcopy(uvdata)
uvdata_mf.substitute([model_fitted])

fig = uvdata_c.uvplot(color='g', phase_range=[-1, 1])
uvdata_mf.uvplot(fig=fig, color='r', phase_range=[-1, 1])

hdus_orig = pf.open(os.path.join(data_dir, uv_fits))
hdus_fake = pf.open(os.path.join(data_dir, 'fake.fits'))
print "orig", hdus_orig[0].data[0][0]
print "fake", hdus_fake[0].data[0][0]
Example 21
from from_fits import create_model_from_fits_file
from utils import mas_to_rad
from stats import LnLikelihood
from spydiff import import_difmap_model
from scipy.optimize import minimize, fmin


# uv_file = '/home/ilya/github/bck/jetshow/uvf/0716+714_raks01xg_C_LL_0060s_uva.fits'
uv_file = '/home/ilya/github/bck/jetshow/uvf/2200+420_K_SVLBI.uvf'
uvdata_ext = UVData(uv_file)
uvdata_orig = UVData(uv_file)
# clean_difmap('2200+420_K_SVLBI.uvf', 'bllac_cc.fits', 'I', (8192, 0.0035),
#              path='/home/ilya/github/bck/jetshow/uvf/',
#              path_to_script='/home/ilya/github/vlbi_errors/difmap/final_clean_nw',
#              show_difmap_output=True)
comps = import_difmap_model('/home/ilya/github/bck/jetshow/uvf/ell_c_ell.mdl')
ext_model = Model(stokes='I')
ext_model.add_component(comps[-1])
# cc_fits = '/home/ilya/github/vlbi_errors/vlbi_errors/bllac_cc.fits'
# fig = uvdata_ext.uvplot()
# ccmodel = create_model_from_fits_file(cc_fits)
# ccmodel.filter_components_by_r(r_max_mas=0.15)
uvdata_ext.substitute([ext_model])
uvdata_core = uvdata_orig - uvdata_ext
# uvdata_core.save('/home/ilya/github/vlbi_errors/vlbi_errors/bllac_core.uvf')

# Set up ModelImage component
image = '/home/ilya/github/bck/jetshow/cmake-build-debug/map_i.txt'
image = np.loadtxt(image)
imsize = 1734
imsize = (imsize, imsize)
Example 22
def score(uv_fits_path,
          mdl_path,
          stokes='I',
          bmaj=None,
          score="l2",
          use_weights=True):
    """
    Returns rms of the trained model (CLEAN or difmap) on a given test UVFITS
    data set.

    :param uv_fits_path:
        Path to uv-fits file (test data).
    :param mdl_path:
        Path to difmap model text file or FITS-file with CLEAN model (trained
        model).
    :param stokes: (optional)
        Stokes parameter string. ``I``, ``RR`` or ``LL`` currently supported.
        (default: ``I``)
    :param bmaj: (optional)
        FWHM of the circular beam to account for. If ``None`` then do not
        account for the beam. (default: ``None``)
    :return:
        Per-point rms between the given test data and the trained model
        evaluated at the given test data points.
    """
    stokes = stokes.upper()
    if stokes not in ('I', 'RR', 'LL'):
        raise Exception("Only stokes I, RR or LL are supported!")

    if bmaj is not None:
        c = (np.pi * bmaj * mas_to_rad)**2 / (4 * np.log(2))
    else:
        c = 1.0

    # Loading test data with its own big mask
    uvdata = UVData(uv_fits_path)
    uvdata_model = UVData(uv_fits_path)

    # Loading trained model
    # CC-model
    try:
        model = create_model_from_fits_file(mdl_path)
    # Difmap model
    except IOError:
        dfm_mdl_dir, dfm_mdl_fname = os.path.split(mdl_path)
        comps = import_difmap_model(dfm_mdl_fname, dfm_mdl_dir)
        model = Model(stokes=stokes)
        model.add_components(*comps)

    # Computing difference and score
    uvdata_model.substitute([model])
    uvdata_diff = uvdata - uvdata_model
    if stokes == 'I':
        i_diff = 0.5 * (uvdata_diff.uvdata_weight_masked[..., 0] +
                        uvdata_diff.uvdata_weight_masked[..., 1])
        weights = (uvdata.weights_nw_masked[..., 0] +
                   uvdata.weights_nw_masked[..., 1])
    elif stokes == 'RR':
        i_diff = uvdata_diff.uvdata_weight_masked[..., 0]
        weights = uvdata.weights_nw_masked[..., 0]
    elif stokes == 'LL':
        i_diff = uvdata_diff.uvdata_weight_masked[..., 1]
        weights = uvdata.weights_nw_masked[..., 1]
    else:
        raise Exception("Only stokes (I, RR, LL) supported!")

    # Normalize weights
    weights = weights / np.ma.sum(weights)

    # Account for beam
    if bmaj is not None:
        u = uvdata_diff.uv[:, 0]
        v = uvdata_diff.uv[:, 1]
        taper = np.exp(-c * (u * u + v * v))
        i_diff = i_diff * taper[:, np.newaxis]

    # Number of unmasked visibilities (accounting each IF)
    if stokes == "I":
        # 2 means that Re & Im are counted independently
        factor = 2 * np.count_nonzero(~i_diff.mask)
    else:
        factor = np.count_nonzero(~i_diff.mask)

    print("Number of independent test data points = ", factor)
    if score == "l2":
        if use_weights:
            result = np.sqrt(
                (np.ma.sum(i_diff * i_diff.conj() * weights)).real)
        else:
            result = np.sqrt((np.ma.sum(i_diff * i_diff.conj())).real / factor)
    elif score == "l1":
        if use_weights:
            result = (np.ma.sum(np.abs(i_diff) * weights)).real
        else:
            result = (np.ma.sum(np.abs(i_diff))).real / factor
    else:
        raise Exception("score must be in (l1, l2)!")
    return result
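
A hypothetical usage sketch (paths are placeholders): compare the weighted L2 score with an unweighted L1 score on the same held-out data.

rms_l2 = score('/data/test.uvf', '/data/trained.mdl', stokes='I',
               score="l2", use_weights=True)
rms_l1 = score('/data/test.uvf', '/data/trained.mdl', stokes='I',
               score="l1", use_weights=False)
print("L2 = {}, L1 = {}".format(rms_l2, rms_l1))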
Example 23
    def __call__(self, p):
        lnpr = self.lnpr(p[:])
        if not np.isfinite(lnpr):
            return -np.inf
        return self.lnlik(p[:]) + lnpr


if __name__ == '__main__':
    from spydiff import import_difmap_model
    from uv_data import UVData
    from model import Model, Jitter
    uv_fits = '/home/ilya/code/vlbi_errors/pet/0235+164_X.uvf_difmap'
    uvdata = UVData(uv_fits)
    # Create model
    mdl = Model(stokes='RR')
    comps = import_difmap_model('0235+164_X.mdl',
                                '/home/ilya/code/vlbi_errors/pet')
    comps[0].add_prior(flux=(sp.stats.uniform.logpdf, [0., 10], dict(),),
                       bmaj=(sp.stats.uniform.logpdf, [0, 1], dict(),),
                       e=(sp.stats.uniform.logpdf, [0, 1.], dict(),),
                       bpa=(sp.stats.uniform.logpdf, [0, np.pi], dict(),))
    comps[1].add_prior(flux=(sp.stats.uniform.logpdf, [0., 3], dict(),),
                       bmaj=(sp.stats.uniform.logpdf, [0, 5], dict(),))
    mdl.add_components(*comps)

    # Create log of likelihood function
    lnlik = LnLikelihood(uvdata, mdl)
    lnpr = LnPrior(mdl)
    lnpost = LnPost(uvdata, mdl)
    p = mdl.p + [0.04]
    print(lnpr(p))
    print(lnlik(p))
Example 24
#
# # That is test data set with last IF only
# imdata = uvdata.hdu.data['DATA'][:, :, :, 7, :, :, :]
# pardata = [uvdata.hdu.data[key] for key in uvdata.hdu.data.parnames]
# x = pf.GroupData(imdata, parnames=uvdata.hdu.data.parnames, pardata=pardata,
#                  bitpix=-32)
# header = uvdata.hdu.header.copy()
# header['NAXIS5'] = 1
# hdu = pf.GroupsHDU(data=x, header=header)
# hdu.writeto(os.path.join(data_dir, 'oj287_test.fits'), clobber=True)
# uvdata_test = UVData(os.path.join(data_dir, 'oj287_test.fits'))


original_model_fname = '2017_01_28us'
original_model_path = os.path.join(data_dir, original_model_fname)
comps = import_difmap_model(original_model_fname, data_dir)
model = Model(stokes='I')
model.add_components(*comps)


cv_scores = list()
train_scores = list()
for i, fname in enumerate(['1IF.fits', '12IF.fits', '123IF.fits', '1234IF.fits',
                          '12345IF.fits', '123456IF.fits', '1234567IF.fits']):
    current_fits = os.path.join(data_dir, fname)
    modelfit_difmap(current_fits,
                    original_model_fname, 'out_{}.mdl'.format(i),
                    path=data_dir, mdl_path=data_dir,
                    out_path=data_dir, niter=100)
    comps = import_difmap_model('out_{}.mdl'.format(i), data_dir)
    model = Model(stokes='I')
Example 25
def create_sample(original_uv_fits,
                  original_mdl_file,
                  outdir=None,
                  n_sample=100,
                  stokes='I'):
    """
    Create `sample` from `true` or `model` source

    :param outdir: (optional)
        Directory to store intermediate results. If ``None`` then use CWD.
        (default: ``None``)
    :param n_sample: (optional)
        Number of `samples` from the infinite population to consider in the
        coverage analysis of intervals. Here `samples` are observations of a
        known source with different realisations of noise with known
        parameters. (default: ``100``)
    :param stokes: (optional)
        Stokes parameter to use. (default: ``I``)
    """
    original_uv_data = UVData(original_uv_fits)
    noise = original_uv_data.noise()
    mdl_dir, mdl_fname = os.path.split(original_mdl_file)
    comps = import_difmap_model(mdl_fname, mdl_dir)
    original_model = Model(stokes=stokes)
    original_model.add_components(*comps)

    # Substitute uv-data with original model and create `model` uv-data
    print("Substituting `original` uv-data with CLEAN model...")
    model_uv_data = copy.deepcopy(original_uv_data)
    model_uv_data.substitute([original_model])

    # Create `sample` uv-data
    # Add noise to `model` uv-data ``n_cov`` times and get ``n_cov`` `samples`
    # from population
    sample_uv_fits_paths = list()
    print("Creating {} `samples` from population".format(n_sample))
    for i in range(n_sample):
        sample_uv_data = copy.deepcopy(model_uv_data)
        sample_uv_data.noise_add(noise)
        sample_uv_fits_path = os.path.join(
            outdir, 'sample_uv_{}.uvf'.format(str(i + 1).zfill(3)))
        sample_uv_data.save(sample_uv_fits_path)
        sample_uv_fits_paths.append(sample_uv_fits_path)

    # Fitting in difmap each `sample` FITS-file
    print("Fitting `samples` uv-data")
    for uv_fits_path in sample_uv_fits_paths:
        uv_fits_dir, uv_fits_fname = os.path.split(uv_fits_path)
        j = uv_fits_fname.split('.')[0].split('_')[-1]
        print("Fitting {} sample uv-data to"
              " {}".format(
                  uv_fits_path,
                  os.path.join(outdir, 'sample_model_{}.mdl'.format(j))))
        modelfit_difmap(uv_fits_fname,
                        original_mdl_file,
                        'sample_model_{}.mdl'.format(j),
                        path=uv_fits_dir,
                        mdl_path=uv_fits_dir,
                        out_path=uv_fits_dir)

    sample_mdl_paths = sorted(
        glob.glob(os.path.join(outdir, 'sample_model_*.mdl')))
    sample_uv_fits_paths = sorted(
        glob.glob(os.path.join(outdir, 'sample_uv_*.uvf')))
    return sample_uv_fits_paths, sample_mdl_paths
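
A hypothetical usage sketch (paths are placeholders):

sample_uv_fits_paths, sample_mdl_paths = create_sample(
    '/data/original.uvf', '/data/original.mdl',
    outdir='/tmp/coverage', n_sample=100, stokes='I')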
Example 26
            cellsize = 0.2

        clean_difmap(uv_fits_save_fname,
                     cc_fits_save_fname,
                     'I', (1024, cellsize),
                     path=out_dir,
                     path_to_script=path_to_script,
                     show_difmap_output=False,
                     outpath=out_dir)

        ccimage = create_clean_image_from_fits_file(
            os.path.join(out_dir, cc_fits_save_fname))
        beam = ccimage.beam
        rms = rms_image(ccimage)
        blc, trc = find_bbox(ccimage.image, rms, 10)
        comps = import_difmap_model(out_dfm_model_fn, out_dir)

        plot_fitted_model(os.path.join(out_dir, uv_fits_save_fname),
                          comps,
                          savefig=os.path.join(
                              out_dir, "difmap_model_uvplot_{}_{}.png".format(
                                  str(i).zfill(2), freq)))

        fig = iplot(ccimage.image,
                    x=ccimage.x,
                    y=ccimage.y,
                    min_abs_level=3 * rms,
                    beam=beam,
                    show_beam=True,
                    blc=blc,
                    trc=trc,
Example 27
download_mojave_uv_fits(source, [epoch.replace('-', '_')],
                        bands=['u'],
                        download_dir=data_dir)
# Fetch model file
get_mojave_mdl_file(tsv_table, source, epoch, outdir=data_dir)
# Clean uv-fits
clean_difmap(uv_fits,
             'cc.fits',
             'I', [1024, 0.1],
             path=data_dir,
             path_to_script=path_to_script,
             outpath=data_dir)

# Create clean image instance
cc_image = create_clean_image_from_fits_file(os.path.join(data_dir, 'cc.fits'))
comps = import_difmap_model(mdl_fname, data_dir)
model = Model(stokes='I')
model.add_components(*comps)

# Check that model fits UV-data well
uv_data = UVData(os.path.join(data_dir, uv_fits))
uv_data.uvplot()
mdl_data = copy.deepcopy(uv_data)
mdl_data.substitute([model])
mdl_data.uvplot(sym='.r')

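# Compare the image restored from the difmap model with the original CLEAN image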
cc_image_ = copy.deepcopy(cc_image)
cc_image_._image = np.zeros(cc_image._image.shape, dtype=float)
cc_image_.add_model(model)
plt.figure()
plt.matshow(cc_image_.cc_image - cc_image.cc_image)
Example no. 28
def abc_simulations(param,
                    z,
                    freqs,
                    uv_fits_templates,
                    cc_images,
                    pickle_history,
                    out_dir=None):
    """
    Simulation function for ABC. Simulates data given parameters (``b, n, los``)
    and returns the vector of the summary statistics.

    :param b:
        Value of the magnetic field at 1 pc [G].
    :param n:
        Value of particle density at 1 pc [cm^(-3)].
    :param los:
        Jet angle to the line of site [rad].
    :param z:
        Redshift of the source.
    :param freqs:
        Iterable of frequencies.
    :param uv_fits_templates:
        Iterable of paths to FITS files with self-calibrated uv-data. In order
        of ``freqs``.
    :param cc_images:
        Iterable of paths to FITS files with CLEAN maps. In order of ``freqs``.
    :param out_dir: (optional)
        Directory to store files. If ``None`` then use CWD. (default: ``None``)
    :param pickle_history:
        Path to pickle file with dictionary of simulations history.

    :return:
        Summary statistics. E.g. "observed" core flux at highest frequency,
        "observed" core size at highest frequency, "observed" ellipticity of the
        core at highest frequency, core shift between frequencies (distance
        between "core shift - frequency" curves).
    """
    if out_dir is None:
        out_dir = os.getcwd()
    else:
        # Create the directory if it does not exist yet
        if not os.path.exists(out_dir):
            os.makedirs(out_dir)

    result_dict = dict()
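    # ``param`` holds natural logarithms of (b, n, los); recover the physical values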
    b, n, los = np.exp(param)
    p = Parameters(b, n, los)
    # Pickle files should be opened in binary mode
    with open(pickle_history, 'rb') as fo:
        history = pickle.load(fo)
    print("Running simulations with b={}, n={}, los={}".format(b, n, los))
    for freq, uv_fits_template, cc_image in zip(freqs, uv_fits_templates,
                                                cc_images):

        # Cleaning old results if any
        simulated_maps_old = glob.glob(os.path.join(exe_dir, "map*.txt"))
        for to_remove in simulated_maps_old:
            os.unlink(to_remove)

        # Check whether simulations at the highest frequency are already done;
        # if so, reuse its image parameters scaled to the current frequency
        # (a worked example of the scaling is given after the ``else`` branch)
        if p in history and freqs[0] in history[p]:
            pixel_size_mas = history[p][
                freqs[0]]["pixel_size_mas"] * freqs[0] / freq
            number_of_pixels = int(history[p][
                freqs[0]]["number_of_pixels"] * freq / freqs[0])
            map_size = (number_of_pixels, pixel_size_mas)
            print("Using scaled image parameters: {}, {}".format(
                number_of_pixels, pixel_size_mas))
        else:
            pixel_size_mas = 0.01
            number_of_pixels = 400
            map_size = None
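        # Illustrative numbers for the scaling above (taking freqs[0] = 15.4 GHz
        # purely as an example): an 8.1 GHz map would get
        # 0.01 * 15.4 / 8.1 ~= 0.019 mas pixels over 400 * 8.1 / 15.4 ~= 210
        # pixels, so the field of view (pixel size times pixel number) stays
        # the same at every frequency.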

        update_dict = {
            "jet": {
                "bfield": {
                    "parameters": {
                        "b_1": b
                    }
                },
                "nfield": {
                    "parameters": {
                        "n_1": n
                    }
                }
            },
            "observation": {
                "los_angle": los,
                "frequency_ghz": freq,
                "redshift": z
            },
            "image": {
                "pixel_size_mas": pixel_size_mas,
                "number_of_pixels": number_of_pixels
            }
        }
        update_config(cfg_file, update_dict)

        # FIXME: Handle ``FailedFindBestImageParamsException`` during ABC run
        simulation_params = run_simulations(cfg_file,
                                            path_to_executable,
                                            map_size=map_size)

        # Find total flux on simulated image
        image = os.path.join(exe_dir, "map_i.txt")
        image = np.loadtxt(image)

        # Rare case of unphysical fluxes: clip negative and implausibly large values
        image[image < 0] = 0
        image[image > 10.0] = 0

        # Total model flux at current frequency
        total_flux = image.sum()
        # Maximum model pixel flux at current frequency
        max_flux = image.max()
        cc_image = create_clean_image_from_fits_file(cc_image)
        noise_factor = 1.0

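        # Fit the simulated uv-data in difmap, starting from ``initial_cg.mdl``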
        initial_dfm_model = os.path.join(main_dir, 'initial_cg.mdl')
        out_dfm_model_fn = "bk_{}.mdl".format(freq)
        uv_fits_save_fname = "bk_{}.fits".format(freq)
        modelfit_simulation_result(exe_dir,
                                   initial_dfm_model,
                                   noise_factor=noise_factor,
                                   out_dfm_model_fn=out_dfm_model_fn,
                                   out_dir=out_dir,
                                   params=simulation_params,
                                   uv_fits_save_fname=uv_fits_save_fname,
                                   uv_fits_template=uv_fits_template)

        # Measured core-jet separation (from the fitted difmap model) and true
        # core separation from the image center (from the simulation)
        dr_obs = find_core_separation_from_jet_using_difmap_model(
            os.path.join(out_dir, out_dfm_model_fn))
        dr_true = find_core_separation_from_center_using_simulations(
            os.path.join(exe_dir, "map_i.txt"), simulation_params)

        # A non-positive total flux means something went wrong in jetshow
        if total_flux <= 0:
            print("b = {}, n = {}, los = {}".format(b, n, los))
            open(
                os.path.join(
                    out_dir,
                    "total_disaster_{}_{}_{}_{}.txt".format(b, n, los, freq)),
                'a').close()
            raise TotalDisasterException

        # Plot map with components superimposed
        cc_fits_save_fname = "bk_cc_{}.fits".format(freq)

        # For 18 cm (1.665 GHz) we need a larger pixel size
        cellsize = 0.1
        if freq == 1.665:
            cellsize = 0.5
        elif freq == 8.1:
            cellsize = 0.2

        clean_difmap(uv_fits_save_fname,
                     cc_fits_save_fname,
                     'I', (1024, cellsize),
                     path=out_dir,
                     path_to_script=path_to_script,
                     show_difmap_output=False,
                     outpath=out_dir)

        ccimage = create_clean_image_from_fits_file(
            os.path.join(out_dir, cc_fits_save_fname))
        beam = ccimage.beam
        rms = rms_image(ccimage)
        blc, trc = find_bbox(ccimage.image, rms, 10)
        comps = import_difmap_model(out_dfm_model_fn, out_dir)

        plot_fitted_model(os.path.join(out_dir, uv_fits_save_fname),
                          comps,
                          savefig=os.path.join(
                              out_dir,
                              "difmap_model_uvplot_{}.png".format(freq)))

        fig = iplot(ccimage.image,
                    x=ccimage.x,
                    y=ccimage.y,
                    min_abs_level=3 * rms,
                    beam=beam,
                    show_beam=True,
                    blc=blc,
                    trc=trc,
                    components=comps,
                    close=True,
                    colorbar_label="Jy/beam")
        fig.savefig(os.path.join(out_dir, "cc_{}.png".format(freq)))

        # Cut the simulated maps and move them to the output directory
        cut_image(os.path.join(exe_dir, "map_i.txt"))
        cut_image(os.path.join(exe_dir, "map_l.txt"))
        cut_image(os.path.join(exe_dir, "map_q.txt"))
        cut_image(os.path.join(exe_dir, "map_u.txt"))
        cut_image(os.path.join(exe_dir, "map_v.txt"))
        cut_image(os.path.join(exe_dir, "map_tau.txt"))
        for name in ('i', 'q', 'u', 'v', 'tau', 'l'):
            shutil.move(
                os.path.join(exe_dir, "map_{}.txt".format(name)),
                os.path.join(out_dir, "map_{}_{}.txt".format(name, freq)))

        # Derived quantities: distance of the core from the SMBH, magnetic
        # field at the core and its synchrotron cooling time (in years,
        # using pi * 10**7 s ~ 1 yr)
        dr_pc = distance_from_SMBH(dr_true, los, z=z)
        b_core = b_field(b, dr_pc)
        t_syn_years = t_syn(b_core, freq) / (np.pi * 10**7)
        to_results = {
            "dr_obs": dr_obs,
            "dr_true": dr_true,
            "flux": total_flux,
            "flux_obs": comps[0].p[0],
            "bmaj_obs": comps[0].p[3],
            "tb_difmap": np.log10(tb_comp(comps[0].p[0], comps[0].p[3], freq,
                                          z=z)),
            "tb_pix": np.log10(tb(max_flux, freq,
                                  simulation_params[u'image'][u'pixel_size_mas'],
                                  z=z)),
            "b_core": b_core,
            "dr_core_pc": dr_pc,
            "t_syn_core": t_syn_years,
            "pixel_size_mas": simulation_params[u'image'][u'pixel_size_mas'],
            "number_of_pixels": simulation_params[u'image'][u'number_of_pixels']
        }
        result_dict[freq] = to_results

        history[p] = result_dict
        with open(pickle_history, 'wb') as fo:
            pickle.dump(history, fo)

    return create_summary_from_result_dict(result_dict, (0.3, 0.2, 0.1))
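For orientation, a sketch of how this simulation function might be called. The
two lower frequencies follow the values used inside the function; the highest
frequency, the file names, the redshift and the parameter values are
illustrative assumptions rather than anything taken from the original code:

import numpy as np

# Natural logs of (b [G], n [cm^-3], los [rad]); the values are made up
param = np.log([0.1, 500.0, 0.05])

summary = abc_simulations(param,
                          z=0.5,                     # assumed redshift
                          freqs=[15.4, 8.1, 1.665],  # GHz, highest first
                          uv_fits_templates=["template_u.fits",
                                             "template_x.fits",
                                             "template_l.fits"],
                          cc_images=["cc_u.fits", "cc_x.fits", "cc_l.fits"],
                          pickle_history="abc_history.pkl",  # must hold a pickled dict
                          out_dir="abc_run")
print(summary)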