def smooth_image(input_file=None,
                 output_file=None,
                 final_res=None,
                 clobber=True,
                 verbose=True):
    ''' Smooths an image to a desired resolution in arcsec.
    '''

    # import external modules
    from scipy.ndimage import gaussian_filter as smooth
    import pyfits as pf
    import numpy as np

    # Load fits file
    image, header = pf.getdata(input_file, header=True)

    if verbose:
        print(input_file, image.shape)

    # Smooth the image
    # CDELT2 is in degrees/pixel; convert to arcsec/pixel so the target FWHM is in pixels
    fwhm = final_res / (header['CDELT2'] * 3600.)
    if image.ndim > 2:
        image_smooth = np.zeros(image.shape)
        for i in range(image.shape[0]):
            image_smooth[i, :, :] = smooth(image[i, :, :],
                                           sigma=(fwhm / 2., fwhm / 2.))
    else:
        image_smooth = smooth(image, sigma=(fwhm / 2., fwhm / 2.))

    # Write out the fits file
    pf.writeto(output_file, image_smooth, header=header, clobber=clobber)
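A minimal usage sketch (the file names and the 60-arcsec target are hypothetical; note that the function approximates sigma as FWHM/2 rather than the exact FWHM/2.355):

# Hypothetical call: smooth a FITS image to 60 arcsec resolution.
smooth_image(input_file='map.fits', output_file='map_smooth60.fits',
             final_res=60.)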
Example 2
def get_bubble_from_signal(sig_x,
                           sig_y,
                           sig_z,
                           calib_radius_func,
                           sigma=1,
                           flip_signal=False,
                           verbose=False):
    if flip_signal:
        sig_z = np.flip(sig_z, axis=0)

    sig_z = smooth(sig_z, sigma=sigma)
    first_peak_arg, second_peak_arg = get_salient_peaks(sig_z)
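    # Half the peak separation maps to a radius via the calibration function;
    # the bubble centre (below) is taken midway between the two peaks.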
    radius = calib_radius_func(np.abs(second_peak_arg - first_peak_arg) / 2)
    max_peak_x = sig_x[first_peak_arg]

    if verbose:
        print("first_peak_arg:", first_peak_arg)
        print("second_peak_arg", second_peak_arg)

    if np.isnan(second_peak_arg):
        return Circle(0, 0, 1)
    else:
        second_peak_x = sig_x[second_peak_arg]

        if verbose:
            print("max_peak_x", max_peak_x)
            print("second_peak_x", second_peak_x)

        cent_x = (max_peak_x + second_peak_x) / 2
        cent_y = sig_y[0]

        return Circle(cent_x, cent_y, radius)
Example 3
def test_autocorrelation():
    x = []
    for ii in range(10):
        n = np.random.randint(300, 501)  # random_integers is deprecated; randint's upper bound is exclusive
        t = np.arange(n) / 100.
        x += [smooth(np.random.normal(0, 1, t.shape), sigma=3)]

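    # x holds ten smoothed-noise segments of random, unequal length (3-5 s at fs=100).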
    acorr, t, psd, f = autocorrelation(x, fs=100)
    fig, axs = plt.subplots(3, 1)
    axs[0].plot(x[0])
    axs[0].plot(x[1])
    axs[0].plot(x[2])
    axs[1].plot(f, psd)
    axs[2].plot(t, acorr)
Example 4
def smooth_image(input_file=None, output_file=None, final_res=None,
                 clobber=True, verbose=True):
    ''' Smooths an image to a desired resolution in arcsec.
    '''

    # import external modules
    from scipy.ndimage import gaussian_filter as smooth
    import pyfits as pf

    # Load fits file
    image, header = pf.getdata(input_file, header=True)

    # Smooth the image
    # CDELT2 is in degrees/pixel; convert to arcsec/pixel so the target FWHM is in pixels
    fwhm = final_res / (header['CDELT2'] * 3600.)
    image_smooth = smooth(image, sigma=(fwhm / 2., fwhm / 2.))

    # Write out the fits file
    pf.writeto(output_file, image_smooth, header=header, clobber=clobber)
Example 5
    def add_timepoints(self, models, session, heading_smoothing=1):

        # get smoothed headings
        hxyz = smooth(self.hxyz, heading_smoothing)

        # add timepoints
        for tp_ctr in range(self.ts + 1):

            tp = models.Timepoint()

            tp.hxyz = hxyz[tp_ctr]
            tp.xidx, tp.yidx, tp.zidx = self.pos_idx[tp_ctr]
            tp.odor = self.odor[tp_ctr]
            tp.detected_odor = self.detected_odor[tp_ctr]
            tp.src_entropy = self.entropies[tp_ctr]
            session.add(tp)

            # get timepoint start and end ids if first iteration
            if tp_ctr == 0:
                session.flush()
                self.start_tp_id = tp.id
                self.end_tp_id = self.start_tp_id + self.ts
Example 6
def get_candidate_signals(img,
                          signal_len,
                          left_offset=5,
                          threshold_abs=50,
                          min_distance=2,
                          smooth_img=False,
                          verbose=False):

    if smooth_img:
        img = smooth(img, sigma=1, axis=0)

    local_max = peak_local_max(img,
                               min_distance=min_distance,
                               threshold_abs=threshold_abs,
                               exclude_border=True)

    signals_z = []
    signals_x = []
    signals_y = []

    for idx, lm in enumerate(local_max):
        xmin_offset = lm[1] - left_offset
        if xmin_offset < 0:
            xmin_offset = 0
        xmax_offset = lm[1] + signal_len - left_offset
        if xmax_offset > img.shape[1]:
            xmax_offset = img.shape[1] - 1

        sz = img[lm[0], xmin_offset:xmax_offset]
        sx = np.arange(xmin_offset, xmax_offset, 1)
        sy = np.ones(sz.shape) * lm[0]

        if len(sz) == len(sx):
            signals_z.append(sz)
            signals_x.append(sx)
            signals_y.append(sy)

    return signals_x, signals_y, signals_z
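A usage sketch, assuming img is a 2D NumPy array and that smooth here is scipy's gaussian_filter1d (as imported in the later examples):

# Hypothetical call: pull 50-sample horizontal windows around bright local maxima.
sig_x, sig_y, sig_z = get_candidate_signals(img, signal_len=50, threshold_abs=50)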
Example 7
def sort_trials(trialsfile, sortedfile):
    # Load trials
    trials, ntrials = load_trials(trialsfile)

    # Preferred targets
    preferred_targets = get_preferred_targets(trials)

    # Smoothing parameter
    t = trials[0]['t']
    dt = t[1] - t[0]
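    # 50 time units of Gaussian smoothing, expressed as a number of time steps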
    sigma_smooth = int(50 / dt)

    #-------------------------------------------------------------------------------------
    # Sort
    #-------------------------------------------------------------------------------------

    sortby = [
        'all', 'choice', 'motion_choice', 'colour_choice', 'context_choice'
    ]

    sorted_trials = {s: {} for s in sortby}
    ncorrect = 0
    for i, trial in enumerate(trials):
        choice = get_choice(trial)
        if choice == 0:
            target = +1
        else:
            target = -1

        for s in ['all']:
            sorted_trial = sort_func(s, preferred_targets, target, trial)
            for unit, cond in enumerate(sorted_trial):
                sorted_trials[s].setdefault(cond, []).append((i, unit))

        if choice == trial['info']['choice']:
            ncorrect += 1
            for s in sortby:
                if s in ['all']:
                    continue
                sorted_trial = sort_func(s, preferred_targets, target, trial)
                for unit, cond in enumerate(sorted_trial):
                    sorted_trials[s].setdefault(cond, []).append((i, unit))
    print("[ {}.sort_trials ] {:.2f}% correct.".format(
        THIS, 100 * ncorrect / ntrials))

    #-------------------------------------------------------------------------------------
    # Average within conditions
    #-------------------------------------------------------------------------------------

    nunits, ntime = trial['r'].shape
    for s in sorted_trials:
        # Average
        for cond, i_unit in sorted_trials[s].items():
            r = np.zeros((nunits, ntime))
            n = np.zeros(nunits)
            for i, unit in i_unit:
                r[unit] += trials[i]['r'][unit]
                n[unit] += 1
            r = r * np.tile(safe_divide(n), (ntime, 1)).T
            sorted_trials[s][cond] = smooth(r, sigma_smooth, axis=1)

        # Normalize
        X = 0
        X2 = 0
        n = 0
        for cond, r in sorted_trials[s].items():
            X += np.sum(r, axis=1)
            X2 += np.sum(r**2, axis=1)
            n += r.shape[1]
        mean = X / n
        std = np.sqrt(X2 / n - mean**2)

        mean = np.tile(mean, (ntime, 1)).T
        std = np.tile(std, (ntime, 1)).T
        for cond, r in sorted_trials[s].items():
            sorted_trials[s][cond] = (r - mean) / std

    #-------------------------------------------------------------------------------------
    # Save
    #-------------------------------------------------------------------------------------

    with open(sortedfile, 'wb') as f:
        pickle.dump((t, sorted_trials), f, pickle.HIGHEST_PROTOCOL)
    print("[ {}.sort_trials ] Sorted trials saved to {}".format(
        THIS, sortedfile))
Example 8
def calculate_yy(bin_edges,arrays,region,version,cov_versions,beam_version,
                 effective_freq,overwrite,maxval,unsanitized_beam=False,do_weights=False,
                 pa1_shift = None,
                 pa2_shift = None,
                 pa3_150_shift = None,
                 pa3_090_shift = None,
                 no_act_color_correction=False, ccor_exp = -1,
                 sim_splits=None,unblind=False,all_analytic=False,beta_samples=None):
    """
    We calculate the yy power spectrum as follows.
    We restrict the Fourier modes in our analysis to those within bin_edges.
    This way we don't carry irrelevant pixels and thus speed up the ability to MC.
    We accept two covariance versions in cov_versions, which correspond to
    [act_covariance_from_split_0, act_covariance_from_split_1, other_covs].
    Thus the ACT auto covariances are pre-calculated.
    """
    arrays = arrays.split(',')
    narrays = len(arrays)
    if sim_splits is not None: assert not(unblind)
    def warn(): print("WARNING: no bandpass file found. Assuming array ",dm.c['id']," has no response to CMB, tSZ and CIB.")
    aspecs = tutils.ASpecs().get_specs
    bandpasses = not(effective_freq)
    savedir = tutils.get_save_path(version,region)
    assert len(cov_versions)==3
    covdirs = [tutils.get_save_path(cov_versions[i],region) for i in range(3)]
    for covdir in covdirs: assert os.path.exists(covdir)
    if not(overwrite):
        assert not(os.path.exists(savedir)), \
       "This version already exists on disk. Please use a different version identifier."
    try: os.makedirs(savedir)
    except:
        if overwrite: pass
        else: raise


    mask = enmap.read_map(covdir+"tilec_mask.fits")


    from scipy.ndimage import gaussian_filter as smooth
    pm = enmap.read_map("/scratch/r/rbond/msyriac/data/planck/data/pr2/COM_Mask_Lensing_2048_R2.00_car_deep56_interp_order0.fits")
    wcs = pm.wcs
    mask = enmap.enmap(smooth(pm,sigma=10),wcs) * mask
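    # Smoothing the binary Planck lensing mask apodizes its edges before it multiplies the analysis mask.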


    shape,wcs = mask.shape,mask.wcs
    Ny,Nx = shape
    modlmap = enmap.modlmap(shape,wcs)
    omodlmap = modlmap.copy()
    ells = np.arange(0,modlmap.max())
    minell = maps.minimum_ell(shape,wcs)
    sel = np.where(np.logical_and(modlmap>=bin_edges[0]-minell,modlmap<=bin_edges[-1]+minell))
    modlmap = modlmap[sel]
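    # Keep only the Fourier modes inside the requested bin range (padded by the minimum ell), per the docstring.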

    bps = []
    lbeams = []
    kbeams = []
    shifts = []
    cfreqs = []
    lmins = []
    lmaxs = []
    names = []
    for i,qid in enumerate(arrays):
        dm = sints.models[sints.arrays(qid,'data_model')](region=mask,calibrated=True)
        if dm.name=='act_mr3':
            season,array1,array2 = sints.arrays(qid,'season'),sints.arrays(qid,'array'),sints.arrays(qid,'freq')
            array = '_'.join([array1,array2])
        elif dm.name=='planck_hybrid':
            season,patch,array = None,None,sints.arrays(qid,'freq')
        else:
            raise ValueError
        lmin,lmax,hybrid,radial,friend,cfreq,fgroup,wrfit = aspecs(qid)
        lmins.append(lmin)
        lmaxs.append(lmax)
        names.append(qid)
        cfreqs.append(cfreq)
        if bandpasses:
            try: 
                fname = dm.get_bandpass_file_name(array) 
                bps.append("data/"+fname)
                if (pa1_shift is not None) and 'PA1' in fname:
                    shifts.append(pa1_shift)
                elif (pa2_shift is not None) and 'PA2' in fname:
                    shifts.append(pa2_shift)
                elif (pa3_150_shift is not None) and ('PA3' in fname) and ('150' in fname):
                    shifts.append(pa3_150_shift)
                elif (pa3_090_shift is not None) and ('PA3' in fname) and ('090' in fname):
                    shifts.append(pa3_090_shift)
                else:
                    shifts.append(0)

            except:
                warn()
                bps.append(None)
        else:
            try: bps.append(cfreq)
            except:
                warn()
                bps.append(None)

        kbeam = tutils.get_kbeam(qid,modlmap,sanitize=not(unsanitized_beam),version=beam_version,planck_pixwin=True)
        if dm.name=='act_mr3':
            lbeam = tutils.get_kbeam(qid,ells,sanitize=not(unsanitized_beam),version=beam_version,planck_pixwin=False) # note: no pixel window here, but it doesn't matter since there is no color correction for Planck
        elif dm.name=='planck_hybrid':
            lbeam = None
        else:
            raise ValueError
        lbeams.append(lbeam)
        kbeams.append(kbeam.copy())
    # Make responses
    responses = {}

    def _get_response(comp,param_override=None):
        if bandpasses:
            if no_act_color_correction:
                r = tfg.get_mix_bandpassed(bps, comp, bandpass_shifts=shifts,
                                           param_dict_override=param_override)
            else:
                r = tfg.get_mix_bandpassed(bps, comp, bandpass_shifts=shifts,
                                           ccor_cen_nus=cfreqs, ccor_beams=lbeams, 
                                           ccor_exps = [ccor_exp] * narrays,
                                           param_dict_override=param_override)
        else:
            r = tfg.get_mix(bps, comp,param_dict_override=param_override)
        return r

    for comp in ['tSZ','CMB','CIB']:
        responses[comp] = _get_response(comp,None)


    
    from tilec.utils import is_planck
    ilcgens = []
    okcoadds = []
    for splitnum in range(2):
        covdir = covdirs[splitnum]
        kcoadds = []
        for i,qid in enumerate(arrays):
            lmin = lmins[i]
            lmax = lmaxs[i]

            if is_planck(qid):
                dm = sints.models[sints.arrays(qid,'data_model')](region=mask,calibrated=True)

                _,kcoadd,_ = kspace.process(dm,region,qid,mask,
                                            skip_splits=True,
                                            splits_fname=sim_splits[i] if sim_splits is not None else None,
                                            inpaint=False,fn_beam = None,
                                            plot_inpaint_path = None,
                                            split_set=splitnum)
            else:
                kcoadd_name = covdir + "kcoadd_%s.npy" % qid
                kcoadd = enmap.enmap(np.load(kcoadd_name),wcs)

            kmask = maps.mask_kspace(shape,wcs,lmin=lmin,lmax=lmax)
            dtype = kcoadd.dtype
            kcoadds.append((kcoadd.copy()*kmask)[sel])

        kcoadds = enmap.enmap(np.stack(kcoadds),wcs)
        okcoadds.append(kcoadds.copy())


        # Read Covmat
        ctheory = ilc.CTheory(modlmap)
        nells = kcoadds[0].size
        cov = np.zeros((narrays,narrays,nells))
        for aindex1 in range(narrays):
            for aindex2 in range(aindex1,narrays):
                qid1 = names[aindex1]
                qid2 = names[aindex2]
                if is_planck(names[aindex1]) or is_planck(names[aindex2]) or all_analytic:
                    lmin,lmax,hybrid,radial,friend,f1,fgroup,wrfit = aspecs(qid1)
                    lmin,lmax,hybrid,radial,friend,f2,fgroup,wrfit = aspecs(qid2)
                    # If both are Planck and same array, get white noise from last bin
                    icov = ctheory.get_theory_cls(f1,f2,a_cmb=1,a_gal=0.8)*kbeams[aindex1]*kbeams[aindex2]
                    if aindex1==aindex2:
                        pcov = enmap.enmap(np.load(covdirs[2]+"tilec_hybrid_covariance_%s_%s.npy" % (names[aindex1],names[aindex2])),wcs)
                        pbin_edges = np.append(np.arange(500,3000,200) ,[3000,4000,5000,5800])
                        pbinner = stats.bin2D(omodlmap,pbin_edges)
                        w = pbinner.bin(pcov)[1][-1]
                        icov = icov + w
                else:
                    icov = np.load(covdir+"tilec_hybrid_covariance_%s_%s.npy" % (names[aindex1],names[aindex2]))[sel]
                if aindex1==aindex2: 
                    icov[modlmap<lmins[aindex1]] = maxval
                    icov[modlmap>lmaxs[aindex1]] = maxval
                cov[aindex1,aindex2] = icov
                cov[aindex2,aindex1] = icov

        assert np.all(np.isfinite(cov))

        ilcgen = ilc.HILC(modlmap,np.stack(kbeams),cov=cov,responses=responses,invert=True)
        ilcgens.append(ilcgen)
      

    solutions = ['tSZ','tSZ-CMB','tSZ-CIB']
    ypowers = {}
    w2 = np.mean(mask**2.)
    binner = stats.bin2D(modlmap,bin_edges)
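    # Fix the seed so the blinding factor below is reproducible between runs.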
    np.random.seed(100)
    blinding = np.random.uniform(0.8,1.2) if not(unblind) else 1


    def _get_ypow(sname,dname,dresponse=None,dcmb=False):

        if dresponse is not None:
            assert dname is not None
            for splitnum in range(2):
                ilcgens[splitnum].add_response(dname,dresponse)

        ykmaps = []
        for splitnum in range(2):
            if dcmb:
                assert dname is not None
                ykmap = ilcgens[splitnum].multi_constrained_map(okcoadds[splitnum],sname,[dname,"CMB"])
            else:
                if dname is None:
                    ykmap = ilcgens[splitnum].standard_map(okcoadds[splitnum],sname)
                else:
                    ykmap = ilcgens[splitnum].constrained_map(okcoadds[splitnum],sname,dname)
            ykmaps.append(ykmap.copy())

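        # Cross-correlating the two independent splits avoids the noise bias an
        # auto-spectrum would carry; w2 corrects for the mask window.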
        ypower = (ykmaps[0]*ykmaps[1].conj()).real / w2
        return binner.bin(ypower)[1] * blinding


    # The usual solutions
    for solution in solutions:

        sols = solution.split('-')
        if len(sols)==2:
            sname = sols[0]
            dname = sols[1]
        elif len(sols)==1:
            sname = sols[0]
            dname = None
        else:
            raise ValueError

        ypowers[solution] = _get_ypow(sname,dname,dresponse=None)


    # The CIB SED samples
    if beta_samples is not None:
        y_bsamples = []
        y_bsamples_cmb = []
        for beta in beta_samples:
            pdict = tfg.default_dict.copy()
            pdict['beta_CIB'] = beta
            response = _get_response("CIB",param_override=pdict)
            y_bsamples.append(  _get_ypow("tSZ","iCIB",dresponse=response,dcmb=False) )
            y_bsamples_cmb.append(  _get_ypow("tSZ","iCIB",dresponse=response,dcmb=True) )
    else:
        y_bsamples = None
        y_bsamples_cmb = None


    return binner.centers,ypowers,y_bsamples,y_bsamples_cmb
Example 9
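# Corner plot of pairwise histograms; h is assumed (it is not shown above) to be a
# 6x6 nested structure of np.histogram / np.histogram2d outputs, with h[row][col][0]
# the counts and h[row][col][1] the bin edges.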
fig = plt.figure(1, figsize=(12, 12))
labels = ['$x$', '$y$', '$z$', '$v_x$', '$v_y$', '$v_z$']
plt.clf()
for col in range(6):
    for row in range(col + 1):
        print(row, col)
        ax = plt.axes([.05 + col * .15, .05 + row * .15, .15, .15])
        if row == col:
            x = h[row][col][1][1:]
            y = h[row][col][0]
            ax.plot(x, y, linewidth=.5)
            plt.ylabel(labels[row], rotation=0)
        else:
            z = h[row][col][0]
            z[z == 0] = min(z[z > 0])
            z = smooth(z, 10)
            ax.contour(np.log(z),
                       100,
                       linewidths=.2,
                       linestyles='solid',
                       colors='black',
                       alpha=.5)
            ax.imshow(z, alpha=.8)
        ax.set_xticks([])
        ax.set_yticks([])
        if row == 0:
            plt.xlabel(labels[col])

#-------------------------------------- Marginals

d = len(coordinates)
Example 10
    print('Data saved! Collecting more... You have %s minutes left.' % (recording_time * 5))

print('Tattoo done, making the images now...')

# Plot the raw data
plt.plot(range(len(all_data)), all_data)
plt.title("Oscilloscope Channel 1")
plt.ylabel("Voltage (V)")
plt.xlabel("Time")
plt.savefig('%s/%s/%s_raw.pdf' %(current_dir,file_name,file_name),dpi=1000)

# Plot fancy data
all_data = np.array(all_data)
noise = np.random.normal(np.mean(all_data),(np.std(all_data)/3),len(all_data))
all_data = all_data + noise
x = smooth(all_data,sigma = np.std(all_data)*10)
grid = x.reshape(ort,-1)
plt.figure(1)
plt.imshow(grid, cmap='nipy_spectral', interpolation='gaussian')  # 'spectral' was removed from matplotlib; 'nipy_spectral' replaces it
plt.savefig('%s/%s/%s_fancy.pdf' %(current_dir,file_name,file_name),dpi=1000)

# Make movie!
def write_movie(data, fps=5):
    global x
    global current_dir
    global file_name
    print('Making movie! Might take a bit of time (10-15 minutes). Be patient :)')
    name = 0
    for i in range(data.shape[0]):
        for j in range(data.shape[1]):
            name = name+1
Example 11
"""
from __future__ import division

import numpy as np
from numpy import concatenate as cc
from scipy.signal import fftconvolve
from scipy.stats import pearsonr
from scipy import stats
from scipy.ndimage import gaussian_filter1d as smooth
import matplotlib.pyplot as plt

from math_tools import stats as mt_stats

# TEST SIGNALS
T = np.arange(300)
X = smooth((np.random.uniform(0, 1, 300) > .95).astype(float), 5)
HT = np.arange(-20, 20)
H = np.exp(-HT / 7.)
H[H > 1] = 0
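# H is now a causal exponential kernel: weights at negative lags (HT < 0) are zeroed.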
Y = np.convolve(X, H, mode='same')

T1 = np.array([1, 1, 0, 0, 1, 1, 0, 0, 1, 1])


def fftxcorr(x, y, dt=1.):
    """Calculate the cross correlation between two signals using fft.

    Returns:
        time vector, cross correlation vector
    """
    # Minimal reconstruction of the truncated body: correlate mean-subtracted signals.
    x = x - x.mean()
    y = y - y.mean()
    xcorr = fftconvolve(x, y[::-1], mode='full')
    t = (np.arange(len(xcorr)) - (len(y) - 1)) * dt
    return t, xcorr
Example 13
gamts = np.zeros((szg,szg))
nadv = szx//szg  # block size; integer division so it can be used as a slice index
for i in range(szg):
    for j in range(szg):
        gamts[i,j] = gamt[nadv*i:nadv*(i+1),nadv*j:nadv*(j+1)].mean()

m,c,r,p,stderr = stats.linregress(gamts.ravel(),gamr.ravel())
gamr = (gamr-c)/m
diff = gamts-gamr
print('mc',m,c)
print('rp',r,p)
print('SN',np.abs(gamt).mean()/diff.std())

if len(sys.argv)>2:
    smth = float(sys.argv[2])
    gamts = smooth(gamts,smth)
    gamr = smooth(gamr,smth)

##plt.figure('In')
###plt.imshow(gamt,origin='lower',extent=(-600,600,-600,600),interpolation='nearest')
plt.figure('In small')
plt.pcolor(xs,xs,gamts)
plt.figure('out')
plt.pcolor(xs,xs,gamr)
plt.figure('in vs out')
plt.plot(gamts.ravel(),gamr.ravel(),'.')
plt.show()



Example 14
from orphics import maps,io,cosmology,stats,mpi
from pixell import enmap,curvedsky
from enlib import bench
import numpy as np
import os,sys,shutil
from tilec import fg as tfg,ilc,kspace,utils as tutils
from soapack import interfaces as sints
from szar import foregrounds as fgs
import healpy as hp
from actsims.util import seed_tracker

from scipy.ndimage import gaussian_filter as smooth

pm = enmap.read_map("/scratch/r/rbond/msyriac/data/planck/data/pr2/COM_Mask_Lensing_2048_R2.00_car_deep56_interp_order0.fits")
wcs = pm.wcs
pmask = enmap.enmap(smooth(pm,sigma=10),wcs)
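# Gaussian-smoothing the binary Planck lensing mask apodizes its edges.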

region = "deep56"
arrays = "p04,p05,p06,p07,p08".split(',')
mask = sints.get_act_mr3_crosslinked_mask(region) * pmask

# io.hplot(mask,"pmask.png")
# sys.exit()

w2 = np.mean(mask**2.)
modlmap = mask.modlmap()

lmax = 5500
ells = np.arange(0,lmax,1)
ctheory = ilc.CTheory(ells)
aspecs = tutils.ASpecs().get_specs
Example 15
rho = np.sqrt(uu**2+vv**2)
umax = u.max()
upix = np.gradient(u)[0]

nd,xb,yb = np.histogram2d(b1,b2,bins=np.linspace(-szx*pix/2,szx*pix/2,szk+1))
fov = nd>0

gamr = np.fft.fftshift(np.fft.ifftn(np.fft.ifftshift(fgamr))).real

if (szx//szk)%2==0:
    gamt = np.pad(gamt[:-(szx//(2*szk)),:-(szx//(2*szk))],((szx//(2*szk),0),(szx//(2*szk),0)),'constant')
gamt = gamt.reshape(szk,szx//szk,szk,-1).mean((1,3))
x = np.linspace(-szx*pix/2,szx*pix/2,szk)
xx,yy = np.meshgrid(x,x,sparse=1)

gamr = smooth(gamr,sfac)
gamt = smooth(gamt,sfac)
m,c,r,p,stderr = stats.linregress(gamt[fov],gamr[fov])
gamr -= c
gamr /= m
gamr *= fov
gamt *= fov
diff = gamr-gamt
print('Real space')
print('mc',m,c)
print('rps',r,p,stderr)
print('SN',diff.std()/gamt.std())

print('...')

fm,fc,fr,fp,fstderr = stats.linregress(fgamt[rho<=umax],fgamr[rho<=umax])