Example #1
0
def make_corr(filename, verbose=False, identifier=None, cross_power=False,
              multiplier=1.):
    """wrap the plot correlation class which reads correlation object shelve
    files; uses new binning methods in freq-slices. Note that correlations are
    converted into units of mK.
    """
    output = {}
    corr_shelve = shelve.open(filename + ".shelve")

    if (multiplier != 1.):
        print "WARNING: using a multiplier of: " + repr(multiplier)

    corr = corr_shelve["corr"] * multiplier
    run_params = corr_shelve["params"]

    try:
        corr_counts = corr_shelve["counts"]
    except KeyError:
        print "WARNING: unable to find counts weighting for correlation"
        corr_counts = None

    if identifier:
        print "binning the correlation function in: " + filename + \
              ".shelve" + " with id=" + identifier
    else:
        print "binning the correlation function in: " + filename + \
              ".shelve"

    if verbose:
        for key in run_params:
            print key + ": " + repr(run_params[key])
        #np.set_printoptions(threshold=np.nan)
        #print corr_shelve["corr"]

    corr[np.isnan(corr)] = 0.
    corr[np.isinf(corr)] = 0.

    lags = sp.array(run_params["lags"])
    real_lags = lags.copy()
    real_lags[0] = 0
    real_lags[1:] -= sp.diff(lags) / 2.0

    frange = run_params["freq"]
    realrange = corr_shelve["freq_axis"]
    corr_2d = ce.rebin_corr_freq_lag(corr, realrange[list(frange)],
                                     weights=corr_counts, return_fbins=True,
                                     nfbins=200)

    corr_1d = ce.collapse_correlation_1d(corr_2d[0], corr_2d[2], real_lags,
                                         weights=corr_2d[1])

    if cross_power:
        correlation_1d = corr_1d[0] * 1.e3
        correlation_2d = corr_2d[0] * 1.e3
    else:
        print "Taking the signed square root"
        correlation_1d = sp.sign(corr_1d[0]) * sp.sqrt(abs(corr_1d[0])) * 1e3
        correlation_2d = sp.sign(corr_2d[0]) * sp.sqrt(abs(corr_2d[0])) * 1e3

    output["run_params"] = run_params
    output["lags"] = run_params["lags"]
    output["real_lags"] = real_lags
    # uncomment these only if you need them in the shelve file; makes it huge
    #output["corr"] = corr
    #output["corr_counts"] = corr_counts
    output["freq"] = run_params["freq"]
    output["freq_axis"] = corr_shelve["freq_axis"]
    output["corr1D"] = correlation_1d
    output["corr1D_weights"] = corr_1d[1]
    output["corr1D_lags"] = tuple(corr_1d[2])
    output["corr2D"] = correlation_2d
    output["corr2D_weights"] = corr_2d[1]
    output["corr2D_fbins"] = corr_2d[2]
    if identifier:
        output["identifier"] = identifier

    return output
Example #2
0
def make_autocorr(filename, identifier=None,
                  thousand_multiplier=True, multiplier=1.):
    """Same as above but for autocorrs in NewSlices pickle objects.
    filename is the full path to the file and should inlude the .pkl ending.
    wrap the plot correlation class which reads correlation object shelve
    files; uses new binning methods in freq-slices. Note that correlations are
    converted into units of mK.
    """
    output = {}
    # Load the New_Slices_object.pkl
    pkl_handle = open(filename, "r")
    print filename
    pkl_obj = cPickle.load(pkl_handle)
    pkl_handle.close()

    # Setting axis info after pickling. Make sure to use a map with the proper
    # info set.
    map_file = "/mnt/raid-project/gmrt/calinliv/wiggleZ/maps/" + \
                   "sec_A_15hr_41-73_clean_map_I.npy"
    orig_map = algebra.make_vect(algebra.load(map_file))
    for pair in pkl_obj.pairs:
        pair.Map1.info = orig_map.info
        pair.Map2.info = orig_map.info
        pair.Noise_inv1.info = orig_map.info
        pair.Noise_inv2.info = orig_map.info

    # 3D->2D->1D
    corr_2d_list = []
    corr_1d_list = []
    for i in range(0, len(pkl_obj.pairs)):
        # The corr to use.
        corr = pkl_obj.pairs[i].corr
        if (multiplier != 1.):
            print "WARNING: using a multiplier of: " + repr(multiplier)
        corr *= multiplier
        # The lags used
        lags = sp.array(pkl_obj.params['lags'])
        real_lags = copy.deepcopy(lags)
        real_lags[0] = 0
        real_lags[1:] -= sp.diff(lags) / 2.0
        # The range selected in ini file.
        frange = pkl_obj.params['freq']
        # The corresponding real frequencies for that range.
        realrange = [pkl_obj.pairs[i].Map1.get_axis('freq')[f] for f in frange]
        # The 2D correlation.
        corr_2d = ce.rebin_corr_freq_lag(corr, realrange, nfbins=200,
                             weights=pkl_obj.pairs[i].counts, return_fbins=True)
        corr_2d_list.append(corr_2d[0])
        # The 1D correlation.
        corr_1d = ce.collapse_correlation_1d(corr_2d[0], corr_2d[2],
                                             real_lags, corr_2d[1])
        corr_1d_list.append(copy.deepcopy(corr_1d[0]))
        # The values for x_left, x_centre, x_right.
        x_axis = corr_1d[2]

    # Put the 1D correlations into a matrix to be averaged easily.
    matrix_1d = []
    for corr_1d in corr_1d_list:
        matrix_1d.append(corr_1d.tolist())

    matrix_1d = sp.array(matrix_1d)

    # Get the average 1D corr and it's sample variance.
    vals = []
    std = []
    for i in range(0, matrix_1d.shape[1]):
        # Get the sqrt to get mK.
        vals.append(sp.mean(sp.sign(matrix_1d[:, i]) * \
                    sp.sqrt(abs(matrix_1d[:, i]))))

        std.append(sp.std(sp.sign(matrix_1d[:, i]) * \
                   sp.sqrt(abs(matrix_1d[:, i]))))

    vals = sp.array(vals)
    std = sp.array(std)

    # Go from K to mK if True. If data is already in mK, then make it False.
    if thousand_multiplier:
        vals *= 1000.
        std *= 1000.

    # Build output dictionary.
    output["run_params"] = pkl_obj.params
    output["lags"] = pkl_obj.params["lags"]
    output["real_lags"] = real_lags
    # uncomment these only if you need them in the shelve file; makes it huge
    #output["corr"] = corr # Not supported for 6 pairs
    #output["corr_counts"] = corr_counts # not supported for 6 pairs.
    output["freq"] = pkl_obj.params["freq"]
    output["freq_axis"] = pkl_obj.pairs[0].Map1.get_axis('freq')
    output["corr1D"] = vals
    output["corr1D_std"] = std
#    output["corr1D_weights"] = corr_1d[1]
#    output["corr1D_lags"] = corr_1d[2]   # This is now the x-axis
    output["x_axis"] = x_axis
#    There are 6 of these now so it's weird.
#    output["corr2D"] = correlation_2d
#    output["corr2D_weights"] = corr_2d[1]   # Same as above.
    output["corr2D_fbins"] = corr_2d[2]  # Ok. Bins are the same for each pair.

    if identifier:
        return (identifier, output)

    return output