Example #1
0
def weight_prime_from_data(corr,
                           bg1,
                           bg2,
                           delta=0.,
                           scale_factor=1.,
                           mode="corr",
                           pre_filter=True):
    """Computes weighting function for weighted normalization.

    Parameters
    ----------
    corr : ndarray
        Correlation (or difference) data
    bg1 : ndarray
        Background data of the first signal (broadcast over the last axis).
    bg2 : ndarray
        Background data of the second signal (broadcast over the last axis).
    delta : ndarray, optional
        Delta parameter passed through to the weight calculation
        (presumably as defined by :func:`weight_prime_from_g` -- see there).
    scale_factor : ndarray
        Scaling factor as returned by :func:`.core.scale_factor`. If not provided,
        corr data must be computed with scale = True option.
    mode : str
        Representation mode of the data, either 'corr' (default) or 'diff'
    pre_filter : bool
        Whether to perform denoising and filtering. If set to False, user has 
        to perform data filtering.

    Returns
    -------
    out : ndarray
        Weight data for weighted sum calculation.

    Raises
    ------
    ValueError
        If `mode` is neither 'corr' nor 'diff'.
    """
    scale_factor = np.asarray(scale_factor)
    delta = np.asarray(delta)
    bg1, bg2 = np.asarray(bg1), np.asarray(bg2)
    if mode == "corr":
        if pre_filter:
            # denoise, make sure the data is decreasing, and clip
            # between 0 and the scale factor before a final denoise pass
            corr = _avg.denoise(corr)
            corr = _avg.decreasing(corr)
            corr = np.clip(corr, 0., scale_factor[..., None])
            corr = _avg.denoise(corr)

        # normalize to obtain the correlation estimator g
        g = np.divide(corr, scale_factor[..., None])

        return weight_prime_from_g(g, delta[..., None], bg1[..., None],
                                   bg2[..., None])

    elif mode == "diff":
        if pre_filter:
            # difference data is increasing; clip between 0 and twice the scale
            corr = _avg.denoise(corr)
            corr = _avg.increasing(corr)
            corr = np.clip(corr, 0., scale_factor[..., None] * 2)
            corr = _avg.denoise(corr)

        # normalize to obtain the difference estimator d
        d = np.divide(corr, scale_factor[..., None])

        return weight_prime_from_d(d, delta[..., None], bg1[..., None],
                                   bg2[..., None])
    else:
        raise ValueError("Wrong mode.")
Example #2
0
def _data_estimator(data, size=8, n=3, multilevel=False):
    """Computes a denoised, NaN-free estimate of correlation data.

    Parameters
    ----------
    data : ndarray
        Input correlation data.
    size : int
        Bin size for :func:`cddm.multitau.log_average`
        (used only when `multilevel` is False).
    n : int
        Denoising parameter passed to :func:`cddm.avg.denoise`.
    multilevel : bool
        If True, `data` is treated as multilevel data and merged with
        :func:`cddm.multitau.merge_multilevel` instead of log-averaged.

    Returns
    -------
    x, y : ndarray, ndarray
        Valid x values and the corresponding denoised y data.
    """
    from cddm.multitau import log_average, merge_multilevel
    from cddm.avg import denoise
    if multilevel:
        x, y = merge_multilevel(data)
    else:
        x, y = log_average(data, size)
    # in case we have nans, remove them before denoising, and return only valid
    # data: keep a last-axis entry only if it is not NaN across all other axes
    mask = np.isnan(y)
    mask = np.logical_not(np.all(mask, axis=tuple(range(mask.ndim - 1))))
    return x[mask], denoise(y[..., mask], n=n)
Example #3
0
            ax2.semilogx(x[1:],
                         std[1:],
                         marker=MARKERS.get(norm, "o"),
                         linestyle='',
                         fillstyle="none",
                         label="${}$".format(LABELS.get(norm)))
        else:
            #ax1.semilogx(x[1:],y[0,1:],linestyle = ':',fillstyle = "none")

            ax2.semilogx(x[1:], std[1:], linestyle=':', fillstyle="none")

#: plot the reference g curve for the selected (i, j) k-value
ax1.plot(x[1:], g1(x[1:], i, j), "k", label="$g$")

#: take first run, norm = 3 data for g estimation
x, g = log_average(data[0, 3, i, j, :])
#: denoise, force the estimate to be decreasing, and clip to the valid [0, 1] range
g = denoise(g)
g = decreasing(g)
g = g.clip(0, 1)

#ax1.plot(x[1:],g[1:], "k:",label = "denoised")

#: plot the weights w and w' over the full frame range
x = np.arange(NFRAMES)
ax1.plot(x[1:], w[1:], "k--", label="$w$")
ax1.plot(x[1:], wp[1:], "k:", label="$w'$")

#ax2.set_ylim(ax1.get_ylim())

#: merge the multilevel error data onto a common log-spaced x axis
x, err2 = merge_multilevel(multilevel(err2, binning=0))
x, err3 = merge_multilevel(multilevel(err3, binning=0))
x, err6 = merge_multilevel(multilevel(err6, binning=0))
x, err0 = merge_multilevel(multilevel(err0, binning=0))
#: norm = 5 data -- NOTE(review): naming suggests the "_norm_5" files; confirm
lin_5 = np.load(p.join(DATA_PATH,"cross_correlate_multi_raw_fast_norm_5.npy"))
multi_5 = np.load(p.join(DATA_PATH,"cross_correlate_multi_raw_slow_norm_5.npy"))

#: norm = 3 data normalized with scale = True
lin_6 = np.load(p.join(DATA_PATH,"cross_correlate_multi_raw_fast_norm_6.npy"))
multi_6 = np.load(p.join(DATA_PATH,"cross_correlate_multi_raw_slow_norm_6.npy"))

#: take (i,j) k value
(i,j) = (12,2)

#: two stacked axes sharing the (lag time) x axis
f, (ax1, ax2) = plt.subplots(2, 1, sharex=True)

x,y = log_merge(lin_6, multi_6)

#: denoised data, used as an estimator for the weight
yd = denoise(decreasing(np.clip(denoise(y),0,1)))

x,y = log_merge(lin_5, multi_5)
ax2.semilogx(x,y[i,j], label = "norm 5")

x,y = log_merge(lin_6, multi_6)
ax2.semilogx(x,y[i,j], label = "norm 6")

ax1.semilogx(x,yd[i,j], label = "g1")

#: now calculate weights and do weighted sum.

#: x values for the interpolator
x_lin = np.arange(lin_6.shape[-1])
#: weight for linear part. We have already filtered the data, so pre_filter = False
w_lin = weight_from_data(yd, pre_filter = False)