Example #1
import numpy as np


def _data_estimator(data, size=8, n=3, multilevel=False):
    from cddm.multitau import log_average, merge_multilevel
    from cddm.avg import denoise
    if not multilevel:
        # linear data: compute a log-spaced average directly
        x, y = log_average(data, size)
    else:
        # multilevel data: merge the levels into a single log-spaced dataset
        x, y = merge_multilevel(data)
    # in case we have NaNs, remove them before denoising and return only valid data:
    # keep time points where at least one value is finite
    mask = np.isnan(y)
    mask = np.logical_not(np.all(mask, axis=tuple(range(mask.ndim - 1))))
    return x[mask], denoise(y[..., mask], n=n)
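A minimal usage sketch for the helper above, assuming a hypothetical normalized correlation array with time on the last axis (real input would come from cddm's normalize functions):

import numpy as np

# hypothetical stand-in for normalized correlation data, shape (..., n_times)
correlation_data = np.random.rand(4, 4, 256)

# log-average the linear data, then denoise it with the default parameters
x, y = _data_estimator(correlation_data, size=8, n=3)
print(x.shape, y.shape)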
Example #2
    def _get_avg_data(self):
        # assumes module-level imports: numpy as np, plus normalize
        # (cddm.core) and log_average (cddm.multitau)
        data = normalize(self.data,
                         self.background,
                         self.variance,
                         norm=self.norm,
                         scale=self.scale,
                         mask=self.mask)

        # log-spaced time average if a size was given, otherwise keep the linear time axis
        if self.size is not None:
            t, data = log_average(data, self.size)
        else:
            t = np.arange(data.shape[-1])

        # average over the k-values axis, ignoring NaN entries
        return t, np.nanmean(data, axis=-2)
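The same pipeline outside of a class, as a self-contained sketch; the complex input frames here are random placeholders, and the imports reflect where these functions live in cddm (acorr, stats and normalize in cddm.core, log_average in cddm.multitau):

import numpy as np
from cddm.core import acorr, stats, normalize
from cddm.multitau import log_average

# hypothetical input: 128 frames of 8x8 complex FFT amplitudes, time axis first
fft_array = np.random.randn(128, 8, 8) + 1j * np.random.randn(128, 8, 8)

data = acorr(fft_array)      # raw (unnormalized) autocorrelation data
bg, var = stats(fft_array)   # background and variance estimators

data_lin = normalize(data, bg, var, scale=True)
t, avg = log_average(data_lin, size=8)

# average over one of the k axes, ignoring NaNs, as in the method above
print(t.shape, np.nanmean(avg, axis=-2).shape)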
Example #3
        # plotting excerpt: ax1/ax2, x, std, binning, BINNING_ERROR, MARKERS,
        # LABELS, norm, g1, w, wp, err2/err3/err6, i, j and NFRAMES are defined
        # in the surrounding script
        if binning == BINNING_ERROR:
            ax2.semilogx(x[1:],
                         std[1:],
                         marker=MARKERS.get(norm, "o"),
                         linestyle='',
                         fillstyle="none",
                         label="${}$".format(LABELS.get(norm)))
        else:
            #ax1.semilogx(x[1:],y[0,1:],linestyle = ':',fillstyle = "none")

            ax2.semilogx(x[1:], std[1:], linestyle=':', fillstyle="none")

ax1.plot(x[1:], g1(x[1:], i, j), "k", label="$g$")

#: take first run, norm = 3 data for g estimation
x, g = log_average(data[0, 3, i, j, :])
g = denoise(g)
g = decreasing(g)  # enforce a monotonically decreasing correlation function
g = g.clip(0, 1)   # clip to the physically meaningful range

#ax1.plot(x[1:],g[1:], "k:",label = "denoised")

x = np.arange(NFRAMES)
ax1.plot(x[1:], w[1:], "k--", label="$w$")
ax1.plot(x[1:], wp[1:], "k:", label="$w'$")

#ax2.set_ylim(ax1.get_ylim())

#: build log-spaced error curves: multilevel binning followed by a merge
x, err2 = merge_multilevel(multilevel(err2, binning=0))
x, err3 = merge_multilevel(multilevel(err3, binning=0))
x, err6 = merge_multilevel(multilevel(err6, binning=0))
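The multilevel/merge_multilevel pair used for the error curves can be exercised on its own; a minimal sketch on placeholder data (the shape and the binning=0 value simply mirror the excerpt above):

import numpy as np
from cddm.multitau import multilevel, merge_multilevel

# hypothetical linearly spaced data, time on the last axis (1024 = 16 * 64 points)
err = np.random.rand(4, 1024)

# bin the linear data into levels, then merge them into one log-spaced dataset
x, err_log = merge_multilevel(multilevel(err, binning=0))
print(x.shape, err_log.shape)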
Example #4
#: load the simulated video into a numpy array; fft, video_simulator, NFRAMES,
#: NFRAMES_RANDOM, DT_RANDOM and DATA_PATH come from the surrounding script
fft_array, = asarrays(fft, NFRAMES_RANDOM)

if __name__ == "__main__":
    import os.path as p

    #: now perform auto correlation calculation with default parameters
    data = acorr(fft_array, t=video_simulator.t, n=int(NFRAMES / DT_RANDOM))
    bg, var = stats(fft_array)

    for norm in (1, 2, 3, 5, 6, 7, 9, 10, 11):

        #: perform normalization and merge data
        data_lin = normalize(data, bg, var, scale=True, norm=norm)

        #: change size to define the time resolution in log space
        x, y = log_average(data_lin, size=16)

        #: save the normalized data to numpy files
        np.save(p.join(DATA_PATH, "corr_random_t.npy"), x * DT_RANDOM)
        np.save(p.join(DATA_PATH, "corr_random_data_norm{}.npy".format(norm)), y)

    #: inspect the data
    viewer = CorrViewer(scale=True)
    viewer.set_data(data, bg, var)
    viewer.set_mask(k=25, angle=0, sector=30)
    viewer.plot()
    viewer.show()
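To read the saved results back, a minimal sketch (DATA_PATH here is a placeholder for whatever output directory the script used):

import os.path as p
import numpy as np

DATA_PATH = "."  # placeholder: the output directory used by the script above

t = np.load(p.join(DATA_PATH, "corr_random_t.npy"))
for norm in (1, 2, 3, 5, 6, 7, 9, 10, 11):
    y = np.load(p.join(DATA_PATH, "corr_random_data_norm{}.npy".format(norm)))
    print(norm, t.shape, y.shape)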