Example no. 1
    def __init__(self, originals: ep.Tensor, random_noise: str = "normal", basis_type: str = "dct", **kwargs: Any):
        """
        Args:
            random_noise (str, optional): Noise added when the basis is created;
                                          either "normal" or "uniform". Defaults to "normal".
            basis_type (str, optional): Type of the basis: DCT, Random, or Genetic.
                                        Defaults to "dct".
            kwargs: The basis parameters:
                    * Random: no parameters
                    * DCT:
                            * function (tanh / constant / linear): function applied on the DCT
                            * beta
                            * gamma
                            * frequence_range: tuple of 2 floats
                            * dct_type: 8x8 or full
        """
        self._originals = originals
        if isinstance(self._originals.raw, torch.Tensor):
            self._f_dct2 = lambda a: torch_dct.dct_2d(a)
            self._f_idct2 = lambda a: torch_dct.idct_2d(a)
        elif isinstance(self._originals.raw, np.ndarray):
            from scipy import fft
            self._f_dct2 = lambda a: fft.dct(fft.dct(a, axis=2, norm='ortho'), axis=3, norm='ortho')
            self._f_idct2 = lambda a: fft.idct(fft.idct(a, axis=2, norm='ortho'), axis=3, norm='ortho')

        self.basis_type = basis_type
        self._function_generation = getattr(self, "_get_vector_" + self.basis_type)
        self._load_params(**kwargs)

        assert random_noise in ["normal", "uniform"]
        self.random_noise = random_noise
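
A quick sanity check of the SciPy branch above (a standalone sketch, not part of the original class): with norm='ortho' the separable 2-D DCT/IDCT pair inverts exactly on NCHW batches.

import numpy as np
from scipy import fft

x = np.random.default_rng(0).standard_normal((2, 3, 8, 8))
f_dct2 = lambda a: fft.dct(fft.dct(a, axis=2, norm='ortho'), axis=3, norm='ortho')
f_idct2 = lambda a: fft.idct(fft.idct(a, axis=2, norm='ortho'), axis=3, norm='ortho')
assert np.allclose(f_idct2(f_dct2(x)), x)  # orthonormal transforms invert exactly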
Example no. 2
def read_and_resize_zeroModes(path_to_vols, vol_name, ntup, toDir='./'):
    '''
    Open and pad (or truncate) the Chebyshev coefficients of the mean fields
    stored in CheckPoints. In contrast to the fluctuations (kxky fields), spectral
    coefficients are stored directly. Hence, instead of NZAA (grid points),
    we store (NZ - nbc), where NZ = NZAA*3//2 and nbc is the number of
    boundary conditions for a given field.
    '''
    ierror = 0
    domain_decomp_infos = np.fromfile(path_to_vols +
                                      '../Geometry/domDecmp.core0000',
                                      dtype=np.int32)
    readVec = np.fromfile(path_to_vols + vol_name, dtype=np.float_)
    Nold = readVec.shape[0]
    Nnew = ntup[2]

    coscoefs = dct(readVec)
    if (Nnew > Nold):
        newcoefs = np.zeros((Nnew), dtype=np.float_)
        newcoefs[:Nold] = coscoefs
    else:
        newcoefs = coscoefs[:Nnew]

    aux2 = idct(newcoefs) * Nnew / Nold
    aux2.tofile(toDir + '/Restart/' + vol_name)
    return ierror
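
A minimal standalone illustration (hypothetical data, not the CheckPoints files) of the pad-then-rescale idea: DCT-resampling a smooth field from 16 to 24 points reproduces it at the new nodes, with the Nnew/Nold factor compensating SciPy's 1/(2N) scaling in idct.

import numpy as np
from scipy.fft import dct, idct

Nold, Nnew = 16, 24
z_old = np.cos(np.pi * (np.arange(Nold) + 0.5) / Nold)   # Chebyshev-like nodes
f_old = np.exp(z_old)                                    # smooth test field

coefs = dct(f_old)                  # unnormalized DCT-II, as above
padded = np.zeros(Nnew)
padded[:Nold] = coefs
f_new = idct(padded) * Nnew / Nold  # rescale for the new length

z_new = np.cos(np.pi * (np.arange(Nnew) + 0.5) / Nnew)
assert np.allclose(f_new, np.exp(z_new))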
Example no. 3
def IDCT(x):
    """
    Inverse discrete cosine transform.
    :param x: input array
    :return: inverse DCT of x
    """
    return idct(x, norm='ortho')

    def __apply_inverse_trans(self, trans_tensor, transformation, axis):

        if transformation == "dwt":
            cA, cD = np.split(trans_tensor, 2, axis=axis)
            result = pywt.idwt(cA, cD, "haar", axis=axis)
        elif transformation == "dct":
            result = sfft.idct(trans_tensor, axis=axis)
        elif transformation == "dft":
            raise NotImplementedError()
        else:
            raise ValueError(
                f"{transformation} is not a valid transformation")

        return result
Example no. 5
    def __call__(self, signal, noise, slope=np.nan, flagged=None):

        self.flagged = flagged if flagged is not None else self.flagged
        self.calculate_power_spectrum()

        self.signal, self.noise = signal, noise

        lpwr0 = np.log10(np.mean(self.pwr[:signal*2//3])*2)
        self.slope = slope if np.isfinite(slope) else (self.lpwr[signal] - lpwr0) / signal
        s = np.power(10, lpwr0 + self.slope * self.bins)
        ff = s / (s + 10**noise)

        smooth = idct(self.ft*ff, norm='ortho') * self.p(self.spectrum.wave)

        return smooth
Example no. 6
def dct_transform(sample_rate, data, file, sample_per_frame, compress_ratio):
    print("Original data: ", data)

    # Pad zeros at the end of the data array so that its length is divisible
    # by sample_per_frame
    numzeros = -len(data) % sample_per_frame  # Number of zeros to pad (0 if already divisible)
    print("Padding zeroes to original data...")
    padded_data = pad(data, (0, numzeros), "constant", constant_values=0)

    # Divide the data into frames of sample_per_frame samples each
    frames = {}
    count = 0
    print(f"Dividing data into frames of {sample_per_frame} samples each...")
    for i in range(0, len(padded_data), sample_per_frame):
        frames[count] = padded_data[i:i + sample_per_frame]
        count += 1

    # Perform DCT on each frame, slice the frame to the data cutoff index, pad zeroes and use IDCT
    frames_idct = {}
    compressed_data = []
    sample_taken = int(round(sample_per_frame * compress_ratio))

    print("Performing DCT on each frame...")
    for num in frames:
        dct_frame = dct(frames[num], norm="ortho")[:sample_taken]
        compressed_data.append(dct_frame.astype(data.dtype))
        padded_dct_frame = pad(dct_frame, (0, sample_per_frame - sample_taken),
                               "constant",
                               constant_values=0)
        frames_idct[num] = idct(padded_dct_frame, norm="ortho")

    # Write the compressed data to a new binary file with extension cpz
    print("Writing to a compressed file in './compressed/dct/' ...")
    filename = file.split("/")[1]  # Get the filename
    compressed = CompressedFile(Type.DCT, compressed_data)
    write_compressed_file(compressed, "dct/" + filename.split(".")[0] + ".cpz")

    # Reconstruct the data array from frames
    reconstructed_data = []
    for num in frames_idct:
        reconstructed_data.extend(frames_idct[num])
    reconstructed_data = array(reconstructed_data)[:len(data)].astype(
        data.dtype)

    return reconstructed_data
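
For intuition about compress_ratio (an illustrative aside, not part of the original pipeline): for smooth frames, the ortho-normalized DCT concentrates energy in the leading coefficients, so a 4:1 truncation reconstructs almost perfectly.

import numpy as np
from scipy.fft import dct, idct

frame = np.sin(2 * np.pi * 3 * np.linspace(0, 1, 256))  # smooth test frame
coefs = dct(frame, norm="ortho")

kept = coefs.copy()
kept[64:] = 0                                           # keep 25% of coefficients
rec = idct(kept, norm="ortho")

rel_err = np.linalg.norm(rec - frame) / np.linalg.norm(frame)
print(f"relative reconstruction error at 4:1 compression: {rel_err:.2e}")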
def FJLT(sz,  rng=np.random.default_rng() ):
    m, M    = sz
    d       = np.sign( rng.standard_normal(size=M) ).astype( np.int64 ) # or rng.choice([1, -1], M)
    ind     = rng.choice( M, size=m, replace=False, shuffle=False)
    # IMPORTANT: make sure axis=0
    DCT_type = 3  # 2 or 3; with norm='ortho' the DCT is orthogonal, so the
    # same-type idct acts as its transpose
    myDCT   = lambda X: dct(X, norm='ortho', type=DCT_type, axis=0)
    # and its transpose
    myDCT_t = lambda X: idct(X, norm='ortho', type=DCT_type, axis=0)

    f       = lambda X : np.sqrt(M/m)*_subsample( myDCT( _elementwiseMultiply(d,X)) , ind)
    # and make adjoint operator
    def upsample(Y):
        if Y.ndim == 1:
            Z = np.zeros( M )
            Z[ind] = Y
        else:
            Z = np.zeros( (M,Y.shape[1]))
            Z[ind,:] = Y
        return Z
    adj     = lambda Z: np.sqrt(M/m) * _elementwiseMultiply(d, myDCT_t(upsample(Z)))

    S       = LinearOperator((m, M), matvec=f, matmat=f, rmatvec=adj, rmatmat=adj)
    return S
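
FJLT above leans on two helpers and SciPy's LinearOperator that are not shown; the following are minimal sketches consistent with how they are called (assumptions, since the originals are elided):

import numpy as np
from scipy.fft import dct, idct
from scipy.sparse.linalg import LinearOperator

def _subsample(X, ind):
    # Keep only the rows indexed by ind (vector or matrix input).
    return X[ind] if X.ndim == 1 else X[ind, :]

def _elementwiseMultiply(d, X):
    # Multiply each row of X by the corresponding entry of the sign vector d.
    return d * X if X.ndim == 1 else d[:, None] * X

# Usage: S behaves like an (m, M) matrix that roughly preserves norms, e.g.
# S = FJLT((64, 1024)); y = S @ np.random.default_rng(1).standard_normal(1024)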
Example no. 8
def ξγ(t, l=7):
    """Returns ξ γ^[l] as a function of diffusion time t (opening reconstructed
    from the full kde1d listing in Example no. 16)."""
    f = 2 * π**(2 * l) * sum(k2**l * a2 * exp(-π**2 * k2 * t))
    for s in range(l - 1, 1, -1):
        K = product(range(1, 2 * s, 2)) / sqrt(2 * π)
        C = (1 + (1 / 2)**(s + 1 / 2)) / 3
        t = (2 * C * K / N / f)**(2 / (3 + 2 * s))
        f = 2 * π**(2 * s) * sum(k2**s * a2 * exp(-π**2 * k2 * t))
    return (2 * N * sqrt(π) * f)**(-2 / 5)


# Solve for optimal diffusion time t*.
ts = brentq(lambda t: t - ξγ(t), 0, 0.1)

# Apply Gaussian filter with optimized kernel.
smoothed = transformed * exp(-π**2 * ts / 2 * k**2)

# Reverse transformation.
smoothed[0] *= 2
inverse = idct(smoothed)

# Normalize density.
density = inverse * n / Δx

# Determine bandwidth from diffusion time.
bandwidth = sqrt(ts) * Δx

# Plot (slightly different) density versus reference.
figure = pyplot.figure()
axes = figure.add_subplot()
axes.grid()
axes.plot(ref['density'])
axes.plot(density)
pyplot.show()
Example no. 9
def idct2(block):
    return idct(idct(block.T, norm='ortho').T, norm='ortho')
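
The matching forward transform is the same separable row/column construction with dct (an assumed counterpart, added for completeness); together they round-trip exactly:

import numpy as np
from scipy.fft import dct, idct

def dct2(block):
    return dct(dct(block.T, norm='ortho').T, norm='ortho')

block = np.random.default_rng(1).standard_normal((8, 8))
assert np.allclose(idct2(dct2(block)), block)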
Example no. 10
def idct2(a):
    return idct(idct(a.T, norm='ortho').T, norm='ortho')
            j += 1
        else:
            y.append(o3_smooth)
            x.append(t[i] - N/2)
            j = 1
            o3_smooth = y_data[i]/N
    #fitting for rolling average
    t = x
    y_o3 = y

try:
    #discrete cosine transform
    y = dct(y_o3, norm='ortho')
    window = np.zeros(len(t))
    window[:5] = 1
    yr = idct(y*window, norm='ortho')

    #peak day for plotting
    if t[np.argmax(yr)] != t[len(t)-1] and t[np.argmax(yr)] != t[0]:
        peaks.append(t[np.argmax(yr)])
        no_data_years += 1

    #for plotting individual years
    """
    ax1 = plt.subplot()
    ax1.plot(t, y_o3, color="blue")
    ax1.plot(t, yr, color="red")
    plt.show()
    """
except ValueError:
    #not enough data for this year
    pass
Example no. 12
def blockIdct(block):
    return idct(idct(block.T, norm='ortho').T, norm='ortho')
Example no. 13
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 16 11:41:33 2020

@author: Max Sours, Anntara Khan
"""
import numpy as np
import matplotlib.pyplot as plt
import scipy.fft as fft

dow = np.loadtxt("dow2.txt")

dow_fft = np.fft.rfft(dow)
dow_fft[len(dow_fft) // 50:] = 0  # Zero all but the first (lowest-frequency) 2% of terms
smoothed_dow = np.fft.irfft(
    dow_fft)  # Inverse transform using only the retained low-frequency terms

dow_dct = fft.dct(dow)
dow_dct[len(dow_dct) // 50:] = 0
smoothed_dow2 = fft.idct(dow_dct)

#This is the plotting code
plt.plot(range(len(dow)), dow, ":", label="Dow Data")
plt.plot(range(len(smoothed_dow)), smoothed_dow, label="Smoothed Dow (FFT)")
plt.plot(range(len(smoothed_dow2)), smoothed_dow2, label="Smoothed Dow (DCT)")
plt.title("Dow from 2004 to 2008")
plt.xlabel("Days since 2004")
plt.ylabel("Dow")
plt.legend()
plt.show()
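
The two smoothers treat the boundaries differently: rfft assumes the series is periodic (implicitly joining the 2004 and 2008 endpoints), while dct assumes an even-symmetric extension. A small sketch of that edge behavior on a plain ramp, reusing the imports above (illustrative, not part of the original script):

ramp = np.linspace(0, 1, 200)
r_fft = np.fft.rfft(ramp)
r_fft[4:] = 0
r_dct = fft.dct(ramp)
r_dct[4:] = 0
# The periodic (FFT) reconstruction overshoots badly at the ends of the ramp;
# the even-extension (DCT) reconstruction stays much closer to the endpoints.
print(abs(np.fft.irfft(r_fft, n=200)[0] - ramp[0]),
      abs(fft.idct(r_dct)[0] - ramp[0]))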
def main():
    # Relies on: import sys, re, time; import numpy as np; import scipy.fft as fft;
    # from threading import get_ident; plus the repo helper getAndreiSize.

    path = ''
    fname = ''
    if len(sys.argv) < 2:
        print('syntax is: ./src/load_andrei_DCTvDFT.py <datafile>')
        return
    m = re.match('^(.+)/(.+)$', sys.argv[1])
    if m:
        path = m.group(1)
        fname = m.group(2)
    else:
        print('Failed the filename match')
        return

    sizeoftrace = getAndreiSize('%s/%s_HEADER.txt' % (path, fname))
    sz = sizeoftrace // 8
    ninstances = 32
    nbatches = 4
    tstep_ns = 1. / 40

    tid = get_ident()
    clkID = time.pthread_getcpuclockid(tid)

    timeslist = []
    dft_timeslist = []
    for batch in range(nbatches):
        print('Processing batch %i of %i instances' % (batch, ninstances))
        raw = np.fromfile('%s/%s' % (path, fname),
                          count=sz * ninstances,
                          offset=batch * ninstances * sizeoftrace,
                          dtype=float)
        data = raw.reshape(ninstances, sz).T
        slim = 2**int(np.log2(data.shape[0]))
        lim = int(2**12)
        print(slim, lim)
        d = np.row_stack((data[:slim, :], np.flipud(data[:slim, :])))
        freqs = np.fft.fftfreq(d.shape[0])
        freqsmat = np.tile(freqs, (d.shape[1], 1)).T
        frq = np.arange(lim, dtype=float)
        flt1d = frq * (1. + np.cos(frq * np.pi / frq.shape[0]))
        filt = np.tile(flt1d, (d.shape[1], 1)).T
        t_0 = time.clock_gettime_ns(clkID)
        DC = fft.dct(d, axis=0)
        #DC[lim:2*lim,:] = 0
        DC[lim:, :] = 0
        DC[:lim, :] *= filt
        #DS[5000:,:] = 0
        dcc = fft.idct(DC[:2 * lim, :], axis=0)
        dsc = fft.idst(DC[:2 * lim, :], axis=0)
        #dcc = fft.idct(DC,axis=0)
        #dsc = fft.idst(DC,axis=0)
        timeslist += [time.clock_gettime_ns(clkID) - t_0]
        logic = dsc * dcc
        np.savetxt('%s/%s_b%i_sample.dat' % (path, fname, batch),
                   d * 1e5,
                   fmt='%i')
        np.savetxt('%s/%s_b%i_dct.dat' % (path, fname, batch), DC, fmt='%.3f')
        #np.savetxt('%s/%s_b%i_dst.dat'%(path,fname,batch),DS,fmt='%.3f')
        np.savetxt('%s/%s_b%i_idct.dat' % (path, fname, batch),
                   dcc,
                   fmt='%.3f')
        np.savetxt('%s/%s_b%i_idst.dat' % (path, fname, batch),
                   dsc,
                   fmt='%.3f')
        np.savetxt('%s/%s_b%i_logic.dat' % (path, fname, batch),
                   logic[:lim, :],
                   fmt='%.3f')
        t_0 = time.clock_gettime_ns(clkID)
        D = np.fft.fft(d, axis=0)
        DD = 1j * freqsmat * D
        logic = np.fft.ifft(D * DD, axis=0).real
        dft_timeslist += [time.clock_gettime_ns(clkID) - t_0]
        np.savetxt('%s/%s_b%i_dft_logic.dat' % (path, fname, batch),
                   logic,
                   fmt='%.3f')

    print(timeslist)
    print(dft_timeslist)

    return
Example no. 15
def solve_time_domain(Nx, f):
    dt = 10e-6
    Ntime = int(round(f.size / 2))
    T = Ntime * dt

    t2 = np.arange(0, T, dt / 2)
    t = np.arange(0, T, dt)

    g = 0.8
    gamma = np.ones(Nx) * g
    pc = params_cochlea(Nx, gamma)

    Tpre_start = time()

    alpha2 = 4 * pc.rho * pc.b / pc.H / pc.m1

    kx = np.arange(1, Nx + 1)
    ax = np.pi * (2 * kx - 1) / 4 / Nx
    mwx = -4 * np.sin(ax)**2 / pc.dx**2

    vb = np.zeros((Ntime, Nx))
    ub = np.zeros((Ntime, Nx))
    vt = np.zeros((Ntime, Nx))
    ut = np.zeros((Ntime, Nx))

    p = np.zeros((Ntime, Nx))

    phat = np.zeros(Nx)
    Tpre = time() - Tpre_start

    Tmain_start = time()

    for ii in tqdm.tqdm(range(Ntime - 1)):
        ######### RK4 ##################

        # (ii)
        gb, gt = get_g(pc, vb[ii], ub[ii], vt[ii], ut[ii])

        k = -alpha2 * gb
        k[0] -= f[ii * 2] * 2 / pc.dx

        #(iii)
        khat = dct(k, type=3)
        phat = khat / (mwx - alpha2)
        p[ii] = idct(phat, type=3)

        #(iv)-(v)
        dvb1 = (p[ii] - gb) / pc.m1
        ub1 = ub[ii] + 0.5 * dt * vb[ii]
        vb1 = vb[ii] + 0.5 * dt * dvb1

        dvt1 = -gt / pc.m2
        ut1 = ut[ii] + 0.5 * dt * vt[ii]
        vt1 = vt[ii] + 0.5 * dt * dvt1

        # (ii)
        gb, gt = get_g(pc, vb1, ub1, vt1, ut1)

        k = -alpha2 * gb
        k[0] -= f[ii * 2 + 1] * 2 / pc.dx

        #(iii)

        khat = dct(k, type=3)
        phat = khat / (mwx - alpha2)
        p1 = idct(phat, type=3)

        #(iv)-(v)
        dvb2 = (p1 - gb) / pc.m1
        ub2 = ub[ii] + 0.5 * dt * vb1
        vb2 = vb[ii] + 0.5 * dt * dvb2

        dvt2 = -gt / pc.m2
        ut2 = ut[ii] + 0.5 * dt * vt1
        vt2 = vt[ii] + 0.5 * dt * dvt2

        # (ii)
        gb, gt = get_g(pc, vb2, ub2, vt2, ut2)

        k = -alpha2 * gb
        k[0] -= f[ii * 2 + 1] * 2 / pc.dx

        #(iii)

        khat = dct(k, type=3)
        phat = khat / (mwx - alpha2)
        p2 = idct(phat, type=3)

        #(iv)-(v)
        dvb3 = (p2 - gb) / pc.m1
        ub3 = ub[ii] + dt * vb2
        vb3 = vb[ii] + dt * dvb3

        dvt3 = -gt / pc.m2
        ut3 = ut[ii] + dt * vt2
        vt3 = vt[ii] + dt * dvt3

        # (ii)
        gb, gt = get_g(pc, vb3, ub3, vt3, ut3)

        k = -alpha2 * gb
        k[0] -= f[ii * 2 + 2] * 2 / pc.dx

        #(iii)

        khat = dct(k, type=3)
        phat = khat / (mwx - alpha2)
        p3 = idct(phat, type=3)

        #(iv)-(v)
        dvb4 = (p3 - gb) / pc.m1

        dvt4 = -gt / pc.m2

        ub[ii + 1] = ub[ii] + dt / 6 * (vb[ii] + 2 * vb1 + 2 * vb2 + vb3)
        vb[ii + 1] = vb[ii] + dt / 6 * (dvb1 + 2 * dvb2 + 2 * dvb3 + dvb4)
        ut[ii + 1] = ut[ii] + dt / 6 * (vt[ii] + 2 * vt1 + 2 * vt2 + vt3)
        vt[ii + 1] = vt[ii] + dt / 6 * (dvt1 + 2 * dvt2 + 2 * dvt3 + dvt4)

    Tmain = time() - Tmain_start

    return vb, ub, p, Tpre, Tmain
Example no. 16
def kde1d(x, n=1024, limits=None):
    """
    Estimates the 1d density from discrete observations.

    The input is a list/array `x` of numbers that represent discrete
    observations of a random variable. They are binned on a grid of
    `n` points within the data `limits`, if specified, or within
    the limits given by the values' range. `n` will be coerced to the
    next highest power of two if it isn't one to begin with.

    The limits may be given as a tuple (`xmin`, `xmax`) or a single
    number denoting the upper bound of a range centered at zero.
    If any of those values are `None`, they will be inferred from the
    data.

    After binning, the function determines the optimal bandwidth
    according to the diffusion-based method. It then smooths the
    binned data over the grid using a Gaussian kernel with a standard
    deviation corresponding to that bandwidth.

    Returns the estimated `density` and the `grid` upon which it was
    computed, as well as the optimal `bandwidth` value the algorithm
    determined. Raises `ValueError` if the algorithm did not converge.
    """

    # Convert to array in case a list is passed in.
    x = array(x)

    # Round up number of bins to next power of two.
    n = int(2**ceil(log2(n)))

    # Determine missing data limits.
    if limits is None:
        xmin = xmax = None
    elif isinstance(limits, tuple):
        (xmin, xmax) = limits
    else:
        xmin = -limits
        xmax = +limits
    if None in (xmin, xmax):
        delta = x.max() - x.min()
        if xmin is None:
            xmin = x.min() - delta/10
        if xmax is None:
            xmax = x.max() + delta/10

    # Determine data range, required for scaling.
    Δx = xmax - xmin

    # Determine number of data points.
    N = len(x)

    # Bin samples on regular grid.
    (binned, edges) = histogram(x, bins=n, range=(xmin, xmax))
    grid = edges[:-1]

    # Compute discrete cosine transform, then adjust first component.
    transformed = dct(binned/N)
    transformed[0] /= 2

    # Pre-compute squared indices and transform components before solver loop.
    k  = arange(n, dtype='float')      # "float" avoids integer overflow.
    k2 = k**2
    a2 = (transformed/2)**2

    # Define internal function to be solved iteratively.
    def ξγ(t, l=7):
        """Returns ξ γ^[l] as a function of diffusion time t."""
        f = 2*π**(2*l) * sum(k2**l * a2 * exp(-π**2 * k2*t))
        for s in range(l-1, 1, -1):
            K = product(range(1, 2*s, 2)) / sqrt(2*π)
            C = (1 + (1/2)**(s+1/2)) / 3
            t = (2*C*K/N/f)**(2/(3+2*s))
            f = 2*π**(2*s) * sum(k2**s * a2 * exp(-π**2 * k2*t))
        return (2*N*sqrt(π)*f)**(-2/5)

    # Solve for optimal diffusion time t*.
    try:
        ts = brentq(lambda t: t - ξγ(t), 0, 0.1)
    except ValueError:
        raise ValueError('Bandwidth optimization did not converge.') from None

    # Apply Gaussian filter with optimized kernel.
    smoothed = transformed * exp(-π**2 * ts/2 * k**2)

    # Reverse transformation after adjusting first component.
    smoothed[0] *= 2
    inverse = idct(smoothed)

    # Normalize density.
    density = inverse * n/Δx

    # Determine bandwidth from diffusion time.
    bandwidth = sqrt(ts) * Δx

    # Return results.
    return (density, grid, bandwidth)
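
Example usage (a hypothetical call; assumes the module-level imports of numpy, scipy's dct/idct, and brentq that the function body relies on):

from numpy.random import default_rng

samples = default_rng(42).normal(size=5000)
density, grid, bandwidth = kde1d(samples, n=1024)
print(f"optimal bandwidth: {bandwidth:.3f}")  # shrinks roughly like N**(-1/5)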
def predictions(df1):
    """### Import Data"""
    data = df1  #pd.read_csv("../new_data.csv")[-100:]
    data["fecha"] = pd.to_datetime(data["fecha"])
    data = data.sort_values(by=['fecha']).reset_index(drop=True)
    """### Check for null values and outliers"""

    # Drop NA
    data.dropna(inplace=True)

    # Drop Outliers
    data = data[np.abs(stats.zscore(data.valores)) < 3]
    """## 2.- DCT & FFT Analysis
    ---

    ### Imports
    """

    N = len(data)
    t = np.linspace(0, 1, N, endpoint=False)

    x = data["valores"].values
    y = dct(x, norm='ortho')
    windows = []
    transformations = {}

    for i in range(2, 12):
        temp_window = np.zeros(N)
        temp_window[:N // i] = 1
        windows.append(temp_window)

    for idx, window in enumerate(windows):
        temp_transform = idct(y * window, norm='ortho')
        transformations[idx + 2] = temp_transform

    differences = {}
    for key in transformations:
        transformation = transformations[key]
        temp_diff = 0
        for idx, val in enumerate(transformation):
            if idx == 0:
                continue
            current_diff = np.round(abs(val - transformation[idx - 1]), 4)
            if current_diff != 0.0:
                temp_diff += current_diff

        if temp_diff != 0:
            differences[key] = temp_diff

    keys = list(transformations.keys())
    chosen_key = keys[0]
    secondary_key = keys[1]
    time_series = transformations[chosen_key]
    secondary_series = transformations[secondary_key]

    timestamps = data["fecha"].values
    transformed_data = pd.DataFrame({
        'fecha': timestamps,
        'valores': time_series
    })
    """## 4.- Model
    ---
    """
    """Rename DF columns"""

    transformed_data.columns = ['ds', 'y']
    """### FB Prophet"""

    pm = Prophet()

    pm.fit(transformed_data)

    pfuture = pm.make_future_dataframe(periods=7)

    pforecast = pm.predict(pfuture)

    # DataFrame as JSON to be returned to the front end
    #   |    |   |
    #   V    V   V
    forecast_json = pforecast.to_json(orient="split")

    return json.loads(forecast_json)

    # NOTE: everything below this return is unreachable (dead code kept from
    # an earlier revision; climaData is not defined in this scope).

    mae = np.mean(
        abs(transformed_data.y - pforecast.yhat[:len(transformed_data.y)]))
    """### Prediction Convolutions

    #### FB Prophet
    """
    # Array to be returned to the front end
    #   |    |   |
    #   V    V   V
    prophet_future_conv = convolve_2_dfs(pforecast, climaData, "yhat", "prec")
Example no. 18
def FastIHT_DCT(y, K, Q, d, Sigma):
    """
    Fast iterative hard thresholding algorithm with partial Discrete Cosine Transform sensing matrices.
    y : numpy.ndarray
        the measurement vector
    K : int
        number of nonzero entries in the recovered signal
    Q : int
        dimension of y
    d : int
        dimension of the recovered signal
    Sigma : numpy.ndarray
        a Q-dimensional array consisting of row indices of the partial DCT matrix
    """

    eps = 1e-4
    max_iter = 25

    res_norms = np.zeros(max_iter)
    dct_factor = np.sqrt(d/Q)

    g = np.zeros(d)
    g_prev = g

    res_tmp = np.zeros(d)
    res_tmp[Sigma] = y

    res = fft.idct(res_tmp, norm="ortho") * dct_factor

    Omega = np.argpartition(np.abs(res), -K)[-K:]
    g[Omega] = res[Omega]

    for s in range(max_iter):
        if s == 0:
            w_vec = g
        else:
            g_dct = fft.dct(g, type=2, norm="ortho") * dct_factor
            g_diff_dct = fft.dct(g - g_prev, norm="ortho") * dct_factor

            tau = np.dot(y - g_dct[Sigma], g_diff_dct[Sigma]) / np.dot(g_diff_dct[Sigma], g_diff_dct[Sigma])
            w_vec = g + tau * (g - g_prev)

        w_vec_dct = fft.dct(w_vec, norm="ortho") * dct_factor

        res_tmp[Sigma] = y - w_vec_dct[Sigma]
        res_w = fft.idct(res_tmp, norm="ortho") * dct_factor

        res_norms[s] = np.linalg.norm(res_w)
        if s >= 3 and np.std(res_norms[s-3:s+1]) / np.mean(res_norms[s-3:s+1]) < 1e-2:
            break
        elif res_norms[s] < eps:
            break

        Omega_w = (w_vec != 0)
        res_w_proj_dct = np.zeros(d)
        res_w_proj_dct[Omega_w] = res_w[Omega_w] * dct_factor
        fft.dct(res_w_proj_dct, norm="ortho", overwrite_x=True)

        alpha_tilde = np.dot(res_w[Omega_w], res_w[Omega_w]) / np.dot(res_w_proj_dct[Sigma], res_w_proj_dct[Sigma])

        g_prev = g

        h_vec = w_vec + alpha_tilde * res_w
        Omega = np.argpartition(np.abs(h_vec), -K)[-K:]
        g = np.zeros(d)
        g[Omega] = h_vec[Omega]

        g_dct = fft.dct(g, norm="ortho") * dct_factor

        res_tmp[Sigma] = y - g_dct[Sigma]
        res = fft.idct(res_tmp, norm="ortho") * dct_factor

        res_proj_dct = np.zeros(d)
        res_proj_dct[Omega] = res[Omega] * dct_factor
        fft.dct(res_proj_dct, norm="ortho", overwrite_x=True)

        alpha = np.dot(res[Omega], res[Omega]) / np.dot(res_proj_dct[Sigma], res_proj_dct[Sigma])

        g[Omega] += alpha * res[Omega]

    return g
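
An illustrative recovery test (an assumption about intended usage: the measurement model implied by the residual updates above is y = sqrt(d/Q) * dct(x, norm='ortho')[Sigma], with the same np and scipy fft imports):

rng = np.random.default_rng(0)
d, Q, K = 1024, 256, 10
Sigma = rng.choice(d, size=Q, replace=False)

x = np.zeros(d)
x[rng.choice(d, size=K, replace=False)] = rng.standard_normal(K)

y = np.sqrt(d / Q) * fft.dct(x, norm="ortho")[Sigma]
x_hat = FastIHT_DCT(y, K, Q, d, Sigma)
print("relative error:", np.linalg.norm(x_hat - x) / np.linalg.norm(x))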
Example no. 19
def read_and_resize(path_to_vols, vol_name, ntup, toDir='./'):
    '''
    Open and pad (or truncate) the Chebyshev-Fourier-Fourier coefficients
    of the kxky fields stored in CheckPoints. As they are stored in physical
    space, the interpolation is done by cosine transform along z, followed by
    padding or truncation, then inverse cosine transform. Along x and y, the
    grid is evenly spaced and the interpolation is likewise spectral: the rfft
    coefficients are zero-padded or truncated (which proved fast enough for
    large resolutions in a few tests).
    '''
    ierror = 0
    domain_decomp_infos = np.fromfile(path_to_vols +
                                      '../Geometry/domDecmp.core0000',
                                      dtype=np.int32)
    NX = ntup[0]
    NY = ntup[1]
    NZ = ntup[2]
    NXAA = domain_decomp_infos[3]
    NYAA = domain_decomp_infos[4]
    NZAA = domain_decomp_infos[5]
    print('----------- :: Resizing (' + str(NXAA) + ',' + str(NYAA) + ',' +
          str(NZAA) + ') into' + ' (' + str(NX) + ',' + str(NY) + ',' +
          str(NZ) + ').')
    curPhys = np.fromfile(path_to_vols + vol_name,
                          dtype=np.float_).reshape(NXAA, NYAA, NZAA)
    coscoefs = dct(curPhys, axis=-1)
    if (NZ > NZAA):
        newcoefs = np.zeros((NXAA, NYAA, NZ), dtype=np.float_)
        newcoefs[:, :, :NZAA] = coscoefs
    else:
        newcoefs = coscoefs[:, :, :NZ]

    aux2 = idct(newcoefs, axis=-1) * NZ / NZAA

    ## now we need to interpolate in the xy-plane...
    aux1 = np.zeros((NXAA, ntup[1], ntup[2]), dtype=np.float_)
    if (NYAA == ntup[1]):
        aux1 = np.copy(aux2)
    elif (NYAA < ntup[1]):
        spectral_buffer = rfft(aux2, axis=1)
        padded_spectral_buffer = np.zeros((NXAA, ntup[1] // 2 + 1, ntup[2]),
                                          dtype=np.complex_)
        padded_spectral_buffer[:, :(NYAA // 2 + 1), :] = spectral_buffer
        aux1 = irfft(padded_spectral_buffer, axis=1) / NYAA * ntup[1]
        del padded_spectral_buffer, spectral_buffer
    else:
        spectral_buffer = rfft(aux2, axis=1)
        truncated_spectral_buffer = np.zeros((NXAA, ntup[1] // 2 + 1, ntup[2]),
                                             dtype=np.complex_)
        truncated_spectral_buffer[:, :(ntup[1] //
                                       3), :] = spectral_buffer[:, :(ntup[1] //
                                                                     3), :]
        truncated_spectral_buffer.imag[:, -1, :] = 0.
        truncated_spectral_buffer.imag[:, 0, :] = 0.
        aux1 = irfft(truncated_spectral_buffer, axis=1) / NYAA * ntup[1]
        del truncated_spectral_buffer, spectral_buffer
    del aux2
    deaPhys = np.zeros((ntup[0], ntup[1], ntup[2]), dtype=np.float_)
    if (NXAA == ntup[0]):
        deaPhys = np.copy(aux1)
    elif (NXAA < ntup[0]):
        spectral_buffer = rfft(aux1, axis=0)
        padded_spectral_buffer = np.zeros((ntup[0] // 2 + 1, ntup[1], ntup[2]),
                                          dtype=np.complex_)
        padded_spectral_buffer[:(NXAA // 2 + 1), :, :] = spectral_buffer
        deaPhys = irfft(padded_spectral_buffer, axis=0) / NXAA * ntup[0]
        del padded_spectral_buffer, spectral_buffer
    else:
        spectral_buffer = rfft(aux1, axis=0)
        truncated_spectral_buffer = np.zeros(
            (ntup[0] // 2 + 1, ntup[1], ntup[2]), dtype=np.complex_)
        truncated_spectral_buffer[:(ntup[0] //
                                    3), :, :] = spectral_buffer[:(ntup[0] //
                                                                  3), :, :]
        truncated_spectral_buffer.imag[-1, :, :] = 0.
        truncated_spectral_buffer.imag[0, :, :] = 0.
        deaPhys = irfft(truncated_spectral_buffer, axis=0) / NXAA * ntup[0]
        del truncated_spectral_buffer, spectral_buffer
    del aux1
    deaPhys.tofile(toDir + '/Restart/' + vol_name)
    return ierror
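
A standalone sketch of the xy-plane step (hypothetical data; same rescaling convention as the code above): upsampling an evenly spaced periodic axis by zero-padding rfft coefficients reproduces a band-limited signal exactly.

import numpy as np
from scipy.fft import rfft, irfft

Nold, Nnew = 12, 18
x_old = np.linspace(0, 2 * np.pi, Nold, endpoint=False)
f_old = np.sin(x_old) + 0.5 * np.cos(2 * x_old)

spec = rfft(f_old)
padded = np.zeros(Nnew // 2 + 1, dtype=complex)
padded[:Nold // 2 + 1] = spec
f_new = irfft(padded, n=Nnew) / Nold * Nnew  # rescale, as in the code above

x_new = np.linspace(0, 2 * np.pi, Nnew, endpoint=False)
assert np.allclose(f_new, np.sin(x_new) + 0.5 * np.cos(2 * x_new))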
def run_upscale(upscale, params):
    out = []
    path = params['path']
    outpath = params['outpath']
    fname = params['fname']
    sz = params['sz']
    sizeoftrace = params['sizeoftrace']
    nbatches = params['nbatches']
    ninstances = params['ninstances']
    tstep_ns = 1. / 40
    #thresh: hard to catch for upscale=1; -40 for upscale=2; -20 for upscale=3; -10 for upscale=4; -5 for upscale=6; -2.5 for upscale=8
    tstep_under_ns = tstep_ns * 6 / upscale  ## actual Abaco sampling will be 6 GSps, but here we simply take every 6th step of the waveform.

    hbins = np.arange(-10, 10, 0.025)
    tofbins = np.arange(0, 500, 0.05)

    upthresh = {1: -40, 2: -40, 3: -20, 4: -10, 6: -5, 8: -2.5, 10: -1.5}

    lastpass = False
    batch = 0
    while not lastpass:
        print('Processing batch %i of %i instances for upscale %.2f' %
              (batch, ninstances, upscale))
        raw = np.fromfile('%s/%s' % (path, fname),
                          count=sz * ninstances,
                          offset=batch * ninstances * sizeoftrace,
                          dtype=float)
        if (raw.shape[0] < ninstances * sz):
            ninstances = raw.shape[0] // sz
            lastpass = True
        if nbatches < 32 and batch == 31:
            lastpass = True
        data = 1e3 * raw.reshape(ninstances, sz).T  # data in millivolts
        d = np.row_stack((data, np.flipud(data)))
        d_ = np.row_stack((data[::6, :], np.flipud(data[::6, :])))
        frq = np.arange(d.shape[0], dtype=float)
        flt = (1. + np.cos(frq * np.pi / frq.shape[0]))
        filt = np.tile(flt, (d.shape[1], 1)).T
        frq_ = np.arange(d_.shape[0], dtype=float)
        flt_ = (1. + np.cos(frq_ * np.pi / frq_.shape[0]))
        filt_ = np.tile(flt_, (d_.shape[1], 1)).T
        DC = fft.dct(d, axis=0)
        DC_ = fft.dct(d_, axis=0)
        DC[frq.shape[0]:, :] = 0
        DC[:frq.shape[0], :] *= filt
        DC_[frq_.shape[0]:, :] = 0
        DC_[:frq_.shape[0], :] *= filt_

        DC_up = np.zeros((DC_.shape[0] * upscale, DC_.shape[1]), dtype=float)
        DC_up[:DC_.shape[0], :] = DC_
        dcc = fft.idct(DC, axis=0)
        dsc = fft.idst(DC, axis=0)
        #dcc_ = fft.idct(DC_,axis=0)
        #dsc_ = fft.idst(DC_,axis=0)
        dcc_up = fft.idct(DC_up, axis=0)
        dsc_up = fft.idst(DC_up, axis=0)
        logic = (dsc * dcc)[:DC.shape[0] // 2, :]
        #logic_ = (dsc_*dcc_)[:frq_.shape[0]//2,:]
        logic_up = (dsc_up * dcc_up)[:DC_.shape[0] // 2 * upscale, :]
        if batch % 16 == 0:
            np.savetxt('%s/%s_b%i_sample.dat' % (outpath, fname, batch),
                       d,
                       fmt='%.3f')
            np.savetxt('%s/%s_b%i_dct.dat' % (outpath, fname, batch),
                       DC,
                       fmt='%.3f')
            np.savetxt('%s/%s_b%i_idct.dat' % (outpath, fname, batch),
                       dcc,
                       fmt='%.3f')
            np.savetxt('%s/%s_b%i_idst.dat' % (outpath, fname, batch),
                       dsc,
                       fmt='%.3f')
            np.savetxt('%s/%s_b%i_logic.dat' % (outpath, fname, batch),
                       logic,
                       fmt='%.3f')
            #np.savetxt('%s/%s_b%i_logic_.dat'%(outpath,fname,batch),logic_,fmt='%.3f')
            np.savetxt('%s/%s_b%i_logic_up.dat' % (outpath, fname, batch),
                       logic_up,
                       fmt='%.3f')
        #f = open('%s/%s_b%i_logic_edges.dat'%(outpath,fname,batch),'w')
        #f_up = open('%s/%s_b%i_logic_up_edges.dat'%(outpath,fname,batch),'w')
        for i in range(ninstances):
            logic_edges = scanedges(logic[:, i], -100)
            #line = '\t'.join(['%.3f'%(e) for e in logic_edges]) + '\n'
            #f.write(line)
            #print(len(logic_edges),line)
            logic_up_edges = scanedges(logic_up[:, i], upthresh[upscale])
            #line = '\t'.join(['%.3f'%(4*e/6.) for e in logic_up_edges]) + '\n'
            #f_up.write(line)
            out += pairedges([tstep_ns * e for e in logic_edges],
                             [tstep_under_ns * e for e in logic_up_edges])
        #f.close()
        #f_up.close()
        if batch % 16 == 0:
            #np.savetxt('%s/%s_logic_compare.out'%(outpath,fname),np.column_stack(out),fmt='%.3f')
            #f = open('%s/%s_logic_compare_upscale%i.dat'%(outpath,fname,upscale),'w')
            #_ = [f.write('%.4f\t%.4f\n'%(p[0],p[1])) for p in out]
            #f.close()
            h = np.histogram(np.array(out)[:, 1] - np.array(out)[:, 0],
                             hbins)[0]
            np.savetxt('%s/%s_logic_difference_upscale%i.hist' %
                       (outpath, fname, upscale),
                       np.column_stack((hbins[:-1], h)),
                       fmt='%.3f')

            h0 = np.histogram(np.array(out)[:, 0], tofbins)[0]
            h1 = np.histogram(np.array(out)[:, 1], tofbins)[0]
            np.savetxt('%s/%s_logic_compare_upscale%i.hist' %
                       (outpath, fname, upscale),
                       np.column_stack((tofbins[:-1], h0, h1)),
                       fmt='%.3f')

        batch += 1

    tofbins = np.arange(0, 500, 0.05)
    h0 = np.histogram(np.array(out)[:, 0], tofbins)[0]
    h1 = np.histogram(np.array(out)[:, 1], tofbins)[0]
    np.savetxt('%s/%s_logic_compare_upscale%i.hist' %
               (outpath, fname, upscale),
               np.column_stack((tofbins[:-1], h0, h1)),
               fmt='%.3f')
    hbins = np.arange(-10, 10, 0.02)
    h = np.histogram(np.array(out)[:, 1] - np.array(out)[:, 0], hbins)[0]
    np.savetxt('%s/%s_logic_difference_upscale%i.hist' %
               (outpath, fname, upscale),
               np.column_stack((hbins[:-1], h)),
               fmt='%.3f')

    return