def transform(self, segment: np.ndarray) -> np.ndarray:
     """ turn an audio segment into mdct spectras """
     if len(segment) < self.win_len:
         # zero pad frame to window length before transformation
         mdct = abs(
             dct([segment], type=4, n=self.win_len,
                 axis=1)[:, :self.win_len // 2])
     else:
         # use dynamic stride for larger inputs
         dyn_stride = self.stride * (len(segment) // self.win_len)
         # create stride matrix from segment
         rows = (len(segment) - self.win_len) // dyn_stride + 1
         n_stride = segment.strides[0]
         frames = np.lib.stride_tricks.as_strided(
             segment,
             shape=(rows, self.win_len),
             strides=(dyn_stride * n_stride, n_stride),
         )
         mdct = abs(dct(frames, type=4, axis=1)[:, :self.win_len // 2])
     # remove spectral offset
     mdct -= mdct.min(keepdims=True, axis=1)
     # strip zero rows from spectrum to avoid division by zero
     mdct = mdct[~np.all(mdct == 0, axis=1)]
     # scale spectrum between 0 and 1
     mdct /= mdct.max(keepdims=True, axis=1)
     # segment is either silent or corrupt -> return zeros array
     if np.isnan(mdct).any() or len(mdct) == 0:
         logging.warning(
             "invalid input introduced nan values. check audio file integrity!"
         )
         mdct = np.zeros((1, self.win_len // 2))
     return mdct
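A minimal usage sketch (added here, not part of the original example): the method above only reads win_len and stride from self, so a SimpleNamespace stands in for the real (unknown) class; numpy, a scipy dct that supports type=4, and logging are assumed to be imported in the method's module.

from types import SimpleNamespace
import numpy as np

cfg = SimpleNamespace(win_len=512, stride=128)   # hypothetical settings
segment = np.random.randn(4096)                  # fake audio segment
spectra = transform(cfg, segment)                # one row per analysis frame
print(spectra.shape)                             # (n_frames, win_len // 2)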
Example 2
    def __init__(self, originals: ep.Tensor, random_noise: str = "normal", basis_type: str = "dct", **kwargs : Any):
        """
        Args:
            random_noise (str, optional): When the basis is created, noise is added. This noise can be normal or
                                          uniform. Defaults to "normal".
            basis_type (str, optional): Type of the basis: DCT, Random, or Genetic. Defaults to "dct".
            device (int, optional): [description]. Defaults to -1.
            args, kwargs: In args and kwargs, there is the basis params:
                    * Random: No parameters                    
                    * DCT:
                            * function (tanh / constant / linear): function applied on the dct
                            * beta
                            * gamma
                            * frequence_range: tuple of 2 float
                            * dct_type: 8x8 or full
        """
        self._originals = originals
        if isinstance(self._originals.raw, torch.Tensor):
            self._f_dct2 = lambda a: torch_dct.dct_2d(a)
            self._f_idct2 = lambda a: torch_dct.idct_2d(a)
        elif isinstance(self._originals.raw, np.ndarray):
            from scipy import fft
            self._f_dct2 = lambda a: fft.dct(fft.dct(a, axis=2, norm='ortho' ), axis=3, norm='ortho')
            self._f_idct2 = lambda a: fft.idct(fft.idct(a, axis=2, norm='ortho'), axis=3, norm='ortho')

        self.basis_type = basis_type
        self._function_generation = getattr(self, "_get_vector_" + self.basis_type)
        self._load_params(**kwargs)

        assert random_noise in ["normal", "uniform"]
        self.random_noise = random_noise
Example 3
def get_dWs(ret_lk, getter_kwargs, stride=1, size=int(1e4), get_comps=False):
    t, (a, e, W, I, w), [t_lks, _] = ret_lk
    a_int = interp1d(t, a)
    e_int = interp1d(t, e)
    I_int = interp1d(t, I)
    W_int = interp1d(t, W)
    w_int = interp1d(t, w)
    dWeff_mags = []
    dWslx = []
    dWslz = []
    dWdot_mags = []
    times = []
    comps = []

    # # test, plot Wdot/Wsl over a single "period"
    # start, end = t_lks[1000:1002]
    # ts = np.linspace(start, end, size)
    # e_t = e_int(ts)
    # dWdt = (3 * a_int(ts)**(3/2) * np.cos(I_int(ts)) *
    #         (5 * e_t**2 * np.cos(w_int(ts))**2
    #          - 4 * e_t**2 - 1)
    #     / (4 * np.sqrt(1 - e_t**2)))
    # W_sl = getter_kwargs['eps_sl'] / (a_int(ts)**(5/2) * (1 - e_t**2))
    # plt.semilogy(ts, W_sl, 'r:')
    # plt.semilogy(ts, -dWdt, 'g:')
    # plt.savefig('/tmp/Wdots', dpi=200)
    # plt.close()
    # return

    mult = 1 if I[0] < np.radians(90) else -1
    for start, end in zip(t_lks[:-1:stride], t_lks[1::stride]):
        ts = np.linspace(start, end, size)
        e_t = e_int(ts)
        dWdt = (3 * a_int(ts)**(3 / 2) * np.cos(I_int(ts)) *
                (5 * e_t**2 * np.cos(w_int(ts))**2 - 4 * e_t**2 - 1) /
                (4 * np.sqrt(1 - e_t**2)))  #* mult
        W_sl = getter_kwargs['eps_sl'] / (a_int(ts)**(5 / 2) * (1 - e_t**2))

        # dWdt = (W_int(end) - W_int(start)) / (end - start)# * mult
        Wtot = np.sqrt((W_sl * np.cos(I_int(ts)) - dWdt)**2 +
                       (W_sl * np.sin(I_int(ts)))**2)
        # NB: dW = int(Wdot dt)
        dWeff_mags.append(np.mean(Wtot))
        dWslx.append(np.mean(W_sl * np.sin(I_int(ts))))
        dWslz.append(np.mean(W_sl * np.cos(I_int(ts))))
        dWdot_mags.append(np.mean(dWdt))
        times.append((end + start) / 2)
        if get_comps:
            comps.append((
                dct(W_sl * np.sin(I_int(ts)), type=1)[::2] / (2 * size),
                dct(W_sl * np.cos(I_int(ts)) - dWdt, type=1)[::2] / (2 * size),
            ))
    dWslx = np.array(dWslx)
    dWslz = np.array(dWslz)
    dWsl_mags = np.sqrt(dWslx**2 + dWslz**2)
    if get_comps:
        return (np.array(dWeff_mags), dWsl_mags, np.array(dWdot_mags),
                np.array(times), dWslx, dWslz, comps)
    return (np.array(dWeff_mags), dWsl_mags, np.array(dWdot_mags),
            np.array(times), dWslx, dWslz)
Example 4
def spectral_mutual_information(image_a, image_b, normalised=True):
    norm_image_a = image_a / norm(image_a.flatten(), 2)
    norm_image_b = image_b / norm(image_b.flatten(), 2)

    dct_norm_true_image = dct(dct(norm_image_a, axis=0), axis=1)
    dct_norm_test_image = dct(dct(norm_image_b, axis=0), axis=1)

    return mutual_information(
        dct_norm_true_image, dct_norm_test_image, normalised=normalised
    )
Example 5
def test_orthogonalize_dct1(norm):
    x = np.random.rand(100)

    x2 = x.copy()
    x2[0] *= SQRT_2
    x2[-1] *= SQRT_2

    y1 = dct(x, type=1, norm=norm, orthogonalize=True)
    y2 = dct(x2, type=1, norm=norm, orthogonalize=False)

    y2[0] /= SQRT_2
    y2[-1] /= SQRT_2
    assert_allclose(y1, y2)
Example 6
    def __init__(self,
                 sample_rate: int = 22050,
                 n_mfcc: int = 128,
                 n_fft: int = 1024,
                 hop_length: int = 512,
                 window: str = 'hann'):

        super(MFCC, self).__init__()

        mel_filterbank = librosa.filters.mel(sr=sample_rate,
                                             n_fft=n_fft,
                                             n_mels=n_mfcc)
        mel_filterbank = torch.from_numpy(mel_filterbank).to(
            torch.get_default_dtype())
        self.register_buffer('mel', mel_filterbank)

        dct_buf = spf.dct(np.eye(n_mfcc), type=2, norm='ortho').T
        dct_buf = torch.from_numpy(dct_buf).to(torch.get_default_dtype())
        self.register_buffer('dct_mat', dct_buf)

        window_buffer: torch.Tensor = torch.from_numpy(
            sps.get_window(window=window, Nx=n_fft,
                           fftbins=True)).to(torch.get_default_dtype())
        self.register_buffer('window', window_buffer)

        self.sample_rate = sample_rate
        self.n_fft = n_fft
        self.n_mfcc = n_mfcc
        self.hop_length = hop_length
Example 7
def DCT(x):
    """
	计算离散余弦变换,MATLAB dct()
	:param x:
	:return:
	"""
    return dct(x, norm='ortho')
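A quick round-trip check (added for illustration, not from the original source): with norm='ortho' the transform is orthogonal, so the matching inverse recovers the input. The idct import from scipy.fftpack is an assumption; the wrapper's own import is not shown.

import numpy as np
from scipy.fftpack import idct

x = np.random.rand(16)
np.testing.assert_allclose(idct(DCT(x), norm='ortho'), x, atol=1e-10)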
Example 8
def frequency_domain(data):
    # Map the individual groups of the track
    # from the time domain into the frequency space.
    res = np.empty(data.shape, dtype=np.float64)
    for i, group in enumerate(data):
        res[i] = np.abs(dct(group, norm='ortho'))
    return res
Example 9
def read_and_resize_zeroModes(path_to_vols, vol_name, ntup, toDir='./'):
    '''
   Open and pad (or truncate) the Chebyshev coefficients of the mean fields
   stored in CheckPoints. In contrast to the fluctuations (kxky fields), spectral
   coefficients are stored directly. Hence, instead of NZAA (grid points),
   we store (NZ - nbc), where NZ = NZAA*3//2 and nbc is the number of 
   boundary conditions for a given field.
   '''
    ierror = 0
    domain_decomp_infos = np.fromfile(path_to_vols +
                                      '../Geometry/domDecmp.core0000',
                                      dtype=np.int32)
    readVec = np.fromfile(path_to_vols + vol_name, dtype=np.float_)
    Nold = readVec.shape[0]
    Nnew = ntup[2]

    coscoefs = dct(readVec)
    if (Nnew > Nold):
        newcoefs = np.zeros((Nnew), dtype=np.float_)
        newcoefs[:Nold] = coscoefs
    else:
        newcoefs = coscoefs[:Nnew]

    aux2 = idct(newcoefs) * Nnew / Nold
    aux2.tofile(toDir + '/Restart/' + vol_name)
    return ierror
Example 10
def spectral_psnr(image_true, image_test):
    norm_true_image = image_true / norm(image_true.flatten(), 2)
    norm_test_image = image_test / norm(image_test.flatten(), 2)

    dct_norm_true_image = dct(dct(norm_true_image, axis=0), axis=1)
    dct_norm_test_image = dct(dct(norm_test_image, axis=0), axis=1)

    norm_dct_norm_true_image = dct_norm_true_image / norm(
        dct_norm_true_image.flatten(), 2)
    norm_dct_norm_test_image = dct_norm_test_image / norm(
        dct_norm_test_image.flatten(), 2)

    norm_true_image = np.log1p(np.abs(norm_dct_norm_true_image))
    norm_test_image = np.log1p(np.abs(norm_dct_norm_test_image))

    psnr = peak_signal_noise_ratio(norm_true_image, norm_test_image)
    return psnr
Example 11
def normalized_dct(Y):
    Z = dct(Y)
    # print(Z)
    Z = Z / len(Z)
    # print(Z)
    Z[0] = Z[0] / 2
    # print(Z)
    return Z
Example 12
def fct(f):
    # =============================================================================
    #     INPUT: Function evaluated at collocation points
    #     OUTPUT: Chebyshev coefficients
    # =============================================================================
    # Calculate Chebyshev coefficients with the DCT (scipy's default type II)
    b = dct(f) / (len(f))
    # Correct to coincide with Moore 2017
    b[0] = b[0] / 2
    return b
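A sanity check (added here, assuming the dct in scope is scipy's default type-II transform): sampling the Chebyshev polynomial T_m at the N Gauss-Chebyshev nodes should give a coefficient vector that is 1 at index m and close to 0 elsewhere.

import numpy as np

N, m = 16, 3
theta = np.pi * (2 * np.arange(N) + 1) / (2 * N)   # Gauss-Chebyshev node angles
b = fct(np.cos(m * theta))                          # T_m evaluated at the nodes
assert np.isclose(b[m], 1.0)
assert np.allclose(np.delete(b, m), 0.0, atol=1e-10)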
Example 13
def plot_weff_fft(Weff_vec, times, fn='6_vecfft', plot=True):
    # cosine transform
    x_coeffs = dct(Weff_vec[0], type=1)[::2] / (2 * len(times))
    z_coeffs = dct(Weff_vec[2], type=1)[::2] / (2 * len(times))
    # print(np.mean(Weff_vec[2]), z_coeffs[0]) # are equal
    # print(np.mean(Weff_vec[0]), x_coeffs[0]) # are equal

    if plot:
        fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(9, 7))
        # fig, (ax1, ax2, ax3) = plt.subplots(3, 1, figsize=(9, 7))
        # ax3.semilogy(times, -Weff_vec[2], 'k', lw=1, label=r'$-z$')
        # ax3.semilogy(times, Weff_vec[0], 'b', lw=1, label=r'$x$')
        # ax3.legend(fontsize=12)
        # ax3.set_ylabel(r'$\Omega_{\rm eff}$')

        N = np.arange(len(z_coeffs))
        ax1.semilogy(N, x_coeffs, 'bo', ms=1)
        ax1.semilogy(N, -x_coeffs, 'ro', ms=1)
        # ax2.semilogy(N, z_coeffs, 'bo', ms=1)
        ax2.semilogy(N, -z_coeffs, 'ro', ms=1)
        ax1.set_ylabel(
            r'$\tilde{\Omega}_{\rm eff, x, N}$ (Rad / $t_{\rm LK,0}$)')
        ax2.set_ylabel(
            r'$\tilde{\Omega}_{\rm eff, z, N}$ (Rad / $t_{\rm LK,0}$)')
        ax2.set_xlabel(r'$N$')

        min_fact = 1e-6
        Nmax = np.where(abs(z_coeffs) / np.max(abs(z_coeffs)) < min_fact)[0][0]
        ax1.set_ylim(bottom=np.abs(x_coeffs).max() * min_fact / 3)
        ax2.set_ylim(bottom=np.abs(z_coeffs).max() * min_fact / 3)
        ax1.set_xlim((0, Nmax))
        ax2.set_xlim((0, Nmax))

        plt.tight_layout()
        plt.savefig(TOY_FOLDER + fn, dpi=200)
        plt.clf()

    return x_coeffs, z_coeffs
Example 14
def local_mfcc(signal,
               samplerate=16000,
               winlen=0.025,
               winstep=0.01,
               numcep=13,
               nfilt=26,
               nfft=512,
               lowfreq=0,
               highfreq=None,
               preemph=0.97,
               ceplifter=22,
               filtertype='mel',
               appendEnergy=True,
               winfunc=lambda x: np.hamming(x)):
    """Compute MFCC features from an audio signal.

    :param signal: the audio signal from which to compute features. Should be an N*1 array
    :param samplerate: the samplerate of the signal we are working with.
    :param winlen: the length of the analysis window in seconds. Default is 0.025s (25 milliseconds)
    :param winstep: the step between successive windows in seconds. Default is 0.01s (10 milliseconds)
    :param numcep: the number of cepstral coefficients to return, default 13
    :param nfilt: the number of filters in the filterbank, default 26.
    :param nfft: the FFT size. Default is 512.
    :param lowfreq: lowest band edge of mel filters. In Hz, default is 0.
    :param highfreq: highest band edge of mel filters. In Hz, default is samplerate/2
    :param preemph: apply preemphasis filter with preemph as coefficient. 0 is no filter. Default is 0.97.
    :param ceplifter: apply a lifter to final cepstral coefficients. 0 is no lifter. Default is 22.
    :param appendEnergy: if this is true, the zeroth cepstral coefficient is replaced with the log of the total frame energy.
    :param winfunc: the analysis window to apply to each frame. A Hamming window is applied by default; you can use other numpy window functions here, e.g. winfunc=numpy.hamming
    :returns: A numpy array of size (NUMFRAMES by numcep) containing features. Each row holds 1 feature vector.
    """
    feat, energy = local_fbank(signal=signal,
                               samplerate=samplerate,
                               winlen=winlen,
                               winstep=winstep,
                               nfilt=nfilt,
                               nfft=nfft,
                               lowfreq=lowfreq,
                               highfreq=highfreq,
                               preemph=preemph,
                               winfunc=winfunc,
                               filtertype=filtertype)
    feat = np.log(feat)
    feat = dct(feat, type=2, axis=1, norm='ortho')[:, :numcep]
    feat = lifter(feat, ceplifter)
    if appendEnergy:
        feat[:, 0] = np.log(
            energy
        )  # replace first cepstral coefficient with log of frame energy
    return feat
Example 15
def main():
    t = np.arange(256)
    s = 10*gauss(t,10,15)*np.cos(t*2*np.pi/7) + 20*gauss(t,105,20)*np.cos(t*2*np.pi/11)
    #_ = [print(' '*(20+int(v)) + '|') for v in s]
    y = np.concatenate((s,np.flip(s))) 
    #_ = [print(' '*(20+int(v)) + '+') for v in y]
    Y=fft.fft(y)
    YC = fft.dct(y)
    sz=y.shape[0]
    np.savetxt('powers.out',np.column_stack((Y.real,YC)))
    #print(len(s),len(y),len(Y),len(YC))
    #_ = [print(' '*(20+int(v)//20) + '.') for v in YC]

    return
Example 16
def imageCompression(inputImage, m, row, col):
    # Step 1.4
    outImage = np.zeros(
        (int((row / 8) * m), int((col / 8) * m), 3), dtype=np.float16)

    blockRow = int(row / 8)
    blockCol = int(col / 8)
    blockComponents = 3
    noIterations = 0

    # Step 1.5
    for x in range(0, blockRow):
        for y in range(0, blockCol):
            for z in range(0, blockComponents):
                noIterations += 1
                currentBlock = inputImage[x * 8: x * 8 + 8, y * 8: y * 8 + 8, z]
                # Step 1.6, 1.7
                blockDCT = dct(dct(currentBlock.T, norm='ortho').T, norm='ortho')[0:m, 0:m]
                outImage[x * m: x * m + m, y * m: y * m + m, z] = blockDCT
    print("no Iterations", noIterations)
    print("outImage", outImage)
    return outImage
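Hedged usage sketch (not in the original): compress a random 16x16 RGB image, keeping the top-left m x m DCT coefficients of each 8x8 block; the image, m, row and col values here are made up.

import numpy as np

row, col, m = 16, 16, 4
image = np.random.rand(row, col, 3)
out = imageCompression(image, m, row, col)
print(out.shape)   # (row // 8 * m, col // 8 * m, 3) -> (8, 8, 3)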
Example 17
    def calculate_power_spectrum(self):

        if self.flagged == self._flagged:
            return
        self._flagged = self.flagged.copy()

        i = self.get_mask()
        flux = np.interp(self.spectrum.wave, self.spectrum.wave[i], self.spectrum.flux[i])
        self.p = np.poly1d(np.polyfit(self.spectrum.wave, flux, 2))
        self.ft = dct(flux/self.p(self.spectrum.wave), norm='ortho')

        self.bins = np.arange(self.spectrum.N)
        self.pwr = self.ft**2
        self.lpwr = np.log10(self.pwr)
Example 18
def apply_srct(r, e, mat, perm=None):
    """
    Apply a subsampled randomized cosine transform (SRCT) to the columns
    of the ndarray mat. The transform is defined by data (r, e).

    Parameters
    ----------
    r : ndarray
        The random restriction used in the SRCT. The entries of "r" must
        be unique integers between 0 and mat.shape[0] (exclusive).
    e : ndarray
        The vector of signs used in the SRCT; e.size == mat.shape[0].
    mat : ndarray
        The operand for the embedding. If mat.ndim == 1, then simply apply
        the SRCT to mat as a vector.
    perm : ndarray
        permutation of range(mat.shape[0]).

    Returns
    -------
    mat : ndarray
        The transformed input.
    """
    if mat.ndim > 1:
        if perm is not None:
            mat = mat[perm, :]
        mat = mat * e[:, None]
        mat = dct(mat, axis=0, norm='ortho')
        mat = mat[r, :]
    else:
        if perm is not None:
            mat = mat[perm]
        mat = mat * e
        mat = dct(mat, norm='ortho')
        mat = mat[r]
    return mat
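Usage sketch (added for illustration): draw the SRCT data (r, e, perm) with numpy and sketch a 1000 x 50 matrix down to 100 rows; the sizes and seed are arbitrary.

import numpy as np

rng = np.random.default_rng(0)
n, d, k = 1000, 50, 100
mat = rng.standard_normal((n, d))
r = rng.choice(n, size=k, replace=False)   # random row restriction
e = rng.choice([-1.0, 1.0], size=n)        # random signs
perm = rng.permutation(n)                  # optional row permutation
sketch = apply_srct(r, e, mat, perm)
print(sketch.shape)                        # (100, 50)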
Example 19
def dct_transform(sample_rate, data, file, sample_per_frame, compress_ratio):
    print("Original data: ", data)

    # Pad zeroes to the end of the data array so that its length is divisible by sample_per_frame
    numzeros = -len(data) % sample_per_frame  # Number of zeroes to pad (0 if already divisible)
    print("Padding zeroes to original data...")
    padded_data = pad(data, (0, numzeros), "constant", constant_values=0)

    # Divide the data into frames of sample_per_frame samples each
    frames = {}
    count = 0
    print(f"Dividing data into frames of {sample_per_frame} samples each...")
    for i in range(0, len(padded_data), sample_per_frame):
        frames[count] = padded_data[i:i + sample_per_frame]
        count += 1

    # Perform DCT on each frame, slice the frame to the data cutoff index, pad zeroes and use IDCT
    frames_idct = {}
    compressed_data = []
    sample_taken = int(round(sample_per_frame * compress_ratio))

    print("Performing DCT on each frame...")
    for num in frames:
        dct_frame = dct(frames[num], norm="ortho")[:sample_taken]
        compressed_data.append(dct_frame.astype(data.dtype))
        padded_dct_frame = pad(dct_frame, (0, sample_per_frame - sample_taken),
                               "constant",
                               constant_values=0)
        frames_idct[num] = idct(padded_dct_frame, norm="ortho")

    # Write the compressed data to a new binary file with extension cpz
    print("Writing to a compressed file in './compressed/dct/' ...")
    filename = file.split("/")[1]  # Get the filename
    compressed = CompressedFile(Type.DCT, compressed_data)
    write_compressed_file(compressed, "dct/" + filename.split(".")[0] + ".cpz")

    # Reconstruct the data array from frames
    reconstructed_data = []
    for num in frames_idct:
        reconstructed_data.extend(frames_idct[num])
    reconstructed_data = array(reconstructed_data)[:len(data)].astype(
        data.dtype)

    return reconstructed_data
Example 20
def fjlt(A, k):
    """
    A variant of FJLT. Embed each row of matrix A with FJLT. See the following resources:
        - The review section (page 3) of https://arxiv.org/abs/1909.04801
        - Page 1 of https://www.sketchingbigdata.org/fall17/lec/lec9.pdf
    
    Note:
        The embedding has the form S F D: S(ample), F(ourier/cosine transform), D(iagonal signs).
    """
    A = A.T
    d = A.shape[0]
    sign_vector = np.random.randint(0, 2, size=(d, 1)) * 2 - 1
    idx = np.zeros(k, dtype=int)
    idx[1:] = np.random.choice(d - 1, k - 1, replace=False) + 1
    DA = sign_vector * A
    FDA = dct(DA, axis=0, norm='ortho')
    A_embedded = np.sqrt(d / k) * FDA[idx]
    return A_embedded.T
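Usage sketch (added for illustration): embed the rows of a 500 x 2000 matrix into 256 dimensions; row norms should be approximately preserved. The matrix sizes and seed are arbitrary.

import numpy as np

rng = np.random.default_rng(1)
A = rng.standard_normal((500, 2000))
A_embedded = fjlt(A, 256)
print(A_embedded.shape)                        # (500, 256)
print(np.linalg.norm(A, axis=1)[:3])
print(np.linalg.norm(A_embedded, axis=1)[:3])  # roughly equal to the above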
Example 21
    def produce(self, inputs):

        dataframe = inputs
        processed_df = utils.pandas.DataFrame()
        try:
            for target_column in dataframe.columns:
                dct_input = dataframe[target_column].values
                dct_output = dct(x=dct_input,
                                 type=self._type,
                                 n=self._n,
                                 axis=self._axis,
                                 overwrite_x=self._overwrite_x,
                                 norm=self._norm,
                                 workers=self._workers)
                processed_df[target_column +
                             "_dct_coeff"] = pd.Series(dct_output)

        except IndexError:
            logging.warning("Index not found in dataframe")

        return processed_df
Example 22
    def __apply_trans(self, tensor, transformation, axis):

        if transformation == "dwt":

            # Only allow even axis size
            if tensor.shape[axis] % 2 != 0:
                raise ValueError(
                    f"{tensor.shape[axis]} is not a valid axis size for DWT. Only even sizes are allowed"
                )

            cA, cD = pywt.dwt(tensor, "haar", axis=axis)
            result = np.append(cA, cD, axis=axis)
        elif transformation == "dct":
            result = sfft.dct(tensor, axis=axis)
        elif transformation == "dft":
            raise NotImplementedError()
        else:
            raise ValueError(
                f"{transformation} is not a valid transformation")

        return result
Example 23
def get_mfcc(signal, bank_size, fft_size, sr):
    """
        Get MFCC from signal.

        # Args
            signal (ndarray, axis=(time,)): input signal
            bank_size (int): size of mel-filter-bank
            fft_size (int): window size of stft
            sr (int): sampling rate
        # Returns
            mfcc (ndarray, axis=(quefrency, frame)): MFCC feature
    """
    spec = np.abs(ex1.stft(signal, fft_size))
    filter = melfilterbank(bank_size, fft_size, sr)
    # apply mel-filter-bank to spectrogram
    melspec = filter @ spec

    melspec_db = librosa.amplitude_to_db(melspec)
    ceps = dct(melspec_db, axis=0)
    mfcc = ceps[1:13]
    return mfcc
Example 24
def FJLT(sz, rng=np.random.default_rng()):
    m, M    = sz
    d       = np.sign( rng.standard_normal(size=M) ).astype( np.int64 ) # or rng.choice([1, -1], M)
    ind     = rng.choice( M, size=m, replace=False, shuffle=False)
    # IMPORTANT: make sure axis=0
    DCT_type = 3  # 2 or 3
    myDCT   = lambda X : dct( X, norm='ortho',type=DCT_type, axis=0)
    # and its transpose
    myDCT_t = lambda X : idct( X, norm='ortho',type=DCT_type, axis=0)

    f       = lambda X : np.sqrt(M/m)*_subsample( myDCT( _elementwiseMultiply(d,X)) , ind)
    # and make adjoint operator
    def upsample(Y):
        if Y.ndim == 1:
            Z = np.zeros( M )
            Z[ind] = Y
        else:
            Z = np.zeros( (M,Y.shape[1]))
            Z[ind,:] = Y
        return Z
    adj     = lambda Z : np.sqrt(M/m)*_elementwiseMultiply(d,myDCT_t(upsample(Z)) )
        
    S       = LinearOperator( (m,M), matvec = f, matmat = f, rmatvec=adj,rmatmat=adj )
    return S
Example 25
def read_and_resize(path_to_vols, vol_name, ntup, toDir='./'):
    '''
   Open and pad (or truncate) the Chebyshev-Fourier-Fourier coefficients 
   of the kxky fields stored in CheckPoints. As they are stored in physical
   space, the interpolation is done by cosine transform along z, followed by padding 
   or truncation, then inverse cosine transform. Along x and y, the grid is 
   evenly spaced, and we elect to use linear interpolation (which seems faster for
   large resolutions, according to a few tests).
   '''
    ierror = 0
    domain_decomp_infos = np.fromfile(path_to_vols +
                                      '../Geometry/domDecmp.core0000',
                                      dtype=np.int32)
    NX = ntup[0]
    NY = ntup[1]
    NZ = ntup[2]
    NXAA = domain_decomp_infos[3]
    NYAA = domain_decomp_infos[4]
    NZAA = domain_decomp_infos[5]
    print('----------- :: Resizing (' + str(NXAA) + ',' + str(NYAA) + ',' +
          str(NZAA) + ') into' + ' (' + str(NX) + ',' + str(NY) + ',' +
          str(NZ) + ').')
    curPhys = np.fromfile(path_to_vols + vol_name,
                          dtype=np.float_).reshape(NXAA, NYAA, NZAA)
    coscoefs = dct(curPhys, axis=-1)
    if (NZ > NZAA):
        newcoefs = np.zeros((NXAA, NYAA, NZ), dtype=np.float_)
        newcoefs[:, :, :NZAA] = coscoefs
    else:
        newcoefs = coscoefs[:, :, :NZ]

    aux2 = idct(newcoefs, axis=-1) * NZ / NZAA

    ## now we need to interpolate in the xy-plane...
    aux1 = np.zeros((NXAA, ntup[1], ntup[2]), dtype=np.float_)
    if (NYAA == ntup[1]):
        aux1 = np.copy(aux2)
    elif (NYAA < ntup[1]):
        spectral_buffer = rfft(aux2, axis=1)
        padded_spectral_buffer = np.zeros((NXAA, ntup[1] // 2 + 1, ntup[2]),
                                          dtype=np.complex_)
        padded_spectral_buffer[:, :(NYAA // 2 + 1), :] = spectral_buffer
        aux1 = irfft(padded_spectral_buffer, axis=1) / NYAA * ntup[1]
        del padded_spectral_buffer, spectral_buffer
    else:
        spectral_buffer = rfft(aux2, axis=1)
        truncated_spectral_buffer = np.zeros((NXAA, ntup[1] // 2 + 1, ntup[2]),
                                             dtype=np.complex_)
        truncated_spectral_buffer[:, :(ntup[1] //
                                       3), :] = spectral_buffer[:, :(ntup[1] //
                                                                     3), :]
        truncated_spectral_buffer.imag[:, -1, :] = 0.
        truncated_spectral_buffer.imag[:, 0, :] = 0.
        aux1 = irfft(truncated_spectral_buffer, axis=1) / NYAA * ntup[1]
        del truncated_spectral_buffer, spectral_buffer
    del aux2
    deaPhys = np.zeros((ntup[0], ntup[1], ntup[2]), dtype=np.float_)
    if (NXAA == ntup[0]):
        deaPhys = np.copy(aux1)
    elif (NXAA < ntup[0]):
        spectral_buffer = rfft(aux1, axis=0)
        padded_spectral_buffer = np.zeros((ntup[0] // 2 + 1, ntup[1], ntup[2]),
                                          dtype=np.complex_)
        padded_spectral_buffer[:(NXAA // 2 + 1), :, :] = spectral_buffer
        deaPhys = irfft(padded_spectral_buffer, axis=0) / NXAA * ntup[0]
        del padded_spectral_buffer, spectral_buffer
    else:
        spectral_buffer = rfft(aux1, axis=0)
        truncated_spectral_buffer = np.zeros(
            (ntup[0] // 2 + 1, ntup[1], ntup[2]), dtype=np.complex_)
        truncated_spectral_buffer[:(ntup[0] //
                                    3), :, :] = spectral_buffer[:(ntup[0] //
                                                                  3), :, :]
        truncated_spectral_buffer.imag[-1, :, :] = 0.
        truncated_spectral_buffer.imag[0, :, :] = 0.
        deaPhys = irfft(truncated_spectral_buffer, axis=0) / NXAA * ntup[0]
        del truncated_spectral_buffer, spectral_buffer
    del aux1
    deaPhys.tofile(toDir + '/Restart/' + vol_name)
    return ierror
Example 26
def predictions(df1):
    """### Import Data"""
    data = df1  #pd.read_csv("../new_data.csv")[-100:]
    data["fecha"] = pd.to_datetime(data["fecha"])
    data = data.sort_values(by=['fecha']).reset_index(drop=True)
    """### Check for null values and outliers"""

    # Drop NA
    data.dropna(inplace=True)

    # Drop Outliers
    data = data[np.abs(stats.zscore(data.valores)) < 3]
    """## 2.- DCT & FFT Analysis
    ---

    ### Imports
    """

    N = len(data)
    t = np.linspace(0, 1, N, endpoint=False)

    x = data["valores"].values
    y = dct(x, norm='ortho')
    windows = []
    transformations = {}

    for i in range(2, 12):
        temp_window = np.zeros(N)
        temp_window[:N // i] = 1
        windows.append(temp_window)

    for idx, window in enumerate(windows):
        temp_transform = idct(y * window, norm='ortho')
        transformations[idx + 2] = temp_transform

    differences = {}
    for key in transformations:
        transformation = transformations[key]
        temp_diff = 0
        for idx, val in enumerate(transformation):
            if idx == 0:
                continue
            current_diff = np.round(abs(val - transformation[idx - 1]), 4)
            if current_diff != 0.0:
                temp_diff += current_diff

        if temp_diff != 0:
            differences[key] = temp_diff

    keys = list(transformations.keys())
    chosen_key = keys[0]
    secondary_key = keys[1]
    time_series = transformations[chosen_key]
    secondary_series = transformations[secondary_key]

    timestamps = data["fecha"].values
    transformed_data = pd.DataFrame({
        'fecha': timestamps,
        'valores': time_series
    })
    """## 4.- Model
    ---
    """
    """Rename DF columns"""

    transformed_data.columns = ['ds', 'y']
    """### FB Prophet"""

    pm = Prophet()

    pm.fit(transformed_data)

    pfuture = pm.make_future_dataframe(periods=7)

    pforecast = pm.predict(pfuture)

    # DataFrame as JSON to be returned to the front end
    #   |    |   |
    #   V    V   V
    forecast_json = pforecast.to_json(orient="split")

    return json.loads(forecast_json)

    mae = np.mean(
        abs(transformed_data.y - pforecast.yhat[:len(transformed_data.y)]))
    """### Prediction Convolutions

    #### FB Prophet
    """
    # Array to be returned to the front end
    #   |    |   |
    #   V    V   V
    prophet_future_conv = convolve_2_dfs(pforecast, climaData, "yhat", "prec")
Example 27
def dct2(block):
    return dct(dct(block.T, norm='ortho').T, norm='ortho')
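Companion sketch (added here, not in the original): the matching 2-D inverse and a round-trip check on a random 8x8 block, assuming a scipy dct/idct pair (the module's own import is not shown, so scipy.fftpack is used below).

import numpy as np
from scipy.fftpack import idct

def idct2(block):
    return idct(idct(block.T, norm='ortho').T, norm='ortho')

block = np.random.rand(8, 8)
np.testing.assert_allclose(idct2(dct2(block)), block, atol=1e-10)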
Example 28
def kde1d(x, n=1024, limits=None):
    """
    Estimates the 1d density from discrete observations.

    The input is a list/array `x` of numbers that represent discrete
    observations of a random variable. They are binned on a grid of
    `n` points within the data `limits`, if specified, or within
    the limits given by the values' range. `n` will be coerced to the
    next highest power of two if it isn't one to begin with.

    The limits may be given as a tuple (`xmin`, `xmax`) or a single
    number denoting the upper bound of a range centered at zero.
    If any of those values are `None`, they will be inferred from the
    data.

    After binning, the function determines the optimal bandwidth
    according to the diffusion-based method. It then smooths the
    binned data over the grid using a Gaussian kernel with a standard
    deviation corresponding to that bandwidth.

    Returns the estimated `density` and the `grid` upon which it was
    computed, as well as the optimal `bandwidth` value the algorithm
    determined. Raises `ValueError` if the algorithm did not converge.
    """

    # Convert to array in case a list is passed in.
    x = array(x)

    # Round up number of bins to next power of two.
    n = int(2**ceil(log2(n)))

    # Determine missing data limits.
    if limits is None:
        xmin = xmax = None
    elif isinstance(limits, tuple):
        (xmin, xmax) = limits
    else:
        xmin = -limits
        xmax = +limits
    if None in (xmin, xmax):
        delta = x.max() - x.min()
        if xmin is None:
            xmin = x.min() - delta/10
        if xmax is None:
            xmax = x.max() + delta/10

    # Determine data range, required for scaling.
    Δx = xmax - xmin

    # Determine number of data points.
    N = len(x)

    # Bin samples on regular grid.
    (binned, edges) = histogram(x, bins=n, range=(xmin, xmax))
    grid = edges[:-1]

    # Compute the discrete cosine transform, then adjust the first component.
    transformed = dct(binned/N)
    transformed[0] /= 2

    # Pre-compute squared indices and transform components before solver loop.
    k  = arange(n, dtype='float')      # "float" avoids integer overflow.
    k2 = k**2
    a2 = (transformed/2)**2

    # Define internal function to be solved iteratively.
    def ξγ(t, l=7):
        """Returns ξ γ^[l] as a function of diffusion time t."""
        f = 2*π**(2*l) * sum(k2**l * a2 * exp(-π**2 * k2*t))
        for s in range(l-1, 1, -1):
            K = product(range(1, 2*s, 2)) / sqrt(2*π)
            C = (1 + (1/2)**(s+1/2)) / 3
            t = (2*C*K/N/f)**(2/(3+2*s))
            f = 2*π**(2*s) * sum(k2**s * a2 * exp(-π**2 * k2*t))
        return (2*N*sqrt(π)*f)**(-2/5)

    # Solve for optimal diffusion time t*.
    try:
        ts = brentq(lambda t: t - ξγ(t), 0, 0.1)
    except ValueError:
        raise ValueError('Bandwidth optimization did not converge.') from None

    # Apply Gaussian filter with optimized kernel.
    smoothed = transformed * exp(-π**2 * ts/2 * k**2)

    # Reverse transformation after adjusting first component.
    smoothed[0] *= 2
    inverse = idct(smoothed)

    # Normalize density.
    density = inverse * n/Δx

    # Determine bandwidth from diffusion time.
    bandwidth = sqrt(ts) * Δx

    # Return results.
    return (density, grid, bandwidth)
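Usage sketch (added for illustration): estimate the density of 10,000 standard-normal samples and check that it integrates to roughly one over the returned grid; the sample size and grid resolution are arbitrary choices.

import numpy as np

samples = np.random.normal(size=10_000)
density, grid, bandwidth = kde1d(samples, n=1024)
print(bandwidth)
print(np.sum(density) * (grid[1] - grid[0]))   # should be close to 1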
Example 29
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-vinfo',
                        '--vid_info_path',
                        type=argparse.FileType('r'),
                        default=False)
    parser.add_argument('-c',
                        '--calibration_path',
                        type=argparse.FileType('r'),
                        default=False)
    parser.add_argument('-l',
                        '--limit',
                        nargs=2,
                        type=float,
                        default=[None, None])
    parser.add_argument('-t', '--max_time', type=int, default=512)
    parser.add_argument('-f', '--max_freq_fraction', type=float, default=0.4)
    parser.add_argument('ddm_npy_path', type=Path)
    dimensions_subparsers = parser.add_subparsers(dest='dimensions',
                                                  required=True)
    d2i_parser = dimensions_subparsers.add_parser(
        '2di', help='2d view, varying j, fix i')
    d2i_parser.add_argument('i', type=int, default=0)
    d2_parser = dimensions_subparsers.add_parser(
        '2d', help='2d view, polar coordinates')
    d2_parser.add_argument('-a', '--angular_bins', type=int, default=18)
    d2_parser.add_argument('-ao', '--angle_offset', type=float, default=0.)
    d2_parser.add_argument('-p', '--radial_bin_size', type=int, default=2)
    d2_parser.add_argument('-s', '--save', type=str, default=False)
    d2_parser.add_argument('angle_index', type=int)
    dimensions_subparsers.add_parser('3d')
    params = parser.parse_args()

    ddm_array = np.load(params.ddm_npy_path, mmap_mode='r')
    print('ddm_array.shape =', ddm_array.shape)

    wavenumber_factor = 1
    wavenumber_unit = r'\mathrm{pixel}^{-1}'
    max_time_index = params.max_time
    # Only view 40% of the available freq data by default
    max_freq_index = int(params.max_freq_fraction * max_time_index)
    ddm_array = ddm_array[..., :max_time_index]
    print('new ddm_array.shape =', ddm_array.shape)

    if params.vid_info_path:
        vid_info = json.load(params.vid_info_path)
        frame_interval = vid_info['framerate'][1] / vid_info['framerate'][0]
        freq_factor = np.pi / ((max_time_index - 1) * frame_interval)
        if params.calibration_path:
            calibration = json.load(params.calibration_path)
            wavenumber_factor = 2 * np.pi / (vid_info['fft']['size'] *
                                             calibration['calibration_factor'])
            wavenumber_unit = '\\mathrm{{rad}}\\,\\mathrm{{{}}}^{{-1}}'.format(
                calibration['physical_unit'])
            print(f'wavenumber_factor = {wavenumber_factor}')
        print(f'frame_interval = {frame_interval} s')
    else:
        frame_interval = 1
        freq_factor = 1

    if params.dimensions == '3d' or params.dimensions == '2di':
        ddm_dct = dct(ddm_array, type=1)
        ddm_dct /= (2 * (max_time_index - 1))
        print(ddm_dct.shape)
        ddm_dct *= -1

        if params.dimensions == '3d':
            fig2 = plot_ddm_dct_volume(ddm_dct[:, :, :max_freq_index],
                                       wavenumber_factor, wavenumber_unit,
                                       freq_factor)
            fig2.show()

        elif params.dimensions == '2di':
            y = params.i
            if y > ddm_dct.shape[0] // 2:
                y -= ddm_dct.shape[0]
            qy = y * wavenumber_factor

            i = params.i
            if i < 0:
                i += ddm_dct.shape[0]

            ddm_osc = ddm_dct[i, :, :max_freq_index]

            fig1, ax1 = plt.subplots(figsize=(8., 6.))
            fig1: matplotlib.figure.Figure
            ax1: matplotlib.axes.Axes
            plot_ddm_osc_slice(ddm_osc, fig1, ax1, wavenumber_factor,
                               wavenumber_unit, freq_factor, params.limit[0],
                               params.limit[1])
            fig1.suptitle(
                'Heatmap of the inverse of the coefficients of\n'
                'the cosine decomposition of $I(q,\\tau)$ (i.e. $-C_2(q,\\Omega)$) '
                f'along $q_y={qy}\\,{wavenumber_unit}$')
            ax1.set_ylabel('$\\Omega$ (rad $\\mathrm{s}^{-1}$)')
            ax1.set_xlabel(f'$q_x$ (${wavenumber_unit}$)')
            # fig.savefig(cdir + '/../plot/try2_multifreq_decomposition.png', dpi=300)
            plt.show()

    elif params.dimensions == '2d':
        ddm_polar, median_angle, _, _, _, \
            lower_angle, _, upper_angle, _, blank_bins = \
            map_to_polar(ddm_array, params.angular_bins, params.radial_bin_size, ddm_array.shape[1] - 1,
                         params.angle_offset, True)
        print('ddm_polar.shape: ', ddm_polar.shape)
        print('blank_bins:', blank_bins)

        ddm_dct = dct(ddm_polar, type=1)
        ddm_dct /= (2 * (max_time_index - 1))
        print(ddm_dct.shape)
        ddm_dct *= -1
        ddm_dct[..., 0] = np.nan

        if params.angle_index >= 0:
            ddm_osc = ddm_dct[params.angle_index, :, :max_freq_index]
            print('angle between:', lower_angle[params.angle_index],
                  upper_angle[params.angle_index])
            print('median angle:', median_angle[params.angle_index])

            fig, ax = plt.subplots(figsize=(8., 6.))
            fig: matplotlib.figure.Figure
            ax: matplotlib.axes.Axes
            plot_ddm_osc_slice(ddm_osc, fig, ax, wavenumber_factor,
                               wavenumber_unit, freq_factor, params.limit[0],
                               params.limit[1], params.radial_bin_size)
            fig.suptitle(
                'Radial heatmap of the inverse of the coefficients of '
                'the cosine decomposition\nof $I(q,\\tau)$ (i.e. $-C_2(q,\\Omega)$) '
                f'averaged within $q_\\theta$ between ${lower_angle[params.angle_index]}\\degree$ and '
                f'${upper_angle[params.angle_index]}\\degree$')
            ax.set_ylabel('$\\Omega$ (rad $\\mathrm{s}^{-1}$)')
            ax.set_xlabel(f'$q_r$ (${wavenumber_unit}$)')
            if params.save:
                fig.savefig(params.save, dpi=300)
            else:
                plt.show()
        elif params.angle_index == -1:
            fig_path = params.ddm_npy_path.with_suffix('.png')
            fig_path: Path
            for ai in range(ddm_dct.shape[0]):
                ddm_osc = ddm_dct[ai, :, :max_freq_index]

                fig, ax = plt.subplots(figsize=(8., 6.))
                fig: matplotlib.figure.Figure
                ax: matplotlib.axes.Axes
                plot_ddm_osc_slice(ddm_osc, fig, ax, wavenumber_factor,
                                   wavenumber_unit, freq_factor,
                                   params.limit[0], params.limit[1],
                                   params.radial_bin_size)
                fig.suptitle(
                    'Radial heatmap of the inverse of the coefficients of '
                    'the cosine decomposition\nof $I(q,\\tau)$ (i.e. $-C_2(q,\\Omega)$) '
                    f'averaged within $q_\\theta$ between ${lower_angle[ai]}\\degree$ and '
                    f'${upper_angle[ai]}\\degree$')
                custom_suffix = ('.' + params.save) if params.save else str()
                ax.set_ylabel('$\\Omega$ (rad $\\mathrm{s}^{-1}$)')
                ax.set_xlabel(f'$q_r$ (${wavenumber_unit}$)')
                fig.savefig(
                    fig_path.with_stem(fig_path.stem +
                                       f'{custom_suffix}.polar.freq{ai:02}'),
                    dpi=300)
        elif params.angle_index == -2:
            matplotlib.rcParams.update({'font.size': 8.5})
            fig = plt.figure(figsize=(17 / 2.54, 23.25 / 2.54),
                             dpi=400.,
                             constrained_layout=True)
            gridspec = fig.add_gridspec(5, 4, hspace=2.25 / 23.25)
            # this means we only support 18 angles
            grid_indices = (0, 1, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
                            17, 18, 19)
            axs_flat = [fig.add_subplot(gridspec[i]) for i in grid_indices]
            for ai, ax in zip(range(ddm_dct.shape[0]), axs_flat):
                ddm_osc = ddm_dct[ai, :, :max_freq_index]
                img = plot_ddm_osc_slice(ddm_osc, fig, ax, wavenumber_factor,
                                         wavenumber_unit, freq_factor,
                                         params.limit[0], params.limit[1],
                                         params.radial_bin_size, False)
                ax.set_title(
                    f'${lower_angle[ai]:.0f}\\degree \\leq q_\\theta < {upper_angle[ai]:.0f}\\degree$',
                    {'fontsize': 'medium'})
            for i in (0, 2, 6, 10, 14):
                axs_flat[i].set_ylabel('$\\Omega$ (rad $\\mathrm{s}^{-1}$)')
            for i in (14, 15, 16, 17):
                axs_flat[i].set_xlabel(f'$q_r$ (${wavenumber_unit}$)')
            fig.colorbar(img, ax=axs_flat, shrink=0.35, extend='max')
            custom_suffix = ('.' + params.save) if params.save else str()
            fig_path = params.ddm_npy_path.with_name(
                params.ddm_npy_path.stem +
                f'{custom_suffix}.polar.allfreq.svg')
            fig.savefig(fig_path)
Example 30
assert N == len(x)
assert n == ref['n']
assert n == len(ref['density'])

# Determine data range, required for scaling.
Δx = xmax - xmin

# Determine number of data points.
N = len(x)

# Bin samples on regular grid.
(binned, edges) = histogram(x, bins=n, range=(xmin, xmax))
grid = edges[:-1]

# Compute the discrete cosine transform. Adjust the first component.
transformed = dct(binned / N)
transformed[0] /= 2

# Pre-compute squared indices and transform components before solver loop.
k = arange(n, dtype='float')
k2 = k**2
a2 = (transformed / 2)**2


# Define internal function to be solved iteratively.
def ξγ(t, l=7):
    f = 2 * π**(2 * l) * sum(k2**l * a2 * exp(-π**2 * k2 * t))
    for s in range(l - 1, 1, -1):
        K = product(range(1, 2 * s, 2)) / sqrt(2 * π)
        C = (1 + (1 / 2)**(s + 1 / 2)) / 3
        t = (2 * C * K / N / f)**(2 / (3 + 2 * s))