def lpf(image, sigma, mode=2):
    (Mx, My) = image.shape
    if mode == 1:
        kernel = matlab_style_gauss2D(image.shape, sigma)
        kernel /= numpy.max(kernel)

        if Mx == 1 or My == 1:
            fft = numpy.fft.fft(image)
            fft = numpy.fft.fftshift(fft)
            fft *= kernel
            result = numpy.real(numpy.fft.ifft(numpy.fft.ifftshift(fft)))  # ifftshift undoes the earlier fftshift
        else:
            fft = numpy.fft.fftshift(numpy.fft.fft2(image))
            fft *= kernel
            result = numpy.real(numpy.fft.ifft2(numpy.fft.ifftshift(fft)))
    elif mode == 2:
        new_dim = 2 * numpy.array(image.shape)
        kernel = matlab_style_gauss2D((new_dim[0], new_dim[1]), sigma * 2)
        kernel /= numpy.max(kernel)
        kernel = kernel[Mx:, My:]

        image = image.astype(numpy.double)
        if Mx == 1 or My == 1:
            dct = fftpack.dct(image, type=1)
            dct *= kernel
            result = numpy.real(fftpack.idct(dct, type=1))
        else:
            dct = fftpack.dct(fftpack.dct(image.T, type=2, norm='ortho').T, type=2, norm='ortho')
            dct *= kernel
            result = numpy.real(fftpack.idct(fftpack.idct(dct.T, type=2, norm='ortho').T, type=2, norm='ortho'))
    return result
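The helper matlab_style_gauss2D used above is not shown here. A minimal self-contained sketch of the same idea for the DCT path, a Gaussian low-pass applied to orthonormal 2-D DCT coefficients, might look like this (gaussian_mask, dct_lowpass and the random test image are illustrative assumptions, not part of the original code):

import numpy as np
from scipy import fftpack

def gaussian_mask(shape, sigma):
    # Separable Gaussian centred on the (0, 0) DCT coefficient, peak normalised to 1
    rows = np.exp(-0.5 * (np.arange(shape[0]) / sigma) ** 2)
    cols = np.exp(-0.5 * (np.arange(shape[1]) / sigma) ** 2)
    return np.outer(rows, cols)

def dct_lowpass(image, sigma):
    # Forward 2-D DCT-II, attenuate high frequencies, inverse 2-D DCT-II
    coeffs = fftpack.dct(fftpack.dct(image, axis=0, norm='ortho'), axis=1, norm='ortho')
    coeffs *= gaussian_mask(image.shape, sigma)
    return fftpack.idct(fftpack.idct(coeffs, axis=1, norm='ortho'), axis=0, norm='ortho')

img = np.random.rand(64, 64)
print(dct_lowpass(img, sigma=8.0).std() < img.std())  # True: smoothing reduces variance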
Example #2
def process_cube(img, weight, quality):
    # TODO: check to make sure that size of img, and q_tables are consistent

    img = img.copy()

    # print('process_cube input: {}'.format(img))

    this_quality = int(np.round(np.max(weight)*quality))  # must be an int: used as a table index below

    if this_quality < 0:
        this_quality = 0
    if this_quality > quality - 1:
        this_quality = quality - 1

    for i in range(img.shape[3]):
        img[:, :, :, i] = cv2.cvtColor(img[:, :, :, i], cv2.COLOR_BGR2LAB)
    img = np.float32(img)

    # print('process_cube pre DCT: {}'.format(img))

    # img_dct = dct(dct(dct(img, axis=0)/4, axis=1)/4, axis=3)/4
    img_dct = dct(dct(img, axis=0)/4, axis=1)/4

    Q_luma = luminance_tables[:, :, :, this_quality].astype(np.float32)
    Q_chroma = chrominance_tables[:, :, :, this_quality].astype(np.float32)

    # Q_luma[:, :, :] = .01
    # Q_chroma[:, :, :] = .01

    # print('Q_luma: {}'.format(Q_luma))
    # print('Q_chroma: {}'.format(Q_chroma))

    # print('dct, pre rounding: {}'.format(img_dct))
    img_dct[:, :, 0, :] /= Q_luma
    img_dct[:, :, 1, :] /= Q_chroma
    img_dct[:, :, 2, :] /= Q_chroma

    img_dct = np.round(img_dct)

    img_dct[:, :, 0, :] *= Q_luma
    img_dct[:, :, 1, :] *= Q_chroma
    img_dct[:, :, 2, :] *= Q_chroma
    # print('dct, post rounding: {}'.format(img_dct))

    # img_processed = idct(idct(idct(img_dct, axis=0)/4, axis=1)/4, axis=3)/4
    img_processed = idct(idct(img_dct, axis=0)/4, axis=1)/4

    # print('process_cube post DCT: {}'.format(img_processed))

    img_processed = np.clip(img_processed, 0, 255)
    img_processed = np.uint8(img_processed)

    for i in range(img.shape[3]):
        img_processed[:,:,:,i] = cv2.cvtColor(img_processed[:,:,:,i], cv2.COLOR_LAB2BGR)

    # print('process_cube output: {}'.format(img))

    # print('pre dct / post_dct: {}'.format(pre_dct / post_dct))
    return img_processed
def idct2(arr):
    """
    @params { np.ndarray } arr
    @return { np.ndarray }
    """
    array = np.float64(arr)
    result = idct(idct(array, axis=0), axis=1)
    return result
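Because the idct calls above use scipy's default norm=None, pairing idct2 with an unnormalised forward transform requires a scale of 2N per axis on the round trip. A small illustrative check (the dct2 counterpart below is an assumption, with dct/idct taken from scipy.fftpack):

import numpy as np
from scipy.fftpack import dct, idct

def dct2(arr):
    # Unnormalised 2-D DCT-II, the forward counterpart of idct2 above (assumed helper)
    return dct(dct(np.float64(arr), axis=0), axis=1)

a = np.random.rand(6, 8)
rec = idct2(dct2(a)) / (2 * a.shape[0] * 2 * a.shape[1])  # scipy's norm=None convention
print(np.allclose(rec, a))  # True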
Example #4
def LSDecompFW(wav, width= 16384, max_nnz_rate=8000.0/262144.0, sparsify = 0.01, taps = 10, 
               level = 3, wl_weight = 1, verbose = False,fc=120):
    
    MaxiterA = 60
   
    length = len(wav)
    
    
    n = sft.next_fast_len(length)
    
    signal = np.zeros((n))
    signal[0:length] = wav[0:length]
     
    h0,h1 = daubcqf(taps,'min')
    L = level
    
    
    #print(n)
    original_signal = lambda s: sft.idct(s[0:n]) + (1.0)*(wl_weight)*idwt(s[n:], h0, h1, L)[0]  # 0-based: the wavelet part starts at index n
    LSseparate = lambda x: np.concatenate([sft.dct(x),(1.0)*(wl_weight)*dwt(x,h0,h1,L)[0]],axis=0)
    
    # measurement
    y = signal 
    #FISTA
    ###############################
    cnnz = float("Inf")

    
    c = signal 
    temp = LSseparate(y)
    temp2 = original_signal(temp)
    print('reconstructed signal shape: ' + str(temp2.shape))
    
    maxabsThetaTy = max(abs(temp))
    
    while cnnz > max_nnz_rate * n:
        # FISTA
        tau = sparsify * maxabsThetaTy
        tolA = 1.0e-7

        fh = (original_signal, LSseparate)

        c = relax.fista(A=fh, b=y, x=LSseparate(c), tol=tolA, l=tau, maxiter=MaxiterA)[0]

        cnnz = np.size(np.nonzero(original_signal(c)))

        print('nnz = ' + str(cnnz) + ' / ' + str(n) + ' at tau = ' + str(tau))
        sparsify = sparsify * 2
        if sparsify == 0.166:
            sparsify = 0.1
    signal_dct = sft.idct(c[0:n])
    signal_dct = signal_dct[0:length]
    signal_wl = (1.0) * float(wl_weight) * idwt(c[n:], h0, h1, level)[0]
    signal_wl = signal_wl[0:length]
   
    return  signal_dct,signal_wl
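LSDecompFW depends on external wavelet routines (daubcqf, dwt, idwt) and on relax.fista, so it cannot run on its own. The DCT half of the idea, finding a sparse DCT representation by soft-thresholding, can be illustrated with a self-contained sketch (this is an illustration of the concept, not the original FISTA solver):

import numpy as np
from scipy import fftpack as sft

def soft_threshold(v, tau):
    # Proximal operator of the l1 norm; this is the elementwise step FISTA iterates
    return np.sign(v) * np.maximum(np.abs(v) - tau, 0.0)

rng = np.random.default_rng(0)
t = np.linspace(0, 1, 1024)
wav = np.sin(2 * np.pi * 5 * t) + 0.1 * rng.standard_normal(t.size)

coeffs = sft.dct(wav, norm='ortho')
sparse_coeffs = soft_threshold(coeffs, tau=0.5)
approx = sft.idct(sparse_coeffs, norm='ortho')
print(np.count_nonzero(sparse_coeffs), 'of', coeffs.size, 'DCT coefficients kept')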
Example #5
def laplacian_pca_TV(res, x, f0, lam, gam, iter = 10):
    '''
    TV version of Laplacian embedding
    :param res: resolution of the grid
    :param x: numpy array of data in rows
    :param f0: initial embedding matrix
    :param lam: sparsity parameter
    :param gam: fidelity parameter
    :param iter: number of iterations to carry out
    :return: returns embedding matrix
    '''
    # f0 is an initial projection
    n = res ** 2
    num_data = x.shape[0]

    D = sparse_discrete_diff(res)
    M = 1/(lam*laplacian_eigenvalues(res).reshape(n)+gam)

    f = f0
    y = x .dot(f)
    z = shrink(y .dot(D.T), lam)

    for i in range(iter):

        # Update z
        z_old = z
        z = shrink(y .dot (D.T), lam)

        # Update f
        f_old = f
        u, s, v = la.svd(x.T .dot (y), full_matrices=False)
        f = u .dot(v)

        # Update y
        y_old = y
        q = lam * z .dot (D) + gam * x .dot(f)
        # print('norm of y before is %f' % np.sum(q ** 2))
        y = fftpack.dct(q.reshape((num_data, res, res)), norm='ortho') # Images unraveled as rows
        y = fftpack.dct(np.swapaxes(y,1,2), norm='ortho') # Swap and apply dct on the other side
        # print('norm of y after is %f' % np.sum(y ** 2))
        y = np.apply_along_axis(lambda v: M * v, 1, y.reshape((num_data, n)))
        y = fftpack.idct(y.reshape((num_data, res, res)), norm='ortho')
        y = fftpack.idct(np.swapaxes(y,1,2), norm='ortho')
        y = y.reshape((num_data, n))

        zres = np.sqrt(np.sum((z - z_old) ** 2))
        znorm = np.sqrt(np.sum((z)**2))
        yres = np.sqrt(np.sum((y - y_old) ** 2))
        ynorm = np.sqrt(np.sum((y)**2))
        fres = np.sqrt(np.sum((f - f_old) ** 2))

        value = np.sum(abs(z)) + 0.5*lam*np.sum((z-y .dot(D.T))**2) + 0.5*gam*np.sum((y- x .dot(f) )**2)
        print('Iter %d Val %f Z norm %f Z res %f Ynorm %f Y res %f F res %f' % (i, value, znorm, zres, ynorm, yres, fres))

    return f
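Three helpers used above (sparse_discrete_diff, laplacian_eigenvalues, shrink) are not shown. For reference, shrink is conventionally elementwise soft-thresholding; judging from the objective printed in the loop, the threshold here would be 1/lam, but this definition is only an assumption about the missing helper:

import numpy as np

def shrink(v, lam):
    # Soft-thresholding; threshold 1/lam inferred from the objective
    # sum|z| + 0.5*lam*||z - y D^T||^2 (the original helper may use another convention)
    return np.sign(v) * np.maximum(np.abs(v) - 1.0 / lam, 0.0)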
Example #6
    def idct(self, arr, coef_size=0):
        if coef_size == 0:
            idcta2 = fftpack.idct(arr, norm='ortho')
            return idcta2

        arrlen = len(arr)
        if coef_size > arrlen:
            new_arr = self.interpolation_with_zeros(arr, coef_size)
        else:
            new_arr = arr[0:int(coef_size)]

        idcta2 = fftpack.idct(new_arr, norm='ortho')
        return idcta2
def razafindradina_embed(grayscale_container_path, grayscale_watermark_path, watermarked_image_path, alpha):
    """    
    Razafindradina embedding method implementation. 
    
    Outputs the resulting watermarked image
    
    23-July-2015
    """

    grayscale_container_2darray = numpy.asarray(Image.open(grayscale_container_path).convert("L"))
    grayscale_watermark_2darray = numpy.asarray(Image.open(grayscale_watermark_path).convert("L"))

    assert (
        (grayscale_container_2darray.shape[0] == grayscale_container_2darray.shape[1])
        and (grayscale_container_2darray.shape[0] == grayscale_watermark_2darray.shape[0])
        and (grayscale_container_2darray.shape[1] == grayscale_watermark_2darray.shape[1])
    ), "GrayscaleContainer and GrayscaleWatermark sizes do not match or not square"

    # Perform DCT on GrayscaleContainer

    # print grayscale_container_2darray

    gcdct = dct(dct(grayscale_container_2darray, axis=0, norm="ortho"), axis=1, norm="ortho")

    # print grayscale_container_2darray

    # Perform SchurDecomposition on GrayscaleWatermark

    gwsdt, gwsdu = schur_decomposition(grayscale_watermark_2darray)

    # alpha-blend GrayscaleWatermark TriangularMatrix into GrayscaleContainer DCT coeffs with alpha

    gcdct += gwsdt * alpha

    # Perform IDCT on GrayscaleContainer DCT coeffs to get WatermarkedImage

    watermarked_image_2darray = idct(idct(gcdct, axis=0, norm="ortho"), axis=1, norm="ortho")

    watermarked_image_2darray[watermarked_image_2darray > 255] = 255
    watermarked_image_2darray[watermarked_image_2darray < 0] = 0

    watermarked_image = Image.fromarray(numpy.uint8(watermarked_image_2darray))

    # watermarked_image.show()

    # Write image to file

    watermarked_image.save(watermarked_image_path)

    return
Example #8
def inverse_transform(data):
    result = []
    for i in range(len(data[0])):
        result.append([])

    for i in range(len(data)):
        partial_result = idct(data[i])
        for j in range(len(partial_result)):
            result[j].append(partial_result[j])

    final_result = []
    for i in range(len(result)):
        partial_result = idct(result[i])
        final_result.append(partial_result)
    return final_result
Example #9
def idctii(x, axes=None):
    """
    Compute a multi-dimensional inverse DCT-II over specified array axes.
    This function is implemented by calling the one-dimensional inverse
    DCT-II :func:`scipy.fftpack.idct` with normalization mode 'ortho'
    for each of the specified axes.

    Parameters
    ----------
    x : array_like
      Input array
    axes : sequence of ints, optional (default None)
      Axes over which to compute the inverse DCT-II.

    Returns
    -------
    y : ndarray
      Inverse DCT-II of input array
    """

    if axes is None:
        axes = list(range(x.ndim))
    for ax in axes[::-1]:
        x = fftpack.idct(x, type=2, axis=ax, norm='ortho')
    return x
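A quick round-trip check for idctii, using a matching forward transform (the dctii counterpart is an assumption written in the same style):

import numpy as np
from scipy import fftpack

def dctii(x, axes=None):
    # Forward multi-dimensional orthonormal DCT-II over the given axes (assumed helper)
    if axes is None:
        axes = list(range(x.ndim))
    for ax in axes:
        x = fftpack.dct(x, type=2, axis=ax, norm='ortho')
    return x

a = np.random.rand(4, 5, 6)
print(np.allclose(idctii(dctii(a)), a))  # True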
Example #10
def convolveGaussianDCT(x, sigma, pad_sigma=4, mode="same", cache={}):
    """
    1D convolution of x with Gaussian of width sigma pixels
    If pad_sigma>0, pads ends with zeros by int(pad_sigma*sigma) pixels
    Otherwise does unpadded fast cosine transform, hence reflection from the ends
    """

    fill = int(pad_sigma * sigma)
    actual_size = x.size + fill * 2
    if fill > 0:
        s = nearestFFTnumber(actual_size)
        fill2 = s - x.size - fill
        padded_x = np.pad(x, (fill, fill2), mode="constant")
    else:
        padded_x = x

    s = padded_x.size
    hnorm = sigma / float(s)
    gauss = cache.get((s, hnorm))
    if gauss is None:
        gauss = np.exp(-(np.arange(0, s) * (np.pi * hnorm)) ** 2 / 2.0)
        cache[(s, hnorm)] = gauss
    res = fftpack.idct(fftpack.dct(padded_x, overwrite_x=fill > 0) * gauss, overwrite_x=fill > 0) / (2 * s)
    if fill == 0:
        return res
    if mode == "same":
        return res[fill:-fill2]
    elif mode == "valid":
        return res[fill * 2 : -fill2 - fill]
    else:
        raise ValueError("mode not supported for convolveGaussianDCT")
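With pad_sigma=0 the function above avoids the external nearestFFTnumber helper, so it can be exercised directly. A small usage sketch on an impulse (the test data are illustrative):

import numpy as np

x = np.zeros(256)
x[128] = 1.0  # unit impulse: the output approximates a Gaussian of width sigma pixels
y = convolveGaussianDCT(x, sigma=5.0, pad_sigma=0)
print(y.sum())  # ~1.0: the DC term is untouched, so total mass is preserved
print(y.max())  # roughly 1/(sigma*sqrt(2*pi)) ~ 0.08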
def partialChebyshev(field, order=1, halfLength=None):

    if halfLength is None:
        halfLength = 1.0

    if len(shape(field)) == 2:
        length, breadth = shape(field)
    else:
        length = len(field)
        breadth = 1
    print(length)


    trans = dct(field, type=1, axis=0)
    coeff = arange(length, dtype=float)
    temp = zeros(shape(field), dtype=float)

    trans = 2.0*trans*coeff

    temp[:, length-2] = trans[:, length-1]

    for i in arange(length-2, 0, -1):
        temp[:, i-1] = temp[:, i+1] + trans[:, i]


    return idct(temp, type=1, axis=0)/(2.0*(length-1)*halfLength)
Example #12
def spectrogramToSnippet(spectrogram):
    window_len = spectrogram.shape[1]  # assumed: the original relied on an undefined module-level window_len
    snippet = np.zeros(shape=(spectrogram.shape[0] * spectrogram.shape[1]))
    num_windows = spectrogram.shape[0]
    for i in range(num_windows):
        idx = [i * window_len, (i + 1) * window_len]
        # slice assignment (the original indexed a 1-D array with two indices and referenced an undefined `windows`)
        snippet[idx[0]:idx[1]] = idct(spectrogram[i, :])
    return snippet
Example #13
def icosine_transform(data):
    w, h = data.shape
    image = data.copy()
    for i in range(w):
        image[i][:] = idct(data[i][:], norm='ortho')

    return image
Example #14
def write_to_image(path, text):
    x1, x2, x3, y1, y2, y3 = [4, 5, 3, 3, 5, 4]
    index = 0
    D = 5
    img = Image.open(path)
    img.getdata()
    bitext = text_to_binary(text)
    bitext = bitext + '0000000000000000'
    r, g, b = [np.array(x) for x in img.split()]
    lx, ly = r.shape  # shape is an attribute, not a method
    for x in range(0, lx - 2, 8):
        for y in range(0, ly - 2, 8):
            if index == len(bitext) - 1:
                break
            metric = r[x:x + 8, y:y + 8].astype('float')
            metric = dct(metric, norm='ortho')
            if bitext[index] == '1':  # bitext is a string of '0'/'1' characters
                metric[x1, y1] = max(metric[x1, y1], metric[x3, y3] + D + 1)
                metric[x2, y2] = max(metric[x2, y2], metric[x3, y3] + D + 1)
            else:
                metric[x1, y1] = min(metric[x1, y1], metric[x3, y3] - D - 1)
                metric[x2, y2] = min(metric[x2, y2], metric[x3, y3] - D - 1)
            index = index + 1
            metric = idct(metric, norm='ortho')
            r[x:x + 8, y:y + 8] = metric.astype('uint8')
    im = Image.merge("RGB", [Image.fromarray(x) for x in [r, g, b]])
    im.save('%s_writed' % path)
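write_to_image hides each bit by pushing two mid-frequency DCT coefficients above or below a third by a margin D. A self-contained sketch of that comparison on a single 8x8 block, with a matching read-out (embed_bit and read_bit are illustrative helpers; the original extraction routine is not shown here):

import numpy as np
from scipy.fftpack import dct, idct

x1, x2, x3, y1, y2, y3, D = 4, 5, 3, 3, 5, 4, 5

def embed_bit(block, bit):
    # Raise or lower coefficients (x1, y1) and (x2, y2) relative to (x3, y3) by at least D + 1
    m = dct(block.astype(float), norm='ortho')
    ref = m[x3, y3]
    if bit == '1':
        m[x1, y1] = max(m[x1, y1], ref + D + 1)
        m[x2, y2] = max(m[x2, y2], ref + D + 1)
    else:
        m[x1, y1] = min(m[x1, y1], ref - D - 1)
        m[x2, y2] = min(m[x2, y2], ref - D - 1)
    return idct(m, norm='ortho')

def read_bit(block):
    m = dct(block.astype(float), norm='ortho')
    return '1' if m[x1, y1] > m[x3, y3] and m[x2, y2] > m[x3, y3] else '0'

block = np.random.randint(0, 256, (8, 8))
print(read_bit(embed_bit(block, '1')), read_bit(embed_bit(block, '0')))  # 1 0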
Example #15
    def decompress(self, qmat, block_size,
                   point_count, interval, min_ts):
        """Decompresses the bitarray and returns a Dataset object."""
        coeffs = zeros(block_size)
        values = zeros(point_count)
        ind = 0
        i = 0
        block_num = 0
        while ind < self.bits.length():
            if qmat[i] == 52:
                coeffs[i] = 0.
            elif ind > len(self.bits) - (64 - qmat[i]):
                # File is over
                break
            else:
                v = self.bits[ind:ind + 64 - qmat[i]]
                v.extend(qmat[i] * (False,))
                coeffs[i] = struct.unpack(">d", v.tobytes())[0]
                ind += 64 - qmat[i]

            i += 1

            if i >= block_size:
                values[block_num * block_size:block_num * block_size + block_size] = idct(coeffs, norm='ortho')
                i = 0
                block_num += 1
                # We pad out to a full byte at the end of the block
                if ind % 8 != 0:
                    ind += 8 - (ind % 8)
                if i > 1:
                    raise SystemExit

        return Dataset(point_count, interval, min_ts, values)
Example #16
def outlier_removal_smoothing_dct(tsData, n):
    """
    >>> data = pd.read_csv('input.csv')
    >>> outlier_removal_smoothing_dct(data, 15)
    {'buy': 28833387.903209731, 'sale': 40508532.108399086, 'method': 'outlier_removal_smoothing_dct'}
    >>> outlier_removal_smoothing_dct(data, 20)
    {'buy': 30315166.377325296, 'sale': 42088164.543017924, 'method': 'outlier_removal_smoothing_dct'}
    """

    tsData['ingestdatetime'] = tsData.apply(lambda row: datetime.strptime(str(row['ingestdate']), "%Y%m%d"), axis=1)
    tsData = tsData.sort_values(['ingestdate'], ascending=[True])  # DataFrame.sort was removed in modern pandas

    N = len(tsData)
    t = range(len(tsData))
    x = [float(elem) for elem in tsData['qtyavail']]
    y = dct(x, norm='ortho')
    window = np.zeros(N)
    window[:n] = 1
    yr = idct(y*window, norm='ortho')
    
    diffs = np.diff(yr)
    result_sale = abs(sum(filter(lambda x: x < 0, diffs)))
    result_buy = abs(sum(filter(lambda x: x > 0, diffs)))

    result = {}
    result['sale'] = result_sale
    result['buy'] = result_buy
    result['method'] = inspect.stack()[0][3]
    return result
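The smoothing core of the function above does not need pandas: keep the first n orthonormal DCT coefficients and invert. A self-contained sketch on a synthetic series (all data below are illustrative):

import numpy as np
from scipy.fftpack import dct, idct

rng = np.random.default_rng(1)
x = 50000 + np.cumsum(rng.normal(0, 1000, 365))  # synthetic daily inventory level
n = 15

y = dct(x, norm='ortho')
window = np.zeros(x.size)
window[:n] = 1
smoothed = idct(y * window, norm='ortho')  # low-pass reconstruction from n coefficients

diffs = np.diff(smoothed)
print('sale:', abs(diffs[diffs < 0].sum()), 'buy:', abs(diffs[diffs > 0].sum()))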
Example #17
    def on_adjust(self, event=None):
        """
        Executed when slider changes value
        """
        width, height = self.lena.shape
        quant = self.sld.GetValue()

        # reconstructed
        rec = np.zeros(self.lena.shape, dtype=np.int64)

        for y in range(0, height, 8):
            for x in range(0, width, 8):
                d = self.lena[y:y+8, x:x+8].astype(np.float64)
                D = dct(dct(d.T, norm='ortho').T, norm='ortho').reshape(64)
                Q = np.zeros(64, dtype=np.float64)
                Q[self.unzig[:quant]] = D[self.unzig[:quant]]
                Q = Q.reshape([8,8])
                q = np.round(idct(idct(Q.T, norm='ortho').T, norm='ortho'))
                rec[y:y+8,x:x+8] = q.astype(np.int64)

        diff = np.abs(self.lena-rec)

        im = self.axes.flat[0].imshow(self.lena, cmap='gray')
        self.axes.flat[0].set_title('Original Image')

        im = self.axes.flat[1].imshow(rec, cmap='gray')
        self.axes.flat[1].set_title('Reconstructed Image')

        self.hinton(ax=self.axes.flat[2])
        self.axes.flat[2].set_title('DCT Coefficient Mask')

        im = self.axes.flat[3].imshow(diff, cmap='hot', vmin=0, vmax=255)
        self.axes.flat[3].set_title('Error Image')

        for ax in self.axes.flat:
            ax.axis('off')

        self.fig.subplots_adjust(right=0.8)
        cbar_ax = self.fig.add_axes([0.85, 0.15, 0.05, 0.7])
        self.fig.colorbar(im, cax=cbar_ax)

        p = self.psnr(self.lena, rec)
        s = self.compute_ssim(self.lena, rec)

        self.statusbar.SetStatusText('PSNR :%.4f SSIM: %.4f' % (p, s))

        self.canvas.draw()
Example #18
    def _fit_idct(self, func, N):
        """ Return the chebyshev coefficients using the idct """
        pts = -cheb.chebpts1(N)
        y = func(pts)
        coeffs = idct(y, type=3) / N
        coeffs[0] /= 2.
        coeffs[-1] /= 2.
        return coeffs
Example #19
def idct(c):
    """
    Apply inverse DCT to all elements in c

    :param c:
    :return:
    """
    return fftpack.idct(c, norm='ortho')
Example #20
    def foreach_idct(self):
        """
        Performs the inverse discrete cosine transform on each block
        :return:
        """
        for i in range(self.nrows):
            for j in range(self.ncols):
                self.image_blocks[i, j, ...] = idct(np.transpose(self.image_blocks_magnitude[i, j, ...]),
                                                    **self.dct_params)
def poisson_neumann(gx, gy):
    height, width = gx.shape

    f = weighted_finite_differences(gx, gy, np.ones_like(gx), np.ones_like(gy))

    # Compute cosine transform
    fcos = dct(dct(f, norm='ortho', axis=0), norm='ortho', axis=1)

    # Compute the solution in the fourier domain
    x, y = np.meshgrid(np.arange(width), np.arange(height))
    denom = (2.0 * np.cos(np.pi * x / width) - 2.0) + (2.0 * np.cos(np.pi * y / height) - 2.0)
    # First element is fixed to 0.0; avoid the 0/0 division at [0, 0]
    denom[0, 0] = 1
    fcos /= denom
    fcos[0, 0] = 0

    # Compute inverse discrete cosine transform to find solution
    # in spatial domain
    return idct(idct(fcos, norm='ortho', axis=0), norm='ortho', axis=1)
def trans(a):
    global qa
    b = dct(a,type=2,n=None,axis=-1,norm='ortho',overwrite_x=False)
    c = idct(b, type=2, n=None, axis=-1, norm='ortho', overwrite_x=False)
    F = np.zeros((8,8))
    for i in range(8):
        for j in range(8):
            F[i][j] = b[i][j]/qa[i][j]
    return F
Example #23
def background_reconstruction_dct(I):
    """
    background_reconstruction_dct(I)      where I is a BW image matrix
    Reconstructs the background image by the DCT method proposed in the paper by 
    L.Chen et al (2008) :
    'Automatic TFT-LCD mura defect inspection using discrete cosine transform-based background filtering and ‘just noticeable difference’ quantification strategies'
    
    """
    # Type II DCT is used, the inverse is the Type-III DCT according to SF2 handout
    M = I.shape[0]  # number of rows in image matrix
    N = I.shape[1]  # number of columns in image matrix

    X = fftpack.dct(fftpack.dct(I, type=2, norm='ortho').T, type=2, norm='ortho').T
    X[1:, 1:] = 0  # keep only the first row and first column of DCT coefficients
    Y = fftpack.idct(fftpack.idct(X, type=2, norm='ortho').T, type=2, norm='ortho').T
    Y = np.uint8(Y)
    #cv2.imwrite('stage_3.png', Y)
    return Y
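A quick usage sketch for background_reconstruction_dct on a synthetic image with a smooth gradient background (the test image is illustrative; numpy and scipy.fftpack are assumed to be imported as the function expects):

import numpy as np
from scipy import fftpack

rows, cols = np.mgrid[0:64, 0:64]
I = np.uint8(np.clip(100 + 0.5 * rows + 0.3 * cols + np.random.normal(0, 5, (64, 64)), 0, 255))
background = background_reconstruction_dct(I)
print(background.shape, background.dtype)  # (64, 64) uint8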
Example #24
    def inverseATA(self, divBound):
        divBound.applyGaussForward()

        div = divBound.divergence.div
        div = 0.5*fft.dct(div, axis=0)
        div = 0.5*fft.dct(div, axis=1)
        div = 0.5*fft.dct(div, axis=2)

        div[0,0,0] = 0.0
        div = div / self.eigvalues

        div = fft.idct(div, axis=0) / ( self.M + 1. )
        div = fft.idct(div, axis=1) / ( self.N + 1. )
        div = fft.idct(div, axis=2) / ( self.P + 1. )

        divBound.divergence.div = div
        divBound.applyGaussBackward()

        return divBound
Example #25
def _build_test_image(size, edge_width, core):
    """Build a synthetic test image from given DCT coefficients.

    The result is a PIL image of size[0] rows and size[1] columns, in the 'F' format. Core is a list
    of lists which describes the DCT coefficients to be placed at the core of the image. It must be
    rectangular (that is, all lists should have the same length), but not necessarily square. An
    initial (size[0]-2*edge_width)x(size[1]-2*edge_width) image is generated from these DCT coefficients,
    and then embedded in the center of a black image.

    The output format is 'F' in order to ensure that Hash.hash_image gets as close to the
    coefficients in core as possible.

    Hint: ensure that the coefficients in core are not close to a multiple of Hash.dct_coeff_split,
    since the rounding used is always floor, and the FP errors might lead to a number such as 1000
    turning into 999.99, which gets rounded to 999 and which is 124 when divided by the standard
    Hash.dct_coeff_split of 8, rather than the original 125.

    Args:
      size: a two-element sequence containing the number of rows and columns of the output image.
      edge_width: number of edge pixels in the image.
      core: a rectangular list-of-lists containing DCT coefficients used to generate the image.

    Returns:
      A PIL image as described above.
    """
    mat_dct = numpy.zeros((size[0] - 2 * edge_width, size[1] - 2 * edge_width))
    row_idx = 0
    for row in core:
        col_idx = 0
        for value in row:
            # Yup, it looks weird. But we need to generate stuff in transpose space so
            # Image.fromarray picks it up correctly. So we generate the coefficients in a transposed
            # form, and they'll be corrected when doing the final transpose.
            mat_dct[col_idx, row_idx] = value
            col_idx += 1
        row_idx += 1

    mat_core = fftpack.idct(fftpack.idct(mat_dct, norm='ortho').T, norm='ortho').T
    mat = numpy.float32(numpy.random.randint(0, 127, size))
    mat[edge_width:size[0] - edge_width, edge_width:size[1] - edge_width] = mat_core
    mat += 128

    return Image.fromarray(numpy.float32(mat).T, mode='F')
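A minimal usage sketch for _build_test_image (the coefficient values are arbitrary, chosen away from multiples of 8 as the docstring advises; numpy, scipy.fftpack and PIL.Image are assumed to be imported as the helper expects):

img = _build_test_image((32, 32), 4, [[1003.0, 251.0], [251.0, 67.0]])
print(img.size, img.mode)  # (32, 32) F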
Example #26
def reconstruct(yuv_tiles, original_shape, q=20): 
    recon = []
    row, col = int(original_shape[0]/8), int(original_shape[1]/8)
    if original_shape[0]%8 != 0: row+= 1
    if original_shape[1]%8 != 0: col+= 1
    
    for i in range(len(yuv_tiles)):
        if i==0: color = 'y'
        if i==1: color = 'u'
        if i==2: color = 'v'
        color_tiles = []
        for tile in yuv_tiles[i]:
            unquantized_tile = unquantizeTile(tile, color, q)
            idct_tile = idct(idct(unquantized_tile.T,  norm='ortho').T, norm='ortho')
            color_tiles.append(idct_tile)
        recon.append(combineTiles(color_tiles, (row, col), 8))
        
    reconstructed = np.zeros((row*8, col*8, 3),  dtype=float)
    reconstructed[:,:,0], reconstructed[:,:,1],  reconstructed[:,:,2] = recon[0], recon[1], recon[2]
    cropped = reconstructed[:original_shape[0], :original_shape[1]]
    return cropped
def regr(mat):
    global qa
    new = np.zeros((8,8))
    for i in range(8):
        for j in range(8):
            new[i][j] = mat[i][j]*qa[i][j]
    c = idct(new, type=2, n=None, axis=-1, norm='ortho', overwrite_x=False)

    for i in range(8):
        for j in range(8):
            c[i][j] = c[i][j] + 128
    return c
Example #28
def inverse_transform(array):
    """
    Args:
        array (list(list(numpy.ndarray))): 2D list of numpy arrays of DCT coefficients (float32)
    Returns:
        list(list(numpy.ndarray)): the rounded idct of each numpy array in the 2D list
    """
    return [
        [np.rint(idct(array[x][y], norm='ortho'))
         for y in range(0, len(array[x]))]
         for x in range(0, len(array))
    ]
Example #29
File: kde.py Project: mamday/pisa
def fbw_kde(data, N=None, MIN=None, MAX=None, overfit_factor=1.0):
    # Parameters to set up the mesh on which to calculate
    N = 2**14 if N is None else int(2**np.ceil(np.log2(N)))
    if MIN is None or MAX is None:
        minimum = min(data)
        maximum = max(data)
        Range = maximum - minimum
        MIN = minimum - Range/10 if MIN is None else MIN
        MAX = maximum + Range/10 if MAX is None else MAX

    # Range of the data
    R = MAX-MIN

    # Histogram the data to get a crude first approximation of the density
    M = len(data)
    DataHist, bins = np.histogram(data, bins=N, range=(MIN, MAX))
    DataHist = DataHist/M

    DCTData = fftpack.dct(DataHist, norm=None)

    I = np.arange(1, N, dtype=np.float64)**2
    SqDCTData = np.float64((DCTData[1:]/2.0)**2)

    # The fixed point calculation finds the bandwidth = t_star
    failure = True
    for guess in np.logspace(-1, 2, 20):
        try:
            t_star = optimize.brentq(fixed_point,
                                     0, guess,
                                     args=(np.float64(M), I, SqDCTData))
            failure = False
            break
        except ValueError:
            failure = True

    if failure:
        raise ValueError('Initial root-finding failed.')

    # Smooth the DCTransformed data using t_star divided by an overfitting
    # param that allows sub-optimal but allows for "sharper" features
    SmDCTData = DCTData*np.exp(-np.arange(N)**2*pisq*t_star/(2*overfit_factor))

    # Inverse DCT to get density
    density = fftpack.idct(SmDCTData, norm=None)*N/R

    mesh = (bins[0:-1]+bins[1:])/2.

    bandwidth = np.sqrt(t_star)*R

    density = density/np.trapz(density, mesh)

    return bandwidth, mesh, density
Example #30
def process_block(img, weight, quality):

    img = img.copy()

    # print('process_cube input: {}'.format(img))

    this_quality = int(np.round(np.max(weight)*quality))  # must be an int: used as a table index below

    if this_quality < 0:
        this_quality = 0
    if this_quality > quality - 1:
        this_quality = quality - 1

    img = cv2.cvtColor(img, cv2.COLOR_BGR2LAB)
    img = np.float32(img)

    img_dct = dct(dct(img, axis=0)/4, axis=1)/4

    Q_luma = luminance_tables[:, :, :, this_quality][:, :, 0].astype(np.float32)
    Q_chroma = chrominance_tables[:, :, :, this_quality][:, :, 0].astype(np.float32)

    img_dct[:, :, 0] /= Q_luma
    img_dct[:, :, 1] /= Q_chroma
    img_dct[:, :, 2] /= Q_chroma

    img_dct = np.round(img_dct)

    img_dct[:, :, 0] *= Q_luma
    img_dct[:, :, 1] *= Q_chroma
    img_dct[:, :, 2] *= Q_chroma

    img_processed = idct(idct(img_dct, axis=0)/4, axis=1)/4

    img_processed = np.clip(img_processed, 0, 255)
    img_processed = np.uint8(img_processed)

    img_processed = cv2.cvtColor(img_processed, cv2.COLOR_LAB2BGR)

    return img_processed
Example #31
def zz_idct2(blk):
    return idct(idct(blk.T, norm='ortho').T, norm='ortho')
M = np.dot(Mr, Mc)

while (True):
    # Capture frame-by-frame
    [retval, frame] = cap.read()
    cv2.imshow('Original Video, Green Component', frame[:, :, 1])

    #compute magnitude of 2D DCT of green component
    #with suitable normalization for the display,
    #with norm='ortho' for "energy conservation" in the subbands and for
    #invertibility without factor:
    X = sft.dct(frame[:, :, 1] / 255.0, axis=1, norm='ortho')
    X = sft.dct(X, axis=0, norm='ortho')
    #Set to zero the 7/8 highest spatial frequencies in each direction:
    X = X * M
    frame = np.abs(X)

    # Display the resulting frame
    cv2.imshow('2D DCT with the highest spatial frequencies set to zero', frame)
    #Inverse 2D DCT:
    X = sft.idct(X, axis=1, norm='ortho')
    x = sft.idct(X, axis=0, norm='ortho')
    cv2.imshow('Inverse 2D DCT without the highest spatial frequencies', x)

    #Keep window open until key 'q' is pressed:
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
Example #33
def idct2(a):
    return idct(idct(a.T, norm='ortho').T, norm='ortho') 
Example #34
def idct3d(x):
    x2 = sf.idct(x.copy(), axis=2, norm='ortho')
    x1 = sf.idct(x2.copy(), axis=1, norm='ortho')
    x0 = sf.idct(x1.copy(), axis=0, norm='ortho')
    return x0
Example #35
def idctn(x, type=2, axes=None, norm="ortho"):
    if axes is None:
        axes = np.arange(len(x.shape))
    y = x
    for axis in axes:
        y = ft.idct(y, type=type, axis=axis, norm=norm)  # pass norm through instead of hard-coding it
    return y
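A matching forward transform and round-trip check for idctn (dctn is an assumed counterpart, using the same ft alias for scipy.fftpack):

import numpy as np
from scipy import fftpack as ft

def dctn(x, type=2, axes=None, norm="ortho"):
    # Forward n-dimensional DCT; inverted by idctn above for the same type/norm (assumed helper)
    if axes is None:
        axes = np.arange(len(x.shape))
    y = x
    for axis in axes:
        y = ft.dct(y, type=type, axis=axis, norm=norm)
    return y

a = np.random.rand(3, 4, 5)
print(np.allclose(idctn(dctn(a)), a))  # True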
Example #36
def fbwkde(data,
           weights=None,
           n_dct=None,
           min=None,
           max=None,
           evaluate_dens=True,
           evaluate_at=None):
    """Fixed-bandwidth (standard) Gaussian KDE using the Improved
    Sheather-Jones bandwidth.

    Code adapted for Python from the implementation in Matlab by Zdravko Botev.

    Ref: Z. I. Botev, J. F. Grotowski, and D. P. Kroese. Kernel density
    estimation via diffusion. The Annals of Statistics, 38(5):2916-2957, 2010.

    Parameters
    ----------
    data : array
    weights : array or None
    n_dct : None or int
        Number of points with which to form a regular grid, from `min` to
        `max`; histogram values at these points are sent through a discrete
        Cosine Transform (DCT), so `n_dct` should be an integer power of 2 for
        speed purposes. If None, uses next-highest-power-of-2 above
        len(data)*10.
    min : float or None
    max : float or None
    evaluate_dens : bool
    evaluate_at : None or array

    Returns
    -------
    bandwidth : float
    evaluate_at : array of float
    density : None or array of float

    """
    if n_dct is None:
        n_dct = int(2**np.ceil(np.log2(len(data) * 10)))
    assert int(n_dct) == n_dct
    n_dct = int(n_dct)
    n_datapoints = len(data)

    # Parameters to set up the points on which to evaluate the density
    if min is None or max is None:
        minimum = data.min()
        maximum = data.max()
        data_range = maximum - minimum
        min = minimum - data_range / 2 if min is None else min
        max = maximum + data_range / 2 if max is None else max

    hist_range = max - min

    # Histogram the data to get a crude first approximation of the density
    data_hist, bins = np.histogram(data,
                                   bins=n_dct,
                                   range=(min, max),
                                   density=False,
                                   weights=weights)

    # Make into a probability mass function
    if weights is None:
        data_hist = data_hist / n_datapoints
    else:
        data_hist = data_hist / np.sum(weights)

    # Define a minimum bandwidth relative to mean of distances between points
    distances = np.diff(np.sort(data))
    min_bandwidth = 2 * np.pi * np.mean(distances)
    logging.trace('min_bandwidth, 2pi*mean: %.5e', min_bandwidth)

    # Solve for the ISJ fixed point to obtain the "optimal" bandwidth
    isj_bw, t_star, dct_data = isj_bandwidth(y=data_hist,
                                             n_datapoints=n_datapoints,
                                             x_range=hist_range,
                                             min_bandwidth=min_bandwidth)

    # TODO: Detect numerical instability issues here, prior to returning!

    if not evaluate_dens:
        return isj_bw, evaluate_at, None

    if evaluate_at is None:
        # Smooth the discrete-cosine-transformed data using t_star
        sm_dct_data = dct_data * np.exp(
            -np.arange(n_dct)**2 * _PISQ * t_star / 2)

        # Inverse DCT to get density
        density = fftpack.idct(sm_dct_data, norm=None) * n_dct / hist_range
        if np.any(density < 0):
            logging.trace(
                'ISJ encountered numerical instability in IDCT (resulting in a'
                ' negative density). Result will be computed without the IDCT.'
            )
            evaluate_at = (bins[0:-1] + bins[1:]) / 2
        else:
            evaluate_at = (bins[0:-1] + bins[1:]) / 2
            density = density / np.trapz(density, evaluate_at)
            return isj_bw, evaluate_at, density
    else:
        evaluate_at = np.asarray(evaluate_at, dtype=FTYPE)

    density = gaussians(x=evaluate_at,
                        mu=data.astype(FTYPE),
                        sigma=np.full(shape=n_datapoints,
                                      fill_value=isj_bw,
                                      dtype=FTYPE),
                        weights=weights)

    return isj_bw, evaluate_at, density
Example #37
def idct2(image_channel):
    return fftpack.idct(fftpack.idct(image_channel.T, norm='ortho').T, norm='ortho')
Example #38
def idct_2d(image):
    return fftpack.idct(fftpack.idct(image.T, norm='ortho').T, norm='ortho')
Example #39
def idct2(a):
    return idct(idct(a, axis=0, norm='ortho'), axis=1, norm='ortho')
def idct2d(data):
    return fftpack.idct(fftpack.idct(data, norm='ortho').T, norm='ortho').T
Example #41
# FFT RHS
rhs_p = fft.dct(rhs, type=dct_type, norm=dct_norm)
rhs_p *= 1. / nx

# Construct solution in Fourier space
l = np.arange(0, nx)
cosl = np.cos(np.pi * l / nx)
sol_p = rhs_p / (2. * (cosl - 1.))

# Normalization is directly obtainable in the case of cosine transform (type 2)
sol_p[0] = sol_leveque(0) - np.sum(sol_p[1:])
sol_p[1:] *= 0.5

# Inverse FFT to get real solution
sol = fft.idct(sol_p, type=dct_type, norm=dct_norm)

# Plot solution
fig1 = pl.figure(figsize=(8, 6))
pl.plot(sol, '.', label='DCT solver')
pl.plot(sol_leveque(x), label='analytic')
pl.legend()
fig1.savefig('sol_dct_leveque.png')

if plot_grad_u:
    fig2 = pl.figure(figsize=(8, 6))
    pl.plot(np.gradient(sol, dx), '.', label='grad DCTsolver')
    pl.plot(bc_leveque(x), label='analytic')
    pl.plot(np.gradient(sol_leveque(x), dx), '--', label='grad anal. sol.')
    pl.legend()
    fig2.savefig('bc_dct_leveque.png')
Example #42
def obj(c00, N, locs, bins, lims):
    '''Objective: choose c0 to minimize difference between histograms.'''
    c0 = np.zeros(N)
    c0[locs] = c00
    return dH(Hy, density(idct(c0), bins, lims))
Example #43
def err_fun(cc, N, bins, lims):
    '''Error function for parallel loop.'''
    xhat = get_xhat(N, [*cc], bins, lims)
    return dH(Hy, density(xhat, bins, lims), mode='l2')


if __name__ == '__main__':

    # Assume there is a k-sparse representation,
    N = 256  # these choices of N,k give unique solution most of the time
    k = 3
    cx = np.zeros(N)
    idx_true = np.random.choice(np.arange(N), k, False)
    cx[idx_true] = 1  # + np.random.normal(0, 1, k)
    x = idct(cx)
    x /= np.linalg.norm(x)

    # We measure a permutation of x
    pi_true = np.random.permutation(np.arange(N))
    y = x[pi_true]

    # Thus the pixel intensity distribution of x is that of y
    lims = (-.5, .5)
    Hy, bins = np.histogram(y, bins=N, range=lims)

    # Let's try to do things in parallel -- more than twice as fast!
    err_fun_partial = partial(err_fun, N=N, bins=bins, lims=lims)
    with Pool() as pool:
        res = list(
            tqdm(pool.imap(err_fun_partial,
Example #44
def get_2d_idct(coefficients):
    # Get 2D Inverse Cosine Transform of Image
    return fftpack.idct(fftpack.idct(coefficients.T, norm='ortho').T,
                        norm='ortho')
Example #45
import matplotlib.pyplot as plt
plt.close('all')

#%% Use DCT transform from the scipy library

np.set_printoptions(formatter={'float': '{: 0.3f}'.format})

# numpy array
f = np.array([1, 1, 1, 1, 2, 2, 2, 2], dtype='float32')
print("f = ", f)

# apply dct function on array
F = dct(f, norm='ortho')
print("Fu = ", F)  #

f_recon = idct(F, norm='ortho')
print("f_recon = ", f_recon)

status = np.allclose(f, f_recon)
print(" f equal f_recon ? ->", status)

#%% Try find the coefficient for F[0], u=0  frequency 0
u = 0

cosv = np.zeros(8)
F = np.zeros(8)

for i in range(8):
    if u == 0:
        Cu = 1 / np.sqrt(2)
    else:
Example #46
from scipy.fftpack import dct, idct

img = cv2.imread('lenna.jpg', 0)
z = np.zeros([225, 225])
for i in range(28):
    for j in range(28):
        img1 = img[i * 8:i * 8 + 8, j * 8:j * 8 + 8]
        dct0 = dct(img1)
        dct1 = np.transpose(dct0)
        dct2 = dct(dct1)

        for m in range(8):
            for n in range(8):
                if m >= 4 or n >= 4:
                    dct2[m][n] = 0
        idct0 = idct(dct2)
        idct1 = np.transpose(idct0)
        idct2 = np.round(idct(idct1) / 256)
        z[i * 8:i * 8 + 8, j * 8:j * 8 + 8] = idct2
plt.subplot(121)
plt.imshow(img, cmap='gray')

plt.title('image')
plt.subplot(122)
plt.imshow(z, cmap='gray')

plt.title('compression')

psnr = 10 * np.log10(
    (255 * 255) / (1 / (225 * 225) * np.sum((z - img) ** 2)))  # MSE is the mean of squared differences
print('psnr', psnr)
Example #47
def convert():
    original_pic = Image.open("img/1.bmp")
    w, h = original_pic.size
    d = int(parameter_1.get())
    F = int(parameter_2.get())
    size = min(w, h)

    # CROP
    # Crop the image to a square based on the smaller dimension
    resto = size % F
    if (resto != 0 and w > h):
        cropped = original_pic.crop((0, 0, h - resto, h - resto))
        cropped.save('img/cropped.bmp')
    elif (resto != 0 and h > w):
        cropped = original_pic.crop((0, 0, w - resto, w - resto))
        cropped.save('img/cropped.bmp')
    elif (resto == 0 and w > h):
        # rectangular image, long side w
        cropped = original_pic.crop((0, 0, h, h))
        cropped.save('img/cropped.bmp')
    else:
        taglio = h - w
        cropped = original_pic.crop((0, 0, w, w))
        cropped.save('img/cropped.bmp')

    # CREATE FxF BLOCKS
    k = 0  # counter for the output file names
    w, h = cropped.size
    n = w / F
    n = int(n)
    for y in range(0, n):
        for x in range(0, n):
            immagineCroppata = cropped.crop(
                (x * F, y * F, (x * F) + F, (y * F) + F))
            immagineCroppata.save("img/cropped" + str(k) + ".bmp")
            k += 1

    N = n * n
    for k in range(0, N):
        original_pic = Image.open("img/cropped" + str(k) + ".bmp")
        w1, h1 = original_pic.size
        pix_val = list(original_pic.getdata())
        data = np.array(pix_val)
        shape = (h1, w1)
        skatarata = data.reshape(shape)

        #DCT
        dct1 = dct(skatarata, norm='ortho')
        dct2 = dct(dct1.T, norm='ortho')
        dct2 = dct2.T

        # ZERO FREQUENCIES WITH i + j >= d
        for i in range(0, h1):
            for j in range(0, w1):
                if ((i + j >= d)):
                    dct2[i][j] = 0

        # IDCT (INVERSE DCT)
        idct1 = idct(dct2, norm='ortho')
        idct2 = idct(idct1.T, norm='ortho')
        idct2 = idct2.T

        # clamp values: <0 becomes 0, >255 becomes 255, otherwise round
        for i in range(0, h1):
            for j in range(0, w1):
                if idct2[i][j] < 0:
                    idct2[i][j] = 0
                elif idct2[i][j] > 255:
                    idct2[i][j] = 255
                else:
                    idct2[i][j] = round(idct2[i][j])

        # convert to uint8
        numbers_matrix = np.array(idct2.astype(np.uint8))
        img = Image.fromarray(numbers_matrix)
        img.save('img/SKATARAPUMPUM' + str(k) + '.bmp')

    # REASSEMBLE THE FULL IMAGE
    images = []

    for k in range(0, N):
        images.append(Image.open('img/SKATARAPUMPUM' + str(k) + '.bmp'))
    k = 0
    for t in range(0, N, n):
        img1 = append_images(images[t:t + n], direction='horizontal')
        img1.save("img/PUMPUM" + str(k) + ".bmp")
        k += 1

    images = []
    for k in range(0, n):
        images.append(Image.open('img/PUMPUM' + str(k) + '.bmp'))

    final = append_images(images, direction='vertical')
    final.save("img/FINAL.bmp")

    img_elab = final.resize((450, 450))
    img_elab = ImageTk.PhotoImage(img_elab)
    elab_img.config(image=img_elab)
    elab_img.image = img_elab
Example #48
print('\nDC component')
print('Amplitude:', abs_yf[0] / N)  # the DC amplitude is scaled up by a factor of N

# 100 Hz component
index_100hz = 100 * N // Fs  # waveform frequency = i * Fs / N, so index i = frequency * N / Fs
print('\n100 Hz waveform')
print('Amplitude:', abs_yf[index_100hz] * 2.0 / N)  # sinusoid amplitudes are scaled up by N/2
print('Phase:', angle_y[index_100hz])

# 150 Hz component
index_150hz = 150 * N // Fs  # waveform frequency = i * Fs / N, so index i = frequency * N / Fs
print('\n150 Hz waveform')
print('Amplitude:', abs_yf[index_150hz] * 2.0 / N)  # sinusoid amplitudes are scaled up by N/2
print('Phase:', angle_y[index_150hz])
print('Phase difference between 100 Hz and 150 Hz:', angle_y[index_150hz] - angle_y[index_100hz])
print('\n')

# DCT transform

import numpy as np
from scipy.fftpack import dct, idct

y = dct(np.array([4., 3., 5., 10., 5., 3.]))
print(y)

import numpy as np
from scipy.fftpack import dct, idct

y = idct(np.array([4., 3., 5., 10., 5., 3.]))
print(y)
Example #49
def idct_2d(mat):
    return fftpack.idct(fftpack.idct(mat.T, norm='ortho').T, norm='ortho')
Example #50
def _idct2(a):
    return idct(idct(a.T, norm="ortho").T, norm="ortho")
def block_idct(x, block_size=8, masked=False, ratio=0.5):
    z = np.zeros(shape=x.shape, dtype=np.float32)
    num_blocks = int(x.shape[2] / block_size)
    mask = np.zeros((x.shape[0], x.shape[1], block_size, block_size))
    mask[:, :, :int(block_size * ratio), :int(block_size * ratio)] = 1
    for i in range(num_blocks):
        for j in range(num_blocks):
            submat = x[:, :, (i * block_size):((i + 1) * block_size), (j * block_size):((j + 1) * block_size)]
            if masked:
                submat = submat * mask
            z[:, :, (i * block_size):((i + 1) * block_size), (j * block_size):((j + 1) * block_size)] = idct(idct(submat, axis=3, norm='ortho'), axis=2, norm='ortho')
    return z
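block_idct expects a batched NCHW-style array whose spatial size is a multiple of block_size. A small usage sketch on random data (shapes are illustrative; idct is scipy.fftpack.idct, as the function assumes):

import numpy as np
from scipy.fftpack import idct

x = np.random.randn(2, 3, 32, 32).astype(np.float32)  # e.g. a batch of DCT-domain images
z_full = block_idct(x, block_size=8)
z_masked = block_idct(x, block_size=8, masked=True, ratio=0.5)  # keep only the low-frequency quarter of each block
print(z_full.shape, z_masked.shape)  # (2, 3, 32, 32) (2, 3, 32, 32)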
Example #52
def idct2(block):
    return idct(idct(block.T, norm='ortho').T, norm='ortho')
Example #53
    def baseline_corr_arPLS(self,
                            lam: float = 1e3,
                            niter: int = 100,
                            tol: float = 2e-3,
                            x0_data=None,
                            x1_data=None):
        """
        Performs baseline correction using asymmetrically reweighted penalized least squares (arPLS). Based on
        10.1016/j.csda.2009.09.020 and 10.1039/c4an01061b, utilizes discrete cosine transform to efficiently
        perform the calculation.

        Correctly smooths only evenly spaced data!!

        If x0_data or x1_data is not None, the data range will be used and weight vector will be fixed during fitting.
        In those region where data are located, w = 0 and 1 otherwise.

        Parameters
        ----------
        lam : float
            Lambda - parametrizes the roughness of the smoothed curve.
        niter : int
            Maximum number of iterations.
        tol: float
            Tolerance for convergence based on weight matrix.
        x0_data : None or int or float
            First x value that denotes the signal from data.
        x1_data : None or int or float
            Last x value that denotes the signal from data.
        """

        N = self.data.shape[0]

        Lambda = -2 + 2 * np.cos(
            np.arange(N) * np.pi /
            N)  # eigenvalues of 2nd order difference matrix

        gamma = 1 / (1 + lam * Lambda * Lambda)

        y_orig = self.data[:, 1].copy()
        z = y_orig  # initialize baseline
        y_corr = None  # data corrected for baseline
        w = np.ones_like(z)  # weight vector

        i = 0
        crit = 1

        fix_w = False
        if x0_data or x1_data:
            start, end = self._get_start_end_indexes(x0_data, x1_data)
            w[start:end] = 0  #  data have zero weight
            fix_w = True

        while crit > tol and i < niter:
            z = idct(gamma * dct(w * (y_orig - z) + z, norm='ortho'),
                     norm='ortho')  # calculate the baseline

            y_corr = y_orig - z  # data corrected for baseline
            y_corr_neg = y_corr[y_corr < 0]  # negative data values

            m = np.mean(y_corr_neg)
            s = np.std(y_corr_neg)

            new_w = 1 / (1 + np.exp(2 * (y_corr - (2 * s - m)) / s)
                         )  # update weights with logistic function

            crit = norm(new_w - w) / norm(new_w)
            if not fix_w:
                w = new_w

            if (i + 1) % int(np.sqrt(niter)) == 0:
                print(f'Iteration={i + 1}, {crit=:.2g}')
            i += 1

        self.data[:, 1] = y_corr

        return self, Spectrum.from_xy_values(self.data[:, 0], z, f'{self.name} - baseline'),\
               Spectrum.from_xy_values(self.data[:, 0], y_orig, f'{self.name} - original data'),\
                Spectrum.from_xy_values(self.data[:, 0], w, f'{self.name} - weights')             # return the corrected data, baseline and the original data
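The core of the loop above is a DCT-domain penalized least squares (Whittaker-style) smoother: multiply the orthonormal DCT coefficients by gamma = 1/(1 + lam*Lambda^2) and invert. A self-contained sketch of just that smoothing step on synthetic data (not the full arPLS iteration):

import numpy as np
from scipy.fftpack import dct, idct

N = 500
x = np.linspace(0, 10, N)
y = np.exp(-(x - 5) ** 2) + 0.05 * x + 0.02 * np.random.randn(N)  # peak + drift + noise

lam = 1e3
Lambda = -2 + 2 * np.cos(np.arange(N) * np.pi / N)  # eigenvalues of the 2nd order difference matrix
gamma = 1 / (1 + lam * Lambda * Lambda)

smooth = idct(gamma * dct(y, norm='ortho'), norm='ortho')  # one smoothing pass with uniform weights
print(float(np.mean((smooth - y) ** 2)))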
def dct_window(x, n_coef):
    y = dct(x, norm='ortho')
    window = np.zeros(x.shape[0])
    window[:min(n_coef, len(x))] = 1
    yr = idct(y * window, norm='ortho')
    return yr[-1]
def idct2(x):
    # 2-D inverse DCT: apply the orthonormal 1-D inverse transform along both axes
    return spfft.idct(spfft.idct(x.T, norm='ortho', axis=0).T, norm='ortho', axis=0)
def rgb_to_gray(image):
    return np.dot(image[..., :3], [0.299, 0.587, 0.114])  # standard BT.601 luma weights


Xorig = im.imread('download.jpeg')
print(rgb_to_gray(Xorig).shape)
X = spimg.zoom(rgb_to_gray(Xorig), 0.2)
ny, nx = X.shape

k = round(nx * ny * 0.5)  # 50% sample
ri = np.random.choice(nx * ny, k, replace=False)  # random sample of indices
b = X.T.flat[ri]
#b = np.expand_dims(b, axis=1)

# create dct matrix operator using kron (memory errors for large ny*nx)
A = np.kron(spfft.idct(np.identity(nx), norm='ortho', axis=0),
            spfft.idct(np.identity(ny), norm='ortho', axis=0))
A = A[ri, :]  # same as phi times kron

# do L1 optimization
vx = cvx.Variable(nx * ny)
objective = cvx.Minimize(cvx.norm(vx, 1))
constraints = [A * vx == b]
prob = cvx.Problem(objective, constraints)
result = prob.solve(verbose=True)
Xat2 = np.array(vx.value).squeeze()

Xat = Xat2.reshape(nx, ny).T  # stack columns
Xa = idct2(Xat)

mask = np.zeros(X.shape)
Example #57
def idct2(array):
    return idct(idct(array, axis=0, norm='ortho'), axis=1, norm='ortho')
Example #58
def dct_2d_reverse(block):
    block = end_T(block)
    block = idct(block, norm='ortho')
    block = end_T(block)
    block = idct(block, norm='ortho')
    return block
Example #59
def convolve(img, gaus_2d, hx, hy):
    dat = idct(idct(dct(dct(img, axis=0, norm='ortho'), axis=1, norm='ortho') * gaus_2d,
                    axis=1, norm='ortho'), axis=0, norm='ortho')[hx, hy]
    return dat
Example #60
    print(fftpack.idst(ydst2, 2) / len(x))

    print("scipy dst III")
    print(fftpack.dst(ydst2, 3) / len(x))

    zidst2 = dst_type3(ydst2) / len(x)
    print("idst_type2")
    print(zidst2)

    zmyidst = myidst(ydst2)
    print("myidst")
    print(zmyidst)

    ydst2_ext = np.concatenate([ydst2[1:], [0]])
    zmyidst_ext = myidst_ext(ydst2_ext)
    print("myidst_ext")
    print(zmyidst_ext)

    print(fftpack.idct(np.flip(ydst2_ext, 0), 2) / 2)

    #expk = 0.5*np.exp(np.arange(N)*1j*np.pi*2/(4*N))
    #v = np.zeros_like(expk)
    #for k in range(N):
    #    if k == 0:
    #        v[k] = expk[k] * (-0 + 1j*ydst2_ext[k])
    #    else:
    #        v[k] = expk[k] * (-ydst2_ext[N-k] + 1j*ydst2_ext[k])
    #print(np.fft.ifft(v))

    pdb.set_trace()