def performance_test(offset, how_many, step=1):
    matrices = []

    for index in range(0, how_many*step, step):
        size = index + offset
        problem = np.random.randint(0, 255, size=(size, size))

        tic = time.perf_counter()
        dctn(problem, type=2, norm='ortho')
        toc = time.perf_counter()

        matrices.append({
            'size': size,
            'duration': float(toc - tic),
            'type': 'scipy'
        })

        print('[FFT] Matrix ({0},{0}) took {1:.4f}s to complete.'.format(size, float(toc-tic)))

        tic = time.perf_counter()
        custom_dct.dct2(problem)
        toc = time.perf_counter()

        matrices.append({
            'size': size,
            'duration': float(toc - tic),
            'type': 'implemented'
        })

        print('[DCT] Matrix ({0},{0}) took {1:.4f}s to complete.'.format(size, float(toc-tic)))

    return matrices
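A minimal usage sketch, assuming performance_test and its custom_dct dependency are importable from the surrounding module (the parameter values below are only illustrative); it plots the collected timings with matplotlib:

import matplotlib.pyplot as plt

results = performance_test(offset=8, how_many=20, step=8)
for kind in ('scipy', 'implemented'):
    sizes = [r['size'] for r in results if r['type'] == kind]
    durations = [r['duration'] for r in results if r['type'] == kind]
    plt.plot(sizes, durations, label=kind)
plt.xlabel('matrix size')
plt.ylabel('duration [s]')
plt.legend()
plt.show()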
Example #2
def dct2d_bb(x, shape=None):
    """
    Computes the band-by-band 2D Normalized DCT-II

    If the input x is a 3D data cube, the 2D DCT is computed for each 2D
    image stacked along the last axis.

    Arguments
    ---------
    x: (l, m*n) or (m, n, l) numpy array
        2D or 3D multi-band data.
        If the data has 3 dimensions, the last axis is for spectra.
        If the data is 2D, the first axis is for spectra.
    shape: optional, (m, n, l) tuple
        This is the data shape. This parameter is required only if input data
        are 2D.

    Returns
    -------
    (l, m*n) or (m, n, l) numpy array
        DCT coefficient matrix.
    """
    if x.ndim == 3:

        return fp.dctn(x, axes=(0, 1), norm='ortho')

    else:

        if shape is None:
            raise ValueError('shape parameter required for 2D data.')

        X = fp.dctn(x.T.reshape(shape), axes=(0, 1), norm='ortho')

        m, n, B = shape
        return X.reshape((m * n, B)).T
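A short self-check, assuming dct2d_bb above is in scope: the 3D cube layout and the flattened spectra-first layout should yield matching coefficients.

import numpy as np

m, n, l = 16, 12, 5
cube = np.random.rand(m, n, l)          # (m, n, l) data cube, bands on the last axis
flat = cube.reshape(m * n, l).T         # (l, m*n) spectra-first layout
out3d = dct2d_bb(cube)
out2d = dct2d_bb(flat, shape=(m, n, l))
assert np.allclose(out2d, out3d.reshape(m * n, l).T)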
Example #3
def transform_2d(u, type='E'):
    r'''2D discrete Chebyshev transform.

    Parameters
    ----------
    u : 2D array
        Values at the :math:`(M+1) \times (N+1)` Chebyshev points.
    type : one of {'E', 'R'}
        Specifies whether the input field is on extremal ('E') or root ('R') points.

    Returns
    -------
    v : 2D array
        Chebyshev amplitudes.


    Notes
    -----

    The discrete Chebyshev transform of a function defined on extremal points is given by

    .. math::
        v_{kl} = \frac{4}{MN c_k c_l} \sum_{i=0}^M\sum_{j=0}^N \frac{u_{ij}}{c_ic_j}\cos\frac{k\pi i}{M}\cos\frac{l\pi j}{N}

    where :math:`c_k = 2` when :math:`k` is the first or last index and :math:`c_k = 1`
    otherwise (and likewise for :math:`c_l`, :math:`c_i` and :math:`c_j`). The transform
    is implemented using the type I discrete cosine transform.

    '''
    if type.upper() == 'E':
        N = u.shape[0]-1
        M = u.shape[1]-1

        v = dctn(u, type=1, norm=None)

        # apply the scaling
        v     /= (M*N)
        v[ 0,:] /= 2
        v[-1,:] /= 2
        v[:, 0] /= 2
        v[:,-1] /= 2

    else:
        N = u.shape[0]
        M = u.shape[1]

        v = dctn(u, type=2, norm=None)
        # apply the scaling
        v     /= (M*N)
        v[ 0,:] /= 2
        v[:, 0] /= 2

    return v
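A quick sanity check of the scaling, assuming transform_2d above is in scope: sampling T_2(x)·T_3(y) on the 9×9 extremal grid should give a single unit amplitude at index (2, 3).

import numpy as np

i = np.arange(9)
# T_2 along axis 0 and T_3 along axis 1, sampled on the extremal points cos(pi*i/8)
u = np.outer(np.cos(2 * np.pi * i / 8), np.cos(3 * np.pi * i / 8))
v = transform_2d(u, type='E')
assert abs(v[2, 3] - 1) < 1e-10
assert np.allclose(np.delete(v.ravel(), 2 * 9 + 3), 0, atol=1e-10)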
Example #4
 def test_dct2_3(self):
     image = np.array(io.imread('images/sample2048x2048.jpg'),
                      dtype=float)
     dct_image = dct2(image)
     blocks = skimage.util.view_as_blocks(image, (8, 8))
     blocks[:] = dctn(blocks, axes=(2, 3), norm='ortho')
     self.assertTrue(np.max(np.abs(image - dct_image)) < 1e-10)
Example #5
def smooth_mask_and_dct(i, frame):
    # NOTE: `edges`, `full_mask`, `full_mask_plus`, `nk1` and `nk2` are
    # module-level globals in the original project, and the `i` argument
    # is shadowed by the loop variables below.
    (ny, nx) = frame.shape
    mask_data = frame.copy()
    mean = np.mean(mask_data)

    for i in range(ny):
        f = np.ma.masked_array(mask_data[i], mask=np.logical_not(edges[i]))  # full_mask[i]
        f[0] = mean
        f[-1] = mean
        xs = np.nonzero(f)[0]
        fi = sp.interpolate.interp1d(xs, f.compressed(), kind='linear')

        mask_xs = np.nonzero(full_mask[i])[0]
        mask_data[i, mask_xs] = fi(mask_xs)

    for i in range(nx):
        f = np.ma.masked_array(mask_data[:, i], mask=np.logical_not(edges[:, i]))  # full_mask[:, i]
        f[0] = mean
        f[-1] = mean
        xs = np.nonzero(f)[0]
        fi = sp.interpolate.interp1d(xs, f.compressed(), kind='linear')

        mask_xs = np.nonzero(full_mask[:, i])[0]
        mask_data[mask_xs, i] = (mask_data[mask_xs, i] + fi(mask_xs)) / 2

    for i in range(5):
        mask_data[full_mask] = ndi.gaussian_filter(mask_data, sigma=22 - 4 * i)[full_mask]
    mask_data[full_mask_plus] = ndi.gaussian_filter(mask_data, sigma=1)[full_mask_plus]

    return dctn(mask_data, type=2, shape=(nk1, nk2), norm='ortho', overwrite_x=True)
Example #6
    def block_add_wm(self,block,index,i):
        
        i = i%(self.wm_shape[0]*self.wm_shape[1])

        wm_1 = self.wm_flatten[i]
        block_dct = dctn(block,norm='ortho')
        block_dct_flatten = block_dct.flatten().copy()
        
        block_dct_flatten = block_dct_flatten[index]
        block_dct_shuffled = block_dct_flatten.reshape(self.block_shape)
        U,s,V = np.linalg.svd(block_dct_shuffled)
        max_s = s[0]
        s[0] = (max_s-max_s%self.mod+3/4*self.mod) if wm_1>=128 else (max_s-max_s%self.mod+1/4*self.mod)
        if self.mod2:
            max_s = s[1]
            s[1] = (max_s-max_s%self.mod2+3/4*self.mod2) if wm_1>=128 else (max_s-max_s%self.mod2+1/4*self.mod2)
        # s[1] = (max_s-max_s%self.mod2+3/4*self.mod2) if wm_1<128 else (max_s-max_s%self.mod2+1/4*self.mod2)

        # rebuild the block from the modified singular values: U @ diag(s) @ V
        block_dct_shuffled = np.dot(U, np.dot(np.diag(s), V))

        block_dct_flatten = block_dct_shuffled.flatten()

        # undo the shuffling: scatter the processed values back to their original positions
        block_dct_flatten[index] = block_dct_flatten.copy()
        block_dct = block_dct_flatten.reshape(self.block_shape)

        return idctn(block_dct,norm='ortho')
Example #7
 def phash(self, image):
     r = self.__ndarray_for(image, size="32x32!").astype(np.float64)
     h = fft.dctn(r, norm="ortho")[0:8, 0:8]
     avg = np.average(h.reshape(64, )[1:])
     mask = (h <= avg)
     h = mask.reshape(64, ).dot(2**np.arange(mask.size)[::-1])
     return int(h)
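Perceptual hashes like this one are usually compared by Hamming distance; a minimal helper sketch (the 10-bit threshold mentioned in the comment is only a common rule of thumb):

def hamming_distance(hash_a, hash_b):
    """Number of differing bits between two 64-bit perceptual hashes."""
    return bin(hash_a ^ hash_b).count('1')

# hamming_distance(phash_1, phash_2) <= 10 usually indicates near-duplicate images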
Example #8
    def prepare(self, img_stack=None):
        # Load the data
        if img_stack is not None:
            self._load_images(img_stack)
        elif self.input_type in ["directory", "files_list"]:
            self._load_images()
        elif self.input_type in ["images_stack", "images_list"]:
            self._load_images(self.img_stack)

        # Initialize the regularization parameters
        mean_value = self.img_stack_resized.mean(axis=0)
        mean_value /= mean_value.mean()  # normalized pixel-wise mean of all images
        mean_value_dct = dctn(mean_value, norm='ortho')

        if self.l_s is None:
            self.l_s = np.abs(mean_value_dct).sum() / 800.0
        if self.l_d is None:
            self.l_d = np.abs(mean_value_dct).sum() / 2000.0

        # Construct the measurement matrix
        self.img_sort = np.sort(self.img_stack_resized, axis=0)

        # Initialize the darkfield, flatfield, offset, and weights (for the L1 reweighted loss)
        self.offset = np.zeros([self.working_size] * 2)
        self.flatfield = np.ones([self.working_size] * 2)
        self.flatfield_fullsize = np.ones(self.image_shape)
        self.darkfield = np.random.randn(self.working_size, self.working_size)
        self.darkfield_fullsize = np.zeros(self.image_shape)
        self.W = np.ones_like(self.img_sort)

        # Initialize other parameters
        self.iteration = 0
        self.flag_reweigthing = True
Example #9
def predict_from_trend_unscaled(training_Ftxx, cycle_length, pred_length):
    (nk1, nk2) = training_Ftxx.shape[1:]
    # For now
    training_Ftkk = dctn(training_Ftxx, norm='ortho',
                         axes=[1, 2])[:, :nk1, :nk2]

    (Ftkk_detrended, bkk,
     Ckk) = separate_trends_unscaled(training_Ftkk, cycle_length)

    t0 = training_Ftxx.shape[0]  # where we start predicting from
    training_ts = np.arange(0, t0)  # ts corresponding to the training data
    pred_ts = np.arange(t0, t0 + pred_length)

    trend_descriptors = (bkk, Ckk, cycle_length)
    f0kk = Ftkk_detrended[-1]  # we start where the training data ends

    predicted_Ftkk = kspace_predict_from_trend_unscaled(
        f0kk, trend_descriptors, pred_ts)

    predicted_Ftxx = idctn(predicted_Ftkk, norm='ortho', axes=[1, 2])

    return predicted_Ftxx, predicted_Ftkk
Example #10
def main(args):
    # get image data
    img = io.imread(args.image).astype('float32') / 255
    img = _img_reduce_constast(img, args)
    enc_msg = _get_hidden_image_string(args)
    # compute dct transform
    img_dct = dctn(img, axes=[0, 1], norm='ortho')
    # check encoded message length
    max_length = _max_msg_length(img_dct, enc_msg, args)
    assert len(enc_msg) < max_length,\
        'hidden image too large, needs to be resized under {}KB'.format(max_length >> 10)
    # encode computation
    _encode_length(img_dct, enc_msg)
    _encode_image(img_dct, enc_msg)
    # compute inverse dct transform
    img_ret = idctn(img_dct, axes=[0, 1], norm='ortho')
    if img_ret.min() < 0 or img_ret.max() > 1:
        print(
            'Warning: the encoded data may be only partially recoverable, or not recoverable at all. '
            'Consider lowering the reduce ratio.'
        )
        img_ret = np.clip(img_ret, 0, 1)
    # quantization
    img_ret = (img_ret * QUANT_SCALE).astype('uint16')
    # save to disk
    assert args.output.endswith(
        '.tiff'), 'output image has to be in tiff format'
    io.imsave(args.output, img_ret)
Example #11
    def opA(U):
        """Evaluates the measurement operator on U U'."""

        temp = np.zeros(m)
        if U.ndim == 1:
            for kk in range(k):
                temp[kk * n:(kk + 1) * n] += dctn(
                    (S[:, kk] * U).reshape(n_img,
                                           n_img), norm='ortho').reshape(n)**2
        else:
            for jj in range(U.shape[1]):
                for kk in range(k):
                    temp[kk * n:(kk + 1) * n] += dctn(
                        (S[:, kk] * U[:, jj]).reshape(n_img, n_img),
                        norm='ortho').reshape(n)**2
        return temp
Example #12
    def dct_encoder(self, imSub_list, Q, blocksize=8, thresh=0.05):
        TransAll_list = []
        TransAllThresh_list = []
        TransAllQuant_list = []
        B = blocksize
        for idx, channel in enumerate(imSub_list):
            channelrows = channel.shape[0]
            channelcols = channel.shape[1]
            vis0 = np.zeros((channelrows, channelcols), np.float32)
            vis0[:channelrows, :channelcols] = channel
            vis0 = vis0 - 128  # before DCT the pixel values of all channels are shifted by -128
            blocks = self.blockshaped(vis0, B, B)
            # dct_blocks = fftpack.dct(fftpack.dct(blocks, axis=1, norm='ortho'), axis=2, norm='ortho')
            dct_blocks = fftpack.dctn(blocks, axes=(-2, -1), norm='ortho')
            thres_blocks = dct_blocks * \
                (abs(dct_blocks) > thresh * np.amax(dct_blocks, axis=(1,2))[:, np.newaxis, np.newaxis]) # need to broadcast
            # quant_blocks = np.round(thres_blocks / Q[idx])
            quant_blocks = np.round(thres_blocks)

            TransAll_list.append(
                self.unblockshaped(dct_blocks, channelrows, channelcols))
            TransAllThresh_list.append(
                self.unblockshaped(thres_blocks, channelrows, channelcols))
            TransAllQuant_list.append(
                self.unblockshaped(quant_blocks, channelrows, channelcols))
        return TransAll_list, TransAllThresh_list, TransAllQuant_list
Example #13
    def dct_and_quant(self, img_path):
        """
        DCT transformation and quantify proccess
        :param img_path: path of the original img
        :return: quantified matrix
        """

        img = Image.open(img_path)
        img_arr = np.array(img)
        width, height = img_arr.shape[:2]

        quant_blocks = []
        for i in range(3):
            img_blocks = [
                np.hsplit(item, width // 8)
                for item in np.vsplit(img_arr[:, :, i], height // 8)
            ]
            dct_blocks = [[
                np.array(fftpack.dctn(block), dtype=int) for block in line
            ] for line in img_blocks]
            quant_tbl = (quant_tbl_1 if i == 0 else quant_tbl_2)
            quant_blocks.append(
                [[np.array(item / quant_tbl, dtype=int) for item in line]
                 for line in dct_blocks])
        return quant_blocks
Example #14
def embed_bit(block, bit):
    patch = block.copy()
    coefs = dctn(patch)
    while not valid_coefficients(coefs, bit,
                                 P) or (bit != retrieve_bit(patch)):
        coefs = change_coefficients(coefs, bit)
        patch = double_to_byte(idctn(coefs) / (2 * n)**2)
    return patch
Example #15
def get_all_lm_blocks(img, X, Y):
    patches = []
    for x, y in zip(X, Y):
        patch = get_landmark_block(img, x, y, 8)
        patch = FFT.dctn(patch)[:8, :8]
        patches.append(patch)
    # flatten all landmark DCT blocks into a single feature vector
    patch_1d = np.array(patches).flatten()
    return patch_1d
Example #16
def dctn_(nod_values):
    factor = 1
    for d in range(len(nod_values.shape)):
        factor = factor * (nod_values.shape[d] - 1)

    spectrum = dctn(nod_values, type=1) / (factor)

    return spectrum
Example #17
 def dct_quantize(array, table):
     print("dct_quantize")
     row = 8
     col = 8
     dct = lambda x: dctn(x, norm='ortho')
     new_array = Encode.apply_fn(array - 128, row, col, dct)
     quantize = lambda x: Encode.quantize(x, table)
     return Encode.apply_fn(new_array, row, col, quantize)
Example #18
def im2freq(data):
    """ Applies a discrete Fourier transform

    Args:
        f: the 2d image to be transformed
    Returns:
        the Fourier coefficients, as a matrix of the same size as f
    """
    return fp.dctn(data, norm='ortho')
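Because the transform uses norm='ortho', a hypothetical inverse (not shown in the source; fp is assumed to be scipy.fftpack) is simply the orthonormal inverse DCT:

import scipy.fftpack as fp

def freq2im(coeffs):
    """Inverse of im2freq: map DCT coefficients back to the image domain."""
    return fp.idctn(coeffs, norm='ortho')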
Example #19
        def mult_efficient(v):
            """Efficient matrix-vector multiplication for the adjoint of the measurement operator."""

            temp = np.zeros(n)
            for kk in range(k):
                temp += S[:, kk] * idctn((z[kk * n:(kk + 1) * n] * dctn(
                    (S[:, kk] * v.reshape(-1)).reshape(n_img, n_img),
                    norm='ortho').reshape(n)).reshape(n_img, n_img),
                                         norm='ortho').reshape(n)
            return temp
Example #20
def idctn_trans_(spectrum):

    # spectrum = spectrum/(2**len(spectrum.shape))
    #spectrum   = multiply_both_dir(spectrum, 2,0)
    spectrum = multiply_both_sym(spectrum, 2)
    nod_values = dctn(spectrum, type=1)
    nod_values = nod_values / (2**len(spectrum.shape))
    # nod_values = multiply_both_dir(nod_values, 1/2,0)
    nod_values = multiply_both_sym(nod_values, 1 / 2)
    return nod_values
Example #21
 def testDctn(self, shape, dtype, s, axes, norm):
     rng = jtu.rand_default(self.rng())
     args_maker = lambda: (rng(shape, dtype), )
     jnp_fn = lambda a: jsp_fft.dctn(a, s=s, axes=axes, norm=norm)
     np_fn = lambda a: osp_fft.dctn(a, shape=s, axes=axes, norm=norm)
     self._CheckAgainstNumpy(np_fn,
                             jnp_fn,
                             args_maker,
                             check_dtypes=False,
                             tol=1e-4)
     self._CompileAndCheck(jnp_fn, args_maker, atol=1e-4)
Example #22
def nd_DCT(a):
    dim = np.size(np.shape(a))
    Ns = np.shape(a)

    factor = np.ones(1)
    for d in range(dim):
        factor = factor * (Ns[d] - 1)

    a_hat = dctn(a, type=1) / ((2**dim) * factor)

    return a_hat
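The unnormalized DCT-I is its own inverse up to a factor of 2(N_d - 1) per axis, so the scaling inside nd_DCT already absorbs the round-trip factor. A sketch of a matching inverse, assuming the same dctn import:

def nd_iDCT(a_hat):
    """Invert nd_DCT: applying the unnormalized DCT-I again recovers the nodal values."""
    return dctn(a_hat, type=1)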
Example #23
def pickle_2_img_single(data_file):
    '''load data from pkl'''

    if not os.path.exists(data_file):
        print('file {0} not exists'.format(data_file))
        exit()
    with open(data_file, 'rb') as f:
        data = pickle.load(f)
    total_x1, total_y = [], []
    for i in range(len(data)):
        x1 = []
        x2 = []
        yl = []
        print(len(data[i]['img']))
        for j in range(len(data[i]['labels'])):
            img = data[i]['img'][j]
            img = FFT.dctn(img)
            img_neu = data[i]['img_neu'][j]
            img_neu = FFT.dctn(img_neu)
            diff = img - img_neu
            lms = data[i]['lms'][j]
            lms = np.array(lms)
            '''
             img = data[i]['img'][j]
             img_neu = data[i]['img_neu'][j]
             diff = img - img_neu
             diff = FFT.dctn(diff)
             '''
            label = int(data[i]['labels'][j])
            if label == 7:
                label = 2

            #label = dense_to_one_hot(label, 6)

            x1.append(lms)
            yl.append(label)

        total_x1.append(x1)
        total_y.append(yl)

    return total_x1, total_y
Example #24
def partitioned_dct(img, K1, K2):
    # Get the set of 8x8 patches of the image (partion_img and prange come from the surrounding module)
    partioned = partion_img(img)
    # Allocate a (K1, K2) coefficient array per patch
    dct_patches = np.zeros((prange, prange, K1, K2), dtype=np.float64)
    for i in range(partioned.shape[0]):
        for j in range(partioned.shape[1]):
            dct_patches[i, j] = dctn(partioned[i, j],
                                     type=2,
                                     shape=[K1, K2],
                                     norm='ortho')
    return dct_patches
Example #25
File: dct.py Project: nmheim/esn
def dct2_sequence(Ftxx, ksize):
    """Discrete Cosine Transform of a sequence of 2D images.

    Params
    ------
    Ftxx : ndarray with shape (time, ydim, xdim)
    size : (nk1, nk2) determines how many DCT coefficents are kept

    Returns
    -------
    Ftkk: ndarray with shape (time, nk1, nk2)
    """
    Ftkk = dctn(Ftxx, norm='ortho', axes=[1, 2])[:, :ksize[0], :ksize[1]]
    return Ftkk
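A sketch of a corresponding inverse, assuming numpy is imported as np and idctn is imported alongside dctn: zero-pad the kept coefficients back to the full image size and apply the inverse orthonormal DCT.

def idct2_sequence(Ftkk, xsize):
    """Inverse DCT of a sequence of truncated coefficient arrays.

    Ftkk : ndarray with shape (time, nk1, nk2)
    xsize : (ydim, xdim) size of the reconstructed images
    """
    Ftkk_padded = np.zeros((Ftkk.shape[0],) + tuple(xsize))
    Ftkk_padded[:, :Ftkk.shape[1], :Ftkk.shape[2]] = Ftkk
    return idctn(Ftkk_padded, norm='ortho', axes=[1, 2])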
Example #26
    def alter_freq(self):
        self.progress.setValue(0)
        self.progress.show()

        round_image = np.vectorize(self.round_image_)

        # apply the DCT
        # d_img = dct(dct(self.img.T, norm='ortho').T, norm='ortho')
        d_img = dctn(self.img, norm='ortho')

        self.progress.setValue(25)

        # modify the frequencies
        for i, row in enumerate(d_img):
            self.progress.setValue(26 + (45 * i) / d_img.shape[0])
            for j, col in enumerate(row):
                if i + j >= self.d:
                    d_img[i, j] *= self.beta


        # apply the inverse DCT and round
        # i_img = round_image(idct(idct(d_img.T, norm='ortho').T, norm='ortho'))
        i_img = round_image(idctn(d_img, norm='ortho'))


        self.progress.setValue(self.progress.value() + 25)

        plt.figure(1)
        # display the results
        plt.subplot(122)
        plt.imshow(i_img, cmap=plt.get_cmap('gray'), vmin=0, vmax=255)
        plt.title('Altered image')

        plt.subplot(121)
        plt.imshow(self.img, cmap=plt.get_cmap('gray'), vmin=0, vmax=255)
        plt.title('Original image')

        fig = plt.gcf()
        fig.canvas.manager.window.wm_geometry("+%d+%d" % (0, 0))
        manager = plt.get_current_fig_manager()
        manager.resize(*manager.window.maxsize())

        self.progress.setValue(self.progress.value() + 5)

        plt.figure(2)
        sub_img = cv2.absdiff(self.img, i_img.astype(float))
        plt.imshow(sub_img, cmap=plt.get_cmap('gray'), vmin=0, vmax=255)
        plt.title('Difference image')

        plt.show()
Example #27
def dctn_trans_(nod_values):
    factor = 1
    for d in range(len(nod_values.shape)):
        factor = factor * (nod_values.shape[d] - 1)

    # nod_values = multiply_both_dir(nod_values, 2, 0)
    nod_values = multiply_both_sym(nod_values, 2)
    spectrum = dctn(nod_values, type=1)
    spectrum = spectrum / factor
    # spectrum = multiply_both_dir(spectrum, 1/2, 0)
    spectrum = multiply_both_sym(spectrum, 1 / 2)
    # print(spectrum)

    return spectrum
Example #28
def dct_compression(image, F, d):
    #compressed_image = image #copy to store the original image
    h = image.shape[0]
    print(h)
    w = image.shape[1]
    print(w)
    if (h % F != 0):
        h = int(h / F) * F
        print(h)
    if (w % F != 0):
        w = int(w / F) * F
        print(w)
    compressed_image = image[0:h, 0:w]
    print(h)
    print(w)
    # cycle the image in step of F
    for x in range(0, h, F):
        for y in range(0, w, F):
            cell = compressed_image[x:x + F, y:y +
                                    F]  # width of cell = F, height of cell = F
            #print("first cell:\n")
            #print(cell)
            cell = dctn(
                cell,
                norm='ortho')  # discrete cosine transform of the selected cell

            c_h = cell.shape[0]
            c_w = cell.shape[1]
            # zero out the frequencies in the cell according to the d parameter
            for i in range(0, c_h):
                for j in range(0, c_w):
                    if i + j >= d:
                        cell[i, j] = 0

            # compute the inverse dct of the cell
            cell = idctn(cell, norm='ortho')

            # round to the nearest integer, clamp negatives to 0 and values above 255 to 255
            for i in range(0, c_h):
                for j in range(0, c_w):
                    value = round(cell[i, j])
                    if value < 0:
                        value = 0
                    elif value > 255:
                        value = 255
                    cell[i, j] = value
            compressed_image[x:x + F, y:y + F] = cell

    return compressed_image
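A minimal usage sketch, assuming dct_compression above is importable; the file names and parameter values are placeholders:

from skimage import io

img = io.imread('input.bmp').astype(float)
if img.ndim == 3:
    img = img.mean(axis=2)  # naive grayscale conversion, just for the sketch
out = dct_compression(img, F=8, d=10)  # 8x8 blocks, keep frequencies with i + j < 10
io.imsave('output.bmp', out.astype('uint8'))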
Example #29
def encode_dct(orig, bx, by):
    new_shape = (
        orig.shape[0] // bx * bx,
        orig.shape[1] // by * by
    )
    new = orig[
        :new_shape[0],
        :new_shape[1]
    ].reshape((
        new_shape[0] // bx,
        bx,
        new_shape[1] // by,
        by
    ))
    return sfft.dctn(new, axes=[1,3], norm='ortho')
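A hypothetical inverse of encode_dct, assuming sfft also exposes idctn: invert the per-block DCT and fold the blocks back into a 2D image.

def decode_dct(blocks, bx, by):
    """Reverse encode_dct: inverse-transform each block and reshape back to 2D."""
    pixels = sfft.idctn(blocks, axes=[1, 3], norm='ortho')
    return pixels.reshape((pixels.shape[0] * bx, pixels.shape[2] * by))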
def test_dctn():
    """
    Verify that dcnt is correct
    """
    mat = np.array([
        [231, 32, 233, 161, 24, 71, 140, 245],
        [247, 40, 248, 245, 124, 204, 36, 107],
        [234, 202, 245, 167, 9, 217, 239, 173],
        [193, 190, 100, 167, 43, 180, 8, 70],
        [11, 24, 210, 177, 81, 243, 8, 112],
        [97, 195, 203, 47, 125, 114, 165, 181],
        [193, 70, 174, 167, 41, 30, 127, 245],
        [87, 149, 57, 192, 65, 129, 178, 228],
    ])
    print(dctn(mat, norm='ortho'))
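As a cross-check, the multidimensional transform should agree with applying the 1D DCT along each axis in turn; a self-contained sketch:

import numpy as np
from scipy.fft import dct, dctn

mat = np.random.randint(0, 256, size=(8, 8)).astype(float)
separable = dct(dct(mat, axis=0, norm='ortho'), axis=1, norm='ortho')
assert np.allclose(dctn(mat, norm='ortho'), separable)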