Example #1
def test_data_reconstruction_delete_nodes_2d():
    x = np.array([[1, 2, 3, 4, 5, 6, 7, 8]] * 8, dtype=np.float64)
    wp = pywt.WaveletPacket2D(data=x, wavelet='db1', mode='sym')

    new_wp = pywt.WaveletPacket2D(data=None, wavelet='db1', mode='sym')
    new_wp['vh'] = wp['vh'].data
    new_wp['vv'] = wp['vh'].data
    new_wp['vd'] = np.zeros((2, 2), dtype=np.float64)
    new_wp['a'] = [[3.0, 7.0, 11.0, 15.0]] * 4
    new_wp['d'] = np.zeros((4, 4), dtype=np.float64)
    new_wp['h'] = wp['h']       # all zeros

    assert_allclose(new_wp.reconstruct(update=False),
                    np.array([[1.5, 1.5, 3.5, 3.5, 5.5, 5.5, 7.5, 7.5]] * 8),
                    rtol=1e-12)

    new_wp['va'] = wp['va'].data
    assert_allclose(new_wp.reconstruct(update=False), x, rtol=1e-12)

    del(new_wp['va'])
    new_wp['va'] = wp['va'].data
    assert_(new_wp.data is None)

    assert_allclose(new_wp.reconstruct(update=True), x, rtol=1e-12)
    assert_allclose(new_wp.data, x, rtol=1e-12)
Example #2
def filt(array, channel='a'):
    import numpy as np
    import pywt
    if array.ndim == 3:
        abc = []
        for k in range(3):
            wp = pywt.WaveletPacket2D(data=array[:, :, k],
                                      wavelet='db1',
                                      mode='symmetric')
            abc.append(wp[channel].data)
        return np.stack(abc, axis=2)
    else:
        wp = pywt.WaveletPacket2D(data=array, wavelet='db1', mode='symmetric')
        return wp[channel].data
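A minimal usage sketch for the filt helper above, assuming random arrays as stand-ins for a grayscale and an RGB image (with 'db1', one decomposition level roughly halves each spatial dimension):

import numpy as np

gray = np.random.rand(64, 64)        # 2-D input: a single wavelet packet tree
rgb = np.random.rand(64, 64, 3)      # 3-D input: one tree per color channel

approx = filt(gray, channel='a')     # (32, 32) approximation band
diag = filt(rgb, channel='d')        # (32, 32, 3) diagonal detail, channels stacked
print(approx.shape, diag.shape)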
Example #3
def wavelete_packet2D(img_name):
    img = np.array(Image.open(img_name).convert('L'))
    rows, cols = img.shape
    plt.figure(img_name)
    plt.imshow(img, cmap='gray')
    # plt.axis('off')
    plt.show()  # show the original image

    # 2-D wavelet packet decomposition with db2, mode='symmetric'
    wp = pywt.WaveletPacket2D(data=img, wavelet='db2', mode='symmetric')

    # show a - LL, low-low coefficients
    plt.imshow((wp['a'].data), cmap='gray')
    plt.show()

    # show h - LH, low-high coefficients
    plt.imshow((wp['h'].data), cmap='gray')
    plt.show()

    # show v - HL, high-low coefficients
    plt.imshow((wp['v'].data), cmap='gray')
    plt.show()

    # show d - HH, high-high coefficients
    plt.imshow((wp['d'].data), cmap='gray')
    plt.show()
Example #4
def test_collecting_nodes_2d():
    x = np.array([[1, 2, 3, 4, 5, 6, 7, 8]] * 8, dtype=np.float64)
    wp = pywt.WaveletPacket2D(data=x, wavelet='db1', mode='sym')

    assert_(len(wp.get_level(0)) == 1)
    assert_(wp.get_level(0)[0].path == '')

    # First level
    assert_(len(wp.get_level(1)) == 4)
    assert_([node.path for node in wp.get_level(1)] == ['a', 'h', 'v', 'd'])

    # Second level
    assert_(len(wp.get_level(2)) == 16)
    paths = [node.path for node in wp.get_level(2)]
    expected_paths = ['aa', 'ah', 'av', 'ad', 'ha', 'hh', 'hv', 'hd', 'va',
                      'vh', 'vv', 'vd', 'da', 'dh', 'dv', 'dd']
    assert_(paths == expected_paths)

    # Third level.
    assert_(len(wp.get_level(3)) == 64)
    paths = [node.path for node in wp.get_level(3)]
    expected_paths = ['aaa', 'aah', 'aav', 'aad', 'aha', 'ahh', 'ahv', 'ahd',
                      'ava', 'avh', 'avv', 'avd', 'ada', 'adh', 'adv', 'add',
                      'haa', 'hah', 'hav', 'had', 'hha', 'hhh', 'hhv', 'hhd',
                      'hva', 'hvh', 'hvv', 'hvd', 'hda', 'hdh', 'hdv', 'hdd',
                      'vaa', 'vah', 'vav', 'vad', 'vha', 'vhh', 'vhv', 'vhd',
                      'vva', 'vvh', 'vvv', 'vvd', 'vda', 'vdh', 'vdv', 'vdd',
                      'daa', 'dah', 'dav', 'dad', 'dha', 'dhh', 'dhv', 'dhd',
                      'dva', 'dvh', 'dvv', 'dvd', 'dda', 'ddh', 'ddv', 'ddd']

    assert_(paths == expected_paths)
Example #5
def getWaveletComplexShiftData(img, wf='rbio3.1', mo='periodization', log=False, Type=0):
    '''input:
    img: image data, expected in the range [0, 1], with shape [2x, 2y]
    wf: wavelet family, default is 'rbio3.1'
    mo: signal extension mode of the wavelet transform, default is 'periodization'
    log: whether to also return the complex data for logging
    Type: 0 performs WaveletPacket2D first, then the FFT; output has shape [x, y, 2].
          1 performs only the FFT; output has shape [2x, 2y, 2].
          3 takes the logarithm of the absolute FFT; output has shape [2x, 2y, 1].

    return:
    the FFT-shifted complex data of shape m x n x 2 (m x n x 1 for Type 3), where the
    wavelet data fed to the FFT for Type 0 is the sum of the 'h', 'v' and 'd' components.
    '''
    #wc=np.fft.fft2(img) without wavelet decomposing
    if Type==3:
        wc=np.fft.fft2(img)# without wavelet decomposing
        wcs=np.fft.fftshift(wc)
        wcs=np.log(np.abs(wcs))
        twcs=wcs.real
        twcs=np.reshape(twcs, [twcs.shape[0], twcs.shape[1], 1])
    if Type==0:
        wave=pywt.WaveletPacket2D(data=img, wavelet=wf, mode=mo)
        wst=np.add(np.add(wave['h'].data, wave['v'].data), wave['d'].data)
        wc=np.fft.fft2(wst)
        wcs=np.fft.fftshift(wc)
        twcs=np.stack((wcs.real, wcs.imag), axis=2)
    elif Type==1:
        wc=np.fft.fft2(img)# without wavelet decomposing
        wcs=np.fft.fftshift(wc)
        twcs=np.stack((wcs.real, wcs.imag), axis=2)
    if log:
        return (twcs, np.hstack((wcs.real, wcs.imag)))
    return (twcs, None)
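A short, hedged call sketch for getWaveletComplexShiftData; the random 128x128 array is only a stand-in for a normalized image in [0, 1]:

import numpy as np

img = np.random.rand(128, 128)

# Type=0: sum the 'h', 'v', 'd' packets, then FFT -> real/imag stack of shape (64, 64, 2)
wav_fft, _ = getWaveletComplexShiftData(img, Type=0)

# Type=1: FFT of the raw image -> (128, 128, 2); with log=True the real and
# imaginary parts are also returned hstacked for logging
raw_fft, log_data = getWaveletComplexShiftData(img, Type=1, log=True)
print(wav_fft.shape, raw_fft.shape, log_data.shape)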
Example #6
def attack_rotate_v2(angle, orgimgspatial, Xdelta):
    noisyspatial = rotate(orgimgspatial, angle)
    wp = pywt.WaveletPacket2D(data=noisyspatial, wavelet='db4')
    noisyimg = wp['aaa'].data  # Attacked NOT WMked Image
    noisyimgwm = noisyimg + np.reshape(Xdelta, noisyimg.shape,
                                       order='F')  # Attacked WMked Image
    return noisyimgwm, noisyimg
Example #7
def procChanPacket(img, wavelet, nLevels):
    wt = pywt.WaveletPacket2D(img, wavelet=wavelet, maxlevel=nLevels)
    fv = []    
    wts = wt.decompose()
    for item in wts:
        item.walk(lambda node: processPacketNode(node=node, fv=fv))
    
    return fv
Example #8
def _compare_trees2(
    wavelet_str: str,
    max_lev: int,
    pywt_boundary: str = "zero",
    ptwt_boundary: str = "zero",
    height: int = 256,
    width: int = 256,
    batch_size: int = 1,
    transform_mode: bool = False,
    multiple_transforms: bool = False,
):

    face = misc.face()[:height, :width]
    face = np.mean(face, axis=-1).astype(np.float64)
    wavelet = pywt.Wavelet(wavelet_str)
    batch_list = []
    for _ in range(batch_size):
        wp_tree = pywt.WaveletPacket2D(
            data=face,
            wavelet=wavelet,
            mode=pywt_boundary,
            maxlevel=max_lev,
        )
        # Get the full decomposition
        wp_keys = list(product(["a", "h", "v", "d"], repeat=wp_tree.maxlevel))
        np_packets = []
        for node in wp_keys:
            np_packet = wp_tree["".join(node)].data
            np_packets.append(np_packet)
        np_packets = np.stack(np_packets, 0)
        batch_list.append(np_packets)
    batch_np_packets = np.stack(batch_list, 0)

    # get the PyTorch decomposition
    pt_data = torch.stack([torch.from_numpy(face)] * batch_size, 0)

    if transform_mode:
        ptwt_wp_tree = WaveletPacket2D(None,
                                       wavelet=wavelet,
                                       mode=ptwt_boundary).transform(
                                           pt_data, max_level=max_lev)
    else:
        ptwt_wp_tree = WaveletPacket2D(pt_data,
                                       wavelet=wavelet,
                                       mode=ptwt_boundary,
                                       max_level=max_lev)

    # if the multiple_transforms flag is set, recalculate the packets
    if multiple_transforms:
        ptwt_wp_tree.transform(pt_data, max_level=max_lev)

    packets = []
    for node in wp_keys:
        packet = ptwt_wp_tree["".join(node)]
        packets.append(packet)
    packets_pt = torch.stack(packets, 1).numpy()
    assert wp_tree.maxlevel == ptwt_wp_tree.max_level
    assert np.allclose(packets_pt, batch_np_packets)
Example #9
def sp_project(image,
               weights,
               wavelet='bior4.4',
               mode='periodization',
               max_lev=1,
               rho=0.02):
    """
	Projects weights onto top rho% of the support of image (in the wavelet basis).

	:param image: Should be in the range [-1, 1] and of shape [num_features] where num_features is a perfect square.
	:param weights: Should be of shape [num_features].
    :param rho: Sparsity level, in the range [0, 1].
    :param wavelet: Wavelet to use in the transform. See https://pywavelets.readthedocs.io/ for more details.
    :param mode: Signal extension mode. See https://pywavelets.readthedocs.io/ for more details.
    :param max_lev: Maximum allowed level of decomposition.
    """
    num_features = image.shape[0]
    num_features_per_dim = int(np.sqrt(num_features))
    image = 0.5 * image.reshape([num_features_per_dim, num_features_per_dim
                                 ]) + 0.5
    wp = pywt.WaveletPacket2D(image, wavelet, mode, max_lev)
    paths = [node.path for node in wp.get_level(max_lev)]
    m = wp[paths[0]].data.shape[0]
    l = (4**max_lev) * m * m
    k = np.floor(rho * l).astype('int')
    n = l - k
    coeffs = np.zeros(l)
    for j in range(4**max_lev):
        coeffs[j * m * m:(j + 1) * m * m] = wp[paths[j]].data.flatten()
    indices = np.argpartition(np.abs(coeffs), n)[:n]

    wp_w = pywt.WaveletPacket2D(
        weights.reshape(num_features_per_dim, num_features_per_dim), wavelet,
        mode, max_lev)
    paths_w = [node.path for node in wp_w.get_level(max_lev)]
    coeffs_w = np.zeros(l)
    for j in range(4**max_lev):
        coeffs_w[j * m * m:(j + 1) * m * m] = wp_w[paths_w[j]].data.flatten()
    coeffs_w_proj = coeffs_w.copy()
    coeffs_w_proj[indices] = 0
    for i2 in range(4**max_lev):
        wp_w[paths_w[i2]].data = coeffs_w_proj[i2 * m * m:(i2 + 1) * m *
                                               m].reshape([m, m])
    weights_proj = wp_w.reconstruct(update=False).astype('float32').flatten()
    return weights_proj
Example #10
def test_2d_roundtrip():
    # test case corresponding to PyWavelets issue 447
    original = pywt.data.camera()
    wp = pywt.WaveletPacket2D(data=original,
                              wavelet='db3',
                              mode='smooth',
                              maxlevel=3)
    r = wp.reconstruct()
    assert_allclose(original, r, atol=1e-12, rtol=1e-12)
Example #11
def emg_dwpt2d(signal, wavelet_name='db1'):
    wavelet_level = 3
    wp = pywt.WaveletPacket2D(signal, wavelet_name, mode='sym')
    coeffs = []
    level_coeff = wp.get_level(wavelet_level)
    for i in range(len(level_coeff)):
        coeffs.append(level_coeff[i].data.flatten())
    coeffs = np.hstack(coeffs)
    #    coeffs = coeffs.flatten()
    return coeffs
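A brief usage sketch for emg_dwpt2d, assuming a hypothetical 2-D EMG window of 64 samples by 8 channels (level 3 is reachable with 'db1' for this size):

import numpy as np

window = np.random.randn(64, 8)                    # stand-in for an EMG segment
features = emg_dwpt2d(window, wavelet_name='db1')
print(features.shape)                              # 1-D vector: all level-3 packet nodes flattened and concatenated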
Example #12
def aa():
    #  img = eng.read_img(LEAN_IMG)
    #  img_array = np.array([list(ii) for ii in img])
    #  pkl.dump(img_array, open(PKL_IMG_ARRAY, 'wb'))
    img_array = pkl.load(open(PKL_IMG_ARRAY, 'rb'))
    wp = pywt.WaveletPacket2D(data=img_array, wavelet='bior4.4', maxlevel=9)

    #  py_dwt = pywt.wavedec2(img_array, 'bior4.4')

    return wp
Example #13
def test_lazy_evaluation_2D():
    # Note: internal implementation detail not to be relied on.  Testing for
    # now for backwards compatibility, but this test may break if the implementation changes.
    x = np.array([[1, 2, 3, 4, 5, 6, 7, 8]] * 8)
    wp = pywt.WaveletPacket2D(data=x, wavelet='db1', mode='sym')

    assert_(wp.a is None)
    assert_allclose(wp['a'].data, np.array([[3., 7., 11., 15.]] * 4),
                    rtol=1e-12)
    assert_allclose(wp.a.data, np.array([[3., 7., 11., 15.]] * 4), rtol=1e-12)
    assert_allclose(wp.d.data, np.zeros((4, 4)), rtol=1e-12, atol=1e-12)
Example #14
def get_WaveletDataC3(img, wf='rbio3.1', mo='periodization'):
    '''input:
    img: image data, expected in the range [0, 1], with shape [2x, 2y]
    wf: wavelet family, default is 'rbio3.1'
    mo: signal extension mode of the wavelet transform, default is 'periodization'

    return:
    wavelet data of shape m x n x 3, where the 'h', 'v' and 'd' components are stacked along the last axis.
    '''
    wave=pywt.WaveletPacket2D(data=img, wavelet=wf, mode=mo)
    wst=np.stack([wave['h'].data,wave['v'].data,wave['d'].data], axis=-1)
    return wst
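A minimal call sketch for get_WaveletDataC3, assuming a normalized grayscale image with even dimensions:

import numpy as np

img = np.random.rand(256, 256)   # stand-in for an image scaled to [0, 1]
wst = get_WaveletDataC3(img)
print(wst.shape)                 # (128, 128, 3): the 'h', 'v', 'd' bands stacked along the last axis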
Example #15
def init_dwt(resume=None, shape=None, wave=None, colors=None):
    size = None
    wp_fake = pywt.WaveletPacket2D(data=np.zeros(shape[2:]),
                                   wavelet='db1',
                                   mode='symmetric')
    xfm = DWTForward(J=wp_fake.maxlevel, wave=wave, mode='symmetric').cuda()
    # xfm = DTCWTForward(J=lvl, biort='near_sym_b', qshift='qshift_b').cuda() # 4x more params, biort ['antonini','legall','near_sym_a','near_sym_b']
    ifm = DWTInverse(wave=wave,
                     mode='symmetric').cuda()  # symmetric zero periodization
    # ifm = DTCWTInverse(biort='near_sym_b', qshift='qshift_b').cuda() # 4x more params, biort ['antonini','legall','near_sym_a','near_sym_b']
    if resume is None:  # random init
        Yl_in, Yh_in = xfm(torch.zeros(shape).cuda())
        Ys = [torch.randn(*Y.shape).cuda() for Y in [Yl_in, *Yh_in]]
    elif isinstance(resume, str):
        if os.path.isfile(resume):
            if os.path.splitext(resume)[1].lower()[1:] in [
                    'jpg', 'png', 'tif', 'bmp'
            ]:
                img_in = imread(resume)
                Ys = img2dwt(img_in, wave=wave, colors=colors)
                print(' loaded image', resume, img_in.shape, 'level',
                      len(Ys) - 1)
                size = img_in.shape[:2]
                wp_fake = pywt.WaveletPacket2D(data=np.zeros(size),
                                               wavelet='db1',
                                               mode='symmetric')
                xfm = DWTForward(J=wp_fake.maxlevel,
                                 wave=wave,
                                 mode='symmetric').cuda()
            else:
                Ys = torch.load(resume)
                Ys = [y.detach().cuda() for y in Ys]
        else:
            print(' Snapshot not found:', resume)
            exit()
    else:
        Ys = [y.cuda() for y in resume]
    # print('level', len(Ys)-1, 'low freq', Ys[0].cpu().numpy().shape)
    return Ys, xfm, ifm, size
Example #16
def enhance_puncta(img, level=7):
    """
    Remove low-frequency wavelet signals to enhance puncta.
    Depending on the image size, try level 6-8.
    """
    if level == 0:
        return img
    wp = pywt.WaveletPacket2D(data=img, wavelet='haar', mode='sym')
    back = resize(
        np.array(wp['d' * level].data), img.shape, order=3,
        mode='reflect') / (2**level)
    cimg = img - back
    cimg[cimg < 0] = 0
    return cimg
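A quick, hedged check of enhance_puncta on synthetic data; a real call would pass a 2-D fluorescence image, and resize inside the function is assumed to be skimage.transform.resize:

import numpy as np

rng = np.random.default_rng(0)
img = rng.random((512, 512)) * 0.1       # dim, noisy background
img[100, 100] = img[300, 250] = 1.0      # a few bright puncta

enhanced = enhance_puncta(img, level=7)
print(enhanced.shape, enhanced.min())    # same shape as the input, clipped at zero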
Example #17
def test_accessing_node_atributes_2d():
    x = np.array([[1, 2, 3, 4, 5, 6, 7, 8]] * 8, dtype=np.float64)
    wp = pywt.WaveletPacket2D(data=x, wavelet='db1', mode='sym')

    assert_allclose(wp['av'].data, np.zeros((2, 2)) - 4, rtol=1e-12)
    assert_(wp['av'].path == 'av')
    assert_(wp['av'].node_name == 'v')
    assert_(wp['av'].parent.path == 'a')

    assert_allclose(wp['av'].parent.data, np.array([[3., 7., 11., 15.]] * 4),
                    rtol=1e-12)
    assert_(wp['av'].level == 2)
    assert_(wp['av'].maxlevel == 3)
    assert_(wp['av'].mode == 'sym')
Example #18
def img2dwt(img_in, wave='coif2', sharp=0.3, colors=1.):
    image_t = un_rgb(img_in, colors=colors)
    with torch.no_grad():
        wp_fake = pywt.WaveletPacket2D(data=np.zeros(image_t.shape[2:]),
                                       wavelet='db1',
                                       mode='zero')
        lvl = wp_fake.maxlevel
        # print(image_t.shape, lvl)
        xfm = DWTForward(J=lvl, wave=wave, mode='symmetric').cuda()
        Yl_in, Yh_in = xfm(image_t.cuda())
        Ys = [Yl_in, *Yh_in]
    scale = dwt_scale(Ys, sharp)
    for i in range(len(Ys) - 1):
        Ys[i + 1] /= scale[i]
    return Ys
Example #19
def getWaveletData(img, wf='rbio3.1', mo='periodization'):
    '''input:
    img: image data, expected in the range [0, 1], with shape [2x, 2y]
    wf: wavelet family, default is 'rbio3.1'
    mo: signal extension mode of the wavelet transform, default is 'periodization'

    return:
    wavelet data of shape m x n x 1, where the wavelet data is the sum of the 'h', 'v' and 'd' components.
    '''
    #wc=np.fft.fft2(img) without wavelet decomposing
    wave=pywt.WaveletPacket2D(data=img, wavelet=wf, mode=mo)
    wst=np.add(np.add(wave['h'].data, wave['v'].data), wave['d'].data)
    wst=np.reshape(wst, [wst.shape[0], wst.shape[1], 1])
    return wst
Example #20
def localfeature(seg):
    ll2 = []
    lh1 = []
    hl1 = []
    hh1 = []
    dwvt = []
    ht = []
    vt = []

    for i in range(16):
        wp = pywt.WaveletPacket2D(data=seg[i], wavelet='haar', mode='sym')
        lh1.append(wp['v'].data)
        hl1.append(wp['h'].data)
        hh1.append(wp['d'].data)
        level1 = np.hstack((np.vstack((wp['aa'].data, wp['vv'].data)),
                            np.vstack((wp['hh'].data, wp['dd'].data))))
        level2 = np.hstack((np.vstack((wp['aaa'].data, wp['vvv'].data)),
                            np.vstack((wp['hhh'].data, wp['ddd'].data))))
        level3 = np.hstack((np.vstack((wp['aaaa'].data, wp['vvvv'].data)),
                            np.vstack((wp['hhhh'].data, wp['dddd'].data))))
        level4 = np.hstack((np.vstack((wp['aaaaa'].data, wp['vvvvv'].data)),
                            np.vstack((wp['hhhhh'].data, wp['ddddd'].data))))
        level3[:8, :8] = level4
        level2[:16, :16] = level3
        level1[:32, :32] = level2
        ll2.append(level1)
        vt.append(np.vstack((ll2[i], lh1[i])))
        ht.append(np.vstack((hl1[i], hh1[i])))
        dwvt.append(np.hstack((vt[i], ht[i])))
    s1 = []
    s2 = []
    s3 = []
    s4 = []
    subvector = []
    vector = []
    for i in range(16):
        s1.append(np.linalg.svd(ll2[i], compute_uv=False))
        s2.append(np.linalg.svd(hl1[i], compute_uv=False))
        s3.append(np.linalg.svd(lh1[i], compute_uv=False))
        s4.append(np.linalg.svd(hh1[i], compute_uv=False))
        subvector.append(
            np.vstack((np.vstack((s1[i], s2[i])), np.vstack((s3[i], s4[i])))))
        vector.append(subvector[i])
        vector1 = np.concatenate(vector, axis=0)
        vector1 = np.array(vector1, dtype=int)
    return vector1
Example #21
def test_wavelet_packet_dtypes():
    shape = (16, 16)
    for dtype in [np.float32, np.float64, np.complex64, np.complex128]:
        x = np.random.randn(*shape).astype(dtype)
        if np.iscomplexobj(x):
            x = x + 1j * np.random.randn(*shape).astype(x.real.dtype)
        wp = pywt.WaveletPacket2D(data=x, wavelet='db1', mode='symmetric')
        # no unnecessary copy made
        assert_(wp.data is x)

        # assigning to a node should not change supported dtypes
        wp['d'] = wp['d'].data
        assert_equal(wp['d'].data.dtype, x.dtype)

        # full decomposition
        wp.get_level(wp.maxlevel)

        # reconstruction from coefficients should preserve dtype
        r = wp.reconstruct(False)
        assert_equal(r.dtype, x.dtype)
        assert_allclose(r, x, atol=1e-5, rtol=1e-5)
Example #22
def test_freq_order(level, wavelet_str, pywt_boundary):
    """Test the packets in frequency order."""
    face = misc.face()
    wavelet = pywt.Wavelet(wavelet_str)
    wp_tree = pywt.WaveletPacket2D(
        data=np.mean(face, axis=-1).astype(np.float64),
        wavelet=wavelet,
        mode=pywt_boundary,
    )
    # Get the full decomposition
    freq_tree = wp_tree.get_level(level, "freq")
    freq_order = get_freq_order(level)

    for order_list, tree_list in zip(freq_tree, freq_order):
        for order_el, tree_el in zip(order_list, tree_list):
            print(
                level,
                order_el.path,
                "".join(tree_el),
                order_el.path == "".join(tree_el),
            )
            assert order_el.path == "".join(tree_el)
Example #23
def sp_frontend(images,
                rho=0.02,
                wavelet='bior4.4',
                mode='periodization',
                max_lev=1):
    """
    Sparsifies input in the wavelet basis (using the PyWavelets package) and returns reconstruction.

    :param images: Should be in the range [-1, 1] and of shape [num_samples, num_features] where num_features is a perfect square.
    :param rho: Sparsity level, in the range [0, 1].
    :param wavelet: Wavelet to use in the transform. See https://pywavelets.readthedocs.io/ for more details.
    :param mode: Signal extension mode. See https://pywavelets.readthedocs.io/ for more details.
    :param max_lev: Maximum allowed level of decomposition.
    """
    num_samples = images.shape[0]
    num_features = images.shape[1]
    num_features_per_dim = int(np.sqrt(num_features))
    images_sp = images.copy()
    for i in range(num_samples):
        image = 0.5 * images[i, :].reshape(num_features_per_dim,
                                           num_features_per_dim) + 0.5
        wp = pywt.WaveletPacket2D(image, wavelet, mode, max_lev)
        paths = [node.path for node in wp.get_level(max_lev)]
        m = wp[paths[0]].data.shape[0]
        l = (4**max_lev) * m * m
        k = np.floor(rho * l).astype('int')
        n = l - k
        coeffs = np.zeros(l)
        for j in range(4**max_lev):
            coeffs[j * m * m:(j + 1) * m * m] = wp[paths[j]].data.flatten()
        indices = np.argpartition(np.abs(coeffs), n)[:n]
        coeffs[indices] = 0
        for j in range(4**max_lev):
            wp[paths[j]].data = coeffs[j * m * m:(j + 1) * m * m].reshape(
                [m, m])
        image_r = wp.reconstruct(update=False).astype('float32')
        image_r = 2.0 * np.clip(image_r, 0.0, 1.0) - 1.0
        images_sp[i, :] = image_r.flatten()
    return images_sp
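A small usage sketch for sp_frontend, assuming a hypothetical batch of four flattened 32x32 images scaled to [-1, 1]:

import numpy as np

rng = np.random.default_rng(1)
images = rng.uniform(-1.0, 1.0, size=(4, 32 * 32)).astype('float32')

sparse_images = sp_frontend(images, rho=0.05)
print(sparse_images.shape)   # (4, 1024); each row is rebuilt from its largest 5% of wavelet coefficients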
Example #24
def test_traversing_tree_2d():
    x = np.array([[1, 2, 3, 4, 5, 6, 7, 8]] * 8, dtype=np.float64)
    wp = pywt.WaveletPacket2D(data=x, wavelet='db1', mode='sym')

    assert_(np.all(wp.data == x))
    assert_(wp.path == '')
    assert_(wp.level == 0)
    assert_(wp.maxlevel == 3)

    assert_allclose(wp['a'].data, np.array([[3., 7., 11., 15.]] * 4),
                    rtol=1e-12)
    assert_allclose(wp['h'].data, np.zeros((4, 4)), rtol=1e-12, atol=1e-14)
    assert_allclose(wp['v'].data, -np.ones((4, 4)), rtol=1e-12, atol=1e-14)
    assert_allclose(wp['d'].data, np.zeros((4, 4)), rtol=1e-12, atol=1e-14)

    assert_allclose(wp['aa'].data, np.array([[10., 26.]] * 2), rtol=1e-12)

    assert_(wp['a']['a'].data is wp['aa'].data)
    assert_allclose(wp['aaa'].data, np.array([[36.]]), rtol=1e-12)

    assert_raises(IndexError, lambda: wp['aaaa'])
    assert_raises(ValueError, lambda: wp['f'])
Example #25
import cv2
import pywt
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import os

dir = os.path.dirname(__file__)
foldername = os.path.join(dir, 'Images')
os.chdir(foldername)

image = cv2.imread('BPF.jpg')
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
cv2.imwrite('gray_image.png', gray_image)
x = np.asarray(Image.open("gray_image.png").convert("L"))
wp = pywt.WaveletPacket2D(data=x, wavelet='db1', mode='sym')
plt.title("Original Image")
plt.imshow(wp.data, plt.cm.gray)  # plot original image
plt.show()

limith = 25
limitv = 20
limitd = 20

zh = wp['h'].data
zh[zh < limith] = 0.0
plt.title("Horizontal")
plt.imshow(zh, plt.cm.gray)  # plot horizontal decomposition of image
plt.show()

zv = wp['v'].data
Example #26
def tentDetection_wt_mm(strInputFile,
                        maxTentArea,
                        strOutputFile,
                        strShape='box',
                        iThresh_coeff=0):
    import pywt
    import pymorph as pymm

    objImg = osgeo.gdal.Open(strInputFile, GA_ReadOnly)
    nRasterCount = objImg.RasterCount
    poDataset = objImg.ReadAsArray().astype(float)
    geotransform = objImg.GetGeoTransform()
    pixelWidth = np.fabs(geotransform[1])
    pixelHeight = np.fabs(geotransform[5])
    resolution = pixelWidth * pixelHeight
    # NoDataValue = objImg.GetRasterBand(1).GetNoDataValue()

    # gray scale image
    if (nRasterCount == 1):
        objnImg = pymm.to_int32(poDataset)
    # RGB image
    elif (nRasterCount == 3):
        objnImg = pymm.to_gray(poDataset)
    else:
        print('it only supports gray-scale or RGB images')
        sys.exit(1)

    # determine the structure element
    iNum = int(np.sqrt(maxTentArea) / resolution) + 1
    if (strShape == 'box'):
        objStructureElement = pymm.sebox(iNum)
    elif (strShape == 'cross'):
        objStructureElement = pymm.secross(iNum)
    else:
        objStructureElement = pymm.sedisk(iNum)

    # decomposition until 1 level
    wp = pywt.WaveletPacket2D(data=objnImg,
                              wavelet='db4',
                              mode='sym',
                              maxlevel=1)
    # iMaxLevel = wp.maxlevel()
    # top-hat
    wp['h'].data = pymm.openrecth(pymm.to_int32(wp['h'].data),
                                  objStructureElement, objStructureElement)
    wp['v'].data = pymm.openrecth(pymm.to_int32(wp['v'].data),
                                  objStructureElement, objStructureElement)
    wp['d'].data = pymm.openrecth(pymm.to_int32(wp['d'].data),
                                  objStructureElement, objStructureElement)
    wp['a'].data = 0.5 * wp['a'].data
    # reconstruction
    wp.reconstruct(update=True)

    # top-hat for reconstructed image
    objtophat = pymm.openrecth(pymm.to_int32(wp.data), objStructureElement,
                               objStructureElement)

    # y = mean + k*std
    (minValue, maxValue, meanValue,
     stdValue) = objImg.GetRasterBand(1).GetStatistics(0, 1)

    if (nRasterCount == 3):
        (minValue2, maxValue2, meanValue2,
         stdValue2) = objImg.GetRasterBand(2).GetStatistics(0, 1)
        (minValue3, maxValue3, meanValue3,
         stdValue3) = objImg.GetRasterBand(3).GetStatistics(0, 1)
        meanValue = 0.2989 * meanValue + 0.5870 * meanValue2 + 0.1140 * meanValue3
        maxValue = 0.2989 * maxValue + 0.5870 * maxValue2 + 0.1140 * maxValue3

    # meanValue = 438
    # maxValue = 2047

    threshad = meanValue + iThresh_coeff * stdValue

    objTent = pymm.threshad(objtophat, stdValue, maxValue)

    data_list = []
    data_list.append(objTent)

    WriteOutputImage(strOutputFile, 1, data_list, 0, 0, 0, strInputFile)
Example #27
def GetFeatures(input_img):
    #input_img_name = "../images/10045.jpg"
    features = {}

    I_rgb_in = input_img  #cv2.imread(input_img_name)[:,:,::-1]
    scaler = np.min([800.0 / I_rgb_in.shape[0], 800.0 / I_rgb_in.shape[1]])
    I_rgb = cv2.resize(I_rgb_in, (np.int(
        scaler * I_rgb_in.shape[1]), np.int(scaler * I_rgb_in.shape[0])),
                       interpolation=cv.CV_INTER_AREA)
    print I_rgb_in.shape, "->", I_rgb.shape
    #assert isinstance(I_rgb, np.ndarray), "Filename %s is not valid" % input_img_name

    I_rgb = remove_border(I_rgb)
    I_hsv = cv2.cvtColor(I_rgb, cv.CV_RGB2HSV)
    I_h = I_hsv[:, :, 0]
    I_h_rad = I_h.flatten() * np.pi / 180.0
    I_s = I_hsv[:, :, 1]
    features['isGray'] = np.all(I_s == 0)
    I_v = I_hsv[:, :, 2]
    I_g = cv2.cvtColor(I_rgb, cv.CV_RGB2GRAY)

    nrows = I_rgb.shape[0]
    ncols = I_rgb.shape[1]

    rows1thrd = np.int(np.floor(nrows * 1.0 / 3.0))
    rows2thrd = np.int(np.floor(nrows * 2.0 / 3.0))
    cols1thrd = np.int(np.floor(ncols * 1.0 / 3.0))
    cols2thrd = np.int(np.floor(ncols * 2.0 / 3.0))

    rows1thrd_o = np.int(rows1thrd - np.floor(nrows / 20.0))
    rows2thrd_o = np.int(rows2thrd + np.floor(nrows / 20.0))
    cols1thrd_o = np.int(cols1thrd - np.floor(ncols / 20.0))
    cols2thrd_o = np.int(cols2thrd + np.floor(ncols / 20.0))

    rows1thrd_i = np.int(rows1thrd + np.floor(nrows / 20.0))
    rows2thrd_i = np.int(rows2thrd - np.floor(nrows / 20.0))
    cols1thrd_i = np.int(cols1thrd + np.floor(ncols / 20.0))
    cols2thrd_i = np.int(cols2thrd - np.floor(ncols / 20.0))

    I_zRoT = np.zeros_like(I_g)
    I_zRoT[rows1thrd_o:rows2thrd_o, cols1thrd_o:cols2thrd_o] = 1
    I_zRoT[rows1thrd_i:rows2thrd_i, cols1thrd_i:cols2thrd_i] = 0

    sm = pySaliencyMap.pySaliencyMap(ncols, nrows)
    saliencymap = sm.SMGetSM(I_rgb)
    features['Salience_mu'] = np.mean(saliencymap)  # 0-1
    features['Salience_med'] = np.median(saliencymap)  # 0-1
    features['Salience_var'] = np.var(saliencymap)  # 0-1

    features['SubjLighting_Hue'] = np.log(
        circstat.mean(I_h[saliencymap >= 0.2] * np.pi / 180.0) /
        circstat.mean(I_h * np.pi / 180.0))
    features['SubjLighting_Saturation'] = np.log(
        np.mean(I_s[saliencymap >= 0.2]) / np.mean(I_s))
    features['SubjLighting_Value'] = np.log(
        np.mean(I_v[saliencymap >= 0.2]) / np.mean(I_v))

    features['Blurriness'] = BlurDetection.blur_detector(I_rgb)[1]
    features['ComplementaryColorIndex'] = np.abs(
        np.exp(2 * I_h_rad * 1j).sum() / len(I_h_rad))

    # dutta f4
    features['Hue_mu'] = circstat.mean(I_h_rad) * 180.0 / np.pi
    features['Hue_var'] = circstat.var(I_h_rad) * 180.0 / np.pi

    # dutta f3
    features['Saturation_mu'] = np.mean(I_s) / 255.0
    features['Saturation_var'] = np.var(I_s / 255.0)

    # dutta f1
    features['Value_mu'] = np.mean(I_v) / 255.0
    features['Value_var'] = np.var(I_v / 255.0)

    # dutta f2
    features['Colorfulness'] = emd.getColorfulness(I_rgb, 8)

    # dutta f5 - circularized
    features['Rule_of_Thirds_Hue'] = circstat.mean(
        I_h[rows1thrd:rows2thrd, cols1thrd:cols2thrd] * np.pi /
        180.0) * 180.0 / np.pi

    # dutta f6
    features['Rule_of_Thirds_Saturation'] = np.mean(
        I_s[rows1thrd:rows2thrd, cols1thrd:cols2thrd] / 255.0)

    # dutta f7
    features['Rule_of_Thirds_Value'] = np.mean(
        I_v[rows1thrd:rows2thrd, cols1thrd:cols2thrd] / 255.0)

    features['Rule_of_Thirds_Salience'] = np.sum(
        saliencymap[I_zRoT == 1]) / np.sum(I_zRoT)

    (maskr, maskc) = np.where(I_zRoT)
    (maxsr, maxsc) = np.where(saliencymap == np.max(saliencymap))
    features['Rule_of_Thirds_Distance'] = np.min([
        np.sqrt(((maxsc[0] - maskc[i]) / np.float(ncols))**2 +
                ((maxsr[0] - maskr[i]) / np.float(nrows))**2)
        for i in range(len(maskr))
    ]) / np.sqrt(2)

    # dutta f8/f9 - Familiarity Metric - Requires Knn clustering of a large dataset.
    #

    # dutta f10-12
    wpH = pywt.WaveletPacket2D(data=I_h, wavelet='db1', mode='zpd', maxlevel=3)
    for ii in xrange(3):
        whh = wpH['d' * ii].data
        whl = wpH['v' * ii].data
        wlh = wpH['h' * ii].data
        Sk = np.linalg.norm(whh, 2) + np.linalg.norm(whl, 2) + np.linalg.norm(
            wlh, 2)
        features["Wavelet_hue_%d" %
                 ii] = (whh.sum() + whl.sum() + wlh.sum()) / Sk

    # dutta f13-15
    wpS = pywt.WaveletPacket2D(data=I_s, wavelet='db1', mode='zpd', maxlevel=3)
    for ii in xrange(3):
        whh = wpS['d' * ii].data
        whl = wpS['v' * ii].data
        wlh = wpS['h' * ii].data
        Sk = np.linalg.norm(whh, 2) + np.linalg.norm(whl, 2) + np.linalg.norm(
            wlh, 2)
        features["Wavelet_saturation_%d" %
                 ii] = (whh.sum() + whl.sum() + wlh.sum()) / Sk

    # dutta f16-18
    wpV = pywt.WaveletPacket2D(data=I_v, wavelet='db1', mode='zpd', maxlevel=3)
    for ii in xrange(3):
        whh = wpV['d' * ii].data
        whl = wpV['v' * ii].data
        wlh = wpV['h' * ii].data
        Sk = np.linalg.norm(whh, 2) + np.linalg.norm(whl, 2) + np.linalg.norm(
            wlh, 2)
        features["Wavelet_value_%d" %
                 ii] = (whh.sum() + whl.sum() + wlh.sum()) / Sk

    # dutta f19-21
    features['Wavelet_hue'] = features['Wavelet_hue_0'] + features[
        'Wavelet_hue_1'] + features['Wavelet_hue_2']
    features[
        'Wavelet_saturation'] = features['Wavelet_saturation_0'] + features[
            'Wavelet_saturation_1'] + features['Wavelet_saturation_2']
    features['Wavelet_value'] = features['Wavelet_value_0'] + features[
        'Wavelet_value_1'] + features['Wavelet_value_2']

    # dutta f22 - size
    features['Img_size'] = nrows + ncols

    # dutta f23 - ratio
    features['Img_ratio'] = float(ncols) / nrows

    # dutta f24-52 - Requires Segementation!
    #

    # dutta f53-55 Low DOF
    #wp = pywt.WaveletPacket2D(data=I_h, wavelet='db1', mode='zpd', maxlevel=3)
    (nsmallrows, nsmallcols) = wpH['ddd'].data.shape
    A = wpH['ddd'].data[nsmallrows*1/4:nsmallrows*3/4,nsmallcols*1/4:nsmallcols*3/4].sum() + \
        wpH['vvv'].data[nsmallrows*1/4:nsmallrows*3/4,nsmallcols*1/4:nsmallcols*3/4].sum() + \
        wpH['hhh'].data[nsmallrows*1/4:nsmallrows*3/4,nsmallcols*1/4:nsmallcols*3/4].sum()
    B = wpH['ddd'].data.sum() + wpH['vvv'].data.sum() + wpH['hhh'].data.sum()
    features["DoF_hue"] = A / B

    #wp = pywt.WaveletPacket2D(data=I_s, wavelet='db1', mode='zpd', maxlevel=3)
    (nsmallrows, nsmallcols) = wpS['ddd'].data.shape
    A = wpS['ddd'].data[nsmallrows*1/4:nsmallrows*3/4,nsmallcols*1/4:nsmallcols*3/4].sum() + \
        wpS['vvv'].data[nsmallrows*1/4:nsmallrows*3/4,nsmallcols*1/4:nsmallcols*3/4].sum() + \
        wpS['hhh'].data[nsmallrows*1/4:nsmallrows*3/4,nsmallcols*1/4:nsmallcols*3/4].sum()
    B = wpS['ddd'].data.sum() + wpS['vvv'].data.sum() + wpS['hhh'].data.sum()
    features["DoF_saturation"] = A / B

    #wp = pywt.WaveletPacket2D(data=I_v, wavelet='db1', mode='zpd', maxlevel=3)
    (nsmallrows, nsmallcols) = wpV['ddd'].data.shape
    A = wpV['ddd'].data[nsmallrows*1/4:nsmallrows*3/4,nsmallcols*1/4:nsmallcols*3/4].sum() + \
        wpV['vvv'].data[nsmallrows*1/4:nsmallrows*3/4,nsmallcols*1/4:nsmallcols*3/4].sum() + \
        wpV['hhh'].data[nsmallrows*1/4:nsmallrows*3/4,nsmallcols*1/4:nsmallcols*3/4].sum()
    B = wpV['ddd'].data.sum() + wpV['vvv'].data.sum() + wpV['hhh'].data.sum()
    features["DoF_value"] = A / B

    features["LapVar_Hue"] = cv2.Laplacian(I_h / 255.0, cv2.CV_64F).var()
    features["LapVar_Saturation"] = cv2.Laplacian(I_s / 255.0,
                                                  cv2.CV_64F).var()
    features["LapVar_Value"] = cv2.Laplacian(I_v / 255.0, cv2.CV_64F).var()

    tmp = I_h
    lines = cv2.HoughLinesP(cv2.Canny(tmp, 100, 200, apertureSize=3),
                            1,
                            np.pi / 180,
                            100,
                            minLineLength=5,
                            maxLineGap=20)
    if not lines is None:
        features['ProbAngles_Hue'] = circstat.mean([
            np.arctan2(np.abs(y2 - y1), np.abs(x2 - x1))
            for x1, y1, x2, y2 in lines[0]
        ])
    else:
        features['ProbAngles_Hue'] = 0

    tmp = I_s
    lines = cv2.HoughLinesP(cv2.Canny(tmp, 100, 200, apertureSize=3),
                            1,
                            np.pi / 180,
                            100,
                            minLineLength=5,
                            maxLineGap=20)
    if not lines is None:
        features['ProbAngles_Saturation'] = circstat.mean([
            np.arctan2(np.abs(y2 - y1), np.abs(x2 - x1))
            for x1, y1, x2, y2 in lines[0]
        ])
    else:
        features['ProbAngles_Saturation'] = 0
    tmp = I_v
    lines = cv2.HoughLinesP(cv2.Canny(tmp, 100, 200, apertureSize=3),
                            1,
                            np.pi / 180,
                            100,
                            minLineLength=5,
                            maxLineGap=20)
    if not lines is None:
        features['ProbAngles_Value'] = circstat.mean([
            np.arctan2(np.abs(y2 - y1), np.abs(x2 - x1))
            for x1, y1, x2, y2 in lines[0]
        ])
    else:
        features['ProbAngles_Value'] = 0

    tmp = I_h
    a = tmp.astype("float")
    b1 = tmp[::-1, :].astype("float")
    b2 = tmp[:, ::-1].astype("float")
    features['Sym_Horizontal_Hue'] = (a * b1).sum() / (np.sqrt(
        (a**2).sum()) * np.sqrt((b1**2).sum()))
    features['Sym_Vertical_Hue'] = (a * b2).sum() / (np.sqrt(
        (a**2).sum()) * np.sqrt((b2**2).sum()))

    tmp = I_s
    a = tmp.astype("float")
    b1 = tmp[::-1, :].astype("float")
    b2 = tmp[:, ::-1].astype("float")
    features['Sym_Horizontal_Saturation'] = (a * b1).sum() / (np.sqrt(
        (a**2).sum()) * np.sqrt((b1**2).sum()))
    features['Sym_Vertical_Saturation'] = (a * b2).sum() / (np.sqrt(
        (a**2).sum()) * np.sqrt((b2**2).sum()))

    tmp = I_v
    a = tmp.astype("float")
    b1 = tmp[::-1, :].astype("float")
    b2 = tmp[:, ::-1].astype("float")
    features['Sym_Horizontal_Value'] = (a * b1).sum() / (np.sqrt(
        (a**2).sum()) * np.sqrt((b1**2).sum()))
    features['Sym_Vertical_Value'] = (a * b2).sum() / (np.sqrt(
        (a**2).sum()) * np.sqrt((b2**2).sum()))
    return features
Example #28
# # add a 'best fit' line
# y = mlab.normpdf(bins, np.mean(Cresult_SB_list), np.std(Cresult_SB_list))
# l = plt.plot(bins, y, 'r--', linewidth=1)

plt.xlabel('Correlation Coefficient in H domain', fontsize=14)
plt.ylabel('Normalized Frequency', fontsize=14)
plt.legend()
# plt.title(r'$\mathrm{Histogram\ of\ IQ:}\ \mu=100,\ \sigma=15$')
# plt.axis([40, 160, 0, 0.03])
plt.grid(True)
plt.show()

#############################################################################################
lena = misc.imread("./images/im_512_3.tif")
wp = pywt.WaveletPacket2D(data=lena, wavelet='db4')
orgimg = wp['aa'].data  # alternatives: (lena[:100, :100]).astype(float), wp['aaa'].data
rectnumber, rectsize = 5, 15

hugemat, WMvec, m_begin_vec, m_finish_vec, n_begin_vec, n_finish_vec, Wlist = watermarking.encoding(orgimg, rectnumber, rectsize, k)
# Displaying rectangles
orgimg_squared = orgimg.copy()
for rectindex in range(rectnumber):
    orgimg_squared[int(m_begin_vec[rectindex]), int(n_begin_vec[rectindex]):int(n_finish_vec[rectindex])] = 0
    orgimg_squared[int(m_finish_vec[rectindex]), int(n_begin_vec[rectindex]):int(n_finish_vec[rectindex])] = 0
    orgimg_squared[int(m_begin_vec[rectindex]):int(m_finish_vec[rectindex]), int(n_begin_vec[rectindex])] = 0
    orgimg_squared[int(m_begin_vec[rectindex]):int(m_finish_vec[rectindex]), int(n_finish_vec[rectindex])] = 0

fig = plt.imshow(orgimg_squared, cmap='gray')
fig.axes.get_xaxis().set_visible(False)
fig.axes.get_yaxis().set_visible(False)
Example #29
import os
import pywt
import numpy as np
import matplotlib.pyplot as plt

from scipy import misc

root = os.path.dirname(
    os.path.dirname(os.path.dirname(os.path.realpath(__file__))))

if __name__ == '__main__':
    img_filename = os.path.join(root, 'test', 'test_data', 'sphere_mapped',
                                '30_to_60_solid_angle_to_sphere.raw')
    img = np.fromfile(img_filename, dtype=float).reshape([360, 180]).T

    # Decompose image
    wp = pywt.WaveletPacket2D(data=img, wavelet='haar', mode='symmetric')
    print(wp.data)
    print(repr(wp.path))
    print(wp.level)
    print(wp.maxlevel)

    # Plot results
    plt.figure()
    plt.subplot(221)
    plt.imshow(wp['a'].data, cmap='gray')
    plt.subplot(222)
    plt.imshow(wp['h'].data, cmap='gray')
    plt.subplot(223)
    plt.imshow(wp['v'].data, cmap='gray')
    plt.subplot(224)
    plt.imshow(wp['d'].data, cmap='gray')
Example #30
def _waveletPacket(img, outdir, level, kernel, subband, plot, distinct):
    wp = pywt.WaveletPacket2D(img, kernel, 'symmetric', level)

    # set wavelet sub band decomposition
    if subband == 'll':
        increment = 'a'
    elif subband == 'lh':
        increment = 'h'
    elif subband == 'hl':
        increment = 'v'
    elif subband == 'hh':
        increment = 'd'
    elif subband == 'all':
        pass
    else:
        # invalid subband: report it and return early, since 'increment' would
        # otherwise be undefined further below
        print("wavelet index error: subband must be 'll', 'lh', 'hl', 'hh' or 'all'")
        return None

    if subband == 'all':
        # create energies, entropies list
        energies = list()
        entropies = list()

        # loop over level
        index = ['a', 'h', 'v', 'd']
        for n in range(1, level + 1):
            # get sub band index
            temp = list(index)
            for i in temp:
                # pop index
                del index[0]

                # plot
                if plot == True:
                    pass
                    # calculate power spectrum
                    # powerSpectrum = np.array(np.abs(wp[i].data)**2, copy=True)
                    # outdirTemp = outdir+'level_'+str(n)+'_subband_'+i
                    # _plotCoeffs(wp[i].data, outdirTemp+'_coeffs.png')
                    # _plotSpectrum(powerSpectrum, outdirTemp+'_powerspectrum.png')
                    # _plotHistogram(powerSpectrum, outdirTemp+'_hist.png')

                # binning data
                binData = _binningData(wp[i].data, 1000)
                histData = _calcHist(binData, 1000)
                probaData = _calcProbaHist(histData)

                # calculate energy and entropy
                energy = np.sum(np.abs(wp[i].data))
                entropy = _calcEntropy(probaData)

                # generate next level index
                if n < level + 1:
                    index.append(i + 'a')
                    index.append(i + 'h')
                    index.append(i + 'v')
                    index.append(i + 'd')

                if n != distinct:
                    continue

                # append energy and entropy to list
                energies.append(energy)
                entropies.append(entropy)

        # calculate total energy
        total_energy = np.sum(energies)

        # check sanity
        if total_energy != 0:
            energies = energies / np.sum(energies)

        # append list of energy and entropy to feature
        features = np.concatenate((energies, entropies), axis=0)
        return features
    else:
        pass
        features = list()
        index = ''
        for n in range(1, level + 1):
            # pass
            entropy = dict()
            outdirTemp = outdir + 'level_' + str(n) + '_subband_' + index
            index += increment
        return features