    nTrainingData = 50  # Number of training images
    # Reduce the set size so that less optimised ITKrM routines can be tested without waiting hours.
    data = data[:nTrainingData, :]
    test_data = test_data[:10, :]  # Keep ten test images.

    W_data = 128  # Width in pixels
    H_data = 128  # Height in pixels
    N_subpic = 16  # Width/Height in pixels of smaller square extracted from image.

    # Scale the per-image sparsity budget down to per-patch terms: each image
    # yields (W_data / N_subpic)**2 = 64 patches.
    S = int(S / ((W_data / N_subpic)**2))
    S = 40  # Note: overrides the scaled value above with a fixed per-patch sparsity.
    e = 30  # Error tolerance passed to OMP below.
    print("S: {}".format(S))

    smallSet = ImportImages.ExtractSmall(data.T, W_data, H_data, N_subpic)
    testSet = ImportImages.ExtractSmall(test_data.T, W_data, H_data, N_subpic)
    print("ITKrM")
    t0 = time.time()
    dictionary = p_itkrm.itkrm(smallSet, K, S, maxit, 2)  # Last argument: worker count for the apply_async-based parallelisation.
    print("execution time: {}".format(time.time() - t0))
    print("\nOMP")
    x_sparse = OMP_fast.OMP(dictionary, testSet[:, :N], e, S, e_or_S)  # e_or_S selects the stopping criterion (error bound vs. sparsity).

    beforeImage = ImportImages.MergeSmall(testSet[:, :N], W_data, H_data,
                                          N_subpic)
    afterImage = ImportImages.MergeSmall(dictionary @ x_sparse, W_data, H_data,
                                         N_subpic)

    ssim = compare_ssim(beforeImage, afterImage)
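
    # For reference (not in the original excerpt): compare_ssim is presumably
    # scikit-image's structural similarity. A minimal sketch of scoring a single
    # reconstructed 8-bit grayscale image, assuming MergeSmall returns one 2-D
    # image per test picture:
    from skimage.metrics import structural_similarity as compare_ssim
    score = compare_ssim(beforeImage[0], afterImage[0], data_range=255)
    print("SSIM of first test image: {:.3f}".format(score))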
Example #2
import numpy as np


def projection(A):
    # Orthogonal projector onto the column space of A. The enclosing def was
    # cut off in this excerpt; the function name here is assumed.
    P = A @ np.linalg.pinv(A)
    return P
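
# Hypothetical usage (not in the original): a projector is idempotent and
# leaves vectors already in the column space of A unchanged.
A = np.random.rand(6, 3)
P = projection(A)
assert np.allclose(P @ P, P)  # idempotent
assert np.allclose(P @ A, A)  # fixes the column space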


K = 128  # Number of columns (atoms) in D
M = 16  # Number of rows in D (pixels per training patch)
N = 1000  # Number of training examples
S = 3  # Sparsity: number of nonzero coefficients per example

data = np.load('grayScale32x32cars.npy')

W_data = 32  # Width in pixels
H_data = 32  # Height in pixels
N_subpic = 4  # Width/Height in pixels of smaller square extracted from image.

smallSet = ImportImages.ExtractSmall(data, W_data, H_data, N_subpic)
newPic = ImportImages.MergeSmall(smallSet, W_data, H_data, N_subpic)

Ix = np.random.choice(K, size=(N, S))  # S random atom indices per example, drawn with replacement.
x = np.zeros((K, N))
for i in range(N):
    x[Ix[i, :], i] = 1  # Place ones on the chosen support of column i.
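
# Variant (not in the original): np.random.choice with replacement can repeat
# an index within a row, leaving some columns with fewer than S nonzeros.
# Drawing without replacement guarantees exactly S distinct atoms per example:
x = np.zeros((K, N))
for i in range(N):
    x[np.random.choice(K, size=S, replace=False), i] = 1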

D_init = np.random.rand(M, K)
for k in range(K):
    # Normalise each column (atom) of the initial dictionary to unit length.
    D_init[:, k] = D_init[:, k] / np.linalg.norm(D_init[:, k])
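
# Equivalent vectorised form (an alternative to the loop above, not in the
# original); harmless to run after it since the atoms are already unit-norm.
D_init = D_init / np.linalg.norm(D_init, axis=0, keepdims=True)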
y = D_init @ x  # Synthetic observations from the random dictionary ...
y = smallSet.T[:, :N]  # ... immediately replaced here by real image patches.

D = y[:, :K]  # Initialise the dictionary from the first K training patches.
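
# Common practice (an addition, not in the original): data-driven initial
# atoms are usually rescaled to unit norm before dictionary learning.
D = D / np.linalg.norm(D, axis=0, keepdims=True)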
# A separate experiment setup follows; the constants below override those above.
np.random.seed(0)
K = 200
S = 40
maxit = 20
N = 1024

W_data = 32
H_data = 32
N_subpic = 16

data = LoadFromDataBatch.ImportData(7, 1)
test_data = LoadFromDataBatch.ImportData(7, 'test')

data = data[:600, :]  # Keep only the first 600 training images.

smallSet = ImportImages.ExtractSmall(data.T, W_data, H_data, N_subpic)


import multiprocessing as mp


def _parallel(threads, Y, D_old, I_D):
    # Split the training set column-wise into one chunk per worker and run _f
    # on each chunk in parallel (K, S and maxit are read from module globals).
    pool = mp.Pool(processes=threads)
    M, N = Y.shape
    R = pool.map_async(
        _f, [(Y[:, n * N // threads:(n + 1) * N // threads], K, S, maxit,
              D_old, I_D, n) for n in range(threads)]).get()
    pool.close()
    pool.join()
    return R
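
# Sanity check (illustrative, not in the original): the slicing above
# partitions the N columns into `threads` contiguous chunks covering all data.
_Y_demo = np.arange(20).reshape(2, 10)
_chunks = [_Y_demo[:, n * 10 // 4:(n + 1) * 10 // 4] for n in range(4)]
assert sum(c.shape[1] for c in _chunks) == 10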


def _f(d):
    # Worker for _parallel: unpack one work item and run ITKrM on the chunk.
    # The original body was truncated here; the call below is an assumed
    # reconstruction, not the project's exact code.
    Y, K, S, maxitr, D_old, I_D, i = d
    return itkrm(Y, K, S, maxitr, D_old, I_D)

# A separate evaluation script follows: reload the saved test patches and
# sparse-code them with a learned dictionary D. The h5py open call was cut
# off in this excerpt; the filename below is hypothetical, and the load of
# D itself is missing from the original.
import h5py
import numpy as np
import matplotlib.pyplot as plt
import ImportImages as II  # Project module providing MergeSmall.
from OMP_fast import OMP  # Project module used earlier in this file.

file = h5py.File('testSet.hdf5', 'r')  # Hypothetical filename.
Y = np.array(file['testSet'])
file.close()

S = 100  # Sparsity level
E = 3  # Error tolerance
e_or_s = 1  # Selects the stopping criterion: error bound or sparsity level

x_s = OMP(D, Y, E, S, e_or_s)
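
# For reference (illustrative, not the project's OMP): plain Orthogonal
# Matching Pursuit for one signal y greedily grows a support of at most S
# atoms, re-fitting the coefficients by least squares each round.
def omp_single(D, y, S):
    residual = y.astype(float).copy()
    support = []
    x = np.zeros(D.shape[1])
    for _ in range(S):
        k = int(np.argmax(np.abs(D.T @ residual)))  # best-matching atom
        if k in support:
            break  # no new atom improves the fit
        support.append(k)
        coef, *_ = np.linalg.lstsq(D[:, support], y, rcond=None)
        residual = y - D[:, support] @ coef
    x[support] = coef
    return x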

y_new = D @ x_s  # Reconstruct the test patches from their sparse codes.

W_data = 32
H_data = 32
N_subpic = 16

beforeImage = II.MergeSmall(Y, W_data, H_data, N_subpic)
afterImage = II.MergeSmall(y_new, W_data, H_data, N_subpic)

print('\nSparsity level of x per patch:')
for i in range(int((W_data * H_data) / (N_subpic * N_subpic))):
    print(i, ':', len(np.where(x_s[:, i] != 0)[0]))
print('Average: {}'.format(len(np.where(x_s != 0)[0]) / Y.shape[1]))

pic_number = 0  # Index of the test image to display.
plt.figure('Before')
plt.imshow(beforeImage[pic_number, :, :], cmap='gray', vmin=0, vmax=255)
plt.tight_layout()
plt.figure('After')
plt.imshow(afterImage[pic_number, :, :], cmap='gray', vmin=0, vmax=255)
plt.tight_layout()
plt.show()