Example #1
def ipca(image, components, batch_size=25):
    """Reconstruct an image from IPCA compression using a specific number of components and a given batch size

    Args:
        image: PIL Image, Numpy array or path of 3D image
        components: Number of components used for reconstruction
        batch_size: Batch size used when fitting the model (default: 25)

    Returns:
        Reconstructed image

    Example:

    >>> from PIL import Image
    >>> import numpy as np
    >>> from ipfml.processing import reconstruction
    >>> image_values = Image.open('./images/test_img.png')
    >>> reconstructed_image = reconstruction.ipca(image_values, 20)
    >>> reconstructed_image.shape
    (200, 200)
    """
    lab_img = transform.get_LAB_L(image)
    lab_img = np.array(lab_img, 'uint8')

    transformer = IncrementalPCA(n_components=components,
                                 batch_size=batch_size)

    transformed_image = transformer.fit_transform(lab_img)
    restored_image = transformer.inverse_transform(transformed_image)

    return restored_image
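# Hedged usage sketch (not part of the original module): IncrementalPCA needs
# n_components to be at most the batch size, and inverse_transform returns
# floats, so clip and cast before saving. The sample path is the one from the
# doctest above; the output path is illustrative.
if __name__ == '__main__':
    from PIL import Image
    import numpy as np

    restored = ipca(Image.open('./images/test_img.png'), 20, batch_size=25)
    out = np.clip(restored, 0, 255).astype('uint8')
    Image.fromarray(out).save('/tmp/ipca_restored.png')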
Example #2
def svd(image, interval):
    """Reconstruct an image from SVD compression using specific interval of Singular Values

    Args:
        image: PIL Image, Numpy array or path of 3D image
        interval: (begin, end) interval of singular values kept for the reconstruction

    Returns:
        Reconstructed image

    Example:

    >>> from PIL import Image
    >>> import numpy as np
    >>> from ipfml.processing import reconstruction
    >>> image_values = Image.open('./images/test_img.png')
    >>> reconstructed_image = reconstruction.svd(image_values, (100, 200))
    >>> reconstructed_image.shape
    (200, 200)
    """

    begin, end = interval
    lab_img = transform.get_LAB_L(image)
    lab_img = np.array(lab_img, 'uint8')

    U, s, V = np_svd(lab_img, full_matrices=True)

    # reconstruction using the specified interval of singular values
    # (singular values of a real matrix are real, so a float matrix suffices)
    smat = np.zeros((end - begin, end - begin), dtype=float)
    smat[:, :] = np.diag(s[begin:end])
    output_img = np.dot(U[:, begin:end], np.dot(smat, V[begin:end, :]))

    return output_img
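# Hedged usage sketch (not part of the original module): the reconstruction is
# float-valued, so cast to uint8 before saving, as the 'svd_reconstruct'
# feature further below does. The output path is illustrative.
if __name__ == '__main__':
    from PIL import Image
    import numpy as np

    output = svd(Image.open('./images/test_img.png'), (100, 200))
    Image.fromarray(np.array(output, 'uint8')).save('/tmp/svd_restored.png')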
Example #3
def fast_ica(image, components):
    """Reconstruct an image from Fast ICA compression using specific number of components to use

    Args:
        image: PIL Image, Numpy array or path of 3D image
        components: Number of components used for reconstruction

    Returns:
        Reconstructed image

    Example:

    >>> from PIL import Image
    >>> import numpy as np
    >>> from ipfml.processing import reconstruction
    >>> image_values = Image.open('./images/test_img.png')
    >>> reconstructed_image = reconstruction.fast_ica(image_values, 25)
    >>> reconstructed_image.shape
    (200, 200)
    """

    lab_img = transform.get_LAB_L(image)
    lab_img = np.array(lab_img, 'uint8')

    ica = FastICA(n_components=components)
    # run ICA on the image and reconstruct it from the independent components
    image_ica = ica.fit_transform(lab_img)
    restored_image = ica.inverse_transform(image_ica)

    return restored_image
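# Hedged usage sketch (not part of the original module): FastICA
# reconstructions are float-valued and not bounded to [0, 255]; normalizing
# before the uint8 cast mirrors what the 'ica_diff' feature below does with
# utils.normalize_2D_arr. The output path is illustrative.
if __name__ == '__main__':
    from PIL import Image
    import numpy as np

    restored = fast_ica(Image.open('./images/test_img.png'), 25)
    # min-max normalize to [0, 1], then scale to the uint8 range
    normed = (restored - restored.min()) / (restored.max() - restored.min())
    Image.fromarray(np.array(normed * 255, 'uint8')).save('/tmp/ica_restored.png')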
Example #4
    def getTransformedImage(self, img):

        if self.transformation == 'svd_reconstruction':
            begin, end = list(map(int, self.param.split(',')))
            data = reconstruction.svd(img, [begin, end])

        if self.transformation == 'ipca_reconstruction':
            n_components, batch_size = list(map(int, self.param.split(',')))
            data = reconstruction.ipca(img, n_components, batch_size)

        if self.transformation == 'fast_ica_reconstruction':
            n_components = int(self.param)
            data = reconstruction.fast_ica(img, n_components)

        if self.transformation == 'min_diff_filter':
            w_size, h_size = list(map(int, self.param.split(',')))
            h, w = list(map(int, self.size.split(',')))

            # min bilateral difference filter with window of size (w_size, h_size)
            lab_img = transform.get_LAB_L(img)

            lab_img = Image.fromarray(lab_img)
            lab_img.thumbnail((h, w))

            diff_img = convolution.convolution2D(lab_img,
                                                 kernels.min_bilateral_diff,
                                                 (w_size, h_size))

            data = np.array(diff_img * 255, 'uint8')

        if self.transformation == 'static':
            # static content, we keep input as it is
            data = img

        return data
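    # Hedged note on the expected string attributes, inferred from the parsing
    # above (the original class definition is not shown in this snippet):
    #   transformation = 'svd_reconstruction'       param = '100,200'
    #   transformation = 'ipca_reconstruction'      param = '20,25'
    #   transformation = 'fast_ica_reconstruction'  param = '50'
    #   transformation = 'min_diff_filter'          param = '3,3', size = '100,100'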
Example #5
def get_zone_sobel_svd_entropy(images, interval, ksize, imnorm=False):
    """Compute, for each image, the entropy of the singular values (within `interval`) of its Sobel-filtered L channel"""

    entropy_list = []

    begin, end = interval

    for img in images:
        sobel_img = get_sobel_filtered_img(transform.get_LAB_L(img), ksize)

        if imnorm:
            sobel_img = np.array(sobel_img / 255.)

        sigma = compression.get_SVD_s(sobel_img)
        sigma = sigma[begin:end]

        s_entropy = utils.get_entropy(sigma)
        entropy_list.append(s_entropy)

    return entropy_list
Example #6
def get_zone_entropy(images, interval, imnorm=False):
    """Compute, for each image, the entropy of the singular values (within `interval`) of its L channel"""

    entropy_list = []

    begin, end = interval

    for img in images:
        img_lab = transform.get_LAB_L(img)

        if imnorm:
            img_lab = np.array(img_lab / 255.)

        sigma = compression.get_SVD_s(img_lab)
        sigma = sigma[begin:end]

        s_entropy = utils.get_entropy(sigma)
        entropy_list.append(s_entropy)

    return entropy_list
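# Hedged usage sketch for the two helpers above, assuming a list of PIL Images
# and the sample path from the earlier doctests (get_zone_sobel_svd_entropy
# also needs the get_sobel_filtered_img helper from its original module).
if __name__ == '__main__':
    from PIL import Image

    images = [Image.open('./images/test_img.png')]
    print(get_zone_entropy(images, (0, 50)))
    print(get_zone_sobel_svd_entropy(images, (0, 50), ksize=3))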
Example #7
def get_image_features(data_type, block):
    """
    Compute the feature vector matching the given data type for an image block
    """

    if data_type == 'lab':

        block_file_path = '/tmp/lab_img.png'
        block.save(block_file_path)
        data = transform.get_LAB_L_SVD_s(Image.open(block_file_path))

    if data_type == 'mscn':

        img_mscn_revisited = transform.rgb_to_mscn(block)

        # save tmp as img
        img_output = Image.fromarray(img_mscn_revisited.astype('uint8'), 'L')
        mscn_revisited_file_path = '/tmp/mscn_revisited_img.png'
        img_output.save(mscn_revisited_file_path)
        img_block = Image.open(mscn_revisited_file_path)

        # extract from temp image
        data = compression.get_SVD_s(img_block)
    """if data_type == 'mscn':

        img_gray = np.array(color.rgb2gray(np.asarray(block))*255, 'uint8')
        img_mscn = transform.calculate_mscn_coefficients(img_gray, 7)
        img_mscn_norm = transform.normalize_2D_arr(img_mscn)

        img_mscn_gray = np.array(img_mscn_norm*255, 'uint8')

        data = compression.get_SVD_s(img_mscn_gray)
    """

    if data_type == 'low_bits_6':

        low_bits_6 = transform.rgb_to_LAB_L_low_bits(block, 6)
        data = compression.get_SVD_s(low_bits_6)

    if data_type == 'low_bits_5':

        low_bits_5 = transform.rgb_to_LAB_L_low_bits(block, 5)
        data = compression.get_SVD_s(low_bits_5)

    if data_type == 'low_bits_4':

        low_bits_4 = transform.rgb_to_LAB_L_low_bits(block, 4)
        data = compression.get_SVD_s(low_bits_4)

    if data_type == 'low_bits_3':

        low_bits_3 = transform.rgb_to_LAB_L_low_bits(block, 3)
        data = compression.get_SVD_s(low_bits_3)

    if data_type == 'low_bits_2':

        low_bits_2 = transform.rgb_to_LAB_L_low_bits(block, 2)
        data = compression.get_SVD_s(low_bits_2)

    if data_type == 'low_bits_4_shifted_2':

        data = compression.get_SVD_s(transform.rgb_to_LAB_L_bits(
            block, (3, 6)))

    if data_type == 'sub_blocks_stats':

        block = np.asarray(block)
        width, height, _ = block.shape
        sub_width, sub_height = int(width / 4), int(height / 4)

        sub_blocks = segmentation.divide_in_blocks(block,
                                                   (sub_width, sub_height))

        data = []

        for sub_b in sub_blocks:

            # by default use the whole Lab L channel
            l_svd_data = np.array(transform.get_LAB_L_SVD_s(sub_b))

            # get information we want from svd
            data.append(np.mean(l_svd_data))
            data.append(np.median(l_svd_data))
            data.append(np.percentile(l_svd_data, 25))
            data.append(np.percentile(l_svd_data, 75))
            data.append(np.var(l_svd_data))

            area_under_curve = utils.integral_area_trapz(l_svd_data, dx=100)
            data.append(area_under_curve)

        # convert into numpy array after computing all stats
        data = np.asarray(data)

    if data_type == 'sub_blocks_stats_reduced':

        block = np.asarray(block)
        width, height, _ = block.shape
        sub_width, sub_height = int(width / 4), int(height / 4)

        sub_blocks = segmentation.divide_in_blocks(block,
                                                   (sub_width, sub_height))

        data = []

        for sub_b in sub_blocks:

            # by default use the whole Lab L channel
            l_svd_data = np.array(transform.get_LAB_L_SVD_s(sub_b))

            # get information we want from svd
            data.append(np.mean(l_svd_data))
            data.append(np.median(l_svd_data))
            data.append(np.percentile(l_svd_data, 25))
            data.append(np.percentile(l_svd_data, 75))
            data.append(np.var(l_svd_data))

        # convert into numpy array after computing all stats
        data = np.asarray(data)

    if data_type == 'sub_blocks_area':

        block = np.asarray(block)
        width, height, _ = block.shape
        sub_width, sub_height = int(width / 8), int(height / 8)

        sub_blocks = segmentation.divide_in_blocks(block,
                                                   (sub_width, sub_height))

        data = []

        for sub_b in sub_blocks:

            # by default use the whole Lab L channel
            l_svd_data = np.array(transform.get_LAB_L_SVD_s(sub_b))

            area_under_curve = utils.integral_area_trapz(l_svd_data, dx=50)
            data.append(area_under_curve)

        # convert into numpy array after computing all stats
        data = np.asarray(data)

    if data_type == 'sub_blocks_area_normed':

        block = np.asarray(block)
        width, height, _ = block.shape
        sub_width, sub_height = int(width / 8), int(height / 8)

        sub_blocks = segmentation.divide_in_blocks(block,
                                                   (sub_width, sub_height))

        data = []

        for sub_b in sub_blocks:

            # by default use the whole Lab L channel
            l_svd_data = np.array(transform.get_LAB_L_SVD_s(sub_b))
            l_svd_data = utils.normalize_arr(l_svd_data)

            area_under_curve = utils.integral_area_trapz(l_svd_data, dx=50)
            data.append(area_under_curve)

        # convert into numpy array after computing all stats
        data = np.asarray(data)

    if data_type == 'mscn_var_4':

        data = _get_mscn_variance(block, (100, 100))

    if data_type == 'mscn_var_16':

        data = _get_mscn_variance(block, (50, 50))

    if data_type == 'mscn_var_64':

        data = _get_mscn_variance(block, (25, 25))

    if data_type == 'mscn_var_16_max':

        data = _get_mscn_variance(block, (50, 50))
        data = np.asarray(data)
        size = int(len(data) / 4)
        indices = data.argsort()[-size:][::-1]
        data = data[indices]

    if data_type == 'mscn_var_64_max':

        data = _get_mscn_variance(block, (25, 25))
        data = np.asarray(data)
        size = int(len(data) / 4)
        indices = data.argsort()[-size:][::-1]
        data = data[indices]

    if data_type == 'ica_diff':
        current_image = transform.get_LAB_L(block)

        ica = FastICA(n_components=50)
        ica.fit(current_image)

        image_ica = ica.fit_transform(current_image)
        image_restored = ica.inverse_transform(image_ica)

        final_image = utils.normalize_2D_arr(image_restored)
        final_image = np.array(final_image * 255, 'uint8')

        sv_values = utils.normalize_arr(compression.get_SVD_s(current_image))
        ica_sv_values = utils.normalize_arr(compression.get_SVD_s(final_image))

        data = abs(np.array(sv_values) - np.array(ica_sv_values))

    if data_type == 'svd_trunc_diff':

        current_image = transform.get_LAB_L(block)

        svd = TruncatedSVD(n_components=30, n_iter=100, random_state=42)
        transformed_image = svd.fit_transform(current_image)
        restored_image = svd.inverse_transform(transformed_image)

        reduced_image = (current_image - restored_image)

        U, s, V = compression.get_SVD(reduced_image)
        data = s

    if data_type == 'ipca_diff':

        current_image = transform.get_LAB_L(block)

        transformer = IncrementalPCA(n_components=20, batch_size=25)
        transformed_image = transformer.fit_transform(current_image)
        restored_image = transformer.inverse_transform(transformed_image)

        reduced_image = (current_image - restored_image)

        U, s, V = compression.get_SVD(reduced_image)
        data = s

    if data_type == 'svd_reconstruct':

        reconstructed_interval = (90, 200)
        begin, end = reconstructed_interval

        lab_img = transform.get_LAB_L(block)
        lab_img = np.array(lab_img, 'uint8')

        U, s, V = lin_svd(lab_img, full_matrices=True)

        smat = np.zeros((end - begin, end - begin), dtype=float)
        smat[:, :] = np.diag(s[begin:end])
        output_img = np.dot(U[:, begin:end], np.dot(smat, V[begin:end, :]))

        output_img = np.array(output_img, 'uint8')

        data = compression.get_SVD_s(output_img)

    if 'sv_std_filters' in data_type:

        # convert into lab by default to apply filters
        lab_img = transform.get_LAB_L(block)
        arr = np.array(lab_img)
        images = []

        # apply the list of filters on arr
        images.append(medfilt2d(arr, [3, 3]))
        images.append(medfilt2d(arr, [5, 5]))
        images.append(wiener(arr, [3, 3]))
        images.append(wiener(arr, [5, 5]))

        # by default, keep the SVD of the unfiltered block image
        s_arr = compression.get_SVD_s(arr)
        sv_vector = [s_arr]

        # for each new image apply SVD and get SV
        for img in images:
            s = compression.get_SVD_s(img)
            sv_vector.append(s)

        sv_array = np.array(sv_vector)

        _, length = sv_array.shape

        sv_std = []

        # normalize each SV component across images and compute its standard deviation
        for i in range(length):
            sv_array[:, i] = utils.normalize_arr(sv_array[:, i])
            sv_std.append(np.std(sv_array[:, i]))

        indices = []

        if 'lowest' in data_type:
            indices = utils.get_indices_of_lowest_values(sv_std, 200)

        if 'highest' in data_type:
            indices = utils.get_indices_of_highest_values(sv_std, 200)

        # data is ordered following the computed std trend
        data = s_arr[indices]

    # variant with the use of wavelet
    if 'wave_sv_std_filters' in data_type:

        # convert into lab by default to apply filters
        lab_img = transform.get_LAB_L(block)
        arr = np.array(lab_img)
        images = []

        # apply median filter and wavelet transform on arr
        images.append(medfilt2d(arr, [3, 3]))
        wave = w2d(arr, 'db1', 2)
        images.append(np.array(wave, 'float64'))

        # by default, keep the SVD of the unfiltered block image
        s_arr = compression.get_SVD_s(arr)
        sv_vector = [s_arr]

        # for each new image apply SVD and get SV
        for img in images:
            s = compression.get_SVD_s(img)
            sv_vector.append(s)

        sv_array = np.array(sv_vector)

        _, length = sv_array.shape

        sv_std = []

        # normalize each SV component across images and compute its standard deviation
        for i in range(length):
            sv_array[:, i] = utils.normalize_arr(sv_array[:, i])
            sv_std.append(np.std(sv_array[:, i]))

        indices = []

        if 'lowest' in data_type:
            indices = utils.get_indices_of_lowest_values(sv_std, 200)

        if 'highest' in data_type:
            indices = utils.get_indices_of_highest_values(sv_std, 200)

        # data is ordered following the computed std trend
        data = s_arr[indices]

    # full variant with all filters, including wavelet
    if 'sv_std_filters_full' in data_type:

        # convert into lab by default to apply filters
        lab_img = transform.get_LAB_L(block)
        arr = np.array(lab_img)
        images = []

        # apply the list of filters on arr
        kernel = np.ones((3, 3), np.float32) / 9
        images.append(cv2.filter2D(arr, -1, kernel))

        kernel = np.ones((5, 5), np.float32) / 25
        images.append(cv2.filter2D(arr, -1, kernel))

        images.append(cv2.GaussianBlur(arr, (3, 3), 0.5))

        images.append(cv2.GaussianBlur(arr, (3, 3), 1))

        images.append(cv2.GaussianBlur(arr, (3, 3), 1.5))

        images.append(cv2.GaussianBlur(arr, (5, 5), 0.5))

        images.append(cv2.GaussianBlur(arr, (5, 5), 1))

        images.append(cv2.GaussianBlur(arr, (5, 5), 1.5))

        images.append(medfilt2d(arr, [3, 3]))

        images.append(medfilt2d(arr, [5, 5]))

        images.append(wiener(arr, [3, 3]))

        images.append(wiener(arr, [5, 5]))

        wave = w2d(arr, 'db1', 2)
        images.append(np.array(wave, 'float64'))

        # by default, keep the SVD of the unfiltered block image
        s_arr = compression.get_SVD_s(arr)
        sv_vector = [s_arr]

        # for each new image apply SVD and get SV
        for img in images:
            s = compression.get_SVD_s(img)
            sv_vector.append(s)

        sv_array = np.array(sv_vector)

        _, length = sv_array.shape

        sv_std = []

        # normalize each SV component across images and compute its standard deviation
        for i in range(length):
            sv_array[:, i] = utils.normalize_arr(sv_array[:, i])
            sv_std.append(np.std(sv_array[:, i]))

        indices = []

        if 'lowest' in data_type:
            indices = utils.get_indices_of_lowest_values(sv_std, 200)

        if 'highest' in data_type:
            indices = utils.get_indices_of_highest_values(sv_std, 200)

        # data is ordered following the computed std trend
        data = s_arr[indices]

    if 'sv_entropy_std_filters' in data_type:

        lab_img = transform.get_LAB_L(block)
        arr = np.array(lab_img)

        images = []

        kernel = np.ones((3, 3), np.float32) / 9
        images.append(cv2.filter2D(arr, -1, kernel))

        kernel = np.ones((5, 5), np.float32) / 25
        images.append(cv2.filter2D(arr, -1, kernel))

        images.append(cv2.GaussianBlur(arr, (3, 3), 0.5))

        images.append(cv2.GaussianBlur(arr, (3, 3), 1))

        images.append(cv2.GaussianBlur(arr, (3, 3), 1.5))

        images.append(cv2.GaussianBlur(arr, (5, 5), 0.5))

        images.append(cv2.GaussianBlur(arr, (5, 5), 1))

        images.append(cv2.GaussianBlur(arr, (5, 5), 1.5))

        images.append(medfilt2d(arr, [3, 3]))

        images.append(medfilt2d(arr, [5, 5]))

        images.append(wiener(arr, [3, 3]))

        images.append(wiener(arr, [5, 5]))

        wave = w2d(arr, 'db1', 2)
        images.append(np.array(wave, 'float64'))

        sv_vector = []
        sv_entropy_list = []

        # for each new image apply SVD and get SV
        for img in images:
            s = compression.get_SVD_s(img)
            sv_vector.append(s)

            sv_entropy = [
                utils.get_entropy_contribution_of_i(s, id_sv)
                for id_sv in range(len(s))
            ]
            sv_entropy_list.append(sv_entropy)

        sv_std = []

        sv_array = np.array(sv_vector)
        _, length = sv_array.shape

        # normalize each SV component across images and compute its standard deviation
        for i in range(length):
            sv_array[:, i] = utils.normalize_arr(sv_array[:, i])
            sv_std.append(np.std(sv_array[:, i]))

        indices = []

        if 'lowest' in data_type:
            indices = utils.get_indices_of_lowest_values(sv_std, 200)

        if 'highest' in data_type:
            indices = utils.get_indices_of_highest_values(sv_std, 200)

        # data is ordered following the computed std trend
        s_arr = compression.get_SVD_s(arr)
        data = s_arr[indices]

    if 'convolutional_kernels' in data_type:

        sub_zones = segmentation.divide_in_blocks(block, (20, 20))

        data = []

        diff_std_list_3 = []
        diff_std_list_5 = []
        diff_mean_list_3 = []
        diff_mean_list_5 = []

        plane_std_list_3 = []
        plane_std_list_5 = []
        plane_mean_list_3 = []
        plane_mean_list_5 = []

        plane_max_std_list_3 = []
        plane_max_std_list_5 = []
        plane_max_mean_list_3 = []
        plane_max_mean_list_5 = []

        for sub_zone in sub_zones:
            l_img = transform.get_LAB_L(sub_zone)
            normed_l_img = utils.normalize_2D_arr(l_img)

            # bilateral with window of size (3, 3)
            normed_diff = convolution.convolution2D(normed_l_img,
                                                    kernels.min_bilateral_diff,
                                                    (3, 3))
            std_diff = np.std(normed_diff)
            mean_diff = np.mean(normed_diff)

            diff_std_list_3.append(std_diff)
            diff_mean_list_3.append(mean_diff)

            # bilateral with window of size (5, 5)
            normed_diff = convolution.convolution2D(normed_l_img,
                                                    kernels.min_bilateral_diff,
                                                    (5, 5))
            std_diff = np.std(normed_diff)
            mean_diff = np.mean(normed_diff)

            diff_std_list_5.append(std_diff)
            diff_mean_list_5.append(mean_diff)

            # plane mean with window of size (3, 3)
            normed_plane_mean = convolution.convolution2D(
                normed_l_img, kernels.plane_mean, (3, 3))
            std_plane_mean = np.std(normed_plane_mean)
            mean_plane_mean = np.mean(normed_plane_mean)

            plane_std_list_3.append(std_plane_mean)
            plane_mean_list_3.append(mean_plane_mean)

            # plane mean with window of size (5, 5)
            normed_plane_mean = convolution.convolution2D(
                normed_l_img, kernels.plane_mean, (5, 5))
            std_plane_mean = np.std(normed_plane_mean)
            mean_plane_mean = np.mean(normed_plane_mean)

            plane_std_list_5.append(std_plane_mean)
            plane_mean_list_5.append(mean_plane_mean)

            # plane max error with window of size (3, 3)
            normed_plane_max = convolution.convolution2D(
                normed_l_img, kernels.plane_max_error, (3, 3))
            std_plane_max = np.std(normed_plane_max)
            mean_plane_max = np.mean(normed_plane_max)

            plane_max_std_list_3.append(std_plane_max)
            plane_max_mean_list_3.append(mean_plane_max)

            # plane max error with window of size (5, 5)
            normed_plane_max = convolution.convolution2D(
                normed_l_img, kernels.plane_max_error, (5, 5))
            std_plane_max = np.std(normed_plane_max)
            mean_plane_max = np.mean(normed_plane_max)

            plane_max_std_list_5.append(std_plane_max)
            plane_max_mean_list_5.append(mean_plane_max)

        diff_std_list_3 = np.array(diff_std_list_3)
        diff_std_list_5 = np.array(diff_std_list_5)

        diff_mean_list_3 = np.array(diff_mean_list_3)
        diff_mean_list_5 = np.array(diff_mean_list_5)

        plane_std_list_3 = np.array(plane_std_list_3)
        plane_std_list_5 = np.array(plane_std_list_5)

        plane_mean_list_3 = np.array(plane_mean_list_3)
        plane_mean_list_5 = np.array(plane_mean_list_5)

        plane_max_std_list_3 = np.array(plane_max_std_list_3)
        plane_max_std_list_5 = np.array(plane_max_std_list_5)

        plane_max_mean_list_3 = np.array(plane_max_mean_list_3)
        plane_max_mean_list_5 = np.array(plane_max_mean_list_5)

        # only the first fifth of the sub zones is used by the *_max_blocks stats
        n_max_blocks = int(len(sub_zones) / 5)

        if 'std_max_blocks' in data_type:

            data.append(np.std(diff_std_list_3[0:n_max_blocks]))
            data.append(np.std(diff_mean_list_3[0:n_max_blocks]))
            data.append(np.std(diff_std_list_5[0:n_max_blocks]))
            data.append(np.std(diff_mean_list_5[0:n_max_blocks]))

            data.append(np.std(plane_std_list_3[0:n_max_blocks]))
            data.append(np.std(plane_mean_list_3[0:n_max_blocks]))
            data.append(np.std(plane_std_list_5[0:n_max_blocks]))
            data.append(np.std(plane_mean_list_5[0:n_max_blocks]))

            data.append(np.std(plane_max_std_list_3[0:n_max_blocks]))
            data.append(np.std(plane_max_mean_list_3[0:n_max_blocks]))
            data.append(np.std(plane_max_std_list_5[0:n_max_blocks]))
            data.append(np.std(plane_max_mean_list_5[0:n_max_blocks]))

        if 'mean_max_blocks' in data_type:

            data.append(np.mean(diff_std_list_3[0:n_max_blocks]))
            data.append(np.mean(diff_mean_list_3[0:n_max_blocks]))
            data.append(np.mean(diff_std_list_5[0:n_max_blocks]))
            data.append(np.mean(diff_mean_list_5[0:n_max_blocks]))

            data.append(np.mean(plane_std_list_3[0:n_max_blocks]))
            data.append(np.mean(plane_mean_list_3[0:n_max_blocks]))
            data.append(np.mean(plane_std_list_5[0:n_max_blocks]))
            data.append(np.mean(plane_mean_list_5[0:n_max_blocks]))

            data.append(np.mean(plane_max_std_list_3[0:n_max_blocks]))
            data.append(np.mean(plane_max_mean_list_3[0:n_max_blocks]))
            data.append(np.mean(plane_max_std_list_5[0:n_max_blocks]))
            data.append(np.mean(plane_max_mean_list_5[0:n_max_blocks]))

        if 'std_normed' in data_type:

            data.append(np.std(diff_std_list_3))
            data.append(np.std(diff_mean_list_3))
            data.append(np.std(diff_std_list_5))
            data.append(np.std(diff_mean_list_5))

            data.append(np.std(plane_std_list_3))
            data.append(np.std(plane_mean_list_3))
            data.append(np.std(plane_std_list_5))
            data.append(np.std(plane_mean_list_5))

            data.append(np.std(plane_max_std_list_3))
            data.append(np.std(plane_max_mean_list_3))
            data.append(np.std(plane_max_std_list_5))
            data.append(np.std(plane_max_mean_list_5))

        if 'mean_normed' in data_type:

            data.append(np.mean(diff_std_list_3))
            data.append(np.mean(diff_mean_list_3))
            data.append(np.mean(diff_std_list_5))
            data.append(np.mean(diff_mean_list_5))

            data.append(np.mean(plane_std_list_3))
            data.append(np.mean(plane_mean_list_3))
            data.append(np.mean(plane_std_list_5))
            data.append(np.mean(plane_mean_list_5))

            data.append(np.mean(plane_max_std_list_3))
            data.append(np.mean(plane_max_mean_list_3))
            data.append(np.mean(plane_max_std_list_5))
            data.append(np.mean(plane_max_mean_list_5))

        data = np.array(data)

    if data_type == 'convolutional_kernel_stats_svd':

        l_img = transform.get_LAB_L(block)
        normed_l_img = utils.normalize_2D_arr(l_img)

        # bilateral with window of size (5, 5)
        normed_diff = convolution.convolution2D(normed_l_img,
                                                kernels.min_bilateral_diff,
                                                (5, 5))

        # getting sigma vector from SVD compression
        s = compression.get_SVD_s(normed_diff)

        data = s

    if data_type in ('svd_entropy', 'svd_entropy_20'):
        l_img = transform.get_LAB_L(block)

        blocks = segmentation.divide_in_blocks(l_img, (20, 20))

        values = []
        for b in blocks:
            sv = compression.get_SVD_s(b)
            values.append(utils.get_entropy(sv))
        data = np.array(values)

    if data_type == 'svd_entropy_noise_20':
        l_img = transform.get_LAB_L(block)

        blocks = segmentation.divide_in_blocks(l_img, (20, 20))

        values = []
        for b in blocks:
            sv = compression.get_SVD_s(b)
            sv_size = len(sv)
            values.append(utils.get_entropy(sv[int(sv_size / 4):]))
        data = np.array(values)

    return data
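# Hedged usage sketch (not part of the original module): extract one of the
# feature vectors above from a PIL image block, reusing the sample path from
# the earlier doctests.
if __name__ == '__main__':
    from PIL import Image

    block = Image.open('./images/test_img.png')
    print(get_image_features('svd_entropy', block).shape)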
Example #8
def get_image_features(data_type, block):
    """
    Compute the feature vector matching the given data type for an image block
    """

    if 'filters_statistics' in data_type:

        img_width, img_height = 200, 200  # blocks are assumed to be 200x200

        lab_img = transform.get_LAB_L(block)
        arr = np.array(lab_img)

        # compute all filters statistics
        def get_stats(arr, I_filter):

            e1 = np.abs(arr - I_filter)
            L = np.array(e1)
            mu0 = np.mean(L)
            A = L - mu0
            H = A * A
            E = np.sum(H) / (img_width * img_height)
            P = np.sqrt(E)

            return mu0, P
            # return np.mean(I_filter), np.std(I_filter)

        stats = []

        kernel = np.ones((3, 3), np.float32) / 9
        stats.append(get_stats(arr, cv2.filter2D(arr, -1, kernel)))

        kernel = np.ones((5, 5), np.float32) / 25
        stats.append(get_stats(arr, cv2.filter2D(arr, -1, kernel)))

        stats.append(get_stats(arr, cv2.GaussianBlur(arr, (3, 3), 0.5)))

        stats.append(get_stats(arr, cv2.GaussianBlur(arr, (3, 3), 1)))

        stats.append(get_stats(arr, cv2.GaussianBlur(arr, (3, 3), 1.5)))

        stats.append(get_stats(arr, cv2.GaussianBlur(arr, (5, 5), 0.5)))

        stats.append(get_stats(arr, cv2.GaussianBlur(arr, (5, 5), 1)))

        stats.append(get_stats(arr, cv2.GaussianBlur(arr, (5, 5), 1.5)))

        stats.append(get_stats(arr, medfilt2d(arr, [3, 3])))

        stats.append(get_stats(arr, medfilt2d(arr, [5, 5])))

        stats.append(get_stats(arr, wiener(arr, [3, 3])))

        stats.append(get_stats(arr, wiener(arr, [5, 5])))

        wave = w2d(arr, 'db1', 2)
        stats.append(get_stats(arr, np.array(wave, 'float64')))

        data = []

        for stat in stats:
            data.append(stat[0])

        for stat in stats:
            data.append(stat[1])

        data = np.array(data)

    if 'statistics_extended' in data_type:

        data = get_image_features('filters_statistics', block)

        # approximate Kolmogorov complexity by the gzip-compressed size (in MiB)
        bytes_data = np.array(block).tobytes()
        compress_data = gzip.compress(bytes_data)

        kb_size = sys.getsizeof(compress_data) / 1024.
        mb_size = kb_size / 1024.
        data = np.append(data, mb_size)

        lab_img = transform.get_LAB_L(block)
        arr = np.array(lab_img)

        # add SVD entropy
        svd_entropy = utils.get_entropy(compression.get_SVD_s(arr))
        data = np.append(data, svd_entropy)

        # add sobel complexity (kernel size of 3)
        sobelx = cv2.Sobel(arr, cv2.CV_64F, 1, 0, ksize=3)
        sobely = cv2.Sobel(arr, cv2.CV_64F, 0, 1, ksize=3)

        sobel_mag = np.array(np.hypot(sobelx, sobely), 'uint8')  # magnitude

        data = np.append(data, np.std(sobel_mag))

        # add sobel complexity (kernel size of 5)
        sobelx = cv2.Sobel(arr, cv2.CV_64F, 1, 0, ksize=5)
        sobely = cv2.Sobel(arr, cv2.CV_64F, 0, 1, ksize=5)

        sobel_mag = np.array(np.hypot(sobelx, sobely), 'uint8')  # magnitude

        data = np.append(data, np.std(sobel_mag))

    if 'lab' in data_type:

        data = transform.get_LAB_L_SVD_s(block)

    return data
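# Worked note (hedged): 'filters_statistics' applies 13 filters (2 box blurs,
# 6 Gaussian blurs, 2 median, 2 Wiener, 1 wavelet) and keeps (mu0, P) for each,
# i.e. 26 values, matching the '26_attributes' estimator further below.
# 'statistics_extended' appends 4 extra scalars (gzip size, SVD entropy and
# two Sobel magnitude stds), for 30 values in total.
if __name__ == '__main__':
    from PIL import Image

    block = Image.open('./images/test_img.png')
    print(get_image_features('filters_statistics', block).shape)   # expected (26,)
    print(get_image_features('statistics_extended', block).shape)  # expected (30,)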
Example #9
    def getTransformedImage(self, img):

        if self.transformation == 'svd_reconstruction':
            begin, end = list(map(int, self.param.split(',')))
            h, w = list(map(int, self.size.split(',')))
            img_reconstructed = reconstruction.svd(img, [begin, end])
            data_array = np.array(img_reconstructed, 'uint8')

            img_array = Image.fromarray(data_array)
            img_array.thumbnail((h, w))

            data = np.array(img_array)

        if self.transformation == 'ipca_reconstruction':
            n_components, batch_size = list(map(int, self.param.split(',')))
            h, w = list(map(int, self.size.split(',')))
            img_reconstructed = reconstruction.ipca(img, n_components,
                                                    batch_size)
            data_array = np.array(img_reconstructed, 'uint8')

            img_array = Image.fromarray(data_array)
            img_array.thumbnail((h, w))

            data = np.array(img_array)

        if self.transformation == 'fast_ica_reconstruction':
            n_components = int(self.param)
            h, w = list(map(int, self.size.split(',')))
            img_reconstructed = reconstruction.fast_ica(img, n_components)
            data_array = np.array(img_reconstructed, 'uint8')

            img_array = Image.fromarray(data_array)
            img_array.thumbnail((h, w))

            data = np.array(img_array)

        if self.transformation == 'gini_map':
            # kernel size
            k_w, k_h = list(map(int, self.param.split(',')))
            h, w = list(map(int, self.size.split(',')))

            lab_img = transform.get_LAB_L(img)
            img_mask = convolution.convolution2D(lab_img, kernels.gini,
                                                 (k_w, k_h))

            # renormalize data
            data_array = np.array(img_mask * 255, 'uint8')

            img_array = Image.fromarray(data_array)
            img_array.thumbnail((h, w))

            data = np.array(img_array)

        if self.transformation == 'sobel_based_filter':
            k_size, p_limit = list(map(int, self.param.split(',')))
            h, w = list(map(int, self.size.split(',')))

            lab_img = transform.get_LAB_L(img)

            height, width = lab_img.shape

            sobelx = cv2.Sobel(lab_img, cv2.CV_64F, 1, 0, ksize=k_size)
            sobely = cv2.Sobel(lab_img, cv2.CV_64F, 0, 1, ksize=k_size)

            sobel_mag = np.array(np.hypot(sobelx, sobely),
                                 'uint8')  # magnitude
            sobel_mag_limit = remove_pixel(sobel_mag, p_limit)

            # use distribution value of pixel to fill `0` values
            sobel_mag_limit_without_0 = [
                x for x in sobel_mag_limit.reshape(height * width) if x != 0
            ]
            distribution = distribution_from_data(sobel_mag_limit_without_0)
            min_value = int(min(sobel_mag_limit_without_0))
            rand_value = lambda: get_random_value(distribution) + min_value
            img_reconstructed = fill_image_with_rand_value(
                sobel_mag_limit, rand_value, 0)

            img_reconstructed_norm = utils.normalize_2D_arr(img_reconstructed)
            img_reconstructed_norm = np.array(img_reconstructed_norm * 255,
                                              'uint8')
            sobel_reconstructed = Image.fromarray(img_reconstructed_norm)
            sobel_reconstructed.thumbnail((h, w))

            data = np.array(sobel_reconstructed)

        if self.transformation == 'nl_mean_noise_mask':
            patch_size, patch_distance = list(map(int, self.param.split(',')))
            h, w = list(map(int, self.size.split(',')))

            img = np.array(img)
            sigma_est = np.mean(estimate_sigma(img, multichannel=True))

            patch_kw = dict(
                patch_size=patch_size,  # 5x5 patches
                patch_distance=patch_distance,  # 13x13 search area
                multichannel=True)

            # slow algorithm
            denoise = denoise_nl_means(img,
                                       h=0.8 * sigma_est,
                                       sigma=sigma_est,
                                       fast_mode=False,
                                       **patch_kw)

            denoise = np.array(denoise, 'uint8')
            # compute the difference in a signed type to avoid uint8 wrap-around
            noise_mask = np.abs(denoise.astype('int16') - img.astype('int16'))

            data_array = np.array(noise_mask, 'uint8')

            img_array = Image.fromarray(data_array)
            img_array.thumbnail((h, w))

            data = np.array(img_array)

        if self.transformation == 'static':
            # static content, we keep input as it is
            data = img

        return data
Example #10
def estimate(estimator, arr):
    """Compute the selected noise/complexity estimator on the given image array"""

    if estimator == 'variance':
        return np.var(arr)

    if estimator == 'l_variance':
        return np.var(transform.get_LAB_L(arr))

    if estimator == 'mean':
        return np.mean(arr)

    if estimator == 'l_mean':
        return np.mean(transform.get_LAB_L(arr))

    if estimator == 'sv_struct_all':
        return transform.get_LAB_L_SVD_s(arr)[0:50]

    if estimator == 'sv_struct':
        return np.mean(transform.get_LAB_L_SVD_s(arr)[0:50])

    if estimator == 'sv_noise_all':
        return transform.get_LAB_L_SVD_s(arr)[50:]

    if estimator == 'sv_noise':
        return np.mean(transform.get_LAB_L_SVD_s(arr)[50:])

    if estimator == 'sobel':

        lab_img = transform.get_LAB_L(arr)

        # extract Sobel complexity (kernel size of 5)
        sobelx = cv2.Sobel(lab_img, cv2.CV_64F, 1, 0, ksize=5)
        sobely = cv2.Sobel(lab_img, cv2.CV_64F, 0, 1, ksize=5)

        sobel_mag = np.array(np.hypot(sobelx, sobely), 'uint8')  # magnitude

        return np.std(sobel_mag)

    if estimator == 'l_kolmogorov':

        lab_img = transform.get_LAB_L(arr)

        bytes_data = lab_img.tobytes()
        compress_data = gzip.compress(bytes_data)

        kb_size = sys.getsizeof(compress_data) / 1024.
        mb_size = kb_size / 1024.

        return np.float64(mb_size)

    if estimator == 'l_sv_entropy_blocks':

        # get L channel
        L_channel = transform.get_LAB_L(arr)

        # split into (20, 20) blocks
        blocks = segmentation.divide_in_blocks(L_channel, (20, 20))

        entropy_list = []

        for block in blocks:
            reduced_sigma = compression.get_SVD_s(block)
            reduced_entropy = get_entropy(reduced_sigma)
            entropy_list.append(reduced_entropy)

        return entropy_list

    if estimator == '26_attributes':

        img_width, img_height = 200, 200  # blocks are assumed to be 200x200

        lab_img = transform.get_LAB_L(arr)
        arr = np.array(lab_img)

        # compute all filters statistics
        def get_stats(arr, I_filter):

            e1 = np.abs(arr - I_filter)
            L = np.array(e1)
            mu0 = np.mean(L)
            A = L - mu0
            H = A * A
            E = np.sum(H) / (img_width * img_height)
            P = np.sqrt(E)

            return mu0, P
            # return np.mean(I_filter), np.std(I_filter)

        stats = []

        kernel = np.ones((3, 3), np.float32) / 9
        stats.append(get_stats(arr, cv2.filter2D(arr, -1, kernel)))

        kernel = np.ones((5, 5), np.float32) / 25
        stats.append(get_stats(arr, cv2.filter2D(arr, -1, kernel)))

        stats.append(get_stats(arr, cv2.GaussianBlur(arr, (3, 3), 0.5)))

        stats.append(get_stats(arr, cv2.GaussianBlur(arr, (3, 3), 1)))

        stats.append(get_stats(arr, cv2.GaussianBlur(arr, (3, 3), 1.5)))

        stats.append(get_stats(arr, cv2.GaussianBlur(arr, (5, 5), 0.5)))

        stats.append(get_stats(arr, cv2.GaussianBlur(arr, (5, 5), 1)))

        stats.append(get_stats(arr, cv2.GaussianBlur(arr, (5, 5), 1.5)))

        stats.append(get_stats(arr, medfilt2d(arr, [3, 3])))

        stats.append(get_stats(arr, medfilt2d(arr, [5, 5])))

        stats.append(get_stats(arr, wiener(arr, [3, 3])))

        stats.append(get_stats(arr, wiener(arr, [5, 5])))

        wave = w2d(arr, 'db1', 2)
        stats.append(get_stats(arr, np.array(wave, 'float64')))

        data = []

        for stat in stats:
            data.append(stat[0])

        for stat in stats:
            data.append(stat[1])

        data = np.array(data)

        return data

    if estimator == 'statistics_extended':

        # data = estimate('26_attributes', arr)
        data = np.empty(0)
        # approximate Kolmogorov complexity by the gzip-compressed size (in MiB)
        bytes_data = np.array(arr).tobytes()
        compress_data = gzip.compress(bytes_data)

        kb_size = sys.getsizeof(compress_data) / 1024.
        mb_size = kb_size / 1024.
        data = np.append(data, mb_size)

        lab_img = transform.get_LAB_L(arr)
        arr = np.array(lab_img)

        # add SVD entropy
        svd_entropy = utils.get_entropy(compression.get_SVD_s(arr))
        data = np.append(data, svd_entropy)

        # add sobel complexity (kernel size of 3)
        sobelx = cv2.Sobel(arr, cv2.CV_64F, 1, 0, ksize=3)
        sobely = cv2.Sobel(arr, cv2.CV_64F, 0, 1, ksize=3)

        sobel_mag = np.array(np.hypot(sobelx, sobely), 'uint8')  # magnitude

        data = np.append(data, np.std(sobel_mag))

        # add sobel complexity (kernel size of 5)
        sobelx = cv2.Sobel(arr, cv2.CV_64F, 1, 0, ksize=5)
        sobely = cv2.Sobel(arr, cv2.CV_64F, 0, 1, ksize=5)

        sobel_mag = np.array(np.hypot(sobelx, sobely), 'uint8')  # magnitude

        data = np.append(data, np.std(sobel_mag))

        return data
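# Hedged usage sketch (not part of the original module): the estimators above
# expect an image array (anything transform.get_LAB_L accepts); the sample
# path is reused from the earlier doctests.
if __name__ == '__main__':
    from PIL import Image
    import numpy as np

    arr = np.array(Image.open('./images/test_img.png'))
    print(estimate('l_variance', arr))
    print(estimate('statistics_extended', arr).shape)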