Example #1
    def __pak_mon_pixel(self, means, rho=None, alpha=None):

        sorted_means = sorted(means)
        k_elements = len(means)
        middle_index = int(k_elements / 2)

        # use the entropy of {means} to dynamically determine the {alpha} and {rho} parameters
        if alpha is None:
            alpha = utils.get_entropy(self.__variance_evolution(means))

        if rho is None:
            rho = int(middle_index *
                      utils.get_entropy(self.__variance_evolution(means)))

        if rho > middle_index:
            raise ValueError(
                f"rho set to {rho} is too high for k={k_elements}")

        # get lower and higher center indices (to generalize with case of even number of elements)
        lower_index = None
        higher_index = None

        # compute the median
        if k_elements % 2 == 0:

            lower_index = middle_index - 1
            higher_index = middle_index

            # classical median when even number of elements
            median = sorted_means[lower_index] * 0.5 + sorted_means[
                middle_index] * 0.5

        else:
            lower_index = middle_index - 1
            higher_index = middle_index - 1

            # classical when odd number of elements
            median = sorted_means[middle_index - 1]

        # add neighborhood information
        sum_to_divide = 1
        weighted_median = median  # start from the median alone

        for i in range(1, rho + 1):

            # confidence {alpha} parameter using a distance criterion
            mult_factor = math.pow(alpha, i)

            # add left and right neighbor contributions
            weighted_median += sorted_means[lower_index - i] * mult_factor
            weighted_median += sorted_means[higher_index + i] * mult_factor

            # accumulate the weighting contribution to take into account
            sum_to_divide += 2 * mult_factor

        # weighted median with neighborhood information
        return weighted_median / sum_to_divide
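The weighting above reads as: the median contributes with weight 1, and the i-th sorted neighbor on each side contributes with weight alpha**i, the total being divided by the accumulated weights. Below is a minimal standalone sketch of the same idea; the weighted_median helper and its default alpha/rho values are illustrative, not part of the original class.

import math

def weighted_median(means, rho=1, alpha=0.5):
    """Median weighted by its rho nearest sorted neighbors, with weights alpha**i."""
    sorted_means = sorted(means)
    k = len(sorted_means)
    middle = k // 2

    if k % 2 == 0:
        lower, higher = middle - 1, middle
        median = 0.5 * (sorted_means[lower] + sorted_means[higher])
    else:
        lower = higher = middle
        median = sorted_means[middle]

    total, weight_sum = median, 1.0
    for i in range(1, rho + 1):
        w = math.pow(alpha, i)  # neighbor confidence decays with distance
        total += w * (sorted_means[lower - i] + sorted_means[higher + i])
        weight_sum += 2 * w

    return total / weight_sum

# e.g. weighted_median([1, 2, 3, 10, 50], rho=1, alpha=0.5)
# returns (3 + 0.5 * (2 + 10)) / (1 + 2 * 0.5) = 4.5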
def get_zone_sobel_svd_entropy(images, interval, ksize, imnorm=False):

    entropy_list = []

    begin, end = interval

    for img in images:
        sobel_img = get_sobel_filtered_img(transform.get_LAB_L(img), ksize)

        if imnorm:
            sobel_img = np.array(sobel_img / 255.)

        sigma = compression.get_SVD_s(sobel_img)
        sigma = sigma[begin:end]

        s_entropy = utils.get_entropy(sigma)
        entropy_list.append(s_entropy)

    return entropy_list
def get_zone_entropy(images, interval, imnorm=False):

    entropy_list = []

    begin, end = interval

    for img in images:
        img_lab = transform.get_LAB_L(img)

        if imnorm:
            img_lab = np.array(img_lab / 255.)

        sigma = compression.get_SVD_s(img_lab)
        sigma = sigma[begin:end]

        s_entropy = utils.get_entropy(sigma)
        entropy_list.append(s_entropy)

    return entropy_list
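Both zone helpers expect a list of image blocks and a (begin, end) interval selecting which singular values contribute to the entropy. A minimal usage sketch, where the zones variable and the interval are placeholders:

# `zones` is assumed to be a list of image blocks compatible with transform.get_LAB_L
entropies = get_zone_entropy(zones, interval=(0, 50), imnorm=True)
sobel_entropies = get_zone_sobel_svd_entropy(zones, interval=(0, 50), ksize=3, imnorm=True)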
def get_image_features(data_type, block):
    """
    Method which returns the data type expected
    """

    if data_type == 'lab':

        block_file_path = '/tmp/lab_img.png'
        block.save(block_file_path)
        data = transform.get_LAB_L_SVD_s(Image.open(block_file_path))

    if data_type == 'mscn':

        img_mscn_revisited = transform.rgb_to_mscn(block)

        # save tmp as img
        img_output = Image.fromarray(img_mscn_revisited.astype('uint8'), 'L')
        mscn_revisited_file_path = '/tmp/mscn_revisited_img.png'
        img_output.save(mscn_revisited_file_path)
        img_block = Image.open(mscn_revisited_file_path)

        # extract from temp image
        data = compression.get_SVD_s(img_block)
    """if data_type == 'mscn':

        img_gray = np.array(color.rgb2gray(np.asarray(block))*255, 'uint8')
        img_mscn = transform.calculate_mscn_coefficients(img_gray, 7)
        img_mscn_norm = transform.normalize_2D_arr(img_mscn)

        img_mscn_gray = np.array(img_mscn_norm*255, 'uint8')

        data = compression.get_SVD_s(img_mscn_gray)
    """

    if data_type == 'low_bits_6':

        low_bits_6 = transform.rgb_to_LAB_L_low_bits(block, 6)
        data = compression.get_SVD_s(low_bits_6)

    if data_type == 'low_bits_5':

        low_bits_5 = transform.rgb_to_LAB_L_low_bits(block, 5)
        data = compression.get_SVD_s(low_bits_5)

    if data_type == 'low_bits_4':

        low_bits_4 = transform.rgb_to_LAB_L_low_bits(block, 4)
        data = compression.get_SVD_s(low_bits_4)

    if data_type == 'low_bits_3':

        low_bits_3 = transform.rgb_to_LAB_L_low_bits(block, 3)
        data = compression.get_SVD_s(low_bits_3)

    if data_type == 'low_bits_2':

        low_bits_2 = transform.rgb_to_LAB_L_low_bits(block, 2)
        data = compression.get_SVD_s(low_bits_2)

    if data_type == 'low_bits_4_shifted_2':

        data = compression.get_SVD_s(transform.rgb_to_LAB_L_bits(
            block, (3, 6)))

    if data_type == 'sub_blocks_stats':

        block = np.asarray(block)
        width, height, _ = block.shape
        sub_width, sub_height = int(width / 4), int(height / 4)

        sub_blocks = segmentation.divide_in_blocks(block,
                                                   (sub_width, sub_height))

        data = []

        for sub_b in sub_blocks:

            # by default use the whole lab L canal
            l_svd_data = np.array(transform.get_LAB_L_SVD_s(sub_b))

            # get information we want from svd
            data.append(np.mean(l_svd_data))
            data.append(np.median(l_svd_data))
            data.append(np.percentile(l_svd_data, 25))
            data.append(np.percentile(l_svd_data, 75))
            data.append(np.var(l_svd_data))

            area_under_curve = utils.integral_area_trapz(l_svd_data, dx=100)
            data.append(area_under_curve)

        # convert into numpy array after computing all stats
        data = np.asarray(data)

    if data_type == 'sub_blocks_stats_reduced':

        block = np.asarray(block)
        width, height, _ = block.shape
        sub_width, sub_height = int(width / 4), int(height / 4)

        sub_blocks = segmentation.divide_in_blocks(block,
                                                   (sub_width, sub_height))

        data = []

        for sub_b in sub_blocks:

            # by default use the whole lab L canal
            l_svd_data = np.array(transform.get_LAB_L_SVD_s(sub_b))

            # get information we want from svd
            data.append(np.mean(l_svd_data))
            data.append(np.median(l_svd_data))
            data.append(np.percentile(l_svd_data, 25))
            data.append(np.percentile(l_svd_data, 75))
            data.append(np.var(l_svd_data))

        # convert into numpy array after computing all stats
        data = np.asarray(data)

    if data_type == 'sub_blocks_area':

        block = np.asarray(block)
        width, height, _ = block.shape
        sub_width, sub_height = int(width / 8), int(height / 8)

        sub_blocks = segmentation.divide_in_blocks(block,
                                                   (sub_width, sub_height))

        data = []

        for sub_b in sub_blocks:

            # by default use the whole lab L canal
            l_svd_data = np.array(transform.get_LAB_L_SVD_s(sub_b))

            area_under_curve = utils.integral_area_trapz(l_svd_data, dx=50)
            data.append(area_under_curve)

        # convert into numpy array after computing all stats
        data = np.asarray(data)

    if data_type == 'sub_blocks_area_normed':

        block = np.asarray(block)
        width, height, _ = block.shape
        sub_width, sub_height = int(width / 8), int(height / 8)

        sub_blocks = segmentation.divide_in_blocks(block,
                                                   (sub_width, sub_height))

        data = []

        for sub_b in sub_blocks:

            # by default use the whole lab L canal
            l_svd_data = np.array(transform.get_LAB_L_SVD_s(sub_b))
            l_svd_data = utils.normalize_arr(l_svd_data)

            area_under_curve = utils.integral_area_trapz(l_svd_data, dx=50)
            data.append(area_under_curve)

        # convert into numpy array after computing all stats
        data = np.asarray(data)

    if data_type == 'mscn_var_4':

        data = _get_mscn_variance(block, (100, 100))

    if data_type == 'mscn_var_16':

        data = _get_mscn_variance(block, (50, 50))

    if data_type == 'mscn_var_64':

        data = _get_mscn_variance(block, (25, 25))

    if data_type == 'mscn_var_16_max':

        data = _get_mscn_variance(block, (50, 50))
        data = np.asarray(data)
        size = int(len(data) / 4)
        indices = data.argsort()[-size:][::-1]
        data = data[indices]

    if data_type == 'mscn_var_64_max':

        data = _get_mscn_variance(block, (25, 25))
        data = np.asarray(data)
        size = int(len(data) / 4)
        indices = data.argsort()[-size:][::-1]
        data = data[indices]

    if data_type == 'ica_diff':
        current_image = transform.get_LAB_L(block)

        ica = FastICA(n_components=50)
        ica.fit(current_image)

        image_ica = ica.fit_transform(current_image)
        image_restored = ica.inverse_transform(image_ica)

        final_image = utils.normalize_2D_arr(image_restored)
        final_image = np.array(final_image * 255, 'uint8')

        sv_values = utils.normalize_arr(compression.get_SVD_s(current_image))
        ica_sv_values = utils.normalize_arr(compression.get_SVD_s(final_image))

        data = abs(np.array(sv_values) - np.array(ica_sv_values))

    if data_type == 'svd_trunc_diff':

        current_image = transform.get_LAB_L(block)

        svd = TruncatedSVD(n_components=30, n_iter=100, random_state=42)
        transformed_image = svd.fit_transform(current_image)
        restored_image = svd.inverse_transform(transformed_image)

        reduced_image = (current_image - restored_image)

        U, s, V = compression.get_SVD(reduced_image)
        data = s

    if data_type == 'ipca_diff':

        current_image = transform.get_LAB_L(block)

        transformer = IncrementalPCA(n_components=20, batch_size=25)
        transformed_image = transformer.fit_transform(current_image)
        restored_image = transformer.inverse_transform(transformed_image)

        reduced_image = (current_image - restored_image)

        U, s, V = compression.get_SVD(reduced_image)
        data = s

    if data_type == 'svd_reconstruct':

        reconstructed_interval = (90, 200)
        begin, end = reconstructed_interval

        lab_img = transform.get_LAB_L(block)
        lab_img = np.array(lab_img, 'uint8')

        U, s, V = lin_svd(lab_img, full_matrices=True)

        # singular values of a real matrix are real, so a float matrix is enough
        smat = np.zeros((end - begin, end - begin), dtype=float)
        smat[:, :] = np.diag(s[begin:end])
        output_img = np.dot(U[:, begin:end], np.dot(smat, V[begin:end, :]))

        output_img = np.array(output_img, 'uint8')

        data = compression.get_SVD_s(output_img)

    # note: these substring checks overlap ('sv_std_filters' also matches the
    # 'wave_...' and '..._full' variants below, whose branches then overwrite data)
    if 'sv_std_filters' in data_type:

        # convert into lab by default to apply filters
        lab_img = transform.get_LAB_L(block)
        arr = np.array(lab_img)
        images = []

        # Apply list of filter on arr
        images.append(medfilt2d(arr, [3, 3]))
        images.append(medfilt2d(arr, [5, 5]))
        images.append(wiener(arr, [3, 3]))
        images.append(wiener(arr, [5, 5]))

        # By default computation of current block image
        s_arr = compression.get_SVD_s(arr)
        sv_vector = [s_arr]

        # for each new image apply SVD and get SV
        for img in images:
            s = compression.get_SVD_s(img)
            sv_vector.append(s)

        sv_array = np.array(sv_vector)

        _, length = sv_array.shape

        sv_std = []

        # normalize each SV vectors and compute standard deviation for each sub vectors
        for i in range(length):
            sv_array[:, i] = utils.normalize_arr(sv_array[:, i])
            sv_std.append(np.std(sv_array[:, i]))

        indices = []

        if 'lowest' in data_type:
            indices = utils.get_indices_of_lowest_values(sv_std, 200)

        if 'highest' in data_type:
            indices = utils.get_indices_of_highest_values(sv_std, 200)

        # data are arranged following std trend computed
        data = s_arr[indices]

    # same approach with a reduced filter list (despite the name, only a median filter is applied)
    if 'wave_sv_std_filters' in data_type:

        # convert into lab by default to apply filters
        lab_img = transform.get_LAB_L(block)
        arr = np.array(lab_img)
        images = []

        # Apply list of filter on arr
        images.append(medfilt2d(arr, [3, 3]))

        # By default computation of current block image
        s_arr = compression.get_SVD_s(arr)
        sv_vector = [s_arr]

        # for each new image apply SVD and get SV
        for img in images:
            s = compression.get_SVD_s(img)
            sv_vector.append(s)

        sv_array = np.array(sv_vector)

        _, length = sv_array.shape

        sv_std = []

        # normalize each SV vectors and compute standard deviation for each sub vectors
        for i in range(length):
            sv_array[:, i] = utils.normalize_arr(sv_array[:, i])
            sv_std.append(np.std(sv_array[:, i]))

        indices = []

        if 'lowest' in data_type:
            indices = utils.get_indices_of_lowest_values(sv_std, 200)

        if 'highest' in data_type:
            indices = utils.get_indices_of_highest_values(sv_std, 200)

        # data are arranged following std trend computed
        data = s_arr[indices]

    # same approach with the full filter list (mean, Gaussian, median, Wiener and wavelet)
    if 'sv_std_filters_full' in data_type:

        # convert into lab by default to apply filters
        lab_img = transform.get_LAB_L(block)
        arr = np.array(lab_img)
        images = []

        # Apply list of filter on arr
        kernel = np.ones((3, 3), np.float32) / 9
        images.append(cv2.filter2D(arr, -1, kernel))

        kernel = np.ones((5, 5), np.float32) / 25
        images.append(cv2.filter2D(arr, -1, kernel))

        images.append(cv2.GaussianBlur(arr, (3, 3), 0.5))

        images.append(cv2.GaussianBlur(arr, (3, 3), 1))

        images.append(cv2.GaussianBlur(arr, (3, 3), 1.5))

        images.append(cv2.GaussianBlur(arr, (5, 5), 0.5))

        images.append(cv2.GaussianBlur(arr, (5, 5), 1))

        images.append(cv2.GaussianBlur(arr, (5, 5), 1.5))

        images.append(medfilt2d(arr, [3, 3]))

        images.append(medfilt2d(arr, [5, 5]))

        images.append(wiener(arr, [3, 3]))

        images.append(wiener(arr, [5, 5]))

        wave = w2d(arr, 'db1', 2)
        images.append(np.array(wave, 'float64'))

        # By default computation of current block image
        s_arr = compression.get_SVD_s(arr)
        sv_vector = [s_arr]

        # for each new image apply SVD and get SV
        for img in images:
            s = compression.get_SVD_s(img)
            sv_vector.append(s)

        sv_array = np.array(sv_vector)

        _, length = sv_array.shape

        sv_std = []

        # normalize each SV vectors and compute standard deviation for each sub vectors
        for i in range(length):
            sv_array[:, i] = utils.normalize_arr(sv_array[:, i])
            sv_std.append(np.std(sv_array[:, i]))

        indices = []

        if 'lowest' in data_type:
            indices = utils.get_indices_of_lowest_values(sv_std, 200)

        if 'highest' in data_type:
            indices = utils.get_indices_of_highest_values(sv_std, 200)

        # data are arranged following std trend computed
        data = s_arr[indices]

    if 'sv_entropy_std_filters' in data_type:

        lab_img = transform.get_LAB_L(block)
        arr = np.array(lab_img)

        images = []

        kernel = np.ones((3, 3), np.float32) / 9
        images.append(cv2.filter2D(arr, -1, kernel))

        kernel = np.ones((5, 5), np.float32) / 25
        images.append(cv2.filter2D(arr, -1, kernel))

        images.append(cv2.GaussianBlur(arr, (3, 3), 0.5))

        images.append(cv2.GaussianBlur(arr, (3, 3), 1))

        images.append(cv2.GaussianBlur(arr, (3, 3), 1.5))

        images.append(cv2.GaussianBlur(arr, (5, 5), 0.5))

        images.append(cv2.GaussianBlur(arr, (5, 5), 1))

        images.append(cv2.GaussianBlur(arr, (5, 5), 1.5))

        images.append(medfilt2d(arr, [3, 3]))

        images.append(medfilt2d(arr, [5, 5]))

        images.append(wiener(arr, [3, 3]))

        images.append(wiener(arr, [5, 5]))

        wave = w2d(arr, 'db1', 2)
        images.append(np.array(wave, 'float64'))

        sv_vector = []
        sv_entropy_list = []  # note: filled below but never used for the final feature

        # for each new image apply SVD and get SV
        for img in images:
            s = compression.get_SVD_s(img)
            sv_vector.append(s)

            sv_entropy = [
                utils.get_entropy_contribution_of_i(s, id_sv)
                for id_sv in range(len(s))
            ]
            sv_entropy_list.append(sv_entropy)

        sv_std = []

        sv_array = np.array(sv_vector)
        _, length = sv_array.shape

        # normalize each SV vectors and compute standard deviation for each sub vectors
        for i in range(length):
            sv_array[:, i] = utils.normalize_arr(sv_array[:, i])
            sv_std.append(np.std(sv_array[:, i]))

        indices = []

        if 'lowest' in data_type:
            indices = utils.get_indices_of_lowest_values(sv_std, 200)

        if 'highest' in data_type:
            indices = utils.get_indices_of_highest_values(sv_std, 200)

        # data are arranged following std trend computed
        s_arr = compression.get_SVD_s(arr)
        data = s_arr[indices]

    if 'convolutional_kernels' in data_type:

        sub_zones = segmentation.divide_in_blocks(block, (20, 20))

        data = []

        diff_std_list_3 = []
        diff_std_list_5 = []
        diff_mean_list_3 = []
        diff_mean_list_5 = []

        plane_std_list_3 = []
        plane_std_list_5 = []
        plane_mean_list_3 = []
        plane_mean_list_5 = []

        plane_max_std_list_3 = []
        plane_max_std_list_5 = []
        plane_max_mean_list_3 = []
        plane_max_mean_list_5 = []

        for sub_zone in sub_zones:
            l_img = transform.get_LAB_L(sub_zone)
            normed_l_img = utils.normalize_2D_arr(l_img)

            # bilateral with window of size (3, 3)
            normed_diff = convolution.convolution2D(normed_l_img,
                                                    kernels.min_bilateral_diff,
                                                    (3, 3))
            std_diff = np.std(normed_diff)
            mean_diff = np.mean(normed_diff)

            diff_std_list_3.append(std_diff)
            diff_mean_list_3.append(mean_diff)

            # bilateral with window of size (5, 5)
            normed_diff = convolution.convolution2D(normed_l_img,
                                                    kernels.min_bilateral_diff,
                                                    (5, 5))
            std_diff = np.std(normed_diff)
            mean_diff = np.mean(normed_diff)

            diff_std_list_5.append(std_diff)
            diff_mean_list_5.append(mean_diff)

            # plane mean with window of size (3, 3)
            normed_plane_mean = convolution.convolution2D(
                normed_l_img, kernels.plane_mean, (3, 3))
            std_plane_mean = np.std(normed_plane_mean)
            mean_plane_mean = np.mean(normed_plane_mean)

            plane_std_list_3.append(std_plane_mean)
            plane_mean_list_3.append(mean_plane_mean)

            # plane mean with window of size (5, 5)
            normed_plane_mean = convolution.convolution2D(
                normed_l_img, kernels.plane_mean, (5, 5))
            std_plane_mean = np.std(normed_plane_mean)
            mean_plane_mean = np.mean(normed_plane_mean)

            plane_std_list_5.append(std_plane_mean)
            plane_mean_list_5.append(mean_plane_mean)

            # plane max error with window of size (3, 3)
            normed_plane_max = convolution.convolution2D(
                normed_l_img, kernels.plane_max_error, (3, 3))
            std_plane_max = np.std(normed_plane_max)
            mean_plane_max = np.mean(normed_plane_max)

            plane_max_std_list_3.append(std_plane_max)
            plane_max_mean_list_3.append(mean_plane_max)

            # plane max error with window of size (5, 5)
            normed_plane_max = convolution.convolution2D(
                normed_l_img, kernels.plane_max_error, (5, 5))
            std_plane_max = np.std(normed_plane_max)
            mean_plane_max = np.mean(normed_plane_max)

            plane_max_std_list_5.append(std_plane_max)
            plane_max_mean_list_5.append(mean_plane_max)

        diff_std_list_3 = np.array(diff_std_list_3)
        diff_std_list_5 = np.array(diff_std_list_5)

        diff_mean_list_3 = np.array(diff_mean_list_3)
        diff_mean_list_5 = np.array(diff_mean_list_5)

        plane_std_list_3 = np.array(plane_std_list_3)
        plane_std_list_5 = np.array(plane_std_list_5)

        plane_mean_list_3 = np.array(plane_mean_list_3)
        plane_mean_list_5 = np.array(plane_mean_list_5)

        plane_max_std_list_3 = np.array(plane_max_std_list_3)
        plane_max_std_list_5 = np.array(plane_max_std_list_5)

        plane_max_mean_list_3 = np.array(plane_max_mean_list_3)
        plane_max_mean_list_5 = np.array(plane_max_mean_list_5)

        if 'std_max_blocks' in data_type:

            n_max = int(len(sub_zones) / 5)

            data.append(np.std(diff_std_list_3[0:n_max]))
            data.append(np.std(diff_mean_list_3[0:n_max]))
            data.append(np.std(diff_std_list_5[0:n_max]))
            data.append(np.std(diff_mean_list_5[0:n_max]))

            data.append(np.std(plane_std_list_3[0:n_max]))
            data.append(np.std(plane_mean_list_3[0:n_max]))
            data.append(np.std(plane_std_list_5[0:n_max]))
            data.append(np.std(plane_mean_list_5[0:n_max]))

            data.append(np.std(plane_max_std_list_3[0:n_max]))
            data.append(np.std(plane_max_mean_list_3[0:n_max]))
            data.append(np.std(plane_max_std_list_5[0:n_max]))
            data.append(np.std(plane_max_mean_list_5[0:n_max]))

        if 'mean_max_blocks' in data_type:

            n_max = int(len(sub_zones) / 5)

            data.append(np.mean(diff_std_list_3[0:n_max]))
            data.append(np.mean(diff_mean_list_3[0:n_max]))
            data.append(np.mean(diff_std_list_5[0:n_max]))
            data.append(np.mean(diff_mean_list_5[0:n_max]))

            data.append(np.mean(plane_std_list_3[0:n_max]))
            data.append(np.mean(plane_mean_list_3[0:n_max]))
            data.append(np.mean(plane_std_list_5[0:n_max]))
            data.append(np.mean(plane_mean_list_5[0:n_max]))

            data.append(np.mean(plane_max_std_list_3[0:n_max]))
            data.append(np.mean(plane_max_mean_list_3[0:n_max]))
            data.append(np.mean(plane_max_std_list_5[0:n_max]))
            data.append(np.mean(plane_max_mean_list_5[0:n_max]))

        if 'std_normed' in data_type:

            data.append(np.std(diff_std_list_3))
            data.append(np.std(diff_mean_list_3))
            data.append(np.std(diff_std_list_5))
            data.append(np.std(diff_mean_list_5))

            data.append(np.std(plane_std_list_3))
            data.append(np.std(plane_mean_list_3))
            data.append(np.std(plane_std_list_5))
            data.append(np.std(plane_mean_list_5))

            data.append(np.std(plane_max_std_list_3))
            data.append(np.std(plane_max_mean_list_3))
            data.append(np.std(plane_max_std_list_5))
            data.append(np.std(plane_max_mean_list_5))

        if 'mean_normed' in data_type:

            data.append(np.mean(diff_std_list_3))
            data.append(np.mean(diff_mean_list_3))
            data.append(np.mean(diff_std_list_5))
            data.append(np.mean(diff_mean_list_5))

            data.append(np.mean(plane_std_list_3))
            data.append(np.mean(plane_mean_list_3))
            data.append(np.mean(plane_std_list_5))
            data.append(np.mean(plane_mean_list_5))

            data.append(np.mean(plane_max_std_list_3))
            data.append(np.mean(plane_max_mean_list_3))
            data.append(np.mean(plane_max_std_list_5))
            data.append(np.mean(plane_max_mean_list_5))

        data = np.array(data)

    if data_type == 'convolutional_kernel_stats_svd':

        l_img = transform.get_LAB_L(block)
        normed_l_img = utils.normalize_2D_arr(l_img)

        # bilateral with window of size (5, 5)
        normed_diff = convolution.convolution2D(normed_l_img,
                                                kernels.min_bilateral_diff,
                                                (5, 5))

        # getting sigma vector from SVD compression
        s = compression.get_SVD_s(normed_diff)

        data = s

    if data_type == 'svd_entropy' or data_type == 'svd_entropy_20':
        l_img = transform.get_LAB_L(block)

        blocks = segmentation.divide_in_blocks(l_img, (20, 20))

        values = []
        for b in blocks:
            sv = compression.get_SVD_s(b)
            values.append(utils.get_entropy(sv))
        data = np.array(values)

    if data_type == 'svd_entropy_noise_20':
        l_img = transform.get_LAB_L(block)

        blocks = segmentation.divide_in_blocks(l_img, (20, 20))

        values = []
        for b in blocks:
            sv = compression.get_SVD_s(b)
            sv_size = len(sv)
            values.append(utils.get_entropy(sv[int(sv_size / 4):]))
        data = np.array(values)

    return data
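A minimal usage sketch of the dispatcher above; the path is illustrative, and block is assumed to be a PIL image since the 'lab' branch calls block.save:

from PIL import Image

block = Image.open('/tmp/scene_block.png')  # hypothetical 200x200 block
features = get_image_features('svd_entropy_noise_20', block)
print(features.shape)  # one entropy value per 20x20 sub-block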
def get_image_features(data_type, block):
    """
    Method which returns the data type expected
    """

    if 'filters_statistics' in data_type:

        img_width, img_height = 200, 200  # assumes 200x200 blocks

        lab_img = transform.get_LAB_L(block)
        arr = np.array(lab_img)

        # statistics of the absolute error between the block and its filtered version
        def get_stats(arr, I_filter):

            e1 = np.abs(arr - I_filter)
            L = np.array(e1)
            mu0 = np.mean(L)  # mean absolute error
            A = L - mu0
            H = A * A
            E = np.sum(H) / (img_width * img_height)
            P = np.sqrt(E)  # standard deviation of the absolute error

            return mu0, P
            # return np.mean(I_filter), np.std(I_filter)

        stats = []

        kernel = np.ones((3, 3), np.float32) / 9
        stats.append(get_stats(arr, cv2.filter2D(arr, -1, kernel)))

        kernel = np.ones((5, 5), np.float32) / 25
        stats.append(get_stats(arr, cv2.filter2D(arr, -1, kernel)))

        stats.append(get_stats(arr, cv2.GaussianBlur(arr, (3, 3), 0.5)))

        stats.append(get_stats(arr, cv2.GaussianBlur(arr, (3, 3), 1)))

        stats.append(get_stats(arr, cv2.GaussianBlur(arr, (3, 3), 1.5)))

        stats.append(get_stats(arr, cv2.GaussianBlur(arr, (5, 5), 0.5)))

        stats.append(get_stats(arr, cv2.GaussianBlur(arr, (5, 5), 1)))

        stats.append(get_stats(arr, cv2.GaussianBlur(arr, (5, 5), 1.5)))

        stats.append(get_stats(arr, medfilt2d(arr, [3, 3])))

        stats.append(get_stats(arr, medfilt2d(arr, [5, 5])))

        stats.append(get_stats(arr, wiener(arr, [3, 3])))

        stats.append(get_stats(arr, wiener(arr, [5, 5])))

        wave = w2d(arr, 'db1', 2)
        stats.append(get_stats(arr, np.array(wave, 'float64')))

        data = []

        for stat in stats:
            data.append(stat[0])

        for stat in stats:
            data.append(stat[1])

        data = np.array(data)

    if 'statistics_extended' in data_type:

        data = get_image_features('filters_statistics', block)

        # add Kolmogorov complexity proxy: size of the gzip-compressed block, in megabytes
        bytes_data = np.array(block).tobytes()
        compress_data = gzip.compress(bytes_data)

        kb_size = sys.getsizeof(compress_data) / 1024.
        mb_size = kb_size / 1024.
        data = np.append(data, mb_size)

        lab_img = transform.get_LAB_L(block)
        arr = np.array(lab_img)

        # add SVD entropy
        svd_entropy = utils.get_entropy(compression.get_SVD_s(arr))
        data = np.append(data, svd_entropy)

        # add sobel complexity (kernel size of 3)
        sobelx = cv2.Sobel(arr, cv2.CV_64F, 1, 0, ksize=3)
        sobely = cv2.Sobel(arr, cv2.CV_64F, 0, 1, ksize=3)

        sobel_mag = np.array(np.hypot(sobelx, sobely), 'uint8')  # magnitude

        data = np.append(data, np.std(sobel_mag))

        # add sobel complexity (kernel size of 5)
        sobelx = cv2.Sobel(arr, cv2.CV_64F, 1, 0, ksize=5)
        sobely = cv2.Sobel(arr, cv2.CV_64F, 0, 1, ksize=5)

        sobel_mag = np.array(np.hypot(sobelx, sobely), 'uint8')  # magnitude

        data = np.append(data, np.std(sobel_mag))

    if 'lab' in data_type:

        data = transform.get_LAB_L_SVD_s(block)

    return data
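The inner get_stats helper returns the mean and the standard deviation of the absolute error between the block and its filtered version (exactly so only for 200x200 blocks, since E divides by the hardcoded size). A minimal numpy check of that reading, with a stand-in filter:

import numpy as np

arr = np.random.rand(200, 200)
filtered = np.roll(arr, 1, axis=0)  # stand-in for any denoising filter

e = np.abs(arr - filtered)
mu0 = np.mean(e)
P = np.sqrt(np.mean((e - mu0) ** 2))

assert np.isclose(mu0, e.mean())
assert np.isclose(P, e.std())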
Example #6
def estimate(estimator, arr):

    if estimator == 'variance':
        return np.var(arr)

    if estimator == 'l_variance':
        return np.var(transform.get_LAB_L(arr))

    if estimator == 'mean':
        return np.mean(arr)

    if estimator == 'l_mean':
        return np.mean(transform.get_LAB_L(arr))

    if estimator == 'sv_struct_all':
        return transform.get_LAB_L_SVD_s(arr)[0:50]

    if estimator == 'sv_struct':
        return np.mean(transform.get_LAB_L_SVD_s(arr)[0:50])

    if estimator == 'sv_noise_all':
        return transform.get_LAB_L_SVD_s(arr)[50:]

    if estimator == 'sv_noise':
        return np.mean(transform.get_LAB_L_SVD_s(arr)[50:])

    if estimator == 'sobel':

        lab_img = transform.get_LAB_L(arr)

        # extract Sobel complexity (kernel size of 5)
        sobelx = cv2.Sobel(lab_img, cv2.CV_64F, 1, 0, ksize=5)
        sobely = cv2.Sobel(lab_img, cv2.CV_64F, 0, 1, ksize=5)

        sobel_mag = np.array(np.hypot(sobelx, sobely), 'uint8')  # magnitude

        return np.std(sobel_mag)

    if estimator == 'l_kolmogorov':

        lab_img = transform.get_LAB_L(arr)

        # Kolmogorov complexity proxy: size of the gzip-compressed L channel, in megabytes
        bytes_data = lab_img.tobytes()
        compress_data = gzip.compress(bytes_data)

        kb_size = sys.getsizeof(compress_data) / 1024.
        mb_size = kb_size / 1024.

        return np.float64(mb_size)

    if estimator == 'l_sv_entropy_blocks':

        # get L channel
        L_channel = transform.get_LAB_L(arr)

        # split in n block
        blocks = segmentation.divide_in_blocks(L_channel, (20, 20))

        entropy_list = []

        for block in blocks:
            reduced_sigma = compression.get_SVD_s(block)
            reduced_entropy = get_entropy(reduced_sigma)
            entropy_list.append(reduced_entropy)

        return entropy_list

    if estimator == '26_attributes':

        img_width, img_height = 200, 200  # assumes 200x200 blocks

        lab_img = transform.get_LAB_L(arr)
        arr = np.array(lab_img)

        # statistics of the absolute error between the block and its filtered version
        def get_stats(arr, I_filter):

            e1 = np.abs(arr - I_filter)
            L = np.array(e1)
            mu0 = np.mean(L)  # mean absolute error
            A = L - mu0
            H = A * A
            E = np.sum(H) / (img_width * img_height)
            P = np.sqrt(E)  # standard deviation of the absolute error

            return mu0, P
            # return np.mean(I_filter), np.std(I_filter)

        stats = []

        kernel = np.ones((3, 3), np.float32) / 9
        stats.append(get_stats(arr, cv2.filter2D(arr, -1, kernel)))

        kernel = np.ones((5, 5), np.float32) / 25
        stats.append(get_stats(arr, cv2.filter2D(arr, -1, kernel)))

        stats.append(get_stats(arr, cv2.GaussianBlur(arr, (3, 3), 0.5)))

        stats.append(get_stats(arr, cv2.GaussianBlur(arr, (3, 3), 1)))

        stats.append(get_stats(arr, cv2.GaussianBlur(arr, (3, 3), 1.5)))

        stats.append(get_stats(arr, cv2.GaussianBlur(arr, (5, 5), 0.5)))

        stats.append(get_stats(arr, cv2.GaussianBlur(arr, (5, 5), 1)))

        stats.append(get_stats(arr, cv2.GaussianBlur(arr, (5, 5), 1.5)))

        stats.append(get_stats(arr, medfilt2d(arr, [3, 3])))

        stats.append(get_stats(arr, medfilt2d(arr, [5, 5])))

        stats.append(get_stats(arr, wiener(arr, [3, 3])))

        stats.append(get_stats(arr, wiener(arr, [5, 5])))

        wave = w2d(arr, 'db1', 2)
        stats.append(get_stats(arr, np.array(wave, 'float64')))

        data = []

        for stat in stats:
            data.append(stat[0])

        for stat in stats:
            data.append(stat[1])

        data = np.array(data)

        return data

    if estimator == 'statistics_extended':

        # data = estimate('26_attributes', arr)
        data = np.empty(0)

        # add Kolmogorov complexity proxy: size of the gzip-compressed array, in megabytes
        bytes_data = np.array(arr).tobytes()
        compress_data = gzip.compress(bytes_data)

        kb_size = sys.getsizeof(compress_data) / 1024.
        mb_size = kb_size / 1024.
        data = np.append(data, mb_size)

        lab_img = transform.get_LAB_L(arr)
        arr = np.array(lab_img)

        # add SVD entropy
        svd_entropy = utils.get_entropy(compression.get_SVD_s(arr))
        data = np.append(data, svd_entropy)

        # add sobel complexity (kernel size of 3)
        sobelx = cv2.Sobel(arr, cv2.CV_64F, 1, 0, ksize=3)
        sobely = cv2.Sobel(arr, cv2.CV_64F, 0, 1, ksize=3)

        sobel_mag = np.array(np.hypot(sobelx, sobely), 'uint8')  # magnitude

        data = np.append(data, np.std(sobel_mag))

        # add sobel complexity (kernel size of 5)
        sobelx = cv2.Sobel(arr, cv2.CV_64F, 1, 0, ksize=5)
        sobely = cv2.Sobel(arr, cv2.CV_64F, 0, 1, ksize=5)

        sobel_mag = np.array(np.hypot(sobelx, sobely), 'uint8')  # magnitude

        data = np.append(data, np.std(sobel_mag))

        return data
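A minimal usage sketch of estimate; the random block is a placeholder, and an RGB array is assumed since the l_* estimators go through transform.get_LAB_L:

import numpy as np

rgb_block = np.random.randint(0, 256, (200, 200, 3), dtype=np.uint8)  # hypothetical block

noise_level = estimate('sv_noise', rgb_block)     # mean of trailing singular values
complexity = estimate('l_kolmogorov', rgb_block)  # compressed-size proxy
features = estimate('26_attributes', rgb_block)   # 26 filter statistics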