def get_sobel_entropy_complexity(entropy_list, sobel_list, std=False):
    
    dh_list = []
    previous_entropy_value = 0
    previous_sobel_value = 0

    entropy_list = list(map(float, entropy_list))
    sobel_list = list(map(float, sobel_list))

    if std:
        entropy_list_norm = utils.normalize_arr(entropy_list)
        sobel_list_norm = utils.normalize_arr(sobel_list)
    
    for i in range(len(entropy_list)):
        
        if i > 0:
            
            entropy_diff = abs(previous_entropy_value - entropy_list[i])
            sobel_diff = abs(previous_sobel_value - sobel_list[i])
            
            entropy_norm = utils.normalize_arr(entropy_list[:i+1])
            sobel_norm = utils.normalize_arr(sobel_list[:i+1])
            dh = entropy_diff * sobel_list[i]
                
            dh_list.append(dh)
        
        previous_entropy_value = entropy_list[i]
        previous_sobel_value = sobel_list[i]
        
    return dh_list
Beispiel #2
0
def get_sobel_entropy_complexity(entropy_list, sobel_list, std=False):

    dh_list = []
    previous_entropy_value = 0
    previous_sobel_value = 0

    entropy_list = list(map(float, entropy_list))
    sobel_list = list(map(float, sobel_list))

    if std:
        entropy_list_norm = utils.normalize_arr(entropy_list)
        sobel_list_norm = utils.normalize_arr(sobel_list)

    for i in range(len(entropy_list)):

        if i > 0:

            entropy_diff = 1 - abs(previous_entropy_value - entropy_list[i])
            sobel_diff = 1 - abs(previous_sobel_value - sobel_list[i])

            if std:
                # ponderation using `std` from each list
                dh = (entropy_diff * np.std(entropy_list_norm[:(i + 1)])) * (
                    sobel_diff * np.std(sobel_list_norm[:(i + 1)]))
            else:
                dh = entropy_diff * (sobel_diff + sys.float_info.epsilon)

            dh_list.append(dh)

        previous_entropy_value = entropy_list[i]
        previous_sobel_value = sobel_list[i]

    return dh_list
def construct_new_line(threshold, interval, line, choice, each, norm):
    begin, end = interval

    line_data = line.split(';')
    seuil = line_data[0]
    features = line_data[begin + 1:end + 1]

    features = [float(m) for id, m in enumerate(features) if id % each == 0]

    if norm:
        if choice == 'svdne':
            features = utils.normalize_arr_with_range(features,
                                                      min_value_interval,
                                                      max_value_interval)
        if choice == 'svdn':
            features = utils.normalize_arr(features)

    if threshold > int(seuil):
        line = '1'
    else:
        line = '0'

    for val in features:
        line += ';'
        line += str(val)
    line += '\n'

    return line
Beispiel #4
0
def construct_new_line(interval, line_data, choice, each, norm):
    begin, end = interval

    label = line_data[2]
    features = line_data[begin+3:end+3]
    
    # keep only if modulo result is 0 (keep only each wanted values)
    features = [float(m) for id, m in enumerate(features) if id % each == 0]

    # TODO : check if it's always necessary to do that (loss of information for svd)
    if norm:

        if choice == 'svdne':
            features = utils.normalize_arr_with_range(features, min_value_interval, max_value_interval)
        if choice == 'svdn':
            features = utils.normalize_arr(features)

    line = label

    for val in features:
        line += ';'
        line += str(val)
    line += '\n'

    return line
def get_zone_diff_entropy(entropy_list, std=False):

    diff_list = []
    previous_value = 0

    entropy_list = list(map(float, entropy_list))

    if std:
        entropy_list_norm = utils.normalize_arr(entropy_list)

    for index, value in enumerate(entropy_list):

        if index > 0:

            if std:
                # ponderation using `std` from list normalized
                diff = previous_value - np.std(entropy_list[:(index + 1)])
            else:
                diff = previous_value - value

            diff_list.append(diff)

        if std:
            previous_value = np.std(entropy_list_norm[:(index + 1)])
        else:
            previous_value = value

    return diff_list
Beispiel #6
0
def construct_new_line(path_seuil, interval, line, choice, each, norm):
    begin, end = interval

    line_data = line.split(';')
    seuil = line_data[0]
    features = line_data[begin + 1:end + 1]

    features = [float(m) for id, m in enumerate(features) if id % each == 0]

    if norm:
        if choice == 'svdne':
            features = utils.normalize_arr_with_range(features,
                                                      min_value_interval,
                                                      max_value_interval)
        if choice == 'svdn':
            features = utils.normalize_arr(features)

    with open(path_seuil, "r") as seuil_file:
        seuil_learned = int(seuil_file.readline().strip())

    if seuil_learned > int(seuil):
        line = '1'
    else:
        line = '0'

    for val in features:
        line += ';'
        line += str(val)
    line += '\n'

    return line
Beispiel #7
0
def construct_new_line(seuil_learned, interval, line, choice, each, norm):
    begin, end = interval

    line_data = line.split(';')
    seuil = line_data[0]
    features = line_data[begin + 1:end + 1]

    # keep only if modulo result is 0 (keep only each wanted values)
    features = [float(m) for id, m in enumerate(features) if id % each == 0]

    # TODO : check if it's always necessary to do that (loss of information for svd)
    if norm:

        if choice == 'svdne':
            features = utils.normalize_arr_with_range(features,
                                                      min_value_interval,
                                                      max_value_interval)
        if choice == 'svdn':
            features = utils.normalize_arr(features)

    if seuil_learned > int(seuil):
        line = '1'
    else:
        line = '0'

    for val in features:
        line += ';'
        line += str(val)
    line += '\n'

    return line
def get_zone_minus_entropy(entropy_list, std=False):

    dh_list = []
    previous_dh = 0

    entropy_list = list(map(float, entropy_list))

    if std:
        entropy_list_norm = utils.normalize_arr(entropy_list)

    for index, value in enumerate(entropy_list):

        dh = 0

        if index == 0:
            dh = 1 - float(value)
        else:
            dh = previous_dh - float(value)

            if std:
                # ponderation using `std` from list normalized
                dh = dh * np.std(entropy_list_norm[:(index + 1)])

            dh_list.append(dh)

        previous_dh = dh

    return dh_list
def construct_new_line(path_seuil, indices, line, choice, norm):

    # increase indices values by one to avoid label
    f = lambda x: x + 1
    indices = f(indices)

    line_data = np.array(line.split(';'))
    seuil = line_data[0]
    features = line_data[indices]
    features = features.astype('float32')

    # TODO : check if it's always necessary to do that (loss of information for svd)
    if norm:
        if choice == 'svdne':
            features = utils.normalize_arr_with_range(features,
                                                      min_value_interval,
                                                      max_value_interval)
        if choice == 'svdn':
            features = utils.normalize_arr(features)

    with open(path_seuil, "r") as seuil_file:
        seuil_learned = int(seuil_file.readline().strip())

    if seuil_learned > int(seuil):
        line = '1'
    else:
        line = '0'

    for val in features:
        line += ';'
        line += str(val)
    line += '\n'

    return line
def get_zone_gradient_sobel_svd_entropy(entropy_list, std=False):

    dh_list = []
    previous_value = 0

    entropy_list = list(map(float, entropy_list))

    if std:
        entropy_list_norm = utils.normalize_arr(entropy_list)

    for index, value in enumerate(entropy_list):

        if index > 0:
            dh = 1 - (previous_value - value)

            if std:
                # ponderation using `std` from list normalized
                dh = dh * np.std(entropy_list_norm[:(index + 1)])

            dh_list.append(dh)

        previous_value = value

    return dh_list
def get_image_features(data_type, block):
    """
    Method which returns the data type expected
    """

    if data_type == 'lab':

        block_file_path = '/tmp/lab_img.png'
        block.save(block_file_path)
        data = transform.get_LAB_L_SVD_s(Image.open(block_file_path))

    if data_type == 'mscn':

        img_mscn_revisited = transform.rgb_to_mscn(block)

        # save tmp as img
        img_output = Image.fromarray(img_mscn_revisited.astype('uint8'), 'L')
        mscn_revisited_file_path = '/tmp/mscn_revisited_img.png'
        img_output.save(mscn_revisited_file_path)
        img_block = Image.open(mscn_revisited_file_path)

        # extract from temp image
        data = compression.get_SVD_s(img_block)
    """if data_type == 'mscn':

        img_gray = np.array(color.rgb2gray(np.asarray(block))*255, 'uint8')
        img_mscn = transform.calculate_mscn_coefficients(img_gray, 7)
        img_mscn_norm = transform.normalize_2D_arr(img_mscn)

        img_mscn_gray = np.array(img_mscn_norm*255, 'uint8')

        data = compression.get_SVD_s(img_mscn_gray)
    """

    if data_type == 'low_bits_6':

        low_bits_6 = transform.rgb_to_LAB_L_low_bits(block, 6)
        data = compression.get_SVD_s(low_bits_6)

    if data_type == 'low_bits_5':

        low_bits_5 = transform.rgb_to_LAB_L_low_bits(block, 5)
        data = compression.get_SVD_s(low_bits_5)

    if data_type == 'low_bits_4':

        low_bits_4 = transform.rgb_to_LAB_L_low_bits(block, 4)
        data = compression.get_SVD_s(low_bits_4)

    if data_type == 'low_bits_3':

        low_bits_3 = transform.rgb_to_LAB_L_low_bits(block, 3)
        data = compression.get_SVD_s(low_bits_3)

    if data_type == 'low_bits_2':

        low_bits_2 = transform.rgb_to_LAB_L_low_bits(block, 2)
        data = compression.get_SVD_s(low_bits_2)

    if data_type == 'low_bits_4_shifted_2':

        data = compression.get_SVD_s(transform.rgb_to_LAB_L_bits(
            block, (3, 6)))

    if data_type == 'sub_blocks_stats':

        block = np.asarray(block)
        width, height, _ = block.shape
        sub_width, sub_height = int(width / 4), int(height / 4)

        sub_blocks = segmentation.divide_in_blocks(block,
                                                   (sub_width, sub_height))

        data = []

        for sub_b in sub_blocks:

            # by default use the whole lab L canal
            l_svd_data = np.array(transform.get_LAB_L_SVD_s(sub_b))

            # get information we want from svd
            data.append(np.mean(l_svd_data))
            data.append(np.median(l_svd_data))
            data.append(np.percentile(l_svd_data, 25))
            data.append(np.percentile(l_svd_data, 75))
            data.append(np.var(l_svd_data))

            area_under_curve = utils.integral_area_trapz(l_svd_data, dx=100)
            data.append(area_under_curve)

        # convert into numpy array after computing all stats
        data = np.asarray(data)

    if data_type == 'sub_blocks_stats_reduced':

        block = np.asarray(block)
        width, height, _ = block.shape
        sub_width, sub_height = int(width / 4), int(height / 4)

        sub_blocks = segmentation.divide_in_blocks(block,
                                                   (sub_width, sub_height))

        data = []

        for sub_b in sub_blocks:

            # by default use the whole lab L canal
            l_svd_data = np.array(transform.get_LAB_L_SVD_s(sub_b))

            # get information we want from svd
            data.append(np.mean(l_svd_data))
            data.append(np.median(l_svd_data))
            data.append(np.percentile(l_svd_data, 25))
            data.append(np.percentile(l_svd_data, 75))
            data.append(np.var(l_svd_data))

        # convert into numpy array after computing all stats
        data = np.asarray(data)

    if data_type == 'sub_blocks_area':

        block = np.asarray(block)
        width, height, _ = block.shape
        sub_width, sub_height = int(width / 8), int(height / 8)

        sub_blocks = segmentation.divide_in_blocks(block,
                                                   (sub_width, sub_height))

        data = []

        for sub_b in sub_blocks:

            # by default use the whole lab L canal
            l_svd_data = np.array(transform.get_LAB_L_SVD_s(sub_b))

            area_under_curve = utils.integral_area_trapz(l_svd_data, dx=50)
            data.append(area_under_curve)

        # convert into numpy array after computing all stats
        data = np.asarray(data)

    if data_type == 'sub_blocks_area_normed':

        block = np.asarray(block)
        width, height, _ = block.shape
        sub_width, sub_height = int(width / 8), int(height / 8)

        sub_blocks = segmentation.divide_in_blocks(block,
                                                   (sub_width, sub_height))

        data = []

        for sub_b in sub_blocks:

            # by default use the whole lab L canal
            l_svd_data = np.array(transform.get_LAB_L_SVD_s(sub_b))
            l_svd_data = utils.normalize_arr(l_svd_data)

            area_under_curve = utils.integral_area_trapz(l_svd_data, dx=50)
            data.append(area_under_curve)

        # convert into numpy array after computing all stats
        data = np.asarray(data)

    if data_type == 'mscn_var_4':

        data = _get_mscn_variance(block, (100, 100))

    if data_type == 'mscn_var_16':

        data = _get_mscn_variance(block, (50, 50))

    if data_type == 'mscn_var_64':

        data = _get_mscn_variance(block, (25, 25))

    if data_type == 'mscn_var_16_max':

        data = _get_mscn_variance(block, (50, 50))
        data = np.asarray(data)
        size = int(len(data) / 4)
        indices = data.argsort()[-size:][::-1]
        data = data[indices]

    if data_type == 'mscn_var_64_max':

        data = _get_mscn_variance(block, (25, 25))
        data = np.asarray(data)
        size = int(len(data) / 4)
        indices = data.argsort()[-size:][::-1]
        data = data[indices]

    if data_type == 'ica_diff':
        current_image = transform.get_LAB_L(block)

        ica = FastICA(n_components=50)
        ica.fit(current_image)

        image_ica = ica.fit_transform(current_image)
        image_restored = ica.inverse_transform(image_ica)

        final_image = utils.normalize_2D_arr(image_restored)
        final_image = np.array(final_image * 255, 'uint8')

        sv_values = utils.normalize_arr(compression.get_SVD_s(current_image))
        ica_sv_values = utils.normalize_arr(compression.get_SVD_s(final_image))

        data = abs(np.array(sv_values) - np.array(ica_sv_values))

    if data_type == 'svd_trunc_diff':

        current_image = transform.get_LAB_L(block)

        svd = TruncatedSVD(n_components=30, n_iter=100, random_state=42)
        transformed_image = svd.fit_transform(current_image)
        restored_image = svd.inverse_transform(transformed_image)

        reduced_image = (current_image - restored_image)

        U, s, V = compression.get_SVD(reduced_image)
        data = s

    if data_type == 'ipca_diff':

        current_image = transform.get_LAB_L(block)

        transformer = IncrementalPCA(n_components=20, batch_size=25)
        transformed_image = transformer.fit_transform(current_image)
        restored_image = transformer.inverse_transform(transformed_image)

        reduced_image = (current_image - restored_image)

        U, s, V = compression.get_SVD(reduced_image)
        data = s

    if data_type == 'svd_reconstruct':

        reconstructed_interval = (90, 200)
        begin, end = reconstructed_interval

        lab_img = transform.get_LAB_L(block)
        lab_img = np.array(lab_img, 'uint8')

        U, s, V = lin_svd(lab_img, full_matrices=True)

        smat = np.zeros((end - begin, end - begin), dtype=complex)
        smat[:, :] = np.diag(s[begin:end])
        output_img = np.dot(U[:, begin:end], np.dot(smat, V[begin:end, :]))

        output_img = np.array(output_img, 'uint8')

        data = compression.get_SVD_s(output_img)

    if 'sv_std_filters' in data_type:

        # convert into lab by default to apply filters
        lab_img = transform.get_LAB_L(block)
        arr = np.array(lab_img)
        images = []

        # Apply list of filter on arr
        images.append(medfilt2d(arr, [3, 3]))
        images.append(medfilt2d(arr, [5, 5]))
        images.append(wiener(arr, [3, 3]))
        images.append(wiener(arr, [5, 5]))

        # By default computation of current block image
        s_arr = compression.get_SVD_s(arr)
        sv_vector = [s_arr]

        # for each new image apply SVD and get SV
        for img in images:
            s = compression.get_SVD_s(img)
            sv_vector.append(s)

        sv_array = np.array(sv_vector)

        _, length = sv_array.shape

        sv_std = []

        # normalize each SV vectors and compute standard deviation for each sub vectors
        for i in range(length):
            sv_array[:, i] = utils.normalize_arr(sv_array[:, i])
            sv_std.append(np.std(sv_array[:, i]))

        indices = []

        if 'lowest' in data_type:
            indices = utils.get_indices_of_lowest_values(sv_std, 200)

        if 'highest' in data_type:
            indices = utils.get_indices_of_highest_values(sv_std, 200)

        # data are arranged following std trend computed
        data = s_arr[indices]

    # with the use of wavelet
    if 'wave_sv_std_filters' in data_type:

        # convert into lab by default to apply filters
        lab_img = transform.get_LAB_L(block)
        arr = np.array(lab_img)
        images = []

        # Apply list of filter on arr
        images.append(medfilt2d(arr, [3, 3]))

        # By default computation of current block image
        s_arr = compression.get_SVD_s(arr)
        sv_vector = [s_arr]

        # for each new image apply SVD and get SV
        for img in images:
            s = compression.get_SVD_s(img)
            sv_vector.append(s)

        sv_array = np.array(sv_vector)

        _, length = sv_array.shape

        sv_std = []

        # normalize each SV vectors and compute standard deviation for each sub vectors
        for i in range(length):
            sv_array[:, i] = utils.normalize_arr(sv_array[:, i])
            sv_std.append(np.std(sv_array[:, i]))

        indices = []

        if 'lowest' in data_type:
            indices = utils.get_indices_of_lowest_values(sv_std, 200)

        if 'highest' in data_type:
            indices = utils.get_indices_of_highest_values(sv_std, 200)

        # data are arranged following std trend computed
        data = s_arr[indices]

    # with the use of wavelet
    if 'sv_std_filters_full' in data_type:

        # convert into lab by default to apply filters
        lab_img = transform.get_LAB_L(block)
        arr = np.array(lab_img)
        images = []

        # Apply list of filter on arr
        kernel = np.ones((3, 3), np.float32) / 9
        images.append(cv2.filter2D(arr, -1, kernel))

        kernel = np.ones((5, 5), np.float32) / 25
        images.append(cv2.filter2D(arr, -1, kernel))

        images.append(cv2.GaussianBlur(arr, (3, 3), 0.5))

        images.append(cv2.GaussianBlur(arr, (3, 3), 1))

        images.append(cv2.GaussianBlur(arr, (3, 3), 1.5))

        images.append(cv2.GaussianBlur(arr, (5, 5), 0.5))

        images.append(cv2.GaussianBlur(arr, (5, 5), 1))

        images.append(cv2.GaussianBlur(arr, (5, 5), 1.5))

        images.append(medfilt2d(arr, [3, 3]))

        images.append(medfilt2d(arr, [5, 5]))

        images.append(wiener(arr, [3, 3]))

        images.append(wiener(arr, [5, 5]))

        wave = w2d(arr, 'db1', 2)
        images.append(np.array(wave, 'float64'))

        # By default computation of current block image
        s_arr = compression.get_SVD_s(arr)
        sv_vector = [s_arr]

        # for each new image apply SVD and get SV
        for img in images:
            s = compression.get_SVD_s(img)
            sv_vector.append(s)

        sv_array = np.array(sv_vector)

        _, length = sv_array.shape

        sv_std = []

        # normalize each SV vectors and compute standard deviation for each sub vectors
        for i in range(length):
            sv_array[:, i] = utils.normalize_arr(sv_array[:, i])
            sv_std.append(np.std(sv_array[:, i]))

        indices = []

        if 'lowest' in data_type:
            indices = utils.get_indices_of_lowest_values(sv_std, 200)

        if 'highest' in data_type:
            indices = utils.get_indices_of_highest_values(sv_std, 200)

        # data are arranged following std trend computed
        data = s_arr[indices]

    if 'sv_entropy_std_filters' in data_type:

        lab_img = transform.get_LAB_L(block)
        arr = np.array(lab_img)

        images = []

        kernel = np.ones((3, 3), np.float32) / 9
        images.append(cv2.filter2D(arr, -1, kernel))

        kernel = np.ones((5, 5), np.float32) / 25
        images.append(cv2.filter2D(arr, -1, kernel))

        images.append(cv2.GaussianBlur(arr, (3, 3), 0.5))

        images.append(cv2.GaussianBlur(arr, (3, 3), 1))

        images.append(cv2.GaussianBlur(arr, (3, 3), 1.5))

        images.append(cv2.GaussianBlur(arr, (5, 5), 0.5))

        images.append(cv2.GaussianBlur(arr, (5, 5), 1))

        images.append(cv2.GaussianBlur(arr, (5, 5), 1.5))

        images.append(medfilt2d(arr, [3, 3]))

        images.append(medfilt2d(arr, [5, 5]))

        images.append(wiener(arr, [3, 3]))

        images.append(wiener(arr, [5, 5]))

        wave = w2d(arr, 'db1', 2)
        images.append(np.array(wave, 'float64'))

        sv_vector = []
        sv_entropy_list = []

        # for each new image apply SVD and get SV
        for img in images:
            s = compression.get_SVD_s(img)
            sv_vector.append(s)

            sv_entropy = [
                utils.get_entropy_contribution_of_i(s, id_sv)
                for id_sv, sv in enumerate(s)
            ]
            sv_entropy_list.append(sv_entropy)

        sv_std = []

        sv_array = np.array(sv_vector)
        _, length = sv_array.shape

        # normalize each SV vectors and compute standard deviation for each sub vectors
        for i in range(length):
            sv_array[:, i] = utils.normalize_arr(sv_array[:, i])
            sv_std.append(np.std(sv_array[:, i]))

        indices = []

        if 'lowest' in data_type:
            indices = utils.get_indices_of_lowest_values(sv_std, 200)

        if 'highest' in data_type:
            indices = utils.get_indices_of_highest_values(sv_std, 200)

        # data are arranged following std trend computed
        s_arr = compression.get_SVD_s(arr)
        data = s_arr[indices]

    if 'convolutional_kernels' in data_type:

        sub_zones = segmentation.divide_in_blocks(block, (20, 20))

        data = []

        diff_std_list_3 = []
        diff_std_list_5 = []
        diff_mean_list_3 = []
        diff_mean_list_5 = []

        plane_std_list_3 = []
        plane_std_list_5 = []
        plane_mean_list_3 = []
        plane_mean_list_5 = []

        plane_max_std_list_3 = []
        plane_max_std_list_5 = []
        plane_max_mean_list_3 = []
        plane_max_mean_list_5 = []

        for sub_zone in sub_zones:
            l_img = transform.get_LAB_L(sub_zone)
            normed_l_img = utils.normalize_2D_arr(l_img)

            # bilateral with window of size (3, 3)
            normed_diff = convolution.convolution2D(normed_l_img,
                                                    kernels.min_bilateral_diff,
                                                    (3, 3))
            std_diff = np.std(normed_diff)
            mean_diff = np.mean(normed_diff)

            diff_std_list_3.append(std_diff)
            diff_mean_list_3.append(mean_diff)

            # bilateral with window of size (5, 5)
            normed_diff = convolution.convolution2D(normed_l_img,
                                                    kernels.min_bilateral_diff,
                                                    (5, 5))
            std_diff = np.std(normed_diff)
            mean_diff = np.mean(normed_diff)

            diff_std_list_5.append(std_diff)
            diff_mean_list_5.append(mean_diff)

            # plane mean with window of size (3, 3)
            normed_plane_mean = convolution.convolution2D(
                normed_l_img, kernels.plane_mean, (3, 3))
            std_plane_mean = np.std(normed_plane_mean)
            mean_plane_mean = np.mean(normed_plane_mean)

            plane_std_list_3.append(std_plane_mean)
            plane_mean_list_3.append(mean_plane_mean)

            # plane mean with window of size (5, 5)
            normed_plane_mean = convolution.convolution2D(
                normed_l_img, kernels.plane_mean, (5, 5))
            std_plane_mean = np.std(normed_plane_mean)
            mean_plane_mean = np.mean(normed_plane_mean)

            plane_std_list_5.append(std_plane_mean)
            plane_mean_list_5.append(mean_plane_mean)

            # plane max error with window of size (3, 3)
            normed_plane_max = convolution.convolution2D(
                normed_l_img, kernels.plane_max_error, (3, 3))
            std_plane_max = np.std(normed_plane_max)
            mean_plane_max = np.mean(normed_plane_max)

            plane_max_std_list_3.append(std_plane_max)
            plane_max_mean_list_3.append(mean_plane_max)

            # plane max error with window of size (5, 5)
            normed_plane_max = convolution.convolution2D(
                normed_l_img, kernels.plane_max_error, (5, 5))
            std_plane_max = np.std(normed_plane_max)
            mean_plane_max = np.mean(normed_plane_max)

            plane_max_std_list_5.append(std_plane_max)
            plane_max_mean_list_5.append(mean_plane_max)

        diff_std_list_3 = np.array(diff_std_list_3)
        diff_std_list_5 = np.array(diff_std_list_5)

        diff_mean_list_3 = np.array(diff_mean_list_3)
        diff_mean_list_5 = np.array(diff_mean_list_5)

        plane_std_list_3 = np.array(plane_std_list_3)
        plane_std_list_5 = np.array(plane_std_list_5)

        plane_mean_list_3 = np.array(plane_mean_list_3)
        plane_mean_list_5 = np.array(plane_mean_list_5)

        plane_max_std_list_3 = np.array(plane_max_std_list_3)
        plane_max_std_list_5 = np.array(plane_max_std_list_5)

        plane_max_mean_list_3 = np.array(plane_max_mean_list_3)
        plane_max_mean_list_5 = np.array(plane_max_mean_list_5)

        if 'std_max_blocks' in data_type:

            data.append(np.std(diff_std_list_3[0:int(len(sub_zones) / 5)]))
            data.append(np.std(diff_mean_list_3[0:int(len(sub_zones) / 5)]))
            data.append(np.std(diff_std_list_5[0:int(len(sub_zones) / 5)]))
            data.append(np.std(diff_mean_list_5[0:int(len(sub_zones) / 5)]))

            data.append(np.std(plane_std_list_3[0:int(len(sub_zones) / 5)]))
            data.append(np.std(plane_mean_list_3[0:int(len(sub_zones) / 5)]))
            data.append(np.std(plane_std_list_5[0:int(len(sub_zones) / 5)]))
            data.append(np.std(plane_mean_list_5[0:int(len(sub_zones) / 5)]))

            data.append(np.std(plane_max_std_list_3[0:int(len(sub_zones) /
                                                          5)]))
            data.append(
                np.std(plane_max_mean_list_3[0:int(len(sub_zones) / 5)]))
            data.append(np.std(plane_max_std_list_5[0:int(len(sub_zones) /
                                                          5)]))
            data.append(
                np.std(plane_max_mean_list_5[0:int(len(sub_zones) / 5)]))

        if 'mean_max_blocks' in data_type:

            data.append(np.mean(diff_std_list_3[0:int(len(sub_zones) / 5)]))
            data.append(np.mean(diff_mean_list_3[0:int(len(sub_zones) / 5)]))
            data.append(np.mean(diff_std_list_5[0:int(len(sub_zones) / 5)]))
            data.append(np.mean(diff_mean_list_5[0:int(len(sub_zones) / 5)]))

            data.append(np.mean(plane_std_list_3[0:int(len(sub_zones) / 5)]))
            data.append(np.mean(plane_mean_list_3[0:int(len(sub_zones) / 5)]))
            data.append(np.mean(plane_std_list_5[0:int(len(sub_zones) / 5)]))
            data.append(np.mean(plane_mean_list_5[0:int(len(sub_zones) / 5)]))

            data.append(
                np.mean(plane_max_std_list_3[0:int(len(sub_zones) / 5)]))
            data.append(
                np.mean(plane_max_mean_list_3[0:int(len(sub_zones) / 5)]))
            data.append(
                np.mean(plane_max_std_list_5[0:int(len(sub_zones) / 5)]))
            data.append(
                np.mean(plane_max_mean_list_5[0:int(len(sub_zones) / 5)]))

        if 'std_normed' in data_type:

            data.append(np.std(diff_std_list_3))
            data.append(np.std(diff_mean_list_3))
            data.append(np.std(diff_std_list_5))
            data.append(np.std(diff_mean_list_5))

            data.append(np.std(plane_std_list_3))
            data.append(np.std(plane_mean_list_3))
            data.append(np.std(plane_std_list_5))
            data.append(np.std(plane_mean_list_5))

            data.append(np.std(plane_max_std_list_3))
            data.append(np.std(plane_max_mean_list_3))
            data.append(np.std(plane_max_std_list_5))
            data.append(np.std(plane_max_mean_list_5))

        if 'mean_normed' in data_type:

            data.append(np.mean(diff_std_list_3))
            data.append(np.mean(diff_mean_list_3))
            data.append(np.mean(diff_std_list_5))
            data.append(np.mean(diff_mean_list_5))

            data.append(np.mean(plane_std_list_3))
            data.append(np.mean(plane_mean_list_3))
            data.append(np.mean(plane_std_list_5))
            data.append(np.mean(plane_mean_list_5))

            data.append(np.mean(plane_max_std_list_3))
            data.append(np.mean(plane_max_mean_list_3))
            data.append(np.mean(plane_max_std_list_5))
            data.append(np.mean(plane_max_mean_list_5))

        data = np.array(data)

    if data_type == 'convolutional_kernel_stats_svd':

        l_img = transform.get_LAB_L(block)
        normed_l_img = utils.normalize_2D_arr(l_img)

        # bilateral with window of size (5, 5)
        normed_diff = convolution.convolution2D(normed_l_img,
                                                kernels.min_bilateral_diff,
                                                (5, 5))

        # getting sigma vector from SVD compression
        s = compression.get_SVD_s(normed_diff)

        data = s

    if data_type == 'svd_entropy':
        l_img = transform.get_LAB_L(block)

        blocks = segmentation.divide_in_blocks(l_img, (20, 20))

        values = []
        for b in blocks:
            sv = compression.get_SVD_s(b)
            values.append(utils.get_entropy(sv))
        data = np.array(values)

    if data_type == 'svd_entropy_20':
        l_img = transform.get_LAB_L(block)

        blocks = segmentation.divide_in_blocks(l_img, (20, 20))

        values = []
        for b in blocks:
            sv = compression.get_SVD_s(b)
            values.append(utils.get_entropy(sv))
        data = np.array(values)

    if data_type == 'svd_entropy_noise_20':
        l_img = transform.get_LAB_L(block)

        blocks = segmentation.divide_in_blocks(l_img, (20, 20))

        values = []
        for b in blocks:
            sv = compression.get_SVD_s(b)
            sv_size = len(sv)
            values.append(utils.get_entropy(sv[int(sv_size / 4):]))
        data = np.array(values)

    return data
Beispiel #12
0
def display_data_scenes(data_type, p_scene, p_kind):
    """
    @brief Method which displays data from scene
    @param data_type,  feature choice
    @param scene, scene choice
    @param mode, normalization choice
    @return nothing
    """

    scenes = os.listdir(path)
    # remove min max file from scenes folder
    scenes = [s for s in scenes if min_max_filename not in s]

    # go ahead each scenes
    for folder_scene in scenes:

        if p_scene == folder_scene:
            print(folder_scene)
            scene_path = os.path.join(path, folder_scene)

            # construct each zones folder name
            zones_folder = []

            # get zones list info
            for index in zones:
                index_str = str(index)
                if len(index_str) < 2:
                    index_str = "0" + index_str

                current_zone = "zone" + index_str
                zones_folder.append(current_zone)

            zones_images_data = []
            threshold_info = []

            # get all images of folder
            scene_images = sorted([
                os.path.join(scene_path, img) for img in os.listdir(scene_path)
                if cfg.scene_image_extension in img
            ])

            start_image_path = scene_images[0]
            end_image_path = scene_images[-1]

            start_quality_image = dt.get_scene_image_quality(scene_images[0])
            end_quality_image = dt.get_scene_image_quality(scene_images[-1])

            for id_zone, zone_folder in enumerate(zones_folder):

                zone_path = os.path.join(scene_path, zone_folder)

                # get threshold information
                path_seuil = os.path.join(zone_path, seuil_expe_filename)

                # open treshold path and get this information
                with open(path_seuil, "r") as seuil_file:
                    threshold_learned = int(seuil_file.readline().strip())

                threshold_image_found = False

                for img_path in scene_images:
                    current_quality_image = dt.get_scene_image_quality(
                        img_path)

                    if threshold_learned < int(current_quality_image
                                               ) and not threshold_image_found:

                        threshold_image_found = True
                        threshold_image_path = img_path

                        threshold_image = dt.get_scene_image_postfix(img_path)
                        threshold_info.append(threshold_image)

                # all indexes of picture to plot
                images_path = [
                    start_image_path, threshold_image_path, end_image_path
                ]
                images_data = []

                for img_path in images_path:

                    current_img = Image.open(img_path)
                    img_blocks = segmentation.divide_in_blocks(
                        current_img, (200, 200))

                    # getting expected block id
                    block = img_blocks[id_zone]

                    data = get_image_features(data_type, block)

                    ##################
                    # Data mode part #
                    ##################

                    # modify data depending mode

                    if p_kind == 'svdn':
                        data = utils.normalize_arr(data)

                    if p_kind == 'svdne':
                        path_min_max = os.path.join(
                            path, data_type + min_max_filename)

                        with open(path_min_max, 'r') as f:
                            min_val = float(f.readline())
                            max_val = float(f.readline())

                        data = utils.normalize_arr_with_range(
                            data, min_val, max_val)

                    # append of data
                    images_data.append(data)

                zones_images_data.append(images_data)

            fig = plt.figure(figsize=(8, 8))
            fig.suptitle(data_type + " values for " + p_scene +
                         " scene (normalization : " + p_kind + ")",
                         fontsize=20)

            for id, data in enumerate(zones_images_data):
                fig.add_subplot(4, 4, (id + 1))
                plt.plot(data[0], label='Noisy_' + start_quality_image)
                plt.plot(data[1], label='Threshold_' + threshold_info[id])
                plt.plot(data[2], label='Reference_' + end_quality_image)
                plt.ylabel(data_type + ' SVD, ZONE_' + str(id + 1),
                           fontsize=18)
                plt.xlabel('Vector features', fontsize=18)
                plt.legend(bbox_to_anchor=(0.5, 1),
                           loc=2,
                           borderaxespad=0.2,
                           fontsize=18)
                plt.ylim(0, 0.1)
            plt.show()
def display_svd_values(p_scene, p_interval, p_indices, p_feature, p_mode,
                       p_step, p_norm, p_error, p_ylim):
    """
    @brief Method which gives information about svd curves from zone of picture
    @param p_scene, scene expected to show svd values
    @param p_interval, interval [begin, end] of svd data to display
    @param p_interval, interval [begin, end] of samples or minutes from render generation engine
    @param p_feature, feature computed to show
    @param p_mode, normalization's mode
    @param p_norm, normalization or not of selected svd data
    @param p_error, error feature used to display
    @param p_ylim, ylim choice to better display of data
    @return nothing
    """

    max_value_svd = 0
    min_value_svd = sys.maxsize

    scenes = os.listdir(path)
    # remove min max file from scenes folder
    scenes = [s for s in scenes if min_max_filename not in s]

    begin_data, end_data = p_interval
    begin_index, end_index = p_indices

    # go ahead each scenes
    for folder_scene in scenes:

        if p_scene == folder_scene:
            scene_path = os.path.join(path, folder_scene)

            # construct each zones folder name
            zones_folder = []

            # get zones list info
            for index in zones:
                index_str = str(index)
                if len(index_str) < 2:
                    index_str = "0" + index_str

                current_zone = "zone" + index_str
                zones_folder.append(current_zone)

            images_data = []
            images_path = []

            threshold_learned_zones = []

            # get all images of folder
            scene_images = sorted([
                os.path.join(scene_path, img) for img in os.listdir(scene_path)
                if cfg.scene_image_extension in img
            ])
            number_scene_image = len(scene_images)

            for id, zone_folder in enumerate(zones_folder):

                # get threshold information

                zone_path = os.path.join(scene_path, zone_folder)
                path_seuil = os.path.join(zone_path, seuil_expe_filename)

                # open treshold path and get this information
                with open(path_seuil, "r") as seuil_file:
                    threshold_learned = int(seuil_file.readline().strip())
                    threshold_learned_zones.append(threshold_learned)

            threshold_mean = np.mean(np.asarray(threshold_learned_zones))
            threshold_image_found = False

            svd_data = []

            # for each images
            for id_img, img_path in enumerate(scene_images):

                current_quality_image = dt.get_scene_image_quality(img_path)

                img = Image.open(img_path)

                svd_values = get_image_features(p_feature, img)

                if p_norm:
                    svd_values = svd_values[begin_data:end_data]

                # update min max values
                min_value = svd_values.min()
                max_value = svd_values.max()

                if min_value < min_value_svd:
                    min_value_svd = min_value

                if max_value > min_value_svd:
                    max_value_svd = max_value

                # keep in memory used data
                if current_quality_image % p_step == 0:
                    if current_quality_image >= begin_index and current_quality_image <= end_index:
                        images_path.append(img_path)
                        svd_data.append(svd_values)

                    if threshold_mean < current_quality_image and not threshold_image_found:

                        threshold_image_found = True
                        threshold_image_zone = dt.get_scene_image_postfix(
                            img_path)

                print('%.2f%%' % ((id_img + 1) / number_scene_image * 100))
                sys.stdout.write("\033[F")

            previous_data = []
            error_data = [0.]

            for id, data in enumerate(svd_data):

                current_data = data

                if not p_norm:
                    current_data = current_data[begin_data:end_data]

                if p_mode == 'svdn':
                    current_data = utils.normalize_arr(current_data)

                if p_mode == 'svdne':
                    current_data = utils.normalize_arr_with_range(
                        current_data, min_value_svd, max_value_svd)

                images_data.append(current_data)

                # use of whole image data for computation of ssim or psnr
                if p_error == 'ssim' or p_error == 'psnr':
                    current_data = np.asarray(Image.open(images_path[id]))

                if len(previous_data) > 0:

                    current_error = get_error_distance(p_error, previous_data,
                                                       current_data)
                    error_data.append(current_error)

                if len(previous_data) == 0:
                    previous_data = current_data

            # display all data using matplotlib (configure plt)
            gridsize = (3, 2)

            # fig, (ax1, ax2) = plt.subplots(nrows=2, ncols=1, figsize=(30, 22))
            fig = plt.figure(figsize=(30, 22))
            ax1 = plt.subplot2grid(gridsize, (0, 0), colspan=2, rowspan=2)
            ax2 = plt.subplot2grid(gridsize, (2, 0), colspan=2)

            ax1.set_title(p_scene + ' scene interval information SVD[' +
                          str(begin_data) + ', ' + str(end_data) +
                          '], from scenes indices [' + str(begin_index) +
                          ', ' + str(end_index) + '], ' + p_feature +
                          ' feature, ' + p_mode + ', with step of ' +
                          str(p_step) + ', svd norm ' + str(p_norm),
                          fontsize=20)
            ax1.set_ylabel('Image samples or time (minutes) generation',
                           fontsize=14)
            ax1.set_xlabel('Vector features', fontsize=16)

            for id, data in enumerate(images_data):

                current_quality_image = dt.get_scene_image_quality(
                    images_path[id])
                current_quality_postfix = dt.get_scene_image_postfix(
                    images_path[id])

                if display_error:
                    p_label = p_scene + '_' + current_quality_postfix + " | " + p_error + ": " + str(
                        error_data[id])
                else:
                    p_label = p_scene + '_' + current_quality_postfix

                if current_quality_image == threshold_image_zone:
                    ax1.plot(data,
                             label=p_label + " (threshold mean)",
                             lw=4,
                             color='red')
                else:
                    ax1.plot(data, label=p_label)

            ax1.legend(bbox_to_anchor=(0.7, 1),
                       loc=2,
                       borderaxespad=0.2,
                       fontsize=14)

            start_ylim, end_ylim = p_ylim
            ax1.set_ylim(start_ylim, end_ylim)

            ax2.set_title(p_error + " information for whole step images")
            ax2.set_ylabel(p_error + ' error')
            ax2.set_xlabel('Number of samples per pixels or times')
            ax2.set_xticks(range(len(current_quality_image)))
            ax2.set_xticklabels(
                list(map(dt.get_scene_image_quality, current_quality_image)))
            ax2.plot(error_data)

            plot_name = p_scene + '_' + p_feature + '_' + str(
                p_step) + '_' + p_mode + '_' + str(p_norm) + '.png'
            plt.savefig(plot_name)
Beispiel #14
0
def main():

    parser = argparse.ArgumentParser(
        description="Display svd of images with noise level")

    parser.add_argument(
        '--prefix',
        type=str,
        help='Generated noise folder prefix (ex: `generated/prefix/noise`)')
    parser.add_argument('--mode',
                        type=str,
                        help='Kind of normalization',
                        default=normalization_choices)
    parser.add_argument('--feature',
                        type=str,
                        help='feature choice',
                        default=feature_choices)
    parser.add_argument('--n', type=int, help='Number of images')
    parser.add_argument('--color',
                        type=int,
                        help='Use of color or grey level',
                        default=0)
    parser.add_argument(
        '--norm',
        type=int,
        help='Use of normalization from interval or whole data vector',
        default=0)
    parser.add_argument('--interval',
                        type=str,
                        help='Interval data choice (ex: `0, 200`)',
                        default="0, 200")
    parser.add_argument('--step',
                        type=int,
                        help='Step of image indices to keep',
                        default=1)
    parser.add_argument('--ylim',
                        type=str,
                        help='Limite to display data (ex: `0, 1`)',
                        default="0, 1")

    args = parser.parse_args()

    param_prefix = args.prefix
    param_mode = args.mode
    param_feature = args.feature
    param_n = args.n
    param_color = args.color
    param_norm = args.norm
    param_interval = list(map(int, args.interval.split(',')))
    param_step = args.step
    param_ylim = list(map(float, args.ylim.split(',')))

    param_prefix = param_prefix.split('/')[1].replace('_', '')
    noise_name = param_prefix.split('/')[2]

    if param_color:
        file_path = param_prefix + "/" + param_prefix + "_" + noise_name + "_color_{}." + filename_ext
    else:
        file_path = param_prefix + "/" + param_prefix + "_" + noise_name + "_{}." + filename_ext

    begin, end = param_interval
    all_svd_data = []

    svd_data = []
    image_indices = []

    # get all data from images
    for i in range(1, param_n):

        if i % steparam_picture == 0:

            image_path = file_path.format(str(i))
            img = Image.open(image_path)

            svd_values = get_image_features(param_feature, img)

            if param_norm:
                svd_values = svd_values[begin:end]

            all_svd_data.append(svd_values)

            # update min max values
            min_value = svd_values.min()
            max_value = svd_values.max()

            if min_value < min_value_svd:
                min_value_svd = min_value

            if max_value > max_value_svd:
                max_value_svd = max_value

            print('%.2f%%' % ((i + 1) / param_n * 100))
            sys.stdout.write("\033[F")

    for id, data in enumerate(all_svd_data):

        if (id * steparam_picture) % param_step == 0:

            current_data = data
            if param_mode == 'svdn':
                current_data = utils.normalize_arr(current_data)

            if param_mode == 'svdne':
                current_data = utils.normalize_arr_with_range(
                    current_data, min_value_svd, max_value_svd)

            svd_data.append(current_data)
            image_indices.append(str(id * steparam_picture))

    # display all data using matplotlib (configure plt)

    plt.rcParams['figure.figsize'] = (25, 18)

    plt.title(param_prefix + ' noise, interval information [' + str(begin) +
              ', ' + str(end) + '], ' + param_feature + ' feature, step ' +
              str(param_step) + ' normalization ' + param_mode,
              fontsize=20)
    plt.ylabel('Importance of noise [1, 999]', fontsize=14)
    plt.xlabel('Vector features', fontsize=16)

    for id, data in enumerate(svd_data):

        param_label = param_prefix + str(image_indices[id])
        plt.plot(data, label=param_label)

    plt.legend(bbox_to_anchor=(0.8, 1), loc=2, borderaxespad=0.2, fontsize=14)

    if not param_norm:
        plt.xlim(begin, end)

    # adapt ylim
    y_begin, y_end = param_ylim
    plt.ylim(y_begin, y_end)

    output_filename = param_prefix + "_" + noise_name + "_1_to_" + str(
        param_n) + "_B" + str(begin) + "_E" + str(
            end) + "_" + param_feature + "_S" + str(
                param_step) + "_norm" + str(param_norm) + "_" + param_mode

    if param_color:
        output_filename = output_filename + '_color'

    print("Generation of output figure... %s" % output_filename)
    output_path = os.path.join(pictures_folder, output_filename)

    if not os.path.exists(pictures_folder):
        os.makedirs(pictures_folder)

    plt.savefig(output_path, dpi=(200))
def generate_data_svd(data_type, color, mode):
    """
    @brief Method which generates all .csv files from scenes
    @param data_type,  feature choice
    @param mode, normalization choice
    @return nothing
    """

    scenes = os.listdir(path)

    # filter scene
    scenes = [s for s in scenes if calibration_folder not in s]

    # remove min max file from scenes folder
    scenes = [s for s in scenes if min_max_filename not in s]

    # keep in memory min and max data found from data_type
    min_val_found = sys.maxsize
    max_val_found = 0

    data_min_max_filename = os.path.join(path, data_type + min_max_filename)

    # go ahead each scenes
    for id_scene, folder_scene in enumerate(scenes):

        print(folder_scene)
        scene_path = os.path.join(path, folder_scene)

        for noise in noise_choices:

            noise_path = os.path.join(scene_path, noise)

            # getting output filename
            if color:
                output_svd_filename = data_type + "_color_" + mode + generic_output_file_svd
            else:
                output_svd_filename = data_type + "_" + mode + generic_output_file_svd

            # construct each zones folder name
            zones_folder = []
            svd_output_files = []

            # get zones list info
            for index in zones:
                index_str = str(index)
                if len(index_str) < 2:
                    index_str = "0" + index_str

                current_zone = "zone"+index_str
                zones_folder.append(current_zone)

                zone_path = os.path.join(noise_path, current_zone)

                if not os.path.exists(zone_path):
                    os.makedirs(zone_path)

                svd_file_path = os.path.join(zone_path, output_svd_filename)

                # add writer into list
                svd_output_files.append(open(svd_file_path, 'w'))

            counter_index = 1

            while(counter_index < end_counter_index):

                if counter_index % picture_step == 0:
                    counter_index_str = str(counter_index)

                    if color:
                        img_path = os.path.join(noise_path, folder_scene + "_" + noise + "_color_" + counter_index_str + ".png")
                    else:
                        img_path = os.path.join(noise_path, folder_scene + "_" + noise + "_" + counter_index_str + ".png")

                    current_img = Image.open(img_path)
                    img_blocks = divide_in_blocks(current_img, (200, 200))

                    for id_block, block in enumerate(img_blocks):

                        ###########################
                        # feature computation part #
                        ###########################

                        data = get_image_features(data_type, block)

                        ##################
                        # Data mode part #
                        ##################

                        # modify data depending mode
                        if mode == 'svdne':

                            # getting max and min information from min_max_filename
                            with open(data_min_max_filename, 'r') as f:
                                min_val = float(f.readline())
                                max_val = float(f.readline())

                            data = utils.normalize_arr_with_range(data, min_val, max_val)

                        if mode == 'svdn':
                            data = utils.normalize_arr(data)

                        # save min and max found from dataset in order to normalize data using whole data known
                        if mode == 'svd':

                            current_min = data.min()
                            current_max = data.max()

                            if current_min < min_val_found:
                                min_val_found = current_min

                            if current_max > max_val_found:
                                max_val_found = current_max

                        # now write data into current writer
                        current_file = svd_output_files[id_block]

                        # add of index
                        current_file.write(counter_index_str + ';')

                        for val in data:
                            current_file.write(str(val) + ";")

                        current_file.write('\n')

                if color:
                    print(data_type + "_" + noise + "_color_" + mode + "_" + folder_scene + " - " + "{0:.2f}".format((counter_index) / (end_counter_index)* 100.) + "%")
                else:
                    print(data_type + "_" + noise + "_"+ mode + "_" + folder_scene + " - " + "{0:.2f}".format((counter_index) / (end_counter_index)* 100.) + "%")

                sys.stdout.write("\033[F")

                counter_index += 1

            for f in svd_output_files:
                f.close()

            if color:
                print(data_type + "_" + noise + "_color_" + mode + "_" + folder_scene + " - " + "Done...")
            else:
                print(data_type + "_" + noise + "_"+ mode + "_" + folder_scene + " - " + "Done...")


    # save current information about min file found
    if mode == 'svd':
        with open(data_min_max_filename, 'w') as f:
            f.write(str(min_val_found) + '\n')
            f.write(str(max_val_found) + '\n')

    print("%s : end of data generation\n" % mode)
def display_estimated_thresholds(scene,
                                 displayed_data,
                                 info,
                                 metric,
                                 norm=True):

    colors = ['C0', 'C1', 'C2', 'C3', 'C4']

    #plt.figure(figsize=(25, 20))
    plt.rc('xtick', labelsize=16)  # fontsize of the tick labels
    plt.rc('ytick', labelsize=16)  # fontsize of the tick labels

    fig, axs = plt.subplots(4, 4, figsize=(25, 20))
    fig.suptitle('Complexity value evolution in scene' + scene + " | " + info,
                 fontsize=24)

    for i, ax in enumerate(axs.flat):

        complexity_values = get_zone_diff_entropy(displayed_data[i]['data'])
        error_values = displayed_data[i]['errors']
        human_threshold = int(displayed_data[i]['human_threshold'])

        if norm:
            complexity_values = utils.normalize_arr(complexity_values)
            error_values = utils.normalize_arr(error_values)

        # TODO : normalize step by step
        complexity_values_steps_normed = []
        error_values_steps_normed = []

        for j, _ in enumerate(complexity_values):
            if j > 0:
                # normalize and get last values
                complexity_normed_value = utils.normalize_arr(
                    complexity_values[:j + 1])[-1]
                error_normed_value = utils.normalize_arr(error_values[:j +
                                                                      1])[-1]
            else:
                complexity_normed_value = 0.5  #complexity_values[0]
                error_normed_value = 0.5  #error_values[0]

            complexity_values_steps_normed.append(complexity_normed_value)
            error_values_steps_normed.append(error_normed_value)

        # TODO : current test
        complexity_values = complexity_values_steps_normed
        error_values = error_values_steps_normed

        # display evolution curve and error curve for each zone
        ax.set_title(displayed_data[i]['zone'])

        ax.plot(complexity_values_steps_normed,
                color=colors[0],
                label='complexity values')

        ax.plot(error_values_steps_normed,
                color=colors[1],
                label=metric + ' error')

        # get max `y` value
        max_y = 0

        max_complexity = max(complexity_values)
        max_error = max(error_values)
        max_found = max(max_error, max_complexity)

        if max_found > max_y:
            max_y = max_found

        # image indices for each step
        image_indices = displayed_data[i]['steps'][
            1:]  # drop the first step so indices align with the diff-based complexity values

        index_threshold = 0
        while image_indices[index_threshold] < human_threshold:
            index_threshold = index_threshold + 1

        ax.plot([index_threshold, index_threshold], [max_y, 0],
                'k-',
                lw=2,
                color=colors[2])

        # set each labels
        x_labels = [
            str(label) for label in image_indices
            if int(label) % display_xticks_step == 0
        ]
        x = [
            i for i, v in enumerate(image_indices)
            if int(v) % display_xticks_step == 0
        ]
        ax.set_xticks(x)
        ax.set_xticklabels(x_labels, rotation=45)
        ax.legend(fontsize=16)

    for ax in axs.flat:
        ax.label_outer()

    #fig.tight_layout()
    plt.show()
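
# `get_zone_diff_entropy` is defined elsewhere in the project. Its call sites
# state that it removes one element (the first one, used to compute the first
# difference) and that `std` weights the result by the running standard
# deviation. A hedged sketch consistent with those comments; the exact formula
# is an assumption:
import numpy as np

def get_zone_diff_entropy_sketch(entropy_list, std=False):
    values = list(map(float, entropy_list))
    diff_list = []
    for i in range(1, len(values)):
        diff = abs(values[i] - values[i - 1])
        if std:
            # weight by the std of the values seen so far (assumption)
            diff *= np.std(values[:i + 1])
        diff_list.append(diff)
    return diff_list
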
Example #17
def main():

    parser = argparse.ArgumentParser(description="Display threshold svd data")

    parser.add_argument(
        '--prefix',
        type=str,
        help='Generated noise folder prefix (ex: `generated/prefix/noise`)')
    parser.add_argument('--file', type=str, help='Threshold file to use')
    parser.add_argument('--mode',
                        type=str,
                        help='Kind of normalization',
                        choices=normalization_choices)
    parser.add_argument('--feature',
                        type=str,
                        help='feature choice',
                        choices=feature_choices)
    parser.add_argument('--n', type=int, help='Number of images')
    parser.add_argument('--color',
                        type=int,
                        help='Use of color or grey level',
                        default=0)
    parser.add_argument(
        '--norm',
        type=int,
        help='Use of normalization from interval or whole data vector',
        default=0)
    parser.add_argument('--interval',
                        type=str,
                        help='Interval data choice (ex: `0, 200`)',
                        default="0, 200")
    parser.add_argument('--step',
                        type=int,
                        help='Step of image indices to keep',
                        default=1)
    parser.add_argument('--ylim',
                        type=str,
                        help='Limit to display data (ex: `0, 1`)',
                        default="0, 1")

    args = parser.parse_args()

    param_prefix = args.prefix
    param_file = args.file
    param_mode = args.mode
    param_feature = args.feature
    param_n = args.n
    param_color = args.color
    param_norm = args.norm
    param_interval = list(map(int, args.interval.split(',')))
    param_step = args.step
    param_ylim = list(map(float, args.ylim.split(',')))

    param_prefix = param_prefix.split('/')[1].replace('_', '')

    if param_color:
        file_path = param_prefix + "{}/" + param_prefix + "_{}_color_{}." + filename_ext
    else:
        file_path = param_prefix + "{}/" + param_prefix + "_{}_{}." + filename_ext

    begin, end = param_interval

    svd_data = []
    final_svd_data = []
    image_indices = []
    min_max_list = {}

    threshold_data = []

    # read data threshold file
    with open(param_file, 'r') as f:
        lines = f.readlines()

        for line in lines:
            data = line.replace('\n', '').split(';')
            print(data)

            threshold = ThresholdData(data[0], float(data[1]), int(data[2]))
            threshold_data.append(threshold)

    # filter data if color or not
    threshold_data = [t for t in threshold_data if t.isColor() == param_color]

    for id, threshold in enumerate(threshold_data):

        current_noise = threshold.get_noise()
        current_threshold = threshold.get_threshold()

        min_max_list[current_noise] = (sys.maxsize, 0)
        threshold_found = False

        # get all data from images
        for i in range(1, param_n):

            if i % step_picture == 0:
                image_path = file_path.format(current_noise, current_noise,
                                              str(i))
                img = Image.open(image_path)

                svd_values = get_image_features(param_feature, img)

                if param_norm:
                    svd_values = svd_values[begin:end]

                # only append data once
                if not threshold_found and current_threshold < i:
                    svd_data.append(svd_values)
                    image_indices.append(i)

                if current_threshold < i:
                    threshold_found = True

                # update min max values
                min_value = svd_values.min()
                max_value = svd_values.max()

                # update of min max values for noise
                current_min, current_max = min_max_list[current_noise]

                if min_value < current_min:
                    current_min = min_value

                if max_value > current_max:
                    current_max = max_value

                min_max_list[current_noise] = (current_min, current_max)

            print('%.2f%%' % (((i + 1) * 100 + (id * param_n * 100)) /
                              (param_n * len(threshold_data))))
            sys.stdout.write("\033[F")

    for id, data in enumerate(svd_data):

        current_data = data

        threshold = threshold_data[id]
        min_value_svd, max_value_svd = min_max_list[threshold.get_noise()]

        if param_mode == 'svdn':
            current_data = utils.normalize_arr(current_data)

        if param_mode == 'svdne':
            current_data = utils.normalize_arr_with_range(
                current_data, min_value_svd, max_value_svd)

        final_svd_data.append(current_data)

    # display all data using matplotlib (configure plt)

    plt.rcParams['figure.figsize'] = (25, 18)

    plt.title(param_prefix + ' noise, interval information [' + str(begin) +
              ', ' + str(end) + '], ' + param_feature + ' feature, step ' +
              str(param_step) + ', normalization ' + param_mode,
              fontsize=20)
    plt.ylabel('Importance of noise [1, 999]', fontsize=14)
    plt.xlabel('Vector features', fontsize=16)

    for id, data in enumerate(final_svd_data):

        param_label = param_prefix + '_' + threshold_data[id].get_noise() + str(image_indices[id])
        plt.plot(data, label=param_label)

    plt.legend(bbox_to_anchor=(0.8, 1), loc=2, borderaxespad=0.2, fontsize=14)

    if not param_norm:
        plt.xlim(begin, end)

    # adapt ylim
    y_begin, y_end = param_ylim
    plt.ylim(y_begin, y_end)

    output_filename = param_prefix + "_threshold_1_to_" + str(
        param_n) + "_B" + str(begin) + "_E" + str(
            end) + "_" + param_feature + "_S" + str(
                param_step) + "_norm" + str(param_norm) + "_" + param_mode

    if param_color:
        output_filename = output_filename + '_color'

    print("Generation of output figure... %s" % output_filename)
    output_path = os.path.join(pictures_folder, output_filename)

    if not os.path.exists(pictures_folder):
        os.makedirs(pictures_folder)

    plt.savefig(output_path, dpi=(200))
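
# `ThresholdData` is imported from elsewhere; from its construction above
# (`ThresholdData(data[0], float(data[1]), int(data[2]))`) and the accessors
# used, a hedged sketch could be (field meanings are assumptions):
class ThresholdDataSketch:

    def __init__(self, noise, threshold, color):
        self.noise = noise          # noise kind, e.g. 'gaussian'
        self.threshold = threshold  # threshold value for this noise
        self.color = color          # 1 if computed on color images, else 0

    def get_noise(self):
        return self.noise

    def get_threshold(self):
        return self.threshold

    def isColor(self):
        return self.color
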
def main():

    max_value_svd = 0
    min_value_svd = sys.maxsize

    parser = argparse.ArgumentParser(description="Display svd tend of images with noise level")

    parser.add_argument('--prefix', type=str, help='Generated noise folder prefix (ex: `generated/prefix/noise`)')
    parser.add_argument('--mode', type=str, help='Kind of normalization', choices=normalization_choices)
    parser.add_argument('--feature', type=str, help='feature choice', choices=feature_choices)
    parser.add_argument('--n', type=int, help='Number of images')
    parser.add_argument('--color', type=int, help='Use of color or grey level', default=0)
    parser.add_argument('--norm', type=int, help='Use of normalization from interval or whole data vector', default=0)
    parser.add_argument('--interval', type=str, help='Interval data choice (ex: `0, 200`)', default="0, 200")
    parser.add_argument('--step', type=int, help='Step of image indices to keep', default=1)
    parser.add_argument('--ylim', type=str, help='Limit to display data (ex: `0, 1`)', default="0, 1")
    parser.add_argument('--error', type=str, help='Error used for information data', default=error_data_choices)

    args = parser.parse_args()

    param_prefix   = args.prefix
    param_mode     = args.mode
    param_feature  = args.feature
    param_n        = args.n
    param_color    = args.color
    param_norm     = args.norm
    param_interval = list(map(int, args.interval.split(',')))
    param_step     = args.step
    param_ylim     = list(map(float, args.ylim.split(',')))
    param_error    = args.error


    # extract the noise name before rewriting the prefix
    noise_name = param_prefix.split('/')[2]
    param_prefix = param_prefix.split('/')[1].replace('_', '')

    if param_color:
        file_path = os.path.join(param_prefix, param_prefix + "_" + noise_name + "_color_{}." + filename_ext)
    else:
        file_path = os.path.join(param_prefix, param_prefix + "_" + noise_name + "_{}." + filename_ext)

    begin, end = param_interval
    all_svd_data = []

    svd_data = []
    image_indices = []

    noise_indices = range(1, param_n)[::-1]

    # get all data from images
    for i in noise_indices:

        if i % step_picture == 0:

            image_path = file_path.format(str(i))

            img = Image.open(image_path)

            svd_values = get_image_features(param_feature, img)

            if param_norm:
                svd_values = svd_values[begin:end]

            all_svd_data.append(svd_values)

            # update min max values
            min_value = svd_values.min()
            max_value = svd_values.max()

            if min_value < min_value_svd:
                min_value_svd = min_value

            if max_value > max_value_svd:
                max_value_svd = max_value

        print('%.2f%%' % ((param_n - i + 1) / param_n * 100))
        sys.stdout.write("\033[F")

    previous_data = []
    error_data = [0.]

    for id, data in enumerate(all_svd_data):

        current_id = (param_n - ((id + 1) * 10))

        if current_id % param_step == 0:

            current_data = data

            if param_mode == 'svdn':
                current_data = utils.normalize_arr(current_data)

            if param_mode == 'svdne':
                current_data = utils.normalize_arr_with_range(current_data, min_value_svd, max_value_svd)

            svd_data.append(current_data)
            image_indices.append(current_id)

            # use of whole image data for computation of ssim or psnr
            if param_error == 'ssim' or param_error == 'psnr':
                image_path = file_path.format(str(current_id))
                current_data = np.asarray(Image.open(image_path))

            if len(previous_data) > 0:

                current_error = get_error_distance(param_error, previous_data, current_data)
                error_data.append(current_error)

            # keep current data as the reference for the next comparison
            previous_data = current_data

    # display all data using matplotlib (configure plt)
    gridsize = (3, 2)

    # fig, (ax1, ax2) = plt.subplots(nrows=2, ncols=1, figsize=(30, 22))
    fig = plt.figure(figsize=(30, 22))
    ax1 = plt.subplot2grid(gridsize, (0, 0), colspan=2, rowspan=2)
    ax2 = plt.subplot2grid(gridsize, (2, 0), colspan=2)

    ax1.set_title(param_prefix + ', ' + noise_name + ' noise, interval information ['+ str(begin) +', '+ str(end) +'], ' + param_feature + ' feature, step ' + str(param_step) + ', normalization ' + param_mode)
    ax1.set_ylabel('Importance of noise [1, 999]')
    ax1.set_xlabel('Vector features')

    for id, data in enumerate(svd_data):

        param_label = param_prefix + str(image_indices[id]) + " | " + param_error + ": " + str(error_data[id])
        ax1.plot(data, label=param_label)

    ax1.legend(bbox_to_anchor=(0.75, 1), loc=2, borderaxespad=0.2, fontsize=12)

    if not param_norm:
        ax1.set_xlim(begin, end)

    # adapt ylim
    y_begin, y_end = param_ylim
    ax1.set_ylim(y_begin, y_end)

    output_filename = param_prefix + "_" + noise_name + "_1_to_" + str(param_n) + "_B" + str(begin) + "_E" + str(end) + "_" + param_feature + "_S" + str(param_step) + "_norm" + str(param_norm) + "_" + param_mode + "_" + param_error

    if param_color:
        output_filename = output_filename + '_color'

    ax2.set_title(param_error + " information for : " + param_prefix  + ', ' + noise_name + ' noise, interval information ['+ str(begin) +', '+ str(end) +'], ' + param_feature + ' feature, step ' + str(param_step) + ', normalization ' + param_mode)
    ax2.set_ylabel(param_error + ' error')
    ax2.set_xlabel('Number of samples per pixels')
    ax2.set_xticks(range(len(image_indices)))
    ax2.set_xticklabels(image_indices)
    ax2.plot(error_data)

    print("Generation of output figure... %s" % output_filename)
    output_path = os.path.join(pictures_folder, output_filename)

    if not os.path.exists(pictures_folder):
        os.makedirs(pictures_folder)

    fig.savefig(output_path, dpi=(200))
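
# `get_error_distance` is defined elsewhere; the code above passes it an error
# name ('ssim', 'psnr', ...) plus two images or two feature vectors. A hedged
# sketch assuming a recent scikit-image and an MSE fallback (both the library
# choice and the fallback are assumptions, not the project's actual code):
import numpy as np
from skimage.metrics import peak_signal_noise_ratio, structural_similarity

def get_error_distance_sketch(p_error, previous_data, current_data):
    previous_data = np.asarray(previous_data)
    current_data = np.asarray(current_data)

    if p_error == 'ssim':
        # color images: compare along the last (channel) axis
        return structural_similarity(previous_data, current_data, channel_axis=-1)

    if p_error == 'psnr':
        # assuming 8-bit image data
        return peak_signal_noise_ratio(previous_data, current_data, data_range=255)

    # fallback: mean squared error between feature vectors
    return float(np.mean((previous_data - current_data) ** 2))
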
def main():

    parser = argparse.ArgumentParser(
        description=
        "Read and compute entropy data file (using `minus` approach)")

    parser.add_argument('--data',
                        type=str,
                        help='entropy file data to read and compute')
    parser.add_argument('--norm',
                        type=int,
                        help='normalize or not entropy',
                        choices=[0, 1],
                        default=0)
    parser.add_argument('--std',
                        type=int,
                        help='multiply result by current std',
                        choices=[0, 1],
                        default=0)
    parser.add_argument('--output', type=str, help='prediction file used')

    args = parser.parse_args()

    p_data = args.data
    p_norm = args.norm
    p_std = args.std
    p_output = args.output

    # create output path if not exists
    threshold_path = os.path.join(cfg.output_data_folder, cfg.data_thresholds)
    p_output_path = os.path.join(threshold_path, p_output)
    if not os.path.exists(threshold_path):
        os.makedirs(threshold_path)

    # read line by line file to estimate threshold entropy stopping criteria
    with open(p_data, 'r') as f:
        lines = f.readlines()

        minus_entropy_found = []
        for line in lines:

            data = line.split(';')

            threshold = data[3]
            image_indices = data[4].split(',')
            entropy_list = data[5].split(',')

            # no element is removed using this function
            entropy_minus_list = get_zone_minus_entropy(entropy_list, p_std)

            found_index = 0
            for index, v in enumerate(image_indices):

                if int(v) > int(threshold):
                    found_index = index
                    break

            if p_norm:
                diff_entropy_kept = utils.normalize_arr(
                    entropy_minus_list[:found_index + 1])[-1]
            else:
                diff_entropy_kept = entropy_minus_list[found_index]

            # keep the value found for this zone
            minus_entropy_found.append(diff_entropy_kept)

        # TODO : test this part
        if p_norm:
            minus_entropy_found = utils.normalize_arr(minus_entropy_found)

        mean_entropy_minus = sum(minus_entropy_found) / len(
            minus_entropy_found)
        std_entropy_minus = np.std(minus_entropy_found)

        print('mean', mean_entropy_minus)
        print('std', std_entropy_minus)

        with open(p_output_path, 'w') as f:
            print("Erase", p_output_path, "previous file if exists")

        # now we can predict threshold img using `mean_entropy_diff`
        for line in lines:
            data = line.split(';')

            scene_name = data[0]
            zone_index = data[1]
            zone_index_str = data[2]
            threshold = data[3]
            image_indices = data[4].split(',')
            entropy_list = data[5].split(',')

            # no element is removed using this function
            entropy_minus_list = get_zone_minus_entropy(entropy_list, p_std)

            # by default max index (if no stopping criterion found)
            found_index = len(image_indices) - 1
            for index, v in enumerate(entropy_minus_list):

                if p_norm:
                    current_v = utils.normalize_arr(entropy_minus_list[:index +
                                                                       1])[-1]
                else:
                    current_v = v

                if mean_entropy_minus > current_v:
                    found_index = index
                    break

            threshold_found = image_indices[found_index]

            with open(p_output_path, 'a') as f:
                f.write(scene_name + ';')
                f.write(zone_index + ';')
                f.write(zone_index_str + ';')
                f.write(threshold + ';')
                f.write(threshold_found + ';')
                f.write(str(mean_entropy_minus) + ';')
                f.write(str(std_entropy_minus) + ';')
                f.write(str(p_norm))
                f.write('\n')
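
# `get_zone_minus_entropy` is defined elsewhere; the comment above states that
# no element is removed, so the returned list keeps the input length. A hedged
# sketch of a 'minus' (previous value minus current value) approach with the
# optional std weighting of the --std flag; the exact formula is an assumption:
import numpy as np

def get_zone_minus_entropy_sketch(entropy_list, std=False):
    values = list(map(float, entropy_list))
    minus_list = []
    previous = 0.
    for i, v in enumerate(values):
        current = previous - v
        if std:
            # weight by the std of the values seen so far (assumption)
            current *= np.std(values[:i + 1])
        minus_list.append(current)
        previous = v
    return minus_list
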
def main():

    # getting all params
    parser = argparse.ArgumentParser(
        description=
        "Script which detects if an image is noisy or not using specific model"
    )

    parser.add_argument('--image', type=str, help='Image path')
    parser.add_argument('--interval',
                        type=str,
                        help='Interval value to keep from svd',
                        default='0, 200')
    parser.add_argument('--model',
                        type=str,
                        help='.joblib or .json file (sklearn or keras model)')
    parser.add_argument('--mode',
                        type=str,
                        help='Kind of normalization level wished',
                        choices=normalization_choices)
    parser.add_argument('--feature',
                        type=str,
                        help='feature data choice',
                        choices=features_choices)
    parser.add_argument(
        '--custom',
        type=str,
        help='Name of custom min max file if use of renormalization of data',
        default=False)

    args = parser.parse_args()

    p_img_file = args.image
    p_model_file = args.model
    p_interval = list(map(int, args.interval.split(',')))
    p_mode = args.mode
    p_feature = args.feature
    p_custom = args.custom

    if '.joblib' in p_model_file:
        kind_model = 'sklearn'

    if '.json' in p_model_file:
        kind_model = 'keras'

    if 'corr' in p_model_file:
        corr_model = True

        indices_corr_path = os.path.join(
            cfg.correlation_indices_folder,
            p_model_file.split('/')[1].replace('.json', '').replace(
                '.joblib', '') + '.csv')

        with open(indices_corr_path, 'r') as f:
            data_corr_indices = [
                int(x) for x in f.readline().split(';') if x != ''
            ]
    else:
        corr_model = False

    if kind_model == 'sklearn':
        # load of model file
        model = joblib.load(p_model_file)

    if kind_model == 'keras':
        with open(p_model_file, 'r') as f:
            json_model = json.load(f)
            model = model_from_json(json_model)
            model.load_weights(p_model_file.replace('.json', '.h5'))

            model.compile(loss='binary_crossentropy',
                          optimizer='adam',
                          metrics=['accuracy'])

    # load image
    img = Image.open(p_img_file)

    data = get_image_features(p_feature, img)

    # get interval values
    begin, end = p_interval

    # check if custom min max file is used
    if p_custom:

        if corr_model:
            test_data = data[data_corr_indices]
        else:
            test_data = data[begin:end]

        if p_mode == 'svdne':

            # set min_max_filename if custom use
            min_max_file_path = custom_min_max_folder + '/' + p_custom

            # need to read min_max_file
            file_path = os.path.join(os.path.dirname(__file__),
                                     min_max_file_path)
            with open(file_path, 'r') as f:
                min_val = float(f.readline().replace('\n', ''))
                max_val = float(f.readline().replace('\n', ''))

            test_data = utils.normalize_arr_with_range(test_data, min_val,
                                                       max_val)

        if p_mode == 'svdn':
            test_data = utils.normalize_arr(test_data)

    else:

        # check mode to normalize data
        if p_mode == 'svdne':

            # set min_max_filename if custom use
            min_max_file_path = path + '/' + p_feature + min_max_ext

            # need to read min_max_file
            file_path = os.path.join(os.path.dirname(__file__),
                                     min_max_file_path)
            with open(file_path, 'r') as f:
                min_val = float(f.readline().replace('\n', ''))
                max_val = float(f.readline().replace('\n', ''))

            l_values = utils.normalize_arr_with_range(data, min_val, max_val)

        elif p_mode == 'svdn':
            l_values = utils.normalize_arr(data)
        else:
            l_values = data

        # select features from the normalized values computed above
        if corr_model:
            test_data = l_values[data_corr_indices]
        else:
            test_data = l_values[begin:end]

    # get prediction of model
    if kind_model == 'sklearn':
        prediction = model.predict([test_data])[0]

    if kind_model == 'keras':
        test_data = np.asarray(test_data).reshape(1, len(test_data), 1)
        prediction = model.predict_classes([test_data])[0][0]

    # output expected from others scripts
    print(prediction)
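
# Hypothetical invocation of this detection script (all paths and the model
# name are placeholders, not files shipped with the project):
#
#   python predict_noisy_image.py --image scene/img_00100.png \
#       --interval "0, 200" --model saved_models/svd_rf.joblib \
#       --mode svdn --feature lab
#
# The script prints the predicted class label so that calling scripts can
# capture it from stdout.
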
def display_data_scenes(p_scene, p_bits, p_shifted):
    """
    @brief Method which generates all .csv files from scenes photos
    @param p_scene, scene we want to show values
    @param nb_bits, number of bits expected
    @param p_shifted, number of bits expected to be shifted
    @return nothing
    """

    scenes = os.listdir(path)
    # remove min max file from scenes folder
    scenes = [s for s in scenes if min_max_filename not in s]

    # go ahead each scenes
    for folder_scene in scenes:

        if p_scene == folder_scene:
            print(folder_scene)
            scene_path = os.path.join(path, folder_scene)

            # construct each zones folder name
            zones_folder = []

            # get zones list info
            for index in zones:
                index_str = str(index)
                if len(index_str) < 2:
                    index_str = "0" + index_str

                current_zone = "zone" + index_str
                zones_folder.append(current_zone)

            zones_images_data = []
            threshold_info = []

            # get all images of folder
            scene_images = sorted([
                os.path.join(scene_path, img) for img in os.listdir(scene_path)
                if cfg.scene_image_extension in img
            ])

            start_image_path = scene_images[0]
            end_image_path = scene_images[-1]

            start_quality_image = dt.get_scene_image_quality(scene_images[0])
            end_quality_image = dt.get_scene_image_quality(scene_images[-1])

            for id_zone, zone_folder in enumerate(zones_folder):

                zone_path = os.path.join(scene_path, zone_folder)

                # get threshold information
                path_seuil = os.path.join(zone_path, seuil_expe_filename)

                # open threshold path and get this information
                with open(path_seuil, "r") as seuil_file:
                    threshold_learned = int(seuil_file.readline().strip())

                threshold_image_found = False

                # for each images
                for img_path in scene_images:
                    current_quality_image = dt.get_scene_image_quality(
                        img_path)

                    if threshold_learned < int(current_quality_image) and not threshold_image_found:

                        threshold_image_found = True
                        threshold_image_path = img_path

                        threshold_image = dt.get_scene_image_postfix(img_path)
                        threshold_info.append(threshold_image)

                # all indexes of picture to plot
                images_path = [
                    start_image_path, threshold_image_path, end_image_path
                ]
                images_data = []

                for img_path in images_path:

                    current_img = Image.open(img_path)
                    img_blocks = segmentation.divide_in_blocks(
                        current_img, (200, 200))

                    # getting expected block id
                    block = img_blocks[id_zone]

                    # get data from mode
                    # Here you can add the way you compute data
                    low_bits_block = transform.rgb_to_LAB_L_bits(
                        block, (p_shifted + 1, p_shifted + p_bits + 1))
                    data = compression.get_SVD_s(low_bits_block)

                    ##################
                    # Data mode part #
                    ##################

                    # modify data depending mode
                    data = utils.normalize_arr(data)
                    images_data.append(data)

                zones_images_data.append(images_data)

            fig = plt.figure(figsize=(8, 8))
            fig.suptitle('Lab SVD ' + str(p_bits) + ' bits shifted by ' +
                         str(p_shifted) + " for " + p_scene + " scene",
                         fontsize=20)

            for id, data in enumerate(zones_images_data):
                fig.add_subplot(4, 4, (id + 1))
                plt.plot(data[0], label='Noisy_' + start_quality_image)
                plt.plot(data[1], label='Threshold_' + threshold_info[id])
                plt.plot(data[2], label='Reference_' + end_quality_image)
                plt.ylabel('Lab SVD ' + str(p_bits) + ' bits shifted by ' +
                           str(p_shifted) + ', ZONE_' + str(id + 1),
                           fontsize=14)
                plt.xlabel('Vector features', fontsize=16)
                plt.legend(bbox_to_anchor=(0.5, 1),
                           loc=2,
                           borderaxespad=0.2,
                           fontsize=14)
                plt.ylim(0, 0.1)
            plt.show()
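
# `segmentation.divide_in_blocks` belongs to an external helper library. A
# minimal numpy sketch of the behaviour the call sites rely on (fixed-size
# tiles walked row by row, so the block index matches the zone numbering);
# the real helper likely returns PIL images rather than arrays:
import numpy as np

def divide_in_blocks_sketch(image, block_size):
    arr = np.asarray(image)
    block_h, block_w = block_size
    blocks = []
    # walk the grid row-major so zone ids map onto grid positions
    for y in range(0, arr.shape[0] - block_h + 1, block_h):
        for x in range(0, arr.shape[1] - block_w + 1, block_w):
            blocks.append(arr[y:y + block_h, x:x + block_w])
    return blocks
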
Example #22
def display_estimated_thresholds(scene,
                                 displayed_data,
                                 info,
                                 metric,
                                 norm=True,
                                 save=True):

    colors = ['C0', 'C1', 'C2', 'C3', 'C4']

    #plt.figure(figsize=(25, 20))
    plt.rc('xtick', labelsize=16)  # fontsize of the tick labels
    plt.rc('ytick', labelsize=16)  # fontsize of the tick labels

    fig, axs = plt.subplots(4, 4, figsize=(25, 20))
    fig.suptitle('Complexity value evolution in scene ' + scene + " | " + info,
                 fontsize=24)

    for i, ax in enumerate(axs.flat):

        complexity_values = get_zone_diff_entropy(displayed_data[i]['data'])
        error_values = displayed_data[i]['errors'][
            1:]  # drop the first error so it aligns with the diff-based complexity values
        human_threshold = int(displayed_data[i]['human_threshold'])

        if norm:
            complexity_values = utils.normalize_arr(complexity_values)
            error_values = utils.normalize_arr(error_values)

        # display evolution curve and error curve for each zone
        ax.set_title(displayed_data[i]['zone'])

        ax.plot(complexity_values, color=colors[0], label='complexity values')

        ax.plot(error_values, color=colors[1], label=metric + ' error')

        # get max `y` value
        max_y = 0

        max_complexity = max(complexity_values)
        max_error = max(error_values)
        max_found = max(max_error, max_complexity)

        if max_found > max_y:
            max_y = max_found

        # image indices for each step
        image_indices = displayed_data[i]['steps'][
            1:]  # drop the first step so indices align with the diff-based complexity values

        index_threshold = 0
        while image_indices[index_threshold] < human_threshold:
            index_threshold = index_threshold + 1

        ax.plot([index_threshold, index_threshold], [max_y, 0],
                'k-',
                lw=2,
                color=colors[2])

        # set each labels
        x_labels = [
            str(label) for label in image_indices
            if int(label) % display_xticks_step == 0
        ]
        x = [
            i for i, v in enumerate(image_indices)
            if int(v) % display_xticks_step == 0
        ]
        ax.set_xticks(x)
        ax.set_xticklabels(x_labels, rotation=45)
        #ax.legend(fontsize=16)

        if i >= len(axs.flat) - 1:
            handles, labels = ax.get_legend_handles_labels()
    fig.legend(handles,
               labels,
               loc='upper right',
               bbox_to_anchor=(0.9, 0.95),
               fontsize=18)

    for ax in axs.flat:
        ax.label_outer()

    #fig.tight_layout()
    if save:
        fig_folder = os.path.join(cfg.output_data_folder, cfg.data_fig_folder)

        if not os.path.exists(fig_folder):
            os.makedirs(fig_folder)

        fig.savefig(os.path.join(fig_folder, 'temp.png'), transparent=True)
    else:
        plt.show()
def display_svd_values(p_scene, p_thresholds, p_interval, p_indices, p_feature,
                       p_mode, p_step, p_norm, p_ylim, p_label):
    """
    @brief Method which gives information about svd curves from zone of picture
    @param p_scene, scene expected to show svd values
    @param p_interval, interval [begin, end] of svd data to display
    @param p_indices, interval [begin, end] of samples or minutes from render generation engine
    @param p_feature, feature computed to show
    @param p_mode, normalization's mode
    @param p_norm, normalization or not of selected svd data
    @param p_ylim, ylim choice to better display of data
    @return nothing
    """

    max_value_svd = 0
    min_value_svd = sys.maxsize

    begin_data, end_data = p_interval
    begin_index, end_index = p_indices

    # go ahead selected scene
    scene_path = p_scene

    # construct each zones folder name
    zones_folder = []

    # get zones list info
    for index in zones:
        index_str = str(index)
        if len(index_str) < 2:
            index_str = "0" + index_str

        current_zone = "zone" + index_str
        zones_folder.append(current_zone)

    images_data = []
    images_indices = []

    threshold_learned_zones = []

    # get all images of folder
    scene_images = sorted([
        os.path.join(scene_path, img) for img in os.listdir(scene_path)
        if cfg.scene_image_extension in img
    ])
    number_scene_image = len(scene_images)

    _, scene_name = os.path.split(p_scene)
    threshold_learned_zones = p_thresholds[scene_name]

    threshold_mean = np.mean(np.asarray(threshold_learned_zones))
    threshold_image_found = False

    svd_data = []

    # for each images
    for id_img, img_path in enumerate(scene_images):

        current_quality_image = dt.get_scene_image_quality(img_path)

        img = Image.open(img_path)

        svd_values = get_image_features(p_feature, img)

        if p_norm:
            svd_values = svd_values[begin_data:end_data]

        #svd_values = np.asarray([math.log(x) for x in svd_values])

        # update min max values
        min_value = svd_values.min()
        max_value = svd_values.max()

        if min_value < min_value_svd:
            min_value_svd = min_value

        if max_value > max_value_svd:
            max_value_svd = max_value

        # keep in memory used data
        if current_quality_image % p_step == 0:
            if current_quality_image >= begin_index and current_quality_image <= end_index:

                images_indices.append(dt.get_scene_image_postfix(img_path))
                svd_data.append(svd_values)

        if threshold_mean < current_quality_image and not threshold_image_found:

            threshold_image_found = True
            threshold_image_zone = current_quality_image

            print("Quality mean : ", current_quality_image, "\n")

            if dt.get_scene_image_postfix(img_path) not in images_indices:
                images_indices.append(dt.get_scene_image_postfix(img_path))

        print('%.2f%%' % ((id_img + 1) / number_scene_image * 100))
        sys.stdout.write("\033[F")

    # all indices of picture to plot
    print(images_indices)

    for id, data in enumerate(svd_data):

        # current_data = [ math.log10(d + 1.) for d in data ]
        # print(current_data)

        current_data = data

        if not p_norm:
            current_data = current_data[begin_data:end_data]

        if p_mode == 'svdn':
            current_data = utils.normalize_arr(current_data)

        if p_mode == 'svdne':
            current_data = utils.normalize_arr_with_range(
                current_data, min_value_svd, max_value_svd)

        images_data.append(current_data)

    # display all data using matplotlib (configure plt)
    fig, ax = plt.subplots(figsize=(30, 15))
    ax.set_facecolor('#FFFFFF')
    #fig.patch.set_facecolor('#F9F9F9')

    ax.tick_params(labelsize=26)
    #plt.rc('xtick', labelsize=22)
    #plt.rc('ytick', labelsize=22)

    #plt.title(p_scene + ' scene interval information SVD['+ str(begin_data) +', '+ str(end_data) +'], from scenes indices [' + str(begin_index) + ', '+ str(end_index) + '], ' + p_feature + ' feature, ' + p_mode + ', with step of ' + str(p_step) + ', svd norm ' + str(p_norm), fontsize=24)
    ax.set_ylabel('Component values', fontsize=36)
    ax.set_xlabel('Singular value component indices', fontsize=36)

    for id, data in enumerate(images_data):

        #p_label = p_scene + "_" + images_indices[id]
        p_label = images_indices[id] + " samples"

        if int(images_indices[id]) == int(threshold_image_zone):
            ax.plot(data,
                    label=p_label + " (threshold mean)",
                    lw=8,
                    color='red')
        else:
            ax.plot(data, label=p_label, lw=4)

    plt.legend(bbox_to_anchor=(0.60, 0.98),
               loc=2,
               borderaxespad=0.2,
               fontsize=32)

    start_ylim, end_ylim = p_ylim
    ax.set_ylim(start_ylim, end_ylim)

    plot_name = scene_name + '_' + p_feature + '_' + str(
        p_step) + '_' + p_mode + '_' + str(p_norm) + '.png'
    # plt.title('Tend of Singular values at different samples of ' + p_label + ' scene', fontsize=40)
    plt.savefig(plot_name, transparent=True)
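
# `get_image_features(p_feature, img)` dispatches on the feature name; for the
# SVD-based features it presumably reduces to something like the
# `compression.get_SVD_s` call seen earlier, i.e. the singular values of a 2D
# block. A one-call numpy sketch (the dispatcher itself is an assumption):
import numpy as np

def get_svd_s_sketch(block):
    # singular values only, returned by numpy in decreasing order
    return np.linalg.svd(np.asarray(block, dtype=np.float64), compute_uv=False)
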
Example #24
def display_svd_values(p_interval, p_indices, p_metric, p_mode, p_step, p_norm, p_area, p_ylim):
    """
    @brief Method which gives information about svd curves from zone of picture
    @param p_interval, interval [begin, end] of svd data to display
    @param p_indices, indices to display
    @param p_metric, metric computed to show
    @param p_mode, normalization's mode
    @param p_norm, normalization or not of selected svd data
    @param p_area, area method name to compute area under curve
    @param p_ylim, ylim choice to better display of data
    @return nothing
    """

    image_indices = []

    scenes = os.listdir(path)
    # remove min max file from scenes folder
    scenes = [s for s in scenes if min_max_filename not in s]

    begin_data, end_data = p_interval
    begin_index, end_index = p_indices

    # Store all informations about scenes
    scenes_area_data = []
    scenes_images_indices = []
    scenes_threshold_mean = []

    # go ahead each scenes
    for folder_scene in scenes:

        max_value_svd = 0
        min_value_svd = sys.maxsize

        scene_path = os.path.join(path, folder_scene)

        # construct each zones folder name
        zones_folder = []

        # get zones list info
        for index in zones:
            index_str = str(index)
            if len(index_str) < 2:
                index_str = "0" + index_str

            current_zone = "zone"+index_str
            zones_folder.append(current_zone)

        # store data information for current scene
        images_data = []
        images_indices = []
        threshold_learned_zones = []

        # get all images of folder
        scene_images = sorted([os.path.join(scene_path, img) for img in os.listdir(scene_path) if cfg.scene_image_extension in img])
        number_scene_image = len(scene_images)

        for id, zone_folder in enumerate(zones_folder):

            # get threshold information
            zone_path = os.path.join(scene_path, zone_folder)
            path_seuil = os.path.join(zone_path, seuil_expe_filename)

            # open threshold path and get this information
            with open(path_seuil, "r") as seuil_file:
                threshold_learned = int(seuil_file.readline().strip())
                threshold_learned_zones.append(threshold_learned)

        threshold_mean = np.mean(np.asarray(threshold_learned_zones))
        threshold_image_found = False
        scenes_threshold_mean.append(int(threshold_mean / p_step))

        svd_data = []

        # for each images
        for id_img, img_path in enumerate(scene_images):
            
            current_quality_image = dt.get_scene_image_quality(img_path)

            img = Image.open(img_path)

            svd_values = get_image_features(p_metric, img)

            if p_norm:
                svd_values = svd_values[begin_data:end_data]

            # update min max values
            min_value = svd_values.min()
            max_value = svd_values.max()

            if min_value < min_value_svd:
                min_value_svd = min_value

            if max_value > max_value_svd:
                max_value_svd = max_value

            # keep in memory used data
            if current_quality_image % p_step == 0:
                if current_quality_image >= begin_index and current_quality_image <= end_index:
                    images_indices.append(dt.get_scene_image_postfix(img_path))
                    svd_data.append(svd_values)

                if threshold_mean < current_quality_image and not threshold_image_found:

                    threshold_image_found = True

            print('%.2f%%' % ((id_img + 1) / number_scene_image * 100))
            sys.stdout.write("\033[F")


        # all indices of pictures to plot
        print("Scene %s : %s" % (folder_scene, images_indices))

        scenes_images_indices.append(images_indices)

        area_data = []

        for id, data in enumerate(svd_data):

            current_data = data

            if not p_norm:
                current_data = current_data[begin_data:end_data]

            if p_mode == 'svdn':
                current_data = utils.normalize_arr(current_data)

            if p_mode == 'svdne':
                current_data = utils.normalize_arr_with_range(current_data, min_value_svd, max_value_svd)

            images_data.append(current_data)

            # not use this script for 'sub_blocks_stats'
            current_area = get_area_under_curve(p_area, current_data)
            area_data.append(current_area)

        scenes_area_data.append(area_data)

    # display all data using matplotlib (configure plt)
    plt.title('Scenes area interval information SVD['+ str(begin_data) +', '+ str(end_data) +'], from scenes indices [' + str(begin_index) + ', '+ str(end_index) + '], ' + p_metric + ' metric, ' + p_mode + ', with step of ' + str(p_step) + ', svd norm ' + str(p_norm), fontsize=20)
    plt.ylabel('Image samples or time (minutes) generation', fontsize=14)
    plt.xlabel('Vector features', fontsize=16)


    for id, area_data in enumerate(scenes_area_data):

        threshold_id = 0
        scene_name = scenes[id]
        image_indices = scenes_images_indices[id]

        p_label = scene_name  # one area curve per scene

        threshold_id = scenes_threshold_mean[id]

        print(p_label)

        plt.plot(area_data, label=p_label)
        #ax2.set_xticks(range(len(images_indices)))
        #ax2.set_xticklabels(list(map(int, images_indices)))
        if threshold_id != 0:
            print("Plot threshold ", threshold_id)
            plt.plot([threshold_id, threshold_id], [np.min(area_data), np.max(area_data)], 'k-', lw=2, color='red')


    start_ylim, end_ylim = p_ylim
    plt.ylim(start_ylim, end_ylim)

    # legend placed after plotting so that all labelled curves are included
    plt.legend(bbox_to_anchor=(0.7, 1), loc=2, borderaxespad=0.2, fontsize=14)

    plt.show()
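
# `get_area_under_curve(p_area, data)` dispatches on an area-method name; its
# source lives elsewhere in the project. A hedged sketch with 'trapz' and
# 'simpson' methods (the method names, and scipy >= 1.6 for `simpson`, are
# assumptions):
import numpy as np
from scipy.integrate import simpson

def get_area_under_curve_sketch(p_area, data):
    data = np.asarray(data, dtype=np.float64)
    if p_area == 'simpson':
        return simpson(data)
    # default: trapezoidal rule over unit-spaced samples
    return np.trapz(data)
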
Example #25
def main():

    parser = argparse.ArgumentParser(
        description=
        "Read and compute complexity data file (using entropy and sobel diff)")

    parser.add_argument('--data1',
                        type=str,
                        help='entropy file data to read and compute')
    parser.add_argument('--data2',
                        type=str,
                        help='sobel complexity file data to read and compute')
    parser.add_argument('--norm',
                        type=int,
                        help='normalize or not entropy',
                        choices=[0, 1],
                        default=0)
    parser.add_argument('--std',
                        type=int,
                        help='multiply result by current std',
                        choices=[0, 1],
                        default=0)
    parser.add_argument('--output', type=str, help='prediction file used')

    args = parser.parse_args()

    p_data1 = args.data1
    p_data2 = args.data2
    p_norm = args.norm
    p_std = args.std
    p_output = args.output

    # create output path if not exists
    threshold_path = os.path.join(cfg.output_data_folder, cfg.data_thresholds)
    p_output_path = os.path.join(threshold_path, p_output)
    if not os.path.exists(threshold_path):
        os.makedirs(threshold_path)

    # read line by line file to estimate threshold entropy stopping criteria
    with open(p_data1, 'r') as f:
        lines_entropy = f.readlines()

    with open(p_data2, 'r') as f:
        lines_complexity = f.readlines()

    gradient_complexity_found = []

    for i in range(len(lines_entropy)):

        data_entropy = lines_entropy[i].split(';')
        data_complexity = lines_complexity[i].split(';')

        threshold = data_entropy[3]
        image_indices = data_entropy[4].split(',')

        entropy_list = data_entropy[5].split(',')
        complexity_list = data_complexity[5].split(',')

        # one element is removed using this function (first element of list for computing first gradient complexity)
        entropy_gradient_list = get_sobel_entropy_complexity(
            entropy_list, complexity_list, p_std)
        image_indices_without_first = image_indices[1:]

        found_index = 0
        for index, v in enumerate(image_indices_without_first):

            if int(v) > int(threshold):
                found_index = index
                break

        if p_norm:
            gradient_complexity_kept = utils.normalize_arr(
                entropy_gradient_list[:found_index + 1])[-1]
        else:
            gradient_complexity_kept = entropy_gradient_list[found_index]

        # keep the value found for this zone
        gradient_complexity_found.append(gradient_complexity_kept)

    mean_complexity_gradient = sum(gradient_complexity_found) / len(
        gradient_complexity_found)
    std_complexity_gradient = np.std(gradient_complexity_found)

    print('mean', mean_complexity_gradient)
    print('std', std_complexity_gradient)

    with open(p_output_path, 'w') as f:
        print("Erase", p_output_path, "previous file if exists")

    # now we can predict threshold img using `mean_complexity_gradient`
    for i in range(len(lines_entropy)):

        data_entropy = lines_entropy[i].split(';')
        data_complexity = lines_complexity[i].split(';')

        scene_name = data_entropy[0]
        zone_index = data_entropy[1]
        zone_index_str = data_entropy[2]
        threshold = data_entropy[3]
        image_indices = data_entropy[4].split(',')

        entropy_list = data_entropy[5].split(',')
        complexity_list = data_complexity[5].split(',')

        # one element is removed using this function (first element of list for computing first gradient complexity)
        entropy_gradient_list = get_sobel_entropy_complexity(
            entropy_list, complexity_list, p_std)
        image_indices_without_first = image_indices[1:]

        # by default max index (if no stopping criterion found)
        found_index = len(image_indices_without_first) - 1
        for index, v in enumerate(entropy_gradient_list):

            if p_norm:
                current_v = utils.normalize_arr(entropy_gradient_list[:index +
                                                                      1])[-1]
            else:
                current_v = v

            if mean_complexity_gradient > current_v:
                found_index = index
                break

        threshold_found = image_indices_without_first[found_index]

        with open(p_output_path, 'a') as f:
            f.write(scene_name + ';')
            f.write(zone_index + ';')
            f.write(zone_index_str + ';')
            f.write(threshold + ';')
            f.write(threshold_found + ';')
            f.write(str(mean_complexity_gradient) + ';')
            f.write(str(std_complexity_gradient) + ';')
            f.write(str(p_norm))
            f.write('\n')
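
# Hypothetical invocation (file names are placeholders): the script reads two
# aligned per-zone data files, learns a mean stopping value on the
# entropy/sobel gradient complexity, then writes one predicted threshold per
# zone next to the human one:
#
#   python estimate_thresholds_sobel_entropy.py --data1 entropy.csv \
#       --data2 sobel.csv --norm 1 --std 0 --output predictions.csv
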
Example #26
def display_svd_values(p_scene, p_interval, p_indices, p_zone, p_feature,
                       p_mode, p_step, p_norm, p_ylim):
    """
    @brief Method which gives information about svd curves from zone of picture
    @param p_scene, scene expected to show svd values
    @param p_interval, interval [begin, end] of svd data to display
    @param p_indices, interval [begin, end] of samples or minutes from render generation engine
    @param p_zone, zone's identifier of picture
    @param p_feature, feature computed to show
    @param p_mode, normalization's mode
    @param p_step, step of images indices
    @param p_norm, normalization or not of selected svd data
    @param p_ylim, ylim choice to better display of data
    @return nothing
    """

    scenes = os.listdir(path)
    # remove min max file from scenes folder
    scenes = [s for s in scenes if min_max_filename not in s]

    begin_data, end_data = p_interval
    begin_index, end_index = p_indices

    data_min_max_filename = os.path.join(path, p_feature + min_max_filename)

    # go ahead each scenes
    for folder_scene in scenes:

        if p_scene == folder_scene:
            scene_path = os.path.join(path, folder_scene)
            # construct each zones folder name
            zones_folder = []

            # get zones list info
            for index in zones:
                index_str = str(index)
                if len(index_str) < 2:
                    index_str = "0" + index_str

                current_zone = "zone" + index_str
                zones_folder.append(current_zone)

            zones_images_data = []
            images_path = []

            zone_folder = zones_folder[p_zone]

            zone_path = os.path.join(scene_path, zone_folder)

            # get threshold information
            path_seuil = os.path.join(zone_path, seuil_expe_filename)

            # open threshold path and get this information
            with open(path_seuil, "r") as seuil_file:
                seuil_learned = int(seuil_file.readline().strip())

            threshold_image_found = False

            # get all images of folder
            scene_images = sorted([
                os.path.join(scene_path, img) for img in os.listdir(scene_path)
                if cfg.scene_image_extension in img
            ])

            # for each images
            for img_path in scene_images:

                current_quality_image = dt.get_scene_image_quality(img_path)

                if current_quality_image % p_step == 0:
                    if current_quality_image >= begin_index and current_quality_image <= end_index:
                        images_path.append(img_path)

                    if seuil_learned < current_quality_image and not threshold_image_found:

                        threshold_image_found = True
                        threshold_image_zone = dt.get_scene_image_postfix(
                            img_path)

                        if img_path not in images_path:
                            images_path.append(img_path)

            for img_path in images_path:

                current_img = Image.open(img_path)
                img_blocks = segmentation.divide_in_blocks(
                    current_img, (200, 200))

                # getting expected block id
                block = img_blocks[p_zone]

                # get data from mode
                # Here you can add the way you compute data
                data = get_image_features(p_feature, block)

                # TODO : improve part of this code to get correct min / max values
                if p_norm:
                    data = data[begin_data:end_data]

                ##################
                # Data mode part #
                ##################

                if p_mode == 'svdne':

                    # getting max and min information from min_max_filename
                    if not p_norm:
                        with open(data_min_max_filename, 'r') as f:
                            min_val = float(f.readline())
                            max_val = float(f.readline())
                    else:
                        min_val = min_value_interval
                        max_val = max_value_interval

                    data = utils.normalize_arr_with_range(
                        data, min_val, max_val)

                if p_mode == 'svdn':
                    data = utils.normalize_arr(data)

                if not p_norm:
                    zones_images_data.append(data[begin_data:end_data])
                else:
                    zones_images_data.append(data)

            fig, ax = plt.subplots(figsize=(30, 22))
            ax.set_facecolor('#FFFFFF')

            # plt.title(p_scene + ' scene (zone  ' + str(p_zone) + ') interval information SVD['+ str(begin_data) +', '+ str(end_data) +'], from scenes indices [' + str(begin_index) + ', '+ str(end_index) + '], ' + p_feature + ' feature, ' + p_mode + ', with step of ' + str(p_step) + ', svd norm ' + str(p_norm), fontsize=24)
            ax.set_ylabel('Component values', fontsize=28)
            ax.set_xlabel('Vector features', fontsize=28)

            ax.tick_params(labelsize=22)

            for id, data in enumerate(zones_images_data):

                p_label = p_scene + "_" + dt.get_scene_image_postfix(
                    images_path[id])

                if int(dt.get_scene_image_postfix(
                        images_path[id])) == int(threshold_image_zone):
                    ax.plot(data,
                            label=p_label + ' (zone ' + str(p_zone) +
                            ' threshold)',
                            lw=4,
                            color='red')
                else:
                    ax.plot(data, label=p_label)

            plt.legend(bbox_to_anchor=(0.60, 0.98),
                       loc=2,
                       borderaxespad=0.2,
                       fontsize=24)

            start_ylim, end_ylim = p_ylim
            plt.ylim(start_ylim, end_ylim)

            plot_name = p_scene + '_zone_' + str(
                p_zone) + '_' + p_feature + '_' + str(
                    p_step) + '_' + p_mode + '_' + str(p_norm) + '.png'
            plt.savefig(plot_name, facecolor=ax.get_facecolor())
def main():

    parser = argparse.ArgumentParser(
        description="Read and compute entropy data file (using diff)")

    parser.add_argument('--data',
                        type=str,
                        help='entropy file data to read and compute')
    parser.add_argument('--norm',
                        type=int,
                        help='normalize or not entropy',
                        choices=[0, 1],
                        default=0)
    parser.add_argument('--std',
                        type=int,
                        help='multiply result by current std',
                        choices=[0, 1],
                        default=0)
    parser.add_argument('--output', type=str, help='prediction file used')
    parser.add_argument('--train_scenes',
                        type=str,
                        help='list of train scenes used',
                        default='')

    args = parser.parse_args()

    p_data = args.data
    p_norm = args.norm
    p_std = args.std
    p_output = args.output
    p_train_scenes = args.train_scenes.split(',')

    # list all possibles choices of renderer
    scenes_list = cfg.scenes_names
    scenes_indices = cfg.scenes_indices

    # getting scenes from indexes user selection
    scenes_selected = []

    # if training set is empty then use all scenes
    if p_train_scenes[0] == '':
        scenes_selected = scenes_list
    else:
        for scene_id in p_train_scenes:
            index = scenes_indices.index(scene_id.strip())
            scenes_selected.append(scenes_list[index])

    print("Scenes used in train:", scenes_selected)

    # create output path if not exists
    threshold_path = os.path.join(cfg.output_data_folder, cfg.data_thresholds)
    p_output_path = os.path.join(threshold_path, p_output)
    if not os.path.exists(threshold_path):
        os.makedirs(threshold_path)

    # read line by line file to estimate threshold entropy stopping criteria
    with open(p_data, 'r') as f:
        lines = f.readlines()

        diff_entropy_found = []
        for line in lines:

            data = line.split(';')

            scene_name = data[0]

            # only if scene is used for training part
            if scene_name in scenes_selected:

                threshold = data[3]
                image_indices = data[4].split(',')
                entropy_list = data[5].split(',')

                # one element is removed using this function (first element of list for computing first difference)
                entropy_diff_list = get_zone_diff_entropy(entropy_list, p_std)
                image_indices_without_first = image_indices[1:]

                found_index = 0
                for index, v in enumerate(image_indices_without_first):

                    if int(v) > int(threshold):
                        found_index = index
                        break

                if p_norm:
                    diff_entropy_kept = utils.normalize_arr(
                        entropy_diff_list[:found_index + 1])[-1]
                else:
                    diff_entropy_kept = entropy_diff_list[found_index]

                # keep the value found for this zone
                diff_entropy_found.append(diff_entropy_kept)

        mean_entropy_diff = sum(diff_entropy_found) / len(diff_entropy_found)
        std_entropy_diff = np.std(diff_entropy_found)

        print('mean', mean_entropy_diff)
        print('std', std_entropy_diff)

        with open(p_output_path, 'w') as f:
            print("Erase", p_output_path, "previous file if exists")

        # now we can predict threshold img using `mean_entropy_diff`
        for line in lines:
            data = line.split(';')

            scene_name = data[0]
            zone_index = data[1]
            zone_index_str = data[2]
            threshold = data[3]
            image_indices = data[4].split(',')
            entropy_list = data[5].split(',')

            # this function returns one element fewer than its input (the first value is only used to compute the first difference)
            entropy_diff_list = get_zone_diff_entropy(entropy_list, p_std)
            image_indices_without_first = image_indices[1:]

            # default to the last index (if no stopping criterion is found)
            found_index = len(image_indices_without_first) - 1
            for index, v in enumerate(entropy_diff_list):

                if p_norm:
                    current_v = utils.normalize_arr(entropy_diff_list[:index +
                                                                      1])[-1]
                else:
                    current_v = v

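                # stopping criterion: stop as soon as the scaled current
                # difference falls below the mean difference observed at the
                # human thresholds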
                if mean_entropy_diff > current_v * k_factor:
                    found_index = index
                    break

            threshold_found = image_indices_without_first[found_index]

            with open(p_output_path, 'a') as f:
                f.write(';'.join([
                    scene_name, zone_index, zone_index_str, threshold,
                    threshold_found,
                    str(mean_entropy_diff),
                    str(std_entropy_diff),
                    str(p_norm)
                ]) + '\n')
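
# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original script): `get_zone_diff_entropy` is
# called above but never defined in this listing. From how its output is
# consumed (one element shorter than its input, with an optional std
# weighting), a plausible implementation could look like this. The name
# `get_zone_diff_entropy_sketch` and the weighting scheme are assumptions.
# ---------------------------------------------------------------------------
def get_zone_diff_entropy_sketch(entropy_list, std=False):

    entropy_list = list(map(float, entropy_list))

    if std:
        entropy_list_norm = utils.normalize_arr(entropy_list)

    diff_list = []

    for i in range(1, len(entropy_list)):

        # absolute difference between two consecutive entropy values
        diff = abs(entropy_list[i] - entropy_list[i - 1])

        if std:
            # assumption: weight each difference by the running std of the
            # normalized values seen so far
            diff *= np.std(entropy_list_norm[:i + 1])

        diff_list.append(diff)

    return diff_list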
Example #28
def generate_data_feature(path, output, human_thresholds, data_type, mode):
    """
    @brief Method which generates all .csv files from scenes
    @param path, input folder which contains the scenes
    @param output, output folder name
    @param human_thresholds, human thresholds associated with each scene
    @param data_type, feature choice
    @param mode, normalization choice
    @return nothing
    """

    scenes = os.listdir(path)
    # remove min max file from scenes folder
    scenes = [s for s in scenes if min_max_filename not in s]

    # keep in memory the min and max values found for this data_type
    min_val_found = sys.maxsize
    max_val_found = 0
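    # NOTE: initializing `max_val_found` to 0 assumes the feature values are
    # non-negative (which holds for SVD singular values)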

    output_path = os.path.join(cfg.output_data_generated, output)

    if not os.path.exists(output_path):
        os.makedirs(output_path)

    data_min_max_filename = os.path.join(output_path, data_type + min_max_filename)

    # walk through each scene
    for folder_scene in human_thresholds:

        print(folder_scene)
        scene_path = os.path.join(path, folder_scene)
        output_scene_path = os.path.join(output_path, folder_scene)

        if not os.path.exists(output_scene_path):
            os.makedirs(output_scene_path)

        # getting output filename
        output_svd_filename = data_type + "_" + mode + generic_output_file_svd

        # construct each zone folder name
        zones_folder = []
        svd_output_files = []

        # get zones list info
        for index in zones:
            index_str = str(index).zfill(2)

            current_zone = "zone"+index_str
            zones_folder.append(current_zone)

            zone_path = os.path.join(scene_path, current_zone)
            output_zone_path = os.path.join(output_scene_path, current_zone)

            if not os.path.exists(output_zone_path):
                os.makedirs(output_zone_path)

            svd_file_path = os.path.join(output_zone_path, output_svd_filename)

            # keep the writer open and store it; it is reused for every image
            svd_output_files.append(open(svd_file_path, 'w'))

        # get all images of folder
        scene_images = sorted([os.path.join(scene_path, img) for img in os.listdir(scene_path) if cfg.scene_image_extension in img])
        number_scene_image = len(scene_images)
            
        for id_img, img_path in enumerate(scene_images):
            
            current_image_postfix = dt.get_scene_image_postfix(img_path)

            current_img = Image.open(img_path)
            img_blocks = segmentation.divide_in_blocks(current_img, (200, 200))

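            # each 200x200 block corresponds to one zone and to the writer
            # opened above with the same index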
            for id_block, block in enumerate(img_blocks):

                ############################
                # feature computation part #
                ############################

                data = get_image_features(data_type, block)

                ##################
                # Data mode part #
                ##################

                # modify data depending on the mode
                if mode == 'svdne':

                    # getting max and min information from min_max_filename
                    with open(data_min_max_filename, 'r') as f:
                        min_val = float(f.readline())
                        max_val = float(f.readline())

                    data = utils.normalize_arr_with_range(data, min_val, max_val)

                if mode == 'svdn':
                    data = utils.normalize_arr(data)

                # track the min and max found over the dataset so that later normalization can use the whole data range
                if mode == 'svd':

                    current_min = data.min()
                    current_max = data.max()

                    if current_min < min_val_found:
                        min_val_found = current_min

                    if current_max > max_val_found:
                        max_val_found = current_max

                # now write data into current writer
                current_file = svd_output_files[id_block]

                # write the image index first
                current_file.write(current_image_postfix + ';')

                for val in data:
                    current_file.write(str(val) + ';')

                current_file.write('\n')

            print(data_type + "_" + mode + "_" + folder_scene + " - " + "{0:.2f}".format((id_img + 1) / number_scene_image * 100.) + "%")
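            # ANSI escape code: move the cursor up one line so the next
            # progress update overwrites the previous one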
            sys.stdout.write("\033[F")

        for f in svd_output_files:
            f.close()

        print('\n')

    # save the min and max values found into the min/max file
    if mode == 'svd':
        with open(data_min_max_filename, 'w') as f:
            f.write(str(min_val_found) + '\n')
            f.write(str(max_val_found) + '\n')

    print("%s_%s : end of data generation\n" % (data_type, mode))