Example #1
def main():

    parser = argparse.ArgumentParser(
        description="Compute and prepare data augmentation of scenes")

    parser.add_argument('--output',
                        type=str,
                        help="output folder expected",
                        required=True)
    parser.add_argument('--number',
                        type=int,
                        help="number of images for each sample of scene",
                        required=True)
    parser.add_argument('--rotation',
                        type=int,
                        help="1 to also save 0/90/180/270 degree rotations of each extracted zone, 0 otherwise",
                        default=0)

    args = parser.parse_args()

    p_output = args.output
    p_number = args.number
    p_rotation = args.rotation

    scenes = os.listdir(path)
    # remove min max file from scenes folder
    scenes = [s for s in scenes if min_max_filename not in s]

    # getting image zone size and useful information
    zone_width, zone_height = image_zone_size
    scene_width, scene_height = image_scene_size
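    # number of zones per row in the scene grid (used to map a pixel position to a zone index)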
    nb_x_parts = math.floor(scene_width / zone_width)

    output_dataset_filename_path = os.path.join(p_output,
                                                data_augmented_filename)

    # iterate over each scene
    for folder_scene in scenes:

        scene_path = os.path.join(path, folder_scene)

        # build output scene path
        output_scene_path = os.path.join(p_output, folder_scene)

        if not os.path.exists(output_scene_path):
            os.makedirs(output_scene_path)

        # construct each zones folder name
        zones_folder = []
        zones_threshold = []

        # get zones list info
        for index in zones:
            index_str = str(index)
            if len(index_str) < 2:
                index_str = "0" + index_str

            current_zone = "zone" + index_str
            zones_folder.append(current_zone)

            zone_path = os.path.join(scene_path, current_zone)

            with open(os.path.join(zone_path, seuil_expe_filename)) as f:
                zones_threshold.append(int(f.readline()))

        possible_x, possible_y = possible_point_zone

        # get all images of folder
        scene_images = sorted([
            os.path.join(scene_path, img) for img in os.listdir(scene_path)
            if cfg.scene_image_extension in img
        ])
        number_scene_image = len(scene_images)

        for id_img, img_path in enumerate(scene_images):

            current_img = Image.open(img_path)
            img = np.array(current_img)

            for generation in range(p_number):
                p_x, p_y = (random.randrange(possible_x),
                            random.randrange(possible_y))

                # extract random zone into scene image
                extracted_img = img[p_y:(p_y + zone_height),
                                    p_x:(p_x + zone_width)]

                pil_extracted_img = Image.fromarray(extracted_img)

                # crop exactly aligned with a zone, hence reuse that zone's threshold
                if p_x % zone_width == 0 and p_y % zone_height == 0:

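                    # row-major zone index: column + row * number of zones per row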
                    zone_index = math.floor(p_x / zone_width) + math.floor(
                        p_y / zone_height) * nb_x_parts

                    final_threshold = int(zones_threshold[zone_index])
                else:
                    # get zone identifiers overlapped by this new zone (from its corner points)
                    p_top_left = (p_x, p_y)
                    p_top_right = (p_x + zone_width, p_y)
                    p_bottom_right = (p_x + zone_width, p_y + zone_height)
                    p_bottom_left = (p_x, p_y + zone_height)

                    points = [
                        p_top_left, p_top_right, p_bottom_right, p_bottom_left
                    ]

                    p_zones_indices = []

                    # for each corner point, get the zone index it falls into
                    for p in points:
                        x, y = p

                        zone_index = math.floor(x / zone_width) + math.floor(
                            y / zone_height) * nb_x_parts
                        p_zones_indices.append(zone_index)

                    p_thresholds = np.array(zones_threshold)[p_zones_indices]

                    # get proportions of pixels of img into each zone
                    overlaps = []

                    p_x_max = p_x + zone_width
                    p_y_max = p_y + zone_height

                    for index, zone_index in enumerate(p_zones_indices):
                        x_zone = (zone_index % nb_x_parts) * zone_width
                        y_zone = (math.floor(
                            zone_index / nb_x_parts)) * zone_height

                        x_max_zone = x_zone + zone_width
                        y_max_zone = y_zone + zone_height

                        # computation of overlap
                        # x_overlap = max(0, min(rect1.right, rect2.right) - max(rect1.left, rect2.left))
                        # y_overlap = max(0, min(rect1.bottom, rect2.bottom) - max(rect1.top, rect2.top))
                        x_overlap = max(
                            0,
                            min(x_max_zone, p_x_max) - max(x_zone, p_x))
                        y_overlap = max(
                            0,
                            min(y_max_zone, p_y_max) - max(y_zone, p_y))

                        overlapArea = x_overlap * y_overlap
                        overlaps.append(overlapArea)

                    overlapSum = sum(overlaps)
                    proportions = [item / overlapSum for item in overlaps]

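                    # final threshold = overlap-weighted average of the four candidate zones' thresholds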
                    final_threshold = 0

                    for index, proportion in enumerate(proportions):
                        final_threshold += proportion * p_thresholds[index]

                    final_threshold = int(final_threshold)

                # save image into new scene folder
                current_image_postfix = dt.get_scene_image_postfix(img_path)

                # prepare output img name
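                # label is 1 if the image index is below the computed threshold, 0 otherwise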
                label_img = (int(current_image_postfix) < final_threshold)
                extracted_image_name = dt.get_scene_image_prefix(
                    img_path) + '_' + str(generation) + '_x' + str(
                        p_x) + '_y' + str(p_y) + '_label' + str(int(label_img))

                # if requested, save rotated copies of the image with the same final threshold (data augmentation)
                # each saved image adds a line to the global .csv (scene; threshold; image index; label; filepath)
                if p_rotation:

                    # do rotations and save
                    rotations = [0, 90, 180, 270]

                    for rotation in rotations:

                        rotated_img_name = extracted_image_name + 'rot' + str(
                            rotation
                        ) + '_' + current_image_postfix + cfg.scene_image_extension
                        rotated_img_path = os.path.join(
                            output_scene_path, rotated_img_name)
                        saved_rotated_img_path = os.path.join(
                            folder_scene, rotated_img_name)
                        rotated_img = pil_extracted_img.rotate(rotation)
                        rotated_img.save(rotated_img_path)

                        csv_line = folder_scene + ';' + str(
                            final_threshold) + ';' + str(
                                int(current_image_postfix)) + ';' + str(
                                    int(label_img)
                                ) + ';' + saved_rotated_img_path + '\n'

                        with open(output_dataset_filename_path, 'a') as f:
                            f.write(csv_line)

                else:
                    extracted_image_name += current_image_postfix + cfg.scene_image_extension
                    extracted_image_path = os.path.join(
                        output_scene_path, extracted_image_name)
                    # keep the path written into the .csv relative to the scene folder,
                    # as done in the rotation branch above
                    saved_extracted_image_path = os.path.join(
                        folder_scene, extracted_image_name)

                    pil_extracted_img.save(extracted_image_path)

                    csv_line = folder_scene + ';' + str(
                        final_threshold) + ';' + str(
                            int(current_image_postfix)) + ';' + str(
                                int(label_img)
                            ) + ';' + saved_extracted_image_path + '\n'

                    with open(output_dataset_filename_path, 'a') as f:
                        f.write(csv_line)

                print(folder_scene + " - " +
                      "{0:.2f}".format(((id_img * p_number + generation) + 1) /
                                       (p_number * number_scene_image) *
                                       100.) + "%")
                sys.stdout.write("\033[F")

        print('\n', folder_scene, 'done...')
Example #2
def main():

    p_custom = False

    parser = argparse.ArgumentParser(
        description="Script which predicts threshold using specific model")

    parser.add_argument('--interval',
                        type=str,
                        help='Interval value to keep from svd',
                        default='"0, 200"')
    parser.add_argument('--model',
                        type=str,
                        help='.joblib or .json file (sklearn or keras model)')
    parser.add_argument('--mode',
                        type=str,
                        help='Kind of normalization level wished',
                        choices=normalization_choices)
    parser.add_argument('--feature',
                        type=str,
                        help='Feature data choice',
                        choices=features_choices)
    parser.add_argument(
        '--limit_detection',
        type=int,
        help='Specify number of same prediction to stop threshold prediction',
        default=2)
    parser.add_argument(
        '--custom',
        type=str,
        help='Name of custom min max file if use of renormalization of data',
        default=False)

    args = parser.parse_args()

    # keep the interval as its raw "begin, end" string: it is forwarded as-is to the prediction command
    p_interval = args.interval
    p_model_file = args.model
    p_mode = args.mode
    p_feature = args.feature
    p_limit = args.limit_detection
    p_custom = args.custom

    scenes = os.listdir(scenes_path)
    scenes = [s for s in scenes if min_max_filename not in s]

    # iterate over each scene
    for id_scene, folder_scene in enumerate(scenes):

        print(folder_scene)

        scene_path = os.path.join(scenes_path, folder_scene)

        threshold_expes = []
        threshold_expes_detected = []
        threshold_expes_counter = []
        threshold_expes_found = []

        # get all images of folder
        scene_images = sorted([
            os.path.join(scene_path, img) for img in os.listdir(scene_path)
            if cfg.scene_image_extension in img
        ])

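        # quality of the first and last image; the last one is also the default 'found' threshold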
        start_quality_image = dt.get_scene_image_quality(scene_images[0])
        end_quality_image = dt.get_scene_image_quality(scene_images[-1])

        # get zones list info
        for index in zones:
            index_str = str(index)
            if len(index_str) < 2:
                index_str = "0" + index_str
            zone_folder = "zone" + index_str

            threshold_path_file = os.path.join(
                os.path.join(scene_path, zone_folder), threshold_expe_filename)

            with open(threshold_path_file) as f:
                threshold = int(f.readline())
                threshold_expes.append(threshold)

                # initialize tracking data for the threshold detected by the model
                threshold_expes_detected.append(False)
                threshold_expes_counter.append(0)
                threshold_expes_found.append(
                    end_quality_image)  # by default use max

        check_all_done = False

        # for each image
        for img_path in scene_images:

            current_img = Image.open(img_path)
            current_quality_image = dt.get_scene_image_quality(img_path)
            current_image_postfix = dt.get_scene_image_postfix(img_path)

            img_blocks = segmentation.divide_in_blocks(current_img, (200, 200))

            check_all_done = all(threshold_expes_detected)

            if check_all_done:
                break

            for id_block, block in enumerate(img_blocks):

                # check only if necessary for this block (threshold not already detected)
                if not threshold_expes_detected[id_block]:

                    tmp_file_path = tmp_filename.replace(
                        '__model__',
                        p_model_file.split('/')[-1].replace('.joblib', '_'))
                    block.save(tmp_file_path)

                    python_cmd = "python prediction/predict_noisy_image_svd.py --image " + tmp_file_path + \
                                    " --interval '" + p_interval + \
                                    "' --model " + p_model_file  + \
                                    " --mode " + p_mode + \
                                    " --feature " + p_feature

                    # specify use of custom file for min max normalization
                    if p_custom:
                        python_cmd = python_cmd + ' --custom ' + p_custom

                    ## call command ##
                    p = subprocess.Popen(python_cmd,
                                         stdout=subprocess.PIPE,
                                         shell=True)

                    (output, err) = p.communicate()

                    ## Wait for result ##
                    p_status = p.wait()

                    prediction = int(output)

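                    # count consecutive predictions equal to 0; after p_limit of them the block threshold is considered found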
                    if prediction == 0:
                        threshold_expes_counter[
                            id_block] = threshold_expes_counter[id_block] + 1
                    else:
                        threshold_expes_counter[id_block] = 0

                    if threshold_expes_counter[id_block] == p_limit:
                        threshold_expes_detected[id_block] = True
                        threshold_expes_found[id_block] = current_quality_image

                    print(
                        str(id_block) + " : " + current_image_potfix + "/" +
                        str(threshold_expes[id_block]) + " => " +
                        str(prediction))

            print("------------------------")
            print("Scene " + str(id_scene + 1) + "/" + str(len(scenes)))
            print("------------------------")

        # end of scene => display of results

        # construct path using model name for saving threshold map folder
        model_threshold_path = os.path.join(
            threshold_map_folder,
            p_model_file.split('/')[-1].replace('.joblib', ''))

        # create threshold model path if necessary
        if not os.path.exists(model_threshold_path):
            os.makedirs(model_threshold_path)

        abs_dist = []

        map_filename = os.path.join(model_threshold_path,
                                    threshold_map_file_prefix + folder_scene)
        f_map = open(map_filename, 'w')

        line_information = ""

        # default header
        f_map.write('|  |    |    |  |\n')
        f_map.write('---|----|----|---\n')
        for id, threshold in enumerate(threshold_expes_found):

            line_information += str(threshold) + " / " + str(
                threshold_expes[id]) + " | "
            abs_dist.append(abs(threshold - threshold_expes[id]))

            if (id + 1) % 4 == 0:
                f_map.write(line_information + '\n')
                line_information = ""

        f_map.write(line_information + '\n')

        min_abs_dist = min(abs_dist)
        max_abs_dist = max(abs_dist)
        avg_abs_dist = sum(abs_dist) / len(abs_dist)

        f_map.write('\nScene information : ')
        f_map.write('\n- BEGIN : ' + str(start_quality_image))
        f_map.write('\n- END : ' + str(end_quality_image))

        f_map.write('\n\nDistances information : ')
        f_map.write('\n- MIN : ' + str(min_abs_dist))
        f_map.write('\n- MAX : ' + str(max_abs_dist))
        f_map.write('\n- AVG : ' + str(avg_abs_dist))

        f_map.write('\n\nOther information : ')
        f_map.write('\n- Detection limit : ' + str(p_limit))

        # by default print last line
        f_map.close()

        print("Scene " + str(id_scene + 1) + "/" + str(len(scenes)) +
              " Done..")
        print("------------------------")
Example #3
def display_data_scenes(data_type, p_scene, p_kind):
    """
    @brief Method which displays data from scene
    @param data_type, feature choice
    @param p_scene, scene choice
    @param p_kind, normalization choice
    @return nothing
    """

    scenes = os.listdir(path)
    # remove min max file from scenes folder
    scenes = [s for s in scenes if min_max_filename not in s]

    # iterate over each scene
    for folder_scene in scenes:

        if p_scene == folder_scene:
            print(folder_scene)
            scene_path = os.path.join(path, folder_scene)

            # construct each zones folder name
            zones_folder = []

            # get zones list info
            for index in zones:
                index_str = str(index)
                if len(index_str) < 2:
                    index_str = "0" + index_str

                current_zone = "zone" + index_str
                zones_folder.append(current_zone)

            zones_images_data = []
            threshold_info = []

            # get all images of folder
            scene_images = sorted([
                os.path.join(scene_path, img) for img in os.listdir(scene_path)
                if cfg.scene_image_extension in img
            ])

            start_image_path = scene_images[0]
            end_image_path = scene_images[-1]

            start_quality_image = dt.get_scene_image_quality(scene_images[0])
            end_quality_image = dt.get_scene_image_quality(scene_images[-1])

            for id_zone, zone_folder in enumerate(zones_folder):

                zone_path = os.path.join(scene_path, zone_folder)

                # get threshold information
                path_seuil = os.path.join(zone_path, seuil_expe_filename)

                # open threshold path and get this information
                with open(path_seuil, "r") as seuil_file:
                    threshold_learned = int(seuil_file.readline().strip())

                threshold_image_found = False

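                # find the first image whose quality exceeds the learned threshold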
                for img_path in scene_images:
                    current_quality_image = dt.get_scene_image_quality(
                        img_path)

                    if threshold_learned < int(current_quality_image
                                               ) and not threshold_image_found:

                        threshold_image_found = True
                        threshold_image_path = img_path

                        threshold_image = dt.get_scene_image_postfix(img_path)
                        threshold_info.append(threshold_image)

                # all indexes of picture to plot
                images_path = [
                    start_image_path, threshold_image_path, end_image_path
                ]
                images_data = []

                for img_path in images_path:

                    current_img = Image.open(img_path)
                    img_blocks = segmentation.divide_in_blocks(
                        current_img, (200, 200))

                    # getting expected block id
                    block = img_blocks[id_zone]

                    data = get_image_features(data_type, block)

                    ##################
                    # Data mode part #
                    ##################

                    # modify data depending mode

                    if p_kind == 'svdn':
                        data = utils.normalize_arr(data)

                    if p_kind == 'svdne':
                        path_min_max = os.path.join(
                            path, data_type + min_max_filename)

                        with open(path_min_max, 'r') as f:
                            min_val = float(f.readline())
                            max_val = float(f.readline())

                        data = utils.normalize_arr_with_range(
                            data, min_val, max_val)

                    # append of data
                    images_data.append(data)

                zones_images_data.append(images_data)

            fig = plt.figure(figsize=(8, 8))
            fig.suptitle(data_type + " values for " + p_scene +
                         " scene (normalization : " + p_kind + ")",
                         fontsize=20)

            for id, data in enumerate(zones_images_data):
                fig.add_subplot(4, 4, (id + 1))
                plt.plot(data[0], label='Noisy_' + start_quality_image)
                plt.plot(data[1], label='Threshold_' + threshold_info[id])
                plt.plot(data[2], label='Reference_' + end_quality_image)
                plt.ylabel(data_type + ' SVD, ZONE_' + str(id + 1),
                           fontsize=18)
                plt.xlabel('Vector features', fontsize=18)
                plt.legend(bbox_to_anchor=(0.5, 1),
                           loc=2,
                           borderaxespad=0.2,
                           fontsize=18)
                plt.ylim(0, 0.1)
            plt.show()
def display_data_scenes(p_scene, p_bits, p_shifted):
    """
    @brief Method which displays Lab low-bits SVD data from a scene
    @param p_scene, scene we want to show values of
    @param p_bits, number of bits expected
    @param p_shifted, number of bits the selection is shifted by
    @return nothing
    """

    scenes = os.listdir(path)
    # remove min max file from scenes folder
    scenes = [s for s in scenes if min_max_filename not in s]

    # iterate over each scene
    for folder_scene in scenes:

        if p_scene == folder_scene:
            print(folder_scene)
            scene_path = os.path.join(path, folder_scene)

            # construct each zones folder name
            zones_folder = []

            # get zones list info
            for index in zones:
                index_str = str(index)
                if len(index_str) < 2:
                    index_str = "0" + index_str

                current_zone = "zone" + index_str
                zones_folder.append(current_zone)

            zones_images_data = []
            threshold_info = []

            # get all images of folder
            scene_images = sorted([
                os.path.join(scene_path, img) for img in os.listdir(scene_path)
                if cfg.scene_image_extension in img
            ])

            start_image_path = scene_images[0]
            end_image_path = scene_images[-1]

            start_quality_image = dt.get_scene_image_quality(scene_images[0])
            end_quality_image = dt.get_scene_image_quality(scene_images[-1])

            for id_zone, zone_folder in enumerate(zones_folder):

                zone_path = os.path.join(scene_path, zone_folder)

                # get threshold information
                path_seuil = os.path.join(zone_path, seuil_expe_filename)

                # open threshold path and get this information
                with open(path_seuil, "r") as seuil_file:
                    threshold_learned = int(seuil_file.readline().strip())

                threshold_image_found = False

                # for each image
                for img_path in scene_images:
                    current_quality_image = dt.get_scene_image_quality(
                        img_path)

                    if threshold_learned < int(current_quality_image
                                               ) and not threshold_image_found:

                        threshold_image_found = True
                        threshold_image_path = img_path

                        threshold_image = dt.get_scene_image_postfix(img_path)
                        threshold_info.append(threshold_image)

                # all indexes of picture to plot
                images_path = [
                    start_image_path, threshold_image_path, end_image_path
                ]
                images_data = []

                for img_path in images_path:

                    current_img = Image.open(img_path)
                    img_blocks = segmentation.divide_in_blocks(
                        current_img, (200, 200))

                    # getting expected block id
                    block = img_blocks[id_zone]

                    # get data from mode
                    # Here you can add the way you compute data
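                    # keep p_bits bits of the Lab L channel, shifted by p_shifted, then compute its singular values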
                    low_bits_block = transform.rgb_to_LAB_L_bits(
                        block, (p_shifted + 1, p_shifted + p_bits + 1))
                    data = compression.get_SVD_s(low_bits_block)

                    ##################
                    # Data mode part #
                    ##################

                    # modify data depending mode
                    data = utils.normalize_arr(data)
                    images_data.append(data)

                zones_images_data.append(images_data)

            fig = plt.figure(figsize=(8, 8))
            fig.suptitle('Lab SVD ' + str(p_bits) + ' bits shifted by ' +
                         str(p_shifted) + " for " + p_scene + " scene",
                         fontsize=20)

            for id, data in enumerate(zones_images_data):
                fig.add_subplot(4, 4, (id + 1))
                plt.plot(data[0], label='Noisy_' + start_quality_image)
                plt.plot(data[1], label='Threshold_' + threshold_info[id])
                plt.plot(data[2], label='Reference_' + end_quality_image)
                plt.ylabel('Lab SVD ' + str(p_bits) + ' bits shifted by ' +
                           str(p_shifted) + ', ZONE_' + str(id + 1),
                           fontsize=14)
                plt.xlabel('Vector features', fontsize=16)
                plt.legend(bbox_to_anchor=(0.5, 1),
                           loc=2,
                           borderaxespad=0.2,
                           fontsize=14)
                plt.ylim(0, 0.1)
            plt.show()
def display_svd_values(p_scene, p_interval, p_indices, p_feature, p_mode,
                       p_step, p_norm, p_error, p_ylim):
    """
    @brief Method which gives information about svd curves from zone of picture
    @param p_scene, scene expected to show svd values
    @param p_interval, interval [begin, end] of svd data to display
    @param p_indices, interval [begin, end] of samples or minutes from the render engine
    @param p_feature, feature computed to show
    @param p_mode, normalization's mode
    @param p_step, step between image indices
    @param p_norm, normalization or not of selected svd data
    @param p_error, error feature used to display
    @param p_ylim, ylim choice to better display of data
    @return nothing
    """

    max_value_svd = 0
    min_value_svd = sys.maxsize

    scenes = os.listdir(path)
    # remove min max file from scenes folder
    scenes = [s for s in scenes if min_max_filename not in s]

    begin_data, end_data = p_interval
    begin_index, end_index = p_indices

    # iterate over each scene
    for folder_scene in scenes:

        if p_scene == folder_scene:
            scene_path = os.path.join(path, folder_scene)

            # construct each zones folder name
            zones_folder = []

            # get zones list info
            for index in zones:
                index_str = str(index)
                if len(index_str) < 2:
                    index_str = "0" + index_str

                current_zone = "zone" + index_str
                zones_folder.append(current_zone)

            images_data = []
            images_path = []

            threshold_learned_zones = []

            # get all images of folder
            scene_images = sorted([
                os.path.join(scene_path, img) for img in os.listdir(scene_path)
                if cfg.scene_image_extension in img
            ])
            number_scene_image = len(scene_images)

            for id, zone_folder in enumerate(zones_folder):

                # get threshold information

                zone_path = os.path.join(scene_path, zone_folder)
                path_seuil = os.path.join(zone_path, seuil_expe_filename)

                # open threshold path and get this information
                with open(path_seuil, "r") as seuil_file:
                    threshold_learned = int(seuil_file.readline().strip())
                    threshold_learned_zones.append(threshold_learned)

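            # average of the per-zone learned thresholds for this scene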
            threshold_mean = np.mean(np.asarray(threshold_learned_zones))
            threshold_image_found = False

            svd_data = []

            # for each image
            for id_img, img_path in enumerate(scene_images):

                current_quality_image = dt.get_scene_image_quality(img_path)

                img = Image.open(img_path)

                svd_values = get_image_features(p_feature, img)

                if p_norm:
                    svd_values = svd_values[begin_data:end_data]

                # update min max values
                min_value = svd_values.min()
                max_value = svd_values.max()

                if min_value < min_value_svd:
                    min_value_svd = min_value

                if max_value > max_value_svd:
                    max_value_svd = max_value

                # keep in memory used data
                if current_quality_image % p_step == 0:
                    if current_quality_image >= begin_index and current_quality_image <= end_index:
                        images_path.append(img_path)
                        svd_data.append(svd_values)

                    if threshold_mean < current_quality_image and not threshold_image_found:

                        threshold_image_found = True
                        threshold_image_zone = dt.get_scene_image_postfix(
                            img_path)

                print('%.2f%%' % ((id_img + 1) / number_scene_image * 100))
                sys.stdout.write("\033[F")

            previous_data = []
            error_data = [0.]

            for id, data in enumerate(svd_data):

                current_data = data

                if not p_norm:
                    current_data = current_data[begin_data:end_data]

                if p_mode == 'svdn':
                    current_data = utils.normalize_arr(current_data)

                if p_mode == 'svdne':
                    current_data = utils.normalize_arr_with_range(
                        current_data, min_value_svd, max_value_svd)

                images_data.append(current_data)

                # use of whole image data for computation of ssim or psnr
                if p_error == 'ssim' or p_error == 'psnr':
                    current_data = np.asarray(Image.open(images_path[id]))

                if len(previous_data) > 0:

                    current_error = get_error_distance(p_error, previous_data,
                                                       current_data)
                    error_data.append(current_error)

                if len(previous_data) == 0:
                    previous_data = current_data

            # display all data using matplotlib (configure plt)
            gridsize = (3, 2)

            # fig, (ax1, ax2) = plt.subplots(nrows=2, ncols=1, figsize=(30, 22))
            fig = plt.figure(figsize=(30, 22))
            ax1 = plt.subplot2grid(gridsize, (0, 0), colspan=2, rowspan=2)
            ax2 = plt.subplot2grid(gridsize, (2, 0), colspan=2)

            ax1.set_title(p_scene + ' scene interval information SVD[' +
                          str(begin_data) + ', ' + str(end_data) +
                          '], from scenes indices [' + str(begin_index) +
                          ', ' + str(end_index) + '], ' + p_feature +
                          ' feature, ' + p_mode + ', with step of ' +
                          str(p_step) + ', svd norm ' + str(p_norm),
                          fontsize=20)
            ax1.set_ylabel('Image samples or time (minutes) generation',
                           fontsize=14)
            ax1.set_xlabel('Vector features', fontsize=16)

            for id, data in enumerate(images_data):

                current_quality_image = dt.get_scene_image_quality(
                    images_path[id])
                current_quality_postfix = dt.get_scene_image_postfix(
                    images_path[id])

                if display_error:
                    p_label = p_scene + '_' + current_quality_postfix + " | " + p_error + ": " + str(
                        error_data[id])
                else:
                    p_label = p_scene + '_' + current_quality_postfix

                if current_quality_image == threshold_image_zone:
                    ax1.plot(data,
                             label=p_label + " (threshold mean)",
                             lw=4,
                             color='red')
                else:
                    ax1.plot(data, label=p_label)

            ax1.legend(bbox_to_anchor=(0.7, 1),
                       loc=2,
                       borderaxespad=0.2,
                       fontsize=14)

            start_ylim, end_ylim = p_ylim
            ax1.set_ylim(start_ylim, end_ylim)

            ax2.set_title(p_error + " information for whole step images")
            ax2.set_ylabel(p_error + ' error')
            ax2.set_xlabel('Number of samples per pixels or times')
            # ticks follow the images actually kept for display
            ax2.set_xticks(range(len(images_path)))
            ax2.set_xticklabels(
                list(map(dt.get_scene_image_quality, images_path)))
            ax2.plot(error_data)

            plot_name = p_scene + '_' + p_feature + '_' + str(
                p_step) + '_' + p_mode + '_' + str(p_norm) + '.png'
            plt.savefig(plot_name)
Example #6
def display_svd_values(p_scene, p_interval, p_indices, p_zone, p_feature,
                       p_mode, p_step, p_norm, p_ylim):
    """
    @brief Method which gives information about svd curves from zone of picture
    @param p_scene, scene expected to show svd values
    @param p_interval, interval [begin, end] of svd data to display
    @param p_indices, interval [begin, end] of samples or minutes from the render engine
    @param p_zone, zone's identifier of picture
    @param p_feature, feature computed to show
    @param p_mode, normalization's mode
    @param p_step, step of images indices
    @param p_norm, normalization or not of selected svd data
    @param p_ylim, ylim choice to better display of data
    @return nothing
    """

    scenes = os.listdir(path)
    # remove min max file from scenes folder
    scenes = [s for s in scenes if min_max_filename not in s]

    begin_data, end_data = p_interval
    begin_index, end_index = p_indices

    data_min_max_filename = os.path.join(path, p_feature + min_max_filename)

    # iterate over each scene
    for folder_scene in scenes:

        if p_scene == folder_scene:
            scene_path = os.path.join(path, folder_scene)
            # construct each zones folder name
            zones_folder = []

            # get zones list info
            for index in zones:
                index_str = str(index)
                if len(index_str) < 2:
                    index_str = "0" + index_str

                current_zone = "zone" + index_str
                zones_folder.append(current_zone)

            zones_images_data = []
            images_path = []

            zone_folder = zones_folder[p_zone]

            zone_path = os.path.join(scene_path, zone_folder)

            # get threshold information
            path_seuil = os.path.join(zone_path, seuil_expe_filename)

            # open threshold path and get this information
            with open(path_seuil, "r") as seuil_file:
                seuil_learned = int(seuil_file.readline().strip())

            threshold_image_found = False

            # get all images of folder
            scene_images = sorted([
                os.path.join(scene_path, img) for img in os.listdir(scene_path)
                if cfg.scene_image_extension in img
            ])

            # for each image
            for img_path in scene_images:

                current_quality_image = dt.get_scene_image_quality(img_path)

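                # keep only images sampled every p_step within the requested index interval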
                if current_quality_image % p_step == 0:
                    if current_quality_image >= begin_index and current_quality_image <= end_index:
                        images_path.append(img_path)

                    if seuil_learned < current_quality_image and not threshold_image_found:

                        threshold_image_found = True
                        threshold_image_zone = dt.get_scene_image_postfix(
                            img_path)

                        if img_path not in images_path:
                            images_path.append(img_path)

            for img_path in images_path:

                current_img = Image.open(img_path)
                img_blocks = segmentation.divide_in_blocks(
                    current_img, (200, 200))

                # getting expected block id
                block = img_blocks[p_zone]

                # get data from mode
                # Here you can add the way you compute data
                data = get_image_features(p_feature, block)

                # TODO : improve part of this code to get correct min / max values
                if p_norm:
                    data = data[begin_data:end_data]

                ##################
                # Data mode part #
                ##################

                if p_mode == 'svdne':

                    # getting max and min information from min_max_filename
                    if not p_norm:
                        with open(data_min_max_filename, 'r') as f:
                            min_val = float(f.readline())
                            max_val = float(f.readline())
                    else:
                        min_val = min_value_interval
                        max_val = max_value_interval

                    data = utils.normalize_arr_with_range(
                        data, min_val, max_val)

                if p_mode == 'svdn':
                    data = utils.normalize_arr(data)

                if not p_norm:
                    zones_images_data.append(data[begin_data:end_data])
                else:
                    zones_images_data.append(data)

            fig, ax = plt.subplots(figsize=(30, 22))
            ax.set_facecolor('#FFFFFF')

            # plt.title(p_scene + ' scene (zone  ' + str(p_zone) + ') interval information SVD['+ str(begin_data) +', '+ str(end_data) +'], from scenes indices [' + str(begin_index) + ', '+ str(end_index) + '], ' + p_feature + ' feature, ' + p_mode + ', with step of ' + str(p_step) + ', svd norm ' + str(p_norm), fontsize=24)
            ax.set_ylabel('Component values', fontsize=28)
            ax.set_xlabel('Vector features', fontsize=28)

            ax.tick_params(labelsize=22)

            for id, data in enumerate(zones_images_data):

                p_label = p_scene + "_" + dt.get_scene_image_postfix(
                    images_path[id])

                if int(dt.get_scene_image_postfix(
                        images_path[id])) == int(threshold_image_zone):
                    ax.plot(data,
                            label=p_label + ' (zone ' + str(p_zone) +
                            ' threshold)',
                            lw=4,
                            color='red')
                else:
                    ax.plot(data, label=p_label)

            plt.legend(bbox_to_anchor=(0.60, 0.98),
                       loc=2,
                       borderaxespad=0.2,
                       fontsize=24)

            start_ylim, end_ylim = p_ylim
            plt.ylim(start_ylim, end_ylim)

            plot_name = p_scene + '_zone_' + str(
                p_zone) + '_' + p_feature + '_' + str(
                    p_step) + '_' + p_mode + '_' + str(p_norm) + '.png'
            plt.savefig(plot_name, facecolor=ax.get_facecolor())
Example #7
def display_svd_values(p_interval, p_indices, p_metric, p_mode, p_step, p_norm, p_area, p_ylim):
    """
    @brief Method which gives information about svd curves from zone of picture
    @param p_interval, interval [begin, end] of svd data to display
    @param p_indices, indices to display
    @param p_metric, metric computed to show
    @param p_mode, normalization's mode
    @param p_step, step between image indices
    @param p_norm, normalization or not of selected svd data
    @param p_area, area method name to compute area under curve
    @param p_ylim, ylim choice to better display of data
    @return nothing
    """

    image_indices = []

    scenes = os.listdir(path)
    # remove min max file from scenes folder
    scenes = [s for s in scenes if min_max_filename not in s]

    begin_data, end_data = p_interval
    begin_index, end_index = p_indices

    # store all information about scenes
    scenes_area_data = []
    scenes_images_indices = []
    scenes_threshold_mean = []

    # iterate over each scene
    for folder_scene in scenes:

        max_value_svd = 0
        min_value_svd = sys.maxsize

        scene_path = os.path.join(path, folder_scene)

        # construct each zones folder name
        zones_folder = []

        # get zones list info
        for index in zones:
            index_str = str(index)
            if len(index_str) < 2:
                index_str = "0" + index_str

            current_zone = "zone"+index_str
            zones_folder.append(current_zone)

        # store data information for current scene
        images_data = []
        images_indices = []
        threshold_learned_zones = []

        # get all images of folder
        scene_images = sorted([os.path.join(scene_path, img) for img in os.listdir(scene_path) if cfg.scene_image_extension in img])
        number_scene_image = len(scene_images)

        for id, zone_folder in enumerate(zones_folder):

            # get threshold information
            zone_path = os.path.join(scene_path, zone_folder)
            path_seuil = os.path.join(zone_path, seuil_expe_filename)

            # open threshold path and get this information
            with open(path_seuil, "r") as seuil_file:
                threshold_learned = int(seuil_file.readline().strip())
                threshold_learned_zones.append(threshold_learned)

        threshold_mean = np.mean(np.asarray(threshold_learned_zones))
        threshold_image_found = False
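        # threshold position expressed in number of p_step steps (x position on the area curve)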
        scenes_threshold_mean.append(int(threshold_mean / p_step))

        svd_data = []

        # for each image
        for id_img, img_path in enumerate(scene_images):
            
            current_quality_image = dt.get_scene_image_quality(img_path)

            img = Image.open(img_path)

            svd_values = get_image_features(p_metric, img)

            if p_norm:
                svd_values = svd_values[begin_data:end_data]

            # update min max values
            min_value = svd_values.min()
            max_value = svd_values.max()

            if min_value < min_value_svd:
                min_value_svd = min_value

            if max_value > max_value_svd:
                max_value_svd = max_value

            # keep in memory used data
            if current_quality_image % p_step == 0:
                if current_quality_image >= begin_index and current_quality_image <= end_index:
                    images_indices.append(dt.get_scene_image_postfix(img_path))
                    svd_data.append(svd_values)

                if threshold_mean < current_quality_image and not threshold_image_found:

                    threshold_image_found = True

            print('%.2f%%' % ((id_img + 1) / number_scene_image * 100))
            sys.stdout.write("\033[F")


        # all indices of picture to plot
        print("Scene %s : %s" % (folder_scene, images_indices))

        scenes_images_indices.append(images_indices)

        area_data = []

        for id, data in enumerate(svd_data):

            current_data = data

            if not p_norm:
                current_data = current_data[begin_data:end_data]

            if p_mode == 'svdn':
                current_data = utils.normalize_arr(current_data)

            if p_mode == 'svdne':
                current_data = utils.normalize_arr_with_range(current_data, min_value_svd, max_value_svd)

            images_data.append(current_data)

            # do not use this script for 'sub_blocks_stats'
            current_area = get_area_under_curve(p_area, current_data)
            area_data.append(current_area)

        scenes_area_data.append(area_data)

    # display all data using matplotlib (configure plt)
    plt.title('Scenes area interval information SVD['+ str(begin_data) +', '+ str(end_data) +'], from scenes indices [' + str(begin_index) + ', '+ str(end_index) + '], ' + p_metric + ' metric, ' + p_mode + ', with step of ' + str(p_step) + ', svd norm ' + str(p_norm), fontsize=20)
    plt.ylabel('Image samples or time (minutes) generation', fontsize=14)
    plt.xlabel('Vector features', fontsize=16)

    for id, area_data in enumerate(scenes_area_data):

        threshold_id = 0
        scene_name = scenes[id]
        image_indices = scenes_images_indices[id]

        # one area-under-curve series per scene
        p_label = scene_name

        threshold_id = scenes_threshold_mean[id]

        print(p_label)

        plt.plot(area_data, label=p_label)
        #ax2.set_xticks(range(len(images_indices)))
        #ax2.set_xticklabels(list(map(int, images_indices)))
        if threshold_id != 0:
            print("Plot threshold ", threshold_id)
            plt.plot([threshold_id, threshold_id], [np.min(area_data), np.max(area_data)], 'k-', lw=2, color='red')


    # legend is added only after all curves have been plotted
    plt.legend(bbox_to_anchor=(0.7, 1), loc=2, borderaxespad=0.2, fontsize=14)

    start_ylim, end_ylim = p_ylim
    plt.ylim(start_ylim, end_ylim)

    plt.show()
def display_svd_values(p_scene, p_thresholds, p_interval, p_indices, p_feature,
                       p_mode, p_step, p_norm, p_ylim, p_label):
    """
    @brief Method which gives information about svd curves from zone of picture
    @param p_scene, scene (folder path) expected to show svd values
    @param p_thresholds, dictionary giving, for each scene name, its per-zone thresholds
    @param p_interval, interval [begin, end] of svd data to display
    @param p_indices, interval [begin, end] of samples or minutes from the render engine
    @param p_feature, feature computed to show
    @param p_mode, normalization's mode
    @param p_step, step between image indices
    @param p_norm, normalization or not of selected svd data
    @param p_ylim, ylim choice to better display of data
    @param p_label, scene label for the plot title
    @return nothing
    """

    max_value_svd = 0
    min_value_svd = sys.maxsize

    begin_data, end_data = p_interval
    begin_index, end_index = p_indices

    # process the selected scene
    scene_path = p_scene

    # construct each zones folder name
    zones_folder = []

    # get zones list info
    for index in zones:
        index_str = str(index)
        if len(index_str) < 2:
            index_str = "0" + index_str

        current_zone = "zone" + index_str
        zones_folder.append(current_zone)

    images_data = []
    images_indices = []

    threshold_learned_zones = []

    # get all images of folder
    scene_images = sorted([
        os.path.join(scene_path, img) for img in os.listdir(scene_path)
        if cfg.scene_image_extension in img
    ])
    number_scene_image = len(scene_images)

    _, scene_name = os.path.split(p_scene)
    threshold_learned_zones = p_thresholds[scene_name]

    threshold_mean = np.mean(np.asarray(threshold_learned_zones))
    threshold_image_found = False

    svd_data = []

    # for each image
    for id_img, img_path in enumerate(scene_images):

        current_quality_image = dt.get_scene_image_quality(img_path)

        img = Image.open(img_path)

        svd_values = get_image_features(p_feature, img)

        if p_norm:
            svd_values = svd_values[begin_data:end_data]

        #svd_values = np.asarray([math.log(x) for x in svd_values])

        # update min max values
        min_value = svd_values.min()
        max_value = svd_values.max()

        if min_value < min_value_svd:
            min_value_svd = min_value

        if max_value > max_value_svd:
            max_value_svd = max_value

        # keep in memory used data
        if current_quality_image % p_step == 0:
            if current_quality_image >= begin_index and current_quality_image <= end_index:

                images_indices.append(dt.get_scene_image_postfix(img_path))
                svd_data.append(svd_values)

        if threshold_mean < current_quality_image and not threshold_image_found:

            threshold_image_found = True
            threshold_image_zone = current_quality_image

            print("Quality mean : ", current_quality_image, "\n")

            if dt.get_scene_image_postfix(img_path) not in images_indices:
                images_indices.append(dt.get_scene_image_postfix(img_path))

        print('%.2f%%' % ((id_img + 1) / number_scene_image * 100))
        sys.stdout.write("\033[F")

    # all indices of picture to plot
    print(images_indices)

    for id, data in enumerate(svd_data):

        # current_data = [ math.log10(d + 1.) for d in data ]
        # print(current_data)

        current_data = data

        if not p_norm:
            current_data = current_data[begin_data:end_data]

        if p_mode == 'svdn':
            current_data = utils.normalize_arr(current_data)

        if p_mode == 'svdne':
            current_data = utils.normalize_arr_with_range(
                current_data, min_value_svd, max_value_svd)

        images_data.append(current_data)

    # display all data using matplotlib (configure plt)
    fig, ax = plt.subplots(figsize=(30, 15))
    ax.set_facecolor('#FFFFFF')
    #fig.patch.set_facecolor('#F9F9F9')

    ax.tick_params(labelsize=26)
    #plt.rc('xtick', labelsize=22)
    #plt.rc('ytick', labelsize=22)

    #plt.title(p_scene + ' scene interval information SVD['+ str(begin_data) +', '+ str(end_data) +'], from scenes indices [' + str(begin_index) + ', '+ str(end_index) + '], ' + p_feature + ' feature, ' + p_mode + ', with step of ' + str(p_step) + ', svd norm ' + str(p_norm), fontsize=24)
    ax.set_ylabel('Component values', fontsize=36)
    ax.set_xlabel('Singular value component indices', fontsize=36)

    for id, data in enumerate(images_data):

        #p_label = p_scene + "_" + images_indices[id]
        p_label = images_indices[id] + " samples"

        if int(images_indices[id]) == int(threshold_image_zone):
            ax.plot(data,
                    label=p_label + " (threshold mean)",
                    lw=8,
                    color='red')
        else:
            ax.plot(data, label=p_label, lw=4)

    plt.legend(bbox_to_anchor=(0.60, 0.98),
               loc=2,
               borderaxespad=0.2,
               fontsize=32)

    start_ylim, end_ylim = p_ylim
    ax.set_ylim(start_ylim, end_ylim)

    plot_name = scene_name + '_' + p_feature + '_' + str(
        p_step) + '_' + p_mode + '_' + str(p_norm) + '.png'
    # plt.title('Tend of Singular values at different samples of ' + p_label + ' scene', fontsize=40)
    plt.savefig(plot_name, transparent=True)
Example #9
def generate_data_feature(path, output, human_thresholds, data_type, mode):
    """
    @brief Method which generates all .csv files from scenes
    @param path, input folder which contains the scenes
    @param output, name of the output folder
    @param human_thresholds, per-scene human thresholds (iterated to select the scenes to process)
    @param data_type, feature choice
    @param mode, normalization choice
    @return nothing
    """

    scenes = os.listdir(path)
    # remove min max file from scenes folder
    scenes = [s for s in scenes if min_max_filename not in s]

    # keep in memory min and max data found from data_type
    min_val_found = sys.maxsize
    max_val_found = 0

    output_path = os.path.join(cfg.output_data_generated, output)

    if not os.path.exists(output_path):
        os.makedirs(output_path)

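    # file storing the global min / max of raw 'svd' values; read back for 'svdne' normalization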
    data_min_max_filename = os.path.join(output_path, data_type + min_max_filename)

    # iterate over each scene
    for folder_scene in human_thresholds:

        print(folder_scene)
        scene_path = os.path.join(path, folder_scene)
        output_scene_path = os.path.join(output_path, folder_scene)

        if not os.path.exists(output_scene_path):
            os.makedirs(output_scene_path)

        # getting output filename
        output_svd_filename = data_type + "_" + mode + generic_output_file_svd

        # construct each zones folder name
        zones_folder = []
        svd_output_files = []

        # get zones list info
        for index in zones:
            index_str = str(index)
            if len(index_str) < 2:
                index_str = "0" + index_str

            current_zone = "zone"+index_str
            zones_folder.append(current_zone)

            zone_path = os.path.join(scene_path, current_zone)
            output_zone_path = os.path.join(output_scene_path, current_zone)

            if not os.path.exists(output_zone_path):
                os.makedirs(output_zone_path)

            svd_file_path = os.path.join(output_zone_path, output_svd_filename)

            # add writer into list
            svd_output_files.append(open(svd_file_path, 'w'))

        # get all images of folder
        scene_images = sorted([os.path.join(scene_path, img) for img in os.listdir(scene_path) if cfg.scene_image_extension in img])
        number_scene_image = len(scene_images)
            
        for id_img, img_path in enumerate(scene_images):
            
            current_image_postfix = dt.get_scene_image_postfix(img_path)

            current_img = Image.open(img_path)
            img_blocks = segmentation.divide_in_blocks(current_img, (200, 200))

            for id_block, block in enumerate(img_blocks):

                ###########################
                # feature computation part #
                ###########################

                data = get_image_features(data_type, block)

                ##################
                # Data mode part #
                ##################

                # modify data depending mode
                if mode == 'svdne':

                    # getting max and min information from min_max_filename
                    with open(data_min_max_filename, 'r') as f:
                        min_val = float(f.readline())
                        max_val = float(f.readline())

                    data = utils.normalize_arr_with_range(data, min_val, max_val)

                if mode == 'svdn':
                    data = utils.normalize_arr(data)

                # keep the min and max found over the whole dataset so data can be normalized later
                if mode == 'svd':

                    current_min = data.min()
                    current_max = data.max()

                    if current_min < min_val_found:
                        min_val_found = current_min

                    if current_max > max_val_found:
                        max_val_found = current_max

                # now write data into current writer
                current_file = svd_output_files[id_block]

                # add of index
                current_file.write(current_image_postfix + ';')

                for val in data:
                    current_file.write(str(val) + ";")

                current_file.write('\n')

            print(data_type + "_" + mode + "_" + folder_scene + " - " + "{0:.2f}".format((id_img + 1) / number_scene_image * 100.) + "%")
            sys.stdout.write("\033[F")

        for f in svd_output_files:
            f.close()

        print('\n')

    # save the min / max values found into the min_max file
    if mode == 'svd':
        with open(data_min_max_filename, 'w') as f:
            f.write(str(min_val_found) + '\n')
            f.write(str(max_val_found) + '\n')

    print("%s_%s : end of data generation\n" % (data_type, mode))