def construct_new_line(path_seuil, interval, line, choice, each, norm):
    begin, end = interval

    line_data = line.split(';')
    seuil = line_data[0]
    features = line_data[begin+1:end+1]

    # subsample: keep only every `each`-th value
    features = [float(m) for id, m in enumerate(features) if id % each == 0]

    # TODO : check whether this is always necessary (possible loss of information for SVD)
    if norm:
        if choice == 'svdne':
            features = utils.normalize_arr_with_range(features, min_value_interval, max_value_interval)
        if choice == 'svdn':
            features = utils.normalize_arr(features)

    with open(path_seuil, "r") as seuil_file:
        seuil_learned = int(seuil_file.readline().strip())

    if seuil_learned > int(seuil):
        line = '1'
    else:
        line = '0'

    for val in features:
        line += ';'
        line += str(val)
    line += '\n'

    return line
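
# A minimal usage sketch (not from the original source): with norm=False the
# function only needs the code above; the threshold file and data line below
# are hypothetical.
import os
import tempfile

with tempfile.NamedTemporaryFile('w', suffix='_seuil', delete=False) as tmp:
    tmp.write('50\n')  # hypothetical learned threshold

line = "35;0.10;0.20;0.30;0.40"
# interval (0, 4) keeps the 4 values after the label, `each=2` keeps every 2nd
new_line = construct_new_line(tmp.name, (0, 4), line, 'svd', 2, False)
print(new_line)  # -> "1;0.1;0.3\n" because learned threshold 50 > 35
os.unlink(tmp.name)
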
def get_svd_mean_and_image_rotations(img_path):

    print("Extract features from... " + img_path)
    img = np.asarray(Image.open(img_path))

    rotations = []
    svd_data_rotation = []

    for i in range(4):
        rotations.append(processing.rotate_image(img, (i + 1) * 90, pil=False))
        svd_data_rotation.append(processing.get_LAB_L_SVD_s(rotations[i]))
        # Image.fromarray(rotations[i]).show()  # debug: preview each rotation

    mean_image = processing.fusion_images(rotations, pil=False)
    mean_data = processing.get_LAB_L_SVD_s(mean_image)

    # read the global min / max values computed over the dataset
    with open(os.path.join(data_folder, "lab_min_max_values"), 'r') as f:
        min_val = float(f.readline())
        max_val = float(f.readline())

    mean_data = utils.normalize_arr_with_range(mean_data, min_val, max_val)

    return [
        utils.normalize_arr_with_range(data, min_val, max_val)
        for data in svd_data_rotation
    ], mean_data
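
# The `utils` normalization helpers are referenced throughout these examples
# but never shown. A plain min-max sketch of what they presumably compute
# (names match the calls above; the actual repo implementation may differ):
import numpy as np

def normalize_arr(arr):
    # scale values into [0, 1] using the array's own min / max
    arr = np.asarray(arr, dtype=float)
    return (arr - arr.min()) / (arr.max() - arr.min())

def normalize_arr_with_range(arr, min_value, max_value):
    # scale values into [0, 1] using an externally supplied range
    arr = np.asarray(arr, dtype=float)
    return (arr - min_value) / (max_value - min_value)
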
Example #3
def construct_new_line(path_seuil, interval, line, choice, each, norm):
    begin, end = interval

    line_data = line.split(';')
    seuil = line_data[0]
    features = line_data[begin + 1:end + 1]

    features = [float(m) for id, m in enumerate(features) if id % each == 0]

    if norm:
        if choice == 'svdne':
            features = utils.normalize_arr_with_range(features,
                                                      min_value_interval,
                                                      max_value_interval)
        if choice == 'svdn':
            features = utils.normalize_arr(features)

    with open(path_seuil, "r") as seuil_file:
        seuil_learned = int(seuil_file.readline().strip())

    if seuil_learned > int(seuil):
        line = '1'
    else:
        line = '0'

    for val in features:
        line += ';'
        line += str(val)
    line += '\n'

    return line
Example #4
def construct_new_line(interval, line_data, choice, each, norm):
    begin, end = interval

    label = line_data[2]
    features = line_data[begin+3:end+3]
    
    # subsample: keep only every `each`-th value
    features = [float(m) for id, m in enumerate(features) if id % each == 0]

    # TODO : check whether this is always necessary (possible loss of information for SVD)
    if norm:
        if choice == 'svdne':
            features = utils.normalize_arr_with_range(features, min_value_interval, max_value_interval)
        if choice == 'svdn':
            features = utils.normalize_arr(features)

    line = label

    for val in features:
        line += ';'
        line += str(val)
    line += '\n'

    return line
def construct_new_line(threshold, interval, line, choice, each, norm):
    begin, end = interval

    line_data = line.split(';')
    seuil = line_data[0]
    features = line_data[begin + 1:end + 1]

    features = [float(m) for id, m in enumerate(features) if id % each == 0]

    if norm:
        if choice == 'svdne':
            features = utils.normalize_arr_with_range(features,
                                                      min_value_interval,
                                                      max_value_interval)
        if choice == 'svdn':
            features = utils.normalize_arr(features)

    if threshold > int(seuil):
        line = '1'
    else:
        line = '0'

    for val in features:
        line += ';'
        line += str(val)
    line += '\n'

    return line
def construct_new_line(path_seuil, indices, line, choice, norm):

    # shift indices by one to skip the label column
    indices = np.array(indices) + 1

    line_data = np.array(line.split(';'))
    seuil = line_data[0]
    features = line_data[indices]
    features = features.astype('float32')

    # TODO : check whether this is always necessary (possible loss of information for SVD)
    if norm:
        if choice == 'svdne':
            features = utils.normalize_arr_with_range(features,
                                                      min_value_interval,
                                                      max_value_interval)
        if choice == 'svdn':
            features = utils.normalize_arr(features)

    with open(path_seuil, "r") as seuil_file:
        seuil_learned = int(seuil_file.readline().strip())

    if seuil_learned > int(seuil):
        line = '1'
    else:
        line = '0'

    for val in features:
        line += ';'
        line += str(val)
    line += '\n'

    return line
Example #7
def display_svd_values(p_interval, p_indices, p_metric, p_mode, p_step, p_norm, p_area, p_ylim):
    """
    @brief Method which gives information about svd curves from zone of picture
    @param p_interval, interval [begin, end] of svd data to display
    @param p_indices, indices to display
    @param p_metric, feature computed to show
    @param p_mode, normalization mode
    @param p_step, step between image indices to keep
    @param p_norm, normalization or not of selected svd data
    @param p_area, area method name to compute area under curve
    @param p_ylim, ylim choice to better display of data
    @return nothing
    """

    image_indices = []

    scenes = os.listdir(path)
    # remove min max file from scenes folder
    scenes = [s for s in scenes if min_max_filename not in s]

    begin_data, end_data = p_interval
    begin_index, end_index = p_indices

    # Store all informations about scenes
    scenes_area_data = []
    scenes_images_indices = []
    scenes_threshold_mean = []

    # iterate over each scene
    for folder_scene in scenes:

        max_value_svd = 0
        min_value_svd = sys.maxsize

        scene_path = os.path.join(path, folder_scene)

        # construct each zones folder name
        zones_folder = []

        # get zones list info
        for index in zones:
            index_str = str(index)
            if len(index_str) < 2:
                index_str = "0" + index_str

            current_zone = "zone"+index_str
            zones_folder.append(current_zone)

        # store data information for current scene
        images_data = []
        images_indices = []
        threshold_learned_zones = []

        # get all images of folder
        scene_images = sorted([os.path.join(scene_path, img) for img in os.listdir(scene_path) if cfg.scene_image_extension in img])
        number_scene_image = len(scene_images)

        for id, zone_folder in enumerate(zones_folder):

            # get threshold information
            zone_path = os.path.join(scene_path, zone_folder)
            path_seuil = os.path.join(zone_path, seuil_expe_filename)

            # open the threshold file and read its value
            with open(path_seuil, "r") as seuil_file:
                threshold_learned = int(seuil_file.readline().strip())
                threshold_learned_zones.append(threshold_learned)

        threshold_mean = np.mean(np.asarray(threshold_learned_zones))
        threshold_image_found = False
        scenes_threshold_mean.append(int(threshold_mean / p_step))

        svd_data = []

        # for each image
        for id_img, img_path in enumerate(scene_images):
            
            current_quality_image = dt.get_scene_image_quality(img_path)

            img = Image.open(img_path)

            svd_values = get_image_features(p_metric, img)

            if p_norm:
                svd_values = svd_values[begin_data:end_data]

            # update min max values
            min_value = svd_values.min()
            max_value = svd_values.max()

            if min_value < min_value_svd:
                min_value_svd = min_value

            if max_value > max_value_svd:
                max_value_svd = max_value

            # keep in memory used data
            if current_quality_image % p_step == 0:
                if current_quality_image >= begin_index and current_quality_image <= end_index:
                    images_indices.append(dt.get_scene_image_postfix(img_path))
                    svd_data.append(svd_values)

                if threshold_mean < current_quality_image and not threshold_image_found:

                    threshold_image_found = True

            print('%.2f%%' % ((id_img + 1) / number_scene_image * 100))
            sys.stdout.write("\033[F")


        # all indices of pictures to plot
        print("Scene %s : %s" % (folder_scene, images_indices))

        scenes_images_indices.append(images_indices)

        area_data = []

        for id, data in enumerate(svd_data):

            current_data = data

            if not p_norm:
                current_data = current_data[begin_data:end_data]

            if p_mode == 'svdn':
                current_data = utils.normalize_arr(current_data)

            if p_mode == 'svdne':
                current_data = utils.normalize_arr_with_range(current_data, min_value_svd, max_value_svd)

            images_data.append(current_data)

            # note: this script is not intended for 'sub_blocks_stats'
            current_area = get_area_under_curve(p_area, current_data)
            area_data.append(current_area)

        scenes_area_data.append(area_data)

    # display all data using matplotlib (configure plt)
    plt.title('Scenes area interval information SVD[' + str(begin_data) + ', ' + str(end_data) +
              '], from scenes indices [' + str(begin_index) + ', ' + str(end_index) + '], ' +
              p_metric + ' metric, ' + p_mode + ', with step of ' + str(p_step) +
              ', svd norm ' + str(p_norm), fontsize=20)
    plt.ylabel('Image samples or time (minutes) generation', fontsize=14)
    plt.xlabel('Vector features', fontsize=16)


    for id, area_data in enumerate(scenes_area_data):

        scene_name = scenes[id]
        image_indices = scenes_images_indices[id]

        p_label = scene_name + '_' + str(image_indices[id])

        threshold_id = scenes_threshold_mean[id]

        print(p_label)

        plt.plot(area_data, label=p_label)
        #ax2.set_xticks(range(len(images_indices)))
        #ax2.set_xticklabels(list(map(int, images_indices)))
        if threshold_id != 0:
            print("Plot threshold ", threshold_id)
            plt.plot([threshold_id, threshold_id], [np.min(area_data), np.max(area_data)], lw=2, color='red')


    plt.legend(bbox_to_anchor=(0.7, 1), loc=2, borderaxespad=0.2, fontsize=14)

    start_ylim, end_ylim = p_ylim
    plt.ylim(start_ylim, end_ylim)

    plt.show()
def extracts_linear_indices(images_path,
                            n_expected=50,
                            indices_step=20,
                            start_at=20,
                            smooth_arr=False):

    # TODO : check this part
    default_add = start_at - indices_step

    # extract variance for each image path
    var_arr = []

    n_counter = 0
    n_images = len(images_path)

    for p in sorted(images_path):
        img = Image.open(p)
        var_arr.append(np.var(img))

        n_counter += 1
        write_progress(n_counter / n_images)

    # normalize variance values
    norm_arr = np.array(utils.normalize_arr_with_range(var_arr))

    if smooth_arr:
        # smooth with a Savitzky-Golay filter (window size 201, polynomial order 3)
        norm_arr = utils.normalize_arr_with_range(savgol_filter(norm_arr, 201, 3))

    # get expected linear steps (using n_expected output images)
    linear_steps = utils.normalize_arr_with_range(
        (1 - (np.arange(n_expected) / n_expected)))

    # map each linear step onto the variance convergence curve
    # => store the last index whose normalized variance is still above the step
    indices_found = []
    for i in linear_steps:

        find_index = 0

        for index, y in enumerate(norm_arr):
            if i <= y:
                find_index = index

        indices_found.append(find_index + 1)

    indices = np.array(indices_found) * indices_step

    # ensure strictly increasing indices
    # => when an index repeats the previous one, shift it forward by one step
    # Example with step of 20 : [20, 20, 20, 100, 200] => [20, 40, 60, 100, 200]
    final_indices = []
    for index, i in enumerate(indices):
        value = indices[index]
        if index > 0:
            if i <= indices[index - 1]:
                value = indices[index - 1] + indices_step
                indices[index] = value

        final_indices.append(value)

    return np.array(final_indices) + default_add
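
# Stand-alone check of the strictly-increasing trick above, using the values
# from the comment (helper name is ours, not from the repo):
import numpy as np

def bump_duplicate_indices(indices, step):
    # shift any repeated index forward by one step
    out = []
    for i, value in enumerate(indices):
        if i > 0 and value <= out[-1]:
            value = out[-1] + step
        out.append(value)
    return np.array(out)

print(bump_duplicate_indices([20, 20, 20, 100, 200], 20))  # [ 20  40  60 100 200]
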
Example #9
def display_data_scenes(data_type, p_scene, p_kind):
    """
    @brief Method which displays data from scene
    @param data_type, feature choice
    @param p_scene, scene choice
    @param p_kind, normalization choice
    @return nothing
    """

    scenes = os.listdir(path)
    # remove min max file from scenes folder
    scenes = [s for s in scenes if min_max_filename not in s]

    # iterate over each scene
    for folder_scene in scenes:

        if p_scene == folder_scene:
            print(folder_scene)
            scene_path = os.path.join(path, folder_scene)

            # construct each zones folder name
            zones_folder = []

            # get zones list info
            for index in zones:
                index_str = str(index)
                if len(index_str) < 2:
                    index_str = "0" + index_str

                current_zone = "zone" + index_str
                zones_folder.append(current_zone)

            zones_images_data = []
            threshold_info = []

            # get all images of folder
            scene_images = sorted([
                os.path.join(scene_path, img) for img in os.listdir(scene_path)
                if cfg.scene_image_extension in img
            ])

            start_image_path = scene_images[0]
            end_image_path = scene_images[-1]

            start_quality_image = dt.get_scene_image_quality(start_image_path)
            end_quality_image = dt.get_scene_image_quality(end_image_path)

            for id_zone, zone_folder in enumerate(zones_folder):

                zone_path = os.path.join(scene_path, zone_folder)

                # get threshold information
                path_seuil = os.path.join(zone_path, seuil_expe_filename)

                # open the threshold file and read its value
                with open(path_seuil, "r") as seuil_file:
                    threshold_learned = int(seuil_file.readline().strip())

                threshold_image_found = False

                for img_path in scene_images:
                    current_quality_image = dt.get_scene_image_quality(
                        img_path)

                    if threshold_learned < int(current_quality_image) and not threshold_image_found:

                        threshold_image_found = True
                        threshold_image_path = img_path

                        threshold_image = dt.get_scene_image_postfix(img_path)
                        threshold_info.append(threshold_image)

                # the three images to plot: first, threshold and last
                images_path = [
                    start_image_path, threshold_image_path, end_image_path
                ]
                images_data = []

                for img_path in images_path:

                    current_img = Image.open(img_path)
                    img_blocks = segmentation.divide_in_blocks(
                        current_img, (200, 200))

                    # getting expected block id
                    block = img_blocks[id_zone]

                    data = get_image_features(data_type, block)

                    ##################
                    # Data mode part #
                    ##################

                    # modify data depending mode

                    if p_kind == 'svdn':
                        data = utils.normalize_arr(data)

                    if p_kind == 'svdne':
                        path_min_max = os.path.join(
                            path, data_type + min_max_filename)

                        with open(path_min_max, 'r') as f:
                            min_val = float(f.readline())
                            max_val = float(f.readline())

                        data = utils.normalize_arr_with_range(
                            data, min_val, max_val)

                    # append of data
                    images_data.append(data)

                zones_images_data.append(images_data)

            fig = plt.figure(figsize=(8, 8))
            fig.suptitle(data_type + " values for " + p_scene +
                         " scene (normalization : " + p_kind + ")",
                         fontsize=20)

            for id, data in enumerate(zones_images_data):
                fig.add_subplot(4, 4, (id + 1))
                plt.plot(data[0], label='Noisy_' + str(start_quality_image))
                plt.plot(data[1], label='Threshold_' + threshold_info[id])
                plt.plot(data[2], label='Reference_' + str(end_quality_image))
                plt.ylabel(data_type + ' SVD, ZONE_' + str(id + 1),
                           fontsize=18)
                plt.xlabel('Vector features', fontsize=18)
                plt.legend(bbox_to_anchor=(0.5, 1),
                           loc=2,
                           borderaxespad=0.2,
                           fontsize=18)
                plt.ylim(0, 0.1)
            plt.show()
Example #10
def extracts_linear_indices_rawls(images_path,
                                  n_expected=50,
                                  i_indices_step=20,
                                  o_indices_step=20,
                                  start_at=20,
                                  smooth_arr=False,
                                  gamma=False):

    default_add = start_at - 20

    # no offset needed when every input image index is used
    if i_indices_step == 1:
        default_add = 0

    n_start_images = int(start_at / i_indices_step)
    n_counter = 0

    # extract variance for each image path
    var_arr = []
    prev_rawls = None

    n_images = len(images_path)

    for p in sorted(images_path):

        temp = Rawls.load(p)

        if gamma:
            temp.gammaConvert()

        if prev_rawls is None:
            prev_rawls = temp
        else:
            prev_rawls = Rawls.fusion(prev_rawls, temp)

        write_progress((n_counter + 1) / n_images)

        n_counter += 1

        if n_counter >= n_start_images:
            # only compute variance over the image center crop (800 x 800)
            width, height, _ = prev_rawls.shape
            n_w, n_h = (800, 800)  # new expected size

            # get center of image
            middle_w = int(width / 2)
            middle_h = int(height / 2)

            # start coordinates
            s_w = middle_w - int(n_w / 2)
            s_h = middle_h - int(n_h / 2)

            # end coordinates
            e_w = middle_w + int(n_w / 2)
            e_h = middle_h + int(n_h / 2)

            var_arr.append(np.var(prev_rawls.data[s_w:e_w, s_h:e_h]))

    # normalize variance values
    norm_arr = np.array(utils.normalize_arr_with_range(var_arr))

    if smooth_arr:
        # smooth with a Savitzky-Golay filter (window size 201, polynomial order 3)
        norm_arr = utils.normalize_arr_with_range(savgol_filter(norm_arr, 201, 3))

    # get expected linear steps (using n_expected output images)
    linear_steps = utils.normalize_arr_with_range(
        (1 - (np.arange(n_expected) / n_expected)))

    # map each linear step onto the variance convergence curve
    # => store the last index whose normalized variance is still above the step

    indices_found = []
    for i in linear_steps:
        find_index = len(linear_steps) - 1
        for index, y in enumerate(norm_arr):
            if i <= y:
                find_index = index
        indices_found.append(find_index + 1)

    indices = np.array(indices_found) * i_indices_step

    # ensure strictly increasing indices
    # => when an index repeats the previous one, shift it forward by one step
    # Example with step of 20 : [20, 20, 20, 100, 200] => [20, 40, 60, 100, 200]
    final_indices = []
    for index, i in enumerate(indices):
        value = indices[index]
        if index > 0:
            if i <= indices[index - 1]:

                value = indices[index - 1] + o_indices_step
                indices[index] = value

        final_indices.append(value)

    return np.array(final_indices) + default_add
def main():

    parser = argparse.ArgumentParser(description="Read and compute entropy data file")

    parser.add_argument('--model', type=str, help='model file')
    parser.add_argument('--method', type=str, help='method name to use', choices=cfg.features_choices_labels, default=cfg.features_choices_labels[0])
    parser.add_argument('--interval', type=str, help='Interval value to keep from svd', default='0, 200')
    parser.add_argument('--kind', type=str, help='Kind of normalization level wished', choices=cfg.normalization_choices)
    parser.add_argument('--imnorm', type=int, help="specify if image is normalized before computing something", default=0, choices=[0, 1])
    parser.add_argument('--scene', type=str, help='Scene index to use', choices=cfg.scenes_indices)
    parser.add_argument('--save', type=str, help='filename where to save input data')
    parser.add_argument('--label', type=str, help='label to use when saving thresholds')

    args = parser.parse_args()

    p_model    = args.model
    p_method   = args.method
    p_interval = list(map(int, args.interval.split(',')))
    #p_n_stop   = args.n_stop
    p_imnorm   = args.imnorm
    p_scene    = args.scene
    p_mode     = args.kind
    p_save     = args.save
    p_label    = args.label

    p_n_stop = 1
    begin, end = p_interval

    # 1. get scene name
    scenes_list = cfg.scenes_names
    scenes_indices = cfg.scenes_indices

    scene_index = scenes_indices.index(p_scene.strip())
    scene = scenes_list[scene_index]

    scene_path = os.path.join(cfg.dataset_path, scene)

    # 2. load model and compile it

    # TODO : check kind of model
    model = joblib.load(p_model)
    # model.compile(loss='binary_crossentropy',
    #               optimizer='rmsprop',
    #               metrics=['accuracy'])


    estimated_thresholds = []
    n_estimated_thresholds = []
    human_thresholds = []

    # 3. retrieve human_thresholds
    # construct zones folder
    zones_list = []

    for index in zones_indices:

        index_str = str(index)

        while len(index_str) < 2:
            index_str = "0" + index_str
        
        zones_list.append(cfg.zone_folder + index_str)

    for zone in zones_list:
        zone_path = os.path.join(scene_path, zone)

        with open(os.path.join(zone_path, cfg.seuil_expe_filename), 'r') as f:
            human_thresholds.append(int(f.readline()))

    # 4. get estimated thresholds using model and specific method
    images_path = sorted([os.path.join(scene_path, img) for img in os.listdir(scene_path) if cfg.scene_image_extension in img])
    number_of_images = len(images_path)
    image_indices = [ dt.get_scene_image_quality(img_path) for img_path in images_path ]

    image_counter = 0

    print(human_thresholds)

    # initialize per-zone estimation state
    for zone in zones_list:
        estimated_thresholds.append(None)
        n_estimated_thresholds.append(0)

    for img_i, img_path in enumerate(images_path):

        blocks = segmentation.divide_in_blocks(Image.open(img_path), (200, 200))

        for index, block in enumerate(blocks):
            
            if estimated_thresholds[index] is None:
                # normalize if necessary
                if p_imnorm:
                    block = np.array(block) / 255.
                
                # compute the feature vector used for prediction
                data = np.array(get_image_features(p_method, np.array(block)))

                if p_mode == 'svdn':
                    data = utils.normalize_arr_with_range(data)

                data = data[begin:end]

                #data = np.expand_dims(data, axis=0)
                #print(data.shape)
                
                prob = model.predict(np.array(data).reshape(1, -1))[0]
                #print(index, ':', image_indices[img_i], '=>', prob)

                if prob < 0.5:
                    n_estimated_thresholds[index] += 1

                    # once enough consecutive detections occurred, keep this index as the threshold
                    if n_estimated_thresholds[index] >= p_n_stop:
                        estimated_thresholds[index] = image_indices[img_i]
                else:
                    n_estimated_thresholds[index] = 0

        # write progress bar
        write_progress((image_counter + 1) / number_of_images)
        
        image_counter = image_counter + 1
    
    # fallback: zones never detected get the last image index as threshold
    for i, _ in enumerate(zones_list):
        if estimated_thresholds[i] is None:
            estimated_thresholds[i] = image_indices[-1]

    # 5. save estimated thresholds into specific file
    print(estimated_thresholds)
    print(p_save)
    if p_save is not None:
        with open(p_save, 'a') as f:
            f.write(p_label + ';')

            for t in estimated_thresholds:
                f.write(str(t) + ';')
            f.write('\n')
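
# `write_progress` comes from the repo's own modules and is not shown. A
# minimal compatible sketch, assuming it takes a completion ratio in [0, 1]:
import sys

def write_progress(progress, bar_width=40):
    filled = int(bar_width * progress)
    bar = '=' * filled + '-' * (bar_width - filled)
    sys.stdout.write('\r[%s] %.2f%%' % (bar, progress * 100))
    sys.stdout.flush()
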
def display_svd_values(p_scene, p_interval, p_indices, p_feature, p_mode,
                       p_step, p_norm, p_error, p_ylim):
    """
    @brief Method which gives information about svd curves from zone of picture
    @param p_scene, scene expected to show svd values
    @param p_interval, interval [begin, end] of svd data to display
    @param p_indices, interval [begin, end] of samples or minutes from render generation engine
    @param p_feature, feature computed to show
    @param p_mode, normalization mode
    @param p_norm, normalization or not of selected svd data
    @param p_error, error feature used to display
    @param p_ylim, ylim choice to better display of data
    @return nothing
    """

    max_value_svd = 0
    min_value_svd = sys.maxsize

    scenes = os.listdir(path)
    # remove min max file from scenes folder
    scenes = [s for s in scenes if min_max_filename not in s]

    begin_data, end_data = p_interval
    begin_index, end_index = p_indices

    # iterate over each scene
    for folder_scene in scenes:

        if p_scene == folder_scene:
            scene_path = os.path.join(path, folder_scene)

            # construct each zones folder name
            zones_folder = []

            # get zones list info
            for index in zones:
                index_str = str(index)
                if len(index_str) < 2:
                    index_str = "0" + index_str

                current_zone = "zone" + index_str
                zones_folder.append(current_zone)

            images_data = []
            images_path = []

            threshold_learned_zones = []

            # get all images of folder
            scene_images = sorted([
                os.path.join(scene_path, img) for img in os.listdir(scene_path)
                if cfg.scene_image_extension in img
            ])
            number_scene_image = len(scene_images)

            for id, zone_folder in enumerate(zones_folder):

                # get threshold information

                zone_path = os.path.join(scene_path, zone_folder)
                path_seuil = os.path.join(zone_path, seuil_expe_filename)

                # open the threshold file and read its value
                with open(path_seuil, "r") as seuil_file:
                    threshold_learned = int(seuil_file.readline().strip())
                    threshold_learned_zones.append(threshold_learned)

            threshold_mean = np.mean(np.asarray(threshold_learned_zones))
            threshold_image_found = False

            svd_data = []

            # for each image
            for id_img, img_path in enumerate(scene_images):

                current_quality_image = dt.get_scene_image_quality(img_path)

                img = Image.open(img_path)

                svd_values = get_image_features(p_feature, img)

                if p_norm:
                    svd_values = svd_values[begin_data:end_data]

                # update min max values
                min_value = svd_values.min()
                max_value = svd_values.max()

                if min_value < min_value_svd:
                    min_value_svd = min_value

                if max_value > max_value_svd:
                    max_value_svd = max_value

                # keep in memory used data
                if current_quality_image % p_step == 0:
                    if current_quality_image >= begin_index and current_quality_image <= end_index:
                        images_path.append(img_path)
                        svd_data.append(svd_values)

                    if threshold_mean < current_quality_image and not threshold_image_found:

                        threshold_image_found = True
                        threshold_image_zone = dt.get_scene_image_postfix(
                            img_path)

                print('%.2f%%' % ((id_img + 1) / number_scene_image * 100))
                sys.stdout.write("\033[F")

            previous_data = []
            error_data = [0.]

            for id, data in enumerate(svd_data):

                current_data = data

                if not p_norm:
                    current_data = current_data[begin_data:end_data]

                if p_mode == 'svdn':
                    current_data = utils.normalize_arr(current_data)

                if p_mode == 'svdne':
                    current_data = utils.normalize_arr_with_range(
                        current_data, min_value_svd, max_value_svd)

                images_data.append(current_data)

                # use of whole image data for computation of ssim or psnr
                if p_error == 'ssim' or p_error == 'psnr':
                    current_data = np.asarray(Image.open(images_path[id]))

                if len(previous_data) > 0:

                    current_error = get_error_distance(p_error, previous_data,
                                                       current_data)
                    error_data.append(current_error)

                # keep current data as the reference for the next step
                previous_data = current_data

            # display all data using matplotlib (configure plt)
            gridsize = (3, 2)

            # fig, (ax1, ax2) = plt.subplots(nrows=2, ncols=1, figsize=(30, 22))
            fig = plt.figure(figsize=(30, 22))
            ax1 = plt.subplot2grid(gridsize, (0, 0), colspan=2, rowspan=2)
            ax2 = plt.subplot2grid(gridsize, (2, 0), colspan=2)

            ax1.set_title(p_scene + ' scene interval information SVD[' +
                          str(begin_data) + ', ' + str(end_data) +
                          '], from scenes indices [' + str(begin_index) +
                          ', ' + str(end_index) + '], ' + p_feature +
                          ' feature, ' + p_mode + ', with step of ' +
                          str(p_step) + ', svd norm ' + str(p_norm),
                          fontsize=20)
            ax1.set_ylabel('Image samples or time (minutes) generation',
                           fontsize=14)
            ax1.set_xlabel('Vector features', fontsize=16)

            for id, data in enumerate(images_data):

                current_quality_image = dt.get_scene_image_quality(
                    images_path[id])
                current_quality_postfix = dt.get_scene_image_postfix(
                    images_path[id])

                if display_error:
                    p_label = p_scene + '_' + current_quality_postfix + " | " + p_error + ": " + str(
                        error_data[id])
                else:
                    p_label = p_scene + '_' + current_quality_postfix

                if current_quality_image == int(threshold_image_zone):
                    ax1.plot(data,
                             label=p_label + " (threshold mean)",
                             lw=4,
                             color='red')
                else:
                    ax1.plot(data, label=p_label)

            ax1.legend(bbox_to_anchor=(0.7, 1),
                       loc=2,
                       borderaxespad=0.2,
                       fontsize=14)

            start_ylim, end_ylim = p_ylim
            ax1.set_ylim(start_ylim, end_ylim)

            ax2.set_title(p_error + " information for whole step images")
            ax2.set_ylabel(p_error + ' error')
            ax2.set_xlabel('Number of samples per pixels or times')
            ax2.set_xticks(range(len(images_path)))
            ax2.set_xticklabels(
                list(map(dt.get_scene_image_quality, images_path)))
            ax2.plot(error_data)

            plot_name = p_scene + '_' + p_feature + '_' + str(
                p_step) + '_' + p_mode + '_' + str(p_norm) + '.png'
            plt.savefig(plot_name)
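
# `get_error_distance` is repo-internal too. A hedged sketch of a compatible
# dispatcher for the two modes handled above, built on scikit-image (the real
# implementation may support more error features):
import numpy as np
from skimage.metrics import peak_signal_noise_ratio, structural_similarity

def get_error_distance(p_error, previous_data, current_data):
    previous_data = np.asarray(previous_data)
    current_data = np.asarray(current_data)

    if p_error == 'ssim':
        return structural_similarity(previous_data, current_data, channel_axis=-1)
    if p_error == 'psnr':
        return peak_signal_noise_ratio(previous_data, current_data)

    # fallback: mean squared difference between feature vectors
    return float(np.mean((previous_data - current_data) ** 2))
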
def generate_data_svd(data_type, color, mode):
    """
    @brief Method which generates all .csv files from scenes
    @param data_type, feature choice
    @param color, whether color images are used instead of grey level
    @param mode, normalization choice
    @return nothing
    """

    scenes = os.listdir(path)

    # filter scene
    scenes = [s for s in scenes if calibration_folder not in s]

    # remove min max file from scenes folder
    scenes = [s for s in scenes if min_max_filename not in s]

    # keep in memory min and max data found from data_type
    min_val_found = sys.maxsize
    max_val_found = 0

    data_min_max_filename = os.path.join(path, data_type + min_max_filename)

    # iterate over each scene
    for id_scene, folder_scene in enumerate(scenes):

        print(folder_scene)
        scene_path = os.path.join(path, folder_scene)

        for noise in noise_choices:

            noise_path = os.path.join(scene_path, noise)

            # getting output filename
            if color:
                output_svd_filename = data_type + "_color_" + mode + generic_output_file_svd
            else:
                output_svd_filename = data_type + "_" + mode + generic_output_file_svd

            # construct each zones folder name
            zones_folder = []
            svd_output_files = []

            # get zones list info
            for index in zones:
                index_str = str(index)
                if len(index_str) < 2:
                    index_str = "0" + index_str

                current_zone = "zone"+index_str
                zones_folder.append(current_zone)

                zone_path = os.path.join(noise_path, current_zone)

                if not os.path.exists(zone_path):
                    os.makedirs(zone_path)

                svd_file_path = os.path.join(zone_path, output_svd_filename)

                # add writer into list
                svd_output_files.append(open(svd_file_path, 'w'))

            counter_index = 1

            while counter_index < end_counter_index:

                if counter_index % picture_step == 0:
                    counter_index_str = str(counter_index)

                    if color:
                        img_path = os.path.join(noise_path, folder_scene + "_" + noise + "_color_" + counter_index_str + ".png")
                    else:
                        img_path = os.path.join(noise_path, folder_scene + "_" + noise + "_" + counter_index_str + ".png")

                    current_img = Image.open(img_path)
                    img_blocks = divide_in_blocks(current_img, (200, 200))

                    for id_block, block in enumerate(img_blocks):

                        ############################
                        # feature computation part #
                        ############################

                        data = get_image_features(data_type, block)

                        ##################
                        # Data mode part #
                        ##################

                        # modify data depending mode
                        if mode == 'svdne':

                            # getting max and min information from min_max_filename
                            with open(data_min_max_filename, 'r') as f:
                                min_val = float(f.readline())
                                max_val = float(f.readline())

                            data = utils.normalize_arr_with_range(data, min_val, max_val)

                        if mode == 'svdn':
                            data = utils.normalize_arr(data)

                        # keep the min and max found over the whole dataset (used later by 'svdne')
                        if mode == 'svd':

                            current_min = data.min()
                            current_max = data.max()

                            if current_min < min_val_found:
                                min_val_found = current_min

                            if current_max > max_val_found:
                                max_val_found = current_max

                        # now write data into current writer
                        current_file = svd_output_files[id_block]

                        # write the image index first
                        current_file.write(counter_index_str + ';')

                        for val in data:
                            current_file.write(str(val) + ";")

                        current_file.write('\n')

                if color:
                    print(data_type + "_" + noise + "_color_" + mode + "_" + folder_scene + " - " + "{0:.2f}".format((counter_index) / (end_counter_index)* 100.) + "%")
                else:
                    print(data_type + "_" + noise + "_"+ mode + "_" + folder_scene + " - " + "{0:.2f}".format((counter_index) / (end_counter_index)* 100.) + "%")

                sys.stdout.write("\033[F")

                counter_index += 1

            for f in svd_output_files:
                f.close()

            if color:
                print(data_type + "_" + noise + "_color_" + mode + "_" + folder_scene + " - " + "Done...")
            else:
                print(data_type + "_" + noise + "_"+ mode + "_" + folder_scene + " - " + "Done...")


    # save current information about min file found
    if mode == 'svd':
        with open(data_min_max_filename, 'w') as f:
            f.write(str(min_val_found) + '\n')
            f.write(str(max_val_found) + '\n')

    print("%s : end of data generation\n" % mode)
Example #14
def main():

    parser = argparse.ArgumentParser(
        description="Display svd of images with noise level")

    parser.add_argument(
        '--prefix',
        type=str,
        help='Generated noise folder prefix (ex: `generated/prefix/noise`)')
    parser.add_argument('--mode',
                        type=str,
                        help='Kind of normalization',
                        choices=normalization_choices,
                        default=normalization_choices[0])
    parser.add_argument('--feature',
                        type=str,
                        help='feature choice',
                        choices=feature_choices,
                        default=feature_choices[0])
    parser.add_argument('--n', type=int, help='Number of images')
    parser.add_argument('--color',
                        type=int,
                        help='Use of color or grey level',
                        default=0)
    parser.add_argument(
        '--norm',
        type=int,
        help='Use of normalization from interval or whole data vector',
        default=0)
    parser.add_argument('--interval',
                        type=str,
                        help='Interval data choice (ex: `0, 200`)',
                        default="0, 200")
    parser.add_argument('--step',
                        type=int,
                        help='Step of image indices to keep',
                        default=1)
    parser.add_argument('--ylim',
                        type=str,
                        help='Limite to display data (ex: `0, 1`)',
                        default="0, 1")

    args = parser.parse_args()

    param_prefix = args.prefix
    param_mode = args.mode
    param_feature = args.feature
    param_n = args.n
    param_color = args.color
    param_norm = args.norm
    param_interval = list(map(int, args.interval.split(',')))
    param_step = args.step
    param_ylim = list(map(float, args.ylim.split(',')))

    # extract noise name before rewriting the prefix (order matters here)
    noise_name = param_prefix.split('/')[2]
    param_prefix = param_prefix.split('/')[1].replace('_', '')

    if param_color:
        file_path = param_prefix + "/" + param_prefix + "_" + noise_name + "_color_{}." + filename_ext
    else:
        file_path = param_prefix + "/" + param_prefix + "_" + noise_name + "_{}." + filename_ext

    begin, end = param_interval
    all_svd_data = []

    svd_data = []
    image_indices = []

    max_value_svd = 0
    min_value_svd = sys.maxsize

    # get all data from images
    for i in range(1, param_n):

        if i % steparam_picture == 0:

            image_path = file_path.format(str(i))
            img = Image.open(image_path)

            svd_values = get_image_features(param_feature, img)

            if param_norm:
                svd_values = svd_values[begin:end]

            all_svd_data.append(svd_values)

            # update min max values
            min_value = svd_values.min()
            max_value = svd_values.max()

            if min_value < min_value_svd:
                min_value_svd = min_value

            if max_value > max_value_svd:
                max_value_svd = max_value

            print('%.2f%%' % ((i + 1) / param_n * 100))
            sys.stdout.write("\033[F")

    for id, data in enumerate(all_svd_data):

        if (id * steparam_picture) % param_step == 0:

            current_data = data
            if param_mode == 'svdn':
                current_data = utils.normalize_arr(current_data)

            if param_mode == 'svdne':
                current_data = utils.normalize_arr_with_range(
                    current_data, min_value_svd, max_value_svd)

            svd_data.append(current_data)
            image_indices.append(str(id * steparam_picture))

    # display all data using matplotlib (configure plt)

    plt.rcParams['figure.figsize'] = (25, 18)

    plt.title(param_prefix + ' noise, interval information [' + str(begin) +
              ', ' + str(end) + '], ' + param_feature + ' feature, step ' +
              str(param_step) + ', normalization ' + param_mode,
              fontsize=20)
    plt.ylabel('Importance of noise [1, 999]', fontsize=14)
    plt.xlabel('Vector features', fontsize=16)

    for id, data in enumerate(svd_data):

        param_label = param_prefix + str(image_indices[id])
        plt.plot(data, label=param_label)

    plt.legend(bbox_to_anchor=(0.8, 1), loc=2, borderaxespad=0.2, fontsize=14)

    if not param_norm:
        plt.xlim(begin, end)

    # adapt ylim
    y_begin, y_end = param_ylim
    plt.ylim(y_begin, y_end)

    output_filename = param_prefix + "_" + noise_name + "_1_to_" + str(
        param_n) + "_B" + str(begin) + "_E" + str(
            end) + "_" + param_feature + "_S" + str(
                param_step) + "_norm" + str(param_norm) + "_" + param_mode

    if param_color:
        output_filename = output_filename + '_color'

    print("Generation of output figure... %s" % output_filename)
    output_path = os.path.join(pictures_folder, output_filename)

    if not os.path.exists(pictures_folder):
        os.makedirs(pictures_folder)

    plt.savefig(output_path, dpi=200)
Example #15
def display_svd_values(p_scene, p_interval, p_indices, p_zone, p_feature,
                       p_mode, p_step, p_norm, p_ylim):
    """
    @brief Method which gives information about svd curves from zone of picture
    @param p_scene, scene expected to show svd values
    @param p_interval, interval [begin, end] of svd data to display
    @param p_indices, interval [begin, end] of samples or minutes from render generation engine
    @param p_zone, zone's identifier of picture
    @param p_feature, feature computed to show
    @param p_mode, normalization mode
    @param p_step, step of images indices
    @param p_norm, normalization or not of selected svd data
    @param p_ylim, ylim choice to better display of data
    @return nothing
    """

    scenes = os.listdir(path)
    # remove min max file from scenes folder
    scenes = [s for s in scenes if min_max_filename not in s]

    begin_data, end_data = p_interval
    begin_index, end_index = p_indices

    data_min_max_filename = os.path.join(path, p_feature + min_max_filename)

    # iterate over each scene
    for folder_scene in scenes:

        if p_scene == folder_scene:
            scene_path = os.path.join(path, folder_scene)
            # construct each zones folder name
            zones_folder = []

            # get zones list info
            for index in zones:
                index_str = str(index)
                if len(index_str) < 2:
                    index_str = "0" + index_str

                current_zone = "zone" + index_str
                zones_folder.append(current_zone)

            zones_images_data = []
            images_path = []

            zone_folder = zones_folder[p_zone]

            zone_path = os.path.join(scene_path, zone_folder)

            # get threshold information
            path_seuil = os.path.join(zone_path, seuil_expe_filename)

            # open the threshold file and read its value
            with open(path_seuil, "r") as seuil_file:
                seuil_learned = int(seuil_file.readline().strip())

            threshold_image_found = False

            # get all images of folder
            scene_images = sorted([
                os.path.join(scene_path, img) for img in os.listdir(scene_path)
                if cfg.scene_image_extension in img
            ])

            # for each image
            for img_path in scene_images:

                current_quality_image = dt.get_scene_image_quality(img_path)

                if current_quality_image % p_step == 0:
                    if current_quality_image >= begin_index and current_quality_image <= end_index:
                        images_path.append(img_path)

                    if seuil_learned < current_quality_image and not threshold_image_found:

                        threshold_image_found = True
                        threshold_image_zone = dt.get_scene_image_postfix(
                            img_path)

                        if img_path not in images_path:
                            images_path.append(img_path)

            for img_path in images_path:

                current_img = Image.open(img_path)
                img_blocks = segmentation.divide_in_blocks(
                    current_img, (200, 200))

                # getting expected block id
                block = img_blocks[p_zone]

                # get data from mode
                # Here you can add the way you compute data
                data = get_image_features(p_feature, block)

                # TODO : improve part of this code to get correct min / max values
                if p_norm:
                    data = data[begin_data:end_data]

                ##################
                # Data mode part #
                ##################

                if p_mode == 'svdne':

                    # getting max and min information from min_max_filename
                    if not p_norm:
                        with open(data_min_max_filename, 'r') as f:
                            min_val = float(f.readline())
                            max_val = float(f.readline())
                    else:
                        min_val = min_value_interval
                        max_val = max_value_interval

                    data = utils.normalize_arr_with_range(
                        data, min_val, max_val)

                if p_mode == 'svdn':
                    data = utils.normalize_arr(data)

                if not p_norm:
                    zones_images_data.append(data[begin_data:end_data])
                else:
                    zones_images_data.append(data)

            fig, ax = plt.subplots(figsize=(30, 22))
            ax.set_facecolor('#FFFFFF')

            # plt.title(p_scene + ' scene (zone  ' + str(p_zone) + ') interval information SVD['+ str(begin_data) +', '+ str(end_data) +'], from scenes indices [' + str(begin_index) + ', '+ str(end_index) + '], ' + p_feature + ' feature, ' + p_mode + ', with step of ' + str(p_step) + ', svd norm ' + str(p_norm), fontsize=24)
            ax.set_ylabel('Component values', fontsize=28)
            ax.set_xlabel('Vector features', fontsize=28)

            ax.tick_params(labelsize=22)

            for id, data in enumerate(zones_images_data):

                p_label = p_scene + "_" + dt.get_scene_image_postfix(
                    images_path[id])

                if int(dt.get_scene_image_postfix(
                        images_path[id])) == int(threshold_image_zone):
                    ax.plot(data,
                            label=p_label + ' (zone ' + str(p_zone) +
                            ' threshold)',
                            lw=4,
                            color='red')
                else:
                    ax.plot(data, label=p_label)

            plt.legend(bbox_to_anchor=(0.60, 0.98),
                       loc=2,
                       borderaxespad=0.2,
                       fontsize=24)

            start_ylim, end_ylim = p_ylim
            plt.ylim(start_ylim, end_ylim)

            plot_name = p_scene + '_zone_' + str(
                p_zone) + '_' + p_feature + '_' + str(
                    p_step) + '_' + p_mode + '_' + str(p_norm) + '.png'
            plt.savefig(plot_name, facecolor=ax.get_facecolor())
Example #16
def generate_data_feature(path, output, human_thresholds, data_type, mode):
    """
    @brief Method which generates all .csv files from scenes
    @param path, dataset path containing the scenes
    @param output, output folder name
    @param human_thresholds, scenes (with human thresholds) to process
    @param data_type, feature choice
    @param mode, normalization choice
    @return nothing
    """

    scenes = os.listdir(path)
    # remove min max file from scenes folder
    scenes = [s for s in scenes if min_max_filename not in s]

    # keep in memory min and max data found from data_type
    min_val_found = sys.maxsize
    max_val_found = 0

    output_path = os.path.join(cfg.output_data_generated, output)

    if not os.path.exists(output_path):
        os.makedirs(output_path)

    data_min_max_filename = os.path.join(output_path, data_type + min_max_filename)

    # iterate over each scene
    for folder_scene in human_thresholds:

        print(folder_scene)
        scene_path = os.path.join(path, folder_scene)
        output_scene_path = os.path.join(output_path, folder_scene)

        if not os.path.exists(output_scene_path):
            os.makedirs(output_scene_path)

        # getting output filename
        output_svd_filename = data_type + "_" + mode + generic_output_file_svd

        # construct each zones folder name
        zones_folder = []
        svd_output_files = []

        # get zones list info
        for index in zones:
            index_str = str(index)
            if len(index_str) < 2:
                index_str = "0" + index_str

            current_zone = "zone"+index_str
            zones_folder.append(current_zone)

            zone_path = os.path.join(scene_path, current_zone)
            output_zone_path = os.path.join(output_scene_path, current_zone)

            if not os.path.exists(output_zone_path):
                os.makedirs(output_zone_path)

            svd_file_path = os.path.join(output_zone_path, output_svd_filename)

            # add writer into list
            svd_output_files.append(open(svd_file_path, 'w'))

        # get all images of folder
        scene_images = sorted([os.path.join(scene_path, img) for img in os.listdir(scene_path) if cfg.scene_image_extension in img])
        number_scene_image = len(scene_images)
            
        for id_img, img_path in enumerate(scene_images):
            
            current_image_postfix = dt.get_scene_image_postfix(img_path)

            current_img = Image.open(img_path)
            img_blocks = segmentation.divide_in_blocks(current_img, (200, 200))

            for id_block, block in enumerate(img_blocks):

                ############################
                # feature computation part #
                ############################

                data = get_image_features(data_type, block)

                ##################
                # Data mode part #
                ##################

                # modify data depending mode
                if mode == 'svdne':

                    # getting max and min information from min_max_filename
                    with open(data_min_max_filename, 'r') as f:
                        min_val = float(f.readline())
                        max_val = float(f.readline())

                    data = utils.normalize_arr_with_range(data, min_val, max_val)

                if mode == 'svdn':
                    data = utils.normalize_arr(data)

                # keep the min and max found over the whole dataset (used later by 'svdne')
                if mode == 'svd':

                    current_min = data.min()
                    current_max = data.max()

                    if current_min < min_val_found:
                        min_val_found = current_min

                    if current_max > max_val_found:
                        max_val_found = current_max

                # now write data into current writer
                current_file = svd_output_files[id_block]

                # write the image index first
                current_file.write(current_image_postfix + ';')

                for val in data:
                    current_file.write(str(val) + ";")

                current_file.write('\n')

            print(data_type + "_" + mode + "_" + folder_scene + " - " + "{0:.2f}".format((id_img + 1) / number_scene_image * 100.) + "%")
            sys.stdout.write("\033[F")

        for f in svd_output_files:
            f.close()

        print('\n')

    # save current information about min file found
    if mode == 'svd':
        with open(data_min_max_filename, 'w') as f:
            f.write(str(min_val_found) + '\n')
            f.write(str(max_val_found) + '\n')

    print("%s_%s : end of data generation\n" % (data_type, mode))
def display_svd_values(p_scene, p_thresholds, p_interval, p_indices, p_feature,
                       p_mode, p_step, p_norm, p_ylim, p_label):
    """
    @brief Method which gives information about svd curves from zone of picture
    @param p_scene, scene expected to show svd values
    @param p_thresholds, human thresholds per scene (scene name -> zone thresholds)
    @param p_interval, interval [begin, end] of svd data to display
    @param p_indices, interval [begin, end] of samples or minutes from render generation engine
    @param p_feature, feature computed to show
    @param p_mode, normalization mode
    @param p_step, step between image indices to keep
    @param p_norm, normalization or not of selected svd data
    @param p_ylim, ylim choice to better display of data
    @param p_label, label used when plotting
    @return nothing
    """

    max_value_svd = 0
    min_value_svd = sys.maxsize

    begin_data, end_data = p_interval
    begin_index, end_index = p_indices

    # process the selected scene
    scene_path = p_scene

    # construct each zones folder name
    zones_folder = []

    # get zones list info
    for index in zones:
        index_str = str(index)
        if len(index_str) < 2:
            index_str = "0" + index_str

        current_zone = "zone" + index_str
        zones_folder.append(current_zone)

    images_data = []
    images_indices = []

    threshold_learned_zones = []

    # get all images of folder
    scene_images = sorted([
        os.path.join(scene_path, img) for img in os.listdir(scene_path)
        if cfg.scene_image_extension in img
    ])
    number_scene_image = len(scene_images)

    _, scene_name = os.path.split(p_scene)
    threshold_learned_zones = p_thresholds[scene_name]

    threshold_mean = np.mean(np.asarray(threshold_learned_zones))
    threshold_image_found = False

    svd_data = []

    # for each image
    for id_img, img_path in enumerate(scene_images):

        current_quality_image = dt.get_scene_image_quality(img_path)

        img = Image.open(img_path)

        svd_values = get_image_features(p_feature, img)

        if p_norm:
            svd_values = svd_values[begin_data:end_data]

        #svd_values = np.asarray([math.log(x) for x in svd_values])

        # update min max values
        min_value = svd_values.min()
        max_value = svd_values.max()

        if min_value < min_value_svd:
            min_value_svd = min_value

        if max_value > max_value_svd:
            max_value_svd = max_value

        # keep in memory used data
        if current_quality_image % p_step == 0:
            if current_quality_image >= begin_index and current_quality_image <= end_index:

                images_indices.append(dt.get_scene_image_postfix(img_path))
                svd_data.append(svd_values)

        if threshold_mean < current_quality_image and not threshold_image_found:

            threshold_image_found = True
            threshold_image_zone = current_quality_image

            print("Quality mean : ", current_quality_image, "\n")

            if dt.get_scene_image_postfix(img_path) not in images_indices:
                images_indices.append(dt.get_scene_image_postfix(img_path))

        print('%.2f%%' % ((id_img + 1) / number_scene_image * 100))
        sys.stdout.write("\033[F")

    # all indices of picture to plot
    print(images_indices)

    for id, data in enumerate(svd_data):

        # current_data = [ math.log10(d + 1.) for d in data ]
        # print(current_data)

        current_data = data

        if not p_norm:
            current_data = current_data[begin_data:end_data]

        if p_mode == 'svdn':
            current_data = utils.normalize_arr(current_data)

        if p_mode == 'svdne':
            current_data = utils.normalize_arr_with_range(
                current_data, min_value_svd, max_value_svd)

        images_data.append(current_data)

    # display all data using matplotlib (configure plt)
    fig, ax = plt.subplots(figsize=(30, 15))
    ax.set_facecolor('#FFFFFF')
    #fig.patch.set_facecolor('#F9F9F9')

    ax.tick_params(labelsize=26)
    #plt.rc('xtick', labelsize=22)
    #plt.rc('ytick', labelsize=22)

    #plt.title(p_scene + ' scene interval information SVD['+ str(begin_data) +', '+ str(end_data) +'], from scenes indices [' + str(begin_index) + ', '+ str(end_index) + '], ' + p_feature + ' feature, ' + p_mode + ', with step of ' + str(p_step) + ', svd norm ' + str(p_norm), fontsize=24)
    ax.set_ylabel('Component values', fontsize=36)
    ax.set_xlabel('Singular value component indices', fontsize=36)

    for id, data in enumerate(images_data):

        #p_label = p_scene + "_" + images_indices[id]
        p_label = images_indices[id] + " samples"

        if int(images_indices[id]) == int(threshold_image_zone):
            ax.plot(data,
                    label=p_label + " (threshold mean)",
                    lw=8,
                    color='red')
        else:
            ax.plot(data, label=p_label, lw=4)

    plt.legend(bbox_to_anchor=(0.60, 0.98),
               loc=2,
               borderaxespad=0.2,
               fontsize=32)

    start_ylim, end_ylim = p_ylim
    ax.set_ylim(start_ylim, end_ylim)

    plot_name = scene_name + '_' + p_feature + '_' + str(
        p_step) + '_' + p_mode + '_' + str(p_norm) + '.png'
    # plt.title('Tend of Singular values at different samples of ' + p_label + ' scene', fontsize=40)
    plt.savefig(plot_name, transparent=True)
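A hedged usage sketch for `display_svd_values`; every value below (scene path, thresholds, intervals) is a placeholder for illustration, not data from the project:

# hypothetical per-zone thresholds for a hypothetical scene folder
thresholds = {'SceneA': [4500, 5200, 4800, 5000]}

display_svd_values(
    p_scene='data/scenes/SceneA',  # hypothetical scene path
    p_thresholds=thresholds,
    p_interval=(0, 200),           # SVD components [begin, end] to display
    p_indices=(20, 900),           # image quality range to keep
    p_feature='lab',               # feature name passed to get_image_features
    p_mode='svdne',                # 'svd', 'svdn' or 'svdne'
    p_step=40,                     # keep one image every 40 quality steps
    p_norm=0,                      # 1: slice svd data before computing min/max
    p_ylim=(0., 1.),
    p_label='')                    # overwritten inside the function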
Example #18
def main():

    parser = argparse.ArgumentParser(description="Display threshold svd data")

    parser.add_argument(
        '--prefix',
        type=str,
        help='Generated noise folder prefix (ex: `generated/prefix/noise`)')
    parser.add_argument('--file', type=str, help='Threshold file to use')
    parser.add_argument('--mode',
                        type=str,
                        help='Kind of normalization',
                        choices=normalization_choices)
    parser.add_argument('--feature',
                        type=str,
                        help='feature choice',
                        choices=feature_choices)
    parser.add_argument('--n', type=int, help='Number of images')
    parser.add_argument('--color',
                        type=int,
                        help='Use of color or grey level',
                        default=0)
    parser.add_argument(
        '--norm',
        type=int,
        help='Use of normalization from interval or whole data vector',
        default=0)
    parser.add_argument('--interval',
                        type=str,
                        help='Interval data choice (ex: `0, 200`)',
                        default="0, 200")
    parser.add_argument('--step',
                        type=int,
                        help='Step of image indices to keep',
                        default=1)
    parser.add_argument('--ylim',
                        type=str,
                        help='Limits of y-axis to display data (ex: `0, 1`)',
                        default="0, 1")

    args = parser.parse_args()

    param_prefix = args.prefix
    param_file = args.file
    param_mode = args.mode
    param_feature = args.feature
    param_n = args.n
    param_color = args.color
    param_norm = args.norm
    param_interval = list(map(int, args.interval.split(',')))
    param_step = args.step
    param_ylim = list(map(float, args.ylim.split(',')))

    param_prefix = param_prefix.split('/')[1].replace('_', '')

    if param_color:
        file_path = param_prefix + "{}/" + param_prefix + "_{}_color_{}." + filename_ext
    else:
        file_path = param_prefix + "{}/" + param_prefix + "_{}_{}." + filename_ext

    begin, end = param_interval

    svd_data = []
    final_svd_data = []
    image_indices = []
    min_max_list = {}

    threshold_data = []

    # read data threshold file
    with open(param_file, 'r') as f:
        lines = f.readlines()

        for line in lines:
            data = line.replace('\n', '').split(';')
            print(data)

            threshold = ThresholdData(data[0], float(data[1]), int(data[2]))
            threshold_data.append(threshold)

    # filter data if color or not
    threshold_data = [t for t in threshold_data if t.isColor() == param_color]

    for id, threshold in enumerate(threshold_data):

        current_noise = threshold.get_noise()
        current_threshold = threshold.get_threshold()

        min_max_list[current_noise] = (sys.maxsize, 0)
        threshold_found = False

        # get all data from images
        for i in range(1, param_n):

            if i % step_picture == 0:  # step_picture: module-level sampling constant
                image_path = file_path.format(current_noise, current_noise,
                                              str(i))
                img = Image.open(image_path)

                svd_values = get_image_features(param_feature, img)

                if param_norm:
                    svd_values = svd_values[begin:end]

                # only append data once
                if not threshold_found and current_threshold < i:
                    svd_data.append(svd_values)
                    image_indices.append(i)

                if current_threshold < i:
                    threshold_found = True

                # update min max values
                min_value = svd_values.min()
                max_value = svd_values.max()

                # update of min max values for noise
                current_min, current_max = min_max_list[current_noise]

                if min_value < current_min:
                    current_min = min_value

                if max_value > current_max:
                    current_max = max_value

                min_max_list[current_noise] = (current_min, current_max)

            print('%.2f%%' % (((i + 1) * 100 + (id * param_n * 100)) /
                              (param_n * len(threshold_data))))
            sys.stdout.write("\033[F")

    for id, data in enumerate(svd_data):

        current_data = data

        threshold = threshold_data[id]
        min_value_svd, max_value_svd = min_max_list[threshold.get_noise()]

        if param_mode == 'svdn':
            current_data = utils.normalize_arr(current_data)

        if param_mode == 'svdne':
            current_data = utils.normalize_arr_with_range(
                current_data, min_value_svd, max_value_svd)

        final_svd_data.append(current_data)

    # display all data using matplotlib (configure plt)

    plt.rcParams['figure.figsize'] = (25, 18)

    plt.title(param_prefix + ' noise, interval information [' + str(begin) +
              ', ' + str(end) + '], ' + param_feature + ' feature, step ' +
              str(param_step) + ' normalization ' + param_mode,
              fontsize=20)
    plt.ylabel('Importance of noise [1, 999]', fontsize=14)
    plt.xlabel('Vector features', fontsize=16)

    for id, data in enumerate(final_svd_data):

        param_label = (param_prefix + '_' + threshold_data[id].get_noise()
                       + str(image_indices[id]))
        plt.plot(data, label=param_label)

    plt.legend(bbox_to_anchor=(0.8, 1), loc=2, borderaxespad=0.2, fontsize=14)

    if not param_norm:
        plt.xlim(begin, end)

    # adapt ylim
    y_begin, y_end = param_ylim
    plt.ylim(y_begin, y_end)

    output_filename = param_prefix + "_threshold_1_to_" + str(
        param_n) + "_B" + str(begin) + "_E" + str(
            end) + "_" + param_feature + "_S" + str(
                param_step) + "_norm" + str(param_norm) + "_" + param_mode

    if param_color:
        output_filename = output_filename + '_color'

    print("Generation of output figure... %s" % output_filename)
    output_path = os.path.join(pictures_folder, output_filename)

    if not os.path.exists(pictures_folder):
        os.makedirs(pictures_folder)

    plt.savefig(output_path, dpi=(200))
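`ThresholdData` is not defined in these listings. A minimal sketch matching how it is used above (a constructor taking noise name, threshold and color flag, plus three accessors); this is an assumption, not the project's actual class:

class ThresholdData:

    def __init__(self, noise, threshold, color):
        self._noise = noise          # noise kind read from the threshold file
        self._threshold = threshold  # learned threshold for this noise
        self._color = color          # 1 if computed on color images, 0 for grey level

    def get_noise(self):
        return self._noise

    def get_threshold(self):
        return self._threshold

    def isColor(self):
        return self._color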
def main():

    # getting all params
    parser = argparse.ArgumentParser(
        description=
        "Script which detects if an image is noisy or not using specific model"
    )

    parser.add_argument('--image', type=str, help='Image path')
    parser.add_argument('--interval',
                        type=str,
                        help='Interval value to keep from svd',
                        default='"0, 200"')
    parser.add_argument('--model',
                        type=str,
                        help='.joblib or .json file (sklearn or keras model)')
    parser.add_argument('--mode',
                        type=str,
                        help='Kind of normalization level wished',
                        choices=normalization_choices)
    parser.add_argument('--feature',
                        type=str,
                        help='feature data choice',
                        choices=features_choices)
    parser.add_argument(
        '--custom',
        type=str,
        help='Name of custom min max file if use of renormalization of data',
        default=False)

    args = parser.parse_args()

    p_img_file = args.image
    p_model_file = args.model
    p_interval = list(map(int, args.interval.split(',')))
    p_mode = args.mode
    p_feature = args.feature
    p_custom = args.custom

    if '.joblib' in p_model_file:
        kind_model = 'sklearn'

    if '.json' in p_model_file:
        kind_model = 'keras'

    if 'corr' in p_model_file:
        corr_model = True

        indices_corr_path = os.path.join(
            cfg.correlation_indices_folder,
            p_model_file.split('/')[1].replace('.json', '').replace(
                '.joblib', '') + '.csv')

        with open(indices_corr_path, 'r') as f:
            data_corr_indices = [
                int(x) for x in f.readline().split(';') if x != ''
            ]
    else:
        corr_model = False

    if kind_model == 'sklearn':
        # load of model file
        model = joblib.load(p_model_file)

    if kind_model == 'keras':
        with open(p_model_file, 'r') as f:
            json_model = json.load(f)
            model = model_from_json(json_model)
            model.load_weights(p_model_file.replace('.json', '.h5'))

            model.compile(loss='binary_crossentropy',
                          optimizer='adam',
                          metrics=['accuracy'])

    # load image
    img = Image.open(p_img_file)

    data = get_image_features(p_feature, img)

    # get interval values
    begin, end = p_interval

    # check if custom min max file is used
    if p_custom:

        if corr_model:
            test_data = data[data_corr_indices]
        else:
            test_data = data[begin:end]

        if p_mode == 'svdne':

            # set min_max_filename if custom use
            min_max_file_path = custom_min_max_folder + '/' + p_custom

            # need to read min_max_file
            file_path = os.path.join(os.path.dirname(__file__),
                                     min_max_file_path)
            with open(file_path, 'r') as f:
                min_val = float(f.readline().replace('\n', ''))
                max_val = float(f.readline().replace('\n', ''))

            test_data = utils.normalize_arr_with_range(test_data, min_val,
                                                       max_val)

        if p_mode == 'svdn':
            test_data = utils.normalize_arr(test_data)

    else:

        # check mode to normalize data
        if p_mode == 'svdne':

            # set min_max_filename if custom use
            min_max_file_path = path + '/' + p_feature + min_max_ext

            # need to read min_max_file
            file_path = os.path.join(os.path.dirname(__file__),
                                     min_max_file_path)
            with open(file_path, 'r') as f:
                min_val = float(f.readline().replace('\n', ''))
                max_val = float(f.readline().replace('\n', ''))

            l_values = utils.normalize_arr_with_range(data, min_val, max_val)

        elif p_mode == 'svdn':
            l_values = utils.normalize_arr(data)
        else:
            l_values = data

        if corr_model:
            test_data = l_values[data_corr_indices]
        else:
            test_data = l_values[begin:end]

    # get prediction of model
    if kind_model == 'sklearn':
        prediction = model.predict([test_data])[0]

    if kind_model == 'keras':
        test_data = np.asarray(test_data).reshape(1, len(test_data), 1)
        prediction = model.predict_classes([test_data])[0][0]

    # output expected from others scripts
    print(prediction)
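A hedged sketch of the contract the branches above enforce: at prediction time the feature vector must be renormalized with the min/max learned at training time, and keras models additionally expect a (batch, length, 1) tensor. The function name, file name and interval below are placeholders:

import numpy as np

def prepare_test_data(data, min_max_file='lab_min_max', interval=(0, 200)):
    begin, end = interval

    # hypothetical min/max file with one value per line, as read above
    with open(min_max_file, 'r') as f:
        min_val = float(f.readline())
        max_val = float(f.readline())

    sliced = np.asarray(data[begin:end], dtype=float)
    normalized = (sliced - min_val) / (max_val - min_val)  # 'svdne' normalization

    return normalized.reshape(1, len(normalized), 1)       # keras input shape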
def main():

    max_value_svd = 0
    min_value_svd = sys.maxsize

    parser = argparse.ArgumentParser(description="Display svd tend of images with noise level")

    parser.add_argument('--prefix', type=str, help='Generated noise folder prefix (ex: `generated/prefix/noise`)')
    parser.add_argument('--mode', type=str, help='Kind of normalization', choices=normalization_choices)
    parser.add_argument('--feature', type=str, help='feature choice', choices=feature_choices)
    parser.add_argument('--n', type=int, help='Number of images')
    parser.add_argument('--color', type=int, help='Use of color or grey level', default=0)
    parser.add_argument('--norm', type=int, help='Use of normalization from interval or whole data vector', default=0)
    parser.add_argument('--interval', type=str, help='Interval data choice (ex: `0, 200`)', default="0, 200")
    parser.add_argument('--step', type=int, help='Step of image indices to keep', default=1)
    parser.add_argument('--ylim', type=str, help='Limits of y-axis to display data (ex: `0, 1`)', default="0, 1")
    parser.add_argument('--error', type=str, help='Error metric used for information data', choices=error_data_choices)

    args = parser.parse_args()

    param_prefix   = args.prefix
    param_mode     = args.mode
    param_feature  = args.feature
    param_n        = args.n
    param_color    = args.color
    param_norm     = args.norm
    param_interval = list(map(int, args.interval.split(',')))
    param_step     = args.step
    param_ylim     = list(map(float, args.ylim.split(',')))
    param_error    = args.error


    noise_name = param_prefix.split('/')[2]
    param_prefix = param_prefix.split('/')[1].replace('_', '')

    if param_color:
        file_path = os.path.join(param_prefix, param_prefix + "_" + noise_name + "_color_{}." + filename_ext)
    else:
        file_path = os.path.join(param_prefix, param_prefix + "_" + noise_name + "_{}." + filename_ext)

    begin, end = param_interval
    all_svd_data = []

    svd_data = []
    image_indices = []

    noise_indices = range(1, param_n)[::-1]

    # get all data from images
    for i in noise_indices:

        if i % step_picture == 0:  # step_picture: module-level sampling constant

            image_path = file_path.format(str(i))

            img = Image.open(image_path)

            svd_values = get_image_features(param_feature, img)

            if param_norm:
                svd_values = svd_values[begin:end]

            all_svd_data.append(svd_values)

            # update min max values
            min_value = svd_values.min()
            max_value = svd_values.max()

            if min_value < min_value_svd:
                min_value_svd = min_value

            if max_value > max_value_svd:
                max_value_svd = max_value

        print('%.2f%%' % ((param_n - i + 1) / param_n * 100))
        sys.stdout.write("\033[F")

    previous_data = []
    error_data = [0.]

    for id, data in enumerate(all_svd_data):

        # recover the original image index (one image kept every 10 indices,
        # walked in reverse order)
        current_id = (param_n - ((id + 1) * 10))

        if current_id % param_step == 0:

            current_data = data

            if param_mode == 'svdn':
                current_data = utils.normalize_arr(current_data)

            if param_mode == 'svdne':
                current_data = utils.normalize_arr_with_range(current_data, min_value_svd, max_value_svd)

            svd_data.append(current_data)
            image_indices.append(current_id)

            # use of whole image data for computation of ssim or psnr
            if param_error == 'ssim' or param_error == 'psnr':
                image_path = file_path.format(str(current_id))
                current_data = np.asarray(Image.open(image_path))

            # the first kept image becomes the reference; each later image is
            # compared against it
            if len(previous_data) == 0:
                previous_data = current_data
            else:
                current_error = get_error_distance(param_error, previous_data, current_data)
                error_data.append(current_error)

    # display all data using matplotlib (configure plt)
    gridsize = (3, 2)

    # fig, (ax1, ax2) = plt.subplots(nrows=2, ncols=1, figsize=(30, 22))
    fig = plt.figure(figsize=(30, 22))
    ax1 = plt.subplot2grid(gridsize, (0, 0), colspan=2, rowspan=2)
    ax2 = plt.subplot2grid(gridsize, (2, 0), colspan=2)

    ax1.set_title(param_prefix  + ', ' + noise_name + ' noise, interval information ['+ str(begin) +', '+ str(end) +'], ' + param_feature + ' feature, step ' + str(param_step) + ' normalization ' + param_mode)
    ax1.set_ylabel('Importance of noise [1, 999]')
    ax1.set_xlabel('Vector features')

    for id, data in enumerate(svd_data):

        param_label = param_prefix + str(image_indices[id]) + " | " + param_error + ": " + str(error_data[id])
        ax1.plot(data, label=param_label)

    ax1.legend(bbox_to_anchor=(0.75, 1), loc=2, borderaxespad=0.2, fontsize=12)

    if not param_norm:
        ax1.set_xlim(begin, end)

    # adapt ylim
    y_begin, y_end = param_ylim
    ax1.set_ylim(y_begin, y_end)

    output_filename = param_prefix + "_" + noise_name + "_1_to_" + str(param_n) + "_B" + str(begin) + "_E" + str(end) + "_" + param_feature + "_S" + str(param_step) + "_norm" + str(param_norm )+  "_" + param_mode + "_" + param_error

    if param_color:
        output_filename = output_filename + '_color'

    ax2.set_title(param_error + " information for : " + param_prefix  + ', ' + noise_name + ' noise, interval information ['+ str(begin) +', '+ str(end) +'], ' + param_feature + ' feature, step ' + str(param_step) + ', normalization ' + param_mode)
    ax2.set_ylabel(param_error + ' error')
    ax2.set_xlabel('Number of samples per pixel')
    ax2.set_xticks(range(len(image_indices)))
    ax2.set_xticklabels(image_indices)
    ax2.plot(error_data)

    print("Generation of output figure... %s" % output_filename)
    output_path = os.path.join(pictures_folder, output_filename)

    if not os.path.exists(pictures_folder):
        os.makedirs(pictures_folder)

    fig.savefig(output_path, dpi=(200))
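`get_error_distance` is imported from elsewhere in the project and is not shown in these listings. A plausible sketch dispatching on the error name, assuming scikit-image's metrics for 'ssim' and 'psnr' (the project may compute them differently):

import numpy as np
from skimage.metrics import structural_similarity, peak_signal_noise_ratio

def get_error_distance(error_name, img_a, img_b):
    a = np.asarray(img_a)
    b = np.asarray(img_b)

    if error_name == 'ssim':
        # multichannel images need a channel axis, grey-level ones do not
        channel_axis = -1 if a.ndim == 3 else None
        return structural_similarity(a, b, channel_axis=channel_axis)

    if error_name == 'psnr':
        return peak_signal_noise_ratio(a, b)

    # fall back to mean squared error for plain feature vectors (assumption)
    return float(np.mean((a.astype(float) - b.astype(float)) ** 2))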