def applyKmeansMasks(image_input, mask_samples_macro_input_list, image_samples_merged_output, proposal_table_output, micro_samples_images_output_list, centroids_files_output_list, macroclass_sampling_list, macroclass_labels_list, no_data_value, path_time_log, kmeans_param_maximum_iterations=200, kmeans_param_training_set_size_weight=1, kmeans_param_minimum_training_set_size=-1, rate_clean_micro_class=0.0, rand_otb=0, ram_otb=0, number_of_actives_pixels_threshold=200, extension_raster=".tif", save_results_intermediate=False, overwrite=True):

    # Update the log
    starting_event = "applyKmeansMasks() : Kmeans and mask starting : "
    timeLine(path_time_log, starting_event)

    print(endC)
    print(cyan + "applyKmeansMasks() : " + bold + green + "## START : SUBSAMPLING OF " + str(macroclass_labels_list) + endC)
    print(endC)

    if debug >= 2:
        print(cyan + "applyKmeansMasks() : variables dans la fonction" + endC)
        print(cyan + "applyKmeansMasks() : " + endC + "image_input : " + str(image_input) + endC)
        print(cyan + "applyKmeansMasks() : " + endC + "image_samples_merged_output : " + str(image_samples_merged_output) + endC)
        print(cyan + "applyKmeansMasks() : " + endC + "proposal_table_output : " + str(proposal_table_output) + endC)
        print(cyan + "applyKmeansMasks() : " + endC + "mask_samples_macro_input_list : " + str(mask_samples_macro_input_list) + endC)
        print(cyan + "applyKmeansMasks() : " + endC + "micro_samples_images_output_list : " + str(micro_samples_images_output_list) + endC)
        print(cyan + "applyKmeansMasks() : " + endC + "centroids_files_output_list : " + str(centroids_files_output_list) + endC)
        print(cyan + "applyKmeansMasks() : " + endC + "macroclass_sampling_list : " + str(macroclass_sampling_list) + endC)
        print(cyan + "applyKmeansMasks() : " + endC + "macroclass_labels_list : " + str(macroclass_labels_list) + endC)
        print(cyan + "applyKmeansMasks() : " + endC + "kmeans_param_maximum_iterations : " + str(kmeans_param_maximum_iterations) + endC)
        print(cyan + "applyKmeansMasks() : " + endC + "kmeans_param_training_set_size_weight : " + str(kmeans_param_training_set_size_weight) + endC)
        print(cyan + "applyKmeansMasks() : " + endC + "kmeans_param_minimum_training_set_size : " + str(kmeans_param_minimum_training_set_size) + endC)
        print(cyan + "applyKmeansMasks() : " + endC + "rate_clean_micro_class : " + str(rate_clean_micro_class))
        print(cyan + "applyKmeansMasks() : " + endC + "no_data_value : " + str(no_data_value) + endC)
        print(cyan + "applyKmeansMasks() : " + endC + "rand_otb : " + str(rand_otb) + endC)
        print(cyan + "applyKmeansMasks() : " + endC + "ram_otb : " + str(ram_otb) + endC)
        print(cyan + "applyKmeansMasks() : " + endC + "number_of_actives_pixels_threshold : " + str(number_of_actives_pixels_threshold))
        print(cyan + "applyKmeansMasks() : " + endC + "extension_raster : " + str(extension_raster) + endC)
        print(cyan + "applyKmeansMasks() : " + endC + "save_results_intermediate : " + str(save_results_intermediate) + endC)
        print(cyan + "applyKmeansMasks() : " + endC + "overwrite : " + str(overwrite) + endC)

    # Constants
    HEADER_TABLEAU_MODIF = "MICROCLASSE;TRAITEMENT\n"

    CODAGE_16B = "uint16"
    CODAGE_8B = "uint8"
    EXT_XML = ".xml"

    SUFFIX_MASK_CLEAN = "_clean"
    SUFFIX_SAMPLE_MICRO = "_sample_micro"
    SUFFIX_STATISTICS = "_statistics"
    SUFFIX_CENTROID = "_centroid"
    SUFFIX_MASK_TEMP = "_tmp"

    # Create the temporary output files if they are not specified
    #-------------------------------------------------------------------------

    length_mask = len(mask_samples_macro_input_list)
    images_mask_cleaned_list = []
    temporary_files_list = []
    micro_samples_images_list = []
    centroids_files_list = []
    repertory_output_tmp_list = []

    if image_samples_merged_output != "" :
        repertory_base_output = os.path.dirname(image_samples_merged_output)
        filename = os.path.splitext(os.path.basename(image_samples_merged_output))[0]
    else :
        repertory_base_output = os.path.dirname(micro_samples_images_output_list[0])
        filename = os.path.splitext(os.path.basename(micro_samples_images_output_list[0]))[0]

    file_statistic_points = repertory_base_output + os.sep + filename + SUFFIX_STATISTICS + EXT_XML

    for macroclass_id in range(length_mask):

        repertory_output = repertory_base_output + os.sep + str(macroclass_labels_list[macroclass_id])
        if not os.path.isdir(repertory_output):
            os.makedirs(repertory_output)
        repertory_output_tmp_list.append(repertory_output)
        samples_image_input = mask_samples_macro_input_list[macroclass_id]
        filename = os.path.splitext(os.path.basename(samples_image_input))[0]
        image_mask_cleaned =  repertory_output + os.sep + filename + SUFFIX_MASK_CLEAN + extension_raster
        images_mask_cleaned_list.append(image_mask_cleaned)
        image_tmp =  repertory_output + os.sep + filename + SUFFIX_MASK_TEMP + extension_raster
        temporary_files_list.append(image_tmp)
        if micro_samples_images_output_list == [] :
            micro_samples_image = repertory_output + os.sep + filename + SUFFIX_SAMPLE_MICRO + extension_raster
        else :
            micro_samples_image = micro_samples_images_output_list[macroclass_id]
        micro_samples_images_list.append(micro_samples_image)
        if centroids_files_output_list == [] :
            centroids_file = repertory_output + os.sep + filename + SUFFIX_CENTROID + extension_raster
        else :
            centroids_file = centroids_files_output_list[macroclass_id]
        centroids_files_list.append(centroids_file)

    # Remove pixels that overlap across several mask images
    #-----------------------------------------------------

    if length_mask > 1:
        image_name = os.path.splitext(os.path.basename(image_input))[0]
        deletePixelsSuperpositionMasks(mask_samples_macro_input_list, images_mask_cleaned_list, image_name, CODAGE_8B)
    else:
        images_mask_cleaned_list = mask_samples_macro_input_list

    # Run the k-means for each macroclass
    #--------------------------------------------

    # Initialise the thread list for multi-threading
    thread_list = []

    for macroclass_id in range(length_mask):

        mask_sample_input = images_mask_cleaned_list[macroclass_id]
        micro_samples_image = micro_samples_images_list[macroclass_id]
        image_tmp = temporary_files_list[macroclass_id]
        centroids_file = centroids_files_list[macroclass_id]
        check = os.path.isfile(micro_samples_image)

        if check and not overwrite : # If an output file with the same name already exists and overwrite is disabled, skip to the next classification
            print(cyan + "applyKmeansMasks() : " + bold + yellow +  "Computing kmeans from %s with %s already done : no actualisation" % (image_input, mask_sample_input) + endC)

        else:            # Otherwise, apply a k-means

            if check :
                removeFile(micro_samples_image)   # Remove the existing file, if any

            print(cyan + "applyKmeansMasks() : " + bold + green + "Computing kmeans from %s with %s ; output image is %s" %(image_input, mask_sample_input,micro_samples_image) + endC)

            # Get the number of micro classes
            number_of_classes = macroclass_sampling_list[macroclass_id]   # Number of micro classes
            label = macroclass_labels_list[macroclass_id]                 # Macroclass label, e.g. 11000

            # Multi-threaded call to the k-means computation
            thread = threading.Thread(target=computeKmeans, args=(image_input, mask_sample_input, image_tmp, micro_samples_image, centroids_file, label, number_of_classes, macroclass_id, number_of_actives_pixels_threshold, kmeans_param_minimum_training_set_size, kmeans_param_maximum_iterations, length_mask, no_data_value, rand_otb, int(ram_otb/length_mask), CODAGE_8B, CODAGE_16B, save_results_intermediate, overwrite))
            thread.start()
            thread_list.append(thread)

    # Wait for the k-means threads of all macroclasses to finish
    try:
        for thread in thread_list:
            thread.join()
    except Exception:
        print(cyan + "applyKmeansMasks() : " + bold + red + "Erreur lors du calcul du kmeans : impossible de demarrer le thread" + endC, file=sys.stderr)

    # Merge the micro-class samples
    #------------------------------
    if image_samples_merged_output != "" :

        mergeListRaster(micro_samples_images_list, image_samples_merged_output, CODAGE_16B)
        updateReferenceProjection(image_input, image_samples_merged_output)

        # Create the proposal table and the statistics file
        #--------------------------------------------------------------
        if proposal_table_output != "" :

            suppress_micro_class_list = []
            info_micoclass_nbpoints_dico = {}
            nb_points_total = 0
            nb_points_medium = 0

            # List of the available micro-class identifiers
            id_micro_list = identifyPixelValues(image_samples_merged_output)
            if 0 in id_micro_list :
                id_micro_list.remove(0)
            nb_micr_class = len(id_micro_list)

            # For every micro class
            for id_micro in id_micro_list :
                nb_pixels = countPixelsOfValue(image_samples_merged_output, id_micro)

                info_micoclass_nbpoints_dico[id_micro] = nb_pixels
                nb_points_total += nb_pixels

            # Mean number of points per micro class
            if nb_micr_class != 0 :
                nb_points_medium = int(nb_points_total / nb_micr_class)
            nb_points_min = int((nb_points_medium * rate_clean_micro_class) / 100)
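            # Worked example (hypothetical numbers): with nb_points_total = 4000 spread
            # over nb_micr_class = 4 micro classes, nb_points_medium = 1000; a
            # rate_clean_micro_class of 5.0 then gives nb_points_min = int((1000 * 5.0) / 100) = 50.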

            # Identify the micro classes that are too small
            if debug >= 4:
                print("rate_clean_micro_class = " + str(rate_clean_micro_class))
                print("nb_points_medium = " + str(nb_points_medium))
                print("nb_points_min = " + str(nb_points_min))

            # Prepare the statistics file
            writeTextFile(file_statistic_points, '<?xml version="1.0" ?>\n')
            appendTextFileCR(file_statistic_points, '<GeneralStatistics>')
            appendTextFileCR(file_statistic_points, '    <Statistic name="pointsPerClassRaw">')

            for micro_class_id in info_micoclass_nbpoints_dico :
                nb_points = info_micoclass_nbpoints_dico[micro_class_id]
                if debug >= 4:
                    print("micro_class_id = " + str(micro_class_id) + ", nb_points = " + str(nb_points))
                appendTextFileCR(file_statistic_points, '        <StatisticPoints class="%d" value="%d" />' %(micro_class_id, nb_points))

                if nb_points < nb_points_min :
                    # Propose this micro class for deletion
                    suppress_micro_class_list.append(micro_class_id)

            # Close the statistics file
            appendTextFileCR(file_statistic_points, '    </Statistic>')
            appendTextFileCR(file_statistic_points, '</GeneralStatistics>')

            # Check whether the previously created table must be overwritten
            check = os.path.isfile(proposal_table_output)
            if check and not overwrite :
                print(cyan + "applyKmeansMasks() : " + bold + yellow + "Modifier table already exists." + '\n' + endC)
            else:
                # Try to remove the file
                try:
                    removeFile(proposal_table_output)
                except Exception:
                    pass   # Ignore the exception raised when the file does not exist (and thus cannot be removed)
                # List the micro classes to delete
                text_output = HEADER_TABLEAU_MODIF

                for micro_class_del in suppress_micro_class_list:
                    text_output += "%d;-1\n" %(micro_class_del)

                # Write the reallocation proposal file
                writeTextFile(proposal_table_output, text_output)
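                # For illustration (hypothetical micro-class IDs), the written file looks like:
                #   MICROCLASSE;TRAITEMENT
                #   21003;-1
                #   21007;-1
                # where "-1" marks a micro class proposed for deletion.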

    # Remove useless intermediate files
    #---------------------------------------------

    if not save_results_intermediate:
        for macroclass_id in range(length_mask):
            if (os.path.isfile(temporary_files_list[macroclass_id])) :
                removeFile(temporary_files_list[macroclass_id])

            if (length_mask > 1) and (os.path.isfile(images_mask_cleaned_list[macroclass_id])) :
                removeFile(images_mask_cleaned_list[macroclass_id])

            if (micro_samples_images_output_list == []) and (os.path.isfile(micro_samples_images_list[macroclass_id])) :
                removeFile(micro_samples_images_list[macroclass_id])

            if (centroids_files_output_list == []) and (os.path.isfile(centroids_files_list[macroclass_id])) :
                removeFile(centroids_files_list[macroclass_id])

            if os.path.isdir(repertory_output_tmp_list[macroclass_id]) :
                removeDir(repertory_output_tmp_list[macroclass_id])

    print(cyan + "applyKmeansMasks() : " + bold + green + "## END : KMEANS CLASSIFICATION" + endC)
    print(endC)

    # Update the log
    ending_event = "applyKmeansMasks() : Kmeans and mask ending : "
    timeLine(path_time_log, ending_event)

    return
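
# A minimal usage sketch of applyKmeansMasks(), with hypothetical paths (the
# timeLine, computeKmeans and other helpers are assumed to come from the
# project's Lib_* modules):
#
#   applyKmeansMasks("stack.tif",                        # image_input
#                    ["mask_11000.tif", "mask_21000.tif"],
#                    "samples_merged.tif",               # image_samples_merged_output
#                    "proposal_table.csv",               # proposal_table_output
#                    [], [],                             # derive micro/centroid outputs automatically
#                    [4, 4],                             # 4 micro classes per macroclass
#                    [11000, 21000],                     # macroclass labels
#                    0, "log.txt")                       # no_data_value, path_time_log
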
def occupationIndicator(input_grid,
                        output_grid,
                        class_label_dico_out,
                        input_vector_classif,
                        field_classif_name,
                        input_soil_occupation,
                        input_height_model,
                        class_build_list,
                        class_road_list,
                        class_baresoil_list,
                        class_water_list,
                        class_vegetation_list,
                        class_high_vegetation_list,
                        class_low_vegetation_list,
                        epsg=2154,
                        no_data_value=0,
                        format_raster='GTiff',
                        format_vector='ESRI Shapefile',
                        extension_raster='.tif',
                        extension_vector='.shp',
                        path_time_log='',
                        save_results_intermediate=False,
                        overwrite=True):

    if debug >= 3:
        print(
            '\n' + bold + green +
            "Calcul d'indicateurs du taux de classes OCS - Variables dans la fonction :"
            + endC)
        print(cyan + "    occupationIndicator() : " + endC + "input_grid : " +
              str(input_grid) + endC)
        print(cyan + "    occupationIndicator() : " + endC + "output_grid : " +
              str(output_grid) + endC)
        print(cyan + "    occupationIndicator() : " + endC +
              "class_label_dico_out : " + str(class_label_dico_out) + endC)
        print(cyan + "    occupationIndicator() : " + endC +
              "input_vector_classif : " + str(input_vector_classif) + endC)
        print(cyan + "    occupationIndicator() : " + endC +
              "field_classif_name : " + str(field_classif_name) + endC)
        print(cyan + "    occupationIndicator() : " + endC +
              "input_soil_occupation : " + str(input_soil_occupation) + endC)
        print(cyan + "    occupationIndicator() : " + endC +
              "input_height_model : " + str(input_height_model) + endC)
        print(cyan + "    occupationIndicator() : " + endC +
              "class_build_list : " + str(class_build_list) + endC)
        print(cyan + "    occupationIndicator() : " + endC +
              "class_road_list : " + str(class_road_list) + endC)
        print(cyan + "    occupationIndicator() : " + endC +
              "class_baresoil_list : " + str(class_baresoil_list) + endC)
        print(cyan + "    occupationIndicator() : " + endC +
              "class_water_list : " + str(class_water_list) + endC)
        print(cyan + "    occupationIndicator() : " + endC +
              "class_vegetation_list : " + str(class_vegetation_list) + endC)
        print(cyan + "    occupationIndicator() : " + endC +
              "class_high_vegetation_list : " +
              str(class_high_vegetation_list) + endC)
        print(cyan + "    occupationIndicator() : " + endC +
              "class_low_vegetation_list : " + str(class_low_vegetation_list) +
              endC)
        print(cyan + "    occupationIndicator() : " + endC + "epsg : " +
              str(epsg) + endC)
        print(cyan + "    occupationIndicator() : " + endC +
              "no_data_value : " + str(no_data_value) + endC)
        print(cyan + "    occupationIndicator() : " + endC +
              "format_raster : " + str(format_raster) + endC)
        print(cyan + "    occupationIndicator() : " + endC +
              "format_vector : " + str(format_vector) + endC)
        print(cyan + "    occupationIndicator() : " + endC +
              "extension_raster : " + str(extension_raster) + endC)
        print(cyan + "    occupationIndicator() : " + endC +
              "extension_vector : " + str(extension_vector) + endC)
        print(cyan + "    occupationIndicator() : " + endC +
              "path_time_log : " + str(path_time_log) + endC)
        print(cyan + "    occupationIndicator() : " + endC +
              "save_results_intermediate : " + str(save_results_intermediate) +
              endC)
        print(cyan + "    occupationIndicator() : " + endC + "overwrite : " +
              str(overwrite) + endC + '\n')

    # Constants
    CODAGE_8BITS = 'uint8'
    CODAGE_FLOAT = 'float'
    NODATA_FIELD = 'nodata'

    PREFIX_S = 'S_'
    SUFFIX_TEMP = '_temp'
    SUFFIX_RASTER = '_raster'
    SUFFIX_HEIGHT = '_height'
    SUFFIX_VEGETATION = '_vegetation'

    VEG_MEAN_FIELD = 'veg_h_mean'
    VEG_MAX_FIELD = 'veg_h_max'
    VEG_RATE_FIELD = 'veg_h_rate'
    MAJ_OCS_FIELD = 'class_OCS'

    BUILT_FIELD, BUILT_LABEL = 'built', 1
    MINERAL_FIELD, MINERAL_LABEL = 'mineral', 2
    BARESOIL_FIELD, BARESOIL_LABEL = 'baresoil', 3
    WATER_FIELD, WATER_LABEL = 'water', 4
    VEGETATION_FIELD, VEGETATION_LABEL = 'veget', 5
    HIGH_VEGETATION_FIELD, HIGH_VEGETATION_LABEL = 'high_veg', 6
    LOW_VEGETATION_FIELD, LOW_VEGETATION_LABEL = 'low_veg', 7

    # Update the log
    starting_event = "occupationIndicator() : Début du traitement : "
    timeLine(path_time_log, starting_event)

    print(cyan + "occupationIndicator() : " + bold + green +
          "DEBUT DES TRAITEMENTS" + endC + '\n')

    # Basename variables
    output_grid_basename = os.path.basename(os.path.splitext(output_grid)[0])
    output_grid_dirname = os.path.dirname(output_grid)
    soil_occupation_basename = os.path.basename(
        os.path.splitext(input_soil_occupation)[0])

    # Temporary file variables
    temp_directory = output_grid_dirname + os.sep + output_grid_basename
    temp_grid = temp_directory + os.sep + output_grid_basename + SUFFIX_TEMP + extension_vector
    temp_soil_occupation = temp_directory + os.sep + soil_occupation_basename + SUFFIX_TEMP + SUFFIX_RASTER + extension_raster
    temp_height_vegetation = temp_directory + os.sep + output_grid_basename + SUFFIX_HEIGHT + SUFFIX_VEGETATION + extension_raster

    # Clean up previous runs
    if overwrite:
        if debug >= 3:
            print(cyan + "occupationIndicator() : " + endC +
                  "Nettoyage des traitements précédents." + endC + '\n')
        removeFile(output_grid)
        cleanTempData(temp_directory)
    else:
        if os.path.exists(output_grid):
            raise NameError(
                cyan + "occupationIndicator() : " + bold + yellow +
                "Le fichier de sortie existe déjà et ne sera pas regénéré." +
                endC + '\n')

    #############
    # Step 0/3 # Preparation of the processing
    #############

    print(cyan + "occupationIndicator() : " + bold + green +
          "ETAPE 0/3 - Début de la préparation des traitements." + endC + '\n')

    # Rasterise the classification (OCS) information if the input is in vector format
    if input_vector_classif != "":
        if debug >= 3:
            print(cyan + "occupationIndicator() : " + endC + bold +
                  "Rasterisation de l'OCS vecteur." + endC + '\n')
        reference_image = input_soil_occupation
        soil_occupation_vector_basename = os.path.basename(
            os.path.splitext(input_vector_classif)[0])
        input_soil_occupation = temp_directory + os.sep + soil_occupation_vector_basename + SUFFIX_RASTER + extension_raster
        command = "otbcli_Rasterization -in %s -out %s %s -im %s -background 0 -mode attribute -mode.attribute.field %s" % (
            input_vector_classif, input_soil_occupation, CODAGE_8BITS,
            reference_image, field_classif_name)
        if debug >= 3:
            print(command)
        exit_code = os.system(command)
        if exit_code != 0:
            raise NameError(
                cyan + "occupationIndicator() : " + bold + red +
                "Erreur lors de la rasterisation de l'OCS vecteur." + endC)

    # Analyse the OCS raster layer
    class_other_list = identifyPixelValues(input_soil_occupation)
    no_data_ocs = getNodataValueImage(input_soil_occupation, 1)
    if no_data_ocs is not None:
        no_data_value = no_data_ocs

    # Assign new classification codes
    divide_vegetation_classes = False
    if class_high_vegetation_list != [] and class_low_vegetation_list != []:
        divide_vegetation_classes = True

    col_to_delete_list = [
        "minority", PREFIX_S + NODATA_FIELD, PREFIX_S + BUILT_FIELD,
        PREFIX_S + MINERAL_FIELD, PREFIX_S + BARESOIL_FIELD,
        PREFIX_S + WATER_FIELD
    ]
    class_label_dico = {
        int(no_data_value): NODATA_FIELD,
        int(BUILT_LABEL): BUILT_FIELD,
        int(MINERAL_LABEL): MINERAL_FIELD,
        int(BARESOIL_LABEL): BARESOIL_FIELD,
        int(WATER_LABEL): WATER_FIELD
    }
    if not divide_vegetation_classes:
        class_label_dico[int(VEGETATION_LABEL)] = VEGETATION_FIELD
        col_to_delete_list.append(PREFIX_S + VEGETATION_FIELD)
    else:
        class_label_dico[int(HIGH_VEGETATION_LABEL)] = HIGH_VEGETATION_FIELD
        class_label_dico[int(LOW_VEGETATION_LABEL)] = LOW_VEGETATION_FIELD
        col_to_delete_list.append(PREFIX_S + HIGH_VEGETATION_FIELD)
        col_to_delete_list.append(PREFIX_S + LOW_VEGETATION_FIELD)

    # Handle class reallocation
    if debug >= 3:
        print(cyan + "occupationIndicator() : " + endC + bold +
              "Reaffectation du raster OCS." + endC + '\n')

    reaff_class_list = []
    macro_reaff_class_list = []

    for label in class_build_list:
        if label in class_other_list:
            class_other_list.remove(label)
        reaff_class_list.append(label)
        macro_reaff_class_list.append(BUILT_LABEL)

    for label in class_road_list:
        if label in class_other_list:
            class_other_list.remove(label)
        reaff_class_list.append(label)
        macro_reaff_class_list.append(MINERAL_LABEL)

    for label in class_baresoil_list:
        if label in class_other_list:
            class_other_list.remove(label)
        reaff_class_list.append(label)
        macro_reaff_class_list.append(BARESOIL_LABEL)

    for label in class_water_list:
        if label in class_other_list:
            class_other_list.remove(label)
        reaff_class_list.append(label)
        macro_reaff_class_list.append(WATER_LABEL)

    if not divide_vegetation_classes:
        for label in class_vegetation_list:
            if label in class_other_list:
                class_other_list.remove(label)
            reaff_class_list.append(label)
            macro_reaff_class_list.append(VEGETATION_LABEL)
    else:
        for label in class_high_vegetation_list:
            if label in class_other_list:
                class_other_list.remove(label)
            reaff_class_list.append(label)
            macro_reaff_class_list.append(HIGH_VEGETATION_LABEL)
        for label in class_low_vegetation_list:
            if label in class_other_list:
                class_other_list.remove(label)
            reaff_class_list.append(label)
            macro_reaff_class_list.append(LOW_VEGETATION_LABEL)

    # Remaining pixel values that were not used are sent to no-data
    for label in class_other_list:
        reaff_class_list.append(label)
        macro_reaff_class_list.append(no_data_value)
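
    # For illustration (hypothetical input labels): with class_build_list = [10, 11]
    # and class_water_list = [40], the two lists line up index by index, e.g.
    # reaff_class_list = [10, 11, 40, ...] and macro_reaff_class_list = [1, 1, 4, ...],
    # so reallocateClassRaster() maps every input label to its macroclass code.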

    reallocateClassRaster(input_soil_occupation, temp_soil_occupation,
                          reaff_class_list, macro_reaff_class_list,
                          CODAGE_8BITS)

    print(cyan + "occupationIndicator() : " + bold + green +
          "ETAPE 0/3 - Fin de la préparation des traitements." + endC + '\n')

    #############
    # Step 1/3 # Compute the OCS class rate indicators
    #############

    print(
        cyan + "occupationIndicator() : " + bold + green +
        "ETAPE 1/3 - Début du calcul des indicateurs de taux de classes OCS." +
        endC + '\n')

    if debug >= 3:
        print(cyan + "occupationIndicator() : " + endC + bold +
              "Calcul des indicateurs de taux de classes OCS." + endC + '\n')

    statisticsVectorRaster(temp_soil_occupation, input_grid, temp_grid, 1,
                           True, True, False, col_to_delete_list, [],
                           class_label_dico, path_time_log, True,
                           format_vector, save_results_intermediate, overwrite)

    # Merge the vegetation classes when high and low vegetation are separated (so the vegetation rate can be used in the decision tree)
    if divide_vegetation_classes:
        temp_grid_v2 = os.path.splitext(
            temp_grid)[0] + "_v2" + extension_vector
        sql_statement = "SELECT *, (%s + %s) AS %s FROM %s" % (
            HIGH_VEGETATION_FIELD, LOW_VEGETATION_FIELD, VEGETATION_FIELD,
            os.path.splitext(os.path.basename(temp_grid))[0])
        os.system("ogr2ogr -sql '%s' -dialect SQLITE %s %s" %
                  (sql_statement, temp_grid_v2, temp_grid))
        removeVectorFile(temp_grid, format_vector=format_vector)
        copyVectorFile(temp_grid_v2, temp_grid, format_vector=format_vector)

    print(cyan + "occupationIndicator() : " + bold + green +
          "ETAPE 1/3 - Fin du calcul des indicateurs de taux de classes OCS." +
          endC + '\n')

    #############
    # Step 2/3 # Compute the "vegetation height" indicator
    #############

    print(
        cyan + "occupationIndicator() : " + bold + green +
        "ETAPE 2/3 - Début du calcul de l'indicateur de \"hauteur de végétation\"."
        + endC + '\n')

    computeVegetationHeight(
        temp_grid, output_grid, temp_soil_occupation, input_height_model,
        temp_height_vegetation, divide_vegetation_classes, VEGETATION_LABEL,
        HIGH_VEGETATION_LABEL, LOW_VEGETATION_LABEL, HIGH_VEGETATION_FIELD,
        LOW_VEGETATION_FIELD, VEG_MEAN_FIELD, VEG_MAX_FIELD, VEG_RATE_FIELD,
        CODAGE_FLOAT, SUFFIX_TEMP, no_data_value, format_vector, path_time_log,
        save_results_intermediate, overwrite)

    print(
        cyan + "occupationIndicator() : " + bold + green +
        "ETAPE 2/3 - Fin du calcul de l'indicateur de \"hauteur de végétation\"."
        + endC + '\n')

    #############
    # Step 3/3 # Compute the majority class indicator
    #############

    print(
        cyan + "occupationIndicator() : " + bold + green +
        "ETAPE 3/3 - Début du calcul de l'indicateur de classe majoritaire." +
        endC + '\n')

    if input_height_model != "":
        computeMajorityClass(output_grid, temp_directory, NODATA_FIELD,
                             BUILT_FIELD, MINERAL_FIELD, BARESOIL_FIELD,
                             WATER_FIELD, VEGETATION_FIELD,
                             HIGH_VEGETATION_FIELD, LOW_VEGETATION_FIELD,
                             MAJ_OCS_FIELD, VEG_MEAN_FIELD,
                             class_label_dico_out, format_vector,
                             extension_vector, overwrite)
    else:
        print(
            cyan + "occupationIndicator() : " + bold + yellow +
            "Pas de calcul de l'indicateur de classe majoritaire demandé (pas de MNH en entrée)."
            + endC + '\n')

    print(cyan + "occupationIndicator() : " + bold + green +
          "ETAPE 3/3 - Fin du calcul de l'indicateur de classe majoritaire." +
          endC + '\n')

    ####################################################################

    # Remove temporary files
    if not save_results_intermediate:
        if debug >= 3:
            print(cyan + "occupationIndicator() : " + endC +
                  "Suppression des fichiers temporaires." + endC + '\n')
        deleteDir(temp_directory)

    print(cyan + "occupationIndicator() : " + bold + green +
          "FIN DES TRAITEMENTS" + endC + '\n')

    # Update the log
    ending_event = "occupationIndicator() : Fin du traitement : "
    timeLine(path_time_log, ending_event)

    return 0
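
# A minimal usage sketch of occupationIndicator(), with hypothetical paths and
# class codes (the OCS raster is assumed to contain the listed label values):
#
#   occupationIndicator("grid.shp", "grid_indic.shp",
#                       {0: 'Other', 1: 'Built', 2: 'Mineral', 3: 'Baresoil',
#                        4: 'Water', 5: 'Vegetation'},
#                       "", "",                          # no input vector classification
#                       "ocs.tif", "mnh.tif",            # OCS raster and height model
#                       class_build_list=[11100], class_road_list=[11200],
#                       class_baresoil_list=[13000], class_water_list=[12000],
#                       class_vegetation_list=[20000],
#                       class_high_vegetation_list=[], class_low_vegetation_list=[])
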
def classRasterSubSampling(satellite_image_input, classified_image_input, image_output, table_reallocation, sub_sampling_number, no_data_value, path_time_log, rand_otb=0, ram_otb=0, number_of_actives_pixels_threshold=8000, extension_raster=".tif", save_results_intermediate=False, overwrite=True):

    # Update the log
    starting_event = "classRasterSubSampling() : Micro class subsampling on classification image starting : "
    timeLine(path_time_log, starting_event)

    if debug >= 3:
        print(cyan + "classRasterSubSampling() : " + endC + "satellite_image_input : " + str(satellite_image_input) + endC)
        print(cyan + "classRasterSubSampling() : " + endC + "classified_image_input : " + str(classified_image_input) + endC)
        print(cyan + "classRasterSubSampling() : " + endC + "image_output : " + str(image_output) + endC)
        print(cyan + "classRasterSubSampling() : " + endC + "table_reallocation : " + str(table_reallocation) + endC)
        print(cyan + "classRasterSubSampling() : " + endC + "sub_sampling_number : " + str(sub_sampling_number) + endC)
        print(cyan + "classRasterSubSampling() : " + endC + "no_data_value : " + str(no_data_value) + endC)
        print(cyan + "classRasterSubSampling() : " + endC + "path_time_log : " + str(path_time_log) + endC)
        print(cyan + "classRasterSubSampling() : " + endC + "rand_otb : " + str(rand_otb) + endC)
        print(cyan + "classRasterSubSampling() : " + endC + "ram_otb : " + str(ram_otb) + endC)
        print(cyan + "classRasterSubSampling() : " + endC + "number_of_actives_pixels_threshold : " + str(number_of_actives_pixels_threshold) + endC)
        print(cyan + "classRasterSubSampling() : " + endC + "extension_raster : " + str(extension_raster) + endC)
        print(cyan + "classRasterSubSampling() : " + endC + "save_results_intermediate : " + str(save_results_intermediate) + endC)
        print(cyan + "classRasterSubSampling() : " + endC + "overwrite : " + str(overwrite) + endC)

    # Constants
    CODAGE = "uint16"
    CODAGE_8B = "uint8"
    TEMP = "TempSubSampling_"
    MASK_SUF = "_Mask"
    SUB_SAMPLE_SUF = "_SubSampled"
    CENTROID_SUF = "_Centroids"
    TEMP_OUT = "_temp_out"
    EXTENSION_TXT = ".txt"

    # Content of the new table
    text_new_table = ""

    # BUILD THE USEFUL PATH NAMES
    name = os.path.splitext(os.path.basename(image_output))[0]
    input_classified_image_path = os.path.dirname(classified_image_input)                      # e.g. D2_Par_Zone/Paysage_01/Corr_2/Resultats/Temp/
    temp_sub_sampling_path = input_classified_image_path + os.sep + TEMP + name + os.sep       # Folder holding this module's temporary files, e.g. D2_Par_Zone/Paysage_01/Corr_2/Resultats/Temp/Temp_Sub_Sampling/
    input_classified_image_complete_name = os.path.basename(classified_image_input)            # e.g. Paysage_01_raw.tif
    input_classified_image_name = os.path.splitext(input_classified_image_complete_name)[0]    # e.g. Paysage_01_raw
    input_classified_image_extend = os.path.splitext(input_classified_image_complete_name)[1]  # e.g. .tif
    image_output_temp = os.path.splitext(image_output)[0] + TEMP_OUT + extension_raster        # e.g. D2_Par_Zone/Paysage_01/Corr_2/Resultats/Temp/Temp_Sub_Sampling/Paysage_01_raw_temp.tif

    # Create temp_sub_sampling_path if it does not exist
    if not os.path.isdir(os.path.dirname(temp_sub_sampling_path)) :
        os.makedirs(os.path.dirname(temp_sub_sampling_path))

    print(cyan + "classRasterSubSampling() : " + bold + green + "START ...\n" + endC)

    # Read the proposal table file
    supp_class_list, reaff_class_list, macro_reaff_class_list, sub_sampling_class_list, sub_sampling_number_list = readReallocationTable(table_reallocation, sub_sampling_number)      # Function from Lib_text
    info_table_list = readTextFileBySeparator(table_reallocation, "\n")

    # Get the list of micro classes contained in the input classification file
    class_values_list = identifyPixelValues(classified_image_input)

    # Remove from the table the lines corresponding to "-2" actions
    for ligne_table in info_table_list:
        if not "-2" in ligne_table[0]:
            text_new_table += str(ligne_table[0]) + "\n"
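
    # For illustration (hypothetical table content): a line such as
    # "21008;21008;..." is kept, while a line containing "-2" such as
    # "21009;-2;..." is dropped from the rewritten table.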

    if debug >= 3:
        print("supp_class_list : " + str(supp_class_list))
        print("reaff_class_list : " + str(reaff_class_list))
        print("macro_reaff_class_list : " + str(macro_reaff_class_list))
        print("sub_sampling_class_list : " + str(sub_sampling_class_list))
        print("sub_sampling_number_list : " + str(sub_sampling_number_list))

    # This module only deals with the subsampling part
    # Handle the deletion case
    if len(supp_class_list) > 0:
        print(cyan + "classRasterSubSampling() : " + bold + yellow + "ATTENTION : Les classes ne sont pas supprimees pour le fichier classification format raster." + '\n' + endC)

    # Handle the reallocation case
    if len(reaff_class_list) > 0:
        print(cyan + "classRasterSubSampling() : " + bold + yellow + "ATTENTION : la brique SpecificSubSampling ne traite pas les reaffectation. A l'issue de cette brique, verifier la table de reallocation et executer la brique de reallocation." + '\n' + endC)

    if len(sub_sampling_class_list) > 0 :

        if debug >= 3:
            print(cyan + "classRasterSubSampling() : " + bold + green + "DEBUT DU SOUS ECHANTILLONAGE DES CLASSES %s " %(sub_sampling_class_list) + endC)

        # Iterate over the classes to subsample
        processing_pass_first = False
        for idx_class in range(len(sub_sampling_class_list)) :

            # INITIALISE THE PROCESSING OF THE CLASS

            # Class to subsample, e.g. 21008
            class_to_sub_sample = sub_sampling_class_list[idx_class]
            if idx_class == 0 or not processing_pass_first :
                # Image to reclassify: classified_image_input on the first pass
                image_to_sub_sample = classified_image_input
            else :
                # Image to reclassify: the output of the previous iteration afterwards
                image_to_sub_sample = image_output

            # Determine the next available label for the class
            base_subclass_label = int(class_to_sub_sample/100)*100
            subclass_label = base_subclass_label
            for class_value in class_values_list:
                if (class_value > subclass_label) and (class_value < base_subclass_label + 100) :
                    subclass_label = class_value
            subclass_label += 1
            # subclass_label = int(class_to_sub_sample/100)*100 + 20 + class_to_sub_sample%20 * 5
            # Starting label of the sub classes. Proposed formula: first 3 digits of class_to_sub_sample, then add 20 + 5 * (class_to_sub_sample modulo 20), e.g. 21000 -> 21020, 21001 -> 21025, 21002 -> 21030, etc.
            # Assumes there are fewer than 20 micro classes and that each is subsampled into at most 5 parts. If that is not the case: TO BE ADAPTED
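            # Worked example (hypothetical values): for class_to_sub_sample = 21008 with
            # class_values_list = [11000, 21008, 21009], base_subclass_label = 21000; the
            # loop keeps the highest existing value in [21000, 21100), here 21009, so
            # subclass_label becomes 21010.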

            number_of_sub_samples = sub_sampling_number_list[idx_class]    # Number of sub classes requested for the subsampling of class_to_sub_sample, e.g. 4
            class_mask_raster = temp_sub_sampling_path + input_classified_image_name + "_" + str(class_to_sub_sample) + MASK_SUF + input_classified_image_extend    # e.g. D2_Par_Zone/Paysage_01/Corr_2/Resultats/Temp/Temp_Sub_Sampling/Paysage_01_raw_21008_Mask.tif
            class_subsampled_raster = temp_sub_sampling_path + input_classified_image_name + "_" + str(class_to_sub_sample) + SUB_SAMPLE_SUF + input_classified_image_extend  # e.g. D2_Par_Zone/Paysage_01/Corr_2/Resultats/Temp/Temp_Sub_Sampling/Paysage_01_raw_21008_SubSampled.tif
            centroid_file = temp_sub_sampling_path + input_classified_image_name + "_" + str(class_to_sub_sample) + CENTROID_SUF + EXTENSION_TXT  # e.g. D2_Par_Zone/Paysage_01/Corr_2/Resultats/Temp/Temp_Sub_Sampling/Paysage_01_raw_21008_Centroid.txt

            if debug >= 5:
                print(cyan + "classRasterSubSampling() : " + endC + "class_to_sub_sample :" , class_to_sub_sample)
                print(cyan + "classRasterSubSampling() : " + endC + "subclass_label :" , subclass_label)
                print(cyan + "classRasterSubSampling() : " + endC + "number_of_sub_samples :" , number_of_sub_samples)
                print(cyan + "classRasterSubSampling() : " + endC + "class_mask_raster :" , class_mask_raster)
                print(cyan + "classRasterSubSampling() : " + endC + "class_subsampled_raster :" , class_subsampled_raster)
                print(cyan + "classRasterSubSampling() : " + endC + "centroid_file :" , centroid_file)

            if debug >= 3:
                print(cyan + "classRasterSubSampling() : " + bold + green + "CLASSE %s/%s : SOUS ECHANTILLONAGE DE %s EN %s CLASSES " %(idx_class+1, len(sub_sampling_class_list), class_to_sub_sample, number_of_sub_samples) + endC)

            # STEP 1/5 : EXTRACT THE BINARY MASK OF THE PIXELS BELONGING TO THE CLASS
            expression_masque = "\"im1b1 == %s? 1 : 0\"" %(class_to_sub_sample)
            command = "otbcli_BandMath -il %s -out %s %s -exp %s" %(classified_image_input, class_mask_raster, CODAGE_8B, expression_masque)

            if debug >=2:
                print("\n" + cyan + "classRasterSubSampling() : " + bold + green + "CLASSE %s/%s - ETAPE 1/5 : Debut de l extraction du masque binaire de la classe %s" %(idx_class+1, len(sub_sampling_class_list),class_to_sub_sample) + endC)
                print(command)

            os.system(command)

            if debug >=2:
                print(cyan + "classRasterSubSampling() : " + bold + green + "CLASSE %s/%s - ETAPE 1/5 : Fin de l extraction du masque binaire de la classe %s, disponible ici : %s" %(idx_class+1, len(sub_sampling_class_list),class_to_sub_sample, class_mask_raster) + endC)

            # CHECK WHETHER WE HAVE ENOUGH PIXELS TO RUN THE K-MEANS
            number_of_actives_pixels = countPixelsOfValue(class_mask_raster, 1)  # Count the pixels available for the k-means
            if number_of_actives_pixels > (number_of_sub_samples * number_of_actives_pixels_threshold) :    # Case where more pixels are available for the k-means than the threshold

                # STEP 2/5 : UNSUPERVISED CLASSIFICATION OF THE PIXELS BELONGING TO THE CLASS
                if debug >= 3:
                    print("\n" + cyan + "classRasterSubSampling() : " + bold + green + "CLASSE %s/%s - ETAPE 2/5 : Il y a assez de pixels pour faire le sous echantillonage :  %s sur %s requis au minimum " %(idx_class+1, len(sub_sampling_class_list), number_of_actives_pixels, int(number_of_sub_samples) * number_of_actives_pixels_threshold) + endC)
                if debug >=2:
                    print("\n" + cyan + "classRasterSubSampling() : " + bold + green + "CLASSE %s/%s - ETAPE 2/5 : Debut du sous echantillonage par classification non supervisee en %s classes " %(idx_class+1, len(sub_sampling_class_list), number_of_sub_samples) + endC)

                # Call the k-means
                input_mask_list = [class_mask_raster]
                output_masked_image_list = [class_subsampled_raster]
                output_centroids_files_list = [centroid_file]
                macroclass_sampling_list = [number_of_sub_samples]
                macroclass_labels_list = [subclass_label]
                applyKmeansMasks(satellite_image_input, input_mask_list, "", "", output_masked_image_list, output_centroids_files_list, macroclass_sampling_list, macroclass_labels_list, no_data_value, path_time_log, 200, 1, -1, 0.0, rand_otb, ram_otb, number_of_actives_pixels_threshold, extension_raster, save_results_intermediate, overwrite)

                if debug >=2:
                    print(cyan + "classRasterSubSampling() : " + bold + green + "CLASSE %s/%s - ETAPE 2/5 : Fin du sous echantillonage par classification non supervisee en %s classes, disponible ici %s : " %(idx_class+1, len(sub_sampling_class_list), number_of_sub_samples, class_subsampled_raster) + endC)

                # STEP 3/5 : ADD THE NEW SUB CLASSES TO THE REALLOCATION TABLE
                # Reopen the proposal table file for rewriting

                for i in range(number_of_sub_samples):
                    class_values_list.append(subclass_label + i)
                    text_new_table += str(subclass_label + i) + ";" + str(subclass_label + i) + "; METTRE A JOUR MANUELLEMENT (origine : " +  str(class_to_sub_sample) + ")" + "\n"

                # STEP 4/5 : APPLY THE SUBSAMPLING TO THE CLASSIFICATION RESULT
                expression_application_sous_echantillonage = "\"im1b1 == %s? im2b1 : im1b1\"" %(class_to_sub_sample)
                command = "otbcli_BandMath -il %s %s -out %s %s -exp %s" %(image_to_sub_sample, class_subsampled_raster, image_output_temp, CODAGE, expression_application_sous_echantillonage)

                if debug >=2:
                    print("\n" + cyan + "classRasterSubSampling() : " + bold + green + "CLASSE %s/%s - ETAPE 4/5 : Debut de l application du sous echantillonage present dans %s sur %s" %(idx_class+1, len(sub_sampling_class_list), class_subsampled_raster, classified_image_input) + endC)
                    print(command)

                os.system(command)

                if debug >=2:
                    print(cyan + "classRasterSubSampling() : " + bold + green + "CLASSE %s/%s - ETAPE 4/5 : Fin de l application du sous echantillonage present dans %s sur %s, sortie disponible ici : %s" %(idx_class+1, len(sub_sampling_class_list), class_subsampled_raster, classified_image_input, image_output_temp) + endC)

                # STEP 5/5 : HANDLE RENAMING AND DELETIONS
                if debug >=2:
                    print("\n" + cyan + "classRasterSubSampling() : " + bold + green + "CLASSE %s/%s - ETAPE 5/5 : Debut du renommage et suppression des dossiers intermediaires" %(idx_class+1, len(sub_sampling_class_list)) + endC)

                if debug >=3 :
                    print("\n" + green + "classified image input: %s" %(classified_image_input) + endC)
                    print("\n" + green + "image to sub sample: %s" %(image_to_sub_sample) + endC)
                    print("\n" + green + "image temp : %s" %(image_output_temp) + endC)
                    print("\n" + green + "image output : %s" %(image_output) + endC)

                # If the input and output images are the same file, delete the input file so it can be re-created from the reworked file
                if image_output == classified_image_input and os.path.isfile(classified_image_input) :
                    removeFile(classified_image_input)
                os.rename(image_output_temp, image_output)
                processing_pass_first = True

                # REMOVE TEMPORARY FILES
                if not save_results_intermediate :
                    if os.path.isfile(class_mask_raster) :
                        removeFile(class_mask_raster)
                    if os.path.isfile(class_subsampled_raster) :
                        removeFile(class_subsampled_raster)
                    if os.path.isfile(centroid_file) :
                        removeFile(centroid_file)

                if debug >=2:
                    print(cyan + "classRasterSubSampling() : " + bold + green + "CLASSE %s/%s - ETAPE 5/5 : Fin du renommage et suppression des dossiers intermediaires" %(idx_class+1, len(sub_sampling_class_list)) + endC)

            else:  # Case where there are not enough pixels to run the k-means

                if debug >=2:
                    print("\n" + cyan + "classRasterSubSampling() : " + bold + yellow + "CLASSE %s/%s - ETAPE 2/5 : Nombre insuffisant de pixels disponibles pour appliquer le kmeans : %s sur %s requis au minimum " %(idx_class+1, len(sub_sampling_class_list), number_of_actives_pixels, int(number_of_sub_samples) * number_of_actives_pixels_threshold) + endC)
                    print(cyan + "classRasterSubSampling() : " + bold + yellow + "CLASSE %s/%s - ETAPE 2/5 : SOUS ECHANTILLONAGE NON APPLIQUE A LA CLASSE %s" %(idx_class+1, len(sub_sampling_class_list), class_to_sub_sample) + endC + "\n")

                # UPDATE THE image_to_sub_sample FILE
                if idx_class == 0:
                    processing_pass_first = False

                # UPDATE THE REALLOCATION TABLE
                text_new_table += str(class_to_sub_sample) + ";" + str(class_to_sub_sample) + ";CLASSE TROP PETITE POUR SOUS ECHANTILLONAGE" + "\n"

                # REMOVE THE MASK
                if not save_results_intermediate and os.path.isfile(class_mask_raster) :
                    removeFile(class_mask_raster)

    else:
        shutil.copy2(classified_image_input, image_output) # Copy the input raster when no subsampling is required

    # Write the new table to the file
    writeTextFile(table_reallocation, text_new_table)

    # REMOVE THE TEMPORARY FOLDER AND FILES
    if not save_results_intermediate and os.path.isdir(os.path.dirname(temp_sub_sampling_path)) :
        shutil.rmtree(os.path.dirname(temp_sub_sampling_path))

    print(cyan + "classRasterSubSampling() : " + bold + green + "END\n" + endC)

    # Update the log
    ending_event = "classRasterSubSampling() : Micro class subsampling on classification image ending : "
    timeLine(path_time_log, ending_event)
    return
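
# A minimal usage sketch of classRasterSubSampling(), with hypothetical paths
# (the reallocation table is assumed to be in the "MICROCLASSE;TRAITEMENT"
# format read by readReallocationTable()):
#
#   classRasterSubSampling("stack.tif", "Paysage_01_raw.tif",
#                          "Paysage_01_subsampled.tif", "realloc_table.csv",
#                          4, 0, "log.txt")   # sub_sampling_number, no_data_value, path_time_log
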
def statisticsVectorRaster(image_input,
                           vector_input,
                           vector_output,
                           band_number,
                           enable_stats_all_count,
                           enable_stats_columns_str,
                           enable_stats_columns_real,
                           col_to_delete_list,
                           col_to_add_list,
                           class_label_dico,
                           path_time_log,
                           clean_small_polygons=False,
                           format_vector='ESRI Shapefile',
                           save_results_intermediate=False,
                           overwrite=True):

    # INITIALISATION
    if debug >= 3:
        print(cyan + "statisticsVectorRaster() : " + endC + "image_input : " +
              str(image_input) + endC)
        print(cyan + "statisticsVectorRaster() : " + endC + "vector_input : " +
              str(vector_input) + endC)
        print(cyan + "statisticsVectorRaster() : " + endC +
              "vector_output : " + str(vector_output) + endC)
        print(cyan + "statisticsVectorRaster() : " + endC + "band_number : " +
              str(band_number) + endC)
        print(cyan + "statisticsVectorRaster() : " + endC +
              "enable_stats_all_count : " + str(enable_stats_all_count) + endC)
        print(cyan + "statisticsVectorRaster() : " + endC +
              "enable_stats_columns_str : " + str(enable_stats_columns_str) +
              endC)
        print(cyan + "statisticsVectorRaster() : " + endC +
              "enable_stats_columns_real : " + str(enable_stats_columns_real) +
              endC)
        print(cyan + "statisticsVectorRaster() : " + endC +
              "col_to_delete_list : " + str(col_to_delete_list) + endC)
        print(cyan + "statisticsVectorRaster() : " + endC +
              "col_to_add_list : " + str(col_to_add_list) + endC)
        print(cyan + "statisticsVectorRaster() : " + endC +
              "class_label_dico : " + str(class_label_dico) + endC)
        print(cyan + "statisticsVectorRaster() : " + endC +
              "clean_small_polygons : " + str(clean_small_polygons) + endC)
        print(cyan + "statisticsVectorRaster() : " + endC +
              "path_time_log : " + str(path_time_log) + endC)
        print(cyan + "statisticsVectorRaster() : " + endC +
              "format_vector : " + str(format_vector) + endC)
        print(cyan + "statisticsVectorRaster() : " + endC +
              "save_results_intermediate : " + str(save_results_intermediate) +
              endC)
        print(cyan + "statisticsVectorRaster() : " + endC + "overwrite : " +
              str(overwrite) + endC)

    # Constants
    PREFIX_AREA_COLUMN = "S_"

    # Update the log
    starting_event = "statisticsVectorRaster() : Compute statistic crossing starting : "
    timeLine(path_time_log, starting_event)

    # Create the output vector file
    if vector_output == "":
        vector_output = vector_input  # Set only for display purposes
    else:
        # Copy vector_output
        copyVectorFile(vector_input, vector_output, format_vector)

    # Checks
    image_xmin, image_xmax, image_ymin, image_ymax = getEmpriseImage(
        image_input)
    vector_xmin, vector_xmax, vector_ymin, vector_ymax = getEmpriseFile(
        vector_output, format_vector)
    extension_vector = os.path.splitext(vector_output)[1]

    if round(vector_xmin, 4) < round(image_xmin, 4) or round(
            vector_xmax, 4) > round(image_xmax, 4) or round(
                vector_ymin, 4) < round(image_ymin, 4) or round(
                    vector_ymax, 4) > round(image_ymax, 4):
        print(cyan + "statisticsVectorRaster() : " + bold + red +
              "image_xmin, image_xmax, image_ymin, image_ymax" + endC,
              image_xmin,
              image_xmax,
              image_ymin,
              image_ymax,
              file=sys.stderr)
        print(cyan + "statisticsVectorRaster() : " + bold + red +
              "vector_xmin, vector_xmax, vector_ymin, vector_ymax" + endC,
              vector_xmin,
              vector_xmax,
              vector_ymin,
              vector_ymax,
              file=sys.stderr)
        raise NameError(
            cyan + "statisticsVectorRaster() : " + bold + red +
            "The extend of the vector file (%s) is greater than the image file (%s)"
            % (vector_output, image_input) + endC)

    pixel_size = getPixelSizeImage(image_input)

    # Remove the very small polygons that introduce NaN values
    if clean_small_polygons:
        min_size_area = pixel_size * 2
        vector_temp = os.path.splitext(
            vector_output)[0] + "_temp" + extension_vector

        cleanMiniAreaPolygons(vector_output, vector_temp, min_size_area, '',
                              format_vector)
        removeVectorFile(vector_output, format_vector)
        renameVectorFile(vector_temp, vector_output)

    # Get the driver for the vector format
    driver = ogr.GetDriverByName(format_vector)

    # Open the vector file in read-write mode
    data_source = driver.Open(vector_output, 1)  # 0 means read-only - 1 means writeable.
    if data_source is None:
        print(cyan + "statisticsVectorRaster() : " + bold + red +
              "Impossible d'ouvrir le fichier shape : " + vector_output + endC,
              file=sys.stderr)
        sys.exit(1)  # exit with an error code

    # Get the layer
    layer = data_source.GetLayer(0)  # Get the layer (a layer holds the polygons)
    layer_definition = layer.GetLayerDefn()  # GetLayerDefn => returns the field names of the user defined (created) fields

    # STEP 1/4 : AUTOMATICALLY BUILD THE VALUE DICTIONARY IF IT DOES NOT EXIST
    if enable_stats_all_count and class_label_dico == {}:
        image_values_list = identifyPixelValues(image_input)
        # For every value
        for id_value in image_values_list:
            class_label_dico[id_value] = str(id_value)
        # Remove the no-data value 0
        if 0 in class_label_dico:
            del class_label_dico[0]
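    # For illustration: if identifyPixelValues() returns [0, 1, 2], the automatic
    # dictionary becomes {1: '1', 2: '2'} once the no-data value 0 is removed.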
    if debug >= 2:
        print(class_label_dico)

    # STEP 2/4 : CREATE THE COLUMNS IN THE SHAPE FILE
    if debug >= 2:
        print(
            cyan + "statisticsVectorRaster() : " + bold + green +
            "ETAPE 1/3 : DEBUT DE LA CREATION DES COLONNES DANS LE FICHIER VECTEUR %s"
            % (vector_output) + endC)

    # Input:
    # col_to_add_list = [UniqueID, majority/DateMaj/SrcMaj, minority, min, max, mean, median, sum, std, unique, range, all, count, all_S, count_S] - "all" expands class_label_dico into as many columns
    # Sub-lists of col_to_add_list, identified to ease later manipulations:
    # col_to_add_inter01_list = [majority/DateMaj/SrcMaj, minority, min, max, mean, median, sum, std, unique, range]
    # col_to_add_inter02_list = [majority, minority, min, max, mean, median, sum, std, unique, range, all, count, all_S, count_S]
    # Build the intermediate lists
    col_to_add_inter01_list = []

    # Values to inject into columns - string format
    if enable_stats_columns_str:
        col_to_add_list.extend(['majority', 'minority'])

    # Values to inject into columns - numeric format
    if enable_stats_columns_real:
        col_to_add_list.extend(['min', 'max', 'mean', 'median', 'sum', 'std', 'unique', 'range'])

    # Values to inject into columns - numeric format
    if enable_stats_all_count:
        col_to_add_list.extend(['all', 'count'])

    # Values to inject into columns - when class_label_dico is not empty
    if class_label_dico != {}:
        for e in ['all', 'count']:
            if e not in col_to_add_list:
                col_to_add_list.append(e)

    # Add column by column
    for col in ("majority", "DateMaj", "SrcMaj", "minority", "min", "max", "mean", "median", "sum", "std", "unique", "range"):
        if col in col_to_add_list:
            col_to_add_inter01_list.append(col)

    # Copy col_to_add_inter01_list into col_to_add_inter02_list
    col_to_add_inter02_list = list(col_to_add_inter01_list)

    if "all" in col_to_add_list:
        col_to_add_inter02_list.append("all")
    if "count" in col_to_add_list:
        col_to_add_inter02_list.append("count")
    if "all_S" in col_to_add_list:
        col_to_add_inter02_list.append("all_S")
    if "count_S" in col_to_add_list:
        col_to_add_inter02_list.append("count_S")
    if "DateMaj" in col_to_add_inter02_list:
        col_to_add_inter02_list.remove("DateMaj")
        col_to_add_inter02_list.insert(0, "majority")
    if "SrcMaj" in col_to_add_inter02_list:
        col_to_add_inter02_list.remove("SrcMaj")
        col_to_add_inter02_list.insert(0, "majority")

    # Valeurs à injecter dans des colonnes - Format Nbr
    if enable_stats_all_count:
        stats_all_count_list = ['all_S', 'count_S']
        for e in stats_all_count_list:
            col_to_add_list.append(e)

    # Create the unique-identifier column
    if ("UniqueID" in col_to_add_list) or ("uniqueID" in col_to_add_list) or (
            "ID" in col_to_add_list):
        field_defn = ogr.FieldDefn(
            "ID", ogr.OFTInteger
        )  # Build the field definition in the field_defn object
        layer.CreateField(field_defn)
        if debug >= 3:
            print(cyan + "statisticsVectorRaster() : " + endC +
                  "Creation de la colonne : ID")

    # Create the columns requested in col_to_add_list ([majority/DateMaj/SrcMaj, minority, min, max, mean, median, sum, std, unique, range])
    for col in col_to_add_list:
        if layer_definition.GetFieldIndex(
                col
        ) == -1:  # Check whether column col already exists (-1 means it does not)
            if col == 'majority' or col == 'DateMaj' or col == 'SrcMaj' or col == 'minority':  # Columns filled with strings
                stat_classif_field_defn = ogr.FieldDefn(
                    col, ogr.OFTString
                )  # Build the (string) field definition
                layer.CreateField(stat_classif_field_defn)
            elif col == 'mean' or col == 'median' or col == 'sum' or col == 'std' or col == 'unique' or col == 'range' or col == 'max' or col == 'min':
                stat_classif_field_defn = ogr.FieldDefn(
                    col, ogr.OFTReal
                )  # Build the (real) field definition
                # Set the field width
                stat_classif_field_defn.SetWidth(20)
                # Set the precision of the floating-point field
                stat_classif_field_defn.SetPrecision(2)
                layer.CreateField(stat_classif_field_defn)
            if debug >= 3:
                print(cyan + "statisticsVectorRaster() : " + endC +
                      "Creation de la colonne : " + str(col))

    # Create the columns tied to the dictionary
    if ('all' in col_to_add_list) or ('count' in col_to_add_list) or (
            'all_S' in col_to_add_list) or ('count_S' in col_to_add_list):
        for col in class_label_dico:

            # Handle the column name matching the class
            name_col = class_label_dico[col]
            if len(name_col) > 10:
                name_col = name_col[:10]
                print(
                    cyan + "statisticsVectorRaster() : " + bold + yellow +
                    "Nom de la colonne trop long. Il sera tronque a 10 caracteres en cas d'utilisation: "
                    + endC + name_col)

            # Handle the column name matching the class area
            name_col_area = PREFIX_AREA_COLUMN + name_col
            if len(name_col_area) > 10:
                name_col_area = name_col_area[:10]
                if debug >= 3:
                    print(
                        cyan + "statisticsVectorRaster() : " + bold + yellow +
                        "Nom de la colonne trop long. Il sera tronque a 10 caracteres en cas d'utilisation: "
                        + endC + name_col_area)

            # Add the columns holding the raster class distribution (%)
            if ('all' in col_to_add_list) or ('count' in col_to_add_list):
                if layer_definition.GetFieldIndex(
                        name_col
                ) == -1:  # Check whether column name_col already exists (-1 means it does not)
                    stat_classif_field_defn = ogr.FieldDefn(
                        name_col, ogr.OFTReal
                    )  # Build the (real) field definition
                    # Set the field width
                    stat_classif_field_defn.SetWidth(20)
                    # Set the precision of the floating-point field
                    stat_classif_field_defn.SetPrecision(2)
                    if debug >= 3:
                        print(cyan + "statisticsVectorRaster() : " + endC +
                              "Creation de la colonne : " + str(name_col))
                    layer.CreateField(
                        stat_classif_field_defn)  # Add the field

            # Add the columns holding the raster class areas
            if ('all_S' in col_to_add_list) or ('count_S' in col_to_add_list):
                if layer_definition.GetFieldIndex(
                        name_col_area
                ) == -1:  # Check whether column name_col_area already exists (-1 means it does not)
                    stat_classif_field_defn = ogr.FieldDefn(
                        name_col_area, ogr.OFTReal
                    )  # Build the field definition
                    # Set the field width
                    stat_classif_field_defn.SetWidth(20)
                    # Set the precision of the floating-point field
                    stat_classif_field_defn.SetPrecision(2)

                    if debug >= 3:
                        print(cyan + "statisticsVectorRaster() : " + endC +
                              "Creation de la colonne : " + str(name_col_area))
                    layer.CreateField(
                        stat_classif_field_defn)  # Add the field

    if debug >= 2:
        print(
            cyan + "statisticsVectorRaster() : " + bold + green +
            "ETAPE 1/3 : FIN DE LA CREATION DES COLONNES DANS LE FICHIER VECTEUR %s"
            % (vector_output) + endC)

    # STEP 2/3 : FILL THE VECTOR COLUMNS
    if debug >= 2:
        print(cyan + "statisticsVectorRaster() : " + bold + green +
              "ETAPE 2/3 : DEBUT DU REMPLISSAGE DES COLONNES DU VECTEUR " +
              endC)

    # Compute the raster/vector crossing statistics col_to_add_inter02_list = [majority, minority, min, max, mean, median, sum, std, unique, range, all, count, all_S, count_S]
    # Uses the rasterstats library
    if debug >= 3:
        print(cyan + "statisticsVectorRaster() : " + bold + green +
              "Calcul des statistiques " + endC +
              "Stats : %s - Vecteur : %s - Raster : %s" %
              (col_to_add_inter02_list, vector_output, image_input) + endC)
    stats_info_list = raster_stats(vector_output,
                                   image_input,
                                   band_num=band_number,
                                   stats=col_to_add_inter02_list)
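    # Hedged note: each element of stats_info_list is a per-feature dict from
    # rasterstats; an illustrative (made-up) entry: {'__fid__': 0, 'count': 232,
    # 'majority': 803, 'all': [(0, 183), (803, 45), (801, 4)]}. The keys present
    # depend on the statistics requested.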

    # Count the polygons
    num_features = layer.GetFeatureCount()
    if debug >= 3:
        print(cyan + "statisticsVectorRaster() : " + bold + green +
              "Remplissage des colonnes polygone par polygone " + endC)
    if debug >= 3:
        print(cyan + "statisticsVectorRaster() : " + endC +
              "Nombre total de polygones : " + str(num_features))

    polygone_count = 0

    for polygone_stats in stats_info_list:  # For each polygon listed in stats_info_list - there are as many entries as features in the vector file

        # Fetch the feature
        feature = layer.GetFeature(polygone_stats['__fid__'])

        polygone_count = polygone_count + 1

        if debug >= 3 and polygone_count % 10000 == 0:
            print(cyan + "statisticsVectorRaster() : " + endC +
                  "Avancement : %s polygones traites sur %s" %
                  (polygone_count, num_features))
        if debug >= 5:
            print(
                cyan + "statisticsVectorRaster() : " + endC +
                "Traitement du polygone : ",
                stats_info_list.index(polygone_stats) + 1)

        # Fill in the unique identifier
        if ("UniqueID" in col_to_add_list) or (
                "uniqueID" in col_to_add_list) or ("ID" in col_to_add_list):
            feature.SetField('ID', int(stats_info_list.index(polygone_stats)))

        # Initialize to 0 the columns holding the class distribution (%) - check what happens if the name exceeds 10 characters
        if ('all' in col_to_add_list) or ('count' in col_to_add_list):
            for element in class_label_dico:
                name_col = class_label_dico[element]
                if len(name_col) > 10:
                    name_col = name_col[:10]
                feature.SetField(name_col, 0)

        # Initialize to 0 the columns holding the class areas - check what happens if the name exceeds 10 characters
        if ('all_S' in col_to_add_list) or ('count_S' in col_to_add_list):
            for element in class_label_dico:
                name_col = class_label_dico[element]
                name_col_area = PREFIX_AREA_COLUMN + name_col
                if len(name_col_area) > 10:
                    name_col_area = name_col_area[:10]
                feature.SetField(name_col_area, 0)

        # Fill the columns holding the class distribution (%) and the class areas
        if ('all' in col_to_add_list) or ('count' in col_to_add_list) or (
                'all_S' in col_to_add_list) or ('count_S' in col_to_add_list):
            # 'all' is a list of couples: (pixel_value_in_the_raster, number_of_pixels_with_that_value) for the observed polygon.
            # E.g. [(0,183),(803,45),(801,4)]: in the polygon there are 183 pixels of value 0, 45 pixels of value 803 and 4 pixels of value 801
            majority_all = polygone_stats['all']

            # Two pixel values may map to the same column. For instance pixels 201, 202 and 203 may all correspond to BD Topo
            # Group the elements of majority_all that go to the same column according to class_label_dico
            count_for_idx_couple = 0  # Number of modifications (couple removals) applied to majority_all, used to adjust the index while iterating

            for idx_couple in range(
                    1, len(majority_all)
            ):  # No need to process the first element (idx_couple == 0)

                idx_couple = idx_couple - count_for_idx_couple  # Account for the couples removed while traversing majority_all
                couple = majority_all[idx_couple]  # E.g. couple = (803,45)

                if (couple is None) or (
                        couple == ""
                ):  # In case of a rasterstats bug (e.g. a geometric error in the polygon)
                    if debug >= 3:
                        print(
                            cyan + "statisticsVectorRaster() : " + bold + red +
                            "Probleme detecte dans la gestion du polygone %s" %
                            (polygone_count) + endC,
                            file=sys.stderr)
                    pass
                else:
                    for idx_verif in range(idx_couple):
                        # Check against the elements located earlier in majority_all
                        # Case where the name matching the label has already been met in majority_all
                        # Check that the image pixel values are referenced in the dictionary
                        if couple[0] in class_label_dico:

                            if class_label_dico[couple[0]] == class_label_dico[
                                    majority_all[idx_verif][0]]:
                                majority_all[idx_verif] = (
                                    majority_all[idx_verif][0],
                                    majority_all[idx_verif][1] + couple[1]
                                )  # Add the matching pixel count to the earlier couple
                                majority_all.remove(
                                    couple
                                )  # Remove the couple holding the "duplicate"
                                count_for_idx_couple = count_for_idx_couple + 1  # Update the modification count
                                break
                        else:
                            raise NameError(
                                cyan + "statisticsVectorRaster() : " + bold +
                                red +
                                "The image file (%s) contains pixel value '%d' not identified into class_label_dico"
                                % (image_input, couple[0]) + endC)

            # Inject the values of majority_all into the columns
            for couple_value_count in majority_all:  # Iterate over majority_all. E.g. couple_value_count = (803,45)
                if (couple_value_count is None) or (
                        couple_value_count == ""
                ):  # In case of a rasterstats bug (e.g. a geometric error in the polygon)
                    if debug >= 3:
                        print(
                            cyan + "statisticsVectorRaster() : " + bold + red +
                            "Probleme detecte dans la gestion du polygone %s" %
                            (polygone_count) + endC,
                            file=sys.stderr)
                    pass
                else:
                    nb_pixel_total = polygone_stats[
                        'count']  # Number of pixels in the polygon
                    pixel_value = couple_value_count[0]  # Pixel value
                    value_count = couple_value_count[
                        1]  # Number of pixels with that value
                    name_col = class_label_dico[
                        pixel_value]  # Translate the pixel value into its "meaning" according to the dictionary. E.g. BD Topo or 2011
                    name_col_area = PREFIX_AREA_COLUMN + name_col  # Name of the matching area column

                    if len(name_col) > 10:
                        name_col = name_col[:10]
                    if len(name_col_area) > 10:
                        name_col_area = name_col_area[:10]

                    value_area = pixel_size * value_count  # Area of the polygon part matching this pixel value
                    if nb_pixel_total is not None and nb_pixel_total != 0:
                        percentage = (
                            float(value_count) / float(nb_pixel_total)
                        ) * 100  # Convert the pixel count into a percentage of the polygon
                    else:
                        if debug >= 3:
                            print(
                                cyan + "statisticsVectorRaster() : " + bold +
                                red +
                                "Probleme dans l'identification du nombre de pixels du polygone %s : le pourcentage de %s est mis à 0"
                                % (polygone_count, name_col) + endC,
                                file=sys.stderr)
                        percentage = 0.0

                    if ('all' in col_to_add_list) or ('count'
                                                      in col_to_add_list):
                        feature.SetField(
                            name_col, percentage
                        )  # Inject the percentage into the matching column
                    if ('all_S' in col_to_add_list) or ('count_S'
                                                        in col_to_add_list):
                        feature.SetField(
                            name_col_area, value_area
                        )  # Inject the area into the matching column
        else:
            pass

        # Fill the requested statistics columns ( col_to_add_inter01_list = [majority/DateMaj/SrcMaj, minority, min, max, mean, median, sum, std, unique, range] )
        for stats in col_to_add_inter01_list:

            if stats == 'DateMaj' or stats == 'SrcMaj':  # Special case of 'DateMaj' and 'SrcMaj': the column name is DateMaj or SrcMaj, but the statistic used is identified by 'majority'
                name_col = stats  # Column name. E.g. 'DateMaj'
                value_statis = polygone_stats[
                    'majority']  # Majority value. E.g. '203'
                if value_statis is None:
                    value_statis_class = 'nan'
                else:
                    value_statis_class = class_label_dico[
                        value_statis]  # Translate the value according to the dictionary. E.g. '2011'
                feature.SetField(name_col,
                                 value_statis_class)  # Write into the column

            elif (stats is None) or (stats == "") or (
                    polygone_stats[stats] is
                    None) or (polygone_stats[stats]) == "" or (
                        polygone_stats[stats]) == 'nan':
                # In case of a rasterstats bug (e.g. a geometric error in the polygon)
                pass

            else:
                name_col = stats  # Column name. E.g. 'majority', 'max'
                value_statis = polygone_stats[
                    stats]  # Value to write into the column, e.g. '2011'

                if (
                        name_col == 'majority' or name_col == 'minority'
                ) and class_label_dico != {}:  # Column refers to a dictionary value (class_label_dico is a dict, hence the {} comparison)
                    value_statis_class = class_label_dico[value_statis]
                else:
                    value_statis_class = value_statis

                feature.SetField(name_col, value_statis_class)

        layer.SetFeature(feature)
        feature.Destroy()

    if debug >= 2:
        print(cyan + "statisticsVectorRaster() : " + bold + green +
              "ETAPE 2/3 : FIN DU REMPLISSAGE DES COLONNES DU VECTEUR %s" %
              (vector_output) + endC)

    # STEP 3/3 : REMOVE THE UNWANTED COLUMNS
    if col_to_delete_list != []:

        if debug >= 2:
            print(cyan + "statisticsVectorRaster() : " + bold + green +
                  "ETAPE 3/3 : DEBUT DES SUPPRESSIONS DES COLONNES %s" %
                  (col_to_delete_list) + endC)

        for col_to_delete in col_to_delete_list:

            if layer_definition.GetFieldIndex(
                    col_to_delete
            ) != -1:  # Check that column col_to_delete exists (-1 means it does not)

                layer.DeleteField(layer_definition.GetFieldIndex(
                    col_to_delete))  # Delete the column

                if debug >= 3:
                    print(cyan + "statisticsVectorRaster() : " + endC +
                          "Suppression de %s" % (col_to_delete) + endC)

        if debug >= 2:
            print(cyan + "statisticsVectorRaster() : " + bold + green +
                  "ETAPE 3/3 : FIN DE LA SUPPRESSION DES COLONNES" + endC)

    else:
        print(cyan + "statisticsVectorRaster() : " + bold + yellow +
              "ETAPE 3/3 : AUCUNE SUPPRESSION DE COLONNE DEMANDEE" + endC)

    # Close the shapefile
    layer.SyncToDisk()
    layer = None
    data_source.Destroy()

    # Update the log
    ending_event = "statisticsVectorRaster() : Compute statistic crossing ending : "
    timeLine(path_time_log, ending_event)

    return
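
# Hedged summary note: statisticsVectorRaster() (body above) crosses a raster
# with a polygon vector and fills per-polygon attribute columns: an optional
# 'ID', the rasterstats statistics (majority, minority, min, max, mean, ...),
# one percentage column per class_label_dico entry ('all'/'count') and one area
# column per class ('all_S'/'count_S', prefixed with PREFIX_AREA_COLUMN).
# Column names are truncated to 10 characters, presumably to fit the DBF
# field-name limit of the Shapefile format.
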
def comparareClassificationToReferenceGrid(image_input,
                                           vector_cut_input,
                                           vector_sample_input,
                                           vector_grid_input,
                                           vector_grid_output,
                                           size_grid,
                                           field_value_verif,
                                           no_data_value,
                                           path_time_log,
                                           epsg=2154,
                                           format_raster='GTiff',
                                           format_vector="ESRI Shapefile",
                                           extension_raster=".tif",
                                           extension_vector=".shp",
                                           save_results_intermediate=False,
                                           overwrite=True):

    # Update the log
    starting_event = "comparareClassificationToReferenceGrid() : starting : "
    timeLine(path_time_log, starting_event)

    print(endC)
    print(bold + green +
          "## START : COMPARE QUALITY FROM CLASSIF IMAGE BY GRID" + endC)
    print(endC)

    if debug >= 2:
        print(
            bold + green +
            "comparareClassificationToReferenceGrid() : Variables dans la fonction"
            + endC)
        print(cyan + "comparareClassificationToReferenceGrid() : " + endC +
              "image_input : " + str(image_input) + endC)
        print(cyan + "comparareClassificationToReferenceGrid() : " + endC +
              "vector_cut_input : " + str(vector_cut_input) + endC)
        print(cyan + "comparareClassificationToReferenceGrid() : " + endC +
              "vector_sample_input : " + str(vector_sample_input) + endC)
        print(cyan + "comparareClassificationToReferenceGrid() : " + endC +
              "vector_grid_input : " + str(vector_grid_input) + endC)
        print(cyan + "comparareClassificationToReferenceGrid() : " + endC +
              "vector_grid_output : " + str(vector_grid_output) + endC)
        print(cyan + "comparareClassificationToReferenceGrid() : " + endC +
              "size_grid : " + str(size_grid) + endC)
        print(cyan + "comparareClassificationToReferenceGrid() : " + endC +
              "field_value_verif : " + str(field_value_verif))
        print(cyan + "comparareClassificationToReferenceGrid() : " + endC +
              "no_data_value : " + str(no_data_value))
        print(cyan + "comparareClassificationToReferenceGrid() : " + endC +
              "path_time_log : " + str(path_time_log) + endC)
        print(cyan + "comparareClassificationToReferenceGrid() : " + endC +
              "epsg  : " + str(epsg) + endC)
        print(cyan + "comparareClassificationToReferenceGrid() : " + endC +
              "format_raster : " + str(format_raster) + endC)
        print(cyan + "comparareClassificationToReferenceGrid() : " + endC +
              "format_vector : " + str(format_vector) + endC)
        print(cyan + "comparareClassificationToReferenceGrid() : " + endC +
              "extension_raster : " + str(extension_raster) + endC)
        print(cyan + "comparareClassificationToReferenceGrid() : " + endC +
              "extension_vector : " + str(extension_vector) + endC)
        print(cyan + "comparareClassificationToReferenceGrid() : " + endC +
              "save_results_intermediate : " + str(save_results_intermediate) +
              endC)
        print(cyan + "comparareClassificationToReferenceGrid() : " + endC +
              "overwrite : " + str(overwrite) + endC)

    # STEP 0 : PREPARE THE INTERMEDIATE FILES

    CODAGE = "uint16"
    SUFFIX_STUDY = '_study'
    SUFFIX_TEMP = '_temp'
    SUFFIX_FUSION = '_other_fusion'

    NONE_VALUE_QUANTITY = -1.0
    FIELD_VALUE_OTHER = 65535

    FIELD_NAME_ID = "id"
    FIELD_NAME_RATE_BUILD = "rate_build"
    FIELD_NAME_RATE_OTHER = "rate_other"
    FIELD_NAME_SREF_BUILD = "sref_build"
    FIELD_NAME_SCLA_BUILD = "scla_build"
    FIELD_NAME_SREF_OTHER = "sref_other"
    FIELD_NAME_SCLA_OTHER = "scla_other"
    FIELD_NAME_KAPPA = "kappa"
    FIELD_NAME_ACCURACY = "accuracy"

    pixel_size_x, pixel_size_y = getPixelWidthXYImage(image_input)

    repertory_output = os.path.dirname(vector_grid_output)
    base_name = os.path.splitext(os.path.basename(vector_grid_output))[0]

    vector_study = repertory_output + os.sep + base_name + SUFFIX_STUDY + extension_vector
    vector_grid_temp = repertory_output + os.sep + base_name + SUFFIX_TEMP + extension_vector
    image_raster_other_fusion = repertory_output + os.sep + base_name + SUFFIX_FUSION + extension_raster

    # STEP 0 : CHECKS

    # Check the nomenclature value to be verified
    if field_value_verif >= FIELD_VALUE_OTHER:
        print(
            cyan + "comparareClassificationToReferenceGrid() : " + bold + red +
            "Attention de valeur de nomenclature à vérifier  : " +
            str(field_value_verif) +
            " doit être inferieur à la valeur de fusion des valeur autre arbitraire de : "
            + str(FIELD_VALUE_OTHER) + endC,
            file=sys.stderr)
        sys.exit(1)  #exit with an error code

    # STEP 1 : DEFINE A STUDY-AREA SHAPEFILE

    if (vector_cut_input is not None) and (vector_cut_input != "") and (
            os.path.isfile(vector_cut_input)):
        cutting_action = True
        vector_study = vector_cut_input
    else:
        cutting_action = False
        createVectorMask(image_input, vector_study)

    # STEP 2 : MERGE ALL NON-BUILT-UP CLASSES INTO THE "OTHER" ZONE

    # Reassign the classification values other than the built-up one
    change_reaff_value_list = []
    reaff_value_list = identifyPixelValues(image_input)
    if field_value_verif in reaff_value_list:
        reaff_value_list.remove(field_value_verif)
    if no_data_value in reaff_value_list:
        reaff_value_list.remove(no_data_value)
    for elem in reaff_value_list:
        change_reaff_value_list.append(FIELD_VALUE_OTHER)  # One target value per value to reassign
    reallocateClassRaster(image_input, image_raster_other_fusion,
                          reaff_value_list, change_reaff_value_list)
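    # Illustrative example (made-up values): with field_value_verif=11 and
    # no_data_value=0, identifyPixelValues() returning [0, 11, 21, 22] gives
    # reaff_value_list=[21, 22] and change_reaff_value_list=[65535, 65535]:
    # every remaining class is merged into FIELD_VALUE_OTHER.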

    # STEP 3 : CREATE THE GRID OVER THE STUDY AREA

    # Define the file attributes
    attribute_dico = {
        FIELD_NAME_ID: ogr.OFTInteger,
        FIELD_NAME_RATE_BUILD: ogr.OFTReal,
        FIELD_NAME_RATE_OTHER: ogr.OFTReal,
        FIELD_NAME_SREF_BUILD: ogr.OFTReal,
        FIELD_NAME_SCLA_BUILD: ogr.OFTReal,
        FIELD_NAME_SREF_OTHER: ogr.OFTReal,
        FIELD_NAME_SCLA_OTHER: ogr.OFTReal,
        FIELD_NAME_KAPPA: ogr.OFTReal,
        FIELD_NAME_ACCURACY: ogr.OFTReal
    }
    nb_polygon = 0

    if (vector_grid_input is not None) and (vector_grid_input != "") and (
            os.path.isfile(vector_grid_input)):
        # Use the input grid file

        # Copy the input grid file to the output grid file
        copyVectorFile(vector_grid_input, vector_grid_output)

        # Add the fields to the output grid file
        for field_name in attribute_dico:
            addNewFieldVector(vector_grid_output, field_name,
                              attribute_dico[field_name], None, None, None,
                              format_vector)

        # Set the "id" field identifying each square of the grid
        nb_polygon = updateIndexVector(vector_grid_output, FIELD_NAME_ID,
                                       format_vector)

    else:
        # If no grid file exists, create one using the size_grid value

        # Create the grid file
        nb_polygon = createGridVector(vector_study, vector_grid_temp,
                                      size_grid, size_grid, attribute_dico,
                                      overwrite, epsg, format_vector)

        # Clip the grid with the study-area shapefile
        cutVectorAll(vector_study, vector_grid_temp, vector_grid_output,
                     format_vector)

    # STEP 4 : COMPUTE THE QUALITY INDICATOR FOR EACH GRID CELL

    if debug >= 2:
        print(bold + "nb_polygon = " + endC + str(nb_polygon) + "\n")

    # For each existing polygon
    sum_rate_quantity_build = 0
    nb_rate_sum = 0
    size_area_pixel = abs(pixel_size_x * pixel_size_y)

    for id_polygon in range(nb_polygon):
        geom_list = getGeomPolygons(vector_grid_output, FIELD_NAME_ID,
                                    id_polygon, format_vector)
        if geom_list is not None and geom_list != []:  # and (id_polygon == 24 or id_polygon == 30):

            if debug >= 1:
                print(cyan + "comparareClassificationToReferenceGrid() : " +
                      bold + green +
                      "Calcul de la matrice pour le polygon n°: " +
                      str(id_polygon) + endC)

            geom = geom_list[0]
            class_ref_list, class_pro_list, rate_quantity_list, kappa, accuracy, matrix = computeQualityIndiceRateQuantity(
                image_raster_other_fusion, vector_sample_input,
                repertory_output, base_name + str(id_polygon), geom, size_grid,
                pixel_size_x, pixel_size_y, field_value_verif,
                FIELD_VALUE_OTHER, no_data_value, epsg, format_raster,
                format_vector, extension_raster, extension_vector, overwrite,
                save_results_intermediate)

            # If the quality-indicator computations succeeded
            if debug >= 2:
                print(matrix)
            if matrix is not None and matrix != [] and matrix[0] != []:
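                # Assumed confusion-matrix layout, inferred from the area
                # formulas below: matrix[row][col] counts pixels with
                # row = reference class (0=build, 1=other) and
                # col = classified class (0=build, 1=other).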

                # Get the built-up quantity and compute the reference and classified areas (whole cell or not!)
                if len(class_ref_list) == 2 and len(
                        class_pro_list
                ) == 2:  # Case with both build and other pixels (in ref and in prod)
                    rate_quantity_build = rate_quantity_list[0]
                    rate_quantity_other = rate_quantity_list[1]
                    size_area_ref_build = (matrix[0][0] +
                                           matrix[0][1]) * size_area_pixel
                    size_area_classif_build = (matrix[0][0] +
                                               matrix[1][0]) * size_area_pixel
                    size_area_ref_other = (matrix[1][0] +
                                           matrix[1][1]) * size_area_pixel
                    size_area_classif_other = (matrix[0][1] +
                                               matrix[1][1]) * size_area_pixel
                    sum_rate_quantity_build += rate_quantity_build
                    nb_rate_sum += 1

                else:  # Case with only build pixels OR only other pixels

                    if class_ref_list[
                            0] == field_value_verif:  # Case with only build reference pixels
                        rate_quantity_build = rate_quantity_list[0]
                        rate_quantity_other = NONE_VALUE_QUANTITY
                        size_area_ref_other = 0

                        if len(
                                class_pro_list
                        ) == 2:  # Case with both build and other production pixels
                            size_area_ref_build = (
                                matrix[0][0] + matrix[0][1]) * size_area_pixel
                            size_area_classif_build = matrix[0][
                                0] * size_area_pixel
                            size_area_classif_other = matrix[0][
                                1] * size_area_pixel

                        else:
                            size_area_ref_build = matrix[0][0] * size_area_pixel
                            if class_pro_list[
                                    0] == field_value_verif:  # Case with only build production pixels
                                size_area_classif_build = matrix[0][
                                    0] * size_area_pixel
                                size_area_classif_other = 0

                            else:  # Case with only other production pixels
                                size_area_classif_build = 0
                                size_area_classif_other = matrix[0][
                                    0] * size_area_pixel

                    else:  # Case with only other reference pixels
                        rate_quantity_build = NONE_VALUE_QUANTITY
                        rate_quantity_other = rate_quantity_list[0]
                        size_area_ref_build = 0

                        if len(
                                class_pro_list
                        ) == 2:  # Case with both build and other production pixels
                            size_area_ref_other = (
                                matrix[0][0] + matrix[0][1]) * size_area_pixel
                            size_area_classif_build = matrix[0][
                                0] * size_area_pixel
                            size_area_classif_other = matrix[0][
                                1] * size_area_pixel

                        else:
                            size_area_ref_other = matrix[0][0] * size_area_pixel
                            if class_pro_list[
                                    0] == field_value_verif:  # Case with only build production pixels
                                size_area_classif_build = matrix[0][
                                    0] * size_area_pixel
                                size_area_classif_other = 0

                            else:  # Case with only other production pixels
                                size_area_classif_build = 0
                                size_area_classif_other = matrix[0][
                                    0] * size_area_pixel

                # Update the attributes of this grid square
                setAttributeValues(
                    vector_grid_output, FIELD_NAME_ID, id_polygon, {
                        FIELD_NAME_RATE_BUILD: rate_quantity_build,
                        FIELD_NAME_RATE_OTHER: rate_quantity_other,
                        FIELD_NAME_SREF_BUILD: size_area_ref_build,
                        FIELD_NAME_SCLA_BUILD: size_area_classif_build,
                        FIELD_NAME_SREF_OTHER: size_area_ref_other,
                        FIELD_NAME_SCLA_OTHER: size_area_classif_other,
                        FIELD_NAME_KAPPA: kappa,
                        FIELD_NAME_ACCURACY: accuracy
                    }, format_vector)

    # Compute the average
    if nb_rate_sum != 0:
        average_quantity_build = sum_rate_quantity_build / nb_rate_sum
    else:
        average_quantity_build = 0
    if debug >= 2:
        print(bold + "nb_polygon_used = " + endC + str(nb_rate_sum))
        print(bold + "average_quantity_build = " + endC +
              str(average_quantity_build) + "\n")

    # STEP 5 : REMOVE USELESS INTERMEDIATE FILES

    # Remove the intermediate data
    if not save_results_intermediate:

        if not cutting_action:
            if os.path.isfile(vector_study):
                removeVectorFile(vector_study)

        if os.path.isfile(image_raster_other_fusion):
            removeFile(image_raster_other_fusion)

        if os.path.isfile(vector_grid_temp):
            removeVectorFile(vector_grid_temp)

    print(endC)
    print(bold + green +
          "## END : COMPARE QUALITY FROM CLASSIF IMAGE BY GRID" + endC)
    print(endC)

    # Update the log
    ending_event = "comparareClassificationToReferenceGrid() :  ending : "
    timeLine(path_time_log, ending_event)

    return average_quantity_build
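
# Hedged usage sketch (paths and values are placeholders, not from the source):
# avg_build = comparareClassificationToReferenceGrid(
#     "classif.tif", "", "reference_samples.shp", "", "grid_quality.shp",
#     size_grid=1000, field_value_verif=11, no_data_value=0,
#     path_time_log="time.log")
# Each cell of grid_quality.shp then carries rate_build/rate_other, the
# reference and classified areas, kappa and overall accuracy.
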
def selectSamples(image_input_list, sample_image_input, vector_output, table_statistics_output, sampler_strategy, select_ratio_floor, ratio_per_class_dico, name_column, no_data_value, path_time_log, rand_seed=0, ram_otb=0, epsg=2154, format_vector='ESRI Shapefile', extension_vector=".shp", save_results_intermediate=False, overwrite=True) :

    # Update the log
    starting_event = "selectSamples() : Select points in raster mask macro input starting : "
    timeLine(path_time_log, starting_event)

    if debug >= 3:
        print(cyan + "selectSamples() : " + endC + "image_input_list : " + str(image_input_list) + endC)
        print(cyan + "selectSamples() : " + endC + "sample_image_input : " + str(sample_image_input) + endC)
        print(cyan + "selectSamples() : " + endC + "vector_output : " + str(vector_output) + endC)
        print(cyan + "selectSamples() : " + endC + "table_statistics_output : " + str(table_statistics_output) + endC)
        print(cyan + "selectSamples() : " + endC + "sampler_strategy : " + str(sampler_strategy) + endC)
        print(cyan + "selectSamples() : " + endC + "select_ratio_floor : " + str(select_ratio_floor) + endC)
        print(cyan + "selectSamples() : " + endC + "ratio_per_class_dico : " + str(ratio_per_class_dico) + endC)
        print(cyan + "selectSamples() : " + endC + "name_column : " + str(name_column) + endC)
        print(cyan + "selectSamples() : " + endC + "no_data_value : " + str(no_data_value) + endC)
        print(cyan + "selectSamples() : " + endC + "path_time_log : " + str(path_time_log) + endC)
        print(cyan + "selectSamples() : " + endC + "rand_seed : " + str(rand_seed) + endC)
        print(cyan + "selectSamples() : " + endC + "ram_otb : " + str(ram_otb) + endC)
        print(cyan + "selectSamples() : " + endC + "epsg : " + str(epsg) + endC)
        print(cyan + "selectSamples() : " + endC + "format_vector : " + str(format_vector) + endC)
        print(cyan + "selectSamples() : " + endC + "extension_vector : " + str(extension_vector) + endC)
        print(cyan + "selectSamples() : " + endC + "save_results_intermediate : " + str(save_results_intermediate) + endC)
        print(cyan + "selectSamples() : " + endC + "overwrite : " + str(overwrite) + endC)

    # Constants
    EXT_XML = ".xml"

    SUFFIX_SAMPLE = "_sample"
    SUFFIX_STATISTICS = "_statistics"
    SUFFIX_POINTS = "_points"
    SUFFIX_VALUE = "_value"

    BAND_NAME = "band_"
    COLUMN_CLASS = "class"
    COLUMN_ORIGINFID = "originfid"

    NB_POINTS = "nb_points"
    AVERAGE = "average"
    STANDARD_DEVIATION = "st_dev"

    print(cyan + "selectSamples() : " + bold + green + "DEBUT DE LA SELECTION DE POINTS" + endC)

    # Define variables and paths
    repertory_output = os.path.dirname(vector_output)
    filename = os.path.splitext(os.path.basename(vector_output))[0]
    sample_points_output = repertory_output + os.sep + filename +  SUFFIX_SAMPLE + extension_vector
    file_statistic_points = repertory_output + os.sep + filename + SUFFIX_STATISTICS + SUFFIX_POINTS + EXT_XML

    if debug >= 3:
        print(cyan + "selectSamples() : " + endC + "file_statistic_points : " + str(file_statistic_points) + endC)

    # 0. CHECK WHETHER THE OUTPUT FILE EXISTS
    #----------------------------------------

    # If the output points vector file already exists and overwrite is off
    check = os.path.isfile(vector_output)
    if check and not overwrite:
        print(bold + yellow + "Samples points already done for file %s and will not be calculated again." %(vector_output) + endC)
    else:   # Otherwise, or if the check is disabled: create the sample points file

        # Remove any pre-existing file
        if check:
            try:
                removeVectorFile(vector_output)
            except Exception:
                pass # If the file cannot be removed, assume it does not exist and move on
        if os.path.isfile(table_statistics_output) :
            try:
                removeFile(table_statistics_output)
            except Exception:
                pass # If the file cannot be removed, assume it does not exist and move on


        # 1. STATISTICS ON THE RASTER SAMPLE IMAGE
        #-----------------------------------------

        if debug >= 3:
            print(cyan + "selectSamples() : " + bold + green + "Start statistique sur l'image des echantillons rasteur..." + endC)

        id_micro_list = identifyPixelValues(sample_image_input)

        if 0 in id_micro_list :
            id_micro_list.remove(0)

        min_micro_class_nb_points = -1
        min_micro_class_label = 0
        infoStructPointSource_dico = {}
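        # Hedged note: StructInfoMicoClass is assumed to be a plain container
        # class defined elsewhere in this module, with fields label_class,
        # nb_points, info_points_list and (filled later) sample_points_list.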

        writeTextFile(file_statistic_points, '<?xml version="1.0" ?>\n')
        appendTextFileCR(file_statistic_points, '<GeneralStatistics>')
        appendTextFileCR(file_statistic_points, '    <Statistic name="pointsPerClassRaw">')

        if debug >= 2:
            print("Nombre de points par micro classe :" + endC)

        for id_micro in id_micro_list :
            nb_pixels = countPixelsOfValue(sample_image_input, id_micro)

            if debug >= 2:
                print("MicroClass : " + str(id_micro) + ", nb_points = " + str(nb_pixels))
            appendTextFileCR(file_statistic_points, '        <StatisticPoints class="%d" value="%d" />' %(id_micro, nb_pixels))

            if min_micro_class_nb_points == -1 or min_micro_class_nb_points > nb_pixels :
                min_micro_class_nb_points = nb_pixels
                min_micro_class_label = id_micro

            infoStructPointSource_dico[id_micro] = StructInfoMicoClass()
            infoStructPointSource_dico[id_micro].label_class = id_micro
            infoStructPointSource_dico[id_micro].nb_points = nb_pixels
            infoStructPointSource_dico[id_micro].info_points_list = []
            del nb_pixels

        if debug >= 2:
            print("MicroClass min points find : " + str(min_micro_class_label) + ", nb_points = " + str(min_micro_class_nb_points))

        appendTextFileCR(file_statistic_points, '    </Statistic>')

        pending_event = cyan + "selectSamples() : " + bold + green + "End statistique sur l'image des echantillons rasteur. " + endC
        if debug >= 3:
            print(pending_event)
        timeLine(path_time_log,pending_event)

        # 2. LOAD THE SAMPLE IMAGE
        #-------------------------

        if debug >= 3:
            print(cyan + "selectSamples() : " + bold + green + "Start chargement de l'image des echantillons..." + endC)

        # Image information
        cols, rows, bands = getGeometryImage(sample_image_input)
        xmin, xmax, ymin, ymax = getEmpriseImage(sample_image_input)
        pixel_width, pixel_height = getPixelWidthXYImage(sample_image_input)
        projection_input = getProjectionImage(sample_image_input)
        if projection_input is None or projection_input == 0 :
            projection_input = epsg
        else :
            projection_input = int(projection_input)

        pixel_width = abs(pixel_width)
        pixel_height = abs(pixel_height)

        # Read the data
        raw_data = getRawDataImage(sample_image_input)

        if debug >= 3:
            print("projection = " + str(projection_input))
            print("cols = " + str(cols))
            print("rows = " + str(rows))

        # Build a dico structure holding every non-zero point
        progress = 0
        pass_prog = False
        for y_row in range(rows) :
            for x_col in range(cols) :
                value_class = raw_data[y_row][x_col]
                if value_class != 0 :
                    infoStructPointSource_dico[value_class].info_points_list.append(x_col + (y_row * cols))

            # Progress bar
            if debug >= 4:
                if  ((float(y_row) / rows) * 100.0 > progress) and not pass_prog :
                    progress += 1
                    pass_prog = True
                    print("Progression => " + str(progress) + "%")
                if ((float(y_row) / rows) * 100.0  > progress + 1) :
                    pass_prog = False

        del raw_data

        pending_event = cyan + "selectSamples() : " + bold + green + "End chargement de l'image des echantillons. " + endC
        if debug >= 3:
            print(pending_event)
        timeLine(path_time_log,pending_event)

        # 3. SELECT THE SAMPLE POINTS
        #----------------------------

        if debug >= 3:
            print(cyan + "selectSamples() : " + bold + green + "Start selection des points d'echantillon..." + endC)

        appendTextFileCR(file_statistic_points, '    <Statistic name="pointsPerClassSelect">')

        # Make the random.sample draw deterministic
        if rand_seed > 0:
            random.seed( rand_seed )
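        # Hedged note: switch()/case() are assumed to be the classic
        # generator-based Python "switch" recipe defined elsewhere in this
        # module; the bare final `break` acts as the default case.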

        # For every micro class
        for id_micro in id_micro_list :

            # Depending on the selection strategy
            nb_points_ratio = 0
            while switch(sampler_strategy.lower()):
                if case('all'):
                    # The 'all' selection mode is chosen
                    nb_points_ratio = infoStructPointSource_dico[id_micro].nb_points
                    infoStructPointSource_dico[id_micro].sample_points_list = range(nb_points_ratio)

                    break
                if case('percent'):
                    # The 'percent' selection mode is chosen
                    id_macro_class = int(math.floor(id_micro / 100) * 100)
                    select_ratio_class = ratio_per_class_dico[id_macro_class]
                    nb_points_ratio = int(infoStructPointSource_dico[id_micro].nb_points * select_ratio_class / 100)
                    infoStructPointSource_dico[id_micro].sample_points_list = random.sample(range(infoStructPointSource_dico[id_micro].nb_points), nb_points_ratio)
                    break
                if case('mixte'):
                    # The 'mixte' selection mode is chosen
                    nb_points_ratio = int(infoStructPointSource_dico[id_micro].nb_points * select_ratio_floor / 100)
                    if id_micro == min_micro_class_label :
                        # The smallest micro class is kept in full
                        infoStructPointSource_dico[id_micro].sample_points_list = range(infoStructPointSource_dico[id_micro].nb_points)
                        nb_points_ratio = min_micro_class_nb_points
                    elif nb_points_ratio <= min_micro_class_nb_points :
                        # Micro classes whose selection ratio falls below the size of the smallest class are capped at that size
                        infoStructPointSource_dico[id_micro].sample_points_list = random.sample(range(infoStructPointSource_dico[id_micro].nb_points), min_micro_class_nb_points)
                        nb_points_ratio = min_micro_class_nb_points
                    else :
                        # For every other micro class, randomly draw the number of points matching the ratio
                        infoStructPointSource_dico[id_micro].sample_points_list = random.sample(range(infoStructPointSource_dico[id_micro].nb_points), nb_points_ratio)

                    break
                break


            if debug >= 2:
                print("MicroClass = " + str(id_micro) + ", nb_points_ratio " + str(nb_points_ratio))
            appendTextFileCR(file_statistic_points, '        <StatisticPoints class="%d" value="%d" />' %(id_micro, nb_points_ratio))

        appendTextFileCR(file_statistic_points, '    </Statistic>')
        appendTextFileCR(file_statistic_points, '</GeneralStatistics>')

        pending_event = cyan + "selectSamples() : " + bold + green + "End selection des points d'echantillon. " + endC
        if debug >= 3:
            print(pending_event)
        timeLine(path_time_log,pending_event)

        # 4. PREPARE THE SAMPLE POINTS
        #-----------------------------

        if debug >= 3:
            print(cyan + "selectSamples() : " + bold + green + "Start preparation des points d'echantillon..." + endC)

        # Build the points dico
        points_random_value_dico = {}
        index_dico_point = 0
        for micro_class in infoStructPointSource_dico :
            micro_class_struct = infoStructPointSource_dico[micro_class]
            label_class = micro_class_struct.label_class
            point_attr_dico = {name_column:int(label_class), COLUMN_CLASS:int(label_class), COLUMN_ORIGINFID:0}

            for id_point in micro_class_struct.sample_points_list:

                # Get the point coordinate values
                coor_x = float(xmin + (int(micro_class_struct.info_points_list[id_point] % cols) * pixel_width)) + (pixel_width / 2.0)
                coor_y = float(ymax - (int(micro_class_struct.info_points_list[id_point] / cols) * pixel_height)) - (pixel_height / 2.0)
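                # Illustrative check (made-up values): with xmin=0, ymax=1000,
                # pixel_width=pixel_height=10 and cols=100, flat index 205
                # gives col=205%100=5 and row=int(205/100)=2, hence a pixel
                # centre of (coor_x, coor_y) = (55.0, 975.0).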
                points_random_value_dico[index_dico_point] = [[coor_x, coor_y], point_attr_dico]
                del coor_x
                del coor_y
                index_dico_point += 1
            del point_attr_dico
        del infoStructPointSource_dico

        pending_event = cyan + "selectSamples() : " + bold + green + "End preparation des points d'echantillon. " + endC
        if debug >=3:
            print(pending_event)
        timeLine(path_time_log,pending_event)

        # 5. CREATE THE SAMPLE POINTS SHAPEFILE
        #--------------------------------------

        if debug >= 3:
            print(cyan + "selectSamples() : " + bold + green + "Start creation du fichier shape de points d'echantillon..." + endC)

        # Define the attributes of the result file
        attribute_dico = {name_column:ogr.OFTInteger, COLUMN_CLASS:ogr.OFTInteger, COLUMN_ORIGINFID:ogr.OFTInteger}

        # Create the shapefile
        createPointsFromCoordList(attribute_dico, points_random_value_dico, sample_points_output, projection_input, format_vector)
        del attribute_dico
        del points_random_value_dico

        pending_event = cyan + "selectSamples() : " + bold + green + "End creation du fichier shape de points d'echantillon. " + endC
        if debug >=3:
            print(pending_event)
        timeLine(path_time_log,pending_event)

        # 6. EXTRACT THE SAMPLE POINT VALUES
        #-----------------------------------

        if debug >= 3:
            print(cyan + "selectSamples() : " + bold + green + "Start extraction des points d'echantillon dans l'image..." + endC)

        # Case with a single image
        if len(image_input_list) == 1:
            # Extract sample
            image_input = image_input_list[0]
            command = "otbcli_SampleExtraction -in %s -vec %s -outfield prefix -outfield.prefix.name %s -out %s -field %s" %(image_input, sample_points_output, BAND_NAME, vector_output, name_column)
            if ram_otb > 0:
                command += " -ram %d" %(ram_otb)
            if debug >= 3:
                print(command)
            exitCode = os.system(command)
            if exitCode != 0:
                raise NameError(cyan + "selectSamples() : " + bold + red + "An error occured during otbcli_SampleExtraction command. See error message above." + endC)

        # Case with several image tiles
        else :

            # The output directory
            repertory_output = os.path.dirname(vector_output)
            # Initialize the list for multi-threading and the list of all the local samples
            thread_list = []
            vector_local_output_list = []

            # Get the footprint of each input image to re-clip the training sample vector for that image
            for image_input in image_input_list :
                # Define the files on the local footprint
                file_name = os.path.splitext(os.path.basename(image_input))[0]
                emprise_local_sample = repertory_output + os.sep + file_name + SUFFIX_SAMPLE + extension_vector
                vector_sample_local_output = repertory_output + os.sep + file_name + SUFFIX_VALUE + extension_vector
                vector_local_output_list.append(vector_sample_local_output)

                # Non-threaded version...
                #SampleLocalExtraction(image_input, sample_points_output, emprise_local_sample, vector_sample_local_output, name_column, BAND_NAME, ram_otb, format_vector, extension_vector, save_results_intermediate)

                # Multi-threaded version
                thread = threading.Thread(target=SampleLocalExtraction, args=(image_input, sample_points_output, emprise_local_sample, vector_sample_local_output, name_column, BAND_NAME, ram_otb, format_vector, extension_vector, save_results_intermediate))
                thread.start()
                thread_list.append(thread)

            # Extract the sample point values from the images
            try:
                for thread in thread_list:
                    thread.join()
            except:
                print(cyan + "selectSamples() : " + bold + red + "Erreur lors de l'éextaction des valeurs d'echantion : impossible de demarrer le thread" + endC, file=sys.stderr)

            # Merge the multiple point vectors holding the image band values
            fusionVectors(vector_local_output_list, vector_output, format_vector)

            # Clean up the local sample point vector files
            for vector_sample_local_output in vector_local_output_list :
                removeVectorFile(vector_sample_local_output)

        if debug >= 3:
            print(cyan + "selectSamples() : " + bold + green + "End extraction des points d'echantillon dans l'image." + endC)

        # 7. COMPUTE STATISTICS ON THE VALUES OF THE SELECTED SAMPLE POINTS
        #------------------------------------------------------------------

        if debug >= 3:
            print(cyan + "selectSamples() : " + bold + green + "Start calcul des statistiques sur les valeurs des points d'echantillons selectionnees..." + endC)

        # If the statistics computation is requested (a stat file path is provided)
        if table_statistics_output != "":

            # Get the data list
            pending_event = cyan + "selectSamples() : " + bold + green + "Encours calcul des statistiques part1... " + endC
            if debug >=4:
                print(pending_event)
            timeLine(path_time_log,pending_event)

            attribute_name_dico = {}
            name_field_value_list = []
            names_attribut_list = getAttributeNameList(vector_output, format_vector)
            if debug >=4:
                print("names_attribut_list = " + str(names_attribut_list))

            attribute_name_dico[name_column] = ogr.OFTInteger
            for name_attribut in names_attribut_list :
                if BAND_NAME in name_attribut :
                    attribute_name_dico[name_attribut] = ogr.OFTReal
                    name_field_value_list.append(name_attribut)

            name_field_value_list.sort()

            res_values_dico = getAttributeValues(vector_output, None, None, attribute_name_dico, format_vector)
            del attribute_name_dico

            # Sort the data by micro-class identifier
            pending_event = cyan + "selectSamples() : " + bold + green + "Encours calcul des statistiques part2... " + endC
            if debug >=4:
                print(pending_event)
            timeLine(path_time_log,pending_event)

            data_value_by_micro_class_dico = {}
            stat_by_micro_class_dico = {}

            # Initialize the nested dico
            for id_micro in id_micro_list :
                data_value_by_micro_class_dico[id_micro] = {}
                stat_by_micro_class_dico[id_micro] = {}
                for name_field_value in res_values_dico :
                    if name_field_value != name_column :
                        data_value_by_micro_class_dico[id_micro][name_field_value] = []
                        stat_by_micro_class_dico[id_micro][name_field_value] = {}
                        stat_by_micro_class_dico[id_micro][name_field_value][AVERAGE] = 0.0
                        stat_by_micro_class_dico[id_micro][name_field_value][STANDARD_DEVIATION] = 0.0

            # Sort the values
            pending_event = cyan + "selectSamples() : " + bold + green + "Encours calcul des statistiques part3... " + endC
            if debug >=4:
                print(pending_event)
            timeLine(path_time_log,pending_event)

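            # Each feature carries its micro class identifier in name_column; its band
            # values are appended to the bucket of that micro class.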
            for index in range(len(res_values_dico[name_column])) :
                id_micro = res_values_dico[name_column][index]
                for name_field_value in name_field_value_list :
                    data_value_by_micro_class_dico[id_micro][name_field_value].append(res_values_dico[name_field_value][index])
            del res_values_dico

            # Compute the statistics
            pending_event = cyan + "selectSamples() : " + bold + green + "Statistics computation in progress, part 4... " + endC
            if debug >=4:
                print(pending_event)
            timeLine(path_time_log,pending_event)

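            # average() and standardDeviation() are helpers from the project's own
            # utility modules and are expected to raise on an empty value list, in
            # which case the statistic defaults to 0. A standard-library equivalent
            # (sketch, assuming a population standard deviation) would be:
            #     from statistics import mean, pstdev
            #     mean(values_list), pstdev(values_list)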
            for id_micro in id_micro_list :
                for name_field_value in name_field_value_list :
                    values_list = data_value_by_micro_class_dico[id_micro][name_field_value]
                    try :
                        stat_by_micro_class_dico[id_micro][name_field_value][AVERAGE] = average(values_list)
                    except Exception:
                        stat_by_micro_class_dico[id_micro][name_field_value][AVERAGE] = 0
                    try :
                        stat_by_micro_class_dico[id_micro][name_field_value][STANDARD_DEVIATION] = standardDeviation(values_list)
                    except Exception:
                        stat_by_micro_class_dico[id_micro][name_field_value][STANDARD_DEVIATION] = 0
                    stat_by_micro_class_dico[id_micro][name_field_value][NB_POINTS] = len(values_list)

            del data_value_by_micro_class_dico

            # Create the .csv statistics file
            pending_event = cyan + "selectSamples() : " + bold + green + "Statistics computation in progress, part 5... " + endC
            if debug >= 4:
                print(pending_event)
            timeLine(path_time_log,pending_event)

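            # The output table is a semicolon-separated file with one row per
            # (micro class, band field) pair.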
            text_csv = " Micro classes ; Champs couche image ; Nombre de points  ; Moyenne ; Ecart type \n"
            writeTextFile(table_statistics_output, text_csv)
            for id_micro in id_micro_list :
                for name_field_value in name_field_value_list :
                    # Write the record for this micro class / band pair
                    text_csv = " %d " %(id_micro)
                    text_csv += " ; %s" %(name_field_value)
                    text_csv += " ; %d" %(stat_by_micro_class_dico[id_micro][name_field_value][NB_POINTS])
                    text_csv += " ; %f" %(stat_by_micro_class_dico[id_micro][name_field_value][AVERAGE])
                    text_csv += " ; %f" %(stat_by_micro_class_dico[id_micro][name_field_value][STANDARD_DEVIATION])
                    appendTextFileCR(table_statistics_output, text_csv)
            del name_field_value_list

        else :
            if debug >=3:
                print(cyan + "selectSamples() : " + bold + green + "Pas de calcul des statistiques sur les valeurs des points demander!!!." + endC)

        del id_micro_list

        pending_event = cyan + "selectSamples() : " + bold + green + "End of statistics computation on the selected sample point values. " + endC
        if debug >= 3:
            print(pending_event)
        timeLine(path_time_log,pending_event)


    # 8. DELETE INTERMEDIATE FILES
    #-----------------------------

    if not save_results_intermediate:

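        # The temporary sample point vector is only kept when intermediate results
        # are explicitly requested.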
        if os.path.isfile(sample_points_output) :
            removeVectorFile(sample_points_output)

    print(cyan + "selectSamples() : " + bold + green + "FIN DE LA SELECTION DE POINTS" + endC)

    # Update the log
    ending_event = "selectSamples() : Select points in raster mask macro input ending : "
    timeLine(path_time_log,ending_event)

    return