Ejemplo n.º 1
0
def processingMacroSample(sample_raster_file_input, sample_raster_file_output, file_mask_input, threshold_min, threshold_max, filter_size_zone_0, filter_size_zone_1, mask_operator, repertory_temp, codage, path_time_log) :
    """
    Produce a macro-class sample raster: threshold the mask image into a
    binary mask, optionally clean it with a size filter, then combine it
    with the input sample raster using the AND or OR operator.

    Raises NameError when mask_operator is neither 'and' nor 'or'
    (case-insensitive).
    """
    # Log the start of the processing
    starting_event = "processingMacroSample() : processing macro sample starting : "
    timeLine(path_time_log, starting_event)

    if debug >= 3:
        print(" ")
        print(bold + green + "processingMacroSample() : Variables dans la fonction" + endC)
        for param_name, param_value in (("sample_raster_file_input", sample_raster_file_input),
                                        ("sample_raster_file_output", sample_raster_file_output),
                                        ("file_mask_input", file_mask_input),
                                        ("threshold_min", threshold_min),
                                        ("threshold_max", threshold_max),
                                        ("filter_size_zone_0", filter_size_zone_0),
                                        ("filter_size_zone_1", filter_size_zone_1),
                                        ("mask_operator", mask_operator),
                                        ("repertory_temp", repertory_temp),
                                        ("codage", codage),
                                        ("path_time_log", path_time_log)):
            print(cyan + "processingMacroSample() : " + endC + param_name + " : " + str(param_value) + endC)

    print(cyan + "processingMacroSample() : " + bold + green + "Traitement en cours..." + endC)

    # Build the temporary file names and clear leftovers from previous runs
    mask_basename = os.path.splitext(os.path.basename(file_mask_input))[0]
    mask_extension = os.path.splitext(file_mask_input)[1]
    file_mask_output_temp = repertory_temp + os.sep + mask_basename + "_mask_tmp" + mask_extension
    if os.path.isfile(file_mask_output_temp):
        removeFile(file_mask_output_temp)
    file_mask_filtered_output_temp = repertory_temp + os.sep + mask_basename + "_mask_filtered_tmp" + mask_extension
    if os.path.isfile(file_mask_filtered_output_temp):
        removeFile(file_mask_filtered_output_temp)

    # Binary mask creation by thresholding
    createBinaryMaskThreshold(file_mask_input, file_mask_output_temp, threshold_min, threshold_max)

    # Binary filtering (skipped when both filter sizes are zero)
    if filter_size_zone_0 == 0 and filter_size_zone_1 == 0:
        file_mask_filtered_output_temp = file_mask_output_temp
    else:
        filterBinaryRaster(file_mask_output_temp, file_mask_filtered_output_temp, filter_size_zone_0, filter_size_zone_1)

    # Mask out the zones that were not retained
    operator_lower = mask_operator.lower()
    if operator_lower == "and":
        applyMaskAnd(sample_raster_file_input, file_mask_filtered_output_temp, sample_raster_file_output, codage)
    elif operator_lower == "or":
        applyMaskOr(sample_raster_file_input, file_mask_filtered_output_temp, sample_raster_file_output, codage)
    else:
        raise NameError(cyan + "processingMacroSample() : " + bold + red + "Mask operator unknown : " + str(mask_operator) + endC)

    print(cyan + "processingMacroSample() : " + bold + green + "Fin du traitement" + endC)

    # Log the end of the processing
    ending_event = "processingMacroSample() : processing macro sample ending : "
    timeLine(path_time_log, ending_event)

    return
Ejemplo n.º 2
0
def computeAreaMicro(repertory_output, table_input_name, data_base_name, micro):
    """
    Return the surface (float) of the micro-class *micro*, obtained by
    running a spatialite surface query whose output is redirected into a
    temporary text file in *repertory_output*.

    Raises NameError when the shell command exits with a non-zero status.
    """
    temporary_file = repertory_output + os.sep + "spatialiteTemp.txt"

    # Run the SQL query, redirecting its result into the temporary file
    query = sqlSurfaceMicro(table_input_name, "ID", micro, data_base_name)
    if os.system("%s > %s" % (query, temporary_file)) != 0:
        raise NameError(cyan + "computeAreaMicro() : " + bold + red + "An error occured during fileCreation command. See error message above." + endC)

    # The first token of the first line holds the computed surface
    area_micro = float(readTextFileBySeparator(temporary_file, " ")[0][0])
    removeFile(temporary_file)

    return area_micro
Ejemplo n.º 3
0
def createMask(image_input, vector_samples_input, image_masked, path_time_log, save_results_intermediate=False, overwrite=True):
    """
    Rasterize a training-sample vector into a binary mask (uint8, burn
    value 1) aligned on the reference image.

    image_input               : reference image defining the output grid
    vector_samples_input      : vector file containing the training samples
    image_masked              : output binary mask raster
    path_time_log             : log file updated at start and end
    save_results_intermediate : kept for interface compatibility (unused here)
    overwrite                 : when False, an existing output is left as-is
    """
    # Log the start of the mask creation
    starting_event = "createMask() : Masks creation starting : "
    timeLine(path_time_log, starting_event)

    print(endC)
    print(bold + green + "## START : MASQUES CREATION" + endC)
    print(endC)

    # Pixel type used for the rasterization
    CODAGE = "uint8"

    if debug >= 2:
        print(bold + green + "createMask() : Variables dans la fonction" + endC)
        for param_name, param_value in (("image_input", image_input),
                                        ("vector_samples_input", vector_samples_input),
                                        ("image_masked", image_masked),
                                        ("path_time_log", path_time_log),
                                        ("save_results_intermediate", save_results_intermediate),
                                        ("overwrite", overwrite)):
            print(cyan + "createMask() : " + endC + param_name + " : " + str(param_value) + endC)

    # RASTERIZATION OF THE TRAINING VECTORS

    # Skip the computation when the output already exists and overwrite is off
    already_done = os.path.isfile(image_masked)
    if already_done and not overwrite:
        print(bold + yellow +  "createMask() : " + endC + "Computing mask from %s with %s already done : no actualisation" % (image_input, vector_samples_input) + endC)
    else:
        if already_done:
            # Best-effort removal of the previous output file
            try:
                removeFile(image_masked)
            except Exception:
                pass  # if it cannot be removed, assume it does not exist and carry on

        # MASK EXTRACTION
        print(bold + green +  "createMask() : " + endC + "Computing mask from %s with %s " %(image_input, vector_samples_input) + endC)

        rasterizeBinaryVector(vector_samples_input, image_input, image_masked, 1, CODAGE)

        print(bold + green +  "createMask() : " + endC + "Computing mask from %s with %s completed" %(image_input, vector_samples_input) + endC)

    print(endC)
    print(bold + green + "## END : MASQUES CREATION" + endC)
    print(endC)

    # Log the end of the mask creation
    ending_event = "createMask() : Masks creation ending : "
    timeLine(path_time_log, ending_event)

    return
Ejemplo n.º 4
0
def getFtp(ftp, path_ftp, local_path, all_path, file_error):
    """
    Recursively download the contents of *path_ftp* from an open ftplib
    connection into *local_path*.

    Failed downloads are logged (one path per line, prefixed by *all_path*)
    into *file_error* via appendTextFileCR(), and any partial local file is
    removed. Downloaded files whose extension is a known raster format are
    validated with imageControl(); invalid ones are logged and removed too.

    Parameters:
        ftp        : already-connected ftplib.FTP instance
        path_ftp   : remote directory to fetch (relative to the current cwd)
        local_path : local destination directory (created if missing)
        all_path   : path prefix used when recording errors in file_error
        file_error : text file receiving the paths of failed files
    """
    # Raster extensions that get a post-download integrity check
    EXT_LIST = ['.tif', '.tiff', '.ecw', '.jp2', '.asc']

    ftp.cwd(path_ftp)
    data_list = []
    ftp.retrlines("LIST", data_list.append)

    for data in data_list:

        # In a LIST line, the entry name is the last space-separated token
        # (NOTE: names containing spaces are truncated — limitation kept as-is)
        filename = data.split(' ')[-1]
        if data[0] == 'd':
            # Directory entry: recurse into it, then come back up
            print(cyan + "getFtp() : " + green + "Get directory : " +
                  filename + endC)
            getFtp(ftp, filename, local_path + os.sep + filename,
                   all_path + os.sep + filename, file_error)
            ftp.cwd("..")
        else:
            print(cyan + "getFtp() : " + green + "Download file : " +
                  filename + endC)
            local_filename = local_path + os.sep + filename
            filename_error = all_path + os.sep + filename
            try:
                if not os.path.isdir(local_path):
                    os.makedirs(local_path)
                # FIX: 'with' guarantees the handle is closed even when the
                # transfer fails midway (was an unclosed open().write)
                with open(local_filename, 'wb') as local_file:
                    ftp.retrbinary("RETR " + filename, local_file.write)
            except Exception:  # FIX: was a bare except (caught Ctrl-C too)
                print(cyan + "getFtp() : " + bold + red +
                      "Error during download " + filename + " from FTP" + endC,
                      file=sys.stderr)
                appendTextFileCR(file_error, filename_error)
                if os.path.isfile(local_filename):
                    removeFile(local_filename)
                # FIX: the file is gone — skip the image check below instead of
                # running it on a missing path and double-logging the error
                continue

            extent_name = os.path.splitext(
                os.path.basename(local_filename))[1].lower()

            if extent_name in EXT_LIST:
                # Validate the downloaded image; log and drop it when invalid
                test_image = imageControl(local_filename)
                if not test_image:
                    appendTextFileCR(file_error, filename_error)
                    if os.path.isfile(local_filename):
                        removeFile(local_filename)
    return
Ejemplo n.º 5
0
def computeConfusionMatrix(classif_image_file, validation_input_vector,
                           validation_input_raster, validation_id_field,
                           output, no_data_value, overwrite):
    """
    Compute the confusion matrix of a classification with the OTB
    otbcli_ComputeConfusionMatrix application.

    classif_image_file      : classified raster to assess
    validation_input_vector : validation samples as a vector file, or None
    validation_input_raster : validation samples as a raster (used when the
                              vector is None)
    validation_id_field     : class-label field of the validation vector
    output                  : destination file for the matrix
    no_data_value           : value ignored in both classification and reference
    overwrite               : when False, an existing output is kept untouched

    Raises NameError if the OTB command exits with a non-zero status.
    """
    # Keep the existing matrix when overwrite is disabled
    check = os.path.isfile(output)
    if check and not overwrite:
        print(cyan + "computeConfusionMatrix() : " + bold + yellow +
              "Confusion matrix already exists." + '\n' + endC)
    else:
        # Best-effort removal of a previous output file
        try:
            removeFile(output)
        except Exception:
            # Ignored: the file simply did not exist (and thus cannot be removed)
            pass
        if debug >= 2:
            print(cyan + "computeConfusionMatrix() : " + bold + green +
                  "Assessing quality..." + '\n' + endC)

        # The reference samples may be supplied either as vector or as raster
        if validation_input_vector is not None:  # FIX: identity comparison, not != None
            # Vector reference samples
            command = "otbcli_ComputeConfusionMatrix -in %s -ref vector -ref.vector.in %s -ref.vector.field %s -no_data_value %s -ref.vector.nodata %s -out %s" % (
                classif_image_file, validation_input_vector,
                validation_id_field, str(no_data_value), str(no_data_value),
                output)
        else:
            # Raster reference samples
            command = "otbcli_ComputeConfusionMatrix -in %s -ref raster -ref.raster.in %s -no_data_value %s -ref.raster.nodata %s -out %s" % (
                classif_image_file, validation_input_raster,
                str(no_data_value), str(no_data_value), output)
        if debug >= 3:
            print(command)
        exitCode = os.system(command)
        if exitCode != 0:
            raise NameError(
                cyan + "computeConfusionMatrix() : " + bold + red +
                "An error occured during otbcli_ComputeConfusionMatrix command. See error message above."
                + endC)
        if debug >= 2:
            print(cyan + "computeConfusionMatrix() : " + bold + green +
                  "Confusion matrix created" + '\n' + endC)
    return
Ejemplo n.º 6
0
def computeAverageAreaMacro(repertory_output, class_labels_list, table_input_name, data_base_name):
    """
    Return, for each macro-class label in *class_labels_list*, the average
    surface computed by a spatialite query (0 when the query returns no row).

    The query output is redirected into a temporary text file in
    *repertory_output*, parsed, then removed.

    Raises NameError when the shell command exits with a non-zero status.
    """
    average_area_macro_list = []
    sql_temporary_file = repertory_output + os.sep + "spatialiteTemp.txt"

    for class_label in class_labels_list:
        requete = sqlSurfaceAverageMacro(table_input_name, "ID", class_label, data_base_name)
        exitCode = os.system("%s > %s"%(requete,sql_temporary_file))
        if exitCode != 0:
            raise NameError(cyan + "computeAverageAreaMacro() : " + bold + red + "An error occured during file creation. See error message above." + endC)
        requete_result = readTextFileBySeparator(sql_temporary_file, " ")
        # FIX: this diagnostic print was unconditional; gate it on the debug
        # level like every other trace in this module
        if debug >= 3:
            print(requete_result)
        # An empty result means no geometry for this class: average is 0
        if requete_result == []:
            average_area_macro_list.append(0)
        else:
            average_area_macro_list.append(float(requete_result[0][0]))
        removeFile(sql_temporary_file)

    return average_area_macro_list
def displayCommands(command_doc, debug):
    """
    Poll *command_doc*, regenerate the Graphviz workflow graph and display it
    as a PNG, looping until getEndDisplay() reports that display must stop.

    command_doc : commands documentation file to convert to .dot/.png
    debug       : verbosity level forwarded to the helpers
    """
    # Constants
    EXT_DOT = ".dot"
    EXT_PNG = ".png"
    GRAPH_NAME = "workflows_commandes"
    process = None
    # FIX: 'root' was unbound (NameError at endDisplayImage) when the graph
    # branch below never executed before the loop ended
    root = None

    while not getEndDisplay():
        # Define dot file (remove a stale one from the previous iteration)
        dot_file = os.path.splitext(command_doc)[0] + EXT_DOT
        if os.path.isfile(dot_file):
            removeFile(dot_file)
        png_file = os.path.splitext(command_doc)[0] + EXT_PNG

        # Parse the commands file into a structure dictionary
        struct_cmd_dico = readCommands(command_doc, debug)

        if struct_cmd_dico is not None and struct_cmd_dico != {}:
            convert2dot(command_doc, struct_cmd_dico, GRAPH_NAME, dot_file,
                        debug)
            # Ignore Graphviz warning messages
            warnings.simplefilter('ignore', RuntimeWarning)
            graph = pgv.AGraph(name=GRAPH_NAME)
            # Convert the .dot file to a PNG layout
            graph.read(dot_file)
            graph.layout(prog='dot')  # layout with default (neato)
            graph.draw(png_file, format='png', prog='dot')
            # Display the PNG image
            process, root = displayImage(process, png_file, GRAPH_NAME, debug)

        # Wait before the next polling iteration
        time.sleep(1)

    # Stop displaying the image (process/root may still be None if nothing
    # was ever drawn — presumably endDisplayImage tolerates that; verify)
    endDisplayImage(process, root)

    return
Ejemplo n.º 8
0
def vectorsListToOcs(input_text,
                     output_raster,
                     footprint_vector,
                     reference_raster,
                     codage_raster='uint8',
                     epsg=2154,
                     no_data_value=0,
                     format_raster='GTiff',
                     format_vector='ESRI Shapefile',
                     extension_raster='.tif',
                     extension_vector='.shp',
                     path_time_log='',
                     save_results_intermediate=False,
                     overwrite=True):
    """
    Build a land-cover (OCS) raster from a list of vector files described in
    a text file.

    Each line of *input_text* is 'label:vector_path[:buffer][:sql_filter]'
    (separator ':'). Every vector is cut to *footprint_vector*, optionally
    SQL-filtered and buffered, rasterized with its class label on the grid of
    *reference_raster*, then merged into *output_raster* (already-written
    labels take precedence where rasters overlap).

    Returns 0 on success. Raises NameError when the footprint extent exceeds
    the reference raster, or when the output already exists with
    overwrite=False.
    """
    if debug >= 3:
        print(
            '\n' + bold + green +
            "OCS raster à partir d'une liste de vecteurs - Variables dans la fonction :"
            + endC)
        for param_name, param_value in (("input_text", input_text),
                                        ("output_raster", output_raster),
                                        ("footprint_vector", footprint_vector),
                                        ("reference_raster", reference_raster),
                                        ("codage_raster", codage_raster),
                                        ("epsg", epsg),
                                        ("no_data_value", no_data_value),
                                        ("format_raster", format_raster),
                                        ("format_vector", format_vector),
                                        ("extension_raster", extension_raster),
                                        ("extension_vector", extension_vector),
                                        ("path_time_log", path_time_log),
                                        ("save_results_intermediate", save_results_intermediate)):
            print(cyan + "    vectorsListToOcs() : " + endC + param_name +
                  " : " + str(param_value) + endC)
        print(cyan + "    vectorsListToOcs() : " + endC + "overwrite : " +
              str(overwrite) + endC + '\n')

    # Constants
    SUFFIX_TEMP = '_temp'
    SUFFIX_CUT = '_cut'
    SUFFIX_FILTER = '_filter'
    SUFFIX_BUFFER = '_buffer'
    TEXT_SEPARATOR = ':'

    # Log update (start)
    starting_event = "vectorsListToOcs() : Début du traitement : "
    timeLine(path_time_log, starting_event)

    print(cyan + "vectorsListToOcs() : " + bold + green +
          "DEBUT DES TRAITEMENTS" + endC + '\n')

    # 'basename' variables
    output_raster_basename = os.path.basename(
        os.path.splitext(output_raster)[0])
    output_raster_dirname = os.path.dirname(output_raster)

    # Temporary paths
    temp_directory = output_raster_dirname + os.sep + output_raster_basename + SUFFIX_TEMP
    temp_raster = temp_directory + os.sep + output_raster_basename + SUFFIX_TEMP + extension_raster

    # Clean up artefacts of previous runs
    if overwrite:
        if debug >= 3:
            print(cyan + "vectorsListToOcs() : " + endC +
                  "Nettoyage des traitements précédents." + '\n')
        removeFile(output_raster)
        cleanTempData(temp_directory)
    else:
        if os.path.exists(output_raster):
            print(cyan + "vectorsListToOcs() : " + bold + yellow +
                  "Le fichier de sortie existe déjà et ne sera pas regénéré." +
                  endC)
            # FIX: a bare 'raise' with no active exception produced a
            # confusing RuntimeError; raise an explicit error instead
            raise NameError(cyan + "vectorsListToOcs() : " + bold + yellow +
                            "Le fichier de sortie existe déjà et ne sera pas regénéré." + endC)
        # FIX: typo 'os.path.exixts' crashed with AttributeError here
        if not os.path.exists(temp_directory):
            os.makedirs(temp_directory)

    # Check the footprint vector against the reference raster extent (the
    # raster must be the same size as, or larger than, the vector)
    xmin_fpt, xmax_fpt, ymin_fpt, ymax_fpt = getEmpriseFile(
        footprint_vector, format_vector=format_vector)
    xmin_ref, xmax_ref, ymin_ref, ymax_ref = getEmpriseImage(reference_raster)
    if round(xmin_fpt, 4) < round(xmin_ref, 4) or round(xmax_fpt, 4) > round(
            xmax_ref, 4) or round(ymin_fpt, 4) < round(ymin_ref, 4) or round(
                ymax_fpt, 4) > round(ymax_ref, 4):
        print(cyan + "vectorsListToOcs() : " + bold + red +
              "xmin_fpt, xmax_fpt, ymin_fpt, ymax_fpt" + endC,
              xmin_fpt, xmax_fpt, ymin_fpt, ymax_fpt,
              file=sys.stderr)
        print(cyan + "vectorsListToOcs() : " + bold + red +
              "xmin_ref, xmax_ref, ymin_ref, ymax_ref" + endC,
              xmin_ref, xmax_ref, ymin_ref, ymax_ref,
              file=sys.stderr)
        raise NameError(
            cyan + "vectorsListToOcs() : " + bold + red +
            "The extend of the footprint vector (%s) is greater than the reference raster (%s)."
            % (footprint_vector, reference_raster) + endC)

    # Read the list of treatments from the input text file
    text_list = readTextFileBySeparator(input_text, TEXT_SEPARATOR)

    ####################################################################

    print(cyan + "vectorsListToOcs() : " + bold + green +
          "Début de la génération de l'OCS raster à partir de vecteurs." +
          endC + '\n')

    # Loop over the treatments to perform
    # FIX: enumerate replaces 'text_list.index(text) + 1', which was O(n²)
    # and returned the wrong index for duplicate lines (re-triggering the
    # first-raster copy branch below)
    for idx, text in enumerate(text_list, start=1):
        class_label = int(text[0])
        vector_file = text[1]
        if debug >= 3:
            print(cyan + "vectorsListToOcs() : " + endC + bold +
                  "Génération %s/%s : " % (idx, len(text_list)) + endC +
                  "traitement du fichier %s (label %s)." %
                  (vector_file, str(class_label)) + '\n')

        # Temporary file names for this vector
        vector_file_basename = os.path.basename(
            os.path.splitext(vector_file)[0])
        vector_file_cut = temp_directory + os.sep + vector_file_basename + SUFFIX_CUT + extension_vector
        vector_file_filter = temp_directory + os.sep + vector_file_basename + SUFFIX_FILTER + extension_vector
        vector_file_buffer = temp_directory + os.sep + vector_file_basename + SUFFIX_BUFFER + extension_vector
        vector_file_raster = temp_directory + os.sep + vector_file_basename + extension_raster

        # Optional treatment parameters: buffer (numeric value or column
        # name) and SQL filter; missing fields fall back to ''
        try:
            buffer_len = float(text[2])
        except ValueError:
            buffer_len = text[2]  # not a number: treat it as a column name
        except Exception:
            buffer_len = ''  # field absent from the line
        try:
            sql_filter = text[3]
        except Exception:
            sql_filter = ''

        # Cut the vector to the study-area footprint
        if debug >= 3:
            print(cyan + "vectorsListToOcs() : " + endC +
                  "Découpage à l'emprise de la zone d'étude." + '\n')
        cutVectorAll(footprint_vector,
                     vector_file,
                     vector_file_cut,
                     overwrite=overwrite,
                     format_vector=format_vector)

        # SQL filtering (optional)
        if sql_filter != '':
            if debug >= 3:
                print(cyan + "vectorsListToOcs() : " + endC +
                      "Application du filtrage SQL : %s." % sql_filter + '\n')
            attr_names_list = getAttributeNameList(vector_file_cut,
                                                   format_vector=format_vector)
            # Build the quoted, comma-separated column list expected by the helper
            column = "'" + ", ".join(attr_names_list) + "'"
            filterSelectDataVector(vector_file_cut,
                                   vector_file_filter,
                                   column,
                                   sql_filter,
                                   overwrite=overwrite,
                                   format_vector=format_vector)
        else:
            vector_file_filter = vector_file_cut

        # Buffer application (optional)
        if buffer_len != '' and buffer_len != 0:
            if debug >= 3:
                print(cyan + "vectorsListToOcs() : " + endC +
                      "Application d'un buffer : %s." % buffer_len + '\n')
            if isinstance(buffer_len, float):
                # Fixed buffer distance
                bufferVector(vector_file_filter,
                             vector_file_buffer,
                             buffer_len,
                             col_name_buf='',
                             fact_buf=1.0,
                             quadsecs=10,
                             format_vector=format_vector)
            else:
                # Per-feature buffer distance read from a column
                bufferVector(vector_file_filter,
                             vector_file_buffer,
                             0,
                             col_name_buf=buffer_len,
                             fact_buf=1.0,
                             quadsecs=10,
                             format_vector=format_vector)
        else:
            vector_file_buffer = vector_file_filter

        # Rasterize the prepared vector
        if debug >= 3:
            print(cyan + "vectorsListToOcs() : " + endC +
                  "Rastérisation du vecteur préparé." + '\n')
        rasterizeBinaryVector(vector_file_buffer,
                              reference_raster,
                              vector_file_raster,
                              label=class_label,
                              codage=codage_raster)

        # Merge this raster into the output raster
        if debug >= 3:
            print(cyan + "vectorsListToOcs() : " + endC +
                  "Ajout de l'information dans le raster de sortie." + '\n')
        if idx == 1:
            # First vector: it becomes the initial output raster
            shutil.copy(vector_file_raster, output_raster)
        else:
            # Combine with the accumulated output: keep the already-written
            # label where present, take the new raster elsewhere
            removeFile(temp_raster)
            shutil.copy(output_raster, temp_raster)
            removeFile(output_raster)
            expression = "im1b1!=%s ? im1b1 : im2b1" % no_data_value
            rasterCalculator([temp_raster, vector_file_raster],
                             output_raster,
                             expression,
                             codage=codage_raster)

    print(cyan + "vectorsListToOcs() : " + bold + green +
          "Fin de la génération de l'OCS raster à partir de vecteurs." + endC +
          '\n')

    ####################################################################

    # Remove the temporary files unless intermediate results must be kept
    if not save_results_intermediate:
        if debug >= 3:
            print(cyan + "vectorsListToOcs() : " + endC +
                  "Suppression des fichiers temporaires." + '\n')
        deleteDir(temp_directory)

    print(cyan + "vectorsListToOcs() : " + bold + green +
          "FIN DES TRAITEMENTS" + endC + '\n')

    # Log update (end)
    ending_event = "vectorsListToOcs() : Fin du traitement : "
    timeLine(path_time_log, ending_event)

    return 0
Ejemplo n.º 9
0
def createMnh(image_mns_input, image_mnt_input, image_threshold_input, vector_emprise_input, image_mnh_output, automatic, bd_road_vector_input_list, bd_road_buff_list, sql_road_expression_list, bd_build_vector_input_list, height_bias, threshold_bd_value, threshold_delta_h, mode_interpolation, method_interpolation, interpolation_bco_radius, simplify_vector_param, epsg, no_data_value, ram_otb, path_time_log, format_raster='GTiff', format_vector='ESRI Shapefile', extension_raster=".tif", extension_vector=".shp", save_results_intermediate=False, overwrite=True):
    """
    Create a height model raster (MNH = MNS - MNT) clipped to a footprint vector.

    Pipeline: cut the MNS and MNT rasters by the footprint, fill nodata holes in
    both (SAGA), resample the MNT onto the MNS grid (otbcli_Superimpose), subtract
    MNT from MNS with an optional height bias (otbcli_BandMath), optionally blank
    out road areas (from exogenous road vector databases, optionally filtered by a
    thresholded mask image), cut the result by the footprint, and optionally burn
    in building heights taken from exogenous building vector databases.

    Parameters (main ones):
        image_mns_input / image_mnt_input : input surface / terrain model rasters.
        image_threshold_input : optional raster (e.g. NDVI) thresholded with
            threshold_bd_value to refine the road mask; "" disables it.
        vector_emprise_input : footprint vector used for all cuts and hole masks.
        image_mnh_output : output MNH raster path.
        automatic : if False, pause for manual check of the building vector.
        bd_road_vector_input_list, bd_road_buff_list, sql_road_expression_list :
            road databases with per-file buffers and SQL filters; [] disables.
        bd_build_vector_input_list : building databases; [] disables.
        height_bias : constant added to the MNS-MNT difference.
        threshold_delta_h : keep only buildings whose height differs from the
            local MNH mean by more than this value.
        mode_interpolation, method_interpolation, interpolation_bco_radius :
            otbcli_Superimpose resampling options.
        epsg, no_data_value, ram_otb : projection, nodata and OTB RAM settings.
        path_time_log : log file updated at start and end.
        format_raster, format_vector, extension_raster, extension_vector :
            file format options.
        save_results_intermediate : keep temporary files if True.
        overwrite : recompute even if image_mnh_output already exists.

    Returns:
        None.

    Raises:
        NameError: if any cut, superimpose or BandMath step fails.
    """
    # Update the log
    starting_event = "createMnh() : MNH creation starting : "
    timeLine(path_time_log,starting_event)

    print(endC)
    print(bold + green + "## START : MNH CREATION" + endC)
    print(endC)

    if debug >= 2:
        print(bold + green + "createMnh() : Variables dans la fonction" + endC)
        print(cyan + "createMnh() : " + endC + "image_mns_input : " + str(image_mns_input) + endC)
        print(cyan + "createMnh() : " + endC + "image_mnt_input : " + str(image_mnt_input) + endC)
        print(cyan + "createMnh() : " + endC + "image_threshold_input : " + str(image_threshold_input) + endC)
        print(cyan + "createMnh() : " + endC + "vector_emprise_input : " + str(vector_emprise_input) + endC)
        print(cyan + "createMnh() : " + endC + "image_mnh_output : " + str(image_mnh_output) + endC)
        print(cyan + "createMnh() : " + endC + "automatic : " + str(automatic) + endC)
        print(cyan + "createMnh() : " + endC + "bd_road_vector_input_list : " + str(bd_road_vector_input_list) + endC)
        print(cyan + "createMnh() : " + endC + "bd_road_buff_list : " + str(bd_road_buff_list) + endC)
        print(cyan + "createMnh() : " + endC + "sql_road_expression_list : " + str(sql_road_expression_list) + endC)
        print(cyan + "createMnh() : " + endC + "bd_build_vector_input_list : " + str(bd_build_vector_input_list) + endC)
        print(cyan + "createMnh() : " + endC + "height_bias : " + str(height_bias) + endC)
        print(cyan + "createMnh() : " + endC + "threshold_bd_value : " + str(threshold_bd_value) + endC)
        print(cyan + "createMnh() : " + endC + "threshold_delta_h : " + str(threshold_delta_h) + endC)
        print(cyan + "createMnh() : " + endC + "mode_interpolation : " + str(mode_interpolation) + endC)
        print(cyan + "createMnh() : " + endC + "method_interpolation : " + str(method_interpolation) + endC)
        print(cyan + "createMnh() : " + endC + "interpolation_bco_radius : " + str(interpolation_bco_radius) + endC)
        print(cyan + "createMnh() : " + endC + "simplify_vector_param : " + str(simplify_vector_param) + endC)
        print(cyan + "createMnh() : " + endC + "epsg : " + str(epsg) + endC)
        print(cyan + "createMnh() : " + endC + "no_data_value : " + str(no_data_value) + endC)
        print(cyan + "createMnh() : " + endC + "ram_otb : " + str(ram_otb) + endC)
        print(cyan + "createMnh() : " + endC + "path_time_log : " + str(path_time_log) + endC)
        print(cyan + "createMnh() : " + endC + "format_raster : " + str(format_raster) + endC)
        print(cyan + "createMnh() : " + endC + "format_vector : " + str(format_vector) + endC)
        print(cyan + "createMnh() : " + endC + "extension_raster : " + str(extension_raster) + endC)
        print(cyan + "createMnh() : " + endC + "extension_vector : " + str(extension_vector) + endC)
        print(cyan + "createMnh() : " + endC + "save_results_intermediate : " + str(save_results_intermediate) + endC)
        print(cyan + "createMnh() : " + endC + "overwrite : " + str(overwrite) + endC)

    # CONSTANTS
    # Small positive value used instead of 0 so that "no height" pixels stay strictly > 0
    PRECISION = 0.0000001

    CODAGE_8B = "uint8"
    CODAGE_F = "float"

    SUFFIX_CUT = "_cut"
    SUFFIX_CLEAN = "_clean"
    SUFFIX_SAMPLE = "_sample"
    SUFFIX_MASK = "_mask"
    SUFFIX_TMP = "_tmp"
    SUFFIX_MNS = "_mns"
    SUFFIX_MNT = "_mnt"
    SUFFIX_ROAD = "_road"
    SUFFIX_BUILD = "_build"
    SUFFIX_RASTER = "_raster"
    SUFFIX_VECTOR = "_vector"

    # DEFINE TEMPORARY DIRECTORIES AND FILES
    repertory_output = os.path.dirname(image_mnh_output)
    basename_mnh = os.path.splitext(os.path.basename(image_mnh_output))[0]

    sub_repertory_raster_temp = repertory_output + os.sep + basename_mnh + SUFFIX_RASTER + SUFFIX_TMP
    sub_repertory_vector_temp = repertory_output + os.sep + basename_mnh + SUFFIX_VECTOR + SUFFIX_TMP
    cleanTempData(sub_repertory_raster_temp)
    cleanTempData(sub_repertory_vector_temp)

    basename_vector_emprise = os.path.splitext(os.path.basename(vector_emprise_input))[0]
    basename_mns_input = os.path.splitext(os.path.basename(image_mns_input))[0]
    basename_mnt_input = os.path.splitext(os.path.basename(image_mnt_input))[0]

    image_mnh_tmp = sub_repertory_raster_temp + os.sep + basename_mnh + SUFFIX_TMP + extension_raster
    image_mnh_road = sub_repertory_raster_temp + os.sep + basename_mnh + SUFFIX_ROAD + extension_raster

    vector_bd_bati_temp = sub_repertory_vector_temp + os.sep + basename_mnh + SUFFIX_BUILD + SUFFIX_TMP + extension_vector
    vector_bd_bati = repertory_output + os.sep + basename_mnh + SUFFIX_BUILD + extension_vector
    raster_bd_bati = sub_repertory_vector_temp + os.sep + basename_mnh + SUFFIX_BUILD + extension_raster
    removeVectorFile(vector_bd_bati)

    image_emprise_mnt_mask = sub_repertory_raster_temp + os.sep + basename_vector_emprise + SUFFIX_MNT + extension_raster
    image_mnt_cut = sub_repertory_raster_temp + os.sep + basename_mnt_input + SUFFIX_CUT + extension_raster
    image_mnt_clean = sub_repertory_raster_temp + os.sep + basename_mnt_input + SUFFIX_CLEAN + extension_raster
    image_mnt_clean_sample = sub_repertory_raster_temp + os.sep + basename_mnt_input + SUFFIX_CLEAN + SUFFIX_SAMPLE + extension_raster
    image_emprise_mns_mask = sub_repertory_raster_temp + os.sep + basename_vector_emprise + SUFFIX_MNS + extension_raster
    image_mns_cut = sub_repertory_raster_temp + os.sep + basename_mns_input + SUFFIX_CUT + extension_raster
    image_mns_clean = sub_repertory_raster_temp + os.sep + basename_mns_input + SUFFIX_CLEAN + extension_raster

    vector_bd_road_temp = sub_repertory_vector_temp + os.sep + basename_mnh + SUFFIX_ROAD + SUFFIX_TMP + extension_vector
    raster_bd_road_mask = sub_repertory_raster_temp + os.sep + basename_mnh + SUFFIX_ROAD + SUFFIX_MASK + extension_raster

    # Threshold-mask file names only exist when a threshold image was supplied
    if image_threshold_input != "" :
        basename_threshold_input = os.path.splitext(os.path.basename(image_threshold_input))[0]
        image_threshold_cut = sub_repertory_raster_temp + os.sep + basename_threshold_input + SUFFIX_CUT + extension_raster
        image_threshold_mask = sub_repertory_raster_temp + os.sep + basename_threshold_input + SUFFIX_MASK + extension_raster

    # CHECK WHETHER THE OUTPUT FILE ALREADY EXISTS
    # If an output file with the same name already exists and overwrite is False, do nothing
    check = os.path.isfile(image_mnh_output)
    if check and not overwrite:
        print(bold + yellow +  "createMnh() : " + endC + "Create mnh %s from %s and %s already done : no actualisation" % (image_mnh_output, image_mns_input, image_mnt_input) + endC)
    # Otherwise (or if overwrite is enabled), compute it
    else:
        if check:
            try: # Remove the pre-existing file if any
                removeFile(image_mnh_output)
            except Exception:
                pass # If the file cannot be removed, assume it does not exist and continue

        # CUT THE INPUT MNS AND MNT FILES BY THE FOOTPRINT FILE
        if debug >= 3:
            print(bold + green +  "createMnh() : " + endC + "Decoupage selon l'emprise des fichiers %s et %s " %(image_mns_input, image_mnt_input) + endC)

        # Cut the MNS
        if not cutImageByVector(vector_emprise_input, image_mns_input, image_mns_cut, None, None, no_data_value, epsg, format_raster, format_vector) :
            raise NameError (cyan + "createMnh() : " + bold + red + "!!! Une erreur c'est produite au cours du decoupage de l'image : " + image_mns_input + ". Voir message d'erreur." + endC)

        # Cut the MNT
        if not cutImageByVector(vector_emprise_input, image_mnt_input, image_mnt_cut, None, None, no_data_value, epsg, format_raster, format_vector) :
            raise NameError (cyan + "createMnh() : " + bold + red + "!!! Une erreur c'est produite au cours du decoupage de l'image : " + image_mnt_input + ". Voir message d'erreur." + endC)

        if debug >= 3:
            print(bold + green +  "createMnh() : " + endC + "Decoupage des fichiers %s et %s complet" %(image_mns_cut, image_mnt_cut) + endC)


        # FILL NODATA HOLES IN THE INPUT MNT IF NECESSARY

        nodata_mnt = getNodataValueImage(image_mnt_cut)
        pixelNodataCount = countPixelsOfValue(image_mnt_cut, nodata_mnt)

        if pixelNodataCount > 0 :

            if debug >= 3:
                print(bold + green +  "createMnh() : " + endC + "Fill the holes MNT for  %s" %(image_mnt_cut) + endC)

            # Rasterize the footprint vector to create a mask used to fill the MNT holes
            rasterizeBinaryVector(vector_emprise_input, image_mnt_cut, image_emprise_mnt_mask, 1, CODAGE_8B)

            # Use SAGA to fill the holes
            fillNodata(image_mnt_cut, image_emprise_mnt_mask, image_mnt_clean, save_results_intermediate)

            if debug >= 3:
                print(bold + green +  "createMnh() : " + endC + "Fill the holes MNT to %s completed" %(image_mnt_clean) + endC)

        else :
            image_mnt_clean = image_mnt_cut
            if debug >= 3:
                print(bold + green +  "\ncreateMnh() : " + endC + "Fill the holes not necessary MNT for %s" %(image_mnt_cut) + endC)


        # FILL NODATA HOLES IN THE INPUT MNS IF NECESSARY

        nodata_mns = getNodataValueImage(image_mns_cut)
        pixelNodataCount = countPixelsOfValue(image_mns_cut, nodata_mns)

        if pixelNodataCount > 0 :

            if debug >= 3:
                print(bold + green +  "createMnh() : " + endC + "Fill the holes MNS for  %s" %(image_mns_cut) + endC)

            # Rasterize the footprint vector to create a mask used to fill the MNS holes
            rasterizeBinaryVector(vector_emprise_input, image_mns_cut, image_emprise_mns_mask, 1, CODAGE_8B)

            # Use SAGA to fill the holes
            fillNodata(image_mns_cut, image_emprise_mns_mask, image_mns_clean, save_results_intermediate)

            if debug >= 3:
                print(bold + green +  "\ncreateMnh() : " + endC + "Fill the holes MNS to %s completed" %(image_mns_clean) + endC)

        else :
            image_mns_clean = image_mns_cut
            if debug >= 3:
                print(bold + green +  "createMnh() : " + endC + "Fill the holes not necessary MNS for %s" %(image_mns_cut) + endC)

        # RESAMPLE THE MNT FILE ONTO THE MNS GRID

        # Command performing the geometry alignment / resampling
        command = "otbcli_Superimpose -inr " + image_mns_clean + " -inm " + image_mnt_clean + " -mode " + mode_interpolation + " -interpolator " + method_interpolation + " -out " + image_mnt_clean_sample

        if method_interpolation.lower() == 'bco' :
            command += " -interpolator.bco.radius " + str(interpolation_bco_radius)
        if ram_otb > 0:
            command += " -ram %d" %(ram_otb)

        if debug >= 3:
            print(cyan + "createMnh() : " + bold + green + "Réechantillonage du fichier %s par rapport à la reference %s" %(image_mnt_clean, image_mns_clean) + endC)
            print(command)

        exit_code = os.system(command)
        if exit_code != 0:
            print(command)
            raise NameError (cyan + "createMnh() : " + bold + red + "!!! Une erreur c'est produite au cours du superimpose de l'image : " + image_mnt_input + ". Voir message d'erreur." + endC)

        # BURN THE ROAD VECTOR DATA INTO THE MNH

        if debug >= 3:
            print(bold + green +  "createMnh() : " + endC + "Use BD road to clean MNH"  + endC)

        # Create a filtering mask for the road data (for instance from an NDVI raster)
        if image_threshold_input != "" :
            if not cutImageByVector(vector_emprise_input, image_threshold_input, image_threshold_cut, None, None, no_data_value, epsg, format_raster, format_vector) :
                raise NameError (cyan + "createMnh() : " + bold + red + "!!! Une erreur c'est produite au cours du decoupage de l'image : " + image_threshold_input + ". Voir message d'erreur." + endC)
            createBinaryMask(image_threshold_cut, image_threshold_mask, threshold_bd_value, False, CODAGE_8B)

        # Run createMacroSamples to produce a raster mask from the road data
        if bd_road_vector_input_list != [] :
            createMacroSamples(image_mns_clean, vector_emprise_input, vector_bd_road_temp, raster_bd_road_mask, bd_road_vector_input_list, bd_road_buff_list, sql_road_expression_list, path_time_log, basename_mnh, simplify_vector_param, format_vector, extension_vector, save_results_intermediate, overwrite)

            if debug >= 3:
                print(bold + green +  "\ncreateMnh() : " + endC + "File raster from BD road is create %s" %(raster_bd_road_mask) + endC)

        # MNH COMPUTATION

        # BandMath expression subtracting MNT from MNS, adding the bias, and clamping
        # non-positive heights to the small PRECISION value instead of 0
        delta = ""
        if height_bias > 0 :
            delta = "+%s" %(str(height_bias))
        elif height_bias < 0 :
            delta = "-%s" %(str(abs(height_bias)))
        else :
            delta = ""

        # Expression definition (with or without road mask / threshold mask inputs)
        if bd_road_vector_input_list != [] :
            if image_threshold_input != "" :
                expression = "\"im3b1 > 0 and im4b1 > 0?%s:(im1b1-im2b1%s) > 0.0?im1b1-im2b1%s:%s\"" %(str(PRECISION), delta, delta, str(PRECISION))
                command = "otbcli_BandMath -il %s %s %s %s -out %s %s -exp %s" %(image_mns_clean, image_mnt_clean_sample, raster_bd_road_mask, image_threshold_mask, image_mnh_tmp, CODAGE_F, expression)
            else :
                expression = "\"im3b1 > 0?%s:(im1b1-im2b1%s) > 0.0?im1b1-im2b1%s:%s\"" %(str(PRECISION), delta, delta, str(PRECISION))
                command = "otbcli_BandMath -il %s %s %s -out %s %s -exp %s" %(image_mns_clean, image_mnt_clean_sample, raster_bd_road_mask, image_mnh_tmp, CODAGE_F, expression)
        else :
            expression = "\"(im1b1-im2b1%s) > 0.0?im1b1-im2b1%s:%s\"" %(delta, delta, str(PRECISION))
            command = "otbcli_BandMath -il %s %s -out %s %s -exp %s" %(image_mns_clean, image_mnt_clean_sample, image_mnh_tmp, CODAGE_F, expression)

        if ram_otb > 0:
            command += " -ram %d" %(ram_otb)

        if debug >= 3:
            print(cyan + "createMnh() : " + bold + green + "Calcul du MNH  %s difference du MNS : %s par le MNT :%s" %(image_mnh_tmp, image_mns_clean, image_mnt_clean_sample) + endC)
            print(command)

        exitCode = os.system(command)
        if exitCode != 0:
            print(command)
            raise NameError(cyan + "createMnh() : " + bold + red + "An error occured during otbcli_BandMath command to compute MNH " + image_mnh_tmp + ". See error message above." + endC)

        # CUT THE MNH

        # Without building data, the road-cleaned MNH is directly the final output
        if bd_build_vector_input_list == []:
            image_mnh_road = image_mnh_output

        if debug >= 3:
            print(bold + green +  "createMnh() : " + endC + "Decoupage selon l'emprise du fichier mnh %s " %(image_mnh_tmp) + endC)

        # Cut the MNH
        # BUGFIX: the error message used to report image_mns_input instead of the file actually being cut
        if not cutImageByVector(vector_emprise_input, image_mnh_tmp, image_mnh_road, None, None, no_data_value, epsg, format_raster, format_vector) :
            raise NameError (cyan + "createMnh() : " + bold + red + "!!! Une erreur c'est produite au cours du decoupage de l'image : " + image_mnh_tmp + ". Voir message d'erreur." + endC)

        if debug >= 3:
            print(bold + green +  "createMnh() : " + endC + "Decoupage du fichier mnh %s complet" %(image_mnh_road) + endC)

        # BURN THE BUILDING VECTOR DATA INTO THE MNH

        # If requested => list of building vector files given as input
        if bd_build_vector_input_list != []:

            # Cut the exogenous building vectors with the footprint
            vectors_build_cut_list = []
            for vector_build_input in bd_build_vector_input_list :
                vector_name = os.path.splitext(os.path.basename(vector_build_input))[0]
                vector_build_cut = sub_repertory_vector_temp + os.sep + vector_name + SUFFIX_CUT + extension_vector
                vectors_build_cut_list.append(vector_build_cut)
            cutoutVectors(vector_emprise_input, bd_build_vector_input_list, vectors_build_cut_list, format_vector)

            # Merge the cut building vectors
            fusionVectors (vectors_build_cut_list, vector_bd_bati_temp)

            # Vector/raster cross statistics between the merged buildings and the MNH created above
            statisticsVectorRaster(image_mnh_road, vector_bd_bati_temp, "", 1, False, False, True, ['PREC_PLANI','PREC_ALTI','ORIGIN_BAT','median','sum','std','unique','range'], [], {}, path_time_log, True, format_vector, save_results_intermediate, overwrite)

            # Compute the delta_H column between building heights and the mean MNH height under the building
            COLUMN_ID = "ID"
            COLUMN_H_BUILD = "HAUTEUR"
            COLUMN_H_BUILD_MIN = "Z_MIN"
            COLUMN_H_BUILD_MAX = "Z_MAX"
            COLUMN_H_MNH = "mean"
            COLUMN_H_MNH_MIN = "min"
            COLUMN_H_MNH_MAX = "max"
            COLUMN_H_DIFF = "H_diff"

            field_type = ogr.OFTReal
            field_value = 0.0
            field_width = 20
            field_precision = 2
            attribute_name_dico = {}
            attribute_name_dico[COLUMN_ID] = ogr.OFTString
            attribute_name_dico[COLUMN_H_BUILD] = ogr.OFTReal
            attribute_name_dico[COLUMN_H_MNH] = ogr.OFTReal

            # Add the new H_diff column
            addNewFieldVector(vector_bd_bati_temp, COLUMN_H_DIFF, field_type, field_value, field_width, field_precision, format_vector)

            # Read the building and MNH height values from the vector
            data_z_dico = getAttributeValues(vector_bd_bati_temp, None, None, attribute_name_dico, format_vector)

            # Compute the absolute difference between building height and MNH height
            field_new_values_dico = {}
            for index in range(len(data_z_dico[COLUMN_ID])) :
                index_polygon = data_z_dico[COLUMN_ID][index]
                delta_h = abs(data_z_dico[COLUMN_H_BUILD][index] - data_z_dico[COLUMN_H_MNH][index])
                field_new_values_dico[index_polygon] = {COLUMN_H_DIFF:delta_h}

            # Update the H_diff column in the vector
            setAttributeIndexValuesList(vector_bd_bati_temp, COLUMN_ID, field_new_values_dico, format_vector)

            # Remove all building polygons whose delta H value is below threshold_delta_h
            column = "'%s, %s, %s, %s, %s, %s, %s, %s'"% (COLUMN_ID, COLUMN_H_BUILD, COLUMN_H_BUILD_MIN, COLUMN_H_BUILD_MAX, COLUMN_H_MNH, COLUMN_H_MNH_MIN, COLUMN_H_MNH_MAX, COLUMN_H_DIFF)
            expression = "%s > %s" % (COLUMN_H_DIFF, threshold_delta_h)
            filterSelectDataVector(vector_bd_bati_temp, vector_bd_bati, column, expression, overwrite, format_vector)

            # Warning!!!! PAUSE for manual sorting/checking of building polygons already present in the MNH or not
            if not automatic :
                print(bold + blue +  "Application MnhCreation => " + endC + "Vérification manuelle du vecteur bati %s pour ne concerver que les batis non présent dans le MNH courant %s" %(vector_bd_bati_temp, image_mnh_road) + endC)
                input(bold + red + "Appuyez sur entree pour continuer le programme..." + endC)

            # Create the building mask with building height as pixel value
            rasterizeVector(vector_bd_bati, raster_bd_bati, image_mnh_road, COLUMN_H_BUILD)

            # Merge the building mask with the temporary MNH
            expression = "\"im1b1 > 0.0?im1b1:im2b1\""
            command = "otbcli_BandMath -il %s %s -out %s %s -exp %s" %(raster_bd_bati, image_mnh_road, image_mnh_output, CODAGE_F, expression)

            if ram_otb > 0:
                command += " -ram %d" %(ram_otb)

            if debug >= 3:
                print(cyan + "createMnh() : " + bold + green + "Amelioration du MNH  %s ajout des hauteurs des batis %s" %(image_mnh_road, raster_bd_bati) + endC)
                print(command)

            exitCode = os.system(command)
            if exitCode != 0:
                print(command)
                raise NameError(cyan + "createMnh() : " + bold + red + "An error occured during otbcli_BandMath command to compute MNH Final" + image_mnh_output + ". See error message above." + endC)

    # REMOVE USELESS INTERMEDIATE FILES

    # Delete intermediate files
    if not save_results_intermediate :
        if bd_build_vector_input_list != []:
            removeFile(image_mnh_road)
        # BUGFIX: image_threshold_cut / image_threshold_mask are only defined when a
        # threshold image was supplied; removing them unconditionally raised NameError
        if image_threshold_input != "" :
            removeFile(image_threshold_cut)
            removeFile(image_threshold_mask)
        removeFile(raster_bd_bati)
        removeVectorFile(vector_bd_road_temp)
        removeVectorFile(vector_bd_bati_temp)
        removeVectorFile(vector_bd_bati) # To be confirmed!!!
        removeFile(raster_bd_road_mask)
        removeFile(image_mnh_tmp)
        deleteDir(sub_repertory_raster_temp)
        deleteDir(sub_repertory_vector_temp)

    print(endC)
    print(bold + green + "## END : MNH CREATION" + endC)
    print(endC)

    # Update the log
    ending_event = "createMnh() : MNH creation ending : "
    timeLine(path_time_log,ending_event)

    return
Ejemplo n.º 10
0
def writeQualityIndicatorsToCsvFile(
        class_count, precision_list, recall_list, fscore_list,
        performance_list, TFP_class_list, TFN_class_list, quantity_rate_list,
        class_list, overall_accuracy, overall_fscore, overall_performance,
        kappa, indicators_output_file, overwrite, textures_list):
    """
    Append classification quality indicators to a CSV-like text file.

    Writes per-class rows (precision, recall, F1-score, performance, TFP, TFN,
    quantity rate) followed by the overall indicators (accuracy, F1-score,
    performance, kappa). When textures_list is provided, a texture header line
    is written first; when it is None, any pre-existing output file is removed
    before writing. If the file exists and overwrite is False, nothing is done.
    """
    if os.path.isfile(indicators_output_file) and not overwrite:
        # Output already present and overwrite disabled: leave it untouched
        print(cyan + "writeQualityIndicatorsToCsvFile() : " + bold + yellow +
              "Result file quality indicators exists." + '\n' + endC)
    else:

        if textures_list is None:
            # No texture info: start from a fresh file, ignoring a missing one
            try:
                removeFile(indicators_output_file)
            except Exception:
                pass
        else:
            # Texture info available: write its description as a header line
            texture_name, channel, radius = (
                str(value) for value in textures_list[0][:3])
            text_texture = "Name : %s ; Channel : %s ; Radius : %s \n" % (
                texture_name, channel, radius)
            appendTextFile(indicators_output_file, text_texture)

        print(cyan + "writeQualityIndicatorsToCsvFile() : " + bold + green +
              "Writing file quality indicators..." + '\n' + endC)

        # Overall indicators block
        text_indicators = "Overall Accuracy ; Overall F1-Score ; Overall Performance  ;  Kappa  \n  %f  ;  %f  ;  %f  ;  %f  \n\n" % (
            overall_accuracy, overall_fscore, overall_performance, kappa)

        # One (header, format, values) entry per output row
        row_specs = [
            (" Class  ", "%d", class_list),
            (" Precision  ", "%f", precision_list),
            (" Recall  ", "%f", recall_list),
            (" F1-Scores ", "%f", fscore_list),
            (" Performance ", "%f", performance_list),
            (" TFP_class ", "%f", TFP_class_list),
            (" TFN_class ", "%f", TFN_class_list),
            (" Quantity_rate ", "%f", quantity_rate_list),
        ]
        row_texts = [
            header + "".join(" ; " + fmt % values[i]
                             for i in range(class_count))
            for header, fmt, values in row_specs
        ]

        text_File_list = "\n".join(row_texts) + "\n\n" + text_indicators + "\n"

        try:
            appendTextFile(indicators_output_file, text_File_list)

        except Exception:
            raise NameError(
                cyan + "writeQualityIndicatorsToCsvFile() : " + bold + red +
                "An error occured during writing " + indicators_output_file +
                " file quality indicators. See error message above." + endC)
    print(cyan + "writeQualityIndicatorsToCsvFile() : " + bold + green +
          "Quality indicators writed on file" + '\n' + endC)
    return
Ejemplo n.º 11
0
def addDataBaseExo(image_input,
                   image_classif_add_output,
                   class_file_dico,
                   class_buffer_dico,
                   class_sql_dico,
                   path_time_log,
                   format_vector='ESRI Shapefile',
                   extension_raster=".tif",
                   extension_vector=".shp",
                   save_results_intermediate=False,
                   overwrite=True,
                   simplifie_param=10.0):

    # Mise à jour du Log
    starting_event = "addDataBaseExo() : Add data base exogene to classification starting : "
    timeLine(path_time_log, starting_event)

    # Print
    if debug >= 3:
        print(bold + green + "Variables dans la fonction" + endC)
        print(cyan + "addDataBaseExo() : " + endC + "image_input : " +
              str(image_input) + endC)
        print(cyan + "addDataBaseExo() : " + endC +
              "image_classif_add_output : " + str(image_classif_add_output) +
              endC)
        print(cyan + "addDataBaseExo() : " + endC + "class_file_dico : " +
              str(class_file_dico) + endC)
        print(cyan + "addDataBaseExo() : " + endC + "class_buffer_dico : " +
              str(class_buffer_dico) + endC)
        print(cyan + "addDataBaseExo() : " + endC + "class_sql_dico : " +
              str(class_sql_dico) + endC)
        print(cyan + "addDataBaseExo() : " + endC + "path_time_log : " +
              str(path_time_log) + endC)
        print(cyan + "addDataBaseExo() : " + endC + "format_vector : " +
              str(format_vector) + endC)
        print(cyan + "addDataBaseExo() : " + endC + "extension_raster : " +
              str(extension_raster) + endC)
        print(cyan + "addDataBaseExo() : " + endC + "extension_vector : " +
              str(extension_vector) + endC)
        print(cyan + "addDataBaseExo() : " + endC +
              "save_results_intermediate : " + str(save_results_intermediate) +
              endC)
        print(cyan + "addDataBaseExo() : " + endC + "overwrite : " +
              str(overwrite) + endC)

    # Constantes
    FOLDER_MASK_TEMP = 'Mask_'
    FOLDER_FILTERING_TEMP = 'Filt_'
    FOLDER_CUTTING_TEMP = 'Cut_'
    FOLDER_BUFF_TEMP = 'Buff_'

    SUFFIX_MASK_CRUDE = '_mcrude'
    SUFFIX_MASK = '_mask'
    SUFFIX_FUSION = '_info'
    SUFFIX_VECTOR_FILTER = "_filt"
    SUFFIX_VECTOR_CUT = '_decoup'
    SUFFIX_VECTOR_BUFF = '_buff'

    CODAGE = "uint16"

    # ETAPE 1 : NETTOYER LES DONNEES EXISTANTES
    if debug >= 2:
        print(cyan + "addDataBaseExo() : " + bold + green +
              "NETTOYAGE ESPACE DE TRAVAIL..." + endC)

    # Nom de base de l'image
    image_name = os.path.splitext(os.path.basename(image_input))[0]

    # Nettoyage d'anciennes données résultat

    # Si le fichier résultat existent deja et que overwrite n'est pas activé
    check = os.path.isfile(image_classif_add_output)
    if check and not overwrite:
        print(bold + yellow + "addDataBaseExo() : " + endC +
              image_classif_add_output +
              " has already added bd exo and will not be added again." + endC)
    else:
        if check:
            try:
                removeFile(image_classif_add_output
                           )  # Tentative de suppression du fichier
            except Exception:
                pass  # Si le fichier ne peut pas être supprimé, on suppose qu'il n'existe pas et on passe à la suite

        # Définition des répertoires temporaires
        repertory_output = os.path.dirname(image_classif_add_output)
        repertory_mask_temp = repertory_output + os.sep + FOLDER_MASK_TEMP + image_name
        repertory_samples_filtering_temp = repertory_output + os.sep + FOLDER_FILTERING_TEMP + image_name
        repertory_samples_cutting_temp = repertory_output + os.sep + FOLDER_CUTTING_TEMP + image_name
        repertory_samples_buff_temp = repertory_output + os.sep + FOLDER_BUFF_TEMP + image_name

        if debug >= 4:
            print(repertory_mask_temp)
            print(repertory_samples_filtering_temp)
            print(repertory_samples_cutting_temp)
            print(repertory_samples_buff_temp)

        # Creer les répertoires temporaire si ils n'existent pas
        if not os.path.isdir(repertory_output):
            os.makedirs(repertory_output)
        if not os.path.isdir(repertory_mask_temp):
            os.makedirs(repertory_mask_temp)
        if not os.path.isdir(repertory_samples_filtering_temp):
            os.makedirs(repertory_samples_filtering_temp)
        if not os.path.isdir(repertory_samples_cutting_temp):
            os.makedirs(repertory_samples_cutting_temp)
        if not os.path.isdir(repertory_samples_buff_temp):
            os.makedirs(repertory_samples_buff_temp)

        # Nettoyer les répertoires temporaire si ils ne sont pas vide
        cleanTempData(repertory_mask_temp)
        cleanTempData(repertory_samples_filtering_temp)
        cleanTempData(repertory_samples_cutting_temp)
        cleanTempData(repertory_samples_buff_temp)

        if debug >= 2:
            print(cyan + "addDataBaseExo() : " + bold + green +
                  "... FIN NETTOYAGE" + endC)

        # ETAPE 2 : CREER UN SHAPE DE DECOUPE

        if debug >= 2:
            print(cyan + "addDataBaseExo() : " + bold + green +
                  "SHAPE DE DECOUPE..." + endC)

        # 2.1 : Création des masques délimitant l'emprise de la zone par image

        vector_mask = repertory_mask_temp + os.sep + image_name + SUFFIX_MASK_CRUDE + extension_vector
        createVectorMask(image_input, vector_mask)

        # 2.2 : Simplification du masque global

        vector_simple_mask_cut = repertory_mask_temp + os.sep + image_name + SUFFIX_MASK + extension_vector
        simplifyVector(vector_mask, vector_simple_mask_cut, simplifie_param,
                       format_vector)

        if debug >= 2:
            print(cyan + "addDataBaseExo() : " + bold + green +
                  "...FIN SHAPE DE DECOUPEE" + endC)

        # ETAPE 3 : DECOUPER BUFFERISER LES VECTEURS ET FUSIONNER

        if debug >= 2:
            print(cyan + "addDataBaseExo() : " + bold + green +
                  "MISE EN PLACE DES TAMPONS..." + endC)

        image_combined_list = []
        # Parcours du dictionnaire associant les macroclasses aux noms de fichiers
        for macroclass_label in class_file_dico:
            vector_fusion_list = []
            for index_info in range(len(class_file_dico[macroclass_label])):
                input_vector = class_file_dico[macroclass_label][index_info]
                vector_name = os.path.splitext(
                    os.path.basename(input_vector))[0]
                output_vector_filtered = repertory_samples_filtering_temp + os.sep + vector_name + SUFFIX_VECTOR_FILTER + extension_vector
                output_vector_cut = repertory_samples_cutting_temp + os.sep + vector_name + SUFFIX_VECTOR_CUT + extension_vector
                output_vector_buff = repertory_samples_buff_temp + os.sep + vector_name + SUFFIX_VECTOR_BUFF + extension_vector
                sql_expression = class_sql_dico[macroclass_label][index_info]
                buffer_str = class_buffer_dico[macroclass_label][index_info]
                buff = 0.0
                col_name_buf = ""
                try:
                    buff = float(buffer_str)
                except:
                    col_name_buf = buffer_str
                    print(
                        cyan + "addDataBaseExo() : " + bold + green +
                        "Pas de valeur buffer mais un nom de colonne pour les valeur à bufferiser : "
                        + endC + col_name_buf)

                if os.path.isfile(input_vector):
                    if debug >= 3:
                        print(cyan + "addDataBaseExo() : " + endC +
                              "input_vector : " + str(input_vector) + endC)
                        print(cyan + "addDataBaseExo() : " + endC +
                              "output_vector_filtered : " +
                              str(output_vector_filtered) + endC)
                        print(cyan + "addDataBaseExo() : " + endC +
                              "output_vector_cut : " + str(output_vector_cut) +
                              endC)
                        print(cyan + "addDataBaseExo() : " + endC +
                              "output_vector_buff : " +
                              str(output_vector_buff) + endC)
                        print(cyan + "addDataBaseExo() : " + endC + "buff : " +
                              str(buff) + endC)
                        print(cyan + "addDataBaseExo() : " + endC + "sql : " +
                              str(sql_expression) + endC)

                    # 3.0 : Recuperer les vecteurs d'entrée et filtree selon la requete sql par ogr2ogr
                    if sql_expression != "":
                        names_attribut_list = getAttributeNameList(
                            input_vector, format_vector)
                        column = "'"
                        for name_attribut in names_attribut_list:
                            column += name_attribut + ", "
                        column = column[0:len(column) - 2]
                        column += "'"
                        ret = filterSelectDataVector(input_vector,
                                                     output_vector_filtered,
                                                     column, sql_expression,
                                                     format_vector)
                        if not ret:
                            print(
                                cyan + "addDataBaseExo() : " + bold + yellow +
                                "Attention problème lors du filtrage des BD vecteurs l'expression SQL %s est incorrecte"
                                % (sql_expression) + endC)
                            output_vector_filtered = input_vector
                    else:
                        print(cyan + "addDataBaseExo() : " + bold + green +
                              "Pas de filtrage sur le fichier du nom : " +
                              endC + output_vector_filtered)
                        output_vector_filtered = input_vector

                    # 3.1 : Découper le vecteur selon l'empise de l'image d'entrée
                    cutoutVectors(vector_simple_mask_cut,
                                  [output_vector_filtered],
                                  [output_vector_cut], format_vector)

                    # 3.2 : Bufferiser lesvecteurs découpé avec la valeur défini dans le dico ou trouver dans la base du vecteur lui même si le nom de la colonne est passée dans le dico
                    if os.path.isfile(output_vector_cut) and (
                        (buff != 0) or (col_name_buf != "")):
                        bufferVector(output_vector_cut, output_vector_buff,
                                     buff, col_name_buf, 1.0, 10,
                                     format_vector)
                    else:
                        print(cyan + "addDataBaseExo() : " + bold + green +
                              "Pas de buffer sur le fichier du nom : " + endC +
                              output_vector_cut)
                        output_vector_buff = output_vector_cut

                    # 3.3 : Si un shape résulat existe l'ajouté à la liste de fusion
                    if os.path.isfile(output_vector_buff):
                        vector_fusion_list.append(output_vector_buff)
                        if debug >= 3:
                            print("file for fusion : " + output_vector_buff)
                    else:
                        print(bold + yellow +
                              "pas de fichiers avec ce nom : " + endC +
                              output_vector_buff)

                else:
                    print(cyan + "addDataBaseExo() : " + bold + yellow +
                          "Pas de fichier du nom : " + endC + input_vector)

            # 3.4 : Fusionner les shapes transformés d'une même classe, rasterization et labelisations des vecteurs
            # Si une liste de fichier shape existe
            if not vector_fusion_list:
                print(bold + yellow + "Pas de fusion sans donnee a fusionnee" +
                      endC)
            else:
                # Rasterization et BandMath des fichiers shapes
                raster_list = []
                for vector in vector_fusion_list:
                    if debug >= 3:
                        print(cyan + "addDataBaseExo() : " + endC +
                              "Rasterization : " + vector + " label : " +
                              macroclass_label)
                    raster_output = os.path.splitext(
                        vector)[0] + extension_raster

                    # Rasterisation
                    rasterizeBinaryVector(vector, image_input, raster_output,
                                          macroclass_label, CODAGE)
                    raster_list.append(raster_output)

                if debug >= 3:
                    print(cyan + "addDataBaseExo() : " + endC +
                          "nombre d'images a combiner : " +
                          str(len(raster_list)))

                # Liste les images raster combined and sample
                image_combined = repertory_output + os.sep + image_name + '_' + str(
                    macroclass_label) + SUFFIX_FUSION + extension_raster
                image_combined_list.append(image_combined)

                # Fusion des images raster en une seule
                mergeListRaster(raster_list, image_combined, CODAGE)

        if debug >= 2:
            print(cyan + "addDataBaseExo() : " + bold + green +
                  "FIN DE L AFFECTATION DES TAMPONS" + endC)

        # ETAPE 4 : ASSEMBLAGE DE L'IMAGE CLASSEE ET DES BD EXOS
        if debug >= 2:
            print(cyan + "addDataBaseExo() : " + bold + green +
                  "ASSEMBLAGE..." + endC)

        # Ajout de l'image de classification a la liste des image bd conbinées
        image_combined_list.append(image_input)
        # Fusion les images avec la classification
        mergeListRaster(image_combined_list, image_classif_add_output, CODAGE)
        if debug >= 2:
            print(cyan + "addDataBaseExo() : " + bold + green + "FIN" + endC)

    # ETAPE 5 : SUPPRESIONS FICHIERS INTERMEDIAIRES INUTILES

    # Suppression des données intermédiaires
    if not save_results_intermediate:

        image_combined_list.remove(image_input)
        for to_delete in image_combined_list:
            removeFile(to_delete)

        # Suppression des repertoires temporaires
        deleteDir(repertory_mask_temp)
        deleteDir(repertory_samples_filtering_temp)
        deleteDir(repertory_samples_cutting_temp)
        deleteDir(repertory_samples_buff_temp)

    # Mise à jour du Log
    ending_event = "addDataBaseExo() : Add data base exogene to classification ending : "
    timeLine(path_time_log, ending_event)

    return
def reduceChannelsImage(image_stack_input, image_acp_output, path_time_log, method_reduce, nb_components=0, normalize=True, napca_radius=1, ica_iterations=20, ica_increment=1.0, save_results_intermediate=False, overwrite=True):
    """
    Reduce the number of channels of a stacked image with the OTB
    DimensionalityReduction application (PCA / NAPCA / MAF / ICA).

    :param image_stack_input: path to the input stacked image.
    :param image_acp_output: path to the reduced (ACP) output image.
    :param path_time_log: path to the time-log file.
    :param method_reduce: reduction method, one of "PCA", "NAPCA", "MAF", "ICA".
    :param nb_components: number of components to keep (0 = OTB default).
    :param normalize: normalize the data before the reduction if True.
    :param napca_radius: x/y radius used by the NAPCA method.
    :param ica_iterations: number of iterations for the ICA method.
    :param ica_increment: mu increment for the ICA method.
    :param save_results_intermediate: keep the XML/CSV side products if True.
    :param overwrite: recompute an already existing output image if True.
    :raises NameError: if the OTB command exits with a non-zero status.
    """

    # Log the start of the processing
    timeLine(path_time_log, "reduceChannelsImage() : Reduction channels starting : ")

    print(endC)
    print(bold + green + "## START : CHANNELS REDUCTION ALGO" + endC)
    print(endC)

    # File extensions of the OTB side products
    EXT_XML = ".xml"
    EXT_CSV = ".csv"

    # Output dynamic used by the OTB rescale options
    output_min_value = 1
    output_max_value = 65536

    # Side-product file names are derived from the output image path
    repertory_output = os.path.dirname(image_acp_output)
    image_name = os.path.splitext(os.path.basename(image_acp_output))[0]
    parameters_output = repertory_output + os.sep + image_name + EXT_XML
    matrix_output = repertory_output + os.sep + image_name + EXT_CSV

    if debug >= 3:
        print(bold + green + "Variables dans la fonction" + endC)
        # Dump every input parameter of the function
        for param_name, param_value in (("image_stack_input", image_stack_input),
                                        ("image_acp_output", image_acp_output),
                                        ("path_time_log", path_time_log),
                                        ("method_reduce", method_reduce),
                                        ("nb_components", nb_components),
                                        ("normalize", normalize),
                                        ("napca_radius", napca_radius),
                                        ("ica_iterations", ica_iterations),
                                        ("ica_increment", ica_increment),
                                        ("save_results_intermediate", save_results_intermediate),
                                        ("overwrite", overwrite)):
            print(cyan + "reduceChannelsImage() : " + endC + param_name + " : " + str(param_value) + endC)

    # Skip the computation when the output already exists and overwrite is disabled
    output_exists = os.path.isfile(image_acp_output)
    if output_exists and not overwrite:
        print(bold + yellow + "ACP image already exists and will not be calculated again." + endC)
    else:
        if output_exists:
            try:
                removeFile(image_acp_output)
            except Exception:
                pass  # the file cannot be removed if it no longer exists: step ignored

        # Select the reduction method and its OTB parameters [PCA/NAPCA/MAF/ICA]
        # (project-local switch/case idiom, kept as used in the rest of the file)
        method_and_parametres = ""
        while switch(method_reduce):
            if case("PCA"):
                method_and_parametres = "pca"
                break
            if case("NAPCA"):
                method_and_parametres = "napca -method.napca.radiusx %d -method.napca.radiusy %d "%(napca_radius,napca_radius)
                break
            if case("MAF"):
                method_and_parametres = "maf"
                break
            if case("ICA"):
                method_and_parametres = "ica -method.ica.ite %d -method.ica.mu %f "%(ica_iterations,ica_increment)
                break
            break

        if debug >= 1:
            print(cyan + "reduceChannelsImage() : " + bold + green + "Debut de la reduction de dimension avec l'algorithme " + method_and_parametres + endC)

        # Run the OTB dimensionality-reduction application
        command = "otbcli_DimensionalityReduction -in %s -out %s -rescale.outmax %d -rescale.outmin %d -method %s -nbcomp %d -normalize %s -outmatrix %s -outxml %s" %(image_stack_input, image_acp_output, output_max_value, output_min_value, method_and_parametres, nb_components, str(normalize).lower(), matrix_output, parameters_output)

        if debug >= 4:
            print("Execution de la commande : %s " %(command))

        if os.system(command) != 0:
            raise NameError(cyan + "reduceChannelsImage() : " + bold + red + "An error occured during otbcli_DimensionalityReduction command. See error message above." + endC)

        if debug >= 1:
            print(cyan + "reduceChannelsImage() : " + bold + green + "Reduction de dimension terminée." + endC)

    # Remove the XML and CSV side products unless they must be kept
    if not save_results_intermediate:
        for side_product in (parameters_output, matrix_output):
            if os.path.isfile(side_product):
                removeFile(side_product)

    print(endC)
    print(bold + green + "## END : CHANNELS REDUCTION ALGO" + endC)
    print(endC)

    # Log the end of the processing
    timeLine(path_time_log, "reduceChannelsImage() : Reduction channels ending : ")

    return
def applyKmeansMasks(image_input, mask_samples_macro_input_list, image_samples_merged_output, proposal_table_output, micro_samples_images_output_list, centroids_files_output_list, macroclass_sampling_list, macroclass_labels_list, no_data_value, path_time_log, kmeans_param_maximum_iterations=200, kmeans_param_training_set_size_weight=1, kmeans_param_minimum_training_set_size=-1, rate_clean_micro_class=0.0, rand_otb=0, ram_otb=0, number_of_actives_pixels_threshold=200, extension_raster=".tif", save_results_intermediate=False, overwrite=True):
    """
    Subsample each macroclass sample mask into microclasses with a Kmeans
    classification (one thread per macroclass), then optionally merge the
    micro-sample rasters into a single image and write a proposal table
    listing the microclasses considered too small to keep.

    :param image_input: input satellite image used by the Kmeans.
    :param mask_samples_macro_input_list: one sample-mask raster per macroclass.
    :param image_samples_merged_output: merged micro-samples output image ("" = no merge).
    :param proposal_table_output: proposal table output file ("" = none);
                                  only produced when a merge is requested.
    :param micro_samples_images_output_list: micro-sample output image per
                                             macroclass ([] = auto-named).
    :param centroids_files_output_list: centroids output file per macroclass
                                        ([] = auto-named).
    :param macroclass_sampling_list: number of microclasses per macroclass.
    :param macroclass_labels_list: label of each macroclass (e.g. 11000).
    :param no_data_value: nodata value of the input image.
    :param path_time_log: path to the time-log file.
    :param kmeans_param_maximum_iterations: maximum number of Kmeans iterations.
    :param kmeans_param_training_set_size_weight: weight applied to the training set size.
    :param kmeans_param_minimum_training_set_size: minimum training set size (-1 = automatic).
    :param rate_clean_micro_class: percentage of the mean point count under which
                                   a microclass is proposed for deletion.
    :param rand_otb: random seed passed to OTB.
    :param ram_otb: total RAM available for OTB, split across the threads.
    :param number_of_actives_pixels_threshold: minimum number of active pixels
                                               required to run a Kmeans.
    :param extension_raster: raster file extension (default ".tif").
    :param save_results_intermediate: keep intermediate files if True.
    :param overwrite: recompute already existing outputs if True.
    """

    # Update log
    starting_event = "applyKmeansMasks() : Kmeans and mask starting : "
    timeLine(path_time_log,starting_event)

    print(endC)
    print(cyan + "applyKmeansMasks() : " + bold + green + "## START : SUBSAMPLING OF " + str(macroclass_labels_list) + endC)
    print(endC)

    if debug >= 2:
        print(cyan + "applyKmeansMasks() : variables dans la fonction" + endC)
        print(cyan + "applyKmeansMasks() : " + endC + "image_input : " + str(image_input) + endC)
        print(cyan + "applyKmeansMasks() : " + endC + "image_samples_merged_output : " + str(image_samples_merged_output) + endC)
        print(cyan + "applyKmeansMasks() : " + endC + "proposal_table_output : " + str(proposal_table_output) + endC)
        print(cyan + "applyKmeansMasks() : " + endC + "mask_samples_macro_input_list : " + str(mask_samples_macro_input_list) + endC)
        print(cyan + "applyKmeansMasks() : " + endC + "micro_samples_images_output_list : " + str(micro_samples_images_output_list) + endC)
        print(cyan + "applyKmeansMasks() : " + endC + "centroids_files_output_list : " + str(centroids_files_output_list) + endC)
        print(cyan + "applyKmeansMasks() : " + endC + "macroclass_sampling_list : " + str(macroclass_sampling_list) + endC)
        print(cyan + "applyKmeansMasks() : " + endC + "macroclass_labels_list : " + str(macroclass_labels_list) + endC)
        print(cyan + "applyKmeansMasks() : " + endC + "kmeans_param_maximum_iterations : " + str(kmeans_param_maximum_iterations) + endC)
        print(cyan + "applyKmeansMasks() : " + endC + "kmeans_param_training_set_size_weight : " + str(kmeans_param_training_set_size_weight) + endC)
        print(cyan + "applyKmeansMasks() : " + endC + "kmeans_param_minimum_training_set_size : " + str(kmeans_param_minimum_training_set_size) + endC)
        # Fixed: the two lines below were missing the trailing endC color reset
        print(cyan + "applyKmeansMasks() : " + endC + "rate_clean_micro_class : " + str(rate_clean_micro_class) + endC)
        print(cyan + "applyKmeansMasks() : " + endC + "no_data_value : " + str(no_data_value) + endC)
        print(cyan + "applyKmeansMasks() : " + endC + "rand_otb : " + str(rand_otb) + endC)
        print(cyan + "applyKmeansMasks() : " + endC + "ram_otb : " + str(ram_otb) + endC)
        print(cyan + "applyKmeansMasks() : " + endC + "number_of_actives_pixels_threshold : " + str(number_of_actives_pixels_threshold) + endC)
        print(cyan + "applyKmeansMasks() : " + endC + "extension_raster : " + str(extension_raster) + endC)
        print(cyan + "applyKmeansMasks() : " + endC + "save_results_intermediate : " + str(save_results_intermediate) + endC)
        print(cyan + "applyKmeansMasks() : " + endC + "overwrite : " + str(overwrite) + endC)

    # Constants
    HEADER_TABLEAU_MODIF = "MICROCLASSE;TRAITEMENT\n"

    CODAGE_16B = "uint16"
    CODAGE_8B = "uint8"
    EXT_XML = ".xml"

    SUFFIX_MASK_CLEAN = "_clean"
    SUFFIX_SAMPLE_MICRO = "_sample_micro"
    SUFFIX_STATISTICS = "_statistics"
    SUFFIX_CENTROID = "_centroid"
    SUFFIX_MASK_TEMP = "_tmp"

    # Create the temporary output file names when they are not specified
    # -------------------------------------------------------------------------

    length_mask = len(mask_samples_macro_input_list)
    images_mask_cleaned_list = []
    temporary_files_list = []
    micro_samples_images_list = []
    centroids_files_list = []
    repertory_output_tmp_list = []

    # The base output directory comes from the merged image if requested,
    # otherwise from the first micro-sample output image
    if image_samples_merged_output != "" :
        repertory_base_output = os.path.dirname(image_samples_merged_output)
        filename = os.path.splitext(os.path.basename(image_samples_merged_output))[0]
    else :
        repertory_base_output = os.path.dirname(micro_samples_images_output_list[0])
        filename = os.path.splitext(os.path.basename(micro_samples_images_output_list[0]))[0]

    file_statistic_points = repertory_base_output + os.sep + filename + SUFFIX_STATISTICS + EXT_XML

    # One sub-directory per macroclass, holding its temporary and output files
    for macroclass_id in range(length_mask):

        repertory_output = repertory_base_output + os.sep + str(macroclass_labels_list[macroclass_id])
        if not os.path.isdir(repertory_output):
            os.makedirs(repertory_output)
        repertory_output_tmp_list.append(repertory_output)
        samples_image_input = mask_samples_macro_input_list[macroclass_id]
        filename = os.path.splitext(os.path.basename(samples_image_input))[0]
        image_mask_cleaned = repertory_output + os.sep + filename + SUFFIX_MASK_CLEAN + extension_raster
        images_mask_cleaned_list.append(image_mask_cleaned)
        image_tmp = repertory_output + os.sep + filename + SUFFIX_MASK_TEMP + extension_raster
        temporary_files_list.append(image_tmp)
        if micro_samples_images_output_list == [] :
            micro_samples_image = repertory_output + os.sep + filename + SUFFIX_SAMPLE_MICRO + extension_raster
        else :
            micro_samples_image = micro_samples_images_output_list[macroclass_id]
        micro_samples_images_list.append(micro_samples_image)
        if centroids_files_output_list == [] :
            centroids_file = repertory_output + os.sep + filename + SUFFIX_CENTROID + extension_raster
        else :
            centroids_file = centroids_files_output_list[macroclass_id]
        centroids_files_list.append(centroids_file)

    # Clean pixels that overlap on several images
    #-----------------------------------------------------

    if length_mask > 1:
        image_name = os.path.splitext(os.path.basename(image_input))[0]
        deletePixelsSuperpositionMasks(mask_samples_macro_input_list, images_mask_cleaned_list, image_name, CODAGE_8B)
    else:
        images_mask_cleaned_list = mask_samples_macro_input_list

    # Run the Kmeans for each macroclass
    #--------------------------------------------

    # Thread list for the multi-threaded Kmeans runs
    thread_list = []

    for macroclass_id in range(length_mask):

        mask_sample_input = images_mask_cleaned_list[macroclass_id]
        micro_samples_image = micro_samples_images_list[macroclass_id]
        image_tmp = temporary_files_list[macroclass_id]
        centroids_file = centroids_files_list[macroclass_id]
        check = os.path.isfile(micro_samples_image)

        if check and not overwrite : # If an output file with the same name already exists and overwrite is disabled, skip to the next classification
            print(cyan + "applyKmeansMasks() : " + bold + yellow +  "Computing kmeans from %s with %s already done : no actualisation" % (image_input, mask_sample_input) + endC)

        else:  # Otherwise, run a Kmeans

            if check :
                removeFile(micro_samples_image)   # Remove the pre-existing output file

            print(cyan + "applyKmeansMasks() : " + bold + green + "Computing kmeans from %s with %s ; output image is %s" %(image_input, mask_sample_input,micro_samples_image) + endC)

            # Number of microclasses and macroclass label
            number_of_classes = macroclass_sampling_list[macroclass_id]   # Number of microclasses
            label = macroclass_labels_list[macroclass_id]                 # Macroclass label, e.g. 11000

            # Launch the Kmeans computation in its own thread; RAM is shared between threads
            thread = threading.Thread(target=computeKmeans, args=(image_input, mask_sample_input, image_tmp, micro_samples_image, centroids_file, label, number_of_classes, macroclass_id, number_of_actives_pixels_threshold, kmeans_param_minimum_training_set_size, kmeans_param_maximum_iterations, length_mask, no_data_value, rand_otb, int(ram_otb/length_mask), CODAGE_8B, CODAGE_16B, save_results_intermediate, overwrite))
            thread.start()
            thread_list.append(thread)

    # Wait for the Kmeans of all macroclasses to complete
    try:
        for thread in thread_list:
            thread.join()
    # Fixed: was a bare 'except:', which also swallowed KeyboardInterrupt/SystemExit
    except Exception:
        print(cyan + "applyKmeansMasks() : " + bold + red + "applyKmeansMasks() : " + endC + "Erreur lors du calcul du kmeans : impossible de demarrer le thread" + endC, file=sys.stderr)

    # Merge the micro samples
    #------------------------------
    if image_samples_merged_output != "" :

        mergeListRaster(micro_samples_images_list, image_samples_merged_output, CODAGE_16B)
        updateReferenceProjection(image_input, image_samples_merged_output)

        # Build the proposal table and the statistics file
        #--------------------------------------------------------------
        if proposal_table_output != "" :

            suppress_micro_class_list = []
            info_micoclass_nbpoints_dico = {}
            nb_points_total = 0
            nb_points_medium = 0

            # List the identifiers of the available microclasses (0 = background)
            id_micro_list = identifyPixelValues(image_samples_merged_output)
            if 0 in id_micro_list :
                id_micro_list.remove(0)
            nb_micr_class = len(id_micro_list)

            # Count the pixels of every microclass
            for id_micro in id_micro_list :
                nb_pixels = countPixelsOfValue(image_samples_merged_output, id_micro)

                info_micoclass_nbpoints_dico[id_micro] = nb_pixels
                nb_points_total += nb_pixels

            # Mean number of points per microclass, and minimum acceptable count
            if nb_micr_class != 0 :
                nb_points_medium = int(nb_points_total / nb_micr_class)
            nb_points_min = int((nb_points_medium * rate_clean_micro_class) / 100)

            # Identify the microclasses that are too small
            if debug >= 4:
                print("rate_clean_micro_class = " + str(rate_clean_micro_class))
                print("nb_points_medium = " + str(nb_points_medium))
                print("nb_points_min = " + str(nb_points_min))

            # Write the statistics file header
            writeTextFile(file_statistic_points, '<?xml version="1.0" ?>\n')
            appendTextFileCR(file_statistic_points, '<GeneralStatistics>')
            appendTextFileCR(file_statistic_points, '    <Statistic name="pointsPerClassRaw">')

            for micro_class_id in info_micoclass_nbpoints_dico :
                nb_points = info_micoclass_nbpoints_dico[micro_class_id]
                if debug >= 4:
                    print("micro_class_id = " + str(micro_class_id) + ", nb_points = " + str(nb_points))
                appendTextFileCR(file_statistic_points, '        <StatisticPoints class="%d" value="%d" />' %(micro_class_id, nb_points))

                if nb_points < nb_points_min :
                    # Microclass proposed for deletion
                    suppress_micro_class_list.append(micro_class_id)

            # Close the statistics file
            appendTextFileCR(file_statistic_points, '    </Statistic>')
            appendTextFileCR(file_statistic_points, '</GeneralStatistics>')

            # Check whether an already existing proposal table may be overwritten
            check = os.path.isfile(proposal_table_output)
            if check and not overwrite :
                print(cyan + "applyKmeansMasks() : " + bold + yellow + "Modifier table already exists." + '\n' + endC)
            else:
                # Try to delete the existing file
                try:
                    removeFile(proposal_table_output)
                except Exception:
                    pass   # Ignore the exception raised when the file does not exist (and thus cannot be removed)
                # List the microclasses to delete (-1 = deletion)
                text_output = HEADER_TABLEAU_MODIF

                for micro_class_del in suppress_micro_class_list:
                    text_output += "%d;-1\n" %(micro_class_del)

                # Write the reassignment proposal file
                writeTextFile(proposal_table_output, text_output)

    # Remove useless intermediate files
    #---------------------------------------------

    if not save_results_intermediate:
        for macroclass_id in range(length_mask):
            if (os.path.isfile(temporary_files_list[macroclass_id])) :
                removeFile(temporary_files_list[macroclass_id])

            if (length_mask > 1) and (os.path.isfile(images_mask_cleaned_list[macroclass_id])) :
                removeFile(images_mask_cleaned_list[macroclass_id])

            if (micro_samples_images_output_list == []) and (os.path.isfile(micro_samples_images_list[macroclass_id])) :
                removeFile(micro_samples_images_list[macroclass_id])

            if (centroids_files_output_list == []) and (os.path.isfile(centroids_files_list[macroclass_id])) :
                removeFile(centroids_files_list[macroclass_id])

            if os.path.isdir(repertory_output_tmp_list[macroclass_id]) :
                removeDir(repertory_output_tmp_list[macroclass_id])

    print(cyan + "applyKmeansMasks() : " + bold + green + "## END : KMEANS CLASSIFICATION" + endC)
    print(endC)

    # Update log
    ending_event = "applyKmeansMasks() : Kmeans and mask ending : "
    timeLine(path_time_log,ending_event)

    return
def binaryMaskVect(input_image,
                   output_dir,
                   threshold,
                   input_cut_vector,
                   attributes_list,
                   no_data_value,
                   epsg,
                   format_raster="GTiff",
                   format_vector="ESRI Shapefile",
                   extension_raster=".tif",
                   extension_vector=".shp",
                   save_results_intermediate=False,
                   overwrite=True):
    """
    Create a vector mask from a raster: threshold the input image into a
    binary raster, optionally cut it along a vector footprint, polygonize
    the result and add the requested attribute fields.

    :param input_image: path to the input raster image.
    :param output_dir: output directory (created if missing).
    :param threshold: threshold value used to binarize the input image.
    :param input_cut_vector: vector used to cut the binary raster ("" = no cut).
    :param attributes_list: attribute descriptors (with .name, .ogrType,
                            .value, .width) added to the output vector.
    :param no_data_value: nodata value used while cutting the raster.
    :param epsg: EPSG code used while cutting the raster.
    :param format_raster: GDAL raster format (default "GTiff").
    :param format_vector: OGR vector format (default "ESRI Shapefile").
    :param extension_raster: raster file extension (default ".tif").
    :param extension_vector: vector file extension (default ".shp").
    :param save_results_intermediate: keep intermediate rasters if True.
    :param overwrite: recompute an already existing output vector if True.
    :return: path to the created (or already existing) mask vector file.
    """

    # Display the parameters
    if debug >= 3:
        print(bold + green +
              "Variables dans le binaryMaskVect - Variables générales" + endC)
        print(cyan + "binaryMaskVect() : " + endC + "input_image : " +
              str(input_image) + endC)
        print(cyan + "binaryMaskVect() : " + endC + "output_dir : " +
              str(output_dir) + endC)
        print(cyan + "binaryMaskVect() : " + endC + "threshold : " +
              str(threshold) + endC)
        print(cyan + "binaryMaskVect() : " + endC + "input_cut_vector : " +
              str(input_cut_vector) + endC)
        # Previously missing from the parameter dump:
        print(cyan + "binaryMaskVect() : " + endC + "attributes_list : " +
              str(attributes_list) + endC)
        print(cyan + "binaryMaskVect() : " + endC + "no_data_value : " +
              str(no_data_value) + endC)
        print(cyan + "binaryMaskVect() : " + endC + "epsg : " +
              str(epsg) + endC)
        print(cyan + "binaryMaskVect() : " + endC + "format_raster : " +
              str(format_raster) + endC)
        print(cyan + "binaryMaskVect() : " + endC + "format_vector : " +
              str(format_vector) + endC)
        print(cyan + "binaryMaskVect() : " + endC + "extension_raster : " +
              str(extension_raster) + endC)
        print(cyan + "binaryMaskVect() : " + endC + "extension_vector : " +
              str(extension_vector) + endC)
        print(cyan + "binaryMaskVect() : " + endC +
              "save_results_intermediate : " + str(save_results_intermediate) +
              endC)
        print(cyan + "binaryMaskVect() : " + endC + "overwrite : " +
              str(overwrite) + endC)

    # Intermediate and output file names carry the threshold value
    image_name = os.path.splitext(os.path.basename(input_image))[0]
    binary_mask = output_dir + os.sep + "bin_mask_" + image_name + "_" + str(
        threshold).replace('.', '_') + extension_raster
    binary_mask_decoup = output_dir + os.sep + "bin_mask_decoup_" + image_name + "_" + str(
        threshold).replace('.', '_') + extension_raster
    binary_mask_vector = output_dir + os.sep + "bin_mask_vect_" + image_name + "_" + str(
        threshold).replace('.', '_') + extension_vector

    # Create the output directory if it does not exist yet
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # Remove stale temporary files from a previous run
    if os.path.exists(binary_mask):
        removeFile(binary_mask)

    if os.path.exists(binary_mask_decoup):
        removeFile(binary_mask_decoup)

    # Reuse or replace an already existing output vector depending on 'overwrite'
    if os.path.exists(binary_mask_vector):
        if overwrite:
            removeVectorFile(binary_mask_vector, format_vector)
        else:
            return binary_mask_vector

    # Build the binary mask raster from the threshold
    createBinaryMask(input_image, binary_mask, threshold, False)

    if input_cut_vector != "":
        # Cut the binary raster along the footprint vector
        cutImageByVector(input_cut_vector, binary_mask, binary_mask_decoup,
                         None, None, no_data_value, epsg, format_raster,
                         format_vector)
    else:
        # No cut requested: use the binary mask as-is
        binary_mask_decoup = binary_mask

    # Polygonize the (possibly cut) binary raster
    polygonizeRaster(binary_mask_decoup, binary_mask_vector, image_name, "id",
                     format_vector)

    # Add the requested attribute fields to the created vector file
    for attribute in attributes_list:
        addNewFieldVector(binary_mask_vector, attribute.name,
                          attribute.ogrType, attribute.value, attribute.width,
                          None, format_vector)

    # Remove useless intermediate files
    if not save_results_intermediate:
        removeFile(binary_mask)
        # When no cut vector was given, binary_mask_decoup aliases binary_mask:
        # guard against removing the same file twice.
        if binary_mask_decoup != binary_mask:
            removeFile(binary_mask_decoup)

    return binary_mask_vector
def occupationIndicator(input_grid,
                        output_grid,
                        class_label_dico_out,
                        input_vector_classif,
                        field_classif_name,
                        input_soil_occupation,
                        input_height_model,
                        class_build_list,
                        class_road_list,
                        class_baresoil_list,
                        class_water_list,
                        class_vegetation_list,
                        class_high_vegetation_list,
                        class_low_vegetation_list,
                        epsg=2154,
                        no_data_value=0,
                        format_raster='GTiff',
                        format_vector='ESRI Shapefile',
                        extension_raster='.tif',
                        extension_vector='.shp',
                        path_time_log='',
                        save_results_intermediate=False,
                        overwrite=True):
    """
    Compute land-cover (OCS) class-rate indicators on a vector grid.

    Pipeline (3 steps after preparation):
      0/3 - optionally rasterize a vector classification, then reallocate the
            input OCS raster classes into macro classes (built, mineral,
            baresoil, water, vegetation — optionally split high/low);
      1/3 - compute per-cell class-rate statistics on the grid;
      2/3 - compute the "vegetation height" indicator from the height model;
      3/3 - compute the majority-class indicator (only if a height model is
            provided).

    Parameters:
        input_grid : path to the input vector grid.
        output_grid : path to the output vector grid (indicators added).
        class_label_dico_out : label dictionary used for the majority class.
        input_vector_classif : optional vector classification to rasterize
            ("" to use input_soil_occupation directly).
        field_classif_name : attribute field holding the class in
            input_vector_classif.
        input_soil_occupation : OCS raster (also used as rasterization
            reference when input_vector_classif is given).
        input_height_model : height model raster (MNH); "" disables step 3/3.
        class_*_list : lists of input class labels mapped to each macro class.
        epsg, no_data_value, format_raster, format_vector,
        extension_raster, extension_vector : usual georeferencing/IO options.
        path_time_log : log file for timeLine() events.
        save_results_intermediate : keep temporary files when True.
        overwrite : when False, raises if the output already exists.

    Returns:
        0 on success.

    Raises:
        NameError : if the output already exists (overwrite=False) or if an
            external command (OTB rasterization, ogr2ogr) fails.
    """

    if debug >= 3:
        print(
            '\n' + bold + green +
            "Calcul d'indicateurs du taux de classes OCS - Variables dans la fonction :"
            + endC)
        print(cyan + "    occupationIndicator() : " + endC + "input_grid : " +
              str(input_grid) + endC)
        print(cyan + "    occupationIndicator() : " + endC + "output_grid : " +
              str(output_grid) + endC)
        print(cyan + "    occupationIndicator() : " + endC +
              "class_label_dico_out : " + str(class_label_dico_out) + endC)
        print(cyan + "    occupationIndicator() : " + endC +
              "input_vector_classif : " + str(input_vector_classif) + endC)
        print(cyan + "    occupationIndicator() : " + endC +
              "field_classif_name : " + str(field_classif_name) + endC)
        print(cyan + "    occupationIndicator() : " + endC +
              "input_soil_occupation : " + str(input_soil_occupation) + endC)
        print(cyan + "    occupationIndicator() : " + endC +
              "input_height_model : " + str(input_height_model) + endC)
        print(cyan + "    occupationIndicator() : " + endC +
              "class_build_list : " + str(class_build_list) + endC)
        print(cyan + "    occupationIndicator() : " + endC +
              "class_road_list : " + str(class_road_list) + endC)
        print(cyan + "    occupationIndicator() : " + endC +
              "class_baresoil_list : " + str(class_baresoil_list) + endC)
        print(cyan + "    occupationIndicator() : " + endC +
              "class_water_list : " + str(class_water_list) + endC)
        print(cyan + "    occupationIndicator() : " + endC +
              "class_vegetation_list : " + str(class_vegetation_list) + endC)
        print(cyan + "    occupationIndicator() : " + endC +
              "class_high_vegetation_list : " +
              str(class_high_vegetation_list) + endC)
        print(cyan + "    occupationIndicator() : " + endC +
              "class_low_vegetation_list : " + str(class_low_vegetation_list) +
              endC)
        print(cyan + "    occupationIndicator() : " + endC + "epsg : " +
              str(epsg) + endC)
        print(cyan + "    occupationIndicator() : " + endC +
              "no_data_value : " + str(no_data_value) + endC)
        print(cyan + "    occupationIndicator() : " + endC +
              "format_raster : " + str(format_raster) + endC)
        print(cyan + "    occupationIndicator() : " + endC +
              "format_vector : " + str(format_vector) + endC)
        print(cyan + "    occupationIndicator() : " + endC +
              "extension_raster : " + str(extension_raster) + endC)
        print(cyan + "    occupationIndicator() : " + endC +
              "extension_vector : " + str(extension_vector) + endC)
        print(cyan + "    occupationIndicator() : " + endC +
              "path_time_log : " + str(path_time_log) + endC)
        print(cyan + "    occupationIndicator() : " + endC +
              "save_results_intermediate : " + str(save_results_intermediate) +
              endC)
        print(cyan + "    occupationIndicator() : " + endC + "overwrite : " +
              str(overwrite) + endC + '\n')

    # Constants: pixel codings and field names
    CODAGE_8BITS = 'uint8'
    CODAGE_FLOAT = 'float'
    NODATA_FIELD = 'nodata'

    PREFIX_S = 'S_'
    SUFFIX_TEMP = '_temp'
    SUFFIX_RASTER = '_raster'
    SUFFIX_HEIGHT = '_height'
    SUFFIX_VEGETATION = '_vegetation'

    VEG_MEAN_FIELD = 'veg_h_mean'
    VEG_MAX_FIELD = 'veg_h_max'
    VEG_RATE_FIELD = 'veg_h_rate'
    MAJ_OCS_FIELD = 'class_OCS'

    # Macro-class field names and raster labels
    BUILT_FIELD, BUILT_LABEL = 'built', 1
    MINERAL_FIELD, MINERAL_LABEL = 'mineral', 2
    BARESOIL_FIELD, BARESOIL_LABEL = 'baresoil', 3
    WATER_FIELD, WATER_LABEL = 'water', 4
    VEGETATION_FIELD, VEGETATION_LABEL = 'veget', 5
    HIGH_VEGETATION_FIELD, HIGH_VEGETATION_LABEL = 'high_veg', 6
    LOW_VEGETATION_FIELD, LOW_VEGETATION_LABEL = 'low_veg', 7

    # Log update
    starting_event = "occupationIndicator() : Début du traitement : "
    timeLine(path_time_log, starting_event)

    print(cyan + "occupationIndicator() : " + bold + green +
          "DEBUT DES TRAITEMENTS" + endC + '\n')

    # Basename variables
    output_grid_basename = os.path.basename(os.path.splitext(output_grid)[0])
    output_grid_dirname = os.path.dirname(output_grid)
    soil_occupation_basename = os.path.basename(
        os.path.splitext(input_soil_occupation)[0])

    # Temporary file paths
    temp_directory = output_grid_dirname + os.sep + output_grid_basename
    temp_grid = temp_directory + os.sep + output_grid_basename + SUFFIX_TEMP + extension_vector
    temp_soil_occupation = temp_directory + os.sep + soil_occupation_basename + SUFFIX_TEMP + SUFFIX_RASTER + extension_raster
    temp_height_vegetation = temp_directory + os.sep + output_grid_basename + SUFFIX_HEIGHT + SUFFIX_VEGETATION + extension_raster

    # Clean up any previous run
    if overwrite:
        if debug >= 3:
            print(cyan + "occupationIndicator() : " + endC +
                  "Nettoyage des traitements précédents." + endC + '\n')
        removeFile(output_grid)
        cleanTempData(temp_directory)
    else:
        if os.path.exists(output_grid):
            raise NameError(
                cyan + "occupationIndicator() : " + bold + yellow +
                "Le fichier de sortie existe déjà et ne sera pas regénéré." +
                endC + '\n')

    #############
    # Step 0/3  # Preparation of the processing
    #############

    print(cyan + "occupationIndicator() : " + bold + green +
          "ETAPE 0/3 - Début de la préparation des traitements." + endC + '\n')

    # Rasterize the classification (OCS) if it is provided as a vector
    if input_vector_classif != "":
        if debug >= 3:
            print(cyan + "occupationIndicator() : " + endC + bold +
                  "Rasterisation de l'OCS vecteur." + endC + '\n')
        reference_image = input_soil_occupation
        soil_occupation_vector_basename = os.path.basename(
            os.path.splitext(input_vector_classif)[0])
        # From here on, input_soil_occupation points to the rasterized version
        input_soil_occupation = temp_directory + os.sep + soil_occupation_vector_basename + SUFFIX_RASTER + extension_raster
        # NOTE(review): shell command built by string interpolation — paths
        # containing spaces or shell metacharacters will break; consider
        # subprocess.run([...]) with an argument list.
        command = "otbcli_Rasterization -in %s -out %s %s -im %s -background 0 -mode attribute -mode.attribute.field %s" % (
            input_vector_classif, input_soil_occupation, CODAGE_8BITS,
            reference_image, field_classif_name)
        if debug >= 3:
            print(command)
        exit_code = os.system(command)
        if exit_code != 0:
            raise NameError(
                cyan + "occupationIndicator() : " + bold + red +
                "Erreur lors de la rasterisation de l'OCS vecteur." + endC)

    # Analyse the OCS raster layer: all pixel values present, and its nodata
    class_other_list = identifyPixelValues(input_soil_occupation)
    no_data_ocs = getNodataValueImage(input_soil_occupation, 1)
    if no_data_ocs is not None:
        no_data_value = no_data_ocs

    # Vegetation is split into high/low only when both lists are provided
    divide_vegetation_classes = bool(class_high_vegetation_list
                                     and class_low_vegetation_list)

    col_to_delete_list = [
        "minority", PREFIX_S + NODATA_FIELD, PREFIX_S + BUILT_FIELD,
        PREFIX_S + MINERAL_FIELD, PREFIX_S + BARESOIL_FIELD,
        PREFIX_S + WATER_FIELD
    ]
    class_label_dico = {
        int(no_data_value): NODATA_FIELD,
        int(BUILT_LABEL): BUILT_FIELD,
        int(MINERAL_LABEL): MINERAL_FIELD,
        int(BARESOIL_LABEL): BARESOIL_FIELD,
        int(WATER_LABEL): WATER_FIELD
    }
    if not divide_vegetation_classes:
        class_label_dico[int(VEGETATION_LABEL)] = VEGETATION_FIELD
        col_to_delete_list.append(PREFIX_S + VEGETATION_FIELD)
    else:
        class_label_dico[int(HIGH_VEGETATION_LABEL)] = HIGH_VEGETATION_FIELD
        class_label_dico[int(LOW_VEGETATION_LABEL)] = LOW_VEGETATION_FIELD
        col_to_delete_list.append(PREFIX_S + HIGH_VEGETATION_FIELD)
        col_to_delete_list.append(PREFIX_S + LOW_VEGETATION_FIELD)

    # Class reallocation: map every input class label to its macro label
    if debug >= 3:
        print(cyan + "occupationIndicator() : " + endC + bold +
              "Reaffectation du raster OCS." + endC + '\n')

    reaff_class_list = []
    macro_reaff_class_list = []

    def _reaffect(label_list, macro_label):
        # Move each label of label_list into the reallocation tables under
        # macro_label, removing it from the "unassigned" class_other_list.
        for label in label_list:
            if label in class_other_list:
                class_other_list.remove(label)
            reaff_class_list.append(label)
            macro_reaff_class_list.append(macro_label)

    _reaffect(class_build_list, BUILT_LABEL)
    _reaffect(class_road_list, MINERAL_LABEL)
    _reaffect(class_baresoil_list, BARESOIL_LABEL)
    _reaffect(class_water_list, WATER_LABEL)

    if not divide_vegetation_classes:
        _reaffect(class_vegetation_list, VEGETATION_LABEL)
    else:
        _reaffect(class_high_vegetation_list, HIGH_VEGETATION_LABEL)
        _reaffect(class_low_vegetation_list, LOW_VEGETATION_LABEL)

    # Any remaining (unused) pixel value is reallocated to nodata
    for label in class_other_list:
        reaff_class_list.append(label)
        macro_reaff_class_list.append(no_data_value)

    reallocateClassRaster(input_soil_occupation, temp_soil_occupation,
                          reaff_class_list, macro_reaff_class_list,
                          CODAGE_8BITS)

    print(cyan + "occupationIndicator() : " + bold + green +
          "ETAPE 0/3 - Fin de la préparation des traitements." + endC + '\n')

    #############
    # Step 1/3  # OCS class-rate indicators
    #############

    print(
        cyan + "occupationIndicator() : " + bold + green +
        "ETAPE 1/3 - Début du calcul des indicateurs de taux de classes OCS." +
        endC + '\n')

    if debug >= 3:
        print(cyan + "occupationIndicator() : " + endC + bold +
              "Calcul des indicateurs de taux de classes OCS." + endC + '\n')

    statisticsVectorRaster(temp_soil_occupation, input_grid, temp_grid, 1,
                           True, True, False, col_to_delete_list, [],
                           class_label_dico, path_time_log, True,
                           format_vector, save_results_intermediate, overwrite)

    # Merge high/low vegetation rates into a single vegetation rate when the
    # classes are split (the decision tree needs the global vegetation rate)
    if divide_vegetation_classes:
        temp_grid_v2 = os.path.splitext(
            temp_grid)[0] + "_v2" + extension_vector
        sql_statement = "SELECT *, (%s + %s) AS %s FROM %s" % (
            HIGH_VEGETATION_FIELD, LOW_VEGETATION_FIELD, VEGETATION_FIELD,
            os.path.splitext(os.path.basename(temp_grid))[0])
        # Fix: the exit code of ogr2ogr was previously ignored — a failure
        # here silently produced an empty/missing grid for the next steps
        exit_code = os.system("ogr2ogr -sql '%s' -dialect SQLITE %s %s" %
                              (sql_statement, temp_grid_v2, temp_grid))
        if exit_code != 0:
            raise NameError(
                cyan + "occupationIndicator() : " + bold + red +
                "Erreur lors de la fusion des classes de végétation." + endC)
        removeVectorFile(temp_grid, format_vector=format_vector)
        copyVectorFile(temp_grid_v2, temp_grid, format_vector=format_vector)

    print(cyan + "occupationIndicator() : " + bold + green +
          "ETAPE 1/3 - Fin du calcul des indicateurs de taux de classes OCS." +
          endC + '\n')

    #############
    # Step 2/3  # "Vegetation height" indicator
    #############

    print(
        cyan + "occupationIndicator() : " + bold + green +
        "ETAPE 2/3 - Début du calcul de l'indicateur de \"hauteur de végétation\"."
        + endC + '\n')

    computeVegetationHeight(
        temp_grid, output_grid, temp_soil_occupation, input_height_model,
        temp_height_vegetation, divide_vegetation_classes, VEGETATION_LABEL,
        HIGH_VEGETATION_LABEL, LOW_VEGETATION_LABEL, HIGH_VEGETATION_FIELD,
        LOW_VEGETATION_FIELD, VEG_MEAN_FIELD, VEG_MAX_FIELD, VEG_RATE_FIELD,
        CODAGE_FLOAT, SUFFIX_TEMP, no_data_value, format_vector, path_time_log,
        save_results_intermediate, overwrite)

    print(
        cyan + "occupationIndicator() : " + bold + green +
        "ETAPE 2/3 - Fin du calcul de l'indicateur de \"hauteur de végétation\"."
        + endC + '\n')

    #############
    # Step 3/3  # Majority-class indicator
    #############

    print(
        cyan + "occupationIndicator() : " + bold + green +
        "ETAPE 3/3 - Début du calcul de l'indicateur de classe majoritaire." +
        endC + '\n')

    # The majority class needs the height model (MNH); skip otherwise
    if input_height_model != "":
        computeMajorityClass(output_grid, temp_directory, NODATA_FIELD,
                             BUILT_FIELD, MINERAL_FIELD, BARESOIL_FIELD,
                             WATER_FIELD, VEGETATION_FIELD,
                             HIGH_VEGETATION_FIELD, LOW_VEGETATION_FIELD,
                             MAJ_OCS_FIELD, VEG_MEAN_FIELD,
                             class_label_dico_out, format_vector,
                             extension_vector, overwrite)
    else:
        print(
            cyan + "occupationIndicator() : " + bold + yellow +
            "Pas de calcul de l'indicateur de classe majoritaire demandé (pas de MNH en entrée)."
            + endC + '\n')

    print(cyan + "occupationIndicator() : " + bold + green +
          "ETAPE 3/3 - Fin du calcul de l'indicateur de classe majoritaire." +
          endC + '\n')

    ####################################################################

    # Remove temporary files
    if not save_results_intermediate:
        if debug >= 3:
            print(cyan + "occupationIndicator() : " + endC +
                  "Suppression des fichiers temporaires." + endC + '\n')
        deleteDir(temp_directory)

    print(cyan + "occupationIndicator() : " + bold + green +
          "FIN DES TRAITEMENTS" + endC + '\n')

    # Log update
    ending_event = "occupationIndicator() : Fin du traitement : "
    timeLine(path_time_log, ending_event)

    return 0
Ejemplo n.º 16
0
def convertImage(image_input, huit_bits_image, image_compressed, need_8bits, need_compress, compress_type, predictor, zlevel, suppr_min, suppr_max, need_optimize8b, need_rvb, path_time_log, format_raster='GTiff', extension_raster=".tif", save_results_intermediate=False, overwrite=True):
    """
    Convert a raster image: optional conversion to 8 bits, then optional
    compression.

    Parameters:
        image_input : path to the input raster.
        huit_bits_image : path of the 8-bits intermediate/output image.
        image_compressed : path of the compressed output image (ignored and
            replaced by huit_bits_image when need_compress is False).
        need_8bits : convert the input to 8 bits when True.
        need_compress : compress the (possibly converted) image when True.
        compress_type, predictor, zlevel : compression options passed to
            compressImage().
        suppr_min, suppr_max : percentage of lowest/highest input values
            clipped during the 8-bits conversion.
        need_optimize8b : optimize the 8-bits conversion when True.
        need_rvb : produce an RGB output when True.
        path_time_log : log file for timeLine() events.
        format_raster : output raster format (default 'GTiff').
        extension_raster : raster file extension (default '.tif').
        save_results_intermediate : keep the temporary directory when True.
        overwrite : when False, skip the work if the output already exists.

    Returns:
        None.
    """

    # Log update
    starting_event = "convertImage() : conversion image starting : "
    timeLine(path_time_log,starting_event)

    # Display the parameters
    if debug >= 3:
        print(cyan + "convertImage() : " + endC + "image_input: ",image_input)
        print(cyan + "convertImage() : " + endC + "huit_bits_image: ",huit_bits_image)
        print(cyan + "convertImage() : " + endC + "image_compressed: ",image_compressed)
        print(cyan + "convertImage() : " + endC + "need_8bits: ",need_8bits)
        print(cyan + "convertImage() : " + endC + "need_compress: ",need_compress)
        print(cyan + "convertImage() : " + endC + "compress_type: ",compress_type)
        print(cyan + "convertImage() : " + endC + "predictor: ",predictor)
        print(cyan + "convertImage() : " + endC + "zlevel: ",zlevel)
        print(cyan + "convertImage() : " + endC + "suppr_min: ",suppr_min)
        print(cyan + "convertImage() : " + endC + "suppr_max: ",suppr_max)
        # Fix: need_optimize8b was the only parameter missing from this dump
        print(cyan + "convertImage() : " + endC + "need_optimize8b: ",need_optimize8b)
        print(cyan + "convertImage() : " + endC + "need_rvb: ",need_rvb)
        print(cyan + "convertImage() : " + endC + "path_time_log: ",path_time_log)
        print(cyan + "convertImage() : " + endC + "format_raster : " + str(format_raster) + endC)
        print(cyan + "convertImage() : " + endC + "extension_raster : " + str(extension_raster) + endC)
        print(cyan + "convertImage() : " + endC + "save_results_intermediate: ",save_results_intermediate)
        print(cyan + "convertImage() : " + endC + "overwrite: ",overwrite)

    # Constants
    FOLDER_TEMP = 'Tmp_'

    # Working directories
    image_name = os.path.splitext(os.path.basename(image_input))[0]

    # Without compression, the 8-bits image is the final output
    if not need_compress:
        image_compressed = huit_bits_image

    # repertory_tmp: directory holding the temporary images
    repertory_tmp = os.path.dirname(image_compressed) + os.sep + FOLDER_TEMP + image_name
    if not os.path.isdir(repertory_tmp):
        os.makedirs(repertory_tmp)

    # Fix: log prefix was "ImageCompression : ", inconsistent with every other
    # message of this function
    print(cyan + "convertImage() : " + endC + "Dossier de travail temporaire: ",repertory_tmp)

    # Execution banner
    print(endC)
    print(bold + green + "# DEBUT DE LA CONVERSION DE L'IMAGE %s" %(image_input) + endC)
    print(endC)

    if debug >= 1:
        print(cyan + "convertImage() : " + endC + "%s pourcents des petites valeurs initiales et %s pourcents des grandes valeurs initiales seront supprimees" %(suppr_min,suppr_max))

    # Check whether the output image already exists
    check = os.path.isfile(image_compressed)

    # If it exists and overwrite is disabled, skip the conversion
    if check and not overwrite :
        print(cyan + "convertImage() : " + bold + yellow + "Image have already been converted." + endC)
    else:
        # Best-effort removal of a stale output file
        try:
            removeFile(image_compressed)
        except Exception:
            # Ignore the error raised when the file does not exist
            pass

        ###########################################################
        #   Conversion to 8 bits                                  #
        ###########################################################
        if need_8bits:
            convertion8Bits(image_input, huit_bits_image, repertory_tmp, need_optimize8b, need_rvb, suppr_min, suppr_max, extension_raster)
            image_to_compress = huit_bits_image
        else :
            image_to_compress = image_input

        ###########################################################
        #   Compression                                           #
        ###########################################################
        if need_compress:
            compressImage(image_to_compress, image_compressed, compress_type, predictor, zlevel, format_raster)

    ###########################################################
    #   Cleanup of the temporary directory                    #
    ###########################################################
    # Fix: the directory was only removed when need_compress was set, which
    # leaked the temporary directory in the 8-bits-only case
    if not save_results_intermediate and os.path.isdir(repertory_tmp):
        shutil.rmtree(repertory_tmp)
        if debug >= 1:
            print(bold + green + "Suppression du dossier temporaire : " + repertory_tmp + endC)

    if debug >= 1:
        print(cyan + "convertImage() : " + endC + "Fin de la conversion de %s" %(image_input))

    print(endC)
    if need_compress:
        print(bold + green + "# FIN DE LA CONVERSION DE L'IMAGE %s" %(image_input) + endC)
    print(endC)

    # Log update
    ending_event = "convertImage() : conversion image ending : "
    timeLine(path_time_log,ending_event)
    return
Ejemplo n.º 17
0
def classRasterSubSampling(satellite_image_input, classified_image_input, image_output, table_reallocation, sub_sampling_number, no_data_value, path_time_log, rand_otb=0, ram_otb=0, number_of_actives_pixels_threshold=8000, extension_raster=".tif", save_results_intermediate=False, overwrite=True) :

    # Mise à jour du Log
    starting_event = "classRasterSubSampling() : Micro class subsampling on classification image starting : "
    timeLine(path_time_log,starting_event)

    if debug >= 3:
       print(cyan + "classRasterSubSampling() : " + endC + "satellite_image_input : " +  str(satellite_image_input) + endC)
       print(cyan + "classRasterSubSampling() : " + endC + "classified_image_input : " +  str(classified_image_input) + endC)
       print(cyan + "classRasterSubSampling() : " + endC + "image_output : " + str(image_output) + endC)
       print(cyan + "classRasterSubSampling() : " + endC + "table_reallocation : " + str(table_reallocation) + endC)
       print(cyan + "classRasterSubSampling() : " + endC + "sub_sampling_number : " + str(sub_sampling_number) + endC)
       print(cyan + "classRasterSubSampling() : " + endC + "no_data_value : " + str(no_data_value) + endC)
       print(cyan + "classRasterSubSampling() : " + endC + "path_time_log : " + str(path_time_log) + endC)
       print(cyan + "classRasterSubSampling() : " + endC + "rand_otb : " + str(rand_otb) + endC)
       print(cyan + "classRasterSubSampling() : " + endC + "ram_otb : " + str(ram_otb) + endC)
       print(cyan + "classRasterSubSampling() : " + endC + "number_of_actives_pixels_threshold : " + str(number_of_actives_pixels_threshold) + endC)
       print(cyan + "classRasterSubSampling() : " + endC + "extension_raster : " + str(extension_raster) + endC)
       print(cyan + "classRasterSubSampling() : " + endC + "save_results_intermediate : " + str(save_results_intermediate) + endC)
       print(cyan + "classRasterSubSampling() : " + endC + "overwrite : " + str(overwrite) + endC)

    # Constantes
    CODAGE = "uint16"
    CODAGE_8B = "uint8"
    TEMP = "TempSubSampling_"
    MASK_SUF = "_Mask"
    SUB_SAMPLE_SUF = "_SubSampled"
    CENTROID_SUF = "_Centroids"
    TEMP_OUT = "_temp_out"
    EXTENSION_TXT = ".txt"

    # Contenu de la nouvelle table
    text_new_table = ""

    # CREATION DES NOMS DE CHEMINS UTILES
    name = os.path.splitext(os.path.basename(image_output))[0]
    input_classified_image_path = os.path.dirname(classified_image_input)                      # Ex : D2_Par_Zone/Paysage_01/Corr_2/Resultats/Temp/
    temp_sub_sampling_path = input_classified_image_path + os.sep + TEMP + name + os.sep       # Dossier contenant les fichiers temporaires de cette brique. Ex : D2_Par_Zone/Paysage_01/Corr_2/Resultats/Temp/Temp_Sub_Sampling/
    input_classified_image_complete_name = os.path.basename(classified_image_input)            # Ex : Paysage_01_raw.tif
    input_classified_image_name = os.path.splitext(input_classified_image_complete_name)[0]    # Ex : Paysage_01_raw
    input_classified_image_extend = os.path.splitext(input_classified_image_complete_name)[1]  # Ex : .tif
    image_output_temp = os.path.splitext(image_output)[0] + TEMP_OUT + extension_raster        # Ex : D2_Par_Zone/Paysage_01/Corr_2/Resultats/Temp/Temp_Sub_Sampling/Paysage_01_raw_temp.tif

    # Création de temp_sub_sampling_path s'il n'existe pas
    if not os.path.isdir(os.path.dirname(temp_sub_sampling_path)) :
        os.makedirs(os.path.dirname(temp_sub_sampling_path))

    print(cyan + "classRasterSubSampling() : " + bold + green + "START ...\n" + endC)

    # Lecture du fichier table de proposition
    supp_class_list, reaff_class_list, macro_reaff_class_list, sub_sampling_class_list, sub_sampling_number_list = readReallocationTable(table_reallocation, sub_sampling_number)      # Fonction de Lib_text
    info_table_list = readTextFileBySeparator(table_reallocation, "\n")

    # Recherche de la liste des micro classes contenu dans le fichier de classification d'entrée
    class_values_list = identifyPixelValues(classified_image_input)

    # Supression dans la table des lignes correspondant aux actions "-2"
    for ligne_table in info_table_list:
        if not "-2" in ligne_table[0]:
            text_new_table += str(ligne_table[0]) + "\n"

    if debug >= 3:
        print("supp_class_list : " + str(supp_class_list))
        print("reaff_class_list : " + str(reaff_class_list))
        print("macro_reaff_class_list : " + str(macro_reaff_class_list))
        print("sub_sampling_class_list : " + str(sub_sampling_class_list))
        print("sub_sampling_number_list : " + str(sub_sampling_number_list))

    # Dans cettre brique, on ne s'intéresse qu'à la partie sous echantillonage
    # Gestion du cas de suppression
    if len(supp_class_list) > 0:
        print(cyan + "classRasterSubSampling() : " + bold + yellow + "ATTENTION : Les classes ne sont pas supprimees pour le fichier classification format raster." + '\n' + endC)

    # Gestion du cas de réaffectation
    if len(reaff_class_list) > 0:
         print(cyan + "classRasterSubSampling() : " + bold + yellow + "ATTENTION : la brique SpecificSubSampling ne traite pas les reaffectation. A l'issue de cette brique, verifier la table de reallocation et executer la brique de reallocation." + '\n' + endC)

    if len(sub_sampling_class_list) > 0 :

        if debug >= 3:
           print(cyan + "classRasterSubSampling() : " + bold + green + "DEBUT DU SOUS ECHANTILLONAGE DES CLASSES %s " %(sub_sampling_class_list) + endC)

        # Parcours des classes à sous échantilloner
        processing_pass_first = False
        for idx_class in range(len(sub_sampling_class_list)) :

            # INITIALISATION DU TRAITEMENT DE LA CLASSE

            # Classe à sous échantilloner. Ex : 21008
            class_to_sub_sample = sub_sampling_class_list[idx_class]
            if idx_class == 0 or not processing_pass_first :
                # Image à reclassifier : classified_image_input au premier tour
                image_to_sub_sample = classified_image_input
            else :
                # Image à reclassifier : la sortie de la boucle précédente ensuite
                image_to_sub_sample = image_output

            # determiner le label disponible de la classe
            base_subclass_label = int(class_to_sub_sample/100)*100
            subclass_label = base_subclass_label
            for class_value in class_values_list:
                if (class_value > subclass_label) and (class_value < base_subclass_label + 100) :
                    subclass_label = class_value
            subclass_label += 1
            # subclass_label = int(class_to_sub_sample/100)*100 + 20 + class_to_sub_sample%20 * 5
            # Label de départ des sous classes. Formule proposée : 3 premiers chiffres de class_to_sub_sample puis ajout de 20 + 5 * class_to_sub_sample modulo 20. Ex : 21000 -> 21020, 21001-> 21025, 21002-> 21030 etc...
            # Part du principe qu'il y a moins de 20 micro classes et que chacune est sous échantillonnée au maximum en 5 sous parties. Si ce n'est pas le cas : A ADAPTER

            number_of_sub_samples = sub_sampling_number_list[idx_class]    # Nombre de sous classes demandées pour le sous échantillonage de class_to_sub_sample. Ex : 4
            class_mask_raster = temp_sub_sampling_path + input_classified_image_name + "_" + str(class_to_sub_sample) + MASK_SUF + input_classified_image_extend    # Ex : D2_Par_Zone/Paysage_01/Corr_2/Resultats/Temp/Temp_Sub_Sampling/Paysage_01_raw_21008_Mask.tif
            class_subsampled_raster = temp_sub_sampling_path + input_classified_image_name + "_" + str(class_to_sub_sample) + SUB_SAMPLE_SUF + input_classified_image_extend  # Ex : D2_Par_Zone/Paysage_01/Corr_2/Resultats/Temp/Temp_Sub_Sampling/Paysage_01_raw_21008_SubSampled.tif
            centroid_file = temp_sub_sampling_path + input_classified_image_name + "_" + str(class_to_sub_sample) + CENTROID_SUF + EXTENSION_TXT  # Ex : D2_Par_Zone/Paysage_01/Corr_2/Resultats/Temp/Temp_Sub_Sampling/Paysage_01_raw_21008_Centroid.txt

            if debug >= 5:
                print(cyan + "classRasterSubSampling() : " + endC + "class_to_sub_sample :" , class_to_sub_sample)
                print(cyan + "classRasterSubSampling() : " + endC + "subclass_label :" , subclass_label)
                print(cyan + "classRasterSubSampling() : " + endC + "number_of_sub_samples :" , number_of_sub_samples)
                print(cyan + "classRasterSubSampling() : " + endC + "class_mask_raster :" , class_mask_raster)
                print(cyan + "classRasterSubSampling() : " + endC + "class_subsampled_raster :" , class_subsampled_raster)
                print(cyan + "classRasterSubSampling() : " + endC + "centroid_file :" , centroid_file)

            if debug >= 3:
                print(cyan + "classRasterSubSampling() : " + bold + green + "CLASSE %s/%s : SOUS ECHANTILLONAGE DE %s EN %s CLASSES " %(idx_class+1, len(sub_sampling_class_list), class_to_sub_sample, number_of_sub_samples) + endC)

            # ETAPE 1/5 : EXTRACTION DU MASQUE BINAIRE DES PIXELS CORRESPONDANT A LA CLASSE
            expression_masque = "\"im1b1 == %s? 1 : 0\"" %(class_to_sub_sample)
            command = "otbcli_BandMath -il %s -out %s %s -exp %s" %(classified_image_input, class_mask_raster, CODAGE_8B, expression_masque)

            if debug >=2:
                print("\n" + cyan + "classRasterSubSampling() : " + bold + green + "CLASSE %s/%s - ETAPE 1/5 : Debut de l extraction du masque binaire de la classe %s" %(idx_class+1, len(sub_sampling_class_list),class_to_sub_sample) + endC)
                print(command)

            os.system(command)

            if debug >=2:
                print(cyan + "classRasterSubSampling() : " + bold + green + "CLASSE %s/%s - ETAPE 1/5 : Fin de l extraction du masque binaire de la classe %s, disponible ici : %s" %(idx_class+1, len(sub_sampling_class_list),class_to_sub_sample, class_mask_raster) + endC)

            # TEST POUR SAVOIR SI ON EST EN CAPACITE D'EFFECTUER LE KMEANS
            number_of_actives_pixels = countPixelsOfValue(class_mask_raster, 1)  # Comptage du nombre de pixels disponibles pour effectuer le kmeans
            if number_of_actives_pixels > (number_of_sub_samples * number_of_actives_pixels_threshold) :    # Cas où il y a plus de pixels disponibles pour effectuer le kmeans que le seuil

                # ETAPE 2/5 : CLASSIFICATION NON SUPERVISEE DES PIXELS CORRESPONDANT A LA CLASSE
                if debug >= 3:
                    print("\n" + cyan + "classRasterSubSampling() : " + bold + green + "CLASSE %s/%s - ETAPE 2/5 : Il y a assez de pixels pour faire le sous echantillonage :  %s sur %s requis au minimum " %(idx_class+1, len(sub_sampling_class_list), number_of_actives_pixels, int(number_of_sub_samples) * number_of_actives_pixels_threshold) + endC)
                if debug >=2:
                    print("\n" + cyan + "classRasterSubSampling() : " + bold + green + "CLASSE %s/%s - ETAPE 2/5 : Debut du sous echantillonage par classification non supervisee en %s classes " %(idx_class+1, len(sub_sampling_class_list), number_of_sub_samples) + endC)

                # appel du kmeans
                input_mask_list = []
                input_mask_list.append(class_mask_raster)
                output_masked_image_list = []
                output_masked_image_list.append(class_subsampled_raster)
                output_centroids_files_list = []
                output_centroids_files_list.append(centroid_file)
                macroclass_sampling_list = []
                macroclass_sampling_list.append(number_of_sub_samples)
                macroclass_labels_list = []
                macroclass_labels_list.append(subclass_label)
                applyKmeansMasks(satellite_image_input, input_mask_list, "", "", output_masked_image_list, output_centroids_files_list, macroclass_sampling_list, macroclass_labels_list, no_data_value, path_time_log, 200, 1, -1, 0.0, rand_otb, ram_otb, number_of_actives_pixels_threshold, extension_raster, save_results_intermediate, overwrite)

                if debug >=2:
                    print(cyan + "classRasterSubSampling() : " + bold + green + "CLASSE %s/%s - ETAPE 2/5 : Fin du sous echantillonage par classification non supervisee en %s classes, disponible ici %s : " %(idx_class+1, len(sub_sampling_class_list), number_of_sub_samples, class_subsampled_raster) + endC)

                # ETAPE 3/5 : INTEGRATION DES NOUVELLES SOUS CLASSES DANS LA TABLE DE REALLOCATION
                # Ouveture du fichier table de proposition pour re-ecriture

                for i in range(number_of_sub_samples):
                    class_values_list.append(subclass_label + i)
                    text_new_table += str(subclass_label + i) + ";" + str(subclass_label + i) + "; METTRE A JOUR MANUELLEMENT (origine : " +  str(class_to_sub_sample) + ")" + "\n"

                # ETAPE 4/5 : APPLICATION DU SOUS ECHANTILLONAGE AU RESULTAT DE CLASSIFICATION
                expression_application_sous_echantillonage = "\"im1b1 == %s? im2b1 : im1b1\"" %(class_to_sub_sample)
                command = "otbcli_BandMath -il %s %s -out %s %s -exp %s" %(image_to_sub_sample, class_subsampled_raster, image_output_temp, CODAGE, expression_application_sous_echantillonage)

                if debug >=2:
                    print("\n" + cyan + "classRasterSubSampling() : " + bold + green + "CLASSE %s/%s - ETAPE 4/5 : Debut de l application du sous echantillonage present dans %s sur %s" %(idx_class+1, len(sub_sampling_class_list), class_subsampled_raster, classified_image_input) + endC)
                    print(command)

                os.system(command)

                if debug >=2:
                    print(cyan + "classRasterSubSampling() : " + bold + green + "CLASSE %s/%s - ETAPE 4/5 : Fin de l application du sous echantillonage present dans %s sur %s, sortie disponible ici : %s" %(idx_class+1, len(sub_sampling_class_list), class_subsampled_raster, classified_image_input, image_output_temp) + endC)

                # ETAPE 5/5 : GESTION DES RENOMMAGES ET SUPPRESSIONS
                if debug >=2:
                    print("\n" + cyan + "classRasterSubSampling() : " + bold + green + "CLASSE %s/%s - ETAPE 5/5 : Debut du renommage et suppression des dossiers intermediaires" %(idx_class+1, len(sub_sampling_class_list)) + endC)

                if debug >=3 :
                    print("\n" + green + "classified image input: %s" %(classified_image_input) + endC)
                    print("\n" + green + "image to sub sample: %s" %(image_to_sub_sample) + endC)
                    print("\n" + green + "image temp : %s" %(image_output_temp) + endC)
                    print("\n" + green + "image output : %s" %(image_output) + endC)

                # Si l'image d'entrée et l'image de sorte sont le même fichier on efface le fichier d'entrée pour le re-creer avec le fichier re-travaillé
                if image_output == classified_image_input and os.path.isfile(classified_image_input) :
                    removeFile(classified_image_input)
                os.rename(image_output_temp,image_output)
                processing_pass_first = True

                # SUPPRESSION DES FICHIERS TEMPORAIRES
                if not save_results_intermediate :
                    if os.path.isfile(class_mask_raster) :
                        removeFile(class_mask_raster)
                    if os.path.isfile(class_subsampled_raster) :
                        removeFile(class_subsampled_raster)
                    if os.path.isfile(centroid_file) :
                        removeFile(centroid_file)

                if debug >=2:
                    print(cyan + "classRasterSubSampling() : " + bold + green + "CLASSE %s/%s - ETAPE 5/5 : Fin du renommage et suppression des dossiers intermediaires" %(idx_class+1, len(sub_sampling_class_list)) + endC)

            else:  # Cas où il n'y a pas assez de pixels pour effectuer le kmeans

                if debug >=2:
                    print("\n" + cyan + "classRasterSubSampling() : " + bold + yellow + "CLASSE %s/%s - ETAPE 2/5 : Nombre insuffisant de pixels disponibles pour appliquer le kmeans : %s sur %s requis au minimum " %(idx_class+1, len(sub_sampling_class_list), number_of_actives_pixels, int(number_of_sub_samples) * number_of_actives_pixels_threshold) + endC)
                    print(cyan + "classRasterSubSampling() : " + bold + yellow + "CLASSE %s/%s - ETAPE 2/5 : SOUS ECHANTILLONAGE NON APPLIQUE A LA CLASSE %s" %(idx_class+1, len(sub_sampling_class_list), class_to_sub_sample) + endC + "\n")

                # MISE A JOUR DU FICHIER image_to_sub_sample
                if idx_class == 0:
                    processing_pass_first = False

                # MISE A JOUR DE LA TABLE DE REALLOCATION
                text_new_table += str(class_to_sub_sample) + ";" + str(class_to_sub_sample) + ";CLASSE TROP PETITE POUR SOUS ECHANTILLONAGE" + "\n"

                # SUPPRESSION DU MASQUE
                if not save_results_intermediate and os.path.isfile(class_mask_raster) :
                    removeFile(class_mask_raster)

    else:
        shutil.copy2(classified_image_input, image_output) # Copie du raster d'entree si pas de sous-echantillonnage

    # Ecriture de la nouvelle table dans le fichier
    writeTextFile(table_reallocation, text_new_table)

    # SUPPRESSION DU DOSSIER ET DES FICHIERS TEMPORAIRES
    if not save_results_intermediate and os.path.isdir(os.path.dirname(temp_sub_sampling_path)) :
        shutil.rmtree(os.path.dirname(temp_sub_sampling_path))

    print(cyan + "classRasterSubSampling() : " + bold + green + "END\n" + endC)

    # Mise à jour du Log
    ending_event = "classRasterSubSampling() : Micro class subsampling on classification image ending : "
    timeLine(path_time_log,ending_event)
    return
def assembleImagePansharpening(image_panchro_input,
                               image_xs_input,
                               image_output,
                               mode_interpolation,
                               method_interpolation,
                               method_pansharpening,
                               interpolation_bco_radius,
                               pansharpening_lmvm_xradius,
                               pansharpening_lmvm_yradius,
                               pansharpening_bayes_lambda,
                               pansharpening_bayes_s,
                               path_time_log,
                               ram_otb=0,
                               format_raster='GTiff',
                               extension_raster=".tif",
                               save_results_intermediate=False,
                               overwrite=True):
    """
    Fuse a panchromatic image with a multispectral (XS) image by pansharpening.

    The processing is done in two external OTB calls run through os.system():
      1) otbcli_Superimpose  : resample the XS image onto the panchromatic geometry;
      2) otbcli_Pansharpening: merge the panchro and resampled XS into the output.

    Parameters:
        image_panchro_input (str)        : path to the panchromatic input image.
        image_xs_input (str)             : path to the multispectral (XS) input image.
        image_output (str)               : path of the pansharpened output image.
        mode_interpolation (str)         : value passed to Superimpose "-mode".
        method_interpolation (str)       : value passed to Superimpose "-interpolator"
                                           (extra radius option added when 'bco').
        method_pansharpening (str)       : value passed to Pansharpening "-method"
                                           ('lmvm' and 'bayes' take extra options).
        interpolation_bco_radius (int)   : radius for the 'bco' interpolator.
        pansharpening_lmvm_xradius (int) : x radius for the 'lmvm' method.
        pansharpening_lmvm_yradius (int) : y radius for the 'lmvm' method.
        pansharpening_bayes_lambda       : lambda parameter for the 'bayes' method.
        pansharpening_bayes_s            : s parameter for the 'bayes' method.
        path_time_log (str)              : path of the time-log file updated via timeLine().
        ram_otb (int)                    : OTB RAM hint in MB; added as "-ram" when > 0.
        format_raster (str)              : raster format name (printed only in this function).
        extension_raster (str)           : extension used for the intermediate resampled file.
        save_results_intermediate (bool) : when False, the resampled XS file and its
                                           .geom sidecar are deleted at the end.
        overwrite (bool)                 : when False and the output already exists,
                                           the assembly is skipped entirely.

    Returns:
        None. Raises NameError if either OTB command exits with a non-zero status.

    NOTE(review): assumes the OTB CLI tools are available on PATH — confirm in the
    deployment environment.
    """

    # Update the processing time log
    starting_event = "assembleImagePansharpening() : Pansharpening assembly starting "
    timeLine(path_time_log, starting_event)

    if debug >= 2:
        print(bold + green +
              "assembleImagePansharpening() : Variables dans la fonction" +
              endC)
        print(cyan + "assembleImagePansharpening() : " + endC +
              "image_panchro_input : " + str(image_panchro_input) + endC)
        print(cyan + "assembleImagePansharpening() : " + endC +
              "image_xs_input : " + str(image_xs_input) + endC)
        print(cyan + "assembleImagePansharpening() : " + endC +
              "image_output : " + str(image_output) + endC)
        print(cyan + "assembleImagePansharpening() : " + endC +
              "mode_interpolation : " + str(mode_interpolation) + endC)
        print(cyan + "assembleImagePansharpening() : " + endC +
              "method_interpolation : " + str(method_interpolation) + endC)
        print(cyan + "assembleImagePansharpening() : " + endC +
              "method_pansharpening : " + str(method_pansharpening) + endC)
        print(cyan + "assembleImagePansharpening() : " + endC +
              "interpolation_bco_radius : " + str(interpolation_bco_radius) +
              endC)
        print(cyan + "assembleImagePansharpening() : " + endC +
              "pansharpening_lmvm_xradius : " +
              str(pansharpening_lmvm_xradius) + endC)
        print(cyan + "assembleImagePansharpening() : " + endC +
              "pansharpening_lmvm_yradius : " +
              str(pansharpening_lmvm_yradius) + endC)
        print(cyan + "assembleImagePansharpening() : " + endC +
              "pansharpening_bayes_lambda : " +
              str(pansharpening_bayes_lambda) + endC)
        print(cyan + "assembleImagePansharpening() : " + endC +
              "pansharpening_bayes_s : " + str(pansharpening_bayes_s) + endC)
        print(cyan + "assembleImagePansharpening() : " + endC +
              "path_time_log : " + str(path_time_log) + endC)
        print(cyan + "assembleImagePansharpening() : " + endC + "ram_otb : " +
              str(ram_otb) + endC)
        print(cyan + "assembleImagePansharpening() : " + endC +
              "format_raster : " + str(format_raster) + endC)
        print(cyan + "assembleImagePansharpening() : " + endC +
              "extension_raster : " + str(extension_raster) + endC)
        print(cyan + "assembleImagePansharpening() : " + endC +
              "save_results_intermediate : " + str(save_results_intermediate) +
              endC)
        print(cyan + "assembleImagePansharpening() : " + endC +
              "overwrite : " + str(overwrite) + endC)

    # Constants
    EXT_GEOM = '.geom'  # extension of the geometry sidecar file created by OTB
    XS_RESAMPLE_SUFFIX = "_resample"  # suffix of the intermediate resampled XS image
    CODAGE = "uint16"  # output pixel type appended to the OTB "-out" argument

    print(endC)
    print(cyan + "assembleImagePansharpening() : " + bold + green +
          "## START : PANSHARPENING IMAGE" + endC)
    print(endC)

    # STEP 0 : PREPARATION

    # Build the path of the intermediate resampled XS file (next to the output image)
    repertory_output = os.path.dirname(image_output)
    image_xs_resample_tmp = repertory_output + os.sep + os.path.splitext(
        os.path.basename(
            image_xs_input))[0] + XS_RESAMPLE_SUFFIX + extension_raster

    # Check whether the output image already exists
    check = os.path.isfile(image_output)

    # If it exists and overwrite is disabled, skip the whole assembly
    if check and not overwrite:
        print(cyan + "assembleImagePansharpening() : " + bold + green +
              "Image de sortie existe déja : " + image_output + endC)
    # Otherwise (output missing, or overwrite enabled), run the assembly
    else:
        # Try to delete any pre-existing output file
        try:
            removeFile(image_output)
        except Exception:
            # Ignore the error raised when the file does not exist (nothing to delete)
            pass

        # STEP 1 : RESAMPLE THE XS FILE TO THE PANCHROMATIC RESOLUTION

        # otbcli_Superimpose command: reproject/resample the XS image onto the panchro geometry
        command = "otbcli_Superimpose -inr " + image_panchro_input + " -inm " + image_xs_input + " -mode " + mode_interpolation + " -interpolator " + method_interpolation + " -out " + image_xs_resample_tmp

        if method_interpolation.lower() == 'bco':
            command += " -interpolator.bco.radius " + str(
                interpolation_bco_radius)

        if ram_otb > 0:
            command += " -ram %d" % (ram_otb)

        if debug >= 4:
            print(cyan + "assembleImagePansharpening() : " + bold + green +
                  "SUPERIMPOSE DU FICHIER %s" % (image_xs_input) + endC)
            print(command)

        exit_code = os.system(command)
        if exit_code != 0:
            raise NameError(
                cyan + "assembleImagePansharpening() : " + bold + red +
                "!!! Une erreur c'est produite au cours du superimpose de l'image : "
                + image_xs_input + ". Voir message d'erreur." + endC)

        # STEP 2 : MERGE THE PANCHRO + XS FILES TO CREATE THE OUTPUT FILE

        # otbcli_Pansharpening command (CODAGE sets the output pixel type)
        command = "otbcli_Pansharpening -inp " + image_panchro_input + " -inxs " + image_xs_resample_tmp + " -method " + method_pansharpening + " -out " + image_output + " " + CODAGE

        if method_pansharpening.lower() == 'lmvm':
            command += " -method.lmvm.radiusx " + str(
                pansharpening_lmvm_xradius) + " -method.lmvm.radiusy " + str(
                    pansharpening_lmvm_yradius)

        if method_pansharpening.lower() == 'bayes':
            command += " -method.bayes.lambda " + str(
                pansharpening_bayes_lambda) + " -method.bayes.s " + str(
                    pansharpening_bayes_s)

        if ram_otb > 0:
            command += " -ram %d" % (ram_otb)

        if debug >= 4:
            print(cyan + "assembleImagePansharpening() : " + bold + green +
                  "PANSHARPENING DES FICHIERS %s ET %s" %
                  (image_panchro_input, image_xs_resample_tmp) + endC)
            print(command)

        exit_code = os.system(command)
        if exit_code != 0:
            raise NameError(
                cyan + "assembleImagePansharpening() : " + bold + red +
                "!!! Une erreur c'est produite au cours du pansharpening l'image : "
                + image_output + ". Voir message d'erreur." + endC)

    # STEP 3 : DELETE USELESS INTERMEDIATE FILES

    # Remove intermediate data unless asked to keep it
    if not save_results_intermediate:

        if os.path.isfile(image_xs_resample_tmp):
            removeFile(image_xs_resample_tmp)
        if os.path.isfile(
                os.path.splitext(image_xs_resample_tmp)[0] +
                EXT_GEOM):  # also remove the associated .geom geometry sidecar
            removeFile(os.path.splitext(image_xs_resample_tmp)[0] + EXT_GEOM)

    print(endC)
    print(cyan + "assembleImagePansharpening() : " + bold + green +
          "## END : PANSHARPENING IMAGE" + endC)
    print(endC)

    # Update the processing time log
    ending_event = "assembleImagePansharpening() : Pansharpening assembly ending "
    timeLine(path_time_log, ending_event)

    return
# Ejemplo n.º 19
# 0
def rasterAssembly(input_images_list,
                   output_image,
                   radius,
                   value_to_force,
                   boundaries_shape,
                   no_data_value,
                   path_time_log,
                   format_raster='GTiff',
                   format_vector='ESRI Shapefile',
                   extension_raster=".tif",
                   save_results_inter=False,
                   overwrite=True):
    """
    Assemble (mosaic) several raster images into a single output raster.

    Pipeline, driven by external commands run through os.system():
      1/5 gdalbuildvrt : build a virtual raster over all input images;
      2/5 gdal_translate : convert the VRT into a real raster file;
      3/5 optionally delete the VRT;
      4/5 (if radius > 0) otbcli_ClassificationMapRegularization : majority filter;
      5/5 (if radius > 0) otbcli_BandMath : fill 0-valued gaps with the filtered image;
      6/5 (if value_to_force > 0) otbcli_BandMath : replace the remaining 0s;
      7/5 (if boundaries_shape != "") cutImageByVector : clip to the boundary shape,
          otherwise rename the last intermediate file to output_image.

    Parameters:
        input_images_list (list[str]) : paths of the rasters to assemble.
        output_image (str)            : path of the assembled output raster; its
                                        directory hosts a "Temp" working sub-directory.
        radius (int)                  : majority-filter radius; <= 0 disables steps 4-5.
        value_to_force (int)          : value substituted for remaining 0 pixels;
                                        <= 0 disables step 6.
        boundaries_shape (str)        : vector file used to clip the result; "" disables step 7.
        no_data_value                 : nodata value passed to gdalbuildvrt/gdal_translate.
        path_time_log (str)           : path of the time-log file updated via timeLine().
        format_raster (str)           : output raster format for gdal_translate and the cut.
        format_vector (str)           : vector format of boundaries_shape.
        extension_raster (str)        : extension used for intermediate raster files.
        save_results_inter (bool)     : when False, intermediate files and the Temp
                                        directory are deleted as the pipeline advances.
        overwrite (bool)              : accepted for interface consistency; not used
                                        in the visible body.

    Returns:
        None. Raises NameError when any external command fails.

    NOTE(review): assumes the GDAL and OTB CLI tools are available on PATH — confirm
    in the deployment environment.
    """

    # Update the processing time log
    starting_event = "rasterAssembly() : Assemblage d'images raster starting "
    timeLine(path_time_log, starting_event)

    if debug >= 3:
        print(cyan + "rasterAssembly() : " + endC + "input_images_list : " +
              str(input_images_list))
        print(cyan + "rasterAssembly() : " + endC + "output_image : " +
              str(output_image))
        print(cyan + "rasterAssembly() : " + endC + "radius : " + str(radius))
        print(cyan + "rasterAssembly() : " + endC + "value_to_force : " +
              str(value_to_force))
        print(cyan + "rasterAssembly() : " + endC + "boundaries_shape : " +
              str(boundaries_shape))
        print(cyan + "rasterAssembly() : " + endC + "no_data_value : " +
              str(no_data_value) + endC)
        print(cyan + "rasterAssembly() : " + endC + "path_time_log : " +
              str(path_time_log))
        print(cyan + "rasterAssembly() : " + endC + "format_raster : " +
              str(format_raster) + endC)
        print(cyan + "rasterAssembly() : " + endC + "format_vector : " +
              str(format_vector) + endC)
        print(cyan + "rasterAssembly() : " + endC + "extension_raster : " +
              str(extension_raster) + endC)
        print(cyan + "rasterAssembly() : " + endC + "save_results_inter : " +
              str(save_results_inter))
        print(cyan + "rasterAssembly() : " + endC + "overwrite : " +
              str(overwrite))

    print(cyan + "rasterAssembly() : " + bold + green + "START ...\n" + endC)

    # Naming of intermediate files and constants
    CODAGE = "uint16"  # output pixel type appended to the OTB "-out" arguments

    # Space-separated list of input images for the gdalbuildvrt command line
    images_input_list_str = " "
    for input_image in input_images_list:
        images_input_list_str += input_image + " "

    output_dir = os.path.dirname(output_image)
    output_name = os.path.splitext(os.path.basename(output_image))[0]

    # All intermediate files live in a "Temp" sub-directory next to the output
    temp_directory = output_dir + os.sep + "Temp"
    vrt_image = temp_directory + os.sep + output_name + '_vrt' + extension_raster
    assembled_image = temp_directory + os.sep + output_name + '_assembled' + extension_raster
    assembled_smoothed_image = temp_directory + os.sep + output_name + '_assembled_smoothed' + extension_raster
    assembled_cleaned_with_smooth_image = temp_directory + os.sep + output_name + '_assembled_cleaned_with_smooth' + extension_raster
    assembled_cleaned_with_smooth_and_value_to_force_image = temp_directory + os.sep + output_name + '_assembled_cleaned_with_smooth_and_value_to_force' + extension_raster
    assembled_cleaned_cutted_image = temp_directory + os.sep + output_name + '_assembled_cleaned_cutted' + extension_raster

    # STEP 1/5 : BUILD THE VIRTUAL RASTER (VRT) OVER ALL INPUT IMAGES

    if not os.path.exists(temp_directory):
        os.makedirs(temp_directory)

    command = "gdalbuildvrt -srcnodata %s %s %s" % (
        str(no_data_value), vrt_image, images_input_list_str)
    if debug >= 2:
        print(
            '\n' + cyan + "rasterAssembly() : " + bold + green +
            "ETAPE 1/5 : DEBUT DE LA CREATION DU RASTER VIRTUEL. Sortie : %s - Entrees : %s"
            % (vrt_image, images_input_list_str) + endC)
        print(command)

    exitCode = os.system(command)

    if exitCode != 0:
        raise NameError(
            cyan + "rasterAssembly() : " + bold + red +
            "An error occured during the virtual raster construction. See error message above."
        )
    else:
        if debug >= 2:
            print('\n' + cyan + "rasterAssembly() : " + bold + green +
                  "ETAPE 1/5 : FIN DE LA CREATION DU RASTER VIRTUEL" + endC)

    # STEP 2/5 : CONVERT THE VIRTUAL RASTER INTO A REAL RASTER FILE (.tif)
    command = "gdal_translate -a_nodata %s -of %s %s %s" % (
        str(no_data_value), format_raster, vrt_image, assembled_image)
    if debug >= 2:
        print(
            '\n' + cyan + "rasterAssembly() : " + bold + green +
            "ETAPE 2/5 : DEBUT DE LA CONVERSION DU RASTER VIRTUEL EN .tif. Entree : %s - Sortie : %s"
            % (vrt_image, assembled_image) + endC)
        print(command)

    exitCode = os.system(command)

    if exitCode != 0:
        raise NameError(
            cyan + "rasterAssembly() : " + bold + red +
            "An error occured during the virtual raster conversion. See error message above."
        )
    else:
        if debug >= 2:
            print(
                '\n' + cyan + "rasterAssembly() : " + bold + green +
                "ETAPE 2/5 : FIN DE LA CONVERSION DU RASTER VIRTUEL EN .tif. "
                + endC)

    # STEP 3/5 : OPTIONALLY DELETE THE VIRTUAL RASTER
    if not save_results_inter:
        if debug >= 2:
            print('\n' + cyan + "rasterAssembly() : " + bold + green +
                  "ETAPE 3/5 : DEBUT DE LA SUPRESSION DU RASTER VIRTUEL %s" %
                  (vrt_image) + endC)
        try:
            removeFile(vrt_image)  # Attempt to delete the file
        except Exception:
            pass  # If the file cannot be deleted, assume it does not exist and move on
        if debug >= 2:
            print(cyan + "rasterAssembly() : " + bold + green +
                  "ETAPE 3/5 : FIN DE LA SUPRESSION DU RASTER VIRTUEL" + endC)
    else:
        if debug >= 2:
            print(cyan + "rasterAssembly() : " + bold + yellow +
                  "ETAPE 3/5 : PAS DE SUPPRESSION DU RASTER VIRTUEL %s" %
                  (vrt_image) + endC)

    # STEPS 4/5 and 5/5 : IF REQUESTED : COMPUTE AND APPLY THE MAJORITY FILTER
    if radius > 0:

        # STEP 4/5 : Compute the majority-filtered raster
        command = "otbcli_ClassificationMapRegularization -io.in %s -io.out %s %s -ip.radius %d" % (
            assembled_image, assembled_smoothed_image, CODAGE, radius)
        if debug >= 2:
            print(
                '\n' + cyan + "rasterAssembly() : " + bold + green +
                "ETAPE 4/5 : DEBUT DU CALCUL DU FILTRE MAJORITAIRE DE RAYON %s. Entree : %s - Sortie : %s"
                % (radius, assembled_image, assembled_smoothed_image) + endC)
            print(command)

        exitCode = os.system(command)

        if exitCode != 0:
            raise NameError(
                cyan + "rasterAssembly() : " + bold + red +
                "An error occured during otbcli_ClassificationMapRegularization command. See error message above."
            )
        else:
            if debug >= 2:
                print(cyan + "rasterAssembly() : " + bold + green +
                      "ETAPE 4/5 : FIN DU CALCUL DU FILTRE MAJORITAIRE" + endC)

        # STEP 5/5 : Use the majority-filtered raster to fill possible gaps
        # (0-valued pixels) between the assembled images
        expression = "\"(im1b1 == 0? im2b1 : im1b1)\""
        command = "otbcli_BandMath -il %s %s -exp %s -out %s %s" % (
            assembled_image, assembled_smoothed_image, expression,
            assembled_cleaned_with_smooth_image, CODAGE)
        if debug >= 2:
            print(
                '\n' + cyan + "rasterAssembly() : " + bold + green +
                "ETAPE 5/5 : DEBUT DE L'APPLICATION DU FILTRE MAJORITAIRE. Sortie : %s - Entree : %s et %s"
                % (assembled_cleaned_with_smooth_image, assembled_image,
                   assembled_smoothed_image) + endC)
            print(command)

        exitCode = os.system(command)

        if exitCode != 0:
            raise NameError(
                cyan + "rasterAssembly() : " + bold + red +
                "An error occured during otbcli_BandMath command. See error message above."
            )
        else:
            if debug >= 2:
                print(
                    cyan + "rasterAssembly() : " + bold + green +
                    "ETAPE 5/5 : FIN DE L'APPLICATION DU FILTRE MAJORITAIRE" +
                    endC)

        # Optionally delete the majority-filtered raster and the un-cleaned image
        if not save_results_inter:
            if debug >= 2:
                print(
                    cyan + "rasterAssembly() : " + bold + green +
                    "SUPRESSION DU FILTRE MAJORITAIRE %s ET DE L'IMAGE NON NETTOYEE %s"
                    % (assembled_smoothed_image, assembled_image) + endC)
            try:
                removeFile(assembled_smoothed_image
                           )  # Attempt to delete the file
            except Exception:
                pass  # If the file cannot be deleted, assume it does not exist and move on

            try:
                removeFile(
                    assembled_image)  # Attempt to delete the file
            except Exception:
                pass  # If the file cannot be deleted, assume it does not exist and move on

    else:  # Case where the majority filter is not requested
        # Alias: the rest of the pipeline reads assembled_cleaned_with_smooth_image
        assembled_cleaned_with_smooth_image = assembled_image
        if debug >= 2:
            print('\n' + cyan + "rasterAssembly() : " + bold + yellow +
                  "ETAPE 4/5 : PAS DE CALCUL DU FILTRE MAJORITAIRE DEMANDE" +
                  endC)
            print(
                '\n' + cyan + "rasterAssembly() : " + bold + yellow +
                "ETAPE 5/5 : PAS D'APPLICATION DU FILTRE MAJORITAIRE DEMANDE" +
                endC)

    # STEP 6/5 : IF REQUESTED : FORCE THE VALUE "value_to_force" FOR REMAINING 0 PIXELS
    if value_to_force > 0:
        expression = "\"(im1b1 == 0? %d : im1b1)\"" % (value_to_force)
        command = "otbcli_BandMath -il %s -exp %s -out %s %s" % (
            assembled_cleaned_with_smooth_image, expression,
            assembled_cleaned_with_smooth_and_value_to_force_image, CODAGE)
        if debug >= 2:
            print(
                '\n' + cyan + "rasterAssembly() : " + bold + green +
                "ETAPE 6/5 : DEBUT DU NETTOYAGE DES ZEROS RESTANTS, TRANSFORMES EN %s. SORTIE : %s"
                % (value_to_force,
                   assembled_cleaned_with_smooth_and_value_to_force_image) +
                endC)
            print(command)

        exitCode = os.system(command)

        if exitCode != 0:
            raise NameError(
                cyan + "rasterAssembly() : " + bold + red +
                "An error occured during the otbcli_BandMath command. See error message above."
            )
        else:
            if debug >= 2:
                print(cyan + "rasterAssembly() : " + bold + green +
                      "ETAPE 6/5 : FIN DU NETTOYAGE DES ZEROS RESTANTS" + endC)

        # Optionally delete the un-cleaned image
        if not save_results_inter:
            if debug >= 2:
                print(cyan + "rasterAssembly() : " + bold + green +
                      "SUPRESSION DE L'IMAGE NON NETTOYEE %s" %
                      (assembled_cleaned_with_smooth_image) + endC)
            try:
                removeFile(assembled_cleaned_with_smooth_image
                           )  # Attempt to delete the file
            except Exception:
                pass  # If the file cannot be deleted, assume it does not exist and move on

    else:  # Case where forcing a value is not requested
        # Alias: the rest of the pipeline reads the _value_to_force name
        assembled_cleaned_with_smooth_and_value_to_force_image = assembled_cleaned_with_smooth_image
        if debug >= 2:
            print('\n' + cyan + "rasterAssembly() : " + bold + yellow +
                  "ETAPE 6/5 : AUCUNE VALEUR IMPOSEE POUR LES 0" + endC)

    # STEP 7/5 : IF REQUESTED : CLIP TO THE BOUNDARY EXTENT
    if boundaries_shape != "":

        if debug >= 2:
            print(
                '\n' + cyan + "rasterAssembly() : " + bold + green +
                "ETAPE 7/5 : DEBUT DU DECOUPAGE AU REGARD DE L EMPRISE GLOBALE %s"
                % (boundaries_shape) + endC)

        if not cutImageByVector(
                boundaries_shape,
                assembled_cleaned_with_smooth_and_value_to_force_image,
                output_image, None, None, no_data_value, 0, format_raster,
                format_vector):
            raise NameError(
                cyan + "rasterAssembly() : " + bold + red +
                "An error occured during the cutting. See error message above."
            )
        else:
            if debug >= 2:
                print(
                    cyan + "rasterAssembly() : " + bold + green +
                    "ETAPE 7/5 : FIN DU DECOUPAGE AU REGARD DE L'EMPRISE GLOBALE"
                    + endC)

        # Optionally delete the un-clipped image
        if not save_results_inter:
            if debug >= 2:
                print(
                    cyan + "rasterAssembly() : " + bold + green +
                    "SUPRESSION DE L'IMAGE NON DECOUPEE %s" %
                    (assembled_cleaned_with_smooth_and_value_to_force_image) +
                    endC)
            try:
                removeFile(
                    assembled_cleaned_with_smooth_and_value_to_force_image
                )  # Attempt to delete the file
            except Exception:
                pass  # If the file cannot be deleted, assume it does not exist and move on

    else:
        # NOTE(review): os.rename fails if source and target are on different
        # filesystems, and on Windows when the target exists — confirm this is
        # acceptable in the target environments.
        os.rename(
            assembled_cleaned_with_smooth_and_value_to_force_image,
            output_image
        )  # In this case the final image is assembled_cleaned_with_smooth_and_value_to_force_image, which must be renamed
        if debug >= 2:
            print('\n' + cyan + "rasterAssembly() : " + bold + yellow +
                  "ETAPE 7/5 : AUCUN DECOUPAGE FINAL DEMANDE" + endC)

    # DELETE THE TEMP DIRECTORY
    if not save_results_inter and os.path.exists(temp_directory):
        deleteDir(temp_directory)

    if debug >= 2:
        print(cyan + "rasterAssembly() : " + bold + green +
              "FIN DE L'ASSEMBLAGE DES RASTERS. Sortie : %s" % (output_image) +
              endC)

    # Update the processing time log
    ending_event = "rasterAssembly() : Realocation micro class on classification image ending : "
    timeLine(path_time_log, ending_event)
    return
# Ejemplo n.º 20
# 0
def createTableModifier(centroids_input_files_list, indicators_input_file, matrix_input_file, validation_input_vector, label_macro_list, table_output_file, path_time_log, rate_area_min, threshold_delete_perf, threshold_alert_perf, format_vector='ESRI Shapefile', save_results_intermediate=False, overwrite=True) :
    """
    Build a micro-class reallocation proposal table and write it to a text file.

    The proposal is derived from the micro-class centroids, the classification
    quality indicators and the confusion matrix: missing and suspicious micro
    classes are detected, a reallocation proposal is generated and the result
    is written to table_output_file.

    Parameters:
        centroids_input_files_list : list of micro-class centroid input files.
        indicators_input_file : quality indicators input file.
        matrix_input_file : confusion matrix input file.
        validation_input_vector : validation vector file used by the proposal step.
        label_macro_list : list of macro-class labels.
        table_output_file : output proposal table (text file).
        path_time_log : log file updated at start and end of processing.
        rate_area_min : minimum area rate used by the reallocation proposal.
        threshold_delete_perf : performance threshold below which deletion is proposed.
        threshold_alert_perf : performance threshold below which a micro class is flagged.
        format_vector : OGR format of the validation vector (default 'ESRI Shapefile').
        save_results_intermediate : keep intermediate results if True (default False).
        overwrite : overwrite an existing output table if True (default True).

    Returns:
        None. The proposal text is written to table_output_file.
    """
    # Update the log
    starting_event = "createTableModifier() : Create table modifier starting : "
    timeLine(path_time_log,starting_event)

    print(cyan + "createTableModifier() : " + bold + green + "START ...\n" + endC)

    # Display the parameters
    if debug >= 3:
        print(cyan + "createTableModifier() : " + endC + "centroids_input_files_list: "+ str(centroids_input_files_list))
        print(cyan + "createTableModifier() : " + endC + "indicators_input_file: "+ str(indicators_input_file))
        print(cyan + "createTableModifier() : " + endC + "matrix_input_file: "+ str(matrix_input_file))
        print(cyan + "createTableModifier() : " + endC + "validation_input_vector: "+ str(validation_input_vector))
        print(cyan + "createTableModifier() : " + endC + "label_macro_list: "+ str(label_macro_list))
        print(cyan + "createTableModifier() : " + endC + "table_output_file: "+ str(table_output_file))
        print(cyan + "createTableModifier() : " + endC + "path_time_log: " + str(path_time_log))
        print(cyan + "createTableModifier() : " + endC + "rate_area_min: " + str(rate_area_min))
        print(cyan + "createTableModifier() : " + endC + "threshold_delete_perf: " + str(threshold_delete_perf))
        print(cyan + "createTableModifier() : " + endC + "threshold_alert_perf: " + str(threshold_alert_perf))
        print(cyan + "createTableModifier() : " + endC + "format_vector: " + str(format_vector))
        print(cyan + "createTableModifier() : " + endC + "save_results_intermediate: "+ str(save_results_intermediate))
        print(cyan + "createTableModifier() : " + endC + "overwrite: "+ str(overwrite))

    # Check whether a previously created table would be overwritten
    check = os.path.isfile(table_output_file)
    if check and not overwrite :
        print(cyan + "createTableModifier() : " + bold + yellow + "Modifier table already exists." + '\n' + endC)
    else:
        # Try to delete the file
        try:
            removeFile(table_output_file)
        except Exception:
            # Ignore the exception raised when the file does not exist (and therefore cannot be deleted)
            pass

        # Read the centroid files
        microclass_centroides_list = readCentroidsFiles(centroids_input_files_list)

        # Read the quality indicators file
        indicator_macro_dico,indicator_general_dico = readQualityIndicatorsFile(indicators_input_file)

        # Read the confusion matrix
        matrix,class_ref_list,class_pro_list = readConfusionMatrix(matrix_input_file)

        # Correct the confusion matrix to identify missing micro classes
        missed_micro_list = []
        if class_ref_list != class_pro_list:
            matrix, missed_micro_list = correctMatrix(class_ref_list, class_pro_list, matrix)

        # Build the micro-class label list as ints
        class_labels_list = [int(class_elem) for class_elem in class_pro_list]

        if debug >= 3:
            print(cyan + "createTableModifier() : " + endC + "indicator_macro_dico : " +  str(indicator_macro_dico) + "\n")
            print(cyan + "createTableModifier() : " + endC + "indicator_general_dico : " + str(indicator_general_dico) + "\n")
            print(cyan + "createTableModifier() : " + endC + "microclass_centroides_list = " + str(microclass_centroides_list))
            print(cyan + "createTableModifier() : " + endC + "missed_micro_list : " + str(missed_micro_list))
            print(cyan + "createTableModifier() : " + endC + "class_ref_list : " + str(class_ref_list))
            print(cyan + "createTableModifier() : " + endC + "class_pro_list : " + str(class_pro_list))
            print(cyan + "createTableModifier() : " + endC + "class_labels_list : " + str(class_labels_list))

        # Identify suspicious micro classes from the centroids
        s_suspect_microclass1,s_suspect_microclass2 = findSuspiciousMicroClass(label_macro_list, microclass_centroides_list)

        # Detect suspicious micro classes from the quality indicators
        suspect_micro_list,s_performance_list = detectSuspiciousMicroclass(indicator_macro_dico, class_labels_list, threshold_alert_perf)

        if debug >= 3:
            print(cyan + "createTableModifier() : " + endC + "s_suspect_microclass1 = " + str(s_suspect_microclass1))
            print(cyan + "createTableModifier() : " + endC + "s_suspect_microclass2 = "  + str(s_suspect_microclass2))
            print(cyan + "createTableModifier() : " + endC + "suspect_micro_list = " + str(suspect_micro_list))
            print(cyan + "createTableModifier() : " + endC + "s_performance_list = "  + str(s_performance_list))

        # Build the micro-class reallocation proposal
        proposal_text = proposeReallocationMicroClass(validation_input_vector, s_suspect_microclass1, s_suspect_microclass2, s_performance_list, missed_micro_list, suspect_micro_list, table_output_file, label_macro_list, rate_area_min, threshold_delete_perf, threshold_alert_perf, format_vector)

        # Write the reallocation proposal file
        writeTextFile(table_output_file, proposal_text)

    print(cyan + "createTableModifier() : " + bold + green + "END\n" + endC)

    # Update the log
    ending_event = "createTableModifier() : Create table modifier ending : "
    timeLine(path_time_log,ending_event)
    return
Ejemplo n.º 21
0
def soilOccupationChange(input_plot_vector, output_plot_vector, footprint_vector, input_tx_files_list, evolutions_list=['0:1:11000:10:50:and', '0:1:12000:10:50:and', '0:1:21000:10:50:and', '0:1:22000:10:50:and', '0:1:23000:10:50:and'], class_label_dico={11000:'Bati', 12000:'Route', 21000:'SolNu', 22000:'Eau', 23000:'Vegetation'}, epsg=2154, no_data_value=0, format_raster='GTiff', format_vector='ESRI Shapefile', extension_raster='.tif', extension_vector='.shp', postgis_ip_host='localhost', postgis_num_port=5432, postgis_user_name='postgres', postgis_password='******', postgis_database_name='database', postgis_schema_name='public', postgis_encoding='latin1', path_time_log='', save_results_intermediate=False, overwrite=True):
    """
    Quantify land-cover (OCS) evolution per plot between several dates.

    The plot vector is cut to the study footprint, per-plot OCS statistics are
    computed for each input date (t0..tx), the cut vector is imported into
    PostGIS where SQL queries characterise the requested evolutions, and the
    result is exported to output_plot_vector. A companion .txt file describes
    each quantified evolution field.

    Parameters:
        input_plot_vector : input plot (parcel) vector file.
        output_plot_vector : output plot vector file with evolution fields.
        footprint_vector : study-area footprint vector used to cut the plots.
        input_tx_files_list : list of classified rasters, one per date (t0, t1, ...).
        evolutions_list : evolutions to quantify, each 'idx_bef:idx_aft:label:rate:surface:combi'.
        class_label_dico : mapping from class label (int) to class name.
        epsg : projection EPSG code (default 2154).
        no_data_value : raster no-data value (default 0).
        format_raster / format_vector : GDAL/OGR format names.
        extension_raster / extension_vector : file extensions for temp files.
        postgis_* : PostGIS connection settings (host, port, user, password,
            database, schema, encoding).
        path_time_log : log file updated at start and end of processing.
        save_results_intermediate : keep temp files and database if True.
        overwrite : overwrite existing outputs if True.

    Returns:
        None.

    NOTE: evolutions_list and class_label_dico are mutable default arguments;
    they are only read here, never mutated.
    """
    if debug >= 3:
        print('\n' + bold + green + "Evolution de l'OCS par parcelle - Variables dans la fonction :" + endC)
        print(cyan + "    soilOccupationChange() : " + endC + "input_plot_vector : " + str(input_plot_vector) + endC)
        print(cyan + "    soilOccupationChange() : " + endC + "output_plot_vector : " + str(output_plot_vector) + endC)
        print(cyan + "    soilOccupationChange() : " + endC + "footprint_vector : " + str(footprint_vector) + endC)
        print(cyan + "    soilOccupationChange() : " + endC + "input_tx_files_list : " + str(input_tx_files_list) + endC)
        print(cyan + "    soilOccupationChange() : " + endC + "evolutions_list : " + str(evolutions_list) + endC)
        print(cyan + "    soilOccupationChange() : " + endC + "class_label_dico : " + str(class_label_dico) + endC)
        print(cyan + "    soilOccupationChange() : " + endC + "epsg : " + str(epsg) + endC)
        print(cyan + "    soilOccupationChange() : " + endC + "no_data_value : " + str(no_data_value) + endC)
        print(cyan + "    soilOccupationChange() : " + endC + "format_raster : " + str(format_raster) + endC)
        print(cyan + "    soilOccupationChange() : " + endC + "format_vector : " + str(format_vector) + endC)
        print(cyan + "    soilOccupationChange() : " + endC + "extension_raster : " + str(extension_raster) + endC)
        print(cyan + "    soilOccupationChange() : " + endC + "extension_vector : " + str(extension_vector) + endC)
        print(cyan + "    soilOccupationChange() : " + endC + "postgis_ip_host : " + str(postgis_ip_host) + endC)
        print(cyan + "    soilOccupationChange() : " + endC + "postgis_num_port : " + str(postgis_num_port) + endC)
        print(cyan + "    soilOccupationChange() : " + endC + "postgis_user_name : " + str(postgis_user_name) + endC)
        # NOTE(review): the password print was censored in the retrieved source,
        # fusing two lines into invalid syntax; restored here as two prints.
        print(cyan + "    soilOccupationChange() : " + endC + "postgis_password : " + str(postgis_password) + endC)
        print(cyan + "    soilOccupationChange() : " + endC + "postgis_database_name : " + str(postgis_database_name) + endC)
        print(cyan + "    soilOccupationChange() : " + endC + "postgis_schema_name : " + str(postgis_schema_name) + endC)
        print(cyan + "    soilOccupationChange() : " + endC + "postgis_encoding : " + str(postgis_encoding) + endC)
        print(cyan + "    soilOccupationChange() : " + endC + "path_time_log : " + str(path_time_log) + endC)
        print(cyan + "    soilOccupationChange() : " + endC + "save_results_intermediate : " + str(save_results_intermediate) + endC)
        print(cyan + "    soilOccupationChange() : " + endC + "overwrite : " + str(overwrite) + endC + '\n')

    # Constants
    EXTENSION_TEXT = '.txt'
    SUFFIX_TEMP = '_temp'
    SUFFIX_CUT = '_cut'
    AREA_FIELD = 'st_area'
    GEOM_FIELD = 'geom'

    # Update the log
    starting_event = "soilOccupationChange() : Début du traitement : "
    timeLine(path_time_log, starting_event)

    print(cyan + "soilOccupationChange() : " + bold + green + "DEBUT DES TRAITEMENTS" + endC + '\n')

    # Basename of the output vector
    output_plot_basename = os.path.splitext(os.path.basename(output_plot_vector))[0]

    # Temp paths
    temp_directory = os.path.dirname(output_plot_vector) + os.sep + output_plot_basename + SUFFIX_TEMP
    plot_vector_cut = temp_directory + os.sep + output_plot_basename + SUFFIX_CUT + extension_vector

    # PostGIS table name, derived from the output basename
    plot_table = output_plot_basename.lower()

    # Text file describing the quantified evolutions, next to the output vector
    output_evolution_text_file = os.path.splitext(output_plot_vector)[0] + EXTENSION_TEXT

    # Clean up any previous run
    if debug >= 3:
        print(cyan + "soilOccupationChange() : " + endC + "Nettoyage des traitements précédents." + endC + '\n')
    removeVectorFile(output_plot_vector, format_vector=format_vector)
    removeFile(output_evolution_text_file)
    cleanTempData(temp_directory)
    dropDatabase(postgis_database_name, user_name=postgis_user_name, password=postgis_password, ip_host=postgis_ip_host, num_port=postgis_num_port, schema_name=postgis_schema_name)

    #############
    # Step 0/2  # Preparation
    #############

    print(cyan + "soilOccupationChange() : " + bold + green + "ETAPE 0/2 - Début de la préparation des traitements." + endC + '\n')

    # Cut the plot vector to the study area
    cutVector(footprint_vector, input_plot_vector, plot_vector_cut, overwrite=overwrite, format_vector=format_vector)

    # Snapshot the original attribute names (to isolate fields created later and rename them).
    # Copy the list: appends below must not mutate attr_names_list_origin through aliasing.
    attr_names_list_origin = getAttributeNameList(plot_vector_cut, format_vector=format_vector)
    new_attr_names_list_origin = list(attr_names_list_origin)

    # Prepare PostGIS
    createDatabase(postgis_database_name, user_name=postgis_user_name, password=postgis_password, ip_host=postgis_ip_host, num_port=postgis_num_port, schema_name=postgis_schema_name)

    print(cyan + "soilOccupationChange() : " + bold + green + "ETAPE 0/2 - Fin de la préparation des traitements." + endC + '\n')

    #############
    # Step 1/2  # Per-date statistics
    #############

    print(cyan + "soilOccupationChange() : " + bold + green + "ETAPE 1/2 - Début des calculs des statistiques à tx." + endC + '\n')

    len_tx = len(input_tx_files_list)

    # Loop over the t0+x input files
    for tx, input_tx_file in enumerate(input_tx_files_list):
        if debug >= 3:
            print(cyan + "soilOccupationChange() : " + endC + bold + "Calcul des statistiques à tx %s/%s." % (tx+1, len_tx) + endC + '\n')

        # Per-plot OCS statistics (CVR)
        statisticsVectorRaster(input_tx_file, plot_vector_cut, "", 1, True, False, False, [], [], class_label_dico, path_time_log, clean_small_polygons=True, format_vector=format_vector, save_results_intermediate=save_results_intermediate, overwrite=overwrite)

        # Attribute names of the plot file after CVR (includes newly created fields)
        attr_names_list_tx = getAttributeNameList(plot_vector_cut, format_vector=format_vector)

        # Isolate the new fields created by CVR
        fields_name_list = [attr_name for attr_name in attr_names_list_tx if attr_name not in new_attr_names_list_origin]

        # Build the new field names (prefixed with the date index, truncated to 10 chars for shapefile limits)
        new_fields_name_list = []
        for field_name in fields_name_list:
            new_field_name = 't%s_' % tx + field_name
            new_field_name = new_field_name[:10]
            new_fields_name_list.append(new_field_name)
            new_attr_names_list_origin.append(new_field_name)

        # Rename the CVR fields, so CVR can be re-run for the next date
        renameFieldsVector(plot_vector_cut, fields_name_list, new_fields_name_list, format_vector=format_vector)

    print(cyan + "soilOccupationChange() : " + bold + green + "ETAPE 1/2 - Fin des calculs des statistiques à tx." + endC + '\n')

    #############
    # Step 2/2  # Change characterisation
    #############

    print(cyan + "soilOccupationChange() : " + bold + green + "ETAPE 2/2 - Début de la caractérisation des changements." + endC + '\n')

    # PostGIS pre-processing
    plot_table = importVectorByOgr2ogr(postgis_database_name, plot_vector_cut, plot_table, user_name=postgis_user_name, password=postgis_password, ip_host=postgis_ip_host, num_port=postgis_num_port, schema_name=postgis_schema_name, epsg=epsg, codage=postgis_encoding)
    connection = openConnection(postgis_database_name, user_name=postgis_user_name, password=postgis_password, ip_host=postgis_ip_host, num_port=postgis_num_port, schema_name=postgis_schema_name)

    # SQL: compute the plot areas
    sql_query = "ALTER TABLE %s ADD COLUMN %s REAL;\n" % (plot_table, AREA_FIELD)
    sql_query += "UPDATE %s SET %s = ST_Area(%s);\n" % (plot_table, AREA_FIELD, GEOM_FIELD)

    # Loop over the evolutions to quantify
    temp_field = 1
    for evolution in evolutions_list:
        # Each evolution is 'idx_bef:idx_aft:label:rate:surface:combi'
        evolution_split = evolution.split(':')
        idx_bef = int(evolution_split[0])
        idx_aft = int(evolution_split[1])
        label = int(evolution_split[2])
        evol = abs(int(evolution_split[3]))
        evol_s = abs(int(evolution_split[4]))
        combi = evolution_split[5]
        class_name = class_label_dico[label]
        def_evo_field = "def_evo_%s" % str(temp_field)
        if debug >= 3:
            print(cyan + "soilOccupationChange() : " + endC + bold + "Caractérisation des changements t%s/t%s pour la classe '%s' (%s)." % (idx_bef, idx_aft, class_name, label) + endC + '\n')

        if evol != 0 or evol_s != 0:

            # Evolution via the rate (%)
            evol_str = str(evol) + ' %'
            evo_field = "evo_%s" % str(temp_field)
            t0_field = 't%s_' % idx_bef + class_name.lower()[:7]
            t1_field = 't%s_' % idx_aft + class_name.lower()[:7]

            # Evolution via the surface (m²)
            evol_s_str = str(evol_s) + ' m²'
            evo_s_field = "evo_s_%s" % str(temp_field)
            t0_s_field = 't%s_s_' % idx_bef + class_name.lower()[:5]
            t1_s_field = 't%s_s_' % idx_aft + class_name.lower()[:5]

            # SQL: raw evolution computation
            sql_query += "ALTER TABLE %s ADD COLUMN %s REAL;\n" % (plot_table, evo_field)
            sql_query += "UPDATE %s SET %s = %s - %s;\n" % (plot_table, evo_field, t1_field, t0_field)
            sql_query += "ALTER TABLE %s ADD COLUMN %s REAL;\n" % (plot_table, evo_s_field)
            sql_query += "UPDATE %s SET %s = %s - %s;\n" % (plot_table, evo_s_field, t1_s_field, t0_s_field)
            sql_query += "ALTER TABLE %s ADD COLUMN %s VARCHAR;\n" % (plot_table, def_evo_field)
            sql_query += "UPDATE %s SET %s = 't%s a t%s - %s - aucune evolution';\n" % (plot_table, def_evo_field, idx_bef, idx_aft, class_name)

            # Evolution via both rate and surface
            if evol != 0 and evol_s != 0:
                text_evol = "taux à %s" % evol_str
                if combi == 'and':
                    text_evol += " ET "
                elif combi == 'or':
                    text_evol += " OU "
                text_evol += "surface à %s" % evol_s_str
                sql_where_pos = "%s >= %s %s %s >= %s" % (evo_field, evol, combi, evo_s_field, evol_s)
                sql_where_neg = "%s <= -%s %s %s <= -%s" % (evo_field, evol, combi, evo_s_field, evol_s)

            # Evolution via the rate only
            elif evol != 0:
                text_evol = "taux à %s" % evol_str
                sql_where_pos = "%s >= %s" % (evo_field, evol)
                sql_where_neg = "%s <= -%s" % (evo_field, evol)

            # Evolution via the surface only
            elif evol_s != 0:
                text_evol = "surface à %s" % evol_s_str
                sql_where_pos = "%s >= %s" % (evo_s_field, evol_s)
                sql_where_neg = "%s <= -%s" % (evo_s_field, evol_s)

            sql_query += "UPDATE %s SET %s = 't%s a t%s - %s - evolution positive' WHERE %s;\n" % (plot_table, def_evo_field, idx_bef, idx_aft, class_name, sql_where_pos)
            sql_query += "UPDATE %s SET %s = 't%s a t%s - %s - evolution negative' WHERE %s;\n" % (plot_table, def_evo_field, idx_bef, idx_aft, class_name, sql_where_neg)

            # Append the quantified evolution description (dates, class, rate/surface) to the output text file
            text = "%s --> évolution entre t%s et t%s, pour la classe '%s' (label %s) :\n" % (def_evo_field, idx_bef, idx_aft, class_name, label)
            text += "    %s --> taux d'évolution brut" % evo_field + " (%)\n"
            text += "    %s --> surface d'évolution brute" % evo_s_field + " (m²)\n"
            text += "Evolution quantifiée : %s\n" % text_evol
            appendTextFileCR(output_evolution_text_file, text)
            temp_field += 1

    # Run the SQL, then export the result
    executeQuery(connection, sql_query)
    closeConnection(connection)
    exportVectorByOgr2ogr(postgis_database_name, output_plot_vector, plot_table, user_name=postgis_user_name, password=postgis_password, ip_host=postgis_ip_host, num_port=postgis_num_port, schema_name=postgis_schema_name, format_type=format_vector)

    print(cyan + "soilOccupationChange() : " + bold + green + "ETAPE 2/2 - Fin de la caractérisation des changements." + endC + '\n')

    # Remove temporary files and the working database
    if not save_results_intermediate:
        if debug >= 3:
            print(cyan + "soilOccupationChange() : " + endC + "Suppression des fichiers temporaires." + endC + '\n')
        deleteDir(temp_directory)
        dropDatabase(postgis_database_name, user_name=postgis_user_name, password=postgis_password, ip_host=postgis_ip_host, num_port=postgis_num_port, schema_name=postgis_schema_name)

    print(cyan + "soilOccupationChange() : " + bold + green + "FIN DES TRAITEMENTS" + endC + '\n')

    # Update the log
    ending_event = "soilOccupationChange() : Fin du traitement : "
    timeLine(path_time_log, ending_event)

    return
def filterImageMajority(image_input,
                        filtered_image_output,
                        filter_mode,
                        radius,
                        umc_pixels,
                        path_time_log,
                        ram_otb=0,
                        save_results_intermediate=False,
                        overwrite=True):
    """
    Regularize a classification map with a majority filter.

    Depending on filter_mode, either otbcli_ClassificationMapRegularization
    (mode "otb", using the window radius) or gdal_sieve.py (any other mode,
    using the minimum mapping unit in pixels) is run via os.system.

    Parameters:
        image_input : input classification raster.
        filtered_image_output : output filtered raster.
        filter_mode : "otb" for the OTB regularization, anything else for gdal_sieve.
        radius : majority-filter window radius (OTB mode).
        umc_pixels : minimum polygon size in pixels (gdal_sieve mode, -st option).
        path_time_log : log file updated at start and end of processing.
        ram_otb : RAM (MB) passed to OTB if > 0 (default 0).
        save_results_intermediate : unused here, kept for interface consistency.
        overwrite : skip the filtering if the output already exists and this is False.

    Raises:
        NameError : when the external command returns a non-zero exit code.

    Returns:
        None.
    """
    # Update the log
    starting_event = "filterImageMajority() : Filter image starting : "
    timeLine(path_time_log, starting_event)

    print(endC)
    print(bold + green + "## START : MAP REGULARIZATION" + endC)
    print(endC)

    # Output pixel type for the OTB application
    CODAGE = "uint16"

    if debug >= 2:
        print(bold + green +
              "filterImageMajority() : Variables dans la fonction" + endC)
        print(cyan + "filterImageMajority() : " + endC + "image_input : " +
              str(image_input) + endC)
        print(cyan + "filterImageMajority() : " + endC +
              "filtered_image_output : " + str(filtered_image_output) + endC)
        print(cyan + "filterImageMajority() : " + endC + "filter_mode : " +
              str(filter_mode) + endC)
        print(cyan + "filterImageMajority() : " + endC + "radius : " +
              str(radius) + endC)
        print(cyan + "filterImageMajority() : " + endC + "umc_pixels : " +
              str(umc_pixels) + endC)
        print(cyan + "filterImageMajority() : " + endC + "path_time_log : " +
              str(path_time_log) + endC)
        print(cyan + "filterImageMajority() : " + endC + "ram_otb : " +
              str(ram_otb) + endC)
        print(cyan + "filterImageMajority() : " + endC +
              "save_results_intermediate : " + str(save_results_intermediate) +
              endC)
        print(cyan + "filterImageMajority() : " + endC + "overwrite : " +
              str(overwrite) + endC)

    # Does a filtered image already exist?
    check = os.path.isfile(filtered_image_output)

    # If it does and overwrite is disabled, skip the filtering
    if check and not overwrite:
        print(cyan + "filterImageMajority() : " + bold + green +
              "Image already filtered with window size of " + str(radius) +
              "." + endC)
    # Otherwise apply the filter
    else:
        # Try to delete the file
        try:
            removeFile(filtered_image_output)
        except Exception:
            # Ignore the exception raised when the file does not exist (and therefore cannot be deleted)
            pass

        if debug >= 3:
            print(cyan + "filterImageMajority() : " + bold + green +
                  "Applying majority filter with window size " + str(radius) +
                  " ..." + '\n' + endC)

        # Choose the filtering backend
        if filter_mode.lower() == "otb":
            # otbcli_ClassificationMapRegularization (majority voting, window radius)
            command = "otbcli_ClassificationMapRegularization -io.in %s -io.out %s %s -ip.radius %d" % (
                image_input, filtered_image_output, CODAGE, radius)
            if ram_otb > 0:
                command += " -ram %d" % (ram_otb)
        else:
            # gdal_sieve (8-connected, minimum polygon size in pixels)
            command = "gdal_sieve.py -st %d -8 %s %s" % (
                umc_pixels, image_input, filtered_image_output)

        if debug >= 3:
            print(command)
        exitCode = os.system(command)
        if exitCode != 0:
            # Report the executable actually run (previously always blamed the OTB app,
            # even in gdal_sieve mode)
            raise NameError(
                cyan + "filterImageMajority() : " + bold + red +
                "An error occured during %s command. See error message above." %
                command.split()[0])
        print('\n' + cyan + "filterImageMajority() : " + bold + green +
              "Filter applied!" + endC)

    # Remove the .geom files from the output directory
    directory_output = os.path.dirname(filtered_image_output)
    for to_delete in glob.glob(directory_output + os.sep + "*.geom"):
        removeFile(to_delete)

    print(endC)
    print(bold + green + "## END :  MAP REGULARIZATION" + endC)
    print(endC)

    # Update the log
    ending_event = "filterImageMajority() : Filter image ending : "
    timeLine(path_time_log, ending_event)

    return
def normalizeChannels(image_stack_input, image_normalised_stack_output, path_time_log, save_results_intermediate=False, overwrite=True):
    """
    Normalize every band of an image stack to zero mean and unit variance.

    Band statistics (mean and standard deviation) are computed with
    otbcli_ComputeImagesStatistics into an XML file, each band is normalized
    with otbcli_BandMath as (band - mean) / stddev, and the normalized bands
    are re-stacked with otbcli_ConcatenateImages.

    Parameters:
        image_stack_input : input multi-band image stack.
        image_normalised_stack_output : output normalized stack.
        path_time_log : log file updated at start and end of processing.
        save_results_intermediate : keep the per-band normalized files if True.
        overwrite : recompute an existing output if True (default True).

    Raises:
        NameError : when an OTB command returns a non-zero exit code.

    Returns:
        None.
    """
    # Update the log
    starting_event = "normalizeChannels() : Normalize channels starting : "
    timeLine(path_time_log,starting_event)

    print(endC)
    print(bold + green + "## START : CHANNELS NORMALIZATION" + endC)
    print(endC)

    # Output pixel type for the OTB applications
    CODAGE = "float"

    # Names derived from the input image
    repertory_stacks_output = os.path.dirname(image_normalised_stack_output)
    image_name = os.path.splitext(os.path.basename(image_stack_input))[0]
    extension_file = os.path.splitext(os.path.basename(image_stack_input))[1]
    statistics_image_normalize_output = os.path.splitext(image_normalised_stack_output)[0] + "_statistics.xml"

    if debug >= 3:
        print(bold + green + "Variables dans la fonction" + endC)
        print(cyan + "normalizeChannels() : " + endC + "image_stack_input : " + str(image_stack_input) + endC)
        print(cyan + "normalizeChannels() : " + endC + "image_normalised_stack_output : " + str(image_normalised_stack_output) + endC)
        print(cyan + "normalizeChannels() : " + endC + "path_time_log : " + str(path_time_log) + endC)
        print(cyan + "normalizeChannels() : " + endC + "save_results_intermediate : " + str(save_results_intermediate) + endC)
        print(cyan + "normalizeChannels() : " + endC + "overwrite : " + str(overwrite) + endC)
        print(cyan + "normalizeChannels() : " + endC + "image_name : " + str(image_name) + endC)
        print(cyan + "normalizeChannels() : " + endC + "extension_file : " + str(extension_file) + endC)
        print(cyan + "normalizeChannels() : " + endC + "repertory_stacks_output : " + str(repertory_stacks_output) + endC)

    # If the normalized stack already exists and overwrite is disabled, skip
    check = os.path.isfile(image_normalised_stack_output)
    if check and not overwrite:
        print(bold + yellow + "Stack normalized already exists and will not be calculated again." + endC)
    else:
        if check :
            try:
                removeFile(image_normalised_stack_output)
            except Exception:
                pass # if the file does not exist it cannot be deleted: ignore this step

        # Compute the stack statistics.
        # The statistics XML contains one section for the means and one for the
        # standard deviations; each section holds one line per band, in stack order.
        print(bold + green + "Calcul des statistiques de %s " %(image_stack_input) + endC)
        computeStatisticsImage(image_stack_input, statistics_image_normalize_output)
        statistics_file_parser = parseDom(statistics_image_normalize_output)
        statistics_nodes_list = statistics_file_parser.getElementsByTagName('Statistic')

        # Per-band means and standard deviations, in stack order
        means_list = []
        stddevs_list = []

        # Walk the statistics file to collect the mean and stddev of each band
        if debug >= 1:
            print(cyan + "normalizeChannels() : " + endC + bold + green + "Parsing statistics file ..." + endC)

        for statistic in statistics_nodes_list:
            # The 'name' attribute selects which list the values belong to
            statistic_name = statistic.attributes['name'].value
            for node in statistic.getElementsByTagName('StatisticVector'):
                value = node.attributes['value'].value
                if statistic_name == "mean":
                    means_list.append(value)
                elif statistic_name == "stddev":
                    stddevs_list.append(value)

        if debug >= 1:
            print(cyan + "normalizeChannels() : " + endC + bold + green + "Parsing statistics complete." + endC)
            print(cyan + "normalizeChannels() : " + endC + "means_list : " + str(means_list) + endC)
            print(cyan + "normalizeChannels() : " + endC + "stddevs_list : " + str(stddevs_list) + endC)

        number_of_bands = len(means_list)   # Number of bands to stack (= number of mean values)
        bands_to_concatenate_list = ""      # OTB-formatted list of the extracted normalized bands
        files_to_delete_list = []           # Same bands, as a Python list, for later deletion

        # Loop over the stack bands; the position in the means list matches the band order
        for band in range (number_of_bands):

            mean_value = means_list[band]
            stddev_value = stddevs_list[band]

            normalised_band = repertory_stacks_output + os.sep + image_name + "_b" + str(band) + extension_file
            # BandMath bands are 1-indexed, hence band+1
            expression = "\"(im1b" + str(band+1) + "-(" + str(mean_value) + "))/" + str(stddev_value) + "\""
            if debug >= 1:
                print(cyan + "normalizeChannels() : " + endC + "Normalizing band %s" %(band)+ endC)
            if debug >= 2:
                print(cyan + "normalizeChannels() : " + endC + "mean_value : " + str(mean_value) + endC)
                print(cyan + "normalizeChannels() : " + endC + "stddev_value : " + str(stddev_value) + endC)
                print(cyan + "normalizeChannels() : " + endC + "normalised_band : " + str(normalised_band) + endC)

            command = "otbcli_BandMath -il %s -out %s %s -exp %s" %(image_stack_input,normalised_band,CODAGE,expression)

            if debug >= 3:
                print(command)

            exitCode = os.system(command)
            if exitCode != 0:
                print(command)
                raise NameError(cyan + "normalizeChannels() : " + bold + red + "An error occured during otbcli_BandMath command. See error message above." + endC)

            if debug >= 1:
                print(cyan + "normalizeChannels() : " + endC + bold + green + "Band %s normalized." %(band)+ endC)
                print(endC)

            # Keep track of the normalized band for concatenation and cleanup
            bands_to_concatenate_list += " " + normalised_band
            files_to_delete_list.append(normalised_band)

        if debug >= 1:
            print(cyan + "normalizeChannels() : " + endC + bold + green + "Stacking bands ..."+ endC)
        if debug >= 2:
            print(cyan + "normalizeChannels() : " + endC + bold + green + "Debut de la concatenation de "+ endC + " %s ..." %(bands_to_concatenate_list)+ endC)
            print(endC)

        # Stack the normalized bands into a new file
        exitCode = os.system("otbcli_ConcatenateImages -progress true -il %s -out %s %s" %(bands_to_concatenate_list, image_normalised_stack_output, CODAGE))
        if exitCode != 0:
            raise NameError(cyan + "normalizeChannels() : " + bold + red + "An error occured during otbcli_ConcatenateImages command. See error message above." + endC)

        # Delete the individual normalized bands unless intermediate results are kept
        if not save_results_intermediate:
            for file_del in files_to_delete_list:
                removeFile(file_del)

    print(endC)
    print(bold + green + "## END : CHANNELS NORMALIZATION" + endC)
    print(endC)

    # Update the log
    ending_event = "normalizeChannels() : Normalize channels ending : "
    timeLine(path_time_log,ending_event)

    return
def classifyVector(vector_input, classif_vector_output, list_feat, expression, input_cfield, output_cfield, path_time_log, format_vector='ESRI Shapefile', extension_vector=".shp", extension_xml = ".xml", extension_model = ".model", save_results_intermediate=False, overwrite=True):
    """
    Supervised classification of a (segmented) vector layer with the OTB
    vector classification chain (statistics -> training -> classification).

    vector_input : input vector file whose features carry the attribute fields
    classif_vector_output : output classified vector file ("" => derived from vector_input)
    list_feat : list of attribute field names used as classification features
    expression : OGR "-where" expression selecting the training features
    input_cfield : name of the training label field
    output_cfield : name of the predicted label field written to the output
    path_time_log : log file updated at start and end
    Remaining keyword arguments select the vector format, file extensions and
    the usual save-intermediate / overwrite behaviour.
    """

    # Constants
    SUFFIX_OUT = "_out"
    SUFFIX_TRAIN = "_train"

    # Log update
    starting_event = "classifyVector() : Classification vecteur starting : "
    timeLine(path_time_log,starting_event)

    print(endC)
    print(bold + green + "## START : classifyVector" + endC)
    print(endC)

    if debug >= 2:
        print(bold + green + "classifyVector() : Variables dans la fonction" + endC)
        print(cyan + "classifyVector() : " + endC + "vector_input : " + str(vector_input) + endC)
        print(cyan + "classifyVector() : " + endC + "classif_vector_output : " + str(classif_vector_output) + endC)
        print(cyan + "classifyVector() : " + endC + "list_feat : " + str(list_feat) + endC)
        print(cyan + "classifyVector() : " + endC + "input_cfield : " + str(input_cfield) + endC)
        print(cyan + "classifyVector() : " + endC + "output_cfield : " + str(output_cfield) + endC)
        print(cyan + "classifyVector() : " + endC + "expression : " + str(expression) + endC)
        print(cyan + "classifyVector() : " + endC + "path_time_log : " + str(path_time_log) + endC)
        print(cyan + "classifyVector() : " + endC + "format_vector : " + str(format_vector) + endC)
        print(cyan + "classifyVector() : " + endC + "extension_vector : " + str(extension_vector) + endC)
        print(cyan + "classifyVector() : " + endC + "extension_xml : " + str(extension_xml) + endC)
        print(cyan + "classifyVector() : " + endC + "extension_model : " + str(extension_model) + endC)
        print(cyan + "classifyVector() : " + endC + "save_results_intermediate : " + str(save_results_intermediate) + endC)
        print(cyan + "classifyVector() : " + endC + "overwrite : " + str(overwrite) + endC)

    # Output paths of the intermediate results
    repertory_output = os.path.dirname(vector_input)
    name = os.path.splitext(os.path.basename(vector_input))[0]
    layer_name = name
    vector_output_train_tmp = repertory_output + os.sep + name + SUFFIX_TRAIN + extension_vector
    outstats = repertory_output + os.sep + name + extension_xml
    model = repertory_output + os.sep + name + extension_model

    # Default output file when none is supplied
    if classif_vector_output == "" :
        classif_vector_output = repertory_output + os.sep + name + SUFFIX_OUT + extension_vector

    # Check for an already classified output layer
    # BUGFIX: this statement was indented under the "if classif_vector_output == ''"
    # block above, leaving 'check' undefined whenever an output path was supplied
    check = os.path.isfile(classif_vector_output)

    # If it exists and overwriting is disabled, skip the processing
    if check and not overwrite :
        print(cyan + "classifyVector() : " + bold + green +  "vector already classified" + "." + endC)
    # Otherwise run (or re-run) the classification
    else:
        # Try to delete a possibly existing output file
        try:
            removeVectorFile(classif_vector_output, format_vector=format_vector)
        except Exception:
            # Ignore the exception raised when the file does not exist (and thus cannot be removed)
            pass

        if debug >= 3:
            print(cyan + "classifyVector() : " + bold + green +  "Applying classified segmenation", "..." , '\n' + endC)

        # Get the OGR driver for the input vector format
        driver_input = ogr.GetDriverByName(format_vector)

        # Open the input vector file in update mode
        data_source_input = driver_input.Open(vector_input, 1) # 1 means writeable.

        if data_source_input is None:
            print(cyan + "classifyVector() : " + bold + red + "Impossible d'ouvrir le fichier vecteur : " + vector_input + endC, file=sys.stderr)
            sys.exit(1) # exit with an error code

        # Get the layer (a layer contains the segments)
        layer_input = data_source_input.GetLayer(0)

        # Check whether the output class field already exists (-1 => absent)
        layer_definition = layer_input.GetLayerDefn() # GetLayerDefn => returns the field names of the user defined (created) fields
        id_field = layer_definition.GetFieldIndex(output_cfield)
        if id_field != -1 :
            print(cyan + "classifyVector() : " + bold + yellow + "Attention le champs de classification existe déjà " + output_cfield + endC)
            # BUGFIX: only delete the field when it actually exists
            # (DeleteField(-1) was previously called unconditionally)
            layer_input.DeleteField(id_field)

        # Close the input data source
        layer_input.SyncToDisk()
        data_source_input.Destroy()

        # Extract the training vector file
        # BUGFIX: overwrite_str was undefined when overwrite was False
        overwrite_str = "-overwrite" if overwrite else ""
        command = "ogr2ogr -f '%s' %s %s  %s -where \"%s\" " % (format_vector, overwrite_str, vector_output_train_tmp ,vector_input, expression)
        if debug >= 2:
           print(command)
        exit_code = os.system(command)
        # BUGFIX: the status variable was assigned as exit_code but tested as exitCode (NameError)
        if exit_code != 0:
            print(command)
            raise NameError (cyan + "classifyVector() : " + bold + red + "ogr2ogr. Selection du fichier vecteur d'apprentissage erreur." + endC)

        # Feature fields used for the classification
        # e.g. list_feat = ['meanB0', 'meanB1', 'meanB2', 'meanB3', 'varB0', 'varB1', 'varB2', 'varB3']
        # BUGFIX: the fields must be space-separated on the command line
        # (''.join previously glued them into one token)
        feat = "-feat " + ' '.join(list_feat)

        # Classification:
        # 1) Compute the statistics
        # BUGFIX: the commands below referenced an undefined 'segmented_input'; use vector_input
        command = "otbcli_ComputeOGRLayersFeaturesStatistics -inshp %s -outstats %s " %(vector_input, outstats)
        command += "%s" %(feat)
        if debug >= 2:
           print(command)
        exit_code = os.system(command)
        if exit_code != 0:
            print(command)
            raise NameError (cyan + "classifyVector() : " + bold + red + "otbcli_ComputeOGRLayersFeaturesStatistics. See error message above." + endC)

        # 2) Train the model
        command = "otbcli_TrainVectorClassifier -io.vd %s -io.stats %s -io.out %s  -cfield %s " %(vector_output_train_tmp, outstats, model, input_cfield)
        command += "%s" %(feat)
        if debug >= 2:
           print(command)
        exit_code = os.system(command)
        if exit_code != 0:
            print(command)
            raise NameError (cyan + "classifyVector() : " + bold + red + "otbcli_TrainVectorClassifier. See error message above." + endC)

        # 3) Produce the classification
        command = "otbcli_VectorClassifier -in %s -instat  %s  -model %s  -cfield %s  -out %s " %(vector_input, outstats, model,  output_cfield, classif_vector_output)
        command += "%s" %(feat)
        if debug >= 2:
           print(command)
        exit_code = os.system(command)
        if exit_code != 0:
            print(command)
            raise NameError (cyan + "classifyVector() : " + bold + red + "otbcli_VectorClassifier. See error message above." + endC)

        # Intermediate data removal
        if not save_results_intermediate:
            # Remove the temporary training file
            if os.path.isfile(vector_output_train_tmp) :
                removeFile(vector_output_train_tmp)

    print(endC)
    print(bold + green + "## END :  classifyVector" + endC)
    print(endC)

    # Log update
    ending_event = "classifyVector() : Classifictaion vector  ending : "
    timeLine(path_time_log,ending_event)
    return
def concatenateChannels(images_input_list, stack_image_output, path_time_log, save_results_intermediate=False, overwrite=True):
    """
    Stack a list of single-band/multi-band rasters into one multi-band image
    with otbcli_ConcatenateImages (float encoding).

    images_input_list : list of input raster files, stacked in order
    stack_image_output : output stacked raster file
    path_time_log : log file updated at start and end
    save_results_intermediate : kept for interface symmetry (no intermediate files here)
    overwrite : when False and the output already exists, nothing is recomputed
    """

    # Log update
    starting_event = "concatenateChannels() : Concatenate channels starting : "
    timeLine(path_time_log,starting_event)

    print(endC)
    print(bold + green + "## START : CHANNELS CONCATENATION" + endC)
    print(endC)

    # Constants
    CODAGE = "float"

    if debug >= 3:
        print(bold + green + "Variables dans la fonction" + endC)
        print(cyan + "concatenateChannels() : " + endC + "images_input_list : " + str(images_input_list) + endC)
        print(cyan + "concatenateChannels() : " + endC + "stack_image_output : " + str(stack_image_output) + endC)
        print(cyan + "concatenateChannels() : " + endC + "path_time_log : " + str(path_time_log) + endC)
        print(cyan + "concatenateChannels() : " + endC + "save_results_intermediate : " + str(save_results_intermediate) + endC)
        print(cyan + "concatenateChannels() : " + endC + "overwrite : " + str(overwrite) + endC)

    already_done = os.path.isfile(stack_image_output)
    if already_done and not overwrite: # the stack already exists and overwriting is disabled
        print(bold + yellow + "File " + stack_image_output + " already exists and will not be calculated again." + endC)
    else:
        if already_done:
            try:
                removeFile(stack_image_output)
            except Exception:
                pass # the file cannot be removed: assume it does not exist and carry on

        print(bold + green + "Searching for channels to add..." + endC)

        # Build the space-separated list of input files to stack
        inputs_str = ""
        for other_image in images_input_list:

            if debug >= 3:
                print(cyan + "concatenateChannels() : " + endC + "image_name_other : " + str(other_image) + endC)

            # Each input file must exist
            if not os.path.isfile(other_image) :
                raise NameError(cyan + "concatenateChannels() : " + bold + red + "The file %s not existe!"%(other_image) + endC)

            # Append the file to the input list
            inputs_str += " " + other_image

            if debug >= 1:
                print(cyan + "concatenateChannels() : " + endC + "elements_to_stack_list_str : " + str(inputs_str) + endC)

        # Stack all the collected inputs into the output image
        if inputs_str:

            print(bold + green + "concatenateChannels() : Assembling channels %s ... "%(inputs_str) + endC)

            command = "otbcli_ConcatenateImages -progress true -il %s -out %s %s" %(inputs_str,stack_image_output,CODAGE)
            if debug >= 3:
                print(command)
            if os.system(command) != 0:
                print(command)
                raise NameError(cyan + "concatenateChannels() : " + bold + red + "An error occured during otbcli_ConcatenateImages command. See error message above." + endC)
            print(bold + green + "concatenateChannels() : Channels successfully assembled" + endC)

    print(endC)
    print(bold + green + "## END : CHANNELS CONCATENATION" + endC)
    print(endC)

    # Log update
    ending_event = "concatenateChannels() : Concatenate channels ending : "
    timeLine(path_time_log,ending_event)

    return
def displayTree(origin_tree, origin_name, name_file_graph, path_time_log,
                save_results_intermediate, overwrite):
    """
    Render a decision tree as a PNG image through Graphviz (pygraphviz).

    origin_tree : tree structure walked by createDisplayGraph
    origin_name : label written on the root node
    name_file_graph : base path used to derive the .dot and .png file names
    path_time_log : log file updated at start and end
    save_results_intermediate : when True, the intermediate .dot file is kept
    overwrite : when False and the PNG already exists, nothing is recomputed
    """

    # Constants
    EXT_DOT = ".dot"
    EXT_PNG = ".png"
    GRAPH_NAME = "graph_LCZ"

    # Log update
    starting_event = "displayTree() : Graph creation starting : "
    timeLine(path_time_log, starting_event)

    graph_label = os.path.splitext(os.path.basename(name_file_graph))[0]
    out_dir = os.path.dirname(name_file_graph)

    dot_file = out_dir + os.sep + graph_label + EXT_DOT
    if os.path.isfile(dot_file):
        removeFile(dot_file)
    png_file = out_dir + os.sep + graph_label + EXT_PNG

    # If the output PNG already exists and overwriting is disabled, stop here
    png_exists = os.path.isfile(png_file)
    if png_exists and not overwrite:
        print(bold + yellow + "displayTree() : " + endC +
              "Output png file %s already done : no actualisation" %
              (png_file) + endC)
        return
    # Otherwise remove any stale PNG before rendering
    if png_exists:
        try:
            removeFile(png_file)
        except Exception:
            pass  # if removal fails, assume the file is absent and carry on

    # Graph-wide rendering parameters
    graph = pgv.AGraph(name=GRAPH_NAME, directed=True)
    graph.graph_attr.update(outputorder='edgesfirst', label=graph_label,
                            ratio='compress', rankdir='TB')
    graph.node_attr.update(shape='ellipse', fixedsize='false',
                           fontsize='8', style='filled')
    # net edge style: vee arrowheads, size 2 (overrides the default 'open')
    graph.edge_attr.update(color='lightslategray', style='etlinewidth(2)',
                           arrowhead='vee', arrowsize='2')

    # Root node, gray-filled, labelled with the origin name
    root_id = 0
    graph.add_node(root_id)
    root = graph.get_node(root_id)
    root.attr['fillcolor'] = 'gray52'
    root.attr['label'] = origin_name + '\n'

    # Walk the tree, adding one node per element
    graph = createDisplayGraph(origin_tree, root_id, graph, debug)

    # Dump the reversed graph to the .dot file
    graph_reverse = graph.reverse()
    graph_reverse.write(dot_file)

    # Silence Graphviz warning messages during rendering
    warnings.simplefilter('ignore', RuntimeWarning)
    graph = pgv.AGraph(name=GRAPH_NAME)

    # Convert the .dot file into a PNG image
    graph.read(dot_file)
    graph.layout(prog='dot')  # layout with default (neato)
    graph.draw(png_file, format='png', prog='dot')

    # Intermediate data removal (the .dot file is deliberately kept)
    if not save_results_intermediate:
        pass

    # Log update
    ending_event = "displayTree() : Graph creation ending : "
    timeLine(path_time_log, ending_event)

    return
def estimateQualityMns(image_input,
                       vector_cut_input,
                       vector_sample_input_list,
                       vector_sample_points_input,
                       raster_input_dico,
                       vector_output,
                       no_data_value,
                       path_time_log,
                       format_raster='GTiff',
                       epsg=2154,
                       format_vector='ESRI Shapefile',
                       extension_raster=".tif",
                       extension_vector=".shp",
                       save_results_intermediate=False,
                       overwrite=True):
    """
    Estimate the altimetric quality of a DSM ("MNS").

    Reference height points are taken either from an input point layer
    (vector_sample_points_input) or rebuilt from the terminal points of
    reference line vectors (vector_sample_input_list). The DSM value is
    sampled under every point, and an output point vector is written with,
    per point: id, altimetric precision, reference height, DSM height and
    their difference, plus one field per auxiliary raster of
    raster_input_dico. The output .dbf is finally converted to .csv.

    image_input : DSM raster to evaluate
    vector_cut_input : optional study-area vector (None/"" => derived from the image footprint)
    vector_sample_input_list : list of reference line vectors (used when no point file is given)
    vector_sample_points_input : optional reference point vector
    raster_input_dico : {raster_path : [[field_name, ...], [thr_min, thr_max]]};
                        auxiliary rasters sampled at each point, the optional
                        thresholds drop points outside [thr_min, thr_max]
    vector_output : output point vector file
    no_data_value : nodata value used when cutting the rasters
    path_time_log : log file updated at start and end
    Remaining keyword arguments select formats/extensions/EPSG and the usual
    save-intermediate / overwrite behaviour.
    """

    # Log update
    starting_event = "estimateQualityMns() : Masks creation starting : "
    timeLine(path_time_log, starting_event)

    print(endC)
    print(bold + green + "## START : CREATE HEIGHT POINTS FILE FROM MNS" + endC)
    print(endC)

    if debug >= 2:
        print(bold + green + "estimateQualityMns() : Variables dans la fonction" + endC)
        print(cyan + "estimateQualityMns() : " + endC + "image_input : " + str(image_input) + endC)
        print(cyan + "estimateQualityMns() : " + endC + "vector_cut_input : " + str(vector_cut_input) + endC)
        print(cyan + "estimateQualityMns() : " + endC + "vector_sample_input_list : " + str(vector_sample_input_list) + endC)
        print(cyan + "estimateQualityMns() : " + endC + "vector_sample_points_input : " + str(vector_sample_points_input) + endC)
        print(cyan + "estimateQualityMns() : " + endC + "raster_input_dico : " + str(raster_input_dico) + endC)
        print(cyan + "estimateQualityMns() : " + endC + "vector_output : " + str(vector_output) + endC)
        # BUGFIX: the endC terminator was missing on the line below
        print(cyan + "estimateQualityMns() : " + endC + "no_data_value : " + str(no_data_value) + endC)
        print(cyan + "estimateQualityMns() : " + endC + "path_time_log : " + str(path_time_log) + endC)
        print(cyan + "estimateQualityMns() : " + endC + "epsg  : " + str(epsg) + endC)
        print(cyan + "estimateQualityMns() : " + endC + "format_raster : " + str(format_raster) + endC)
        print(cyan + "estimateQualityMns() : " + endC + "format_vector : " + str(format_vector) + endC)
        print(cyan + "estimateQualityMns() : " + endC + "extension_raster : " + str(extension_raster) + endC)
        print(cyan + "estimateQualityMns() : " + endC + "extension_vector : " + str(extension_vector) + endC)
        print(cyan + "estimateQualityMns() : " + endC + "save_results_intermediate : " + str(save_results_intermediate) + endC)
        print(cyan + "estimateQualityMns() : " + endC + "overwrite : " + str(overwrite) + endC)

    # Constants
    EXT_DBF = '.dbf'
    EXT_CSV = '.csv'

    CODAGE = "uint16"

    SUFFIX_STUDY = '_study'
    SUFFIX_CUT = '_cut'
    SUFFIX_TEMP = '_temp'
    SUFFIX_CLEAN = '_clean'
    SUFFIX_SAMPLE = '_sample'

    ATTRIBUTE_ID = "ID"
    ATTRIBUTE_Z_INI = "Z_INI"
    ATTRIBUTE_Z_FIN = "Z_FIN"
    ATTRIBUTE_PREC_ALTI = "PREC_ALTI"
    ATTRIBUTE_Z_REF = "Z_Ref"
    ATTRIBUTE_Z_MNS = "Z_Mns"
    ATTRIBUTE_Z_DELTA = "Z_Delta"

    # Negative buffer (map units) eroding the study area to drop edge points
    ERODE_EDGE_POINTS = -1.0

    # Error codes / plausibility bounds for the reference heights
    ERROR_VALUE = -99.0
    ERROR_MIN_VALUE = -9999
    ERROR_MAX_VALUE = 9999

    # STEP 0 : PREPARE THE INTERMEDIATE FILES

    # If the output file exists and overwriting is disabled, stop here
    check = os.path.isfile(vector_output)
    if check and not overwrite:
        print(cyan + "estimateQualityMns() : " + bold + yellow + "Create  file %s already exist : no actualisation" % (vector_output) + endC)
        return

    if os.path.isfile(os.path.splitext(vector_output)[0] + EXT_CSV):
        removeFile(os.path.splitext(vector_output)[0] + EXT_CSV)

    repertory_output = os.path.dirname(vector_output)
    base_name = os.path.splitext(os.path.basename(vector_output))[0]

    vector_output_temp = repertory_output + os.sep + base_name + SUFFIX_TEMP + extension_vector
    raster_study = repertory_output + os.sep + base_name + SUFFIX_STUDY + extension_raster
    vector_study = repertory_output + os.sep + base_name + SUFFIX_STUDY + extension_vector
    vector_study_clean = repertory_output + os.sep + base_name + SUFFIX_STUDY + SUFFIX_CLEAN + extension_vector
    image_cut = repertory_output + os.sep + base_name + SUFFIX_CUT + extension_raster
    vector_sample_temp = repertory_output + os.sep + base_name + SUFFIX_SAMPLE + SUFFIX_TEMP + extension_vector
    vector_sample_temp_clean = repertory_output + os.sep + base_name + SUFFIX_SAMPLE + SUFFIX_TEMP + SUFFIX_CLEAN + extension_vector

    # Cut-file path for each auxiliary raster
    raster_cut_dico = {}
    for raster_input in raster_input_dico:
        base_name_raster = os.path.splitext(os.path.basename(raster_input))[0]
        raster_cut = repertory_output + os.sep + base_name_raster + SUFFIX_CUT + extension_raster
        raster_cut_dico[raster_input] = raster_cut
        if os.path.exists(raster_cut):
            removeFile(raster_cut)

    # STEP 1 : DEFINE THE STUDY-AREA VECTOR

    if (not vector_cut_input is None) and (vector_cut_input != "") and (os.path.isfile(vector_cut_input)):
        cutting_action = True
        vector_study = vector_cut_input
    else:
        # No cut vector supplied: derive the study area from the image footprint
        cutting_action = False
        createVectorMask(image_input, vector_study)

    # STEP 2 : CUT THE DSM RASTER TO THE STUDY AREA IF NEEDED

    if cutting_action:
        # Pixel sizes (x, y) of the reference DSM
        pixel_size_x, pixel_size_y = getPixelWidthXYImage(image_input)

        # Remove a possibly existing output file
        if os.path.exists(image_cut):
            removeFile(image_cut)

        # Cut command
        if not cutImageByVector(vector_study, image_input, image_cut, pixel_size_x, pixel_size_y, no_data_value, 0, format_raster, format_vector):
            print(cyan + "estimateQualityMns() : " + bold + red + "Une erreur c'est produite au cours du decoupage de l'image : " + image_input + endC, file=sys.stderr)
            # BUGFIX: a bare `raise` outside an except clause raises
            # "RuntimeError: No active exception to re-raise"; raise explicitly
            raise NameError(cyan + "estimateQualityMns() : " + bold + red + "Une erreur c'est produite au cours du decoupage de l'image : " + image_input + endC)

        if debug >= 2:
            print(cyan + "estimateQualityMns() : " + bold + green + "DECOUPAGE DU RASTER %s AVEC LE VECTEUR %s" % (image_input, vector_study) + endC)
    else:
        image_cut = image_input

    # Geometry of the (possibly cut) reference DSM
    # BUGFIX: computed BEFORE cutting the auxiliary rasters below; pixel_size_x
    # and pixel_size_y were previously undefined when cutting_action was False
    pixel_size_x, pixel_size_y = getPixelWidthXYImage(image_cut)
    cols, rows, bands = getGeometryImage(image_cut)
    xmin, xmax, ymin, ymax = getEmpriseImage(image_cut)

    # Cut every auxiliary raster of the dictionary to the study area
    for raster_input in raster_input_dico:
        raster_cut = raster_cut_dico[raster_input]
        if not cutImageByVector(vector_study, raster_input, raster_cut, pixel_size_x, pixel_size_y, no_data_value, 0, format_raster, format_vector):
            raise NameError(cyan + "estimateQualityMns() : " + bold + red + "Une erreur c'est produite au cours du decoupage du raster : " + raster_input + endC)

    if debug >= 3:
        print("Geometrie Image : ")
        print("  cols = " + str(cols))
        print("  rows = " + str(rows))
        print("  xmin = " + str(xmin))
        print("  xmax = " + str(xmax))
        print("  ymin = " + str(ymin))
        print("  ymax = " + str(ymax))
        print("  pixel_size_x = " + str(pixel_size_x))
        print("  pixel_size_y = " + str(pixel_size_y))
        print("\n")

    # Map-coordinate / attribute dictionary of the sampled points
    points_random_value_dico = {}
    # Point coordinates in raw image-matrix (col, row) format
    points_coordonnees_image_list = []

    # BUGFIX: always defined so the cleanup step cannot raise a NameError
    # when the point-sample branch below is taken
    vector_sample_input_cut_list = []

    # Either use the given sample-points file, or rebuild points from the
    # terminal points of the reference line vectors
    if (vector_sample_points_input is None) or (vector_sample_points_input == ""):

        # STEP 3 : CUT THE REFERENCE VECTORS BY THE STUDY AREA, MERGE THEM AND
        #          READ THE LINES' TERMINAL POINT COORDINATES AND HEIGHTS

        # Cut each reference vector with the study-area vector
        for vector_sample in vector_sample_input_list:
            vector_name = os.path.splitext(os.path.basename(vector_sample))[0]
            vector_sample_cut = repertory_output + os.sep + vector_name + SUFFIX_CUT + extension_vector
            vector_sample_input_cut_list.append(vector_sample_cut)
        cutoutVectors(vector_study, vector_sample_input_list, vector_sample_input_cut_list, format_vector)

        # Merge the cut reference vectors
        fusionVectors(vector_sample_input_cut_list, vector_sample_temp, format_vector)

        # Attribute columns read at each line end
        names_column_start_point_list = [ATTRIBUTE_ID, ATTRIBUTE_Z_INI, ATTRIBUTE_PREC_ALTI]
        names_column_end_point_list = [ATTRIBUTE_ID, ATTRIBUTE_Z_FIN, ATTRIBUTE_PREC_ALTI]
        fields_list = [ATTRIBUTE_ID, ATTRIBUTE_PREC_ALTI, ATTRIBUTE_Z_INI, ATTRIBUTE_Z_FIN]

        # Explode multi-lines, then collect the terminal points of every line
        multigeometries2geometries(vector_sample_temp, vector_sample_temp_clean, fields_list, "MULTILINESTRING", format_vector)
        points_coordinates_dico = readVectorFileLinesExtractTeminalsPoints(vector_sample_temp_clean, names_column_start_point_list, names_column_end_point_list, format_vector)

    else:
        # STEP 3_BIS : CUT THE SAMPLE-POINT VECTOR BY THE STUDY AREA AND READ
        #              THE SAMPLE COORDINATES DIRECTLY FROM THE POINT FILE

        cutVectorAll(vector_study, vector_sample_points_input, vector_sample_temp, format_vector)
        points_coordinates_dico = readVectorFilePoints(vector_sample_temp, format_vector)

    # STEP 4 : PREPARE THE POINT DATA
    # NOTE(review): the loops further below index points_random_value_dico with
    # range(len(...)), i.e. they assume its keys are the integers 0..n-1 —
    # confirm against readVectorFilePoints / readVectorFileLinesExtractTeminalsPoints

    for index_key in points_coordinates_dico:
        # Map coordinates and attributes of the point
        coord_info_list = points_coordinates_dico[index_key]
        coor_x = coord_info_list[0]
        coor_y = coord_info_list[1]
        attribut_dico = coord_info_list[2]

        # Point position in image-matrix coordinates, clamped to the image
        pos_x = int(round((coor_x - xmin) / abs(pixel_size_x)) - 1)
        pos_y = int(round((ymax - coor_y) / abs(pixel_size_y)) - 1)

        if pos_x < 0:
            pos_x = 0
        if pos_x >= cols:
            pos_x = cols - 1
        if pos_y < 0:
            pos_y = 0
        if pos_y >= rows:
            pos_y = rows - 1

        coordonnees_list = [pos_x, pos_y]
        points_coordonnees_image_list.append(coordonnees_list)

        # Reference height: Z_FIN wins over Z_INI when both are present
        value_ref = 0.0
        if ATTRIBUTE_Z_INI in attribut_dico.keys():
            value_ref = float(attribut_dico[ATTRIBUTE_Z_INI])
        if ATTRIBUTE_Z_FIN in attribut_dico.keys():
            value_ref = float(attribut_dico[ATTRIBUTE_Z_FIN])

        precision_alti = 0.0
        if ATTRIBUTE_PREC_ALTI in attribut_dico.keys():
            precision_alti = float(attribut_dico[ATTRIBUTE_PREC_ALTI])

        point_attr_dico = {ATTRIBUTE_ID: index_key,
                           ATTRIBUTE_Z_REF: value_ref,
                           ATTRIBUTE_PREC_ALTI: precision_alti,
                           ATTRIBUTE_Z_MNS: 0.0,
                           ATTRIBUTE_Z_DELTA: 0.0}

        # One additional field per auxiliary raster, filled in step 5
        for raster_input in raster_input_dico:
            field_name = raster_input_dico[raster_input][0][0]
            point_attr_dico[field_name] = 0.0

        points_random_value_dico[index_key] = [[coor_x, coor_y], point_attr_dico]

    # STEP 5 : READ THE HEIGHT VALUES FROM THE DSM AND THE AUXILIARY RASTERS

    values_height_list = getPixelsValueListImage(image_cut, points_coordonnees_image_list)
    values_others_dico = {}
    for raster_input in raster_input_dico:
        raster_cut = raster_cut_dico[raster_input]
        values_list = getPixelsValueListImage(raster_cut, points_coordonnees_image_list)
        values_others_dico[raster_input] = values_list

    for i in range(len(points_random_value_dico)):
        value_mns = values_height_list[i]
        value_ref = points_random_value_dico[i][1][ATTRIBUTE_Z_REF]

        points_random_value_dico[i][1][ATTRIBUTE_Z_MNS] = float(value_mns)
        precision_alti = points_random_value_dico[i][1][ATTRIBUTE_PREC_ALTI]
        points_random_value_dico[i][1][ATTRIBUTE_PREC_ALTI] = float(precision_alti)
        value_diff = value_ref - value_mns
        points_random_value_dico[i][1][ATTRIBUTE_Z_DELTA] = float(value_diff)

        for raster_input in raster_input_dico:
            field_name = raster_input_dico[raster_input][0][0]
            value_other = values_others_dico[raster_input][i]
            points_random_value_dico[i][1][field_name] = float(value_other)

    # STEP 6 : CREATE A POINT VECTOR WITH COORDINATES, REFERENCE AND DSM HEIGHTS

    # Drop points whose reference value is an error code or whose auxiliary
    # raster value falls outside the configured thresholds
    points_random_value_dico_clean = {}
    for i in range(len(points_random_value_dico)):
        value_ref = points_random_value_dico[i][1][ATTRIBUTE_Z_REF]
        if value_ref != ERROR_VALUE and value_ref > ERROR_MIN_VALUE and value_ref < ERROR_MAX_VALUE:

            points_is_valid = True
            for raster_input in raster_input_dico:
                if len(raster_input_dico[raster_input]) > 1 and len(raster_input_dico[raster_input][1]) > 1:
                    threshold_min = float(raster_input_dico[raster_input][1][0])
                    threshold_max = float(raster_input_dico[raster_input][1][1])
                    field_name = raster_input_dico[raster_input][0][0]
                    value_raster = float(points_random_value_dico[i][1][field_name])
                    if value_raster < threshold_min or value_raster > threshold_max:
                        points_is_valid = False

            if points_is_valid:
                points_random_value_dico_clean[i] = points_random_value_dico[i]

    # Attributes of the result file
    attribute_dico = {ATTRIBUTE_ID: ogr.OFTInteger,
                      ATTRIBUTE_PREC_ALTI: ogr.OFTReal,
                      ATTRIBUTE_Z_REF: ogr.OFTReal,
                      ATTRIBUTE_Z_MNS: ogr.OFTReal,
                      ATTRIBUTE_Z_DELTA: ogr.OFTReal}

    for raster_input in raster_input_dico:
        field_name = raster_input_dico[raster_input][0][0]
        attribute_dico[field_name] = ogr.OFTReal

    createPointsFromCoordList(attribute_dico, points_random_value_dico_clean, vector_output_temp, epsg, format_vector)

    # Remove the points close to the study-area border (negative buffer)
    bufferVector(vector_study, vector_study_clean, ERODE_EDGE_POINTS, "", 1.0, 10, format_vector)
    cutVectorAll(vector_study_clean, vector_output_temp, vector_output, True, format_vector)

    # STEP 7 : CONVERT THE .DBF FILE TO .CSV
    dbf_file = repertory_output + os.sep + base_name + EXT_DBF
    csv_file = repertory_output + os.sep + base_name + EXT_CSV

    if debug >= 2:
        print(cyan + "estimateQualityMns() : " + bold + green + "Conversion du fichier DBF %s en fichier CSV %s" % (dbf_file, csv_file) + endC)

    convertDbf2Csv(dbf_file, csv_file)

    # STEP 8 : REMOVE THE USELESS INTERMEDIATE FILES

    if not save_results_intermediate:
        if cutting_action:
            if os.path.isfile(image_cut):
                removeFile(image_cut)
        else:
            if os.path.isfile(vector_study):
                removeVectorFile(vector_study)

        for raster_input in raster_input_dico:
            raster_cut = raster_cut_dico[raster_input]
            if os.path.isfile(raster_cut):
                removeFile(raster_cut)

        if os.path.isfile(vector_output_temp):
            removeVectorFile(vector_output_temp)

        if os.path.isfile(vector_study_clean):
            removeVectorFile(vector_study_clean)

        if os.path.isfile(vector_sample_temp):
            removeVectorFile(vector_sample_temp)

        if os.path.isfile(vector_sample_temp_clean):
            removeVectorFile(vector_sample_temp_clean)

        for vector_file in vector_sample_input_cut_list:
            if os.path.isfile(vector_file):
                removeVectorFile(vector_file)

    print(bold + green + "## END : CREATE HEIGHT POINTS FILE FROM MNSE" + endC)

    # Log update
    ending_event = "estimateQualityMns() : Masks creation ending : "
    timeLine(path_time_log, ending_event)

    return
# Ejemplo n.º 28
# 0
def proposeReallocationMicroClass(shape_file_input, s_suspect_microclass1, s_suspect_microclass2, s_performance_list, missed_micro_list, suspect_micro_list, table_output_file, class_labels_list, rate_area_min, threshold_delete_perf, threshold_alert_perf, format_vector):
    """
    Propose a treatment for each suspect microclass and build the reallocation
    table text (header + one "MICROCLASSE;TRAITEMENT" line per decision).

    Treatments emitted per microclass:
        -1      : delete the microclass
        <macro> : reallocate the microclass to macroclass <macro>
        A       : alert, manual check required
        D       : microclass disappeared (present in input, absent in output)

    The macroclass of a microclass label is obtained by flooring the label to
    the hundred: (label // 100) * 100 (e.g. 11203 -> 11200). NOTE(review):
    the original code used true division "/", a Python 2 -> 3 port artifact
    that made (x/100)*100 == x and broke rules 1, 2.1.1 and 3.1; fixed here
    with floor division "//".

    Parameters:
        shape_file_input : vector file of training samples (field "ID")
        s_suspect_microclass1 : centroid-analysis data, first pass
        s_suspect_microclass2 : centroid-analysis data, second pass
        s_performance_list : performance value of each suspect microclass
        missed_micro_list : microclasses present in input but missing in output
        suspect_micro_list : labels of the suspect microclasses
        table_output_file : output table path (also used to derive the
                            temporary SQLite DB path when is_spatialite)
        class_labels_list : list of macroclass labels
        rate_area_min : minimal authorized area ratio (rule 1 threshold)
        threshold_delete_perf : performance below which the class is deleted
        threshold_alert_perf : performance below which centroid rules apply
        format_vector : OGR driver name of the vector files

    Returns:
        str : the reallocation table content.
    """

    print(cyan + "proposeReallocationMicroClass() : " + bold + green + "Start propose reallocation microclass ...\n" + endC)

    # Spatialite backend disabled: areas are computed directly from the shapefile.
    is_spatialite = False

    if debug >= 3:
        print(cyan + "proposeReallocationMicroClass() : " + endC + "shape_file_input : " +  str(shape_file_input))
        print(cyan + "proposeReallocationMicroClass() : " + endC + "s_suspect_microclass1 : " +  str(s_suspect_microclass1))
        print(cyan + "proposeReallocationMicroClass() : " + endC + "s_suspect_microclass2 : " +  str(s_suspect_microclass2))
        print(cyan + "proposeReallocationMicroClass() : " + endC + "suspect_micro_list : " + str(suspect_micro_list))
        print(cyan + "proposeReallocationMicroClass() : " + endC + "s_performance_list : " + str(s_performance_list))
        print(cyan + "proposeReallocationMicroClass() : " + endC + "format_vector : " + str(format_vector))

    # Constants
    HEADER_TABLEAU_MODIF = "MICROCLASSE;TRAITEMENT\n"
    EXT_SQLITE = ".sqlite"

    # Variables
    text_output = HEADER_TABLEAU_MODIF
    suspect_micro_end_list = []

    if is_spatialite :
        # Create a database holding a table of the training samples
        repertory_output = os.path.dirname(table_output_file)
        table_input_name = os.path.splitext(os.path.basename(table_output_file))[0]
        bd_name = repertory_output + os.sep + table_input_name + EXT_SQLITE

        # Remove the temporary database if it still exists
        try:
            removeFile(bd_name)
        except Exception:
            # Ignore the exception raised when the file does not exist
            pass

        createBDtableModify(shape_file_input, table_input_name, bd_name)

        average_area_list = computeAverageAreaMacro(repertory_output, class_labels_list, table_input_name, bd_name)
    else :
        # Average area of each macroclass, computed straight from the shapefile
        average_area_list = []
        for class_label in class_labels_list:
            average_area = getAverageAreaClass(shape_file_input, "ID", class_label, format_vector)
            average_area_list.append(average_area)
    if debug >= 1:
        print(cyan + "proposeReallocationMicroClass() : " + endC + "average_area_list : " +  str(average_area_list))

    # Minimal authorized area per macroclass (rule 1 threshold)
    area_authorized_macro_list = []
    for area in average_area_list:
        area_authorized_macro_list.append(rate_area_min * area)
    if debug >= 1:
        print(cyan + "proposeReallocationMicroClass() : " + endC + "area_authorized_macro_list : " +  str(area_authorized_macro_list))

    # Retrieve the centroid-analysis data
    micro_suspect1_list, micro_pp1_list, micro_suspect2_list, micro_pp2_list = getDataCentroids(s_suspect_microclass1, s_suspect_microclass2, class_labels_list)

    # Propose a treatment for each suspect microclass.
    # First pass: use the confusion-matrix information combined with the centroid analysis.
    for i in range(len(s_performance_list)):

        if is_spatialite :
            area_micro = computeAreaMicro(repertory_output, table_input_name, bd_name, suspect_micro_list[i])
        else :
            area_micro = getAreaPolygon(shape_file_input,"ID", suspect_micro_list[i],format_vector )
        if debug >= 1:
            print("Traiter %d, surface %f" %(suspect_micro_list[i], area_micro))

        # Rule 1: delete microclasses whose area is below the macroclass minimum.
        # Floor division derives the macroclass label from the microclass label.
        if area_micro < area_authorized_macro_list[findPositionList(class_labels_list, (suspect_micro_list[i] // 100) * 100)]:
            if debug >= 1:
                print("Regle 1 - Supprimer : %d Surf : %d" %(suspect_micro_list[i], area_micro))
            text_output += "%d;-1\n" %(suspect_micro_list[i])
            suspect_micro_end_list.append(suspect_micro_list[i])
        else:
            if s_performance_list[i] < threshold_delete_perf: # Rule 2.1: performance too low -> delete
                if debug >= 1:
                    print("Regle 2.1 - Supprimer : %d Seuil : %f" %(suspect_micro_list[i], s_performance_list[i]))
                text_output += "%d;-1\n" %(suspect_micro_list[i])
                suspect_micro_end_list.append(suspect_micro_list[i])
            elif s_performance_list[i] < threshold_alert_perf:
                pos1 = findPositionList(micro_suspect1_list, suspect_micro_list[i])
                pos2 = findPositionList(micro_suspect2_list, suspect_micro_list[i])
                if (pos1 == -1 or pos2 == -1) and pos1 != pos2: # Rule 2.1.2: suspect in only one centroid pass -> delete
                    if debug >= 1:
                        print("Regle 2.1.2 - Supprimer : %d pos1 : %d pos2 : %d" %(suspect_micro_list[i], pos1, pos2))
                    text_output += "%d;-1\n" %(suspect_micro_list[i])
                    suspect_micro_end_list.append(suspect_micro_list[i])
                if pos1 != -1 and pos2 != -1:
                    # Rule 2.1.1: both passes point to the same macroclass -> reallocate to it
                    if (micro_pp1_list[pos1] // 100) == (micro_pp2_list[pos2] // 100):
                        if debug >= 1:
                            print("Regle 2.1.1 - Reaffecter : %d %d" %(suspect_micro_list[i], (micro_pp1_list[pos1] // 100) * 100))
                        text_output += "%d;%d\n" %(suspect_micro_list[i], (micro_pp1_list[pos1] // 100) * 100)
                        suspect_micro_end_list.append(suspect_micro_list[i])
                    else: # Rule 2.1.2: the two passes disagree -> delete
                        if debug >= 1:
                            print("Regle 2.1.2 - Supprimer : %d" %(suspect_micro_list[i]))
                        text_output += "%d;-1\n" %(suspect_micro_list[i])
                        suspect_micro_end_list.append(suspect_micro_list[i])
    # Second pass: centroid analysis for microclasses with good performance but
    # closer to a microclass issued from another macroclass.
    for i in range(len(micro_suspect1_list)):
        position = findPositionList(suspect_micro_end_list, micro_suspect1_list[i])
        if position == -1:
            pos = findPositionList(micro_suspect2_list, micro_suspect1_list[i])
            if pos != -1:
                if (micro_pp1_list[i] // 100) * 100 == (micro_pp2_list[pos] // 100) * 100: # Rule 3.1: same macroclass -> alert
                    if debug >= 1:
                        print("Regle 3.1 - Alert : %d" %(micro_suspect1_list[i]))
                    text_output += "%d;A\n" %(micro_suspect1_list[i])
                else: # Rule 3.2: different macroclasses -> alert
                    if debug >= 1:
                        print("Regle 3.2 - Alert : %d" %(micro_suspect1_list[i]))
                    text_output += "%d;A\n" %(micro_suspect1_list[i])
                suspect_micro_end_list.append(micro_suspect1_list[i])
            else: # Rule 3.2
                if debug >= 1:
                    print("Regle 3.2 - Alert : %d" %(micro_suspect1_list[i]))
                text_output += "%d;A\n" %(micro_suspect1_list[i])
                suspect_micro_end_list.append(micro_suspect1_list[i])

    for i in range(len(micro_suspect2_list)):
        position = findPositionList(suspect_micro_end_list, micro_suspect2_list[i])
        if position == -1: # Rule 3.2
            if debug >= 1:
                print("Regle 3.2 - Alert : %d" %(micro_suspect2_list[i]))
            text_output += "%d;A\n" %(micro_suspect2_list[i])
            suspect_micro_end_list.append(micro_suspect2_list[i])

    # Report microclasses that disappeared (present in input, gone in output)
    for missed in missed_micro_list:
        text_output += "%s;D\n" %(missed)

    if is_spatialite :
        # Remove the temporary database
        removeFile(bd_name)

    print(cyan + "proposeReallocationMicroClass() : " + bold + green + "End propose reallocation  microclass \n" + endC)
    return text_output
def cutRasterSamples(image_input,
                     vector_input,
                     image_output,
                     reference_image,
                     epsg,
                     no_data_value,
                     path_time_log,
                     superposition=False,
                     format_raster='GTiff',
                     format_vector='ESRI Shapefile',
                     extension_raster=".tif",
                     save_results_intermediate=False,
                     overwrite=True):
    """
    Cut a raster with a cutline vector and, optionally, re-register the result
    onto a reference image (OTB Superimpose) followed by re-binarisation.

    Parameters:
        image_input : input raster to cut
        vector_input : cutline vector (extent)
        image_output : output raster path
        reference_image : reference image used only when superposition is True
        epsg : projection EPSG code
        no_data_value : nodata value of the rasters
        path_time_log : log file for timeLine() events
        superposition : if True, check/fix geometric superposition with the
                        reference image, then binarise the result again
        format_raster : GDAL raster driver name
        format_vector : OGR vector driver name
        extension_raster : raster file extension for temporary files
        save_results_intermediate : keep intermediate files if True
        overwrite : present for API consistency (not used in this function)

    Raises:
        NameError : if the cut, superimpose or binarisation step fails.
    """

    # Update log
    starting_event = "cutRasterSamples() : Masks creation starting : "
    timeLine(path_time_log, starting_event)

    print(endC)
    print(bold + green + "## START : CUTTING IMAGE" + endC)
    print(endC)

    if debug >= 2:
        print(bold + green +
              "cutRasterSamples() : Variables dans la fonction" + endC)
        print(cyan + "cutRasterSamples() : " + endC + "image_input : " +
              str(image_input) + endC)
        print(cyan + "cutRasterSamples() : " + endC + "vector_input : " +
              str(vector_input) + endC)
        print(cyan + "cutRasterSamples() : " + endC + "image_output : " +
              str(image_output) + endC)
        print(cyan + "cutRasterSamples() : " + endC + "reference_image : " +
              str(reference_image) + endC)
        print(cyan + "cutRasterSamples() : " + endC + "epsg : " + str(epsg) +
              endC)
        print(cyan + "cutRasterSamples() : " + endC + "no_data_value : " +
              str(no_data_value) + endC)
        print(cyan + "cutRasterSamples() : " + endC + "path_time_log : " +
              str(path_time_log) + endC)
        print(cyan + "cutRasterSamples() : " + endC + "superposition : " +
              str(superposition) + endC)
        print(cyan + "cutRasterSamples() : " + endC + "format_raster : " +
              str(format_raster) + endC)
        print(cyan + "cutRasterSamples() : " + endC + "format_vector : " +
              str(format_vector) + endC)
        print(cyan + "cutRasterSamples() : " + endC + "extension_raster : " +
              str(extension_raster) + endC)
        print(cyan + "cutRasterSamples() : " + endC +
              "save_results_intermediate : " + str(save_results_intermediate) +
              endC)
        print(cyan + "cutRasterSamples() : " + endC + "overwrite : " +
              str(overwrite) + endC)

    # STEP 0 : PREPARATION OF INTERMEDIATE FILES

    SAMPLE_MASK_SUFFIX = "_mask"
    CODAGE = "uint8"

    repertory_output = os.path.dirname(image_output)

    if superposition:  # Geometric superposition with the satellite image is checked
        output_mask_temp01 = repertory_output + os.sep + os.path.splitext(
            os.path.basename(image_input)
        )[0] + SAMPLE_MASK_SUFFIX + "_temp01" + extension_raster
        output_mask_temp02 = repertory_output + os.sep + os.path.splitext(
            os.path.basename(image_input)
        )[0] + SAMPLE_MASK_SUFFIX + "_temp02" + extension_raster
    else:  # No superposition check: cut straight into the final output
        output_mask_temp01 = image_output

    # STEP 1 : CUT THE RASTER WITH THE EXTENT VECTOR

    # Cutting function
    if not cutImageByVector(vector_input, image_input, output_mask_temp01,
                            None, None, no_data_value, epsg, format_raster,
                            format_vector):
        raise NameError(
            cyan + "cutRasterSamples() : " + bold + red +
            "!!! Une erreur c'est produite au cours du decoupage de l'image : "
            + image_input + ". Voir message d'erreur." + endC)

    if debug >= 2:
        print(cyan + "cutRasterSamples() : " + bold + green +
              "DECOUPAGE DU RASTER %s AVEC LE VECTEUR %s" %
              (image_input, vector_input) + endC)

    # STEP 2 : SUPERIMPOSE THE CUT FILE ONTO THE REFERENCE FILE

    if superposition:  # Geometric superposition check requested
        # Re-register the geometry onto the reference image
        # NOTE(review): command built by string concatenation and run through a
        # shell; paths containing spaces or shell metacharacters will break it.
        command = "otbcli_Superimpose -inr " + reference_image + " -inm " + output_mask_temp01 + " -out " + output_mask_temp02
        exit_code = os.system(command)
        if exit_code != 0:
            raise NameError(
                cyan + "cutRasterSamples() : " + bold + red +
                "!!! Une erreur c'est produite au cours du superimpose de l'image : "
                + output_mask_temp01 + ". Voir message d'erreur." + endC)

        # Re-binarise the new mask (no longer binary if the geometry was resampled)
        expression_binarisation = "\"(im1b1 < %f? 0 : 1)\"" % (0.5)
        command = "otbcli_BandMath -il " + output_mask_temp02 + " -out " + image_output + " " + CODAGE + " -exp " + expression_binarisation
        exit_code = os.system(command)
        if exit_code != 0:
            raise NameError(
                cyan + "cutRasterSamples() : " + bold + red +
                "!!! Une erreur c'est produite au cours de la binarisation de l'image : "
                + output_mask_temp02 + ". Voir message d'erreur." + endC)

        if debug >= 2:
            print(cyan + "cutRasterSamples() : " + bold + green +
                  "SUPERIMPOSE ET FICHIER BINAIRE DU FICHIER %s" %
                  (image_input) + endC)
            print(command)

    # STEP 3 : REMOVE USELESS INTERMEDIATE FILES

    # Delete intermediate data
    if not save_results_intermediate:
        if superposition:
            removeFile(output_mask_temp01)
            removeFile(output_mask_temp02)

    print(endC)
    # Fixed typo in the end banner ("UTTING" -> "CUTTING", matching the START banner)
    print(bold + green + "## END : CUTTING IMAGE" + endC)
    print(endC)

    # Update log
    ending_event = "cutRasterSamples() : Masks creation ending : "
    timeLine(path_time_log, ending_event)

    return
def classesOfWaterHeights(input_flooded_areas_vector,
                          input_digital_elevation_model_file,
                          output_heights_classes_file,
                          output_heights_classes_vector,
                          heights_classes='0,0.5,1,1.5,2',
                          epsg=2154,
                          no_data_value=0,
                          format_raster='GTiff',
                          format_vector='ESRI Shapefile',
                          extension_raster='.tif',
                          extension_vector='.shp',
                          grass_gisbase=os.environ['GISBASE'],
                          grass_gisdb='GRASS_database',
                          grass_location='LOCATION',
                          grass_mapset='MAPSET',
                          path_time_log='',
                          save_results_intermediate=False,
                          overwrite=True):
    """
    Derive water-height classes inside a flooded area from a DEM.

    Pipeline (6 steps): sample the DEM along the flooded-area perimeter,
    triangulate those altitudes into a water-surface grid, subtract the DEM
    to get raw water heights, threshold the heights into classes, and
    optionally vectorize the classified raster.

    Parameters:
        input_flooded_areas_vector : polygon vector of the flooded extent
        input_digital_elevation_model_file : DEM raster
        output_heights_classes_file : output classified raster (if "", the
            path is derived from output_heights_classes_vector)
        output_heights_classes_vector : output classified vector ("" = skip
            step 6, no vectorisation)
        heights_classes : comma-separated class thresholds (map units of the
            DEM; presumably meters -- TODO confirm)
        epsg : projection EPSG code
        no_data_value : nodata value for output rasters
        format_raster / format_vector : GDAL / OGR driver names
        extension_raster / extension_vector : file extensions for temp files
        grass_gisbase, grass_gisdb, grass_location, grass_mapset : GRASS
            session parameters.  NOTE(review): the default
            os.environ['GISBASE'] is evaluated at import time and raises
            KeyError if GISBASE is not set in the environment.
        path_time_log : log file for timeLine() events
        save_results_intermediate : keep the temp directory if True
        overwrite : overwrite flag passed down to the helpers

    Returns:
        None.  Side effects: writes the output raster/vector, creates and
        (unless save_results_intermediate) deletes a temp directory.
    """

    if debug >= 3:
        print('\n' + bold + green +
              "Classes de hauteurs d'eau - Variables dans la fonction :" +
              endC)
        print(cyan + "    classesOfWaterHeights() : " + endC +
              "input_flooded_areas_vector : " +
              str(input_flooded_areas_vector) + endC)
        print(cyan + "    classesOfWaterHeights() : " + endC +
              "input_digital_elevation_model_file : " +
              str(input_digital_elevation_model_file) + endC)
        print(cyan + "    classesOfWaterHeights() : " + endC +
              "output_heights_classes_file : " +
              str(output_heights_classes_file) + endC)
        print(cyan + "    classesOfWaterHeights() : " + endC +
              "output_heights_classes_vector : " +
              str(output_heights_classes_vector) + endC)
        print(cyan + "    classesOfWaterHeights() : " + endC +
              "heights_classes : " + str(heights_classes) + endC)
        print(cyan + "    classesOfWaterHeights() : " + endC + "epsg : " +
              str(epsg) + endC)
        print(cyan + "    classesOfWaterHeights() : " + endC +
              "no_data_value : " + str(no_data_value) + endC)
        print(cyan + "    classesOfWaterHeights() : " + endC +
              "format_raster : " + str(format_raster) + endC)
        print(cyan + "    classesOfWaterHeights() : " + endC +
              "format_vector : " + str(format_vector) + endC)
        print(cyan + "    classesOfWaterHeights() : " + endC +
              "extension_raster : " + str(extension_raster) + endC)
        print(cyan + "    classesOfWaterHeights() : " + endC +
              "extension_vector : " + str(extension_vector) + endC)
        print(cyan + "    classesOfWaterHeights() : " + endC +
              "grass_gisbase : " + str(grass_gisbase) + endC)
        print(cyan + "    classesOfWaterHeights() : " + endC +
              "grass_gisdb : " + str(grass_gisdb) + endC)
        print(cyan + "    classesOfWaterHeights() : " + endC +
              "grass_location : " + str(grass_location) + endC)
        print(cyan + "    classesOfWaterHeights() : " + endC +
              "grass_mapset : " + str(grass_mapset) + endC)
        print(cyan + "    classesOfWaterHeights() : " + endC +
              "path_time_log : " + str(path_time_log) + endC)
        print(cyan + "    classesOfWaterHeights() : " + endC +
              "save_results_intermediate : " + str(save_results_intermediate) +
              endC)
        print(cyan + "    classesOfWaterHeights() : " + endC + "overwrite : " +
              str(overwrite) + endC + '\n')

    # Constants
    ENCODING_RASTER_FLOAT = 'float'
    ENCODING_RASTER_UINT8 = 'uint8'
    EXTENSION_RASTER_SAGA = '.sdat'
    # GRASS export does not accept spaces in the format name
    FORMAT_VECTOR_GRASS = format_vector.replace(' ', '_')
    SUFFIX_TEMP = '_temp'
    SUFFIX_LINES = '_lines'
    SUFFIX_POINTS = '_points'
    SUFFIX_ALTI = '_altitude'
    SUFFIX_CUT = '_cut'
    SUFFIX_RAW = '_raw_heights'
    INDEX_FIELD = 'idx'
    ALTI_FIELD = 'altitude'
    VECTORISATION = 'GRASS'

    # Update log
    starting_event = "classesOfWaterHeights() : Début du traitement : "
    timeLine(path_time_log, starting_event)

    print(cyan + "classesOfWaterHeights() : " + bold + green +
          "DEBUT DES TRAITEMENTS" + endC + '\n')

    # 'basename' variables
    flooded_areas_basename = os.path.splitext(
        os.path.basename(input_flooded_areas_vector))[0]
    digital_elevation_model_basename = os.path.splitext(
        os.path.basename(input_digital_elevation_model_file))[0]
    flooded_areas_lines_basename = flooded_areas_basename + SUFFIX_LINES
    flooded_areas_points_basename = flooded_areas_basename + SUFFIX_POINTS
    # Output basename/dirname come from whichever output path was provided
    if output_heights_classes_file != "":
        output_heights_classes_basename = os.path.splitext(
            os.path.basename(output_heights_classes_file))[0]
        output_dirname = os.path.dirname(output_heights_classes_file)
    else:
        output_heights_classes_basename = os.path.splitext(
            os.path.basename(output_heights_classes_vector))[0]
        output_dirname = os.path.dirname(output_heights_classes_vector)

    # Temporary-file paths
    temp_directory = output_dirname + os.sep + output_heights_classes_basename + SUFFIX_TEMP
    flooded_areas_lines = temp_directory + os.sep + flooded_areas_lines_basename + extension_vector
    flooded_areas_points = temp_directory + os.sep + flooded_areas_points_basename + extension_vector
    altitude_points = temp_directory + os.sep + flooded_areas_points_basename + SUFFIX_ALTI + extension_vector
    altitude_grid = temp_directory + os.sep + flooded_areas_basename + SUFFIX_ALTI + EXTENSION_RASTER_SAGA
    altitude_file = temp_directory + os.sep + flooded_areas_basename + SUFFIX_ALTI + SUFFIX_CUT + extension_raster
    digital_elevation_model_cut = temp_directory + os.sep + digital_elevation_model_basename + SUFFIX_CUT + extension_raster
    raw_heights = temp_directory + os.sep + flooded_areas_basename + SUFFIX_RAW + extension_raster
    heights_classes_temp = temp_directory + os.sep + output_heights_classes_basename + extension_raster
    if output_heights_classes_file == "":
        output_heights_classes_file = output_dirname + os.sep + output_heights_classes_basename + extension_raster

    # Clean up outputs from previous runs
    if debug >= 3:
        print(cyan + "classesOfWaterHeights() : " + endC +
              "Nettoyage des traitements précédents." + endC + '\n')
    removeFile(output_heights_classes_file)
    removeVectorFile(output_heights_classes_vector,
                     format_vector=format_vector)
    cleanTempData(temp_directory)

    #############
    # Step 0/6 # Preparation of the processing
    #############

    print(cyan + "classesOfWaterHeights() : " + bold + green +
          "ETAPE 0/6 - Début de la préparation des traitements." + endC + '\n')

    # Set up GRASS with the DEM's extent and resolution
    xmin, xmax, ymin, ymax = getEmpriseImage(
        input_digital_elevation_model_file)
    pixel_width, pixel_height = getPixelWidthXYImage(
        input_digital_elevation_model_file)
    grass_gisbase, grass_gisdb, grass_location, grass_mapset = initializeGrass(
        temp_directory,
        xmin,
        xmax,
        ymin,
        ymax,
        pixel_width,
        pixel_height,
        projection=epsg,
        gisbase=grass_gisbase,
        gisdb=grass_gisdb,
        location=grass_location,
        mapset=grass_mapset,
        clean_old=True,
        overwrite=overwrite)

    # Parse and sort the water-height class thresholds
    thresholds_list = heights_classes.split(',')
    thresholds_list_float = [float(x) for x in thresholds_list]
    thresholds_list_float.sort()
    thresholds_list_float_len = len(thresholds_list_float)

    print(cyan + "classesOfWaterHeights() : " + bold + green +
          "ETAPE 0/6 - Fin de la préparation des traitements." + endC + '\n')

    #############
    # Step 1/6 # Create points on the perimeter of the flooded extent
    #############

    print(
        cyan + "classesOfWaterHeights() : " + bold + green +
        "ETAPE 1/6 - Début de la création de points sur le périmètre de l'emprise inondée."
        + endC + '\n')

    # Convert the flooded extent polygons to polylines
    convertePolygon2Polylines(input_flooded_areas_vector,
                              flooded_areas_lines,
                              overwrite=overwrite,
                              format_vector=format_vector)

    # Create points along the polyline (GRASS v.to.points-style parameters)
    use = 'vertex'
    # max spacing between points; presumably in map units -- TODO confirm
    dmax = 10
    percent = False
    importVectorOgr2Grass(flooded_areas_lines,
                          flooded_areas_lines_basename,
                          overwrite=overwrite)
    pointsAlongPolylines(flooded_areas_lines_basename,
                         flooded_areas_points_basename,
                         use=use,
                         dmax=dmax,
                         percent=percent,
                         overwrite=overwrite)
    exportVectorOgr2Grass(flooded_areas_points_basename,
                          flooded_areas_points,
                          format_vector=FORMAT_VECTOR_GRASS,
                          overwrite=overwrite)

    # Add an index field on the points
    addNewFieldVector(flooded_areas_points,
                      INDEX_FIELD,
                      ogr.OFTInteger,
                      field_value=None,
                      field_width=None,
                      field_precision=None,
                      format_vector=format_vector)
    updateIndexVector(flooded_areas_points,
                      index_name=INDEX_FIELD,
                      format_vector=format_vector)

    print(
        cyan + "classesOfWaterHeights() : " + bold + green +
        "ETAPE 1/6 - Fin de la création de points sur le périmètre de l'emprise inondée."
        + endC + '\n')

    #############
    # Step 2/6 # Retrieve the altitude under each point
    #############

    print(
        cyan + "classesOfWaterHeights() : " + bold + green +
        "ETAPE 2/6 - Début de la récupération de l'altitude sous chaque point."
        + endC + '\n')

    # Add a field to receive the altitude
    addNewFieldVector(flooded_areas_points,
                      ALTI_FIELD,
                      ogr.OFTReal,
                      field_value=None,
                      field_width=None,
                      field_precision=None,
                      format_vector=format_vector)

    # Sample the DEM under the points file
    importVectorOgr2Grass(flooded_areas_points,
                          flooded_areas_points_basename,
                          overwrite=overwrite)
    importRasterGdal2Grass(input_digital_elevation_model_file,
                           digital_elevation_model_basename,
                           overwrite=overwrite)
    sampleRasterUnderPoints(flooded_areas_points_basename,
                            digital_elevation_model_basename,
                            ALTI_FIELD,
                            overwrite=overwrite)
    exportVectorOgr2Grass(flooded_areas_points_basename,
                          altitude_points,
                          format_vector=FORMAT_VECTOR_GRASS,
                          overwrite=overwrite)

    print(
        cyan + "classesOfWaterHeights() : " + bold + green +
        "ETAPE 2/6 - Fin de la récupération de l'altitude sous chaque point." +
        endC + '\n')

    #############
    # Step 3/6 # Triangulate the altitude into a water-surface grid
    #############

    print(cyan + "classesOfWaterHeights() : " + bold + green +
          "ETAPE 3/6 - Début de la triangulation de l'altitude." + endC + '\n')

    # pixel_height is presumably negative (north-up geotransform), hence abs(min(...))
    pixel_size = abs(min(pixel_width, pixel_height))
    triangulationDelaunay(altitude_points,
                          altitude_grid,
                          ALTI_FIELD,
                          cellsize=pixel_size)

    print(cyan + "classesOfWaterHeights() : " + bold + green +
          "ETAPE 3/6 - Fin de la triangulation de l'altitude." + endC + '\n')

    #############
    # Step 4/6 # Compute the raw water heights
    #############

    print(cyan + "classesOfWaterHeights() : " + bold + green +
          "ETAPE 4/6 - Début du calcul des hauteurs brutes." + endC + '\n')

    # Re-cut both rasters on the flooded extent
    cutRasterImages([altitude_grid, input_digital_elevation_model_file],
                    input_flooded_areas_vector,
                    [altitude_file, digital_elevation_model_cut],
                    0,
                    0,
                    epsg,
                    no_data_value,
                    "",
                    False,
                    path_time_log,
                    format_raster=format_raster,
                    format_vector=format_vector,
                    extension_raster=extension_raster,
                    extension_vector=extension_vector,
                    save_results_intermediate=save_results_intermediate,
                    overwrite=overwrite)

    # BandMath for the raw heights (triangulated water surface - DEM)
    expression = "im1b1 - im2b1"
    rasterCalculator([altitude_file, digital_elevation_model_cut],
                     raw_heights,
                     expression,
                     codage=ENCODING_RASTER_FLOAT)

    print(cyan + "classesOfWaterHeights() : " + bold + green +
          "ETAPE 4/6 - Fin du calcul des hauteurs brutes." + endC + '\n')

    #############
    # Step 5/6 # Assign the water-height classes
    #############

    print(cyan + "classesOfWaterHeights() : " + bold + green +
          "ETAPE 5/6 - Début de l'attribution des classes de hauteurs d'eau." +
          endC + '\n')

    # Build the nested ternary expression:
    # class i+1 for heights in [t_i, t_{i+1}), class N for heights >= t_last,
    # class 0 for heights below the first threshold.
    expression = ""
    for i in range(thresholds_list_float_len - 1):
        min_threshold = thresholds_list_float[i]
        max_threshold = thresholds_list_float[i + 1]
        expression += "im1b1>=%s and im1b1<%s ? %s : " % (min_threshold,
                                                          max_threshold, i + 1)
    expression += "im1b1>=%s ? %s : 0" % (thresholds_list_float[
        thresholds_list_float_len - 1], thresholds_list_float_len)

    # Compute the water-height classes
    rasterCalculator([raw_heights],
                     heights_classes_temp,
                     expression,
                     codage=ENCODING_RASTER_UINT8)

    # Clean re-cut of the zones outside the flooded extent
    cutImageByVector(input_flooded_areas_vector,
                     heights_classes_temp,
                     output_heights_classes_file,
                     pixel_size_x=pixel_width,
                     pixel_size_y=pixel_height,
                     no_data_value=no_data_value,
                     epsg=epsg,
                     format_raster=format_raster,
                     format_vector=format_vector)

    print(cyan + "classesOfWaterHeights() : " + bold + green +
          "ETAPE 5/6 - Fin de l'attribution des classes de hauteurs d'eau." +
          endC + '\n')

    #############
    # Step 6/6 # Vectorize the water-height classes (optional)
    #############

    if output_heights_classes_vector != "":

        print(
            cyan + "classesOfWaterHeights() : " + bold + green +
            "ETAPE 6/6 - Début de la vectorisation des classes de hauteurs d'eau."
            + endC + '\n')

        name_column = 'class'
        umc_list = 0

        # VECTORISATION is hard-coded to 'GRASS' above, so this branch is taken
        if VECTORISATION == 'GRASS':
            vectorizeGrassClassification(
                output_heights_classes_file,
                output_heights_classes_vector,
                name_column, [umc_list],
                False,
                True,
                True,
                input_flooded_areas_vector,
                True,
                path_time_log,
                expression="",
                format_vector=format_vector,
                extension_raster=extension_raster,
                extension_vector=extension_vector,
                save_results_intermediate=save_results_intermediate,
                overwrite=overwrite)
        else:
            vectorizeClassification(
                output_heights_classes_file,
                output_heights_classes_vector,
                name_column, [umc_list],
                2000,
                False,
                True,
                True,
                True,
                True,
                True,
                input_flooded_areas_vector,
                True,
                False,
                False, [0],
                path_time_log,
                expression="",
                format_vector=format_vector,
                extension_raster=extension_raster,
                extension_vector=extension_vector,
                save_results_intermediate=save_results_intermediate,
                overwrite=overwrite)

        print(
            cyan + "classesOfWaterHeights() : " + bold + green +
            "ETAPE 6/6 - Fin de la vectorisation des classes de hauteurs d'eau."
            + endC + '\n')

    else:
        print(
            cyan + "classesOfWaterHeights() : " + bold + yellow +
            "ETAPE 6/6 - Pas de vectorisation des classes de hauteurs d'eau demandée."
            + endC + '\n')

    # Delete the temporary files
    if not save_results_intermediate:
        if debug >= 3:
            print(cyan + "classesOfWaterHeights() : " + endC +
                  "Suppression des fichiers temporaires." + endC + '\n')
        deleteDir(temp_directory)

    print(cyan + "classesOfWaterHeights() : " + bold + green +
          "FIN DES TRAITEMENTS" + endC + '\n')

    # Update log
    ending_event = "classesOfWaterHeights() : Fin du traitement : "
    timeLine(path_time_log, ending_event)

    return