def prepareData(input_buffer_tdc, input_paysage, output_dir, input_repertories_list, id_paysage, id_name_sub_rep, epsg, optimization_zone, no_cover, zone_date, separ_name, pos_date, nb_char_date, separ_date, path_time_log, format_raster='GTiff', format_vector="ESRI Shapefile", extension_raster=".tif", extension_vector=".shp", save_results_intermediate=True, overwrite=True):
    """
    Prepare input data for the TDC ("trait de côte") processing chain.

    Optimizes the landscape ("paysage") polygons against the TDC buffer,
    splits the optimized layer into one shapefile per polygon, then assembles
    the source images over each optimized landscape.

    Parameters
    ----------
    input_buffer_tdc : str
        Path to the TDC buffer vector file.
    input_paysage : str
        Path to the landscape vector file.
    output_dir : str
        Output directory ("Paysages" and "Images" sub-directories are created in it).
    input_repertories_list : list
        Input image repertories (searched recursively in no_cover mode).
    id_paysage : str
        Name of the landscape id field ("" to fall back to the default field "id_p").
    id_name_sub_rep : str
        Attribute holding the output image name (used in no_cover mode).
    epsg : int
        EPSG code of the projection (0 to read it from input_paysage).
    optimization_zone : bool
        If True, cut each landscape with the TDC buffer before image assembly.
    no_cover : bool
        If True, assemble each landscape from its own image sub-repertory
        (no overlap between images).
    zone_date, separ_name, pos_date, nb_char_date, separ_date
        Date-extraction settings forwarded to selectAssembyImagesByHold().
    path_time_log : str
        Path of the log file updated by timeLine().
    format_raster, format_vector, extension_raster, extension_vector : str
        GDAL/OGR driver names and file extensions.
    save_results_intermediate : bool
        Keep intermediate results if True.
    overwrite : bool
        Allow overwriting existing files if True.

    Returns
    -------
    None
    """
    # Log start
    starting_event = "prepareData() : Select prepare data starting : "
    timeLine(path_time_log, starting_event)

    # Dump parameters in debug mode
    if debug >= 3:
        print(bold + green + "Variables dans le prepareData - Variables générales" + endC)
        for param_name, param_value in (("input_buffer_tdc", input_buffer_tdc),
                                        ("input_paysage", input_paysage),
                                        ("output_dir", output_dir),
                                        ("input_repertories_list", input_repertories_list),
                                        ("id_paysage", id_paysage),
                                        ("id_name_sub_rep", id_name_sub_rep),
                                        ("epsg", epsg),
                                        ("optimization_zone", optimization_zone),
                                        ("no_cover", no_cover),
                                        ("zone_date", zone_date),
                                        ("separ_name", separ_name),
                                        ("pos_date", pos_date),
                                        ("nb_char_date", nb_char_date),
                                        ("separ_date", separ_date),
                                        ("path_time_log", path_time_log),
                                        ("format_raster", format_raster),
                                        ("format_vector", format_vector),
                                        ("extension_raster", extension_raster),
                                        ("extension_vector", extension_vector),
                                        ("save_results_intermediate", save_results_intermediate),
                                        ("overwrite", overwrite)):
            print(cyan + "PrepareData() : " + endC + param_name + " : " + str(param_value) + endC)

    REPERTORY_PAYSAGES = "Paysages"
    REPERTORY_IMAGES = "Images"
    ID_P = "id_p"

    SUFFIX_OPTI = "_opti"
    SUFFIX_CUT = "_cut"
    SUFFIX_ERROR = "_error"
    SUFFIX_MERGE = "_merge"
    SUFFIX_CLEAN = "_clean"
    SUFFIX_STACK = "_stack"

    output_dir_paysages = output_dir + os.sep + REPERTORY_PAYSAGES
    output_dir_images = output_dir + os.sep + REPERTORY_IMAGES

    # Create the output directories if they do not exist yet
    for directory in (output_dir, output_dir_paysages, output_dir_images):
        if not os.path.exists(directory):
            os.makedirs(directory)

    # Read the projection from the landscape file when not supplied
    if epsg == 0:
        epsg = getProjection(input_paysage, format_vector)

    # Build the optimized landscape layer
    optimPaysage(input_buffer_tdc, input_paysage, optimization_zone, SUFFIX_OPTI, output_dir_paysages, id_paysage, format_vector)

    # One shapefile per polygon of the optimized landscape
    paysage_opti = output_dir_paysages + os.sep + os.path.splitext(os.path.basename(input_paysage))[0] + SUFFIX_OPTI + os.path.splitext(input_paysage)[1]
    split_field = str(id_paysage) if id_paysage != "" else ID_P
    paysages_list = splitVector(paysage_opti, str(output_dir_paysages), split_field, epsg, format_vector, extension_vector)

    if debug >= 3:
        print(cyan + "PrepareData() : " + endC + "Liste des fichiers en entrée de imagesAssembly() : " + str(paysages_list))

    # Assemble the images inside the optimized landscapes
    if no_cover:
        # no_cover mode: each landscape gets its own image sub-repertory

        # Collect, per landscape, the output image name and the landscape id
        image_output_list = []
        id_paysage_list = []
        for shape in paysages_list:
            attribute_name_dico = {id_name_sub_rep: ogr.OFTString,
                                   id_paysage: ogr.OFTInteger}
            res_values_dico = getAttributeValues(shape, None, None, attribute_name_dico, format_vector)
            image_output_list.append(res_values_dico[id_name_sub_rep][0])
            id_paysage_list.append(res_values_dico[id_paysage][0])
        if debug >= 3:
            print("image_output_list " + str(image_output_list))

        # Collect every (sub-)repertory of the input repertories
        repertory_images_sources_list_temp = []
        for input_dir in input_repertories_list:
            sub_rep_list = getSubRepRecursifList(input_dir)
            if sub_rep_list != []:
                for sub_rep in sub_rep_list:
                    repertory_images_sources_list_temp.append(sub_rep)
            else:
                repertory_images_sources_list_temp.append(input_dir)

        # Reorder so that 'repertory_images_sources_list' matches 'image_output_list'
        repertory_images_sources_list = []
        for paysage in id_paysage_list:
            for repertory_images_source in repertory_images_sources_list_temp:
                if str(paysage) in repertory_images_source.split(os.sep)[-1]:
                    repertory_images_sources_list.append(repertory_images_source)
        if debug >= 3:
            print("repertory_images_sources_list " + str(repertory_images_sources_list))

        if len(repertory_images_sources_list) != len(image_output_list):
            raise Exception(bold + red + "Error: not same number of input repertories and output files." + endC)

        # ImagesAssembly on each (landscape, repertory) pair
        for i in range(len(paysages_list)):
            image_output = output_dir_images + os.sep + image_output_list[i]
            if debug >= 3:
                print(cyan + "PrepareData() : " + endC + bold + green + "image_output : " + endC + image_output)

            try:
                selectAssembyImagesByHold(paysages_list[i], [repertory_images_sources_list[i]], image_output, False, zone_date, epsg, False, False, False, False, 0, 0, 0, 0, separ_name, pos_date, nb_char_date, separ_date, path_time_log, SUFFIX_ERROR, SUFFIX_MERGE, SUFFIX_CLEAN, SUFFIX_STACK, format_raster, format_vector, extension_raster, extension_vector, save_results_intermediate, overwrite)
            except Exception as err:
                # Best effort: keep processing the remaining landscapes, but report the failure
                # (the original code swallowed the error silently)
                print(cyan + "PrepareData() : " + bold + red + "Assembly failed for " + str(paysages_list[i]) + " : " + str(err) + endC, file=sys.stderr)

    else:
        for shape in paysages_list:
            # BUGFIX: os.path.splitext() returns a tuple, take element [0] before concatenating
            image_output = output_dir_images + os.sep + os.path.splitext(os.path.basename(shape))[0] + extension_raster

            if optimization_zone:
                # Cut the landscape with the TDC buffer before assembly
                shape_cut = os.path.splitext(shape)[0] + SUFFIX_CUT + os.path.splitext(shape)[1]
                cutVector(input_buffer_tdc, shape, shape_cut, overwrite, format_vector)
            else:
                shape_cut = shape

            # BUGFIX: pass extension_raster/extension_vector (they were missing, which
            # shifted the following arguments compared to the call in the no_cover branch)
            selectAssembyImagesByHold(shape_cut, input_repertories_list, image_output, False, zone_date, epsg, False, False, False, False, 0, 0, 0, 0, separ_name, pos_date, nb_char_date, separ_date, path_time_log, SUFFIX_ERROR, SUFFIX_MERGE, SUFFIX_CLEAN, SUFFIX_STACK, format_raster, format_vector, extension_raster, extension_vector, save_results_intermediate, overwrite)

            # Drop the temporary cut landscape
            if optimization_zone and os.path.exists(shape_cut):
                removeVectorFile(shape_cut, format_vector)

    # Log end
    ending_event = "prepareData() : Select prepare data ending : "
    timeLine(path_time_log, ending_event)

    if debug >= 3:
        print(cyan + "PrepareData() : " + endC + bold + green + "Fin de traitement")
    return
def runTDCSeuil(input_im_seuils_dico,
                output_dir,
                input_sea_points,
                input_cut_vector,
                input_emprise_vector,
                simplif,
                is_calc_indice_image,
                attribute_val_limite,
                attribute_val_proced,
                attribute_val_datepr,
                attribute_val_precis,
                attribute_val_contac,
                attribute_val_type,
                no_data_value,
                path_time_log,
                channel_order=None,
                epsg=2154,
                format_raster='GTiff',
                format_vector="ESRI Shapefile",
                extension_raster=".tif",
                extension_vector=".shp",
                save_results_intermediate=True,
                overwrite=True):
    """
    Compute the coastline (TDC, "trait de côte") by thresholding images.

    For every entry "image:seuil1,seuil2,..." of input_im_seuils_dico, a set
    of binary mask vectors is produced (one per threshold; an "auto" threshold
    is resolved with runCalculSeuil() and yields two masks: class-center and
    lower-bound). All masks are then merged into the final coastline with
    polygonMerToTDC().

    Parameters
    ----------
    input_im_seuils_dico : str
        Space-separated "image:seuils" entries, e.g. "/path/im.tif:0.1,0,-0.1".
        At least the first entry must carry thresholds. When
        is_calc_indice_image is False, the first value after ":" must be the
        pre-computed index image.
    output_dir : str
        Output directory.
    input_sea_points : str
        Vector file of points located at sea (forwarded to polygonMerToTDC).
    input_cut_vector : str
        Cutting vector applied to the binary masks.
    input_emprise_vector : str
        Optional footprint vector whose attributes fill the output fields
        ("" to skip).
    simplif : float
        Simplification tolerance forwarded to polygonMerToTDC().
    is_calc_indice_image : bool
        If True, the NDVI index is computed from each input image.
    attribute_val_limite, attribute_val_proced, attribute_val_datepr,
    attribute_val_precis, attribute_val_contac, attribute_val_type : str
        Default values for the output attributes ("" -> blank, or today's
        date for attribute_val_datepr).
    no_data_value : int
        Nodata value of the rasters.
    path_time_log : str
        Path of the log file updated by timeLine().
    channel_order : list of str, optional
        Raster channel order; defaults to ['Red', 'Green', 'Blue', 'NIR'].
    epsg, format_raster, format_vector, extension_raster, extension_vector,
    save_results_intermediate, overwrite
        Usual GDAL/OGR settings.

    Returns
    -------
    str
        Path of the produced TDC shapefile.
    """
    # Avoid the shared-mutable-default-argument pitfall
    if channel_order is None:
        channel_order = ['Red', 'Green', 'Blue', 'NIR']

    # Log start
    starting_event = "runTDCSeuil() : Select TDC Seuil starting : "
    timeLine(path_time_log, starting_event)

    # Dump parameters in debug mode
    if debug >= 3:
        print(bold + green + "Variables dans runTDCSeuil - Variables générales" + endC)
        for param_name, param_value in (("input_im_seuils_dico", input_im_seuils_dico),
                                        ("output_dir", output_dir),
                                        ("input_sea_points", input_sea_points),
                                        ("input_cut_vector", input_cut_vector),
                                        ("input_emprise_vector", input_emprise_vector),
                                        ("simplif", simplif),
                                        ("is_calc_indice_image", is_calc_indice_image),
                                        ("attribute_val_limite", attribute_val_limite),
                                        ("attribute_val_proced", attribute_val_proced),
                                        ("attribute_val_datepr", attribute_val_datepr),
                                        ("attribute_val_precis", attribute_val_precis),
                                        ("attribute_val_contac", attribute_val_contac),
                                        ("attribute_val_type", attribute_val_type),
                                        ("no_data_value", no_data_value),
                                        ("path_time_log", path_time_log),
                                        ("channel_order", channel_order),
                                        ("epsg", epsg),
                                        ("format_raster", format_raster),
                                        ("format_vector", format_vector),
                                        ("extension_raster", extension_raster),
                                        ("extension_vector", extension_vector),
                                        ("save_results_intermediate", save_results_intermediate),
                                        ("overwrite", overwrite)):
            print(cyan + "runTDCSeuil() : " + endC + param_name + " : " + str(param_value) + endC)

    # Constants
    AUTO = "auto"
    POS_NUMERO_DOSSIER = 2
    REP_NDVI_TDC_SEUIL = "ndvi_TDCSeuil"
    REP_TEMP_BIN_MASK_V = "Temp_Binary_Mask_Vector_"

    ATTR_NAME_REFDOSSIER = "RefDossier"
    ATTR_NAME_NOMIMAGE = "NomImage"
    ATTR_NAME_DATEACQUI = "DateAcqui"
    ATTR_NAME_HEUREACQUI = "HeureAcqui"
    ATTR_NAME_LIMITE = "TdcLimite"
    ATTR_NAME_PROCED = "TdcProced"
    ATTR_NAME_DATEPR = "TdcDatepro"
    ATTR_NAME_PRECIS = "TdcPrecis"
    ATTR_NAME_CONTAC = "TdcContact"
    ATTR_NAME_TYPE = "Type"

    # NDVI repertory is kept (not a temporary directory)
    repertory_ndvi = output_dir + os.sep + REP_NDVI_TDC_SEUIL
    repertory_temp_list = []

    # Create the output directories if they do not exist yet
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    if not os.path.exists(repertory_ndvi):
        os.makedirs(repertory_ndvi)

    # Read the footprint attributes (used to fill the output fields)
    res_values_dico = {}
    if input_emprise_vector != "":
        names_attribut_list = getAttributeNameList(input_emprise_vector, format_vector)
        attribute_name_dico = {}
        for name_attribut in names_attribut_list:
            attribute_name_dico[name_attribut] = getAttributeType(input_emprise_vector, name_attribut, format_vector)
        res_values_dico = getAttributeValues(input_emprise_vector, None, None, attribute_name_dico, format_vector)

    # Several thresholds per image, possibly different from one image to another
    bin_mask_list = []
    entries_list = input_im_seuils_dico.split()
    nb_images = len(entries_list)
    images_list = [entry.split(":")[0] for entry in entries_list]

    # The first image must carry at least one threshold
    if ":" not in entries_list[0]:
        print(
            cyan + "runTDCSeuil() : " + red + bold +
            "Aucun seuil spécifié ! (Nécessité d'au moins un pour la 1ère image)"
            + endC,
            file=sys.stderr)
        sys.exit(1)
    seuils_first_image_list = entries_list[0].split(":")[1].split(",")

    def _append_masks(image_index, repertory_temp, attributes_list, seuils):
        """Append to bin_mask_list one binary mask vector per threshold.

        An AUTO threshold is resolved with runCalculSeuil() and yields two
        masks: class-center (seuils_list[0]) then lower-bound (seuils_list[1])."""
        for seuil in seuils:
            if seuil == AUTO:
                seuils_list = runCalculSeuil(image_index, output_dir, save_results_intermediate)
                # BUGFIX: the lower-bound mask used an undefined variable 'v';
                # use seuils_list[1] as in the other branches
                values = (seuils_list[0], seuils_list[1])
            else:
                values = (seuil,)
            for value in values:
                bin_mask_list.append(binaryMaskVect(
                    image_index, repertory_temp, float(value),
                    input_cut_vector, attributes_list, no_data_value,
                    epsg, format_raster, format_vector,
                    extension_raster, extension_vector,
                    save_results_intermediate, overwrite))

    for i in range(nb_images):
        # Each entry is image + thresholds (example: /path/image_xx.tif:0.1,0,-0.1)
        entry = entries_list[i]
        input_image = entry.split(":")[0]
        # BUGFIX: do not index split(":")[1] blindly — an entry without ":"
        # used to raise IndexError before the no-threshold case could be handled
        seuils_index_image_list = entry.split(":")[1].split(",") if ":" in entry else []
        image_name = os.path.splitext(os.path.basename(input_image))[0]

        # Temporary computation repertory for this image
        repertory_temp = output_dir + os.sep + REP_TEMP_BIN_MASK_V + image_name
        if not os.path.exists(repertory_temp):
            os.makedirs(repertory_temp)
        repertory_temp_list.append(repertory_temp)

        # Default attribute values
        num_dossier = image_name.split("_")[POS_NUMERO_DOSSIER]
        attribute_val_refdossier = num_dossier
        attribute_val_nomimage = image_name
        attribute_val_datecqui = " "
        attribute_val_heureacqui = " "

        if attribute_val_limite == "":
            attribute_val_limite = " "
        if attribute_val_proced == "":
            attribute_val_proced = " "
        if attribute_val_datepr == "":
            attribute_val_datepr = datetime.datetime.now().strftime("%Y-%m-%d")
        if attribute_val_precis == "":
            attribute_val_precis = " "
        if attribute_val_contac == "":
            attribute_val_contac = " "
        if attribute_val_type == "":
            attribute_val_type = " "

        # Override the defaults from the footprint attributes when a matching
        # "RefDossier" record exists
        if ATTR_NAME_REFDOSSIER in res_values_dico and num_dossier in res_values_dico[ATTR_NAME_REFDOSSIER]:
            index_dossier = res_values_dico[ATTR_NAME_REFDOSSIER].index(num_dossier)
            if ATTR_NAME_NOMIMAGE in res_values_dico:
                attribute_val_nomimage = res_values_dico[ATTR_NAME_NOMIMAGE][index_dossier]
            if ATTR_NAME_DATEACQUI in res_values_dico:
                datecqui_list = res_values_dico[ATTR_NAME_DATEACQUI][index_dossier]
                attribute_val_datecqui = str(datecqui_list[0]) + "-" + str(datecqui_list[1]) + "-" + str(datecqui_list[2])
            if ATTR_NAME_HEUREACQUI in res_values_dico:
                attribute_val_heureacqui = res_values_dico[ATTR_NAME_HEUREACQUI][index_dossier]

        # Fields of the output vector
        attributes_list = [StructAttribute(ATTR_NAME_REFDOSSIER, ogr.OFTString, 20, attribute_val_refdossier),
                           StructAttribute(ATTR_NAME_NOMIMAGE, ogr.OFTString, 20, attribute_val_nomimage),
                           StructAttribute(ATTR_NAME_DATEACQUI, ogr.OFTDate, None, attribute_val_datecqui),
                           StructAttribute(ATTR_NAME_HEUREACQUI, ogr.OFTString, 14, attribute_val_heureacqui),
                           StructAttribute(ATTR_NAME_LIMITE, ogr.OFTString, 20, attribute_val_limite),
                           StructAttribute(ATTR_NAME_PROCED, ogr.OFTString, 30, attribute_val_proced),
                           StructAttribute(ATTR_NAME_DATEPR, ogr.OFTString, 14, attribute_val_datepr),
                           StructAttribute(ATTR_NAME_PRECIS, ogr.OFTString, 20, attribute_val_precis),
                           StructAttribute(ATTR_NAME_CONTAC, ogr.OFTString, 20, attribute_val_contac),
                           StructAttribute(ATTR_NAME_TYPE, ogr.OFTString, 14, attribute_val_type)]

        # Index image: computed NDVI, or the first value after ":" when pre-computed
        if is_calc_indice_image:
            image_index = repertory_ndvi + os.sep + "image_NDVI_" + os.path.splitext(os.path.basename(images_list[i]))[0] + extension_raster
            if not os.path.exists(input_image):
                print(cyan + "runTDCSeuil() : " + red + bold +
                      "L'image renseignée en entrée : " + input_image +
                      " n'existe pas. Vérifiez le chemin !" + endC,
                      file=sys.stderr)
                sys.exit(1)
            createNDVI(input_image, image_index, channel_order)
        else:
            if not seuils_index_image_list:
                # No index image given for this entry: cannot proceed without NDVI computation
                # BUGFIX: 'red + +bold' (unary plus on a string) raised TypeError here
                print(cyan + "runTDCSeuil() : " + red + bold +
                      "Renseignez les images NDVI associées et les seuils !" +
                      endC,
                      file=sys.stderr)
                sys.exit(1)
            image_index = seuils_index_image_list[0]
            if os.path.splitext(image_index)[1] != extension_raster:
                print(
                    cyan + "runTDCSeuil() : " + red + bold +
                    "Si vous choisissez de calculer l'image NDVI, mettre l'option -c. Sinon, le 1er paramètre derrière \":\" dans -isd doit être l'image indice (.tif)"
                    + endC,
                    file=sys.stderr)
                sys.exit(1)

        # Build the binary masks for every threshold of this image
        if ":" not in entry:
            # No thresholds for this entry: reuse those of the first image
            _append_masks(image_index, repertory_temp, attributes_list, seuils_first_image_list)
        elif is_calc_indice_image:
            _append_masks(image_index, repertory_temp, attributes_list, seuils_index_image_list)
        else:
            # The first value after ":" is the index image itself; thresholds start at [1]
            _append_masks(image_index, repertory_temp, attributes_list, seuils_index_image_list[1:])

    # Build the "image:mask1,mask2,... image2:..." dictionary string associating
    # each image with its NDVI mask vectors, for the PolygonMerToTDC input
    im_ndvivect_dico = ""
    ndvi_mask_index = 0
    for i in range(nb_images):
        entry = entries_list[i]
        input_image = entry.split(":")[0]
        im_ndvivect_dico += input_image + ":"

        if ":" in entry:
            seuils_entry_list = entry.split(":")[1].split(",")
        elif is_calc_indice_image:
            # Thresholds given only for the first image
            seuils_entry_list = seuils_first_image_list
        else:
            print(cyan + "runTDCSeuil() : " + red + bold +
                  "Renseignez les images NDVI associées et les seuils !" +
                  endC,
                  file=sys.stderr)
            sys.exit(1)

        # An "auto" threshold produced one extra mask (lower bound + class center),
        # hence one more loop turn when it is present
        if AUTO in seuils_entry_list:
            nb_iter = len(seuils_entry_list)
        else:
            nb_iter = len(seuils_entry_list) - 1

        # Without NDVI computation the first value after ":" is the index image,
        # not a threshold, so one fewer mask was produced for this entry
        start = 0 if is_calc_indice_image else 1

        for _ in range(start, nb_iter):
            im_ndvivect_dico += bin_mask_list[ndvi_mask_index] + ","
            ndvi_mask_index += 1
        im_ndvivect_dico += bin_mask_list[ndvi_mask_index] + " "
        # BUGFIX: the increment after the final mask was missing in the
        # "thresholds only on the first image" branch, making every following
        # image reuse the previous image's last mask (off-by-one)
        ndvi_mask_index += 1

    # Drop the trailing separator
    im_ndvivect_dico = im_ndvivect_dico[:-1]

    tdc_shp = polygonMerToTDC(im_ndvivect_dico, output_dir, input_sea_points,
                              True, simplif, input_cut_vector, 3.5, -3.5,
                              no_data_value, path_time_log, epsg,
                              format_vector, extension_raster,
                              extension_vector, save_results_intermediate,
                              overwrite)

    # Remove the temporary repertories unless intermediate results are kept
    for repertory_temp in repertory_temp_list:
        if not save_results_intermediate and os.path.exists(repertory_temp):
            shutil.rmtree(repertory_temp)

    # Log end
    ending_event = "runTDCSeuil() : Select TDC Seuil ending : "
    timeLine(path_time_log, ending_event)

    return tdc_shp
# --- コード例 #3 (code example #3) ---
def createMnh(image_mns_input, image_mnt_input, image_threshold_input, vector_emprise_input, image_mnh_output, automatic, bd_road_vector_input_list, bd_road_buff_list, sql_road_expression_list, bd_build_vector_input_list, height_bias, threshold_bd_value, threshold_delta_h, mode_interpolation, method_interpolation, interpolation_bco_radius, simplify_vector_param, epsg, no_data_value, ram_otb, path_time_log, format_raster='GTiff', format_vector='ESRI Shapefile', extension_raster=".tif", extension_vector=".shp", save_results_intermediate=False, overwrite=True):
    """
    Create a height model raster (MNH) as the difference between a surface
    model (MNS) and a terrain model (MNT), both cut to an extent vector,
    with optional cleaning from exogenous road vectors and optional
    incrustation of building heights from exogenous building vectors.

    Pipeline (each step shells out to OTB / relies on project helpers):
      1. Cut MNS and MNT to the extent vector.
      2. Fill nodata holes in both (SAGA fillNodata) when holes are present.
      3. Resample the MNT onto the MNS grid (otbcli_Superimpose).
      4. Build MNH = MNS - MNT (+/- height_bias) with otbcli_BandMath,
         masking road pixels (and optionally a thresholded image, e.g. NDVI).
      5. Cut the MNH to the extent.
      6. If building vectors are given: keep only buildings whose height
         differs from the current MNH by more than threshold_delta_h and
         burn their heights into the output.

    :param image_mns_input: input MNS (DSM) raster path
    :param image_mnt_input: input MNT (DTM) raster path
    :param image_threshold_input: optional raster to threshold into a road
        filter mask ("" disables this step)
    :param vector_emprise_input: extent vector used to cut every input
    :param image_mnh_output: output MNH raster path
    :param automatic: if False, pause for a manual check of the building
        vector before rasterization
    :param bd_road_vector_input_list: road vector DB paths ([] disables)
    :param bd_road_buff_list: buffer sizes for the road vectors
    :param sql_road_expression_list: SQL filters for the road vectors
    :param bd_build_vector_input_list: building vector DB paths ([] disables)
    :param height_bias: bias added to the MNS-MNT difference
    :param threshold_bd_value: threshold applied to image_threshold_input
    :param threshold_delta_h: minimum |building height - MNH| to keep a building
    :param mode_interpolation, method_interpolation, interpolation_bco_radius:
        otbcli_Superimpose parameters
    :param simplify_vector_param: simplification parameter for road samples
    :param epsg, no_data_value, ram_otb: projection / nodata / OTB RAM (MB, 0 = default)
    :param path_time_log: log file updated at start and end
    :param format_raster, format_vector, extension_raster, extension_vector:
        I/O formats and extensions
    :param save_results_intermediate: keep temporary files when True
    :param overwrite: recompute even if the output already exists
    :return: None (the result is written to image_mnh_output)
    :raise NameError: when an OTB command or a cut operation fails
    """
    # Update log
    starting_event = "createMnh() : MNH creation starting : "
    timeLine(path_time_log,starting_event)

    print(endC)
    print(bold + green + "## START : MNH CREATION" + endC)
    print(endC)

    if debug >= 2:
        print(bold + green + "createMnh() : Variables dans la fonction" + endC)
        print(cyan + "createMnh() : " + endC + "image_mns_input : " + str(image_mns_input) + endC)
        print(cyan + "createMnh() : " + endC + "image_mnt_input : " + str(image_mnt_input) + endC)
        print(cyan + "createMnh() : " + endC + "image_threshold_input : " + str(image_threshold_input) + endC)
        print(cyan + "createMnh() : " + endC + "vector_emprise_input : " + str(vector_emprise_input) + endC)
        print(cyan + "createMnh() : " + endC + "image_mnh_output : " + str(image_mnh_output) + endC)
        print(cyan + "createMnh() : " + endC + "automatic : " + str(automatic) + endC)
        print(cyan + "createMnh() : " + endC + "bd_road_vector_input_list : " + str(bd_road_vector_input_list) + endC)
        print(cyan + "createMnh() : " + endC + "bd_road_buff_list : " + str(bd_road_buff_list) + endC)
        print(cyan + "createMnh() : " + endC + "sql_road_expression_list : " + str(sql_road_expression_list) + endC)
        print(cyan + "createMnh() : " + endC + "bd_build_vector_input_list : " + str(bd_build_vector_input_list) + endC)
        print(cyan + "createMnh() : " + endC + "height_bias : " + str(height_bias) + endC)
        print(cyan + "createMnh() : " + endC + "threshold_bd_value : " + str(threshold_bd_value) + endC)
        print(cyan + "createMnh() : " + endC + "threshold_delta_h : " + str(threshold_delta_h) + endC)
        print(cyan + "createMnh() : " + endC + "mode_interpolation : " + str(mode_interpolation) + endC)
        print(cyan + "createMnh() : " + endC + "method_interpolation : " + str(method_interpolation) + endC)
        print(cyan + "createMnh() : " + endC + "interpolation_bco_radius : " + str(interpolation_bco_radius) + endC)
        print(cyan + "createMnh() : " + endC + "simplify_vector_param : " + str(simplify_vector_param) + endC)
        print(cyan + "createMnh() : " + endC + "epsg : " + str(epsg) + endC)
        print(cyan + "createMnh() : " + endC + "no_data_value : " + str(no_data_value) + endC)
        print(cyan + "createMnh() : " + endC + "ram_otb : " + str(ram_otb) + endC)
        print(cyan + "createMnh() : " + endC + "path_time_log : " + str(path_time_log) + endC)
        print(cyan + "createMnh() : " + endC + "format_raster : " + str(format_raster) + endC)
        print(cyan + "createMnh() : " + endC + "format_vector : " + str(format_vector) + endC)
        print(cyan + "createMnh() : " + endC + "extension_raster : " + str(extension_raster) + endC)
        print(cyan + "createMnh() : " + endC + "extension_vector : " + str(extension_vector) + endC)
        print(cyan + "createMnh() : " + endC + "save_results_intermediate : " + str(save_results_intermediate) + endC)
        print(cyan + "createMnh() : " + endC + "overwrite : " + str(overwrite) + endC)

    # Constants
    PRECISION = 0.0000001   # value standing in for "0 height" so BandMath output stays strictly positive

    CODAGE_8B = "uint8"
    CODAGE_F = "float"

    SUFFIX_CUT = "_cut"
    SUFFIX_CLEAN = "_clean"
    SUFFIX_SAMPLE = "_sample"
    SUFFIX_MASK = "_mask"
    SUFFIX_TMP = "_tmp"
    SUFFIX_MNS = "_mns"
    SUFFIX_MNT = "_mnt"
    SUFFIX_ROAD = "_road"
    SUFFIX_BUILD = "_build"
    SUFFIX_RASTER = "_raster"
    SUFFIX_VECTOR = "_vector"

    # Define temporary directories and file names
    repertory_output = os.path.dirname(image_mnh_output)
    basename_mnh = os.path.splitext(os.path.basename(image_mnh_output))[0]

    sub_repertory_raster_temp = repertory_output + os.sep + basename_mnh + SUFFIX_RASTER + SUFFIX_TMP
    sub_repertory_vector_temp = repertory_output + os.sep + basename_mnh + SUFFIX_VECTOR + SUFFIX_TMP
    cleanTempData(sub_repertory_raster_temp)
    cleanTempData(sub_repertory_vector_temp)

    basename_vector_emprise = os.path.splitext(os.path.basename(vector_emprise_input))[0]
    basename_mns_input = os.path.splitext(os.path.basename(image_mns_input))[0]
    basename_mnt_input = os.path.splitext(os.path.basename(image_mnt_input))[0]

    image_mnh_tmp = sub_repertory_raster_temp + os.sep + basename_mnh + SUFFIX_TMP + extension_raster
    image_mnh_road = sub_repertory_raster_temp + os.sep + basename_mnh + SUFFIX_ROAD + extension_raster

    vector_bd_bati_temp = sub_repertory_vector_temp + os.sep + basename_mnh + SUFFIX_BUILD + SUFFIX_TMP + extension_vector
    vector_bd_bati = repertory_output + os.sep + basename_mnh + SUFFIX_BUILD + extension_vector
    raster_bd_bati = sub_repertory_vector_temp + os.sep + basename_mnh + SUFFIX_BUILD + extension_raster
    removeVectorFile(vector_bd_bati)

    image_emprise_mnt_mask = sub_repertory_raster_temp + os.sep + basename_vector_emprise + SUFFIX_MNT + extension_raster
    image_mnt_cut = sub_repertory_raster_temp + os.sep + basename_mnt_input + SUFFIX_CUT + extension_raster
    image_mnt_clean = sub_repertory_raster_temp + os.sep + basename_mnt_input + SUFFIX_CLEAN + extension_raster
    image_mnt_clean_sample = sub_repertory_raster_temp + os.sep + basename_mnt_input + SUFFIX_CLEAN + SUFFIX_SAMPLE + extension_raster
    image_emprise_mns_mask = sub_repertory_raster_temp + os.sep + basename_vector_emprise + SUFFIX_MNS + extension_raster
    image_mns_cut = sub_repertory_raster_temp + os.sep + basename_mns_input + SUFFIX_CUT + extension_raster
    image_mns_clean = sub_repertory_raster_temp + os.sep + basename_mns_input + SUFFIX_CLEAN + extension_raster

    vector_bd_road_temp = sub_repertory_vector_temp + os.sep + basename_mnh + SUFFIX_ROAD + SUFFIX_TMP + extension_vector
    raster_bd_road_mask = sub_repertory_raster_temp + os.sep + basename_mnh + SUFFIX_ROAD + SUFFIX_MASK + extension_raster

    # These two paths only exist when a threshold image is supplied
    if image_threshold_input != "" :
        basename_threshold_input = os.path.splitext(os.path.basename(image_threshold_input))[0]
        image_threshold_cut = sub_repertory_raster_temp + os.sep + basename_threshold_input + SUFFIX_CUT + extension_raster
        image_threshold_mask = sub_repertory_raster_temp + os.sep + basename_threshold_input + SUFFIX_MASK + extension_raster

    # CHECK WHETHER THE OUTPUT FILE ALREADY EXISTS
    # If an output file with the same name exists and overwrite is False, do nothing
    check = os.path.isfile(image_mnh_output)
    if check and not overwrite:
        print(bold + yellow +  "createMnh() : " + endC + "Create mnh %s from %s and %s already done : no actualisation" % (image_mnh_output, image_mns_input, image_mnt_input) + endC)
    # Otherwise (or if overwrite is requested) compute the MNH
    else:
        if check:
            try: # Remove the possibly existing output file
                removeFile(image_mnh_output)
            except Exception:
                pass # If the file cannot be removed, assume it does not exist and continue

        # CUT THE INPUT MNS AND MNT FILES WITH THE EXTENT VECTOR
        if debug >= 3:
            print(bold + green +  "createMnh() : " + endC + "Decoupage selon l'emprise des fichiers %s et %s " %(image_mns_input, image_mnt_input) + endC)

        # Cut the MNS
        if not cutImageByVector(vector_emprise_input, image_mns_input, image_mns_cut, None, None, no_data_value, epsg, format_raster, format_vector) :
            raise NameError (cyan + "createMnh() : " + bold + red + "!!! Une erreur c'est produite au cours du decoupage de l'image : " + image_mns_input + ". Voir message d'erreur." + endC)

        # Cut the MNT
        if not cutImageByVector(vector_emprise_input, image_mnt_input, image_mnt_cut, None, None, no_data_value, epsg, format_raster, format_vector) :
            raise NameError (cyan + "createMnh() : " + bold + red + "!!! Une erreur c'est produite au cours du decoupage de l'image : " + image_mnt_input + ". Voir message d'erreur." + endC)

        if debug >= 3:
            print(bold + green +  "createMnh() : " + endC + "Decoupage des fichiers %s et %s complet" %(image_mns_cut, image_mnt_cut) + endC)


        # FILL NODATA HOLES IN THE INPUT MNT IF NECESSARY

        nodata_mnt = getNodataValueImage(image_mnt_cut)
        pixelNodataCount = countPixelsOfValue(image_mnt_cut, nodata_mnt)

        if pixelNodataCount > 0 :

            if debug >= 3:
                print(bold + green +  "createMnh() : " + endC + "Fill the holes MNT for  %s" %(image_mnt_cut) + endC)

            # Rasterize the extent vector to build the mask used to fill the MNT holes
            rasterizeBinaryVector(vector_emprise_input, image_mnt_cut, image_emprise_mnt_mask, 1, CODAGE_8B)

            # Use SAGA to fill the holes
            fillNodata(image_mnt_cut, image_emprise_mnt_mask, image_mnt_clean, save_results_intermediate)

            if debug >= 3:
                print(bold + green +  "createMnh() : " + endC + "Fill the holes MNT to %s completed" %(image_mnt_clean) + endC)

        else :
            image_mnt_clean = image_mnt_cut
            if debug >= 3:
                print(bold + green +  "\ncreateMnh() : " + endC + "Fill the holes not necessary MNT for %s" %(image_mnt_cut) + endC)


        # FILL NODATA HOLES IN THE INPUT MNS IF NECESSARY

        nodata_mns = getNodataValueImage(image_mns_cut)
        pixelNodataCount = countPixelsOfValue(image_mns_cut, nodata_mns)

        if pixelNodataCount > 0 :

            if debug >= 3:
                print(bold + green +  "createMnh() : " + endC + "Fill the holes MNS for  %s" %(image_mns_cut) + endC)

            # Rasterize the extent vector to build the mask used to fill the MNS holes
            rasterizeBinaryVector(vector_emprise_input, image_mns_cut, image_emprise_mns_mask, 1, CODAGE_8B)

            # Use SAGA to fill the holes
            fillNodata(image_mns_cut, image_emprise_mns_mask, image_mns_clean, save_results_intermediate)

            if debug >= 3:
                print(bold + green +  "\ncreateMnh() : " + endC + "Fill the holes MNS to %s completed" %(image_mns_clean) + endC)

        else :
            image_mns_clean = image_mns_cut
            if debug >= 3:
                print(bold + green +  "createMnh() : " + endC + "Fill the holes not necessary MNS for %s" %(image_mns_cut) + endC)

        # RESAMPLE THE MNT ONTO THE GEOMETRY OF THE MNS

        # Build the otbcli_Superimpose resampling command
        command = "otbcli_Superimpose -inr " + image_mns_clean + " -inm " + image_mnt_clean + " -mode " + mode_interpolation + " -interpolator " + method_interpolation + " -out " + image_mnt_clean_sample

        if method_interpolation.lower() == 'bco' :
            command += " -interpolator.bco.radius " + str(interpolation_bco_radius)
        if ram_otb > 0:
            command += " -ram %d" %(ram_otb)

        if debug >= 3:
            print(cyan + "createMnh() : " + bold + green + "Réechantillonage du fichier %s par rapport à la reference %s" %(image_mnt_clean, image_mns_clean) + endC)
            print(command)

        exit_code = os.system(command)
        if exit_code != 0:
            print(command)
            raise NameError (cyan + "createMnh() : " + bold + red + "!!! Une erreur c'est produite au cours du superimpose de l'image : " + image_mnt_input + ". Voir message d'erreur." + endC)

        # INCRUST THE ROAD VECTOR DATA INTO THE MNH

        if debug >= 3:
            print(bold + green +  "createMnh() : " + endC + "Use BD road to clean MNH"  + endC)

        # Build a filtering mask for the road data (e.g. from an NDVI image)
        if image_threshold_input != "" :
            if not cutImageByVector(vector_emprise_input, image_threshold_input, image_threshold_cut, None, None, no_data_value, epsg, format_raster, format_vector) :
                raise NameError (cyan + "createMnh() : " + bold + red + "!!! Une erreur c'est produite au cours du decoupage de l'image : " + image_threshold_input + ". Voir message d'erreur." + endC)
            createBinaryMask(image_threshold_cut, image_threshold_mask, threshold_bd_value, False, CODAGE_8B)

        # Run createMacroSamples to rasterize the road database
        if bd_road_vector_input_list != [] :
            createMacroSamples(image_mns_clean, vector_emprise_input, vector_bd_road_temp, raster_bd_road_mask, bd_road_vector_input_list, bd_road_buff_list, sql_road_expression_list, path_time_log, basename_mnh, simplify_vector_param, format_vector, extension_vector, save_results_intermediate, overwrite)

        if debug >= 3:
            print(bold + green +  "\ncreateMnh() : " + endC + "File raster from BD road is create %s" %(raster_bd_road_mask) + endC)

        # COMPUTE THE MNH

        # BandMath expression: MNS - MNT with the height bias, clamping
        # non-positive heights to PRECISION (a value close to 0)
        delta = ""
        if height_bias > 0 :
            delta = "+%s" %(str(height_bias))
        elif height_bias < 0 :
            delta = "-%s" %(str(abs(height_bias)))
        else :
            delta = ""

        # Define the expression (road mask and optional threshold mask force PRECISION)
        if bd_road_vector_input_list != [] :
            if image_threshold_input != "" :
                expression = "\"im3b1 > 0 and im4b1 > 0?%s:(im1b1-im2b1%s) > 0.0?im1b1-im2b1%s:%s\"" %(str(PRECISION), delta, delta, str(PRECISION))
                command = "otbcli_BandMath -il %s %s %s %s -out %s %s -exp %s" %(image_mns_clean, image_mnt_clean_sample, raster_bd_road_mask, image_threshold_mask, image_mnh_tmp, CODAGE_F, expression)
            else :
                expression = "\"im3b1 > 0?%s:(im1b1-im2b1%s) > 0.0?im1b1-im2b1%s:%s\"" %(str(PRECISION), delta, delta, str(PRECISION))
                command = "otbcli_BandMath -il %s %s %s -out %s %s -exp %s" %(image_mns_clean, image_mnt_clean_sample, raster_bd_road_mask, image_mnh_tmp, CODAGE_F, expression)
        else :
            expression = "\"(im1b1-im2b1%s) > 0.0?im1b1-im2b1%s:%s\"" %(delta, delta, str(PRECISION))
            command = "otbcli_BandMath -il %s %s -out %s %s -exp %s" %(image_mns_clean, image_mnt_clean_sample, image_mnh_tmp, CODAGE_F, expression)

        if ram_otb > 0:
            command += " -ram %d" %(ram_otb)

        if debug >= 3:
            print(cyan + "createMnh() : " + bold + green + "Calcul du MNH  %s difference du MNS : %s par le MNT :%s" %(image_mnh_tmp, image_mns_clean, image_mnt_clean_sample) + endC)
            print(command)

        exitCode = os.system(command)
        if exitCode != 0:
            print(command)
            raise NameError(cyan + "createMnh() : " + bold + red + "An error occured during otbcli_BandMath command to compute MNH " + image_mnh_tmp + ". See error message above." + endC)

        # CUT THE MNH

        # Without building data, the road-cleaned MNH is the final output
        if bd_build_vector_input_list == []:
            image_mnh_road = image_mnh_output

        if debug >= 3:
            print(bold + green +  "createMnh() : " + endC + "Decoupage selon l'emprise du fichier mnh %s " %(image_mnh_tmp) + endC)

        # Cut the MNH
        if not cutImageByVector(vector_emprise_input, image_mnh_tmp, image_mnh_road, None, None, no_data_value, epsg, format_raster, format_vector) :
            raise NameError (cyan + "createMnh() : " + bold + red + "!!! Une erreur c'est produite au cours du decoupage de l'image : " + image_mns_input + ". Voir message d'erreur." + endC)

        if debug >= 3:
            print(bold + green +  "createMnh() : " + endC + "Decoupage du fichier mnh %s complet" %(image_mnh_road) + endC)

        # INCRUST THE BUILDING VECTOR DATA INTO THE MNH

        # If requested => a list of building vector files was given as input
        if bd_build_vector_input_list != []:

            # Cut the exogenous building vectors with the extent
            vectors_build_cut_list = []
            for vector_build_input in bd_build_vector_input_list :
                vector_name = os.path.splitext(os.path.basename(vector_build_input))[0]
                vector_build_cut = sub_repertory_vector_temp + os.sep + vector_name + SUFFIX_CUT + extension_vector
                vectors_build_cut_list.append(vector_build_cut)
            cutoutVectors(vector_emprise_input, bd_build_vector_input_list, vectors_build_cut_list, format_vector)

            # Merge the cut building vectors
            fusionVectors (vectors_build_cut_list, vector_bd_bati_temp)

            # Raster/vector cross statistics between the merged buildings and the MNH created above
            statisticsVectorRaster(image_mnh_road, vector_bd_bati_temp, "", 1, False, False, True, ['PREC_PLANI','PREC_ALTI','ORIGIN_BAT','median','sum','std','unique','range'], [], {}, path_time_log, True, format_vector, save_results_intermediate, overwrite)

            # Compute the delta_H column between building heights and the mean MNH under each building
            COLUMN_ID = "ID"
            COLUMN_H_BUILD = "HAUTEUR"
            COLUMN_H_BUILD_MIN = "Z_MIN"
            COLUMN_H_BUILD_MAX = "Z_MAX"
            COLUMN_H_MNH = "mean"
            COLUMN_H_MNH_MIN = "min"
            COLUMN_H_MNH_MAX = "max"
            COLUMN_H_DIFF = "H_diff"

            field_type = ogr.OFTReal
            field_value = 0.0
            field_width = 20
            field_precision = 2
            attribute_name_dico = {}
            attribute_name_dico[COLUMN_ID] = ogr.OFTString
            attribute_name_dico[COLUMN_H_BUILD] = ogr.OFTReal
            attribute_name_dico[COLUMN_H_MNH] = ogr.OFTReal

            # Add the new H_diff column
            addNewFieldVector(vector_bd_bati_temp, COLUMN_H_DIFF, field_type, field_value, field_width, field_precision, format_vector)

            # Read the building and MNH height values from the vector
            data_z_dico = getAttributeValues(vector_bd_bati_temp, None, None, attribute_name_dico, format_vector)

            # Compute the absolute difference between building height and MNH height
            field_new_values_dico = {}
            for index in range(len(data_z_dico[COLUMN_ID])) :
                index_polygon = data_z_dico[COLUMN_ID][index]
                delta_h = abs(data_z_dico[COLUMN_H_BUILD][index] - data_z_dico[COLUMN_H_MNH][index])
                field_new_values_dico[index_polygon] = {COLUMN_H_DIFF:delta_h}

            # Write the H_diff column back into the vector
            setAttributeIndexValuesList(vector_bd_bati_temp, COLUMN_ID, field_new_values_dico, format_vector)

            # Drop every building polygon whose delta H is below threshold_delta_h
            column = "'%s, %s, %s, %s, %s, %s, %s, %s'"% (COLUMN_ID, COLUMN_H_BUILD, COLUMN_H_BUILD_MIN, COLUMN_H_BUILD_MAX, COLUMN_H_MNH, COLUMN_H_MNH_MIN, COLUMN_H_MNH_MAX, COLUMN_H_DIFF)
            expression = "%s > %s" % (COLUMN_H_DIFF, threshold_delta_h)
            filterSelectDataVector(vector_bd_bati_temp, vector_bd_bati, column, expression, overwrite, format_vector)

            # NOTE: PAUSE for manual sorting/verification of buildings already present in the MNH
            if not automatic :
                print(bold + blue +  "Application MnhCreation => " + endC + "Vérification manuelle du vecteur bati %s pour ne concerver que les batis non présent dans le MNH courant %s" %(vector_bd_bati_temp, image_mnh_road) + endC)
                input(bold + red + "Appuyez sur entree pour continuer le programme..." + endC)

            # Create the building mask with the building height as H value
            rasterizeVector(vector_bd_bati, raster_bd_bati, image_mnh_road, COLUMN_H_BUILD)

            # Merge the building mask with the temporary MNH
            expression = "\"im1b1 > 0.0?im1b1:im2b1\""
            command = "otbcli_BandMath -il %s %s -out %s %s -exp %s" %(raster_bd_bati, image_mnh_road, image_mnh_output, CODAGE_F, expression)

            if ram_otb > 0:
                command += " -ram %d" %(ram_otb)

            if debug >= 3:
                print(cyan + "createMnh() : " + bold + green + "Amelioration du MNH  %s ajout des hauteurs des batis %s" %(image_mnh_road, raster_bd_bati) + endC)
                print(command)

            exitCode = os.system(command)
            if exitCode != 0:
                print(command)
                raise NameError(cyan + "createMnh() : " + bold + red + "An error occured during otbcli_BandMath command to compute MNH Final" + image_mnh_output + ". See error message above." + endC)

    # REMOVE USELESS INTERMEDIATE FILES

    if not save_results_intermediate :
        if bd_build_vector_input_list != []:
            # Only remove the road MNH when it is not aliased to the final output
            removeFile(image_mnh_road)
        if image_threshold_input != "" :
            # BUGFIX: these two paths are only defined when a threshold image was
            # supplied; removing them unconditionally raised a NameError.
            removeFile(image_threshold_cut)
            removeFile(image_threshold_mask)
        removeFile(raster_bd_bati)
        removeVectorFile(vector_bd_road_temp)
        removeVectorFile(vector_bd_bati_temp)
        removeVectorFile(vector_bd_bati) # To be confirmed!!!
        removeFile(raster_bd_road_mask)
        removeFile(image_mnh_tmp)
        deleteDir(sub_repertory_raster_temp)
        deleteDir(sub_repertory_vector_temp)

    print(endC)
    print(bold + green + "## END : MNH CREATION" + endC)
    print(endC)

    # Update log
    ending_event = "createMnh() : MNH creation ending : "
    timeLine(path_time_log,ending_event)

    return
def prepareData(input_buffer_tdc,
                input_paysage,
                output_dir,
                input_repertories_list,
                id_paysage,
                id_name_sub_rep,
                epsg,
                optimization_zone,
                no_cover,
                zone_date,
                separ_name,
                pos_date,
                nb_char_date,
                separ_date,
                path_time_log,
                format_raster='GTiff',
                format_vector="ESRI Shapefile",
                extension_raster=".tif",
                extension_vector=".shp",
                save_results_intermediate=True,
                overwrite=True):
    """
    Prepare landscape ("paysage") data for shoreline (TDC) processing:
    optimize the landscape polygons against a shoreline buffer, split them
    into one shapefile per polygon, then assemble the source images inside
    each optimized landscape.

    :param input_buffer_tdc: shoreline buffer vector
    :param input_paysage: input landscape vector
    :param output_dir: output directory (created if missing); "Paysages" and
        "Images" sub-directories are created inside it
    :param input_repertories_list: list of source image directories
    :param id_paysage: attribute used to split the landscape vector
        ("" falls back to the ID_P attribute)
    :param id_name_sub_rep: attribute holding the image sub-directory name
        for each landscape (used when no_cover is True)
    :param epsg: projection code (0 = read it from input_paysage)
    :param optimization_zone: if True, cut each landscape with the buffer
        before assembling
    :param no_cover: if True, assemble each landscape from its own single
        image directory (no overlap between images)
    :param zone_date: date-selection flag forwarded to selectAssembyImagesByHold
    :param separ_name, pos_date, nb_char_date, separ_date: file-name date
        parsing parameters forwarded to selectAssembyImagesByHold
    :param path_time_log: log file updated at start and end
    :param format_raster, format_vector, extension_raster, extension_vector:
        I/O formats and extensions
    :param save_results_intermediate: keep temporary files when True
    :param overwrite: overwrite existing results when True
    :return: None (results are written under output_dir)
    """
    # Update log
    starting_event = "prepareData() : Select prepare data starting : "
    timeLine(path_time_log, starting_event)

    # Display parameters
    if debug >= 3:
        print(bold + green +
              "Variables dans le prepareData - Variables générales" + endC)
        print(cyan + "PrepareData() : " + endC + "input_buffer_tdc : " +
              str(input_buffer_tdc) + endC)
        print(cyan + "PrepareData() : " + endC + "input_paysage : " +
              str(input_paysage) + endC)
        print(cyan + "PrepareData() : " + endC + "output_dir : " +
              str(output_dir) + endC)
        print(cyan + "PrepareData() : " + endC + "input_repertories_list : " +
              str(input_repertories_list) + endC)
        print(cyan + "PrepareData() : " + endC + "id_paysage : " +
              str(id_paysage) + endC)
        print(cyan + "PrepareData() : " + endC + "id_name_sub_rep : " +
              str(id_name_sub_rep) + endC)
        print(cyan + "PrepareData() : " + endC + "epsg : " + str(epsg) + endC)
        print(cyan + "PrepareData() : " + endC + "optimization_zone : " +
              str(optimization_zone) + endC)
        print(cyan + "PrepareData() : " + endC + "no_cover : " +
              str(no_cover) + endC)
        print(cyan + "PrepareData() : " + endC + "zone_date : " +
              str(zone_date) + endC)
        print(cyan + "PrepareData() : " + endC + "separ_name : " +
              str(separ_name) + endC)
        print(cyan + "PrepareData() : " + endC + "pos_date : " +
              str(pos_date) + endC)
        print(cyan + "PrepareData() : " + endC + "nb_char_date : " +
              str(nb_char_date) + endC)
        print(cyan + "PrepareData() : " + endC + "separ_date : " +
              str(separ_date) + endC)
        print(cyan + "PrepareData() : " + endC + "path_time_log : " +
              str(path_time_log) + endC)
        print(cyan + "PrepareData() : " + endC + "format_raster : " +
              str(format_raster) + endC)
        print(cyan + "PrepareData() : " + endC + "format_vector : " +
              str(format_vector) + endC)
        print(cyan + "PrepareData() : " + endC + "extension_raster : " +
              str(extension_raster) + endC)
        print(cyan + "PrepareData() : " + endC + "extension_vector : " +
              str(extension_vector) + endC)
        print(cyan + "PrepareData() : " + endC + "save_results_inter : " +
              str(save_results_intermediate) + endC)
        print(cyan + "PrepareData() : " + endC + "overwrite : " +
              str(overwrite) + endC)

    # Constants
    REPERTORY_PAYSAGES = "Paysages"
    REPERTORY_IMAGES = "Images"
    ID_P = "id_p"

    SUFFIX_OPTI = "_opti"
    SUFFIX_ASS = "_ass"
    SUFFIX_CUT = "_cut"
    SUFFIX_ERROR = "_error"
    SUFFIX_MERGE = "_merge"
    SUFFIX_CLEAN = "_clean"
    SUFFIX_STACK = "_stack"

    output_dir_paysages = output_dir + os.sep + REPERTORY_PAYSAGES
    output_dir_images = output_dir + os.sep + REPERTORY_IMAGES

    # Create the output directories if they do not exist yet.
    # exist_ok avoids the race between the existence test and the creation;
    # the original code also created output_dir_images a second time further
    # down, which is now unnecessary.
    os.makedirs(output_dir, exist_ok=True)
    os.makedirs(output_dir_paysages, exist_ok=True)
    os.makedirs(output_dir_images, exist_ok=True)

    # Read the EPSG from the landscape file when it was not supplied
    if epsg == 0:
        epsg = getProjection(input_paysage, format_vector)

    # Build the optimal landscape
    optimPaysage(input_buffer_tdc, input_paysage, optimization_zone,
                 SUFFIX_OPTI, output_dir_paysages, id_paysage, format_vector)

    # Create one shapefile per polygon
    paysage_opti = output_dir_paysages + os.sep + os.path.splitext(
        os.path.basename(input_paysage))[0] + SUFFIX_OPTI + os.path.splitext(
            input_paysage)[1]
    split_field = str(id_paysage) if id_paysage != "" else ID_P
    paysages_list = splitVector(paysage_opti, str(output_dir_paysages),
                                split_field, epsg, format_vector,
                                extension_vector)

    if debug >= 3:
        print(cyan + "PrepareData() : " + endC +
              "Liste des fichiers en entrée de imagesAssembly() : " +
              str(paysages_list))

    # Assemble the images inside the optimized landscapes.
    # Case 1: no overlap between images is requested
    if no_cover:
        sub_repertory_images_paysages_list = []
        repertory_images_sources_list = []
        # NOTE(review): only the FIRST image directory is used here — looks
        # suspicious (inherited "GFT" remark from the original), confirm intent.
        repertory_input_image_dir = input_repertories_list[0]

        # Look up the attribute holding each landscape's image sub-directory
        for shape in paysages_list:
            attribute_name_dico = {}
            attribute_name_dico[id_name_sub_rep] = ogr.OFTString
            res_values_dico = getAttributeValues(shape, None, None,
                                                 attribute_name_dico,
                                                 format_vector)
            sub_repertory_images_paysage = res_values_dico[id_name_sub_rep][0]
            sub_repertory_images_paysages_list.append(
                sub_repertory_images_paysage)

        # The attribute exists: derive one source directory per landscape
        if len(sub_repertory_images_paysages_list) != 0:
            for sub_repertory_images_paysage in sub_repertory_images_paysages_list:
                repertory_images_sources_list.append(
                    repertory_input_image_dir + os.sep +
                    sub_repertory_images_paysage)

        # Otherwise, ask the operator to type the image list interactively
        else:
            print("Liste des paysages optimisés : " + str(paysages_list))
            repertory_images_sources_list = input(
                "Rentrez la liste des images associées à chaque paysage dans l'ordre sous forme de liste [..., ..., ...] \n"
            )
            while len(repertory_images_sources_list) != len(paysages_list):
                print(
                    "Longueur de la liste images différente de celle des paysages \n"
                )
                repertory_images_sources_list = input(
                    "Rentrez la liste des images associées à chaque paysage dans l'ordre \n"
                )
        if debug >= 2:
            print("repertory_images_sources_list " +
                  str(repertory_images_sources_list))

        # Run the image assembly on each (landscape, source directory) pair
        for index, paysage in enumerate(paysages_list):
            image_output = output_dir_images + os.sep + os.path.splitext(
                os.path.basename(paysage))[0] + SUFFIX_ASS + extension_raster
            try:
                selectAssembyImagesByHold(
                    paysage, [repertory_images_sources_list[index]],
                    image_output, False, True, epsg, False, False, False,
                    False, 0, 0, 0, 0, separ_name, pos_date, nb_char_date,
                    separ_date, path_time_log, SUFFIX_ERROR, SUFFIX_MERGE,
                    SUFFIX_CLEAN, SUFFIX_STACK, format_raster, format_vector,
                    extension_raster, extension_vector,
                    save_results_intermediate, overwrite)
            except Exception as err:
                # Best-effort: keep processing the remaining landscapes, but
                # report the failure instead of silently swallowing it as the
                # original bare "pass" did.
                print(cyan + "PrepareData() : " + bold + yellow +
                      "selectAssembyImagesByHold failed for " + str(paysage) +
                      " : " + str(err) + endC,
                      file=sys.stderr)

    # Case 2: overlapping images are allowed
    else:
        for shape in paysages_list:
            image_output = output_dir_images + os.sep + os.path.splitext(
                os.path.basename(shape))[0] + SUFFIX_ASS + extension_raster

            # Optionally restrict each landscape to the shoreline buffer
            if optimization_zone:
                shape_cut = os.path.splitext(
                    shape)[0] + SUFFIX_CUT + os.path.splitext(shape)[1]
                cutVector(input_buffer_tdc, shape, shape_cut, overwrite,
                          format_vector)
            else:
                shape_cut = shape

            selectAssembyImagesByHold(shape_cut, input_repertories_list,
                                      image_output, False, zone_date, epsg,
                                      False, False, False, False, 0, 0, 0, 0,
                                      separ_name, pos_date, nb_char_date,
                                      separ_date, path_time_log, SUFFIX_ERROR,
                                      SUFFIX_MERGE, SUFFIX_CLEAN, SUFFIX_STACK,
                                      format_raster, format_vector,
                                      save_results_intermediate, overwrite)
            # Remove the temporary cut shape once assembled
            if optimization_zone and os.path.exists(shape_cut):
                removeVectorFile(shape_cut, format_vector)

    # Update log
    ending_event = "prepareData() : Select prepare data ending : "
    timeLine(path_time_log, ending_event)

    return
# --- Code example #5 (separator line from the scraped source, commented out so the file parses) ---
def computeRoughnessByOcsMnh( grid_input, grid_output, mnh_input, classif_input, class_build_list, epsg, no_data_value, path_time_log, format_raster='GTiff', format_vector='ESRI Shapefile', extension_raster=".tif", extension_vector=".shp", save_results_intermediate=False, overwrite=True):
    """
    Compute the "Height of Roughness Elements" indicator on a vector grid from
    a land-cover classification (OCS) raster and a height model (MNH) raster.

    For each grid cell, the mean built height ("mean_h") is computed as the
    sum of MNH heights over built pixels ("sum_h") divided by the number of
    built pixels ("nb_area").

    Parameters:
        grid_input (str) : path to the input grid vector file.
        grid_output (str) : path to the output grid vector file (receives the
            computed "mean_h" field).
        mnh_input (str) : path to the height model raster (MNH).
        classif_input (str) : path to the land-cover classification raster.
        class_build_list (list) : classification labels considered as "built".
        epsg (int) : fallback EPSG code when the classification raster carries
            no usable projection.
        no_data_value : nodata value of the MNH raster.
        path_time_log (str) : path to the time-log file.
        format_raster (str) : GDAL raster driver name (default 'GTiff').
        format_vector (str) : OGR vector driver name (default 'ESRI Shapefile').
        extension_raster (str) : raster file extension (default '.tif').
        extension_vector (str) : vector file extension (default '.shp').
        save_results_intermediate (bool) : keep temporary files if True.
        overwrite (bool) : overwrite an existing output if True.

    Returns:
        None

    Raises:
        NameError : if an external OTB/GDAL command exits with a non-zero code.
    """

    # Constants: field names/types written into the output grid
    FIELD_H_TYPE = ogr.OFTReal
    FIELD_ID_TYPE = ogr.OFTInteger
    FIELD_NAME_HSUM = "sum_h"
    FIELD_NAME_HRE = "mean_h"
    FIELD_NAME_AREA = "nb_area"
    FIELD_NAME_ID = "id"

    SUFFIX_HEIGHT = '_hauteur'
    SUFFIX_BUILT = '_bati'
    SUFFIX_TEMP = '_temp'
    SUFFIX_MASK = '_mask'

    def _runCommand(command, tool_name):
        # Run an external command and fail fast with an explicit exception.
        # Bug fix: the original code used a bare `raise` with no active
        # exception, which itself raises "RuntimeError: No active exception
        # to re-raise" instead of a meaningful error.
        if debug >= 3:
            print(command)
        exit_code = os.system(command)
        if exit_code != 0:
            print(command)
            print(cyan + "computeRoughnessByOcsMnh() : " + bold + red + "!!! Une erreur c'est produite au cours de la commande " + tool_name + " : " + command + ". Voir message d'erreur." + endC, file=sys.stderr)
            raise NameError(cyan + "computeRoughnessByOcsMnh() : " + bold + red + "An error occured during " + tool_name + " command. See error message above." + endC)

    # Log update
    timeLine(path_time_log, "Début du calcul de l'indicateur Height of Roughness Elements par OCS et MNT starting : ")
    print(cyan + "computeRoughnessByOcsMnh() : " + endC + "Début du calcul de l'indicateur Height of Roughness Elements par OCS et MNT." + endC + "\n")

    if debug >= 3:
        print(bold + green + "computeRoughnessByOcsMnh() : Variables dans la fonction" + endC)
        print(cyan + "computeRoughnessByOcsMnh() : " + endC + "grid_input : " + str(grid_input) + endC)
        print(cyan + "computeRoughnessByOcsMnh() : " + endC + "grid_output : " + str(grid_output) + endC)
        print(cyan + "computeRoughnessByOcsMnh() : " + endC + "mnh_input : " + str(mnh_input) + endC)
        print(cyan + "computeRoughnessByOcsMnh() : " + endC + "classif_input : " + str(classif_input) + endC)
        print(cyan + "computeRoughnessByOcsMnh() : " + endC + "class_build_list : " + str(class_build_list) + endC)
        print(cyan + "computeRoughnessByOcsMnh() : " + endC + "epsg : " + str(epsg) + endC)
        print(cyan + "computeRoughnessByOcsMnh() : " + endC + "no_data_value : " + str(no_data_value) + endC)
        print(cyan + "computeRoughnessByOcsMnh() : " + endC + "path_time_log : " + str(path_time_log) + endC)
        print(cyan + "computeRoughnessByOcsMnh() : " + endC + "format_vector : " + str(format_vector) + endC)
        print(cyan + "computeRoughnessByOcsMnh() : " + endC + "save_results_intermediate : " + str(save_results_intermediate) + endC)
        print(cyan + "computeRoughnessByOcsMnh() : " + endC + "overwrite : " + str(overwrite) + endC)

    # Check whether the output vector already exists and must be overwritten
    check = os.path.isfile(grid_output)

    if check and not overwrite: # Output already exists and overwrite is disabled: nothing to do
        print(cyan + "computeRoughnessByOcsMnh() : " + bold + yellow + "Le calcul de Roughness par OCS et MNT a déjà eu lieu." + endC + "\n")
        print(cyan + "computeRoughnessByOcsMnh() : " + bold + yellow + "Grid vector output : " + grid_output + " already exists and will not be created again." + endC)
    else :
        if check:
            try:
                removeVectorFile(grid_output)
            except Exception:
                pass # if the file does not exist it cannot be removed: this step is ignored

        ############################################
        ### General preparation of the processing ##
        ############################################

        # Projection of the classification image; fall back to the epsg
        # parameter when none is found (None/0 both mean "no projection")
        epsg_proj = getProjectionImage(classif_input)
        if epsg_proj is None or epsg_proj == 0:
            epsg_proj = epsg

        # Temporary working directory
        temp_path = os.path.dirname(grid_output) + os.sep + "RoughnessByOcsAndMnh"

        # Clean the temporary directory if it already exists
        if os.path.exists(temp_path):
            shutil.rmtree(temp_path)
        os.makedirs(temp_path)

        basename = os.path.splitext(os.path.basename(grid_output))[0]
        built_height = temp_path + os.sep + basename + SUFFIX_HEIGHT + SUFFIX_BUILT + extension_raster
        built_height_temp = temp_path + os.sep + basename + SUFFIX_HEIGHT + SUFFIX_BUILT + SUFFIX_TEMP + extension_raster
        classif_built_mask = temp_path + os.sep + basename + SUFFIX_BUILT + SUFFIX_MASK + extension_raster
        grid_output_temp = temp_path + os.sep + basename + SUFFIX_TEMP + extension_vector

        ##############################
        ### Indicator computation  ###
        ##############################

        # BandMath sub-expression matching every "built" class of the OCS
        expression_bati = " or ".join(["im1b1==%s" % (str(id_class)) for id_class in class_build_list])
        # Built pixels that also carry a valid, strictly positive height
        expression = "(%s) and (im2b1!=%s) and (im2b1>0)" %(expression_bati, str(no_data_value))

        # Binary (uint8) mask of built pixels, used to count built area per cell
        command = "otbcli_BandMath -il %s %s -out %s uint8 -exp '%s ? 1 : 0'" %(classif_input, mnh_input, classif_built_mask, expression)
        _runCommand(command, "otbcli_BandMath")

        # Height of built pixels (0 elsewhere)
        command = "otbcli_BandMath -il %s %s -out %s float -exp '%s ? im2b1 : 0'" %(classif_input, mnh_input, built_height_temp, expression)
        _runCommand(command, "otbcli_BandMath")

        # Assign projection and nodata to the built-height raster
        command = "gdal_translate -a_srs EPSG:%s -a_nodata %s -of %s %s %s" %(str(epsg_proj), str(no_data_value), format_raster, built_height_temp, built_height)
        _runCommand(command, "gdal_translate")

        # Number of built pixels per cell (the 'sum' of the binary mask)
        statisticsVectorRaster(classif_built_mask, grid_input, grid_output_temp, 1, False, False, True, ["min", "max", "median", "mean", "std", "unique", "range"], [], {}, path_time_log, True, format_vector, save_results_intermediate, overwrite)

        # Rename the 'sum' field to FIELD_NAME_AREA
        renameFieldsVector(grid_output_temp, ['sum'], [FIELD_NAME_AREA], format_vector)

        # Sum of built heights per cell
        statisticsVectorRaster(built_height, grid_output_temp, grid_output, 1, False, False, True, ["min", "max", "median", 'mean', "std", "unique", "range"], [], {}, path_time_log, True, format_vector, save_results_intermediate, overwrite)

        # Rename the 'sum' field to FIELD_NAME_HSUM (the original comment
        # incorrectly said 'mean' -> FIELD_NAME_HRE)
        renameFieldsVector(grid_output, ['sum'], [FIELD_NAME_HSUM], format_vector)

        # Compute FIELD_NAME_HRE = FIELD_NAME_HSUM / FIELD_NAME_AREA per cell
        field_values_list = getAttributeValues(grid_output, None, None, {FIELD_NAME_ID:FIELD_ID_TYPE, FIELD_NAME_HSUM:FIELD_H_TYPE, FIELD_NAME_AREA:FIELD_H_TYPE}, format_vector)

        field_new_values_list = []
        for index in range(0, len(field_values_list[FIELD_NAME_ID])) :
            value_h = 0.0
            # Guard against division by zero for cells without built pixels
            if field_values_list[FIELD_NAME_AREA][index] > 0 :
                value_h = field_values_list[FIELD_NAME_HSUM][index] / field_values_list[FIELD_NAME_AREA][index]
            field_new_values_list.append({FIELD_NAME_HRE:value_h})

        # Add the newly computed FIELD_NAME_HRE column to the output grid
        addNewFieldVector(grid_output, FIELD_NAME_HRE, FIELD_H_TYPE, 0, None, None, format_vector)
        setAttributeValuesList(grid_output, field_new_values_list, format_vector)

        ##########################################
        ### Temporary files cleanup            ###
        ##########################################
        if not save_results_intermediate:
            if os.path.exists(temp_path):
                shutil.rmtree(temp_path)

    print(cyan + "computeRoughnessByOcsMnh() : " + endC + "Fin du calcul de l'indicateur Height of Roughness Elements par OCS et MNT." + endC + "\n")
    timeLine(path_time_log, "Fin du calcul de l'indicateur Height of Roughness Elements par OCS et MNT  ending : ")

    return
# コード例 #6 ("Code example #6" — scrape-artifact separator between examples)
# 0
def selectSamples(image_input_list, sample_image_input, vector_output, table_statistics_output, sampler_strategy, select_ratio_floor, ratio_per_class_dico, name_column, no_data_value, path_time_log, rand_seed=0, ram_otb=0, epsg=2154, format_vector='ESRI Shapefile', extension_vector=".shp", save_results_intermediate=False, overwrite=True) :
    """
    Select training sample points from a raster of micro-class samples and
    extract, for each selected point, the band values of the input image(s).

    Processing steps (mirrored by the numbered sections below):
      0. skip everything if vector_output exists and overwrite is False;
      1. count pixels per micro-class in sample_image_input and write the
         counts to an XML statistics file;
      2. load the sample raster and index every non-zero pixel per class;
      3. pick points per class according to sampler_strategy
         ('all', 'percent' or 'mixte');
      4. convert the picked pixel indices back to map coordinates;
      5. write the points to a vector file;
      6. run otbcli_SampleExtraction (one thread per image when several
         images are given) to read band values at each point;
      7. if table_statistics_output != "", compute per-class count/mean/std
         statistics of the extracted band values into a CSV file;
      8. remove intermediate files unless save_results_intermediate is True.

    Parameters:
        image_input_list (list) : input image path(s) whose band values are
            extracted at the sample points.
        sample_image_input (str) : raster of micro-class samples (0 = no data).
        vector_output (str) : output vector file of sample points with values.
        table_statistics_output (str) : output CSV of per-class statistics
            ("" disables step 7).
        sampler_strategy (str) : 'all', 'percent' or 'mixte'.
        select_ratio_floor (float) : selection ratio (percent) used by the
            'mixte' mode.
        ratio_per_class_dico (dict) : macro-class id -> ratio (percent), used
            by the 'percent' mode (macro id = floor(micro_id / 100) * 100).
        name_column (str) : name of the class field written to the output.
        no_data_value : nodata value (only logged by this function).
        path_time_log (str) : path to the time-log file.
        rand_seed (int) : seed for random.sample; > 0 makes the selection
            deterministic.
        ram_otb (int) : RAM (MB) passed to OTB (0 = OTB default).
        epsg (int) : fallback EPSG code if the sample raster has no projection.
        format_vector (str) : OGR vector driver name.
        extension_vector (str) : vector file extension.
        save_results_intermediate (bool) : keep intermediate files if True.
        overwrite (bool) : overwrite existing outputs if True.

    Returns:
        None

    NOTE(review): `switch`/`case`, `StructInfoMicoClass`, `SampleLocalExtraction`
    and the raster/vector helpers are project-level utilities defined elsewhere
    in this library; their exact semantics are assumed — confirm against their
    modules.
    """

    # Update the time log
    starting_event = "selectSamples() : Select points in raster mask macro input starting : "
    timeLine(path_time_log, starting_event)

    if debug >= 3:
        print(cyan + "selectSamples() : " + endC + "image_input_list : " + str(image_input_list) + endC)
        print(cyan + "selectSamples() : " + endC + "sample_image_input : " + str(sample_image_input) + endC)
        print(cyan + "selectSamples() : " + endC + "vector_output : " + str(vector_output) + endC)
        print(cyan + "selectSamples() : " + endC + "table_statistics_output : " + str(table_statistics_output) + endC)
        print(cyan + "selectSamples() : " + endC + "sampler_strategy : " + str(sampler_strategy) + endC)
        print(cyan + "selectSamples() : " + endC + "select_ratio_floor : " + str(select_ratio_floor) + endC)
        print(cyan + "selectSamples() : " + endC + "ratio_per_class_dico : " + str(ratio_per_class_dico) + endC)
        print(cyan + "selectSamples() : " + endC + "name_column : " + str(name_column) + endC)
        print(cyan + "selectSamples() : " + endC + "no_data_value : " + str(no_data_value) + endC)
        print(cyan + "selectSamples() : " + endC + "path_time_log : " + str(path_time_log) + endC)
        print(cyan + "selectSamples() : " + endC + "rand_seed : " + str(rand_seed) + endC)
        print(cyan + "selectSamples() : " + endC + "ram_otb : " + str(ram_otb) + endC)
        print(cyan + "selectSamples() : " + endC + "epsg : " + str(epsg) + endC)
        print(cyan + "selectSamples() : " + endC + "format_vector : " + str(format_vector) + endC)
        print(cyan + "selectSamples() : " + endC + "extension_vector : " + str(extension_vector) + endC)
        print(cyan + "selectSamples() : " + endC + "save_results_intermediate : " + str(save_results_intermediate) + endC)
        print(cyan + "selectSamples() : " + endC + "overwrite : " + str(overwrite) + endC)

    # Constants
    EXT_XML = ".xml"

    SUFFIX_SAMPLE = "_sample"
    SUFFIX_STATISTICS = "_statistics"
    SUFFIX_POINTS = "_points"
    SUFFIX_VALUE = "_value"

    BAND_NAME = "band_"
    COLUMN_CLASS = "class"
    COLUMN_ORIGINFID = "originfid"

    NB_POINTS = "nb_points"
    AVERAGE = "average"
    STANDARD_DEVIATION = "st_dev"

    print(cyan + "selectSamples() : " + bold + green + "DEBUT DE LA SELECTION DE POINTS" + endC)

    # Working variables and output paths
    repertory_output = os.path.dirname(vector_output)
    filename = os.path.splitext(os.path.basename(vector_output))[0]
    sample_points_output = repertory_output + os.sep + filename +  SUFFIX_SAMPLE + extension_vector
    file_statistic_points = repertory_output + os.sep + filename + SUFFIX_STATISTICS + SUFFIX_POINTS + EXT_XML

    if debug >= 3:
        print(cyan + "selectSamples() : " + endC + "file_statistic_points : " + str(file_statistic_points) + endC)

    # 0. OUTPUT FILE EXISTENCE CHECK
    #----------------------------------

    # If the output points vector already exists and overwrite is disabled,
    # the whole selection is skipped
    check = os.path.isfile(vector_output)
    if check and not overwrite:
        print(bold + yellow + "Samples points already done for file %s and will not be calculated again." %(vector_output) + endC)
    else:   # Otherwise (or if the check is disabled): create the sample points file

        # Remove any pre-existing output files
        if check:
            try:
                removeVectorFile(vector_output)
            except Exception:
                pass # if the file cannot be removed, assume it does not exist and continue
        if os.path.isfile(table_statistics_output) :
            try:
                removeFile(table_statistics_output)
            except Exception:
                pass # if the file cannot be removed, assume it does not exist and continue


        # 1. STATISTICS ON THE RASTER SAMPLE IMAGE
        #----------------------------------------------------

        if debug >= 3:
            print(cyan + "selectSamples() : " + bold + green + "Start statistique sur l'image des echantillons rasteur..." + endC)

        # All distinct pixel values = micro-class labels; 0 is "no data"
        id_micro_list = identifyPixelValues(sample_image_input)

        if 0 in id_micro_list :
            id_micro_list.remove(0)

        # Track the smallest micro-class (used by the 'mixte' strategy below)
        min_micro_class_nb_points = -1
        min_micro_class_label = 0
        infoStructPointSource_dico = {}

        writeTextFile(file_statistic_points, '<?xml version="1.0" ?>\n')
        appendTextFileCR(file_statistic_points, '<GeneralStatistics>')
        appendTextFileCR(file_statistic_points, '    <Statistic name="pointsPerClassRaw">')

        if debug >= 2:
            print("Nombre de points par micro classe :" + endC)

        # Count the pixels of each micro-class and record them in the XML file
        for id_micro in id_micro_list :
            nb_pixels = countPixelsOfValue(sample_image_input, id_micro)

            if debug >= 2:
                print("MicroClass : " + str(id_micro) + ", nb_points = " + str(nb_pixels))
            appendTextFileCR(file_statistic_points, '        <StatisticPoints class="%d" value="%d" />' %(id_micro, nb_pixels))

            if min_micro_class_nb_points == -1 or min_micro_class_nb_points > nb_pixels :
                min_micro_class_nb_points = nb_pixels
                min_micro_class_label = id_micro

            infoStructPointSource_dico[id_micro] = StructInfoMicoClass()
            infoStructPointSource_dico[id_micro].label_class = id_micro
            infoStructPointSource_dico[id_micro].nb_points = nb_pixels
            infoStructPointSource_dico[id_micro].info_points_list = []
            del nb_pixels

        if debug >= 2:
            print("MicroClass min points find : " + str(min_micro_class_label) + ", nb_points = " + str(min_micro_class_nb_points))

        appendTextFileCR(file_statistic_points, '    </Statistic>')

        pending_event = cyan + "selectSamples() : " + bold + green + "End statistique sur l'image des echantillons rasteur. " + endC
        if debug >= 3:
            print(pending_event)
        timeLine(path_time_log,pending_event)

        # 2. LOADING OF THE SAMPLE IMAGE
        #------------------------------------------

        if debug >= 3:
            print(cyan + "selectSamples() : " + bold + green + "Start chargement de l'image des echantillons..." + endC)

        # Image geometry information
        cols, rows, bands = getGeometryImage(sample_image_input)
        xmin, xmax, ymin, ymax = getEmpriseImage(sample_image_input)
        pixel_width, pixel_height = getPixelWidthXYImage(sample_image_input)
        projection_input = getProjectionImage(sample_image_input)
        if projection_input == None or projection_input == 0 :
            projection_input = epsg
        else :
            projection_input = int(projection_input)

        pixel_width = abs(pixel_width)
        pixel_height = abs(pixel_height)

        # Read the raw pixel data
        raw_data = getRawDataImage(sample_image_input)

        if debug >= 3:
            print("projection = " + str(projection_input))
            print("cols = " + str(cols))
            print("rows = " + str(rows))

        # Build a dico structure indexing every non-zero pixel by its class;
        # each pixel is stored as a flat index: x_col + y_row * cols
        progress = 0
        pass_prog = False
        for y_row in range(rows) :
            for x_col in range(cols) :
                value_class = raw_data[y_row][x_col]
                if value_class != 0 :
                    # NOTE(review): assumes every non-zero pixel value appears
                    # in id_micro_list; a stray value would raise KeyError
                    infoStructPointSource_dico[value_class].info_points_list.append(x_col + (y_row * cols))

            # Progress bar (debug only)
            if debug >= 4:
                if  ((float(y_row) / rows) * 100.0 > progress) and not pass_prog :
                    progress += 1
                    pass_prog = True
                    print("Progression => " + str(progress) + "%")
                if ((float(y_row) / rows) * 100.0  > progress + 1) :
                    pass_prog = False

        del raw_data

        pending_event = cyan + "selectSamples() : " + bold + green + "End chargement de l'image des echantillons. " + endC
        if debug >= 3:
            print(pending_event)
        timeLine(path_time_log,pending_event)

        # 3. SELECTION OF THE SAMPLE POINTS
        #--------------------------------------

        if debug >= 3:
            print(cyan + "selectSamples() : " + bold + green + "Start selection des points d'echantillon..." + endC)

        appendTextFileCR(file_statistic_points, '    <Statistic name="pointsPerClassSelect">')

        # Make random.sample deterministic when a seed is provided
        if rand_seed > 0:
            random.seed( rand_seed )

        # For every micro-class
        for id_micro in id_micro_list :

            # According to the selection strategy
            # (switch/case: project helpers emulating a C-style switch)
            nb_points_ratio = 0
            while switch(sampler_strategy.lower()):
                if case('all'):
                    # 'all' mode: keep every point of the class
                    nb_points_ratio = infoStructPointSource_dico[id_micro].nb_points
                    infoStructPointSource_dico[id_micro].sample_points_list = range(nb_points_ratio)

                    break
                if case('percent'):
                    # 'percent' mode: per-macro-class ratio from ratio_per_class_dico
                    id_macro_class = int(math.floor(id_micro / 100) * 100)
                    select_ratio_class = ratio_per_class_dico[id_macro_class]
                    nb_points_ratio = int(infoStructPointSource_dico[id_micro].nb_points * select_ratio_class / 100)
                    infoStructPointSource_dico[id_micro].sample_points_list = random.sample(range(infoStructPointSource_dico[id_micro].nb_points), nb_points_ratio)
                    break
                if case('mixte'):
                    # 'mixte' mode: global floor ratio, bounded below by the
                    # size of the smallest micro-class
                    nb_points_ratio = int(infoStructPointSource_dico[id_micro].nb_points * select_ratio_floor / 100)
                    if id_micro == min_micro_class_label :
                        # The smallest micro-class is kept in full
                        infoStructPointSource_dico[id_micro].sample_points_list = range(infoStructPointSource_dico[id_micro].nb_points)
                        nb_points_ratio = min_micro_class_nb_points
                    elif nb_points_ratio <= min_micro_class_nb_points :
                        # Classes whose ratio-selection would fall below the
                        # smallest class size get that size instead
                        infoStructPointSource_dico[id_micro].sample_points_list = random.sample(range(infoStructPointSource_dico[id_micro].nb_points), min_micro_class_nb_points)
                        nb_points_ratio = min_micro_class_nb_points
                    else :
                        # All other micro-classes: random draw of nb_points_ratio points
                        infoStructPointSource_dico[id_micro].sample_points_list = random.sample(range(infoStructPointSource_dico[id_micro].nb_points), nb_points_ratio)

                    break
                break


            if debug >= 2:
                print("MicroClass = " + str(id_micro) + ", nb_points_ratio " + str(nb_points_ratio))
            appendTextFileCR(file_statistic_points, '        <StatisticPoints class="%d" value="%d" />' %(id_micro, nb_points_ratio))

        appendTextFileCR(file_statistic_points, '    </Statistic>')
        appendTextFileCR(file_statistic_points, '</GeneralStatistics>')

        pending_event = cyan + "selectSamples() : " + bold + green + "End selection des points d'echantillon. " + endC
        if debug >= 3:
            print(pending_event)
        timeLine(path_time_log,pending_event)

        # 4. PREPARATION OF THE SAMPLE POINTS
        #----------------------------------------

        if debug >= 3:
            print(cyan + "selectSamples() : " + bold + green + "Start preparation des points d'echantillon..." + endC)

        # Build the points dico: flat pixel index -> map coordinates (cell centre)
        points_random_value_dico = {}
        index_dico_point = 0
        for micro_class in infoStructPointSource_dico :
            micro_class_struct = infoStructPointSource_dico[micro_class]
            label_class = micro_class_struct.label_class
            point_attr_dico = {name_column:int(label_class), COLUMN_CLASS:int(label_class), COLUMN_ORIGINFID:0}

            for id_point in micro_class_struct.sample_points_list:

                # Recover the point coordinates from the flat index
                # (col = index % cols, row = index // cols), shifted to pixel centre
                coor_x = float(xmin + (int(micro_class_struct.info_points_list[id_point] % cols) * pixel_width)) + (pixel_width / 2.0)
                coor_y = float(ymax - (int(micro_class_struct.info_points_list[id_point] / cols) * pixel_height)) - (pixel_height / 2.0)
                points_random_value_dico[index_dico_point] = [[coor_x, coor_y], point_attr_dico]
                del coor_x
                del coor_y
                index_dico_point += 1
            del point_attr_dico
        del infoStructPointSource_dico

        pending_event = cyan + "selectSamples() : " + bold + green + "End preparation des points d'echantillon. " + endC
        if debug >=3:
            print(pending_event)
        timeLine(path_time_log,pending_event)

        # 5. CREATION OF THE SAMPLE POINTS SHAPEFILE
        #-----------------------------------------------------

        if debug >= 3:
            print(cyan + "selectSamples() : " + bold + green + "Start creation du fichier shape de points d'echantillon..." + endC)

        # Attributes of the output file
        attribute_dico = {name_column:ogr.OFTInteger, COLUMN_CLASS:ogr.OFTInteger, COLUMN_ORIGINFID:ogr.OFTInteger}

        # Create the shapefile
        createPointsFromCoordList(attribute_dico, points_random_value_dico, sample_points_output, projection_input, format_vector)
        del attribute_dico
        del points_random_value_dico

        pending_event = cyan + "selectSamples() : " + bold + green + "End creation du fichier shape de points d'echantillon. " + endC
        if debug >=3:
            print(pending_event)
        timeLine(path_time_log,pending_event)

        # 6. EXTRACTION OF THE SAMPLE POINT VALUES
        #-----------------------------------------

        if debug >= 3:
            print(cyan + "selectSamples() : " + bold + green + "Start extraction des points d'echantillon dans l'image..." + endC)

        # Case of a single input image
        if len(image_input_list) == 1:
            # Extract sample
            image_input = image_input_list[0]
            command = "otbcli_SampleExtraction -in %s -vec %s -outfield prefix -outfield.prefix.name %s -out %s -field %s" %(image_input, sample_points_output, BAND_NAME, vector_output, name_column)
            if ram_otb > 0:
                command += " -ram %d" %(ram_otb)
            if debug >= 3:
                print(command)
            exitCode = os.system(command)
            if exitCode != 0:
                raise NameError(cyan + "selectSamples() : " + bold + red + "An error occured during otbcli_SampleExtraction command. See error message above." + endC)

        # Case of several image tiles
        else :

            # Output directory
            repertory_output = os.path.dirname(vector_output)
            # Lists for multi-threading and for the per-tile local sample files
            thread_list = []
            vector_local_output_list = []

            # Clip the training-sample vector to each input image footprint
            for image_input in image_input_list :
                # Per-tile local files
                file_name = os.path.splitext(os.path.basename(image_input))[0]
                emprise_local_sample = repertory_output + os.sep + file_name + SUFFIX_SAMPLE + extension_vector
                vector_sample_local_output = repertory_output + os.sep + file_name + SUFFIX_VALUE + extension_vector
                vector_local_output_list.append(vector_sample_local_output)

                # Non-threaded alternative (kept for debugging)...
                #SampleLocalExtraction(image_input, sample_points_output, emprise_local_sample, vector_sample_local_output, name_column, BAND_NAME, ram_otb, format_vector, extension_vector, save_results_intermediate)

                # Multi-threaded extraction, one thread per tile
                thread = threading.Thread(target=SampleLocalExtraction, args=(image_input, sample_points_output, emprise_local_sample, vector_sample_local_output, name_column, BAND_NAME, ram_otb, format_vector, extension_vector, save_results_intermediate))
                thread.start()
                thread_list.append(thread)

            # Wait for all extraction threads to finish
            # NOTE(review): bare `except` swallows all errors (including
            # KeyboardInterrupt) and only logs — consider narrowing
            try:
                for thread in thread_list:
                    thread.join()
            except:
                print(cyan + "selectSamples() : " + bold + red + "Erreur lors de l'éextaction des valeurs d'echantion : impossible de demarrer le thread" + endC, file=sys.stderr)

            # Merge the per-tile point vectors holding the image band values
            fusionVectors(vector_local_output_list, vector_output, format_vector)

            # Clean the per-tile local sample files
            for vector_sample_local_output in vector_local_output_list :
                removeVectorFile(vector_sample_local_output)

        if debug >= 3:
            print(cyan + "selectSamples() : " + bold + green + "End extraction des points d'echantillon dans l'image." + endC)

        # 7. STATISTICS ON THE VALUES OF THE SELECTED SAMPLE POINTS
        #-----------------------------------------------------------------------------------

        if debug >= 3:
            print(cyan + "selectSamples() : " + bold + green + "Start calcul des statistiques sur les valeurs des points d'echantillons selectionnees..." + endC)

        # Statistics are computed only when an output table file is requested
        if table_statistics_output != "":

            # Fetch the list of data values
            pending_event = cyan + "selectSamples() : " + bold + green + "Encours calcul des statistiques part1... " + endC
            if debug >=4:
                print(pending_event)
            timeLine(path_time_log,pending_event)

            attribute_name_dico = {}
            name_field_value_list = []
            names_attribut_list = getAttributeNameList(vector_output, format_vector)
            if debug >=4:
                print("names_attribut_list = " + str(names_attribut_list))

            # Keep the class field plus every extracted band-value field
            attribute_name_dico[name_column] = ogr.OFTInteger
            for name_attribut in names_attribut_list :
                if BAND_NAME in name_attribut :
                    attribute_name_dico[name_attribut] = ogr.OFTReal
                    name_field_value_list.append(name_attribut)

            name_field_value_list.sort()

            res_values_dico = getAttributeValues(vector_output, None, None, attribute_name_dico, format_vector)
            del attribute_name_dico

            # Sort the data by micro-class identifier
            pending_event = cyan + "selectSamples() : " + bold + green + "Encours calcul des statistiques part2... " + endC
            if debug >=4:
                print(pending_event)
            timeLine(path_time_log,pending_event)

            data_value_by_micro_class_dico = {}
            stat_by_micro_class_dico = {}

            # Initialise the nested dicos
            for id_micro in id_micro_list :
                data_value_by_micro_class_dico[id_micro] = {}
                stat_by_micro_class_dico[id_micro] = {}
                for name_field_value in res_values_dico :
                    if name_field_value != name_column :
                        data_value_by_micro_class_dico[id_micro][name_field_value] = []
                        stat_by_micro_class_dico[id_micro][name_field_value] = {}
                        stat_by_micro_class_dico[id_micro][name_field_value][AVERAGE] = 0.0
                        stat_by_micro_class_dico[id_micro][name_field_value][STANDARD_DEVIATION] = 0.0

            # Sort the values
            pending_event = cyan + "selectSamples() : " + bold + green + "Encours calcul des statistiques part3... " + endC
            if debug >=4:
                print(pending_event)
            timeLine(path_time_log,pending_event)

            for index in range(len(res_values_dico[name_column])) :
                id_micro = res_values_dico[name_column][index]
                for name_field_value in name_field_value_list :
                    data_value_by_micro_class_dico[id_micro][name_field_value].append(res_values_dico[name_field_value][index])
            del res_values_dico

            # Compute the statistics
            pending_event = cyan + "selectSamples() : " + bold + green + "Encours calcul des statistiques part4... " + endC
            if debug >=4:
                print(pending_event)
            timeLine(path_time_log,pending_event)

            # Each statistic falls back to 0 on failure (e.g. empty value list)
            for id_micro in id_micro_list :
                for name_field_value in name_field_value_list :
                    try :
                        stat_by_micro_class_dico[id_micro][name_field_value][AVERAGE] = average(data_value_by_micro_class_dico[id_micro][name_field_value])
                    except:
                        stat_by_micro_class_dico[id_micro][name_field_value][AVERAGE] = 0
                    try :
                        stat_by_micro_class_dico[id_micro][name_field_value][STANDARD_DEVIATION] = standardDeviation(data_value_by_micro_class_dico[id_micro][name_field_value])
                    except:
                        stat_by_micro_class_dico[id_micro][name_field_value][STANDARD_DEVIATION] = 0
                    try :
                        stat_by_micro_class_dico[id_micro][name_field_value][NB_POINTS] = len(data_value_by_micro_class_dico[id_micro][name_field_value])
                    except:
                        stat_by_micro_class_dico[id_micro][name_field_value][NB_POINTS] = 0

            del data_value_by_micro_class_dico

            # Write the .csv statistics file
            pending_event = cyan + "selectSamples() : " + bold + green + "Encours calcul des statistiques part5... " + endC
            if debug >= 4:
                print(pending_event)
            timeLine(path_time_log,pending_event)

            text_csv = " Micro classes ; Champs couche image ; Nombre de points  ; Moyenne ; Ecart type \n"
            writeTextFile(table_statistics_output, text_csv)
            for id_micro in id_micro_list :
                for name_field_value in name_field_value_list :
                    # Write one CSV row per (micro class, band field)
                    text_csv = " %d " %(id_micro)
                    text_csv += " ; %s" %(name_field_value)
                    text_csv += " ; %d" %(stat_by_micro_class_dico[id_micro][name_field_value][NB_POINTS])
                    text_csv += " ; %f" %(stat_by_micro_class_dico[id_micro][name_field_value][AVERAGE])
                    text_csv += " ; %f" %(stat_by_micro_class_dico[id_micro][name_field_value][STANDARD_DEVIATION])
                    appendTextFileCR(table_statistics_output, text_csv)
            del name_field_value_list

        else :
            if debug >=3:
                print(cyan + "selectSamples() : " + bold + green + "Pas de calcul des statistiques sur les valeurs des points demander!!!." + endC)

        del id_micro_list

        pending_event = cyan + "selectSamples() : " + bold + green + "End calcul des statistiques sur les valeurs des points d'echantillons selectionnees. " + endC
        if debug >= 3:
            print(pending_event)
        timeLine(path_time_log,pending_event)


    # 8. REMOVAL OF INTERMEDIATE FILES
    #------------------------------------------

    if not save_results_intermediate:

        if os.path.isfile(sample_points_output) :
            removeVectorFile(sample_points_output)

    print(cyan + "selectSamples() : " + bold + green + "FIN DE LA SELECTION DE POINTS" + endC)

    # Update the time log
    ending_event = "selectSamples() : Select points in raster mask macro input ending : "
    timeLine(path_time_log,ending_event)

    return