Example #1
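# Imports needed by the examples below (standard library and GDAL/OGR only). The project
# helpers used here (timeLine, createNDVI, createNDWI2, createBI, cutImageByVector,
# polygonizeRaster, bufferVector, ..., the colour constants cyan/bold/red/green/yellow/endC
# and the global `debug` level) are assumed to come from the surrounding library and are
# not reproduced in this excerpt.
import os
import shutil
import sys

from osgeo import gdal, ogr
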
def runTDCKmeans(input_images,
                 output_dir,
                 input_sea_points,
                 input_cut_vector,
                 no_data_value,
                 path_time_log,
                 nb_classes=5,
                 epsg=2154,
                 format_raster='GTiff',
                 format_vector="ESRI Shapefile",
                 extension_raster=".tif",
                 extension_vector=".shp",
                 save_results_intermediate=True,
                 overwrite=True):

    # Update the log
    starting_event = "runTDCKmeans() : Select TDC kmeans starting : "
    timeLine(path_time_log, starting_event)

    # Initialize the constants
    ID = "id"
    REP_TEMP = "temp_TDCKmeans"
    CHANNEL_ORDER = ["Red", "Green", "Blue", "NIR"]

    # Initialize the variables
    repertory_temp = output_dir + os.sep + REP_TEMP

    # Clean the output directory
    if overwrite and os.path.exists(output_dir):
        shutil.rmtree(output_dir)

    # Create the output directory if it does not already exist
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # Create the temporary output directory if it does not already exist
    if not os.path.exists(repertory_temp):
        os.makedirs(repertory_temp)

    # Check that the input files exist
    if not os.path.exists(input_cut_vector):
        print(cyan + "runTDCKmeans() : " + bold + red +
              "The file %s does not exist" % (input_cut_vector) + endC,
              file=sys.stderr)
        sys.exit(1)

    # Display the parameters
    if debug >= 3:
        print(bold + green +
              "Variables dans runTDCKmeans - Variables générales" + endC)
        print(cyan + "runTDCKmeans() : " + endC + "input_images : " +
              str(input_images) + endC)
        print(cyan + "runTDCKmeans() : " + endC + "output_dir : " +
              str(output_dir) + endC)
        print(cyan + "runTDCKmeans() : " + endC + "input_sea_points : " +
              str(input_sea_points) + endC)
        print(cyan + "runTDCKmeans() : " + endC + "input_cut_vector : " +
              str(input_cut_vector) + endC)
        print(cyan + "runTDCKmeans() : " + endC + "nb_classes : " +
              str(nb_classes) + endC)
        print(cyan + "runTDCKmeans() : " + endC + "no_data_value : " +
              str(no_data_value) + endC)
        print(cyan + "runTDCKmeans() : " + endC + "path_time_log : " +
              str(path_time_log) + endC)
        print(cyan + "runTDCKmeans() : " + endC + "epsg : " + str(epsg) + endC)
        print(cyan + "runTDCKmeans() : " + endC + "format_raster : " +
              str(format_raster) + endC)
        print(cyan + "runTDCKmeans() : " + endC + "format_vector : " +
              str(format_vector) + endC)
        print(cyan + "runTDCKmeans() : " + endC +
              "save_results_intermediate : " + str(save_results_intermediate) +
              endC)
        print(cyan + "runTDCKmeans() : " + endC + "overwrite : " +
              str(overwrite) + endC)

    dico = ""
    for image in input_images:
        # Check that the image exists
        if not os.path.exists(image):
            print(cyan + "runTDCKmeans() : " + bold + red +
                  "The file %s does not exist" % (image) + endC,
                  file=sys.stderr)
            sys.exit(1)

        # Initialize the output file names
        image_name = os.path.splitext(os.path.basename(image))[0]
        im_NDVI = repertory_temp + os.sep + "im_NDVI_" + image_name + extension_raster
        im_NDWI2 = repertory_temp + os.sep + "im_NDWI2_" + image_name + extension_raster
        im_BI = repertory_temp + os.sep + "im_BI_" + image_name + extension_raster
        im_concat = repertory_temp + os.sep + "im_concat_" + image_name + extension_raster
        im_kmeans = repertory_temp + os.sep + "im_kmeans_" + image_name + extension_raster
        im_kmeans_decoup = repertory_temp + os.sep + "im_kmeans_decoup_" + image_name + extension_raster
        im_kmeans_decoup_filter = repertory_temp + os.sep + "im_filter_" + image_name + extension_raster
        im_kmeans_vect_name = "im_kmeans_vect_" + image_name
        im_kmeans_vector = repertory_temp + os.sep + im_kmeans_vect_name + extension_vector

        # Create the index images (NDVI, NDWI2, BI)
        createNDVI(image, im_NDVI, CHANNEL_ORDER)
        createNDWI2(image, im_NDWI2, CHANNEL_ORDER)
        createBI(image, im_BI, CHANNEL_ORDER)

        # Concatenate the bands of the raw image with the NDVI, NDWI2 and BI images
        command = "otbcli_ConcatenateImages -il %s %s %s %s -out %s" % (
            image, im_NDVI, im_NDWI2, im_BI, im_concat)
        if debug >= 3:
            print(command)
        exitCode = os.system(command)
        if exitCode != 0:
            raise NameError(
                cyan + "runTDCKmeans() : " + endC + bold + red +
                "An error occurred during otbcli_ConcatenateImages command. See error message above."
                + endC)
        else:
            print(cyan + "runTDCKmeans() : " + endC + bold + green +
                  "Create binary file %s complete!" % (im_concat) + endC)

        # K-means classification on the concatenated image
        command = "otbcli_KMeansClassification -in %s -nc %s -nodatalabel %s -rand %s -out %s" % (
            im_concat, str(nb_classes), str(no_data_value), str(1), im_kmeans)
        if debug >= 3:
            print(command)
        exitCode = os.system(command)
        if exitCode != 0:
            raise NameError(
                cyan + "runTDCKmeans() : " + endC + bold + red +
                "An error occurred during otbcli_KMeansClassification command. See error message above."
                + endC)
        else:
            print(cyan + "runTDCKmeans() : " + endC + bold + green +
                  "Create binary file %s complete!" % (im_kmeans) + endC)

        # Clip the k-means raster with the cut vector
        cutImageByVector(input_cut_vector, im_kmeans, im_kmeans_decoup, None,
                         None, no_data_value, epsg, format_raster,
                         format_vector)

        # Regularize (clean) the k-means classification raster
        command = "otbcli_ClassificationMapRegularization -io.in %s -io.out %s -ip.radius %s" % (
            im_kmeans_decoup, im_kmeans_decoup_filter, str(5))
        if debug >= 3:
            print(command)
        exitCode = os.system(command)
        if exitCode != 0:
            raise NameError(
                cyan + "runTDCKmeans() : " + endC + bold + red +
                "An error occurred during otbcli_ClassificationMapRegularization command. See error message above."
                + endC)
        else:
            print(cyan + "runTDCKmeans() : " + endC + bold + green +
                  "Create binary file %s complete!" %
                  (im_kmeans_decoup_filter) + endC)

        # Vectorize the clipped image
        polygonizeRaster(im_kmeans_decoup_filter, im_kmeans_vector,
                         im_kmeans_vect_name, ID, format_vector)

        # Build the dictionary string passed to polygonMerToTDC
        dico += image + ":" + im_kmeans_vector + " "

    # Call polygonMerToTDC to extract the coastline (TDC)
    dico = dico[:-1]
    polygonMerToTDC(str(dico), output_dir, input_sea_points, False, 1,
                    input_cut_vector, 1, -1, no_data_value, path_time_log,
                    epsg, format_vector, extension_raster, extension_vector,
                    save_results_intermediate, overwrite)

    # Remove the temporary directory
    if not save_results_intermediate and os.path.exists(repertory_temp):
        shutil.rmtree(repertory_temp)

    # Update the log
    ending_event = "runTDCKmeans() : Select TDC kmeans ending : "
    timeLine(path_time_log, ending_event)

    return
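
# Minimal usage sketch for runTDCKmeans (not part of the original example; all paths and
# values below are hypothetical placeholders):
#
#     runTDCKmeans(["/data/ortho_20180415.tif"],
#                  "/data/tdc_kmeans",
#                  "/data/sea_points.shp",
#                  "/data/cut_zone.shp",
#                  no_data_value=0,
#                  path_time_log="/data/time_log.txt",
#                  nb_classes=5,
#                  epsg=2154)
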
def binaryMaskVect(input_image,
                   output_dir,
                   threshold,
                   input_cut_vector,
                   attributes_list,
                   no_data_value,
                   epsg,
                   format_raster="GTiff",
                   format_vector="ESRI Shapefile",
                   extension_raster=".tif",
                   extension_vector=".shp",
                   save_results_intermediate=False,
                   overwrite=True):

    # Display the parameters
    if debug >= 3:
        print(bold + green +
              "Variables dans le binaryMaskVect - Variables générales" + endC)
        print(cyan + "binaryMaskVect() : " + endC + "input_image : " +
              str(input_image) + endC)
        print(cyan + "binaryMaskVect() : " + endC + "output_dir : " +
              str(output_dir) + endC)
        print(cyan + "binaryMaskVect() : " + endC + "threshold : " +
              str(threshold) + endC)
        print(cyan + "binaryMaskVect() : " + endC + "input_cut_vector : " +
              str(input_cut_vector) + endC)
        print(cyan + "binaryMaskVect() : " + endC + "format_raster : " +
              str(format_raster) + endC)
        print(cyan + "binaryMaskVect() : " + endC + "format_vector : " +
              str(format_vector) + endC)
        print(cyan + "binaryMaskVect() : " + endC + "extension_raster : " +
              str(extension_raster) + endC)
        print(cyan + "binaryMaskVect() : " + endC + "extension_vector : " +
              str(extension_vector) + endC)
        print(cyan + "binaryMaskVect() : " + endC +
              "save_results_intermediate : " + str(save_results_intermediate) +
              endC)
        print(cyan + "binaryMaskVect() : " + endC + "overwrite : " +
              str(overwrite) + endC)

    image_name = os.path.splitext(os.path.basename(input_image))[0]
    binary_mask = output_dir + os.sep + "bin_mask_" + image_name + "_" + str(
        threshold).replace('.', '_') + extension_raster
    binary_mask_decoup = output_dir + os.sep + "bin_mask_decoup_" + image_name + "_" + str(
        threshold).replace('.', '_') + extension_raster
    binary_mask_vector = output_dir + os.sep + "bin_mask_vect_" + image_name + "_" + str(
        threshold).replace('.', '_') + extension_vector

    # Create the output directory if it does not already exist
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # Remove temporary files from previous computations
    if os.path.exists(binary_mask):
        removeFile(binary_mask)

    if os.path.exists(binary_mask_decoup):
        removeFile(binary_mask_decoup)

    if os.path.exists(binary_mask_vector):
        if overwrite:
            removeVectorFile(binary_mask_vector, format_vector)
        else:
            return binary_mask_vector

    # Create the binary mask
    createBinaryMask(input_image, binary_mask, threshold, False)

    if input_cut_vector != "":
        # Clip the raster
        cutImageByVector(input_cut_vector, binary_mask, binary_mask_decoup,
                         None, None, no_data_value, epsg, format_raster,
                         format_vector)
    else:
        binary_mask_decoup = binary_mask

    # Vectorize the clipped binary mask
    polygonizeRaster(binary_mask_decoup, binary_mask_vector, image_name, "id",
                     format_vector)

    # Add the fields to the created vector file
    for attribute in attributes_list:
        addNewFieldVector(binary_mask_vector, attribute.name,
                          attribute.ogrType, attribute.value, attribute.width,
                          None, format_vector)

    # Remove useless intermediate files and the temporary directory
    if not save_results_intermediate:
        removeFile(binary_mask)
        removeFile(binary_mask_decoup)

    return binary_mask_vector
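
# Minimal usage sketch for binaryMaskVect (hypothetical paths and threshold; attributes_list
# is assumed to hold objects exposing .name, .ogrType, .value and .width, as read by the
# loop above):
#
#     mask_vector = binaryMaskVect("/data/ndvi_20180415.tif",
#                                  "/data/masks",
#                                  0.3,
#                                  "/data/cut_zone.shp",
#                                  [],
#                                  0,
#                                  2154)
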
def polygonMerToTDC(input_im_ndvi_dico, output_dir, input_sea_points, fct_bin_mask_vect, simplif, input_cut_vector, buf_pos, buf_neg, no_data_value, path_time_log, epsg=2154, format_vector="ESRI Shapefile", extension_raster=".tif", extension_vector=".shp", save_results_intermediate=True, overwrite=True):
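    # Expected format of input_im_ndvi_dico, inferred from the parsing below: a space-separated
    # list of entries "image_path:mask1[,mask2,...]", e.g. (illustrative paths)
    # "/data/img1.tif:/data/mask1.shp /data/img2.tif:/data/mask2a.shp,/data/mask2b.shp".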

    # Update the log
    starting_event = "PolygonMerToTDC() : Select PolygonMerToTDC starting : "
    timeLine(path_time_log,starting_event)

    # Display the parameters
    if debug >= 3:
        print(bold + green + "Variables dans PolygonMerToTDC - Variables générales" + endC)
        print(cyan + "polygonMerToTDC() : " + endC + "input_im_ndvi_dico : " + str(input_im_ndvi_dico) + endC)
        print(cyan + "polygonMerToTDC() : " + endC + "output_dir : " + str(output_dir) + endC)
        print(cyan + "polygonMerToTDC() : " + endC + "input_sea_points : " + str(input_sea_points) + endC)
        print(cyan + "polygonMerToTDC() : " + endC + "fct_bin_mask_vect : " + str(fct_bin_mask_vect) + endC)
        print(cyan + "polygonMerToTDC() : " + endC + "simplif : " + str(simplif) + endC)
        print(cyan + "polygonMerToTDC() : " + endC + "input_cut_vector : " + str(input_cut_vector) + endC)
        print(cyan + "polygonMerToTDC() : " + endC + "buf_pos : " + str(buf_pos) + endC)
        print(cyan + "polygonMerToTDC() : " + endC + "buf_neg : " + str(buf_neg) + endC)
        print(cyan + "polygonMerToTDC() : " + endC + "no_data_value : " + str(no_data_value) + endC)
        print(cyan + "polygonMerToTDC() : " + endC + "path_time_log : " + str(path_time_log) + endC)
        print(cyan + "polygonMerToTDC() : " + endC + "epsg : " + str(epsg) + endC)
        print(cyan + "polygonMerToTDC() : " + endC + "format_vector : " + str(format_vector) + endC)
        print(cyan + "polygonMerToTDC() : " + endC + "extension_raster : " + str(extension_raster) + endC)
        print(cyan + "polygonMerToTDC() : " + endC + "extension_vector : " + str(extension_vector) + endC)
        print(cyan + "polygonMerToTDC() : " + endC + "save_results_intermediate : " + str(save_results_intermediate) + endC)
        print(cyan + "polygonMerToTDC() : " + endC + "overwrite : " + str(overwrite) + endC)

    # Constants
    REP_TEMP_POLY_MER = "Temp_PolygonMerToTDC_"
    CODAGE_8B = "uint8"

    # Create the output directory if it does not already exist
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    # For every image to process
    for r in range(len(input_im_ndvi_dico.split())):
        # Image to process
        input_image = input_im_ndvi_dico.split()[r].split(":")[0]
        image_name = os.path.splitext(os.path.basename(input_image))[0]

        # Create the temporary working directory
        repertory_temp = output_dir + os.sep + REP_TEMP_POLY_MER + image_name
        if not os.path.exists(repertory_temp):
            os.makedirs(repertory_temp)

        if not os.path.exists(input_image):
            print(cyan + "polygonMerToTDC() : " + bold + red + "L'image en entrée : " + input_image + " n'existe pas. Vérifiez le chemin !" + endC, file=sys.stderr)
            sys.exit(1)

        src_input_im = gdal.Open(input_image)
        if src_input_im is None:
            print(cyan + "polygonMerToTDC() : " + bold + red + "Impossible d'ouvrir l'image raster : " + input_image + endC, file=sys.stderr)
            sys.exit(1)
        try :
            srcband = src_input_im.GetRasterBand(2)
        except RuntimeError as err:
            print(cyan + "polygonMerToTDC() : " + bold + red + "Pas de bande 2 trouvée sur : " + input_image + endC, file=sys.stderr)
            e = "OS error: {0}".format(err)
            print(e, file=sys.stderr)
            sys.exit(1)

        # Handle the case where no NDVI mask follows the image
        if ":" not in input_im_ndvi_dico.split()[r]:
            print(cyan + "polygonMerToTDC() : " + red + bold + "Aucun masque binaire vectorisé spécifié ! Nécessité d'au moins un par image." + endC, file=sys.stderr)
            sys.exit(1)

        # Loop over all the NDVI masks associated with each image
        for ndvi_mask_vect in input_im_ndvi_dico.split()[r].split(":")[1].split(","):
            if debug > 2 :
                print(cyan + "polygonMerToTDC() : " + endC + "Traitement de : " + ndvi_mask_vect)

            # Initialize the output file names
            binary_mask_zeros = repertory_temp + os.sep +  "b_mask_zeros_" + os.path.splitext(os.path.basename(input_image))[0] + extension_raster
            binary_mask_zeros_vector = "b_mask_zeros_vect_" + os.path.splitext(os.path.basename(input_image))[0]
            path_binary_mask_zeros_vector = repertory_temp + os.sep +  binary_mask_zeros_vector + extension_vector
            true_values_buffneg = repertory_temp + os.sep + "true_values_buffneg_" + os.path.splitext(os.path.basename(input_image))[0] + extension_vector
            decoup_buffneg = repertory_temp + os.sep + "decoupe_buffneg_" + os.path.splitext(os.path.basename(input_cut_vector))[0] + extension_vector

            if fct_bin_mask_vect :
                threshold = os.path.splitext(os.path.basename(ndvi_mask_vect))[0].split("_")[-2] + "_" + os.path.splitext(os.path.basename(ndvi_mask_vect))[0].split("_")[-1]
                poly_mer_shp = repertory_temp + os.sep + "poly_mer_" + os.path.splitext(os.path.basename(input_image))[0] + "_" + str(threshold) + extension_vector
                poly_mer_shp_dilat = repertory_temp + os.sep + "poly_mer_dilat_" + os.path.splitext(os.path.basename(input_image))[0] + "_" + str(threshold) + extension_vector
                poly_mer_shp_ferm = repertory_temp + os.sep + "poly_mer_ferm_" + os.path.splitext(os.path.basename(input_image))[0] + "_" + str(threshold) + extension_vector
                polyline_mer = repertory_temp + os.sep + "polyline_mer_" + os.path.splitext(os.path.basename(input_image))[0] + "_" + str(threshold) + extension_vector
                polyline_tdc = repertory_temp + os.sep + "tdc_" + os.path.splitext(os.path.basename(input_image))[0] + "_" + str(threshold) + extension_vector
                polyline_tdc_simplif = repertory_temp + os.sep + "tdcs_" + os.path.splitext(os.path.basename(input_image))[0] + "_" + str(threshold) + extension_vector
                polyline_tdc_simplif_decoup = output_dir + os.sep + "tdcsd_" + os.path.splitext(os.path.basename(input_image))[0] + "_" + str(threshold) + extension_vector
            else :
                poly_mer_shp = repertory_temp + os.sep + "poly_mer_" + os.path.splitext(os.path.basename(ndvi_mask_vect))[0] + extension_vector
                poly_mer_shp_dilat = repertory_temp + os.sep + "poly_mer_dilat_" + os.path.splitext(os.path.basename(input_image))[0] + extension_vector
                poly_mer_shp_ferm = repertory_temp + os.sep + "poly_mer_ferm_" + os.path.splitext(os.path.basename(input_image))[0] + extension_vector
                polyline_mer = repertory_temp + os.sep + "polyline_mer_" + os.path.splitext(os.path.basename(ndvi_mask_vect))[0] + extension_vector
                polyline_tdc = repertory_temp + os.sep + "tdc_" + os.path.splitext(os.path.basename(ndvi_mask_vect))[0] + extension_vector
                polyline_tdc_simplif = repertory_temp + os.sep + "tdcs_" + os.path.splitext(os.path.basename(ndvi_mask_vect))[0] + extension_vector
                polyline_tdc_simplif_decoup = output_dir + os.sep + "tdcsd_" + os.path.splitext(os.path.basename(ndvi_mask_vect))[0] + extension_vector

            # Create the shapefile poly_mer_shp containing only the sea polygons
            if os.path.exists(poly_mer_shp):
                removeVectorFile(poly_mer_shp)

            # Select the sea polygons (those containing the sea points) into poly_mer_shp
            withinPolygons(input_sea_points, ndvi_mask_vect, poly_mer_shp, overwrite, format_vector)

            # Closing (dilation then erosion) on the sea polygons, to fill small holes (boats, ...) and merge nearby polygons
            bufferVector(poly_mer_shp, poly_mer_shp_dilat, buf_pos, "", 1.0, 10, format_vector)
            bufferVector(poly_mer_shp_dilat, poly_mer_shp_ferm, buf_neg, "", 1.0, 10, format_vector)

            # Create a binary mask separating no-data pixels from real values
            no_data_ima = getNodataValueImage(input_image)
            if no_data_ima == None :
                no_data_ima = no_data_value
            createBinaryMaskMultiBand(input_image, binary_mask_zeros, no_data_ima, CODAGE_8B)

            # Vectorize the true data/false data binary mask -> polygon containing only real values
            if os.path.exists(path_binary_mask_zeros_vector):
                removeVectorFile(path_binary_mask_zeros_vector)
            polygonizeRaster(binary_mask_zeros, path_binary_mask_zeros_vector, binary_mask_zeros_vector)

            # Negative buffer on this polygon
            bufferVector(path_binary_mask_zeros_vector, true_values_buffneg, -2, "", 1.0, 10, format_vector)

            # Convert the polygons of the poly_mer_shp_ferm layer to polylines
            convertePolygon2Polylines(poly_mer_shp_ferm, polyline_mer, overwrite, format_vector)

            # Clip the polyline_mer coastline with the negatively buffered polygon
            cutVectorAll(true_values_buffneg, polyline_mer, polyline_tdc, overwrite, format_vector)

            # Simplify the coastline (TDC)
            simplifyVector(polyline_tdc, polyline_tdc_simplif, simplif, format_vector)

            # Negative buffer around input_cut_vector
            bufferVector(input_cut_vector, decoup_buffneg, -1, "", 1.0, 10, format_vector)

            # Clip the simplified coastline with the negative buffer of input_cut_vector
            cutVectorAll(decoup_buffneg, polyline_tdc_simplif, polyline_tdc_simplif_decoup, overwrite, format_vector)

            tdc_final = polyline_tdc_simplif_decoup

        # Remove the temporary directory
        if not save_results_intermediate and os.path.exists(repertory_temp):
            shutil.rmtree(repertory_temp)

    # Update the log
    ending_event = "PolygonMerToTDC() : Select PolygonMerToTDC ending: "
    timeLine(path_time_log,ending_event)

    return tdc_final
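
# Minimal usage sketch for polygonMerToTDC (hypothetical paths; the buffer sizes 1 / -1 and
# simplif=1 mirror the call made by runTDCKmeans above):
#
#     tdc = polygonMerToTDC("/data/img1.tif:/data/kmeans_vect1.shp",
#                           "/data/tdc_output",
#                           "/data/sea_points.shp",
#                           False,
#                           1,
#                           "/data/cut_zone.shp",
#                           1,
#                           -1,
#                           0,
#                           "/data/time_log.txt")
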
def createEmprise(input_dir,
                  output_file,
                  is_not_assembled,
                  is_all_polygons_used,
                  is_not_date,
                  is_optimize_emprise,
                  is_optimize_emprise_nodata,
                  no_data_value,
                  size_erode,
                  path_time_log,
                  separ_name="_",
                  pos_date=1,
                  nb_char_date=8,
                  separ_date="",
                  epsg=2154,
                  format_vector='ESRI Shapefile',
                  extension_raster=".tif",
                  extension_vector=".shp",
                  save_results_intermediate=False,
                  overwrite=True):

    # Display the parameters
    if debug >= 3:
        print(bold + green +
              "Variables dans le createEmprise - Variables générales" + endC)
        print(cyan + "createEmprise() : " + endC + "input_dir : " +
              str(input_dir))
        print(cyan + "createEmprise() : " + endC + "output_file : " +
              str(output_file))
        print(cyan + "createEmprise() : " + endC + "is_not_assembled : " +
              str(is_not_assembled))
        print(cyan + "createEmprise() : " + endC + "is_all_polygons_used : " +
              str(is_all_polygons_used))
        print(cyan + "createEmprise() : " + endC + "is_not_date : " +
              str(is_not_date))
        print(cyan + "createEmprise() : " + endC + "is_optimize_emprise : " +
              str(is_optimize_emprise))
        print(cyan + "createEmprise() : " + endC +
              "is_optimize_emprise_nodata : " +
              str(is_optimize_emprise_nodata))
        print(cyan + "createEmprise() : " + endC + "no_data_value : " +
              str(no_data_value))
        print(cyan + "createEmprise() : " + endC + "size_erode : " +
              str(size_erode))
        print(cyan + "createEmprise() : " + endC + "path_time_log : " +
              str(path_time_log))
        print(cyan + "createEmprise() : " + endC + "separ_name : " +
              str(separ_name))
        print(cyan + "createEmprise() : " + endC + "pos_date : " +
              str(pos_date))
        print(cyan + "createEmprise() : " + endC + "nb_char_date : " +
              str(nb_char_date))
        print(cyan + "createEmprise() : " + endC + "separ_date : " +
              str(separ_date))
        print(cyan + "createEmprise() : " + endC + "epsg : " + str(epsg))
        print(cyan + "createEmprise() : " + endC + "format_vector : " +
              str(format_vector))
        print(cyan + "createEmprise() : " + endC + "extension_raster : " +
              str(extension_raster) + endC)
        print(cyan + "createEmprise() : " + endC + "extension_vector : " +
              str(extension_vector) + endC)
        print(cyan + "createEmprise() : " + endC +
              "save_results_intermediate : " + str(save_results_intermediate))
        print(cyan + "createEmprise() : " + endC + "overwrite : " +
              str(overwrite))

    # Constants
    EXT_LIST_HDF5 = ['h5', 'H5', 'he5', 'HE5', 'hdf5', 'HDF5']
    EXT_LIST = EXT_LIST_HDF5 + [
        'tif', 'TIF', 'tiff', 'TIFF', 'ecw', 'ECW', 'jp2', 'JP2', 'dim', 'DIM',
        'asc', 'ASC'
    ]
    SUFFIX_DETAILLEE = "_detail"
    SUFFIX_MASK_ZERO = "_mask_zeros"
    SUFFIX_TMP = "_tmp"

    CODAGE_8B = "uint8"
    ATTR_NAME_ID = "Id"
    ATTR_NAME_NOMIMAGE = "NomImage"
    ATTR_NAME_DATEACQUI = "DateAcqui"
    ATTR_NAME_HEUREACQUI = "HeureAcqui"
    ATTR_NAME_REFDOSSIER = "RefDossier"

    # Variables
    points_list = []
    name_image_list = []
    name_rep_list = []
    ref_dossier_list = []
    date_list = []
    heure_list = []
    optimize_emprise_nodata_shape_list = []
    polygons_attr_coord_dico = {}
    pos_date = pos_date - 1

    repertory_output = os.path.dirname(output_file)
    file_name = os.path.splitext(os.path.basename(output_file))[0]
    extension = os.path.splitext(output_file)[1]
    file_vector_detail = repertory_output + os.sep + file_name + SUFFIX_DETAILLEE + extension

    # If an output file with the same name already exists and overwrite is disabled, keep the existing file
    check = os.path.isfile(output_file)
    if check and not overwrite:
        print(
            bold + yellow + "createEmprise() : " + endC +
            "Le fichier vecteur d'emprise %s existe déjà : pas d'actualisation"
            % (output_file) + endC)
    # Otherwise (the file does not exist, or overwrite is enabled), compute it
    else:
        if check:
            try:  # Remove the existing file, if any
                removeVectorFile(output_file)
                removeVectorFile(file_vector_detail)
            except Exception:
                pass  # If the file cannot be removed, assume it does not exist and continue

        # Get all the subdirectories
        sub_rep_list = getSubRepRecursifList(input_dir)
        sub_rep_list.append(input_dir)

        # Loop over each image folder of the input directory
        for repertory in sub_rep_list:
            if os.path.isdir(repertory):

                if debug >= 2:
                    print(cyan + "createEmprise() : " + endC + bold + green +
                          "Traitement de : " + endC + repertory)

                # Get the images of the input folder
                imagettes_jp2_tif_ecw_list = []
                imagettes_list = os.listdir(repertory)

                for elt1 in imagettes_list:
                    path_image = repertory + os.sep + elt1
                    if (os.path.isfile(path_image)) and (len(
                            elt1.rsplit('.', 1)) == 2) and (elt1.rsplit(
                                '.', 1)[1] in EXT_LIST):
                        if elt1.rsplit('.', 1)[1] in EXT_LIST_HDF5:
                            elt1_new = os.path.splitext(
                                elt1)[0] + extension_raster
                            path_image_new = repertory + os.sep + elt1_new
                            h5ToGtiff(path_image, path_image_new)
                            imagettes_jp2_tif_ecw_list.append(elt1_new)
                        else:
                            imagettes_jp2_tif_ecw_list.append(elt1)

                # Case where the directory contains image files
                if not imagettes_jp2_tif_ecw_list == []:

                    # Case where each image footprint is a separate polygon
                    if is_not_assembled or is_optimize_emprise or is_optimize_emprise_nodata:

                        for imagette in imagettes_jp2_tif_ecw_list:
                            # Get the footprint of the image
                            path_image = repertory + os.sep + imagette
                            path_info_acquisition = repertory
                            xmin, xmax, ymin, ymax = getEmpriseImage(
                                path_image)
                            coord_list = [
                                xmin, ymax, xmax, ymax, xmax, ymin, xmin, ymin,
                                xmin, ymax
                            ]

                            # Store the data
                            points_list.append(coord_list)

                            # Get the image name for the field creation
                            input_image_name = os.path.splitext(
                                os.path.basename(path_image))[0]
                            name_image_list.append(input_image_name)

                            # Case: optimize the footprint by removing the nodata
                            if is_optimize_emprise_nodata:

                                path_info_acquisition = path_image
                                optimize_emprise_nodata_shape = repertory_output + os.sep + input_image_name + extension_vector
                                optimize_emprise_tmp1_shape = repertory_output + os.sep + input_image_name + SUFFIX_TMP + str(
                                    1) + extension_vector
                                optimize_emprise_tmp2_shape = repertory_output + os.sep + input_image_name + SUFFIX_TMP + str(
                                    2) + extension_vector
                                optimize_emprise_tmp3_shape = repertory_output + os.sep + input_image_name + SUFFIX_TMP + str(
                                    3) + extension_vector
                                optimize_emprise_tmp4_shape = repertory_output + os.sep + input_image_name + SUFFIX_TMP + str(
                                    4) + extension_vector
                                binary_mask_zeros_raster = repertory_output + os.sep + input_image_name + SUFFIX_MASK_ZERO + extension_raster
                                optimize_emprise_nodata_shape_list.append(
                                    optimize_emprise_nodata_shape)

                                # Create a binary mask separating no-data pixels from real values
                                no_data_value_img = getNodataValueImage(
                                    path_image)
                                if no_data_value_img == None:
                                    no_data_value_img = no_data_value
                                createBinaryMaskMultiBand(
                                    path_image, binary_mask_zeros_raster,
                                    no_data_value_img, CODAGE_8B)

                                # Vectorize the true data/false data binary mask -> polygon containing only real values
                                if os.path.exists(
                                        optimize_emprise_nodata_shape):
                                    removeVectorFile(
                                        optimize_emprise_nodata_shape)

                                polygonizeRaster(binary_mask_zeros_raster,
                                                 optimize_emprise_tmp1_shape,
                                                 input_image_name,
                                                 ATTR_NAME_ID, format_vector)

                                # Remove spurious polygons to keep only the main polygon, unless the "all" option is requested
                                if not is_all_polygons_used:
                                    geometry_list = getGeomPolygons(
                                        optimize_emprise_tmp1_shape, None,
                                        None, format_vector)
                                    geometry_orded_dico = {}
                                    geometry_orded_list = []
                                    for geometry in geometry_list:
                                        area = geometry.GetArea()
                                        geometry_orded_dico[area] = geometry
                                        geometry_orded_list.append(area)
                                    geometry_orded_list.sort()
                                    if len(geometry_orded_list) > 0:
                                        max_area = geometry_orded_list[
                                            len(geometry_orded_list) - 1]
                                        geom_max = geometry_orded_dico[
                                            max_area]
                                        attribute_dico = {
                                            ATTR_NAME_ID: ogr.OFTInteger
                                        }
                                        polygons_attr_geom_dico = {}
                                        polygons_attr_geom_dico[str(1)] = [
                                            geom_max, {
                                                ATTR_NAME_ID: str(1)
                                            }
                                        ]
                                        createPolygonsFromGeometryList(
                                            attribute_dico,
                                            polygons_attr_geom_dico,
                                            optimize_emprise_tmp2_shape, epsg,
                                            format_vector)
                                    else:
                                        print(
                                            cyan + "createEmprise() : " +
                                            bold + yellow +
                                            " Attention!!! Fichier non traite (ne contient pas de polygone): "
                                            + optimize_emprise_tmp1_shape +
                                            endC)
                                        optimize_emprise_tmp2_shape = optimize_emprise_tmp1_shape
                                else:
                                    optimize_emprise_tmp2_shape = optimize_emprise_tmp1_shape

                                # Clean the polygons: simplification and removal of holes
                                cleanRingVector(optimize_emprise_tmp2_shape,
                                                optimize_emprise_tmp3_shape,
                                                format_vector)
                                simplifyVector(optimize_emprise_tmp3_shape,
                                               optimize_emprise_tmp4_shape, 2,
                                               format_vector)
                                if size_erode != 0.0:
                                    bufferVector(
                                        optimize_emprise_tmp4_shape,
                                        optimize_emprise_nodata_shape,
                                        size_erode * -1, "", 1.0, 10,
                                        format_vector)
                                else:
                                    copyVectorFile(
                                        optimize_emprise_tmp4_shape,
                                        optimize_emprise_nodata_shape,
                                        format_vector)

                                # Remove intermediate files
                                if not save_results_intermediate:
                                    removeFile(binary_mask_zeros_raster)
                                    removeVectorFile(
                                        optimize_emprise_tmp1_shape)
                                    removeVectorFile(
                                        optimize_emprise_tmp2_shape)
                                    removeVectorFile(
                                        optimize_emprise_tmp3_shape)
                                    removeVectorFile(
                                        optimize_emprise_tmp4_shape)

                            # Get the acquisition date and time
                            # For the nodata-optimized footprint the image name is used for the acquisition date, otherwise the directory name is used
                            getDataToFiels(
                                path_info_acquisition, is_not_date,
                                is_optimize_emprise
                                or is_optimize_emprise_nodata, separ_name,
                                pos_date, nb_char_date, separ_date,
                                points_list, ref_dossier_list, name_rep_list,
                                date_list, heure_list)

                    # Case where the global footprint of the images is taken: a single polygon covering the overall extent
                    else:

                        # Get the footprints of the folder's images
                        liste_x_l = []
                        liste_y_b = []
                        liste_x_r = []
                        liste_y_t = []

                        for imagette in imagettes_jp2_tif_ecw_list:
                            path_image = repertory + os.sep + imagette
                            xmin, xmax, ymin, ymax = getEmpriseImage(
                                path_image)

                            liste_x_l.append(xmin)
                            liste_x_r.append(xmax)
                            liste_y_b.append(ymin)
                            liste_y_t.append(ymax)

                        # Get the min and max of the image tiles list
                        # Top-left corner
                        xmin_l_t = str(min(liste_x_l))

                        # Bottom-left corner
                        ymin_l_b = str(min(liste_y_b))
                        xmin_l_b = xmin_l_t

                        # Bottom-right corner
                        xmax_r_b = str(max(liste_x_r))

                        # Top-right corner
                        ymax_r_t = str(max(liste_y_t))
                        xmax_r_t = xmax_r_b
                        ymax_r_b = ymin_l_b
                        ymin_l_t = ymax_r_t

                        coord_list = [
                            xmin_l_t, ymin_l_t, xmin_l_b, ymin_l_b, xmax_r_b,
                            ymax_r_b, xmax_r_t, ymax_r_t, xmin_l_t, ymin_l_t
                        ]
                        points_list.append(coord_list)

                        # Get the directory name for the field creation
                        getDataToFiels(repertory, is_not_date,
                                       is_optimize_emprise, separ_name,
                                       pos_date, nb_char_date, separ_date,
                                       points_list, ref_dossier_list,
                                       name_rep_list, date_list, heure_list)

        # Prepare attribute_dico and polygons_attr_coord_dico
        if is_not_assembled:
            attribute_dico = {
                ATTR_NAME_ID: ogr.OFTInteger,
                ATTR_NAME_NOMIMAGE: ogr.OFTString,
                ATTR_NAME_DATEACQUI: ogr.OFTDate,
                ATTR_NAME_HEUREACQUI: ogr.OFTString
            }

            for i in range(len(points_list)):
                polygons_attr_coord_dico[str(i)] = [
                    points_list[i], {
                        ATTR_NAME_ID: i + 1,
                        ATTR_NAME_NOMIMAGE: name_image_list[i],
                        ATTR_NAME_DATEACQUI: date_list[i],
                        ATTR_NAME_HEUREACQUI: heure_list[i]
                    }
                ]

        else:
            attribute_dico = {
                ATTR_NAME_NOMIMAGE: ogr.OFTString,
                ATTR_NAME_REFDOSSIER: ogr.OFTString,
                ATTR_NAME_DATEACQUI: ogr.OFTDate,
                ATTR_NAME_HEUREACQUI: ogr.OFTString
            }

            for i in range(len(points_list)):
                polygons_attr_coord_dico[str(i)] = [
                    points_list[i], {
                        ATTR_NAME_NOMIMAGE: name_rep_list[i],
                        ATTR_NAME_REFDOSSIER: ref_dossier_list[i],
                        ATTR_NAME_DATEACQUI: date_list[i],
                        ATTR_NAME_HEUREACQUI: heure_list[i]
                    }
                ]

        # Case: optimize the footprint by removing the nodata
        colum = ""
        if is_optimize_emprise_nodata:

            if is_not_assembled:
                file_vector = output_file
            else:
                file_vector = file_vector_detail

            # Merge the optimized, nodata-free image footprint polygons
            polygons_attr_geom_dico = {}
            i = 0
            for shape_file in optimize_emprise_nodata_shape_list:
                geom_list = getGeomPolygons(shape_file, ATTR_NAME_ID, 1,
                                            format_vector)
                if not is_all_polygons_used:
                    if geom_list is not None and len(geom_list) > 0:
                        geom = geom_list[0]
                        polygons_attr_geom_dico[str(i)] = [
                            geom, polygons_attr_coord_dico[str(i)][1]
                        ]
                else:
                    j = 1
                    for geom in geom_list:
                        polygons_attr_geom_dico[str(i + 1000000 * j)] = [
                            geom, polygons_attr_coord_dico[str(i)][1]
                        ]
                        j += 1
                i += 1

            createPolygonsFromGeometryList(attribute_dico,
                                           polygons_attr_geom_dico,
                                           file_vector, epsg, format_vector)

            # Remove intermediate files
            if not save_results_intermediate:
                for vector_to_del in optimize_emprise_nodata_shape_list:
                    removeVectorFile(vector_to_del)

        else:
            # Use createPolygonsFromCoordList()
            if is_optimize_emprise:
                file_vector = file_vector_detail
            else:
                file_vector = output_file

            # Create the polygons from the list of footprint coordinates
            createPolygonsFromCoordList(attribute_dico,
                                        polygons_attr_coord_dico, file_vector,
                                        epsg, format_vector)

        # Case: merge the polygons to get a footprint made of a single polygon
        if not is_not_assembled:
            if is_optimize_emprise or is_optimize_emprise_nodata or is_all_polygons_used:
                column_name = ""
                if is_all_polygons_used:
                    column_name = ATTR_NAME_DATEACQUI
                elif is_optimize_emprise or is_optimize_emprise_nodata:
                    column_name = ATTR_NAME_NOMIMAGE

                # Merge the polygons
                if is_all_polygons_used and is_not_date:
                    fusionNeighbourPolygonsBySameValue(file_vector,
                                                       output_file,
                                                       column_name,
                                                       format_vector)
                    #dissolveVector(file_vector, output_file, column_name, format_vector)
                else:
                    if not geometries2multigeometries(file_vector, output_file,
                                                      column_name,
                                                      format_vector):
                        copyVectorFile(file_vector, output_file, format_vector)

                # Remove intermediate files
                if not save_results_intermediate:
                    removeVectorFile(file_vector_detail)

    return
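
# Minimal usage sketch for createEmprise (hypothetical paths and flag values; the keyword
# names mirror the signature above):
#
#     createEmprise("/data/ortho_tiles",
#                   "/data/emprise/emprise.shp",
#                   is_not_assembled=False,
#                   is_all_polygons_used=False,
#                   is_not_date=False,
#                   is_optimize_emprise=False,
#                   is_optimize_emprise_nodata=False,
#                   no_data_value=0,
#                   size_erode=0.0,
#                   path_time_log="/data/time_log.txt")
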
def sobelToOuvrages(input_im_seuils_dico, output_dir, input_cut_vector, no_data_value, path_time_log, format_raster='GTiff', format_vector="ESRI Shapefile", extension_raster=".tif", extension_vector=".shp", save_results_intermediate=True, overwrite=True):
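    # Expected format of input_im_seuils_dico, inferred from the parsing below: a space-separated
    # list of entries "raw_image:sobel_image,threshold1[,threshold2,...]", e.g. (illustrative)
    # "/data/img1.tif:/data/sobel1.tif,0.10,0.15".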

    # Constants
    REPERTORY_TEMP = "temp_sobel"
    CODAGE_8B = "uint8"
    ID = "id"

    # Update the log
    starting_event = "sobelToOuvrages() : Select Sobel to ouvrages starting : "
    timeLine(path_time_log,starting_event)

    # Create the output directory if it does not already exist
    if not os.path.exists(output_dir + os.sep + REPERTORY_TEMP):
        os.makedirs(output_dir + os.sep + REPERTORY_TEMP)

    # Display the parameters
    if debug >= 3:
        print(bold + green + "Variables dans SobelToOuvrages - Variables générales" + endC)
        print(cyan + "sobelToOuvrages() : " + endC + "input_im_seuils_dico : " + str(input_im_seuils_dico) + endC)
        print(cyan + "sobelToOuvrages() : " + endC + "output_dir : " + str(output_dir) + endC)
        print(cyan + "sobelToOuvrages() : " + endC + "input_cut_vector : " + str(input_cut_vector) + endC)
        print(cyan + "sobelToOuvrages() : " + endC + "path_time_log : " + str(path_time_log) + endC)
        print(cyan + "sobelToOuvrages() : " + endC + "format_raster : " + str(format_raster) + endC)
        print(cyan + "sobelToOuvrages() : " + endC + "format_vector : " + str(format_vector) + endC)
        print(cyan + "sobelToOuvrages() : " + endC + "extension_raster : " + str(extension_raster) + endC)
        print(cyan + "sobelToOuvrages() : " + endC + "extension_vector : " + str(extension_vector) + endC)
        print(cyan + "sobelToOuvrages() : " + endC + "save_results_intermediate : " + str(save_results_intermediate) + endC)
        print(cyan + "sobelToOuvrages() : " + endC + "overwrite : " + str(overwrite) + endC)

    sobel_ouvrages_shp_list = []

    for elt in input_im_seuils_dico.split():
        raw_image = elt.split(":")[0]
        sobel_image = elt.split(":")[1].split(",")[0]

        for i in range(1,len(elt.split(":")[1].split(","))):
            seuil = elt.split(":")[1].split(",")[i]

            # Initialize the output file names
            image_name = os.path.splitext(os.path.basename(raw_image))[0]
            sobel_binary_mask = output_dir + os.sep + REPERTORY_TEMP + os.sep + "bin_mask_sobel_" + image_name + "_" + str(seuil) + extension_raster
            sobel_binary_mask_vector_name = "bin_mask_vect_sobel_" + image_name + "_" + str(seuil)
            sobel_binary_mask_vector = output_dir + os.sep + REPERTORY_TEMP + os.sep + sobel_binary_mask_vector_name + extension_vector
            sobel_binary_mask_vector_cleaned = output_dir + os.sep + REPERTORY_TEMP + os.sep + "bin_mask_vect_sobel_cleaned_" + image_name + "_" + str(seuil) + extension_vector
            sobel_decoup = output_dir + os.sep + "sobel_decoup_" + image_name + "_" + str(seuil) + extension_vector

            binary_mask_zeros_name = "b_mask_zeros_vect_" + image_name
            binary_mask_zeros_raster = output_dir + os.sep + REPERTORY_TEMP + os.sep + "b_mask_zeros_" + image_name + extension_raster
            binary_mask_zeros_vector = output_dir + os.sep + REPERTORY_TEMP + os.sep + binary_mask_zeros_name + extension_vector
            binary_mask_zeros_vector_simpl = output_dir + os.sep + REPERTORY_TEMP + os.sep + "b_mask_zeros_vect_simpl_" + image_name + extension_vector
            true_values_buffneg = output_dir + os.sep + REPERTORY_TEMP + os.sep + "true_values_buffneg_" + image_name + extension_vector
            ouvrages_decoup_final = output_dir + os.sep + "ouvrages_sobel_" + image_name + "_" + str(seuil) + extension_vector

            # Create the binary mask
            createBinaryMask(sobel_image, sobel_binary_mask, float(seuil), True)

            # Clip the binary mask with the input cut shapefile
            cutImageByVector(input_cut_vector, sobel_binary_mask, sobel_decoup, None, None, no_data_value, 0, format_raster, format_vector)

            # Vectorize the clipped Sobel binary mask
            polygonizeRaster(sobel_decoup, sobel_binary_mask_vector, sobel_binary_mask_vector_name)

            # Create a binary mask separating no-data pixels from real values
            nodata_value = getNodataValueImage(raw_image)
            if nodata_value is None:
                nodata_value = no_data_value
            createBinaryMaskMultiBand(raw_image, binary_mask_zeros_raster, nodata_value, CODAGE_8B)

            # Vectorize the true data/false data binary mask -> polygon containing only real values
            if os.path.exists(binary_mask_zeros_vector):
                removeVectorFile(binary_mask_zeros_vector, format_vector)

            # Polygonize
            polygonizeRaster(binary_mask_zeros_raster, binary_mask_zeros_vector, binary_mask_zeros_name, ID, format_vector)

            # Simplify the obtained mask
            simplifyVector(binary_mask_zeros_vector, binary_mask_zeros_vector_simpl, 2, format_vector)

            # Negative buffer on this polygon
            bufferVector(binary_mask_zeros_vector_simpl, true_values_buffneg, -2, "", 1.0, 10, format_vector)
            cleanMiniAreaPolygons(sobel_binary_mask_vector, sobel_binary_mask_vector_cleaned, 15, ID, format_vector)

            # Clip with the negative buffer around the true data
            cutVectorAll(true_values_buffneg, sobel_binary_mask_vector_cleaned, ouvrages_decoup_final, overwrite, format_vector)
            sobel_ouvrages_shp_list.append(ouvrages_decoup_final)

    return sobel_ouvrages_shp_list
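
# Minimal usage sketch for sobelToOuvrages (hypothetical paths and threshold; the dictionary
# string follows the format described at the top of the function):
#
#     ouvrages_list = sobelToOuvrages("/data/img1.tif:/data/sobel1.tif,0.15",
#                                     "/data/ouvrages",
#                                     "/data/cut_zone.shp",
#                                     0,
#                                     "/data/time_log.txt")
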
Example #6
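# The same standard-library/GDAL imports and project helper assumptions as in Example #1
# apply to this example as well.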
def createDifference(image_ortho_input,
                     image_mns_input,
                     image_mnt_input,
                     bd_vector_input_list,
                     zone_buffer_dico,
                     departments_list,
                     image_difference_output,
                     vector_difference_output,
                     fileld_bd_raster,
                     simplifie_param,
                     threshold_ndvi,
                     threshold_difference,
                     filter_difference_0,
                     filter_difference_1,
                     path_time_log,
                     format_vector='ESRI Shapefile',
                     extension_raster=".tif",
                     extension_vector=".shp",
                     save_results_intermediate=False,
                     channel_order=['Red', 'Green', 'Blue', 'NIR'],
                     overwrite=True):

    # Update the log
    starting_event = "createDifference() : create macro samples starting : "
    timeLine(path_time_log, starting_event)

    # Constants
    CODAGE = "float"

    FOLDER_MASK_TEMP = 'Mask_'
    FOLDER_CUTTING_TEMP = 'Cut_'
    FOLDER_BUFF_TEMP = 'Buff_'
    FOLDER_RESULT_TEMP = 'Tmp_'

    SUFFIX_MASK_CRUDE = '_mcrude'
    SUFFIX_MASK = '_mask'
    SUFFIX_FILTERED = '_filtered'
    SUFFIX_VECTOR_CUT = '_decoup'
    SUFFIX_VECTOR_BUFF = '_buff'
    SUFFIX_NEW_MNS = '_new_mns'
    SUFFIX_DIFF_MNS = '_diff_mns'
    SUFFIX_NDVI = '_ndvi'

    # Display the parameters
    if debug >= 3:
        print(bold + green + "Variables dans la fonction" + endC)
        print(cyan + "createDifference() : " + endC + "image_ortho_input : " +
              str(image_ortho_input) + endC)
        print(cyan + "createDifference() : " + endC + "image_mns_input : " +
              str(image_mns_input) + endC)
        print(cyan + "createDifference() : " + endC + "image_mnt_input : " +
              str(image_mnt_input) + endC)
        print(cyan + "createDifference() : " + endC +
              "bd_vector_input_list : " + str(bd_vector_input_list) + endC)
        print(cyan + "createDifference() : " + endC + "zone_buffer_dico : " +
              str(zone_buffer_dico) + endC)
        print(cyan + "createDifference() : " + endC + "departments_list : " +
              str(departments_list) + endC)
        print(cyan + "createDifference() : " + endC +
              "image_difference_output : " + str(image_difference_output) +
              endC)
        print(cyan + "createDifference() : " + endC +
              "vector_difference_output : " + str(vector_difference_output) +
              endC)
        print(cyan + "createDifference() : " + endC + "fileld_bd_raster : " +
              str(fileld_bd_raster) + endC)
        print(cyan + "createDifference() : " + endC + "simplifie_param : " +
              str(simplifie_param) + endC)
        print(cyan + "createDifference() : " + endC + "threshold_ndvi : " +
              str(threshold_ndvi) + endC)
        print(cyan + "createDifference() : " + endC +
              "threshold_difference : " + str(threshold_difference) + endC)
        print(cyan + "createDifference() : " + endC +
              "filter_difference_0 : " + str(filter_difference_0) + endC)
        print(cyan + "createDifference() : " + endC +
              "filter_difference_1 : " + str(filter_difference_1) + endC)
        print(cyan + "createDifference() : " + endC + "path_time_log : " +
              str(path_time_log) + endC)
        print(cyan + "createDifference() : " + endC + "channel_order : " +
              str(channel_order) + endC)
        print(cyan + "createDifference() : " + endC + "format_vector : " +
              str(format_vector) + endC)
        print(cyan + "createDifference() : " + endC + "extension_raster : " +
              str(extension_raster) + endC)
        print(cyan + "createDifference() : " + endC + "extension_vector : " +
              str(extension_vector) + endC)
        print(cyan + "createDifference() : " + endC +
              "save_results_intermediate : " + str(save_results_intermediate) +
              endC)
        print(cyan + "createDifference() : " + endC + "overwrite : " +
              str(overwrite) + endC)

    # STEP 1: CLEAN UP EXISTING DATA

    print(cyan + "createDifference() : " + bold + green +
          "NETTOYAGE ESPACE DE TRAVAIL..." + endC)

    # Base name of the image
    image_name = os.path.splitext(os.path.basename(image_ortho_input))[0]

    # Check whether the difference result file already exists and whether it must be overwritten
    check = os.path.isfile(vector_difference_output)

    if check and not overwrite:  # If the difference file already exists and overwrite is not enabled
        print(cyan + "createDifference() : " + bold + yellow +
              "File difference  " + vector_difference_output +
              " already exists and will not be created again." + endC)
    else:
        if check:
            try:
                removeFile(vector_difference_output)
            except Exception:
                pass  # if the file does not exist, it cannot be removed: this step is skipped

        # Define the temporary directories
        repertory_output = os.path.dirname(vector_difference_output)
        repertory_output_temp = repertory_output + os.sep + FOLDER_RESULT_TEMP + image_name
        repertory_mask_temp = repertory_output + os.sep + FOLDER_MASK_TEMP + image_name
        repertory_samples_cutting_temp = repertory_output + os.sep + FOLDER_CUTTING_TEMP + image_name
        repertory_samples_buff_temp = repertory_output + os.sep + FOLDER_BUFF_TEMP + image_name

        print(repertory_output_temp)
        print(repertory_mask_temp)
        print(repertory_samples_cutting_temp)
        print(repertory_samples_buff_temp)

        # Create the temporary directories that do not exist yet
        if not os.path.isdir(repertory_output_temp):
            os.makedirs(repertory_output_temp)
        if not os.path.isdir(repertory_mask_temp):
            os.makedirs(repertory_mask_temp)
        if not os.path.isdir(repertory_samples_cutting_temp):
            os.makedirs(repertory_samples_cutting_temp)
        if not os.path.isdir(repertory_samples_buff_temp):
            os.makedirs(repertory_samples_buff_temp)

        # Clean the temporary directories that are not empty
        cleanTempData(repertory_mask_temp)
        cleanTempData(repertory_samples_cutting_temp)
        cleanTempData(repertory_samples_buff_temp)
        cleanTempData(repertory_output_temp)

        BD_topo_layers_list = []
        # Only the first zone of the dictionary is used
        zone = list(zone_buffer_dico)[0]
        # Build the list of exogenous database layers that are used
        for layers_buffer in zone_buffer_dico[zone]:
            BD_topo_layers_list.append(layers_buffer[0])

        print(cyan + "createDifference() : " + bold + green +
              "... FIN NETTOYAGE" + endC)

        # STEP 2 : CLIP THE VECTORS

        print(cyan + "createDifference() : " + bold + green +
              "DECOUPAGE ECHANTILLONS..." + endC)

        # 2.1 : Create the mask delimiting the zone footprint for each image
        vector_mask = repertory_mask_temp + os.sep + image_name + SUFFIX_MASK_CRUDE + extension_vector
        createVectorMask(image_ortho_input, vector_mask)

        # 2.2 : Simplify the mask
        vector_simple_mask = repertory_mask_temp + os.sep + image_name + SUFFIX_MASK + extension_vector
        simplifyVector(vector_mask, vector_simple_mask, simplifie_param,
                       format_vector)

        # 2.3 : Clip the locally copied vectors with the mask
        vector_output_list = []
        for vector_input in bd_vector_input_list:
            vector_name = os.path.splitext(os.path.basename(vector_input))[0]
            extension = os.path.splitext(os.path.basename(vector_input))[1]
            vector_output = repertory_samples_cutting_temp + os.sep + vector_name + SUFFIX_VECTOR_CUT + extension
            vector_output_list.append(vector_output)
        cutoutVectors(vector_simple_mask, bd_vector_input_list,
                      vector_output_list, format_vector)

        print(cyan + "createDifference() : " + bold + green +
              "...FIN DECOUPAGE" + endC)

        # STEP 3 : BUFFER THE VECTORS

        print(cyan + "createDifference() : " + bold + green +
              "MISE EN PLACE DES TAMPONS..." + endC)

        # Loop over the dictionary mapping the zone to the file names and their associated buffers
        for elem_buff in zone_buffer_dico[zone]:
            # Loop over the departments
            for dpt in departments_list:

                input_shape = repertory_samples_cutting_temp + os.sep + elem_buff[0] + "_" + dpt + SUFFIX_VECTOR_CUT + extension_vector
                output_shape = repertory_samples_buff_temp + os.sep + elem_buff[0] + "_" + dpt + SUFFIX_VECTOR_BUFF + extension_vector
                buff = elem_buff[1]
                if os.path.isfile(input_shape):
                    if debug >= 3:
                        print(cyan + "createDifference() : " + endC +
                              "input_shape : " + str(input_shape) + endC)
                        print(cyan + "createDifference() : " + endC +
                              "output_shape : " + str(output_shape) + endC)
                        print(cyan + "createDifference() : " + endC +
                              "buff : " + str(buff) + endC)
                    bufferVector(input_shape, output_shape, buff, "", 1.0, 10,
                                 format_vector)
                else:
                    print(cyan + "createDifference() : " + bold + yellow +
                          "Pas de fichier du nom : " + endC + input_shape)

        print(cyan + "createDifference() : " + bold + green +
              "FIN DE L AFFECTATION DES TAMPONS" + endC)

        # STEP 4 : MERGE THE BD TOPO SHAPEFILES

        print(cyan + "createDifference() : " + bold + green +
              "FUSION DATA BD..." + endC)

        shape_buff_list = []
        # Loop over the dictionary mapping the zone to the file names
        for elem_buff in zone_buffer_dico[zone]:
            # Loop over the departments
            for dpt in departments_list:
                shape_file = repertory_samples_buff_temp + os.sep + elem_buff[0] + "_" + dpt + SUFFIX_VECTOR_BUFF + extension_vector

                if os.path.isfile(shape_file):
                    shape_buff_list.append(shape_file)
                    print("file for fusion : " + shape_file)
                else:
                    print(bold + yellow + "pas de fichiers avec ce nom : " +
                          endC + shape_file)

            # If at least one shapefile was found, merge them
            if not shape_buff_list:
                print(bold + yellow + "Pas de fusion sans donnee a fusionner" +
                      endC)
            else:
                # Merge the shapefiles
                image_zone_shape = repertory_output_temp + os.sep + image_name + '_' + zone + extension_vector
                fusionVectors(shape_buff_list, image_zone_shape)

        print("File BD : " + image_zone_shape)
        print(cyan + "createDifference() : " + bold + green +
              "FIN DE LA FUSION" + endC)

    # STEP 5 : RASTERIZE THE BD ZONE SHAPEFILE
    print(cyan + "createDifference() : " + bold + green +
          "RASTERIZATION DE LA FUSION..." + endC)
    image_zone_raster = repertory_output_temp + os.sep + image_name + '_' + zone + extension_raster
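    # Burn the merged BD TOPO vector into a raster aligned on the input ortho image
    # (the 'fileld_bd_raster' field presumably provides the burn value, CODAGE the pixel type)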
    rasterizeVector(image_zone_shape,
                    image_zone_raster,
                    image_ortho_input,
                    fileld_bd_raster,
                    codage=CODAGE)
    print(cyan + "createDifference() : " + bold + green +
          "FIN DE LA RASTERIZATION" + endC)

    # STEP 6 : CREATE A NEW MNS FROM THE MNT + BD_TOPO DATA
    print(cyan + "createDifference() : " + bold + green +
          "CREATION NOUVEAU MNS..." + endC)
    image_new_mns_output = repertory_output_temp + os.sep + image_name + SUFFIX_NEW_MNS + extension_raster
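    # Rebuild a reference MNS (surface model) from the MNT (terrain model) and the rasterized
    # BD TOPO objects, presumably using the ortho image as geometric reference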
    createMNS(image_ortho_input, image_mnt_input, image_zone_raster,
              image_new_mns_output)
    print(cyan + "createDifference() : " + bold + green +
          "FIN DE LA CREATION MNS" + endC)

    # STEP 7 : CREATE A MASK OVER VEGETATED AREAS
    print(cyan + "createDifference() : " + bold + green +
          "CREATION DU NDVI..." + endC)
    image_ndvi_output = repertory_output_temp + os.sep + image_name + SUFFIX_NDVI + extension_raster
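    # Compute the NDVI of the ortho image; channel_order indicates which bands hold
    # Red and NIR (NDVI = (NIR - Red) / (NIR + Red))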
    createNDVI(image_ortho_input, image_ndvi_output, channel_order)
    print(cyan + "createDifference() : " + bold + green +
          "FIN DE LA CREATION DU NDVI" + endC)

    print(cyan + "createDifference() : " + bold + green +
          "CREATION DU MASQUE NDVI..." + endC)
    image_ndvi_mask_output = repertory_output_temp + os.sep + image_name + SUFFIX_NDVI + SUFFIX_MASK + extension_raster
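    # Threshold the NDVI (threshold_ndvi) into a binary vegetation mask, used in step 8
    # to exclude vegetated areas from the MNS difference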
    createBinaryMask(image_ndvi_output, image_ndvi_mask_output, threshold_ndvi,
                     False)
    print(cyan + "createDifference() : " + bold + green +
          "FIN DE LA CREATION DU MASQUE NDVI" + endC)

    # STEP 8 : CREATE A MNS DIFFERENCE FILE WITH VEGETATED AREAS MASKED OUT
    print(cyan + "createDifference() : " + bold + green +
          "CREATION DIFFERENCE MNS..." + endC)
    #image_diff_mns_output = repertory_output + os.sep + image_name + SUFFIX_DIFF_MNS + extension_raster
    image_diff_mns_output = image_difference_output
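    # Difference between the input MNS and the rebuilt MNS, with the vegetation mask applied;
    # the result is written directly to the requested output (image_difference_output)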
    createDifferenceFile(image_mns_input, image_new_mns_output,
                         image_ndvi_mask_output, image_diff_mns_output)
    print(cyan + "createDifference() : " + bold + green +
          "FIN DE LA CREATION DE LA DIFFERENCE MNS" + endC)

    print(cyan + "createDifference() : " + bold + green +
          "CREATION DU MASQUE DE DIFFERENCE..." + endC)
    image_diff_mns_mask_output = repertory_output_temp + os.sep + image_name + SUFFIX_DIFF_MNS + SUFFIX_MASK + extension_raster
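    # Keep only the pixels selected by threshold_difference (binary mask of significant
    # elevation differences)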
    createBinaryMask(image_diff_mns_output, image_diff_mns_mask_output,
                     threshold_difference, True)
    print(cyan + "createDifference() : " + bold + green +
          "FIN DE LA CREATION DU MASQUE DE DIFFERENCE" + endC)

    print(cyan + "createDifference() : " + bold + green +
          "FILTRAGE DU MASQUE DE DIFFERENCE..." + endC)
    image_diff_mns_filtered_output = repertory_output_temp + os.sep + image_name + SUFFIX_DIFF_MNS + SUFFIX_FILTERED + extension_raster
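    # Clean up the binary difference mask; filter_difference_0 and filter_difference_1
    # presumably control the filtering applied to the 0 and 1 values respectively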
    filterBinaryRaster(image_diff_mns_mask_output,
                       image_diff_mns_filtered_output, filter_difference_0,
                       filter_difference_1)
    print(cyan + "createDifference() : " + bold + green +
          "FIN DU FILTRAGE DU MASQUE DE DIFFERENCE" + endC)

    # STEP 9 : VECTORIZE THE MNS DIFFERENCE FILE
    print(cyan + "createDifference() : " + bold + green +
          "VECTORISATION DU RASTER DE DIFFERENCE..." + endC)
    vector_diff_mns_filtered_output = repertory_output_temp + os.sep + image_name + SUFFIX_DIFF_MNS + SUFFIX_FILTERED + extension_vector
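    # Turn the filtered difference mask into polygons; the pixel value of each polygon is
    # presumably stored in the 'DN' attribute field (usual GDAL polygonize convention)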
    polygonizeRaster(image_diff_mns_filtered_output,
                     vector_diff_mns_filtered_output,
                     image_name,
                     field_name="DN")
    print(cyan + "createDifference() : " + bold + green +
          "FIN DE VECTORISATION DU RASTER DE DIFFERENCE" + endC)

    print(cyan + "createDifference() : " + bold + green +
          "SIMPLIFICATION VECTEUR DE DIFFERENCE..." + endC)
    simplifyVector(vector_diff_mns_filtered_output, vector_difference_output,
                   simplifie_param, format_vector)
    print(cyan + "createDifference() : " + bold + green +
          "FIN DE SIMPLIFICATION DI VECTEUR DE DIFFERENCE" + endC)

    # STEP 10 : DELETE UNNECESSARY INTERMEDIATE FILES
    if not save_results_intermediate:

        # Delete the .geom files in the folder
        for to_delete in glob.glob(repertory_mask_temp + os.sep + "*.geom"):
            removeFile(to_delete)

        # Delete the temporary directories
        deleteDir(repertory_mask_temp)
        deleteDir(repertory_samples_cutting_temp)
        deleteDir(repertory_samples_buff_temp)
        deleteDir(repertory_output_temp)

    # Update the log
    ending_event = "createDifference() : create macro samples ending : "
    timeLine(path_time_log, ending_event)

    return
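
# Hypothetical usage sketch (illustration only): the positional image/vector arguments are
# defined by the createDifference() signature earlier in the module and are elided here;
# the keyword names below come from the debug block at the top of the function, and the
# values are placeholders.
#
# createDifference(...,
#                  threshold_ndvi=0.3,
#                  threshold_difference=2.0,
#                  filter_difference_0=10,
#                  filter_difference_1=10,
#                  path_time_log="time_log.txt",
#                  channel_order=["Red", "Green", "Blue", "NIR"],
#                  save_results_intermediate=False,
#                  overwrite=True)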