def checkPassword(user_name='postgres',
                  password='******',
                  ip_host='localhost',
                  num_port='5432'):

    osSystem = platform.system()
    if "Windows" in osSystem:
        file_pgpass = os.path.expanduser(
            '~') + "\\AppData\\Roaming\\postgresql\\pgpass.conf"
    else:
        file_pgpass = os.path.expanduser('~') + "/.pgpass"

    line_pgpass = str(ip_host) + ":" + str(num_port) + ":*:" + str(
        user_name) + ":" + str(password)

    find = False
    if os.path.isfile(file_pgpass):
        text = readTextFile(file_pgpass)
        for line in text.split('\n'):
            if line_pgpass in line:
                find = True
                break

    if not find:
        appendTextFileCR(file_pgpass, line_pgpass)
        # The file must not have wider access rights than 0600, or PostgreSQL will ignore it...
        os.chmod(file_pgpass, 0o600)

    return
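
A minimal, self-contained sketch of the pgpass logic above (standard library only; the readTextFile/appendTextFileCR helpers belong to this library, and the credentials shown are placeholders):

import os
import platform

def pgpass_path():
    # Windows keeps pgpass.conf under %APPDATA%, other systems use ~/.pgpass
    if "Windows" in platform.system():
        return os.path.join(os.path.expanduser('~'), "AppData", "Roaming",
                            "postgresql", "pgpass.conf")
    return os.path.join(os.path.expanduser('~'), ".pgpass")

def pgpass_entry(host='localhost', port='5432', user='postgres', password='secret'):
    # Same layout as checkPassword(): host:port:database:user:password,
    # where '*' matches any database
    return "%s:%s:*:%s:%s" % (host, port, user, password)

print(pgpass_path())
print(pgpass_entry())
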
Example 2
def getFtp(ftp, path_ftp, local_path, all_path, file_error):

    EXT_LIST = ['.tif', '.tiff', '.ecw', '.jp2', '.asc']

    ftp.cwd(path_ftp)
    data_list = []
    ftp.retrlines("LIST", data_list.append)

    for data in data_list:

        filename = data.split(' ')[-1]
        if data[0] == 'd':
            print(cyan + "getFtp() : " + green + "Get directory : " +
                  filename + endC)
            getFtp(ftp, filename, local_path + os.sep + filename,
                   all_path + os.sep + filename, file_error)
            ftp.cwd("..")
        else:
            print(cyan + "getFtp() : " + green + "Download file : " +
                  filename + endC)
            try:
                local_filename = local_path + os.sep + filename
                filename_error = all_path + os.sep + filename
                if not os.path.isdir(local_path):
                    os.makedirs(local_path)
                # Context manager so the local file is closed even if the transfer fails
                with open(local_filename, 'wb') as local_file:
                    ftp.retrbinary("RETR " + filename, local_file.write)
            except Exception:
                print(cyan + "getFtp() : " + bold + red +
                      "Error during download " + filename + " from FTP" + endC,
                      file=sys.stderr)
                appendTextFileCR(file_error, filename_error)
                if os.path.isfile(local_filename):
                    removeFile(local_filename)
                continue  # Do not run the image check on a failed download

            extension_name = os.path.splitext(
                os.path.basename(local_filename))[1].lower()

            if extension_name in EXT_LIST:
                test_image = imageControl(local_filename)
                if not test_image:
                    appendTextFileCR(file_error, filename_error)
                    if os.path.isfile(local_filename):
                        removeFile(local_filename)
    return
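
For reference, a runnable sketch of the LIST-line handling above, with made-up sample lines (a real call needs an open ftplib.FTP connection passed in as ftp):

def parse_list_line(line):
    # Mirrors getFtp(): a leading 'd' flags a directory, and the last
    # space-separated token is the file or directory name
    name = line.split(' ')[-1]
    return ('dir' if line.startswith('d') else 'file'), name

for line in ["drwxr-xr-x   2 ftp ftp 4096 Jan 01 00:00 ortho",
             "-rw-r--r--   1 ftp ftp 1024 Jan 01 00:00 tile_01.tif"]:
    print(parse_list_line(line))
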
Example 3
def soilOccupationChange(input_plot_vector, output_plot_vector, footprint_vector, input_tx_files_list, evolutions_list=['0:1:11000:10:50:and', '0:1:12000:10:50:and', '0:1:21000:10:50:and', '0:1:22000:10:50:and', '0:1:23000:10:50:and'], class_label_dico={11000:'Bati', 12000:'Route', 21000:'SolNu', 22000:'Eau', 23000:'Vegetation'}, epsg=2154, no_data_value=0, format_raster='GTiff', format_vector='ESRI Shapefile', extension_raster='.tif', extension_vector='.shp', postgis_ip_host='localhost', postgis_num_port=5432, postgis_user_name='postgres', postgis_password='******', postgis_database_name='database', postgis_schema_name='public', postgis_encoding='latin1', path_time_log='', save_results_intermediate=False, overwrite=True):

    if debug >= 3:
        print('\n' + bold + green + "Evolution de l'OCS par parcelle - Variables dans la fonction :" + endC)
        print(cyan + "    soilOccupationChange() : " + endC + "input_plot_vector : " + str(input_plot_vector) + endC)
        print(cyan + "    soilOccupationChange() : " + endC + "output_plot_vector : " + str(output_plot_vector) + endC)
        print(cyan + "    soilOccupationChange() : " + endC + "footprint_vector : " + str(footprint_vector) + endC)
        print(cyan + "    soilOccupationChange() : " + endC + "input_tx_files_list : " + str(input_tx_files_list) + endC)
        print(cyan + "    soilOccupationChange() : " + endC + "evolutions_list : " + str(evolutions_list) + endC)
        print(cyan + "    soilOccupationChange() : " + endC + "class_label_dico : " + str(class_label_dico) + endC)
        print(cyan + "    soilOccupationChange() : " + endC + "epsg : " + str(epsg) + endC)
        print(cyan + "    soilOccupationChange() : " + endC + "no_data_value : " + str(no_data_value) + endC)
        print(cyan + "    soilOccupationChange() : " + endC + "format_raster : " + str(format_raster) + endC)
        print(cyan + "    soilOccupationChange() : " + endC + "format_vector : " + str(format_vector) + endC)
        print(cyan + "    soilOccupationChange() : " + endC + "extension_raster : " + str(extension_raster) + endC)
        print(cyan + "    soilOccupationChange() : " + endC + "extension_vector : " + str(extension_vector) + endC)
        print(cyan + "    soilOccupationChange() : " + endC + "postgis_ip_host : " + str(postgis_ip_host) + endC)
        print(cyan + "    soilOccupationChange() : " + endC + "postgis_num_port : " + str(postgis_num_port) + endC)
        print(cyan + "    soilOccupationChange() : " + endC + "postgis_user_name : " + str(postgis_user_name) + endC)
        print(cyan + "    soilOccupationChange() : " + endC + "postgis_password : "******"    soilOccupationChange() : " + endC + "postgis_database_name : " + str(postgis_database_name) + endC)
        print(cyan + "    soilOccupationChange() : " + endC + "postgis_schema_name : " + str(postgis_schema_name) + endC)
        print(cyan + "    soilOccupationChange() : " + endC + "postgis_encoding : " + str(postgis_encoding) + endC)
        print(cyan + "    soilOccupationChange() : " + endC + "path_time_log : " + str(path_time_log) + endC)
        print(cyan + "    soilOccupationChange() : " + endC + "save_results_intermediate : " + str(save_results_intermediate) + endC)
        print(cyan + "    soilOccupationChange() : " + endC + "overwrite : " + str(overwrite) + endC + '\n')

    # Constants
    EXTENSION_TEXT = '.txt'
    SUFFIX_TEMP = '_temp'
    SUFFIX_CUT = '_cut'
    AREA_FIELD = 'st_area'
    GEOM_FIELD = 'geom'

    # Update the log
    starting_event = "soilOccupationChange() : Début du traitement : "
    timeLine(path_time_log, starting_event)

    print(cyan + "soilOccupationChange() : " + bold + green + "DEBUT DES TRAITEMENTS" + endC + '\n')

    # 'basename' variable definition
    output_plot_basename = os.path.splitext(os.path.basename(output_plot_vector))[0]

    # Temporary file variables
    temp_directory = os.path.dirname(output_plot_vector) + os.sep + output_plot_basename + SUFFIX_TEMP
    plot_vector_cut = temp_directory + os.sep + output_plot_basename + SUFFIX_CUT + extension_vector

    # PostGIS variables
    plot_table = output_plot_basename.lower()

    # .txt file associated with the output vector file, listing the quantified evolutions
    output_evolution_text_file = os.path.splitext(output_plot_vector)[0] + EXTENSION_TEXT

    # Clean-up of previous processing runs
    if debug >= 3:
        print(cyan + "soilOccupationChange() : " + endC + "Nettoyage des traitements précédents." + endC + '\n')
    removeVectorFile(output_plot_vector, format_vector=format_vector)
    removeFile(output_evolution_text_file)
    cleanTempData(temp_directory)
    dropDatabase(postgis_database_name, user_name=postgis_user_name, password=postgis_password, ip_host=postgis_ip_host, num_port=postgis_num_port, schema_name=postgis_schema_name)

    #############
    # Step 0/2 # Processing preparation
    #############

    print(cyan + "soilOccupationChange() : " + bold + green + "ETAPE 0/2 - Début de la préparation des traitements." + endC + '\n')

    # Clip the plot layer to the study area
    cutVector(footprint_vector, input_plot_vector, plot_vector_cut, overwrite=overwrite, format_vector=format_vector)

    # Get the field names from the source file (to later isolate the newly created fields and rename them)
    attr_names_list_origin = getAttributeNameList(plot_vector_cut, format_vector=format_vector)
    new_attr_names_list_origin = list(attr_names_list_origin)  # copy, so appending new names below does not mutate the original list

    # PostGIS set-up
    createDatabase(postgis_database_name, user_name=postgis_user_name, password=postgis_password, ip_host=postgis_ip_host, num_port=postgis_num_port, schema_name=postgis_schema_name)

    print(cyan + "soilOccupationChange() : " + bold + green + "ETAPE 0/2 - Fin de la préparation des traitements." + endC + '\n')

    #############
    # Step 1/2 # Computation of the tx statistics
    #############

    print(cyan + "soilOccupationChange() : " + bold + green + "ETAPE 1/2 - Début des calculs des statistiques à tx." + endC + '\n')

    len_tx = len(input_tx_files_list)
    tx = 0

    # Loop over the t0+x input files
    for input_tx_file in input_tx_files_list:
        if debug >= 3:
            print(cyan + "soilOccupationChange() : " + endC + bold + "Calcul des statistiques à tx %s/%s." % (tx+1, len_tx) + endC + '\n')

        # Land-cover (OCS) statistics per plot
        statisticsVectorRaster(input_tx_file, plot_vector_cut, "", 1, True, False, False, [], [], class_label_dico, path_time_log, clean_small_polygons=True, format_vector=format_vector, save_results_intermediate=save_results_intermediate, overwrite=overwrite)

        # Get the field names from the plot file (including the fields just created by CVR)
        attr_names_list_tx = getAttributeNameList(plot_vector_cut, format_vector=format_vector)

        # Isolate the new fields produced by the CVR
        fields_name_list = []
        for attr_name in attr_names_list_tx:
            if attr_name not in new_attr_names_list_origin:
                fields_name_list.append(attr_name)

        # Build the new names of the fields produced by the CVR, truncated to 10 characters (the shapefile field-name limit)
        new_fields_name_list = []
        for field_name in fields_name_list:
            new_field_name = 't%s_' % tx + field_name
            new_field_name = new_field_name[:10]
            new_fields_name_list.append(new_field_name)
            new_attr_names_list_origin.append(new_field_name)

        # Rename the fields produced by the CVR, so it can be run again on other dates
        renameFieldsVector(plot_vector_cut, fields_name_list, new_fields_name_list, format_vector=format_vector)

        tx += 1

    print(cyan + "soilOccupationChange() : " + bold + green + "ETAPE 1/2 - Fin des calculs des statistiques à tx." + endC + '\n')

    #############
    # Step 2/2 # Characterisation of the changes
    #############

    print(cyan + "soilOccupationChange() : " + bold + green + "ETAPE 2/2 - Début de la caractérisation des changements." + endC + '\n')

    # Pre-processing in PostGIS
    plot_table = importVectorByOgr2ogr(postgis_database_name, plot_vector_cut, plot_table, user_name=postgis_user_name, password=postgis_password, ip_host=postgis_ip_host, num_port=postgis_num_port, schema_name=postgis_schema_name, epsg=epsg, codage=postgis_encoding)
    connection = openConnection(postgis_database_name, user_name=postgis_user_name, password=postgis_password, ip_host=postgis_ip_host, num_port=postgis_num_port, schema_name=postgis_schema_name)

    # SQL query computing the area of the plots
    sql_query = "ALTER TABLE %s ADD COLUMN %s REAL;\n" % (plot_table, AREA_FIELD)
    sql_query += "UPDATE %s SET %s = ST_Area(%s);\n" % (plot_table, AREA_FIELD, GEOM_FIELD)

    # Loop over the evolutions to quantify
    temp_field = 1
    for evolution in evolutions_list:
        evolution_split = evolution.split(':')
        idx_bef = int(evolution_split[0])
        idx_aft = int(evolution_split[1])
        label = int(evolution_split[2])
        evol = abs(int(evolution_split[3]))
        evol_s = abs(int(evolution_split[4]))
        combi = evolution_split[5]
        class_name = class_label_dico[label]
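        # Example (from the default evolutions_list): '0:1:11000:10:50:and' reads as
        # t0 -> t1, class 11000 ('Bati'), rate threshold 10 %, surface threshold 50 m²,
        # the two conditions combined with 'and'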
        def_evo_field = "def_evo_%s" % str(temp_field)
        if debug >= 3:
            print(cyan + "soilOccupationChange() : " + endC + bold + "Caractérisation des changements t%s/t%s pour la classe '%s' (%s)." % (idx_bef, idx_aft, class_name, label) + endC + '\n')

        if evol != 0 or evol_s != 0:

            # Evolution handled through the rate
            evol_str = str(evol) + ' %'
            evo_field = "evo_%s" % str(temp_field)
            t0_field = 't%s_' % idx_bef + class_name.lower()[:7]
            t1_field = 't%s_' % idx_aft + class_name.lower()[:7]

            # Evolution handled through the surface
            evol_s_str = str(evol_s) + ' m²'
            evo_s_field = "evo_s_%s" % str(temp_field)
            t0_s_field = 't%s_s_' % idx_bef + class_name.lower()[:5]
            t1_s_field = 't%s_s_' % idx_aft + class_name.lower()[:5]

            # SQL query computing the raw evolution
            sql_query += "ALTER TABLE %s ADD COLUMN %s REAL;\n" % (plot_table, evo_field)
            sql_query += "UPDATE %s SET %s = %s - %s;\n" % (plot_table, evo_field, t1_field, t0_field)
            sql_query += "ALTER TABLE %s ADD COLUMN %s REAL;\n" % (plot_table, evo_s_field)
            sql_query += "UPDATE %s SET %s = %s - %s;\n" % (plot_table, evo_s_field, t1_s_field, t0_s_field)
            sql_query += "ALTER TABLE %s ADD COLUMN %s VARCHAR;\n" % (plot_table, def_evo_field)
            sql_query += "UPDATE %s SET %s = 't%s a t%s - %s - aucune evolution';\n" % (plot_table, def_evo_field, idx_bef, idx_aft, class_name)

            # Evolution through both rate and surface
            if evol != 0 and evol_s != 0:
                text_evol = "taux à %s" % evol_str
                if combi == 'and':
                    text_evol += " ET "
                elif combi == 'or':
                    text_evol += " OU "
                text_evol += "surface à %s" % evol_s_str
                sql_where_pos = "%s >= %s %s %s >= %s" % (evo_field, evol, combi, evo_s_field, evol_s)
                sql_where_neg = "%s <= -%s %s %s <= -%s" % (evo_field, evol, combi, evo_s_field, evol_s)

            # Evolution through the rate only
            elif evol != 0:
                text_evol = "taux à %s" % evol_str
                sql_where_pos = "%s >= %s" % (evo_field, evol)
                sql_where_neg = "%s <= -%s" % (evo_field, evol)

            # Evolution through the surface only
            elif evol_s != 0:
                text_evol = "surface à %s" % evol_s_str
                sql_where_pos = "%s >= %s" % (evo_s_field, evol_s)
                sql_where_neg = "%s <= -%s" % (evo_s_field, evol_s)

            sql_query += "UPDATE %s SET %s = 't%s a t%s - %s - evolution positive' WHERE %s;\n" % (plot_table, def_evo_field, idx_bef, idx_aft, class_name, sql_where_pos)
            sql_query += "UPDATE %s SET %s = 't%s a t%s - %s - evolution negative' WHERE %s;\n" % (plot_table, def_evo_field, idx_bef, idx_aft, class_name, sql_where_neg)

            # Append the parameters of the quantified evolution (time steps, class, rate/surface) to the output text file
            text = "%s --> évolution entre t%s et t%s, pour la classe '%s' (label %s) :\n" % (def_evo_field, idx_bef, idx_aft, class_name, label)
            text += "    %s --> taux d'évolution brut" % evo_field + " (%)\n"
            text += "    %s --> surface d'évolution brute" % evo_s_field + " (m²)\n"
            text += "Evolution quantifiée : %s\n" % text_evol
            appendTextFileCR(output_evolution_text_file, text)
            temp_field += 1

    # SQL processing of the land-cover class evolutions
    executeQuery(connection, sql_query)
    closeConnection(connection)
    exportVectorByOgr2ogr(postgis_database_name, output_plot_vector, plot_table, user_name=postgis_user_name, password=postgis_password, ip_host=postgis_ip_host, num_port=postgis_num_port, schema_name=postgis_schema_name, format_type=format_vector)

    print(cyan + "soilOccupationChange() : " + bold + green + "ETAPE 2/2 - Fin de la caractérisation des changements." + endC + '\n')

    # Removal of the temporary files
    if not save_results_intermediate:
        if debug >= 3:
            print(cyan + "soilOccupationChange() : " + endC + "Suppression des fichiers temporaires." + endC + '\n')
        deleteDir(temp_directory)
        dropDatabase(postgis_database_name, user_name=postgis_user_name, password=postgis_password, ip_host=postgis_ip_host, num_port=postgis_num_port, schema_name=postgis_schema_name)

    print(cyan + "soilOccupationChange() : " + bold + green + "FIN DES TRAITEMENTS" + endC + '\n')

    # Update the log
    ending_event = "soilOccupationChange() : Fin du traitement : "
    timeLine(path_time_log, ending_event)

    return
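
A self-contained sketch of the threshold logic above (field names are hypothetical; the caller guarantees at least one threshold is non-zero):

def evolution_where_clauses(evo_field, evol, evo_s_field, evol_s, combi='and'):
    # Mirrors the three cases above: rate and surface, rate only, surface only
    if evol != 0 and evol_s != 0:
        sql_where_pos = "%s >= %s %s %s >= %s" % (evo_field, evol, combi, evo_s_field, evol_s)
        sql_where_neg = "%s <= -%s %s %s <= -%s" % (evo_field, evol, combi, evo_s_field, evol_s)
    elif evol != 0:
        sql_where_pos = "%s >= %s" % (evo_field, evol)
        sql_where_neg = "%s <= -%s" % (evo_field, evol)
    else:
        sql_where_pos = "%s >= %s" % (evo_s_field, evol_s)
        sql_where_neg = "%s <= -%s" % (evo_s_field, evol_s)
    return sql_where_pos, sql_where_neg

print(evolution_where_clauses('evo_1', 10, 'evo_s_1', 50))
# ('evo_1 >= 10 and evo_s_1 >= 50', 'evo_1 <= -10 and evo_s_1 <= -50')
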
def applyKmeansMasks(image_input, mask_samples_macro_input_list, image_samples_merged_output, proposal_table_output, micro_samples_images_output_list, centroids_files_output_list, macroclass_sampling_list, macroclass_labels_list, no_data_value, path_time_log, kmeans_param_maximum_iterations=200, kmeans_param_training_set_size_weight=1, kmeans_param_minimum_training_set_size=-1, rate_clean_micro_class=0.0, rand_otb=0, ram_otb=0, number_of_actives_pixels_threshold=200, extension_raster=".tif", save_results_intermediate=False, overwrite=True):

    # Update the log
    starting_event = "applyKmeansMasks() : Kmeans and mask starting : "
    timeLine(path_time_log,starting_event)

    print(endC)
    print(cyan + "applyKmeansMasks() : " + bold + green + "## START : SUBSAMPLING OF " + str(macroclass_labels_list) + endC)
    print(endC)

    if debug >= 2:
        print(cyan + "applyKmeansMasks() : variables dans la fonction" + endC)
        print(cyan + "applyKmeansMasks() : " + endC + "image_input : " + str(image_input) + endC)
        print(cyan + "applyKmeansMasks() : " + endC + "image_samples_merged_output : " + str(image_samples_merged_output) + endC)
        print(cyan + "applyKmeansMasks() : " + endC + "proposal_table_output : " + str(proposal_table_output) + endC)
        print(cyan + "applyKmeansMasks() : " + endC + "mask_samples_macro_input_list : " + str(mask_samples_macro_input_list) + endC)
        print(cyan + "applyKmeansMasks() : " + endC + "micro_samples_images_output_list : " + str(micro_samples_images_output_list) + endC)
        print(cyan + "applyKmeansMasks() : " + endC + "centroids_files_output_list : " + str(centroids_files_output_list) + endC)
        print(cyan + "applyKmeansMasks() : " + endC + "macroclass_sampling_list : " + str(macroclass_sampling_list) + endC)
        print(cyan + "applyKmeansMasks() : " + endC + "macroclass_labels_list : " + str(macroclass_labels_list) + endC)
        print(cyan + "applyKmeansMasks() : " + endC + "kmeans_param_maximum_iterations : " + str(kmeans_param_maximum_iterations) + endC)
        print(cyan + "applyKmeansMasks() : " + endC + "kmeans_param_training_set_size_weight : " + str(kmeans_param_training_set_size_weight) + endC)
        print(cyan + "applyKmeansMasks() : " + endC + "kmeans_param_minimum_training_set_size : " + str(kmeans_param_minimum_training_set_size) + endC)
        print(cyan + "applyKmeansMasks() : " + endC + "rate_clean_micro_class : " + str(rate_clean_micro_class))
        print(cyan + "applyKmeansMasks() : " + endC + "no_data_value : " + str(no_data_value) + endC)
        print(cyan + "applyKmeansMasks() : " + endC + "rand_otb : " + str(rand_otb) + endC)
        print(cyan + "applyKmeansMasks() : " + endC + "ram_otb : " + str(ram_otb) + endC)
        print(cyan + "applyKmeansMasks() : " + endC + "number_of_actives_pixels_threshold : " + str(number_of_actives_pixels_threshold))
        print(cyan + "applyKmeansMasks() : " + endC + "extension_raster : " + str(extension_raster) + endC)
        print(cyan + "applyKmeansMasks() : " + endC + "save_results_intermediate : " + str(save_results_intermediate) + endC)
        print(cyan + "applyKmeansMasks() : " + endC + "overwrite : " + str(overwrite) + endC)

    # Constants
    HEADER_TABLEAU_MODIF = "MICROCLASSE;TRAITEMENT\n"

    CODAGE_16B = "uint16"
    CODAGE_8B = "uint8"
    EXT_XML = ".xml"

    SUFFIX_MASK_CLEAN = "_clean"
    SUFFIX_SAMPLE_MICRO = "_sample_micro"
    SUFFIX_STATISTICS = "_statistics"
    SUFFIX_CENTROID = "_centroid"
    SUFFIX_MASK_TEMP = "_tmp"

    # Creation of the temporary output files when they are not specified
    #-------------------------------------------------------------------------

    length_mask = len(mask_samples_macro_input_list)
    images_mask_cleaned_list = []
    temporary_files_list = []
    micro_samples_images_list = []
    centroids_files_list = []
    repertory_output_tmp_list = []

    if image_samples_merged_output != "" :
        repertory_base_output = os.path.dirname(image_samples_merged_output)
        filename = os.path.splitext(os.path.basename(image_samples_merged_output))[0]
    else :
        repertory_base_output = os.path.dirname(micro_samples_images_output_list[0])
        filename = os.path.splitext(os.path.basename(micro_samples_images_output_list[0]))[0]

    file_statistic_points = repertory_base_output + os.sep + filename + SUFFIX_STATISTICS + EXT_XML

    for macroclass_id in range(length_mask):

        repertory_output = repertory_base_output + os.sep + str(macroclass_labels_list[macroclass_id])
        if not os.path.isdir(repertory_output):
            os.makedirs(repertory_output)
        repertory_output_tmp_list.append(repertory_output)
        samples_image_input = mask_samples_macro_input_list[macroclass_id]
        filename = os.path.splitext(os.path.basename(samples_image_input))[0]
        image_mask_cleaned =  repertory_output + os.sep + filename + SUFFIX_MASK_CLEAN + extension_raster
        images_mask_cleaned_list.append(image_mask_cleaned)
        image_tmp =  repertory_output + os.sep + filename + SUFFIX_MASK_TEMP + extension_raster
        temporary_files_list.append(image_tmp)
        if micro_samples_images_output_list == [] :
            micro_samples_image = repertory_output + os.sep + filename + SUFFIX_SAMPLE_MICRO + extension_raster
        else :
            micro_samples_image = micro_samples_images_output_list[macroclass_id]
        micro_samples_images_list.append(micro_samples_image)
        if centroids_files_output_list == [] :
            centroids_file = repertory_output + os.sep + filename + SUFFIX_CENTROID + extension_raster
        else :
            centroids_file = centroids_files_output_list[macroclass_id]
        centroids_files_list.append(centroids_file)

    # Clean pixels overlapping across several images
    #-----------------------------------------------------

    if length_mask > 1:
        image_name = os.path.splitext(os.path.basename(image_input))[0]
        deletePixelsSuperpositionMasks(mask_samples_macro_input_list, images_mask_cleaned_list, image_name, CODAGE_8B)
    else:
        images_mask_cleaned_list = mask_samples_macro_input_list

    # Run kmeans for each macroclass
    #--------------------------------------------

    # Initialise the list for multi-threading
    thread_list = []

    for macroclass_id in range(length_mask):

        mask_sample_input = images_mask_cleaned_list[macroclass_id]
        micro_samples_image = micro_samples_images_list[macroclass_id]
        image_tmp = temporary_files_list[macroclass_id]
        centroids_file = centroids_files_list[macroclass_id]
        check = os.path.isfile(micro_samples_image)

        if check and not overwrite : # If an output file with the same name already exists and overwriting is disabled, skip to the next classification
            print(cyan + "applyKmeansMasks() : " + bold + yellow +  "Computing kmeans from %s with %s already done : no actualisation" % (image_input, mask_sample_input) + endC)

        else:            # Otherwise, run a kmeans

            if check :
                removeFile(micro_samples_image)   # Remove the existing file, if any

            print(cyan + "applyKmeansMasks() : " + bold + green + "Computing kmeans from %s with %s ; output image is %s" %(image_input, mask_sample_input,micro_samples_image) + endC)

            # Get the number of microclasses
            number_of_classes = macroclass_sampling_list[macroclass_id]   # Number of microclasses
            label = macroclass_labels_list[macroclass_id]                 # Label of the macroclass, e.g. 11000

            # Multi-threaded call to the kmeans computation
            thread = threading.Thread(target=computeKmeans, args=(image_input, mask_sample_input, image_tmp, micro_samples_image, centroids_file, label, number_of_classes, macroclass_id, number_of_actives_pixels_threshold, kmeans_param_minimum_training_set_size, kmeans_param_maximum_iterations, length_mask, no_data_value, rand_otb, int(ram_otb/length_mask), CODAGE_8B, CODAGE_16B, save_results_intermediate, overwrite))
            thread.start()
            thread_list.append(thread)

    # Wait for the kmeans of every macroclass to finish
    try:
        for thread in thread_list:
            thread.join()
    except Exception:
        print(cyan + "applyKmeansMasks() : " + bold + red + "Erreur lors du calcul du kmeans : impossible de demarrer le thread" + endC, file=sys.stderr)

    # Merge the micro samples
    #------------------------------
    if image_samples_merged_output != "" :

        mergeListRaster(micro_samples_images_list, image_samples_merged_output, CODAGE_16B)
        updateReferenceProjection(image_input, image_samples_merged_output)

        # Create the proposal table and the statistics file
        #--------------------------------------------------------------
        if proposal_table_output != "" :

            suppress_micro_class_list = []
            info_micoclass_nbpoints_dico = {}
            nb_points_total = 0
            nb_points_medium = 0

            # List of the available micro-class identifiers
            id_micro_list = identifyPixelValues(image_samples_merged_output)
            if 0 in id_micro_list :
                id_micro_list.remove(0)
            nb_micr_class = len(id_micro_list)

            # For every micro class
            for id_micro in id_micro_list :
                nb_pixels = countPixelsOfValue(image_samples_merged_output, id_micro)

                info_micoclass_nbpoints_dico[id_micro] = nb_pixels
                nb_points_total += nb_pixels

            # Average number of points per class
            if nb_micr_class != 0 :
                nb_points_medium = int(nb_points_total / nb_micr_class)
            nb_points_min = int((nb_points_medium * rate_clean_micro_class) / 100)
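            # Example (hypothetical values): with rate_clean_micro_class = 10.0 and an
            # average of 500 points per micro class, nb_points_min = 50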

            # Identify the micro classes that are too small
            if debug >= 4:
                print("rate_clean_micro_class = " + str(rate_clean_micro_class))
                print("nb_points_medium = " + str(nb_points_medium))
                print("nb_points_min = " + str(nb_points_min))

            # Prepare the statistics file
            writeTextFile(file_statistic_points, '<?xml version="1.0" ?>\n')
            appendTextFileCR(file_statistic_points, '<GeneralStatistics>')
            appendTextFileCR(file_statistic_points, '    <Statistic name="pointsPerClassRaw">')

            for micro_class_id in info_micoclass_nbpoints_dico :
                nb_points = info_micoclass_nbpoints_dico[micro_class_id]
                if debug >= 4:
                    print("micro_class_id = " + str(micro_class_id) + ", nb_points = " + str(nb_points))
                appendTextFileCR(file_statistic_points, '        <StatisticPoints class="%d" value="%d" />' %(micro_class_id, nb_points))

                if nb_points < nb_points_min :
                    # Micro class proposed for deletion
                    suppress_micro_class_list.append(micro_class_id)

            # End of the statistics file
            appendTextFileCR(file_statistic_points, '    </Statistic>')
            appendTextFileCR(file_statistic_points, '</GeneralStatistics>')

            # Check whether the previously created table would be overwritten
            check = os.path.isfile(proposal_table_output)
            if check and not overwrite :
                print(cyan + "applyKmeansMasks() : " + bold + yellow + "Modifier table already exists." + '\n' + endC)
            else:
                # Try to remove the file
                try:
                    removeFile(proposal_table_output)
                except Exception:
                    pass   # Ignore the exception raised when the file does not exist (and thus cannot be removed)
                # List the micro classes to remove
                text_output = HEADER_TABLEAU_MODIF

                for micro_class_del in suppress_micro_class_list:
                    text_output += "%d;-1\n" %(micro_class_del)

                # Write the reassignment proposal file
                writeTextFile(proposal_table_output, text_output)

    # Removal of useless intermediate files
    #---------------------------------------------

    if not save_results_intermediate:
        for macroclass_id in range(length_mask):
            if (os.path.isfile(temporary_files_list[macroclass_id])) :
                removeFile(temporary_files_list[macroclass_id])

            if (length_mask > 1) and (os.path.isfile(images_mask_cleaned_list[macroclass_id])) :
                removeFile(images_mask_cleaned_list[macroclass_id])

            if (micro_samples_images_output_list == []) and (os.path.isfile(micro_samples_images_list[macroclass_id])) :
                removeFile(micro_samples_images_list[macroclass_id])

            if (centroids_files_output_list == []) and (os.path.isfile(centroids_files_list[macroclass_id])) :
                removeFile(centroids_files_list[macroclass_id])

            if os.path.isdir(repertory_output_tmp_list[macroclass_id]) :
                removeDir(repertory_output_tmp_list[macroclass_id])

    print(cyan + "applyKmeansMasks() : " + bold + green + "## END : KMEANS CLASSIFICATION" + endC)
    print(endC)

    # Update the log
    ending_event = "applyKmeansMasks() : Kmeans and mask ending : "
    timeLine(path_time_log,ending_event)

    return
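
The per-macroclass threading used above, reduced to a runnable sketch (computeKmeans is replaced by a dummy worker; nothing here depends on OTB):

import threading

def worker(label):
    # Stand-in for computeKmeans(...): one kmeans run per macroclass label
    print("processing macroclass", label)

thread_list = [threading.Thread(target=worker, args=(label,)) for label in (11000, 12000)]
for thread in thread_list:
    thread.start()
for thread in thread_list:
    thread.join()  # wait for every macroclass before merging the samples
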
Example 5
def executeCommand(ip_serveur,
                   port,
                   id_command,
                   command_to_execute,
                   type_execution,
                   error_management,
                   base_name_shell_command,
                   ip_remote="",
                   login="",
                   password=""):

    EXT_SHELL = '.sh'
    EXT_ERR = '.err'
    EXT_LOG = '.log'
    new_state = ''

    # Prepare the execution file for background-local or background-remote runs
    if type_execution == TAG_ACTION_TO_MAKE_BG or type_execution == TAG_ACTION_TO_MAKE_RE:

        # For background or remote executions, prepare the .sh and .err files
        shell_command = base_name_shell_command + str(id_command) + EXT_SHELL
        error_file = base_name_shell_command + str(id_command) + EXT_ERR
        log_file = base_name_shell_command + str(id_command) + EXT_LOG

        # Create the shell file
        error_management_option = ""
        if not error_management:
            error_management_option = " -nem "
        command_to_execute = command_to_execute.replace('\n', '')
        if six.PY2:
            cmd_tmp = command_to_execute + " 1> " + log_file.encode(
                "utf-8") + " 2> " + error_file.encode("utf-8") + "\n"
        else:
            cmd_tmp = command_to_execute + " 1> " + log_file + " 2> " + error_file + "\n"
        writeTextFile(shell_command, cmd_tmp)
        appendTextFileCR(
            shell_command, FUNCTION_PYTHON + "ReplyEndCommand -ip_serveur " +
            str(ip_serveur) + " -port " + str(port) + " -id_command " +
            str(id_command) + error_management_option + " -err " + error_file)
        appendTextFileCR(shell_command, "rm " + shell_command)
        os.chmod(shell_command, stat.S_IRWXU)

    # Depending on the execution type
    while switch(type_execution):

        if case(TAG_ACTION_TO_MAKE_NOW):

            # Direct (local) execution
            exitCode = subprocess.call(command_to_execute, shell=True)
            new_state = TAG_STATE_END
            if exitCode != 0:  # The command command_to_execute failed
                new_state = TAG_STATE_ERROR
                print(cyan + "executeCommand : " + endC + bold + red +
                      "ERREUR EXECUTION DE LA COMMANDE : " +
                      str(command_to_execute) + endC,
                      file=sys.stderr)
            break

        if case(TAG_ACTION_TO_MAKE_BG):

            # Background (local) execution
            process = subprocess.Popen(shell_command,
                                       shell=True,
                                       stderr=subprocess.STDOUT)
            time.sleep(0.1)
            if process is None:
                new_state = TAG_STATE_ERROR
                print(cyan + "executeCommand : " + endC + bold + red +
                      "ERREUR EXECUTION DE LA COMMANDE EN BACKGROUND : " +
                      str(command_to_execute) + endC,
                      file=sys.stderr)
            else:
                print(cyan + "executeCommand : " + endC +
                      " background pid = " + str(process.pid))
            break

        if case(TAG_ACTION_TO_MAKE_RE):

            # Check that the remote machine is reachable
            if ping(ip_remote):

                # Remote execution
                try:
                    s = pxssh.pxssh()
                    s.login(ip_remote, login, password)
                    time.sleep(0.5)
                    s.sendline(shell_command + '&')
                    time.sleep(0.01)
                    s.logout()
                except pxssh.ExceptionPxssh as e:
                    new_state = TAG_STATE_ERROR
                    print(
                        cyan + "executeCommand : " + endC + bold + red +
                        "ERREUR EXECUTION DE LA COMMANDE EN REMOTE (login failed) : "
                        + str(command_to_execute) + endC,
                        file=sys.stderr)
                    print(e, file=sys.stderr)

            else:
                new_state = TAG_STATE_ERROR
                print(
                    cyan + "executeCommand : " + endC + bold + red +
                    "ERREUR EXECUTION DE LA COMMANDE EN REMOTE (Computeur : " +
                    ip_remote + " non disponible) : " +
                    str(command_to_execute) + endC,
                    file=sys.stderr)
            break

        break  # Exit the while

    return new_state
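
switch()/case() above follow the classic Python switch recipe; a minimal implementation compatible with this usage (an assumption: the library's own version may differ in detail):

class switch(object):
    # switch(value) stores the tested value and always evaluates truthy,
    # which is why every branch above ends with an explicit 'break'
    value = None
    def __new__(cls, value):
        cls.value = value
        return True

def case(*args):
    # True when the stored value matches one of the case values
    return any(arg == switch.value for arg in args)

while switch('bg'):
    if case('now'):
        print("direct execution")
        break
    if case('bg'):
        print("background execution")
        break
    break
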
Example 6
def selectSamples(image_input_list, sample_image_input, vector_output, table_statistics_output, sampler_strategy, select_ratio_floor, ratio_per_class_dico, name_column, no_data_value, path_time_log, rand_seed=0, ram_otb=0, epsg=2154, format_vector='ESRI Shapefile', extension_vector=".shp", save_results_intermediate=False, overwrite=True) :

    # Update the log
    starting_event = "selectSamples() : Select points in raster mask macro input starting : "
    timeLine(path_time_log, starting_event)

    if debug >= 3:
        print(cyan + "selectSamples() : " + endC + "image_input_list : " + str(image_input_list) + endC)
        print(cyan + "selectSamples() : " + endC + "sample_image_input : " + str(sample_image_input) + endC)
        print(cyan + "selectSamples() : " + endC + "vector_output : " + str(vector_output) + endC)
        print(cyan + "selectSamples() : " + endC + "table_statistics_output : " + str(table_statistics_output) + endC)
        print(cyan + "selectSamples() : " + endC + "sampler_strategy : " + str(sampler_strategy) + endC)
        print(cyan + "selectSamples() : " + endC + "select_ratio_floor : " + str(select_ratio_floor) + endC)
        print(cyan + "selectSamples() : " + endC + "ratio_per_class_dico : " + str(ratio_per_class_dico) + endC)
        print(cyan + "selectSamples() : " + endC + "name_column : " + str(name_column) + endC)
        print(cyan + "selectSamples() : " + endC + "no_data_value : " + str(no_data_value) + endC)
        print(cyan + "selectSamples() : " + endC + "path_time_log : " + str(path_time_log) + endC)
        print(cyan + "selectSamples() : " + endC + "rand_seed : " + str(rand_seed) + endC)
        print(cyan + "selectSamples() : " + endC + "ram_otb : " + str(ram_otb) + endC)
        print(cyan + "selectSamples() : " + endC + "epsg : " + str(epsg) + endC)
        print(cyan + "selectSamples() : " + endC + "format_vector : " + str(format_vector) + endC)
        print(cyan + "selectSamples() : " + endC + "extension_vector : " + str(extension_vector) + endC)
        print(cyan + "selectSamples() : " + endC + "save_results_intermediate : " + str(save_results_intermediate) + endC)
        print(cyan + "selectSamples() : " + endC + "overwrite : " + str(overwrite) + endC)

    # Constants
    EXT_XML = ".xml"

    SUFFIX_SAMPLE = "_sample"
    SUFFIX_STATISTICS = "_statistics"
    SUFFIX_POINTS = "_points"
    SUFFIX_VALUE = "_value"

    BAND_NAME = "band_"
    COLUMN_CLASS = "class"
    COLUMN_ORIGINFID = "originfid"

    NB_POINTS = "nb_points"
    AVERAGE = "average"
    STANDARD_DEVIATION = "st_dev"

    print(cyan + "selectSamples() : " + bold + green + "DEBUT DE LA SELECTION DE POINTS" + endC)

    # Variables and paths
    repertory_output = os.path.dirname(vector_output)
    filename = os.path.splitext(os.path.basename(vector_output))[0]
    sample_points_output = repertory_output + os.sep + filename +  SUFFIX_SAMPLE + extension_vector
    file_statistic_points = repertory_output + os.sep + filename + SUFFIX_STATISTICS + SUFFIX_POINTS + EXT_XML

    if debug >= 3:
        print(cyan + "selectSamples() : " + endC + "file_statistic_points : " + str(file_statistic_points) + endC)

    # 0. EXISTENCE OF THE OUTPUT FILE
    #----------------------------------

    # If the output points vector file already exists and overwrite is not enabled
    check = os.path.isfile(vector_output)
    if check and not overwrite:
        print(bold + yellow + "Samples points already done for file %s and will not be calculated again." %(vector_output) + endC)
    else:   # Otherwise, or if the check is disabled: create the sample points file

        # Remove the existing file, if any
        if check:
            try:
                removeVectorFile(vector_output)
            except Exception:
                pass # If the file cannot be removed, assume it does not exist and move on
        if os.path.isfile(table_statistics_output) :
            try:
                removeFile(table_statistics_output)
            except Exception:
                pass # If the file cannot be removed, assume it does not exist and move on


        # 1. STATISTICS ON THE RASTER SAMPLE IMAGE
        #----------------------------------------------------

        if debug >= 3:
            print(cyan + "selectSamples() : " + bold + green + "Start statistique sur l'image des echantillons rasteur..." + endC)

        id_micro_list = identifyPixelValues(sample_image_input)

        if 0 in id_micro_list :
            id_micro_list.remove(0)

        min_micro_class_nb_points = -1
        min_micro_class_label = 0
        infoStructPointSource_dico = {}

        writeTextFile(file_statistic_points, '<?xml version="1.0" ?>\n')
        appendTextFileCR(file_statistic_points, '<GeneralStatistics>')
        appendTextFileCR(file_statistic_points, '    <Statistic name="pointsPerClassRaw">')

        if debug >= 2:
            print("Nombre de points par micro classe :" + endC)

        for id_micro in id_micro_list :
            nb_pixels = countPixelsOfValue(sample_image_input, id_micro)

            if debug >= 2:
                print("MicroClass : " + str(id_micro) + ", nb_points = " + str(nb_pixels))
            appendTextFileCR(file_statistic_points, '        <StatisticPoints class="%d" value="%d" />' %(id_micro, nb_pixels))

            if min_micro_class_nb_points == -1 or min_micro_class_nb_points > nb_pixels :
                min_micro_class_nb_points = nb_pixels
                min_micro_class_label = id_micro

            infoStructPointSource_dico[id_micro] = StructInfoMicoClass()
            infoStructPointSource_dico[id_micro].label_class = id_micro
            infoStructPointSource_dico[id_micro].nb_points = nb_pixels
            infoStructPointSource_dico[id_micro].info_points_list = []
            del nb_pixels

        if debug >= 2:
            print("MicroClass min points find : " + str(min_micro_class_label) + ", nb_points = " + str(min_micro_class_nb_points))

        appendTextFileCR(file_statistic_points, '    </Statistic>')

        pending_event = cyan + "selectSamples() : " + bold + green + "End statistique sur l'image des echantillons rasteur. " + endC
        if debug >= 3:
            print(pending_event)
        timeLine(path_time_log,pending_event)

        # 2. LOADING OF THE SAMPLE IMAGE
        #------------------------------------------

        if debug >= 3:
            print(cyan + "selectSamples() : " + bold + green + "Start chargement de l'image des echantillons..." + endC)

        # Image information
        cols, rows, bands = getGeometryImage(sample_image_input)
        xmin, xmax, ymin, ymax = getEmpriseImage(sample_image_input)
        pixel_width, pixel_height = getPixelWidthXYImage(sample_image_input)
        projection_input = getProjectionImage(sample_image_input)
        if projection_input is None or projection_input == 0 :
            projection_input = epsg
        else :
            projection_input = int(projection_input)

        pixel_width = abs(pixel_width)
        pixel_height = abs(pixel_height)

        # Read the data
        raw_data = getRawDataImage(sample_image_input)

        if debug >= 3:
            print("projection = " + str(projection_input))
            print("cols = " + str(cols))
            print("rows = " + str(rows))

        # Build a dico structure holding every non-zero point
        progress = 0
        pass_prog = False
        for y_row in range(rows) :
            for x_col in range(cols) :
                value_class = raw_data[y_row][x_col]
                if value_class != 0 :
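                    # Store the flattened row-major index: x = idx % cols and
                    # y = idx // cols recover the pixel position later (step 4)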
                    infoStructPointSource_dico[value_class].info_points_list.append(x_col + (y_row * cols))

            # Progress bar
            if debug >= 4:
                if  ((float(y_row) / rows) * 100.0 > progress) and not pass_prog :
                    progress += 1
                    pass_prog = True
                    print("Progression => " + str(progress) + "%")
                if ((float(y_row) / rows) * 100.0  > progress + 1) :
                    pass_prog = False

        del raw_data

        pending_event = cyan + "selectSamples() : " + bold + green + "End chargement de l'image des echantillons. " + endC
        if debug >= 3:
            print(pending_event)
        timeLine(path_time_log,pending_event)

        # 3. SELECTION OF THE SAMPLE POINTS
        #--------------------------------------

        if debug >= 3:
            print(cyan + "selectSamples() : " + bold + green + "Start selection des points d'echantillon..." + endC)

        appendTextFileCR(file_statistic_points, '    <Statistic name="pointsPerClassSelect">')

        # Make the random.sample draw deterministic
        if rand_seed > 0:
            random.seed( rand_seed )

        # For every micro class
        for id_micro in id_micro_list :

            # Depending on the selection strategy
            nb_points_ratio = 0
            while switch(sampler_strategy.lower()):
                if case('all'):
                    # The 'all' selection mode was chosen
                    nb_points_ratio = infoStructPointSource_dico[id_micro].nb_points
                    infoStructPointSource_dico[id_micro].sample_points_list = range(nb_points_ratio)

                    break
                if case('percent'):
                    # The 'percent' selection mode was chosen
                    id_macro_class = int(math.floor(id_micro / 100) * 100)
                    select_ratio_class = ratio_per_class_dico[id_macro_class]
                    nb_points_ratio = int(infoStructPointSource_dico[id_micro].nb_points * select_ratio_class / 100)
                    infoStructPointSource_dico[id_micro].sample_points_list = random.sample(range(infoStructPointSource_dico[id_micro].nb_points), nb_points_ratio)
                    break
                if case('mixte'):
                    # The 'mixte' selection mode was chosen
                    nb_points_ratio = int(infoStructPointSource_dico[id_micro].nb_points * select_ratio_floor / 100)
                    if id_micro == min_micro_class_label :
                        # The smallest micro class is kept whole
                        infoStructPointSource_dico[id_micro].sample_points_list = range(infoStructPointSource_dico[id_micro].nb_points)
                        nb_points_ratio = min_micro_class_nb_points
                    elif nb_points_ratio <= min_micro_class_nb_points :
                        # Micro classes whose ratio-based draw falls below the size of the smallest class are capped at that size (random draw of min_micro_class_nb_points points)
                        infoStructPointSource_dico[id_micro].sample_points_list = random.sample(range(infoStructPointSource_dico[id_micro].nb_points), min_micro_class_nb_points)
                        nb_points_ratio = min_micro_class_nb_points
                    else :
                        # For every other micro class, randomly draw the number of points given by the ratio
                        infoStructPointSource_dico[id_micro].sample_points_list = random.sample(range(infoStructPointSource_dico[id_micro].nb_points), nb_points_ratio)

                    break
                break
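            # Example (hypothetical): with 'percent' and ratio_per_class_dico = {11000: 30},
            # a micro class 11002 holding 1000 points keeps 300 randomly drawn points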


            if debug >= 2:
                print("MicroClass = " + str(id_micro) + ", nb_points_ratio " + str(nb_points_ratio))
            appendTextFileCR(file_statistic_points, '        <StatisticPoints class="%d" value="%d" />' %(id_micro, nb_points_ratio))

        appendTextFileCR(file_statistic_points, '    </Statistic>')
        appendTextFileCR(file_statistic_points, '</GeneralStatistics>')

        pending_event = cyan + "selectSamples() : " + bold + green + "End selection des points d'echantillon. " + endC
        if debug >= 3:
            print(pending_event)
        timeLine(path_time_log,pending_event)

        # 4. PREPARATION OF THE SAMPLE POINTS
        #----------------------------------------

        if debug >= 3:
            print(cyan + "selectSamples() : " + bold + green + "Start preparation des points d'echantillon..." + endC)

        # Build the points dico
        points_random_value_dico = {}
        index_dico_point = 0
        for micro_class in infoStructPointSource_dico :
            micro_class_struct = infoStructPointSource_dico[micro_class]
            label_class = micro_class_struct.label_class
            point_attr_dico = {name_column:int(label_class), COLUMN_CLASS:int(label_class), COLUMN_ORIGINFID:0}

            for id_point in micro_class_struct.sample_points_list:

                # Compute the point coordinates (offset by half a pixel to hit the pixel centre)
                coor_x = float(xmin + (int(micro_class_struct.info_points_list[id_point] % cols) * pixel_width)) + (pixel_width / 2.0)
                coor_y = float(ymax - (int(micro_class_struct.info_points_list[id_point] / cols) * pixel_height)) - (pixel_height / 2.0)
                points_random_value_dico[index_dico_point] = [[coor_x, coor_y], point_attr_dico]
                del coor_x
                del coor_y
                index_dico_point += 1
            del point_attr_dico
        del infoStructPointSource_dico

        pending_event = cyan + "selectSamples() : " + bold + green + "End preparation des points d'echantillon. " + endC
        if debug >=3:
            print(pending_event)
        timeLine(path_time_log,pending_event)

        # 5. CREATION OF THE SAMPLE POINTS SHAPEFILE
        #-----------------------------------------------------

        if debug >= 3:
            print(cyan + "selectSamples() : " + bold + green + "Start creation du fichier shape de points d'echantillon..." + endC)

        # Define the attributes of the output file
        attribute_dico = {name_column:ogr.OFTInteger, COLUMN_CLASS:ogr.OFTInteger, COLUMN_ORIGINFID:ogr.OFTInteger}

        # Create the shapefile
        createPointsFromCoordList(attribute_dico, points_random_value_dico, sample_points_output, projection_input, format_vector)
        del attribute_dico
        del points_random_value_dico

        pending_event = cyan + "selectSamples() : " + bold + green + "End creation du fichier shape de points d'echantillon. " + endC
        if debug >=3:
            print(pending_event)
        timeLine(path_time_log,pending_event)

        # 6. EXTRACTION OF THE SAMPLE POINTS
        #-----------------------------------------

        if debug >= 3:
            print(cyan + "selectSamples() : " + bold + green + "Start extraction des points d'echantillon dans l'image..." + endC)

        # Case of a single image
        if len(image_input_list) == 1:
            # Extract sample
            image_input = image_input_list[0]
            command = "otbcli_SampleExtraction -in %s -vec %s -outfield prefix -outfield.prefix.name %s -out %s -field %s" %(image_input, sample_points_output, BAND_NAME, vector_output, name_column)
            if ram_otb > 0:
                command += " -ram %d" %(ram_otb)
            if debug >= 3:
                print(command)
            exitCode = os.system(command)
            if exitCode != 0:
                raise NameError(cyan + "selectSamples() : " + bold + red + "An error occured during otbcli_SampleExtraction command. See error message above." + endC)

        # Case of several image tiles
        else :

            # The output directory
            repertory_output = os.path.dirname(vector_output)
            # Initialise the multi-threading list and the list of all the local samples
            thread_list = []
            vector_local_output_list = []

            # Get the footprint of each input image, to re-clip the training sample vector per image
            for image_input in image_input_list :
                # Define the files on the local footprint
                file_name = os.path.splitext(os.path.basename(image_input))[0]
                emprise_local_sample = repertory_output + os.sep + file_name + SUFFIX_SAMPLE + extension_vector
                vector_sample_local_output = repertory_output + os.sep + file_name + SUFFIX_VALUE + extension_vector
                vector_local_output_list.append(vector_sample_local_output)

                # Single-threaded alternative...
                #SampleLocalExtraction(image_input, sample_points_output, emprise_local_sample, vector_sample_local_output, name_column, BAND_NAME, ram_otb, format_vector, extension_vector, save_results_intermediate)

                # Multi-threaded handling
                thread = threading.Thread(target=SampleLocalExtraction, args=(image_input, sample_points_output, emprise_local_sample, vector_sample_local_output, name_column, BAND_NAME, ram_otb, format_vector, extension_vector, save_results_intermediate))
                thread.start()
                thread_list.append(thread)

            # Extract the sample points from the images
            try:
                for thread in thread_list:
                    thread.join()
            except Exception:
                print(cyan + "selectSamples() : " + bold + red + "Erreur lors de l'extraction des valeurs d'échantillon : impossible de demarrer le thread" + endC, file=sys.stderr)

            # Merge the point vectors holding the image band values
            fusionVectors(vector_local_output_list, vector_output, format_vector)

            # Clean up the local sample point vector files
            for vector_sample_local_output in vector_local_output_list :
                removeVectorFile(vector_sample_local_output)

        if debug >= 3:
            print(cyan + "selectSamples() : " + bold + green + "End extraction des points d'echantillon dans l'image." + endC)

        # 7. STATISTICS ON THE VALUES OF THE SELECTED SAMPLE POINTS
        #-----------------------------------------------------------------------------------

        if debug >= 3:
            print(cyan + "selectSamples() : " + bold + green + "Start calcul des statistiques sur les valeurs des points d'echantillons selectionnees..." + endC)

        # If the statistics computation is requested (a stat file is given)
        if table_statistics_output != "":

            # Retrieve the data list
            pending_event = cyan + "selectSamples() : " + bold + green + "Encours calcul des statistiques part1... " + endC
            if debug >=4:
                print(pending_event)
            timeLine(path_time_log,pending_event)

            attribute_name_dico = {}
            name_field_value_list = []
            names_attribut_list = getAttributeNameList(vector_output, format_vector)
            if debug >=4:
                print("names_attribut_list = " + str(names_attribut_list))

            attribute_name_dico[name_column] = ogr.OFTInteger
            for name_attribut in names_attribut_list :
                if BAND_NAME in name_attribut :
                    attribute_name_dico[name_attribut] = ogr.OFTReal
                    name_field_value_list.append(name_attribut)

            name_field_value_list.sort()

            res_values_dico = getAttributeValues(vector_output, None, None, attribute_name_dico, format_vector)
            del attribute_name_dico

            # Sort the data by micro-class identifier
            pending_event = cyan + "selectSamples() : " + bold + green + "Encours calcul des statistiques part2... " + endC
            if debug >=4:
                print(pending_event)
            timeLine(path_time_log,pending_event)

            data_value_by_micro_class_dico = {}
            stat_by_micro_class_dico = {}

            # Initialise the nested dico
            for id_micro in id_micro_list :
                data_value_by_micro_class_dico[id_micro] = {}
                stat_by_micro_class_dico[id_micro] = {}
                for name_field_value in res_values_dico :
                    if name_field_value != name_column :
                        data_value_by_micro_class_dico[id_micro][name_field_value] = []
                        stat_by_micro_class_dico[id_micro][name_field_value] = {}
                        stat_by_micro_class_dico[id_micro][name_field_value][AVERAGE] = 0.0
                        stat_by_micro_class_dico[id_micro][name_field_value][STANDARD_DEVIATION] = 0.0

            # Sort the values
            pending_event = cyan + "selectSamples() : " + bold + green + "Encours calcul des statistiques part3... " + endC
            if debug >=4:
                print(pending_event)
            timeLine(path_time_log,pending_event)

            for index in range(len(res_values_dico[name_column])) :
                id_micro = res_values_dico[name_column][index]
                for name_field_value in name_field_value_list :
                    data_value_by_micro_class_dico[id_micro][name_field_value].append(res_values_dico[name_field_value][index])
            del res_values_dico

            # Compute the statistics
            pending_event = cyan + "selectSamples() : " + bold + green + "Encours calcul des statistiques part4... " + endC
            if debug >=4:
                print(pending_event)
            timeLine(path_time_log,pending_event)

            for id_micro in id_micro_list :
                for name_field_value in name_field_value_list :
                    try :
                        stat_by_micro_class_dico[id_micro][name_field_value][AVERAGE] = average(data_value_by_micro_class_dico[id_micro][name_field_value])
                    except Exception:
                        stat_by_micro_class_dico[id_micro][name_field_value][AVERAGE] = 0
                    try :
                        stat_by_micro_class_dico[id_micro][name_field_value][STANDARD_DEVIATION] = standardDeviation(data_value_by_micro_class_dico[id_micro][name_field_value])
                    except Exception:
                        stat_by_micro_class_dico[id_micro][name_field_value][STANDARD_DEVIATION] = 0
                    try :
                        stat_by_micro_class_dico[id_micro][name_field_value][NB_POINTS] = len(data_value_by_micro_class_dico[id_micro][name_field_value])
                    except Exception:
                        stat_by_micro_class_dico[id_micro][name_field_value][NB_POINTS] = 0

            del data_value_by_micro_class_dico

            # Create the .csv statistics file
            pending_event = cyan + "selectSamples() : " + bold + green + "Encours calcul des statistiques part5... " + endC
            if debug >= 4:
                print(pending_event)
            timeLine(path_time_log,pending_event)

            text_csv = " Micro classes ; Champs couche image ; Nombre de points  ; Moyenne ; Ecart type \n"
            writeTextFile(table_statistics_output, text_csv)
            for id_micro in id_micro_list :
                for name_field_value in name_field_value_list :
                    # Write the file
                    text_csv = " %d " %(id_micro)
                    text_csv += " ; %s" %(name_field_value)
                    text_csv += " ; %d" %(stat_by_micro_class_dico[id_micro][name_field_value][NB_POINTS])
                    text_csv += " ; %f" %(stat_by_micro_class_dico[id_micro][name_field_value][AVERAGE])
                    text_csv += " ; %f" %(stat_by_micro_class_dico[id_micro][name_field_value][STANDARD_DEVIATION])
                    appendTextFileCR(table_statistics_output, text_csv)
            del name_field_value_list

        else :
            if debug >=3:
                print(cyan + "selectSamples() : " + bold + green + "Pas de calcul des statistiques sur les valeurs des points demander!!!." + endC)

        del id_micro_list

        pending_event = cyan + "selectSamples() : " + bold + green + "End calcul des statistiques sur les valeurs des points d'echantillons selectionnees. " + endC
        if debug >= 3:
            print(pending_event)
        timeLine(path_time_log,pending_event)


    # 8. REMOVAL OF THE INTERMEDIATE FILES
    #------------------------------------------

    if not save_results_intermediate:

        if os.path.isfile(sample_points_output) :
            removeVectorFile(sample_points_output)

    print(cyan + "selectSamples() : " + bold + green + "FIN DE LA SELECTION DE POINTS" + endC)

    # Update the log
    ending_event = "selectSamples() : Select points in raster mask macro input ending : "
    timeLine(path_time_log,ending_event)

    return
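
A runnable sketch of the 'mixte' selection rule above (standard library only; the sizes are made up):

import random

def select_points_mixte(nb_points, select_ratio_floor, min_class_size, is_smallest_class):
    # Smallest class: kept whole. Classes whose ratio-based draw would fall
    # below the smallest class size: capped at that size. All others: drawn
    # at select_ratio_floor percent.
    nb_points_ratio = int(nb_points * select_ratio_floor / 100)
    if is_smallest_class:
        return list(range(nb_points))
    if nb_points_ratio <= min_class_size:
        return random.sample(range(nb_points), min_class_size)
    return random.sample(range(nb_points), nb_points_ratio)

print(len(select_points_mixte(1000, 10, 50, False)))  # 100 points drawn at 10 %
print(len(select_points_mixte(300, 10, 50, False)))   # draw of 30 <= 50, capped: 50 points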