def compressImage(image_input, image_output_compress, inputBand4Found,
                  compress_type, predictor, zlevel, format_raster):
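    """
    Compress a raster image by running gdal_translate (DEFLATE or LZW).

    image_input / image_output_compress : input and compressed output raster paths.
    inputBand4Found : if True, the 4th band color interpretation is forced to 'undefined'.
    compress_type : "DEFLATE" or "LZW" (any other value raises NameError).
    predictor, zlevel : values for the GDAL PREDICTOR and ZLEVEL creation options.
    format_raster : GDAL output driver name (e.g. "GTiff").
    """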

    if debug >= 1:
        print(cyan + "compressImage() : " + endC +
              "Debut de la compression de %s" % (image_input))

    # Prepare the command
    command = ""
    caseBand4 = ""
    if inputBand4Found:
        caseBand4 = "-colorinterp_4 undefined"
    # Depending on the compression type
    while switch(compress_type.upper()):
        if case("DEFLATE"):
            if debug >= 2:
                print("Compression DEFLATE : ")
            command = "gdal_translate -of %s %s -co TILED=YES -co COMPRESS=%s -co PREDICTOR=%s -co ZLEVEL=%s %s %s" % (
                format_raster, caseBand4, compress_type, predictor, zlevel,
                image_input, image_output_compress)
            break
        if case("LZW"):
            if debug >= 2:
                print("Compression LZW : ")
            command = "gdal_translate -of %s %s -co TILED=YES -co COMPRESS=%s -co ZLEVEL=%s %s %s" % (
                format_raster, caseBand4, compress_type, zlevel, image_input,
                image_output_compress)
            break
        break
    if command == "":
        raise NameError(
            bold + red +
            "compressImage() : Le type de compression n'est pas reconu : " +
            str(compress_type + endC))

    if debug >= 1:
        print(cyan + "compressImage() : " + endC +
              "Algorithme de compressions : " + str(compress_type) + endC)
        print(cyan + "compressImage() : " + endC + "Predicteur : " +
              str(predictor) + endC)
        if compress_type.upper() == "DEFLATE":
            print(cyan + "compressImage() : " + endC +
                  "Taux de compression : " + str(zlevel) + endC)
        print(cyan + "compressImage() : " + endC + "Fichier de sortie : " +
              image_output_compress + endC)

    exitCode = os.system(command)
    if exitCode != 0:
        print(command)
        raise NameError(
            bold + red +
            "compressImage() : An error occured during gdal_translate command. See error message above."
            + endC)

    print(bold + green + "FIN DE LA COMPRESSION DE " + image_input + endC)

    return
def smoothGeomGrass(input_vector,
                    output_vector,
                    param_generalize_dico,
                    format_vector="ESRI_Shapefile",
                    overwrite=True):
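    """
    Smooth the geometries of a vector file with GRASS v.generalize.

    input_vector / output_vector : input and output vector file paths.
    param_generalize_dico : v.generalize parameters; the "method" and "threshold" keys are used.
    format_vector : OGR format name (spaces are replaced by underscores before use).
    overwrite : overwrite flag passed to the GRASS import, v.generalize and export steps.
    """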
    if debug >= 2:
        print(cyan + "smoothGeomGrass() : " + bold + green +
              "Lancement de la fonction de lissage par GRASS v.generalize" +
              endC)

    format_vector = format_vector.replace(' ', '_')

    # Import the input vector
    leng_name_vector_input = len(
        os.path.splitext(os.path.basename(input_vector))[0])
    if leng_name_vector_input > 16:
        leng_name_vector_input = 16
    input_name = "VI" + os.path.splitext(
        os.path.basename(input_vector))[0][0:leng_name_vector_input]
    input_name = input_name.replace('-', '_')
    leng_name_vector_output = len(
        os.path.splitext(os.path.basename(output_vector))[0])
    if leng_name_vector_output > 16:
        leng_name_vector_output = 16
    output_name = "VO" + os.path.splitext(
        os.path.basename(output_vector))[0][0:leng_name_vector_output]
    output_name = output_name.replace('-', '_')

    importVectorOgr2Grass(input_vector, input_name, overwrite)

    # Processing with the GRASS v.generalize function
    method = None
    threshold = None
    for key in param_generalize_dico:
        while switch(key):
            if case("method"):
                method = param_generalize_dico[key]
                break
            if case("threshold"):
                threshold = param_generalize_dico[key]
                break
    grass.run_command('v.generalize',
                      input=input_name,
                      output=output_name,
                      method=method,
                      threshold=threshold,
                      overwrite=overwrite,
                      stderr=subprocess.PIPE)

    # Export the processed dataset
    exportVectorOgr2Grass(output_name, output_vector, format_vector, overwrite)

    return
def convert2dot(command_doc, struct_cmd_dico, graph_name, dot_file, debug):
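    """
    Convert the command-structure dictionary into a Graphviz dot file.

    command_doc : command file path, used as graph label and to build error-file names.
    struct_cmd_dico : {id_cmd: [state, dependency_list, name_cmd, start_date, end_date, task_name]}.
    graph_name : name given to the generated graph.
    dot_file : output .dot file path.
    debug : verbosity level.
    """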

    EXT_ERR = '.err'

    # Define the graph parameters
    graph = pgv.AGraph(name=graph_name, directed=True)

    graph.graph_attr['outputorder'] = 'edgesfirst'
    graph.graph_attr['label'] = command_doc
    #graph.graph_attr['ratio']='1.0'
    graph.graph_attr['ratio'] = 'compress'
    graph.graph_attr['rankdir'] = 'TB'

    graph.node_attr['shape'] = 'ellipse'
    graph.node_attr['fixedsize'] = 'false'
    graph.node_attr['fontsize'] = '8'
    graph.node_attr['style'] = 'filled'

    graph.edge_attr['color'] = 'lightslategray'
    graph.edge_attr['style'] = 'setlinewidth(2)'
    graph.edge_attr['arrowhead'] = 'open'
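    # Note: the update below replaces the 'open' arrowhead set just above with 'vee'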
    graph.edge_attr.update(arrowhead='vee', arrowsize='2')

    # Walk the dictionary in sorted key order
    id_command_list = struct_cmd_dico.keys()
    id_command_sorted_list = sorted(id_command_list)

    # For every line of the command file
    for id_cmd in id_command_sorted_list:

        # Retrieve the command information
        info_cmd_list = struct_cmd_dico[id_cmd]
        state = info_cmd_list[0]
        dependency_list = info_cmd_list[1]
        name_cmd = info_cmd_list[2]
        start_date = info_cmd_list[3]
        end_date = info_cmd_list[4]
        name_task_list = info_cmd_list[5].split('.')
        name_task = name_task_list[1] + '.' + name_task_list[2]

        # Create the graph node
        if debug >= 4:
            print(cyan + "convert2dot() : " + endC + "Id Cmd = " +
                  str(id_cmd) + ", state : " + state)

        graph.add_node(id_cmd)
        node = graph.get_node(id_cmd)

        # Dependencies
        for dependency in dependency_list:
            graph.add_edge(id_cmd, dependency)

        # Set the color according to the command state
        value_color = 'white'
        info = ""
        while switch(state):

            if case(TAG_STATE_MAKE):
                # State: to do
                value_color = 'lightgray'
                break

            if case(TAG_STATE_WAIT):
                # State: waiting
                value_color = 'gray52'
                break

            if case(TAG_STATE_LOCK):
                # State: locked
                value_color = 'darkorange'
                break

            if case(TAG_STATE_RUN):
                # State: running
                value_color = 'deepskyblue1'
                info = "\n" + start_date
                break

            if case(TAG_STATE_END):
                # State: finished
                value_color = 'chartreuse'
                start_date_time = datetime.strptime(start_date,
                                                    '%d/%m/%y %H:%M:%S')
                end_date_time = datetime.strptime(end_date,
                                                  '%d/%m/%y %H:%M:%S')
                during_time = end_date_time - start_date_time
                info = "\n" + str(during_time)
                break

            if case(TAG_STATE_ERROR):
                # State: error
                value_color = 'deeppink'

                # Define the error-file box
                error_file_name = os.path.splitext(
                    os.path.basename(command_doc))[0] + str(id_cmd) + EXT_ERR
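                # Offset the error-node id by 10000 so it cannot collide with a command id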
                id_err = str(int(id_cmd) + 10000)
                graph.add_node(id_err)
                graph.graph_attr['label'] = ''
                graph.edge_attr['label'] = ''
                node_err = graph.get_node(id_err)
                graph.add_edge(id_err, id_cmd)
                edge_err = graph.get_edge(id_err, id_cmd)
                node_err.attr['shape'] = 'rectangle'
                node_err.attr['fillcolor'] = 'red'  #'lightpink'
                node_err.attr['label'] = error_file_name
                edge_err.attr['label'] = "See error file!"
                edge_err.attr['color'] = 'black'
                edge_err.attr['fontcolor'] = 'red'
                edge_err.attr['style'] = 'dotted'
                edge_err.attr['arrowhead'] = 'open'
                break

            break  # Exit the while

        # Assign node color
        node.attr['fillcolor'] = value_color

        # Node label
        node.attr['label'] = name_task + " - " + name_cmd + info + '\n' + str(
            id_cmd)

    # Write to the dot file
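    # The graph is reversed so that edges point from each dependency to the command that depends on it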
    graph_reverse = graph.reverse()
    graph_reverse.write(dot_file)

    return
def executeCommand(ip_serveur,
                   port,
                   id_command,
                   command_to_execute,
                   type_execution,
                   error_management,
                   base_name_shell_command,
                   ip_remote="",
                   login="",
                   password=""):

    EXT_SHELL = '.sh'
    EXT_ERR = '.err'
    EXT_LOG = '.log'
    new_state = ''

    # Prepare the execution file for background-local or background-remote runs
    if type_execution == TAG_ACTION_TO_MAKE_BG or type_execution == TAG_ACTION_TO_MAKE_RE:

        # For background or remote executions, prepare the .sh and .err files
        shell_command = base_name_shell_command + str(id_command) + EXT_SHELL
        error_file = base_name_shell_command + str(id_command) + EXT_ERR
        log_file = base_name_shell_command + str(id_command) + EXT_LOG

        # Create the shell file
        error_management_option = ""
        if not error_management:
            error_management_option = " -nem "
        command_to_execute = command_to_execute.replace('\n', '')
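        # Redirect stdout to the .log file and stderr to the .err file; under Python 2 the paths are encoded to bytes first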
        if six.PY2:
            cmd_tmp = command_to_execute + " 1> " + log_file.encode(
                "utf-8") + " 2> " + error_file.encode("utf-8") + "\n"
        else:
            cmd_tmp = command_to_execute + " 1> " + log_file + " 2> " + error_file + "\n"
        writeTextFile(shell_command, cmd_tmp)
        appendTextFileCR(
            shell_command, FUNCTION_PYTHON + "ReplyEndCommand -ip_serveur " +
            str(ip_serveur) + " -port " + str(port) + " -id_command " +
            str(id_command) + error_management_option + " -err " + error_file)
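        # The generated script removes itself once the end-of-command reply has been sent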
        appendTextFileCR(shell_command, "rm " + shell_command)
        os.chmod(shell_command, stat.S_IRWXU)

    # Depending on the execution type
    while switch(type_execution):

        if case(TAG_ACTION_TO_MAKE_NOW):

            # Direct (local) execution
            exitCode = subprocess.call(command_to_execute, shell=True)
            new_state = TAG_STATE_END
            if exitCode != 0:  # The command_to_execute command failed
                new_state = TAG_STATE_ERROR
                print(cyan + "executeCommand : " + endC + bold + red +
                      "ERREUR EXECUTION DE LA COMMANDE : " +
                      str(command_to_execute) + endC,
                      file=sys.stderr)
            break

        if case(TAG_ACTION_TO_MAKE_BG):

            # Background (local) execution
            process = subprocess.Popen(shell_command,
                                       shell=True,
                                       stderr=subprocess.STDOUT)
            time.sleep(0.1)
            if process is None:
                new_state = TAG_STATE_ERROR
                print(cyan + "executeCommand : " + endC + bold + red +
                      "ERREUR EXECUTION DE LA COMMANDE EN BACKGROUND : " +
                      str(command_to_execute) + endC,
                      file=sys.stderr)
            else:
                print(cyan + "executeCommand : " + endC +
                      " background pid = " + str(process.pid))
            break

        if case(TAG_ACTION_TO_MAKE_RE):

            # Check whether the remote machine is reachable
            if ping(ip_remote):

                # Remote execution
                try:
                    s = pxssh.pxssh()
                    s.login(ip_remote, login, password)
                    time.sleep(0.5)
                    s.sendline(shell_command + '&')
                    time.sleep(0.01)
                    s.logout()
                except pxssh.ExceptionPxssh as e:
                    new_state = TAG_STATE_ERROR
                    print(
                        cyan + "executeCommand : " + endC + bold + red +
                        "ERREUR EXECUTION DE LA COMMANDE EN REMOTE (login failed) : "
                        + str(command_to_execute) + endC,
                        file=sys.stderr)
                    print(e, file=sys.stderr)

            else:
                new_state = TAG_STATE_ERROR
                print(
                    cyan + "executeCommand : " + endC + bold + red +
                    "ERREUR EXECUTION DE LA COMMANDE EN REMOTE (Computeur : " +
                    ip_remote + " non disponible) : " +
                    str(command_to_execute) + endC,
                    file=sys.stderr)
            break

        break  # Exit the while

    return new_state
def selectSamples(image_input_list,
                  sample_image_input,
                  vector_output,
                  table_statistics_output,
                  sampler_strategy,
                  select_ratio_floor,
                  ratio_per_class_dico,
                  name_column,
                  no_data_value,
                  path_time_log,
                  rand_seed=0,
                  ram_otb=0,
                  epsg=2154,
                  format_vector='ESRI Shapefile',
                  extension_vector=".shp",
                  save_results_intermediate=False,
                  overwrite=True):
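    """
    Select sample points from a raster of micro classes and extract image values.

    image_input_list : image(s) whose band values are extracted at each selected point.
    sample_image_input : raster of micro-class labels (0 = no data).
    vector_output : output point vector with class and band-value attributes.
    table_statistics_output : optional .csv of per-class statistics ("" to skip).
    sampler_strategy : 'all', 'percent' or 'mixte'.
    select_ratio_floor / ratio_per_class_dico : selection ratios, in percent.
    """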

    # Update the log
    starting_event = "selectSamples() : Select points in raster mask macro input starting : "
    timeLine(path_time_log, starting_event)

    if debug >= 3:
        print(cyan + "selectSamples() : " + endC + "image_input_list : " + str(image_input_list) + endC)
        print(cyan + "selectSamples() : " + endC + "sample_image_input : " + str(sample_image_input) + endC)
        print(cyan + "selectSamples() : " + endC + "vector_output : " + str(vector_output) + endC)
        print(cyan + "selectSamples() : " + endC + "table_statistics_output : " + str(table_statistics_output) + endC)
        print(cyan + "selectSamples() : " + endC + "sampler_strategy : " + str(sampler_strategy) + endC)
        print(cyan + "selectSamples() : " + endC + "select_ratio_floor : " + str(select_ratio_floor) + endC)
        print(cyan + "selectSamples() : " + endC + "ratio_per_class_dico : " + str(ratio_per_class_dico) + endC)
        print(cyan + "selectSamples() : " + endC + "name_column : " + str(name_column) + endC)
        print(cyan + "selectSamples() : " + endC + "no_data_value : " + str(no_data_value) + endC)
        print(cyan + "selectSamples() : " + endC + "path_time_log : " + str(path_time_log) + endC)
        print(cyan + "selectSamples() : " + endC + "rand_seed : " + str(rand_seed) + endC)
        print(cyan + "selectSamples() : " + endC + "ram_otb : " + str(ram_otb) + endC)
        print(cyan + "selectSamples() : " + endC + "epsg : " + str(epsg) + endC)
        print(cyan + "selectSamples() : " + endC + "format_vector : " + str(format_vector) + endC)
        print(cyan + "selectSamples() : " + endC + "extension_vector : " + str(extension_vector) + endC)
        print(cyan + "selectSamples() : " + endC + "save_results_intermediate : " + str(save_results_intermediate) + endC)
        print(cyan + "selectSamples() : " + endC + "overwrite : " + str(overwrite) + endC)

    # Constants
    EXT_XML = ".xml"

    SUFFIX_SAMPLE = "_sample"
    SUFFIX_STATISTICS = "_statistics"
    SUFFIX_POINTS = "_points"
    SUFFIX_VALUE = "_value"

    BAND_NAME = "band_"
    COLUMN_CLASS = "class"
    COLUMN_ORIGINFID = "originfid"

    NB_POINTS = "nb_points"
    AVERAGE = "average"
    STANDARD_DEVIATION = "st_dev"

    print(cyan + "selectSamples() : " + bold + green + "DEBUT DE LA SELECTION DE POINTS" + endC)

    # Define variables and paths
    repertory_output = os.path.dirname(vector_output)
    filename = os.path.splitext(os.path.basename(vector_output))[0]
    sample_points_output = repertory_output + os.sep + filename +  SUFFIX_SAMPLE + extension_vector
    file_statistic_points = repertory_output + os.sep + filename + SUFFIX_STATISTICS + SUFFIX_POINTS + EXT_XML

    if debug >= 3:
        print(cyan + "selectSamples() : " + endC + "file_statistic_points : " + str(file_statistic_points) + endC)

    # 0. OUTPUT FILE EXISTENCE
    #----------------------------------

    # If the output points vector file already exists and overwrite is not enabled
    check = os.path.isfile(vector_output)
    if check and not overwrite:
        print(bold + yellow + "Samples points already done for file %s and will not be calculated again." %(vector_output) + endC)
    else:   # Otherwise, or if the check is disabled: create the sample points file

        # Delete any existing file
        if check:
            try:
                removeVectorFile(vector_output)
            except Exception:
                pass  # If the file cannot be deleted, assume it does not exist and move on
        if os.path.isfile(table_statistics_output) :
            try:
                removeFile(table_statistics_output)
            except Exception:
                pass  # If the file cannot be deleted, assume it does not exist and move on


        # 1. STATISTICS ON THE RASTER SAMPLE IMAGE
        #----------------------------------------------------

        if debug >= 3:
            print(cyan + "selectSamples() : " + bold + green + "Start statistique sur l'image des echantillons rasteur..." + endC)

        id_micro_list = identifyPixelValues(sample_image_input)

        if 0 in id_micro_list :
            id_micro_list.remove(0)

        min_micro_class_nb_points = -1
        min_micro_class_label = 0
        infoStructPointSource_dico = {}

        writeTextFile(file_statistic_points, '<?xml version="1.0" ?>\n')
        appendTextFileCR(file_statistic_points, '<GeneralStatistics>')
        appendTextFileCR(file_statistic_points, '    <Statistic name="pointsPerClassRaw">')

        if debug >= 2:
            print("Nombre de points par micro classe :" + endC)

        for id_micro in id_micro_list :
            nb_pixels = countPixelsOfValue(sample_image_input, id_micro)

            if debug >= 2:
                print("MicroClass : " + str(id_micro) + ", nb_points = " + str(nb_pixels))
            appendTextFileCR(file_statistic_points, '        <StatisticPoints class="%d" value="%d" />' %(id_micro, nb_pixels))

            if min_micro_class_nb_points == -1 or min_micro_class_nb_points > nb_pixels :
                min_micro_class_nb_points = nb_pixels
                min_micro_class_label = id_micro

            infoStructPointSource_dico[id_micro] = StructInfoMicoClass()
            infoStructPointSource_dico[id_micro].label_class = id_micro
            infoStructPointSource_dico[id_micro].nb_points = nb_pixels
            infoStructPointSource_dico[id_micro].info_points_list = []
            del nb_pixels

        if debug >= 2:
            print("MicroClass min points find : " + str(min_micro_class_label) + ", nb_points = " + str(min_micro_class_nb_points))

        appendTextFileCR(file_statistic_points, '    </Statistic>')

        pending_event = cyan + "selectSamples() : " + bold + green + "End statistique sur l'image des echantillons rasteur. " + endC
        if debug >= 3:
            print(pending_event)
        timeLine(path_time_log,pending_event)

        # 2. LOADING THE SAMPLE IMAGE
        #------------------------------------------

        if debug >= 3:
            print(cyan + "selectSamples() : " + bold + green + "Start chargement de l'image des echantillons..." + endC)

        # Image information
        cols, rows, bands = getGeometryImage(sample_image_input)
        xmin, xmax, ymin, ymax = getEmpriseImage(sample_image_input)
        pixel_width, pixel_height = getPixelWidthXYImage(sample_image_input)
        projection_input = getProjectionImage(sample_image_input)
        if projection_input is None or projection_input == 0:
            projection_input = epsg
        else :
            projection_input = int(projection_input)

        pixel_width = abs(pixel_width)
        pixel_height = abs(pixel_height)

        # Read the data
        raw_data = getRawDataImage(sample_image_input)

        if debug >= 3:
            print("projection = " + str(projection_input))
            print("cols = " + str(cols))
            print("rows = " + str(rows))

        # Build a dictionary structure containing every non-zero point
        progress = 0
        pass_prog = False
        for y_row in range(rows) :
            for x_col in range(cols) :
                value_class = raw_data[y_row][x_col]
                if value_class != 0 :
                    infoStructPointSource_dico[value_class].info_points_list.append(x_col + (y_row * cols))

            # Progress bar
            if debug >= 4:
                if  ((float(y_row) / rows) * 100.0 > progress) and not pass_prog :
                    progress += 1
                    pass_prog = True
                    print("Progression => " + str(progress) + "%")
                if ((float(y_row) / rows) * 100.0  > progress + 1) :
                    pass_prog = False

        del raw_data

        pending_event = cyan + "selectSamples() : " + bold + green + "End chargement de l'image des echantillons. " + endC
        if debug >= 3:
            print(pending_event)
        timeLine(path_time_log,pending_event)

        # 3. SAMPLE POINT SELECTION
        #--------------------------------------

        if debug >= 3:
            print(cyan + "selectSamples() : " + bold + green + "Start selection des points d'echantillon..." + endC)

        appendTextFileCR(file_statistic_points, '    <Statistic name="pointsPerClassSelect">')

        # Make the random.sample draws deterministic
        if rand_seed > 0:
            random.seed( rand_seed )

        # For every micro class
        for id_micro in id_micro_list :

            # Depending on the selection strategy
            nb_points_ratio = 0
            while switch(sampler_strategy.lower()):
                if case('all'):
                    # The 'all' selection mode is chosen
                    nb_points_ratio = infoStructPointSource_dico[id_micro].nb_points
                    infoStructPointSource_dico[id_micro].sample_points_list = range(nb_points_ratio)

                    break
                if case('percent'):
                    # The 'percent' selection mode is chosen
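                    # The macro-class id is the micro-class id rounded down to the nearest hundred; it indexes ratio_per_class_dico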
                    id_macro_class = int(math.floor(id_micro / 100) * 100)
                    select_ratio_class = ratio_per_class_dico[id_macro_class]
                    nb_points_ratio = int(infoStructPointSource_dico[id_micro].nb_points * select_ratio_class / 100)
                    infoStructPointSource_dico[id_micro].sample_points_list = random.sample(range(infoStructPointSource_dico[id_micro].nb_points), nb_points_ratio)
                    break
                if case('mixte'):
                    # The 'mixte' selection mode is chosen
                    nb_points_ratio = int(infoStructPointSource_dico[id_micro].nb_points * select_ratio_floor / 100)
                    if id_micro == min_micro_class_label :
                        # The smallest micro class is kept in full
                        infoStructPointSource_dico[id_micro].sample_points_list = range(infoStructPointSource_dico[id_micro].nb_points)
                        nb_points_ratio = min_micro_class_nb_points
                    elif nb_points_ratio <= min_micro_class_nb_points :
                        # Micro classes whose ratio-based count is below the smallest class's point count get min_micro_class_nb_points points drawn instead
                        infoStructPointSource_dico[id_micro].sample_points_list = random.sample(range(infoStructPointSource_dico[id_micro].nb_points), min_micro_class_nb_points)
                        nb_points_ratio = min_micro_class_nb_points
                    else :
                        # For every other micro class, randomly draw the number of points given by the ratio
                        infoStructPointSource_dico[id_micro].sample_points_list = random.sample(range(infoStructPointSource_dico[id_micro].nb_points), nb_points_ratio)

                    break
                break


            if debug >= 2:
                print("MicroClass = " + str(id_micro) + ", nb_points_ratio " + str(nb_points_ratio))
            appendTextFileCR(file_statistic_points, '        <StatisticPoints class="%d" value="%d" />' %(id_micro, nb_points_ratio))

        appendTextFileCR(file_statistic_points, '    </Statistic>')
        appendTextFileCR(file_statistic_points, '</GeneralStatistics>')

        pending_event = cyan + "selectSamples() : " + bold + green + "End selection des points d'echantillon. " + endC
        if debug >= 3:
            print(pending_event)
        timeLine(path_time_log,pending_event)

        # 4. SAMPLE POINT PREPARATION
        #----------------------------------------

        if debug >= 3:
            print(cyan + "selectSamples() : " + bold + green + "Start preparation des points d'echantillon..." + endC)

        # Build the points dictionary
        points_random_value_dico = {}
        index_dico_point = 0
        for micro_class in infoStructPointSource_dico :
            micro_class_struct = infoStructPointSource_dico[micro_class]
            label_class = micro_class_struct.label_class
            point_attr_dico = {name_column:int(label_class), COLUMN_CLASS:int(label_class), COLUMN_ORIGINFID:0}

            for id_point in micro_class_struct.sample_points_list:

                # Retrieve the point coordinate values
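                # info_points_list holds flat pixel indices (x_col + y_row * cols); the modulo and division below recover the column and row, and half a pixel shifts the point to the pixel center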
                coor_x = float(xmin + (int(micro_class_struct.info_points_list[id_point] % cols) * pixel_width)) + (pixel_width / 2.0)
                coor_y = float(ymax - (int(micro_class_struct.info_points_list[id_point] / cols) * pixel_height)) - (pixel_height / 2.0)
                points_random_value_dico[index_dico_point] = [[coor_x, coor_y], point_attr_dico]
                del coor_x
                del coor_y
                index_dico_point += 1
            del point_attr_dico
        del infoStructPointSource_dico

        pending_event = cyan + "selectSamples() : " + bold + green + "End preparation des points d'echantillon. " + endC
        if debug >=3:
            print(pending_event)
        timeLine(path_time_log,pending_event)

        # 5. CREATION OF THE SAMPLE POINT SHAPEFILE
        #-----------------------------------------------------

        if debug >= 3:
            print(cyan + "selectSamples() : " + bold + green + "Start creation du fichier shape de points d'echantillon..." + endC)

        # Define the attributes of the result file
        attribute_dico = {name_column:ogr.OFTInteger, COLUMN_CLASS:ogr.OFTInteger, COLUMN_ORIGINFID:ogr.OFTInteger}

        # Create the shapefile
        createPointsFromCoordList(attribute_dico, points_random_value_dico, sample_points_output, projection_input, format_vector)
        del attribute_dico
        del points_random_value_dico

        pending_event = cyan + "selectSamples() : " + bold + green + "End creation du fichier shape de points d'echantillon. " + endC
        if debug >=3:
            print(pending_event)
        timeLine(path_time_log,pending_event)

        # 6. SAMPLE POINT EXTRACTION
        #-----------------------------------------

        if debug >= 3:
            print(cyan + "selectSamples() : " + bold + green + "Start extraction des points d'echantillon dans l'image..." + endC)

        # Case with a single image
        if len(image_input_list) == 1:
            # Extract sample
            image_input = image_input_list[0]
            command = "otbcli_SampleExtraction -in %s -vec %s -outfield prefix -outfield.prefix.name %s -out %s -field %s" %(image_input, sample_points_output, BAND_NAME, vector_output, name_column)
            if ram_otb > 0:
                command += " -ram %d" %(ram_otb)
            if debug >= 3:
                print(command)
            exitCode = os.system(command)
            if exitCode != 0:
                raise NameError(cyan + "selectSamples() : " + bold + red + "An error occurred during otbcli_SampleExtraction command. See error message above." + endC)

        # Case with several image tiles
        else :

            # Output directory
            repertory_output = os.path.dirname(vector_output)
            # Initialise the multi-threading list and the list of all local sample files
            thread_list = []
            vector_local_output_list = []

            # Get the footprint of the input images to clip the training sample vector for each image
            for image_input in image_input_list :
                # Define the files on the local footprint
                file_name = os.path.splitext(os.path.basename(image_input))[0]
                emprise_local_sample = repertory_output + os.sep + file_name + SUFFIX_SAMPLE + extension_vector
                vector_sample_local_output = repertory_output + os.sep + file_name + SUFFIX_VALUE + extension_vector
                vector_local_output_list.append(vector_sample_local_output)

                # Single-threaded handling...
                #SampleLocalExtraction(image_input, sample_points_output, emprise_local_sample, vector_sample_local_output, name_column, BAND_NAME, ram_otb, format_vector, extension_vector, save_results_intermediate)

                # Multi-threaded handling
                thread = threading.Thread(target=SampleLocalExtraction, args=(image_input, sample_points_output, emprise_local_sample, vector_sample_local_output, name_column, BAND_NAME, ram_otb, format_vector, extension_vector, save_results_intermediate))
                thread.start()
                thread_list.append(thread)

            # Extract the sample points from the images
            try:
                for thread in thread_list:
                    thread.join()
            except:
                print(cyan + "selectSamples() : " + bold + red + "Erreur lors de l'éextaction des valeurs d'echantion : impossible de demarrer le thread" + endC, file=sys.stderr)

            # Merge the point vectors containing the image band values
            fusionVectors(vector_local_output_list, vector_output, format_vector)

            # Clean up the local sample point vector files
            for vector_sample_local_output in vector_local_output_list :
                removeVectorFile(vector_sample_local_output)

        if debug >= 3:
            print(cyan + "selectSamples() : " + bold + green + "End extraction des points d'echantillon dans l'image." + endC)

        # 7. STATISTICS ON THE SELECTED SAMPLE POINT VALUES
        #-----------------------------------------------------------------------------------

        if debug >= 3:
            print(cyan + "selectSamples() : " + bold + green + "Start calcul des statistiques sur les valeurs des points d'echantillons selectionnees..." + endC)

        # If statistics are requested, a stats file path is provided
        if table_statistics_output != "":

            # Retrieve the data list
            pending_event = cyan + "selectSamples() : " + bold + green + "Encours calcul des statistiques part1... " + endC
            if debug >=4:
                print(pending_event)
            timeLine(path_time_log,pending_event)

            attribute_name_dico = {}
            name_field_value_list = []
            names_attribut_list = getAttributeNameList(vector_output, format_vector)
            if debug >=4:
                print("names_attribut_list = " + str(names_attribut_list))

            attribute_name_dico[name_column] = ogr.OFTInteger
            for name_attribut in names_attribut_list :
                if BAND_NAME in name_attribut :
                    attribute_name_dico[name_attribut] = ogr.OFTReal
                    name_field_value_list.append(name_attribut)

            name_field_value_list.sort()

            res_values_dico = getAttributeValues(vector_output, None, None, attribute_name_dico, format_vector)
            del attribute_name_dico

            # Sort the data by micro-class identifier
            pending_event = cyan + "selectSamples() : " + bold + green + "Encours calcul des statistiques part2... " + endC
            if debug >=4:
                print(pending_event)
            timeLine(path_time_log,pending_event)

            data_value_by_micro_class_dico = {}
            stat_by_micro_class_dico = {}

            # Initialise the nested dictionary
            for id_micro in id_micro_list :
                data_value_by_micro_class_dico[id_micro] = {}
                stat_by_micro_class_dico[id_micro] = {}
                for name_field_value in res_values_dico :
                    if name_field_value != name_column :
                        data_value_by_micro_class_dico[id_micro][name_field_value] = []
                        stat_by_micro_class_dico[id_micro][name_field_value] = {}
                        stat_by_micro_class_dico[id_micro][name_field_value][AVERAGE] = 0.0
                        stat_by_micro_class_dico[id_micro][name_field_value][STANDARD_DEVIATION] = 0.0

            # Sort the values
            pending_event = cyan + "selectSamples() : " + bold + green + "Encours calcul des statistiques part3... " + endC
            if debug >=4:
                print(pending_event)
            timeLine(path_time_log,pending_event)

            for index in range(len(res_values_dico[name_column])) :
                id_micro = res_values_dico[name_column][index]
                for name_field_value in name_field_value_list :
                    data_value_by_micro_class_dico[id_micro][name_field_value].append(res_values_dico[name_field_value][index])
            del res_values_dico

            # Compute the statistics
            pending_event = cyan + "selectSamples() : " + bold + green + "Encours calcul des statistiques part4... " + endC
            if debug >=4:
                print(pending_event)
            timeLine(path_time_log,pending_event)

            for id_micro in id_micro_list :
                for name_field_value in name_field_value_list :
                    try :
                        stat_by_micro_class_dico[id_micro][name_field_value][AVERAGE] = average(data_value_by_micro_class_dico[id_micro][name_field_value])
                    except:
                        stat_by_micro_class_dico[id_micro][name_field_value][AVERAGE] = 0
                    try :
                        stat_by_micro_class_dico[id_micro][name_field_value][STANDARD_DEVIATION] = standardDeviation(data_value_by_micro_class_dico[id_micro][name_field_value])
                    except:
                        stat_by_micro_class_dico[id_micro][name_field_value][STANDARD_DEVIATION] = 0
                    try :
                        stat_by_micro_class_dico[id_micro][name_field_value][NB_POINTS] = len(data_value_by_micro_class_dico[id_micro][name_field_value])
                    except:
                        stat_by_micro_class_dico[id_micro][name_field_value][NB_POINTS] = 0

            del data_value_by_micro_class_dico

            # Create the .csv statistics file
            pending_event = cyan + "selectSamples() : " + bold + green + "Encours calcul des statistiques part5... " + endC
            if debug >= 4:
                print(pending_event)
            timeLine(path_time_log,pending_event)

            text_csv = " Micro classes ; Champs couche image ; Nombre de points  ; Moyenne ; Ecart type \n"
            writeTextFile(table_statistics_output, text_csv)
            for id_micro in id_micro_list :
                for name_field_value in name_field_value_list :
                    # Write the file
                    text_csv = " %d " %(id_micro)
                    text_csv += " ; %s" %(name_field_value)
                    text_csv += " ; %d" %(stat_by_micro_class_dico[id_micro][name_field_value][NB_POINTS])
                    text_csv += " ; %f" %(stat_by_micro_class_dico[id_micro][name_field_value][AVERAGE])
                    text_csv += " ; %f" %(stat_by_micro_class_dico[id_micro][name_field_value][STANDARD_DEVIATION])
                    appendTextFileCR(table_statistics_output, text_csv)
            del name_field_value_list

        else :
            if debug >=3:
                print(cyan + "selectSamples() : " + bold + green + "Pas de calcul des statistiques sur les valeurs des points demander!!!." + endC)

        del id_micro_list

        pending_event = cyan + "selectSamples() : " + bold + green + "End calcul des statistiques sur les valeurs des points d'echantillons selectionnees. " + endC
        if debug >= 3:
            print(pending_event)
        timeLine(path_time_log,pending_event)


    # 8. DELETION OF INTERMEDIATE FILES
    #------------------------------------------

    if not save_results_intermediate:

        if os.path.isfile(sample_points_output) :
            removeVectorFile(sample_points_output)

    print(cyan + "selectSamples() : " + bold + green + "FIN DE LA SELECTION DE POINTS" + endC)

    # Update the log
    ending_event = "selectSamples() : Select points in raster mask macro input ending : "
    timeLine(path_time_log,ending_event)

    return