def mergeRaster(iteration, merged, temp_merged, filename, name, ordername,
                filename_ext, homedir):
    if iteration == 0:
        # first pass: clear any stale temp file, then adopt the first input
        # as the running merge result
        if os.path.exists(temp_merged):
            os.remove(temp_merged)
        os.rename(filename, temp_merged)  #move first file as current temp merge file

    else:
        print("merging file: ", name)

        if name.find(nonImageLayers) != -1:  #TIFF with LZW
            gm.main([
                '', '-o', ordername + "_merged" + filename_ext[1],
                ordername + "_temp_merged" + filename_ext[1], name, '-co',
                'COMPRESS=LZW'
            ])
        else:  #TIFF with JPEG
            gm.main([
                '', '-o', ordername + "_merged" + filename_ext[1],
                ordername + "_temp_merged" + filename_ext[1], name, '-co',
                'COMPRESS=JPEG', '-co', 'PREDICTOR=2', '-co', 'TILED=YES',
                '-co', 'BLOCKXSIZE=512', '-co', 'BLOCKYSIZE=512', '-co',
                'PHOTOMETRIC=YCBCR', '-ot', 'Byte'
            ])

        os.remove(temp_merged)  #remove the previous temp merge file
        os.remove(filename)
        os.rename(merged, temp_merged)
        print((check_tmp_free_space(homedir, temp_merged, 1) +
               " will be used up for merging the next image together"))
    return
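
# A minimal usage sketch for mergeRaster (hypothetical file names; the
# function also relies on module-level globals such as `nonImageLayers`,
# `gm` and `check_tmp_free_space`): fold a list of tiles into one mosaic,
# one file per iteration, reusing the temp-merge naming convention above.
# files = ["tile_0.tif", "tile_1.tif", "tile_2.tif"]
# for i, f in enumerate(files):
#     mergeRaster(i, "order1_merged.tif", "order1_temp_merged.tif",
#                 f, f, "order1", os.path.splitext(f), "/home/user")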
def generate_geotiffs(inputProductPath, outputPath):

    basename = os.path.basename(inputProductPath)
    if os.path.isdir(outputPath + basename[:-3] + "SAFE"):
        print('Already extracted')
    else:
        zip_file = zipfile.ZipFile(inputProductPath)  # avoid shadowing the zip() builtin
        zip_file.extractall(outputPath)
        print("Extracting Done")

    directoryName = outputPath + basename[:-3] + "SAFE/GRANULE"

    productName = os.path.basename(inputProductPath)[:-4]
    outputPathSubdirectory = outputPath + productName + "_PROCESSED"

    if not os.path.exists(outputPathSubdirectory):
        os.makedirs(outputPathSubdirectory)

    subDirectorys = get_immediate_subdirectories(directoryName)

    results = []

    for granule in subDirectorys:
        unprocessedBandPath = outputPath + productName + ".SAFE/GRANULE/" + granule + "/" + "IMG_DATA/"
        results.append(generate_all_bands(unprocessedBandPath, granule, outputPathSubdirectory))

    #gdal_merge.py -n 0 -a_nodata 0 -of GTiff -o /home/daire/Desktop/merged.tif /home/daire/Desktop/aa.tif /home/daire/Desktop/rgbTiff-16Bit-AllBands.tif
    merged = outputPathSubdirectory + "/merged.tif"
    params = ['', "-of", "GTiff", "-o", merged]

    for granule in results:
        params.append(granule)

    gdal_merge.main(params)
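
# Hypothetical invocation (note outputPath must end with a separator, since
# generate_geotiffs concatenates path strings rather than using os.path.join):
# generate_geotiffs("/data/S2A_MSIL1C_20190821T085601.zip", "/data/out/")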
Example #3
    def _patch_rasters(self, products_data):
        """Patch bands together from more products

        :param products_data: dict {product: {band: file_name}}

        :return: dict {band: file_name}
        """

        target_dir = os.path.join(self.tempdir, self.area.name, "merged")
        if not os.path.isdir(target_dir):
            os.makedirs(target_dir)

        products = products_data.keys()

        data = {}
        for band in products_data[list(products)[0]].keys():
            input_files = []

            for product in products_data:
                input_files.append(products_data[product][band])

            output = os.path.join(target_dir, "{}.tif".format(band))

            merge_command = [
                "-n", "0", "-a_nodata", "0", "-o", output, "-co",
                "COMPRESS=DEFLATE"
            ] + input_files
            gm.main(merge_command)

            data[band] = output
        return data
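
# The dict shapes are easy to get wrong; a sketch of the expected input and
# output of _patch_rasters (hypothetical product names, bands and paths):
# products_data = {
#     "S2A_T33UVP": {"B04": "/tmp/a_B04.tif", "B08": "/tmp/a_B08.tif"},
#     "S2A_T33UUP": {"B04": "/tmp/b_B04.tif", "B08": "/tmp/b_B08.tif"},
# }
# returns {"B04": ".../merged/B04.tif", "B08": ".../merged/B08.tif"},
# each output patched together from every product for that band.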
def run_createDEM(xmin, xmax, ymin, ymax, scale, cellLen):
    """
  run_createDEM is used to run the createDEM function and to resample and 
  translate the created DEM to a usable format for the coastal module.
  xmin,xmax,ymin,ymax creates the window of the dem
  scale is the factor over which to use the checkerboard function
  cellLen is the length of the cells
  
  e.g.: scale and cellLen need to be adjusted accordingly
  It has been used as: scale = 10, cellLen = 1/120.0, which creates a DEM with
  10 times less cells along an axis.
  """
    for f in glob('d://test//Demmin//*.tif'):
        os.unlink(f)

    files = createDEM(xmin, xmax, ymin, ymax, scale)

    if len(files) > 1:
        outputFile = r'd://test//Demmin//out.tif'
        command = ['', '-o', outputFile]  # leading '' stands in for argv[0], which gdal_merge skips

        for i in range(0, len(files)):
            command.append(files[i])
        print(command)
        print('Merging tiles')
        gdal_merge.main(command)
        print('Translate to .map')
        translateMap('d://test//Demmin//out.tif',
                     'd://test//Demmin//merge_test.map')
        print('Resample to low resolution')
        resample_map(xmin, xmax, ymin, ymax, cellLen, cellLen,
                     'merge_test.map', 'resample_dem.map')
        translateBack('resample_dem.map', 'resample_map.tif')
def int_function():
    global output_full

    output_full = os.path.join(input_directory, 'Merged.tif')

    if len(final_list) == 2:
        filea = final_list[0]
        fileb = final_list[1]
        filec = ''
        filed = ''
    elif len(final_list) == 3:
        filea = final_list[0]
        fileb = final_list[1]
        filec = final_list[2]
        filed = ''
    elif len(final_list) == 4:
        filea = final_list[0]
        fileb = final_list[1]
        filec = final_list[2]
        filed = final_list[3]

    sys.argv = ['q', '-v', filea, fileb, filec, filed, "-o", output_full]
    # drop the empty placeholders so gdal_merge does not try to open ''
    sys.argv = [a for a in sys.argv if a != '']
    gm.main()

    print('Finished merging files...')
def retrieve_SRTM(url_prefix, file_prefix, url_suffix, demLoc, case, xmin, ymin, xmax, ymax):
    """
    This function retrieves an SRTM DEM from the designated location, merges it together into a dem for a wanted target area
    Inputs:
        url_prefix:     the website where the SRTM tiles are located
        file_prefix:    file prefix of SRTM tile files
        url_suffix:     file suffix of SRTM tile files
        demLoc:         Folder where SRTM data should go to
        case:           Name of the DEM file to be used
        xmin:           minimum longitude
        xmax:           maximum longitude
        ymin:           minimum latitude
        ymax:           maximum latitude
    Outputs:
        A DEM over wanted area in original resolution at designated location and filename. Provided in GeoTIFF and PCRaster
    """
    # url_prefix=http://droppr.org/srtm/v4.1/6_5x5_TIFs/
    # url_prefix=ftp://srtm.csi.cgiar.org/SRTM_v41/SRTM_Data_GeoTIFF/
    tileMinX = int((np.round(xmin) + 180) / 5 + 1)
    tileMaxX = int((np.round(xmax) + 180) / 5 + 1)
    tileMinY = int((60 - np.round(ymax)) / 5 + 1)
    tileMaxY = int((60 - np.round(ymin)) / 5 + 1)
    print('Retrieving DEM tiles minX: %d maxX: %d, minY: %d, maxY: %d' % (tileMinX, tileMaxX, tileMinY, tileMaxY))
    # loop over all tiles covering the requested bounding box
    for tileLon in range(tileMinX, tileMaxX+1):
        for tileLat in range(tileMinY, tileMaxY+1):
            try:
                fileName = str(file_prefix + '%02.f_%02.f' + url_suffix) % (tileLon, tileLat)
                url = url_prefix + fileName
                fileTarget = os.path.join(demLoc, fileName)
                print('Retrieving ' + url)
                urllib.urlretrieve(url, fileTarget)
                print('Unzipping %s' % (fileTarget))
                zf = zipfile.ZipFile(fileTarget , 'r')
                nameList = zf.namelist()
                for n in nameList:
                    outFile = open(os.path.join(demLoc, n), 'wb')
                    outFile.write(zf.read(n))
                    outFile.close()
                zf.close()
                os.unlink(fileTarget)
            except Exception:
                print('No suitable tile found, going to next tile...')
    # call gdal_merge to stitch everything together
    temporary_dem = os.path.join(demLoc, 'temp.tif')
    cut_dem       = os.path.join(demLoc, case + '_rawSRTM.tif')
    pcr_dem       = os.path.join(demLoc, case + '_rawSRTM.map')
    source_dems   = os.path.join(demLoc, 'srtm*.tif')
    # gdal_merge does not expand wildcards itself, so glob the pattern first
    # (assumes `from glob import glob` at module level)
    gdal_merge.main(argv=['dummy', '-o', temporary_dem] + glob(source_dems))
    cutMap(xmin, ymin, xmax, ymax, temporary_dem, cut_dem)  # this is the final lat lon map
    translateMap(cut_dem, pcr_dem)
    removeFiles(os.path.join(demLoc, 'srtm*.tif'))
    removeFiles(os.path.join(demLoc, 'srtm*.hdr'))
    removeFiles(os.path.join(demLoc, 'srtm*.tfw'))
    os.unlink(os.path.join(demLoc, 'readme.txt'))
    os.unlink(os.path.join(demLoc, 'temp.tif'))

    return pcr_dem
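
# A hypothetical call, using the mirror from the comments above and the
# SRTM v4.1 tile naming scheme (e.g. srtm_38_03.zip); bounds are examples:
# dem = retrieve_SRTM('http://droppr.org/srtm/v4.1/6_5x5_TIFs/', 'srtm_',
#                     '.zip', '/tmp/dem', 'rhine',
#                     xmin=4.0, ymin=50.0, xmax=8.0, ymax=53.0)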
    def create_composite(self, filename, *rasters):
        """Create a composite raster from a set of rasters.

        filename : The name of the file to save the composite raster to
        returns a raster.
        """
        outfile = os.path.join(self.path, filename)
        if os.path.exists(outfile):
            os.remove(outfile)
        # gdal_merge skips argv[0], and each creation option must be its own
        # '-co', 'KEY=VALUE' pair rather than one combined token
        gdal_merge_args = ["", "-co", "PHOTOMETRIC=RGB", "-o", outfile, "-separate"] \
            + [os.path.join(raster.path, raster.raster_name) for raster in rasters]
        logger.debug("calling gdal_merge with '%s'", ' '.join(gdal_merge_args))
        gdal_merge.main(gdal_merge_args)
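
# Sketch of the inputs create_composite expects: objects exposing the `path`
# and `raster_name` attributes read above (hypothetical stand-in type):
# from collections import namedtuple
# Raster = namedtuple("Raster", ["path", "raster_name"])
# bands = [Raster("/tmp", "red.tif"), Raster("/tmp", "green.tif"),
#          Raster("/tmp", "blue.tif")]
# composite.create_composite("rgb.tif", *bands)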
Example #9
def generate_all_bands(unprocessedBandPath, granule, outputPathSubdirectory):
    #granuleBandTemplate =  granule[4:11]+granule[19:-1]+'1_'
    if not os.path.exists(outputPathSubdirectory + "/IMAGE_DATA"):
        os.makedirs(outputPathSubdirectory + "/IMAGE_DATA")
    results = []
    outPutTiff = granule[:-1] + '1' + '16Bit'
    #outPutVRT =  granule[:-1]+'1' + '16Bit-AllBands.vrt'
    outPutFullPath = outputPathSubdirectory + "/IMAGE_DATA/"
    #outPutFullVrt = outputPathSubdirectory + "/IMAGE_DATA/" + outPutVRT
    #inputPath = unprocessedBandPath + granuleBandTemplate
    inputPath = unprocessedBandPath + granule
    print(inputPath)

    bands = {
        "band_04": inputPath + "B04.jp2",
        "band_03": inputPath + "B03.jp2",
        "band_02": inputPath + "B02.jp2"
    }
    #cmd = ['gdalbuildvrt', '-resolution', 'user', '-tr' ,'20', '20', '-separate' ,outPutFullVrt]
    #cmd = ['gdalbuildvrt','-ot','Byte','-scale','0','10000','0','255',outPutFullVrt]

    for key, value in bands.items():
        outPutFullPath_image = outPutFullPath + outPutTiff + '_' + key + '.tif'
        cmd = [
            'gdal_translate', '-ot', 'Byte', '-scale', '0', '4096', '0', '255'
        ]
        cmd.append(value)
        cmd.append(outPutFullPath_image)
        results.append(outPutFullPath_image)
        my_file = Path(outPutFullPath_image)
        if not my_file.is_file():
            subprocess.call(cmd)
    #  my_file = Path(outPutFullVrt)
    #  if not my_file.is_file():
    #      # file exists
    merged = outputPathSubdirectory + "/merged.tif"
    jpg_out = outputPathSubdirectory + "/merged.jpg"
    #params = ['',"-of", "GTiff", "-o", merged]
    params = [
        '', '-v', '-ot', 'Byte', '-separate', '-of', 'GTiff', '-co',
        'PHOTOMETRIC=RGB', '-o', merged
    ]

    for image in results:  # avoid shadowing the granule parameter
        params.append(image)
    gdal_merge.main(params)

    subprocess.call([
        'gdal_translate', '-of', 'JPEG', '-scale', '-co', 'worldfile=yes',
        merged, jpg_out
    ])
def mergeRasters(inRasterFilePath, outMergedRasterFile, inRasterFileNamePatternToMatch):
    argv = []
    argv.append('')
    argv.append('-q')
    argv.append('-v')
    argv.append('-separate')    #this would add each raster as a separate band
    argv.append('-o')
    argv.append(outMergedRasterFile)

    # select all raster files that match the filename pattern; gdal_merge
    # does not expand wildcards, so glob the pattern first
    # (assumes `from glob import glob` at module level)
    inRasterFile = os.path.join(inRasterFilePath, inRasterFileNamePatternToMatch)
    argv.extend(glob(inRasterFile))
    gdal_merge.main(argv)
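
# For example (hypothetical paths; the pattern is globbed before being
# handed to gdal_merge, as noted above):
# mergeRasters("/data/landsat/bands", "/data/landsat/stack.tif", "LC08_*_B?.TIF")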
def assemblyImages(dlg, images_list, output_file, data_type, no_data_value, epsg):

    # Use gdal_merge to mosaic the source image files.
    # Where several images overlap, the last image merged wins.

    # Get the resolution of the input raster
    pixel_size_x, pixel_size_y = getPixelWidthXYImage(images_list[0])

    if 'Linux' in os_system :
        # Build the gdal_merge command
        command = [ '',
                    '-o',
                    output_file,
                    '-of',
                    FORMAT_IMA,
                    '-a_nodata',
                    str(no_data_value),
                    "-ps",
                    str(pixel_size_x),
                    str(pixel_size_y)]

        for ima in images_list :
            command.append(ima)

        try:
            if gm is None :
                # command is a list; join it before shelling out
                exit_code = os.system("gdal_merge " + " ".join(command))
            else :
                gm.main(command)
        except Exception:
            messErreur(dlg, u"Error assembling %s with gdal_merge !!!" % (output_file))
            return None
    else :
        try:
            #processing.algorithmHelp("gdal:merge")
            #processing.runalg('gdalogr:merge', images_list, False, False, no_data_value, data_type, output_file)
            parameters = {"INPUT":images_list, "PCT":False, "SEPARATE":False, "NODATA_OUTPUT":no_data_value, "DATA_TYPE":data_type, "OUTPUT":output_file}
            processing.run('gdal:merge', parameters)
        except Exception:
            messErreur(dlg, "Error assembling %s with gdal:merge !!!" % (output_file))
            return None

    # If the merged output file has lost its projection, force the projection to the default value
    prj = getProjectionImage(output_file)

    if (prj is None or prj == 0) and (epsg != 0):
        updateReferenceProjection(output_file, int(epsg))

    return
    def NaturalColor432(self, _outnameimg, _band4, _band3, _band2):
        print('!!!!!!! NaturalColor432 !!!!!!!')
        self.filename = 'B432_%s' % os.path.basename(_outnameimg)
        self.outdir = os.path.dirname(_outnameimg)
        self.band4 = _band4
        self.band3 = _band3  # was _band4: copy/paste bug
        self.band2 = _band2
        self.outname = os.path.join(self.outdir, self.filename)
        self.outiff = "GTiff"
        print(self.outname)
        sys.argv = [
            '', '-separate', '-of', self.outiff, '-o', self.outname,
            self.band4, self.band3, self.band2
        ]
        gdal_merge.main()
Example #13
def merge_grids(files,
                outfilename='merged.tif',
                nodata_value='-9999',
                working_dir='/media/rmsare/data/fixed_merged_data/'):
    # the leading '' stands in for argv[0], which gdal_merge skips
    #print("Merging:")
    #print([f.filename.split('/')[-1] for f in files])

    # TODO: add file parameter, don't hard-code ArcInfo grid file
    sys.argv = ['', '-o', outfilename, '-init', nodata_value,
                '-a_nodata', nodata_value]
    sys.argv += [f.filename + '/w001001.adf' for f in files]
    gdal_merge.main()

    for f in files:
        f.times_processed += 1
    def HealthyVegetation562(self, _outnameimg, _band5, _band6, _band2):
        print('!!!!!!! HealthyVegetation562 !!!!!!!')
        self.filename = 'B562_%s' % os.path.basename(_outnameimg)
        self.outdir = os.path.dirname(_outnameimg)
        self.band6 = _band6
        self.band5 = _band5
        self.band2 = _band2
        self.outname = os.path.join(self.outdir, self.filename)
        self.outiff = "GTiff"
        print(self.outname)
        sys.argv = [
            '', '-separate', '-of', self.outiff, '-o', self.outname,
            self.band5, self.band6, self.band2
        ]
        gdal_merge.main()
    def ColorInfraredVegetation543(self, _outnameimg, _band5, _band4, _band3):
        print('!!!!!!! ColorInfraredVegetation543 !!!!!!!')
        self.filename = 'B543_%s' % os.path.basename(_outnameimg)
        self.outdir = os.path.dirname(_outnameimg)
        self.band5 = _band5
        self.band4 = _band4
        self.band3 = _band3
        self.outname = os.path.join(self.outdir, self.filename)
        self.outiff = "GTiff"
        print(self.outname)
        sys.argv = [
            '', '-separate', '-of', self.outiff, '-o', self.outname,
            self.band5, self.band4, self.band3
        ]
        gdal_merge.main()
    def Agriculture652(self, _outnameimg, _band6, _band5, _band2):
        print('!!!!!!! Agriculture652 !!!!!!!')
        self.filename = 'B652_%s' % os.path.basename(_outnameimg)
        self.outdir = os.path.dirname(_outnameimg)
        self.band5 = _band5
        self.band6 = _band6
        self.band2 = _band2
        self.outname = os.path.join(self.outdir, self.filename)
        self.outiff = "GTiff"
        print(self.outname)
        #sys.argv = ['-pct','-separate','-of',self.outiff,'-o',self.outname,self.band6,self.band5,self.band2]
        sys.argv = [
            '', '-separate', '-of', self.outiff, '-o', self.outname,
            self.band6, self.band5, self.band2
        ]
        gdal_merge.main()
Example #17
def merge_dir(dir_input):
    logging.debug("Merging {}".format(dir_input))
    # HACK: for some reason output tiles were both being called 'probability'
    import importlib
    importlib.reload(gr)
    TILE_SIZE = str(1024)
    file_tmp = dir_input + '_tmp.tif'
    file_out = dir_input + '.tif'
    file_int = dir_input + '_int.tif'
    co = list(
        itertools.chain.from_iterable(
            map(lambda x: ['-co', x], CREATION_OPTIONS)))
    files = []
    for region in os.listdir(dir_input):
        dir_region = os.path.join(dir_input, region)
        files = files + [
            os.path.join(dir_region, x)
            for x in sorted(os.listdir(dir_region)) if x.endswith('.tif')
        ]
    gm.main(['', '-n', '0', '-a_nodata', '0'] + co + ['-o', file_tmp] + files)
    #gm.main(['', '-n', '0', '-a_nodata', '0', '-co', 'COMPRESS=DEFLATE', '-co', 'ZLEVEL=9', '-co', 'TILED=YES', '-o', file_tmp] + files)
    shutil.move(file_tmp, file_out)
    logging.debug("Calculating...")
    gdal_calc.Calc(A=file_out,
                   outfile=file_tmp,
                   calc='A*100',
                   NoDataValue=0,
                   type='Byte',
                   creation_options=CREATION_OPTIONS,
                   quiet=True)
    shutil.move(file_tmp, file_int)
    dir_tile = os.path.join(dir_input, 'tiled')
    if os.path.exists(dir_tile):
        logging.debug('Removing {}'.format(dir_tile))
        shutil.rmtree(dir_tile)
    import subprocess
    file_cr = dir_input + '_cr.tif'
    logging.debug("Applying symbology...")
    subprocess.run(
        'gdaldem color-relief {} /FireGUARD/FireSTARR/col.txt {} -alpha -co COMPRESS=LZW -co TILED=YES'
        .format(file_int, file_cr),
        shell=True)
    dir_tile = common.ensure_dir(dir_tile)
    subprocess.run(
        'python /usr/local/bin/gdal2tiles.py -a 0 -z 5-12 {} {} --processes={}'
        .format(file_cr, dir_tile, os.cpu_count()),
        shell=True)
Example #18
def merge_rasters(grid_refs, raster_paths):
    print('Finding multiple raster files...')
    # Get file name for storage of raster files
    merged_filename_txt = name_raster_file(grid_refs, filetype='txt')
    # Store the location of all the raster files to be merged
    merged_raster_path_txt = path.join(path_to_raster_data,
                                       merged_filename_txt)
    rasterfiles_to_txt(grid_refs, raster_paths, merged_raster_path_txt)
    # Get file name for the merged raster file
    merged_filename_adf = name_raster_file(grid_refs, filetype='adf')
    # Merge the raster files; gdal_merge parses args starting at 1
    print('Merging multiple raster files...')
    merged_raster_path_adf = path.join(path_to_raster_data,
                                       merged_filename_adf)
    gdal_merge.main([
        '', '-o', merged_raster_path_adf, '-v', '--optfile',
        merged_raster_path_txt
    ])
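
# gdal_merge's --optfile flag reads extra arguments from a text file, which
# sidesteps shell command-length limits. A minimal sketch of what a helper
# like rasterfiles_to_txt might do (hypothetical; assumes one path per line
# and that no path contains whitespace, since the optfile is split on it):
def rasterfiles_to_txt_sketch(raster_paths, txt_path):
    with open(txt_path, 'w') as f:
        for p in raster_paths:
            f.write(p + '\n')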
Example #19
def band_merge(inputimg, outputimg):
    # gdal_merge skips argv[0], so start with an empty placeholder
    L = ['']
    L.extend(
        ['-o', outputimg, '-of', 'GTiff', '-n', '-9999', '-a_nodata', '-9999'])
    L.append('-separate')
    for bands in inputimg:
        L.append(bands)
    sys.argv = L
    gdal_merge.main()
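
# Usage is then simply (hypothetical band files):
# band_merge(['B04.tif', 'B03.tif', 'B02.tif'], 'rgb_stack.tif')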
def assemblyImages(dlg, images_list, output_file, data_type, no_data_value,
                   epsg):

    # Use gdal_merge to mosaic the source image files.
    # Where several images overlap, the last image merged wins.
    """
    try:
        processing.runalg('gdalogr:merge', images_list, False, False, no_data_value, data_type, output_file)
    except:
        messErreur(dlg,u"Erreur de assemblage par gdalogr:merge de %s !!!"%(output_file))
        return None
    """

    # Get the resolution of the input raster
    pixel_size_x, pixel_size_y = getPixelWidthXYImage(images_list[0])

    # Build the gdal_merge command
    command = [
        '', '-o', output_file, '-of', FORMAT_IMA, '-a_nodata',
        str(no_data_value), "-ps",
        str(pixel_size_x),
        str(pixel_size_y)
    ]

    for ima in images_list:
        command.append(ima)

    try:
        gm.main(command)
    except Exception:
        messErreur(
            dlg,
            u"Error assembling %s with gdal_merge !!!" % (output_file))
        return None

    # If the merged output file has lost its projection, force the projection to the default value
    prj = getProjectionImage(output_file)

    if (prj is None or prj == 0) and (epsg != 0):
        updateReferenceProjection(output_file, int(epsg))

    return
Example #21
def merge_4_bands(parcel_id, crop, out_tif_folder_base):
    # convert parcel_id to a string that can be used as filename
    parcel_id_as_filename = batch_utils.convert_string_to_filename(parcel_id)
    chip_folder = str(parcel_id_as_filename) + '_' + crop

    out_tif_folder = out_tif_folder_base + "/" + chip_folder
    downloaded_band04_files_pattern = out_tif_folder + "/*/*.B04.tif"
    downloaded_band04_files = glob(downloaded_band04_files_pattern)

    out_merge_folder = out_tif_folder + "_merged_4bands"
    if not os.path.exists(out_merge_folder):
        os.makedirs(out_merge_folder)

    for downloaded_band04_file in downloaded_band04_files:
        band04_file_base = os.path.basename(downloaded_band04_file)
        band_file_path = os.path.dirname(downloaded_band04_file)
        tile_name = band04_file_base.split(".")[0]

        #get acquisition date from tile name
        acq_date_full = tile_name.split("_")[2]
        acq_date = acq_date_full[0:4] + "-" + acq_date_full[
            4:6] + "-" + acq_date_full[6:8]

        # check if the other bands are also available for this tile
        if os.path.isfile(band_file_path + "/" + tile_name + ".B02.tif") and \
            os.path.isfile(band_file_path + "/" + tile_name + ".B03.tif") and \
            os.path.isfile(band_file_path + "/" + tile_name + ".B08.tif"):
            # we can merge these bands
            out_merge = out_merge_folder + "/" + tile_name + ".tif"
            print(out_merge)
            if not os.path.isfile(out_merge):
                # e:\MS\ES\Catalunia2019\raster\chips\343130\2019-05-01\B08.tif
                band02 = band_file_path + "/" + tile_name + ".B02.tif"
                band03 = band_file_path + "/" + tile_name + ".B03.tif"
                band04 = band_file_path + "/" + tile_name + ".B04.tif"
                band08 = band_file_path + "/" + tile_name + ".B08.tif"

                gm.main([
                    '', '-o', out_merge, '-ot', 'Int16', '-separate', band02,
                    band03, band04, band08
                ])
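
# A hypothetical call (parcel id borrowed from the example path in the
# comment above; crop name assumed):
# merge_4_bands('343130', 'wheat', '/data/chips')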
Example #22
def controller_for_mosaic(work_dir, input_tif_list, output_tif):
    #work_dir: working directory for the mosaic step; relative paths below are resolved against it
    #input_tif_list: input TIFF files, as a list
    #output_tif: output mosaicked TIFF
    #Example:
    #input_tif_list = []
    #input_tif_list.append('taihu/mosaic/0407RVUB2-3-4-8.tif')
    #input_tif_list.append('taihu/mosaic/0407RVVB2-3-4-8.tif')
    #controller_for_mosaic('/opt/amyz_test', input_tif_list, 'taihu/mosaic/merge5.tif')
    #The example above works in the /opt/amyz_test directory and mosaics
    #taihu/mosaic/0407RVUB2-3-4-8.tif with taihu/mosaic/0407RVVB2-3-4-8.tif
    #into taihu/mosaic/merge5.tif

    os.chdir(work_dir)
    input_all_list = []
    input_all_list.append('')
    input_all_list.append('-o')
    input_all_list.append(output_tif)

    for input_tif in input_tif_list:
        input_all_list.append(input_tif)
    gm.main(input_all_list)
def autofmask(dirname):
    os.chdir(dirname)
    MTLfile = glob(os.path.join(dirname, '*MTL.TXT'))
    refname=os.path.join(dirname,'ref.img')
    themalname=os.path.join(dirname,'thermal.img')
    srcReflist=os.path.join(dirname,'L*_B[1,2,3,4,5,7].TIF')
    srcReflist=glob(srcReflist)
    srcThemal=os.path.join(dirname,'L*_B6.TIF')
    srcThemal=glob(srcThemal)
    anglesname=os.path.join(dirname,'angles.img')
    toaname=os.path.join(dirname,'toa.img')
    # stack the band files
    refMergeArgv = ['', '-separate', '-of', 'HFA', '-co', 'COMPRESSED=YES', '-o', refname]
    refMergeArgv.extend(srcReflist)
    themalMergeArgv = ['', '-separate', '-of', 'HFA', '-co', 'COMPRESSED=YES', '-o',themalname]
    themalMergeArgv.extend(srcThemal)
    if not os.path.exists(refname):
        gdal_merge.main(refMergeArgv)
    else:
        print('Skipping multispectral stack (already exists)')
    if not os.path.exists(themalname):
        gdal_merge.main(themalMergeArgv)
    else:
        print('Skipping thermal stack (already exists)')
    # generate the angles file
    # read metadata from the MTL file
    MTLfile = MTLfile[0]
    mtlInfo = fmask.config.readMTLFile(MTLfile)
    if not os.path.exists(anglesname):
        imgInfo = fileinfo.ImageInfo(refname)
        corners = landsatangles.findImgCorners(refname, imgInfo)
        nadirLine = landsatangles.findNadirLine(corners)
        extentSunAngles = landsatangles.sunAnglesForExtent(imgInfo, mtlInfo)
        satAzimuth = landsatangles.satAzLeftRight(nadirLine)
        landsatangles.makeAnglesImage(refname, anglesname, nadirLine, extentSunAngles, satAzimuth, imgInfo)
    # generate auxiliary temporary file: TOA reflectance
    if not os.path.exists(toaname):
        fmask.landsatTOA.makeTOAReflectance(refname, MTLfile, anglesname, toaname)
    print("begin this")
    LandsatFmaskRoutine(MTLfile)
Example #24
    def merge(self):
        l,t,r,b,level = self.l,self.t,self.r,self.b, self.levels
        tms = srsweb.GlobalMercator() 
        rs,re,cs,ce = tms.calcRowColByLatLon(l,t,r,b, level) 
        total_pix_w, total_pix_h = (ce-cs+1)*256, (re-rs+1)*256
        out_w, out_h = self.width, self.height
        fcnt_col, fcnt_row = int(math.ceil(total_pix_w*1.0/out_w)), int(math.ceil(total_pix_h*1.0/out_h))
        ftile_list = [[] for i in range(fcnt_row*fcnt_col)]
        for row in range(rs, re+1):
            _row = (row-rs)*256 // out_h  # floor division: used as a list index
            for col in range(cs, ce+1):
                _col = (col-cs)*256 // out_w
                _idx = fcnt_col * _row + _col
                ftile_list[_idx].append((row, col))

        out_dir = self.out
        tmp_dir = os.path.join(out_dir, TEMP_DIR)
        cmd_mgr = os.path.join(cm.app_path(), 'gdal_merge.py')
        ext = 'tif' if self.file_format.lower()=='tif' else 'img'

        for i in range(len(ftile_list)):
            tile_list = ftile_list[i]
            _out_file = os.path.join(out_dir, "level%d_%d.%s" % (level, i, ext))
            _in_files = []

            for (row,col) in tile_list:
                _path = os.path.join(tmp_dir, GOOGLE_TILE_LOCAL_NAME % (level,row,col))
                if os.path.isfile(_path):
                    _in_files.append('%s' % _path)

            if not _in_files:
                logger.info("No tiles found to merge.")
                return None
            cmd_out_file = '%s' % _out_file
            cmd_in_files = ' '.join(_in_files)
            cmd = '%s -o %s %s' % (cmd_mgr, cmd_out_file, cmd_in_files)
            #print(cmd)
            argv = cmd.split()
            gdal_merge.main(argv)
            logger.info("Finished merging file %d: %s" % (i+1, cmd_out_file))
Example #25
    def _merge_srtm(self):
        temporary_dem = os.path.join(self.outputdir, 'temp.tif')
        cut_dem = os.path.join(self.outputdir, '_latlon.tif')
        source_dems = os.path.join(self.outputdir, 'srtm*.tif')

        if not os.path.isdir(os.path.join(self.outputdir)):
            raise Exception(
                "Output folder does not exist! Tip: run retrieve_strm()")

        # gdal_merge does not expand wildcards itself, so glob the pattern
        # first (assumes `from glob import glob` at module level)
        gdal_merge.main(argv=['dummy', '-o', temporary_dem] + glob(source_dems))

        # this is the final lat lon map
        self._cutMap(temporary_dem, cut_dem)

        # remove all redundant files
        self._removeFiles(os.path.join(self.outputdir, 'srtm*.tif'))
        self._removeFiles(os.path.join(self.outputdir, 'srtm*.hdr'))
        self._removeFiles(os.path.join(self.outputdir, 'srtm*.tfw'))
        os.unlink(os.path.join(self.outputdir, 'readme.txt'))
        os.unlink(os.path.join(self.outputdir, 'temp.tif'))

        self.merged_srtm_file = cut_dem
def merge_geotiffs(input_filenames, output_filename, nodata_value=None):
    if isinstance(input_filenames, str):
        input_filenames = [input_filenames]

    output_filename_option = [GDAL_MERGE_OUTPUT_FLAG, output_filename]

    #gdal_merge_command = [PYTHON_COMMAND, GDAL_MERGE_SCRIPT]

    gdal_merge_args = output_filename_option

    if nodata_value:
        nodata_option = [GDAL_MERGE_NODATA_FLAG, str(nodata_value)]
        gdal_merge_args += nodata_option

    gdal_merge_args += input_filenames

    #gdal_merge_command += gdal_merge_args
    #print("running command: %s" % ' '.join(gdal_merge_command))
    #call(gdal_merge_command)

    gdal_merge_argv = ['gdal_merge'] + gdal_merge_args

    gdal_merge_argv = [str(arg) for arg in gdal_merge_argv]

    print('')
    print('running command: %s' % ' '.join(gdal_merge_argv))
    print('')

    gc.collect()

    try:
        gdal_merge.main(gdal_merge_argv)
    except MemoryError:
        print('')
        print('gdal_merge ran out of memory')
        print('')

    gc.collect()
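
# For example (hypothetical tiles; GDAL_MERGE_OUTPUT_FLAG and
# GDAL_MERGE_NODATA_FLAG are module constants, presumably '-o' and a
# nodata flag such as '-a_nodata'):
# merge_geotiffs(['tile_a.tif', 'tile_b.tif'], 'mosaic.tif', nodata_value=0)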
Example #27
def merge_files(outputPath, index, tile):
    """
    This is basically a wrapper around gdal_merge.py which is executed as a
    Python module.
    """

    # Delete the stacked raster file if that exists already
    stackedRaster = "%s/%s/%s/%s" % (outputPath, index, tile, "%s.tif" % (index))
    if os.path.exists(stackedRaster):
        os.remove(stackedRaster)
    tifs = []
    # Put the command line options to a Python list
    options = ["gdal_merge.py", "-o", os.path.abspath(stackedRaster), "-of", "GTiff", "-separate"]
    for root, dirs, files in os.walk("%s/%s/%s" % (outputPath, index, tile)):
        for tif in files:
            matchObj = re.search('.*%s.*tif$' % tile, tif)
            if matchObj is not None:
                tifs.append("%s/%s/%s/%s" % (outputPath, index, tile, matchObj.group(0)))
    # Sort the raster files according to their file name
    tifs.sort()
    options.extend(tifs)
    # Call gdal_merge to stack the raster files
    gdal_merge.main(options)
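
# A hypothetical invocation, stacking every per-date raster for one
# index/tile pair (names assumed):
# merge_files('/data/indices', 'NDVI', 'T33UVP')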
Example #28
# python 3
# user_path_input = input("Enter the path to {satellite} files root folder: ".format(satellite=sat))

assert os.path.exists(user_path_input), "Path not found: " + str(user_path_input)
root_folder = user_path_input
print('Path found, looking for {satellite} bands...'.format(satellite=sat))

for dirpath, dirnames, filenames in os.walk(root_folder):
    for dirname in dirnames:
        dir_path = os.path.join(dirpath, dirname)
        if sat == 'Sentinel-2':
            if any(re.match(r'.*B\d{2}\.jp2', f) for f in os.listdir(dir_path)):
                id_0 = dirname
                id_1 = os.path.basename(os.path.dirname(dir_path))
                scene_id = id_1.split('_')[-1] + '_' + id_0  # avoid shadowing the id() builtin
                out_file_name = scene_id + '.tif'
                print(dir_path, scene_id, out_file_name)
                bands = [os.path.join(dir_path, band + '.jp2') for band in ['B02', 'B03', 'B04']]
                print('Merging ' + dir_path)
                gdal_merge.main(['', '-separate', '-o', os.path.join(dir_path, out_file_name), bands[-1], bands[-2], bands[-3]])
        elif sat == 'Landsat-8':
            if any(re.match(r'LC08.*B\d{1,2}\.TIF', f) for f in os.listdir(dir_path)):
                id_0 = dirname
                sat_id, path_row, date = id_0.split('_')[0], id_0.split('_')[2], id_0.split('_')[3]
                scene_id = '_'.join((sat_id, path_row, date))
                band_ids = ['B2', 'B3', 'B4']
                bands = [os.path.join(dir_path, id_0 + '_' + band + '.TIF') for band in band_ids]
                out_file_name = scene_id + '_' + ''.join(list(reversed(band_ids))).replace('B', '') + '_merged' + '.tif'
                print(dir_path, scene_id, out_file_name)
                print('Merging ' + dir_path)
                gdal_merge.main(['', '-separate', '-o', os.path.join(dir_path, out_file_name), bands[-1], bands[-2], bands[-3]])
Example #29
def render(qgs, sol, extension):
    #QgsApplication.setPrefixPath('/usr/bin/qgis', True)
    qgs.setPrefixPath('/usr/bin/qgis', True)
    #qgs = QgsApplication([], False)
    #qgs.initQgis()
    extension = str(extension)
    print('the extension is:', extension)

    #sol = solution.Solution(int(extension))

    #first read through to determine certain values present in the file
    maxVal = 0
    minVal = 0
    mindx = 100
    mindy = 100

    dt = datetime.datetime.now().strftime('%m-%d_%H-%M')

    directory = extension + ' ' + dt + '/'
    if not os.path.exists(directory):
        os.makedirs(directory)

    fnBase = 'qraster.tif'
    driver = gdal.GetDriverByName('GTiff')
    rData = {}
    hData = {}  #header data - [amr, mx, my, xlow, mx*dx, ylow+my*dy, my*dy]
    amr = {}  #amr levels tying level to file/grid number

    for state in sol.states:
        #set dict with grid num as key to empty list
        gn = state.patch.patch_index
        rData[gn] = []
        hData[gn] = []

        rValues = []
        hValues = []

        hData[gn].append(state.patch.level)  #AMR level

        hData[gn].append(state.patch.dimensions[0].num_cells)  #mx
        hData[gn].append(state.patch.dimensions[1].num_cells)  #my

        hData[gn].append(state.patch.dimensions[0].lower)  #xlow
        hData[gn].append(state.patch.dimensions[1].upper)  #yhigh

        hData[gn].append(state.patch.dimensions[0].delta)  #dx
        hData[gn].append(state.patch.dimensions[1].delta)  #dy
        if state.patch.dimensions[0].delta < mindx:
            mindx = state.patch.dimensions[
                0].delta  #smallest dx across all patches
        if state.patch.dimensions[1].delta < mindy:
            mindy = state.patch.dimensions[
                1].delta  #smallest dy across all patches

        for i in range(state.patch.dimensions[0].num_cells):
            values = []
            for j in range(state.patch.dimensions[1].num_cells):
                if state.q[0, i, j] != 0:  #check if depth is 0
                    values.append(state.q[3, i, j])
                    if state.q[3, i, j] > maxVal:  #largest data value
                        maxVal = state.q[3, i, j]
                    elif state.q[3, i, j] < minVal:  #smallest data value
                        minVal = state.q[3, i, j]
                elif state.q[0, i, j] == 0:
                    values.append(None)  #append None type if depth is 0

            rValues.append(values)

        rData[gn] = rValues

    for key, items in rData.items():
        amrVal = hData[key][0]  #amr level
        xMax = hData[key][1]  #mx header
        yMax = hData[key][2]  #my header
        xlow = hData[key][3]  #xlow header
        yhigh = hData[key][4]  #yhigh header
        width = hData[key][5]  #dx headers
        height = hData[key][6]  #dy headers

        fArray = rData[key]

        fName = directory + str(key) + fnBase
        fTemp = directory + str(key) + 'temp' + fnBase
        fNameFin = directory + str(key) + 'fin' + fnBase

        amr.setdefault(amrVal, []).append(fName)

        ds = driver.Create(fName,
                           xsize=xMax,
                           ysize=yMax,
                           bands=1,
                           eType=gdal.GDT_Float32)
        if ds is None:
            return
        ndv = -100  #The designated no data value
        band = ds.GetRasterBand(1).ReadAsArray()

        try:
            for i in range(yMax):  #row then column
                for j in range(xMax):
                    if fArray[j][i] is None:
                        # set value to no data, thus making it transparent
                        band[yMax - i - 1][j] = ndv
                    else:
                        band[yMax - i - 1][j] = fArray[j][i]

            # write the filled array once, after the loops; writing per cell
            # inside the inner loop produced the same raster but was far slower
            ds.GetRasterBand(1).WriteArray(band)

            geot = [xlow, width, 0, yhigh, 0, -1 * height]
            ds.SetGeoTransform(geot)
            ds = None

            #reset color map
            rlayer = QgsRasterLayer(fName)
            provider = rlayer.dataProvider()
            provider.setNoDataValue(1, ndv)
            extent = rlayer.extent()
            stats = provider.bandStatistics(1, QgsRasterBandStats.All, extent,
                                            0)
            pipe = QgsRasterPipe()

            width, height = rlayer.width(), rlayer.height()

            fcn = QgsColorRampShader()
            fcn.setColorRampType(QgsColorRampShader.Interpolated)

            midVal = (maxVal + minVal) / 2

            lst = [
                QgsColorRampShader.ColorRampItem(minVal, QColor(0, 0, 255)),
                QgsColorRampShader.ColorRampItem(midVal, QColor(0, 255, 255)),
                QgsColorRampShader.ColorRampItem(maxVal, QColor(255, 0, 0))
            ]

            fcn.setColorRampItemList(lst)
            shader = QgsRasterShader()
            shader.setRasterShaderFunction(fcn)

            renderer = QgsSingleBandPseudoColorRenderer(
                rlayer.dataProvider(), 1, shader)
            rlayer.setRenderer(renderer)
            rlayer.triggerRepaint()

            pipe.set(provider.clone())
            pipe.set(renderer.clone())

            write = QgsRasterFileWriter(fNameFin)
            write.writeRaster(pipe, width, height, extent, rlayer.crs())

        except Exception as ex:
            if type(ex) == IndexError:
                print(ex)
                continue
            else:
                print(ex)
                print(fArray[0][1])
                print(fArray[j][i + 1])
                print(band[yMax - i - 1][j])

                break

    #Merge tifs into one final tif
    sys.path.append('/usr/bin/')
    import gdal_merge as gm

    orderedFiles = [
        '', '-o', directory + '0' + extension + '.tif', '-ps',
        str(mindx),
        str(mindy)
    ]

    for key, val in sorted(amr.items()):
        for i in val:
            f = i[:-11]
            orderedFiles.append(f + 'finqraster.tif')

    sys.argv = orderedFiles
    gm.main()

    print('\nThe rasterization is complete. The file name is',
          '0' + extension + '.tif')
    return
Example #30
# download files
baseurl = "http://iridl.ldeo.columbia.edu/SOURCES/.IRI/.Analyses/.SPI/.SPI-CAMSOPI_3-Month"
filelistingurl = baseurl + "/downloadsarcinfo.html?missing_value=-9999."
page = bs.BeautifulSoup(urllib2.urlopen(filelistingurl))
filelist = []
for tablerow in page.findAll("td"):
    linkobj = tablerow.find("a")
    if linkobj:
        url = baseurl + "/" + linkobj.get("href")
        filename = url.split("=")[-1] + ".asc"  # url ends with 'filename=...'
        print(filename)
        # write data
        with open(filename, "w") as writer:
            writer.write(urllib2.urlopen(url).read())
        # collect file list
        filelist.append(filename)

# merge using gdal (requires python gdal bindings)
# using via python instead of commandline because latter has char limit

# first prep args ala commandline style
curdir = os.path.abspath("")
outfile = os.path.abspath("spi3.tif")
args = [curdir, "-separate", "-o", outfile]
args.extend((os.path.abspath(filepath) for filepath in filelist))

# run it
import gdal_merge

gdal_merge.main(args)
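
# An alternative when the file list is very long: gdal_merge's --optfile
# flag (used in Example #18 above) reads the arguments from a text file,
# keeping them out of both the shell and sys.argv. Sketch:
# optfile = os.path.abspath("spi3_files.txt")
# with open(optfile, "w") as f:
#     f.write("\n".join(os.path.abspath(p) for p in filelist))
# gdal_merge.main(["", "-separate", "-o", outfile, "--optfile", optfile])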
Example #31
for i in cache_list:
    zip_ref = zipfile.ZipFile(i, 'r')
    # with zipfile.ZipFile(i, 'r') as file:
    zip_ref.extract(i[-14:-4] + '.tif', './cache/')
    zip_ref.close()
    filename_list.append(i[-14:-4] + '.tif')

# Creating a mosaic for case of more than one tile:
n_tiles = len(filename_list)
if n_tiles != 1:
    files_to_mosaic = []
    for i in filename_list:
        files_to_mosaic.append('cache/' + i)
    # build argv as a list; the leading '' stands in for argv[0]
    command = ['', '-o', 'cache/sector.tif', '-of', 'GTiff'] + files_to_mosaic
    gdal_merge.main(command)
    data_file = 'cache/sector.tif'
else:
    data_file = 'cache/' + filename_list[0]

# Processing data:
# ----------------------------------------------------------------------------
# Open file GeoTIFF:

# data = gdal.Open(data_file)  # use GDAL to open the file and read as 2D array
# band = data.GetRasterBand(1)
# # getting NaN values:
# nodataval = band.GetNoDataValue()

# # Convert to a numpy array:
# data_array = data.ReadAsArray().astype(np.float)
Example #32
data_folder = './AwsData'

product_request = AwsProductRequest(product_id=product_id,
                                    data_folder=data_folder,
                                    safe_format=True)
product_request.save_data()

for i in [10, 20, 60]:
    # forward slashes avoid accidental backslash escapes in the Windows path
    path = 'A:/lab4/S2A_MSIL2A_20190821T085601_N0213_R007_T36UUA_20190821T115206.SAFE/GRANULE/L2A_T36UUA_A021740_20190821T085815/IMG_DATA/R{}m/'.format(
        i)
    in1 = glob.glob(str(path) + ('*B02_{}m.jp2').format(i))
    in2 = glob.glob(str(path) + '*B03_{}m.jp2'.format(i))
    in3 = glob.glob(str(path) + '*B04_{}m.jp2'.format(i))
    in4 = glob.glob(str(path) + '*_*_*8*_{}m.jp2'.format(i))
    gm.main([
        '', '-separate', '-o', 'AR{}.tif'.format(i), in1[0], in2[0], in3[0],
        in4[0]
    ])

for i in [1, 2, 6]:
    gdal.Warp('12AR{}0.tif'.format(i),
              '1AR{}0.tif'.format(i),
              dstSRS="EPSG:4326")

gdal.Warp('final.tif', [
    'proek_AR10.tif', 'proek_AR20.tif', 'proek_AR60.tif', 'proek_BR10.tif',
    'proek_BR20.tif', 'proek_BR60.tif'
])

gdal.Warp('WrapedImg.tif',
          'AllInOne.tif',
          format='GTiff')
Example #33
def trueColour(argv):

    # TODO - use gdal.GeneralCmdLineProcessor( argv ) instead
    inputdirectory = sys.argv[1]
    outputdirectory = sys.argv[2]
    platformname = sys.argv[3]
    producttype = sys.argv[4]
    aoiwkt = None
    if len(sys.argv) == 6:
        aoiwkt = sys.argv[5]

    if platformname == 'SENTINEL2':
        # find SAFE directory
        safeDirectory = None
        for file in os.listdir(inputdirectory):
            filePath = inputdirectory + file
            print(filePath)
            if os.path.isdir(filePath) and filePath.endswith(".SAFE"):
                safeDirectory = filePath
                break
        if safeDirectory is None:
            sys.exit("Could not find SAFE directory")
        # retrieve the tiff file now
        descriptorPath = safeDirectory + "/MTD_MSIL1C.xml"
        print("Opening dataset " + descriptorPath)
        ds = gdal.Open(descriptorPath)

        footprintGeometryWKT = ds.GetMetadataItem("FOOTPRINT")
        print("FOOTPRINT: " + footprintGeometryWKT)

        tciFileName = None
        subdatasets = ds.GetMetadata_List("SUBDATASETS")
        for subdataset in subdatasets:
            if ":TCI:" in subdataset:
                tciFileName = subdataset.split("=")[1]
                break
        if tciFileName is None:
            sys.exit("Could not find true colour image in subdatasets")

        print("TCI file name " + tciFileName)

        tciDs = gdal.Open(tciFileName)

        fileList = tciDs.GetFileList()

        jp2FilePath = None
        for fileName in fileList:
            if fileName.endswith("_TCI.jp2"):
                jp2FilePath = fileName

        if jp2FilePath is None:
            sys.exit("Could not find jp2 file for true colour image")

        # no delete method available
        #ds.delete();
        #tciDs.delete();

        tciDs = gdal.Open(jp2FilePath)

        intersectionWKT = calculateCutline(footprintGeometryWKT, aoiwkt)

        csvFileDirectory = inputdirectory
        csvFilePath = createCutline(csvFileDirectory, intersectionWKT)

        warpedDs = executeWarp(tciDs, csvFilePath)

        tempFilePath = outputdirectory + '/temp.tiff'
        ds = gdal.Translate(tempFilePath, warpedDs, format='GTiff')
        executeOverviews(ds)
        outputFilePath = outputdirectory + '/productOutput.tiff'
        ds = gdal.Translate(outputFilePath, ds, format='GTiff')

        # a bit of clean up
        os.remove(tempFilePath)

        if intersectionWKT is None:
            productFootprintWKT = footprintGeometryWKT
        else:
            productFootprintWKT = intersectionWKT

        # now write the output json file
        product = {
            "name": "True colour image",
            "productType": "COVERAGE",
            "SRS": "EPSG:4326",
            "envelopCoordinatesWKT": productFootprintWKT,
            "filePath": outputFilePath,
            "description": "True colour image from Sentinel2 platform"
        }
        writeOutput(outputdirectory,
                    "True colour generation using geocento process", [product])

        print "True Colour script finished for SENTINEL2 product(s) at " + inputdirectory

    elif platformname == 'LANDSAT8':
        bandFiles = []
        band8FilePath = None
        # get the required bands
        for file in os.listdir(inputdirectory):
            filePath = inputdirectory + file
            print(filePath)
            if filePath.upper().endswith("_B2.TIF") or \
                    filePath.upper().endswith("_B3.TIF") or \
                    filePath.upper().endswith("_B4.TIF"):
                bandFiles.append(filePath)
            elif filePath.upper().endswith("_B8.TIF"):
                band8FilePath = filePath

        if len(bandFiles) != 3 or band8FilePath is None:
            sys.exit("Missing bands in Landsat8 directory")

        # make sure the bands are arranged in the right order
        bandFiles = sorted(bandFiles, reverse=True)

        # now merge into one file
        mergeFilePath = outputdirectory + '/merge.tiff'
        sys.argv = ['/usr/bin/gdal_merge.py', '-separate', '-o', mergeFilePath]
        sys.argv.extend(bandFiles)
        print(sys.argv)
        gdal_merge.main()

        if not os.path.exists(mergeFilePath):
            sys.exit("Merge failed")

        # pan sharpen the image
        panSharpenFilePath = outputdirectory + '/pansharpen.tiff'
        sys.argv = [
            '/usr/bin/gdal_pansharpen.py', '-nodata', '0', band8FilePath,
            mergeFilePath, panSharpenFilePath
        ]
        print(sys.argv)
        gdal_pansharpen.main()

        if not os.path.exists(panSharpenFilePath):
            sys.exit("Pansharpen failed")

        # stretch the values
        ds = gdal.Open(panSharpenFilePath)
        footprintGeometryWKT = generic.getDatasetFootprint(ds)
        print "FOOTPRINT: " + footprintGeometryWKT

        intersectionWKT = generic.calculateCutline(footprintGeometryWKT,
                                                   aoiwkt)

        csvFileDirectory = outputdirectory
        csvFilePath = generic.createCutline(csvFileDirectory, intersectionWKT)

        warpedDs = executeWarp(ds, csvFilePath)

        tempFilePath = outputdirectory + '/temp.tiff'
        scaleParams = generic.getScaleParams(warpedDs, 255)
        print(scaleParams)
        ds = gdal.Translate(tempFilePath,
                            warpedDs,
                            scaleParams=scaleParams,
                            exponents=[0.5, 0.5, 0.5],
                            format='GTiff')
        executeOverviews(ds)
        outputFilePath = outputdirectory + '/productOutput.tiff'
        ds = gdal.Translate(outputFilePath,
                            ds,
                            outputType=gdal.GDT_Byte,
                            format='GTiff')

        # a bit of clean up
        os.remove(mergeFilePath)
        os.remove(panSharpenFilePath)
        os.remove(tempFilePath)

        if intersectionWKT is None:
            productFootprintWKT = footprintGeometryWKT
        else:
            productFootprintWKT = intersectionWKT

        # now write the output json file
        product = {
            "name": "True colour image",
            "productType": "COVERAGE",
            "SRS": "EPSG:4326",
            "envelopCoordinatesWKT": productFootprintWKT,
            "filePath": outputFilePath,
            "description": "True colour image from Landsat 8 platform"
        }
        writeOutput(outputdirectory,
                    "True colour generation using geocento process", [product])

        print "True Colour script finished for LANDSAT8 STANDARD product(s) at " + inputdirectory

    elif platformname == 'LANDSAT7':
        bandFiles = []
        band8FilePath = None
        # get the required bands
        for file in os.listdir(inputdirectory):
            filePath = inputdirectory + file
            print(filePath)
            if filePath.upper().endswith("_B1.TIF") or \
                    filePath.upper().endswith("_B2.TIF") or \
                    filePath.upper().endswith("_B3.TIF"):
                bandFiles.append(filePath)
            elif filePath.upper().endswith("_B8.TIF"):
                band8FilePath = filePath

        if len(bandFiles) != 3 or band8FilePath is None:
            sys.exit("Missing bands in Landsat7 directory")

        # make sure the bands are arranged in the right order
        bandFiles = sorted(bandFiles, reverse=True)

        # now merge into one file
        mergeFilePath = outputdirectory + '/merge.tiff'
        sys.argv = ['/usr/bin/gdal_merge.py', '-separate', '-o', mergeFilePath]
        sys.argv.extend(bandFiles)
        print(sys.argv)
        gdal_merge.main()

        if not os.path.exists(mergeFilePath):
            sys.exit("Merge failed")

        # pan sharpen the image
        panSharpenFilePath = outputdirectory + '/pansharpen.tiff'
        sys.argv = [
            '/usr/bin/gdal_pansharpen.py', '-nodata', '0', band8FilePath,
            mergeFilePath, panSharpenFilePath
        ]
        print(sys.argv)
        gdal_pansharpen.main()

        if not os.path.exists(panSharpenFilePath):
            sys.exit("Pansharpen failed")

        # stretch the values
        ds = gdal.Open(panSharpenFilePath)
        footprintGeometryWKT = generic.getDatasetFootprint(ds)
        print "FOOTPRINT: " + footprintGeometryWKT

        intersectionWKT = generic.calculateCutline(footprintGeometryWKT,
                                                   aoiwkt)

        csvFileDirectory = outputdirectory
        csvFilePath = generic.createCutline(csvFileDirectory, intersectionWKT)

        warpedDs = executeWarp(ds, csvFilePath)

        tempFilePath = outputdirectory + '/temp.tiff'
        scaleParams = generic.getScaleParams(warpedDs, 255)
        print(scaleParams)
        ds = gdal.Translate(tempFilePath,
                            warpedDs,
                            scaleParams=scaleParams,
                            exponents=[0.5, 0.5, 0.5],
                            format='GTiff')
        executeOverviews(ds)
        outputFilePath = outputdirectory + '/productOutput.tiff'
        ds = gdal.Translate(outputFilePath,
                            ds,
                            outputType=gdal.GDT_Byte,
                            noData=0,
                            format='GTiff')

        # a bit of clean up
        os.remove(mergeFilePath)
        os.remove(panSharpenFilePath)
        os.remove(tempFilePath)

        if intersectionWKT is None:
            productFootprintWKT = footprintGeometryWKT
        else:
            productFootprintWKT = intersectionWKT

        # now write the output json file
        product = {
            "name": "True colour image",
            "productType": "COVERAGE",
            "SRS": "EPSG:4326",
            "envelopCoordinatesWKT": productFootprintWKT,
            "filePath": outputFilePath,
            "description": "True colour image from Landsat 7 platform"
        }
        writeOutput(outputdirectory,
                    "True colour generation using geocento process", [product])

        print "True Colour script finished for LANDSAT7 STANDARD product(s) at " + inputdirectory

    elif platformname == 'TRIPPLESAT' or platformname == 'DEIMOS-2':
        # get the tif files
        tifFiles = findFiles(inputdirectory, 'tif')

        if len(tifFiles) == 0:
            sys.exit("Missing TIFF file in directory")

        tifFile = tifFiles[0]

        # create overlays and extract footprint
        ds = gdal.Open(tifFile)
        # reproject to 4326
        tempFilePath = outputdirectory + '/temp.tiff'
        ds = gdal.Warp(tempFilePath, ds, format='GTiff', dstSRS='EPSG:4326')
        productFootprintWKT = generic.getDatasetFootprint(ds)
        print "FOOTPRINT: " + productFootprintWKT
        executeOverviews(ds)
        outputFilePath = outputdirectory + '/productOutput.tiff'
        ds = gdal.Translate(outputFilePath,
                            ds,
                            bandList=[1, 2, 3],
                            outputType=gdal.GDT_Byte,
                            noData=0,
                            format='GTiff')

        # now write the output json file
        product = {
            "name": "True colour image",
            "productType": "COVERAGE",
            "SRS": "EPSG:4326",
            "envelopCoordinatesWKT": productFootprintWKT,
            "filePath": outputFilePath,
            "description": "True colour image from TrippleSat platform"
        }

        writeOutput(outputdirectory,
                    "True colour generation using geocento process", [product])

        print "True Colour script finished for TRIPPLE SAT product(s) at " + inputdirectory

    elif platformname == 'PLANETSCOPE':
        # get the tif files
        tifFiles = findFiles(inputdirectory, 'tif')

        if len(tifFiles) == 0:
            sys.exit("Missing TIFF file in directory")

        tifFile = tifFiles[0]  # fall back to the first file if all are UDM clips
        for file in tifFiles:
            if not file.lower().endswith("_udm_clip.tif"):
                tifFile = file
                break
        # check if visual or analytics
        analytic = "Analytic" in tifFile

        # create overlays and extract footprint
        ds = gdal.Open(tifFile)
        # reproject to 4326
        tempFilePath = outputdirectory + '/temp.tiff'
        outputFilePath = outputdirectory + '/productOutput.tiff'
        # reduce bands if needed
        ds = gdal.Translate('temp', ds, format='MEM', bandList=[1, 2, 3])
        # if analytics we need to do some scaling for contrasts
        if analytic:
            print("Analytic product, modifying contrast for visualisation")
            scaleParams = generic.getScaleParams(ds, 255)
            print("Scale params ")
            print(scaleParams)
            ds = gdal.Translate('temp',
                                ds,
                                format='MEM',
                                scaleParams=scaleParams,
                                exponents=[0.5, 0.5, 0.5])
        ds = gdal.Warp('temp',
                       ds,
                       format='GTiff',
                       srcNodata=0,
                       dstAlpha=True,
                       dstSRS='EPSG:4326')
        productFootprintWKT = generic.getDatasetFootprint(ds)
        print "FOOTPRINT: " + productFootprintWKT
        ds = gdal.Translate(tempFilePath,
                            ds,
                            outputType=gdal.GDT_Byte,
                            format='GTiff')
        executeOverviews(ds)
        ds = gdal.Translate(outputFilePath, ds, format='GTiff')

        # now write the output json file
        product = {
            "name": "True colour image",
            "productType": "COVERAGE",
            "SRS": "EPSG:4326",
            "envelopCoordinatesWKT": productFootprintWKT,
            "filePath": outputFilePath,
            "description": "True colour image from TrippleSat platform"
        }

        writeOutput(outputdirectory,
                    "True colour generation using geocento process", [product])

        print "True Colour script finished for TRIPPLE SAT product(s) at " + inputdirectory

    elif platformname == 'SENTINEL1':
        pass
    else:
        sys.exit("Unknown platform " + platformname)
def main():
    
    tic = timeit.default_timer()
    
    ##########################################################################
    ########################## Parameter section #############################
    ##########################################################################
    
    # Location of the shapefile containing the study area
    inputFeature = r"C:\Users\user\Documents\zone_etude.shp"
    
    # Location of the bathymetric map
    bathymetrie = r"C:\Users\user\Documents\DEM_AlguesEauTerre_BG_FGA_L5_L7_L8_S2_10m_IDLMsud_1985_2019_Sud1.tif"
    
    # Location of the land-cover map
    classes_sol = r"C:\Users\user\Documents\occ_sol.tif"
    
    # Path to the folder receiving the downloaded images
    filePath = r"D:\img_S2\IDLM"
    
    # Path to the folder containing the corrected (L2A) S2 images
    locL2AFile = r"D:\img_S2\IDLM\L2A_S2_georef"
    
    # Path to the folder containing the corrected S2 images clipped to the study area
    locL2AClip = r"D:\img_S2\IDLM\L2A_S2_georef\img_clip"
    
    # Cloud-cover percentage used when filtering the images
    cloudPerc = 10
    
    # First and last month to take into account for the study
    startMonth = 6
    endMonth = 10
    
    # Spectral bands of the Sentinel-2 images used in the analysis
    bands = ['B02','B03','B04','B08','SCL']
    
    # noData value to assign to the rasters
    noData = -9999
    
    # Credentials for the Copernicus server used to download the images
    username = '******'
    password = '******'
    
    # Location of the Sen2Cor executable
    sen2cor = r"C:\Users\user\.snap\auxdata\Sen2Cor-02.08.00-win64\L2A_Process.bat"
    
    # Location of the parameter file for the sen2cor atmospheric correction
    atmcorr_param = r"C:\Users\user\Documents\L2A_GIPP.xml"
    
    # Path to the OSGeo binaries directory
    OSGeoPath = 'C:\\OSGeo4W64\\bin'
    
    ##########################################################################
    #################### Commands to run the script ##########################
    ##########################################################################
    

    # Read and open the shapefile
    ds = readFeature(inputFeature)
        
    lyr = ds.GetLayer('feature')
       
    geomLayer = []  # WKT geometries of the study-area features
    for item in lyr:
        geom = item.GetGeometryRef()
        wkt = geom.ExportToWkt()
        extent = geom.GetEnvelope()
        geomLayer.append(wkt)
  
    # Read the layer to obtain its SRID
    proj = getProjection(lyr)

    # Reproject the shapefile
    wkt = reprojeter(geomLayer,proj,4326)
      
    # Convert the WKT geometry to GeoJSON so it is compatible with Earth Engine
    coord = getStudyAreaCoordinates(wkt)

    # Initialise the earthengine library. A Google Earth Engine account must be linked to this computer.
    ee.Initialize()
    
    
    # Create the polygon bounding the study area
    study_area = ee.Geometry.Polygon(coord, 'EPSG:4326').bounds()
           
    # Get the current year and the year 24 months earlier
    actual_year = datetime.datetime.now().year
    start_year = actual_year - 2  
    
    # Identify the sensor to use
    s2 = ee.ImageCollection('COPERNICUS/S2')
    
    # define your collection
    time_series_s2 = s2.filter(ee.Filter.calendarRange(start_year,actual_year,'year'))\
    .filter(ee.Filter.calendarRange(startMonth,endMonth,'month'))\
    .filterBounds(study_area)\
    .filter(ee.Filter.lte('CLOUDY_PIXEL_PERCENTAGE',cloudPerc))


    nb_img_s2 = time_series_s2.size().getInfo()
    print('Number of images covering the study area: ',nb_img_s2)
    
    # Store all the image names.
    images_names_s2 = [item.get('id') for item in time_series_s2.getInfo().get('features')]
    
   
    # Record the names of the images that are 'offline' during the download so
    # they can be fetched once they are brought back online
    imgOffline = []
    
     
    # If some images could not be downloaded because of their 'offline' status,
    # the server brings them back online at most 24 hours after the request.
        
    for i in os.listdir(filePath):
        
        if i.endswith('.json'):
            
            imgOnline = importJson(filePath+'\\'+i)
            
            count = 0
            for j in imgOnline: 
                count += 1
                
                print("\nImage %d of %d \n" % (count,len(imgOnline)))
                print("Downloading image:", j)
                
                # Call the function that downloads the S2 images
                # from the Copernicus servers
                online = download_S2(j,filePath,username,password)
                
                if online == 'false':
          
                    imgOffline.append(j)
                    print("Image not downloaded because it is not available:", j)
                
                else:
                    print("Image download complete\n")
            
                toc = timeit.default_timer()
                print('\nElapsed time:', (toc-tic)/60)
                    
            print("\nDownload of all S2 images complete\n")
             
            
            for l in os.listdir(filePath):
                                
                for k in imgOnline:
                
                    if k in l:
                        
                        # Unzip the downloaded S2 images.
                        unzip(filePath+'\\'+l,filePath)
            
            # Run the atmospheric correction on the S2 images
            atmCorrS2(sen2cor,filePath,locL2AFile,atmcorr_param,bands)
            
            
            if imgOffline:
                
                # Export the names of the images that were not downloaded
                export2json(imgOffline,'imgoffline',filePath)
            
                print('\n\nSome images were not downloaded. Status: offline')    
                
                print('\n\n\n*****************************************************\
                      \nScript interrupted because not all of the images were downloaded. Please rerun the script once these images become available.\
                          \n*****************************************************')
                
                # Exit the script because some images were not downloaded
                sys.exit()
            
            # If every image was downloaded, the .json file holding the names of
            # the images to download is deleted.
            if not imgOffline:
                os.remove(filePath+'\\'+i)
                
            
        else:
   
            # Download the images by their ID.
            count = 0
            for i in images_names_s2:
            
                count += 1
                # Get the image's PRODUCT_ID
                img = ee.Image(i).get('PRODUCT_ID').getInfo()
                print("\nImage %d of %d \n" % (count,int(nb_img_s2)))
        
                # Call the function that downloads the S2 images
                # from the Copernicus servers
                print("Downloading image:", img)
        
                online = download_S2(img,filePath,username,password)
        
                # Record the names of the offline (hence not downloaded) images
                # so they can be fetched later once they are online
                
                if online == 'false':
                  
                    imgOffline.append(img)
                    print("Image not downloaded because it is not available:", img)
                
                else:
                    print("Image download complete\n")
            
                toc = timeit.default_timer()
                print('\nElapsed time:', (toc-tic)/60)


            print("\nDownload of all S2 images complete\n")
            
             
            # Unzip the downloaded S2 images.
            for f in os.listdir(filePath):
                if f.endswith('.zip'):
                    unzip(filePath+'\\'+f,filePath)
            
            toc = timeit.default_timer()
            print('\nElapsed time:', (toc-tic)/60)
            
            # Run the atmospheric correction on the S2 images
            atmCorrS2(sen2cor,filePath,locL2AFile,atmcorr_param,bands)
                
                 
            toc = timeit.default_timer()
            print('\nElapsed time:', (toc-tic)/60) 
    
            
            if imgOffline:
                # Export the names of the images that were not downloaded
                export2json(imgOffline,'imgoffline',filePath)
                
                print('\n\nSome images were not downloaded. Status: offline')   
                
                print('\n\n\n*****************************************************\
                      \nScript interrupted because not all of the images were downloaded. Please rerun the script once these images become available.\
                          \n*****************************************************')
                # Exit the script because some images were not downloaded
                sys.exit()
                
    
    ### Clip the images to the extent of the study area ###
    
    # Pixel resolution of the images
    pixelSize = 10
    # Extent values
    minX = extent[0]
    minY = extent[2]
    maxX = extent[1]
    maxY = extent[3]
    bounds = (minX,minY,maxX,maxY)
    
    for i in os.listdir(locL2AFile):
        if i.endswith('.tif'):
            
            # Warp options
            options = {
                'xRes':pixelSize,
                'yRes':pixelSize,
                'resampleAlg':None,
                'outputBounds':bounds,
                # 'cutlineDSName':inputFeature,
                # 'cropToCutline':True,
                # 'srcNodata':0,
                'dstNodata':0,
                'multithread':True
                        }
            # Resample the 20 m resolution images.
            if '20m' in i:
                options['resampleAlg'] = 'near'
                                
            gdal.Warp("%s/%s_clip.tif" % (locL2AClip,i[:-4]),"%s/%s" % (locL2AFile,i),**options)
         
            print('Clipping image %s to the study area' % i)
            
            
        
    # Detect images from the same dates so they can be merged together
    name_img_dict = {}
    for i in sorted(os.listdir(locL2AClip)):
        
        if i[7:26] in name_img_dict:       
            name_img_dict[i[7:26]] += 1

        else:
            name_img_dict[i[7:26]] = 1



    # Determine whether images of the same date exist. If so, record the names
    # of the images that will be used in later processing. If not, convert the
    # images to numpy arrays.
    img_to_merge = {}
    array_s2_clip = {}
    for x,y in sorted(name_img_dict.items()):
            
        # If an image date is counted more than once, the names of the images
        # to mosaic are stored in the img_to_merge dictionary.
        if y > 1:
            
            for i in sorted(os.listdir(locL2AClip)):
                
                if x in i and i.endswith('.tif'):
                    if x not in img_to_merge:
                        img_to_merge[x] = [i]

                    else:
                        img_to_merge[x].append(i)
        
        # If an image date occurs only once, the image is added directly to the
        # array_s2_clip dictionary.
        else:
            for i in sorted(os.listdir(locL2AClip)):
                
                if x in i and i.endswith('.tif'):
                    
                    # Read the clipped Sentinel-2 image
                    X, Y, geotransform,proj,array = readRaster("%s/%s" % (locL2AClip,i),[1])
                                        
                    # Append the spectral bands used for each date.
                    if i[7:22] not in array_s2_clip:
                        array_s2_clip[i[7:22]] = [array/10000]
                
                    else:
                        array_s2_clip[i[7:22]].append(array/10000)                 
                        
                    del X,Y,geotransform,proj,array
 
    # Import GDAL's merge script.
    sys.path.append(OSGeoPath)
    import gdal_merge as gm
    
    # If images of the same date were detected, run gdal_merge to mosaic the
    # tiles of the study area.
    if img_to_merge:
        
        # Look up the file names stored in the dictionary.
        for x,y in img_to_merge.items():
            
            file = []
            for d in y:
                file.append("%s/%s" %(locL2AClip,d))
            
            # Run the mosaicking. S2 images use 0 as their nodata value.
            # (arguments must be strings, hence str(noData))
            gm.main(['','-o', "%s/%s_merge.tif" % (locL2AClip,y[1]), '-ot', 'Int16', '-n','0','-a_nodata',str(noData),*file])
            
            # Read the mosaicked images
            X,Y,geotransform,proj,array = readRaster("%s/%s" % (locL2AClip,y[1]+'_merge.tif'),[1])
            
            # Store the images as lists
            if y[1][7:22] not in array_s2_clip:
                array_s2_clip[y[1][7:22]] = [array/10000]

            else:
                array_s2_clip[y[1][7:22]].append(array/10000)
          
            del X,Y,geotransform,proj,array
        
        print('Mosaicking finished')
        
    else:
        print('No image was mosaicked')
        
        
    #################################
    ### Create the time series ###
    #################################
    
    # Read the rasters holding the DEM and the land cover
    x,y,geotransform,proj,bathym = readRaster(bathymetrie,[1])
    x,y,geotransform,proj,occ_sol = readRaster(classes_sol,[1])
    
    bathym = bathym[0]
    occ_sol = occ_sol[0]
    
    img_date = []
    
    # Holds the image indices for each year.
    index_year = {}
    
    indice_prof = []
    
    indice_prof_mask = []
    
    limite_VeryShallow = [0.5,2.]
    limite_Shallow = [4.,6.]
    limite_Deep = [10.,12.]
    
    
    # Dictionaries used to extract the depth-index values for each pixel and
    # for each depth zone
    indice_VShal = {}
    indice_Shal = {}
    indice_Deep = {}

    # Lists used to extract the depth values for each depth zone.
    pixel_VShal = []
    pixel_Shal = []
    pixel_Deep = []
    
    # Indices of the blue, green, red and NIR bands and of the cloud-mask layer
    blue = 0
    green = 1
    red = 2
    nir = 3
    idx_mask_cloud = len(bands)-1
    
    
    countVShal = 0
    countShal = 0
    countDeep = 0
    
    # Counter used to track the image index in the dictionary
    count_index = 0
    
    # Mask cloudy areas using the S2 'SCL' band.
    for i,j in sorted(array_s2_clip.items()):
        
        for b in range(0,len(bands),1):

            # Skip the last band, which is the 'SCL' mask. This condition can
            # be changed depending on where the mask band actually sits, or
            # whether it exists at all.
            if b < idx_mask_cloud:
                
                
                ### Apply a Gaussian filter to the images to reduce noise.
                array_s2_clip[i][b][0] = gaussian_filter(j[b][0],2)
                
        print('Gaussian filter applied to the images')
        
        # Information encoded in the image name
        year = i[0:4]
        month = i[4:6]
        day = i[6:8]
        
        # Convert the dates to decimal years.
        img_date.append(decimalYear(day,month,year))         
        
        # Record the image indices for each year.
        if year not in index_year:
            index_year[year] = [count_index]
            
        else:
            index_year[year].append(count_index)
        
        count_index += 1
        
        
        print('Computing the depth index for date: ', year+month+day)
        
        ### Compute the depth index
        DEPTH = depth(j[blue][0],j[green][0])
        
        indice_prof.append(DEPTH)
        
        ### Apply an adaptive Wiener filter ###
        DEPTH3 = wiener(DEPTH,3)
        DEPTH33 = wiener(DEPTH3,3)
        DEPTH335 = wiener(DEPTH33,5)
        
        del DEPTH,DEPTH3,DEPTH33
        
        print('Adaptive Wiener filter applied for date: ', year+month+day)
        
         
        ### Mask the clouds present in the images ###
        
        mask_band = j[idx_mask_cloud][0].copy()
        
        mask_band[np.where((mask_band == 3) |
                            # mask_band == 7 |
                            (mask_band == 8) |
                            (mask_band == 9) |
                            (mask_band == 10))] = noData
        
        mask_band[np.where(mask_band != noData)] = 1
        
        mask_band[np.where(mask_band == noData)] = np.nan
        
        depth_mask = DEPTH335 * mask_band
                
        indice_prof_mask.append(depth_mask)
        
        del DEPTH335
        
        print('Cloud masking done for date: ', year+month+day)
        
        
        
        for k in range(0,np.size(bathym,0),1):           
        
            for l in range(0,np.size(bathym,1),1):
                
                
                
                # Extract the depth-index pixels that fall in the 'Shallow' zones.
                if occ_sol[k][l] == 20 and bathym[k][l] > limite_Shallow[0] and bathym[k][l] < limite_Shallow[1]:
  
                    if count_index == 1:
                        pixel_Shal.append(bathym[k][l])
                    
    
  
                    if countShal not in indice_Shal:
                        
                        indice_Shal[countShal] = [depth_mask[k][l]]
                    
                        countShal += 1
                    
                    else:
                        indice_Shal[countShal].append(depth_mask[k][l])
                        
                        countShal += 1
                    
                    
                # Extract the depth-index pixels that fall in the 'Deep' zones.    
                if occ_sol[k][l] == 20 and bathym[k][l] > limite_Deep[0] and bathym[k][l] < limite_Deep[1]:
  
                    if count_index == 1:
                        pixel_Deep.append(bathym[k][l])
  
                    if countDeep not in indice_Deep:
                        
                        indice_Deep[countDeep] = [depth_mask[k][l]]
                    
                        countDeep += 1
                    
                    else:
                        indice_Deep[countDeep].append(depth_mask[k][l])
                        
                        countDeep += 1
                
                
                # Extract the depth-index pixels that fall in the 'Very Shallow' zones.
                if occ_sol[k][l] == 20 and bathym[k][l] > limite_VeryShallow[0] and bathym[k][l] < limite_VeryShallow[1]:
  
                    if count_index == 1:
                        pixel_VShal.append(bathym[k][l])         
  
    
                    if countVShal not in indice_VShal:
                        
                        indice_VShal[countVShal] = [depth_mask[k][l]]
                    
                        countVShal += 1
                    
                    else:
                        indice_VShal[countVShal].append(depth_mask[k][l])
                        
                        countVShal += 1
                    

        # Reset the pixel counters to 0.
        countVShal,countShal,countDeep = [0,0,0]
    
    # Convert the list of dates to a numpy array.
    img_date = np.array(img_date)   
        
    # Convert the lists to numpy arrays.
    indice_prof = np.array(indice_prof)
    indice_prof_mask = np.array(indice_prof_mask)
    
    # Convert the pixel lists of the different depth zones to numpy arrays.
    pixel_Shal = np.array(pixel_Shal)
    pixel_Deep = np.array(pixel_Deep)
    pixel_VShal = np.array(pixel_VShal)
      
    # Mean depth-index value for each pixel and for each depth class.
    moyPixDeep = [np.nanmean(j) for i,j in  indice_Deep.items()]
    moyPixShal = [np.nanmean(j) for i,j in  indice_Shal.items()]
    moyPixVShal = [np.nanmean(j) for i,j in indice_VShal.items()]

 
    # Reference depth-index value for each depth class.
    refIndiceDeep = []
    refIndiceShal1 = []
    refIndiceShal2 = []
    refIndiceVShal = []
    refIndiceVShal2 = []

    # Compute the percentile of the depth indices for each zone.    
    for i in range(0,len(indice_prof_mask),1):
        
        valueDeep = []
        valueShal = []
        valueVShal = []

        for j in indice_Deep.values():
            
            valueDeep.append(j[i])
       
        for j in indice_Shal.values():
        
            valueShal.append(j[i])
            
        for j in indice_VShal.values():
         
            valueVShal.append(j[i])
                    
        refIndiceDeep.append(np.nanpercentile(valueDeep,25))
        refIndiceShal1.append(np.nanpercentile(valueShal,75))
        refIndiceShal2.append(np.nanpercentile(valueShal,20))
        refIndiceVShal.append(np.nanpercentile(valueVShal,60))
        refIndiceVShal2.append(np.nanpercentile(valueVShal,60))
        # del valueDeep,valueShal,valueVShal
    
    # Convert the lists to numpy arrays
    refIndiceDeep = np.array(refIndiceDeep)
    refIndiceShal1 = np.array(refIndiceShal1)
    refIndiceShal2 = np.array(refIndiceShal2)
    refIndiceVShal = np.array(refIndiceVShal)
    refIndiceVShal2 = np.array(refIndiceVShal2)
    
    
    # Pixel percentiles converted to depth values
    refPixelDeep = np.percentile(pixel_Deep,75)
    refPixelShal1 = np.percentile(pixel_Shal,25)
    refPixelShal2 = np.percentile(pixel_Shal,60)
    refPixelVShal = np.percentile(pixel_VShal,20)
    refPixelVShal2 = np.percentile(pixel_VShal,20)
    
  
    ### Build the matrix of bathymetry changes ###
    
    
    
    # Matrix of bathymetry changes. Each stored pixel holds the slope of the
    # change: the steeper the slope, the larger the change.
   
    j = np.arange(0,37,1)
        
    pente_bathym = np.full_like(bathym,np.nan)
    
    # prof_calc1 = np.full_like(bathym,np.nan)
    
    
    n = img_date[0:len(j)]
    p = []
    
    for k in range(0,np.size(bathym,0),1):           
        
            for l in range(0,np.size(bathym,1),1):
                
                            
                # Extract the depth-index pixels that fall in the 'Shallow' to
                # 'Deep' zones.
                if occ_sol[k][l] == 20 and bathym[k][l] > 4. and bathym[k][l] < 12.:
                
                    # Extract one pixel across all dates.
                    x = indice_prof_mask[j[0]:j[-1]+1,k,l]
                    
                    # Compute the changes for the Deep zones.
                    norm_indice = (1-0.9)*((x-refIndiceDeep[j[0]:j[-1]+1])/(refIndiceShal1[j[0]:j[-1]+1]-refIndiceDeep[j[0]:j[-1]+1]))+0.9
                    
                    # Compute the changes for the Shallow zones
                    # norm_indice = (1-0.9)*((x-refIndiceVShal2[j[0]:j[-1]+1])/(refIndiceShal2[j[0]:j[-1]+1]-refIndiceVShal2[j[0]:j[-1]+1]))+0.9
                    
                    
                    # Remove the outliers             
                    # for z in range(0,len(norm_indice),1):
                        
                    #     if norm_indice[z] < 0.82 or norm_indice[z] > 1.1:
                    #         norm_indice[z] = np.nan
                    
                    idx = np.isfinite(n) & np.isfinite(norm_indice)
                    
                    pente_indice = np.polynomial.polynomial.Polynomial.fit(n[idx],norm_indice[idx],1)
                    
                    p_indice = pente_indice.convert().coef
                    
                    
                    # Convert the normalised depth indices to real depths for the Deep zones.
                    pente_metre = np.polynomial.polynomial.Polynomial.fit([0.9,1.0],[refPixelDeep,refPixelShal1],1)
                    
                    # Convert the normalised depth indices to real depths for the Shallow zones.
                    # pente_metre = np.polynomial.polynomial.Polynomial.fit([0.9,1.0],[refPixelShal2,refPixelVShal2],1)
                    
                    # Get the intercept and slope of the function
                    p_metre = pente_metre.convert().coef
                    p_metre = [p_metre[1],p_metre[0]]
                    
                    # Build the conversion function
                    p = np.poly1d(p_metre)
                    
                    # List holding the computed depths
                    prof_calc = []
                    
                    for a in norm_indice:
                        prof_calc.append(p(a))
                        
                        
                    prof_calc = np.array(prof_calc)
                    
                    idx2 = np.isfinite(n) & np.isfinite(prof_calc)
                    
                    # Bathymetry for one particular date
                    # prof_calc1[k,l] = p(norm_indice[3])
                    
                    
                    try:
                        pente = np.polynomial.polynomial.Polynomial.fit(n[idx2],prof_calc[idx2],1)
                        
                        pente_coef = pente.convert().coef
                        
                        pente_bathym[k,l] = pente_coef[1]
                        
                  
                    except Exception:
                        pente_bathym[k,l] = np.nan
                        
    
    writeRaster('changement_profondeur.tif',geotransform,proj,pente_bathym,noData)

    # writeRaster('profondeur.tif',geotransform,proj,prof_calc1,noData)
    
    toc = timeit.default_timer()
    
    print('\nTotal processing time: ', (toc-tic)/60)    
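
# depth() and decimalYear() are defined elsewhere in the original project. Minimal
# sketches, assuming depth() is the classic log-ratio depth index computed from the
# blue and green bands (after Stumpf et al.) and decimalYear() converts a calendar
# date to a decimal year; both are illustrative, not the author's actual code:
import datetime

import numpy as np


def depth(blue, green, n=1000.0):
    # log-ratio index: larger values correspond to shallower water
    return np.log(n * blue) / np.log(n * green)


def decimalYear(day, month, year):
    d = datetime.date(int(year), int(month), int(day))
    start = datetime.date(d.year, 1, 1)
    days_in_year = (datetime.date(d.year + 1, 1, 1) - start).days
    return d.year + (d - start).days / days_in_year
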
Exemple #35
0
            if '_Emissivity_Mean' in tif and tif.endswith('.tif'):
                DEM.append(os.path.join(folder, tif))

outputFile = r'ASTER2_emissivity_merged.tif'

command = []

command.insert(0, '')

command.extend(['-o', os.path.join(outputFoler, outputFile), '-of', 'GTiff',  '-a_nodata', '-9999', '-n', '-9999'])

command.extend(DEM) 

sys.argv = command

gdal_merge.main()
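
# Note: overwriting sys.argv works, but gdal_merge.main() also accepts the argument
# list directly (the first element is a placeholder for the program name), which
# avoids mutating global state. With the same 'command' list built above:
#
#   gdal_merge.main(command)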
                
    
    

#sourFile = r'E:\Penghua\data\LST\Lybien-1\new_selected_data'
#
#for files in os.listdir(sourFile):  
#    
#    folder = os.path.join(sourFile, files)
#    
#    outputPath = os.path.join(folder, r'MOD11A1_LST.tif')
#    
#    L = []  
#    
#    L.insert(0,'')
Exemple #36
0
 def run(self):
     gdal_merge.main(["", "-separate", "-o", self.output_raster] +
                     sorted(list(self.input_rasters)))
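
# "-separate" stacks each input raster as its own band instead of mosaicking them
# into a single band. A rough standalone equivalent using the GDAL Python API (the
# file names below are placeholders, not taken from the snippet):
from osgeo import gdal

gdal.BuildVRT('stack.vrt', ['band1.tif', 'band2.tif'], separate=True)
gdal.Translate('stack.tif', 'stack.vrt')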
Exemple #37
0
 def Merge(self, infile, outfile):
     # gdal_merge.main() skips argv[0], so a placeholder must come first;
     # otherwise the first input file is swallowed as the program name
     sys.argv = ['', infile[0], infile[1], infile[2], infile[3], '-o', outfile, '-a_nodata', '-999']
     gm.main()
Exemple #38
0
def do_fmask(mtl_file,
             filters_enabled,
             tmp_dir,
             min_cloud_size=0,
             cloud_prob_thresh=0.225,
             cloud_buffer_size=4,
             shadow_buffer_size=6,
             cirrus_prob_ratio=0.04,
             nir_fill_thresh=0.02,
             swir2_thresh=0.03,
             whiteness_thresh=0.7,
             swir2_water_test=0.03,
             nir_snow_thresh=0.11,
             green_snow_thresh=0.1):

    print("Fmask:")

    input_dir = os.path.dirname(mtl_file)

    # parser
    mtl_file_parse = mtl2dict(mtl_file)

    # get the landsat version
    landsat_version = int(mtl_file_parse['SPACECRAFT_ID'][-1])

    # set bands for reflective and thermal
    if landsat_version in [4, 5]:
        # get the reflective file names bands
        reflective_bands = [
            os.path.join(input_dir, mtl_file_parse['FILE_NAME_BAND_' + str(N)])
            for N in [1, 2, 3, 4, 5, 7]
        ]
        # get the thermal file names bands
        thermal_bands = [
            os.path.join(input_dir, mtl_file_parse['FILE_NAME_BAND_' + str(N)])
            for N in [6]
        ]

    # set bands for reflective and thermal
    if landsat_version == 7:
        # get the reflective file names bands
        reflective_bands = [
            os.path.join(input_dir, mtl_file_parse['FILE_NAME_BAND_' + str(N)])
            for N in [1, 2, 3, 4, 5, 7]
        ]
        # get the thermal file names bands
        thermal_bands = [
            os.path.join(input_dir,
                         mtl_file_parse['FILE_NAME_BAND_6_VCID_' + str(N)])
            for N in [1, 2]
        ]

    # set bands for reflective and thermal
    if landsat_version == 8:
        # get the reflective file names bands
        reflective_bands = [
            os.path.join(input_dir, mtl_file_parse['FILE_NAME_BAND_' + str(N)])
            for N in [1, 2, 3, 4, 5, 6, 7, 9]
        ]
        # get the thermal file names bands
        thermal_bands = [
            os.path.join(input_dir, mtl_file_parse['FILE_NAME_BAND_' + str(N)])
            for N in [10, 11]
        ]

    # set the prefer file name band for process
    reflective_bands = [
        get_prefer_name(file_path) for file_path in reflective_bands
    ]
    thermal_bands = [get_prefer_name(file_path) for file_path in thermal_bands]

    ########################################
    # reflective bands stack

    # tmp file for reflective bands stack
    reflective_stack_file = os.path.join(tmp_dir, "reflective_stack.tif")

    if not os.path.isfile(reflective_stack_file):
        gdal_merge.main(
            ["", "-separate", "-of", "GTiff", "-o", reflective_stack_file] +
            reflective_bands)

    ########################################
    # thermal bands stack

    # tmp file for thermal bands stack
    thermal_stack_file = os.path.join(tmp_dir, "thermal_stack.tif")

    if not os.path.isfile(thermal_stack_file):
        gdal_merge.main(
            ["", "-separate", "-of", "GTiff", "-o", thermal_stack_file] +
            thermal_bands)

    ########################################
    # estimates of per-pixel angles for sun
    # and satellite azimuth and zenith
    #
    # fmask_usgsLandsatMakeAnglesImage.py

    # tmp file for angles
    angles_file = os.path.join(tmp_dir, "angles.tif")

    mtlInfo = config.readMTLFile(mtl_file)

    imgInfo = fileinfo.ImageInfo(reflective_stack_file)
    corners = landsatangles.findImgCorners(reflective_stack_file, imgInfo)
    nadirLine = landsatangles.findNadirLine(corners)

    extentSunAngles = landsatangles.sunAnglesForExtent(imgInfo, mtlInfo)
    satAzimuth = landsatangles.satAzLeftRight(nadirLine)

    landsatangles.makeAnglesImage(reflective_stack_file, angles_file,
                                  nadirLine, extentSunAngles, satAzimuth,
                                  imgInfo)

    ########################################
    # saturation mask
    #
    # fmask_usgsLandsatSaturationMask.py

    # tmp file for saturation mask
    saturationmask_file = os.path.join(tmp_dir, "saturationmask.tif")

    if landsat_version == 4:
        sensor = config.FMASK_LANDSAT47
    elif landsat_version == 5:
        sensor = config.FMASK_LANDSAT47
    elif landsat_version == 7:
        sensor = config.FMASK_LANDSAT47
    elif landsat_version == 8:
        sensor = config.FMASK_LANDSAT8

    # needed so the saturation function knows which
    # bands are visible etc.
    fmaskConfig = config.FmaskConfig(sensor)

    saturationcheck.makeSaturationMask(fmaskConfig, reflective_stack_file,
                                       saturationmask_file)

    ########################################
    # top of Atmosphere reflectance
    #
    # fmask_usgsLandsatTOA.py

    # tmp file for toa
    toa_file = os.path.join(tmp_dir, "toa.tif")

    landsatTOA.makeTOAReflectance(reflective_stack_file, mtl_file, angles_file,
                                  toa_file)

    ########################################
    # cloud mask
    #
    # fmask_usgsLandsatStacked.py

    # tmp file for cloud
    cloud_fmask_file = os.path.join(tmp_dir, "fmask.tif")

    # 1040nm thermal band should always be the first (or only) band in a
    # stack of Landsat thermal bands
    thermalInfo = config.readThermalInfoFromLandsatMTL(mtl_file)

    anglesInfo = config.AnglesFileInfo(angles_file, 3, angles_file, 2,
                                       angles_file, 1, angles_file, 0)

    if landsat_version == 4:
        sensor = config.FMASK_LANDSAT47
    elif landsat_version == 5:
        sensor = config.FMASK_LANDSAT47
    elif landsat_version == 7:
        sensor = config.FMASK_LANDSAT47
    elif landsat_version == 8:
        sensor = config.FMASK_LANDSAT8

    fmaskFilenames = config.FmaskFilenames()
    fmaskFilenames.setTOAReflectanceFile(toa_file)
    fmaskFilenames.setThermalFile(thermal_stack_file)
    fmaskFilenames.setOutputCloudMaskFile(cloud_fmask_file)
    fmaskFilenames.setSaturationMask(saturationmask_file)  # TODO: optional

    fmaskConfig = config.FmaskConfig(sensor)
    fmaskConfig.setThermalInfo(thermalInfo)
    fmaskConfig.setAnglesInfo(anglesInfo)
    fmaskConfig.setKeepIntermediates(False)
    fmaskConfig.setVerbose(False)
    fmaskConfig.setTempDir(tmp_dir)

    # Set the settings fmask filters from widget to FmaskConfig
    fmaskConfig.setMinCloudSize(min_cloud_size)
    fmaskConfig.setEqn17CloudProbThresh(cloud_prob_thresh)
    fmaskConfig.setCloudBufferSize(int(cloud_buffer_size))
    fmaskConfig.setShadowBufferSize(int(shadow_buffer_size))
    fmaskConfig.setCirrusProbRatio(cirrus_prob_ratio)
    fmaskConfig.setEqn19NIRFillThresh(nir_fill_thresh)
    fmaskConfig.setEqn1Swir2Thresh(swir2_thresh)
    fmaskConfig.setEqn2WhitenessThresh(whiteness_thresh)
    fmaskConfig.setEqn7Swir2Thresh(swir2_water_test)
    fmaskConfig.setEqn20NirSnowThresh(nir_snow_thresh)
    fmaskConfig.setEqn20GreenSnowThresh(green_snow_thresh)

    # set to 1 for all Fmask filters disabled
    if filters_enabled["Fmask Cloud"]:
        fmask.OUTCODE_CLOUD = 2
    else:
        fmask.OUTCODE_CLOUD = 1

    if filters_enabled["Fmask Shadow"]:
        fmask.OUTCODE_SHADOW = 3
    else:
        fmask.OUTCODE_SHADOW = 1

    if filters_enabled["Fmask Snow"]:
        fmask.OUTCODE_SNOW = 4
    else:
        fmask.OUTCODE_SNOW = 1

    if filters_enabled["Fmask Water"]:
        fmask.OUTCODE_WATER = 5
    else:
        fmask.OUTCODE_WATER = 1

    # process Fmask
    fmask.doFmask(fmaskFilenames, fmaskConfig)

    return cloud_fmask_file
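
# mtl2dict() is not included in this excerpt. A minimal sketch, assuming it parses
# the "KEY = VALUE" lines of a Landsat MTL file into a flat dict with quotes
# stripped (illustrative only; the original helper may differ):
def mtl2dict(mtl_path):
    metadata = {}
    with open(mtl_path) as mtl:
        for line in mtl:
            if '=' in line:
                key, _, value = line.partition('=')
                metadata[key.strip()] = value.strip().strip('"')
    return metadata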
Exemple #39
0
    def processAlgorithm(self, parameters, context, feedback):
        # the parameters previously defined must be incorporated into the algorithm
        # most of them are incorporated as Strings. We need to modify the strings afterwards
        source = self.parameterAsString(parameters, self.INPUT, context)
        global path_exe
        if path_exe == '':
            path_exe = self.parameterAsString(parameters, self.PATH_EXE,
                                              context)
        cellsize = self.parameterAsString(parameters, self.CELLSIZE, context)
        clas = self.parameterAsString(parameters, self.CLASS, context)
        #c2a=self.parameterAsString(parameters, self.C2A,context)
        lx2 = self.parameterAsString(parameters, self.Lx2, context)
        out = self.parameterAsFileOutput(parameters, self.OUTPUT, context)

        if lx2 is None or lx2 == '':
            lx2 = '2'
        elif lx2 not in ['1', '2', None, '']:
            sys.exit(
                'Enter a valid file size: 1 for 1000x1000, 2 for 2000x2000. You entered {}'
                .format(lx2))

        feedback.pushInfo('LiDAR file size: {}'.format(lx2))

        # the original membership test compared whole strings for equality;
        # check for the characters inside each path instead
        if any(ch in path for ch in (' ', '´', 'ñ') for path in [source, out]):
            sys.exit(
                '\n\n\nThe path to the .laz files or the output path contains spaces or special characters.\n'
            )

        # required for gridmetrics
        minht = self.parameterAsString(parameters, self.MINHT, context)
        fcc = self.parameterAsString(parameters, self.FCC, context)
        clasg = self.parameterAsString(parameters, self.ClasG, context)
        pixel = self.parameterAsString(parameters, self.PIXEL, context)

        #csv2grid
        col = self.parameterAsString(parameters, self.COLUMN, context)
        # add the dominant height, the canopy height and the canopy cover by default
        ho_p20_fcc = '49,36,27'
        col = ho_p20_fcc + col
        col = col.split(',')

        # vegetation index
        source_sen = self.parameterAsString(parameters, self.INPUT_SEN,
                                            context)

        # multiband

        monte = self.parameterAsString(parameters, self.MONTE, context)

        if os.path.basename(source) == 'INPUT' and os.path.basename(
                source_sen) == 'INPUT_SEN':
            sys.exit(
                'No input data. Please provide LiDAR or Sentinel 2 data')

        # variables that obtain the files needed in each process

        basename = os.path.basename(out).split('.dtm')[0]

        # path to folders
        o = os.path.dirname(out)
        feedback.pushInfo('source:{}'.format(source))
        feedback.pushInfo('source_sen:{}'.format(source_sen))

        # by default, the fields left empty resolve to a folder path named after the class variables

        if os.path.basename(source_sen) == 'INPUT_SEN':
            #LiDAR data
            o_temp = os.path.join(o, 'temp_LiDAR_' + basename)
            o_raster = os.path.join(o_temp, 'Rasters_LiDAR_' + basename)
            o_metric = os.path.join(o_temp, 'Metrics_LiDAR_' + basename)
            o_MDE = os.path.join(o_temp, 'MDE_LiDAR_' + basename)
            o_final = os.path.join(o, 'Productos_finales_LiDAR_' + basename)
        elif os.path.basename(source) == 'INPUT':
            #Sentinel data
            o_temp = os.path.join(o, 'temp_Sentinel_' + basename)
            o_raster = os.path.join(o_temp, 'Rasters_Sentinel_' + basename)
            o_metric = os.path.join(o_temp, 'Metrics_Sentinel_' + basename)
            o_MDE = os.path.join(o_temp, 'MDE_Sentinel_' + basename)
            o_final = os.path.join(o, 'Productos_finales_Sentinel_' + basename)
        else:
            # Sentinel and LiDAR
            o_temp = os.path.join(o, 'temp_' + basename)
            o_raster = os.path.join(o_temp, 'Rasters_' + basename)
            o_metric = os.path.join(o_temp, 'Metrics_' + basename)
            o_MDE = os.path.join(o_temp, 'MDE_' + basename)
            o_final = os.path.join(o, 'Productos_finales_' + basename)

        # outputs paths
        o_MDE_ba = os.path.join(o_MDE, basename)
        o_metric_ba = os.path.join(o_metric, basename)
        o_raster_ba = os.path.join(o_raster, basename)
        o_final_ba = os.path.join(o_final, basename)

        # create folders to store the results
        folders = []
        for root, dirs, files in os.walk(o, topdown=False):
            for name in files:
                pass
                #print(os.path.join(root, name))
            for name in dirs:
                fold = (os.path.join(root, name))
                folders.append(fold)

        for d in (o_temp, o_raster, o_MDE, o_metric, o_final):
            if d not in folders:
                os.mkdir(d, 1)

        LAS_files_folder = os.path.dirname(source)  # LiDAR Files folder

        files = glob.glob(os.path.join(source, '*.laz'))

        if os.path.basename(source) == 'INPUT':
            feedback.pushInfo('\n\t\tUSING SENTINEL DATA\n')
            shutil.rmtree(o_temp)
        else:
            # set default values in case there is LiDAR data of the compulsory parameters for LiDAR processing
            if minht == '':
                minht = '2'
            else:
                pass
            if fcc == '':
                fcc = '2'
            else:
                pass
            if clasg == '':
                clasg = '2,3,4,5'  #gridmetrics classes
            else:
                pass
            if pixel == '':
                pixel = '20'  #gridmetrics processing
            else:
                pass
            if cellsize == '':
                cellsize = '2'  #MDE size
            else:
                pass
            if clas == '':
                clas = '2'  #MDE classes
            else:
                pass

            feedback.pushInfo(
                'LiDAR parameter values:\n · minht:{} \n · fcc:{}\n · GridMetrics classes:{}\n · GridMetrics pixelsize:{}\n · GridSurfaceCreate cellsize:{} \n · GridSurfaceCreate classes:{}'
                .format(minht, fcc, clasg, pixel, cellsize, clas))

            for i, file in enumerate(files):
                if feedback.isCanceled():
                    sys.exit('Process Canceled')

                # extract the file name
                nombre = os.path.basename(file)
                #reverse the string, as the standard name is more stable at the end
                file = file[::-1]
                # get x and y
                y = file[16:20]
                x = file[21:24]
                # reverse the strings back to normal
                x = int(x[::-1])
                y = int(y[::-1])
                file = file[::-1]

                filename = os.path.join(
                    source,
                    str(x) + '-' + str(y) + "_lista_de_archivos.txt")
                #create the text files where the 9 .laz files will be stored and passed on to FUSION
                Txtfile = open(filename, 'w')

                if lx2 == '2':
                    # calculate the initial coordinate where the iteration begins
                    c_ini = [x * 1000, y * 1000 - 2000]
                    c_fin = [x * 1000 + 2000, y * 1000]
                    # calculate the buffer's initial coordinate
                    c_ini_buf = [c_ini[0] - 200, c_ini[1] - 200]
                    # number of cells (height and width) in the buffer: the
                    # 2400 m extent minus the cell size
                    t_pix = 2
                    W = 2400 - t_pix
                    H = 2400 - t_pix
                    comando_switchgrid = '/grid:' + str(
                        c_ini_buf[0]) + ',' + str(
                            c_ini_buf[1]) + ',' + str(W) + ',' + str(H)
                    comando_buffer = '/grid:' + str(c_ini_buf[0]) + ',' + str(
                        c_ini_buf[1]) + ',' + str(W) + ',' + str(H)
                    # obtain the names of the files that surround "file" in the iteration. Next, the MDE will be created.
                    files_list = [
                        str(x - 2) + '-' + str(y + 2),
                        str(x) + '-' + str(y + 2),
                        str(x + 2) + '-' + str(y - 2),
                        str(x - 2) + '-' + str(y),
                        str(x) + '-' + str(y),
                        str(x + 2) + '-' + str(y),
                        str(x - 2) + '-' + str(y - 2),
                        str(x) + '-' + str(y - 2),
                        str(x + 2) + '-' + str(y - 2)
                    ]

                else:
                    c_ini = [x * 1000, y * 1000 - 1000]
                    c_fin = [x * 1000 + 1000, y * 1000]
                    # calculate buffer's inital  coordinate
                    c_ini_buf = [c_ini[0] - 200, c_ini[1] - 200]

                    # number of cells (height and width) in the buffer: the
                    # 1400 m extent minus the cell size
                    t_pix = 2
                    W = 1400 - t_pix
                    H = 1400 - t_pix
                    comando_switchgrid = '/grid:' + str(
                        c_ini_buf[0]) + ',' + str(
                            c_ini_buf[1]) + ',' + str(W) + ',' + str(H)
                    comando_buffer = '/grid:' + str(c_ini_buf[0]) + ',' + str(
                        c_ini_buf[1]) + ',' + str(W) + ',' + str(H)

                    # obtain the names of the files that surround "file" in the iteration. Next, the MDE will be created.
                    files_list = [
                        str(x - 1) + '-' + str(y + 1),
                        str(x) + '-' + str(y + 1),
                        str(x + 1) + '-' + str(y - 1),
                        str(x - 1) + '-' + str(y),
                        str(x) + '-' + str(y),
                        str(x + 1) + '-' + str(y),
                        str(x - 1) + '-' + str(y - 1),
                        str(x) + '-' + str(y - 1),
                        str(x + 1) + '-' + str(y - 1)
                    ]

                root = file.split('.laz')[0]
                no_ext_fn = root.split('_')
                tail = '_' + no_ext_fn[-1]

                #get common part of the file name
                common_name_part = "_".join(no_ext_fn[:-2]) + "_"

                for item in files_list:
                    arch = common_name_part + item + tail + '.laz'
                    Txtfile.write('{}\n'.format(
                        arch))  # write to the command file
                Txtfile.close()

                #define the folders where the files and .exes are

                dtm_filename = o_MDE_ba + '_' + str(x) + '-' + str(y) + '.dtm'
                ascii_filename = o_MDE_ba + '_' + str(x) + '-' + str(
                    y) + '.asc'
                commands = [os.path.join(path_exe, 'GridSurfaceCreate.exe')]
                string = path_exe + '\\GridSurfaceCreate.exe'

                #Switches
                commands.append('/verbose')
                string = string + ' ' + comando_buffer
                commands.append(comando_buffer)

                if str(clas).strip() != '':
                    commands.append('/class:' + str(clas))
                    string = string + ' /class:' + str(clas) + ' '

                #Parameters needed:
                commands.append(os.path.join(source, dtm_filename))
                commands.append(str(cellsize))
                commands.append('m')
                commands.append('m')
                commands.append('0')
                commands.append('0')
                commands.append('0')
                commands.append('0')
                commands.append(
                    filename)  #os.path.join(LAS_files_folder, source))
                string = string + ' ' + dtm_filename + ' ' + cellsize + ' m m 0 0 0 0 ' + filename

                feedback.pushInfo(
                    '\nGridSurfaceCreate command: {}'.format(string))

                proc = subprocess.run(string, shell=True)

                #ClipDTM [switches] InputDTM OutputDTM MinX MinY MaxX MaxY

                commands = [os.path.join(path_exe, 'ClipDTM.exe')]
                commands.append(dtm_filename)

                clip_filename = o_MDE_ba + '_' + str(x) + '-' + str(
                    y) + '_clip' + '.dtm'
                asclip_filename = o_MDE_ba + '_' + str(x) + '-' + str(
                    y) + '_clip' + '.asc'
                commands.append(clip_filename)
                #Min X
                commands.append(str(c_ini[0]))
                # Min Y
                commands.append(str(c_ini[1]))
                # Max X
                commands.append(str(c_fin[0]))
                # Max Y
                commands.append(str(c_fin[1]))

                minx = str(c_ini[0])
                miny = str(c_ini[1])
                maxx = str(c_fin[0])
                maxy = str(c_fin[1])

                string = path_exe + '\\ClipDTM.exe' + ' ' + dtm_filename + ' ' + clip_filename + ' ' + minx + ' ' + miny + ' ' + maxx + ' ' + maxy

                feedback.pushInfo('ClipDTM command: {}'.format(string))

                proc = subprocess.run(string, shell=True)

                # turn dtm into asc

                commands = [os.path.join(path_exe, 'DTM2ASCII.exe')]
                commands.append(clip_filename)
                commands.append(ascii_filename)
                string = path_exe + '\\DTM2ASCII.exe' + ' ' + clip_filename + ' ' + asclip_filename

                feedback.pushInfo('\nDTM2ASCII command: {}'.format(string))

                proc = subprocess.run(string, shell=True)
                #proc.wait() # create the .asc

                # -------------------------
                #       GRIDMETRICS
                # -------------------------

                archivos = glob.glob(os.path.join(
                    o_MDE, '*clip.dtm'))  # dtm output folder from the first step

                #define the folders where the files and .exes are

                csv_filename = o_metric_ba + '_' + str(x) + '-' + str(
                    y) + '.csv'
                commands = [os.path.join(path_exe, 'gridmetrics.exe')]
                string = path_exe + '\\gridmetrics.exe'

                #Switches
                commands.append('/verbose')

                # grid switch
                commands.append(comando_switchgrid)
                string = string + ' /verbose' + ' ' + comando_switchgrid

                if str(minht).strip() != '':
                    commands.append('/minht:' + str(minht))
                    string = string + ' /minht:' + str(minht)
                if str(clasg).strip() != '':
                    # note: gridmetrics takes its own class list (clasg), not clas
                    commands.append('/class:' + str(clasg))
                    string = string + ' /class:' + str(clasg)
                string = string + ' /outlier:-1,40'

                #Parameters required:
                # clip.dtm file
                commands.append(archivos[i])
                # fcc and pixel size
                commands.append(str(fcc))
                commands.append(str(pixel))
                #output csv
                commands.append(csv_filename)
                # txt files with the 9 laz files
                commands.append(file)

                string = string + ' ' + archivos[
                    i] + ' ' + fcc + ' ' + pixel + ' ' + csv_filename + ' ' + file

                feedback.pushInfo('\nGridmetrics command: {}'.format(string))
                proc = subprocess.run(string, shell=True)

                # -------------------------
                #       CSV2GRID
                # -------------------------

            metrics = glob.glob(
                os.path.join(o_metric, '*all_returns_elevation_stats.csv')
            )  #source = out gridmetrics

            o = os.path.dirname(out)

            fvar = [
                "Row", "Col", "Center", "Center",
                "Total_return_count_above_htmin", "Elev_minimum",
                "Elev_maximum", "Elev_mean", "Elev_mode", "Elev_stddev",
                "Elev_variance", "Elev_CV", "Elev_IQ_Int_IQ", "Elev_skewness",
                "Elev_kurtosis", "Elev_AAD", "Elev_L1", "Elev_L2", "Elev_L3",
                "Elev_L4", "Elev_L_CV", "Elev_L_skewness", "Elev_L_kurtosis",
                "Elev_P01", "Elev_P05", "Elev_P10", "Elev_P20", "Elev_P25",
                "Elev_P30", "Elev_P40", "Elev_P50", "Elev_P60", "Elev_P70",
                "Elev_P75", "Elev_P80", "Elev_P90", "Elev_P95", "Elev_P99",
                "Return_1_count_above_htmin", "Return_2_count_above_htmin",
                "Return_3_count_above_htmin", "Return_4_count_above_htmin",
                "Return_5_count_above_htmin", "Return_6_count_above_htmin",
                "Return_7_count_above_htmin", "Return_8_count_above_htmin",
                "Return_9_count_above_htmin", "Other_return_count_above_htmin",
                "Percentage_first_returns_above_heightbreak",
                "Percentage_all_returns_above_heightbreak",
                "(All_returns_above_heightbreak)/(Total_first_returns)*100",
                "First_returns_above_heightbreak",
                "All_returns_above_heightbreak",
                "Percentage_first_returns_above_mean",
                "Percentage_first_returns_above_mode",
                "Percentage_all_returns_above_mean",
                "Percentage_all_returns_above_mode",
                "(All_returns_above_mean)/(Total_first_returns)*100",
                "(All_returns_above_mode)/(Total_first_returns)*100",
                "First_returns_above_mean", "First_returns_above_mode",
                "All_returns_above_mean", "All_returns_above_mode",
                "Total_first_returns", "Total_all_returns", "Elev_MAD_median",
                "Elev_MAD_mode",
                "Canopy_relief_ratio((mean_-_min)/(max_–_min))",
                "Elev_quadratic_mean", "Elev_cubic_mean", "KDE_elev_modes",
                "KDE_elev_min_mode", "KDE_elev_max_mode", "KDE_elev_mode_range"
            ]

            for i, metric in enumerate(metrics):
                for c in col:
                    # extract the file name
                    c = int(c) - 1
                    nombre = os.path.basename(metric)

                    asc_filename = o_raster_ba + '_' + nombre.split(
                        'all_returns_elevation_stats.csv')[0] + str(
                            fvar[int(c)]) + '.asc'
                    commands = [os.path.join(path_exe, 'CSV2Grid.exe')]
                    string = path_exe + '\\CSV2Grid.exe'

                    #Switches
                    commands.append('/verbose')

                    # Required parameters:
                    # input
                    commands.append(metric)
                    # column
                    c = int(c) + 1
                    commands.append(str(c))
                    # output .asc
                    commands.append(asc_filename)
                    string = string + ' ' + metric + ' ' + str(
                        c) + ' ' + asc_filename

                    feedback.pushInfo('\nCSV2Grid command: {}'.format(string))

                    proc = subprocess.Popen(string, shell=True)
                    proc.wait()  # create the .asc

            # join the .asc
            for c in col:
                c = int(c) - 1
                variable = glob.glob(
                    os.path.join(o_raster, '*' + str(fvar[int(c)]) + '.asc'))

                if str(pixel).strip() != '':
                    variable.append('-ps')
                    variable.append(pixel)
                    variable.append(pixel)

                variable.append('-a_nodata')
                variable.append(str(-9999))

                out = o_final_ba + '_merged_' + str(fvar[int(c)]) + '.tif'
                variable.append('-o')
                variable.append(out)
                variable.insert(0, '')
                c = int(c) + 1

                feedback.pushInfo('\nmerge command: {}'.format(variable))

                gdal_merge.main(variable)

            # merge the GridSurfaceCreate (DEM) .asc tiles into a single GeoTIFF

            merged = glob.glob(os.path.join(o_MDE, '*_clip.asc'))

            merged.append('-a_nodata')
            merged.append(str(-9999))
            out = o_final_ba + '_MDE_merged.tif'
            merged.append('-o')
            merged.append(out)
            merged.insert(0, '')
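            # gdal_merge.main() parses sys.argv-style arguments, so a dummy
            # program name is inserted at position 0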

            gdal_merge.main(merged)

        # -------------------------
        #     VEGETATION INDEX
        # -------------------------

        # Create vegetation index. First, look for the satellite scenes in the folder; then call the processing algorithm.

        out_vegind = o_final_ba + '_NDVI.tif'
        if os.path.basename(source_sen) == 'INPUT_SEN':
            feedback.pushInfo('\n\t\tUSING LIDAR DATA\n')
            if monte == 'MONTE':
                sys.exit(
                    'A study area shapefile is required. Please provide the shapefile.'
                )
            else:
                pass
        else:
            satelite_img = glob.glob(os.path.join(source_sen, '*.jp2'))
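            # Map the Sentinel-2 .jp2 scenes to bands via the band code in the
            # file name (B02=blue, B03=green, B04=red, B05/B07=red edge,
            # B8A=narrow NIR)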
            for img in satelite_img:
                if 'B02' in img:
                    blue = img
                elif 'B03' in img:
                    green = img
                elif 'B04' in img:
                    red = img
                elif 'B05' in img:
                    B05 = img
                elif 'B07' in img:
                    B07 = img
                elif 'B8A' in img:
                    nir = img

            buffered_layer = processing.run("grass7:i.vi", {
                'GRASS_RASTER_FORMAT_META': '',
                'GRASS_RASTER_FORMAT_OPT': '',
                'GRASS_REGION_CELLSIZE_PARAMETER': 0,
                'GRASS_REGION_PARAMETER': None,
                'band5': B05,
                'band7': B07,
                'blue': blue,
                'green': green,
                'nir': nir,
                'output': out_vegind,
                'red': red,
                'soil_line_intercept': None,
                'soil_line_slope': None,
                'soil_noise_reduction': None,
                'storage_bit': 1,
                'viname': 10
            },
                                            context=context,
                                            feedback=feedback)['output']
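            # 'viname': 10 selects the index to compute from the i.vi options
            # list (assumed here to be NDVI, matching the output file name)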

            # -------------------------
            #   Clip Vegetation index
            # -------------------------

            s = glob.glob(os.path.join(o_final,
                                       '*NDVI.tif'))  # output of the previous step
            s = s[0]
            # As clip mask, use the study-area shapefile if given; otherwise
            # use one of the clipped rasters with the variable information.
            if os.path.basename(source) == 'INPUT':
                clip = monte
            else:
                clip_var = int(col[0]) - 1
                clip = glob.glob(
                    os.path.join(o_final,
                                 '*' + str(fvar[int(clip_var)]) + '*.tif'))
                clip = clip[0]
            out = o_final_ba + '_NDVI_clip.tif'
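            # The clip extent comes either from the study-area shapefile
            # (monte) or from one of the clipped per-variable rasters; QGIS is
            # assumed to resolve the layer path passed as PROJWIN to that
            # layer's extent.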

            buffered_layer = processing.run("gdal:cliprasterbyextent", {
                'INPUT': s,
                'DATA_TYPE': 0,
                'NODATA': -9999,
                'OPTIONS': '',
                'PROJWIN': clip,
                'OUTPUT': str(out)
            },
                                            context=context,
                                            feedback=feedback)['OUTPUT']

        ### ------------------------------------------
        ###                 NORMALIZATION
        ### ------------------------------------------

        # check that only LiDAR raster variables and NDVI are in the list
        source = glob.glob(os.path.join(o_final, '*.tif'))
        source_NDVI = glob.glob(os.path.join(o_final, '*_NDVI.tif'))
        source_MDE = glob.glob(os.path.join(o_final, '*_MDE_merged.tif'))

        source_multi = glob.glob(os.path.join(o_final, '*_multiband.tif'))

        # when LiDAR data is present, also exclude the merged MDE raster
        if source_MDE != []:
            source_NDVI.append(source_MDE[0])

        # Drop the NDVI/MDE rasters and any existing multiband raster from the
        # list; filter into a new list, because removing items while iterating
        # over the same list skips elements
        source = [
            i for i in source
            if i not in source_NDVI and i not in source_multi
        ]
        # order bands alphabetically
        source.sort()

        for raster in source:

            RasterSource = gdal.Open(raster)
            rows = RasterSource.RasterYSize  # number of pixels in y
            cols = RasterSource.RasterXSize  # number of pixels in x
            print('Processing: {}\nrows: {}, cols: {} '.format(
                raster, rows, cols))
            geoTransform = RasterSource.GetGeoTransform()
            band = RasterSource.GetRasterBand(1)
            data = band.ReadAsArray(0, 0, cols, rows)
            row_range = range(rows)
            col_range = range(cols)

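            # GDAL geotransform layout: (originX, pixelWidth, row rotation,
            # originY, column rotation, pixelHeight); pixelHeight is negative
            # for north-up rasters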
            pixelWidth = geoTransform[1]
            pixelHeight = geoTransform[5]
            originX = geoTransform[0]
            originY = geoTransform[3]

            newRasterfn = raster.split('.tif')[0] + '_norm.tif'
            # Read the band into a flat list, preserving the -9999 nodata value
            array = []
            if 'NDVI' in raster:
                for x in row_range:
                    for y in col_range:
                        aux = float(data[x, y])
                        # clamp slightly negative NDVI values to 0; keep nodata
                        if -1 < aux < 0:
                            aux = 0
                        array.append(aux)
            else:
                for x in row_range:
                    for y in col_range:
                        array.append(float(data[x, y]))
            # Keep the non-None values; note that -9999 nodata cells remain in
            # the list, so the percentiles below include them
            values = [value for value in array if value is not None]
            max_val = max(values)

            p75 = np.percentile(values, 75)  # computed but currently unused
            p90 = np.percentile(values, 90)

            print('max value of {}: {}\tp90: {}'.format(
                os.path.basename(newRasterfn), max_val, p90))
            # rescale raster values to the 0-100 range
            lista_nueva = []
            lista_norm = []
            min_value = min(values)  # currently unused
            for elemento in array:
                if elemento is None:
                    lista_norm.append(-9999)
                elif elemento == 0:
                    lista_norm.append(0)
                elif elemento == -9999:
                    lista_norm.append(-9999)
                else:
                    if 'P90' in raster or 'P20' in raster:
                        if elemento < p90:
                            # stretch between the apparent 2 m height floor and p90
                            elem_norm = ((elemento - 2) / (p90 - 2)) * 100
                            lista_norm.append(elem_norm)
                        else:
                            lista_norm.append(100)
                    #elif 'Percentage_first_returns_above_heightbreak' in raster:
                    #   lista_norm.append(elemento)
                    else:
                        elem_norm = (elemento / max_val) * 100
                        lista_norm.append(elem_norm)

            # Reshape the flat list into rows x cols, as required by gdal's
            # WriteArray command
            for i in range(0, len(lista_norm), cols):
                lista_nueva.append(lista_norm[i:i + cols])
            array_norm = np.array(lista_nueva)
            #array_norm=array_norm[::-1]

            driver = gdal.GetDriverByName('GTiff')
            outRaster = driver.Create(newRasterfn, cols, rows, 1,
                                      gdal.GDT_Float32)
            outRaster.SetGeoTransform(
                (originX, pixelWidth, 0, originY, 0, pixelHeight))
            outband = outRaster.GetRasterBand(1)
            outband.WriteArray(array_norm)
            outRasterSRS = osr.SpatialReference()
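            # EPSG:25830 is ETRS89 / UTM zone 30N; the output CRS is hardcoded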
            outRasterSRS.ImportFromEPSG(25830)
            outRaster.SetProjection(outRasterSRS.ExportToWkt())
            outband.FlushCache()
            outRaster = None  # dereference so GDAL closes and finalizes the file
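            # NOTE: the per-pixel loops above could be replaced by a vectorized
            # NumPy version along these lines (a sketch under the same -9999
            # nodata convention, not a tested drop-in):
            #   arr = data.astype(float)
            #   valid = arr != -9999
            #   arr[valid] = arr[valid] / arr[valid].max() * 100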

        norm_source = []
        for i in source:
            if 'Percentage_first_returns_above_heightbreak' in i:
                norm_source.append(i)
            else:
                norm_file = i.split('.tif')[0] + '_norm.tif'
                norm_source.append(norm_file)

        # Copy the list so the merge flags appended below do not leak back
        # into norm_source, which is checked and iterated again afterwards
        merged = list(norm_source)
        print('\n', merged)
        merged.append('-separate')

        merged.append('-n')
        merged.append(str(-9999))

        merged.append('-a_nodata')
        merged.append(str(-9999))

        out = o_final_ba + '_multiband.tif'
        merged.append('-o')
        merged.append(out)
        merged.insert(0, '')

        gdal_merge.main(merged)
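        # with -separate each input becomes its own band, so the band order in
        # the multiband raster follows the sorted norm_source list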

        # delete the intermediate normalized rasters now that they are merged
        for raster in norm_source:
            if '_norm' in raster:
                os.remove(raster)

        #-----------------------------------
        #       Segmentation meanshift
        #-----------------------------------

        # Check whether the OTB path exists. If so, append the segmentation
        # .bat to it; otherwise the user must specify the path to the OTB folder.

        global path_bat
        if path_bat == '':
            path_bat = self.parameterAsString(parameters, self.PATH_BAT,
                                              context)
        path_bat = path_bat + '\\bin\\otbcli_Segmentation.bat'
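        # otbcli_Segmentation.bat lives under <OTB install>\bin in Windows
        # builds of the Orfeo Toolbox, hence the hardcoded suffix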

        spatialr = self.parameterAsString(parameters, self.SPATIALR, context)
        ranger = self.parameterAsString(parameters, self.RANGER, context)
        maxiter = self.parameterAsString(parameters, self.MAXITER, context)
        minsize = self.parameterAsString(parameters, self.MINSIZE, context)
        out = o_final_ba + '_meanshift_segmentation.shp'

        o = os.path.dirname(out)
        if len(norm_source) == 1:
            feedback.pushInfo('NDVI_norm')
            source = o_final_ba + '_NDVI_clip_norm.tif'
        else:
            source = glob.glob(os.path.join(o_final, '*_multiband.tif'))
            source = source[0]

        # bat path

        commands = [path_bat]
        # input
        commands.append('-in')
        commands.append(source)
        # output
        commands.append('-mode.vector.out')
        commands.append(out)

        string = path_bat + ' -in ' + source + ' -mode.vector.out ' + out

        # Optional filter parameters:

        if str(spatialr).strip() != '':
            commands.append('-filter.meanshift.spatialr ' + str(spatialr))
            string = string + ' -filter.meanshift.spatialr ' + str(spatialr)
        if str(ranger).strip() != '':
            commands.append('-filter.meanshift.ranger ' + str(ranger))
            string = string + ' -filter.meanshift.ranger ' + str(ranger)
        if str(minsize).strip() != '':
            commands.append('-filter.meanshift.minsize ' + str(minsize))
            string = string + ' -filter.meanshift.minsize ' + str(minsize)
        if str(maxiter).strip() != '':
            commands.append('-filter.meanshift.maxiter ' + str(maxiter))
            string = string + ' -filter.meanshift.maxiter ' + str(maxiter)
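        # OTB mean-shift parameters: spatialr = spatial radius, ranger =
        # range (spectral) radius, minsize = minimum segment size in pixels,
        # maxiter = maximum number of iterations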

        # When shell=True it seems better to pass the args as a string rather
        # than a sequence.

        feedback.pushInfo('\nmeanshift command: {}'.format(string))
        proc = subprocess.Popen(string, shell=True)
        proc.wait()  # wait for the segmentation to finish

        #-------------------------------------
        #  Chaiken contour generalization
        #-------------------------------------

        # Smooth the segmentation polygons; the snakes result feeds the
        # dictionary returned by the processAlgorithm function.
        source = glob.glob(
            os.path.join(o_final, '*_meanshift_segmentation.shp'))
        source = source[0]

        sink = o_final_ba + '_rodales_chaiken.shp'
        error = o_final_ba + 'error_snakes.gpkg'
        error_chai = o_final_ba + 'error_chaiken.gpkg'
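        # In the QGIS grass7:v.generalize enum, method 8 is assumed to map to
        # 'chaiken' and method 10 to 'snakes', matching the output names below.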

        buffered_layer = processing.run("grass7:v.generalize", {
            '-l': True,
            '-t': False,
            'GRASS_MIN_AREA_PARAMETER': 0.0001,
            'GRASS_OUTPUT_TYPE_PARAMETER': 0,
            'GRASS_REGION_PARAMETER': None,
            'GRASS_SNAP_TOLERANCE_PARAMETER': -1,
            'GRASS_VECTOR_DSCO': '',
            'GRASS_VECTOR_LCO': '',
            'alpha': 1,
            'angle_thresh': 3,
            'beta': 1,
            'betweeness_thresh': 0,
            'cats': '',
            'closeness_thresh': 0,
            'degree_thresh': 0,
            'error': error_chai,
            'input': source,
            'iterations': 1,
            'look_ahead': 7,
            'method': 8,
            'output': sink,
            'reduction': 50,
            'slide': 0.5,
            'threshold': 1,
            'type': [0, 1, 2],
            'where': ''
        },
                                        context=context,
                                        feedback=feedback)['output']

        sink = o_final_ba + '_rodales_snakes.shp'
        smoothing_snakes = processing.run('grass7:v.generalize', {
            '-l': True,
            '-t': False,
            'GRASS_MIN_AREA_PARAMETER': 0.0001,
            'GRASS_OUTPUT_TYPE_PARAMETER': 0,
            'GRASS_REGION_PARAMETER': None,
            'GRASS_SNAP_TOLERANCE_PARAMETER': -1,
            'GRASS_VECTOR_DSCO': '',
            'GRASS_VECTOR_LCO': '',
            'alpha': 1,
            'angle_thresh': 3,
            'beta': 1,
            'betweeness_thresh': 0,
            'cats': '',
            'closeness_thresh': 0,
            'degree_thresh': 0,
            'error': error,
            'input': source,
            'iterations': 1,
            'look_ahead': 7,
            'method': 10,
            'output': sink,
            'reduction': 50,
            'slide': 0.5,
            'threshold': 1,
            'type': [0, 1, 2],
            'where': ''
        },
                                          context=context,
                                          feedback=feedback)['output']

        feedback.pushInfo('\nPROCESS FINISHED')

        return {self.OUTPUT: sink}