Example #1
def write_images(file):
    # Disable JAI native MediaLib extensions
    System = jpy.get_type('java.lang.System')
    System.setProperty('com.sun.media.jai.disableMediaLib', 'true')

    NUM_BANDS = 15

    product = ProductIO.readProduct(file)
    band_arrays = []
    w = product.getSceneRasterWidth()
    h = product.getSceneRasterHeight()
    for i in range(0, NUM_BANDS):
        band = product.getBand("radiance_%d" % (i + 1))
        band_data = np.zeros(w * h, dtype=np.float32)
        band.readPixels(0, 0, w, h, band_data)
        band_arrays.append(band_data)

    result = []
    value_min = -1
    value_max = 1
    for i in range(0, NUM_BANDS):
        for j in range(i + 1, NUM_BANDS):
            a1 = band_arrays[i]
            a2 = band_arrays[j]
            array = (a1 - a2) / (a1 + a2)

            array = np.ma.masked_invalid(array)
            array = array.clip(value_min, value_max)
            array -= value_min
            array *= 1.0 / (value_max - value_min)
            array.shape = (h, w)
            array = cm.jet(array, bytes=True)

            image = Image.fromarray(array, 'RGBA')
            name = 'radiance_%d_to_%d.png' % (i + 1, j + 1)
            with io.FileIO(name, 'w') as fp:
                print('Writing ' + name)
                image.save(fp, format='PNG')
                result.append(name)

    return result
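
A hypothetical driver for the function above (the imports mirror what the snippet relies on; the input product path is a placeholder, not from the original):

import io
import numpy as np
import matplotlib.cm as cm
from PIL import Image
from snappy import ProductIO, jpy

if __name__ == '__main__':
    # write_images() returns the list of PNG file names it created
    written = write_images('MER_RR__1P_sample.N1')  # placeholder input product
    print('Wrote %d band-ratio images' % len(written))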
Example #2
def main():

    year = 2019

    data = get_unprocessed_data(year)

    slaves = get_slaves()

    for _data in data:
        try:
            file_name = _data['file'].split(image_path)[1]
            print('********************************************************')
            print('started file: {}'.format(file_name))

            image_datetime = file_name.split('_')[4]
            image_year = image_datetime[0:4]
            image_month = image_datetime[4:6]
            image_month_string = get_string_month(image_month)
            image_day = image_datetime[6:8]
            band_name_template = 'Gamma0_{}_mst_' + image_day + image_month_string + image_year

            product = ProductIO.readProduct('{}.zip'.format(_data['file']))
            bands = list(product.getBandNames())
            _select_bands = []
            for band in bands:
                if 'VV' in band:
                    _select_bands.append(band_name_template.format('VV'))
                elif 'VH' in band:
                    _select_bands.append(band_name_template.format('VH'))
                elif 'HH' in band:
                    _select_bands.append(band_name_template.format('HH'))
                elif 'HV' in band:
                    _select_bands.append(band_name_template.format('HV'))

            select_bands = list(set(_select_bands))

            intersecting_slaves = get_intersecting_slaves(_data, slaves)

            if os.path.exists(intermediate_output_path + file_name +
                              '_Orb_Cal_ML_TF.dim'):
                print("Skipping :", _data['file'],
                      ' \n Intermediate output already exists.')
            else:
                before_coregistration = exec_cmd.format(
                    file_name, _data['file'])
                result = subprocess.check_output(before_coregistration,
                                                 shell=True)
                print(result)

            registration_tree, master_slave_node, terrain_correction_node = get_nodes(
            )
            master_slave_list = master_slave_node.xpath('//fileList')[0]
            master_slave_list.text = list_dict_to_string(
                intersecting_slaves, 'file')

            source_bands_list = terrain_correction_node.xpath(
                '//sourceBands')[0]
            source_bands_list.text = ','.join(select_bands)

            with open(coregister_path, 'w') as f:
                f.write(tostring(registration_tree, pretty_print=True))

            after_coregistration = coreg_cmd.format(file_name)
            result = subprocess.check_output(after_coregistration, shell=True)
            print(result)

            # subsetting to individual bands
            for select_band in select_bands:
                subset_tree, product_reader_node, subset_node = get_subset_node(
                )
                file_source = product_reader_node.xpath('//file')[0]
                file_source.text = '{}{}_Orb_Cal_ML_TF_Stack_Spk_EC.dim'.format(
                    intermediate_output_path, file_name)
                subset_source_band = subset_node.xpath('//sourceBands')[0]
                subset_source_band.text = select_band
                with open(subset_path, 'w') as f:
                    f.write(tostring(subset_tree, pretty_print=True))

                subsetting = export_cmd.format(file_name, select_band)
                result = subprocess.check_output(subsetting, shell=True)
                print(result)

                os.remove(subset_path)

            conn, cur = connect_to_db(db)
            cur.execute(
                "UPDATE sentinel1 SET processed=TRUE WHERE slave=FALSE and title='{}'"
                .format(file_name))
            conn.commit()
            close_connection(conn, cur)
            os.remove(coregister_path)

            print('end file')
            print('********************************************************')
        except Exception as e:
            logging.error(e)
            print(e)
            continue
Example #3
# Preparing of product
productName = 'DF94'
fileName = 'D:/Test/AGB_2015_0.zip'
bandName = 'band_1'
savePath = 'D:/fyp-master/Polygon/'
polygon = []
topLeftLat = 4.431921373298707
topLeftLon = -52.14877215477987
botLeftLat = 2.885133324526054
botLeftLon = -52.468809464281705
botRightLat = 3.051240184812927
botRightLon = -53.258843412884204
topRightLat = 4.59619348038011
topRightLon = -52.9405767847531

radarImage = ProductIO.readProduct(fileName)
rasterBand = radarImage.getBand(bandName)

rasterBio = jpy.cast(rasterBand, RasterDataNode)
geo_CodingBio = jpy.cast(rasterBio.getGeoCoding(), GeoCoding) 

#Getting pixel positions
#POLYGON ((-54.459992631546 1.8573631048202515, -54.823678525590225 3.605360746383667, -52.60408776706431 4.070178031921387, -52.24492652190336 2.3271737098693848, -54.459992631546 1.8573631048202515))

pixPos = getPixCoord(geo_CodingBio, topLeftLat, topLeftLon)
print("Top Left: ", pixPos)
polygon.append(pixPos)

pixPos = getPixCoord(geo_CodingBio, botLeftLat, botLeftLon)
print("Bot Left: ", pixPos)
polygon.append(pixPos)
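
getPixCoord is not shown in this excerpt; a minimal sketch of what such a helper usually does with SNAP's geo-coding API (GeoPos obtained via jpy, as elsewhere on this page) might be:

GeoPos = jpy.get_type('org.esa.snap.core.datamodel.GeoPos')

def getPixCoord(geo_coding, lat, lon):
    # Map a geographic position to raster pixel coordinates.
    pix_pos = geo_coding.getPixelPos(GeoPos(lat, lon), None)
    return int(pix_pos.getX()), int(pix_pos.getY())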
Example #4
import sys
sys.path.append('/root/.snap/snap-python')
import os
os.environ.update({"LD_LIBRARY_PATH":"."})

import snappy
from snappy import ProductIO, GPF

HashMap = snappy.jpy.get_type('java.util.HashMap')

ReprojectOp = snappy.jpy.get_type('org.esa.snap.core.gpf.common.reproject.ReprojectionOp')
in_file = '/home/zy/data_pool/U-TMP/S1A_IW_SLC__1SSV_20150109T112521_20150109T112553_004094_004F43_7041_Cal_deb_Spk_TC.dim'
out_file = '/home/zy/data_pool/U-TMP/S1A_IW_SLC__1SSV_20150109T112521_20150109T112553_004094_004F43_7041_Cal_deb_Spk_TC_reproj.dim'

product = ProductIO.readProduct(in_file)

# op = ReprojectOp()
# op.setSourceProduct(product)
# op.setParameter('crs', 'AUTO:42001')
# op.setParameter('resampling', 'Nearest')

parameters = HashMap()
parameters.put('crs', 'AUTO:42001')
parameters.put('resampling', 'Nearest')
reprojProduct = GPF.createProduct('Reproject', parameters, product)

# sub_product = op.getTargetProduct()
# ProductIO.writeProduct(sub_product, out_file, 'BEAM-DIMAP')

# # The target .dim file and .data folder are produced, but the program does not exit automatically after the write completes
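
The reprojected product is never written in this excerpt; a hedged completion using snappy's ProductIO (the 'BEAM-DIMAP' format name matches the .dim output path above):

ProductIO.writeProduct(reprojProduct, out_file, 'BEAM-DIMAP')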
Example #5
import snappy
from snappy import GPF, ProductIO
import glob

GPF.getDefaultInstance().getOperatorSpiRegistry().loadOperatorSpis()
HashMap = snappy.jpy.get_type('java.util.HashMap')


def do_mosaic(source):
    parameters = HashMap()
    parameters.put('pixelSize', 200.0)
    parameters.put('resamplingMethod', 'BILINEAR_INTERPOLATION')
    parameters.put('mapProjection', "AUTO:42001")
    parameters.put('variables', 'Sigma0_HH')
    output = GPF.createProduct('SAR-Mosaic', parameters, source)
    return output


processed_path = 'D:/Project_Data/Arctic_PRIZE/Processed_Data/S1/'

text_file = open(processed_path + "list_dim.txt", "r")
mosaic_files = [line.strip() for line in text_file.readlines()]
text_file.close()
#mosaic_files = glob.glob(os.path.join(processed_path, '*HH.dim*'))
#print(mosaic_files)

products = []
for fl in mosaic_files:
    print(processed_path + fl)
    products.append(ProductIO.readProduct(processed_path + fl))

mosaic_data = do_mosaic(products)
ProductIO.writeProduct(mosaic_data, 'mosaic', 'GeoTIFF')
Example #6
def read(filename):
    return ProductIO.readProduct(filename)
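
A matching write helper is a natural companion to read(); a minimal sketch assuming snappy's ProductIO ('BEAM-DIMAP' and 'GeoTIFF' are standard SNAP writer names):

from snappy import ProductIO

def write(product, filename, fmt='BEAM-DIMAP'):
    # Persist an in-memory SNAP product to disk in the given format.
    ProductIO.writeProduct(product, filename, fmt)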
Example #7
# check if the band index given is valid
if sys.argv[2] not in ['2', '3', '4', '8']:
    print('Incorrect band index')
    sys.exit(1)

# get cli arguments
product_file = sys.argv[1]
band_index = sys.argv[2]
band_name = 'B' + band_index
product_name = {
    'B2': 'blue',
    'B3': 'green',
    'B4': 'red',
    'B8': 'nir',
}[band_name]

# input product: open and get dimensions & name
input_product = ProductIO.readProduct(product_file)
product_width = input_product.getSceneRasterWidth()
product_height = input_product.getSceneRasterHeight()
product_name = input_product.getName()

# output product: copy selected band & save product
output_product = Product(product_name, product_name, product_width,
                         product_height)
ProductUtils.copyGeoCoding(input_product, output_product)
ProductUtils.copyBand(band_name, input_product, output_product, True)
ProductIO.writeProduct(output_product, product_name + '.band.dim',
                       'BEAM-DIMAP')
output_product.closeIO()
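
An alternative to copying the band by hand is SNAP's Subset operator driven through GPF; a hedged sketch (parameter names as commonly used with snappy; the output file name is a placeholder):

from snappy import GPF, jpy

HashMap = jpy.get_type('java.util.HashMap')

parameters = HashMap()
parameters.put('sourceBands', band_name)  # e.g. 'B4'
parameters.put('copyMetadata', True)
subset_product = GPF.createProduct('Subset', parameters, input_product)
ProductIO.writeProduct(subset_product, product_name + '.subset.dim', 'BEAM-DIMAP')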
Example #8
    print("usage: %s <inputProduct> <wkt>" % sys.argv[0])
    sys.exit(1)

input = sys.argv[1]
inputFileName = input[input.rfind('/')+1:]

wellKnownText = sys.argv[2]

try:
    geometry = WKTReader().read(wellKnownText)
except:
    geometry = None
    print('Failed to convert WKT into geometry')
    sys.exit(2)

product = ProductIO.readProduct(input)

wktFeatureType = PlainFeatureFactory.createDefaultFeatureType(DefaultGeographicCRS.WGS84)
featureBuilder = SimpleFeatureBuilder(wktFeatureType)
wktFeature = featureBuilder.buildFeature('shape')
wktFeature.setDefaultGeometry(geometry)

newCollection = ListFeatureCollection(wktFeatureType)
newCollection.add(wktFeature)

productFeatures = FeatureUtils.clipFeatureCollectionToProductBounds(newCollection, product, None, ProgressMonitor.NULL)

node = VectorDataNode('shape', wktFeatureType)
product.getVectorDataGroup().add(node)

vdGroup = product.getVectorDataGroup()
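
The clipped feature collection is never attached to the new node in this excerpt; a hedged completion, assuming the GeoTools collection exposed by VectorDataNode.getFeatureCollection():

# Hedged completion (not in the excerpt): attach the clipped features to the node.
node.getFeatureCollection().addAll(productFeatures)
print('Vector data group now holds %d node(s)' % vdGroup.getNodeCount())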
Example #9
from snappy import jpy

from os.path import join
#
# mtd = 'MTD_MSIL1C.xml'
# fname = DIR.split('/')[-1]
# ID = fname.replace('.SAFE','')
#
# # _, rgb_image = mkstemp(dir='.', prefix=prefix , suffix='.png')
# source = join(DIR, mtd)

uncorr = '/home/nils/birdhouse/var/lib/pywps/cache/flyingpigeon/scihub.copernicus/S2A_MSIL1C_20170129T092221_N0204_R093_T33PVK_20170129T093530.SAFE/MTD_MSIL1C.xml'

corr = '/home/nils/birdhouse/var/lib/pywps/cache/flyingpigeon/scihub.copernicus/S2A_MSIL2A_20170129T092221_N0204_R093_T33PVK_20170129T093530.SAFE/MTD_MSIL2A.xml'

sourceProduct = ProductIO.readProduct(uncorr)

red = sourceProduct.getBand('B4')
green = sourceProduct.getBand('B3')
blue = sourceProduct.getBand('B2')

Color = jpy.get_type('java.awt.Color')
ColorPoint = jpy.get_type('org.esa.snap.core.datamodel.ColorPaletteDef$Point')
ColorPaletteDef = jpy.get_type('org.esa.snap.core.datamodel.ColorPaletteDef')
ImageInfo = jpy.get_type('org.esa.snap.core.datamodel.ImageInfo')
ImageLegend = jpy.get_type('org.esa.snap.core.datamodel.ImageLegend')
ImageManager = jpy.get_type('org.esa.snap.core.image.ImageManager')
JAI = jpy.get_type('javax.media.jai.JAI')
RenderedImage = jpy.get_type('java.awt.image.RenderedImage')

# Disable JAI native MediaLib extensions
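
The excerpt ends with the MediaLib comment; a hedged sketch of how these types are typically combined into an RGB quicklook, following the pattern of SNAP's snappy_write_image example (the output file name is a placeholder; ProductUtils and ProgressMonitor are fetched via jpy here as assumptions):

System = jpy.get_type('java.lang.System')
System.setProperty('com.sun.media.jai.disableMediaLib', 'true')

ProductUtils = jpy.get_type('org.esa.snap.core.util.ProductUtils')
ProgressMonitor = jpy.get_type('com.bc.ceres.core.ProgressMonitor')

rgb_bands = [red, green, blue]
image_info = ProductUtils.createImageInfo(rgb_bands, True, ProgressMonitor.NULL)
rgb_image = ImageManager.getInstance().createColoredBandImage(rgb_bands, image_info, 0)
JAI.create('filestore', rgb_image, 'S2A_quicklook.png', 'PNG')  # placeholder file name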
Example #10
for j in range(len(product_list)):
    #Download the product:
    if(os.path.isdir("products/"+product_list[j]+".SAFE")==False):
        f=open("products/products.dat","w")
        f.write(product_list[j])
        f.close()
        os.system("~/miniconda3/envs/senpy/bin/python /home/heido/cvat-vsm/dias_old/main_engine.py -d products")
        os.system("rm products/products*")
        #Make the .dim file:
        if(os.path.isdir("products/"+product_list[j]+".SAFE")==True):
            input_path="products/"+product_list[j]+".SAFE/MTD_MSIL2A.xml"
            output_path="products/"+product_list[j]+".SAFE/GRANULE/output.dim"
            line_for_gpt="/snap/snap8/bin/gpt output.xml -Pinput=\""+input_path+"\" -Poutput=\""+output_path+"\""
            os.system(line_for_gpt)
            #Make the RGB image:
            S2_product=ProductIO.readProduct('products/'+product_list[j]+'.SAFE/GRANULE/output.dim')
            band_names = S2_product.getBandNames()
            red = S2_product.getBand('B4')
            green = S2_product.getBand('B3')
            blue = S2_product.getBand('B2')
            write_rgb_image([red, green, blue], product_list[j]+".png", 'png')
            #Tile the image
            im_S2 = Image.open(product_list[j]+".png")
            os.system("mkdir products/"+product_list[j])
            where="products/"+product_list[j]
            tile_clear_image(im_S2,product_list[j],where)
            os.system("rm "+product_list[j]+".png")
            nr_of_tiles=len([name for name in os.listdir(where) if os.path.isfile(os.path.join(where, name))])
            if(nr_of_tiles>0):
                NDVI_im=product_list[j]+"_NDVI"
                width = S2_product.getSceneRasterWidth()
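
The loop is cut off after reading the raster width; it goes on to derive an NDVI image. A hedged sketch of one common way to compute NDVI with the BandMaths operator (band names B4/B8 and the GeoTIFF output are assumptions, not the original code):

from snappy import GPF, ProductIO, jpy

BandDescriptor = jpy.get_type('org.esa.snap.core.gpf.common.BandMathsOp$BandDescriptor')
HashMap = jpy.get_type('java.util.HashMap')

ndvi_band = BandDescriptor()
ndvi_band.name = 'ndvi'
ndvi_band.type = 'float32'
ndvi_band.expression = '(B8 - B4) / (B8 + B4)'  # assumes B4/B8 band names

target_bands = jpy.array('org.esa.snap.core.gpf.common.BandMathsOp$BandDescriptor', 1)
target_bands[0] = ndvi_band

parameters = HashMap()
parameters.put('targetBands', target_bands)
ndvi_product = GPF.createProduct('BandMaths', parameters, S2_product)
ProductIO.writeProduct(ndvi_product, NDVI_im, 'GeoTIFF')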
Example #11
BandDescriptor = jpy.get_type(
    'org.esa.snap.core.gpf.common.BandMathsOp$BandDescriptor')

flist = listdir(sarIn)

status = 0
# Read products

for f in flist:
    status = status + 1

    print("SCENE " + str(status) + " of " + str(len(flist)) + "\n\n")

    print("3 begin reading product\n")

    product = ProductIO.readProduct(sarIn + "/" + f)
    print("\n processing " + f + "\n")
    print("at " + str(datetime.datetime.now()) + "\n")

    # Obtain some attributes

    height = product.getSceneRasterHeight()
    width = product.getSceneRasterWidth()
    name = product.getName()
    description = product.getDescription()
    band_names = product.getBandNames()

    # Initiate processing
    print("4 initiate processing\n")

    GPF.getDefaultInstance().getOperatorSpiRegistry().loadOperatorSpis()
Example #12
        for col_index in range(confusion_matrix.shape[1]):
            if row_index != col_index:
                value = confusion_matrix[row_index][col_index]
                f.write(RosebelPixelClass3(row_index + 1).name + ' pixels mis-predicted as ' +
                        RosebelPixelClass3(col_index + 1).name + ': %.2f%%' % (
                                    value * 100 / sum(confusion_matrix[row_index])) + '\n')
    f.close()

if __name__ == "__main__":
    start_time = time.time()

    if os.path.exists(LABEL_PATH + "rf_1A_1B_predictions.npy"):
        rf_predictions = np.load(LABEL_PATH + "rf_1A_1B_predictions.npy")
    else:
        print("Reading product:" + PRODUCT_1A_PATH)
        p1A = ProductIO.readProduct(PRODUCT_1A_PATH)

        print('Extracting Bands')
        band_names = p1A.getBandNames()
        bands = []
        number_of_bands = 0
        for band_name in band_names:
            print(str(number_of_bands+1) + ": " + str(band_name))
            number_of_bands += 1
            bands.append(p1A.getBand(band_name))
        print("Number of bands in product: " + str(number_of_bands))

        print('Extracting Feature Data from Bands')
        features_1A = []
        for band in bands:
            w = band.getRasterWidth()
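
The loop is truncated after getRasterWidth(); the usual continuation reads each band into a flat numpy array, roughly as follows (a sketch, not the original code):

h = band.getRasterHeight()
data = np.zeros(w * h, dtype=np.float32)
band.readPixels(0, 0, w, h, data)
features_1A.append(data)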
Example #13
def main(args=sys.argv[1:]):
    if len(args) != 1:
        print("usage: raycorr-processor <SENSOR>")
        sys.exit(1)

    SENSOR = args[0]
    # SENSOR = 'OLCI'
    # SENSOR = 'MERIS'

    # PRODPATH = "C:\\Users\\carsten\\Dropbox\\Carsten\\SWProjects\\Rayleigh-Correction\\testdata\\"
    # AUXPATH = "C:\\Users\\carsten\\Dropbox\\Carsten\\Tagesordner\\20160104\\Rayleigh-Correction-Processor\\"
    # O3PATH="C:\\Users\\carsten\\Dropbox\\Carsten\\SWProjects\\Rayleigh-Correction\\raycorr\\"
    PRODPATH = "D:\\Dropbox\\Carsten\\SWProjects\\Rayleigh-Correction\\testdata\\"
    # AUXPATH = "D:\\Dropbox\\Carsten\\Tagesordner\\20160104\\Rayleigh-Correction-Processor\\"
    O3PATH="D:\\Dropbox\\Carsten\\SWProjects\\Rayleigh-Correction\\raycorr\\"

    DEMFactory = jpy.get_type('org.esa.snap.dem.dataio.DEMFactory')
    Resampling = jpy.get_type('org.esa.snap.core.dataop.resamp.Resampling')
    GeoPos = jpy.get_type('org.esa.snap.core.datamodel.GeoPos')

    if (SENSOR=='MERIS'):
        IN_FILE = PRODPATH+"subset_1_of_MER_RR__1PTACR20050713_094325_000002592039_00022_17611_0000.dim"
        OUT_FILE = PRODPATH+'Testprodukt1_MER_RR_20050713.dim'
    else:
        if (SENSOR=='OLCI'):
            IN_FILE = PRODPATH+'subset_3_of_S3A_OL_1_EFR____20160509T103945_20160509T104245_20160509T124907_0180_004_051_1979_SVL_O_NR_001.dim'
            OUT_FILE = PRODPATH+'Testproduct3_OL_1_EFR____20160509T103945.dim'
        else:
            print("Sensor ",SENSOR," not supported - exit")
            return
    file = IN_FILE

    # AUX_FILE = AUXPATH+'ADF\\MER_ATP_AXVACR20091026_144725_20021224_121445_20200101_000000'

    # adf = ADF(AUX_FILE)
    # ray_coeff_matrix = adf.ray_coeff_matrix
    # rayADF = readRayADF(AUX_FILE)

    # new_aux = OrderedDict()
    # new_aux['tau_ray'] = rayADF['tR']
    # new_aux['theta'] = rayADF['theta']
    # new_aux['ray_albedo_lut'] = rayADF['rayAlbLUT']
    # new_aux['ray_coeff_matrix'] = ray_coeff_matrix
    # with open('raycorr_auxdata.json', 'w') as fp:
    #         json.dumps(new_aux, fp, cls=JSONNumpyEncoder, indent=2)
    # fp.close()
    with open('../test/raycorr_auxdata.json', 'r') as fp:
        obj = json.load(fp, object_hook=json_as_numpy)
    # json_str = json.dumps(new_aux, cls=JSONNumpyEncoder, indent=2)
    # print(json_str)
    # obj = json.loads(json_str, object_hook=json_as_numpy)
    # rayADF = new_aux
    rayADF = obj
    ray_coeff_matrix=rayADF['ray_coeff_matrix']

    print("Reading...")
    product = ProductIO.readProduct(file)
    width = product.getSceneRasterWidth()
    height = product.getSceneRasterHeight()
    name = product.getName()
    description = product.getDescription()
    band_names = product.getBandNames()

    print("Sensor:      %s" % SENSOR)
    print("Product:     %s, %s" % (name, description))
    print("Raster size: %d x %d pixels" % (width, height))
    print("Start time:  " + str(product.getStartTime()))
    print("End time:    " + str(product.getEndTime()))
    print("Bands:       %s" % (list(band_names)))

    raycorProduct = Product('RayCorr', 'RayCorr', width, height)
    writer = ProductIO.getProductWriter('BEAM-DIMAP')
    raycorProduct.setProductWriter(writer)

    if (SENSOR == 'MERIS'):
        nbands = product.getNumBands() - 2  # the last 2 bands are l1flags and detector index; we don't need them
        band_name = ["radiance_1"]
        for i in range(1,nbands):
            band_name += ["radiance_" + str(i+1)]
    if (SENSOR == 'OLCI'):
        nbands = 21
        band_name = ["Oa01_radiance"]
        sf_name = ["solar_flux_band_1"]
        for i in range(1,nbands):
            if (i < 9):
                band_name += ["Oa0" + str(i + 1) + "_radiance"]
                sf_name += ["solar_flux_band_" + str(i + 1)]
            else:
                band_name += ["Oa" + str(i + 1) + "_radiance"]
                sf_name += ["solar_flux_band_" + str(i + 1)]

    # Create TOA reflectance and Rayleigh optical thickness bands
    for i in range(nbands):
        # bsource = product.getBandAt(i)
        bsource = product.getBand(band_name[i])
        btoa_name = "rtoa_" + str(i + 1)
        toareflBand = raycorProduct.addBand(btoa_name, ProductData.TYPE_FLOAT32)
        ProductUtils.copySpectralBandProperties(bsource, toareflBand)

        btaur_name = "taur_" + str(i + 1)
        taurBand = raycorProduct.addBand(btaur_name, ProductData.TYPE_FLOAT32)
        ProductUtils.copySpectralBandProperties(bsource, taurBand)

        brhor_name = "rRay_" + str(i + 1)
        rhorBand = raycorProduct.addBand(brhor_name, ProductData.TYPE_FLOAT32)
        ProductUtils.copySpectralBandProperties(bsource, rhorBand)
        # Fourier Terms, during debugging only
        brhorF1_name = "rRayF1_" + str(i + 1)
        rhorF1Band = raycorProduct.addBand(brhorF1_name, ProductData.TYPE_FLOAT32)
        ProductUtils.copySpectralBandProperties(bsource, rhorF1Band)
        brhorF2_name = "rRayF2_" + str(i + 1)
        rhorF2Band = raycorProduct.addBand(brhorF2_name, ProductData.TYPE_FLOAT32)
        ProductUtils.copySpectralBandProperties(bsource, rhorF2Band)
        brhorF3_name = "rRayF3_" + str(i + 1)
        rhorF3Band = raycorProduct.addBand(brhorF3_name, ProductData.TYPE_FLOAT32)
        ProductUtils.copySpectralBandProperties(bsource, rhorF3Band)
        rayTransS_name = "transSRay_" + str(i + 1)
        rayTransSBand = raycorProduct.addBand(rayTransS_name, ProductData.TYPE_FLOAT32)
        ProductUtils.copySpectralBandProperties(bsource, rayTransSBand)
        rayTransV_name = "transVRay_" + str(i + 1)
        rayTransVBand = raycorProduct.addBand(rayTransV_name, ProductData.TYPE_FLOAT32)
        ProductUtils.copySpectralBandProperties(bsource, rayTransVBand)
        sARay_name = "sARay_" + str(i + 1)
        sARayBand = raycorProduct.addBand(sARay_name, ProductData.TYPE_FLOAT32)
        ProductUtils.copySpectralBandProperties(bsource, sARayBand)
        rtoaR_name = "rtoaRay_" + str(i + 1)
        rtoaRBand = raycorProduct.addBand(rtoaR_name, ProductData.TYPE_FLOAT32)
        ProductUtils.copySpectralBandProperties(bsource, rtoaRBand)
        rBRR_name = "rBRR_" + str(i + 1)
        rBRRBand = raycorProduct.addBand(rBRR_name, ProductData.TYPE_FLOAT32)
        ProductUtils.copySpectralBandProperties(bsource, rBRRBand)
        spf_name = "sphericalAlbedoFactor_" + str(i + 1)
        spfBand = raycorProduct.addBand(spf_name, ProductData.TYPE_FLOAT32)
        ProductUtils.copySpectralBandProperties(bsource, spfBand)
        # simple Rayleigh reflectance (Roland's formula)
        rRaySimple_name = "RayleighSimple_" + str(i + 1)
        rRaySimpleBand = raycorProduct.addBand(rRaySimple_name, ProductData.TYPE_FLOAT32)
        ProductUtils.copySpectralBandProperties(bsource, rRaySimpleBand)
        # gaseous absorption corrected TOA reflectances
        rho_ng_name = "rtoa_ng_" + str(i + 1)
        rho_ngBand = raycorProduct.addBand(rho_ng_name, ProductData.TYPE_FLOAT32)
        ProductUtils.copySpectralBandProperties(bsource, rho_ngBand)
        # simple Rayleigh optical thickness, for debugging
        taurS_name = "taurS_" + str(i + 1)
        taurSBand = raycorProduct.addBand(taurS_name, ProductData.TYPE_FLOAT32)
        ProductUtils.copySpectralBandProperties(bsource, taurSBand)

    raycorProduct.setAutoGrouping(
            'rtoa:taur:rRay:rRayF1:rRayF2:rRayF3:transSRay:transVRay:sARay:rtoaRay:rBRR:sphericalAlbedoFactor:RayleighSimple:rtoa_ng:taurS')

    airmassBand = raycorProduct.addBand('airmass', ProductData.TYPE_FLOAT32)
    azidiffBand = raycorProduct.addBand('azidiff', ProductData.TYPE_FLOAT32)
    altBand = raycorProduct.addBand('altitude', ProductData.TYPE_FLOAT32)

    # Create flag coding
    raycorFlagsBand = raycorProduct.addBand('raycor_flags', ProductData.TYPE_UINT8)
    raycorFlagCoding = FlagCoding('raycor_flags')
    raycorFlagCoding.addFlag("testflag_1", 1, "Flag 1 for Rayleigh Correction")
    raycorFlagCoding.addFlag("testflag_2", 2, "Flag 2 for Rayleigh Correction")
    group = raycorProduct.getFlagCodingGroup()
    group.add(raycorFlagCoding)
    raycorFlagsBand.setSampleCoding(raycorFlagCoding)

    # add geocoding and create the product on disk (meta data, empty bands)
    ProductUtils.copyGeoCoding(product, raycorProduct) #geocoding is copied when tie point grids are copied,
    ProductUtils.copyTiePointGrids(product, raycorProduct)
    raycorProduct.writeHeader(OUT_FILE)

    # Calculate and write toa reflectances and Rayleigh optical thickness
    # ===================================================================
    # some stuff needed to get the altitude from an external DEM; can be omitted if altitude is used from the product
    # resamplingMethod = 'NEAREST_NEIGHBOUR'  # Resampling.NEAREST_NEIGHBOUR.getName()
    resamplingMethod = Resampling.NEAREST_NEIGHBOUR.getName()
    demName = 'GETASSE30'  # alternative 'SRTM 3Sec'
    dem = DEMFactory.createElevationModel(demName, resamplingMethod)

    # constants
    AVO = 6.0221367E+23  # Avogadro's number
    m_a_zero = 28.9595  # Mean molecular weight of dry air (zero CO2)
    g0_45 = 980.616  # Acceleration of gravity (sea level and 45 degrees latitude)
    Ns = 2.5469E19  # Molecular density of gas in molecules / cm3

    # constants describing the state of the atmosphere and which we don't know; better values may be used if known
    CO2 = 3.E-4  # CO2 concentration at pixel; typical values are 300 to 360 ppm
    C_CO2 = CO2 * 100  # CO2 concentration in ppm
    m_a = 15.0556 * CO2 + m_a_zero  # mean molecular weight of dry air as function of actual CO2

    # other constants
    PA = 0.9587256  # Rayleigh Phase function, molecular asymmetry factor 1
    PB = 1. - PA  # Rayleigh Phase function, molecular asymmetry factor 2
    tpoly = rayADF['tau_ray']  # Polynomial coefficients for Rayleigh transmittance
    h2o_cor_poly = np.array(
            [0.3832989, 1.6527957, -1.5635101, 0.5311913])  # Polynomial coefficients for WV transmission @ 709nm
    # absorb_ozon = np.array([0.0, 0.0002174, 0.0034448, 0.0205669, 0.0400134, 0.105446, 0.1081787, 0.0501634, 0.0410249, \
    #                         0.0349671, 0.0187495, 0.0086322, 0.0, 0.0, 0.0, 0.0084989, 0.0018944, 0.0012369, 0.0, 0.0, 0.0000488]) # OLCI
    # absorb_ozon = np.array([0.0002174, 0.0034448, 0.0205669, 0.0400134, 0.105446, 0.1081787, 0.0501634,  \
    #                         0.0349671, 0.0187495, 0.0086322, 0.0, 0.0084989, 0.0018944, 0.0012369, 0.0]) # MERIS
    O3_FILE = O3PATH+'ozone-highres.txt'
    ozoneO = O3(O3_FILE)
    absorb_ozon = ozoneO.convolveInstrument(SENSOR)

    # arrays which are needed to store some stuff
    E0 = np.zeros(width, dtype=np.float32)
    radiance = np.zeros(width, dtype=np.float32)
    reflectance = np.zeros((nbands, width), dtype=np.float32)
    taur = np.zeros((nbands, width), dtype=np.float32)
    sigma = np.zeros(nbands, dtype=np.float32)
    airmass = np.zeros(width, dtype=np.float32)
    azidiff = np.zeros(width, dtype=np.float32)
    PR = np.zeros(3, dtype=np.float32)  # Fourier coefficients of the Rayleigh Phase function
    rho_Rf = np.zeros(3, dtype=np.float32)  # Fourier terms of the Rayleigh primary scattering reflectance
    rho_Rm = np.zeros((3, nbands, width),
                      dtype=np.float32)  # Fourier terms of the Rayleigh scattering reflectance, corrected for multiple scattering
    rho_R = np.zeros((nbands, width), dtype=np.float32)  # first approximation of Rayleigh reflectance
    rho_toaR = np.zeros((nbands, width), dtype=np.float32)  # toa reflectance corrected for Rayleigh scattering
    rho_BRR = np.zeros((nbands, width),
                       dtype=np.float32)  # top of aerosol reflectance, which is equal to bottom of Rayleigh reflectance
    sphericalFactor = np.zeros((nbands, width),
                               dtype=np.float32)  # spherical Albedo Correction Factor (for testing only, can be integrated into the equation later)
    rRaySimple = np.zeros((nbands, width),
                          dtype=np.float32)  # simple Rayleigh reflectance formula, after Roland (for testing only)
    rho_ng = np.zeros((nbands, width),
                      dtype=np.float32)  # toa reflectance corrected for gaseous absorption (rho_ng = "rho no gas")
    X2 = np.zeros(width, dtype=np.float32)  # temporary variable used for WV correction algorithm for gaseous absorption
    trans709 = np.zeros(width,
                        dtype=np.float32)  # WV transmission at 709nm, used for WV correction algorithm for gaseous absorption
    taurS = np.zeros((nbands, width), dtype=np.float32)  # simple Rayleigh optical thickness, for debugging only

    if (SENSOR == 'MERIS'):
        dem_alt = 'dem_alt'
        atm_press = 'atm_press'
        ozone = 'ozone'
        latitude = 'latitude'
        longitude = 'longitude'
        sun_zenith = 'sun_zenith'
        view_zenith = 'view_zenith'
        sun_azimuth = 'sun_azimuth'
        view_azimuth = 'view_azimuth'
        # water vapour correction:
        # MERIS band 9 @ 709nm to be corrected; WV absorption 900nm = band 15, WV reference 885nm= band 14
        b709 = 8  # the band to be corrected
        bWVRef = 13  # the reference reflectance outside WV absorption band
        bWV = 14  # the reflectance within the WV absorption band
    if (SENSOR == 'OLCI'):
        dem_alt = 'N/A'
        atm_press = 'sea_level_pressure'
        ozone = 'total_ozone'
        latitude = 'TP_latitude'
        longitude = 'TP_longitude'
        sun_zenith = 'SZA'
        view_zenith = 'OZA'
        sun_azimuth = 'SAA'
        view_azimuth = 'OAA'
        # water vapour correction:
        # OLCI band 11 @ 709nm, WV absorption 900nm = band 19, WV reference 885nm = band 18
        b709 = 11 # the band to be corrected
        bWVRef=17 # the reference reflectance outside WV absorption band
        bWV=18 # the reflectance within the WV absorption band

    if (SENSOR == 'MERIS'): # check if this is required at all!
        tp_alt = product.getTiePointGrid(dem_alt)
    alt = np.zeros(width, dtype=np.float32)

    tp_press = product.getTiePointGrid(atm_press)
    press0 = np.zeros(width, dtype=np.float32)

    tp_ozone = product.getTiePointGrid(ozone)
    ozone = np.zeros(width, dtype=np.float32)

    tp_latitude = product.getTiePointGrid(latitude)
    lat = np.zeros(width, dtype=np.float32)
    tp_longitude = product.getTiePointGrid(longitude)
    lon = np.zeros(width, dtype=np.float32)

    tp_theta_s = product.getTiePointGrid(sun_zenith)
    theta_s = np.zeros(width, dtype=np.float32)

    tp_theta_v = product.getTiePointGrid(view_zenith)
    theta_v = np.zeros(width, dtype=np.float32)

    tp_azi_s = product.getTiePointGrid(sun_azimuth)
    azi_s = np.zeros(width, dtype=np.float32)

    tp_azi_v = product.getTiePointGrid(view_azimuth)
    azi_v = np.zeros(width, dtype=np.float32)

    # Rayleigh multiple scattering
    # - Coefficients LUT
    dimTheta = 12
    dimThetaS = dimThetaV = dimTheta
    gridThetaS = rayADF['theta']
    gridThetaV = rayADF['theta']
    gridGeometry = [gridThetaS, gridThetaV]
    RayScattCoeffA = ray_coeff_matrix[:, :, :, 0]
    RayScattCoeffB = ray_coeff_matrix[:, :, :, 1]
    RayScattCoeffC = ray_coeff_matrix[:, :, :, 2]
    RayScattCoeffD = ray_coeff_matrix[:, :, :, 3]
    # - Fourier terms
    a = np.zeros(3, dtype=np.float32)
    b = np.zeros(3, dtype=np.float32)
    c = np.zeros(3, dtype=np.float32)
    d = np.zeros(3, dtype=np.float32)
    rayMultiCorr = np.zeros(3, dtype=np.float32)

    # Rayleigh transmittances and spherical albedo
    tR_thetaS = np.zeros((nbands, width), dtype=np.float32)  # Rayleigh Transmittance sun - surface
    tR_thetaV = np.zeros((nbands, width), dtype=np.float32)  # Rayleigh Transmittance surface - sensor
    dimTaur = 17
    taurTab = np.linspace(0.0, 1.0, num=dimTaur)
    rayAlb_f = interp1d(taurTab, rayADF['ray_albedo_lut'])
    sARay = np.zeros((nbands, width), dtype=np.float32)  # Rayleigh spherical albedo

    print("Processing ...")
    # Calculate the Rayleigh cross section, which depends only on wavelength but not on air pressure
    for i in range(nbands):
        print("processing Rayleigh cross section of band", i)
#        b_source = product.getBandAt(i)
        b_source = product.getBand(band_name[i])
        lam = b_source.getSpectralWavelength()  # wavelength of band i in nm
        lam = lam / 1000.0  # wavelength in micrometer
        lam2 = lam / 10000.0  # wavelength in cm
        F_N2 = 1.034 + 0.000317 / (lam ** 2)  # King factor of N2
        F_O2 = 1.096 + 0.001385 / (lam ** 2) + 0.0001448 / (lam ** 4)  # King factor of O2
        F_air = (78.084 * F_N2 + 20.946 * F_O2 + 0.934 * 1 + C_CO2 * 1.15) / (
            78.084 + 20.946 + 0.934 + C_CO2)  # depolarization ratio or King Factor, (6+3rho)/(6-7rho)
        n_ratio = 1 + 0.54 * (CO2 - 0.0003)
        n_1_300 = (8060.51 + (2480990. / (132.274 - lam ** (-2))) + (17455.7 / (39.32957 - lam ** (-2)))) / 100000000.0
        nCO2 = n_ratio * (1 + n_1_300)  # reflective index at CO2
        sigma[i] = (24 * math.pi ** 3 * (nCO2 ** 2 - 1) ** 2) / (lam2 ** 4 * Ns ** 2 * (nCO2 ** 2 + 2) ** 2) * F_air

    for y in range(height):
        print("processing line ", y, " of ", height)
        # start radiance to reflectance conversion
        theta_s = tp_theta_s.readPixels(0, y, width, 1, theta_s)  # sun zenith angle in degree
        for i in range(nbands):
            b_source = product.getBand(band_name[i])
            radiance = b_source.readPixels(0, y, width, 1, radiance)
            if (SENSOR == 'MERIS'):
                E0.fill(b_source.getSolarFlux())
            if (SENSOR == 'OLCI'):
                    b_source = product.getBand(sf_name[i])
                    E0 = b_source.readPixels(0, y, width, 1, E0)
            reflectance[i] = radiance * math.pi / (E0 * np.cos(np.radians(theta_s)))
            b_out = raycorProduct.getBand("rtoa_" + str(i + 1))
            b_out.writePixels(0, y, width, 1, reflectance[i])
        # radiance to reflectance conversion completed

        # this is dummy code to create a flag
        flag1 = np.zeros(width, dtype=np.bool_)
        flag2 = np.zeros(width, dtype=np.bool_)
        raycorFlags = flag1 + 2 * flag2
        raycorFlagsBand.writePixels(0, y, width, 1, raycorFlags)
        # end flags dummy code

    # raycorProduct.closeIO()
    # if (0==1):
        lat = tp_latitude.readPixels(0, y, width, 1, lat)
        lon = tp_longitude.readPixels(0, y, width, 1, lon)

        # start Rayleigh optical thickness calculation
        # alt = tp_alt.readPixels(0, y, width, 1, alt)  # using the tie-point DEM in a MERIS product
        # get the altitude from an external DEM
        for x in range(width): alt[x] = dem.getElevation(GeoPos(lat[x], lon[x]))

        press0 = tp_press.readPixels(0, y, width, 1, press0)
        ozone = tp_ozone.readPixels(0, y, width, 1, ozone)

        theta_s = tp_theta_s.readPixels(0, y, width, 1, theta_s)  # sun zenith angle in degree
        theta_v = tp_theta_v.readPixels(0, y, width, 1, theta_v)  # view zenith angle in degree
        azi_s = tp_azi_s.readPixels(0, y, width, 1, azi_s)  # sun azimuth angle in degree
        azi_v = tp_azi_v.readPixels(0, y, width, 1, azi_v)  # view azimuth angle in degree

        # gaseous absorption correction
        rho_ng = reflectance  # to start: gaseous corrected reflectances equals toa reflectances
        # water vapour correction:
        # MERIS band 9 @ 709nm to be corrected; WV absorption 900nm = band 15, WV reference 885nm= band 14
        # b709 = 8  # the band to be corrected
        # bWVRef = 13  # the reference reflectance outside WV absorption band
        # bWV = 14  # the reflectance within the WV absorption band
        # OLCI band 11 @ 709nm, WV absorption 900nm = band 19, WV reference 885nm = band 18
        # b709 = 11 # the band to be corrected
        # bWVRef=17 # the reference reflectance outside WV absorption band
        # bWV=18 # the reference reflectance outside WV absorption band
        for i in range(width):
            if (reflectance[(bWV, i)] > 0):
                X2[i] = reflectance[(bWV, i)] / reflectance[(bWVRef, i)]
            else:
                X2[i] = 1
        trans709 = h2o_cor_poly[0] + (h2o_cor_poly[1] + (h2o_cor_poly[2] + h2o_cor_poly[3] * X2) * X2) * X2
        rho_ng[b709] /= trans709
        # ozone correction
        model_ozone = 0
        for x in range(width):
            ts = math.radians(theta_s[x])  # sun zenith angle in radian
            cts = math.cos(ts)  # cosine of sun zenith angle
            sts = math.sin(ts)  # sinus of sun zenith angle
            tv = math.radians(theta_v[x])  # view zenith angle in radian
            ctv = math.cos(tv)  # cosine of view zenith angle
            stv = math.sin(tv)  # sinus of view zenith angle
            for i in range(nbands):
                trans_ozoned12 = math.exp(-(absorb_ozon[i] * ozone[x] / 1000.0 - model_ozone) / cts)
                trans_ozoneu12 = math.exp(-(absorb_ozon[i] * ozone[x] / 1000.0 - model_ozone) / ctv)
                trans_ozone12 = trans_ozoned12 * trans_ozoneu12
                rho_ng[(i, x)] /= trans_ozone12
        # here we can decide if we continue with gaseous corrected reflectances or not
        reflectance = rho_ng

        # Now calculate the pixel dependent terms (like pressure) and finally the Rayleigh optical thickness
        for x in range(width):
            # Calculation to get the pressure
            z = alt[x]  # altitude at pixel in meters, taken from MERIS tie-point grid
            z = max(z, 0)  # clip to sea level
            Psurf0 = press0[x]  # pressure at sea level in hPa, taken from MERIS tie-point grid
            Psurf = Psurf0 * (
                                 1. - 0.0065 * z / 288.15) ** 5.255  # air pressure at the pixel (i.e. at altitude) in hPa, using the international pressure equation
            P = Psurf * 1000.  # air pressure at pixel location in dyn / cm2, which is hPa * 1000
            # calculation to get the constant of gravity at the pixel altitude, taking the air mass above into account
            dphi = math.radians(lat[x])  # latitude in radians
            cos2phi = math.cos(2 * dphi)
            g0 = g0_45 * (1 - 0.0026373 * cos2phi + 0.0000059 * cos2phi ** 2)
            zs = 0.73737 * z + 5517.56  # effective mass-weighted altitude
            g = g0 - (0.0003085462 + 0.000000227 * cos2phi) * zs + (0.00000000007254 + 0.0000000000001 * cos2phi) * \
                                                                   zs ** 2 - (1.517E-17 + 6E-20 * cos2phi) * zs ** 3
            # calculations to get the Rayleigh optical thickness
            factor = (P * AVO) / (m_a * g)
            for i in range(nbands):
                taur[(i, x)] = sigma[i] * factor

            # Calculate Rayleigh Phase function
            ts = math.radians(theta_s[x])  # sun zenith angle in radian
            cts = math.cos(ts)  # cosine of sun zenith angle
            sts = math.sin(ts)  # sinus of sun zenith angle
            tv = math.radians(theta_v[x])  # view zenith angle in radian
            ctv = math.cos(tv)  # cosine of view zenith angle
            stv = math.sin(tv)  # sinus of view zenith angle
            airmass[x] = 1 / cts + 1 / ctv  # air mass
            # Rayleigh Phase function, 3 Fourier terms
            PR[0] = 3. * PA / 4. * (1. + cts ** 2 * ctv ** 2 + (sts ** 2 * stv ** 2) / 2.) + PB
            PR[1] = -3. * PA / 4. * cts * ctv * sts * stv
            PR[2] = 3. * PA / 16. * sts ** 2 * stv ** 2
            # Calculate azimuth difference
            azs = math.radians(azi_s[x])
            azv = math.radians(azi_v[x])
            cosdeltaphi = math.cos(azv - azs)
            azidiff[x] = math.acos(cosdeltaphi)  # azimuth difference in radian
            # Fourier components of multiple scattering
            for j in [0, 1, 2]:
                a[j] = interpn(gridGeometry, RayScattCoeffA[j, :, :], [theta_s[x], theta_v[x]], method='linear',
                               bounds_error=False, fill_value=None)
                b[j] = interpn(gridGeometry, RayScattCoeffB[j, :, :], [theta_s[x], theta_v[x]], method='linear',
                               bounds_error=False, fill_value=None)
                c[j] = interpn(gridGeometry, RayScattCoeffC[j, :, :], [theta_s[x], theta_v[x]], method='linear',
                               bounds_error=False, fill_value=None)
                d[j] = interpn(gridGeometry, RayScattCoeffD[j, :, :], [theta_s[x], theta_v[x]], method='linear',
                               bounds_error=False, fill_value=None)

            for i in range(nbands):
                # Fourier series, loop
                for j in [0, 1, 2]:
                    # Rayleigh primary scattering
                    rho_Rf[j] = (PR[j] / (4.0 * (cts + ctv))) * (1. - math.exp(-airmass[x] * taur[(i, x)]))
                    # correction for multiple scattering
                    rayMultiCorr[j] = a[j] + b[j] * taur[(i, x)] + c[j] * taur[(i, x)] ** 2 + d[j] * taur[(i, x)] ** 3
                    rho_Rm[(j, i, x)] = rho_Rf[j] * rayMultiCorr[j]
                # rho_Rm[(0, i, x)]  = rho_Rf[0]
                # rho_Rm[(1, i, x)]  = 0.
                # rho_Rm[(2, i, x)]  = 0.
                # Fourier sum to get the Rayleigh Reflectance
                rho_R[(i, x)] = rho_Rm[(0, i, x)] + 2.0 * rho_Rm[(1, i, x)] * math.cos(azidiff[x]) + 2. * rho_Rm[
                    (2, i, x)] * math.cos(2. * azidiff[x])
                # complete the Rayleigh correction: see MERIS DPM PDF-p251 or DPM 9-16
                # polynomial coefficients tpoly0, tpoly1 and tpoly2 from MERIS LUT
                tRs = ((2. / 3. + cts) + (2. / 3. - cts) * math.exp(-taur[(i, x)] / cts)) / (4. / 3. + taur[(i, x)])
                tR_thetaS[(i, x)] = tpoly[0] + tpoly[1] * tRs + tpoly[
                                                                    2] * tRs ** 2  # Rayleigh Transmittance sun - surface
                tRv = ((2. / 3. + ctv) + (2. / 3. - ctv) * math.exp(-taur[(i, x)] / ctv)) / (4. / 3. + taur[(i, x)])
                tR_thetaV[(i, x)] = tpoly[0] + tpoly[1] * tRv + tpoly[
                                                                    2] * tRv ** 2  # Rayleigh Transmittance surface - sensor

                sARay[(i, x)] = rayAlb_f(taur[(i, x)])  # Rayleigh spherical albedo

                rho_toaR[(i, x)] = (reflectance[(i, x)] - rho_R[(i, x)]) / (
                    tR_thetaS[(i, x)] * tR_thetaV[(i, x)])  # toa reflectance corrected for Rayleigh scattering
                sphericalFactor[(i, x)] = 1.0 / (1.0 + sARay[(i, x)] * rho_toaR[
                    (i, x)])  # factor used in the next equation to account for the spherical albedo
                rho_BRR[(i, x)] = rho_toaR[(i, x)] * sphericalFactor[
                    (i, x)]  # top of aerosol reflectance, which is equal to bottom of Rayleigh reflectance

            # simple Rayleigh correction
            azi_diff_deg = math.fabs(azi_v[x] - azi_s[x])
            if (azi_diff_deg > 180.0):
                azi_diff_deg = 360.0 - azi_diff_deg
            azi_diff_rad = math.radians(azi_diff_deg)
            cos_scat_ang = (-ctv * cts) - (stv * sts * math.cos(azi_diff_rad))
            phase_rayl_min = 0.75 * (1.0 + cos_scat_ang * cos_scat_ang)
            for i in range(nbands):
                # b_source = product.getBandAt(i)
                b_source = product.getBand(band_name[i])
                lam = b_source.getSpectralWavelength()
                taurS[(i, x)] = math.exp(-4.637) * math.pow((lam / 1000.0), -4.0679)
                pressureAtms = press0[x] * math.exp(-alt[x] / 8000.0)
                pressureFactor = taurS[(i, x)] / 1013.0
                taurS[(i, x)] = pressureAtms * pressureFactor
                rRaySimple[(i, x)] = cts * taurS[(i, x)] * phase_rayl_min / (4 * 3.1415926) * (1 / ctv) * 3.1415926

        # Write bands to product
        airmassBand.writePixels(0, y, width, 1, airmass)
        azidiffBand.writePixels(0, y, width, 1, azidiff)
        altBand.writePixels(0, y, width, 1, alt)

        for i in range(nbands):
            taurBand = raycorProduct.getBand("taur_" + str(i + 1))
            taurBand.writePixels(0, y, width, 1, taur[i])
            rhorBand = raycorProduct.getBand("rRay_" + str(i + 1))
            rhorBand.writePixels(0, y, width, 1, rho_R[i])
            rhorF1Band = raycorProduct.getBand("rRayF1_" + str(i + 1))
            rhorF1Band.writePixels(0, y, width, 1, rho_Rm[0, i])
            rhorF2Band = raycorProduct.getBand("rRayF2_" + str(i + 1))
            rhorF2Band.writePixels(0, y, width, 1, rho_Rm[1, i])
            rhorF3Band = raycorProduct.getBand("rRayF3_" + str(i + 1))
            rhorF3Band.writePixels(0, y, width, 1, rho_Rm[2, i])
            rayTransSBand = raycorProduct.getBand("transSRay_" + str(i + 1))
            rayTransSBand.writePixels(0, y, width, 1, tR_thetaS[i])
            rayTransVBand = raycorProduct.getBand("transVRay_" + str(i + 1))
            rayTransVBand.writePixels(0, y, width, 1, tR_thetaV[i])
            sARayBand = raycorProduct.getBand("sARay_" + str(i + 1))
            sARayBand.writePixels(0, y, width, 1, sARay[i])
            rtoaRBand = raycorProduct.getBand("rtoaRay_" + str(i + 1))
            rtoaRBand.writePixels(0, y, width, 1, rho_toaR[i])
            rBRRBand = raycorProduct.getBand("rBRR_" + str(i + 1))
            rBRRBand.writePixels(0, y, width, 1, rho_BRR[i])
            spfBand = raycorProduct.getBand("sphericalAlbedoFactor_" + str(i + 1))
            spfBand.writePixels(0, y, width, 1, sphericalFactor[i])
            rRaySimpleBand = raycorProduct.getBand("RayleighSimple_" + str(i + 1))
            rRaySimpleBand.writePixels(0, y, width, 1, rRaySimple[i])
            rho_ngBand = raycorProduct.getBand("rtoa_ng_" + str(i + 1))
            rho_ngBand.writePixels(0, y, width, 1, rho_ng[i])
            taurSBand = raycorProduct.getBand("taurS_" + str(i + 1))
            taurSBand.writePixels(0, y, width, 1, taurS[i])
            # Rayleigh calculation completed

    raycorProduct.closeIO()

    print("Done.")
##############
import csv
###############MSVR
from sklearn.svm import SVR
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline
########################

if len(sys.argv) != 2:
    print("usage: %s <file>" % sys.argv[0])
    sys.exit(1)

file = sys.argv[1]

print("Reading...")
product = ProductIO.readProduct(file)
width = product.getSceneRasterWidth()
height = product.getSceneRasterHeight()
name = product.getName()
description = product.getDescription()
band_names = product.getBandNames()

print("Product:     %s, %s" % (name, description))
print("Raster size: %d x %d pixels" % (width, height))
print("Start time:  " + str(product.getStartTime()))
print("End time:    " + str(product.getEndTime()))
print("Bands:       %s" % (list(band_names)))
##---------------------------------------------------------------------------------
with open('rice_LUT.csv', 'r') as dest_f:
    data_iter = csv.reader(dest_f, delimiter=',', quotechar='"')
    data = [data for data in data_iter]
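
The excerpt stops after loading the LUT; a hedged sketch of how the imported scikit-learn pieces are often combined, assuming the LUT is purely numeric with the target in its last column (the column layout is an assumption):

import numpy as np

lut = np.asarray(data, dtype=np.float32)
X, y = lut[:, :-1], lut[:, -1]  # features vs. target (assumed layout)
svr = make_pipeline(StandardScaler(), SVR(kernel='rbf', C=10.0, epsilon=0.1))
svr.fit(X, y)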
Example #15
# Preparation
productName = '5007'
maskPath = 'D:/fyp-master/Polygon/'
#fileName = 'D:/Test/collocate_' + productName + '.dim'
fileName = 'D:/Test/' + productName + '.dim'
bandName = 'Intensity_VV'
savePath = 'D:/fyp-master/TrainingData/'

# jpy conversion
PixelPos = jpy.get_type('org.esa.snap.core.datamodel.PixelPos')
GeoPos = jpy.get_type('org.esa.snap.core.datamodel.GeoPos')
GeoCoding = jpy.get_type('org.esa.snap.core.datamodel.GeoCoding')
RasterDataNode = jpy.get_type('org.esa.snap.core.datamodel.RasterDataNode')

# Prepare French Guyana .tif
p = ProductIO.readProduct('D:/Test/AGB_2015_0.zip')
bioBand = p.getBand('band_1')
bioW = bioBand.getRasterWidth()
bioH = bioBand.getRasterHeight()
bio_data = np.zeros(
    bioW * bioH, np.float32
)  #Return a new array of given shape and type, filled with zeros. Filled only x-ways. np.zeros(5) = {0,0,0,0,0}
bioBand.readPixels(
    0, 0, bioW, bioH, bio_data
)  #readPixels(x,y,w,h, Array) x : x offset of upper left corner. y : y offset of upper left corner. w : width. h : height. Array : output array
bio_data.shape = bioH, bioW

# Prepare radar image
r = ProductIO.readProduct(fileName)
radarBand = r.getBand(bandName)
radarW = radarBand.getRasterWidth()
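
The excerpt is cut off after radarW; the usual continuation mirrors the biomass band handling above (a sketch, not the original code):

radarH = radarBand.getRasterHeight()
radar_data = np.zeros(radarW * radarH, np.float32)
radarBand.readPixels(0, 0, radarW, radarH, radar_data)
radar_data.shape = radarH, radarW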
Example #16
#     lat=[]
#     lon=[]
#     averageSD=[]
#     for col in  turku_insitu_data:
#         lat=col[3]
#         lon=col[4]
#         averageSD=col[5]

## TODO: pass the whole folder here as a list of file paths using **
#file_path="/media/simsek/1CB4C75FB4C73A52/SummerWork2/SentinelDataForThesis&Validation_Oct&May/2A/S2A_MSIL2A_20171005T095031_N0205_R079_T34VFN_20171005T095027.SAFE"

file_path="/media/simsek/1CB4C75FB4C73A52/SummerWork2/SentinelDataForThesis&Validation_Oct&May/2A/"
##"./ EKLE bulundugun yerden calissin"


for filename in os.listdir(file_path):
    if filename.endswith(".SAFE"):
        product_name = os.path.join(file_path,filename)
        product = ProductIO.readProduct(product_name)
        ## resampling to 60 m to be able to use pixel extraction operator
        print(product_name)
        resampled_product = GPF.createProduct('Resample', resampling_parameters, product)
        ## Pixel extraction
        result = GPF.createProduct('PixEx', pixex_parameters, resampled_product)
        ## Renaming the last txt file to data date
        list_of_text_files=glob.glob('/home/simsek/workspace/EDUCATION/Thesis/Scripts/*.txt')
        latest_text_file = max(list_of_text_files, key=os.path.getctime)
        os.rename(latest_text_file,filename.split("_")[2].split("T")[0]+"_Sentinel_insitu_pixels_snow.csv" )
    else:
        print("There is no Sentinel-2 data in the given folder")
        GPF.getDefaultInstance().getOperatorSpiRegistry().loadOperatorSpis()
        HashMap = snappy.jpy.get_type('java.util.HashMap')

        gc.enable()
	
	### Setting up filenames
        folder = os.path.join(processing_dir, file)
        output = folder
        timestamp = file.split("_")[:4]
        print("Timestamp:", timestamp)
        date = file.split("_")[4:5]
        List_to_str_date = ''.join(date)
        date = List_to_str_date[0:15]
        print("date:", date)

        sentinel_1 = ProductIO.readProduct(output + "\\manifest.safe")

	### Creating endfolder for pre-processed product
        pp_endfolder = processed_path + '_'.join(timestamp) + '_' + date
        if not os.path.exists(pp_endfolder):
            os.mkdir(pp_endfolder)
        print("pp_endfolder:", pp_endfolder)

	### Looping over polarizations of each scene
        pols = ['HH', 'HV']
        for p in pols:
            polarization = p

            print("#########################################")
            print("#########################################")
            print("Preprocessing:", file, ", Polarization:", p, "....", datetime.now())
Example #18
    def test_end_to_end(self):
        # PRODPATH = "C:\\Users\\carsten\\Dropbox\\Carsten\\SWProjects\\Rayleigh-Correction\\testdata\\"
        PRODPATH = "D:\\Dropbox\\Carsten\\SWProjects\\Rayleigh-Correction\\testdata\\"

        # validate here
        numerr=0
        # read output
        # SENSOR = 'MERIS'
        SENSOR = 'OLCI'
        main([SENSOR])
        if (SENSOR=='MERIS'):
            REF_FILE =OUT_FILE = PRODPATH+'Reftestprodukt1_MER_RR_20050713.dim'
            TEST_FILE=OUT_FILE = PRODPATH+'Testprodukt1_MER_RR_20050713.dim'
            NBANDS=15
        if (SENSOR=='OLCI'):
            REF_FILE = PRODPATH+'Reftestproduct3_S3A_OL_1_EFR____20160509T103945.dim'
            TEST_FILE = PRODPATH+'Testproduct3_OL_1_EFR____20160509T103945.dim'
            NBANDS=21

        print("Opening reference product ...")
        refproduct = ProductIO.readProduct(REF_FILE)
        width = refproduct.getSceneRasterWidth()
        height = refproduct.getSceneRasterHeight()

        print("Opening test product ...")
        testproduct = ProductIO.readProduct(TEST_FILE)
        widthtest = testproduct.getSceneRasterWidth()
        heighttest = testproduct.getSceneRasterHeight()

        # compare
        print("Start comparing ...")
        try:
            self.assertEqual(width, widthtest)
            print("  widths agree: ",width)
        except:
            print("  widths error: ref=",width," test=",widthtest)
            numerr+=1

        try:
            self.assertEqual(height, heighttest)
            print("  height agree: ",height)
        except:
            print("  height error: ref=",height," test=",heighttest)
            numerr+=1

        refvalues=np.zeros((width,height),dtype=np.float32)
        testvalues=np.zeros((width,height),dtype=np.float32)
        bandname="rBRR_"
        for i in range(1,NBANDS+1):
            refsource = refproduct.getBand(bandname+str(i))
            refvalues = refsource.readPixels(0, 0, width, height, refvalues)
            testsource = testproduct.getBand(bandname+str(i))
            testvalues = testsource.readPixels(0, 0, width, height, testvalues)
            try:
                result=np.allclose(refvalues,testvalues, rtol=0.0, atol=1e-6, equal_nan=True)
                self.assertEqual(result,True)
                print("   ",bandname+str(i)," agrees")
            except:
                print("   ",bandname+str(i)," disagrees")
                numerr+=1
        print("toal number of tests failed =", numerr)
def InSAR_graph_processing_for_2_S1_productSets(
        study_area, graphPath, graphSet, dataPath, productSet1, productSet2,
        savePath, saveFltPostFix, executeGraphFlag,
        printGraphConfigurationFlag, applyTerrainCorrectionFlag):
    FileReader = jpy.get_type('java.io.FileReader')
    GraphIO = jpy.get_type('org.esa.snap.core.gpf.graph.GraphIO')
    Graph = jpy.get_type('org.esa.snap.core.gpf.graph.Graph')
    GraphProcessor = jpy.get_type('org.esa.snap.core.gpf.graph.GraphProcessor')
    PrintPM = jpy.get_type('com.bc.ceres.core.PrintWriterProgressMonitor')

    t1_product1_name = productSet1[0]
    t1_product2_name = productSet1[1]

    t2_product1_name = productSet2[0]
    t2_product2_name = productSet2[1]

    if not os.path.exists(savePath):
        os.makedirs(savePath)

    roi_name = study_area

    sat1 = t1_product1_name[:3]
    sat2 = t2_product1_name[:3]

    date1 = t1_product1_name.split("_")[5][:8]
    date2 = t2_product1_name.split("_")[5][:8]

    t1_info = date1 + "_[" + sat1 + "_" + date1
    t2_info = sat2 + "_" + date2 + "]"

    saveFileName = roi_name + "_" + t1_info + "_" + t2_info + "_"
    print("Prefix of savedFile: " + saveFileName)

    graph1_name = graphSet[0]  # IW2...deb
    graph2_name = graphSet[1]  # IW3...deb
    graph3_name = graphSet[2]  # IW2_IW3_mrg_tpr_ml_flt

    ### load graph1 for processing ProduceSet1
    graphFile1 = FileReader(graphPath + graph1_name)
    graph1 = GraphIO.read(graphFile1)
    saveDeb1Name = saveFileName + graph1_name[
        26:len(graph1_name) - 4]  # IW2_VV_b4_16_bgd_ESD_Ifg103_deb

    ### load graph2 for processing ProduceSet1
    graphFile2 = FileReader(graphPath + graph2_name)
    graph2 = GraphIO.read(graphFile2)
    saveDeb2Name = saveFileName + graph2_name[
        26:len(graph2_name) - 4]  # IW3_VV_b4_18_bgd_ESD_Ifg103_deb

    ### load graph3 for merging debursted products
    graphFile3 = FileReader(graphPath + graph3_name)
    graph3 = GraphIO.read(graphFile3)

    # saveFltPostFix = "_bgd_ifg_dbt_tpr_ml_flt"
    saveFltName = saveFileName + saveFltPostFix

    if printGraphConfigurationFlag:
        print(
            "======================= Used TOPSAR-Split Configuration ==========================="
        )
        print("-------------------------------------------------------")
        print("graph1 TOPSAR-Split Configuration: ")
        print("-------------------------------------------------------")
        print(graph1.getNode("TOPSAR-Split").getConfiguration().toXml())
        print("-------------------------------------------------------")
        print("graph2 TOPSAR-Split Configuration: ")
        print("-------------------------------------------------------")
        print(graph2.getNode("TOPSAR-Split").getConfiguration().toXml())
        print(
            "==================================================================================="
        )

    ###
    # print("====================== Before Configuration ============================")
    # print(graph.getNode("read").getConfiguration().toXml())
    # print(graph.getNode("read(2)").getConfiguration().toXml())
    # print(graph.getNode('write').getConfiguration().toXml())

    saveDebPath = savePath + "TOPSAR_Deburst_results\\"
    if not os.path.exists(saveDebPath):
        os.makedirs(saveDebPath)

    saveFltPath = savePath + "GoldsteinPhaseFiltering_results\\"
    if not os.path.exists(saveFltPath):
        os.makedirs(saveFltPath)

    graph1.getNode(
        "ProductSet-Reader").getConfiguration().getChild(0).setValue(
            dataPath + t1_product1_name + "," + dataPath + t1_product2_name)  #
    graph1.getNode("ProductSet-Reader(2)").getConfiguration().getChild(
        0).setValue(dataPath + t2_product1_name + "," + dataPath +
                    t2_product2_name)
    graph1.getNode("Write").getConfiguration().getChild(0).setValue(
        saveDebPath + saveDeb1Name)

    graph2.getNode("ProductSet-Reader").setConfiguration(
        graph1.getNode("ProductSet-Reader").getConfiguration())
    graph2.getNode("ProductSet-Reader(2)").setConfiguration(
        graph1.getNode("ProductSet-Reader(2)").getConfiguration())
    graph2.getNode("Write").getConfiguration().getChild(0).setValue(
        saveDebPath + saveDeb2Name)

    graph3.getNode("ProductSet-Reader").getConfiguration().getChild(
        0).setValue(saveDebPath + saveDeb1Name + ".dim," + saveDebPath +
                    saveDeb2Name + ".dim")
    graph3.getNode("Write").getConfiguration().getChild(0).setValue(
        saveFltPath + saveFltName)

    # print("====================== After Configuration ============================")
    # print(graph.getNode("read").getConfiguration().toXml())
    # print(graph.getNode("read(2)").getConfiguration().toXml())
    # print(graph.getNode('write').getConfiguration().toXml())

    if printGraphConfigurationFlag:
        print(
            "======================= Used ProductSet-Reader and Write Configuration ==========================="
        )
        print("-------------------------------------------------------")
        print("graph1: ")
        print("-------------------------------------------------------")
        print(graph1.getNode("ProductSet-Reader").getConfiguration().toXml())
        print(
            graph1.getNode("ProductSet-Reader(2)").getConfiguration().toXml())
        print(graph1.getNode("Write").getConfiguration().toXml())

        print("-------------------------------------------------------")
        print("graph2: ")
        print("-------------------------------------------------------")
        print(graph2.getNode("ProductSet-Reader").getConfiguration().toXml())
        print(
            graph2.getNode("ProductSet-Reader(2)").getConfiguration().toXml())
        print(graph2.getNode("Write").getConfiguration().toXml())

        print("-------------------------------------------------------")
        print("graph3: ")
        print("-------------------------------------------------------")
        print(graph3.getNode("ProductSet-Reader").getConfiguration().toXml())
        print(graph3.getNode("Write").getConfiguration().toXml())

    ### a more concise progress monitor is also available (not used below)
    ConcisePM = jpy.get_type(
        'com.bc.ceres.core.PrintWriterConciseProgressMonitor')
    System = jpy.get_type('java.lang.System')
    pm = PrintPM(System.out)

    ### Execute Graphs
    graph_processor = GraphProcessor()
    print("savedFltName:", saveFltPath + saveFltName)

    # executeGraphFlag = True
    if executeGraphFlag:
        # if not os.path.exists(saveDebPath + saveDeb1Name + ".dim"):
        print("Start to execute graph1 ...")
        start = time.time()
        graph_processor.executeGraph(graph1, pm)  # writes the IW2 "..._deb.dim"
        elapsed = (time.time() - start) / 60
        print("Time Used by graph1: " + str(elapsed) + "m")
        print("-------------------------------------------------------")

        # if not os.path.exists(saveDebPath + saveDeb2Name + ".dim"):
        print("Start to execute graph2 ...")
        start = time.time()
        graph_processor.executeGraph(graph2, pm)  # writes the IW3 "..._deb.dim"
        elapsed = (time.time() - start) / 60
        print("Time Used by graph2: " + str(elapsed) + "m")
        print("-------------------------------------------------------")

        # if not os.path.exists(saveFltPath + saveFltName + ".dim"):
        print("Start to execute graph3 ...")
        start = time.time()
        graph_processor.executeGraph(graph3, pm)  # writes the merged "..._Flt.dim"
        elapsed = (time.time() - start) / 60
        print("Time Used by graph3: " + str(elapsed) + "m")
        print("-------------------------------------------------------")

    ### define for writing images
    ImageManager = jpy.get_type('org.esa.snap.core.image.ImageManager')
    JAI = jpy.get_type('javax.media.jai.JAI')

    def write_image(band, filename, format):
        im = ImageManager.getInstance().createColoredBandImage(
            [band], band.getImageInfo(), 0)
        JAI.create("filestore", im, filename, format)

    # applyTerrainCorrectionflag = False
    if applyTerrainCorrectionFlag:
        ### =============== Terrain-Correction ====================
        print("Terrain-Correction...")

        savedFlt = ProductIO.readProduct(
            saveFltPath + saveFltName + '.dim'
        )  # # ================= Read "..._Flt.dim"===================
        write_image(savedFlt.getBandAt(3),
                    saveFltPath + saveFltName + "_phase.png", "PNG")
        write_image(savedFlt.getBandAt(5),
                    saveFltPath + saveFltName + "_coh.png", "PNG")

        pixelSpacing = 80
        params = {
            # Terrain-Correction
            "demName": "SRTM 3Sec",
            "pixelSpacingInMeter": float(pixelSpacing),
            "outputComplex": False,
            "externalDEMApplyEGM": True,
            "demResamplingMethod": "BILINEAR_INTERPOLATION",
            "imgResamplingMethod": "BILINEAR_INTERPOLATION",
            "nodataValueAtSea": False
        }

        parameters = HashMap()
        for a in params:
            # print(a)
            parameters.put(a, params[a])

        TC = GPF.createProduct("Terrain-Correction", parameters, savedFlt)
        saveTCname = saveFltName + "_TC" + str(pixelSpacing)

        ### Write "..._TC.dim"
        # ProductIO.writeProduct(TC, savePath + saveFileName, 'BEAM-DIMAP', pm) # ============== Save "..._Flt_TC.dim"===================

        ### Read " ...TC.dim" and Write coherence band as an image in Tiff format.
        # savedTC = ProductIO.readProduct(savePath + saveFileName + '.dim')
        savedTC = TC

        ### Delete other bands except for coherence band
        for i in range(savedTC.getNumBands() - 1):
            # print("Deleted band: ", savedTC.getBandAt(0))
            savedTC.removeBand(savedTC.getBandAt(0))

        saveCohPath = savePath + "Flt_TC_coherence_maps\\"
        if not os.path.exists(saveCohPath):
            os.makedirs(saveCohPath)

        print("savedCohMap: ", saveCohPath + saveTCname + "_coh")
        ProductIO.writeProduct(
            savedTC, saveCohPath + saveTCname + "_coh", 'GeoTIFF',
            pm)  # =========== Save coherence map =======================
        write_image(savedTC.getBandAt(0),
                    saveCohPath + saveTCname + "_coh.png", "PNG")
import sys

import numpy
from snappy import String
from snappy import Product
from snappy import ProductData
from snappy import ProductIO
from snappy import ProductUtils


if len(sys.argv) != 2:
    print("usage: %s <file>" % sys.argv[0]);
    sys.exit(1)

print("Reading...")
product = ProductIO.readProduct(sys.argv[1])
width = product.getSceneRasterWidth()
height = product.getSceneRasterHeight()
name = product.getName()
desc = product.getDescription()
band_names = product.getBandNames()

print("Product: %s, %d x %d pixels, %s" % (name, width, height, desc))
print("Bands:   %s" % (band_names))

b7 = product.getBand('radiance_7')
b10 = product.getBand('radiance_10')
ndviProduct = Product('NDVI', 'NDVI', width, height)
ndviBand = ndviProduct.addBand('ndvi', ProductData.TYPE_FLOAT32)
ndviBand.setNoDataValue(numpy.nan)
ndviBand.setNoDataValueUsed(True)
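
# Hedged completion sketch: the example is cut off above, so the lines below are
# an assumed continuation (the output name 'ndvi_output.dim' is not from the original).
ProductUtils.copyGeoCoding(product, ndviProduct)
writer = ProductIO.getProductWriter('BEAM-DIMAP')
ndviProduct.setProductWriter(writer)
writer.writeProductNodes(ndviProduct, 'ndvi_output.dim')

r7 = numpy.zeros(width, dtype=numpy.float32)
r10 = numpy.zeros(width, dtype=numpy.float32)
for y in range(height):
    # read one scan line of each radiance band, compute NDVI, write it back line by line
    b7.readPixels(0, y, width, 1, r7)
    b10.readPixels(0, y, width, 1, r10)
    ndvi = (r10 - r7) / (r10 + r7)
    ndviBand.writePixels(0, y, width, 1, ndvi)

ndviProduct.closeIO()
print("Done.")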
Example #21
0
def pre_process_s2(data_dir, out_dir, area_of_int):
    """
    Subsets and resamples to 10 m all L2A products in the input directory.

    Args:
        data_dir (str): location of L2A products (.SAFE directories)
        out_dir (str): location to write processed products
        area_of_int (geoJSON): extent of region of interest
    
    """
    import os
    import re
    import sys
    import snappy
    from sentinelsat.sentinel import SentinelAPI, read_geojson, geojson_to_wkt
    from snappy import ProductIO
    from snappy import HashMap
    from snappy import GPF
    GPF.getDefaultInstance().getOperatorSpiRegistry().loadOperatorSpis()
    HashMap = snappy.jpy.get_type('java.util.HashMap')

    os.chdir(data_dir)

    # Check location for saving results
    out_direc = out_dir
    if not out_direc.endswith('/'):
        out_direc += '/'
    if not os.path.exists(out_direc):
        os.makedirs(out_direc)
        print("New directory {} was created".format(out_direc))

    # Get a list of S2 L2A product directory names
    prdlist = filter(
        re.compile(r'^S2.*L2A.*SAFE$').search, os.listdir(data_dir))

    # Filter products that have already been processed
    pre_files = filter(
        re.compile(r'^S2.*L2A.*data$').search, os.listdir(out_dir))
    checker = list(map(lambda x: x[0:-5] + r'.SAFE', pre_files))

    prdlist = [i for i in prdlist if i not in checker]

    # Create a dictionary to read Sentinel-2 L2A products
    product = {}

    ## TODO-to merge contiguous tiles
    ## Extract the dates of all the products with regex
    ## Make a list of unique dates
    ## Append list in dictionary with dates as keys with the name of the product

    for element in prdlist:
        product[element[:-5]] = {}

    ## TODO use only one variable for the procesing chain
    for key, value in product.items():
        try:
            # Read the product
            reader = list(filter(
                re.compile(r'MTD_.*xml$').search,
                os.listdir(data_dir + key + '.SAFE/')))
            print('Reading {}'.format(key + '.SAFE/' + reader[0]))
            value['GRD'] = ProductIO.readProduct(data_dir + key + '.SAFE/' +
                                                 reader[0])

            # Resample all bands to 10m resolution
            resample_subset = HashMap()
            resample_subset.put('targetResolution', 10)
            print('Resampling {}'.format(key))
            value['res10'] = GPF.createProduct('Resample', resample_subset,
                                               value['GRD'])

            # Subset to area of interest
            param_subset = HashMap()
            param_subset.put('geoRegion', area_of_int)
            param_subset.put('outputImageScaleInDb', False)
            param_subset.put(
                'bandNames',
                'B2,B3,B4,B8,B11,B12,quality_cloud_confidence,quality_scene_classification'
            )
            print('Subsetting {}'.format(key))
            value['sub'] = GPF.createProduct("Subset", param_subset,
                                             value['res10'])

            # Write product
            print('Writing {} subset resampled to 10m'.format(key))
            ProductIO.writeProduct(value['sub'], out_dir + key, 'BEAM-DIMAP')

            # Dispose all the intermediate products
            value['GRD'].dispose()
            value['res10'].dispose()
            value['sub'].dispose()

        except:
            e = sys.exc_info()
            print("{} could not be processed: {} {} {}".format(
                key, e[0], e[1], e[2]))
            with open(data_dir + "L2A_corrupt.txt", "a") as efile:
                efile.write(key + '\n')
        # Print the per-class misclassification percentages (row = true class, column = predicted class)
        for col_index in range(confusion_matrix.shape[1]):
            if row_index != col_index:
                value = confusion_matrix[row_index][col_index]
                f.write(RosebelPixelASMClass(row_index + 1).name + ' pixels mis-predicted as ' +
                        RosebelPixelASMClass(col_index + 1).name + ': %.2f%%' % (
                                value * 100 / sum(confusion_matrix[row_index])) + '\n')
    f.close()


if __name__ == "__main__":
    start_time = time.time()

    print("Reading product:" + MERIAN_PATH + PRODUCT_NAME)
    p = ProductIO.readProduct(MERIAN_PATH + PRODUCT_NAME)

    print('Extracting Bands')
    band_names = p.getBandNames()
    bands = []
    number_of_bands = 0
    for band_name in band_names:
        if "Gamma0" in str(band_name):
            print(str(number_of_bands + 1) + ": " + str(band_name))
            number_of_bands += 1
            bands.append(p.getBand(band_name))
    print("Number of bands in product: " + str(number_of_bands))

    print('Extracting Feature Data from Bands')
    features = []
    for band in bands:
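        # Hedged completion sketch: the example is cut off here, so this loop body
        # is an assumed continuation; it reads each Gamma0 band into a flat array
        # used as one feature vector ("np" is assumed to be numpy, imported earlier).
        band_data = np.zeros(p.getSceneRasterWidth() * p.getSceneRasterHeight(),
                             dtype=np.float32)
        band.readPixels(0, 0, p.getSceneRasterWidth(),
                        p.getSceneRasterHeight(), band_data)
        features.append(band_data)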
Example #23
0
def prepros(scene_folder):

    ### Folder, date & timestamp
    preprocessed = rootpath + "\\preprocessed"
    scene_name = str(scene_folder.split("\\")[X])[:32] # [X] element of the file path; change this to match the scene name. [:32] keeps the first 32 characters of that element
    print("scene_name:", scene_name)
    output_folder = os.path.join(preprocessed, scene_name)
    print("output_folder:", output_folder)

    if not os.path.exists(output_folder):
        os.mkdir(output_folder)

    GPF.getDefaultInstance().getOperatorSpiRegistry().loadOperatorSpis()
    HashMap = snappy.jpy.get_type('java.util.HashMap')
    gc.enable()

    sentinel_1 = ProductIO.readProduct(scene_folder + "\\manifest.safe")

    pols = ['HH', 'HV'] # Be aware of polarizations for different scenes
    for p in pols:
        polarization = p

        ### Apply-Orbit-File
        parameters = HashMap()
        parameters.put('orbitType', 'Sentinel Precise (Auto Download)')
        parameters.put('polyDegree', 3)
        parameters.put('continueOnFail', True)

        target_0 = GPF.createProduct("Apply-Orbit-File", parameters, sentinel_1)
        del parameters

        ### Calibration
        parameters = HashMap()
        parameters.put('outputSigmaBand', True)
        parameters.put('sourceBands', 'Intensity_' + polarization)
        parameters.put('selectedPolarisations', polarization)
        parameters.put('outputImageScaleInDb', False)

        target_1 = GPF.createProduct("Calibration", parameters, target_0)
        del target_0
        del parameters

        ### Terrain-Correction
        parameters = HashMap()
        parameters.put('demResamplingMethod', 'NEAREST_NEIGHBOUR')
        parameters.put('imgResamplingMethod', 'NEAREST_NEIGHBOUR')
        parameters.put('demName', 'External DEM')
        parameters.put('externalDEMFile', dem)
        parameters.put('externalDEMNoDataValue', -9999.0) # DEM nodata value
        parameters.put('pixelSpacingInMeter', 20.0) # DEM pixelsize
        parameters.put('sourceBands', 'Sigma0_' + polarization)
        parameters.put('saveSelectedSourceBand', True)
        parameters.put('nodataValueAtSea', False)

        target_2 = GPF.createProduct("Terrain-Correction", parameters, target_1)
        del target_1
        del parameters

        ### Import-Vector
        parameters = HashMap()
        parameters.put('vectorFile', mask)
        parameters.put('separateShapes', False)

        target_3 = GPF.createProduct('Import-Vector', parameters, target_2)
        del target_2
        del parameters

        ### Land-Sea-Mask
        parameters = HashMap()
        parameters.put('useSRTM', False)
        parameters.put('landMask', True)
        parameters.put('geometry', 'OceanMask_250mBuffer_rep') # input shapefile name

        out_file = output_folder + "\\" + scene_name + "_preprocessed_" + polarization
        target_4 = GPF.createProduct("Land-Sea-Mask", parameters, target_3)
        ProductIO.writeProduct(target_4, out_file, 'GeoTIFF-BigTIFF', pm)
        del target_3
        del parameters
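
# Hedged usage sketch (module level): rootpath, dem, mask, pm and the os import are
# assumed to be defined globally, and the [X] path index inside prepros() must be
# set to match your folder layout before running.
if __name__ == "__main__":
    import glob
    for scene_folder in glob.glob(os.path.join(rootpath, "*.SAFE")):
        prepros(scene_folder)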
Example #24
0
def main():
    ## All Sentinel-1 data sub folders are located within a super folder (make sure the data is already unzipped and each sub folder name ends with '.SAFE'):
    path = r'data\s1_images'
    outpath = r'data\s1_preprocessed'
    if not os.path.exists(outpath):
        os.makedirs(outpath)
    ## well-known-text (WKT) file for subsetting (can be obtained from SNAP by drawing a polygon)
    wkt = 'POLYGON ((-157.79579162597656 71.36872100830078, -155.4447021484375 71.36872100830078, \
    -155.4447021484375 70.60020446777344, -157.79579162597656 70.60020446777344, -157.79579162597656 71.36872100830078))'

    ## UTM projection parameters
    proj = '''PROJCS["UTM Zone 4 / World Geodetic System 1984",GEOGCS["World Geodetic System 1984",DATUM["World Geodetic System 1984",SPHEROID["WGS 84", 6378137.0, 298.257223563, AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich", 0.0, AUTHORITY["EPSG","8901"]],UNIT["degree", 0.017453292519943295],AXIS["Geodetic longitude", EAST],AXIS["Geodetic latitude", NORTH]],PROJECTION["Transverse_Mercator"],PARAMETER["central_meridian", -159.0],PARAMETER["latitude_of_origin", 0.0],PARAMETER["scale_factor", 0.9996],PARAMETER["false_easting", 500000.0],PARAMETER["false_northing", 0.0],UNIT["m", 1.0],AXIS["Easting", EAST],AXIS["Northing", NORTH]]'''

    for folder in os.listdir(path):
        gc.enable()
        gc.collect()
        sentinel_1 = ProductIO.readProduct(path + "\\" + folder +
                                           "\\manifest.safe")
        print(sentinel_1)

        loopstarttime = str(datetime.datetime.now())
        print('Start time:', loopstarttime)
        start_time = time.time()

        ## Extract mode, product type, and polarizations from filename
        modestamp = folder.split("_")[1]
        productstamp = folder.split("_")[2]
        polstamp = folder.split("_")[3]

        polarization = polstamp[2:4]
        if polarization == 'DV':
            pols = 'VH,VV'
        elif polarization == 'DH':
            pols = 'HH,HV'
        elif polarization == 'SH' or polarization == 'HH':
            pols = 'HH'
        elif polarization == 'SV':
            pols = 'VV'
        else:
            print("Polarization error!")

        ## Start preprocessing:
        applyorbit = do_apply_orbit_file(sentinel_1)
        thermaremoved = do_thermal_noise_removal(applyorbit)
        calibrated = do_calibration(thermaremoved, polarization, pols)
        down_filtered = do_speckle_filtering(calibrated)
        del applyorbit
        del thermaremoved
        del calibrated
        ## IW images are downsampled from 10m to 40m (the same resolution as EW images).
        if modestamp in ('IW', 'EW') and productstamp == 'GRDH':
            down_tercorrected = do_terrain_correction(down_filtered, proj, 1)
            down_subset = do_subset(down_tercorrected, wkt)
            del down_filtered
            del down_tercorrected
        elif modestamp == 'EW' and productstamp == 'GRDM':
            tercorrected = do_terrain_correction(down_filtered, proj, 0)
            subset = do_subset(tercorrected, wkt)
            del down_filtered
            del tercorrected
        else:
            print("Different spatial resolution is found.")

        down = 1
        try:
            down_subset
        except NameError:
            down = None
        if down is None:
            print("Writing...")
            ProductIO.writeProduct(subset, outpath + '\\' + folder[:-5],
                                   'GeoTIFF')
            del subset
        elif down == 1:
            print("Writing undersampled image...")
            ProductIO.writeProduct(down_subset,
                                   outpath + '\\' + folder[:-5] + '_40',
                                   'GeoTIFF')
            del down_subset
        else:
            print("Error.")

        print('Done.')
        sentinel_1.dispose()
        sentinel_1.closeIO()
        print("--- %s seconds ---" % (time.time() - start_time))
    
    return threshold_isodata(np.asarray(band_data, dtype='float'), return_all=True)

# ------------------------------------------------------------------------------------------------------


if __name__ == '__main__':

    # GPF Initialization
    GPF.getDefaultInstance().getOperatorSpiRegistry().loadOperatorSpis()

    # Product initialization
    s1_path = 'C:/Users/jales/Desktop/S1A.zip'

    # Reading the data
    product = ProductIO.readProduct(s1_path)

    # ------------------------------------------------------------------------------------------------------

    ProductInformation(product)

    S1_Orb = ApplyOrbitFile(product)

    S1_Orb_Subset = Subset(S1_Orb, 0, 9928, 25580, 16846)

    ProductInformation(S1_Orb_Subset)

    S1_Orb_Subset_Cal = Calibration(S1_Orb_Subset, 'Intensity_VH', 'VH')

    S1_Orb_Subset_Cal_Ter = Terrain_Correction(S1_Orb_Subset_Cal, 'Sigma0_VH')
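
    # Hedged final step (not in the original snippet; the output path is an assumption):
    # persist the terrain-corrected product next to the input scene.
    ProductIO.writeProduct(S1_Orb_Subset_Cal_Ter,
                           'C:/Users/jales/Desktop/S1A_Orb_Subset_Cal_Ter',
                           'BEAM-DIMAP')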
Example #26
0
print(date)



idx = 0
in1 = date_output_dict[date[idx]]
in2 = date_output_dict[date[idx + 1]]

# print(date[idx], date[idx + 1])
print(in1)
print(in2)

sat1 = in1.split("\\")[3][:3]  # obtain sensor name : 'S1A' or 'S1B'
sat2 = in2.split("\\")[3][:3]

product_t1 = ProductIO.readProduct(in1)
product_t2 = ProductIO.readProduct(in2)


# product_t1 = ProductIO.readProduct(output[0] + "\\manifest.safe")
# product_t2 = ProductIO.readProduct(output[1] + "\\manifest.safe")

# product_t1 = ProductIO.readProduct(output[0])
# product_t2 = ProductIO.readProduct(output[1])

print(product_t1)
print(product_t2)

# print(sat[0], sat[1])
#
# print(date[0])
Example #27
0

dd = pd.DataFrame({'file_name':f1, 'ndbc_dir':ndbc_dir})


# In[52]:


dd.to_csv('file.csv', index=False)


# In[7]:


path = '/Volumes/Yangchao/Sentient/'
product = ProductIO.readProduct(path+'S1B_IW_GRDH_1SDV_20170522T161401_20170522T161430_005713_00A024_4EF8.zip')


# In[142]:


ProductIO.writeProduct(product, 'S1A_IW_GRDH_1SDV_20170719T232015_20170719T232040_017547_01D590_DF13.nc', 'NetCDF4-CF')


# In[8]:


product1 = calibrate.thermal_app(product)
product2 = calibrate.calibrate(product1)
product3 = calibrate.specklefilter(product2, filter='Median')
#ProductIO.writeProduct(product3, 'S1B_IW_GRDH_1SDV_20170522T161401_20170522T161430_005713_00A024_4EF8.nc', 'NetCDF4-CF')
Example #28
0
def process_SLC_product(master, slave):
    # read list of files from common_file.txt in processing dir (already sorted in chronological order)
    ######################## COREGISTRATION ########################
    # master_file = "S1A_IW_SLC__1SDV_20190519T112034_20190519T112101_027296_03140A_CB6F"
    master_file = master
    timestamp1 = master_file.split("_")[5]
    date1 = timestamp1[:8]

    # input_file_name2 = input_dir + slave_file
    slave_file = slave
    timestamp2 = slave_file.split("_")[5]
    date2 = timestamp2[:8]

    combined_name = timestamp1 + "_" + timestamp2
    logger.info("Current pair: " + combined_name)
    # Read in the Sentinel-1 data product:
    # sentinel_1_m = ProductIO.readProduct(output_dir + master_file + f"_top_sar_split_{POLARIZATIONS}.dim")
    # sentinel_1_s = ProductIO.readProduct(output_dir + slave_file + f"_top_sar_split_{POLARIZATIONS}.dim")
    sentinel_1_m = ProductIO.readProduct(output_dir + master_file)
    sentinel_1_s = ProductIO.readProduct(output_dir + slave_file)
    logger.info("Read")

    ### BACK-GEOCODING
    back_geocoded_prdt = sp.back_geocoding([sentinel_1_s, sentinel_1_m])

    ### ENHANCED SPECTRAL DIVERSITY
    esd_prdt = sp.enhanced_spectral_diversity(back_geocoded_prdt)

    ######################## INTERFEROGRAM PROCESSING TO GET DINSAR ########################
    ### INTERFEROGRAM
    interferogram_prdt = sp.interferogram(esd_prdt)

    ### TOPSAR DEBURST
    deburst_prdt = sp.top_sar_deburst(interferogram_prdt, POLARIZATIONS)
    deburst_path = interferogram_dir + combined_name + f'_deburst_{POLARIZATIONS}'
    ProductIO.writeProduct(deburst_prdt, deburst_path, "BEAM-DIMAP")
    logger.info('Write done')

    ### TOPO PHASE REMOVAL
    in_deburst_prdt = ProductIO.readProduct(deburst_path + '.dim')
    tpr_prdt = sp.topo_phase_removal(in_deburst_prdt)

    ### MULTILOOK
    multilook_prdt = sp.multilook(tpr_prdt)

    ### GOLDSTEIN PHASE FILTERING
    goldstein_prdt = sp.goldstein_phase_filtering(multilook_prdt)
    goldstein_path = interferogram_dir + combined_name + f'_goldstein_{POLARIZATIONS}'

    ProductIO.writeProduct(goldstein_prdt, goldstein_path, "BEAM-DIMAP")
    logger.info('Write done')

    # ### GET COHERENCE
    # # terrain correction
    # tc_goldstein_prdt = sp.terrain_correction(goldstein_prdt, snappyconfigs.UTM_WGS84, default_pixel_spacing)
    # is_found = False
    # for src_band in tc_goldstein_prdt.getBands():
    #     band_name = src_band.getName()
    #     if band_name.startswith('coh'):
    #         coh_prdt = sp.subset(tc_goldstein_prdt, None, band_name)
    #         coh_path = interferogram_dir + combined_name + f'_coh_{POLARIZATIONS}'
    #         ProductIO.writeProduct(coh_prdt, coh_path, 'GeoTIFF')
    #         is_found = True
    #         break
    # if not is_found:
    #     logger.critical("No coherence band is found in interferogram product!")

    ####################### PHASE UNWRAPPING ########################
    ### SNAPHU EXPORT
    in_goldstein_prdt = ProductIO.readProduct(goldstein_path + '.dim')
    snaphu_output_path = interferogram_dir + SNAPHU_PATH + combined_name + f'_{POLARIZATIONS}' + '\\'
    snaphu_export_prdt = sp.snaphu_export(in_goldstein_prdt,
                                          snaphu_output_path)
    ProductIO.writeProduct(snaphu_export_prdt, snaphu_output_path, "Snaphu")
    logger.info("Snaphu Export done")

    ### Unwrapping
    call_snaphu_command(snaphu_output_path)

    unwrapped_phase_prdt = None
    for file in os.listdir(snaphu_output_path):
        if file.endswith('.hdr') and 'UnwPhase' in file:
            unwrapped_phase_prdt = ProductIO.readProduct(snaphu_output_path +
                                                         file)
    if unwrapped_phase_prdt is None:
        logger.critical(
            f'Unwrapped phase product not found in {snaphu_output_path} for phase unwrapping'
        )
        exit(1)

    # ### SNAPHU IMPORT
    snaphu_import_prdt = sp.snaphu_import(
        [unwrapped_phase_prdt, in_goldstein_prdt])
    snaphu_import_path = interferogram_dir + combined_name + f'_snaphu_import_{POLARIZATIONS}'
    ProductIO.writeProduct(snaphu_import_prdt, snaphu_import_path,
                           "BEAM-DIMAP")

    ### BandMaths
    in_snaphu_import_prdt = ProductIO.readProduct(snaphu_import_path + '.dim')
    unwrapped_phase_prdt_name = re.sub("\\..*$", "",
                                       unwrapped_phase_prdt.getName())
    unwrapped_phase_prdt_name = unwrapped_phase_prdt_name.replace(
        'UnwPhase', 'Unw_Phase')
    unwrapped_phase_prdt_name = unwrapped_phase_prdt_name.replace('_VV', '')
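    # The BandMaths expression below converts unwrapped phase to displacement:
    # disp = -(phase * lambda) / (4 * pi * cos(theta)), with lambda = 0.056 m
    # (Sentinel-1 C-band wavelength) and theta the local incidence angle.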
    vert_disp_prdt = sp.band_math(
        in_snaphu_import_prdt, 'vert_disp',
        f'({unwrapped_phase_prdt_name} * 0.056) / (-4 * PI * cos(rad(incident_angle)))'
    )

    ### GEOCODING / TERRAIN CORRECTION
    terrain_corrected_prdt = sp.terrain_correction(vert_disp_prdt,
                                                   snappyconfigs.UTM_WGS84)

    ### SUBSET
    subset_prdt = sp.subset(terrain_corrected_prdt, TEXANA_WKT)
    subset_path = interferogram_dir + combined_name + f'_vert_disp_subset_{POLARIZATIONS}'
    ProductIO.writeProduct(subset_prdt, subset_path, "BEAM-DIMAP")
import os
import snappy
from snappy import Product
from snappy import ProductIO
from snappy import ProductUtils
from snappy import WKTReader
from snappy import HashMap
from snappy import GPF
import os.path
from os import path

### Reading the product subset
subset_path = 'dataset/subset_0_of_S1A_IW_GRDH_1SDV_20191004T011831_20191004T011856_029302_035471_E23D.dim'
if path.exists(subset_path):
    try:
        product = ProductIO.readProduct(subset_path)
    except:
        print("error reading file")
else:
    print("file not found")

print("Reading...")
width = product.getSceneRasterWidth()
print("Width: {} px".format(width))
height = product.getSceneRasterHeight()
print("Height: {} px".format(height))
name = product.getName()
print("Name: {}".format(name))
band_names = product.getBandNames()
print("Band names: {}".format(", ".join(band_names)))
Example #30
0
    if any(file_name in ef for ef in existing_files):
        continue

    # # to find common GRD and SLC products
    # if any(cf[:-1] in file_name for cf in common_files):
    #     continue

    # # for blank images
    # if not any(ef in file_name for ef in existing_files):
    #     continue

    logger.info("Current folder: " + folder)
    # Read in the Sentinel-1 data product:
    # sentinel_1 = ProductIO.readProduct(input_file_name + filemanager.manifest_extension)
    sentinel_1 = ProductIO.readProduct(input_file_name)
    logger.debug(sentinel_1)

    ### APPLY-ORBIT FILE
    apply_orbit_prdt = sp.apply_orbit_file(sentinel_1)

    ### THERMAL NOISE REMOVAL
    noise_rem_prdt = sp.thermal_noise_removal(apply_orbit_prdt)

    ### CALIBRATION
    calibrated_prdt = sp.calibration(noise_rem_prdt, config.POLARIZATIONS)

    ### SPECKLE FILTER
    speckle_prdt = sp.speckle_filter(calibrated_prdt)

    ### TERRAIN CORRECTION
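    # Hedged completion sketch: the snippet is cut off here; this assumes the same
    # sp.terrain_correction helper, snappyconfigs.UTM_WGS84 and default_pixel_spacing
    # used in the interferogram example above, and the GeoTIFF output path is an assumption.
    tc_prdt = sp.terrain_correction(speckle_prdt, snappyconfigs.UTM_WGS84,
                                    default_pixel_spacing)
    ProductIO.writeProduct(tc_prdt, output_dir + file_name + '_tc', 'GeoTIFF')
    logger.info('Write done')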
Example #31
0
print("Phase unwrapping performed successfully ..............................")
#%%
# Snaphu import
bandList = os.listdir(TempFolder)
for item in bandList:
    if item.endswith('.hdr') and item[0:8] == 'UnwPhase':
        upha = item
print(upha)

#working_dir='D:\\PhD Info\\InSAR\\Examples\\SNAPPY_Ecuador_Galapagos'
#os.chdir (working_dir + '\\' + Fname)
#
Files = jpy.array('org.esa.snap.core.datamodel.Product', 2)
#Files[0]    = ProductIO.readProduct(os.path.join(r'D:\PhD Info\InSAR\Examples\SNAPPY_Ecuador_Galapagos','subset_0_of_InSAR_pipeline_II.dim'))
Files[0] = read(os.path.join(Fpath, Fname + '.dim'))  # reads .dim
Files[1] = ProductIO.readProduct(glob.glob(TempFolder + '\\' + upha)[0])
HashMap = jpy.get_type("java.util.HashMap")
params = HashMap()
Product = GPF.createProduct("SnaphuImport", params, Files)
os.chdir(Fpath)
ProductIO.writeProduct(Product, Fname + "_ifg_ml_fit_unwph.dim", "BEAM-DIMAP")
print("Snaphu import performed successfully .................................")
# Phase To Displacement
#Product = read(os.path.join(r'D:\PhD Info\InSAR\Examples\SNAPPY_Ecuador_Galapagos\exp_subset','subset_0_of_InSAR_pipeline_II_ifg_ml_fit_unwph.dim')) # reads .dim
params = HashMap()
Product = GPF.createProduct("PhaseToDisplacement", params, Product)
os.chdir(Fpath)
ProductIO.writeProduct(Product, Fname + '_ifg_ml_fit_unwph_disp.dim',
                       "BEAM-DIMAP")
print(
    "Phase To Displacement performed successfully ................................."
)
Example #32
0
import snappy
from snappy import ProductIO
from snappy import HashMap

import os
from snappy import GPF

GPF.getDefaultInstance().getOperatorSpiRegistry().loadOperatorSpis()
HashMap = snappy.jpy.get_type('java.util.HashMap')

data_path = 'D:/Project_Data/Arctic_PRIZE/Data/S1/'
temp_path = 'D:/Project_Data/Arctic_PRIZE/Temp/'
out_path = 'D:/Project_Data/Arctic_PRIZE/Processed_Data/S1/'
file1 = 'S1B_EW_GRDM_1SDH_20170601T062150_20170601T062250_005853_00A430_9294'

sentinel_1 = ProductIO.readProduct(data_path + file1 + '.zip')
print(sentinel_1)

### Calibration
pols = ['HH']
for p in pols:

    polarization = p

    #### Speckle filter
    parameters = HashMap()
    parameters.put('sourceBands', 'Intensity_' + polarization)
    parameters.put('filter', 'Median')
    #parameters.put('numberOfLooks',1)
    #parameters.put('windowSize', 7)
    #parameters.put('sigma',0.9)
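
    # Hedged completion sketch: the snippet ends before the filter is applied;
    # the output file name under out_path is an assumption.
    speckle = GPF.createProduct('Speckle-Filter', parameters, sentinel_1)
    ProductIO.writeProduct(speckle,
                           out_path + file1 + '_speckle_' + polarization,
                           'BEAM-DIMAP')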
Example #33
0
def read(filename):
    print('Reading...')
    return ProductIO.readProduct(filename)
Example #34
0
"""
Spyder Editor

ESA SNAP API for Python (snappy) testing 



"""

from snappy import ProductIO
import matplotlib.pyplot as plt
import numpy as np

# read .dim file
p = ProductIO.readProduct(
    'C:/Users/Ap/anaconda3/envs/snappy36/snappy/testdata/MER_FRS_L1B_SUBSET.dim'
)
list(p.getBandNames())

# assign one band to a variable
band = p.getBand('radiance_1')
# get band width and height
w = p.getSceneRasterWidth()
h = p.getSceneRasterHeight()

# create an empty array
band_data = np.zeros(w * h, np.float32)

# populate array with band values
band.readPixels(0, 0, w, h, band_data)
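
# Hedged visualization sketch (assumed continuation, not in the original snippet):
# reshape the flat array to (h, w) and display it with matplotlib.
band_data.shape = h, w
plt.figure()
plt.imshow(band_data, cmap='gray')
plt.colorbar()
plt.show()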
Example #35
0
import snappy
from snappy import ProductIO
from snappy import HashMap

import os, gc
from snappy import GPF

GPF.getDefaultInstance().getOperatorSpiRegistry().loadOperatorSpis()
HashMap = snappy.jpy.get_type('java.util.HashMap')

import time

start = time.time()
path = 'dataset/subset_0_of_S1A_IW_GRDH_1SDV_20191004T011831_20191004T011856_029302_035471_E23D.dim'
gc.enable()
output = "/output/"
sentinel_1 = ProductIO.readProduct(path)
print(sentinel_1)

pols = ['VH', 'VV']
for p in pols:
    polarization = p

    # APPLY ORBIT FILE

    parameters = HashMap()
    parameters.put('orbitType', 'Sentinel Precise (Auto Download)')
    parameters.put('polyDegree', '3')
    parameters.put('continueOnFail', 'false')
    s1_orbit_applied = GPF.createProduct('Apply-Orbit-File', parameters,
                                         sentinel_1)
    file_prefix = output + "_orbit_applied_" + polarization
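    # Hedged completion sketch: persist the orbit-corrected product before the next
    # processing step (the BEAM-DIMAP format choice is an assumption).
    ProductIO.writeProduct(s1_orbit_applied, file_prefix, 'BEAM-DIMAP')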
Example #36
0
import sys

import numpy
from snappy import Product
from snappy import ProductData
from snappy import ProductIO
from snappy import ProductUtils
from snappy import FlagCoding

if len(sys.argv) != 2:
    print("usage: %s <file>" % sys.argv[0])
    sys.exit(1)

file = sys.argv[1]

print("Reading...")
product = ProductIO.readProduct(file)
width = product.getSceneRasterWidth()
height = product.getSceneRasterHeight()
name = product.getName()
description = product.getDescription()
band_names = product.getBandNames()

print("Product:     %s, %s" % (name, description))
print("Raster size: %d x %d pixels" % (width, height))
print("Start time:  " + str(product.getStartTime()))
print("End time:    " + str(product.getEndTime()))
print("Bands:       %s" % (list(band_names)))


b7 = product.getBand('radiance_7')
b10 = product.getBand('radiance_10')
Example #37
0
    print('Product file and band index required')
    sys.exit(1)

# check if band index given is correct
if sys.argv[2] not in ['2', '3', '4', '8']:
    print('Incorrect band index')
    sys.exit(1)

# get cli arguments
product_file = sys.argv[1]
band_index = sys.argv[2]
band_name = 'B' + band_index
product_name = {
    'B2': 'blue',
    'B3': 'green',
    'B4': 'red',
    'B8': 'nir',
}[band_name]

# input product: open and get dimensions & name
input_product = ProductIO.readProduct(product_file)
product_width = input_product.getSceneRasterWidth()
product_height = input_product.getSceneRasterHeight()
product_name = input_product.getName()

# output product: copy selected band & save product
output_product = Product(product_name, product_name, product_width, product_height)
ProductUtils.copyGeoCoding(input_product, output_product)
ProductUtils.copyBand(band_name, input_product, output_product, True)
ProductIO.writeProduct(output_product, product_name + '.band.dim', 'BEAM-DIMAP')
output_product.closeIO()