Example #1
def main(ctx, ndvi_threshold, ndwi_threshold, pre_event, post_event):

    dump(ctx)

    os.environ["PREFIX"] = "/opt/anaconda/envs/env_burned_area"

    os.environ["PROJ_LIB"] = os.path.join(os.environ["PREFIX"], "share/proj")
    os.environ["GDAL_DATA"] = os.path.join(os.environ["PREFIX"], "share/gdal")

    burned_area_item = burned(
        pre_item=get_item(os.path.join(pre_event, "catalog.json")),
        post_item=get_item(os.path.join(post_event, "catalog.json")),
        ndvi_threshold=ndvi_threshold,
        ndwi_threshold=ndwi_threshold,
    )

    logging.info("Output catalog")

    catalog = Catalog(id="catalog", description="Results")

    catalog.clear_items()
    catalog.clear_children()

    catalog.add_items([burned_area_item])

    catalog.describe()

    catalog.normalize_and_save(root_href="./",
                               catalog_type=CatalogType.SELF_CONTAINED)
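
The get_item() helper used here (and in several later examples) is not part of the snippet. A minimal sketch, assuming it simply reads the staged, self-contained catalog and returns its first item:

from pystac import Catalog

def get_item(catalog_path):
    # Hypothetical helper: open the staged catalog and return its first
    # (typically only) item.
    catalog = Catalog.from_file(catalog_path)
    return next(catalog.get_items())
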
Example #2
def main(data_dir, input_references, store_username, store_apikey):

    if store_username is not None:

        os.environ['STAGEIN_USERNAME'] = store_username
        os.environ['STAGEIN_PASSWORD'] = store_apikey

    STAC_IO.read_text_method = my_read_method

    items = []

    for input_reference in input_references:

        thing = pystac.read_file(input_reference)

        if isinstance(thing, pystac.item.Item):

            items.append(thing)

        elif isinstance(thing, pystac.catalog.Catalog):

            for item in thing.get_items():

                items.append(item)

    # create catalog
    catalog = Catalog(id='catalog', description='staged STAC catalog')

    catalog.add_items(items)

    catalog.normalize_and_save(root_href=data_dir,
                               catalog_type=CatalogType.RELATIVE_PUBLISHED)

    catalog.describe()
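
STAC_IO.read_text_method is overridden with a my_read_method that is not shown (Example #5 does the same). A sketch under the assumption that it fetches http(s) references, optionally with the STAGEIN_* credentials, and falls back to pystac's default reader for everything else:

import os
from urllib.parse import urlparse

import requests
from pystac import STAC_IO

def my_read_method(uri):
    # Hypothetical reader: http(s) references are fetched with optional basic
    # auth taken from the STAGEIN_* environment variables; local paths fall
    # back to pystac's default text reader.
    if urlparse(uri).scheme in ('http', 'https'):
        auth = None
        if os.environ.get('STAGEIN_USERNAME'):
            auth = (os.environ['STAGEIN_USERNAME'], os.environ['STAGEIN_PASSWORD'])
        response = requests.get(uri, auth=auth)
        response.raise_for_status()
        return response.text
    return STAC_IO.default_read_text_method(uri)
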
Example #3
def main(ctx, input_path):

    # dump the CWL and params (if requested)
    dump(ctx)

    if 'TMPDIR' in os.environ:
        os.chdir(os.environ['TMPDIR'])

    logging.info(os.path.join(input_path, 'catalog.json'))

    item = get_item(os.path.join(input_path, 'catalog.json'))

    output_dir = f'{item.id}'

    calibrator = Calibrator()

    item_out = calibrator.calibrate(item)

    logging.info('STAC')

    cat = Catalog(id='catalog',
                  description="Calibrated sar product")

    cat.add_items([item_out])

    cat.normalize_and_save(root_href='./',
                           catalog_type=CatalogType.SELF_CONTAINED)

    logging.info('Done!')


    #os.mkdir(output_dir)

    sys.exit(0)
Example #4
def get_root_catalog():
    """Get Cirrus root catalog from s3

    Returns:
        Dict: STAC root catalog
    """
    caturl = f"{ROOT_URL}/catalog.json"
    if s3().exists(caturl):
        cat = Catalog.from_file(caturl)
    else:
        catid = DATA_BUCKET.split('-data-')[0]
        cat = Catalog(id=catid, description=DESCRIPTION)
        cat.normalize_and_save(ROOT_URL, CatalogType.ABSOLUTE_PUBLISHED)
    logger.debug(f"Fetched {cat.describe()}")
    return cat
Example #5
def stage(input_references):
    
    STAC_IO.read_text_method = my_read_method
    
    catalogs = []

    for index, input_reference in enumerate(input_references):

        items = []

        thing = read_file(input_reference)

        if isinstance(thing, Item):

            items.append(thing)

        elif isinstance(thing, Catalog):

            for item in thing.get_items():

                items.append(item)

        # create catalog
        catalog = Catalog(id=items[0].id,
                          description='staged STAC catalog with {}'.format(items[0].id))

        catalog.add_items(items)

        catalog.normalize_and_save(root_href=items[0].id,
                                   catalog_type=CatalogType.RELATIVE_PUBLISHED)

        catalog.describe()

        catalogs.append(os.path.dirname(catalog.get_self_href()))
        
    return catalogs
Example #6
def main():
    """

# The Data

446 QC'ed chips containing flood events, with hand-labeled flood classifications
4,385 non-QC'ed chips containing water, exported with only Sentinel-1 and Sentinel-2 flood classifications

# The Catalog Outline

** We want to generate a root catalog that contains all items, only training items, or only validation items **
^^^ The script should support this

- Root Catalog
    - Collection: Sentinel 1 data chips
        - Item: The Item
    - Collection: Sentinel 2 data chips
        - Item: The Item
    - Collection: Sentinel 1 weak labels
        - Item: The Item
    - Collection: Sentinel 2 weak labels
        - Item: The Item
    - Collection: Hand labels
        - Item: The Item
    - Collection: Permanent water labels
        - Item: The Item
    - Collection: Traditional Otsu algorithm labels
        - Item: The Item

## Alternate catalog structure

This structure was considered but rejected in the interest of facilitating collections for each
of the label datasets.

- Root Catalog
    - Collection: Sentinel 1
        - Catalog: Country
            - Catalog: Event ID
                (Note: Catalog will always have the first item. Then it will either have the second
                       item or all the others depending on which dir the first item came from)
                - Item: (dir: S1 + S1_NoQC) Sentinel 1 data chip
                - Item: (dir: S1Flood_NoQC) Labels from "weak" classification algorithm applied to S1
                - Item: (dir: QC_v2) Labels from hand classification (ORed with item below)
                - Item: (dir: S1Flood) Labels from traditional Otsu algorithm
                - Item: (dir: Perm) Labels from perm water dataset (this is a Byte tiff, only 1 or 0
                        for yes or no perm water)
    - Collection: Sentinel 2
        - Catalog: Country
            - Catalog: Event ID
                - Item: (dir: S2 + S2_NoQC) Sentinel 2 data chip
                - Item: (dir: S2Flood) Labels from traditional Otsu algorithm applied to S2
    - Collection: PermJRC
        - Catalog: Lat 10
            - Catalog: Lon 10
                - Item: (dir: PermJRC)
    """
    parser = argparse.ArgumentParser(
        description="Build STAC Catalog for sen1floods11")
    parser.add_argument("--debug", action="store_true")
    args = parser.parse_args()
    debug = args.debug

    storage = S3Storage("sen1floods11-data")

    catalog_description = "Bonafilia, D., Tellman, B., Anderson, T., Issenberg, E. 2020. Sen1Floods11: a georeferenced dataset to train and test deep learning flood algorithms for Sentinel-1. The IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2020, pp. 210-211. Available Open access at: http://openaccess.thecvf.com/content_CVPRW_2020/html/w11/Bonafilia_Sen1Floods11_A_Georeferenced_Dataset_to_Train_and_Test_Deep_Learning_CVPRW_2020_paper.html"  # noqa: E501
    catalog_title = "A georeferenced dataset to train and test deep learning flood algorithms for Sentinel-1"  # noqa: E501

    catalog = Catalog("sen1floods11", catalog_description, title=catalog_title)
    print("Created Catalog {}".format(catalog.id))

    # Build Sentinel 1 Collection
    sentinel1 = Collection(
        "S1",
        "Sentinel-1 GRD Chips overlapping labeled data. IW mode, GRD product. See https://developers.google.com/earth-engine/sentinel1 for information on preprocessing",  # noqa: E501
        extent=Extent(SpatialExtent([None, None, None, None]), None),
    )
    collection_add_sentinel_chips(sentinel1,
                                  storage.ls("S1/"),
                                  "s1",
                                  debug=debug)
    collection_add_sentinel_chips(sentinel1,
                                  storage.ls("S1_NoQC/"),
                                  "s1",
                                  debug=debug)
    collection_update_extents(sentinel1)
    catalog.add_child(sentinel1)

    # Build Sentinel 2 Collection
    sentinel2 = Collection(
        "S2",
        "Sentinel-2 MSI L1C chips overlapping labeled data. Contains all spectral bands (1 - 12). Does not contain QA mask.",  # noqa: E501
        extent=Extent(SpatialExtent([None, None, None, None]), None),
    )
    collection_add_sentinel_chips(sentinel2,
                                  storage.ls("S2/"),
                                  "s2",
                                  debug=debug)
    collection_add_sentinel_chips(sentinel2,
                                  storage.ls("S2_NoQC/"),
                                  "s2",
                                  debug=debug)
    collection_update_extents(sentinel2)
    catalog.add_child(sentinel2)

    # Build S1 Weak Labels Collection
    s1weak_labels = Collection(
        "S1Flood_NoQC",
        "Chips of water/nowater labels derived from standard OTSU thresholding of Sentinel-1 VH band overlapping weakly-labeled data.",  # noqa: E501
        extent=Extent(SpatialExtent([None, None, None, None]), None),
        stac_extensions=[Extensions.LABEL],
    )
    label_collection_add_items(
        s1weak_labels,
        catalog,
        storage.ls("S1Flood_NoQC/"),
        sentinel1_links_func,
        "0: Not Water. 1: Water.",
        LabelType.RASTER,
        label_classes=[LabelClasses([0, 1])],
        label_tasks=["classification"],
        debug=debug,
    )
    collection_update_extents(s1weak_labels)
    catalog.add_child(s1weak_labels)

    # Build S2 Weak Labels Collection
    s2weak_labels = Collection(
        "NoQC",
        "Weakly-labeled chips derived from traditional Sentinel-2 Classification",  # noqa: E501
        extent=Extent(SpatialExtent([None, None, None, None]), None),
        stac_extensions=[Extensions.LABEL],
    )
    label_collection_add_items(
        s2weak_labels,
        catalog,
        storage.ls("NoQC/"),
        sentinel2_links_func,
        "-1: No Data / Not Valid. 0: Not Water. 1: Water.",  # noqa: E501
        LabelType.RASTER,
        label_classes=[LabelClasses([-1, 0, 1])],
        label_tasks=["classification"],
        debug=debug,
    )
    collection_update_extents(s2weak_labels)
    catalog.add_child(s2weak_labels)

    # Build Hand Labels Collection
    hand_labels = Collection(
        "QC_v2",
        "446 hand labeled chips of surface water from selected flood events",
        extent=Extent(SpatialExtent([None, None, None, None]), None),
        stac_extensions=[Extensions.LABEL],
    )
    label_collection_add_items(
        hand_labels,
        catalog,
        storage.ls("QC_v2/"),
        sentinel1_sentinel2_links_func,
        "Hand labeled chips containing ground truth. -1: No Data / Not Valid. 0: Not Water. 1: Water.",  # noqa: E501
        LabelType.RASTER,
        label_classes=[LabelClasses([-1, 0, 1])],
        label_tasks=["classification"],
        debug=debug,
    )
    collection_update_extents(hand_labels)
    catalog.add_child(hand_labels)

    # Build Permanent Labels collection
    permanent_labels = Collection(
        "Perm",
        "Permanent water chips generated from the 'transition' layer of the JRC (European Commission Joint Research Centre) dataset",  # noqa: E501
        extent=Extent(SpatialExtent([None, None, None, None]), None),
        stac_extensions=[Extensions.LABEL],
    )
    label_collection_add_items(
        permanent_labels,
        catalog,
        storage.ls("Perm/"),
        lambda *_: [],  # No easy way to map JRC source files to the label chips...
        "0: Not Water. 1: Water.",
        LabelType.RASTER,
        label_classes=[LabelClasses([0, 1])],
        label_tasks=["classification"],
        debug=debug,
    )
    collection_update_extents(permanent_labels)
    catalog.add_child(permanent_labels)

    # Build Otsu algorithm Labels collection
    otsu_labels = Collection(
        "S1Flood",
        "Chips of water/nowater derived from standard OTSU thresholding of Sentinel-1 VH band overlapping labeled data",  # noqa: E501
        extent=Extent(SpatialExtent([None, None, None, None]), None),
        stac_extensions=[Extensions.LABEL],
    )
    label_collection_add_items(
        otsu_labels,
        catalog,
        storage.ls("S1Flood/"),
        sentinel1_links_func,
        "0: Not Water. 1: Water.",
        LabelType.RASTER,
        label_classes=[LabelClasses([0, 1])],
        label_tasks=["classification"],
        debug=debug,
    )
    collection_update_extents(otsu_labels)
    catalog.add_child(otsu_labels)

    # Save Complete Catalog
    root_path = "./catalog"
    catalog.normalize_and_save(root_path,
                               catalog_type=CatalogType.SELF_CONTAINED)
    print("Saved STAC Catalog {} to {}...".format(catalog.id, root_path))
Example #7
def main(ctx, input_reference, s_expression, cbn):

    dump(ctx)

    item = get_item(os.path.join(input_reference, "catalog.json"))

    logging.info(f"Processing {item.id}")

    try:
        os.mkdir(item.id)
    except FileExistsError:
        pass

    cbn = cbn.replace(' ', '-')

    result = os.path.join(item.id, f"{cbn}.tif")

    logging.info(f"Apply {s_expression} to {item.id}")

    apply_s_expression(item=item, s_expression=s_expression, out_tif=result)

    logging.info("STAC")

    item_out = Item(
        id=item.id,
        geometry=item.geometry,
        bbox=item.bbox,
        datetime=item.datetime,
        properties=item.properties,
        stac_extensions=item.stac_extensions,
    )

    eo_item = extensions.eo.EOItemExt(item_out)

    asset_properties = dict()

    asset_properties["s-expression"] = s_expression

    asset = Asset(
        href=os.path.basename(result),
        media_type=MediaType.COG,
        roles=["data"],
        properties=asset_properties,
    )

    eo_bands = [
        extensions.eo.Band.create(
            name=cbn.lower(),
            common_name=cbn.lower(),
            description=f"{cbn.lower()} ({s_expression})",
        )
    ]

    eo_item.set_bands(eo_bands, asset=asset)

    item_out.add_asset(key=cbn.lower(), asset=asset)

    logging.info("STAC")

    cat = Catalog(id="catalog", description="s-expression")

    cat.add_items([item_out])

    cat.normalize_and_save(root_href="./",
                           catalog_type=CatalogType.SELF_CONTAINED)

    logging.info("Done!")
Example #8
                datetime=start_time,
                properties={},
            )
            for asset in assets:
                image_item.add_asset(
                    asset.href.split("/")[-1].split(".")[0], asset)

            stac_items.append(image_item)
        aggregate_spatial_extent = SpatialExtent([[
            aggregate_bounds.bottom,
            aggregate_bounds.left,
            aggregate_bounds.top,
            aggregate_bounds.right,
        ]])
        aggregate_extent = Extent(aggregate_spatial_extent, temporal_extent)
        collection = Collection(
            flood_id,
            "Imagery coextensive with GLOFIMR flood {}".format(flood_id),
            extent=aggregate_extent,
        )
        for stac_item in stac_items:
            collection.add_item(stac_item)

        catalog.add_child(collection)

    # Save Complete Catalog
    root_path = "./data/catalog"
    catalog.normalize_and_save(root_path,
                               catalog_type=CatalogType.SELF_CONTAINED)
    print("Saved STAC Catalog {} to {}...".format(catalog.id, root_path))
Example #9
def scombi(channel_inputs, bands, s_expressions, resolution='highest', aoi=None, color=None, profile=None, lut=None, epsg=None):

    target_dir = 'combi'
    
    if not os.path.exists(target_dir):
    
        os.mkdir(target_dir)
        
    items = []
    assets_href = []
    rescaled = []
    
    for index, input_path in enumerate(channel_inputs):
    #for index, input_path in enumerate([red_channel_input, green_channel_input, blue_channel_input]):
    
        if input_path is None:
            
            items.append(None)
            assets_href.append(None)
            continue
            
        item = get_item(input_path) 
        
        logging.info(item)
        
        items.append(item)
        assets_href.append(get_band_asset_href(item, bands[index]))

    # define AOI, if none is supplied, get the minimum bbox 
    if aoi is None:
        aoi = get_mbb([shape(item.geometry) for item in items]).wkt

    min_lon, min_lat, max_lon, max_lat = loads(aoi).bounds

    # analyze get an EPSG code if it hasn't been supplied
    # check if warp is needed
    epsg, epsg_codes = get_epsg(epsg, assets_href)

    # rescale and get the original assets (these are part of the output)
    logging.info('Rescaling and COG for input assets')
    rescaled = []
    
    # get the data
    for index, asset_href in enumerate(assets_href):

        if asset_href is None:
            
            rescaled.append(None)
            
            continue
            
        logging.info('Getting band {} from {}'.format(bands[index], asset_href))
        
        output_name = '{}/{}_{}.tif'.format(target_dir, index+1, bands[index])

        
        if epsg_codes[index] == epsg:

            ds = gdal.Translate(output_name, 
                                asset_href, 
                                outputType=gdal.GDT_Float32,
                                projWin=[min_lon, max_lat, max_lon, min_lat],
                                projWinSRS='EPSG:4326')

        else:

            logging.info('Warp')
            ds = gdal.Warp(output_name, 
                        asset_href, 
                        outputType=gdal.GDT_Float32,
                        outputBounds=[min_lon, min_lat, max_lon, max_lat],
                        outputBoundsSRS='EPSG:4326',
                        dstSRS=epsg) 
        

        # close the dataset so GDAL flushes it to disk
        ds = None

        rescaled.append(output_name)
    
    # build a VRT with the rescaled assets with the selected resolution mode
    logging.info('Build VRT')
    vrt = 'temp.vrt'
    ds = gdal.BuildVRT(vrt,
                       [ds for ds in rescaled if ds],
                       resolution=resolution, 
                       separate=True)

    ds.FlushCache()
    
    output_cell_size = ds.GetGeoTransform()[1]

    logging.info(str(output_cell_size))

    logging.info('Pimp me')

    pimp.me(vrt, 
            f'{target_dir}/combi.tif', 
            bands, 
            s_expressions,
            color, 
            lut)

    ds = None
    
    # to STAC
    logging.info('STAC')
    cat = Catalog(id='scombidooo',
                  description="Combined RGB composite") 

    # TODO fix datetime
    item = Item(id='combi',
                geometry=mapping(loads(aoi)),
                bbox=list(loads(aoi).bounds),
                datetime=items[0].datetime,
                properties={'bands': bands,
                            's_expressions': s_expressions,
                            'input_items': [_item.id for _item in items],
                            'color': 'N/A' if not color else color,
                            'profile': 'N/A' if not profile else profile}) 

    item.common_metadata.set_gsd(output_cell_size)

    eo_item = extensions.eo.EOItemExt(item)

    for index, asset_href in enumerate(assets_href):
        if asset_href is None:
            continue
        _asset = get_band_asset(items[index], bands[index])
      
        _asset.href = './{}_{}.tif'.format(index+1, bands[index])

        item.add_asset('{}_{}'.format(index+1, bands[index]), _asset)

        
    # add the result.tif Asset
    item.add_asset(key='rgb',
                   asset=Asset(href='./combi.tif',
                               media_type=MediaType.COG))
        
    cat.add_items([item])
    
    cat.normalize_and_save(root_href='./',
                           catalog_type=CatalogType.SELF_CONTAINED)
     
    logging.info('Done!')

    return cat.get_self_href()
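
The get_band_asset()/get_band_asset_href() helpers are not included in the snippet. A rough sketch, assuming the input items key their assets by band name (e.g. 'B04'):

def get_band_asset(item, band_name):
    # Hypothetical helper: return the asset carrying the requested band.
    return item.assets[band_name]

def get_band_asset_href(item, band_name):
    # Absolute href of that asset, usable with GDAL via /vsicurl/.
    return get_band_asset(item, band_name).get_absolute_href()
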
Example #10
class TestCliInvokeAction(unittest.TestCase):
    def setUp(self):
        self.workdir = mkdtemp()
        self.inputdir = mkdtemp()
        self.catalog = Catalog('test-id', 'test catalog')
        self.catalog.normalize_and_save(self.inputdir,
                                        CatalogType.SELF_CONTAINED)
        self.config = config_fixture()
        print(self.config)

    def tearDown(self):
        MockAdapter.messages = []
        shutil.rmtree(self.workdir)

    def test_when_a_service_completes_it_writes_a_output_catalog_to_the_output_dir(
            self):
        with cli_parser('--harmony-action', 'invoke', '--harmony-input',
                        '{"test": "input"}', '--harmony-sources',
                        'example/source/catalog.json',
                        '--harmony-metadata-dir', self.workdir) as parser:
            args = parser.parse_args()
            cli.run_cli(parser, args, MockAdapter, cfg=self.config)
            output = Catalog.from_file(
                os.path.join(self.workdir, 'catalog.json'))
            self.assertTrue(output.validate())

    def test_when_a_service_completes_it_writes_the_output_message_to_the_output_dir(
            self):
        with cli_parser('--harmony-action', 'invoke', '--harmony-input',
                        '{"test": "input"}', '--harmony-sources',
                        'example/source/catalog.json',
                        '--harmony-metadata-dir', self.workdir) as parser:
            args = parser.parse_args()
            cli.run_cli(parser, args, MockAdapter, cfg=self.config)
            with open(os.path.join(self.workdir, 'message.json')) as file:
                self.assertEqual(file.read(), '{"test": "input"}')

    def test_when_the_cli_has_a_staging_location_it_overwites_the_message_staging_location(
            self):
        with cli_parser('--harmony-action', 'invoke', '--harmony-input',
                        '{"test": "input"}', '--harmony-sources',
                        'example/source/catalog.json',
                        '--harmony-metadata-dir', self.workdir,
                        '--harmony-data-location',
                        's3://fake-location/') as parser:
            args = parser.parse_args()
            cli.run_cli(parser, args, MockAdapter, cfg=self.config)
            self.assertEqual(MockAdapter.message.stagingLocation,
                             's3://fake-location/')
            # Does not output the altered staging location
            with open(os.path.join(self.workdir, 'message.json')) as file:
                self.assertEqual(file.read(), '{"test": "input"}')

    def test_when_the_backend_service_throws_a_known_error_it_writes_the_error_to_the_output_dir(
            self):
        with cli_parser('--harmony-action', 'invoke', '--harmony-input',
                        '{"test": "input"}', '--harmony-sources',
                        'example/source/catalog.json',
                        '--harmony-metadata-dir', self.workdir) as parser:

            class MockImpl(MockAdapter):
                def invoke(self):
                    self.is_complete = False
                    raise ForbiddenException('Something bad happened')

            args = parser.parse_args()
            with self.assertRaises(Exception) as context:
                cli.run_cli(parser, args, MockImpl, cfg=self.config)

            self.assertTrue('Something bad happened' in str(context.exception))
            with open(os.path.join(self.workdir, 'error.json')) as file:
                self.assertEqual(
                    file.read(),
                    '{"error": "Something bad happened", "category": "Forbidden"}'
                )

    def test_when_the_backend_service_throws_an_unknown_error_it_writes_a_generic_error_to_the_output_dir(
            self):
        with cli_parser('--harmony-action', 'invoke', '--harmony-input',
                        '{"test": "input"}', '--harmony-sources',
                        'example/source/catalog.json',
                        '--harmony-metadata-dir', self.workdir) as parser:

            class MockImpl(MockAdapter):
                def invoke(self):
                    self.is_complete = False
                    raise Exception('Something bad happened')

            args = parser.parse_args()
            with self.assertRaises(Exception) as context:
                cli.run_cli(parser, args, MockImpl, cfg=self.config)

            self.assertTrue('Something bad happened' in str(context.exception))
            with open(os.path.join(self.workdir, 'error.json')) as file:
                self.assertEqual(
                    file.read(),
                    '{"error": "Service request failed with an unknown error", "category": "Unknown"}'
                )
Example #11
def main(ndvi_threshold, ndwi_threshold, pre_event, post_event):

    os.environ['PREFIX'] = '/opt/anaconda/envs/env_burned_area'
    
    os.environ['PROJ_LIB'] = os.path.join(os.environ['PREFIX'], 'share/proj')
    os.environ['GDAL_DATA'] = os.path.join(os.environ['PREFIX'], 'share/gdal')

    s2_item_pre = S2_stac_item(pre_event['value'])
    s2_item_post = S2_stac_item(post_event['value'])
    
    s2_items = dict()
    s2_items['pre-event'] = s2_item_pre
    s2_items['post-event'] = s2_item_post
    
    dates = []
    bboxes = []
    
    for index, item in enumerate([s2_item_pre.item, s2_item_post.item]):
        
        dates.append(item.datetime)
        bboxes.append(shape(item.geometry).bounds)
        
        logging.info('Stacking bands for input {}'.format(item.id))
        vrt_bands = []

        for band in ['B04', 'B08', 'B11', 'SCL']:

            vrt_bands.append('/vsicurl/{}'.format(item.assets[band].get_absolute_href()))

        vrt = '{}.vrt'.format('pre_event' if index == 0 else 'post_event')
        tif = '{}.tif'.format('pre_event' if index == 0 else 'post_event')

        logging.info('Build vrt for {}'.format(item.id))

        ds = gdal.BuildVRT(vrt,
                           vrt_bands,
                           srcNodata=0,
                           xRes=10, 
                           yRes=10,
                           separate=True)
        ds.FlushCache()


        logging.info('Translate {}'.format(item.id))

        gdal.Translate(tif,
                       vrt,
                       outputType=gdal.GDT_UInt16)

        os.remove(vrt)
    
    ds = gdal.Open('pre_event.tif')

    pre_b04 = ds.GetRasterBand(1).ReadAsArray()
    pre_b08 = ds.GetRasterBand(2).ReadAsArray()
    pre_b11 = ds.GetRasterBand(3).ReadAsArray()
    pre_scl = ds.GetRasterBand(4).ReadAsArray()

    ds = None

    os.remove('pre_event.tif')

    ds = gdal.Open('post_event.tif')

    post_b04 = ds.GetRasterBand(1).ReadAsArray()
    post_b08 = ds.GetRasterBand(2).ReadAsArray()
    post_b11 = ds.GetRasterBand(3).ReadAsArray()
    post_scl = ds.GetRasterBand(4).ReadAsArray()

    width = ds.RasterXSize
    height = ds.RasterYSize

    input_geotransform = ds.GetGeoTransform()
    input_georef = ds.GetProjectionRef()

    ds = None

    os.remove('post_event.tif')
    
    gain = 10000

    pre_ndwi2 = (pre_b08 / gain - pre_b11 / gain) / (pre_b08 / gain  + pre_b11 / gain)
    post_ndwi2 = (post_b08 / gain - post_b11 / gain) / (post_b08 / gain + post_b11 / gain)

    pre_b11 = None
    post_b11 = None

    pre_ndvi = (pre_b08 / gain - pre_b04 / gain) / (pre_b08 / gain  + pre_b04 / gain)
    post_ndvi = (post_b08 / gain - post_b04 / gain) / (post_b08 / gain + post_b04 / gain)

    pre_b04 = None
    post_b04 = None

    pre_b08 = None
    post_b08 = None

    # burned where both index differences exceed their thresholds and the pixel is
    # vegetation (SCL class 4) in the pre- or post-event scene
    conditions = (((post_ndwi2 - pre_ndwi2) > float(ndwi_threshold['value'])) &
                  ((post_ndvi - pre_ndvi) > float(ndvi_threshold['value'])) &
                  ((pre_scl == 4) | (post_scl == 4)))

    burned = np.zeros((height, width), dtype=np.uint8) 

    burned[conditions] = 1

    pre_ndwi2 = None
    post_ndwi2 = None

    pre_ndvi = None
    post_ndvi = None

    # mark pixels whose SCL class is 0, 1, 5, 6, 7, 8 or 9 in either scene as invalid (2)
    invalid_classes = [0, 1, 5, 6, 7, 8, 9]
    burned[np.isin(pre_scl, invalid_classes) | np.isin(post_scl, invalid_classes)] = 2
    
    
    logging.info('Write output product')
    
    output_name = 'S2_BURNED_AREA_{}'.format('_'.join([d.strftime("%Y%m%d") for d in dates])) 

    write_tif(burned, '{}.tif'.format(output_name), width, height, input_geotransform, input_georef)

    logging.info('Output catalog')

    catalog = Catalog(id='catalog', description='Results')

    catalog.clear_items()
    catalog.clear_children()

    result_titles = dict()

    result_titles[output_name] = {'title': 'Burned area analysis from Sentinel-2',
                                  'media_type': MediaType.COG}



    items = []

    for key, value in result_titles.items():

        result_item = Item(id=key,
                           geometry=s2_items['pre-event'].item.geometry,
                           bbox=s2_items['pre-event'].item.bbox,
                           datetime=s2_items['pre-event'].item.datetime,
                           properties={})

        result_item.add_asset(key='data',
                              asset=Asset(href='./{}.tif'.format(key),
                                          media_type=value['media_type'],
                                          title=value['title']))

        items.append(result_item)

    #collection.add_items(items)

    catalog.add_items(items)

    catalog.describe()

    catalog.normalize_and_save(root_href='./',
                               catalog_type=CatalogType.SELF_CONTAINED)

    
    shutil.move('{}.tif'.format(output_name),
                os.path.join('./', output_name, '{}.tif'.format(output_name)))
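
The write_tif() helper is not shown. A minimal GDAL-based sketch matching the call above, assuming a single-band Byte output that reuses the input geotransform and projection:

from osgeo import gdal

def write_tif(data, path, width, height, geotransform, projection_ref):
    # Hypothetical writer: store the burned-area classification as a
    # single-band Byte GeoTIFF with the input georeferencing.
    driver = gdal.GetDriverByName('GTiff')
    ds = driver.Create(path, width, height, 1, gdal.GDT_Byte)
    ds.SetGeoTransform(geotransform)
    ds.SetProjection(projection_ref)
    ds.GetRasterBand(1).WriteArray(data)
    ds.FlushCache()
    ds = None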