예제 #1
0
def check_availability(inventory_gdf, download_dir, data_mount):
    """Check whether each scene is already downloaded or reachable
    through a mount point on the DIAS cloud.

    Adds a 'download_path' column to the inventory GeoDataFrame
    (None when a scene cannot be located) and returns it.
    """
    from ost import Sentinel1Scene

    def _resolve_path(scene_id):
        # resolve the local path for a single scene identifier
        return Sentinel1Scene(scene_id).get_path(download_dir, data_mount)

    inventory_gdf['download_path'] = inventory_gdf.identifier.apply(
        _resolve_path)
    return inventory_gdf
예제 #2
0
def _to_ard_batch(inventory_df,
                  download_dir,
                  processing_dir,
                  ard_parameters,
                  subset=None,
                  polar='VV,VH,HH,HV',
                  max_workers=max(int(os.cpu_count() / 2), 1)):
    """Batch-convert GRD frames to ARD, one acquisition at a time.

    :param inventory_df: inventory of scenes to process
    :param download_dir: directory holding the downloaded scenes
    :param processing_dir: root directory for the processed output
    :param ard_parameters: ARD parameter set applied to every acquisition
    :param subset: optional spatial subset passed through to create_ard
    :param polar: polarisations to process
    :param max_workers: parallel workers (defaults to half the CPUs,
        but at least 1 so single-core machines still work)
    """
    # we create a processing dictionary,
    # where all frames are grouped into acquisitions
    processing_dict = _create_processing_dict(inventory_df)
    for track, scene_lists in processing_dict.items():
        for list_of_scenes in scene_lists:
            # get acquisition date from the acquisition's first frame
            acquisition_date = Sentinel1Scene(list_of_scenes[0]).start_date
            # create a subdirectory based on acq. date
            out_dir = opj(processing_dir, track, acquisition_date)
            os.makedirs(out_dir, exist_ok=True)

            # skip acquisitions that are already processed
            # (fixed: original message was missing a space after 'track {}')
            if os.path.isfile(opj(out_dir, '.processed')):
                logger.debug('Acquisition from {} of track {} '
                             'already processed'.format(
                                 acquisition_date, track))
                continue

            # resolve the paths of all frames once per acquisition
            # (the original rebuilt this list and re-ran create_ard once
            # per frame with an identical filelist and out_prefix,
            # redundantly overwriting the same outputs)
            scene_paths = [
                Sentinel1Scene(scene).get_path(download_dir)
                for scene in list_of_scenes
            ]

            # process the whole acquisition with a single call
            s1_process_scene = Sentinel1Scene(list_of_scenes[0])
            s1_process_scene.ard_parameters = ard_parameters
            s1_process_scene.create_ard(
                filelist=scene_paths,
                out_dir=out_dir,
                out_prefix=s1_process_scene.start_date.replace('-', ''),
                subset=subset,
                polar=polar,
                max_workers=max_workers)
예제 #3
0
def test_s1scene_metadata(s1_id):
    """Verify the metadata dictionary parsed from a 2019 GRD scene id."""
    scene = Sentinel1Scene(s1_id)
    expected = {
        'Scene_Identifier':
        'S1A_IW_GRDH_1SDV_20191116T170638_20191116T170703_029939_036AAB_070F',
        'Satellite': 'Sentinel-1A',
        'Acquisition_Mode': 'Interferometric Wide Swath',
        'Processing_Level': '1',
        'Product_Type': 'Ground Range Detected (GRD)',
        'Acquisition_Date': '20191116',
        'Start_Time': '170638',
        'Stop_Time': '170703',
        'Absolute_Orbit': '029939',
        'Relative_Orbit': '117',
    }
    assert scene.info() == expected
예제 #4
0
def grd_to_ard_batch(inventory_df, config_file):
    """Batch-process GRD acquisitions to ARD as defined by a config file.

    :param inventory_df: inventory of scenes to process
    :param config_file: path to a JSON config with 'download_dir',
        'data_mount', 'executor_type' and 'max_workers'
    :return: DataFrame with columns
        ['identifier', 'outfile', 'out_ls', 'error'] describing the
        outcome of each processed acquisition
    """
    # load relevant config parameters
    with open(config_file, 'r') as file:
        config_dict = json.load(file)
        download_dir = Path(config_dict['download_dir'])
        data_mount = Path(config_dict['data_mount'])

    # where all frames are grouped into acquisitions
    processing_dict = _create_processing_dict(inventory_df)

    # resolve the file paths of every acquisition up front
    iter_list = []
    for list_of_scenes in processing_dict.values():
        scene_paths = [
            Sentinel1Scene(scene).get_path(download_dir, data_mount)
            for scene in list_of_scenes
        ]
        iter_list.append(scene_paths)

    # now we run with godale, which works also with 1 worker
    executor = Executor(executor=config_dict['executor_type'],
                        max_workers=config_dict['max_workers'])

    # collect per-acquisition result frames and concatenate once at the
    # end — DataFrame.append was removed in pandas 2.0
    result_dfs = []
    for task in executor.as_completed(func=grd_to_ard.grd_to_ard,
                                      iterable=iter_list,
                                      fargs=([
                                          str(config_file),
                                      ])):

        list_of_scenes, outfile, out_ls, error = task.result()

        # return the info of processing as dataframe
        result_dfs.append(
            create_processed_df(inventory_df, list_of_scenes, outfile,
                                out_ls, error))

    if result_dfs:
        return pd.concat(result_dfs)

    # nothing processed: return an empty frame with the expected columns
    return pd.DataFrame(columns=['identifier', 'outfile', 'out_ls', 'error'])
예제 #5
0
def test_s1scene_metadata(s1_id):
    """Check scene id and parsed metadata for a 2014 GRD product."""
    scene = Sentinel1Scene(s1_id)
    expected_id = ('S1A_IW_GRDH_1SDV_20141003T040550_'
                   '20141003T040619_002660_002F64_EC04')
    expected_info = {
        'Scene_Identifier': 'S1A_IW_GRDH_1SDV_20141003T040550_20141003T040619_'
                            '002660_002F64_EC04',
        'Satellite': 'Sentinel-1A',
        'Acquisition_Mode': 'Interferometric Wide Swath',
        'Processing_Level': '1',
        'Product_Type': 'Ground Range Detected (GRD)',
        'Acquisition_Date': '20141003',
        'Start_Time': '040550',
        'Stop_Time': '040619',
        'Absolute_Orbit': '002660',
        'Relative_Orbit': '138',
    }
    assert scene.info_dict() == expected_info
    assert scene.scene_id == expected_id
예제 #6
0
def s1_grd_notnr_ost_product(s1_grd_notnr):
    """Pair the scene id (last path segment) with its Sentinel1Scene."""
    scene_id = s1_grd_notnr.rsplit('/', 1)[-1]
    return scene_id, Sentinel1Scene(scene_id)
예제 #7
0
def s1_slc_ost_slave(s1_slc_slave):
    """Return (scene_id, Sentinel1Scene) for the slave SLC zip path."""
    filename = os.path.basename(s1_slc_slave)
    scene_id = filename.replace('.zip', '')
    return scene_id, Sentinel1Scene(scene_id)
예제 #8
0
def s1_slc_ost_master(s1_slc_master):
    """Return (scene_id, Sentinel1Scene) for the master SLC zip path."""
    filename = os.path.basename(s1_slc_master)
    scene_id = filename.replace('.zip', '')
    return scene_id, Sentinel1Scene(scene_id)