Example No. 1
def run(reference_fname, test_fname, out_fname, compression, save_inputs,
        filter_opts):
    """ Run the residuals analysis. """
    # note: lower level h5py access is required in order to visit links
    with h5py.File(reference_fname, 'r') as ref_fid:
        with h5py.File(test_fname, 'r') as test_fid:
            with h5py.File(out_fname, 'w') as out_fid:
                root = h5py.h5g.open(ref_fid.id, b'/')
                root.links.visit(
                    partial(residuals, ref_fid, test_fid, out_fid, compression,
                            save_inputs, filter_opts))

                # create singular TABLES for each Dataset CLASS

                # IMAGE
                try:
                    grp = out_fid[ppjoin('RESULTS', 'IMAGE')]
                    image_results(grp, compression, filter_opts)
                except KeyError:
                    pass

                # SCALAR
                try:
                    grp = out_fid[ppjoin('RESULTS', 'SCALAR')]
                    scalar_results(grp)
                except KeyError:
                    pass

                # TABLE
                try:
                    grp = out_fid[ppjoin('RESULTS', 'TABLE')]
                    table_results(grp)
                except KeyError:
                    pass
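These snippets are lifted from a larger codebase and omit their module-level imports; `ppjoin` in particular appears to be `posixpath.join`, used to build HDF5 group paths. A plausible common header, assumed rather than taken from the original modules, would be:

# Assumed imports shared by the examples below (a reconstruction, not the original header).
from functools import partial
from posixpath import join as ppjoin, basename as pbasename
from os.path import join as pjoin, basename, dirname, splitext, normpath
import glob

import h5py
import numpy
import numpy as np     # some snippets use the `np` alias
import pandas
import pandas as pd    # some snippets use the `pd` alias

A call to the residuals driver above might then look like the following; the filenames are placeholders and `H5CompressionFilter` is assumed to come from the same codebase:

# Hypothetical invocation; 'reference.h5', 'test.h5' and 'residuals.h5' are placeholders.
run('reference.h5', 'test.h5', 'residuals.h5',
    compression=H5CompressionFilter.LZF, save_inputs=False, filter_opts=None)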
Example No. 2
    def custom_print(path):
        """
        A custom print function for dealing with HDF5 object
        types, and print formatting.
        """
        try:
            pathname = normpath(ppjoin("/", path.decode("utf-8")))
        except AttributeError:
            pathname = normpath(ppjoin("/", path))

        obj = h5_obj[path]
        attrs = {k: v for k, v in obj.attrs.items()}
        if isinstance(obj, h5py.Group):
            h5_type = "`Group`"
        elif isinstance(obj, h5py.Dataset):
            class_name = obj.attrs.get("CLASS")
            h5_class = "" if class_name is None else class_name
            fmt = "`{}Dataset`" if class_name is None else "`{} Dataset`"
            h5_type = fmt.format(h5_class)
        else:
            h5_type = "`Other`"  # we'll deal with links and references later

        print("{path}\t{h5_type}".format(path=pathname, h5_type=h5_type))
        if verbose:
            print("Attributes:")
            pprint(attrs, width=100)
            print("*" * 80)
Example No. 3
    def custom_print(path):
        """
        A custom print function for dealing with HDF5 object
        types, and print formatting.
        """
        try:
            pathname = normpath(ppjoin('/', path.decode('utf-8')))
        except AttributeError:
            pathname = normpath(ppjoin('/', path))

        obj = h5_obj[path]
        attrs = {k: v for k, v in obj.attrs.items()}
        if isinstance(obj, h5py.Group):
            h5_type = '`Group`'
        elif isinstance(obj, h5py.Dataset):
            class_name = obj.attrs.get('CLASS')
            h5_class = '' if class_name is None else class_name
            fmt = '`{}Dataset`' if class_name is None else '`{} Dataset`'
            h5_type = fmt.format(h5_class)
        else:
            h5_type = '`Other`'  # we'll deal with links and references later

        print('{path}\t{h5_type}'.format(path=pathname, h5_type=h5_type))
        if verbose:
            print('Attributes:')
            pprint(attrs, width=100)
            print('*' * 80)
Example No. 4
def image_results(image_group,
                  compression=H5CompressionFilter.LZF,
                  filter_opts=None):
    """
    Combine the residual results of each IMAGE Dataset into a
    single TABLE Dataset.
    """
    # potentially could just use visit...
    img_paths = find(image_group, 'IMAGE')

    min_ = []
    max_ = []
    percent = []
    pct_90 = []
    pct_99 = []
    resid_paths = []
    hist_paths = []
    chist_paths = []
    products = []
    name = []

    for pth in img_paths:
        hist_pth = pth.replace('RESIDUALS', 'FREQUENCY-DISTRIBUTIONS')
        chist_pth = pth.replace('RESIDUALS', 'CUMULATIVE-DISTRIBUTIONS')
        resid_paths.append(ppjoin(image_group.name, pth))
        hist_paths.append(ppjoin(image_group.name, hist_pth))
        chist_paths.append(ppjoin(image_group.name, chist_pth))

        dset = image_group[pth]
        min_.append(dset.attrs['min_residual'])
        max_.append(dset.attrs['max_residual'])
        percent.append(dset.attrs['percent_difference'])
        products.append(pbasename(dset.parent.name))
        name.append(pbasename(dset.name))

        dset = image_group[chist_pth]
        pct_90.append(dset.attrs['90th_percentile'])
        pct_99.append(dset.attrs['99th_percentile'])

    df = pandas.DataFrame({
        'product': products,
        'dataset_name': name,
        'min_residual': min_,
        'max_residual': max_,
        'percent_difference': percent,
        '90th_percentile': pct_90,
        '99th_percentile': pct_99,
        'residual_image_pathname': resid_paths,
        'residual_histogram_pathname': hist_paths,
        'residual_cumulative_pathname': chist_paths
    })

    # output
    write_dataframe(df,
                    'IMAGE-RESIDUALS',
                    image_group,
                    compression,
                    title='RESIDUALS-TABLE',
                    filter_opts=filter_opts)
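`find` is not shown in these examples; judging by its use it returns the paths of datasets whose `CLASS` attribute matches the requested value. A minimal sketch under that assumption:

import h5py

def find(h5_group, dataset_class=''):
    # Sketch of the assumed helper: collect relative paths of datasets whose
    # CLASS attribute equals `dataset_class`.
    paths = []

    def collector(name, obj):
        if isinstance(obj, h5py.Dataset) and obj.attrs.get('CLASS') == dataset_class:
            paths.append(name)

    h5_group.visititems(collector)
    return paths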
Example No. 5
def convert(aerosol_path, output_filename, compression, filter_opts):
    """
    Converts all the .pix and .cmp files found in `aerosol_path`
    to an HDF5 file.
    """
    # define a case switch
    func = {'pix': read_pix, 'cmp': read_cmp}

    # create the output file
    with h5py.File(output_filename, 'w') as fid:

        pattern = ['*.pix', '*.cmp']
        for p in pattern:
            search = pjoin(aerosol_path, p)
            files = glob.glob(search)
            for fname in files:
                pth, ext = splitext(fname)
                ext = ext.split(".")[-1]
                grp_name = basename(pth)
                out_path = ppjoin(ext, grp_name)

                # read/write
                df, extents = func[ext](fname)
                attrs = {'extents': wkt.dumps(extents),
                         'source filename': fname}
                write_dataframe(df, out_path, fid, compression=compression,
                                attrs=attrs, filter_opts=filter_opts)
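Driving the converter is then a single call; the directory and output filename below are placeholders:

# Hypothetical invocation; the paths are placeholders.
convert('/path/to/aerosol-data', 'aerosol.h5',
        compression=H5CompressionFilter.LZF, filter_opts=None)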
Example No. 6
def run(aerosol_path, output_filename):
    """
    Converts all the .pix and .cmp files found in `aerosol_path`
    to an HDF5 file.
    """
    # define a case switch
    func = {"pix": read_pix, "cmp": read_cmp}

    # create the output file
    fid = h5py.File(output_filename, "w")

    pattern = ["*.pix", "*.cmp"]
    for p in pattern:
        search = pjoin(aerosol_path, p)
        files = glob.glob(search)
        for fname in files:
            pth, ext = splitext(fname)
            ext = ext.split(".")[-1]
            grp_name = basename(pth)
            out_path = ppjoin(ext, grp_name)

            # read/write
            df, extents = func[ext](fname)
            attrs = {"extents": wkt.dumps(extents), "source filename": fname}
            write_dataframe(df, out_path, fid, attrs=attrs)

    fid.close()
Example No. 7
def browse_for_images(path_to_folder, subfolder, type):
    matches = []
    (_, _, filenames) = folder_walk(ppjoin(path_to_folder, subfolder))
    for filename in fnmatch.filter(filenames, ".".join([type, 'svg'])):
        matches.append(filename)

    return matches
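`folder_walk` is assumed to return a single `(dirpath, dirnames, filenames)` triple, i.e. one step of `os.walk`; a minimal stand-in could be:

import os

def folder_walk(path):
    # Sketch of the assumed helper: the first (dirpath, dirnames, filenames)
    # tuple yielded by os.walk for `path`.
    return next(os.walk(path))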
Example No. 8
def process_wn(path_to_folder, fname, data_length):
    """
    Function loads the white noise used in stimulation
    :param path_to_folder: folder containing the experimental subfolders
    :param fname: white noise file name joined with the subfolder path
    :param data_length: number of samples in the spike train (the white noise is resampled to match it)
    :return: vector containing the white noise values only, or None if the file could not be read
    """
    #   define the input data
    filenameWN = ppjoin(path_to_folder, fname)

    #   get the file, extract the three data subunits
    relacs_file = load(filenameWN)

    #   if the relacs file is empty or too short due to an aborted RePro presentation,
    #   "try:" provides normal termination of the loop instead of an error
    try:
        metasWN, _, datasWN = relacs_file.selectall()

    except Exception:
        return None

    #   check that the number of samples matches the number of samples in the convolved
    #   spike train; if not, interpolate the white noise onto a `data_length` point time base
    datas_whitenoise = datasWN[0][:, 1]
    if datas_whitenoise.shape[0] == data_length:
        return datas_whitenoise

    timepoints_new = np.linspace(datasWN[0][0, 0], datasWN[0][-1, 0], data_length)
    datas_interpolated = np.interp(timepoints_new, datasWN[0][:, 0], datasWN[0][:, 1])

    return datas_interpolated
Example No. 9
def link_shadow_datasets(self_shadow_fname, cast_shadow_sun_fname,
                         cast_shadow_satellite_fname, out_fname):
    """
    Link the self shadow mask, and the two cast shadow masks into a
    single file for easier access.
    """
    group_path = GroupName.SHADOW_GROUP.value
    dname_fmt = DatasetName.CAST_SHADOW_FMT.value
    dname = ppjoin(group_path, DatasetName.SELF_SHADOW.value)
    create_external_link(self_shadow_fname, dname, out_fname, dname)

    dname = ppjoin(group_path, dname_fmt.format(source='SUN'))
    create_external_link(cast_shadow_sun_fname, dname, out_fname, dname)

    dname = ppjoin(group_path, dname_fmt.format(source='SATELLITE'))
    create_external_link(cast_shadow_satellite_fname, dname, out_fname, dname)
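`create_external_link` presumably wraps `h5py.ExternalLink`; a minimal sketch, assuming the argument order used above (source file, source path, output file, new path):

import h5py

def create_external_link(source_fname, source_path, out_fname, new_path):
    # Sketch of the assumed helper: expose `source_path` from `source_fname`
    # inside `out_fname` under `new_path` via an external link.
    with h5py.File(out_fname, 'a') as fid:
        if new_path not in fid:
            fid[new_path] = h5py.ExternalLink(source_fname, source_path)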
Example No. 10
def aggregate_ancillary(granule_groups):
    """
    If the acquisition is part of a `tiled` scene such as Sentinel-2a,
    then we need to average the point measurements gathered from
    all granules.
    """
    # initialise the mean result
    ozone = vapour = aerosol = elevation = 0.0

    # number of granules in the scene
    n_tiles = len(granule_groups)

    for granule in granule_groups:
        group = granule[GroupName.ANCILLARY_GROUP.value]

        ozone += group[DatasetName.OZONE.value][()]
        vapour += group[DatasetName.WATER_VAPOUR.value][()]
        aerosol += group[DatasetName.AEROSOL.value][()]
        elevation += group[DatasetName.ELEVATION.value][()]

    # average
    ozone /= n_tiles
    vapour /= n_tiles
    aerosol /= n_tiles
    elevation /= n_tiles

    description = ("The {} value is an average from all the {} values "
                   "retreived for each Granule.")
    attrs = {"data_source": "granule_average"}

    # output each average value back into the same granule ancillary group
    group_name = ppjoin(GroupName.ANCILLARY_GROUP.value,
                        GroupName.ANCILLARY_AVG_GROUP.value)
    for granule in granule_groups:
        # for the multifile workflow, we only want to write to one granule
        try:
            group = granule.create_group(group_name)
        except ValueError:
            continue

        dset = group.create_dataset(DatasetName.OZONE.value, data=ozone)
        attrs["description"] = description.format(*(2 * ["Ozone"]))
        attach_attributes(dset, attrs)

        dset = group.create_dataset(DatasetName.WATER_VAPOUR.value,
                                    data=vapour)
        attrs["description"] = description.format(*(2 * ["Water Vapour"]))
        attach_attributes(dset, attrs)

        dset = group.create_dataset(DatasetName.AEROSOL.value, data=aerosol)
        attrs["description"] = description.format(*(2 * ["Aerosol"]))
        attach_attributes(dset, attrs)

        dset = group.create_dataset(DatasetName.ELEVATION.value,
                                    data=elevation)
        attrs["description"] = description.format(*(2 * ["Elevation"]))
        attach_attributes(dset, attrs)
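`attach_attributes` is assumed to simply copy a dict onto an HDF5 object's attribute set, for example:

def attach_attributes(h5_obj, attrs=None):
    # Sketch of the assumed helper: copy key/value pairs onto the object's attrs.
    if attrs:
        for key, value in attrs.items():
            h5_obj.attrs[key] = value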
Example No. 11
def read_info(path_to_folder, subfolder):
    """
    Loads info.dat containing experiment metadata and extracts and returns information of interest
    :param path_to_folder : path to the subfolders, containing experimental data
    :param subfolder : subfolder containing experiment/cell data
    :return meta_info :  returns name of the segment in which the recording was performed

    """

    filename = ppjoin(path_to_folder, subfolder, "info.dat")

    meta_info = load(filename)
    # print(meta_info)
    return meta_info
Example No. 12
def spont_act(path_to_folder, subfolder, info):
    """
    main calls various sub-functions to extract the data and plot the raster, return map and interval histogram
    :param path_to_folder: folder containing experiments
    :param subfolder: folder containing cell experimental data
    :param info: experiment metadata
    :return fnames
    """
    #   define the input data
    filename = ppjoin(path_to_folder, subfolder, "saveevents-Spikes-1.dat")

    #   get the file, extract the three data subunits
    relacs_file = load(filename)

    #   if relacs file is empty or too short due to aborted RePro presentation
    #   "try:" provides normal termination of the loop instead of error
    try:
        metas, _, datas = relacs_file.selectall()

    except Exception:
        return None

    #   define list for rug filenames
    fnames_spont = []

    #   for each iteration of the same RePro
    for i in range(0, len(metas)):

        #   print processed RePro iteration
        print("Spont activity ReProIx", i)

        #   sorts spikes
        aa = [metas[i]]
        #   conversion into milliseconds
        spikeDict, stim_amps = fi_spikes_sort(aa, datas[i][:, 0] * 1000)

        #   computes instantaneous spike frequencies based on spikes
        freqDict, timeDict = fi_instant_freq(
            spikeDict)  #, metas[counter[i]:counter[i+1]])
        freq_continuous, freq_continuous2, timeLine, freqSD_continuous = fi_inst_freq_continuous(
            spikeDict, freqDict, metas)

        #   plots
        fnames = sp_raster_plot(i, path_to_folder, subfolder, spikeDict, freq_continuous, freq_continuous2, timeDict,\
                    timeLine, metas[i], info, fileN = i)

        #   appends the list of figure file names
        fnames_spont.append(fnames)

    return fnames_spont
Example No. 13
def convert(aerosol_path, out_h5: h5py.Group, compression, filter_opts):
    """
    Converts all the .pix and .cmp files found in `aerosol_path`
    to an HDF5 file.
    """
    # define a case switch
    func = {"pix": read_pix, "cmp": read_cmp}
    dataset_names = []
    metadata = []

    pattern = ["*.pix", "*.cmp"]
    for p in pattern:
        for search_path in aerosol_path.glob(p):
            _path = search_path.resolve()
            fname, ext = _path.stem, _path.suffix[1:]  # exclude the period from ext
            out_path = ppjoin(ext, fname)

            # read/write
            df, extents = func[ext](_path)

            # src checksum; used to help derive fallback uuid
            with _path.open("rb") as src:
                src_checksum = generate_md5sum(src).hexdigest()

            attrs = {
                "extents": wkt.dumps(extents),
                "source filename": str(_path)
            }
            write_dataframe(
                df,
                out_path,
                out_h5,
                compression=compression,
                attrs=attrs,
                filter_opts=filter_opts,
            )
            dataset_names.append(out_path)
            metadata.append({
                "id": str(generate_fallback_uuid(PRODUCT_HREF,
                                                 path=str(_path.stem),
                                                 md5=src_checksum))
            })

    return metadata, dataset_names
Example No. 14
def generate_exp_html(path_to_folder, subfolder, info, figs):

    #   generate page title
    rec_location = info["Cell"]["Location"]
    pagetitle = "".join([rec_location, ": ", subfolder])

    #   creates page instance
    pg = mup.page()

    #   initialize the page, embed css, titles, header and footer
    pg.init(title=pagetitle,
            css=('markup.css', 'two.css'),
            header="".join(
                ['<font size="10" color="red" >', pagetitle, '</font>']),
            footer="The bitter end.")

    #   line break under header
    pg.br()

    #   loop over the figs ordered dictionary for every type of the figure
    for k, v in figs.items():
        images = []
        pg.p("".join(
            ['<font size="5" color="red" >', '<b>', k, '</b>', '</font>']))
        for i in range(0, len(v)):
            # images.append (".".join([v[i],'svg']))
            pg.p()
            pg.a(href="".join([v[i].split("_FI")[0], "_FI_rug.html"]))
            pg.img(src=".".join([v[i], 'svg']),
                   alt="click for rug and return plot",
                   align="middle")
        pg.br()
        pg.hr()

    #   line break before footer
    pg.br()

    #   dump the generated html code into the .html file
    html_name = ".".join(
        ["".join([info["Cell"]["Location"], "_", "index"]), "html"])
    with open(ppjoin(path_to_folder, subfolder, html_name), "w") as f:
        f.write(str(pg))

    return html_name, rec_location
Example No. 15
def extract(output_directory, group, name):
    """
    A simple utility that sends an object to the appropriate
    extraction utility.
    """
    dataset_name = ppjoin(group.name, name.decode('utf-8'))
    obj = group[dataset_name]
    obj_class = obj.attrs.get('CLASS')

    if obj_class == 'IMAGE':
        convert_image(obj, output_directory)
    elif obj_class == 'TABLE':
        convert_table(group, dataset_name, output_directory)
    elif obj_class == 'SCALAR':
        convert_scalar(obj, output_directory)
    else:
        return None
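The `name.decode('utf-8')` call suggests `extract` is fed byte paths from the low-level link visitor, in the same way the residuals driver in Example No. 1 is wired up. A hypothetical driver (the filenames are placeholders):

from functools import partial

import h5py

# Hypothetical driver; 'results.h5' and 'outdir' are placeholders.
with h5py.File('results.h5', 'r') as fid:
    root = h5py.h5g.open(fid.id, b'/')
    root.links.visit(partial(extract, 'outdir', fid))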
Example No. 16
    def test_lon_array(self):
        """
        Test that the interpolated longitude array has sensible
        values.

        :notes:
        The default image that is used is 1000 by 1000. This might
        be too small for a recursive depth of 7 (the default within
        the NBAR framework). As such a depth of 5 is used and produced
        correct results. If a larger image is used, then the depth
        level should probably be increased.
        """
        acq = acquisitions(LS5_SCENE1).get_acquisitions()[0]
        geobox = acq.gridded_geo_box()
        fid = create_lon_lat_grids(acq, depth=5)
        dataset_name = ppjoin(GroupName.LON_LAT_GROUP.value,
                              DatasetName.LON.value)
        lon = fid[dataset_name][:]
        ids = ut.random_pixel_locations(lon.shape)

        # We'll transform (reproject) to WGS84
        sr = osr.SpatialReference()
        sr.SetFromUserInput(CRS)

        # Get a list of x reprojected co-ordinates
        reprj = []
        for i in range(ids[0].shape[0]):
            # Get pixel (x,y)
            xy = (ids[1][i], ids[0][i])

            # Convert pixel to map
            mapXY = geobox.convert_coordinates(xy, centre=True)

            # Transform map to another crs
            x, _ = geobox.transform_coordinates(mapXY, to_crs=sr)
            reprj.append(x)

        reprj = numpy.array(reprj)

        lon_values = lon[ids]

        # A decimal degree co-ordinate should agree to at least 5 decimal places
        self.assertIsNone(npt.assert_almost_equal(lon_values, reprj,
                                                  decimal=5))
Example No. 17
    def run(self):
        with self.output().temporary_path() as out_fname:
            for root, _, files in os.walk(self.work_root):
                # skip any private files
                if basename(root)[0] == "_":
                    continue

                for file_ in files:
                    if splitext(file_)[1] == ".h5":
                        fname = pjoin(root, file_)
                        grp_name = basename(dirname(fname.replace(self.work_root, "")))

                        with h5py.File(fname, "r") as fid:
                            groups = [g for g in fid]

                        for pth in groups:
                            new_path = ppjoin(self.granule, grp_name, pth)
                            create_external_link(fname, pth, out_fname, new_path)

            with h5py.File(out_fname, "a") as fid:
                fid.attrs["level1_uri"] = self.level1
Example No. 18
def generate_index(exp_pages, wd, fish_n_chips):

    pagetitle = "eFISH intracellular in vitro"
    pg = mup.page()
    pg.init(title=pagetitle,
            css=('markup.css', 'two.css'),
            header="".join(
                ['<font size="10" color="red" >', pagetitle, '</font>']),
            footer="The void.")

    for k, v in exp_pages.items():
        pages = []
        pg.p("".join(
            ['<font size="5" color="red" >', '<b>', k, '</b>', '</font>']))
        pg.hr()
        pg.br()
        for i in range(0, len(v)):
            if fish_n_chips[v[i].split("/")[-2]] == []:
                # pg.p(fish_n_chips[v[i]])
                continue
            else:
                pg.p(fish_n_chips[v[i].split("/")[-2]][0])
                pg.a("".join([
                    v[i], "     ",
                    str(fish_n_chips[v[i].split("/")[-2]][1:])
                ]),
                     class_='internal',
                     href=v[i])

        pg.br()
        pg.hr()

    #   dump the generated html code into the .html file
    html_name = "index.html"
    with open(ppjoin(wd, html_name), "w") as f:
        f.write(str(pg))
Example No. 19
    def elevation_provenance(anc_grp):
        ids = []

        # low resolution source
        dname = DatasetName.ELEVATION.value
        dset = anc_grp[dname]
        ids.extend(dset.attrs['id'])

        # high resolution source (res group is adjacent to ancillary group)
        parent_group = anc_grp.parent
        for res_group in res_group_bands:
            dname = ppjoin(res_group, GroupName.ELEVATION_GROUP.value,
                           DatasetName.DSM_SMOOTHED.value)
            dset = parent_group[dname]
            ids.extend(dset.attrs['id'])

        # unique listing of ids
        ids = numpy.unique(numpy.array(ids)).tolist()
        md = {
            'id': ids,
        }

        return md
Example No. 20
    def test_modtran_run(self):
        """
        Tests that the interface to modtran (run_modtran)
        works for known inputs.
        Used to validate environment configuration/setup
        """

        band_names = [
            'BAND-1', 'BAND-2', 'BAND-3', 'BAND-4', 'BAND-5', 'BAND-6',
            'BAND-7', 'BAND-8'
        ]
        point = 0
        albedo = Albedos.ALBEDO_0

        # setup mock acquisitions object
        acquisitions = []
        for bandn in band_names:
            acq = mock.MagicMock()
            acq.acquisition_datetime = datetime(2001, 1, 1)
            acq.band_type = BandType.REFLECTIVE
            acq.spectral_response = mock_spectral_response
            acquisitions.append(acq)

        # setup mock atmospherics group
        attrs = {'lonlat': 'TEST'}
        atmospherics = mock.MagicMock()
        atmospherics.attrs = attrs
        atmospherics_group = {POINT_FMT.format(p=point): atmospherics}

        # Compute base path -- prefix for hdf5 file
        base_path = ppjoin(GroupName.ATMOSPHERIC_RESULTS_GRP.value,
                           POINT_FMT.format(p=point))

        with tempfile.TemporaryDirectory() as workdir:
            run_dir = pjoin(workdir, POINT_FMT.format(p=point),
                            ALBEDO_FMT.format(a=albedo.value))
            os.makedirs(run_dir)

            # TODO replace json_input copy with json input generation
            with open(INPUT_JSON, 'r') as fd:
                json_data = json.load(fd)
                for mod_input in json_data['MODTRAN']:
                    mod_input['MODTRANINPUT']['SPECTRAL'][
                        'FILTNM'] = SPECTRAL_RESPONSE_LS8

            with open(
                    pjoin(run_dir,
                          POINT_ALBEDO_FMT.format(p=point, a=albedo.value)) +
                    ".json", 'w') as fd:
                json.dump(json_data, fd)

            fid = run_modtran(
                acquisitions,
                atmospherics_group,
                Workflow.STANDARD,
                npoints=12,  # number of track points
                point=point,
                albedos=[albedo],
                modtran_exe=MODTRAN_EXE,
                basedir=workdir,
                out_group=None)
            assert fid

            # Test base attrs
            assert fid[base_path].attrs['lonlat'] == 'TEST'
            assert fid[base_path].attrs['datetime'] == datetime(2001, 1,
                                                                1).isoformat()
            # test albedo headers?
            # Summarise modtran results to surface reflectance coefficients
            test_grp = fid[base_path][ALBEDO_FMT.format(a=albedo.value)]
            nbar_coefficients, _ = coefficients(
                read_h5_table(
                    fid,
                    pjoin(base_path, ALBEDO_FMT.format(a=albedo.value),
                          DatasetName.CHANNEL.value)),
                read_h5_table(
                    fid,
                    pjoin(base_path, ALBEDO_FMT.format(a=albedo.value),
                          DatasetName.SOLAR_ZENITH_CHANNEL.value)))

            expected = pd.read_csv(EXPECTED_CSV, index_col='band_name')
            pd.testing.assert_frame_equal(nbar_coefficients,
                                          expected,
                                          check_less_precise=True)
Example No. 21
def format_json(acquisitions, ancillary_group, satellite_solar_group,
                lon_lat_group, workflow, out_group):
    """
    Creates the JSON input files for MODTRAN for the albedo (0) and thermal cases.
    """
    # angles data
    sat_view = satellite_solar_group[DatasetName.SATELLITE_VIEW.value]
    sat_azi = satellite_solar_group[DatasetName.SATELLITE_AZIMUTH.value]
    longitude = lon_lat_group[DatasetName.LON.value]
    latitude = lon_lat_group[DatasetName.LAT.value]

    # retrieve the averaged ancillary if available
    anc_grp = ancillary_group.get(GroupName.ANCILLARY_AVG_GROUP.value)
    if anc_grp is None:
        anc_grp = ancillary_group

    # ancillary data
    coordinator = ancillary_group[DatasetName.COORDINATOR.value]
    aerosol = anc_grp[DatasetName.AEROSOL.value][()]
    water_vapour = anc_grp[DatasetName.WATER_VAPOUR.value][()]
    ozone = anc_grp[DatasetName.OZONE.value][()]
    elevation = anc_grp[DatasetName.ELEVATION.value][()]

    npoints = coordinator.shape[0]
    view = numpy.zeros(npoints, dtype='float32')
    azi = numpy.zeros(npoints, dtype='float32')
    lat = numpy.zeros(npoints, dtype='float64')
    lon = numpy.zeros(npoints, dtype='float64')

    for i in range(npoints):
        yidx = coordinator['row_index'][i]
        xidx = coordinator['col_index'][i]
        view[i] = sat_view[yidx, xidx]
        azi[i] = sat_azi[yidx, xidx]
        lat[i] = latitude[yidx, xidx]
        lon[i] = longitude[yidx, xidx]

    view_corrected = 180 - view
    azi_corrected = azi + 180
    rlon = 360 - lon

    # check if in western hemisphere
    idx = rlon >= 360
    rlon[idx] -= 360

    idx = (180 - view_corrected) < 0.1
    view_corrected[idx] = 180
    azi_corrected[idx] = 0

    idx = azi_corrected > 360
    azi_corrected[idx] -= 360

    # get the modtran profiles to use based on the centre latitude
    _, centre_lat = acquisitions[0].gridded_geo_box().centre_lonlat

    if out_group is None:
        out_group = h5py.File('atmospheric-inputs.h5', 'w')

    if GroupName.ATMOSPHERIC_INPUTS_GRP.value not in out_group:
        out_group.create_group(GroupName.ATMOSPHERIC_INPUTS_GRP.value)

    group = out_group[GroupName.ATMOSPHERIC_INPUTS_GRP.value]
    iso_time = acquisitions[0].acquisition_datetime.isoformat()
    group.attrs['acquisition-datetime'] = iso_time

    json_data = {}
    # setup the json files required by MODTRAN
    if workflow in (Workflow.STANDARD, Workflow.NBAR):
        acqs = [a for a in acquisitions if a.band_type == BandType.REFLECTIVE]

        for p in range(npoints):

            for alb in Workflow.NBAR.albedos:

                input_data = {'name': POINT_ALBEDO_FMT.format(p=p, a=str(alb.value)),
                              'water': water_vapour,
                              'ozone': ozone,
                              'doy': acquisitions[0].julian_day(),
                              'visibility': -aerosol,
                              'lat': lat[p],
                              'lon': rlon[p],
                              'time': acquisitions[0].decimal_hour(),
                              'sat_azimuth': azi_corrected[p],
                              'sat_height': acquisitions[0].altitude / 1000.0,
                              'elevation': elevation,
                              'sat_view': view_corrected[p],
                              'albedo': float(alb.value),
                              'filter_function': acqs[0].spectral_filter_name,
                              'binary': False
                              }

                if centre_lat < -23.0:
                    data = mpjson.midlat_summer_albedo(**input_data)
                else:
                    data = mpjson.tropical_albedo(**input_data)

                input_data['description'] = 'Input file for MODTRAN'
                input_data['file_format'] = 'json'
                input_data.pop('binary')

                json_data[(p, alb)] = data

                data = json.dumps(data, cls=JsonEncoder, indent=4)
                dname = ppjoin(POINT_FMT.format(p=p),
                               ALBEDO_FMT.format(a=alb.value),
                               DatasetName.MODTRAN_INPUT.value)

                write_scalar(data, dname, group, input_data)

    # create json for sbt if it has been collected
    if ancillary_group.attrs.get('sbt-ancillary'):
        dname = ppjoin(POINT_FMT, DatasetName.ATMOSPHERIC_PROFILE.value)
        acqs = [a for a in acquisitions if a.band_type == BandType.THERMAL]

        for p in range(npoints):

            atmos_profile = read_h5_table(ancillary_group, dname.format(p=p))

            n_layers = atmos_profile.shape[0] + 6
            elevation = atmos_profile.iloc[0]['GeoPotential_Height']

            input_data = {'name': POINT_ALBEDO_FMT.format(p=p, a='TH'),
                          'ozone': ozone,
                          'n': n_layers,
                          'prof_alt': list(atmos_profile['GeoPotential_Height']),
                          'prof_pres': list(atmos_profile['Pressure']),
                          'prof_temp': list(atmos_profile['Temperature']),
                          'prof_water': list(atmos_profile['Relative_Humidity']),
                          'visibility': -aerosol,
                          'sat_height': acquisitions[0].altitude / 1000.0,
                          'gpheight': elevation,
                          'sat_view': view_corrected[p],
                          'filter_function': acqs[0].spectral_filter_name,
                          'binary': False
                          }

            data = mpjson.thermal_transmittance(**input_data)

            input_data['description'] = 'Input File for MODTRAN'
            input_data['file_format'] = 'json'
            input_data.pop('binary')

            json_data[(p, Albedos.ALBEDO_TH)] = data

            data = json.dumps(data, cls=JsonEncoder, indent=4)
            out_dname = ppjoin(POINT_FMT.format(p=p),
                               ALBEDO_FMT.format(a=Albedos.ALBEDO_TH.value),
                               DatasetName.MODTRAN_INPUT.value)
            write_scalar(data, out_dname, group, input_data)

    # attach location info to each point Group
    for p in range(npoints):
        lonlat = (coordinator['longitude'][p], coordinator['latitude'][p])
        group[POINT_FMT.format(p=p)].attrs['lonlat'] = lonlat

    return json_data, out_group
Example No. 22
def card4l(level1, granule, workflow, vertices, method, pixel_quality, landsea,
           tle_path, aerosol, brdf_path, brdf_premodis_path, ozone_path,
           water_vapour, dem_path, dsm_fname, invariant_fname, modtran_exe,
           out_fname, ecmwf_path=None, rori=0.52, buffer_distance=8000,
           compression=H5CompressionFilter.LZF, filter_opts=None,
           h5_driver=None, acq_parser_hint=None):
    """
    CEOS Analysis Ready Data for Land.
    A workflow for producing standardised products that meet the
    CARD4L specification.

    :param level1:
        A string containing the full file pathname to the level1
        dataset.

    :param granule:
        A string containing the granule id to process.

    :param workflow:
        An enum from wagl.constants.Workflow representing which
        workflow to run.

    :param vertices:
        An integer 2-tuple indicating the number of rows and columns
        of sample-locations ("coordinator") to produce.
        The number of vertex columns should be odd.

    :param method:
        An enum from wagl.constants.Method representing the
        interpolation method to use during the interpolation
        of the atmospheric coefficients.

    :param pixel_quality:
        A bool indicating whether or not to run pixel quality.

    :param landsea:
        A string containing the full file pathname to the directory
        containing the land/sea mask datasets.

    :param tle_path:
        A string containing the full file pathname to the directory
        containing the two line element datasets.

    :param aerosol:
        A string containing the full file pathname to the HDF5 file
        containing the aerosol data.

    :param brdf_path:
        A string containing the full file pathname to the directory
        containing the BRDF data.

    :param brdf_premodis_path:
        A string containing the full file pathname to the directory
        containing the decadal averaged BRDF data used for acquisitions
        prior to TERRA/AQUA satellite operations, or for near real time
        applications.

    :param ozone_path:
        A string containing the full file pathname to the directory
        containing the ozone datasets.

    :param water_vapour:
        A string containing the full file pathname to the directory
        containing the water vapour datasets.

    :param dem_path:
        A string containing the full file pathname to the directory
        containing the reduced resolution DEM.

    :param dsm_fname:
        A string containing the full file pathname to the directory
        containing the Digital Surface Model for use in terrain
        illumination correction.

    :param invariant_fname:
        A string containing the full file pathname to the image file
        containing the invariant geo-potential data for use within
        the SBT process.

    :param modtran_exe:
        A string containing the full file pathname to the MODTRAN
        executable.

    :param out_fname:
        A string containing the full file pathname that will contain
        the output data from the data standardisation process.

    :param ecmwf_path:
        A string containing the full file pathname to the directory
        containing the data from the European Centre for Medium-Range
        Weather Forecasts, for use within the SBT process.

    :param rori:
        A floating point value for surface reflectance adjustment.
        TODO Fuqin to add additional documentation for this parameter.
        Default is 0.52.

    :param buffer_distance:
        A number representing the desired distance (in the same
        units as the acquisition) in which to calculate the extra
        number of pixels required to buffer an image.
        Default is 8000, which for an acquisition using metres would
        equate to 8000 metres.

    :param compression:
        An enum from hdf5.compression.H5CompressionFilter representing
        the desired compression filter to use for writing H5 IMAGE and
        TABLE class datasets to disk.
        Default is H5CompressionFilter.LZF.

    :param filter_opts:
        A dict containing any additional keyword arguments when
        generating the configuration for the given compression Filter.
        Default is None.

    :param h5_driver:
        The specific HDF5 file driver to use when creating the output
        HDF5 file.
        See http://docs.h5py.org/en/latest/high/file.html#file-drivers
        for more details.
        Default is None; which writes direct to disk using the
        appropriate driver for the underlying OS.

    :param acq_parser_hint:
        A string containing any hints to provide the acquisitions
        loader with.
    """
    tp5_fmt = pjoin(POINT_FMT, ALBEDO_FMT, ''.join([POINT_ALBEDO_FMT, '.tp5']))
    nvertices = vertices[0] * vertices[1]

    container = acquisitions(level1, hint=acq_parser_hint)

    # TODO: pass through an acquisitions container rather than pathname
    with h5py.File(out_fname, 'w', driver=h5_driver) as fid:
        fid.attrs['level1_uri'] = level1

        for grp_name in container.supported_groups:
            log = STATUS_LOGGER.bind(level1=container.label, granule=granule,
                                     granule_group=grp_name)

            # root group for a given granule and resolution group
            root = fid.create_group(ppjoin(granule, grp_name))
            acqs = container.get_acquisitions(granule=granule, group=grp_name)

            # longitude and latitude
            log.info('Latitude-Longitude')
            create_lon_lat_grids(acqs[0], root, compression, filter_opts)

            # satellite and solar angles
            log.info('Satellite-Solar-Angles')
            calculate_angles(acqs[0], root[GroupName.LON_LAT_GROUP.value],
                             root, compression, filter_opts, tle_path)

            if workflow == Workflow.STANDARD or workflow == Workflow.NBAR:

                # DEM
                log.info('DEM-retrieval')
                get_dsm(acqs[0], dsm_fname, buffer_distance, root, compression,
                        filter_opts)

                # slope & aspect
                log.info('Slope-Aspect')
                slope_aspect_arrays(acqs[0],
                                    root[GroupName.ELEVATION_GROUP.value],
                                    buffer_distance, root, compression,
                                    filter_opts)

                # incident angles
                log.info('Incident-Angles')
                incident_angles(root[GroupName.SAT_SOL_GROUP.value],
                                root[GroupName.SLP_ASP_GROUP.value],
                                root, compression, filter_opts)

                # exiting angles
                log.info('Exiting-Angles')
                exiting_angles(root[GroupName.SAT_SOL_GROUP.value],
                               root[GroupName.SLP_ASP_GROUP.value],
                               root, compression, filter_opts)

                # relative azimuth slope
                log.info('Relative-Azimuth-Angles')
                incident_group_name = GroupName.INCIDENT_GROUP.value
                exiting_group_name = GroupName.EXITING_GROUP.value
                relative_azimuth_slope(root[incident_group_name],
                                       root[exiting_group_name],
                                       root, compression, filter_opts)

                # self shadow
                log.info('Self-Shadow')
                self_shadow(root[incident_group_name],
                            root[exiting_group_name], root, compression,
                            filter_opts)

                # cast shadow solar source direction
                log.info('Cast-Shadow-Solar-Direction')
                dsm_group_name = GroupName.ELEVATION_GROUP.value
                calculate_cast_shadow(acqs[0], root[dsm_group_name],
                                      root[GroupName.SAT_SOL_GROUP.value],
                                      buffer_distance, root, compression,
                                      filter_opts)

                # cast shadow satellite source direction
                log.info('Cast-Shadow-Satellite-Direction')
                calculate_cast_shadow(acqs[0], root[dsm_group_name],
                                      root[GroupName.SAT_SOL_GROUP.value],
                                      buffer_distance, root, compression,
                                      filter_opts, False)

                # combined shadow masks
                log.info('Combined-Shadow')
                combine_shadow_masks(root[GroupName.SHADOW_GROUP.value],
                                     root[GroupName.SHADOW_GROUP.value],
                                     root[GroupName.SHADOW_GROUP.value],
                                     root, compression, filter_opts)

        # nbar and sbt ancillary
        log = STATUS_LOGGER.bind(level1=container.label, granule=granule,
                                 granule_group=None)

        # granule root group
        root = fid[granule]

        # get the highest resolution group containing supported bands
        acqs, grp_name = container.get_highest_resolution(granule=granule)

        grn_con = container.get_granule(granule=granule, container=True)
        res_group = root[grp_name]

        log.info('Ancillary-Retrieval')
        nbar_paths = {'aerosol_dict': aerosol,
                      'water_vapour_dict': water_vapour,
                      'ozone_path': ozone_path,
                      'dem_path': dem_path,
                      'brdf_path': brdf_path,
                      'brdf_premodis_path': brdf_premodis_path}
        collect_ancillary(grn_con, res_group[GroupName.SAT_SOL_GROUP.value],
                          nbar_paths, ecmwf_path, invariant_fname,
                          vertices, root, compression, filter_opts)

        # atmospherics
        log.info('Atmospherics')

        ancillary_group = root[GroupName.ANCILLARY_GROUP.value]

        # satellite/solar angles and lon/lat for a resolution group
        sat_sol_grp = res_group[GroupName.SAT_SOL_GROUP.value]
        lon_lat_grp = res_group[GroupName.LON_LAT_GROUP.value]

        # TODO: supported acqs in different groups pointing to different response funcs
        # tp5 files
        tp5_data, _ = format_tp5(acqs, ancillary_group, sat_sol_grp,
                                 lon_lat_grp, workflow, root)

        # atmospheric inputs group
        inputs_grp = root[GroupName.ATMOSPHERIC_INPUTS_GRP.value]

        # radiative transfer for each point and albedo
        for key in tp5_data:
            point, albedo = key

            log.info('Radiative-Transfer', point=point, albedo=albedo.value)
            with tempfile.TemporaryDirectory() as tmpdir:

                prepare_modtran(acqs, point, [albedo], tmpdir, modtran_exe)

                # tp5 data
                fname = pjoin(tmpdir,
                              tp5_fmt.format(p=point, a=albedo.value))
                with open(fname, 'w') as src:
                    src.writelines(tp5_data[key])

                run_modtran(acqs, inputs_grp, workflow, nvertices, point,
                            [albedo], modtran_exe, tmpdir, root, compression,
                            filter_opts)

        # atmospheric coefficients
        log.info('Coefficients')
        results_group = root[GroupName.ATMOSPHERIC_RESULTS_GRP.value]
        calculate_coefficients(results_group, root, compression, filter_opts)

        # interpolate coefficients
        for grp_name in container.supported_groups:
            log = STATUS_LOGGER.bind(level1=container.label, granule=granule,
                                     granule_group=grp_name)
            log.info('Interpolation')

            # acquisitions and available bands for the current group level
            acqs = container.get_acquisitions(granule=granule, group=grp_name)
            nbar_acqs = [acq for acq in acqs if
                         acq.band_type == BandType.REFLECTIVE]
            sbt_acqs = [acq for acq in acqs if
                        acq.band_type == BandType.THERMAL]

            res_group = root[grp_name]
            sat_sol_grp = res_group[GroupName.SAT_SOL_GROUP.value]
            comp_grp = root[GroupName.COEFFICIENTS_GROUP.value]

            for coefficient in workflow.atmos_coefficients:
                if coefficient in Workflow.NBAR.atmos_coefficients:
                    band_acqs = nbar_acqs
                else:
                    band_acqs = sbt_acqs

                for acq in band_acqs:
                    log.info('Interpolate', band_id=acq.band_id,
                             coefficient=coefficient.value)
                    interpolate(acq, coefficient, ancillary_group, sat_sol_grp,
                                comp_grp, res_group, compression, filter_opts,
                                method)

            # standardised products
            band_acqs = []
            if workflow == Workflow.STANDARD or workflow == Workflow.NBAR:
                band_acqs.extend(nbar_acqs)

            if workflow == Workflow.STANDARD or workflow == Workflow.SBT:
                band_acqs.extend(sbt_acqs)

            for acq in band_acqs:
                interp_grp = res_group[GroupName.INTERP_GROUP.value]

                if acq.band_type == BandType.THERMAL:
                    log.info('SBT', band_id=acq.band_id)
                    surface_brightness_temperature(acq, interp_grp, res_group,
                                                   compression, filter_opts)
                else:
                    slp_asp_grp = res_group[GroupName.SLP_ASP_GROUP.value]
                    rel_slp_asp = res_group[GroupName.REL_SLP_GROUP.value]
                    incident_grp = res_group[GroupName.INCIDENT_GROUP.value]
                    exiting_grp = res_group[GroupName.EXITING_GROUP.value]
                    shadow_grp = res_group[GroupName.SHADOW_GROUP.value]

                    log.info('Surface-Reflectance', band_id=acq.band_id)
                    calculate_reflectance(acq, interp_grp, sat_sol_grp,
                                          slp_asp_grp, rel_slp_asp,
                                          incident_grp, exiting_grp,
                                          shadow_grp, ancillary_group,
                                          rori, res_group, compression,
                                          filter_opts)

            # metadata yaml's
            if workflow == Workflow.STANDARD or workflow == Workflow.NBAR:
                create_ard_yaml(band_acqs, ancillary_group, res_group)

            if workflow == Workflow.STANDARD or workflow == Workflow.SBT:
                create_ard_yaml(band_acqs, ancillary_group, res_group, True)

            # pixel quality
            sbt_only = workflow == Workflow.SBT
            if pixel_quality and can_pq(level1, acq_parser_hint) and not sbt_only:
                run_pq(level1, res_group, landsea, res_group, compression, filter_opts, AP.NBAR, acq_parser_hint)
                run_pq(level1, res_group, landsea, res_group, compression, filter_opts, AP.NBART, acq_parser_hint)
Example No. 23
def convert_file(fname,
                 out_fname,
                 group_name='/',
                 dataset_name='dataset',
                 compression=H5CompressionFilter.LZF,
                 filter_opts=None,
                 attrs=None):
    """
    Convert generic single band image file to HDF5.
    Processes in a tiled fashion to minimise memory use.
    Will process all columns by n (default 256) rows at a time,
    where n can be specified via command line using:
    --filter-opts '{"chunks": (n, xsize)}'

    :param fname:
        A str containing the raster filename.

    :param out_fname:
        A str containing the output filename for the HDF5 file.

    :param group_name:
        A str containing the HDF5 group path in which to create the
        dataset. Default is '/'.

    :param dataset_name:
        A str containing the dataset name to use in the HDF5 file.

    :param compression:
        The compression filter to use.
        Default is H5CompressionFilter.LZF

    :param filter_opts:
        A dict of key value pairs available to the given configuration
        instance of H5CompressionFilter. For example
        H5CompressionFilter.LZF has the keywords *chunks* and *shuffle*
        available.
        Default is None, which will use the default settings for the
        chosen H5CompressionFilter instance.

    :param attrs:
        A dict containing any attribute information to be attached
        to the HDF5 Dataset.

    :return:
        None. Content is written directly to disk.
    """
    # opening as `append` mode allows us to add additional datasets
    with h5py.File(out_fname, 'a') as fid:
        with rasterio.open(fname) as ds:

            # create empty or copy the user supplied filter options
            if not filter_opts:
                filter_opts = dict()
            else:
                filter_opts = filter_opts.copy()

            # use sds native chunks if none are provided
            if 'chunks' not in filter_opts:
                filter_opts['chunks'] = (256, 256)

            # read all cols for n rows (ytile), as the GA's DEM is BSQ interleaved
            ytile = filter_opts['chunks'][0]

            # dataset attributes
            if attrs:
                attrs = attrs.copy()
            else:
                attrs = {}

            attrs['geotransform'] = ds.transform.to_gdal()
            attrs['crs_wkt'] = ds.crs.wkt

            # dataset creation options
            kwargs = compression.config(
                **filter_opts).dataset_compression_kwargs()
            kwargs['shape'] = (ds.height, ds.width)
            kwargs['dtype'] = ds.dtypes[0]

            dataset_name = ppjoin(group_name, dataset_name)
            dataset = fid.create_dataset(dataset_name, **kwargs)
            attach_image_attributes(dataset, attrs)

            # process each tile
            for tile in generate_tiles(ds.width, ds.height, ds.width, ytile):
                idx = (slice(tile[0][0],
                             tile[0][1]), slice(tile[1][0], tile[1][1]))
                data = ds.read(1, window=tile)
                dataset[idx] = data
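A call overriding the tile height through `filter_opts`, as described in the docstring; the raster and output names are placeholders:

# Hypothetical invocation; 'dem.tif' and 'dem.h5' are placeholders.
convert_file('dem.tif', 'dem.h5', group_name='ELEVATION', dataset_name='dsm',
             compression=H5CompressionFilter.LZF,
             filter_opts={'chunks': (512, 512)})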
Example No. 24
def calculate_coefficients(atmospheric_results_group, out_group,
                           compression=H5CompressionFilter.LZF,
                           filter_opts=None):
    """
    Calculate the atmospheric coefficients from the MODTRAN output,
    which are used in the BRDF and atmospheric correction.
    Coefficients are computed for each band at each coordinate
    for each atmospheric coefficient. The atmospheric coefficients can be
    found in `Workflow.STANDARD.atmos_coefficients`.

    :param atmospheric_results_group:
        The root HDF5 `Group` that contains the atmospheric results
        from each MODTRAN run.

    :param out_group:
        If set to None (default) then the results will be returned
        as an in-memory hdf5 file, i.e. the `core` driver. Otherwise,
        a writeable HDF5 `Group` object.

        The datasets will be formatted to the HDF5 TABLE specification
        and the dataset names will be as follows:

        * DatasetName.NBAR_COEFFICIENTS (if Workflow.STANDARD or Workflow.NBAR)
        * DatasetName.SBT_COEFFICIENTS (if Workflow.STANDARD or Workflow.SBT)

    :param compression:
        The compression filter to use.
        Default is H5CompressionFilter.LZF

    :param filter_opts:
        A dict of key value pairs available to the given configuration
        instance of H5CompressionFilter. For example
        H5CompressionFilter.LZF has the keywords *chunks* and *shuffle*
        available.
        Default is None, which will use the default settings for the
        chosen H5CompressionFilter instance.

    :return:
        An opened `h5py.File` object, that is either in-memory using the
        `core` driver, or on disk.
    """
    nbar_coefficients = pd.DataFrame()
    sbt_coefficients = pd.DataFrame()

    channel_data = channel_solar_angle = upward = downward = None

    # Initialise the output group/file
    if out_group is None:
        fid = h5py.File('atmospheric-coefficients.h5', driver='core',
                        backing_store=False)
    else:
        fid = out_group

    res = atmospheric_results_group
    npoints = res.attrs['npoints']
    nbar_atmos = res.attrs['nbar_atmospherics']
    sbt_atmos = res.attrs['sbt_atmospherics']

    for point in range(npoints):
        point_grp = res[POINT_FMT.format(p=point)]
        lonlat = point_grp.attrs['lonlat']
        timestamp = pd.to_datetime(point_grp.attrs['datetime'])
        grp_path = ppjoin(POINT_FMT.format(p=point), ALBEDO_FMT)

        if nbar_atmos:
            channel_path = ppjoin(grp_path.format(a=Albedos.ALBEDO_0.value),
                                  DatasetName.CHANNEL.value)
            channel_data = read_h5_table(res, channel_path)

            channel_solar_angle_path = ppjoin(
                grp_path.format(a=Albedos.ALBEDO_0.value),
                DatasetName.SOLAR_ZENITH_CHANNEL.value
            )

            channel_solar_angle = read_h5_table(res, channel_solar_angle_path)

        if sbt_atmos:
            dname = ppjoin(grp_path.format(a=Albedos.ALBEDO_TH.value),
                           DatasetName.UPWARD_RADIATION_CHANNEL.value)
            upward = read_h5_table(res, dname)

            dname = ppjoin(grp_path.format(a=Albedos.ALBEDO_TH.value),
                           DatasetName.DOWNWARD_RADIATION_CHANNEL.value)
            downward = read_h5_table(res, dname)

        kwargs = {'channel_data': channel_data,
                  'solar_zenith_angle': channel_solar_angle,
                  'upward_radiation': upward,
                  'downward_radiation': downward}

        result = coefficients(**kwargs)

        # insert some datetime/geospatial fields
        if result[0] is not None:
            result[0].insert(0, 'POINT', point)
            result[0].insert(1, 'LONGITUDE', lonlat[0])
            result[0].insert(2, 'LATITUDE', lonlat[1])
            result[0].insert(3, 'DATETIME', timestamp)
            nbar_coefficients = nbar_coefficients.append(result[0])

        if result[1] is not None:
            result[1].insert(0, 'POINT', point)
            result[1].insert(1, 'LONGITUDE', lonlat[0])
            result[1].insert(2, 'LATITUDE', lonlat[1])
            result[1].insert(3, 'DATETIME', pd.to_datetime(timestamp))
            sbt_coefficients = sbt_coefficients.append(result[1])

    nbar_coefficients.reset_index(inplace=True)
    sbt_coefficients.reset_index(inplace=True)

    attrs = {'npoints': npoints}
    description = "Coefficients derived from the VNIR solar irradiation."
    attrs['description'] = description
    dname = DatasetName.NBAR_COEFFICIENTS.value

    if GroupName.COEFFICIENTS_GROUP.value not in fid:
        fid.create_group(GroupName.COEFFICIENTS_GROUP.value)

    group = fid[GroupName.COEFFICIENTS_GROUP.value]

    if nbar_atmos:
        write_dataframe(nbar_coefficients, dname, group, compression,
                        attrs=attrs, filter_opts=filter_opts)

    description = "Coefficients derived from the THERMAL solar irradiation."
    attrs['description'] = description
    dname = DatasetName.SBT_COEFFICIENTS.value

    if sbt_atmos:
        write_dataframe(sbt_coefficients, dname, group, compression,
                        attrs=attrs, filter_opts=filter_opts)

    if out_group is None:
        return fid
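`read_h5_table`, used above and in the MODTRAN examples, is assumed to be the inverse of `write_dataframe`: it reads a TABLE-class compound dataset back into a `pandas.DataFrame`. A rough sketch (the real helper likely also decodes strings and restores the index):

import pandas

def read_h5_table(fid, dataset_name):
    # Sketch of the assumed helper: load a compound (TABLE) dataset into a DataFrame.
    return pandas.DataFrame(fid[dataset_name][()])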
Example No. 25
def run_modtran(acquisitions, atmospherics_group, workflow, npoints, point,
                albedos, modtran_exe, basedir, out_group,
                compression=H5CompressionFilter.LZF, filter_opts=None):
    """
    Run MODTRAN and store the channel results.
    """
    lonlat = atmospherics_group[POINT_FMT.format(p=point)].attrs['lonlat']

    # determine the output group/file
    if out_group is None:
        fid = h5py.File('atmospheric-results.h5', driver='core',
                        backing_store=False)
    else:
        fid = out_group

    # initial attributes
    base_attrs = {'Point': point,
                  'lonlat': lonlat,
                  'datetime': acquisitions[0].acquisition_datetime}

    base_path = ppjoin(GroupName.ATMOSPHERIC_RESULTS_GRP.value,
                       POINT_FMT.format(p=point))

    # what atmospheric calculations have been run and how many points
    group_name = GroupName.ATMOSPHERIC_RESULTS_GRP.value
    if group_name not in fid:
        fid.create_group(group_name)

    fid[group_name].attrs['npoints'] = npoints
    applied = workflow in (Workflow.STANDARD, Workflow.NBAR)
    fid[group_name].attrs['nbar_atmospherics'] = applied
    applied = workflow in (Workflow.STANDARD, Workflow.SBT)
    fid[group_name].attrs['sbt_atmospherics'] = applied

    acqs = acquisitions
    for albedo in albedos:
        base_attrs['Albedo'] = albedo.value
        workpath = pjoin(basedir, POINT_FMT.format(p=point),
                         ALBEDO_FMT.format(a=albedo.value))

        json_mod_infile = pjoin(workpath, ''.join(
            [POINT_ALBEDO_FMT.format(p=point, a=albedo.value), '.json']))

        group_path = ppjoin(base_path, ALBEDO_FMT.format(a=albedo.value))

        subprocess.check_call([modtran_exe, json_mod_infile], cwd=workpath)

        chn_fname = glob.glob(pjoin(workpath, '*.chn'))[0]
        tp6_fname = glob.glob(pjoin(workpath, '*.tp6'))[0]

        if albedo == Albedos.ALBEDO_TH:
            acq = [acq for acq in acqs if acq.band_type == BandType.THERMAL][0]

            channel_data = read_modtran_channel(chn_fname, tp6_fname, acq, albedo)

            attrs = base_attrs.copy()
            dataset_name = DatasetName.UPWARD_RADIATION_CHANNEL.value
            attrs['description'] = ('Upward radiation channel output from '
                                    'MODTRAN')
            dset_name = ppjoin(group_path, dataset_name)
            write_dataframe(channel_data[0], dset_name, fid, compression,
                            attrs=attrs, filter_opts=filter_opts)

            # downward radiation
            attrs = base_attrs.copy()
            dataset_name = DatasetName.DOWNWARD_RADIATION_CHANNEL.value
            attrs['description'] = ('Downward radiation channel output from '
                                    'MODTRAN')
            dset_name = ppjoin(group_path, dataset_name)
            write_dataframe(channel_data[1], dset_name, fid, compression,
                            attrs=attrs, filter_opts=filter_opts)
        else:
            acq = [acq for acq in acqs if
                   acq.band_type == BandType.REFLECTIVE][0]

            # Will require updating to handle JSON output from modtran
            channel_data = read_modtran_channel(chn_fname, tp6_fname, acq, albedo)

            attrs = base_attrs.copy()
            dataset_name = DatasetName.CHANNEL.value
            attrs['description'] = 'Channel output from MODTRAN'
            dset_name = ppjoin(group_path, dataset_name)
            write_dataframe(channel_data[0], dset_name, fid, compression,
                            attrs=attrs, filter_opts=filter_opts)

            # solar zenith angle at surface
            attrs = base_attrs.copy()
            dataset_name = DatasetName.SOLAR_ZENITH_CHANNEL.value
            attrs['description'] = 'Solar zenith angle at different atmosphere levels'
            dset_name = ppjoin(group_path, dataset_name)
            write_dataframe(channel_data[1], dset_name, fid, compression,
                            attrs=attrs, filter_opts=filter_opts)

    # metadata for a given point
    alb_vals = [alb.value for alb in workflow.albedos]
    fid[base_path].attrs['lonlat'] = lonlat
    fid[base_path].attrs['datetime'] = acqs[0].acquisition_datetime.isoformat()
    fid[base_path].attrs.create('albedos', data=alb_vals, dtype=VLEN_STRING)

    if out_group is None:
        return fid
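When `out_group` is None, `run_modtran` returns an HDF5 file opened with the 'core' driver and `backing_store=False`, i.e. a file that lives entirely in memory and is never flushed to disk. A minimal standalone h5py sketch of that behaviour (file name and contents below are illustrative only):

import h5py
import numpy as np

# an HDF5 "file" held entirely in memory; nothing is ever written to disk
fid = h5py.File('atmospheric-results.h5', 'w', driver='core',
                backing_store=False)
grp = fid.create_group('POINT-0')
grp.attrs['lonlat'] = (147.0, -35.0)
grp.create_dataset('example', data=np.arange(5))
print(list(fid.keys()))          # ['POINT-0'], yet no file appears on disk
fid.close()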
Exemplo n.º 26
0
def link_atmospheric_results(input_targets, out_fname, npoints, workflow):
    """
    Uses h5py's ExternalLink to combine the atmospheric results into
    a single file.

    :param input_targets:
        A `list` of luigi.LocalTargets.

    :param out_fname:
        A `str` containing the output filename.

    :param npoints:
        An `int` containing the number of points (vertices) used for
        evaluating the atmospheric conditions.

    :param workflow:
        An Enum given by wagl.constants.Workflow.

    :return:
        None. Results from each file in `input_targets` are linked
        into the output file.
    """
    base_group_name = GroupName.ATMOSPHERIC_RESULTS_GRP.value
    nbar_atmospherics = False
    sbt_atmospherics = False
    attributes = []
    for fname in input_targets:
        with h5py.File(fname.path, 'r') as fid:
            points = list(fid[base_group_name].keys())

            # copy across several attributes of each POINT Group, as the
            # linking done below points directly at the datasets, which only
            # creates the required parent Groups (without their attributes)
            for point in points:
                group = ppjoin(base_group_name, point)
                attributes.append((point,
                                   fid[group].attrs['lonlat'],
                                   fid[group].attrs['datetime'],
                                   fid[group].attrs['albedos']))

        for point in points:
            for albedo in workflow.albedos:
                if albedo == Albedos.ALBEDO_TH:
                    datasets = [DatasetName.UPWARD_RADIATION_CHANNEL.value,
                                DatasetName.DOWNWARD_RADIATION_CHANNEL.value]
                    sbt_atmospherics = True
                else:
                    datasets = [DatasetName.CHANNEL.value,
                                DatasetName.SOLAR_ZENITH_CHANNEL.value]
                    nbar_atmospherics = True

                grp_path = ppjoin(base_group_name, point,
                                  ALBEDO_FMT.format(a=albedo.value))

                for dset in datasets:
                    dname = ppjoin(grp_path, dset)
                    create_external_link(fname.path, dname, out_fname, dname)

    with h5py.File(out_fname, 'a') as fid:
        group = fid[GroupName.ATMOSPHERIC_RESULTS_GRP.value]
        group.attrs['npoints'] = npoints
        group.attrs['nbar_atmospherics'] = nbar_atmospherics
        group.attrs['sbt_atmospherics'] = sbt_atmospherics

        # assign the lonlat attribute for each POINT Group
        for point, lonlat, date_time, albedos in attributes:
            group[point].attrs['lonlat'] = lonlat
            group[point].attrs['datetime'] = date_time
            group[point].attrs.create('albedos', data=albedos,
                                      dtype=VLEN_STRING)
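The docstring above relies on h5py's ExternalLink mechanism; the project's `create_external_link` helper is not shown in this excerpt, but the sketch below (with made-up file and group names) illustrates the underlying h5py call it presumably wraps:

import h5py

# write a tiny source file containing one dataset (names are illustrative only)
with h5py.File('point-0.h5', 'w') as src:
    src.create_dataset('ATMOSPHERIC-RESULTS/POINT-0/ALBEDO-0/CHANNEL',
                       data=[1.0, 2.0, 3.0])

# link that dataset into a combined file without copying any data
with h5py.File('combined.h5', 'w') as dst:
    grp = dst.require_group('ATMOSPHERIC-RESULTS/POINT-0/ALBEDO-0')
    grp['CHANNEL'] = h5py.ExternalLink('point-0.h5',
                                       'ATMOSPHERIC-RESULTS/POINT-0/ALBEDO-0/CHANNEL')

# the link resolves transparently on read, as long as point-0.h5 stays in place
with h5py.File('combined.h5', 'r') as dst:
    print(dst['ATMOSPHERIC-RESULTS/POINT-0/ALBEDO-0/CHANNEL'][:])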
Exemplo n.º 27
0
def plot_spike_shapes(shape_dict, f_handle, meta, info, my_name):
    """
    Plots spike shapes superimposed, together with the appropriate mean
    :f_handle: list of figure and axes handles
    :param shape_dict: dictionary with spike shape for all selected repros and stimuli
    :return: figure handle, maybe
    """
    color_spread = np.linspace(0.8, 0.4, len(shape_dict))
    cmapGrey = [cm.Greys(x) for x in color_spread]

    counter = 0
    for k, v in shape_dict.items():

        # for spike in range(shape_dict[k].shape[0]):
        # peak_voltage = np.max(shape_dict[k][spike,:])
        #     f_handle[1].plot(shape_dict[k][spike,:]-peak_voltage, color=cmapGrey[counter])

        avg_spike = np.mean(shape_dict[k], 0)
        med_spike = np.median(shape_dict[k], 0)

        peak_avg_voltage = np.max(avg_spike)
        peak_med_voltage = np.max(med_spike)

        q25spike = np.percentile(shape_dict[k], 5, 0)
        q75spike = np.percentile(shape_dict[k], 95, 0)
        std_spike = np.std(shape_dict[k], 0)

        # f_handle[1].fill_between(range(0,med_spike.shape[0]), med_spike-peak_med_voltage-q25spike, med_spike-peak_med_voltage + q75spike, color=cmapGrey[counter], alpha=0.2)
        # f_handle[1].plot(med_spike-peak_med_voltage, color=cmapGrey[counter])

        f_handle[1].fill_between(range(0, avg_spike.shape[0]),
                                 avg_spike - peak_avg_voltage - std_spike,
                                 avg_spike - peak_avg_voltage + std_spike,
                                 color=cmapGrey[counter],
                                 alpha=0.1)
        f_handle[1].plot(avg_spike - peak_avg_voltage, color=cmapGrey[counter])

        if "SyncPulse" in meta[k][0]["Status"].keys():
            f_handle[1].text(35,
                             -5 - counter * 5,
                             "".join([
                                 r'$g_{Vgate}$', ' = ',
                                 meta[k][0]["Status"]["gvgate"].split('.')[0],
                                 ' nS'
                             ]),
                             fontsize=10,
                             color=cmapGrey[counter])
            f_handle[1].text(
                60,
                -5 - counter * 5,
                "".join([
                    r'$\tau_{Vgate}$', ' = ',
                    meta[k][0]["Status"]["vgatetau"].split('.')[0], ' ms'
                ]),
                fontsize=10,
                color=cmapGrey[counter])

        counter += 1

    f_handle[1].set_title(".".join([
        "_".join([info["Cell"]["Location"], expfolder, "spike_shape",
                  my_name]), 'pdf'
    ]))

    f_handle[1].set_ylim(-40, 0)
    f_handle[1].set_xlabel("time [ms]")
    f_handle[1].set_ylabel("relative voltage [mV]")
    f_handle[1].set_xticklabels(['-1', '0', '1', '2', '3', '4'])

    f_handle[0].savefig(ppjoin(
        '../overviewSpikeShape/', ".".join([
            "_".join(
                [info["Cell"]["Location"], expfolder, "spike_shape", my_name]),
            'pdf'
        ])),
                        transparent=True)
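plot_spike_shapes draws a mean spike trace with a +/- 1 SD envelope via `fill_between`. A toy, self-contained matplotlib/numpy version of the same idea (synthetic data, not project code):

import numpy as np
import matplotlib.pyplot as plt

t = np.arange(100)                                    # sample index
shape = np.exp(-0.5 * ((t - 50) / 5.0) ** 2) * 30.0   # idealised spike, ~30 mV high
traces = shape + 1.5 * np.random.randn(20, 100)       # 20 noisy repetitions

avg = traces.mean(axis=0)
std = traces.std(axis=0)
avg -= avg.max()                                      # align the peak to 0, as above

fig, ax = plt.subplots()
ax.fill_between(t, avg - std, avg + std, color='0.7', alpha=0.3)
ax.plot(t, avg, color='0.3')
ax.set_xlabel('time [samples]')
ax.set_ylabel('relative voltage [mV]')
plt.show()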
Exemplo n.º 28
0
    """
    Extracts the selected FI traces, detects spikes and superimposes them to obtain the average spike shape.
    Runs from the command line in the current folder.
    The input argument is a dictionary whose keys are the RePro indexes to include in the same analysis
    and whose values are the corresponding stimulus currents.
    example: report_spike_shape.py -d"{'16':'-0.14nA','34':'-0.14nA','40':'-0.14nA'}"
    """
    #   get the current working dir
    wd = getcwd().split(getcwd().split("/")[-1])[0]
    expfolder = getcwd().split("/")[-1]

    #   get the command arguments
    ReProList = command_interpreter(sys.argv[1:])

    #   get the info.dat
    (_, _, filenames) = next(walk(ppjoin(wd, expfolder)))
    if "info.dat" in filenames:
        exp_info = dict(read_info(wd, expfolder)[0])
        print(exp_info)
    else:
        exp_info = {"Cell": {"Location": "UNLABELED"}}

    fn_trace = "ficurve-traces.dat"
    fn_spike = "ficurve-spikes.dat"

    #   compose list of RePro indices to be analysed
    ReProDict = ReProList['di']
    ReProIndex = sorted([k for k, v in ReProDict.items()])
    desired_shapes = OrderedDict()
    spike_shapes = OrderedDict()
    metka = OrderedDict()
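`command_interpreter` is not shown in this excerpt; the sketch below is only a hypothetical illustration of how the documented -d"{'16':'-0.14nA',...}" argument could be parsed into the `ReProList['di']` dictionary using the standard library alone:

import ast
import getopt

def parse_repro_arg(argv):
    """Return {'di': {...}} parsed from a -d"{'16':'-0.14nA'}" style argument (hypothetical helper)."""
    opts, _ = getopt.getopt(argv, 'd:')
    for flag, value in opts:
        if flag == '-d':
            return {'di': ast.literal_eval(value)}
    return {'di': {}}

# e.g. parse_repro_arg(["-d{'16':'-0.14nA','34':'-0.14nA'}"])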
Exemplo n.º 29
0
def wht_noise(path_to_folder, subfolder, info):
    """
    Main engine, opens the spike files.

    :param path_to_folder:  folder with recordings
    :param subfolder:       folder containing the experiment/cell
    :param info:            experimental data from info.dat
    :return fnames:         list of figure names to be used in the html build

    """

    #   define sample rate
    FS = 20000

    #   define the Gauss kernel width (= 1SD)
    sigma = 0.001  # seconds, from Berman & Maler 1998

    #   define the input data
    filename = ppjoin(path_to_folder, subfolder, "stimulus-whitenoise-spikes.dat")

    #   get the file, extract the three data subunits
    relacs_file = load(filename)

    #   extract RePro indices
    try:
        ReProIx = relacs_file.fields[('ReProIndex',)]

    except:
        return None

    #   convert the set object into a list
    ReProIx = list(ReProIx)
    ReProIx.sort()

    #   define empty list containing figure names
    fnames = []

    for ix in ReProIx:

        #   if the relacs file is empty or too short due to an aborted RePro
        #   presentation, "try:" lets the function return cleanly instead of raising
        try:
            metas, _, datas = relacs_file.select({"ReProIndex": ix})

        except:
            return None

        print("ReProIx", ix, "Iterations", len(metas))

        #   determine figure handles
        fig = figure_handles()

        #   one-sided spectra have nFFT/2 + 1 frequency bins (mlab convention)
        nFFT = 2048
        FFT = nFFT // 2 + 1

        #   prepare empty variables
        coh = np.zeros([len(metas), FFT], )
        coh_short = np.zeros([len(metas), FFT], )
        P_csd = np.zeros([len(metas), FFT], dtype=complex)
        P_csd_short = np.zeros([len(metas), FFT], dtype=complex)
        P_psd = np.zeros([len(metas), FFT])
        P_psd_short = np.zeros([len(metas), FFT])
        H = np.zeros([len(metas), FFT],)# dtype=complex)
        H_short = np.zeros([len(metas), FFT],)# dtype=complex)
        MI = np.zeros([len(metas), FFT], )
        MI_short = np.zeros([len(metas), FFT], )
        #   number of stimulus iterations

        for i in range(0, len(metas)):

            color_spread = np.linspace(0.35, 0.8, len(metas))
            cmap = [cm.Greys(x) for x in color_spread]

            #   extract meta infos
            wnFname = metas[i]["envelope"]
            wnDur = float(metas[i]["Settings"]["Waveform"]["duration"].split("m")[0])  # duration in milliseconds

            #   spike times as a NumPy array
            spikes = np.array(datas[i])

            #   convert from milliseconds to seconds
            wnDur /= 1000
            spikes /= 1000

            print(spikes.shape)
            convolved_Train, _ = train_convolve(spikes, sigma, FS, wnDur)
            print(sum(convolved_Train)/FS)
            wNoise = process_wn(path_to_folder, wnFname, len(convolved_Train))

            #   compute coherence, mutual information, transfer and the power spectra and cross-spectra density
            freq, coh[i,:], coh_short[i,:], H[i,:], H_short[i,:], MI[i,:], MI_short[i,:], \
                P_csd[i,:], P_csd_short[i,:], P_psd[i,:], P_psd_short[i,:] \
                = cohere_transfere_MI (convolved_Train, wNoise, nFFT, FS)

        #   plot coherence, mutual information etc....
        plot_the_lot(fig, freq, coh, coh_short, MI, MI_short, H, H_short, metas, cmap, np.array(datas))

        avgCoh, avgCoh_short, avgH, avgH_short, mut_inf, mut_inf_short = compute_avgs(coh, coh_short, H, H_short, MI, MI_short)

        plot_the_lot(fig, freq, avgCoh, avgCoh_short, mut_inf, mut_inf_short, avgH, avgH_short, metas, cmap=[cm.Reds(0.6)], raster='empty', annotation=True)

        fig[2].text(0.05, 0.95, " ".join(['Species:', info["Subject"]["Species"]]), transform=fig[1].transAxes,
                fontsize=10)
        fig[2].text(0.05, 0.90, " ".join(['ELL Segment:', info["Cell"]["Location"]]), transform=fig[1].transAxes,
                fontsize=10)

        #   define file name
        filename = "".join([str(metas[i]["ReProIndex"]), '_', 'whitenoise'])
        fnames.append(filename)

        fig[0].savefig(ppjoin(path_to_folder, subfolder, ".".join([filename, 'png'])), transparent=True)
        fig[0].savefig(ppjoin(path_to_folder, subfolder, ".".join([filename, 'svg'])), transparent=True)

        plt.close()

    return fnames
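`train_convolve` is not included in this excerpt. The sketch below is a hedged stand-in: it bins the spike times at the sampling rate FS and smooths them with a Gaussian kernel of width sigma (1 ms, as above) to obtain a firing-rate estimate in Hz.

import numpy as np

def gaussian_rate(spike_times_s, sigma_s, fs, duration_s):
    """Binary spike train sampled at fs, smoothed with a Gaussian kernel (hypothetical stand-in)."""
    n = int(round(duration_s * fs))
    train = np.zeros(n)
    idx = np.round(np.asarray(spike_times_s) * fs).astype(int)
    train[idx[(idx >= 0) & (idx < n)]] = 1.0

    t = np.arange(-5 * sigma_s, 5 * sigma_s, 1.0 / fs)
    kernel = np.exp(-0.5 * (t / sigma_s) ** 2)
    kernel *= fs / kernel.sum()                  # unit area, so the output is a rate in Hz
    return np.convolve(train, kernel, mode='same')

# e.g. gaussian_rate([0.010, 0.055], sigma_s=0.001, fs=20000, duration_s=0.1)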
Exemplo n.º 30
0
def noise_transfer(tests, wd, expfolder):
    """"
    Plots the transfer and coherence curve into the graphic file (*.png, *.svg)
        Requires:
    :param tests:   dictionary containing experimental conditions as keys and ReProIx as values
    :param wd: location of the experimental folder
    :param expfolder: name of the experimental folder

    Outputs:
            graphic files containing coherence, MI and transfer
    """

    #   define sample rate
    FS = 20000

    #   define the Gauss kernel width (= 1SD)
    sigma = 0.001  # seconds, from Berman & Maler 1998

    #   define the input data
    filename = ppjoin(wd, expfolder, "stimulus-whitenoise-spikes.dat")

    #   read in some experimental data
    (_, _, filenames) = next(walk(ppjoin(wd, expfolder)))
    if "info.dat" in filenames:
        exp_info = dict(read_info(wd, expfolder)[0])
        print(exp_info)
    else:
        exp_info = {"Cell": {"Location": "UNLABELED"}}

    #   load data
    relacs_file = load(filename)

    #   four panel figure
    FHandles = figure_handles()

    #   define colors
    color_spread = np.linspace(0.5, 0.9, len(tests))
    cmap = [cm.Greys(x) for x in color_spread]
    # cmap = [ cm.viridis(x) for x in color_spread ]

    #   color counter
    col_count = 0

    #   one-sided spectra have nFFT/2 + 1 frequency bins (mlab convention)
    nFFT = 1024
    FFT = nFFT // 2 + 1

    #   define spike dict for the raster plot
    spike_dict = OrderedDict()

    for k, v in tests.items():
        #   get RePro indexes of the same experimental condition
        ReProIx = tests[k]

        #   define empty variables, containing traces of the same experiments, different RePros
        coh_repro = np.zeros([len(ReProIx), FFT])
        coh_repro_short = np.zeros([len(ReProIx), FFT])
        H_repro = np.zeros([len(ReProIx), FFT])
        H_repro_short = np.zeros([len(ReProIx), FFT])
        MI_repro = np.zeros([len(ReProIx), FFT])
        MI_repro_short = np.zeros([len(ReProIx), FFT])

        spike_list = []

        #   define/reset counter
        counter = 0

        #   iteration of same experimental condition
        for ix in ReProIx:

            try:
                metas, _, datas = relacs_file.select({"ReProIndex": ix})

            except:
                return None

            #   define empty variables
            coh = np.zeros([len(metas), FFT])
            coh_short = np.zeros([len(metas), FFT])
            P_csd = np.zeros([len(metas), FFT], dtype=complex)
            P_csd_short = np.zeros([len(metas), FFT], dtype=complex)
            P_psd = np.zeros([len(metas), FFT])
            P_psd_short = np.zeros([len(metas), FFT])
            H = np.zeros([len(metas), FFT])
            H_short = np.zeros([len(metas), FFT])
            MI = np.zeros([len(metas), FFT])
            MI_short = np.zeros([len(metas), FFT])

            meta_repros = []

            #   number of stimulus iterations
            for i in range(0, len(metas)):

                #   extract meta infos
                wnFname = metas[i]["envelope"]
                wnDur = float(
                    metas[i]["Settings"]["Waveform"]["duration"].split(
                        "m")[0])  # duration in miliseconds

                #   spikes
                spikes = np.array(datas[i])

                #   convert from milliseconds to seconds
                wnDur /= 1000
                spikes /= 1000

                print(spikes.shape)
                convolved_Train, _ = train_convolve(spikes, sigma, FS, wnDur)
                print(sum(convolved_Train) / FS)
                wNoise = process_wn(wd, wnFname, len(convolved_Train))

                #   compute coherence, mutual information, transfer and the power spectra and cross-spectra density
                freq, coh[i,:], coh_short[i,:], H[i,:], H_short[i,:], MI[i,:], MI_short[i,:], \
                    P_csd[i,:], P_csd_short[i,:], P_psd[i,:], P_psd_short[i,:] \
                    = cohere_transfere_MI (convolved_Train, wNoise, nFFT, FS)

            #   compute averages over iterations of the *same* repro
            coh_repro[counter,:], coh_repro_short[counter,:], \
                H_repro[counter,:], H_repro_short[counter,:], \
                MI_repro[counter,:], MI_repro_short[counter,:] = compute_avgs(coh, coh_short, H, H_short, MI, MI_short)

            #   store one of the metas
            meta_repros.append(metas[0])
            #   store all the spikes from the same type of experiment
            spike_list.append(datas)
            counter = counter + 1
        #   plot the lot
        plot_the_lot(FHandles,
                     freq,
                     coh_repro,
                     coh_repro_short,
                     MI_repro,
                     MI_repro_short,
                     H_repro,
                     H_repro_short,
                     metas,
                     cmap=[cmap[col_count]],
                     raster='empty',
                     annotation=False,
                     comparison=True)

        #   compute the average of the different repro presentations (with same test conditions)
        avgCoh, avgCoh_short, avgH, avgH_short, avgMI, avgMI_short = compute_avgs(
            coh_repro, coh_repro_short, H_repro, H_repro_short, MI_repro,
            MI_repro_short)

        #   plot the lot
        plot_the_lot(FHandles,
                     freq,
                     avgCoh,
                     avgCoh_short,
                     avgMI,
                     avgMI_short,
                     avgH,
                     avgH_short,
                     meta_repros,
                     cmap=[cmap[col_count]],
                     raster='empty',
                     annotation=False,
                     comparison=True)

        #   annotate the gVgate and tau values
        FHandles[1].text(0.5,
                         0.8 - 0.05 * col_count,
                         " ".join([
                             r'$g_{Vgate}$', ' = ',
                             meta_repros[0]["Status"]["gvgate"].split(".")[0],
                             'nS'
                         ]),
                         color=cmap[col_count],
                         transform=FHandles[1].transAxes,
                         fontsize=10)
        FHandles[5].text(
            0.5,
            0.8 - 0.05 * col_count,
            " ".join([
                r'$\tau_{Vgate}$', ' = ',
                meta_repros[0]["Status"]["vgatetau"].split(".")[0], 'ms'
            ]),
            color=cmap[col_count],
            transform=FHandles[5].transAxes,
            fontsize=10)

        #   update spike dictionary
        spike_dict[k] = spike_list

        #   update the color counter
        col_count += 1

    #   write FFT value
    FHandles[1].text(0.05,
                     0.90,
                     " ".join(['FFT = ', str(nFFT)]),
                     color='k',
                     transform=FHandles[1].transAxes,
                     fontsize=10)

    #   plot raster plot
    spike_iter_count = 0
    for i, k in enumerate(spike_dict):
        for j in range(len(spike_dict[k])):
            for gnj in range(len(spike_dict[k][j])):
                FHandles[7].plot(spike_dict[k][j][gnj],
                                 np.zeros(len(spike_dict[k][j][gnj])) +
                                 spike_iter_count,
                                 '|',
                                 color=cmap[i],
                                 ms=12)
                spike_iter_count += 1

    FHandles[7].set_title(
        ppjoin(".".join([
            exp_info["Cell"]["Location"], ':', expfolder, "dyn_noise_transfer",
            '_'.join([k for k, v in tests.items()]), '_'.join([
                str(x) for sublist in [v for k, v in tests.items()]
                for x in sublist
            ]), "fft",
            str(nFFT)
        ])))
    #   Save figures
    FHandles[0].savefig(ppjoin(".".join([
        expfolder, "coherence_transfer",
        '.'.join([k for k, v in tests.items()]), '.'.join([
            str(x) for sublist in [v for k, v in tests.items()]
            for x in sublist
        ]), "fft",
        str(nFFT), 'svg'
    ])),
                        transparent=True)
    FHandles[0].savefig(ppjoin(".".join([
        expfolder, "coherence_transfer",
        '.'.join([k for k, v in tests.items()]), ".".join([
            str(x) for sublist in [v for k, v in tests.items()]
            for x in sublist
        ]), "fft",
        str(nFFT), 'png'
    ])),
                        transparent=True)
    #   save figures into dedicated folder if necessary
    FHandles[0].savefig(ppjoin(
        '../overviewTransfer/', ".".join([
            "_".join([
                exp_info["Cell"]["Location"], expfolder, "coherence_transfer",
                "".join([k for k, v in tests.items()]), "_".join([
                    str(x) for sublist in [v for k, v in tests.items()]
                    for x in sublist
                ]), "fft",
                str(nFFT)
            ]), 'pdf'
        ])),
                        transparent=True)
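`cohere_transfere_MI` is also not shown in this excerpt. Assuming it builds on matplotlib.mlab's one-sided spectral estimators, the sketch below shows why every spectral array above has nFFT // 2 + 1 frequency bins, and how a coherence, a transfer-function estimate, and a commonly used coherence-based mutual-information lower bound could be derived from synthetic data:

import numpy as np
from matplotlib import mlab

FS = 20000
nFFT = 1024
n = 2 * FS                                        # 2 s of data

x = np.random.randn(n)                            # "stimulus"
y = np.roll(x, 20) + 0.5 * np.random.randn(n)     # delayed, noisy "response"

pxx, freq = mlab.psd(x, NFFT=nFFT, Fs=FS)         # one-sided spectra:
pxy, _ = mlab.csd(x, y, NFFT=nFFT, Fs=FS)         # each has nFFT // 2 + 1 bins
coh, _ = mlab.cohere(x, y, NFFT=nFFT, Fs=FS)

H = pxy / pxx                                     # transfer function estimate
MI = -np.log2(1 - coh)                            # coherence-based MI lower bound (bits)

print(freq.size, nFFT // 2 + 1)                   # 513 513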