Code example #1
File: ATL15_write.py Project: whigg/surfaceChange
    def make_dataset(field,data,field_attrs,file_obj,group_obj,scale_dict,dimScale=False):
        dimensions = field_attrs[field]['dimensions'].split(',')
        if field_attrs[field]['datatype'].startswith('int'):
            data = np.nan_to_num(data,nan=np.iinfo(np.dtype(field_attrs[field]['datatype'])).max)
            fillvalue = np.iinfo(np.dtype(field_attrs[field]['datatype'])).max
        elif field_attrs[field]['datatype'].startswith('float'):
            data = np.nan_to_num(data,nan=np.finfo(np.dtype(field_attrs[field]['datatype'])).max)
            fillvalue = np.finfo(np.dtype(field_attrs[field]['datatype'])).max
        dset = group_obj.create_dataset(field.encode('ASCII'),data=data,fillvalue=fillvalue,chunks=True,compression=6,dtype=field_attrs[field]['datatype'])
        for ii,dim in enumerate(dimensions):
            dset.dims[ii].label = scale_dict[dim.strip()]
            if dimScale:
                dset.make_scale(field)
            else:
                if dim.strip().startswith('Nt'):
                    dset.dims[ii].attach_scale(file_obj[scale_dict[dim.strip()]])
                else:
                    dset.dims[ii].attach_scale(group_obj[scale_dict[dim.strip()]])

        for attr in attr_names:
             if 'dimensions' not in attr and 'datatype' not in attr:
                 create_attribute(dset.id, attr, [], str(field_attrs[field][attr]))
        if field_attrs[field]['datatype'].startswith('int'):
            dset.attrs['_FillValue'.encode('ASCII')] = np.iinfo(np.dtype(field_attrs[field]['datatype'])).max
        elif field_attrs[field]['datatype'].startswith('float'):
            dset.attrs['_FillValue'.encode('ASCII')] = np.finfo(np.dtype(field_attrs[field]['datatype'])).max
        return file_obj
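
The dimension-scale calls above (make_scale, dims[ii].label, dims[ii].attach_scale) are standard h5py API. The following is a minimal, self-contained sketch of that pattern outside the ATL15 context; the file name and variable names ('demo_scales.h5', 'time', 'height') are placeholders, not part of the project.

import h5py
import numpy as np

with h5py.File('demo_scales.h5', 'w') as f:
    # create the coordinate variable and register it as a dimension scale
    t = f.create_dataset('time', data=np.arange(10.0))
    t.make_scale('time')
    # create a data variable, label its first dimension, and attach the scale
    h = f.create_dataset('height', data=np.zeros(10))
    h.dims[0].label = 'time'
    h.dims[0].attach_scale(f['time'])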
Code example #2
File: ATL14_write.py Project: whigg/surfaceChange
def ATL14_write():
    dz_dict = {
        'x': 'x',  # ATL14 varname : z0.h5 varname
        'y': 'y',
        'h': 'z0',
        'h_sigma': 'sigma_z0',
        #              'cell_area':'area',
        #              'data_count':'count',
        'misfit_rms': 'misfit_rms',
        'misfit_scaled_rms': 'misfit_scaled_rms',
    }
    scale = {
        'Nx': 'x',
        'Ny': 'y',
    }

    # establish output file
    fileout = 'ATL14_tile01.h5'
    print('output file', fileout)
    if os.path.isfile(fileout):
        os.remove(fileout)
    with h5py.File(fileout.encode('ASCII'), 'w') as fo:
        # get handle for input file with ROOT and height_change variables.
        FH = h5py.File('z0.h5', 'r')
        if 'z0' not in FH:
            print('no z0')
            FH.close()
            exit(-1)

        with open('ATL14_output_attrs.csv', 'r',
                  encoding='utf-8-sig') as attrfile:
            reader = list(csv.DictReader(attrfile))
#        group_names = set([row['group'] for row in reader])

        attr_names = [
            x for x in reader[0].keys() if x != 'field' and x != 'group'
        ]

        # work ROOT group first
        field_names = [
            row['field'] for row in reader if 'ROOT' in row['group']
        ]
        print(field_names)
        #establish variables that are dimension scales first
        for field in ['x', 'y']:
            print('field', field)
            field_attrs = {
                row['field']: {
                    attr_names[ii]: row[attr_names[ii]]
                    for ii in range(len(attr_names))
                }
                for row in reader if field in row['field']
            }
            dimensions = field_attrs[field]['dimensions'].split(',')
            data = np.array(FH['z0'][dz_dict[field]])
            data = np.nan_to_num(
                data,
                nan=np.finfo(np.dtype(field_attrs[field]['datatype'])).max)
            fillvalue = np.finfo(np.dtype(field_attrs[field]['datatype'])).max
            dset = fo.create_dataset(field.encode('ASCII'),
                                     data=data,
                                     fillvalue=fillvalue,
                                     chunks=True,
                                     compression=6,
                                     dtype=field_attrs[field]['datatype'])
            dset.make_scale(field)
            for ii, dim in enumerate(dimensions):
                print(field, dim)
                dset.dims[ii].label = scale[dim.strip()]
            for attr in attr_names:
                if 'dimensions' not in attr and 'datatype' not in attr:
                    create_attribute(dset.id, attr, [],
                                     str(field_attrs[field][attr]))
            if field_attrs[field]['datatype'].startswith('int'):
                dset.attrs['_FillValue'.encode('ASCII')] = np.iinfo(
                    np.dtype(field_attrs[field]['datatype'])).max
            elif field_attrs[field]['datatype'].startswith('float'):
                dset.attrs['_FillValue'.encode('ASCII')] = np.finfo(
                    np.dtype(field_attrs[field]['datatype'])).max

        for field in [
                item for item in field_names if item != 'x' and item != 'y'
        ]:
            # read attrs from .csv
            field_attrs = {
                row['field']: {
                    attr_names[ii]: row[attr_names[ii]]
                    for ii in range(len(attr_names))
                }
                for row in reader if field in row['field']
            }
            dimensions = field_attrs[field]['dimensions'].split(',')
            if dz_dict.get(field) is not None:  # if key is in dz_dict
                data = np.squeeze(np.array(FH['z0'][dz_dict[field]]))
            else:
                data = np.ndarray(shape=tuple(
                    [ii + 1 for ii in range(len(dimensions))]),
                                  dtype=float)
            print('line 93', field, dimensions)
            data = np.nan_to_num(
                data,
                nan=np.finfo(np.dtype(field_attrs[field]['datatype'])).max)
            print('shape', data.shape)
            fillvalue = np.finfo(np.dtype(field_attrs[field]['datatype'])).max
            dset = fo.create_dataset(field.encode('ASCII'),
                                     data=data,
                                     fillvalue=fillvalue,
                                     chunks=True,
                                     compression=6,
                                     dtype=field_attrs[field]['datatype'])
            for ii, dim in enumerate(dimensions):
                print('line 98', ii, dim)
                dset.dims[ii].label = scale[dim.strip()]
                dset.dims[ii].attach_scale(fo[scale[dim.strip()]])
            for attr in attr_names:
                if 'dimensions' not in attr and 'datatype' not in attr:
                    create_attribute(dset.id, attr, [],
                                     str(field_attrs[field][attr]))
            if field_attrs[field]['datatype'].startswith('int'):
                dset.attrs['_FillValue'.encode('ASCII')] = np.iinfo(
                    np.dtype(field_attrs[field]['datatype'])).max
            elif field_attrs[field]['datatype'].startswith('float'):
                dset.attrs['_FillValue'.encode('ASCII')] = np.finfo(
                    np.dtype(field_attrs[field]['datatype'])).max

        FH.close()


#
    return fileout
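
ATL14_write (and the ATL11 writer in the next example) builds field_attrs by turning each CSV row into a per-field dictionary keyed by every column except 'group' and 'field'. Below is a small standalone sketch of that step; only the 'group', 'field', 'dimensions', and 'datatype' columns are implied by the code above, while the 'units' and 'long_name' columns and their values are made-up examples.

import csv, io

# hypothetical two-row excerpt of an output-attrs CSV; values are illustrative only
csv_text = """group,field,dimensions,datatype,units,long_name
ROOT,x,Nx,float64,meters,x coordinate
ROOT,h,"Ny, Nx",float32,meters,surface height
"""
reader = list(csv.DictReader(io.StringIO(csv_text)))
attr_names = [x for x in reader[0].keys() if x != 'field' and x != 'group']
field_attrs = {row['field']: {a: row[a] for a in attr_names} for row in reader}
print(field_attrs['h']['dimensions'])   # -> "Ny, Nx"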
Code example #3
    def write_to_file(self, fileout, params_11=None):
        # Generic code to write data from an ATL11 object to an h5 file
        # Input:
        #   fileout: filename of the hdf5 file to write
        # Optional input:
        #   params_11: ATL11.defaults structure
        beam_pair_name='/pt%d' % self.beam_pair
        beam_pair_name=beam_pair_name.encode('ASCII')
        if os.path.isfile(fileout):
            f = h5py.File(fileout.encode('ASCII'),'r+')
            if beam_pair_name in f:
                del f[beam_pair_name]
        else:
            f = h5py.File(fileout.encode('ASCII'),'w')
        g=f.create_group(beam_pair_name)

        # set the output pair and track attributes
        g.attrs['beam_pair'.encode('ASCII')]=self.beam_pair
        g.attrs['ReferenceGroundTrack'.encode('ASCII')]=self.track_num
        g.attrs['first_cycle'.encode('ASCII')]=self.cycles[0]
        g.attrs['last_cycle'.encode('ASCII')]=self.cycles[1]
        # put default parameters as top level attributes
        if params_11 is None:
            params_11=ATL11.defaults()

        # write each variable in params_11 as an attribute
        for param, val in  vars(params_11).items():
            if not isinstance(val,(dict,type(None))):
                try:
                    if param == 'ATL06_xover_field_list':
                        xover_attr=getattr(params_11, param)
                        xover_attr = [x.encode('ASCII') for x in xover_attr]
                        g.attrs[param.encode('ASCII')]=xover_attr
                    else:
                        g.attrs[param.encode('ASCII')]=getattr(params_11, param)
                except Exception as e:
                    print("write_to_file:could not automatically set parameter: %s error = %s" % (param,str(e)))
                    continue

        # put groups, fields and associated attributes from .csv file
        with importlib.resources.path('ATL11','package_data') as pp:
            with open(os.path.join(pp,'ATL11_output_attrs.csv'),'r') as attrfile:
                reader=list(csv.DictReader(attrfile))
        group_names=set([row['group'] for row in reader])
        attr_names=[x for x in reader[0].keys() if x != 'field' and x != 'group']

        # start with 'ROOT' group
        list_vars=getattr(self,'ROOT').list_of_fields
        list_vars.append('cycle_number')
        # establish the two main dimension scales
        for field in ['ref_pt','cycle_number']:
            field_attrs = {row['field']: {attr_names[ii]:row[attr_names[ii]] for ii in range(len(attr_names))} for row in reader if 'ROOT' in row['group']}
            dimensions = field_attrs[field]['dimensions'].split(',')
            data = getattr(getattr(self,'ROOT'),field)
            dset = g.create_dataset(field.encode('ASCII'),data=data,chunks=True,compression=6,dtype=field_attrs[field]['datatype'].lower()) #,fillvalue=fillvalue)
            dset.dims[0].label = field
            for attr in attr_names:
                if 'dimensions' not in attr and 'datatype' not in attr:
                    create_attribute(dset.id, attr, [], str(field_attrs[field][attr]))
            if field_attrs[field]['datatype'].startswith('int'):
                dset.attrs['_FillValue'.encode('ASCII')] = np.iinfo(np.dtype(field_attrs[field]['datatype'].lower())).max
            elif field_attrs[field]['datatype'].startswith('Float'):
                dset.attrs['_FillValue'.encode('ASCII')] = np.finfo(np.dtype(field_attrs[field]['datatype'].lower())).max

        for field in [item for item in list_vars if (item != 'ref_pt') and (item != 'cycle_number')]:
            field_attrs = {row['field']: {attr_names[ii]:row[attr_names[ii]] for ii in range(len(attr_names))} for row in reader if 'ROOT' in row['group']}
            dimensions = field_attrs[field]['dimensions'].split(',')
            data = getattr(getattr(self,'ROOT'),field)
            # change nans to proper invalid, depending on datatype
            if field_attrs[field]['datatype'].startswith('int'):
                data = np.nan_to_num(data,nan=np.iinfo(np.dtype(field_attrs[field]['datatype'].lower())).max)
                data = data.astype('int')  # don't change to int before substituting nans with invalid.
                fillvalue = np.iinfo(np.dtype(field_attrs[field]['datatype'].lower())).max
            elif field_attrs[field]['datatype'].startswith('Float'):
                data = np.nan_to_num(data,nan=np.finfo(np.dtype(field_attrs[field]['datatype'].lower())).max)
                fillvalue = np.finfo(np.dtype(field_attrs[field]['datatype'].lower())).max
            dset = g.create_dataset(field.encode('ASCII'),
                                    data=data,fillvalue=fillvalue,
                                    chunks=True,
                                    compression=6,
                                    dtype=field_attrs[field]['datatype'].lower())
            dset.dims[0].label = field

            for ii,dim in enumerate(dimensions):
                dim=dim.strip()
                if 'N_pts' in dim:
                    dset.dims[ii].attach_scale(g['ref_pt'])
                    dset.dims[ii].label = 'ref_pt'
                if 'N_cycles' in dim:
                    dset.dims[ii].attach_scale(g['cycle_number'])
                    dset.dims[ii].label = 'cycle_number'
            for attr in attr_names:
                if 'dimensions' not in attr and 'datatype' not in attr:
                    create_attribute(dset.id, attr, [], str(field_attrs[field][attr]))
            if field_attrs[field]['datatype'].startswith('int'):
                dset.attrs['_FillValue'.encode('ASCII')] = np.iinfo(np.dtype(field_attrs[field]['datatype'].lower())).max
            elif field_attrs[field]['datatype'].startswith('Float'):
                dset.attrs['_FillValue'.encode('ASCII')] = np.finfo(np.dtype(field_attrs[field]['datatype'].lower())).max

        for group in [item for item in group_names if item != 'ROOT']:
            if hasattr(getattr(self,group),'list_of_fields'):
                grp = g.create_group(group.encode('ASCII'))

                field_attrs = {row['field']: {attr_names[ii]:row[attr_names[ii]] for ii in range(len(attr_names))} for row in reader if group in row['group']}

                if 'crossing_track_data' in group:
                    this_ref_pt=getattr(getattr(self,group),'ref_pt')
                    if len(this_ref_pt) > 0:
                        dset = grp.create_dataset('ref_pt'.encode('ASCII'),data=this_ref_pt,chunks=True,compression=6,dtype=field_attrs['ref_pt']['datatype'])
                    else:
                        dset = grp.create_dataset('ref_pt'.encode('ASCII'), shape=[0],chunks=True,compression=6,dtype=np.int32)
                    dset.dims[0].label = 'ref_pt'.encode('ASCII')
                    for attr in attr_names:
                        if 'dimensions' not in attr and 'datatype' not in attr:
                            create_attribute(dset.id, attr, [], field_attrs['ref_pt'][attr])

                if 'ref_surf' in group:
                    dset = grp.create_dataset('poly_exponent_x'.encode('ASCII'),data=np.array([item[0] for item in params_11.poly_exponent_list]),chunks=True,compression=6,dtype=field_attrs['poly_exponent_x']['datatype'])
                    dset.dims[0].label = 'poly_exponent_x'.encode('ASCII')
                    for attr in attr_names:
                        if 'dimensions' not in attr and 'datatype' not in attr:
                            create_attribute(dset.id, attr, [], field_attrs['poly_exponent_x'][attr])
                    dset = grp.create_dataset('poly_exponent_y'.encode('ASCII'),data=np.array([item[1] for item in params_11.poly_exponent_list]),chunks=True,compression=6, dtype=field_attrs['poly_exponent_y']['datatype'])
                    dset.dims[0].label = 'poly_exponent_y'.encode('ASCII')
                    for attr in attr_names:
                        if 'dimensions' not in attr and 'datatype' not in attr:
                            create_attribute(dset.id, attr, [], field_attrs['poly_exponent_y'][attr])

                    grp.attrs['poly_exponent_x'.encode('ASCII')]=np.array([item[0] for item in params_11.poly_exponent_list], dtype=int)
                    grp.attrs['poly_exponent_y'.encode('ASCII')]=np.array([item[1] for item in params_11.poly_exponent_list], dtype=int)
                    grp.attrs['slope_change_t0'.encode('ASCII')]=np.mean(self.slope_change_t0).astype('int')
                    g.attrs['N_poly_coeffs'.encode('ASCII')]=int(self.N_coeffs)

                list_vars=getattr(self,group).list_of_fields
                if group == 'crossing_track_data':
                    list_vars.remove('ref_pt')  # handled above
                if list_vars is not None:
                    for field in list_vars:
                        dimensions = field_attrs[field]['dimensions'].split(',')
                        data = getattr(getattr(self,group),field)
                        # change nans to proper invalid, depending on datatype
                        if field_attrs[field]['datatype'].startswith('int'):
                            data = np.nan_to_num(data,nan=np.iinfo(np.dtype(field_attrs[field]['datatype'])).max)
                            data = data.astype('int')  # don't change to int before substituting nans with invalid.
                            fillvalue = np.iinfo(np.dtype(field_attrs[field]['datatype'].lower())).max
                        elif field_attrs[field]['datatype'].startswith('Float'):
                            data = np.nan_to_num(data,nan=np.finfo(np.dtype(field_attrs[field]['datatype'].lower())).max)
                            fillvalue = np.finfo(np.dtype(field_attrs[field]['datatype'].lower())).max

                        dset = grp.create_dataset(field.encode('ASCII'),data=data,fillvalue=fillvalue,chunks=True,compression=6,dtype=field_attrs[field]['datatype'].lower())
                        for ii,dim in enumerate(dimensions):
                            dim=dim.strip()
                            if 'N_pts' in dim:
                                dset.dims[ii].attach_scale(g['ref_pt'])
                                dset.dims[ii].label = 'ref_pt'
                            if 'N_cycles' in dim:
                                dset.dims[ii].attach_scale(g['cycle_number'])
                                dset.dims[ii].label = 'cycle_number'
                            if 'N_coeffs' in dim:
                                dset.dims[ii].attach_scale(grp['poly_exponent_x'])
                                dset.dims[ii].attach_scale(grp['poly_exponent_y'])
                                dset.dims[ii].label = '(poly_exponent_x, poly_exponent_y)'
                            if 'Nxo' in dim:
                                dset.dims[ii].attach_scale(grp['ref_pt'])
                                dset.dims[ii].label = 'ref_pt'
                        for attr in attr_names:
                            if 'dimensions' not in attr and 'datatype' not in attr:
                                create_attribute(dset.id, attr, [], str(field_attrs[field][attr]))
                        if field_attrs[field]['datatype'].startswith('int'):
                            dset.attrs['_FillValue'.encode('ASCII')] = np.iinfo(np.dtype(field_attrs[field]['datatype'].lower())).max
                        elif field_attrs[field]['datatype'].startswith('Float'):
                            dset.attrs['_FillValue'.encode('ASCII')] = np.finfo(np.dtype(field_attrs[field]['datatype'].lower())).max
        f.close()
        return
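
The recurring "change nans to proper invalid" step replaces NaNs with the maximum representable value of the output datatype and records the same number as _FillValue. Here is a minimal sketch of that conversion in isolation; the helper name fill_invalid and the sample array are illustrative only.

import numpy as np

def fill_invalid(data, datatype):
    # use the dtype's largest representable value as the invalid marker
    if datatype.lower().startswith('int'):
        fillvalue = np.iinfo(np.dtype(datatype.lower())).max
    else:
        fillvalue = np.finfo(np.dtype(datatype.lower())).max
    return np.nan_to_num(data, nan=fillvalue), fillvalue

data, fv = fill_invalid(np.array([1.0, np.nan, 3.0]), 'Float64')
# fv   -> 1.7976931348623157e+308
# data -> [1.0, 1.7976931348623157e+308, 3.0]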
Code example #4
File: write_METADATA.py Project: cuihaotian/ATL11
def poly_buffered_linestring(outfile):
    lonlat_11 = []
    with h5py.File(outfile, 'r') as h5f:
        for pair in ['pt1', 'pt2', 'pt3']:
            try:
                lonlat_11 += [
                    np.c_[h5f[pair + '/longitude'], h5f[pair + '/latitude']]
                ]
            except Exception as e:
                print(e)
    print('avg lat lonlat_11[0]',
          np.sum(lonlat_11[0][:, 1]) / len(lonlat_11[0]))
    if np.sum(lonlat_11[0][:, 1]) / len(lonlat_11[0]) >= 0.0:
        polarEPSG = 3413
    else:
        polarEPSG = 3031

    xformer_ll2pol = pyproj.Transformer.from_crs(4326, polarEPSG)
    xformer_pol2ll = pyproj.Transformer.from_crs(polarEPSG, 4326)
    xy_11 = []
    for ll in lonlat_11:
        xy_11 += [np.c_[xformer_ll2pol.transform(ll[:, 1], ll[:, 0])]]
    lines = []
    for xx in xy_11:
        lines += [shapely.geometry.LineString(xx)]
    line_simp = []
    for line in lines:
        line_simp += [line.simplify(tolerance=100)]
    all_lines = shapely.geometry.MultiLineString(line_simp)
    common_buffer = all_lines.buffer(3000, 4)
    common_buffer = common_buffer.simplify(tolerance=500)

    xpol, ypol = np.array(common_buffer.exterior.coords.xy)
    y1, x1 = xformer_pol2ll.transform(xpol, ypol)
    print("polygon size:", len(x1))

    with h5py.File(outfile, 'r+') as h5f:
        if '/orbit_info/bounding_polygon_dim1' in h5f:
            del h5f['/orbit_info/bounding_polygon_dim1']
            del h5f['/orbit_info/bounding_polygon_lon1']
            del h5f['/orbit_info/bounding_polygon_lat1']
        if '/orbit_info/bounding_polygon_dim2' in h5f:
            del h5f['/orbit_info/bounding_polygon_dim2']
            del h5f['/orbit_info/bounding_polygon_lon2']
            del h5f['/orbit_info/bounding_polygon_lat2']

        h5f.create_dataset('/orbit_info/bounding_polygon_dim1',
                           data=np.arange(1,
                                          np.size(x1) + 1),
                           chunks=True,
                           compression=6,
                           dtype='int32')
        create_attribute(h5f['orbit_info/bounding_polygon_dim1'].id,
                         'description', [], 'Polygon extent vertex count')
        create_attribute(h5f['orbit_info/bounding_polygon_dim1'].id, 'units',
                         [], '1')
        create_attribute(h5f['orbit_info/bounding_polygon_dim1'].id,
                         'long_name', [], 'Polygon vertex count')
        create_attribute(h5f['orbit_info/bounding_polygon_dim1'].id, 'source',
                         [], 'model')
        dset = h5f.create_dataset('/orbit_info/bounding_polygon_lon1',
                                  data=x1,
                                  chunks=True,
                                  compression=6,
                                  dtype='float32')
        dset.dims[0].attach_scale(h5f['orbit_info']['bounding_polygon_dim1'])
        create_attribute(h5f['orbit_info/bounding_polygon_lon1'].id,
                         'description', [], 'Polygon extent vertex longitude')
        create_attribute(h5f['orbit_info/bounding_polygon_lon1'].id, 'units',
                         [], 'degrees East')
        create_attribute(h5f['orbit_info/bounding_polygon_lon1'].id,
                         'long_name', [], 'Polygon vertex longitude')
        create_attribute(h5f['orbit_info/bounding_polygon_lon1'].id, 'source',
                         [], 'model')
        create_attribute(h5f['orbit_info/bounding_polygon_lon1'].id,
                         'coordinates', [], 'bounding_polygon_dim1')
        dset = h5f.create_dataset('/orbit_info/bounding_polygon_lat1',
                                  data=y1,
                                  chunks=True,
                                  compression=6,
                                  dtype='float32')
        dset.dims[0].attach_scale(h5f['orbit_info']['bounding_polygon_dim1'])
        create_attribute(h5f['orbit_info/bounding_polygon_lat1'].id,
                         'description', [], 'Polygon extent vertex latitude')
        create_attribute(h5f['orbit_info/bounding_polygon_lat1'].id, 'units',
                         [], 'degrees North')
        create_attribute(h5f['orbit_info/bounding_polygon_lat1'].id,
                         'long_name', [], 'Polygon vertex latitude')
        create_attribute(h5f['orbit_info/bounding_polygon_lat1'].id, 'source',
                         [], 'model')
        create_attribute(h5f['orbit_info/bounding_polygon_lat1'].id,
                         'coordinates', [], 'bounding_polygon_dim1')
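
poly_buffered_linestring projects the ground tracks to a polar stereographic CRS, buffers and simplifies them with shapely, and transforms the exterior ring of the buffer back to lon/lat. A compressed sketch of that geometry pipeline follows; the pyproj and shapely calls mirror the code above, but the sample coordinates are made up and not real ATL11 data.

import numpy as np
import pyproj
import shapely.geometry

lonlat = np.array([[-45.0, 70.0], [-45.1, 70.5], [-45.2, 71.0]])   # fake track (lon, lat)
ll2pol = pyproj.Transformer.from_crs(4326, 3413)   # north polar stereographic
pol2ll = pyproj.Transformer.from_crs(3413, 4326)

xy = np.c_[ll2pol.transform(lonlat[:, 1], lonlat[:, 0])]   # EPSG:4326 expects (lat, lon)
line = shapely.geometry.LineString(xy).simplify(tolerance=100)
poly = line.buffer(3000, 4).simplify(tolerance=500)        # 3 km buffer, coarse arcs
lat, lon = pol2ll.transform(*np.array(poly.exterior.coords.xy))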
Code example #5
File: write_METADATA.py Project: cuihaotian/ATL11
def write_METADATA(outfile, infiles):
    if os.path.isfile(outfile):
        #
        # Call filemeta, copies METADATA group from template
        #
        filemeta(outfile, infiles)
        g = h5py.File(outfile, 'r+')
        gf = g.create_group('METADATA/Lineage/ATL06'.encode(
            'ASCII', 'replace'))
        fname = []
        sname = []
        scycle = []
        ecycle = []
        srgt = []
        ergt = []
        sregion = []
        eregion = []
        sgeoseg = []
        egeoseg = []
        sorbit = []
        eorbit = []
        uuid = []
        version = []
        for ii, infile in enumerate(sorted(infiles)):
            fname.append(os.path.basename(infile).encode('ASCII'))
            if os.path.isfile(infile):
                f = h5py.File(infile, 'r')
                # Read the datasets from ATL06 ancillary_data, where available
                # All fields must be arrays, not just min/max, even if just repeats
                #
                sname.append(f['/'].attrs['short_name'])
                uuid.append(f['/'].attrs['identifier_file_uuid'])
                scycle.append(f['ancillary_data/start_cycle'])
                ecycle.append(f['ancillary_data/end_cycle'])
                sorbit.append(f['ancillary_data/start_orbit'])
                eorbit.append(f['ancillary_data/end_orbit'])
                sregion.append(f['ancillary_data/start_region'])
                eregion.append(f['ancillary_data/end_region'])
                srgt.append(f['ancillary_data/start_rgt'])
                ergt.append(f['ancillary_data/end_rgt'])
                version.append(f['ancillary_data/version'])
        for pt in g.keys():
            if pt.startswith('pt'):
                sgeoseg = np.min([sgeoseg, np.min(g[pt]['ref_pt'][:])])
                egeoseg = np.max([egeoseg, np.max(g[pt]['ref_pt'][:])])

        gf.attrs['description'] = 'ICESat-2 ATLAS Land Ice'.encode(
            'ASCII', 'replace')
        #
        # Use create_attribute for strings to get ASCII and NULLTERM
        #
        create_attribute(gf.id, 'fileName', [2], fname)
        create_attribute(gf.id, 'shortName', [2], sname)

        gf.attrs['start_orbit'] = np.ravel(sorbit)
        gf.attrs['end_orbit'] = np.ravel(eorbit)

        gf.attrs['start_cycle'] = np.ravel(scycle)
        gf.attrs['end_cycle'] = np.ravel(ecycle)

        gf.attrs['start_rgt'] = np.ravel(srgt)
        gf.attrs['end_rgt'] = np.ravel(ergt)

        gf.attrs['start_region'] = np.ravel(sregion)
        gf.attrs['end_region'] = np.ravel(eregion)

        gf.attrs['start_geoseg'] = np.repeat(sgeoseg, np.size(sregion))
        gf.attrs['end_geoseg'] = np.repeat(egeoseg, np.size(sregion))

        create_attribute(gf.id, 'uuid', [2], uuid)
        gf.attrs['version'] = np.ravel(version)

        g.close()
    return outfile
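
write_METADATA gathers one value per ATL06 granule and flattens each list with np.ravel before storing it as an attribute of the Lineage/ATL06 group. The sketch below shows that collect-then-flatten step on its own; the helper name and file paths are placeholders, and the scalar values are read out explicitly here rather than appending open h5py Dataset objects as the code above does.

import h5py
import numpy as np

def collect_scalars(infiles, path):
    # gather one scalar dataset per granule and flatten to a 1-D array
    vals = []
    for infile in sorted(infiles):
        with h5py.File(infile, 'r') as f:
            vals.append(f[path][()])
    return np.ravel(vals)

# hypothetical usage:
# gf.attrs['start_cycle'] = collect_scalars(atl06_files, 'ancillary_data/start_cycle')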
Code example #6
File: write_METADATA.py Project: cuihaotian/ATL11
def filemeta(outfile, infiles):

    orbit_info={'crossing_time':0., 'cycle_number':0, 'lan':0., \
        'orbit_number':0., 'rgt':0, 'sc_orient':0, 'sc_orient_time':0.}
    root_info={'date_created':'', 'geospatial_lat_max':0., 'geospatial_lat_min':0., \
        'geospatial_lat_units':'', \
        'geospatial_lon_max':0., 'geospatial_lon_min':0., 'geospatial_lon_units':'', \
        'hdfversion':'', 'history':'', \
        'identifier_file_uuid':'', 'identifier_product_format_version':'', 'time_coverage_duration':0., \
        'time_coverage_end':'', 'time_coverage_start':''}
    # copy METADATA group from the ATL11 template, and make lineage/cycle_array containing each ATL06 file
    if os.path.isfile(outfile):
        g = h5py.File(outfile, 'r+')
        for ii, infile in enumerate(sorted(infiles)):
            m = h5py.File(
                os.path.dirname(os.path.realpath(__file__)) +
                '/atl11_metadata_template.h5', 'r')
            if ii == 0:
                if 'METADATA' in list(g['/'].keys()):
                    del g['METADATA']
                # get all METADATA groups except Lineage, which we set to zero
                m.copy('METADATA', g)
                # fix up Lineage
                if 'Lineage' in list(g['METADATA'].keys()):
                    del g['METADATA']['Lineage']
                g['METADATA'].create_group('Lineage'.encode(
                    'ASCII', 'replace'))
                gf = g['METADATA']['Lineage'].create_group('ANC36-11'.encode(
                    'ASCII', 'replace'))
                gf = g['METADATA']['Lineage'].create_group('ANC38-11'.encode(
                    'ASCII', 'replace'))
                gf = g['METADATA']['Lineage'].create_group('Control'.encode(
                    'ASCII', 'replace'))
                # Add in needed root attributes
                create_attribute(
                    gf.id, 'description', [],
                    'Exact command line execution of ICESat-2/ATL11 algorithm providing all of the conditions required for each individual run of the software.'
                )
                create_attribute(gf.id, 'shortName', [], 'CNTL')
                create_attribute(gf.id, 'version', [], '1')
                create_attribute(gf.id, 'control', [], ' '.join(sys.argv))
                # handle METADATA attributes
                create_attribute(g['METADATA/DatasetIdentification'].id,
                                 'fileName', [], os.path.basename(outfile))
                create_attribute(g['METADATA/DatasetIdentification'].id,
                                 'uuid', [], str(uuid.uuid4()))
                create_attribute(g['METADATA/ProcessStep/PGE'].id,
                                 'runTimeParameters', [], ' '.join(sys.argv))
                create_attribute(g['METADATA/ProcessStep/PGE'].id,
                                 'identifier', [], identifier())
                create_attribute(g['METADATA/ProcessStep/PGE'].id,
                                 'softwareDate', [], softwareDate())
                create_attribute(g['METADATA/ProcessStep/PGE'].id,
                                 'softwareTitle', [], softwareTitle())
                gf = g.create_group('quality_assessment'.encode(
                    'ASCII', 'replace'))

                if os.path.isfile(infile):
                    f = h5py.File(infile, 'r')
                    f.copy('quality_assessment/qa_granule_fail_reason',
                           g['quality_assessment'])
                    f.copy('quality_assessment/qa_granule_pass_fail',
                           g['quality_assessment'])
                    f.copy('ancillary_data', g)
                    del g['ancillary_data/land_ice']
                    gf = g['METADATA']['Lineage']['Control'].attrs[
                        'control'].decode()
                    g['ancillary_data/control'][...] = gf.encode(
                        'ASCII', 'replace')
                    del g['METADATA/Extent']
                    f.copy('METADATA/Extent', g['METADATA'])
                    start_delta_time = f['ancillary_data/start_delta_time'][0]
                    create_attribute(g.id, 'short_name', [], 'ATL11')
                    for key, keyval in root_info.items():
                        dsname = key
                        if key == 'date_created' or key == 'history':
                            val = str(datetime.now().date())
                            val = val + 'T' + str(datetime.now().time()) + 'Z'
                            create_attribute(g.id, key, [], val)
                            create_attribute(g['METADATA/ProcessStep/PGE'].id,
                                             'stepDateTime', [], val)
                            create_attribute(
                                g['METADATA/DatasetIdentification'].id,
                                'creationDate', [], val)
                            continue
                        if key == 'identifier_product_format_version':
                            val = softwareVersion()
                            create_attribute(g.id, key, [], val)
                            create_attribute(g['METADATA/ProcessStep/PGE'].id,
                                             'softwareVersion', [], val)
                            create_attribute(
                                g['METADATA/DatasetIdentification'].id,
                                'VersionID', [], val)
                            create_attribute(
                                g['METADATA/SeriesIdentification'].id,
                                'VersionID', [], series_version())
                            continue
                        if key == 'time_coverage_start':
                            val = f.attrs[key].decode()
                            create_attribute(g.id, key, [], val)
                            continue
                        if key == 'time_coverage_end' or key == 'time_coverage_duration':
                            continue
                        if dsname in f.attrs:
                            if isinstance(keyval, float):
                                val = f.attrs[key]
                                g.attrs[key] = val
                            else:
                                val = f.attrs[key].decode()
                                create_attribute(g.id, key, [], val)

                    # Read the datasets from orbit_info
                    duplicate_group(f, g, 'orbit_info')
                    g['orbit_info/cycle_number'].dims[0].attach_scale(
                        g['orbit_info/crossing_time'])
                    g['orbit_info/lan'].dims[0].attach_scale(
                        g['orbit_info/crossing_time'])
                    g['orbit_info/orbit_number'].dims[0].attach_scale(
                        g['orbit_info/crossing_time'])
                    g['orbit_info/rgt'].dims[0].attach_scale(
                        g['orbit_info/crossing_time'])
                    g['orbit_info/sc_orient_time'].dims[0].attach_scale(
                        g['orbit_info/sc_orient'])

                    m.close()
                    f.close()
            # Fill orbit_info for each ATL06
            if ii > 0:
                if os.path.isfile(infile):
                    f = h5py.File(infile, 'r')
                    for oi_dset in g['orbit_info'].values():
                        oi_dset.resize((oi_dset.shape[0] + 1, ))
                        oi_dset[-1] = f[oi_dset.name][0]
                    f.close()


            # Capture ending dates, etc. from last ATL06
            if ii == len(infiles) - 1:
                if os.path.isfile(infile):
                    f = h5py.File(infile, 'r')
                    for key, keyval in root_info.items():
                        dsname = key
                        if key == 'time_coverage_end':
                            val = f.attrs[key].decode()
                            create_attribute(g.id, key, [], val)
                            continue
                        if key == 'time_coverage_duration':
                            end_delta_time = f[
                                'ancillary_data/end_delta_time'][0]
                            val = float(end_delta_time) - float(
                                start_delta_time)
                            g.attrs[key] = val
                    g['ancillary_data/data_end_utc'][
                        ...] = f['ancillary_data/data_end_utc']
                    g['ancillary_data/end_cycle'][
                        ...] = f['ancillary_data/end_cycle']
                    g['ancillary_data/end_delta_time'][
                        ...] = f['ancillary_data/end_delta_time']
                    g['ancillary_data/end_gpssow'][
                        ...] = f['ancillary_data/end_gpssow']
                    g['ancillary_data/end_gpsweek'][
                        ...] = f['ancillary_data/end_gpsweek']
                    g['ancillary_data/end_orbit'][
                        ...] = f['ancillary_data/end_orbit']
                    g['ancillary_data/end_region'][
                        ...] = f['ancillary_data/end_region']
                    g['ancillary_data/end_rgt'][
                        ...] = f['ancillary_data/end_rgt']
                    g['ancillary_data/granule_end_utc'][
                        ...] = f['ancillary_data/granule_end_utc']
                    g['METADATA/Extent'].attrs['rangeEndingDateTime'] = f[
                        'METADATA/Extent'].attrs['rangeEndingDateTime']

                    m.close()
                    f.close()

        g.close()
        poly_buffered_linestring(outfile)
        return ()
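
The per-granule loop above grows each orbit_info dataset by one element, which only works if the datasets copied by duplicate_group (project code, not shown here) were created resizable, i.e. with an unlimited maxshape. A minimal sketch of that append pattern with a throwaway file name:

import h5py
import numpy as np

with h5py.File('orbit_info_demo.h5', 'w') as g:
    # chunked dataset with an unlimited first dimension so it can be resized
    d = g.create_dataset('orbit_info/rgt', data=np.array([1]),
                         maxshape=(None,), chunks=True)
    for rgt in (2, 3):                 # one value per additional granule
        d.resize((d.shape[0] + 1,))
        d[-1] = rgt
    print(d[:])                        # -> [1 2 3]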