Example #1
def write(datasetDict, out_file, metadata=None, ref_file=None, compression=None):
    """ Write one file.
    Parameters: datasetDict : dict of datasets, with key = datasetName and value = 2D/3D array, e.g.:
                    {'height'        : np.ones((   200,300), dtype=np.int16),
                     'incidenceAngle': np.ones((   200,300), dtype=np.float32),
                     'bperp'         : np.ones((80,200,300), dtype=np.float32),
                     ...}
                out_file : str, output file name
                metadata : dict of attributes
                ref_file : str, reference file to get auxiliary info
                compression : str, compression while writing to HDF5 file: None, "lzf" or "gzip"
    Returns:    out_file : str
    Examples:   dsDict = dict()
                dsDict['velocity'] = np.ones((200,300), dtype=np.float32)
                write(datasetDict=dsDict, out_file='velocity.h5', metadata=atr)
    """
    # copy metadata to meta
    if metadata:
        meta = {key: value for key, value in metadata.items()}
    elif ref_file:
        meta = readfile.read_attribute(ref_file)
    else:
        raise ValueError('No metadata or reference file input.')

    # convert ndarray input into dict type
    if isinstance(datasetDict, np.ndarray):
        data = np.array(datasetDict, datasetDict.dtype)
        datasetDict = dict()
        datasetDict[meta['FILE_TYPE']] = data

    ext = os.path.splitext(out_file)[1].lower()
    # HDF5 File
    if ext in ['.h5', '.he5']:
        if compression is None and ref_file:
            compression = readfile.get_hdf5_compression(ref_file)

        k = meta['FILE_TYPE']
        if k == 'timeseries':
            if ref_file is None:
                raise Exception('Can not write {} file without reference file!'.format(k))
            obj = timeseries(out_file)
            obj.write2hdf5(datasetDict[k],
                           metadata=meta,
                           refFile=ref_file,
                           compression=compression)

        else:
            if os.path.isfile(out_file):
                print('delete existing file: {}'.format(out_file))
                os.remove(out_file)

            print('create HDF5 file: {} with w mode'.format(out_file))
            with h5py.File(out_file, 'w') as f:
                # 1. Write input datasets
                maxDigit = max([len(i) for i in list(datasetDict.keys())])
                for dsName in datasetDict.keys():
                    data = datasetDict[dsName]
                    print(('create dataset /{d:<{w}} of {t:<10} in size of {s:<20} '
                           'with compression={c}').format(d=dsName,
                                                          w=maxDigit,
                                                          t=str(data.dtype),
                                                          s=str(data.shape),
                                                          c=compression))
                    ds = f.create_dataset(dsName,
                                          data=data,
                                          chunks=True,
                                          compression=compression)

                # 2. Write extra/auxiliary datasets from ref_file
                if ref_file and os.path.splitext(ref_file)[1] in ['.h5', '.he5']:
                    atr_ref = readfile.read_attribute(ref_file)
                    shape_ref = (int(atr_ref['LENGTH']), int(atr_ref['WIDTH']))
                    with h5py.File(ref_file, 'r') as fr:
                        dsNames = [i for i in fr.keys()
                                   if (i not in list(datasetDict.keys())
                                       and isinstance(fr[i], h5py.Dataset) 
                                       and fr[i].shape[-2:] != shape_ref)]
                        maxDigit = max([len(i) for i in dsNames]+[maxDigit])
                        for dsName in dsNames:
                            ds = fr[dsName]
                            print(('create dataset /{d:<{w}} of {t:<10} in size of {s:<10} '
                                   'with compression={c}').format(d=dsName,
                                                                  w=maxDigit,
                                                                  t=str(ds.dtype),
                                                                  s=str(ds.shape),
                                                                  c=compression))
                            f.create_dataset(dsName,
                                             data=ds[:],
                                             chunks=True,
                                             compression=compression)

                # 3. metadata
                for key, value in meta.items():
                    f.attrs[key] = str(value)
                print('finished writing to {}'.format(out_file))

    # ISCE / ROI_PAC / GAMMA / Image product
    else:
        key_list = list(datasetDict.keys())
        data_list = []
        for key in key_list:
            data_list.append(datasetDict[key])

        # Write Data File
        print('write {}'.format(out_file))
        if ext in ['.unw', '.cor', '.hgt']:
            write_float32(data_list[0], out_file)
            meta['DATA_TYPE'] = 'float32'
        elif ext == '.dem':
            write_real_int16(data_list[0], out_file)
            meta['DATA_TYPE'] = 'int16'
        elif ext in ['.trans']:
            write_float32(data_list[0], data_list[1], out_file)
        elif ext in ['.utm_to_rdc', '.UTM_TO_RDC']:
            data = np.zeros(data_list[0].shape, dtype=np.complex64)
            data.real = datasetDict['rangeCoord']
            data.imag = datasetDict['azimuthCoord']
            data.astype('>c8').tofile(out_file)
        elif ext in ['.mli', '.flt']:
            write_real_float32(data_list[0], out_file)
        elif ext == '.slc':
            write_complex_int16(data_list[0], out_file)
        elif ext == '.int':
            write_complex64(data_list[0], out_file)
        elif meta['DATA_TYPE'].lower() in ['float32', 'float']:
            write_real_float32(data_list[0], out_file)
        elif meta['DATA_TYPE'].lower() in ['int16', 'short']:
            write_real_int16(data_list[0], out_file)
        elif meta['DATA_TYPE'].lower() in ['byte','bool']:
            write_byte(data_list[0], out_file)
        else:
            print('Unsupported file type: '+ext)
            return 0

        # write metadata file
        write_roipac_rsc(meta, out_file+'.rsc', print_msg=True)
    return out_file
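# ---------------------------------------------------------------------
# Usage sketch (illustrative only, not part of the original function):
# a hypothetical call to the write() above. LENGTH/WIDTH/FILE_TYPE are
# the metadata keys the function itself consumes; file names and values
# are made up for the demo.
import numpy as np

atr = {'LENGTH': '200', 'WIDTH': '300', 'FILE_TYPE': 'velocity'}
dsDict = {'velocity': np.ones((200, 300), dtype=np.float32)}
write(dsDict, out_file='velocity.h5', metadata=atr)

# an ndarray input is converted into a dict keyed by meta['FILE_TYPE']
write(np.ones((200, 300), dtype=np.float32), out_file='velocity.h5', metadata=atr)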
Example #2
def write(datasetDict, out_file, metadata=None, ref_file=None, compression=None, ds_unit_dict=None, print_msg=True):
    """ Write one file.
    Parameters: datasetDict  - dict of datasets, with key = datasetName and value = 2D/3D array, e.g.:
                    {'height'        : np.ones((   200,300), dtype=np.int16),
                     'incidenceAngle': np.ones((   200,300), dtype=np.float32),
                     'bperp'         : np.ones((80,200,300), dtype=np.float32),
                     ...}
                out_file     - str, output file name
                metadata     - dict of attributes
                ref_file     - str, reference file to get auxiliary info
                compression  - str, compression while writing to HDF5 file: None, "lzf" or "gzip"
                ds_unit_dict - dict, dataset unit definition
                    {dname : dunit,
                     dname : dunit,
                     ...
                    }
    Returns:    out_file     - str
    Examples:   dsDict = dict()
                dsDict['velocity'] = np.ones((200,300), dtype=np.float32)
                write(datasetDict=dsDict, out_file='velocity.h5', metadata=atr)
    """
    vprint = print if print_msg else lambda *args, **kwargs: None

    # copy metadata to meta
    if metadata:
        meta = {key: value for key, value in metadata.items()}
    elif ref_file:
        meta = readfile.read_attribute(ref_file)
    else:
        raise ValueError('No metadata or reference file input.')

    # convert ndarray input into dict type
    if isinstance(datasetDict, np.ndarray):
        data = np.array(datasetDict, datasetDict.dtype)
        datasetDict = dict()
        datasetDict[meta['FILE_TYPE']] = data

    # file extension
    fbase, fext = os.path.splitext(out_file)
    # ignore certain meaningless file extensions
    while fext in ['.geo', '.rdr', '.full', '.wgs84']:
        fbase, fext = os.path.splitext(fbase)
    if not fext:
        fext = fbase
    fext = fext.lower()

    # output file info
    out_dir = os.path.dirname(os.path.abspath(out_file))
    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)
        vprint('create directory: {}'.format(out_dir))

    # HDF5 File
    if fext in ['.h5', '.he5']:
        # grab info from reference h5 file
        if ref_file and os.path.splitext(ref_file)[1] in ['.h5', '.he5']:
            # compression
            if compression is None:
                compression = readfile.get_hdf5_compression(ref_file)

            # list of auxiliary datasets
            shape2d = (int(meta['LENGTH']), int(meta['WIDTH']))
            with h5py.File(ref_file, 'r') as fr:
                auxDsNames = [i for i in fr.keys()
                              if (i not in list(datasetDict.keys())
                                  and isinstance(fr[i], h5py.Dataset)
                                  and fr[i].shape[-2:] != shape2d)]
        else:
            auxDsNames = []

        # check required datasets
        dsNames = list(datasetDict.keys()) + auxDsNames
        if meta['FILE_TYPE'] in ['timeseries', 'ifgramStack']:
            if 'date' not in dsNames:
                raise Exception("Can not write {} file without 'date' dataset!".format(meta['FILE_TYPE']))

        # remove existing file
        if os.path.isfile(out_file):
            os.remove(out_file)
            vprint('delete existing file: {}'.format(out_file))

        # writing
        vprint('create HDF5 file: {} with w mode'.format(out_file))
        maxDigit = max([len(i) for i in dsNames])
        with h5py.File(out_file, 'w') as f:
            # 1. write input datasets
            for dsName in datasetDict.keys():
                data = datasetDict[dsName]
                vprint(('create dataset /{d:<{w}} of {t:<10} in size of {s:<20} '
                        'with compression={c}').format(d=dsName,
                                                       w=maxDigit,
                                                       t=str(data.dtype),
                                                       s=str(data.shape),
                                                       c=compression))
                ds = f.create_dataset(dsName,
                                      data=data,
                                      chunks=True,
                                      compression=compression)

            # 2. Write extra/auxiliary datasets from ref_file
            if len(auxDsNames) > 0:
                with h5py.File(ref_file, 'r') as fr:
                    for dsName in auxDsNames:
                        ds = fr[dsName]
                        vprint(('create dataset /{d:<{w}} of {t:<10} in size of {s:<10} '
                                'with compression={c}').format(d=dsName,
                                                               w=maxDigit,
                                                               t=str(ds.dtype),
                                                               s=str(ds.shape),
                                                               c=compression))
                        f.create_dataset(dsName,
                                         data=ds[:],
                                         chunks=True,
                                         compression=compression)

            # 3. metadata
            for key, value in meta.items():
                try:
                    f.attrs[key] = str(value)
                except Exception:
                    # fallback for values with characters h5py cannot store directly
                    f.attrs[key] = str(value.encode('utf-8'))

            # write attributes in dataset level
            if ds_unit_dict is not None:
                for key, value in ds_unit_dict.items():
                    if value is not None:
                        f[key].attrs['UNIT'] = value
                        vprint(f'add /{key:<{maxDigit}} attribute: UNIT = {value}')

        vprint('finished writing to {}'.format(out_file))

    # ISCE / ROI_PAC / GAMMA / Image product
    else:
        # basic info
        key_list = list(datasetDict.keys())
        data_list = list(datasetDict.values())
        meta['BANDS'] = len(key_list)
        meta['INTERLEAVE'] = meta.get('INTERLEAVE', 'BIL').upper()
        # data type
        meta['DATA_TYPE'] = meta.get('DATA_TYPE', 'float32').lower()
        DATA_TYPE_DICT = {'float' : 'float32',
                          'short' : 'int16',
                          'byte'  : 'int8'}
        if meta['DATA_TYPE'] in DATA_TYPE_DICT.keys():
            meta['DATA_TYPE'] = DATA_TYPE_DICT[meta['DATA_TYPE']]

        # adjust for pre-defined files determined by fext
        if fext in ['.unw']:
            meta['DATA_TYPE'] = 'float32'
            meta['INTERLEAVE'] = 'BIL'
            if key_list != ['magnitude', 'phase']:
                data_list = [data_list[0], data_list[0]]

        elif fext in ['.cor', '.hgt']:
            meta['DATA_TYPE'] = 'float32'
            meta['INTERLEAVE'] = 'BIL'
            if meta.get('PROCESSOR', 'isce') == 'roipac':
                data_list = [data_list[0], data_list[0]]

        elif fext == '.dem':
            meta['DATA_TYPE'] = 'int16'

        elif fext in ['.trans']:
            # ROI_PAC lookup table
            meta['DATA_TYPE'] = 'float32'
            meta['INTERLEAVE'] = 'BIL'

        elif fext in ['.utm_to_rdc']:
            # Gamma lookup table
            meta['BANDS'] = 2
            meta['DATA_TYPE'] = 'float32'
            meta['INTERLEAVE'] = 'BIP'

        elif fext in ['.mli', '.flt']:
            meta['DATA_TYPE'] = 'float32'

        elif fext == '.slc':
            # SLC: complex 64 or 32
            meta['BANDS'] = 1

        elif fext == '.int':
            # wrapped interferogram: complex 64
            meta['BANDS'] = 1
            meta['DATA_TYPE'] = 'complex64'
            if key_list == ['magnitude', 'phase']:
                data_list[0] = data_list[0] * np.exp(1j * data_list[1])

        elif fext == '.msk':
            meta['DATA_TYPE'] = 'int8'

        data_types = ['bool', 'int8', 'int16', 'float32', 'float64', 'complex32', 'complex64', 'complex128']
        if meta['DATA_TYPE'] not in data_types:
            msg = 'Unsupported file type "{}" with data type "{}"!'.format(fext, meta['DATA_TYPE'])
            msg += '\nSupported data type list: {}'.format(data_types)
            raise ValueError(msg)

        # write binary file
        write_binary(data_list, out_file, data_type=meta['DATA_TYPE'], interleave=meta['INTERLEAVE'])
        vprint('write file: {}'.format(out_file))

        # write metadata file
        write_roipac_rsc(meta, out_file+'.rsc', print_msg=print_msg)

    return out_file
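# ---------------------------------------------------------------------
# Usage sketch (illustrative only): a hypothetical call exercising the
# two parameters added in this version. ds_unit_dict attaches a UNIT
# attribute at the dataset level; print_msg=False turns vprint() into a
# no-op so the call runs silently.
import numpy as np

atr = {'LENGTH': '200', 'WIDTH': '300', 'FILE_TYPE': 'velocity'}
dsDict = {'velocity': np.ones((200, 300), dtype=np.float32)}
write(dsDict, out_file='velocity.h5', metadata=atr,
      ds_unit_dict={'velocity': 'm/year'},  # stored as f['velocity'].attrs['UNIT']
      print_msg=False)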
Example #3
def write(datasetDict,
          out_file,
          metadata=None,
          ref_file=None,
          compression=None):
    """ Write one file.
    Parameters: datasetDict : dict of datasets, with key = datasetName and value = 2D/3D array, e.g.:
                    {'height'        : np.ones((   200,300), dtype=np.int16),
                     'incidenceAngle': np.ones((   200,300), dtype=np.float32),
                     'bperp'         : np.ones((80,200,300), dtype=np.float32),
                     ...}
                out_file : str, output file name
                metadata : dict of attributes
                ref_file : str, reference file to get auxiliary info
                compression : str, compression while writing to HDF5 file: None, "lzf" or "gzip"
    Returns:    out_file : str
    Examples:   dsDict = dict()
                dsDict['velocity'] = np.ones((200,300), dtype=np.float32)
                write(datasetDict=dsDict, out_file='velocity.h5', metadata=atr)
    """
    # copy metadata to meta
    if metadata:
        meta = {key: value for key, value in metadata.items()}
    elif ref_file:
        meta = readfile.read_attribute(ref_file)
    else:
        raise ValueError('No metadata or reference file input.')

    # convert ndarray input into dict type
    if isinstance(datasetDict, np.ndarray):
        data = np.array(datasetDict, datasetDict.dtype)
        datasetDict = dict()
        datasetDict[meta['FILE_TYPE']] = data

    ext = os.path.splitext(out_file)[1].lower()
    # HDF5 File
    if ext in ['.h5', '.he5']:
        # grab info from reference h5 file
        if ref_file and os.path.splitext(ref_file)[1] in ['.h5', '.he5']:
            # compression
            if compression is None:
                compression = readfile.get_hdf5_compression(ref_file)

            # list of auxiliary datasets
            shape2d = (int(meta['LENGTH']), int(meta['WIDTH']))
            with h5py.File(ref_file, 'r') as fr:
                auxDsNames = [
                    i for i in fr.keys()
                    if (i not in list(datasetDict.keys()) and isinstance(
                        fr[i], h5py.Dataset) and fr[i].shape[-2:] != shape2d)
                ]
        else:
            auxDsNames = []

        # check required datasets
        dsNames = list(datasetDict.keys()) + auxDsNames
        if meta['FILE_TYPE'] in ['timeseries', 'ifgramStack']:
            if 'date' not in dsNames:
                raise Exception(
                    "Can not write {} file without 'date' dataset!".format(
                        meta['FILE_TYPE']))

        # remove existing file
        if os.path.isfile(out_file):
            print('delete existing file: {}'.format(out_file))
            os.remove(out_file)

        # writing
        print('create HDF5 file: {} with w mode'.format(out_file))
        maxDigit = max([len(i) for i in dsNames])
        with h5py.File(out_file, 'w') as f:
            # 1. write input datasets
            for dsName in datasetDict.keys():
                data = datasetDict[dsName]
                print(
                    ('create dataset /{d:<{w}} of {t:<10} in size of {s:<20} '
                     'with compression={c}').format(d=dsName,
                                                    w=maxDigit,
                                                    t=str(data.dtype),
                                                    s=str(data.shape),
                                                    c=compression))
                ds = f.create_dataset(dsName,
                                      data=data,
                                      chunks=True,
                                      compression=compression)

            # 2. Write extra/auxiliary datasets from ref_file
            if len(auxDsNames) > 0:
                with h5py.File(ref_file, 'r') as fr:
                    for dsName in auxDsNames:
                        ds = fr[dsName]
                        print((
                            'create dataset /{d:<{w}} of {t:<10} in size of {s:<10} '
                            'with compression={c}').format(d=dsName,
                                                           w=maxDigit,
                                                           t=str(ds.dtype),
                                                           s=str(ds.shape),
                                                           c=compression))
                        f.create_dataset(dsName,
                                         data=ds[:],
                                         chunks=True,
                                         compression=compression)

            # 3. metadata
            for key, value in meta.items():
                try:
                    f.attrs[key] = str(value)
                except Exception:
                    # fallback for values with characters h5py cannot store directly
                    f.attrs[key] = str(value.encode('utf-8'))
            print('finished writing to {}'.format(out_file))

    # ISCE / ROI_PAC / GAMMA / Image product
    else:
        key_list = list(datasetDict.keys())
        data_list = []
        for key in key_list:
            data_list.append(datasetDict[key])
        data_type = meta.get('DATA_TYPE', str(data_list[0].dtype)).lower()

        # Write Data File
        print('write {}'.format(out_file))
        # determined by ext
        if ext in ['.unw', '.cor', '.hgt']:
            write_float32(data_list[0], out_file)
            meta['DATA_TYPE'] = 'float32'

        elif ext == '.dem':
            write_real_int16(data_list[0], out_file)
            meta['DATA_TYPE'] = 'int16'

        elif ext in ['.trans']:
            write_float32(data_list[0], data_list[1], out_file)
            meta['DATA_TYPE'] = 'float32'

        elif ext in ['.utm_to_rdc', '.UTM_TO_RDC']:
            data = np.zeros(data_list[0].shape, dtype=np.complex64)
            data.real = datasetDict['rangeCoord']
            data.imag = datasetDict['azimuthCoord']
            data.astype('>c8').tofile(out_file)

        elif ext in ['.mli', '.flt']:
            write_real_float32(data_list[0], out_file)

        elif ext == '.slc':
            write_complex_int16(data_list[0], out_file)

        elif ext == '.int':
            write_complex64(data_list[0], out_file)

        elif ext == '.msk':
            write_byte(data_list[0], out_file)
            meta['DATA_TYPE'] = 'byte'

        # determined by DATA_TYPE
        elif data_type in ['float64']:
            write_real_float64(data_list[0], out_file)

        elif data_type in ['float32', 'float']:
            if len(data_list) == 1:
                write_real_float32(data_list[0], out_file)

            # default to BIL interleave if 'scheme' is not specified in metadata
            elif len(data_list) == 2 and meta.get('scheme', 'BIL') == 'BIL':
                write_float32(data_list[0], data_list[1], out_file)

        elif data_type in ['int16', 'short']:
            write_real_int16(data_list[0], out_file)

        elif data_type in ['int8', 'byte']:
            write_byte(data_list[0], out_file)

        elif data_type in ['bool']:
            write_bool(data_list[0], out_file)

        else:
            print('Unsupported file type: ' + ext)
            return 0

        # write metadata file
        write_roipac_rsc(meta, out_file + '.rsc', print_msg=True)
    return out_file
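# ---------------------------------------------------------------------
# Usage sketch (illustrative only): a hypothetical call through the
# binary branch. A .dem extension selects the write_real_int16() path
# and also produces the ROI_PAC metadata file out_file + '.rsc';
# write_real_int16() and write_roipac_rsc() are assumed to come from
# the same module as write().
import numpy as np

atr = {'LENGTH': '200', 'WIDTH': '300', 'FILE_TYPE': 'dem'}
write({'height': np.ones((200, 300), dtype=np.int16)},
      out_file='srtm30m.dem', metadata=atr)  # writes srtm30m.dem + srtm30m.dem.rsc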
Example #4
def run_geocode(inps):
    """geocode all input files"""
    start_time = time.time()

    # feed the largest file for resample object initialization
    ind_max = np.argmax([os.path.getsize(i) for i in inps.file])

    # prepare geometry for geocoding
    kwargs = dict(interp_method=inps.interpMethod,
                  fill_value=inps.fillValue,
                  nprocs=inps.nprocs,
                  max_memory=inps.maxMemory,
                  software=inps.software,
                  print_msg=True)
    if inps.latFile and inps.lonFile:
        kwargs['lat_file'] = inps.latFile
        kwargs['lon_file'] = inps.lonFile
    res_obj = resample(lut_file=inps.lookupFile,
                       src_file=inps.file[ind_max],
                       SNWE=inps.SNWE,
                       lalo_step=inps.laloStep,
                       **kwargs)
    res_obj.open()
    res_obj.prepare()

    # resample input files one by one
    for infile in inps.file:
        print('-' * 50 + '\nresampling file: {}'.format(infile))
        atr = readfile.read_attribute(infile, datasetName=inps.dset)
        outfile = auto_output_filename(infile, inps)

        # update_mode
        if inps.updateMode:
            print('update mode: ON')
            if ut.run_or_skip(outfile, in_file=[infile,
                                                inps.lookupFile]) == 'skip':
                continue

        ## prepare output
        # update metadata
        if inps.radar2geo:
            atr = attr.update_attribute4radar2geo(atr, res_obj=res_obj)
        else:
            atr = attr.update_attribute4geo2radar(atr, res_obj=res_obj)

        # instantiate output file
        file_is_hdf5 = os.path.splitext(outfile)[1] in ['.h5', '.he5']
        if file_is_hdf5:
            compression = readfile.get_hdf5_compression(infile)
            writefile.layout_hdf5(outfile,
                                  metadata=atr,
                                  ref_file=infile,
                                  compression=compression)
        else:
            dsDict = dict()

        ## run
        dsNames = readfile.get_dataset_list(infile, datasetName=inps.dset)
        maxDigit = max([len(i) for i in dsNames])
        for dsName in dsNames:

            if not file_is_hdf5:
                dsDict[dsName] = np.zeros((res_obj.length, res_obj.width))

            # loop for block-by-block IO
            for i in range(res_obj.num_box):
                src_box = res_obj.src_box_list[i]
                dest_box = res_obj.dest_box_list[i]

                # read
                print('-' * 50 +
                      '\nreading {d:<{w}} in block {b} from {f} ...'.format(
                          d=dsName,
                          w=maxDigit,
                          b=src_box,
                          f=os.path.basename(infile)))

                data = readfile.read(infile,
                                     datasetName=dsName,
                                     box=src_box,
                                     print_msg=False)[0]

                # resample
                data = res_obj.run_resample(src_data=data, box_ind=i)

                # write / save block data
                if data.ndim == 3:
                    block = [
                        0, data.shape[0], dest_box[1], dest_box[3],
                        dest_box[0], dest_box[2]
                    ]
                else:
                    block = [
                        dest_box[1], dest_box[3], dest_box[0], dest_box[2]
                    ]

                if file_is_hdf5:
                    print('write data in block {} to file: {}'.format(
                        block, outfile))
                    writefile.write_hdf5_block(outfile,
                                               data=data,
                                               datasetName=dsName,
                                               block=block,
                                               print_msg=False)
                else:
                    dsDict[dsName][block[0]:block[1], block[2]:block[3]] = data

            # for binary file: ensure same data type
            if not file_is_hdf5:
                dsDict[dsName] = np.array(dsDict[dsName], dtype=data.dtype)

        # write binary file
        if not file_is_hdf5:
            atr['BANDS'] = len(dsDict.keys())
            writefile.write(dsDict,
                            out_file=outfile,
                            metadata=atr,
                            ref_file=infile)

            # create ISCE XML and GDAL VRT file if using ISCE lookup table file
            if inps.latFile and inps.lonFile:
                writefile.write_isce_xml(atr, fname=outfile)

    m, s = divmod(time.time() - start_time, 60)
    print('time used: {:02.0f} mins {:02.1f} secs.\n'.format(m, s))
    return outfile
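# ---------------------------------------------------------------------
# Usage sketch (illustrative only): run_geocode() expects an
# argparse-style namespace. The attribute names below are taken from
# the function body; the values and file names are assumptions.
import numpy as np
from types import SimpleNamespace

inps = SimpleNamespace(
    file=['velocity.h5'],              # input file(s); the largest one initializes resampling
    dset=None,                         # dataset of interest; None = all
    lookupFile='inputs/geometryRadar.h5',
    latFile=None,                      # optional ISCE latitude file
    lonFile=None,                      # optional ISCE longitude file
    SNWE=None,                         # output bounding box
    laloStep=None,                     # output pixel step in (lat, lon)
    interpMethod='nearest',
    fillValue=np.nan,
    nprocs=1,
    maxMemory=4,
    software='pyresample',
    radar2geo=True,                    # radar -> geo coordinates
    updateMode=False,
)
# run_geocode(inps)  # requires readfile/writefile/resample from the host package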