Example #1
0
    def createBrowseImages(self):
        """
        Create browse images for the geocoded products in ``self._listPng``.

        For each ``*unw.geo`` file: render the unwrapped phase, a version
        re-wrapped at 20 rad, and an amplitude image; for each ``*cor.geo``
        file: render the coherence and a phase-only rendering wrapped at 1.2.
        Rendering happens by running ``mdx``/``mdx.py`` commands through
        ``createImage`` (defined elsewhere in this file).
        """
        import math  # NOTE(review): unused here; kept to avoid changing behavior
        for name in self._listPng:
            # Dead commented-out variant kept from the original (string literal):
            '''
            if(name.count(self._corrections)):
                command = 'mdx.py -P ' + name + ' -cmap cmy -wrap 20'
                self.saveImage(command,name + '_20rad')
            '''
            if name.count('unw.geo'):
                # Plain rendering of the unwrapped interferogram.
                command = 'mdx.py -P ' + name
                createImage(command, name)
                # Re-wrapped at 20 rad per color cycle.
                command = 'mdx.py -P ' + name + ' -wrap 20'
                createImage(command, name + '_20rad')
                parser = createFileParser('xml')
                #get the properties from the one of the geo files
                prop, fac, misc = parser.parse(name + '.xml')
                coordinate1 = key_of_same_content('coordinate1', prop)[1]
                width = int(key_of_same_content('size', coordinate1)[1])
                # Amplitude rendering; -rtlr takes width*4 — presumably a
                # per-line trailer size in bytes; confirm against mdx docs.
                command = 'mdx -P ' + name + ' -s ' + str(
                    width) + ' -amp -r4 -rtlr ' + str(int(width) * 4) + ' -CW'
                createImage(command, self._amplitude)
            elif name.count('cor.geo'):
                # Plain rendering of the coherence file.
                command = 'mdx.py -P ' + name
                createImage(command, name)
                parser = createFileParser('xml')
                #get the properties from the one of the geo files
                prop, fac, misc = parser.parse(name + '.xml')
                coordinate1 = key_of_same_content('coordinate1', prop)[1]
                width = int(key_of_same_content('size', coordinate1)[1])
                # Phase-only coherence rendering, color-wrapped at 1.2.
                command = 'mdx -P ' + name + ' -s ' + str(
                    width) + ' -r4 -rhdr ' + str(
                        int(width) * 4) + ' -cmap cmy -wrap 1.2'
                createImage(command,
                            name.replace('.cor.geo', '_ph_only.cor.geo'))
Example #2
0
def loadImage(fname):
    '''
    Load the file into the appropriate ISCE image object.

    Parameters
    ----------
    fname : str
        Path to either the data file or its ISCE ``.xml`` metadata file.

    Returns
    -------
    (img, dataName, metaName)
        Image object opened in READ access mode, the data file path, and
        the metadata file path.

    Raises
    ------
    ImportError
        If ISCE is not installed or not importable.
    '''
    try:
        import iscesys
        import isceobj
        from iscesys.Parsers.FileParserFactory import createFileParser
    except ImportError as e:
        # Narrowed from a bare ``except``: a bare clause would also mask
        # unrelated failures (e.g. KeyboardInterrupt) as an ImportError.
        raise ImportError('ISCE has not been installed or is not importable') from e

    # Accept either the data file or the metadata file as input.
    if not fname.endswith('.xml'):
        dataName = fname
        metaName = fname + '.xml'
    else:
        metaName = fname
        dataName = os.path.splitext(fname)[0]

    parser = createFileParser('xml')
    prop, fac, misc = parser.parse(metaName)

    # Pick the concrete image class from marker properties in the metadata.
    if 'reference' in prop:
        img = isceobj.createDemImage()
    elif 'number_good_bytes' in prop:
        img = isceobj.createRawImage()
    else:
        img = isceobj.createImage()
    img.init(prop, fac, misc)

    img.setAccessMode('READ')
    return img, dataName, metaName
Example #3
0
def tropoCorrection(filein, datetime1, datetime2, demxml):
    """
    Run the external ``tropwrap.py`` tropospheric-correction tool on an
    interferogram.

    Parameters
    ----------
    filein : str
        Interferogram file to correct.
    datetime1, datetime2 : datetime.datetime
        Acquisition date/times of the two scenes.
    demxml : str
        ISCE ``.xml`` metadata of the DEM; the DEM file itself is assumed
        to be the same path without the ``.xml`` suffix.
    """
    from iscesys.Parsers.FileParserFactory import createFileParser
    parser = createFileParser('xml')
    prop, fact, misc = parser.parse(demxml)
    dem = demxml.replace('.xml', '')
    # DEM grid geometry: Coordinate2 is the latitude axis, Coordinate1 the
    # longitude axis (per the variable names below).
    latstart = prop['Coordinate2']['startingvalue']
    latdelta = prop['Coordinate2']['delta']
    length = prop['Coordinate2']['size']
    lonstart = prop['Coordinate1']['startingvalue']
    londelta = prop['Coordinate1']['delta']
    width = prop['Coordinate1']['size']
    # Deltas may be negative, hence the min/max ordering of the bounds.
    latend = latstart + latdelta * length
    lonend = lonstart + londelta * width
    latmin = min(latstart, latend)
    latmax = max(latstart, latend)
    lonmin = min(lonstart, lonend)
    lonmax = max(lonstart, lonend)
    date1 = datetime1.date().isoformat()
    date2 = datetime2.date().isoformat()
    time1 = datetime1.time()
    time2 = datetime2.time()
    hour1 = time1.hour
    hour2 = time2.hour
    min1 = time1.minute
    min2 = time2.minute

    # NOTE(review): the '-resolution <w>+/<l>+' argument format looks
    # unusual — confirm against tropwrap.py's expected syntax.
    command = 'tropwrap.py -igram ' + filein + ' -resolution ' + str(width) + '+/' + str(length) + '+' +   ' -latmin ' + str(latmin) +' -latmax ' + str(latmax) + ' -lonmin ' + str(lonmin) \
              + ' -lonmax ' + str(lonmax) + ' -date1 ' + str(date1) + \
              ' -date2 ' + str(date2)  + ' -hour1 ' + str(hour1) + ' -min1 '+ str(min1) + ' -hour2 ' + \
              str(hour2) + ' -min2 ' + str(min2) + ' -gps gipsy -Wx off -interp triang -png on -ISCE_DEM ' + dem
    subprocess.call(command, shell=True)
Example #4
0
def mmapFromISCE(fname, logger):
    '''
    Create a file mmap object using information in an ISCE XML.

    Parameters
    ----------
    fname : str
        Path to either the data file or its ISCE ``.xml`` metadata file.
    logger : logging.Logger
        Logger that receives a debug message describing the mmap.

    Returns
    -------
    memmap object over the data file (read-only).

    Raises
    ------
    ImportError
        If ISCE is not installed or not importable.
    '''
    try:
        import isce
        import iscesys
        from iscesys.Parsers.FileParserFactory import createFileParser
    except ImportError as e:
        # Narrowed from a bare ``except``, which would also have reported
        # unrelated failures as an ImportError.
        raise ImportError('ISCE has not been installed or is not importable') from e

    # Accept either the data file or the metadata file as input.
    if not fname.endswith('.xml'):
        dataName = fname
        metaName = fname + '.xml'
    else:
        metaName = fname
        dataName = os.path.splitext(fname)[0]

    parser = createFileParser('xml')
    prop, fac, misc = parser.parse(metaName)

    logger.debug('Creating readonly ISCE mmap with \n' + 'file = %s \n' %
                 (dataName) + 'bands = %d \n' % (prop['number_bands']) +
                 'width = %d \n' % (prop['width']) + 'length = %d \n' %
                 (prop['length']) + 'scheme = %s \n' % (prop['scheme']) +
                 'dtype = %s \n' % (prop['data_type']))

    # memmap and NUMPY_type are module-level helpers defined elsewhere in
    # this file.
    mObj = memmap(dataName,
                  nchannels=prop['number_bands'],
                  nxx=prop['width'],
                  nyy=prop['length'],
                  scheme=prop['scheme'],
                  dataType=NUMPY_type(prop['data_type']))

    return mObj
    def checkLocation(self):
        """
        Resolve the image data file referenced by ``self.metadatalocation``.

        Search order: the path recorded in the metadata ('file_name'), then
        that basename relative to the metadata file's directory, then
        relative to the current working directory.  Returns the resolved
        path; prints the searched locations and raises ``Exception`` if the
        file cannot be found anywhere.
        """
        from iscesys.Parsers.FileParserFactory import createFileParser
        parser = createFileParser('xml')
        #get the properties from the file
        prop, fac, misc = parser.parse(self.metadatalocation)
        #first check if it exists as it is
        filename = ''

        if not (os.path.exists(prop['file_name'])):
            name = os.path.basename(prop['file_name'])
            #check the path relative to the xml file
            filename = os.path.join(
                os.path.split(self.metadatalocation)[0], name)
            #check if relative to cwd
            if not (os.path.exists(filename)):
                filename = os.path.join(os.getcwd(), name)
                if not (os.path.exists(filename)):
                    filename = ''
        else:
            filename = prop['file_name']
        if not filename:
            # Nothing found: report every location that was searched.
            paths = self.uniquePath([
                os.path.split(prop['file_name'])[0],
                os.path.split(self.metadatalocation)[0],
                os.getcwd()
            ])
            toptr = '\n'.join(paths)
            print(
                'The image file', name, 'specified in the metadata file',
                self.metadatalocation, 'cannot be found in',
                'any of the following default locations:'
                if len(paths) > 1 else 'in the following location:', toptr)
            raise Exception

        return filename
    def getInfoFromXml(self, imagexml, image):
        """
        Determine image width, length, extension, data type and number of
        bands from an ISCE ``.xml`` metadata file.

        Supports both the new metadata format (sizes under coordinate1/2)
        and the old one (a top-level 'width' key).  For geocoded images it
        additionally records lat/lon start/delta values on ``self`` so a
        kml can be built later.  Returns a dict with keys 'image', 'ext',
        'width', 'length', 'dataType', 'numBands'; raises a bare
        ``Exception`` when width or extension cannot be determined.
        """
        # first is alway the xml file
        ext = None
        dataType = None
        width = None
        length = None
        PA = createFileParser('xml')
        dictNow, dictFact, dictMisc = PA.parse(
            imagexml)  #get only the property dictionary
        numBands = 0

        numBands = key_of_same_content('number_bands', dictNow)[1]
        dataTypeImage = key_of_same_content('data_type', dictNow)[1]
        dataType = self._mapDataType['xml'][dataTypeImage]
        try:  #new format of image
            coordinate1 = key_of_same_content('coordinate1', dictNow)[1]
            width = key_of_same_content('size', coordinate1)[1]
            coordinate2 = key_of_same_content('coordinate2', dictNow)[1]
            length = key_of_same_content('size', coordinate2)[1]
            try:  #only for geo image to create kml
                self._width.append(float(width))
                self._startLon.append(
                    float(
                        key_of_same_content('startingValue', coordinate1)[1]))
                self._deltaLon.append(
                    float(key_of_same_content('delta', coordinate1)[1]))

                coordinate2 = key_of_same_content('coordinate2', dictNow)[1]
                self._length.append(
                    float(key_of_same_content('size', coordinate2)[1]))
                self._startLat.append(
                    float(
                        key_of_same_content('startingValue', coordinate2)[1]))
                self._deltaLat.append(
                    float(key_of_same_content('delta', coordinate2)[1]))
                self._names.append(imagexml.replace('.xml', ''))
            except Exception as e:
                pass  # not a geo image
        except:  # use old format
            try:
                width = key_of_same_content('width', dictNow)[1]
            except:
                print("Error. Cannot figure out width from input file.")
                raise Exception

        ext = self.getIsceExt(dictNow, image)

        if ext is None or dataType is None or width is None:  #nothing worked. Through exception caught next
            print("Error. Cannot figure out extension from input file.")
            raise Exception
        return {
            'image': image,
            'ext': ext,
            'width': width,
            'length': length,
            'dataType': dataType,
            'numBands': numBands
        }
def main():
    """
    Run the Topo test: read the reference mocomp orbit from the file named
    on the command line, set up the DEM and interferogram images with
    hard-coded frame parameters, run topo() and print the resulting
    geographic bounds.
    """
    #need actual or soft link to alos.int and dem.la
    referenceOrbit = sys.argv[1]  #look for reference_orbit.txt
    fin1 = open(referenceOrbit)
    allLines = fin1.readlines()
    s_mocomp = []
    # Third whitespace-separated column of each line is the s-coordinate.
    for line in allLines:
        lineS = line.split()
        s_mocomp.append(float(lineS[2]))
    fin1.close()
    from isceobj import Image as IF

    demNameXml = 'la.dem.xml'
    from iscesys.Parsers.FileParserFactory import createFileParser
    parser = createFileParser('xml')
    #get the properties from the file init file
    prop = parser.parse(demNameXml)[0]
    objDem = IF.createDemImage()
    objDem.initProperties(prop)
    objDem.createImage()
    obj = Topo()
    obj.setReferenceOrbit(s_mocomp)
    intImage = IF.createIntImage()
    # Hard-coded test-scene values — presumably copied from a specific ALOS
    # processing run; confirm before reusing on other data.
    width = 1328
    filename = 'alos.int'
    intImage.initImage(filename, 'read', width)
    intImage.createImage()
    obj.wireInputPort(name='interferogram', object=intImage)
    obj.wireInputPort(name='dem', object=objDem)
    obj.pegLatitude = 0.58936848339144254
    obj.pegLongitude = -2.1172133973559606
    obj.pegHeading = -0.22703294510994310
    obj.planetLocalRadius = 6356638.1714100000
    # Frame information
    obj.slantRangePixelSpacing = 9.3685142500000005
    obj.prf = 1930.502000000000
    obj.radarWavelength = 0.23605699999999999
    obj.rangeFirstSample = 750933.00000000000
    # Doppler information
    # Make_raw information
    obj.spacecraftHeight = 698594.96239000000
    obj.bodyFixedVelocity = 7595.2060428100003
    obj.isMocomp = 3072
    obj.numberRangeLooks = 1
    obj.numberAzimuthLooks = 4
    obj.dopplerCentroidConstantTerm = .0690595
    obj.topo()
    minLat = obj.getMinimumLatitude()
    maxLat = obj.getMaximumLatitude()
    minLon = obj.getMinimumLongitude()
    maxLon = obj.getMaximumLongitude()
    azspace = obj.getAzimuthSpacing()
    s0 = obj.getSCoordinateFirstLine()
    print(minLat, maxLat, minLon, maxLon, azspace, s0)
    #squintShift = obj.getSquintShift()
    #for el in squintShift:
    #print(el)
    intImage.finalizeImage()
    objDem.finalizeImage()
Example #8
0
def wrapCorrection(waveLength, ifgName, fileLat, fileLon, fileLos,
                   ifgCorrectedName):
    """
    Apply a tropospheric phase correction to a wrapped interferogram.

    Reads the correction grid referenced by 'tropwrap.pck' (a GMT grd file
    plus its grdinfo string), interpolates it to the interferogram lat/lon
    geometry, converts it to phase scaled by 4*pi/wavelength and the LOS
    angle from *fileLos*, writes the correction to 'uwcorr.unw' and the
    corrected complex interferogram to *ifgCorrectedName*.
    """
    # cPickle only exists on Python 2; fall back to pickle on Python 3.
    try:
        import cPickle as cp
    except ImportError:
        import pickle as cp
    from iscesys.Parsers.FileParserFactory import createFileParser
    parser = createFileParser('xml')
    prop, fact, misc = parser.parse(ifgName + '.xml')
    width = prop['Coordinate1']['size']
    # Pickled data must be read in binary mode (required on Python 3).
    fp = open('tropwrap.pck', 'rb')
    dataIn = cp.load(fp)
    fp.close()
    correctionName = dataIn[0]
    grdinfo = dataIn[1]
    correctionXyzName = correctionName[:-3] + 'xyz'
    command = 'grd2xyz -Zf ' + correctionName + ' > ' + correctionXyzName
    subprocess.call(command, shell=True)
    # hate to do that but the grdinfo is a string and needs to be parsed
    grdinfoSp = grdinfo.split(' ')
    lonMin = float(grdinfoSp[grdinfoSp.index('x_min:') + 1])
    lonDelta = float(grdinfoSp[grdinfoSp.index('x_inc:') + 1])
    lonN = int(grdinfoSp[grdinfoSp.index('nx:') + 1].split()[0])
    latMax = float(grdinfoSp[grdinfoSp.index('y_max:') + 1])
    latDelta = float(grdinfoSp[grdinfoSp.index('y_inc:') + 1])
    latN = int(grdinfoSp[grdinfoSp.index('ny:') + 1].split()[0])
    lon = lonMin + lonDelta * np.arange(lonN)
    lat = latMax - latDelta * np.arange(latN)
    datain = readImage(correctionXyzName, '<f', lonN)
    # Zero out NaNs so the interpolation stays finite.
    indxBad = np.where(np.isnan(datain))
    datain[indxBad[0], indxBad[1]] = 0
    latOut = readImage(fileLat, '<f', width)
    lonOut = readImage(fileLon, '<f', width)
    #read as same width as latOut or lonOut, but then skip every other line when looping
    losOut = readImage(fileLos, '<f', width)
    bi = BI(lon, -lat, datain)
    geoCorrection = np.zeros(latOut.shape)
    # xrange -> range for Python 3 compatibility.
    for i in range(latOut.shape[0]):
        geoCorrection[i, :] = -bi(lonOut[i, :], -latOut[i, :]) * (
            4 * np.pi / waveLength) / np.cos(np.radians(losOut[i * 2, :]))
    #free memory
    del latOut
    del lonOut
    ifg = readImage(ifgName, '<f', 2 * width)
    ifg = np.reshape(ifg, (ifg.shape[0], width, 2))
    dim = geoCorrection.shape
    geoCorrection.astype(np.float32).tofile('uwcorr.unw')
    #save('test.geo',np.reshape(geoCorrection,dim[0]*dim[1]),'f')
    # array.tofile() requires a binary-mode file object on Python 3.
    fp = open(ifgCorrectedName, 'wb')
    line = np.zeros((width, 2))
    for i in range(ifg.shape[0]):
        cpx = (ifg[i, :, 0] + np.complex64(1.0j) * ifg[i, :, 1]) * (np.exp(
            np.complex64(1.0j) * geoCorrection[i, :]))
        line[:, 0] = np.real(cpx)
        line[:, 1] = np.imag(cpx)
        arrayOut = array('f', np.reshape(line, 2 * width))
        arrayOut.tofile(fp)
    fp.close()
    def getInfoFromRsc(self, imagersc, image):
        """
        Determine image name, width, image type and data type from a
        ROI_PAC-style ``.rsc`` metadata file.

        Returns a dict with keys 'image', 'ext', 'width', 'dataType'.
        Geolocation values (when 'LAT_REF1' is present) are appended to the
        ``self._*`` lists for later kml generation.

        NOTE(review): if the .rsc lacks a 'WIDTH' key, ``width`` is never
        bound and the final ``return`` raises UnboundLocalError instead of
        the intended error path — confirm WIDTH is always present.
        """
        try:
            PA = createFileParser('rsc')
            dictOut = PA.parse(imagersc)
            #dictOut has a top node that is just a name
            dictNow = dictOut[list(dictOut.keys())[0]]
            if 'WIDTH' in dictNow:
                width = int(dictNow['WIDTH'])
            try:
                if 'LAT_REF1' in dictNow:
                    #extract the geo info
                    self._width.append(float(width))
                    self._startLon.append(float(dictNow['X_FIRST']))
                    self._deltaLon.append(float(dictNow['X_STEP']))
                    self._length.append(float(dictNow['FILE_LENGTH']))
                    self._startLat.append(float(dictNow['Y_FIRST']))
                    self._deltaLat.append(float(dictNow['Y_STEP']))
                    self._names.append(image)
            except:
                pass  #not a geo file
        except:
            print("Error. Cannot figure out width from input file.")
            raise Exception
        # assume imagersc = 'name.ext.rsc'
        try:
            ext = imagersc.split('.')[-2]
        except:
            print("Error. Cannot figure out extension from input file.")
            raise Exception
        found = False

        # Validate the extension against the table of known extensions.
        for k, v in self._ext.items():
            if ext in v:
                found = True
                break
        if not found:
            print("Error. Invalid image extension", ext, ".")
            self.printExtensions()
            raise Exception

        extNow = self.getRscExt(ext)

        dataType = self._mapDataType['rsc'][extNow]

        return {
            'image': image,
            'ext': ext,
            'width': width,
            'dataType': dataType
        }
Example #10
0
def main(inps):
    '''
    The main driver: multilook the input image by the requested number of
    azimuth/range looks and return the output filename.
    '''

    # Accept either the data file or its .xml metadata as the input name.
    infile = inps.infile
    if infile.endswith('.xml'):
        inFileXml = infile
        inFile = os.path.splitext(infile)[0]
    else:
        inFileXml = infile + '.xml'
        inFile = infile

    # Derive the output name when none was given; otherwise strip a
    # trailing .xml from the user-supplied one.
    if inps.outfile is None:
        base, suffix = os.path.splitext(inFile)
        looks_tag = '.{0}alks_{1}rlks'.format(inps.azlooks, inps.rglooks)
        outFile = base + looks_tag + suffix
    elif inps.outfile.endswith('.xml'):
        outFile = os.path.splitext(inps.outfile)[0]
    else:
        outFile = inps.outfile

    print('Output filename : {0}'.format(outFile))
    #hackish, just to know the image type to instantiate the correct type
    #until we put the info about how to generate the instance in the xml
    from iscesys.Parsers.FileParserFactory import createFileParser
    xmlProp = createFileParser('xml').parse(inFileXml)[0]
    if 'image_type' in xmlProp and xmlProp['image_type'] == 'dem':
        inImage = createDemImage()
    else:
        inImage = createImage()

    inImage.load(inFileXml)
    inImage.filename = inFile

    # Configure and run the multilooking operator.
    looker = Looks()
    looker.setDownLooks(inps.azlooks)
    looker.setAcrossLooks(inps.rglooks)
    looker.setInputImage(inImage)
    looker.setOutputFilename(outFile)
    looker.looks()

    return outFile
Example #11
0
    def create(self,filename):
        """
        Load the image described by ``filename`` (+ '.xml') and choose an
        interpolation method for it.

        The method comes from ``self._interp_map`` keyed on the image type;
        if the type is unmapped, 'sinc' is used for complex (CFLOAT) data
        and 'nearest' for everything else, including integer images.
        Stores the results on ``self._image`` / ``self._method`` and also
        returns them as ``(image, method)``.
        """
        from iscesys.Parsers.FileParserFactory import createFileParser
        from isceobj import createImage
        parser = createFileParser('xml')
        prop, fac, misc = parser.parse(filename + '.xml')

        self._image  = createImage()
        self._image.init(prop,fac,misc)
        self._image.accessMode = 'read'
        #try few ways. If the image type is not part of the map use sinc for complex and nearest for float
        if self._image.imageType in self._interp_map:
            self._method = self._interp_map[self._image.imageType]
        elif self._image.dataType == 'CFLOAT':
            # bug fix: was ``self.image`` (AttributeError at runtime); the
            # attribute set above is ``self._image``.
            self._method = 'sinc'
        else:#use nearest for all other cases including int type of images
            self._method = 'nearest'

        #allow to get image and method from the instance or as return value
        return self._image,self._method
Example #12
0
def main():
    """
    Exercise the polynomial accessors: build 2-D and 1-D polynomials tied
    to the image described by ``sys.argv[1]``, run the interpolator test,
    dump both to xml, reload them, and run the test again on the reloaded
    accessors.
    """
    from iscesys.Parsers.FileParserFactory import createFileParser
    from isceobj import createImage
    parser = createFileParser('xml')
    #get the properties from the file init file
    prop, fac, misc = parser.parse(sys.argv[1])
    #this dictionary has an initial dummy key whose value is the dictionary with all the properties

    image = createImage()
    image.init(prop, fac, misc)

    #create the params
    azOrder = 2
    rgOrder = 3
    cnt = 0.0
    # Coefficients are a simple increasing sequence so that results are
    # predictable in the interpolator test.
    params = [[0 for x in range(rgOrder + 1)] for x in range(azOrder + 1)]
    paramsaz = [0 for x in range(azOrder + 1)]
    for i in range(azOrder + 1):
        paramsaz[i] = cnt
        for j in range(rgOrder + 1):
            params[i][j] = cnt
            cnt = cnt + 1
    #create a 2d accessor
    p2d = createPoly('2d', name='test')
    p2d.initPoly(rgOrder, azOrder, coeffs=params, image=image)

    #create a 1d accessor for azimuth poly (direction = 'y')
    p1d = createPoly('1d', name='test')
    p1d.initPoly(azOrder, coeffs=paramsaz, image=image, direction='y')

    #call the test
    p2d.dump('p2d.xml')
    p1d.dump('p1d.xml')

    ti.testInterpolator(p2d._accessor, p1d._accessor)

    # Round-trip: reload the dumped polynomials and re-run the test.
    p2dNew = createPoly('2d', name='test')
    #create a 1d accessor for azimuth poly (direction = 'y')
    p1dNew = createPoly('1d', name='test')
    #call the test
    p2dNew.load('p2d.xml')
    p1dNew.load('p1d.xml')
    ti.testInterpolator(p2dNew._accessor, p1dNew._accessor)
Example #13
0
def runMultilook(in_dir, out_dir, alks, rlks):
    """
    Multilook the standard geometry rasters found in *in_dir* into
    *out_dir* using mroipac Looks, and return *out_dir*.
    """
    print(
        'generate multilooked geometry files with alks={} and rlks={}'.format(
            alks, rlks))
    from iscesys.Parsers.FileParserFactory import createFileParser
    xml_parser = createFileParser('xml')

    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)
        print('create directory: {}'.format(out_dir))

    bases = ('hgt', 'incLocal', 'lat', 'lon', 'los', 'shadowMask',
             'waterMask')
    for fbase in bases:
        fname = '{}.rdr'.format(fbase)
        src = os.path.join(in_dir, fname)
        dst = os.path.join(out_dir, fname)

        if not os.path.isfile(src):
            continue

        # DEM-typed rasters need the DEM image class; everything else
        # uses the generic image class.
        meta = xml_parser.parse(src + '.xml')[0]
        if 'image_type' in meta and meta['image_type'] == 'dem':
            img = isceobj.createDemImage()
        else:
            img = isceobj.createImage()

        img.load(src + '.xml')
        img.filename = src

        looker = Looks()
        looker.setDownLooks(alks)
        looker.setAcrossLooks(rlks)
        looker.setInputImage(img)
        looker.setOutputFilename(dst)
        looker.looks()

        # Keep a full-resolution copy of the xml/vrt next to the output so
        # the number of looks can be recovered later; the file path inside
        # the copied .xml is not updated.
        shutil.copy(src + '.xml', dst + '.full.xml')
        shutil.copy(src + '.vrt', dst + '.full.vrt')

    return out_dir
Example #14
0
    def getGeoLocation(self):
        """
        Read the geographic bounding box from 'topophase.cor.geo.xml'.

        Returns (minLat, maxLat, minLon, maxLon), computed from the
        starting value, spacing and size of the two coordinate axes.
        """
        parser = createFileParser('xml')
        # Properties come from one of the geocoded products.
        prop = parser.parse('topophase.cor.geo.xml')[0]

        lon_axis = key_of_same_content('coordinate1', prop)[1]
        n_lon = float(key_of_same_content('size', lon_axis)[1])
        lon0 = float(key_of_same_content('startingValue', lon_axis)[1])
        dlon = float(key_of_same_content('delta', lon_axis)[1])
        lon1 = lon0 + dlon * n_lon

        lat_axis = key_of_same_content('coordinate2', prop)[1]
        n_lat = float(key_of_same_content('size', lat_axis)[1])
        lat0 = float(key_of_same_content('startingValue', lat_axis)[1])
        dlat = float(key_of_same_content('delta', lat_axis)[1])
        lat1 = lat0 + dlat * n_lat

        # Deltas may be negative, so order the bounds explicitly.
        return (min(lat0, lat1), max(lat0, lat1), min(lon0, lon1),
                max(lon0, lon1))
Example #15
0
def runMultilook(in_dir,
                 out_dir,
                 alks,
                 rlks,
                 in_ext='.rdr',
                 out_ext='.rdr',
                 method='gdal',
                 fbase_list=('hgt', 'incLocal', 'lat', 'lon', 'los',
                             'shadowMask', 'waterMask')):
    """
    Multilook geometry files.

    Parameters
    ----------
    in_dir, out_dir : str
        Input and output directories.
    alks, rlks : int
        Number of looks in azimuth (down) and range (across).
    in_ext, out_ext : str
        Extensions of the input/output files.
    method : str
        'isce' (mroipac Looks) or 'gdal' (gdal.Translate).
    fbase_list : sequence of str
        Base names of the geometry files to process.  (Default changed
        from a mutable list literal to an equivalent tuple.)

    Returns
    -------
    str
        *out_dir*.

    Raises
    ------
    ValueError
        If *method* is neither 'isce' nor 'gdal'.
    """
    from iscesys.Parsers.FileParserFactory import createFileParser
    from mroipac.looks.Looks import Looks

    msg = 'generate multilooked geometry files with alks={} and rlks={}'.format(
        alks, rlks)
    if method == 'isce':
        msg += ' using mroipac.looks.Looks() ...'
    else:
        msg += ' using gdal.Translate() ...'
    print('-' * 50 + '\n' + msg)

    # create 'geom_reference' directory
    os.makedirs(out_dir, exist_ok=True)

    # multilook files one by one
    for fbase in fbase_list:
        in_file = os.path.join(in_dir, '{}{}'.format(fbase, in_ext))
        out_file = os.path.join(out_dir, '{}{}'.format(fbase, out_ext))

        # Only process files whose data, .vrt and .xml companions all exist.
        if all(os.path.isfile(in_file + ext) for ext in ['', '.vrt', '.xml']):
            print('multilook {}'.format(in_file))

            # option 1 - Looks module (isce)
            if method == 'isce':
                xmlProp = createFileParser('xml').parse(in_file + '.xml')[0]
                if ('image_type' in xmlProp
                        and xmlProp['image_type'] == 'dem'):
                    inImage = isceobj.createDemImage()
                else:
                    inImage = isceobj.createImage()

                inImage.load(in_file + '.xml')
                inImage.filename = in_file

                lkObj = Looks()
                lkObj.setDownLooks(alks)
                lkObj.setAcrossLooks(rlks)
                lkObj.setInputImage(inImage)
                lkObj.setOutputFilename(out_file)
                lkObj.looks()

            # option 2 - gdal_translate (gdal)
            elif method == 'gdal':
                ds = gdal.Open(in_file, gdal.GA_ReadOnly)
                in_wid = ds.RasterXSize
                in_len = ds.RasterYSize

                # Truncate so the source window is an exact multiple of the
                # look factors.
                out_wid = int(in_wid / rlks)
                out_len = int(in_len / alks)
                src_wid = out_wid * rlks
                src_len = out_len * alks

                options_str = '-of ENVI -a_nodata 0 -outsize {ox} {oy} -srcwin 0 0 {sx} {sy} '.format(
                    ox=out_wid, oy=out_len, sx=src_wid, sy=src_len)
                gdal.Translate(out_file, ds, options=options_str)

                # generate ISCE .xml file
                if not os.path.isfile(out_file + '.xml'):
                    cmd = 'gdal2isce_xml.py -i {}.vrt'.format(out_file)
                    print(cmd)
                    os.system(cmd)

            else:
                raise ValueError(
                    'un-supported multilook method: {}'.format(method))

            # copy the full resolution xml/vrt file from ./merged/geom_reference to ./geom_reference
            # to facilitate the number of looks extraction
            # the file path inside .xml file is not, but should, updated
            if in_file != out_file + '.full':
                shutil.copy(in_file + '.xml', out_file + '.full.xml')
                shutil.copy(in_file + '.vrt', out_file + '.full.vrt')

    return out_dir
    # Bug fix: removed an unreachable trailing ``return values`` that
    # referenced an undefined name.

# Script entry point: upsample a DEM described by ISCE .xml metadata.
if __name__ == "__main__":
    inps = cmdLineParse()

    # Accept either the data file or its .xml metadata as the input name.
    if inps.infile.endswith('.xml'):
        inFileXml = inps.infile
        inFile = os.path.splitext(inps.infile)[0]
    else:
        inFile = inps.infile
        inFileXml = inps.infile + '.xml'

    # Output name is always the data file (strip a trailing .xml if given).
    if inps.outfile.endswith('.xml'):
        outFile = os.path.splitext(inps.outfile)[0]
    else:
        outFile = inps.outfile

    parser = createFileParser('xml')
    prop, fac, misc = parser.parse(inFileXml)


    inImage = createDemImage()
    inImage.init(prop,fac,misc)
    inImage.filename = inFile
    inImage.createImage()

    # inps.factor carries (yFactor, xFactor) — presumably from the command
    # line parser; confirm ordering against cmdLineParse().
    upsampObj = UpsampleDem()
    upsampObj.method = inps.method
    upsampObj.setOutputFilename(outFile)
    upsampObj.upsampledem(demImage=inImage, yFactor=inps.factor[0], xFactor=inps.factor[1])
Example #17
0
def main(argv):
    '''
    Package geocoded interferogram products and metadata into an HDF5 file.

    Reads the hardwired geocoded products (wrapped/unwrapped interferogram,
    coherence, LOS), pulls metadata from insarProc.xml, then writes a
    GEOCODE group with the datasets plus file-level attributes into an
    auto-named .h5 file in the current directory.
    '''
    # GET THE COMMAND LINE OPTIONS
    clos = parse()

    ### READ GEOCODE DATASETS ###
    # these are hardwired in here, change if you have different naming
    # conventions or want to include different products
    int_file = "filt_topophase.flat.geo"
    xmlfile = int_file + ".xml"
    cor_file = "phsig.cor.geo"
    unw_file = "filt_topophase.flat.unw.geo"
    rdr_file = 'los.rdr.geo'

    PA = createFileParser('xml')
    dictOut, dictFact, dictMisc = PA.parse(xmlfile)
    width = dictOut['width']
    length = dictOut['length']
    xstep = dictOut['Coordinate1']['delta']
    ystep = dictOut['Coordinate2']['delta']
    north = dictOut['Coordinate2']['startingvalue']
    south = north + length * ystep
    west = dictOut['Coordinate1']['startingvalue']
    east = west + width * xstep

    inta, intp = read_complex64(int_file, length, width)
    unwa, unwp = read_float32(unw_file, length, width)
    rdra, rdrp = read_float32(rdr_file, length, width)
    corp = np.fromfile(cor_file, dtype=np.float32).reshape(length, width)

    #################################
    ###  METADATA
    #################################
    # we will grab most of the metadata from the insarProc.xml file
    tree = ET.parse('insarProc.xml')
    root = tree.getroot()
    first_date = datetime.datetime.strptime(
        root.find('master/frame/SENSING_START').text, '%Y-%m-%d %H:%M:%S.%f')
    last_date = datetime.datetime.strptime(
        root.find('slave/frame/SENSING_START').text, '%Y-%m-%d %H:%M:%S.%f')
    meta_dict = {}
    ## MANDATORY METADATA ##
    meta_dict['mission'] = root.find('master/platform/MISSION').text.replace(
        "'", "").replace("b", "")
    meta_dict['beam_swath'] = clos.beam_swath
    if root.find('master/frame/TRACK_NUMBER').text == 'None':
        if 'CSK' in meta_dict['mission']:
            # COSMO-SkyMed: derive a relative orbit from the absolute orbit
            # number modulo 237 (CSKS4 offset by 193, per the original).
            if meta_dict['mission'] == 'CSKS4':
                meta_dict['relative_orbit'] = (int(
                    root.find('master/frame/ORBIT_NUMBER').text) - 193) % 237
            else:
                meta_dict['relative_orbit'] = int(
                    root.find('master/frame/ORBIT_NUMBER').text) % 237
            meta_dict['mission'] = meta_dict[
                'mission'][:3]  # only take the CSK part
    else:
        meta_dict['relative_orbit'] = int(
            root.find('master/frame/TRACK_NUMBER').text)
    meta_dict['first_date'] = first_date.strftime("%Y%m%d")
    meta_dict['last_date'] = last_date.strftime("%Y%m%d")
    meta_dict[
        'processing_type'] = clos.processing_type  # SET AS A DEFAULT IN parse()
    #    meta_dict['scene_footprint'] = footprintFromPickle()
    meta_dict['scene_footprint'] = footprintFromLogFile()

    ## RECOMMENDED METADATA ##
    if clos.beam_mode:
        meta_dict['beam_mode'] = clos.beam_mode
    meta_dict['frame'] = 0  # placeholder (was the literal 0000)
    meta_dict['flight_direction'] = root.find(
        'master/frame/PASS_DIRECTION').text.replace("'", "").replace("b", "")
    if int(root.find('master/lookSide').text) == -1:
        meta_dict['look_direction'] = 'R'
    else:
        meta_dict['look_direction'] = 'L'
    meta_dict['wavelength'] = float(root.find('master/wavelength').text)
    meta_dict['polarization'] = root.find(
        'master/frame/POLARIZATION').text.replace("'", "").replace("b", "")
    meta_dict['prf'] = float(root.find('master/prf').text)
    meta_dict['master_platform'] = root.find(
        'master/platform/MISSION').text.replace("'", "").replace("b", "")
    meta_dict['master_absolute_orbit'] = int(
        root.find('master/frame/ORBIT_NUMBER').text)
    meta_dict['master_sensing_start'] = root.find(
        'master/frame/SENSING_START').text
    meta_dict['master_sensing_stop'] = root.find(
        'master/frame/SENSING_STOP').text
    meta_dict['slave_platform'] = root.find(
        'slave/platform/MISSION').text.replace("'", "").replace("b", "")
    meta_dict['slave_absolute_orbit'] = int(
        root.find('slave/frame/ORBIT_NUMBER').text)
    meta_dict['slave_sensing_start'] = root.find(
        'slave/frame/SENSING_START').text
    meta_dict['slave_sensing_stop'] = root.find(
        'slave/frame/SENSING_STOP').text

    meta_dict['width'] = width
    meta_dict['length'] = length
    meta_dict['xstep'] = xstep
    meta_dict['ystep'] = ystep
    meta_dict['north'] = north
    meta_dict['south'] = south
    meta_dict['west'] = west
    meta_dict['east'] = east
    meta_dict['ellipsoid'] = 'WGS84'

    meta_dict['incidence_angle'] = ''
    meta_dict['producer_names'] = ''

    meta_dict['processing_facility'] = clos.processing_facility
    meta_dict['processing_software'] = clos.processing_software
    meta_dict['processing_software_version'] = clos.processing_software_version
    if clos.processing_atmos_correct_method:
        meta_dict[
            'processing_atmos_correct_method'] = clos.processing_atmos_correct_method
    meta_dict['processing_dem'] = 'SRTM1'
    meta_dict['history'] = 'H5 file created: %s' % datetime.datetime.utcnow()

    meta_dict['average_coherence'] = np.mean(corp)
    meta_dict['max_coherence'] = np.nanmax(corp)
    #    meta_dict['percent_unwrapped'] = ''
    #    meta_dict['percent_atmos'] = ''
    meta_dict['baseline_perp'] = float(
        root.find('baseline/perp_baseline_top').text)
    meta_dict['temporal_baseline'] = abs((first_date - last_date).days)

    ## add any other metadata to dictionary, for example if you parsed some other XML file into a dictionary
    #    for key,value in wraprsc.iteritems():
    #        meta_dict[key] = value

    print(
        'Creating HDF5 file containing the geocoded *int, *unw, *cor, los, and dem '
    )
    filename_root = '%s_%s_%03d_%04d_%s-%s_%04d_%05d' % (
        meta_dict['mission'], meta_dict['beam_swath'],
        meta_dict['relative_orbit'], meta_dict['frame'],
        first_date.strftime("%Y%m%d"), last_date.strftime("%Y%m%d"),
        meta_dict['temporal_baseline'], meta_dict['baseline_perp'])
    h5file = os.getcwd() + '/' + filename_root + '.h5'
    ## OPEN HDF5 FILE ##
    # Bug fix: h5py >= 3 requires an explicit mode; 'a' reproduces the old
    # default behavior (read/write, create if missing).
    f = h5py.File(h5file, 'a')
    ## CREATE GEOCODE GROUP ##
    group = f.create_group('GEOCODE')
    ## CREATE GEOCODE DATASETS ##
    # (os.path.basename() on these literal names was a no-op; plain
    # membership tests are equivalent.)
    if 'unwrapped_interferogram' not in group:
        dset = group.create_dataset('unwrapped_interferogram',
                                    data=unwp,
                                    compression='gzip')
    if 'wrapped_interferogram' not in group:
        dset = group.create_dataset('wrapped_interferogram',
                                    data=intp,
                                    compression='gzip')
    if 'correlation' not in group:
        dset = group.create_dataset('correlation',
                                    data=corp,
                                    compression='gzip')
    if 'incidence_angle' not in group:
        # was assigned to a misspelled 'dest' in the original
        dset = group.create_dataset('incidence_angle',
                                    data=rdrp,
                                    compression='gzip')

    #    if not os.path.basename('digital_elevatino_model') in group:
    #        dest = group.create_dataset('digital_elevation_model',data=dem,compression='gzip')

    ## WRITE ATTRIBUTES TO THE HDF ##
    for key, value in meta_dict.items():
        f.attrs[key] = value

    f.close()
def main(argv):
    """Bundle geocoded ISCE interferogram products plus metadata into an HDF5 file.

    Reads the hard-wired geocoded products (wrapped/unwrapped interferogram,
    correlation, line-of-sight), harvests metadata from ``insarProc.xml`` and
    the command line, and writes everything into a single ``<root>.h5`` file in
    the current working directory under a 'GEOCODE' group.

    Note: ``argv`` is currently unused; options come from ``parse()``.
    """
    # GET THE COMMAND LINE OPTIONS
    clos = parse()

    ### READ GEOCODE DATASETS ###
    # These names are hardwired; change them if you have different naming
    # conventions or want to include different products.
    int_file = "filt_topophase.flat.geo"
    xmlfile = int_file + ".xml"
    cor_file = "phsig.cor.geo"
    unw_file = "filt_topophase.flat.unw.geo"
    rdr_file = 'los.rdr.geo'

    PA = createFileParser('xml')
    dictOut, dictFact, dictMisc = PA.parse(xmlfile)
    width = dictOut['width']
    length = dictOut['length']
    xstep = dictOut['Coordinate1']['delta']
    ystep = dictOut['Coordinate2']['delta']
    north = dictOut['Coordinate2']['startingvalue']
    south = north + length * ystep
    west = dictOut['Coordinate1']['startingvalue']
    east = west + width * xstep

    inta, intp = read_complex64(int_file, length, width)
    unwa, unwp = read_float32(unw_file, length, width)
    rdra, rdrp = read_float32(rdr_file, length, width)
    corp = np.fromfile(cor_file, dtype=np.float32).reshape(length, width)

    #################################
    ###  METADATA
    #################################
    # Most of the metadata comes from the insarProc.xml file.
    tree = ET.parse('insarProc.xml')
    root = tree.getroot()
    first_date = datetime.datetime.strptime(
        root.find('master/frame/SENSING_START').text, '%Y-%m-%d %H:%M:%S.%f')
    last_date = datetime.datetime.strptime(
        root.find('slave/frame/SENSING_START').text, '%Y-%m-%d %H:%M:%S.%f')

    def _clean(text):
        # Strip the b'...' byte-literal artifacts left by str() of a bytes
        # value, e.g. "b'CSKS4'" -> "CSKS4".
        # NOTE(review): .replace("b", "") removes EVERY 'b' in the string, not
        # just the prefix -- fine for the mission/pass/polarization values
        # handled here, but fragile in general.
        return text.replace("'", "").replace("b", "")

    meta_dict = {}
    ## MANDATORY METADATA ##
    meta_dict['mission'] = _clean(root.find('master/platform/MISSION').text)
    meta_dict['beam_swath'] = clos.beam_swath
    if root.find('master/frame/TRACK_NUMBER').text == 'None':
        # No track number: derive the relative orbit for COSMO-SkyMed
        # (237-orbit repeat cycle; CSKS4 is offset by 193 orbits).
        # NOTE(review): for a non-CSK mission without a TRACK_NUMBER,
        # 'relative_orbit' is never set and the filename formatting below
        # raises KeyError -- confirm whether that case can occur.
        if 'CSK' in meta_dict['mission']:
            if meta_dict['mission'] == 'CSKS4':
                meta_dict['relative_orbit'] = (int(root.find('master/frame/ORBIT_NUMBER').text) - 193) % 237
            else:
                meta_dict['relative_orbit'] = int(root.find('master/frame/ORBIT_NUMBER').text) % 237
            meta_dict['mission'] = meta_dict['mission'][:3]  # only take the CSK part
    else:
        meta_dict['relative_orbit'] = int(root.find('master/frame/TRACK_NUMBER').text)
    meta_dict['first_date'] = first_date.strftime("%Y%m%d")
    meta_dict['last_date'] = last_date.strftime("%Y%m%d")
    meta_dict['processing_type'] = clos.processing_type  # SET AS A DEFAULT IN parse()
#    meta_dict['scene_footprint'] = footprintFromPickle()
    meta_dict['scene_footprint'] = footprintFromLogFile()

    ## RECOMMENDED METADATA ##
    if clos.beam_mode:
        meta_dict['beam_mode'] = clos.beam_mode
    meta_dict['frame'] = 0  # the original literal 0000 is just the int 0
    meta_dict['flight_direction'] = _clean(root.find('master/frame/PASS_DIRECTION').text)
    if int(root.find('master/lookSide').text) == -1:
        meta_dict['look_direction'] = 'R'
    else:
        meta_dict['look_direction'] = 'L'
    meta_dict['wavelength'] = float(root.find('master/wavelength').text)
    meta_dict['polarization'] = _clean(root.find('master/frame/POLARIZATION').text)
    meta_dict['prf'] = float(root.find('master/prf').text)
    meta_dict['master_platform'] = _clean(root.find('master/platform/MISSION').text)
    meta_dict['master_absolute_orbit'] = int(root.find('master/frame/ORBIT_NUMBER').text)
    meta_dict['master_sensing_start'] = root.find('master/frame/SENSING_START').text
    meta_dict['master_sensing_stop'] = root.find('master/frame/SENSING_STOP').text
    meta_dict['slave_platform'] = _clean(root.find('slave/platform/MISSION').text)
    meta_dict['slave_absolute_orbit'] = int(root.find('slave/frame/ORBIT_NUMBER').text)
    meta_dict['slave_sensing_start'] = root.find('slave/frame/SENSING_START').text
    meta_dict['slave_sensing_stop'] = root.find('slave/frame/SENSING_STOP').text

    meta_dict['width'] = width
    meta_dict['length'] = length
    meta_dict['xstep'] = xstep
    meta_dict['ystep'] = ystep
    meta_dict['north'] = north
    meta_dict['south'] = south
    meta_dict['west'] = west
    meta_dict['east'] = east
    meta_dict['ellipsoid'] = 'WGS84'

    # Placeholders: not computed by this script.
    meta_dict['incidence_angle'] = ''
    meta_dict['producer_names'] = ''

    meta_dict['processing_facility'] = clos.processing_facility
    meta_dict['processing_software'] = clos.processing_software
    meta_dict['processing_software_version'] = clos.processing_software_version
    if clos.processing_atmos_correct_method:
        meta_dict['processing_atmos_correct_method'] = clos.processing_atmos_correct_method
    meta_dict['processing_dem'] = 'SRTM1'
    meta_dict['history'] = 'H5 file created: %s' % datetime.datetime.utcnow()

    meta_dict['average_coherence'] = np.mean(corp)
    meta_dict['max_coherence'] = np.nanmax(corp)
#    meta_dict['percent_unwrapped'] = ''
#    meta_dict['percent_atmos'] = ''
    meta_dict['baseline_perp'] = float(root.find('baseline/perp_baseline_top').text)
    meta_dict['temporal_baseline'] = abs((first_date - last_date).days)

    ## add any other metadata to dictionary, for example if you parsed some other XML file into a dictionary
#    for key,value in wraprsc.iteritems():
#        meta_dict[key] = value

    print('Creating HDF5 file containing the geocoded *int, *unw, *cor, los, and dem ')
    filename_root = '%s_%s_%03d_%04d_%s-%s_%04d_%05d' % (
        meta_dict['mission'], meta_dict['beam_swath'],
        meta_dict['relative_orbit'], meta_dict['frame'],
        first_date.strftime("%Y%m%d"), last_date.strftime("%Y%m%d"),
        meta_dict['temporal_baseline'], meta_dict['baseline_perp'])
    h5file = os.path.join(os.getcwd(), filename_root + '.h5')
    ## OPEN HDF5 FILE ##
    # Explicit 'a' (the historical default): h5py >= 3 raises if no mode is given.
    f = h5py.File(h5file, 'a')
    ## CREATE GEOCODE GROUP ##
    group = f.create_group('GEOCODE')
    ## CREATE GEOCODE DATASETS ##
    # (os.path.basename() on these literal names was a no-op; plain membership
    # tests are equivalent.)
    if 'unwrapped_interferogram' not in group:
        group.create_dataset('unwrapped_interferogram', data=unwp, compression='gzip')
    if 'wrapped_interferogram' not in group:
        group.create_dataset('wrapped_interferogram', data=intp, compression='gzip')
    if 'correlation' not in group:
        group.create_dataset('correlation', data=corp, compression='gzip')
    if 'incidence_angle' not in group:
        group.create_dataset('incidence_angle', data=rdrp, compression='gzip')
#    if 'digital_elevation_model' not in group:
#        group.create_dataset('digital_elevation_model', data=dem, compression='gzip')

    ## WRITE ATTRIBUTES TO THE HDF ##
    for key, value in meta_dict.items():
        f.attrs[key] = value

    f.close()
def createDem(self, info):
    """Create and stitch a DEM covering the frame bounding box (plus padding).

    Only reached when no DEM image was specified as input.  Stores the DEM
    metadata filename on ``self.insar.demInitFile`` and the DEM image object
    on ``self.insar.demImage``.

    Raises:
        Exception: if the DEM for the region of interest cannot be formed.
    """
    import math
    from contrib.demUtils.DemStitcher import DemStitcher
    bbox = info.bbox

    #### If the user has requested a bounding box, start from it
    # NOTE(review): lonMin is taken from index 3 and lonMax from index 2 --
    # verify the geocode_bbox ordering convention (an [S, N, W, E] ordering
    # would make these two look swapped).
    if self.geocode_bbox:
        latMax = self.geocode_bbox[1]
        latMin = self.geocode_bbox[0]
        lonMin = self.geocode_bbox[3]
        lonMax = self.geocode_bbox[2]
    else:
        # Sentinels guaranteed to be replaced by the first frame corner.
        latMax = -1000
        latMin = 1000
        lonMax = -1000
        lonMin = 1000

    # Expand the box so it covers every frame corner.
    for bb in bbox:
        latMax = max(latMax, bb[0])
        latMin = min(latMin, bb[0])
        lonMax = max(lonMax, bb[1])
        lonMin = min(lonMin, bb[1])

    #### Extra padding around bbox:
    #### - to account for timing errors
    #### - to account for shifts due to topography
    # (the original defined this constant but then repeated the 0.2 literal)
    delta = 0.2

    latMin = math.floor(latMin - delta)
    latMax = math.ceil(latMax + delta)
    lonMin = math.floor(lonMin - delta)
    lonMax = math.ceil(lonMax + delta)
    demName = self.demStitcher.defaultName([latMin, latMax, lonMin, lonMax])
    demNameXml = demName + '.xml'
    self.demStitcher.setCreateXmlMetadata(True)
    self.demStitcher.setMetadataFilename(
        demNameXml)  # it adds the .xml automatically

    # If the DEM is already there, don't recreate it.
    if not (os.path.exists(demNameXml) and os.path.exists(demName)):

        # Either insist on the high-resolution DEM (filling whatever tiles are
        # missing), or try high-res without filling and fall back to the lower
        # resolution source if the region cannot be completed.  A better way
        # would be to use the union of the DEMs and do some resampling.
        if self.useHighResolutionDemOnly:
            # it will use the high res no matter how many tiles are missing
            self.demStitcher.setFilling()
            source = 1  # best resolution
            stitchOk = self.demStitcher.stitchDems(
                [latMin, latMax], [lonMin, lonMax],
                source,
                demName,
                keep=False)  # remove zip files
        else:
            # try the best resolution first, without filling gaps
            self.demStitcher.setNoFilling()
            source = 1
            stitchOk = self.demStitcher.stitchDems(
                [latMin, latMax], [lonMin, lonMax],
                source,
                demName,
                keep=False)  # remove zip files
            if not stitchOk:  # try lower resolution if there are no data
                self.demStitcher.setFilling()
                source = 3
                stitchOk = self.demStitcher.stitchDems([latMin, latMax],
                                                       [lonMin, lonMax],
                                                       source,
                                                       demName,
                                                       keep=False)

        if not stitchOk:
            # Give the raised exception the same message we log.
            msg = ("Cannot form the DEM for the region of interest. If you "
                   "have one, set the appropriate DEM component in the input "
                   "file.")
            logger.error(msg)
            raise Exception(msg)

    # save the name just in case
    self.insar.demInitFile = demNameXml
    # If stitching was performed a DEM image instance was created (getImage()
    # returns None otherwise); if not, build one from the metadata file.
    demImage = self.demStitcher.getImage()
    if demImage is None:
        from iscesys.Parsers.FileParserFactory import createFileParser
        from isceobj import createDemImage
        parser = createFileParser('xml')
        # get the properties from the DEM init file; this dictionary has an
        # initial dummy key whose value is the dict with all the properties
        prop, fac, misc = parser.parse(demNameXml)

        demImage = createDemImage()
        demImage.init(prop, fac, misc)
        demImage.metadatalocation = demNameXml

    self.insar.demImage = demImage
Exemple #20
0
 def load(self, filename):
     """Populate this object from the metadata stored in an XML file.

     The file is parsed into (properties, factories, misc) dictionaries,
     which are then handed to self.init() to initialize the instance.
     """
     from iscesys.Parsers.FileParserFactory import createFileParser
     xml_parser = createFileParser('xml')
     properties, factories, misc = xml_parser.parse(filename)
     self.init(properties, factories, misc)
Exemple #21
0
    def commandLineParser(self, args):
        """Parse a command line that may mix input files and inline options.

        Returns a tuple (propDict, factDict, miscDict, listOfOptions) where

        propDict contains the input values for the properties of an ISCE
        application as well as those for the components declared as facilities
        in the application;

        factDict contains input values for the factories used in constructing
        instances of the components declared as facilities in the application;

        miscDict contains the above two types of information that are entered
        in-line on the command line.  These will override those given in the
        files during component initialization if there are conflicts;

        listOfOptions contains the '--' style options such as '--help'.

        (This text was previously placed after the first import, where it was
        a no-op expression rather than the function's docstring.)
        """
        # Single import up front -- the original also re-imported DictUtils
        # inside the file-parsing loop, which was redundant.
        from iscesys.DictUtils.DictUtils import DictUtils as DU

        propDict = {}
        factDict = {}
        miscDict = {}
        listOfOptions = []
        for arg in args:
            # '--' style flags are collected verbatim.
            if arg.startswith('--'):
                listOfOptions.append(arg)
                continue

            isFile = False
            for filetype in self._filetypes:
                if arg.endswith('.' + filetype):
                    # Parse the file with the parser matching its extension
                    # and merge the result; later files override earlier ones.
                    from iscesys.Parsers.FileParserFactory import createFileParser
                    FP = createFileParser(filetype)
                    tmpProp, tmpFact, tmpMisc = FP.parse(arg)

                    if tmpProp:
                        DU.updateDictionary(propDict, tmpProp, replace=True)
                    if tmpFact:
                        DU.updateDictionary(factDict, tmpFact, replace=True)
                    if tmpMisc:
                        DU.updateDictionary(miscDict, tmpMisc, replace=True)

                    isFile = True
                    break

            if isFile:
                continue

            # Not a file: assume a dotted assignment of the form
            #   component1.component2.....componentN.attribute=value
            # with no spaces (otherwise the command-line split breaks it up).
            # Specifying from the top component makes it easier to handle the
            # case in which the files come after (otherwise the key of the
            # first node is not defined).
            tmpProp, tmpFact, tmpMisc = self.dotStringToDicts(arg)

            if tmpProp:
                DU.updateDictionary(propDict, tmpProp, replace=True)
            if tmpFact:
                DU.updateDictionary(factDict, tmpFact, replace=True)
            if tmpMisc:
                DU.updateDictionary(miscDict, tmpMisc, replace=True)

        return (DU.renormalizeKeys(propDict), DU.renormalizeKeys(factDict),
                DU.renormalizeKeys(miscDict), listOfOptions)
Exemple #22
0
    def parseComponent(self,
                       root,
                       dictIn,
                       dictFact,
                       dictMisc=None,
                       metafile=None):
        """Recursively parse a <component> XML tree into property/factory/misc dicts.

        Constants and properties found directly under `root` are merged into
        `dictIn` (and `dictMisc`); factory-related child elements populate
        `dictFact`; each nested <component> node is recursed into.

        SECURITY NOTE(review): <args>, <kwargs>, <doc>, and inline <catalog>
        contents are evaluated with eval()/exec() -- never parse untrusted XML
        with this method.
        """
        # Check for constants
        self.parseConstants(root, dictIn, dictMisc)
        self.apply_consts_dict(dictIn[const_key], dictIn[const_key])
        # check if it has some property to set. it will overwrite the ones possibly present in the catalog
        self.parseProperty(root, dictIn, dictMisc)

        nodes = root.findall('component')

        for node in nodes:
            #Normalize the input node name per our convention
            name = self.getNormalizedComponentName(node)
            factoryname = self.getComponentElement(node, 'factoryname')
            factorymodule = self.getComponentElement(node, 'factorymodule')
            args = node.find('args')
            kwargs = node.find('kwargs')
            doc = node.find('doc')
            #check if any of the facility attributes are defined
            # don't ask me why but checking just "if factoryname or factorymodule .. " did not work
            # (NOTE(review): probably the ElementTree truth-value pitfall --
            # an Element with no children is falsy -- hence the explicit
            # "== None" comparisons below.)

            if (not factoryname == None) or (not factorymodule == None) or (
                    not args == None) or (not kwargs == None) or (not doc
                                                                  == None):
                if not name in dictFact:
                    dictFact.update({name: {}})
            if not factoryname == None:
                dictFact[name].update({'factoryname': factoryname})
            if not factorymodule == None:
                dictFact[name].update({'factorymodule': factorymodule})
            if not args == None:
                #this must be a tuple; evaluated as Python source (see docstring)
                argsFact = eval(args.text)
                dictFact[name].update({'args': argsFact})
            if not kwargs == None:
                #this must be a dictionary; evaluated as Python source (see docstring)
                kwargsFact = eval(kwargs.text)
                dictFact[name].update({'kwargs': kwargsFact})
            if not doc is None:
                #the doc should be a list of strings. if not create a list
                if self.isStr(doc.text):
                    dictFact[name].update({'doc': [doc.text]})
                else:  #if not a string it should be a list; exec'd as Python source
                    exec("dictFact[name].update({'doc': " + doc.text + "})")

            catalog = node.find('catalog')
            if not catalog == None:
                parser = node.find('parserfactory')

                # if a parser is present than call the factory otherwise use default.
                #it should return a dictionary (of dictionaries possibly) with name,value.
                #complex objects are themselves rendered into dictionaries
                tmpDictIn = {}
                tmpDictFact = {}
                tmpDictMisc = {}

                #the catalog can be a string i.e. a filename (that will be parsed) or a dictionary
                catalog_text = catalog.text.strip()
                if self.isStr(catalog_text):
                    #Create a file parser in XP
                    # NOTE(review): "if parser:" hits the ElementTree
                    # truth-value pitfall -- a childless <parserfactory>
                    # element is falsy even when present; confirm intended.
                    if parser:
                        #If the inputs specified a parser, then use it
                        filetype = node.find('filetype').text
                        XP = eval(parser.text + '(\"' + filetype + '\")')

                    else:
                        #If the inputs did not specify a parser, then create one from an input extension type
                        #or, if not given as input, from the extension of the catalog
                        # NOTE(review): same truth-value pitfall applies to
                        # "if filetype:" below.
                        filetype = node.find('filetype')
                        if filetype:
                            ext = filetype.text
                        else:
                            ext = catalog_text.split('.')[-1]

                        from .FileParserFactory import createFileParser
                        XP = createFileParser(ext)
                    self._metafile = catalog_text
                    (tmpDictIn, tmpDictFact,
                     tmpDictMisc) = XP.parse(catalog_text)

                    #the previous parsing will return dict of dicts with all the subnodes of that entry, so update the  node.
                    if not tmpDictIn == {}:
                        if not name in dictIn:
                            dictIn.update({name: tmpDictIn})
                        else:
                            dictIn[name].update(tmpDictIn)
                    if not tmpDictFact == {}:
                        if not name in dictFact:
                            dictFact.update({name: tmpDictFact})
                        else:
                            dictFact[name].update(tmpDictFact)
                    if not tmpDictMisc == {}:
                        if not name in dictMisc:
                            dictMisc.update({name: tmpDictMisc})
                        else:
                            dictMisc[name].update(tmpDictMisc)

                else:
                    #the catalog is a dictionary of type {'x1':val1,'x2':val2}
                    tmpDictIn = eval(catalog_text)
                    if isinstance(tmpDictIn, dict):
                        if not tmpDictIn == {}:
                            if not name in dictIn:
                                dictIn.update({name: tmpDictIn})
                            else:
                                dictIn[name].update(tmpDictIn)

                    else:
                        logging.error(
                            "Error. catalog must be a filename or  a dictionary"
                        )
                        raise

            tmpDict = {}
            tmpDict[const_key] = dictIn[const_key]  #pass the constants down
            tmpDictFact = {}
            tmpDictMisc = {}

            #add the attribute metalocation to the object paramenter
            tmpDict['metadata_location'] = os.path.abspath(self._metafile)
            # Recurse into the nested component, then merge its results.
            self.parseComponent(node, tmpDict, tmpDictFact, tmpDictMisc)
            if not tmpDict == {}:
                if not name in dictIn:
                    dictIn.update({name: tmpDict})
                else:
                    dictIn[name].update(tmpDict)
            if not tmpDictFact == {}:
                if not name in dictFact:
                    dictFact.update({name: tmpDictFact})
                else:
                    dictFact[name].update(tmpDictFact)
            if not tmpDictMisc == {}:
                if not name in dictMisc:
                    dictMisc.update({name: tmpDictMisc})
                else:
                    dictMisc[name].update(tmpDictMisc)