# Example 1
    def __init__(self, name):
	self.config_file_name = name
	try:
	    doc = parse(self.config_file_name)
	except IOError:
	    print '****** Could not find configure file!'
	    raise IOError

	# default value
	self.datarootdir = "/data1/sensors/atrain/cloudsat/CLDCLASS/R04"
	###self.datarootdir = "/data1/sensors/atrain/amsre.aqua"

	elems = doc.getElementsByTagName('data_root_dir')
	###print 'elems type: ', type(elems[0])
	###print 'elems: ', elems[0].childNodes
	if len(elems) > 0:
	    for c in elems[0].childNodes:
		if c.nodeType != Node.COMMENT_NODE and len(c.data) > 1:
		    ###print 'c: ', c.data
		    self.datarootdir = (c.data).encode('UTF-8').strip('\n').strip()
        else:
            print "***** warning in file_list.__init__ - missing data_root_dir in config file"
            print "      config filename: ", self.config_file_name
            print "      using : ", self.datarootdir

            print 'self.datarootdir: ', self.datarootdir
            
        # get front end name, if present in xml file
        # gblock 3/17/2011 - added front end name to xml file
        self.dataset_name = ''
        elems = doc.getElementsByTagName('dataset_name')
	if len(elems) > 0:
	    for c in elems[0].childNodes:
		if c.nodeType != Node.COMMENT_NODE and len(c.data) > 1:
		    ###print 'c: ', c.data
		    self.dataset_name = (c.data).encode('UTF-8').strip('\n').strip()

        # check for name defined by xml statement
        if (self.dataset_name == ''):
            self.dataset_name = UT.parse_data_type (self.datarootdir)

        print 'self.dataset_name: ', self.dataset_name

        # get container for dataset
        self.dataset_container = dataset_registry.get_dataset_container (self.dataset_name)

        self.dataset_container.set_datarootdir (self.datarootdir)
        
	# default value
	self.listdir = os.path.expanduser('~')

	elems = doc.getElementsByTagName('list_dir')
	if len(elems) > 0:
	    for c in elems[0].childNodes:
		if c.nodeType != Node.COMMENT_NODE and len(c.data) > 1:
		    self.listdir = (c.data).encode('UTF-8').strip('\n').strip()
        else:
            print "***** warning in file_list.__init__ - missing list_dir in config file"
            print "      config filename: ", self.config_file_name
            print "      using : ", self.listdir
	###print 'self.listdir: ', self.listdir

	# default value
	self.outputdir = os.path.expanduser('~')

	elems = doc.getElementsByTagName('output_dir')
	if len(elems) > 0:
	    for c in elems[0].childNodes:
		if c.nodeType != Node.COMMENT_NODE and len(c.data) > 1:
		    self.outputdir = (c.data).encode('UTF-8').strip('\n').strip()
        else:
            print "***** warning in file_list.__init__ - missing output_dir in config file"
            print "      config filename: ", self.config_file_name
            print "      using : ", self.outputdir

	print 'self.outputdir: ', self.outputdir

	# default value
	self.start_t = datetime.strptime('11/02/2006 03:15:25', "%m/%d/%Y %H:%M:%S") # TBD: use CloudSat start time

	elems = doc.getElementsByTagName('start_time')
	if len(elems) > 0:
	    for c in elems[0].childNodes:
		if c.nodeType != Node.COMMENT_NODE and len(c.data) > 1:
		    st = (c.data).encode('UTF-8').strip('\n').strip()
		    ###print 'st: ', st
		    ### self.start_t = datetime.strptime(st, "%m/%d/%Y %H:%M:%S%p")
		    self.start_t = datetime.strptime(st, "%m/%d/%Y %H:%M:%S")
        else:
            print "***** warning in file_list.__init__ - missing start_time in config file"
            print "      config filename: ", self.config_file_name
            print "      using : ", self.start_t
            
	# default value
	self.end_t = datetime.strptime('11/04/2006 07:35:20', "%m/%d/%Y %H:%M:%S") # TBD: use CloudSat end time

	elems = doc.getElementsByTagName('end_time')
	if len(elems) > 0:
	    for c in elems[0].childNodes:
		if c.nodeType != Node.COMMENT_NODE and len(c.data) > 1:
		    et = (c.data).encode('UTF-8').strip('\n').strip()
		    ###print 'et: ', et
		    self.end_t = datetime.strptime(et, "%m/%d/%Y %H:%M:%S")
        else:
            print "***** warning in file_list.__init__ - missing end_time in config file"
            print "      config filename: ", self.config_file_name
            print "      using : ", self.end_t
            
	# default value (2min)
	self.time_diff = '120'

	elems = doc.getElementsByTagName('time_diff')
	if len(elems) > 0:
	    for c in elems[0].childNodes:
		if c.nodeType != Node.COMMENT_NODE and len(c.data) > 1:
		    self.time_diff = (c.data).encode('UTF-8').strip('\n').strip()
		    ### print 'self.time_diff: ', self.time_diff
        else:
            print "***** warning in file_list.__init__ - missing time_diff in config file"
            print "      config filename: ", self.config_file_name
            print "      using : ", self.time_diff
            
	# default value (15min)
	self.time_search_range = '900'

	elems = doc.getElementsByTagName('time_search_range')
	if len(elems) > 0:
	    for c in elems[0].childNodes:
		if c.nodeType != Node.COMMENT_NODE and len(c.data) > 1:
		    self.time_search_range = (c.data).encode('UTF-8').strip('\n').strip()
        else:
            print "***** warning in file_list.__init__ - missing time_search_range in config file"
            print "      config filename: ", self.config_file_name
            print "      using : ", self.time_search_range
            
	print 'self.time_search_range: ', self.time_search_range

	# default value (20km)
	self.footprint_size = '20'

	elems = doc.getElementsByTagName('footprint_size')
	if len(elems) > 0:
	    for c in elems[0].childNodes:
		if c.nodeType != Node.COMMENT_NODE and len(c.data) > 1:
		    self.footprint_size = (c.data).encode('UTF-8').strip('\n').strip()
        else:
            print "***** warning in file_list.__init__ - missing footprit_size in config file"
            print "      config filename: ", self.config_file_name
            print "      using : ", self.footprint_size
            
	print 'self.footprint_size: ', self.footprint_size

	# default value (1.5)
	self.space_search_factor = '1.5'

	elems = doc.getElementsByTagName('space_search_factor')
	if len(elems) > 0:
	    for c in elems[0].childNodes:
		if c.nodeType != Node.COMMENT_NODE and len(c.data) > 1:
		    self.space_search_factor = (c.data).encode('UTF-8').strip('\n').strip()
        else:
            print "***** warning in file_list.__init__ - missing space_search_factor in config file"
            print "      config filename: ", self.config_file_name
            print "      using : ", self.space_search_factor
            
	print 'self.space_search_factor: ', self.space_search_factor

	# default value (40)
        self.cell_search_limit = '40'

	elems = doc.getElementsByTagName('cell_search_limit')
	if len(elems) > 0:
	    for c in elems[0].childNodes:
		if c.nodeType != Node.COMMENT_NODE and len(c.data) > 1:
		    self.cell_search_limit = (c.data).encode('UTF-8').strip('\n').strip()
        ###else:
            ###print "***** warning in file_list.__init__ - missing cell_search_limit in config file"
            ###print "      config filename: ", self.config_file_name
            ###print "      using : ", self.cell_search_limit
            
	print 'self.cell_search_limit: ', self.cell_search_limit

	# default value (-909090)
	self.invalid_data = '-909090'

	elems = doc.getElementsByTagName('invalid_data')
	if len(elems) > 0:
	    for c in elems[0].childNodes:
		if c.nodeType != Node.COMMENT_NODE and len(c.data) > 1:
		    self.invalid_data = (c.data).encode('UTF-8').strip('\n').strip()
	print 'self.invalid_data: ', self.invalid_data

	# default value ('None')
	self.missing_value = 'None'

	elems = doc.getElementsByTagName('missing_value')
	if len(elems) > 0:
	    for c in elems[0].childNodes:
		if c.nodeType != Node.COMMENT_NODE and len(c.data) > 1:
		    self.missing_value = (c.data).encode('UTF-8').strip('\n').strip()
	print 'self.missing_value: ', self.missing_value

	# default value (1)
	self.num_cores = '1'

	elems = doc.getElementsByTagName('num_cores')
	if len(elems) > 0:
	    for c in elems[0].childNodes:
		if c.nodeType != Node.COMMENT_NODE and len(c.data) > 1:
		    self.num_cores = (c.data).encode('UTF-8').strip('\n').strip()
	print 'self.num_cores: ', self.num_cores

	# default value (1)
	self.proc_size = '1'

	elems = doc.getElementsByTagName('proc_size')
	if len(elems) > 0:
	    for c in elems[0].childNodes:
		if c.nodeType != Node.COMMENT_NODE and len(c.data) > 1:
		    self.proc_size = (c.data).encode('UTF-8').strip('\n').strip()
	print 'self.proc_size: ', self.proc_size


	# set from list, for fast membership check
	self.dirNameSet  = set(self.dirNameList)
# Example 2
import util as UT

src_data = UT.parse_data_type('/mnt/nas-0-0/users/collocation/ecmwf/idaily-surface')
src_file = '/mnt/nas-0-0/users/collocation/ecmwf/idaily-surface/2008/01/ecmwf-interim-daily-surface-2008-01-06.nc'
src_front_end = UT.get_front_end(src_data, src_file)
#### print 'src_front_end: ', src_front_end

print 'time: ', src_front_end.get_time()
print 'lat: ', src_front_end.get_latitude()
print 'lon: ', src_front_end.get_longitude()
print 'data: ', src_front_end.get_data()
print 'grid info: ', src_front_end.get_src_uniform_grid_info()
# Example 3
# End-of-range start time and file name for the target data set.
t2 = afl1.startTimeList[index2]
f2 = afl1.dirNameList[index2]

# An empty index range means no target granules fall inside the
# user-specified time window.
if index2 < index1:
    print '****** Error: In user specified time range, no target data set exists!'
    sys.exit(-1)

print '%s (start) time range:' % target_data, t1, t2
print '%s file range:' % target_data, f1, f2

print 'User specified %s granule number range:' % target_data, \
    afl1.granuleNumList[index1], afl1.granuleNumList[index2]

# find out what data set the source data is
src_data = UT.parse_data_type(afl.datarootdir)
print 'src_data: ', src_data

# load the file list for src data granules (afl.listdir is where all the list files reside)
list_file1 = '%s/%s_granules.list.pkl' % (afl.listdir, src_data)
print 'list_file1: ', list_file1

try:
    afl.load_lists(list_file1)
except IOError:
    print '****** list file for %s does not exist.' % src_data
    sys.exit(-1)

# Instantiate a middle end class
midEnd = MD.middle_end()
# Example 4
    index2 = len(afl1.startTimeList) - 1

# Last start time / file name of the target data set in range.
t2 = afl1.startTimeList[index2]
f2 = afl1.dirNameList[index2]

# No overlap between the user's window and the target data set.
if index2 < index1:
    print '****** Error: In user specified time range, no target data set exists!'
    sys.exit(-1)

print '{0} (start) time range:'.format(target_data), t1, t2
print '{0} file range:'.format(target_data), f1, f2

print 'User specified {0} granule number range:'.format(target_data), \
    afl1.granuleNumList[index1], afl1.granuleNumList[index2]

# Determine the source data set type from its root directory.
src_data = UT.parse_data_type(afl.datarootdir)
print 'src_data: ', src_data

# Pickled list of source granules lives under afl.listdir.
list_file1 = '{0}/{1}_granules.list.pkl'.format(afl.listdir, src_data)
print 'list_file1: ', list_file1

try:
    afl.load_lists(list_file1)
except IOError:
    print '****** list file for ' + src_data + ' does not exist.'
    sys.exit(-1)


# Instantiate a middle end class
midEnd = MD.middle_end()
import util as UT

src_data = UT.parse_data_type(
    '/mnt/nas-0-0/users/collocation/ecmwf/idaily-surface')
src_file = '/mnt/nas-0-0/users/collocation/ecmwf/idaily-surface/2008/01/ecmwf-interim-daily-surface-2008-01-06.nc'
src_front_end = UT.get_front_end(src_data, src_file)
#### print 'src_front_end: ', src_front_end

print 'time: ', src_front_end.get_time()
print 'lat: ', src_front_end.get_latitude()
print 'lon: ', src_front_end.get_longitude()
print 'data: ', src_front_end.get_data()
print 'grid info: ', src_front_end.get_src_uniform_grid_info()