Example #1
    def globtime(self):

        mat = np.load('globcummat.npz')
        jd1 = pl.datestr2num('2003-01-01')
        jd2 = pl.datestr2num('2012-12-31')
        jdvec = np.arange(jd1, jd2)

        figpref.current()
        pl.close(1)
        pl.figure(1)

        pl.plot_date(jdvec,
                     mat['northmat'][10, :],
                     'r-',
                     alpha=0.5,
                     label="Northern hemisphere")
        pl.plot_date(jdvec,
                     mat['southmat'][10, :],
                     'b-',
                     alpha=0.5,
                     label="Southern hemisphere")
        pl.ylim(0, 1)
        pl.legend(loc='lower right')
        pl.savefig('figs/liege/glob_time.pdf',
                   transparent=True,
                   bbox_inches='tight')
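For reference, pl.datestr2num parses a date string into matplotlib's float day count, so np.arange(jd1, jd2) yields one value per day. Below is a minimal, self-contained sketch of the same plotting pattern, using synthetic data and a placeholder output path:

import numpy as np
import pylab as pl

jd1 = pl.datestr2num('2003-01-01')  # date string -> float date number
jd2 = pl.datestr2num('2012-12-31')
jdvec = np.arange(jd1, jd2)         # one entry per day

y = 0.5 + 0.4 * np.sin(2 * np.pi * (jdvec - jd1) / 365.25)  # fake seasonal signal
pl.plot_date(jdvec, y, 'r-', alpha=0.5, label="toy series")
pl.ylim(0, 1)
pl.legend(loc='lower right')
pl.savefig('glob_time_demo.pdf', transparent=True, bbox_inches='tight')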
Example #2
    def _l3read_nc4(self):
        self.vprint("Reading netCDF4 file")
        print(self.filename)
        nc = Dataset(self.filename)
        nc.set_auto_mask(False)
        nc.set_auto_scale(True)

        var = nc.variables[list(nc.variables.keys())[0]]
        field = var[self.j1:self.j2, self.i1:self.i2].copy()
        try:
            self.minval = var.valid_min
        except AttributeError:
            self.minval = var.display_min
        try:
            valid_max = var.valid_max
        except AttributeError:
            valid_max = 0
        try:
            display_max = var.display_max
        except AttributeError:
            display_max = 0
        self.maxval = max(valid_max, display_max)

        start_jd = pl.datestr2num(nc.time_coverage_start)
        end_jd = pl.datestr2num(nc.time_coverage_end)
        self.jd = ((start_jd + end_jd) / 2)
        self.date = pl.num2date(self.jd)
        return field
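A hedged sketch of the midpoint-date logic above; the ISO strings stand in for the file's time_coverage_start/time_coverage_end attributes (assumed values):

import pylab as pl

start_jd = pl.datestr2num("2012-01-01T00:00:00Z")  # dateutil-style ISO parsing
end_jd = pl.datestr2num("2012-01-31T23:59:59Z")
jd = (start_jd + end_jd) / 2  # midpoint of the coverage window
print(pl.num2date(jd))        # float date number back to a datetime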
Example #3
 def __init__(self, lon1=-30, lon2=-15, lat1=40, lat2=50, res="4km", **kwargs):
     self.projname = "nasa.MODIS"
     super(L3, self).__init__(lon1=lon1, lat1=lat1, lon2=lon2, lat2=lat2,
                              res=res, map_region='dimensions', **kwargs)
     jd1 = pl.datestr2num('2003-01-01')
     jd2 = pl.datestr2num('2013-05-30')+1
     self.jdvec = np.arange(jd1, jd2)
Example #4
 def _l3read_nc4(self):
     self.vprint( "Reading netCDF4 file")
     print(self.filename)
     nc = Dataset(self.filename)
     nc.set_auto_mask(False)
     nc.set_auto_scale(True)
     
     var         = nc.variables[list(nc.variables.keys())[0]]
     field       = var[self.j1:self.j2, self.i1:self.i2].copy()
     try:
         self.minval = var.valid_min
     except AttributeError:
         self.minval = var.display_min
     try:
         valid_max = var.valid_max
     except AttributeError:
         valid_max = 0
     try:
         display_max = var.display_max
     except AttributeError:
         display_max = 0
     self.maxval = max(valid_max, display_max)
         
     start_jd    = pl.datestr2num(nc.time_coverage_start)
     end_jd      = pl.datestr2num(nc.time_coverage_end)
     self.jd     = ((start_jd + end_jd)/2)
     self.date   = pl.num2date(self.jd)
     return field
Example #5
def plot_timeseries(ns,nsexp):
    pl.clf()
    pl.subplot(2,1,1)
    pl.scatter(ns.jdvec[:,np.newaxis,np.newaxis] + 
               ns.timecube[:,20:30,20:30]*0,ns.timecube[:,20:30,20:30],2,'r')
    pl.scatter(ns.jdvec,nanmean(nanmean(ns.timecube[:,20:30,20:30],axis=1),axis=1),5,'y')
    pl.legend(('Satellite Observations','Daily means'))
    pl.gca().xaxis.axis_date()
    pl.xlim(pl.datestr2num('2003-01-01'), pl.datestr2num('2013-05-31'))
    pl.setp(pl.gca(), yscale="log")
    pl.ylim(0.01,5)
    pl.title('BATS')
    pl.ylabel(r'Chl (mg m$^{-3}$ d$^{-1}$)')

    pl.subplot(2,1,2)
    pl.scatter(nsexp.jdvec[:,np.newaxis,np.newaxis] + 
               nsexp.timecube[:,20:30,20:30]*0,
               nsexp.timecube[:,20:30,20:30],2,'r')
    pl.scatter(nsexp.jdvec,
               nanmean(nanmean(nsexp.timecube[:,20:30,20:30],axis=1),axis=1),5,'y')
    pl.gca().xaxis.axis_date()
    pl.xlim(pl.datestr2num('2003-01-01'), pl.datestr2num('2013-05-31'))
    pl.setp(pl.gca(), yscale="log")
    pl.ylabel(r'Chl (mg m$^{-3}$ d$^{-1}$)')
    pl.ylim(0.01,5)
    pl.title(r'Experiment Site (45$\degree$N 24$\degree$W)')
Example #6
def plot1(country, data, casesData):
    ukData1 = data[(data['Country/Region'] == country)
                   & (data['Province/State'].isnull())]
    countryKey = ukData1.index[0]
    droppedData = ukData1.drop(
        ['Province/State', 'Country/Region', 'Lat', 'Long'], axis=1)
    data5 = droppedData.transpose()
    singleCasesData = casesData[(casesData['Country/Region'] == country)
                                & (casesData['Province/State'].isnull())]
    droppedCasesData = singleCasesData.drop(
        ['Province/State', 'Country/Region', 'Lat', 'Long'], axis=1)
    data6 = droppedCasesData.transpose()

    N = len(data5.index.values)
    minVal = pl.datestr2num(
        datetime.strptime(data5.index.values[0],
                          '%m/%d/%y').strftime('%m/%d/%y'))

    print(data5.index.values[N - 1])
    print(" minVal ", minVal)

    def format_date(x, pos=None):
        logging.debug("Input to format_date {}".format(int(x)))
        logging.debug(" Length of array {} ".format(N))

        #  print( "First  " , datetime.strptime(data5.index.values[0], '%m/%d/%y' ).strftime('%m/%d/%y') )
        #  print( "Last  ",  datetime.strptime(data5.index.values[N-1], '%m/%d/%y' ).strftime('%m/%d/%y')  )
        #  print( "First " , pl.datestr2num( datetime.strptime(data5.index.values[0], '%m/%d/%y' ).strftime('%m/%d/%y')  )  )
        #  print( "Last ",   pl.datestr2num( datetime.strptime(data5.index.values[N-1], '%m/%d/%y' ).strftime('%m/%d/%y')  )  )

        minVal2 = pl.datestr2num(
            datetime.strptime(data5.index.values[0],
                              '%m/%d/%y').strftime('%m/%d/%y'))
        thisind = int(x - minVal2)

        if (thisind > N - 1):
            thisind = N - 1
        if (thisind < 0):
            thisind = 0
        logging.debug(" INDEX :  {} ".format(thisind))
        # strftime removes the time component
        return datetime.strptime(
            data5.index.values[thisind], '%m/%d/%y'
        ).strftime(
            '%m/%d/%y'
        )  #      return datetime.strptime(xV[thisind], '%m/%d/%y' ).strftime('%m/%d/%y')

    ax.xaxis.set_major_formatter(ticker.FuncFormatter(format_date))

    for tl in ax.w_xaxis.get_ticklabels(
    ):  # re-create what autofmt_xdate does, but with w_xaxis
        tl.set_ha('right')
        tl.set_rotation(30)

    #ax.plot_trisurf(pd.to_datetime(data5.index.values, format='%m/%d/%y' ), data6[countryKey], data5[countryKey]  )
    plt = ax.plot_trisurf(pl.datestr2num(data5.index.values),
                          data6[countryKey], data5[countryKey])
Example #7
 def get_tvec(self, jd1, jd2):
     jd1 = pl.datestr2num(jd1) if type(jd1) is str else jd1
     jd2 = pl.datestr2num(jd2) if type(jd2) is str else jd2
     tvec = self.fulltvec
     if jd1 < tvec.min():
         raise ValueError("jd1 too small")
     if jd2 > tvec.max():
         raise ValueError("jd2 too large")
     return tvec[(tvec >= jd1) & (tvec <= jd2)]
Example #8
 def get_tvec(self, jd1, jd2):
     jd1 = pl.datestr2num(jd1) if type(jd1) is str else jd1
     jd2 = pl.datestr2num(jd2) if type(jd2) is str else jd2
     tvec = self.fulltvec
     if jd1 < tvec.min():
         raise ValueError("jd1 too small")
     if jd2 > tvec.max():
         raise ValueError("jd2 too large")
     return tvec[(tvec >= jd1) & (tvec <= jd2)]
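Examples #7 and #8 show the same clipping idiom. A standalone sketch with a synthetic fulltvec:

import numpy as np
import pylab as pl

fulltvec = np.arange(pl.datestr2num('2003-01-01'), pl.datestr2num('2004-01-01'))
jd1, jd2 = pl.datestr2num('2003-03-01'), pl.datestr2num('2003-04-01')
tvec = fulltvec[(fulltvec >= jd1) & (fulltvec <= jd2)]  # inclusive window
print(pl.num2date(tvec[0]), pl.num2date(tvec[-1]))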
Example #9
def dealias_berrimah_volume(filename, **kwargs):
	pattern=kwargs.get('pattern', 'BerrimaVol')
	deal_add=kwargs.get('deal_add', '_deal')
	raw_path=kwargs.get('raw_path', '/data/uf_ber/')
	deal_path=kwargs.get('deal_path', '/data/deal_ber/')
	
	deal_files=os.listdir(deal_path)
	raw_files=os.listdir(raw_path)
	#deal_files=[]
	#raw_files=[]
	#for file in uf_files:
	#	if 'deal' in file: 
	#		deal_files.append(file)
	#	else:
	#		raw_files.append(file)
	#
	if not(filename in raw_files):
		raise IOError('File not there')
	deal_files.sort()
	raw_files.sort()
	this_date_str=filename[len(pattern):-3].replace('_', ' ')
	this_date_str=this_date_str[0:this_date_str.find(' ')+5]
	sonde_name=read_sounding.make_deal_sonde(this_date_str)
	deal_date=sonde_name[0:sonde_name.find('_')]
	
	date_num_list=[]
	for file in deal_files:
		deal_str=file[len(pattern):-8].replace('_', ' ')
		deal_str=deal_str[0:deal_str.find(' ')+5]
		date_num_list.append(datestr2num(deal_str))
		
	offset=array(date_num_list)-datestr2num(this_date_str)
	idec_where=nwhere(offset < 0.0)[0]
	offset_least=100.0
	if len(idec_where)!=0: offset_least=offset[idec_where[-1]]
	print("**********************")
	if abs(offset_least) < 30.0/(60.0*24.0):
		deal_fname=deal_files[idec_where[-1]]
		print("Found a previous de-aliased file ", deal_fname)
	else:
		print("No de-aliased file found within 30 minutes, only using sounding")
		deal_fname='dummy'
	
	print("********************")
	outfile=filename[0:-3]+deal_add+".uf"
	cwd=os.getcwd()
	os.chdir('/home/scollis/bom_mds/dealias/')
	execbl='./FourDD_berrimah '
	command=execbl+deal_path+deal_fname+' '+raw_path+filename+' '+deal_path+outfile+' '+ sonde_name+' '+deal_date+' '+'0 1 1 1'
	print(command)
	os.system(command)
	os.chdir(cwd)
	return outfile
Example #10
def dealias_single_volume(filename, **kwargs):	
	pattern=kwargs.get('pattern', 'Gunn_pt_')
	deal_add=kwargs.get('deal_add', '_deal')
	lassen_path=kwargs.get('lassen_path', '/bm/gscratch/scollis/lassen_cpol/')
	uf_path=kwargs.get('lassen_path', '/bm/gscratch/scollis/uf_cpol/')
	deal_files=os.listdir(uf_path)
	raw_files=os.listdir(lassen_path)
	#deal_files=[]
	#raw_files=[]
	#for file in uf_files:
	#	if 'deal' in file: 
	#		deal_files.append(file)
	#	else:
	#		raw_files.append(file)
	#
	if not(filename in raw_files):
		raise IOError('File not there: '+filename)
	deal_files.sort()
	raw_files.sort()
	this_date_str=filename[len(pattern):-11]
	#this_date_str=this_date_str[0:this_date_str.find(' ')+5]
	sonde_name=read_sounding.make_deal_sonde(this_date_str)
	deal_date=sonde_name[0:sonde_name.find('_')]
	date_num_list=[]
	for file in deal_files:
		deal_str=file[len(pattern):-8]
		#print deal_str
		#deal_str=deal_str[0:deal_str.find(' ')+5]
		#print deal_str
		date_num_list.append(datestr2num(deal_str))
	offset=array(date_num_list)-datestr2num(this_date_str)
	idec_where=nwhere(offset < 0.0)[0]
	offset_least=100.0
	if len(idec_where)!=0: offset_least=offset[idec_where[-1]]
	print("**********************")
	if abs(offset_least) < 30.0/(60.0*24.0):
		deal_fname=deal_files[idec_where[-1]]
		print("Found a previous de-aliased file ", deal_fname)
	else:
		print("No de-aliased file found within 30 minutes, only using sounding")
		deal_fname='dummy'
	print("********************")
	outfile=filename[0:-11]+deal_add+".uf"
	cwd=os.getcwd()
	os.chdir('/flurry/home/scollis/bom_mds/dealias/')
	execbl='./FourDD_lassen '
	command=execbl+uf_path+deal_fname+' '+lassen_path+filename+' '+uf_path+outfile+' '+ sonde_name+' '+deal_date+' '+'1 1 1 1'
	print(command)
	os.system(command)
	os.chdir(cwd)	
	return outfile
Example #11
    def time(self, i=500,j=250):

        if not hasattr(self,'h5f'): self.h5open()
        mat = self.h5f.root.chl[:,j-5:j+5,i-5:i+5]

        figpref.presentation()
        jd =  pl.datestr2num('2012-01-01')
        jd2 = pl.datestr2num('2013-04-30')+1
        pl.gca().xaxis.axis_date()
        pl.scatter(self.jdvec[:,np.newaxis,np.newaxis]+mat*0,mat, 5,'g')
        pl.xlim(jd,jd2)
        pl.scatter(self.jdvec,nanmean(nanmean(mat,axis=1),axis=1),20,'y')
        #setp(gca(),yscale='log')
        pl.ylim(0.01,5)
        return pl.gca()
Example #12
    def format_date(x, pos=None):
        logging.debug("Input to format_date {}".format(int(x)))
        logging.debug(" Length of array {} ".format(N))

        #  print( "First  " , datetime.strptime(data5.index.values[0], '%m/%d/%y' ).strftime('%m/%d/%y') )
        #  print( "Last  ",  datetime.strptime(data5.index.values[N-1], '%m/%d/%y' ).strftime('%m/%d/%y')  )
        #  print( "First " , pl.datestr2num( datetime.strptime(data5.index.values[0], '%m/%d/%y' ).strftime('%m/%d/%y')  )  )
        #  print( "Last ",   pl.datestr2num( datetime.strptime(data5.index.values[N-1], '%m/%d/%y' ).strftime('%m/%d/%y')  )  )

        minVal2 = pl.datestr2num(
            datetime.strptime(data5.index.values[0],
                              '%m/%d/%y').strftime('%m/%d/%y'))
        thisind = int(x - minVal2)

        if (thisind > N - 1):
            thisind = N - 1
        if (thisind < 0):
            thisind = 0
        logging.debug(" INDEX :  {} ".format(thisind))
        # strftime removes the time component
        return datetime.strptime(
            data5.index.values[thisind], '%m/%d/%y'
        ).strftime(
            '%m/%d/%y'
        )  #      return datetime.strptime(xV[thisind], '%m/%d/%y' ).strftime('%m/%d/%y')
Example #13
 def __init__(self, ob):
     # populate attributes with sounding data, initially this will
     # only work with a netcdf variable object (from Scientific.IO)
     # but more objects can be added by simply adding elif..
     # PLEASE always populate height in the values['alt'] position and
     # append values['date_list'] and datetime
     # datetime and date_list[index] are datetime objects
     # check if it is a netcdf variable list
     if "getValue" in dir(ob[list(ob.keys())[0]]):
         # this is a netcdf variables object
         self.datetime = num2date(datestr2num("19700101") + ob["base_time"].getValue() / (24.0 * 60.0 * 60.0))
         values = {}
         units = {}
         longname = {}
         for var in ob.keys():
             values.update({var: ob[var][:]})
             try:
                 units.update({var: ob[var].units})
             except AttributeError:
                 units.update({var: "no units"})
             try:
                 longname.update({var: ob[var].long_name})
             except AttributeError:
                 longname.update({var: "no longname"})
             values.update(
                 {"date_list": num2date(date2num(self.datetime) + values["time_offset"] / (24.0 * 60.0 * 60.0))}
             )
             units.update({"date_list": "unitless (object)"})
             self.values = values
             self.units = units
             self.long_name = longname
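The base_time trick above turns seconds-since-epoch into a matplotlib date number by adding them, as fractional days, to datestr2num("19700101"). A minimal sketch with a made-up base_time:

from pylab import datestr2num, num2date

base_time = 1340000000  # seconds since 1970-01-01 (made-up value)
jd = datestr2num("19700101") + base_time / (24.0 * 60.0 * 60.0)
print(num2date(jd))     # the epoch seconds as a datetime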
Example #14
    def verifObs(self, inputDate, filename):
        '''
        Import and process data and parameters
        '''

        try:
            #_________________Open file___________________________#
            obsOutput=np.zeros([len(inputDate)], dtype=float)
            if os.path.exists(filename):
                obsData=np.loadtxt(filename, dtype = str)
                obsDate = pylab.datestr2num(obsData[:,0])
                obsValue = obsData[:,1].astype(float)
                if obsDate[0]<inputDate[0]:
                    print('WARNING, Obs data starting before RF and PET,\n these obs data will not be plotted correctly\n')
                if len(inputDate)<len(obsDate):
                    print('WARNING, there is more Obs data than RF and PET data,\n these obs data will not be plotted correctly\n')
                j=0
                for i in range(len(inputDate)):
                    if j<len(obsDate):
                        if inputDate[i]==obsDate[j]:
                            obsOutput[i]=obsValue[j]
                            j=j+1
                        else:
                            obsOutput[i]=self.hnoflo
                    else:
                        obsOutput[i]=self.hnoflo
            else:
                for i in range(len(inputDate)):
                    obsOutput[i]=self.hnoflo

        except (ValueError, TypeError, KeyboardInterrupt, IOError) as e:
            obsOutput = 0
            print(e)
Example #15
    def load(self, **kwargs):
        """ Load Oscar fields for a given day"""
        self._timeparams(**kwargs)
        md = self.jd - pl.datestr2num('1992-10-05')
        filename = os.path.join(self.datadir, "oscar_vel%i.nc" % self.yr)
        if not os.path.exists(filename):
            self.download(filename)
        filenam2 = os.path.join(self.datadir, "oscar_vel%i.nc" % (self.yr + 1))
        if not os.path.exists(filenam2):
            self.download(filenam2)

        nc1 = netcdf_file(filename)
        tvec = nc1.variables['time'][:]
        t1 = int(np.nonzero((tvec <= md))[0].max())
        print(t1, max(tvec))
        if t1 < (len(tvec) - 1):
            nc2 = nc1
            t2 = t1 + 1
        else:
            nc2 = netcdf_file(filenam2)
            t2 = 0

        def readfld(ncvar):
            return self.gmt.field(ncvar[t1, 0, :, :self.imt])[self.j1:self.j2,
                                                              self.i1:self.i2]

        u1 = readfld(nc1.variables['u'])
        v1 = readfld(nc1.variables['v'])
        u2 = readfld(nc2.variables['u'])
        v2 = readfld(nc2.variables['v'])
        rat = float(md - tvec[t1]) / float(tvec[t2] - tvec[t1])
        self.u = u2 * rat + u1 * (1 - rat)
        self.v = v2 * rat + v1 * (1 - rat)
        print(self.jd, md, t1, t2)
Example #16
 def refresh(self, fld, fldtype="DAY", jd1=None, jd2=None, delall=False):
     """ Read a L3 mapped file and add field to current instance"""
     jd1 = pl.datestr2num('2003-01-01') if jd1 is None else jd1
     jd2 = int(pl.date2num(dtm.now())) - 1 if jd2 is None else jd2
     for jd in np.arange(jd1, jd2):
         print(" --- %s --- " % pl.num2date(jd).strftime('%Y-%m-%d'))
         filename = os.path.join(
             self.datadir,
             self.generate_filename(jd, fld, fldtype) + ".nc")
         if delall:
             for fn in glob.glob(filename + "*"):
                 print("Deleted %s" % fn)
                 os.remove(fn)
         print("Checking files")
         if not os.path.isfile(filename[:-3] + '.npz'):
             try:
                 self.load(fld, fldtype, jd=jd, verbose=True)
             except IOError:
                 print("Downloading failed. Trying to remove old files.")
                 try:
                     os.remove(filename)
                 except:
                     pass
                 try:
                     self.load(fld, fldtype, jd=jd, verbose=True)
                 except:
                     print("   ###   Warning! Failed to add %s   ###" %
                           os.path.basename(filename))
             print("\n")
         else:
             print("found")
Example #17
    def load(self,**kwargs):
        """ Load Oscar fields for a given day"""
        self._timeparams(**kwargs)
        md  = self.jd - pl.datestr2num('1992-10-05')
        filename = os.path.join(self.datadir, "oscar_vel%i.nc" % self.yr)
        if not os.path.exists(filename):
            self.download(filename)
        filenam2 = os.path.join(self.datadir, "oscar_vel%i.nc" % (self.yr+1))
        if not os.path.exists(filenam2):
            self.download(filenam2)

        nc1 = netcdf_file(filename)        
        tvec = nc1.variables['time'][:]
        t1 = int(np.nonzero((tvec<=md))[0].max())
        print(t1, max(tvec))
        if t1<(len(tvec)-1):
            nc2 = nc1
            t2 = t1 +1
        else:
            nc2 = netcdf_file(filenam2)                    
            t2 = 0
        def readfld(ncvar):
            return self.gmt.field(ncvar[t1, 0,:,:self.imt])[self.j1:self.j2,
                                                            self.i1:self.i2]
        u1 = readfld(nc1.variables['u'])
        v1 = readfld(nc1.variables['v'])
        u2 = readfld(nc2.variables['u'])
        v2 = readfld(nc2.variables['v'])
        rat = float(md-tvec[t1])/float(tvec[t2]-tvec[t1])
        self.u = u2*rat + u1*(1-rat)
        self.v = v2*rat + v1*(1-rat)
        print(self.jd, md, t1, t2)
Example #18
def parse_sounding_block(sounding_block):
	headers=sounding_block[0].split()[0:17]
	start_date_str=sounding_block[1].split()[0]+" "+sounding_block[1].split()[1]
	data_dict=dict([(headers[i],array([float_conv(sounding_block[j+1].split()[i]) for j in range(len(sounding_block)-1)])) for i in range(len(headers))])
	date_list=[num2date(datestr2num(sounding_block[i+1].split()[0]+" "+sounding_block[i+1].split()[1])) for i in range(len(sounding_block)-1)]
	data_dict.update({'date_list':array(date_list)})
	return data_dict
Example #19
    def drawCity(self):
        """
        Plot the data
        :return:
        """
        pl.title("pm25 / time   " + str(self.numMonitors) + "_monitors")# give plot a title
        pl.xlabel('time')# make axis labels
        pl.ylabel('pm2.5')
        self.fill_cityPm25List()


        for monitorStr in self.cityPm25List:
            data = np.loadtxt(StringIO(monitorStr), dtype=np.dtype([("t", "S13"),("v", float)]))
            datestr = np.char.replace(data["t"], "T", " ")
            t = pl.datestr2num(datestr)
            v = data["v"]
            pl.plot_date(t, v, fmt="-o")



        pl.subplots_adjust(bottom=0.3)
        # pl.legend(loc=4)  # set the legend location; see help(pl.legend) for details
        ax = pl.gca()
        ax.fmt_xdata = pl.DateFormatter('%Y-%m-%d %H:%M:%S')
        pl.xticks(rotation=70)
        # pl.xticks(t, datestr)  # to use the data points as tick positions, comment out this line
        ax.xaxis.set_major_formatter(pl.DateFormatter('%Y-%m-%d %H:%M'))
        pl.grid()
        pl.show()# show the plot on the screen
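The key step in drawCity is rewriting the timestamps so dateutil can parse them, then passing the whole string array to pl.datestr2num at once. A self-contained sketch with made-up readings:

import numpy as np
import pylab as pl
from io import StringIO

monitor = "2016-03-09T01 42.0\n2016-03-09T02 55.5\n"  # made-up readings
data = np.loadtxt(StringIO(monitor), dtype=np.dtype([("t", "U13"), ("v", float)]))
t = pl.datestr2num(np.char.replace(data["t"], "T", " "))  # 'T' -> ' ' for dateutil
pl.plot_date(t, data["v"], fmt="-o")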
Example #20
def get_two_best_sondes(date_str, **kwargs):
	sonde_file=kwargs.get('sonde_file', '/data/twpice/darwin.txt')
	#outdir=kwargs.get('outdir', '/flurry/home/scollis/bom_mds/dealias/')
	outdir=kwargs.get('outdir', '/home/scollis/bom_mds/dealias/')
	tim_date=num2date(datestr2num(date_str))
	sonde_list=read_sounding_within_a_day(sonde_file, tim_date)
	launch_dates=[sonde['date_list'][0] for sonde in sonde_list]
	#print launch_dates
	launch_date_offset=[date2num(sonde['date_list'][0])- date2num(tim_date)  for sonde in sonde_list]
	sonde_made_it=False
	candidate=0
	while not(sonde_made_it):
		best_sonde=sonde_list[argsort(abs(array(launch_date_offset)))[candidate]]
		candidate=candidate+1
		sonde_made_it=best_sonde['alt(m)'][-1] > 18000.
		if not sonde_made_it: print("Sonde Burst at ", best_sonde['alt(m)'][-1], "m rejecting")
	print("Sonde Burst at ", best_sonde['alt(m)'][-1], "m Accepting")
	sonde_made_it=False
	while not(sonde_made_it):
		sec_best_sonde=sonde_list[argsort(abs(array(launch_date_offset)))[candidate]]
		candidate=candidate+1
		sonde_made_it=sec_best_sonde['alt(m)'][-1] > 18000.
		if not sonde_made_it: print("Sonde Burst at ", sec_best_sonde['alt(m)'][-1], "m rejecting")
	print("Sonde Burst at ", sec_best_sonde['alt(m)'][-1], "m Accepting")
	print('Time of radar: ', tim_date, ' Time of  best sonde_launch: ', best_sonde['date_list'][0], ' Time of sonde_termination: ', best_sonde['date_list'][-1])
	print('Time of radar: ', tim_date, ' Time of second sonde_launch: ', sec_best_sonde['date_list'][0], ' Time of sonde_termination: ', best_sonde['date_list'][-1])
	for i in range(len(sonde_list)):
		best_sonde=sonde_list[argsort(abs(array(launch_date_offset)))[i]]
		print('Time of radar: ', tim_date, ' Time of  best sonde_launch: ', best_sonde['date_list'][0], ' Offset', abs(date2num(best_sonde['date_list'][0])-date2num(tim_date))*24.0)
	return best_sonde, sec_best_sonde
Example #21
 def refresh(self, fld, fldtype="DAY", jd1=None, jd2=None, delall=False):
     """ Read a L3 mapped file and add field to current instance"""
     jd1 = pl.datestr2num('2003-01-01') if jd1 is None else jd1
     jd2 = int(pl.date2num(dtm.now())) - 1  if jd2 is None else jd2
     for jd in np.arange(jd1, jd2):
         print(" --- %s --- " % pl.num2date(jd).strftime('%Y-%m-%d'))
         filename = os.path.join(
             self.datadir, self.generate_filename(jd,fld,fldtype) + ".nc")
         if delall:
             for fn in glob.glob(filename + "*"):
                 print("Deleted %s" % fn)
                 os.remove(fn)
         print("Checking files")
         if not os.path.isfile(filename[:-3] + '.npz'):
             try:
                 self.load(fld, fldtype, jd=jd, verbose=True)
             except IOError:
                 print("Downloading failed. Trying to remove old files.")
                 try:
                     os.remove(filename)
                 except:
                     pass
                 try:
                     self.load(fld,fldtype,jd=jd,verbose=True)
                 except:
                     print ("   ###   Warning! Failed to add %s   ###" %
                            os.path.basename(filename))
             print("\n")
         else:
             print("found")
Example #22
def process_data_ria ( data ):
    """This function processes the data bundle downloaded from a RIA
    station, and returns a number of arrays with the data."""

    t_string = [ line.split()[0] for line in data[2:] \
            if len(line.split()) == 13 ]
    year = [ 2000 + int ( line.split()[0].split( "-" )[-1] ) \
            for line in data[2:] if len(line.split()) == 13 ]
    doy = [ int ( line.split()[1]) \
            for line in data[2:] if len(line.split()) == 13 ]
    tmax = [ float ( line.split()[2]) \
            for line in data[2:] if len(line.split()) == 13 ]
    tmin = [ float ( line.split()[4]) \
            for line in data[2:] if len(line.split()) == 13 ]
    tmean = [ float ( line.split()[6]) \
            for line in data[2:] if len(line.split()) == 13 ]
    swrad = [ float ( line.split()[-3]) \
            for line in data[2:] if len(line.split()) == 13 ]
    wspd = [ float ( line.split()[-5]) \
            for line in data[2:] if len(line.split()) == 13 ]
    eto = [ float ( line.split()[-1]) \
            for line in data[2:] if len(line.split()) == 13 ]
    hum = [ float ( line.split()[9]) \
            for line in data[2:] if len(line.split()) == 13 ]
    prec = [ float ( line.split()[-2]) \
            for line in data[2:] if len(line.split()) == 13 ]
    t_axis = [ time.strftime("%Y-%m-%d", \
            time.strptime( line.split()[0], "%d-%m-%y")) \
            for line in data[2:] if len(line.split()) == 13 ]
    t_axis = pylab.datestr2num ( t_axis )
    return ( t_axis, t_string,year, doy, tmax, tmin, tmean, swrad, \
            wspd, eto, hum, prec )
Example #23
    def drawCity(self):
        """
        Plot the data
        :return:
        """
        pl.title("pm25 / time   " + str(self.numMonitors) +
                 "_monitors")  # give plot a title
        pl.xlabel('time')  # make axis labels
        pl.ylabel('pm2.5')
        self.fill_cityPm25List()

        for monitorStr in self.cityPm25List:
            data = np.loadtxt(StringIO(monitorStr),
                              dtype=np.dtype([("t", "S13"), ("v", float)]))
            datestr = np.char.replace(data["t"], "T", " ")
            t = pl.datestr2num(datestr)
            v = data["v"]
            pl.plot_date(t, v, fmt="-o")

        pl.subplots_adjust(bottom=0.3)
        # pl.legend(loc=4)  # set the legend location; see help(pl.legend) for details
        ax = pl.gca()
        ax.fmt_xdata = pl.DateFormatter('%Y-%m-%d %H:%M:%S')
        pl.xticks(rotation=70)
        # pl.xticks(t, datestr)  # to use the data points as tick positions, comment out this line
        ax.xaxis.set_major_formatter(pl.DateFormatter('%Y-%m-%d %H:%M'))
        pl.grid()
        pl.show()  # show the plot on the screen
Example #24
	def load_csv(self,f):
		"""
		Loading data from a csv file. Uses pylab's load function. Seems much faster
		than scipy.io.read_array.
		"""
		varnm = f.readline().split(',')

		# what is the date variable's key if any, based on index passed as argument
		if self.date_key != '':
			try:
				rawdata = pylab.load(f, delimiter=',',converters={self.date_key:pylab.datestr2num})			# don't need to 'skiprow' here
			except ValueError:																				# if loading via pylab doesn't work use csv
				rawdata = self.load_csv_nf(f)	

				# converting the dates column to a date-number
				rawdata[self.date_key] = pylab.datestr2num(rawdata[self.date_key])

			self.date_key = varnm[self.date_key]
		else:
			try:
				rawdata = pylab.load(f, delimiter=',')														# don't need to 'skiprow' here
			except ValueError:																				# if loading via pylab doesn't work use csv
				rawdata = self.load_csv_nf(f)	

		# making sure that the variable names contain no leading or trailing spaces
		varnm = [i.strip() for i in varnm]

		# transforming the data into a dictionary
		if type(rawdata) == list:
			# if the csv module was used
			self.data = dict(zip(varnm,rawdata))
		else:
			# if the pylab.load module was used
			self.data = dict(zip(varnm,rawdata.T))
Example #25
    def timeseries(self, fieldname, jd1, jd2, mask=None):
        """Create a timeseries of fields using mask to select data"""
        mask = mask if mask is not None else self.llat == self.llat
        jd1 = pl.datestr2num(jd1) if type(jd1) is str else jd1
        jd2 = pl.datestr2num(jd2) if type(jd2) is str else jd2

        self.tvec = np.arange(jd1, jd2+1)
        field = np.zeros((len(self.tvec),) + self.llat.shape, dtype=np.float32)
        for n,jd in enumerate(self.tvec):
            print(pl.num2date(jd), pl.num2date(jd2))
            try:
                field[n,:,:] = self.get_field(fieldname, jd=jd).astype(np.float32)
            except KeyError:
                field[n,:,:] = np.nan
            field[n, ~mask] = np.nan
        setattr(self, fieldname + 't', field)
Example #26
 def _timeparams(self, **kwargs):
     """Calculate time parameters from given values"""
     for key in kwargs.keys():
         self.__dict__[key] = kwargs[key]
     if "date" in kwargs:
         self.jd = pl.datestr2num(kwargs['date'])
         self.jd = int(self.jd) if self.jd == int(self.jd) else self.jd
     elif ('yd' in kwargs) & ('yr' in kwargs):
         if self.yd < 1:
             self.yr = self.yr -1
             ydmax = (pl.date2num(dtm(self.yr, 12, 31)) -
                      pl.date2num(dtm(self.yr,  1,  1))) + 1    
             self.yd = ydmax + self.yd     
         self.jd = self.yd + pl.date2num(dtm(self.yr,1,1)) - 1
     elif  ('yr' in kwargs) & ('mn' in kwargs) & ('dy' in kwargs):
         self.jd = pl.date2num(dtm(self.yr,self.mn,self.dy))
     elif not 'jd' in kwargs:
         if hasattr(self, 'defaultjd'):
             self.jd = self.defaultjd
         else:
             raise KeyError("Time parameter missing")
     if hasattr(self,'hourlist'):
         dd = self.jd-int(self.jd)
         ddlist = np.array(self.hourlist).astype(float)/24
         ddpos = np.argmin(np.abs(ddlist-dd))
         self.jd = int(self.jd) + ddlist[ddpos]
     self._jd_to_dtm()
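In _timeparams, whole-number date values mark midnight and a fractional part encodes the time of day. A small sketch of that convention:

import pylab as pl
from datetime import datetime as dtm

jd = pl.datestr2num('2010-06-15 18:00')
print(jd == int(jd))                  # False: 18:00 leaves a fractional part
print(pl.date2num(dtm(2010, 6, 15))) # midnight dates give whole-day numbers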
Example #27
def parse_info_line(info_line):
    # date_obj=num2date(datestr2num(info_line[(info_line.find('date/time=')+len('date/time=')):-1]))
    # n_rays=int(info_line[(info_line.find('number_of_rays=')+len('number_of_rays=')):(info_line.find('date/time=')-1)])
    ilsplit = info_line.split()
    date_obj = num2date(datestr2num(ilsplit[-2] + " " + ilsplit[-1]))
    sweep_number = int(ilsplit[ilsplit.index("sweep_no=") + 1])
    nrays = int(ilsplit[ilsplit.index("number_of_rays=") + 1])
    return {"date": date_obj, "sweep number": sweep_number, "rays": nrays}
Example #28
def analysisImg(rslist,spercode,start,end):
    # import matplotlib.font_manager as font_manager
    #
    # path = '/usr/share/fonts/winfonts/simfang.ttf'
    #
    # prop = font_manager.FontProperties(fname=path)
    # prop.set_weight = 'light'
    #
    # matplotlib.rc('font', family='sans-serif')
    # matplotlib.rc('font', serif='FangSong')
    # matplotlib.rc('text', usetex='false')
    # matplotlib.rcParams.update({'font.size': 12})


    # Date rules: end date >= 27 -> take each month's Wednesday
    #      13 < ... < 27 -> take odd days
    #      6 <= ... <= 13 -> take days in sequence
    #      ... < 6 -> daily at 00:00 and 12:00

    points = []
    if rslist:
        for item in rslist:
            sd = pylab.datestr2num(item[0])
            points.append((sd, float(item[6].quantize(decimal.Decimal('0.0')))))

    # sort by date
    points = sorted(points, key=lambda d: d[0])

    x, xt, y = [], [], []
    for item in points:
        x.append(item[0])
        y.append(item[1])
    if x:
        xmin = min(x)
        xmax = max(x)
        plt.xlim(xmin,xmax)

    if y:
        ymin = min(y)
        ymax = max(y)
        plt.ylim(ymin,ymax)

    plt.plot(x, y, linestyle='-',color="red")

    ds = [pylab.num2date(d) for d in x]
    xt = [(d.strftime('%d')+"-"+d.strftime("%m")+"月") for d in ds]
    plt.xticks(x,xt)
    plt.xlabel('时间(天)')
    plt.ylabel('金额(元)')
    plt.title('供应商日销售汇总折线图')
    plt.grid(True)


    root = settings.BASE_DIR
    filepath = "/static/image/daysale/" + spercode+".png"
    plt.savefig(root+filepath)
    return  filepath
Example #29
 def _convert_options(self):
     """
     Convert options, which are only strings in the beginning
     to numerical values or execute functions to set directory
     names appropriately
     """
     for v in self.options.keys():
         var = self.options[v]
         for s in var.keys():  # each section
             sec = var[s]
             for k in sec.keys():
                 if k == 'start':
                     sec.update({k: pl.num2date(pl.datestr2num(sec[k]))})
                 elif k == 'stop':
                     sec.update({k: pl.num2date(pl.datestr2num(sec[k]))})
                 else:
                     # update current variable with valid value
                     sec.update({k: self.__convert(sec[k])})
Example #30
 def _convert_options(self):
     """
     Convert options, which are only strings in the beginning
     to numerical values or execute functions to set directory
     names appropriately
     """
     for v in self.options.keys():
         var = self.options[v]
         for s in var.keys():  # each section
             sec = var[s]
             for k in sec.keys():
                 if k == 'start':
                     sec.update({k: pl.num2date(pl.datestr2num(sec[k]))})
                 elif k == 'stop':
                     sec.update({k: pl.num2date(pl.datestr2num(sec[k]))})
                 else:
                     # update current variable with valid value
                     sec.update({k: self.__convert(sec[k])})
Example #31
def seawinds(date, lonvec, latvec):

    jd = pl.datestr2num(date) if type(date) is str else date
    if not hasattr(persist, "swn"):
        persist.swn = winds.Seawinds()
        persist.swn.add_kd()
    ivec,jvec = persist.swn.ll2ij(lonvec, latvec)
    persist.swn.load(jd)
    return persist.swn.nwnd[jvec, ivec]
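The first line of seawinds is a common coercion idiom: accept either a date string or an already-converted number. As a tiny standalone helper (a sketch, not part of the original module):

import pylab as pl

def to_jd(date):
    # accept either a date string or an already-converted date number
    return pl.datestr2num(date) if isinstance(date, str) else date

jd = to_jd('2005-07-01')
print(to_jd(jd) == jd)  # numbers pass through unchanged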
Example #32
def single_bar_charts(labels, capacity, file_path, title):
    time_list = pl.datestr2num(labels)
    fig, ax = plt.subplots(figsize=(8, 6))
    fig.autofmt_xdate()
    plt.bar(time_list, capacity, ec='k', ls='-', alpha=0.5, color='r')
    ax.xaxis_date()
    plt.legend([title], loc='best', fontsize=7)
    plt.grid()
    plt.savefig(file_path)
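pl.datestr2num also accepts a sequence of strings and returns an array, which is what lets plt.bar place bars on a date axis here. A sketch with made-up labels and a placeholder path:

import pylab as pl
import matplotlib.pyplot as plt

labels = ["2020-01-01", "2020-01-02", "2020-01-03"]  # made-up labels
capacity = [3.0, 4.5, 2.5]
fig, ax = plt.subplots(figsize=(8, 6))
fig.autofmt_xdate()
plt.bar(pl.datestr2num(labels), capacity, ec='k', alpha=0.5, color='r')
ax.xaxis_date()
plt.savefig('bars_demo.png')  # placeholder path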
Example #33
def analysisImg(rslist, spercode, start, end):
    # import matplotlib.font_manager as font_manager
    #
    # path = '/usr/share/fonts/winfonts/simfang.ttf'
    #
    # prop = font_manager.FontProperties(fname=path)
    # prop.set_weight = 'light'
    #
    # matplotlib.rc('font', family='sans-serif')
    # matplotlib.rc('font', serif='FangSong')
    # matplotlib.rc('text', usetex='false')
    # matplotlib.rcParams.update({'font.size': 12})

    # Date rules: end date >= 27 -> take each month's Wednesday
    #      13 < ... < 27 -> take odd days
    #      6 <= ... <= 13 -> take days in sequence
    #      ... < 6 -> daily at 00:00 and 12:00

    points = []
    if rslist:
        for item in rslist:
            sd = pylab.datestr2num(item[0])
            points.append((sd, float(item[6].quantize(decimal.Decimal('0.0')))))

    # sort by date
    points = sorted(points, key=lambda d: d[0])

    x, xt, y = [], [], []
    for item in points:
        x.append(item[0])
        y.append(item[1])
    if x:
        xmin = min(x)
        xmax = max(x)
        plt.xlim(xmin, xmax)

    if y:
        ymin = min(y)
        ymax = max(y)
        plt.ylim(ymin, ymax)

    plt.plot(x, y, linestyle='-', color="red")

    ds = [pylab.num2date(d) for d in x]
    xt = [(d.strftime('%d') + "-" + d.strftime("%m") + "月") for d in ds]
    plt.xticks(x, xt)
    plt.xlabel('时间(天)')
    plt.ylabel('金额(元)')
    plt.title('供应商日销售汇总折线图')
    plt.grid(True)

    root = settings.BASE_DIR
    filepath = "/static/image/daysale/" + spercode + ".png"
    plt.savefig(root + filepath)
    return filepath
Example #34
    def globtime(self):

        mat = np.load('globcummat.npz')
        jd1 = pl.datestr2num('2003-01-01')
        jd2 = pl.datestr2num('2012-12-31')
        jdvec = np.arange(jd1,jd2)
        
        figpref.current()
        pl.close(1)
        pl.figure(1)
        
        pl.plot_date(jdvec, mat['northmat'][10,:],'r-',
                     alpha=0.5, label="Northern hemisphere")
        pl.plot_date(jdvec, mat['southmat'][10,:],'b-',
                     alpha=0.5, label="Southern hemisphere")
        pl.ylim(0,1)
        pl.legend(loc='lower right')
        pl.savefig('figs/liege/glob_time.pdf',
                   transparent=True, bbox_inches='tight')
Example #35
 def availabe_jds(self, datadir=None, filestamp="nwa_avg_*.nc"):
     """List the julian dates for all available fields in datadir"""
     datadir = self.datadir if datadir is None else datadir
     flist = glob.glob(os.path.join(datadir, filestamp))
     eplist = []
     baseep = pl.num2epoch(pl.datestr2num("1900-01-01"))
     for fn in flist:
         with Dataset(fn) as nc:
             eplist.append(nc.variables['ocean_time'][:] + baseep)
     return np.squeeze(np.array(pl.epoch2num(eplist)))
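Note that pl.num2epoch and pl.epoch2num were deprecated and later removed in recent matplotlib releases. If they are unavailable, the same conversion is plain day arithmetic relative to 1970-01-01; a sketch under that assumption:

import pylab as pl

SEC_PER_DAY = 86400.0
epoch0 = pl.datestr2num("1970-01-01")

def num2epoch(num):
    # matplotlib date number (days) -> seconds since the Unix epoch
    return (num - epoch0) * SEC_PER_DAY

def epoch2num(epoch):
    return epoch0 + epoch / SEC_PER_DAY

print(num2epoch(pl.datestr2num("1900-01-01")))  # negative: before 1970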
Example #36
    def globcumcum(self):
        """Create global cucum curves from MODIS 4km data"""
        ns = nasa.MODIS(res='4km')
        jd1 = pl.datestr2num('2003-01-01')
        jd2 = pl.datestr2num('2012-12-31')

        self.northmat = np.zeros((100, jd2 - jd1))
        self.globmat = np.zeros((100, jd2 - jd1))
        self.southmat = np.zeros((100, jd2 - jd1))

        for n, jd in enumerate(np.arange(jd1, jd2)):
            ns.load('chl', jd=jd)
            _, y = self.cumcum(an(ns.chl))
            self.globmat[:, n] = y
            _, y = self.cumcum(an(ns.chl[:ns.jmt // 2, :]))
            self.northmat[:, n] = y
            _, y = self.cumcum(an(ns.chl[ns.jmt // 2:, :]))
            self.southmat[:, n] = y
            print(jd2 - jd)
Example #37
    def globcumcum(self):
        """Create global cucum curves from MODIS 4km data"""
        ns = nasa.MODIS(res='4km')
        jd1 = pl.datestr2num('2003-01-01')
        jd2 = pl.datestr2num('2012-12-31')

        self.northmat = np.zeros((100,jd2-jd1)) 
        self.globmat = np.zeros((100,jd2-jd1))
        self.southmat = np.zeros((100,jd2-jd1)) 

        for n,jd in enumerate(np.arange(jd1,jd2)):
            ns.load('chl',jd=jd)
            _,y = self.cumcum(an(ns.chl))
            self.globmat[:,n] = y
            _,y = self.cumcum(an(ns.chl[:ns.jmt//2,:]))
            self.northmat[:,n] = y
            _,y = self.cumcum(an(ns.chl[ns.jmt//2:,:]))
            self.southmat[:,n] = y
            print(jd2-jd)
Example #38
def pick_a_pickle(datestr, **kwargs):
	path=kwargs.get('path', '/bm/gkeep/scollis/deal_ber/')
	target_date=num2date(datestr2num(datestr))
	target_cpol='C-POL_deal_'+std_datestr(target_date)+'.pickle.gz'
	target_ber='Berrimah_deal_'+std_datestr(target_date)+'.pickle.gz'
	
	if not(target_cpol in os.listdir(path)):
		raise IOError('No time found for Cpol, need '+ target_cpol)
	if not(target_ber in os.listdir(path)):
		raise IOError('No time found for Berrimah, need '+ target_ber)
	pickle_to_cappi(target_cpol, target_ber, debug=True)
Example #39
 def timeseries(self,fld,i1,j1,i2=None,j2=None):
     """Generate a time series of a field"""
     if not i2: i2 = i1+1
     if not j2: j2 = j1+1
     self.jdvec = []
     tserie = []
     for yr in np.arange(self.y1,self.y2+1):
         for mn in np.arange(self.m1,self.m2+1):
             self.load(fld,yr=yr,mn=mn)
             tserie.append(self.fld[i1:i2,j1:j2].mean())
             self.jdvec.append(pl.datestr2num('%04i%02i01' % (yr,mn)))
     return  np.array(self.jdvec), np.array(tserie)
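The '%04i%02i01' stamp builds strings like '20050301', which datestr2num parses as the first day of that month. A quick check:

import pylab as pl

jd = pl.datestr2num('%04i%02i01' % (2005, 3))  # -> '20050301'
print(pl.num2date(jd).strftime('%Y-%m-%d'))    # 2005-03-01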
Example #40
    def _generate_datadir(self, kwargs):
        """Generate datadir string based on info from nlist"""
        if not hasattr(self, 'datadir'):                
            if len(self.nlrun.outdatadir) > 0:
                self.datadir = self.nlrun.outdatadir
            elif os.getenv("TRMOUTDATADIR") is not None:
                self.datadir = os.path.join(os.getenv("TRMOUTDATADIR"),
                                            self.projname)
            else:
                self.datadir = ""
            if not os.path.isabs(self.datadir):
                self.datadir = os.path.join(self.trmdir, self.datadir)

        if getattr(self.nlrun, "outdircase", True):
            self.datadir = os.path.join(self.datadir, self.casename)

        if getattr(self.nlrun, "outdirdate", True):
            if type(kwargs.get('startdate')) is float:
                self.jdstart = kwargs['startdate']
            elif type(kwargs.get('startdate')) is int:
                self.jdstart = kwargs['startdate'] + 0.5
            elif type(kwargs.get('startdate')) is str:
                self.jdstart = pl.datestr2num(kwargs['startdate'])
            else:
                self.jdstart = pl.datestr2num("%04i%02i%02i-%02i%02i" % (
                    self.nlrun.startyear, self.nlrun.startmon,
                    self.nlrun.startday,  self.nlrun.starthour,
                    self.nlrun.startmin))
            self.outdirdatestr = pl.num2date(self.jdstart).strftime("%Y%m%d-%H%M")
            self.datadir = os.path.join(self.datadir, self.outdirdatestr)
        else:
            self.outdirdatestr = ""
            
        if self.nlrun.outdatafile:
            self.datafilepref = os.path.join(self.datadir,
                                             self.nlrun.outdatafile)
        else:
            self.datafilepref = os.path.join(self.datadir, self.casename)
Example #41
def do_tseries_plots ( chunk, af_date ):
    import datetime
    import pylab
    import numpy
    import pdb
    
    af_date = pylab.datestr2num ( datetime.datetime.strptime(af_date, \
                "A%Y%j").strftime("%Y-%m-%d") )
    dates_mcd43 = pylab.datestr2num([ \
        datetime.datetime.strptime(d, \
        "A%Y%j").strftime("%Y-%m-%d") \
        for d in chunk['dates'] ])
    pylab.subplot(211)
    rho_nir = chunk['Nadir_Reflectance_Band2']
    rho_qa = chunk ['BRDF_Albedo_Quality']
    rho_nir = numpy.where ( rho_qa <= 1, rho_nir, numpy.nan )
    rho_nir[rho_nir>1] = numpy.nan
    pdb.set_trace()
    [ pylab.plot_date ( dates_mcd43, \
        rho_nir[:, i], '-s', label="Pix %d" % (i+1) ) \
        for i in range(9) ]
    #pylab.axvline ( af_date )
    pylab.legend(loc='best')
    pylab.ylabel(r'NIR NBAR reflectance [-]')
    pylab.xlabel(r'Date' )
    pylab.grid(True)
    pylab.subplot(212)
    rho_swir = chunk['Nadir_Reflectance_Band5']
    rho_swir = numpy.where ( rho_qa <= 1, rho_swir, numpy.nan )
    rho_swir[rho_swir>1] = numpy.nan
    [ pylab.plot_date ( dates_mcd43, \
        rho_swir[:, i], '-s', label="Pix %d" % (i+1) ) \
        for i in range(9) ]
    pylab.legend(loc='best')
    #pylab.axvline ( af_date )
    pylab.ylabel(r'SWIR NBAR reflectance [-]')
    pylab.xlabel(r'Date' )
    pylab.grid(True)
Example #42
def do_tseries_plots(chunk, af_date):
    import datetime
    import pylab
    import numpy
    import pdb

    af_date = pylab.datestr2num ( datetime.datetime.strptime(af_date, \
                "A%Y%j").strftime("%Y-%m-%d") )
    dates_mcd43 = pylab.datestr2num([ \
        datetime.datetime.strptime(d, \
        "A%Y%j").strftime("%Y-%m-%d") \
        for d in chunk['dates'] ])
    pylab.subplot(211)
    rho_nir = chunk['Nadir_Reflectance_Band2']
    rho_qa = chunk['BRDF_Albedo_Quality']
    rho_nir = numpy.where(rho_qa <= 1, rho_nir, numpy.nan)
    rho_nir[rho_nir > 1] = numpy.nan
    pdb.set_trace()
    [ pylab.plot_date ( dates_mcd43, \
        rho_nir[:, i], '-s', label="Pix %d" % (i+1) ) \
        for i in range(9) ]
    #pylab.axvline ( af_date )
    pylab.legend(loc='best')
    pylab.ylabel(r'NIR NBAR reflectance [-]')
    pylab.xlabel(r'Date')
    pylab.grid(True)
    pylab.subplot(212)
    rho_swir = chunk['Nadir_Reflectance_Band5']
    rho_swir = numpy.where(rho_qa <= 1, rho_swir, numpy.nan)
    rho_swir[rho_swir > 1] = numpy.nan
    [ pylab.plot_date ( dates_mcd43, \
        rho_swir[:, i], '-s', label="Pix %d" % (i+1) ) \
        for i in range(9) ]
    pylab.legend(loc='best')
    #pylab.axvline ( af_date )
    pylab.ylabel(r'SWIR NBAR reflectance [-]')
    pylab.xlabel(r'Date')
    pylab.grid(True)
Example #43
def make_deal_sonde(date_str, **kwargs):
	#sonde_file=kwargs.get('sonde_file', '/bm/gdata/scollis/twpice/darwin.txt')
	outdir=kwargs.get('outdir', '/home/scollis/bom_mds/dealias/')
	tim_date=num2date(datestr2num(date_str))
	nl=kwargs.get('nl', 990)
	#sonde_list=read_sounding.read_sounding_within_a_day(sonde_file, tim_date)
	#launch_dates=[sonde['date_list'][0] for sonde in sonde_list]
	#launch_date_offset=[date2num(sonde['date_list'][0])- date2num(tim_date)  for sonde in sonde_list]
	#best_sonde=sonde_list[argsort(abs(array(launch_date_offset)))[0]]
	req=[ 'alt(m)',  'wspd(m/s)',  'wdir(degs)']
	first_sonde,second_sonde = get_two_best_conc_sondes(date_str, req_vars=req)
	interp_sonde=interp_sonde_time(first_sonde, second_sonde, tim_date, nl)
	#print 'Time of radar: ', tim_date, ' Time of sonde_launch: ', best_sonde['date_list'][0], ' Time of sonde_termination: ', best_sonde['date_list'][-1]
	#levels_onto=linspace(best_sonde['alt(m)'][0],  best_sonde['alt(m)'][-1], 900)
	#interp_sonde=read_sounding.interp_sounding(best_sonde, levels_onto)
	days=(int(date2num(tim_date))- datestr2num('01/01/'+date_str[0:4])) +1.0
	yr4="%(y)4d" %{'y':tim_date.year}
	date_dict={'ddd':days, 'HH':tim_date.hour, 'MM':tim_date.minute}
	post_name="%(ddd)03d%(HH)02d%(MM)02d_ti_sounding.txt" %date_dict
	dfname=yr4[2:4]+post_name
	fname=kwargs.get('fname', dfname)
	#days=(int(date2num(best_sonde['date_list'][0]))- datestr2num('01/01/'+date_str[0:4])) +1.0
	#yr4="%(y)4d" %{'y':best_sonde['date_list'][0].year}
	#date_dict={'ddd':days, 'HH':best_sonde['date_list'][0].hour, 'MM':best_sonde['date_list'][0].minute}
	#post_name="%(ddd)03d%(HH)02d%(MM)02d_sounding.txt" %date_dict
	#dfname=yr4[2:4]+post_name
	#fname=kwargs.get('fname', dfname)
	print("opening ", outdir+fname)
	f=open(outdir+fname, 'w')
	f.write('H\n')
	for i in range(interp_sonde['alt(m)'].shape[0]):
		items=['alt(m)', 'press(hPa)', 'press(hPa)', 'wdir(degs)', 'wspd(m/s)', 'press(hPa)', 'press(hPa)', 'press(hPa)', 'press(hPa)']
		printdict=dict([(str(n+1),interp_sonde[items[n]][i]) for n in range(len(items))])
		prstr="%(1)f %(2)f %(3)f %(4)f %(5)f %(6)f %(7)f %(8)f %(9)f \n" %printdict
		f.write(prstr)
	f.write('here endeth the file \n')
	f.close()
	return fname
Example #44
def timecube(region=None, lat1=None, lat2=None, lon1=None, lon2=None):

    if region == "BATS":
        lat1 = 30
        lat2 = 34
        lon1 = -66
        lon2 = -62
    if region == "experiment":
        lat1 = 43
        lat2 = 47
        lon1 = -26
        lon2 = -22
    ns = nasa.MODIS(lat1=lat1,lat2=lat2,lon1=lon1,lon2=lon2)
           
    jd1 = pl.datestr2num('2003-01-01')
    jd2 = pl.datestr2num('2013-05-31')
    ns.jdvec = np.arange(jd1,jd2+1)
    ns.timecube = np.zeros((len(ns.jdvec),) + ns.llat.shape)

    for n,jd in enumerate(ns.jdvec):
       ns.timecube[n,:,:] =  ns.get_field('chl',jd=jd)
       print(ns.jdvec.max() - jd)
    return ns
Example #45
    def setup_grid(self):
        """Setup necessary variables for grid """
        g = netcdf_file(self.gridfile, 'r')
        mat = np.genfromtxt(self.datadir + '/mass_grid.dat')
        self.llon = np.zeros((self.jmt, self.imt))
        self.llat = np.zeros((self.jmt, self.imt))
        for vec in mat:
            self.llon[vec[1] - 1, vec[0] - 1] = -vec[2]
            self.llat[vec[1] - 1, vec[0] - 1] = vec[3]
        self.depth = g.variables['depth'][:].copy()
        self.depth[self.depth < 0.01] = np.nan

        #self.Cs_r = g.variables['Cs_r'][:]
        dayvec = g.variables['time'][:].astype(float).copy()
        self.jdvec = np.array(pl.datestr2num('2000-12-31') + dayvec)
Example #46
def get_best_sounding(target, sdir, minl, maxl):
    sondes = os.listdir(sdir)
    sondes.sort()
    offsets = [np.abs(datestr2num(s[18:33].replace('.', ' ')) -
                      date2num(target)) for s in sondes]
    cont = True
    n = 1
    while cont:
        test_sonde = sondes[offsets.index(nth_smallest(n, offsets))]
        ncf_obj = netCDF4.Dataset(sdir + test_sonde, 'r')
        ncf_min = ncf_obj.variables['alt'][:].min()
        ncf_max = ncf_obj.variables['alt'][:].max()
        if ncf_min < minl and ncf_max > maxl:
            cont = False
            chosen_sonde = test_sonde
        ncf_obj.close()
        n = n + 1
    return chosen_sonde
Example #47
 def filedict(self, year):
     filename = os.path.join(self.datadir, "fdict_%04i.npz" % year)
     if os.path.isfile(filename):
         return np.load(filename)["fdict"].item()
     else:
         req = requests.get("%s/%04i/%s" %
                            (self.dataurl, year, self.flistpage))
         soup = bs4.BeautifulSoup(req.text, "html.parser")
         taglist = soup.find_all("a", itemprop="contentUrl", string="html")
         fdict = {}
         for tag in taglist:
             stamp = ".".join(tag.attrs['href'].split(".")[:2])
             datestr = stamp.split("_")[-1].split(".")[0]
             jd = int(
                 pl.datestr2num("%s-01-01" % datestr[:4]) +
                 int(datestr[4:]))
             fdict[jd] = stamp
         np.savez(filename, fdict=fdict)
         return fdict
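filedict decodes YYYYDDD stamps by adding the day-of-year digits to January 1 of that year. A sketch of just that conversion (the stamp is a made-up value, and the sketch keeps the original's convention of adding DDD straight to the Jan-1 date number):

import pylab as pl

datestr = "2014196"  # made-up YYYYDDD stamp
jd = int(pl.datestr2num("%s-01-01" % datestr[:4]) + int(datestr[4:]))
print(pl.num2date(jd).strftime('%Y-%m-%d'))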
Example #48
    def drawCityAQI_avg(self):
        """
        Plot the city-level AQI curve
        :return:
        """
        pl.title("AVGaqi/time   ")  # give plot a title
        pl.xlabel('time')  # make axis labels
        pl.ylabel('aqi')
        self.fill_cityAqiAVG_hour()

        # # plot each individual time point
        # data = np.loadtxt(StringIO(self.cityAqiAVG_hour), dtype=np.dtype([("t", "S13"), ("v", float)]))
        # datestr = np.char.replace(data["t"], "T", " ")
        # t = pl.datestr2num(datestr)
        # t1 = dt.datestr2num(datestr)
        # k = pl.num2date(t)
        # v = data["v"]
        # pl.plot_date(t[-20:], v[-20:], fmt="-o")
        # # polyfit(t[-20:], v[-20:])

        # plot the 00:00 value for each day
        self.fill_cityAqiAVG_day()
        data = np.loadtxt(StringIO(self.cityAqiAVG_day),
                          dtype=np.dtype([("t", "S13"), ("v", float)]))
        datestr = np.char.replace(data["t"], "T", " ")
        t = pl.datestr2num(datestr)
        k = pl.num2date(t)
        k2 = dt.num2date(t)
        v = data["v"]
        pl.plot_date(t, v, fmt="-o")
        teet = polyfit(t, v)

        pl.subplots_adjust(bottom=0.3)
        # pl.legend(loc=4)  # set the legend location; see help(pl.legend) for details
        ax = pl.gca()
        ax.fmt_xdata = pl.DateFormatter('%Y-%m-%d %H:%M:%S')
        pl.xticks(rotation=70)
        # pl.xticks(t, datestr)  # to use the data points as tick positions, comment out this line
        ax.xaxis.set_major_formatter(pl.DateFormatter('%Y-%m-%d %H:%M'))
        # pl.xlim(('2016-03-09 00:00', '2016-03-12 00:00'))
        pl.grid()  # draw grid lines
        pl.show()  # show the plot on the screen
        print('over')
Example #49
		def moveIntoDataArrays( self, version, dataBlock, ad2cpInstrument ):
			if self.ensembleCounter == 0:
				self.moveHeader( ad2cpInstrument )
			for datasetNumber in ad2cpInstrument[ 'datasetDescription' ]:
				if self.configuration.includesVelocity:
					ad2cpInstrument[ 'velocity' ][ 'data' ][ :, self.ensembleCounter, datasetNumber ] \
						= dataBlock.velocity[ datasetNumber ][ : ]
					ad2cpInstrument[ 'velocity' ][ 'data' ][ :, self.ensembleCounter, datasetNumber ] *= 1000 * 10**self.velocityScaling
				if self.configuration.includesAmplitude:
					ad2cpInstrument[ 'amplitude' ][ 'data' ][ :, self.ensembleCounter, datasetNumber ] \
						= dataBlock.amplitude[ datasetNumber ][ : ]
				if self.configuration.includesCorrelation:
					ad2cpInstrument[ 'correlation' ][ 'data' ][ :, self.ensembleCounter, datasetNumber ] \
						= dataBlock.correlation[ datasetNumber ][ : ]
			ad2cpInstrument[ 'ensemble' ][ self.ensembleCounter, ] = self.ensembleCounter
			ensembleDateStr = datetime.datetime( self.year + 1900, 
												 self.month + 1, 
												 self.day, 
												 self.hour, 
												 self.minute, 
												 self.seconds, 
												 self.microseconds )
			ad2cpInstrument[ 'time' ][ self.ensembleCounter, ] = pylab.datestr2num( 
				'{:%Y-%m-%d %H:%M:%S}'.format( ensembleDateStr ) )
			ad2cpInstrument[ 'temperature' ][ self.ensembleCounter, ] = self.temperature
			ad2cpInstrument[ 'battery' ][ self.ensembleCounter, ] = self.battery
			ad2cpInstrument[ 'state' ][ 'heading' ][ 'data' ][ 0, self.ensembleCounter ] = self.heading
			ad2cpInstrument[ 'state' ][ 'pitch' ][ 'data' ][ 0, self.ensembleCounter ] = self.pitch
			ad2cpInstrument[ 'state' ][ 'roll' ][ 'data' ][ 0, self.ensembleCounter ] = self.roll
			if version == 2 or version == 3:
				ad2cpInstrument[ 'state' ][ 'magnetometer' ][ 'data' ][ :, self.ensembleCounter ] = [ 
					self.magentometerRawData_x,
					self.magentometerRawData_y,
					self.magentometerRawData_z ]
				ad2cpInstrument[ 'state' ][ 'accelorometer' ][ 'data' ][ :, self.ensembleCounter ] = [ 
					self.accelorometerRawData_x,
					self.accelorometerRawData_y,
					self.accelorometerRawData_z ]
			self.incrementCounters()
Example #50
def read_licor(lines):
    """
    Reads a block of licor data from waveglider raw files.
    This contains the following variables:
        temperature [C]             -   temp_degC
        licor pressure [kPa]        -   licor_pres
        xCO2 [um/mm]                -   xco2_ummm
        oxygen saturation [%]       -   o2_sat
        relative humidity [%]       -   rel_hum

    created: 2014-01-08
    author:  Luke Gregor
    """

    dat = {}

    while not lines[0]:
        lines.pop(0)
    glider = np.array(lines[0].split())  # WAVEGLIDER INFO
    gps_data = lines[1].split()  # GPS DATA
    diagnost = np.array(lines[2].split())[:4]  # DIAGNOSTICS
    co2calc = np.array(lines[-1].split())[[1,
                                           3]].astype(float)  # CO2 CALCULATION
    co2calc = np.array(co2calc, ndmin=2).repeat(10, axis=0)

    try:
        cycles = np.array(list(map(str.split, lines[3:-1]))).astype(float)
    except:
        # Items are not space separated, but rather index based. IDX = new item idx
        cycles = np.ndarray([10, 17])
        IDX = [
            0, 3, 9, 15, 22, 28, 36, 42, 48, 52, 55, 59, 64, 69, 78, 84, 93, 99
        ]
        for i, line in enumerate(lines[3:-1]):
            for j in range(len(IDX) - 1):
                cycles[i, j] = line[IDX[j]:IDX[j + 1]].strip()

    # SORTING OUT TIMES (1440 = mins in a day)
    dat['wg_datenum'] = plt.datestr2num(' '.join(glider[3:5]))
    dat['wg_datenum'] = dat['wg_datenum'] + (cycles[:, 0] -
                                             cycles[0, 0]) / 1440
    # the following two lines deal with sampling that crosses the hour mark
    idx = dat['wg_datenum'] < dat['wg_datenum'][0]
    dat['wg_datenum'][idx] = dat['wg_datenum'][idx] + (1. / 24)
    dat['wg_datetime'] = plt.num2date(dat['wg_datenum'])

    # LATITUDES AND LONGITUDES
    dat['wg_latitude'] = convert_coords(gps_data[2], gps_data[3])
    dat['wg_longitude'] = convert_coords(gps_data[4], gps_data[5])

    # COMMANDS          PUMP ON          PUMP OFF         CALIBRATION
    dat['licor_cmnd'] = [
        'zero_pump_on', 'zero_pump_off', 'zero_post_cal', 'span_pump_on',
        'span_pump_off', 'span_post_cal', 'equil_pump_on', 'equil_pump_off',
        'air_pump_on', 'air_pump_off'
    ]
    # GLIDER INFO
    glider_hdr = [
        'licor_cycle', 'licor_x1', 'licor_x2', 'wg_date', 'wg_time',
        'wg_location', 'wg_unit'
    ]
    # DIAGNOSTIC DATA
    diagnost_hdr = [
        'wg_battery_logic',  # 0
        'wg_battery_transmitter',  # 1
        'licor_zero_coeff',  # 2
        'licor_span_coeff'
    ]  # 3
    # CYCLES       VARIABLE           STANDARD DEVIATION
    cycles_hdr = [
        'licor_minutes',  # min
        'licor_temp',
        'licor_tempstd',  # deg C
        'licor_pres',
        'licor_pressstd',  # kPa
        'licor_xco2',
        'licor_xco2std',  # ppm
        'licor_o2sat',
        'licor_o2std',  # %
        'licor_RH',
        'licor_RHstd',  # %
        'licor_RH_temp',
        'licor_RH_tempstd',  # deg C
        'licor_raw1',
        'licor_raw1std',
        'licor_raw2',
        'licor_raw2std'
    ]
    # CO2_MEAS      ATMOSPHERIC             SEAWATER
    co2calc_hdr = ['licor_sw_xco2_dry', 'licor_atm_xco2_dry']

    # Turn the arrays into a dictionary
    dat.update(zip(glider_hdr, glider.T))
    dat.update(zip(diagnost_hdr, diagnost.T))
    dat.update(zip(cycles_hdr, cycles.T))
    dat.update(zip(co2calc_hdr, co2calc.T))

    # some boring number formatting
    dat['licor_zero_coeff'] = float(dat['licor_zero_coeff'])
    dat['licor_span_coeff'] = float(dat['licor_span_coeff'])

    # Create a pandas.DataFrame from the dictionary
    # This fills each key to have as many items as the index = 10
    dat = DataFrame(dat, index=range(10))

    return dat.to_dict(outtype='list')
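# A minimal usage sketch (hypothetical: the file name and the blank-line
# delimiter between licor blocks are assumptions, not part of the original):
#
#     with open('waveglider_raw.txt') as f:
#         blocks = f.read().split('\n\n')
#     parsed = [read_licor(block.splitlines()) for block in blocks if block.strip()]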
Пример #51
0
	# making the database smaller, inplace, by deleting selected variables
	c.delvar('a','x2')
	varnm, data = c.get()
	print "\nTwo variable names deleted\n", varnm
	print "\nTwo columns deleted\n", data

	# making the database smaller, inplace, by keeping only selected variables
	c = copy.deepcopy(b)
	c.keepvar('a','x2')
	varnm, data = c.get()
	print "\nAll but two variable names deleted\n", varnm
	print "\nAll but two columns deleted\n", data

	# specifying a selection rule
	sel_rule = b.data['date'] > pylab.datestr2num("8/1/2001")
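	# sel_rule is a boolean array with one flag per observation: datestr2num
	# turns "8/1/2001" into a matplotlib datenum, so the comparison against the
	# numeric 'date' column marks the observations after that date.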

	# making the database smaller, inplace, by deleting the selected observations
	c = copy.deepcopy(b)
	c.delobs(sel_rule)

	varnm, data = c.get()
	print "\nReduced number of observations following the selection rule\n", data

	# making the database smaller, inplace, by deleting all but the selected observations
	c = copy.deepcopy(b)
	c.keepobs(sel_rule)

	varnm, data = c.get()
	print "\nReduced number of observations following the inverse of the selection rule\n", data
Пример #52
0
import numpy as np
import pylab as pl
from StringIO import StringIO

data_str = """2012-04-04_11 69

2012-04-04_14 71

2012-04-04_20 69

2012-04-07_02 75

"""

data = np.loadtxt(StringIO(data_str),
                  dtype=np.dtype([("t", "S13"), ("v", float)]))

datestr = np.char.replace(data["t"], "_", " ")
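# the "_" separator is swapped for a space so each entry becomes a
# date-plus-hour string such as "2012-04-04 11", which datestr2num can parse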

t = pl.datestr2num(datestr)

v = data["v"]

pl.plot_date(t, v, fmt="-o")

pl.subplots_adjust(bottom=0.3)

ax = pl.gca()
ax.fmt_xdata = pl.DateFormatter('%Y-%m-%d %H:%M:%S')
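# fmt_xdata only controls the cursor read-out in the interactive toolbar;
# the tick labels themselves come from the major formatter set below.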

pl.xticks(rotation=90)
# pl.xticks(t, datestr)  # to place a tick at every data point, uncomment this line

ax.xaxis.set_major_formatter(pl.DateFormatter('%Y-%m-%d %H:%M'))
Пример #53
0
def do_tseries_plots(chunk, af_date, num_pixels=9):
    """
    Do plots of NBAR reflectance before and after the event.
    """
    import datetime
    import numpy
    import pylab
    af_date = pylab.datestr2num(
        datetime.datetime.strptime(af_date, "A%Y%j").strftime("%Y-%m-%d"))
    dates_mcd43 = pylab.datestr2num(
        [datetime.datetime.strptime(d, "A%Y%j").strftime("%Y-%m-%d")
         for d in chunk['dates']])

    passer = chunk['BRDF_Albedo_Quality']
    # Screen each band: keep values with acceptable QA (passer <= 1) and
    # physically plausible reflectance (< 1); everything else becomes NaN.
    rho = numpy.zeros((7, chunk['Nadir_Reflectance_Band1'].shape[0],
                       chunk['Nadir_Reflectance_Band1'].shape[1]))
    for band in xrange(7):
        refl = chunk['Nadir_Reflectance_Band%d' % (band + 1)]
        rho[band, :, :] = numpy.where(
            numpy.logical_and(passer <= 1, refl < 1.), refl, numpy.nan)

    wavelengths = numpy.array([645., 858.5, 469., 555., 1240., 1640., 2130.])

    post_sel = (af_date - dates_mcd43) <= 0
    pre_sel = (af_date - dates_mcd43) > 0
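    # pre_sel marks composites strictly before the fire date, post_sel those on
    # or after it; the loops below keep, per pixel and band, the last valid
    # pre-fire and the first valid post-fire observation.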
    rho_pre = numpy.zeros((num_pixels, 7))
    rho_post = numpy.zeros((num_pixels, 7))
    fcc_arr = numpy.zeros(num_pixels)
    a0_arr = numpy.zeros(num_pixels)
    a1_arr = numpy.zeros(num_pixels)
    a2_arr = numpy.zeros(num_pixels)
    pre_tsample = -1
    post_tsample = -1
    for pxl in xrange(num_pixels):
        for band in xrange(0, 7):
            for tsample in xrange(pre_sel.shape[0]):
                if pre_sel[tsample] and \
                        numpy.isfinite(rho[band, tsample, pxl]):
                    rho_pre[pxl, band] = rho[band, tsample, pxl]
                    pre_tsample = tsample

            for tsample in xrange(post_sel.shape[0] - 1, -1, -1):
                if post_sel[tsample] and \
                        numpy.isfinite(rho[band, tsample, pxl]):
                    rho_post[pxl, band] = rho[band, tsample, pxl]
                    post_tsample = tsample


        (fcc, a0, a1, a2, sBurn, sFWD, rmse, fccUnc, a0Unc, a1Unc, a2Unc) = \
            fire_inverter.InvertSpectralMixtureModel(rho_pre[pxl, :],
                                                     rho_post[pxl, :],
                                                     wavelengths)
        fcc_arr[pxl] = fcc
        a0_arr[pxl] = a0
        a1_arr[pxl] = a1
        a2_arr[pxl] = a2
    # Select a single pixel based on some heuristics
    winner = -1
    fcc_max = 0.
    for pxl in xrange(num_pixels):

        if (fcc_arr[pxl] > fcc_max):
            fcc_max = fcc_arr[pxl]
            winner = pxl
    if winner == -1:
        return None  # No suitable fires returned
    else:
        # use the winning pixel's values, not the loop variable left over
        # from the search above
        store_file = save_inversion(fcc_arr[winner], a0_arr[winner],
                                    a1_arr[winner], a2_arr[winner],
                                    rho_pre[winner, :], rho_post[winner, :],
                                    wavelengths)
        save_chunk(fcc_arr[winner], a0_arr[winner], a1_arr[winner],
                   a2_arr[winner], rho, wavelengths,
                   winner, af_date, dates_mcd43, pre_tsample, post_tsample)
        return (fcc_arr[winner], a0_arr[winner], a1_arr[winner],
                a2_arr[winner], store_file)
def main():
    plt.close('all')
    if len(sys.argv) > 1:
        if len(sys.argv) == 2:
            # a single argument was provided as an option
            if sys.argv[1] == 'init':
                # copy INI files and a template configuration file
                # to current directory
                create_dummy_configuration()
                sys.exit()
            else:
                file = sys.argv[1]  # name of config file
                if not os.path.exists(file):
                    raise ValueError('Configuration file cannot be '
                                     'found: %s' % file)
        else:
            raise ValueError('Currently no more than one command '
                             'line parameter is supported!')
    else:  # default
        print('*******************************************')
        print('* WELCOME to pycmbs.py                    *')
        print('* Happy benchmarking ...                  *')
        print('*******************************************')
        print('')
        print('please specify a configuration filename as argument')
        sys.exit()

    ####################################################################
    # CONFIGURATION and OPTIONS
    ####################################################################

    # read configuration file
    CF = config.ConfigFile(file)

    # read plotting options
    PCFG = config.PlotOptions()
    PCFG.read(CF)
    plot_options = PCFG

    ####################################################################
    # REMOVE previous Data warnings
    ####################################################################
    outdir = CF.options['outputdir']
    if outdir[-1] != os.sep:
        outdir += os.sep

    os.environ['PYCMBS_OUTPUTDIR'] = outdir
    os.environ['PYCMBS_OUTPUTFORMAT'] = CF.options['report_format']

    os.environ['DATA_WARNING_FILE'] = outdir + 'data_warnings_' \
        + CF.options['report'] + '.log'
    if os.path.exists(os.environ['DATA_WARNING_FILE']):
        os.remove(os.environ['DATA_WARNING_FILE'])

    for thevar in plot_options.options.keys():
        print('Variable: %s' % thevar)
        for k in plot_options.options[thevar].keys():
            print('    Observation: %s' % k)

    if CF.options['basemap']:
        f_fast = False
    else:
        f_fast = True
    shift_lon = use_basemap = not f_fast

    ########################################################################
    # TIMES
    ########################################################################
    s_start_time = CF.start_date
    s_stop_time = CF.stop_date
    start_time = pylab.num2date(pylab.datestr2num(s_start_time))
    stop_time = pylab.num2date(pylab.datestr2num(s_stop_time))
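    # datestr2num parses the configured date strings into matplotlib datenums;
    # num2date then converts them back into datetime objects, which the model
    # readers below expect as start_time/stop_time.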

    ########################################################################
    # INIT METHODS
    ########################################################################
    # names of analysis scripts for all variables ---
    scripts = CF.get_analysis_scripts()

    # get dictionary with methods how to read data for model variables to be
    # analyzed
    variables = CF.variables
    varmethods = CF.get_methods4variables(CF.variables)

    # READ DATA
    # create a Model instance for each model specified
    # in the configuration file
    #
    # read the data for all variables and return a list
    # of Data objects for further processing

    model_cnt = 1
    proc_models = []

    for i in range(len(CF.models)):
        # assign model information from configuration
        data_dir = CF.dirs[i]
        model = CF.models[i]
        experiment = CF.experiments[i]

        # create model object and read data
        # results are stored in individual variables named modelXXXX
        if CF.dtypes[i].upper() == 'CMIP5':
            themodel = CMIP5Data(data_dir,
                                 model,
                                 experiment,
                                 varmethods,
                                 intervals=CF.intervals,
                                 lat_name='lat',
                                 lon_name='lon',
                                 label=model,
                                 start_time=start_time,
                                 stop_time=stop_time,
                                 shift_lon=shift_lon)
        elif CF.dtypes[i].upper() == 'CMIP5RAW':
            themodel = CMIP5RAWData(data_dir,
                                    model,
                                    experiment,
                                    varmethods,
                                    intervals=CF.intervals,
                                    lat_name='lat',
                                    lon_name='lon',
                                    label=model,
                                    start_time=start_time,
                                    stop_time=stop_time,
                                    shift_lon=shift_lon)
        elif 'CMIP5RAWSINGLE' in CF.dtypes[i].upper():
            themodel = CMIP5RAW_SINGLE(data_dir,
                                       model,
                                       experiment,
                                       varmethods,
                                       intervals=CF.intervals,
                                       lat_name='lat',
                                       lon_name='lon',
                                       label=model,
                                       start_time=start_time,
                                       stop_time=stop_time,
                                       shift_lon=shift_lon)

        elif CF.dtypes[i].upper() == 'JSBACH_BOT':
            themodel = JSBACH_BOT(data_dir,
                                  varmethods,
                                  experiment,
                                  intervals=CF.intervals,
                                  start_time=start_time,
                                  stop_time=stop_time,
                                  name=model,
                                  shift_lon=shift_lon)
        elif CF.dtypes[i].upper() == 'JSBACH_RAW':
            themodel = JSBACH_RAW(data_dir,
                                  varmethods,
                                  experiment,
                                  intervals=CF.intervals,
                                  name=model,
                                  shift_lon=shift_lon,
                                  start_time=start_time,
                                  stop_time=stop_time)
        elif CF.dtypes[i].upper() == 'JSBACH_RAW2':
            themodel = JSBACH_RAW2(data_dir,
                                   varmethods,
                                   experiment,
                                   intervals=CF.intervals,
                                   start_time=start_time,
                                   stop_time=stop_time,
                                   name=model,
                                   shift_lon=shift_lon)
        elif CF.dtypes[i].upper() == 'JSBACH_SPECIAL':
            themodel = JSBACH_SPECIAL(data_dir,
                                      varmethods,
                                      experiment,
                                      intervals=CF.intervals,
                                      start_time=start_time,
                                      stop_time=stop_time,
                                      name=model,
                                      shift_lon=shift_lon)
        elif CF.dtypes[i].upper() == 'CMIP3':
            themodel = CMIP3Data(data_dir,
                                 model,
                                 experiment,
                                 varmethods,
                                 intervals=CF.intervals,
                                 lat_name='lat',
                                 lon_name='lon',
                                 label=model,
                                 start_time=start_time,
                                 stop_time=stop_time,
                                 shift_lon=shift_lon)
        else:
            raise ValueError('Invalid model type: %s' % CF.dtypes[i])
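        # Design note: the elif chain maps a dtype string to a reader class.
        # A dispatch table would be an equivalent, more compact alternative
        # (sketch only; constructor signatures differ between classes):
        #
        #     MODEL_CLASSES = {'CMIP5': CMIP5Data, 'CMIP5RAW': CMIP5RAWData,
        #                      'CMIP3': CMIP3Data}
        #     themodel = MODEL_CLASSES[CF.dtypes[i].upper()](data_dir, model,
        #         experiment, varmethods, intervals=CF.intervals, ...)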

        # read data for current model

        # options that specify regrid options etc.
        themodel._global_configuration = CF
        themodel.plot_options = plot_options
        themodel.get_data()

        # copy current model to a variable named modelXXXX
        cmd = 'model' + str(model_cnt).zfill(4) + ' = ' \
            + 'themodel.copy(); del themodel'
        exec(cmd)  # store copy of cmip5 model in separate variable

        # append model to list of models ---
        proc_models.append('model' + str(model_cnt).zfill(4))
        model_cnt += 1
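    # Design note: the exec/eval indirection above stores each model under a
    # generated name (model0001, model0002, ...); appending the copied objects
    # to a plain list would achieve the same without string evaluation.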

    ########################################################################
    # MULTIMODEL MEAN
    # here we have now all the model and variables read.
    # The list of all models is contained in the variable proc_models.
    f_mean_model = True
    if f_mean_model:
        # calculate climatological mean values: The models contain already
        # climatological information in the variables[] list. Thus there is
        # not need to take care for the different timesteps here. This
        # should have been handled already in the preprocessing.
        # generate instance of MeanModel to store result
        MEANMODEL = MeanModel(varmethods, intervals=CF.intervals)

        # sum up all models
        for i in range(len(proc_models)):
            exec('actmodel = ' + proc_models[i] + '.copy()')
            MEANMODEL.add_member(actmodel)
            del actmodel

        # calculate ensemble mean
        MEANMODEL.ensmean()

        # save mean model to file
        # include filename of configuration file
        MEANMODEL.save(get_temporary_directory(),
                       prefix='MEANMODEL_' + file[:-4])

        # add mean model to general list of models to process in analysis
        proc_models.append('MEANMODEL')

    ########################################################################
    # END MULTIMODEL MEAN
    ########################################################################

    ########################################################################
    # INIT reporting and plotting and diagnostics
    ########################################################################
    # Gleckler Plot
    global_gleckler = GlecklerPlot()

    # Report
    rep = Report(CF.options['report'],
                 'pyCMBS report - ' + CF.options['report'],
                 CF.options['author'],
                 outdir=outdir,
                 dpi=300,
                 format=CF.options['report_format'])
    cmd = 'cp ' + os.environ['PYCMBSPATH'] + os.sep + \
        'logo' + os.sep + 'Phytonlogo5.pdf ' + rep.outdir
    os.system(cmd)

    ########################################################################
    ########################################################################
    ########################################################################
    # MAIN ANALYSIS LOOP: perform analysis for each model and variable
    ########################################################################
    ########################################################################
    ########################################################################
    skeys = scripts.keys()
    for variable in variables:

        # register current variable in Gleckler Plot
        global_gleckler.add_variable(variable)

        # call analysis scripts for each variable
        for k in range(len(skeys)):
            if variable == skeys[k]:

                print('Doing analysis for variable ... %s' % variable)
                print('   ... %s' % scripts[variable])
                # model list is reformatted so it can be evaluated properly
                model_list = str(proc_models).replace("'", "")
                cmd = 'analysis.' + scripts[variable] + '(' + model_list \
                    + ',GP=global_gleckler,shift_lon=shift_lon, \
                        use_basemap=use_basemap,report=rep,\
                        interval=CF.intervals[variable],\
                        plot_options=PCFG)'

                eval(cmd)

    ########################################################################
    # GLECKLER PLOT finalization ...
    ########################################################################
    # generate Gleckler analysis plot for all variables and models analyzed ///
    global_gleckler.plot(vmin=-0.1,
                         vmax=0.1,
                         nclasses=16,
                         show_value=False,
                         ticks=[-0.1, -0.05, 0., 0.05, 0.1])
    oname = outdir + 'gleckler.pkl'
    if os.path.exists(oname):
        os.remove(oname)
    pickle.dump(global_gleckler.models,
                open(outdir + 'gleckler_models.pkl', 'w'))
    pickle.dump(global_gleckler.variables,
                open(outdir + 'gleckler_variables.pkl', 'w'))
    pickle.dump(global_gleckler.data, open(outdir + 'gleckler_data.pkl', 'w'))
    pickle.dump(global_gleckler._raw_data,
                open(outdir + 'gleckler_rawdata.pkl', 'w'))

    rep.section('Summary error statistics')
    rep.subsection('Gleckler metric')
    rep.figure(global_gleckler.fig,
               caption='Gleckler et al. (2008) model performance index',
               width='10cm')
    global_gleckler.fig.savefig(outdir + 'portraet_diagram.png',
                                dpi=200,
                                bbox_inches='tight')
    global_gleckler.fig.savefig(outdir + 'portraet_diagram.pdf',
                                dpi=200,
                                bbox_inches='tight')

    plt.close(global_gleckler.fig.number)

    # generate dictionary with observation labels for each variable
    labels_dict = {}
    for variable in variables:
        if variable not in PCFG.options.keys():
            continue
        varoptions = PCFG.options[variable]
        thelabels = {}
        for k in varoptions.keys():  # keys of observational datasets
            if k == 'OPTIONS':
                continue
            else:
                # only add observation to legend,
                # if option in INI file is set
                if varoptions[k]['add_to_report']:
                    # generate dictionary for GlecklerPLot legend
                    thelabels.update(
                        {int(varoptions[k]['gleckler_position']): k})
        labels_dict.update({variable: thelabels})
        del thelabels

    # legend for gleckler plot ///
    lcnt = 1
    for variable in variables:
        if variable not in PCFG.options.keys():
            continue
        varoptions = PCFG.options[variable]
        thelabels = labels_dict[variable]
        fl = global_gleckler._draw_legend(thelabels, title=variable.upper())
        if fl is not None:
            rep.figure(fl, width='8cm', bbox_inches=None)
            fl.savefig(outdir + 'legend_portraet_' + str(lcnt).zfill(5) +
                       '.png',
                       bbox_inches='tight',
                       dpi=200)
            plt.close(fl.number)
        del fl
        lcnt += 1

    # plot model ranking between different observational datasets ///
    rep.subsection('Model ranking consistency')
    for v in global_gleckler.variables:
        rep.subsubsection(v.upper())
        tmpfig = global_gleckler.plot_model_ranking(v,
                                                    show_text=True,
                                                    obslabels=labels_dict[v])
        if tmpfig is not None:
            rep.figure(tmpfig,
                       width='8cm',
                       bbox_inches=None,
                       caption='Model RANKING for different observational '
                               'datasets: ' + v.upper())
            plt.close(tmpfig.number)
        del tmpfig

        # write a table with model ranking
        tmp_filename = outdir + 'ranking_table_' + v + '.tex'
        rep.open_table()
        global_gleckler.write_ranking_table(v,
                                            tmp_filename,
                                            fmt='latex',
                                            obslabels=labels_dict[v])
        rep.input(tmp_filename)
        rep.close_table(caption='Model rankings for variable ' + v.upper())

        # plot absolute model error
        tmpfig = global_gleckler.plot_model_error(v, obslabels=labels_dict[v])
        if tmpfig is not None:
            rep.figure(tmpfig,
                       width='8cm',
                       bbox_inches=None,
                       caption='Model ERROR for different observational '
                               'datasets: ' + v.upper())
            plt.close(tmpfig.number)
        del tmpfig

    ########################################################################
    # CLEAN up and finish
    ########################################################################
    plt.close('all')
    rep.close()

    print('##########################################')
    print('# BENCHMARKING FINISHED!                 #')
    print('##########################################')
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import pylab  # needed below for datestr2num / num2date
import startMARMITESsurface as startMMsurf
import MARMITESunsat_v2 as MMunsat
import MARMITESprocess as MMproc
import ppMODFLOW_flopy as ppMF
import MARMITESplot as MMplot
import CreateColors
import StringIO
import h5py

#####################################

# workspace (ws) definition
timestart = pylab.datestr2num(pylab.datetime.datetime.today().isoformat())
print '\n##############\nMARMITES started!\n%s\n##############' % pylab.num2date(
    timestart).isoformat()[:19]

# read the input file (called _inputMM.ini) in the MARMITES workspace
# the first character on the first line has to be the character used to comment
# the file can contain any comments the user wishes, but the sequence of the inputs has to be respected
MM_ws = r'E:\00code_ws\00_TESTS\MARMITESv2_r13c6l2_REF'
MM_fn = '_inputMM.ini'

inputFile = MMproc.readFile(MM_ws, MM_fn)
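# MMproc.readFile is assumed to return the relevant lines of the file as a
# list that the code below consumes sequentially through the index l.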

l = 0
try:
    # # ECHO ON/OFF (1 - interpreter verbose BUT NO report, 0 - NO interpreter verbose BUT report)
    verbose = int(inputFile[l].strip())