Code example #1
File: base.py Project: brorfred/njord
    def movie(self, fldname, jd1=None, jd2=None, jdvec=None, fps=10, **kwargs):
        curr_backend = plt.get_backend()
        plt.switch_backend('Agg')
        FFMpegWriter = animation.writers['ffmpeg']
        metadata = dict(title='%s' % (self.projname),
                        artist=self.projname,
                        comment='https://github.com/brorfred/njord')
        writer = FFMpegWriter(fps=fps, metadata=metadata,
            extra_args=['-vcodec', 'libx264',"-pix_fmt", "yuv420p"])

        jdvec = self.get_tvec(jd1, jd2) if jdvec is None else jdvec
        fig = plt.figure()
        with writer.saving(fig, "%s.mp4" % self.projname, 200):
            for jd in jdvec:
                pl.clf()
                print(pl.num2date(jd).strftime("%Y-%m-%d %H:%M load "), end="")
                sys.stdout.flush()
                try:
                    fld = self.get_field(fldname, jd=jd)
                except:
                    print("%s not downloaded" % jd)
                    continue
                print("plot ", end="")
                sys.stdout.flush()
                self.pcolor(fld, **kwargs)
                pl.title(pl.num2date(jd).strftime("%Y-%m-%d %H:%M"))
                print("write")
                writer.grab_frame()#bbox_inches="tight", pad_inches=0)
        plt.switch_backend(curr_backend)
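
The movie() method above relies on matplotlib's FFMpegWriter: the saving() context manager opens the output file and grab_frame() appends the current figure once per timestep. A minimal, self-contained sketch of that pattern (assuming ffmpeg is installed and on the PATH; the filename, fps, and frame count are illustrative):

import matplotlib
matplotlib.use("Agg")                      # render off-screen, as movie() does
import matplotlib.pyplot as plt
from matplotlib import animation

FFMpegWriter = animation.writers["ffmpeg"]
writer = FFMpegWriter(fps=10, metadata={"title": "demo"})

fig = plt.figure()
with writer.saving(fig, "demo.mp4", 200):  # 200 dpi, as in the example above
    for i in range(10):
        plt.clf()
        plt.plot(range(i + 1))             # stand-in for self.pcolor(fld, **kwargs)
        writer.grab_frame()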
Code example #2
File: sounding.py Project: vanandel/pyart
 def __init__(self, ob):
     # populate attributes with sounding data, initially this will
     # only work with a netcdf variable object (from Scientific.IO)
     # but more objects can be added by simply adding elif..
     # PLEASE always populate height in the values['alt'] position and
     # append values['date_list'] and datetime
     # datetime and date_list[index] are datetime objects
     # check if it is a netcdf variable list
     if "getValue" in dir(ob[ob.keys()[0]]):
         # this is a netcdf variables object
         self.datetime = num2date(datestr2num("19700101") + ob["base_time"].getValue() / (24.0 * 60.0 * 60.0))
         values = {}
         units = {}
         longname = {}
         for var in ob.keys():
             values.update({var: ob[var][:]})
             try:
                 units.update({var: ob[var].units})
             except AttributeError:
                 units.update({var: "no units"})
             try:
                 longname.update({var: ob[var].long_name})
             except AttributeError:
                 longname.update({var: "no longname"})
          values.update(
              {"date_list": num2date(date2num(self.datetime) + values["time_offset"] / (24.0 * 60.0 * 60.0))}
          )
          units.update({"date_list": "unitless (object)"})
          self.values = values
          self.units = units
          self.long_name = longname
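
The base_time handling above converts seconds since the Unix epoch into matplotlib date numbers, which count days. A minimal sketch of that conversion (the base_time value is illustrative):

from matplotlib.dates import datestr2num, num2date

base_time = 1104537600.0  # hypothetical seconds since 1970-01-01 (= 2005-01-01)
jd = datestr2num("19700101") + base_time / (24.0 * 60.0 * 60.0)
print(num2date(jd))       # 2005-01-01 00:00:00+00:00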
Code example #3
File: trm.py Project: raphaeldussin/pytraj
    def __str__(self):
        """Print statistics about current instance"""
        alist = ['projname','casename', 'trmdir', 'datadir',
                 'njord_module', 'njord_class', 'imt', 'jmt']
        print ""
        print "="*79
        for a in alist:
            print a.rjust(15) + " : " + str(self.__dict__[a])
        if hasattr(self.nlrun, 'seedparts'):
            print "%s : %i" % ("seedparts".rjust(15),  self.nlrun.seedparts)
            print "%s : %i" % ("seedpart_id".rjust(15),self.nlrun.seedpart_id)

        for a in ['part','rank','arg1','arg2']:
            if self.__dict__[a] is not None:
                print "%s : %i" % (a.rjust(15),  self.__dict__[a])

        if hasattr(self,'x'):
            print ""
            print "%s : %s" % ("file".rjust(15), self.filename)
            print "%s : %s - %s" % (
                "time range".rjust(15),
                pl.num2date(self.jdvec.min()).strftime('%Y-%m-%d'),
                pl.num2date(self.jdvec.max()).strftime('%Y-%m-%d'))
            print "%s : %i" % ("timesteps".rjust(15), len(self.jdvec))
            print "%s : %i" % ("particles".rjust(15),
                               len(np.unique(self.ntrac)))
            print "%s : %i" % ("positions".rjust(15), len(self.ntrac))

        return ''
Code example #4
File: dbase.0.1.py Project: BKJackson/SciPy-CookBook
    def info(self, *var, **adict):
        """
		Printing descriptive statistics on selected variables
		"""

        # calling convenience functions to clean-up input parameters
        var, sel = self.__var_and_sel_clean(var, adict)
        dates, nobs = self.__dates_and_nobs_clean(var, sel)

        # setting the minimum and maximum dates to be used
        mindate = pylab.num2date(min(dates)).strftime("%d %b %Y")
        maxdate = pylab.num2date(max(dates)).strftime("%d %b %Y")

        # number of variables (excluding date if present)
        nvar = len(var)

        print "\n=============================================================="
        print "==================== Database information ===================="
        print "==============================================================\n"

        print "file:				%s" % self.DBname
        print "# obs:				%s" % nobs
        print "# variables:		%s" % nvar
        print "Start date:			%s" % mindate
        print "End date:			%s" % maxdate

        print "\nvar				min			max			mean		std.dev"
        print "=============================================================="

        for i in var:
            _min = self.data[i][sel].min()
            _max = self.data[i][sel].max()
            _mean = self.data[i][sel].mean()
            _std = self.data[i][sel].std()
            print """%-5s			%-5.2f		%-5.2f		%-5.2f		%-5.2f""" % tuple([i, _min, _max, _mean, _std])
Code example #5
File: oscar.py Project: raphaeldussin/njord
 def movie(self):
     import matplotlib as mpl
     mpl.rcParams['axes.labelcolor'] = 'white'
     pl.close(1)
     pl.figure(1,(8,4.5),facecolor='k')
     miv = np.ma.masked_invalid
     figpref.current()
     jd0 = pl.date2num(dtm(2005,1,1))
     jd1 = pl.date2num(dtm(2005,12,31))
     mp = projmaps.Projmap('glob')
     x,y = mp(self.llon,self.llat)
     for t in np.arange(jd0,jd1):
         print pl.num2date(t)
         self.load(t)
     
         pl.clf()
         pl.subplot(111,axisbg='k')
         mp.pcolormesh(x,y,
                       miv(np.sqrt(self.u**2 +self.v**2)),
                       cmap=cm.gist_heat)
         pl.clim(0,1.5)
         mp.nice()
         pl.title('%04i-%02i-%02i' % (pl.num2date(t).year,
                                      pl.num2date(t).month,
                                      pl.num2date(t).day),
                  color='w')
         pl.savefig('/Users/bror/oscar/norm/%03i.png' % t,
                    bbox_inches='tight',facecolor='k',dpi=150)
Code example #6
File: sounding.py Project: vanandel/pyart
    def interp_time_base(self, other_sounding, time_desired):
        # interpolates the current sounding and the other sounding according
        # to the time at launch
        # first check that time_desired fits in both
        time_between_sondes = date2num(other_sounding.datetime) - date2num(self.datetime)
        second_wt = (date2num(time_desired) - date2num(self.datetime)) / time_between_sondes
        first_wt = (date2num(other_sounding.datetime) - date2num(time_desired)) / time_between_sondes
        if date2num(self.datetime) > date2num(time_desired) > date2num(other_sounding.datetime):
            order = "before"  # the desired time is before self
        elif date2num(self.datetime) < date2num(time_desired) < date2num(other_sounding.datetime):
            order = "after"  # the desired time is after self
        else:
            print "time desired is outside of range"
            return

        min_ht = array([self.alt.min(), other_sounding.alt.min()]).max()
        max_ht = array([self.alt.max(), other_sounding.alt.max()]).min()
        idx_min = where(abs(other_sounding.alt - min_ht) == abs(other_sounding.alt - min_ht).min())[0][0]
        idx_max = where(abs(other_sounding.alt - max_ht) == abs(other_sounding.alt - max_ht).min())[0][0]
        myhts = other_sounding.alt[idx_min:idx_max]
        self.interpolate_values(myhts)
        altshape = self.alt.shape

        for key in self.values.keys():
            if array(self.values[key]).shape == altshape and key != "date_list":
                self.values[key] = self.values[key] * first_wt + other_sounding.values[key][idx_min:idx_max] * second_wt
        self.values["date_list"] = num2date(
            date2num(self.values["date_list"]) * first_wt
            + date2num(other_sounding.values["date_list"][idx_min:idx_max]) * second_wt
        )
        self.datetime = num2date(date2num(self.datetime) * first_wt + date2num(other_sounding.datetime) * second_wt)
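
interp_time_base() weights the two soundings by their time distance to the desired time; the two weights always sum to one. A minimal numeric sketch of that weighting (dates are illustrative):

from datetime import datetime
from matplotlib.dates import date2num

t_self = datetime(2006, 1, 20, 0, 0)      # launch time of self
t_other = datetime(2006, 1, 20, 12, 0)    # launch time of other_sounding
t_desired = datetime(2006, 1, 20, 3, 0)   # time to interpolate to

span = date2num(t_other) - date2num(t_self)
second_wt = (date2num(t_desired) - date2num(t_self)) / span   # weight of other_sounding
first_wt = (date2num(t_other) - date2num(t_desired)) / span   # weight of self
print(first_wt, second_wt, first_wt + second_wt)              # 0.75 0.25 1.0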
Code example #7
File: partsat.py Project: brorfred/pytraj
 def copy(jd):
     tr = traj('jplNOW','ftp','/Volumes/keronHD3/ormOut/')
     print pl.num2date(jd), jd
     tr.load(jd)
     tr.remove_satnans()
     if len(tr.x) > 0:
         tr.db_copy()
Code example #8
File: connect.py Project: TRACMASS/pytraj
    def multiplot(self,jd1=730120.0, djd=60, dt=20):

        if not hasattr(self,'disci'):
            self.generate_regdiscs()
            self.x = self.disci
            self.y = self.discj
        if not hasattr(self,'lon'):
            self.ijll()

        figpref.presentation()
        pl.close(1)
        pl.figure(1,(10,10))

        conmat = self[jd1-730120.0:jd1-730120.0+60, dt:dt+10]
        x,y = self.gcm.mp(self.lon, self.lat)
        self.gcm.mp.merid = []
        self.gcm.mp.paral = []

        pl.subplots_adjust(wspace=0,hspace=0,top=0.95)

        pl.subplot(2,2,1)
        pl.pcolormesh(miv(conmat),cmap=cm.hot)
        pl.clim(0,250)
        pl.plot([0,800],[0,800],'g',lw=2)
        pl.gca().set_aspect(1)
        pl.setp(pl.gca(),yticklabels=[])
        pl.setp(pl.gca(),xticklabels=[])
        pl.colorbar(aspect=40,orientation='horizontal',
                    pad=0,shrink=.8,fraction=0.05,ticks=[0,50,100,150,200])

        pl.subplot(2,2,2)
        colorvec = (np.nansum(conmat,axis=1)-np.nansum(conmat,axis=0))[1:]
        self.gcm.mp.scatter(x, y, 10, 'w', edgecolor='k')
        self.gcm.mp.scatter(x, y, 10, colorvec)
        self.gcm.mp.nice()
        pl.clim(0,10000)

        
        pl.subplot(2,2,3)
        colorvec = np.nansum(conmat,axis=1)[1:]
        self.gcm.mp.scatter(x, y, 10, 'w', edgecolor='k')
        self.gcm.mp.scatter(x, y, 10, colorvec)
        self.gcm.mp.nice()
        pl.clim(0,10000)

        pl.subplot(2,2,4)
        colorvec = np.nansum(conmat,axis=0)[1:]
        self.gcm.mp.scatter(x, y, 10, 'w', edgecolor='k')
        self.gcm.mp.scatter(x, y, 10, colorvec)
        self.gcm.mp.nice()
        pl.clim(0,10000)

        mycolor.freecbar([0.2,.06,0.6,0.020],[2000,4000,6000,8000])

        pl.suptitle("Trajectories seeded from %s to %s, Duration: %i-%i days" %
                    (pl.num2date(jd1).strftime("%Y-%m-%d"),
                     pl.num2date(jd1+djd).strftime("%Y-%m-%d"), dt,dt+10))

        pl.savefig('multplot_%i_%03i.png' % (jd1,dt),transparent=True)
Code example #9
File: netcdf_utis.py Project: scollis/bom_mds
def load_cube(fname):  # weird edits added for changed behaviour
    ncf = NetCDFFile(fname, "r")
    print "Ok1"
    xar = ncf.variables["xar"][0]
    # print mxar.shape
    # xar=mxar[0]
    print "plew"
    yar = array(ncf.variables["yar"][0])
    print "Ok2"
    levs = array(ncf.variables["levs"][0])
    print "Ok3"
    parms = [
        "VE",
        "VR",
        "CZ",
        "RH",
        "PH",
        "ZD",
        "SW",
        "KD",
        "i_comp",
        "j_comp",
        "k_comp",
        "v_array",
        "u_array",
        "w_array",
    ]
    radar1_poss_parms = [par + "_radar1" for par in parms]
    radar2_poss_parms = [par + "_radar2" for par in parms]
    radar1_parms = set(radar1_poss_parms) & set(ncf.variables.keys())
    radar2_parms = set(radar2_poss_parms) & set(ncf.variables.keys())
    print "Doing radar1"
    radar1_dict = dict([(par[0:-7], array(ncf.variables[par].getValue())) for par in radar1_parms])
    radar1_dict.update(
        {
            "xar": xar,
            "yar": yar,
            "levs": levs,
            "radar_loc": ncf.variables["radar1_loc"][0],
            "displacement": ncf.variables["radar1_dis"][0],
            "date": num2date(ncf.variables["radar1_date"][0, 0]),
            "radar_name": getattr(ncf, "radar1_name"),
        }
    )
    print "doing radar2"
    radar2_dict = dict([(par[0:-7], array(ncf.variables[par].getValue())) for par in radar2_parms])
    radar2_dict.update(
        {
            "xar": xar,
            "yar": yar,
            "levs": levs,
            "radar_loc": ncf.variables["radar2_loc"][0],
            "displacement": ncf.variables["radar2_dis"][0],
            "date": num2date(ncf.variables["radar2_date"][0, 0]),
            "radar_name": getattr(ncf, "radar2_name"),
        }
    )
    ncf.close()
    return radar1_dict, radar2_dict
Code example #10
File: connect.py Project: tamu-pong/Py-TRACMASS
    def multiplot(self,jd1=730120.0, djd=60, dt=20):

        if not hasattr(self,'disci'):
            self.generate_regdiscs()
            self.x = self.disci
            self.y = self.discj
            self.ijll()

        figpref.manuscript()
        pl.close(1)
        pl.figure(1,(10,10))

        conmat = self[jd1-730120.0:jd1-730120.0+60, dt:dt+10]
        x,y = self.gcm.mp(self.lon, self.lat)
        self.gcm.mp.merid = []
        self.gcm.mp.paral = []

        pl.subplots_adjust(wspace=0,hspace=0,top=0.95)

        pl.subplot(2,2,1)
        pl.pcolormesh(miv(conmat))
        pl.clim(0,50)
        pl.plot([0,800],[0,800],'g',lw=2)
        pl.gca().set_aspect(1)
        pl.setp(pl.gca(),yticklabels=[])
        pl.setp(pl.gca(),xticklabels=[])

        pl.subplot(2,2,2)
        colorvec = (np.nansum(conmat,axis=1)-np.nansum(conmat,axis=0))[1:]
        self.gcm.mp.scatter(x, y, 10, 'w', edgecolor='k')
        self.gcm.mp.scatter(x, y, 10, colorvec)
        self.gcm.mp.nice()
        pl.clim(0,10000)

        
        pl.subplot(2,2,3)
        colorvec = np.nansum(conmat,axis=1)[1:]
        self.gcm.mp.scatter(x, y, 10, 'w', edgecolor='k')
        self.gcm.mp.scatter(x, y, 10, colorvec)
        self.gcm.mp.nice()
        pl.clim(0,10000)

        pl.subplot(2,2,4)
        colorvec = np.nansum(conmat,axis=0)[1:]
        self.gcm.mp.scatter(x, y, 10, 'w', edgecolor='k')
        self.gcm.mp.scatter(x, y, 10, colorvec)
        self.gcm.mp.nice()
        pl.clim(0,10000)

        pl.suptitle("Trajectories seeded from %s to %s, Duration: %i-%i days" %
                    (pl.num2date(jd1).strftime("%Y-%m-%d"),
                     pl.num2date(jd1+djd).strftime("%Y-%m-%d"), dt,dt+10))

        pl.savefig('multplot_%i_%03i.png' % (jd1,dt))
Code example #11
File: oscar.py Project: raphaeldussin/njord
    def uvmat(self):
        hsmat = np.zeros ([20]+list(self.llat.shape)).astype(np.int16)
        jd1 = pl.date2num(dtm(2003,1,1))
        jd2 = pl.date2num(dtm(2009,12,31))

        vlist = np.linspace(0,1.5,21)
        for jd in np.arange(jd1,jd2+1):
            print pl.num2date(jd)
            self.load(jd=jd)
            uv = np.sqrt(self.u**2 + self.v**2)
            for n,(v1,v2) in enumerate(zip(vlist[:-1],vlist[1:])):
                msk = (uv>=v1) & (uv<v2)
                hsmat[n,msk] += 1
        return hsmat
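
The inner loop of uvmat() builds a per-cell speed histogram: each of the 20 bins is a boolean mask over the speed field, and the matching counter layer is incremented where the mask is True. A minimal sketch with a tiny hypothetical speed array:

import numpy as np

uv = np.array([[0.05, 0.20], [0.95, 1.40]])   # hypothetical speed field
vlist = np.linspace(0, 1.5, 21)               # 20 bins between 0 and 1.5
hsmat = np.zeros((20,) + uv.shape, dtype=np.int16)
for n, (v1, v2) in enumerate(zip(vlist[:-1], vlist[1:])):
    msk = (uv >= v1) & (uv < v2)
    hsmat[n, msk] += 1
print(hsmat.sum(axis=0))                      # each cell falls in exactly one bin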
Code example #12
File: plot.py Project: ivanovev/start
 def filesave(self, fname):
     f = open(fname, 'w')
     cmds = self.data['y']
     vv = [v for v in cmds.values()]
     xx = list(chain(*vv[0]['xx']))
     if self.mode == 'ft':
         xx0 = pylab.num2date(xx[0])
         xx1 = [(pylab.num2date(x) - xx0).total_seconds() for x in xx]
         xx = xx1
     yy = [list(chain(*v['yy'])) for v in vv]
     for i in range(0, len(xx)):
         yyi = [k[i] for k in yy]
         f.write(','.join(['%.5f' % j for j in [xx[i]] + yyi]) + '\n')
     f.close()
Code example #13
File: ecgplotter.py Project: hamalawy/telehealth
 def updateWindow(self, val):
     """ redraw the canvas based from the slider position """
     self.updatestarttime = self.time_scroller.val
     self.updatecurrenttime = date2num(num2date(self.updatestarttime) + datetime.timedelta(seconds=3))
     self.axes.set_xlim((self.updatestarttime, self.updatecurrenttime))
     self.axes.xaxis.set_ticklabels(
         self.createXTickLabels(num2date(self.updatecurrenttime)),
         rotation=30,
         ha="right",
         size="smaller",
         name="Calibri",
     )
     ### FIX ME: Is there a conflict here?
     self.canvas.draw()
     self.canvas.gui_repaint()
Code example #14
File: ecgplotter.py Project: hamalawy/telehealth
    def updateWindow(self, val):
        """ redraw the canvas based from the slider position """

        # get the current value of the slider based from its position
        # then set the time range of the plotter window based from this value
        self.updatestarttime = self.time_scroller.val
        self.updatecurrenttime = date2num(num2date(self.updatestarttime) + datetime.timedelta(seconds = 3))
        self.updatecurrenttime_tick = time.mktime(num2date(self.updatecurrenttime).timetuple())
        self.axes.set_xlim((self.updatestarttime, self.updatecurrenttime))
        # update the x-axis ticklabels depending on the current time of plotting
        self.axes.xaxis.set_ticklabels(self.createXTickLabels(self.updatecurrenttime_tick), rotation = 30, ha = "right", size = 'smaller', name = 'Calibri')

        # update the plotting window based from the slider position
        self.canvas.draw()
        self.canvas.gui_repaint()
Code example #15
File: gmail-plot.py Project: Diwahars/gmail-plot
def monthlyTimeSeries(headers):
    months = []
    for h in headers: 
        if len(h) > 1:
            timestamp = mktime(parsedate(h[1][5:].replace('.',':')))
            mailstamp = datetime.fromtimestamp(timestamp)
            # Time the email arrived
            y = datetime(mailstamp.year,mailstamp.month,mailstamp.day, mailstamp.hour, mailstamp.minute, mailstamp.second)
            months.append(y)

    """ draw the histogram of the daily distribution """
    # converting dates to numbers
    numtime = [date2num(t) for t in months] 
    # plotting the histogram
    ax = figure(figsize=(18, 6), dpi=80).gca()
    _, _, patches = hist(numtime, bins=30,alpha=0.5)
    # adding the labels for the x axis
    tks = [num2date(p.get_x()) for p in patches] 
    xticks(tks,rotation=30)
    # formatting the dates on the x axis
    ax.xaxis.set_major_formatter(DateFormatter('%Y-%m'))
    ax.spines['right'].set_color('none')
    ax.spines['top'].set_color('none')
    ax.xaxis.set_ticks_position('bottom')
    ax.yaxis.set_ticks_position('left')
    ax.autoscale(tight=False) 
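
The plotting half of monthlyTimeSeries() is a generic date-histogram recipe: convert datetimes to day numbers with date2num, histogram them, then format the tick labels back into dates. A minimal, self-contained sketch (synthetic timestamps; bin count and figure size are illustrative):

from datetime import datetime, timedelta
import matplotlib.pyplot as plt
from matplotlib.dates import date2num, DateFormatter

dates = [datetime(2012, 1, 1) + timedelta(days=3 * i) for i in range(100)]
numtime = [date2num(t) for t in dates]                # datetimes -> float day numbers

ax = plt.figure(figsize=(10, 4)).gca()
ax.hist(numtime, bins=30, alpha=0.5)
ax.xaxis.set_major_formatter(DateFormatter('%Y-%m'))  # label ticks as dates again
plt.show()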
Code example #16
File: base.py Project: raphaeldussin/njord
    def timeseries(self, fieldname, jd1, jd2, mask=None):
        """Create a timeseries of fields using mask to select data"""
        mask = mask if mask is not None else self.llat == self.llat
        jd1 = pl.datestr2num(jd1) if type(jd1) is str else jd1
        jd2 = pl.datestr2num(jd2) if type(jd2) is str else jd2

        self.tvec = np.arange(jd1, jd2+1)
        field = np.zeros((len(self.tvec),) + self.llat.shape, dtype=np.float32)
        for n,jd in enumerate(self.tvec):
            print pl.num2date(jd), pl.num2date(jd2)
            try:
                field[n,:,:] = self.get_field(fieldname, jd=jd).astype(np.float32)
            except KeyError:
                field[n,:,:] = np.nan
            field[n, ~mask] = np.nan
        setattr(self, fieldname + 't', field)
Code example #17
File: read_sounding.py Project: scollis/bom_mds
def get_two_best_sondes(date_str, **kwargs):
	sonde_file=kwargs.get('sonde_file', '/data/twpice/darwin.txt')
	#outdir=kwargs.get('outdir', '/flurry/home/scollis/bom_mds/dealias/')
	sonde_file=kwargs.get('sonde_file', '/data/twpice/darwin.txt')
	outdir=kwargs.get('outdir', '/home/scollis/bom_mds/dealias/')
	tim_date=num2date(datestr2num(date_str))
	sonde_list=read_sounding_within_a_day(sonde_file, tim_date)
	launch_dates=[sonde['date_list'][0] for sonde in sonde_list]
	#print launch_dates
	launch_date_offset=[date2num(sonde['date_list'][0])- date2num(tim_date)  for sonde in sonde_list]
	sonde_made_it=False
	candidate=0
	while not(sonde_made_it):
		best_sonde=sonde_list[argsort(abs(array(launch_date_offset)))[candidate]]
		candidate=candidate+1
		sonde_made_it=best_sonde['alt(m)'][-1] > 18000.
		if not sonde_made_it: print "Sonde Burst at ", best_sonde['alt(m)'][-1], "m rejecting"
	print "Sonde Burst at ", best_sonde['alt(m)'][-1], "m Accepting"
	sonde_made_it=False
	while not(sonde_made_it):
		sec_best_sonde=sonde_list[argsort(abs(array(launch_date_offset)))[candidate]]
		candidate=candidate+1
		sonde_made_it=sec_best_sonde['alt(m)'][-1] > 18000.
		if not sonde_made_it: print "Sonde Burst at ", sec_best_sonde['alt(m)'][-1], "m rejecting"
	print "Sonde Burst at ", sec_best_sonde['alt(m)'][-1], "m Accepting"
	print 'Time of radar: ', tim_date, ' Time of  best sonde_launch: ', best_sonde['date_list'][0], ' Time of sonde_termination: ', best_sonde['date_list'][-1]
	print 'Time of radar: ', tim_date, ' Time of second sonde_launch: ', sec_best_sonde['date_list'][0], ' Time of sonde_termination: ', best_sonde['date_list'][-1]
	for i in range(len(sonde_list)):
		best_sonde=sonde_list[argsort(abs(array(launch_date_offset)))[i]]
		print 'Time of radar: ', tim_date, ' Time of  best sonde_launch: ', best_sonde['date_list'][0], ' Offset', abs(date2num(best_sonde['date_list'][0])-date2num(tim_date))*24.0
	return best_sonde, sec_best_sonde
Code example #18
File: nasa.py Project: brorfred/njord
 def _l3read_nc4(self):
     self.vprint( "Reading netCDF4 file")
     print(self.filename)
     nc = Dataset(self.filename)
     nc.set_auto_mask(False)
     nc.set_auto_scale(True)
     
     var         = nc.variables[nc.variables.keys()[0]]
     field       = var[self.j1:self.j2, self.i1:self.i2].copy()
     try:
         self.minval = var.valid_min
     except AttributeError:
         self.minval = var.display_min
     try:
         valid_max = var.valid_max
     except AttributeError:
         valid_max = 0
     try:
         display_max = var.display_max
     except AttributeError:
         display_max = 0
     self.maxval = max(valid_max, display_max)
         
     start_jd    = pl.datestr2num(nc.time_coverage_start)
     end_jd      = pl.datestr2num(nc.time_coverage_end)
     self.jd     = ((start_jd + end_jd)/2)
     self.date   = pl.num2date(self.jd)
     return field
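
The last lines above place self.jd at the midpoint of the file's time coverage; averaging two matplotlib day numbers gives the date halfway between them. A minimal sketch (timestamps are illustrative):

import pylab as pl

start_jd = pl.datestr2num("2010-06-01 00:00")
end_jd = pl.datestr2num("2010-06-03 00:00")
print(pl.num2date((start_jd + end_jd) / 2))   # 2010-06-02 00:00:00+00:00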
Code example #19
File: traj.py Project: brorfred/pytraj
    def heatmap(self, maskvec=None, jd=None, cmap=None, alpha=1, colorbar=True,
                clf=True, map_region=None, log=False, kwargs={}):
        """Plot particle positions on a map
                 
                 mask: Boolean vector determining which particles to plot
                ntrac: Particle ID
                   jd: Julian date to plot, has to be included in jdvec.
                 cmap: color map to use
                 alpha: transparency of pcolormap
                  clf: Clear figure if True
             colorbar: Show colorbar if True (default)
           map_region: Use other map_regions than set by config file
               kwargs: Other arguments to pcolor (must be a dict).
        """
        if maskvec is None:
            maskvec = self.ntrac==self.ntrac
        if jd is not None:
            maskvec = maskvec & (self.jd==jd)

        if USE_FIGPREF:
            figpref.current()
        if clf:
            pl.clf()

        if cmap is not None:
            kwargs['cmap'] = cmap
        if log is True:
            kwargs['norm'] = LogNorm()
        if colorbar is True:
            kwargs['colorbar'] = True
        mat = self.heatmat(nan=True, maskvec=maskvec, jd=None)
        mat[mat==0] = np.nan
        self.gcm.pcolor(mat, **kwargs)
        if jd: pl.title(pl.num2date(jd).strftime("%Y-%m-%d %H:%M"))
Code example #20
File: avhrr.py Project: brorfred/njord
 def refresh(self, jd1=None, jd2=None):
     """ Read a L3 mapped file and add field to current instance"""
     jd1 = self.jdmin if jd1 is None else jd1
     jd2 = int(pl.date2num(dtm.now())) - 1  if jd2 is None else jd2
     for jd in np.arange(jd1, jd2+1):
         filename = os.path.join(self.datadir, self.generate_filename(jd))
         print " --- %s --- " % pl.num2date(jd).strftime('%Y-%m-%d')
         print "Checking %s" % filename + '.bz2'
         if not os.path.isfile(filename + '.bz2'):
             try:
                 self.load(jd=jd, verbose=True)
             except IOError:
                 print "Downloading failed. Trying to remove old files."
                 try:
                     os.remove(filename)
                 except:
                     pass
                 try:
                     os.remove(filename + ".bz2")
                 except:
                     pass
                 try:
                     self.load(jd=jd,verbose=True)
                 except:
                     print ("   ###   Warning! Failed to add %s   ###" %
                            os.path.basename(filename))
             print "\n"
         else:
             print "found"
Code example #21
File: nasa.py Project: brorfred/njord
 def refresh(self, fld, fldtype="DAY", jd1=None, jd2=None, delall=False):
     """ Read a L3 mapped file and add field to current instance"""
     jd1 = pl.datestr2num('2003-01-01') if jd1 is None else jd1
     jd2 = int(pl.date2num(dtm.now())) - 1  if jd2 is None else jd2
     for jd in np.arange(jd1, jd2):
         print " --- %s --- " % pl.num2date(jd).strftime('%Y-%m-%d')
         filename = os.path.join(
             self.datadir, self.generate_filename(jd,fld,fldtype) + ".nc")
         if delall:
             for fn in glob.glob(filename + "*"):
                 print "Deleted %s" % fn
                 os.remove(fn)
         print "Checking files"
         if not os.path.isfile(filename[:-3] + '.npz'):
             try:
                 self.load(fld, fldtype, jd=jd, verbose=True)
             except IOError:
                 print "Downloading failed. Trying to remove old files."
                 try:
                     os.remove(filename)
                 except:
                     pass
                 try:
                     self.load(fld,fldtype,jd=jd,verbose=True)
                 except:
                     print ("   ###   Warning! Failed to add %s   ###" %
                            os.path.basename(filename))
             print "\n"
         else:
             print "found"
Code example #22
File: ecgplotter.py Project: hamalawy/telehealth
    def initPlot(self):
        """ redraw the canvas to set the initial x and y axes when plotting starts """

        self.starttime = datetime.datetime.today()
        self.currenttime = self.starttime + datetime.timedelta(seconds=3)
        self.endtime = self.starttime + datetime.timedelta(seconds=15)
        self.timeaxis = num2date(drange(self.starttime, self.endtime, datetime.timedelta(milliseconds=10)))

        self.xvalues.append(self.timeaxis[0])
        self.yvalues.append(self.parentPanel.myECG.ecg_leadII[0])

        # for counter purposes only
        self.ybuffer = self.yvalues

        self.lines[0].set_data(self.xvalues, self.yvalues)

        self.axes.set_xlim((date2num(self.starttime), date2num(self.currenttime)))
        self.axes.xaxis.set_ticklabels(
            self.createXTickLabels(self.currenttime), rotation=30, ha="right", size="smaller", name="Calibri"
        )

        self.samples_counter += 1
        self.ysamples_counter += 1

        self.buff_counter = 1
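
initPlot() builds its x-axis with matplotlib's drange(), which returns evenly spaced day numbers between two datetimes, and num2date() turns them back into datetime objects. A minimal sketch of that pattern (interval values are illustrative):

import datetime
from matplotlib.dates import drange, num2date

start = datetime.datetime(2011, 5, 1, 12, 0, 0)
end = start + datetime.timedelta(seconds=3)
step = datetime.timedelta(milliseconds=10)

timeaxis = num2date(drange(start, end, step))  # ~300 datetimes spaced 10 ms apart
print(len(timeaxis), timeaxis[0], timeaxis[-1])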
Code example #23
File: model.py Project: nyuhuhuu/trachacks
    def get_daily_backlog_chart(self, backlog_history):

        numdates = backlog_history[0]
        backlog_stats = backlog_history[1]

        # create counted list.
        opened_tickets_dataset = [len(list) for list in backlog_stats['opened']]
        created_tickets_dataset = [len(list) for list in backlog_stats['created']]

        # need to add created and closed tickets for charting purposes: we want to show
        # closed tickets on top of opened tickets in the bar chart.
        closed_tickets_dataset = []
        for i in range(len(created_tickets_dataset)):
            closed_tickets_dataset.append(created_tickets_dataset[i] + len(backlog_stats['closed'][i]))

        bmi_dataset = []
        for i in range(len(opened_tickets_dataset)):
            if opened_tickets_dataset[i] == 0:
                 bmi_dataset.append(0.0)
            else:
                bmi_dataset.append(float(closed_tickets_dataset[i]) * 100 / float(opened_tickets_dataset[i]))

#        for idx, numdate in enumerate(numdates):
#            self.env.log.info("%s: %s, %s, %s" % (num2date(numdate), 
#                                                    closed_tickets_dataset[idx],
#                                                    opened_tickets_dataset[idx],
#                                                    created_tickets_dataset[idx]))
        ds_daily_backlog = ''

        for idx, numdate in enumerate(numdates):
                    ds_daily_backlog = ds_daily_backlog + '{ date: "%s", opened: %d, closed: %d, created: %d}, ' \
                          % (format_date(num2date(numdate), tzinfo=utc), opened_tickets_dataset[idx], \
                             closed_tickets_dataset[idx], created_tickets_dataset[idx])

        return '[ ' + ds_daily_backlog + ' ];'
Code example #24
File: base.py Project: raphaeldussin/njord
 def _jd_to_dtm(self):
     dtobj = pl.num2date(self.jd)
     njattrlist = ['yr',  'mn',   'dy', 'hr',  'min',    'sec']
     dtattrlist = ['year','month','day','hour','minute', 'second']
     for njattr,dtattr in zip(njattrlist, dtattrlist):
         setattr(self, njattr, getattr(dtobj, dtattr))
     self.yd = self.jd - pl.date2num(dtm(self.yr,1,1)) + 1
Code example #25
File: trm.py Project: tamu-pong/Py-TRACMASS
    def scatter(self,ntrac=None,ints=None,k1=None,k2=None,c="g",clf=True):
        self.add_mp()
        mask = self.ntrac==self.ntrac
        if ints:
            mask = mask & (self.ints==ints)
        if ntrac:
            mask = mask & (self.ntrac==ntrac)

        if clf: pl.clf()
        self.ijll()
        x,y = self.mp(self.lon[mask],self.lat[mask])

        self.mp.pcolormesh(self.mpxll,self.mpyll,
                           np.ma.masked_equal(self.landmask,1),cmap=GrGr())
        xl,yl = self.mp(
            [self.llon[0,0], self.llon[0,-1], self.llon[-1,-1],
             self.llon[-1,0],self.llon[0,0]],
            [self.llat[0,0], self.llat[0,-1], self.llat[-1,-1],
             self.llat[-1,0], self.llat[0,0]]
             )
        self.mp.plot(xl,yl,'0.5')

        if ntrac: self.mp.plot(x,y,'-w',lw=0.5)
        self.mp.scatter(x,y,5,c)
        if ints:
            jd = self.jd[self.ints==ints][0]
            pl.title(pl.num2date(jd).strftime("%Y-%m-%d %H:%M"))
        print len(x)
Code example #26
def plot_date_time_graph(db_name,table_name):  
	format='%d %b %Y %I:%M %p'
	conn = sqlite3.connect(os.getcwd()+'/data/'+db_name+'.db')
	c=conn.cursor()
	date_time_arr=[]
	tweet_count=[]
	for row in c.execute('SELECT date_posted,time_posted From '+table_name):
		date_string= ' '.join(row)
		date_time_arr.append(datetime.strptime(date_string, format))

	for row in c.execute('SELECT retweets From '+table_name):
		tweet_count.append(row[0]+1)
		y= np.array(tweet_count)
		x=np.array(date_time_arr)
		N=len(tweet_count)
		colors = np.random.rand(N)
	numtime = [date2num(t) for t in x] 
 	# plotting the histogram
	ax = figure().gca()
	x, y, patches = hist(numtime, bins=50,alpha=.5)
	print x,y
	# adding the labels for the x axis
	tks = [num2date(p.get_x()) for p in patches] 
	xticks(tks,rotation=40)
	# formatting the dates on the x axis
	ax.xaxis.set_major_formatter(DateFormatter('%d %b %H:%M'))
	ax.set_xlabel('Time(dd-mm HH:MM)', fontsize=16)
	ax.set_ylabel('Tweet Count', fontsize=16)
	show()
Code example #27
File: read_sounding.py Project: scollis/bom_mds
def parse_sounding_block(sounding_block):
	headers=sounding_block[0].split()[0:17]
	start_date_str=sounding_block[1].split()[0]+" "+sounding_block[1].split()[1]
	data_dict=dict([(headers[i],array([float_conv(sounding_block[j+1].split()[i]) for j in range(len(sounding_block)-1)])) for i in range(len(headers))])
	date_list=[num2date(datestr2num(sounding_block[i+1].split()[0]+" "+sounding_block[i+1].split()[1])) for i in range(len(sounding_block)-1)]
	data_dict.update({'date_list':array(date_list)})
	return data_dict
Code example #28
File: ecgplotter.py Project: hamalawy/telehealth
    def run(self):
        """ start ECG plotting """

        while not self.stopPlotThread.isSet():

            self.parentFrame.ecgplotter.plot()
            # check if the thread has been stopped. if yes, no need to update endtime.
            if not self.stopPlotThread.isSet():
                self.parentFrame.ecgplotter.endtime += datetime.timedelta(seconds=15)
                # reinitialize the time axis (it changes after every 15 seconds and needs to be extended)
                self.parentFrame.ecgplotter.timeaxis = num2date(
                    drange(
                        self.parentFrame.ecgplotter.starttime,
                        self.parentFrame.ecgplotter.endtime,
                        datetime.timedelta(milliseconds=10),
                    )
                )

            else:
                break

        # add slider at the end of plotting only
        self.parentFrame.ecgplotter.addSlider(self.parentFrame.ecgplotter.endtime)
        self.parentFrame.ecgplotter.canvas.draw()
        self.parentFrame.ecgplotter.gui_repaint()
Code example #29
File: dbase.0.6.py Project: BKJackson/SciPy-CookBook
	def info(self,*var, **adict):
		"""
		Printing descriptive statistics on selected variables
		"""
			
		# calling convenience functions to clean-up input parameters
		var, sel = self.__var_and_sel_clean(var, adict)
		dates, nobs = self.__dates_and_nobs_clean(var, sel)
			
		# setting the minimum and maximum dates to be used
		mindate = pylab.num2date(min(dates)).strftime('%d %b %Y')
		maxdate = pylab.num2date(max(dates)).strftime('%d %b %Y')

		# number of variables (excluding date if present)
		nvar = len(var)

		print '\n=============================================================================='
		print '============================ Database information ============================'
		print '==============================================================================\n'

		print 'file:				%s' % self.DBname
		print '# obs:				%s' % nobs
		print '# variables:		%s' % nvar 
		print 'Start date:			%s' % mindate
		print 'End date:			%s' % maxdate

		print '\nvar				min			max			mean		std.dev		miss	levels'
		print '=============================================================================='
		
		sets = {}
		for i in var:
			col = self.data[i][sel];
			if type(col[0]) == string_:
				_miss = sum(col == '')
				col_set = set(col)
				sets[i] = col_set
				print '''%-5s			%-5s		%-5s		%-5s		%-5s		% -5.0f	%-5i''' % tuple([i,'-','-','-','-',_miss,len(col_set)]) 
			else:
				_miss = isnan(col); col = col[_miss == False]; _min = col.min(); _max = col.max(); _mean = col.mean(); _std = col.std()
				print '''% -5s			% -5.2f		% -5.2f		% -5.2f		% -5.2f		% -5.0f''' % tuple([i,_min,_max,_mean,_std,sum(_miss)]) 

		if sets:
			print '\n\nLevels for non-numeric data:'
			for i in sets.keys():
				print '=============================================================================='
				print '''% -5s	% -5s''' % tuple([i,sets[i]])
Code example #30
File: read_rays.py Project: scollis/bom_mds
def parse_info_line(info_line):
    # date_obj=num2date(datestr2num(info_line[(info_line.find('date/time=')+len('date/time=')):-1]))
    # n_rays=int(info_line[(info_line.find('number_of_rays=')+len('number_of_rays=')):(info_line.find('date/time=')-1)])
    ilsplit = info_line.split()
    date_obj = num2date(datestr2num(ilsplit[-2] + " " + ilsplit[-1]))
    sweep_number = int(ilsplit[ilsplit.index("sweep_no=") + 1])
    nrays = int(ilsplit[ilsplit.index("number_of_rays=") + 1])
    return {"date": date_obj, "sweep number": sweep_number, "rays": nrays}
Code example #31
File: time.py Project: jliuocean/octant
 def get_dates(self):
     return num2date(self.jd)
Code example #32
File: bgccsm.py Project: raphaeldussin/njord
 def create_months(self):
     self.create_tvec()
     self.months = np.array([ dt.month for dt in pl.num2date(self.tvec)])
Code example #33
 def format_date(x, pos=None):
     if x <= 0:
         return "0"
     return num2date(x).strftime("%H:%M:%S")
Code example #34
    def scatter(self,
                mask=None,
                ntrac=None,
                jd=None,
                k1=None,
                k2=None,
                c="g",
                clf=True,
                coord="latlon",
                land="nice",
                map_region=None):
        """Plot particle positions on a map

                 mask: Boolean vector indicating which particles to plot
                ntrac: Particle ID
                   jd: Julian date to plot, has to be included in jdvec.
                   k1: only plot particles deeper than k1
                   k2: only plot particles shallower than k2
                    c: color of particles
                  clf: Clear figure  if True
                coord: Use lat-lon coordinates if set to "latlon" (default),
                           i-j coordinates if set to "ij" 
                 land: Use landmask from basemap if set to "nice" (default),
                           landmask from model if set to "model". 
           map_region: Use other map_regions than set by config file

        """
        if (not hasattr(self, 'mp')) & (coord == "latlon"):
            self.add_mp(map_region)
        if (not hasattr(self, 'lon')) & (coord == "latlon"):
            self.ijll()
        if mask is None:
            mask = self.ntrac == self.ntrac
            if jd is not None:
                mask = mask & (self.jd == jd)
            if ntrac is not None:
                mask = mask & (self.ntrac == ntrac)

        figpref.current()
        if clf: pl.clf()

        if coord == "latlon":
            x, y = self.mp(self.lon[mask], self.lat[mask])
            scH = self.mp.scatter(x, y, 5, c)
        else:
            scH = pl.scatter(self.x[mask], self.y[mask], 5, c)

        if land == "nice":
            land = self.gcm.mp.nice()
        elif coord == "latlon":
            self.mp.pcolormesh(self.mpxll,
                               self.mpyll,
                               np.ma.masked_equal(self.landmask, False),
                               cmap=GrGr())
        else:
            pl.pcolormesh(np.arange(self.gcm.i1, self.gcm.i2 + 1),
                          np.arange(self.gcm.j1, self.gcm.j2 + 1),
                          np.ma.masked_equal(self.landmask, False),
                          cmap=GrGr())
        if (ntrac is not None) and (coord == "latlon"):
            self.mp.plot(x, y, '-w', lw=0.5)
        """ 
        xl,yl = self.mp(
            [self.llon[0,0], self.llon[0,-1], self.llon[-1,-1],
             self.llon[-1,0],self.llon[0,0]],
            [self.llat[0,0], self.llat[0,-1], self.llat[-1,-1],
             self.llat[-1,0], self.llat[0,0]]
             )
        #self.mp.plot(xl,yl,'0.5')
        """
        if jd: pl.title(pl.num2date(jd).strftime("%Y-%m-%d %H:%M"))
        return scH
Code example #35
File: GraphWindow.py Project: billpeet/BBQpi
 def format_date(x, pos=None):
     dt = pl.num2date(x)
     if dt.second == 0:
         return dt.strftime('%I:%M %p')
     else:
         return pl.num2date(x).strftime('%I:%M:%S')
Code example #36
 def datestr(self):
     jd = getattr(self, "jd", self.defaultjd)
     if type(jd) is int:
         return pl.num2date(jd).strftime("%Y-%m-%d")
     else:
         return pl.num2date(jd).strftime("%Y-%m-%d %H:%M")
Code example #37
def kmz_anim(lon, lat, time, prop, **kwargs):
    lon = asarray(lon)
    lat = asarray(lat)

    jd = pylab.date2num(time)
    jd_edges = hstack((1.5 * jd[0] - 0.5 * jd[1], 0.5 * (jd[1:] + jd[:-1]),
                       1.5 * jd[-1] - 0.5 * jd[-2]))
    time_edges = pylab.num2date(jd_edges)
    time_starts = time_edges[:-1]
    time_stops = time_edges[1:]

    name = kwargs.pop('name', 'overlay')
    color = kwargs.pop('color', '9effffff')
    visibility = str(kwargs.pop('visibility', 1))
    kmzfile = kwargs.pop('kmzfile', 'overlay.kmz')
    pixels = kwargs.pop('pixels', 300)  # pixels of the max. dimension
    units = kwargs.pop('units', '')
    vmax = kwargs.pop('vmax', prop.max())
    kwargs['vmax'] = vmax
    vmin = kwargs.pop('vmin', prop.min())
    kwargs['vmin'] = vmin

    geo_aspect = cos(lat.mean() * pi / 180.0)
    xsize = lon.ptp() * geo_aspect
    ysize = lat.ptp()

    aspect = ysize / xsize
    if aspect > 1.0:
        figsize = (10.0 / aspect, 10.0)
    else:
        figsize = (10.0, 10.0 * aspect)

    kml_text = kml_preamble

    ioff()
    fig = figure(figsize=figsize,
                 dpi=pixels // 10,
                 facecolor=None,
                 frameon=False)
    ax = fig.add_axes([0, 0, 1, 1])

    f = zipfile.ZipFile(kmzfile, 'w')

    for frame in range(prop.shape[0]):
        tstart = time_starts[frame]
        tstop = time_stops[frame]
        print('Writing frame ', frame, tstart.isoformat(), tstop.isoformat())
        ax.cla()
        pc = ax.pcolor(lon, lat, prop[frame], **kwargs)
        ax.set_xlim(lon.min(), lon.max())
        ax.set_ylim(lat.min(), lat.max())
        ax.set_axis_off()
        icon = 'overlay_%d.png' % frame
        savefig(icon)
        kml_text += kml_frame.replace('__NAME__', name)\
                             .replace('__COLOR__', color)\
                             .replace('__VISIBILITY__', visibility)\
                             .replace('__SOUTH__', str(lat.min()))\
                             .replace('__NORTH__', str(lat.max()))\
                             .replace('__EAST__', str(lon.max()))\
                             .replace('__WEST__', str(lon.min()))\
                             .replace('__FRAME__', icon)\
                             .replace('__TIMEBEGIN__', tstart.isoformat())\
                             .replace('__TIMEEND__', tstop.isoformat())

        f.write(icon)
        os.remove(icon)

    # legend
    fig = figure(figsize=(1.0, 4.0), facecolor=None, frameon=False)
    cax = fig.add_axes([0.0, 0.05, 0.2, 0.90])
    cb = colorbar(pc, cax=cax)
    cb.set_label(units, color='0.9')
    for lab in cb.ax.get_yticklabels():
        setp(lab, 'color', '0.9')

    savefig('legend.png')
    f.write('legend.png')
    os.remove('legend.png')

    kml_text += kml_legend

    kml_text += kml_closing
    f.writestr('overlay.kml', kml_text)
    f.close()
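
kmz_anim() assigns each frame a start/stop time halfway to its neighbouring frames, extrapolating the first and last edges by half a step. A minimal sketch of that edge computation (the sample times are illustrative):

import numpy as np
import pylab
from datetime import datetime, timedelta

time = [datetime(2012, 7, 1) + timedelta(hours=6 * i) for i in range(4)]
jd = pylab.date2num(time)
jd_edges = np.hstack((1.5 * jd[0] - 0.5 * jd[1],
                      0.5 * (jd[1:] + jd[:-1]),
                      1.5 * jd[-1] - 0.5 * jd[-2]))
print(pylab.num2date(jd_edges))   # five edges bracketing the four frame times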
Code example #38
    ###################################
    ### usage examples of dbase class
    ###################################

    import sys
    from scipy import c_

    # making a directory to store simulated data
    if not os.path.exists('./dbase_test_files'): os.mkdir('./dbase_test_files')

    # creating simulated data and variable labels
    varnm = ['date', 'a', 'b', 'c']  # variable labels
    nobs = 100
    data = randn(nobs, 3)  # the data array
    dates = pylab.num2date(arange(730493, 730493 + (nobs * 7), 7))
    dates = [i.strftime('%d %b %y') for i in dates]
    data = c_[dates, data]

    # adding a few missing values
    data[5, 1] = ''
    data[9, 3] = ''

    # adding a non-numeric variable
    varnm = varnm + ['id']  # variable labels
    id = [('id' + str(i)) for i in range(nobs)]
    id[8] = ''  # setting one id to missing
    data = c_[data, id]

    # saving simulated data to a csv file
    f = open('./dbase_test_files/data.csv', 'w')
Code example #39
            """
            print(matlab_time)
            print(np.shape(matlab_time))
            print(np.shape(matlab_time[0]))
            print(type(matlab_time))
            print(type(matlab_time[0][0]))
            """

            days = []
            for i in range(3):
                days.append(matlab_time[i].flatten() - matlab_time[i][0])

            utc = []
            for i in range(3):
                utc.append(
                    np.asarray(pl.num2date(matlab_time[i] - 366)).flatten())

            patched_u = data["patched_u"]
            patched_v = data["patched_v"]
            depths_of_cuts = data["depths_of_cuts"].flatten()
            patched_mask = data["nan_mask"]

            if np.shape(patched_u)[0] == 1:
                patched_u = patched_u[0]
                patched_v = patched_v[0]
                patched_mask = patched_mask[0]
            #print(np.shape(utc))
            #print(np.shape(patched_u[0]))

            #top=0.954,bottom=0.081,left=0.05,right=0.985,hspace=0.231,wspace=0.02
            print("test", np.shape(days[0]), np.shape(patched_u[0]))
Code example #40
 def num2date(self, mjd):
     import pylab
     return pylab.num2date(mjd)
Code example #41
 def format_date(x, pos=None):
     return pylab.num2date(x).strftime('%m-%d-%Y')
Code example #42
def main():
    plt.close('all')
    if len(sys.argv) > 1:
        if len(sys.argv) == 2:
            # a single argument was provides as option
            if sys.argv[1] == 'init':
                # copy INI files and a template configuration file
                # to current directory
                create_dummy_configuration()
                sys.exit()
            else:
                file = sys.argv[1]  # name of config file
                if not os.path.exists(file):
                    raise ValueError('Configuration file can not be \
                                      found: %s' % file)
        else:
            raise ValueError('Currently not more than one command \
                               line parameter supported!')
    else:  # default
        print('*******************************************')
        print('* WELCOME to pycmbs.py                    *')
        print('* Happy benchmarking ...                  *')
        print('*******************************************')
        print ''
        print 'please specify a configuration filename as argument'
        sys.exit()

    ####################################################################
    # CONFIGURATION and OPTIONS
    ####################################################################

    # read configuration file
    CF = config.ConfigFile(file)

    # read plotting options
    PCFG = config.PlotOptions()
    PCFG.read(CF)
    plot_options = PCFG

    ####################################################################
    # REMOVE previous Data warnings
    ####################################################################
    outdir = CF.options['outputdir']
    if outdir[-1] != os.sep:
        outdir += os.sep

    os.environ['PYCMBS_OUTPUTDIR'] = outdir
    os.environ['PYCMBS_OUTPUTFORMAT'] = CF.options['report_format']

    os.environ['DATA_WARNING_FILE'] = outdir + 'data_warnings_' \
        + CF.options['report'] + '.log'
    if os.path.exists(os.environ['DATA_WARNING_FILE']):
        os.remove(os.environ['DATA_WARNING_FILE'])

    for thevar in plot_options.options.keys():
        if thevar in plot_options.options.keys():
            print('Variable: %s' % thevar)
            for k in plot_options.options[thevar].keys():
                print('    Observation: %s' % k)

    if CF.options['basemap']:
        f_fast = False
    else:
        f_fast = True
    shift_lon = use_basemap = not f_fast

    ########################################################################
    # TIMES
    ########################################################################
    s_start_time = CF.start_date
    s_stop_time = CF.stop_date
    start_time = pylab.num2date(pylab.datestr2num(s_start_time))
    stop_time = pylab.num2date(pylab.datestr2num(s_stop_time))

    ########################################################################
    # INIT METHODS
    ########################################################################
    # names of analysis scripts for all variables ---
    scripts = CF.get_analysis_scripts()

    # get dictionary with methods how to read data for model variables to be
    # analyzed
    variables = CF.variables
    varmethods = CF.get_methods4variables(CF.variables)

    # READ DATA
    # create a Model instance for each model specified
    # in the configuration file
    #
    # read the data for all variables and return a list
    # of Data objects for further processing

    model_cnt = 1
    proc_models = []

    for i in range(len(CF.models)):
        # assign model information from configuration
        data_dir = CF.dirs[i]
        model = CF.models[i]
        experiment = CF.experiments[i]

        # create model object and read data
        # results are stored in individual variables namex modelXXXXX
        if CF.dtypes[i].upper() == 'CMIP5':
            themodel = CMIP5Data(data_dir,
                                 model,
                                 experiment,
                                 varmethods,
                                 intervals=CF.intervals,
                                 lat_name='lat',
                                 lon_name='lon',
                                 label=model,
                                 start_time=start_time,
                                 stop_time=stop_time,
                                 shift_lon=shift_lon)
        elif CF.dtypes[i].upper() == 'CMIP5RAW':
            themodel = CMIP5RAWData(data_dir,
                                    model,
                                    experiment,
                                    varmethods,
                                    intervals=CF.intervals,
                                    lat_name='lat',
                                    lon_name='lon',
                                    label=model,
                                    start_time=start_time,
                                    stop_time=stop_time,
                                    shift_lon=shift_lon)
        elif 'CMIP5RAWSINGLE' in CF.dtypes[i].upper():
            themodel = CMIP5RAW_SINGLE(data_dir,
                                       model,
                                       experiment,
                                       varmethods,
                                       intervals=CF.intervals,
                                       lat_name='lat',
                                       lon_name='lon',
                                       label=model,
                                       start_time=start_time,
                                       stop_time=stop_time,
                                       shift_lon=shift_lon)

        elif CF.dtypes[i].upper() == 'JSBACH_BOT':
            themodel = JSBACH_BOT(data_dir,
                                  varmethods,
                                  experiment,
                                  intervals=CF.intervals,
                                  start_time=start_time,
                                  stop_time=stop_time,
                                  name=model,
                                  shift_lon=shift_lon)
        elif CF.dtypes[i].upper() == 'JSBACH_RAW':
            themodel = JSBACH_RAW(data_dir,
                                  varmethods,
                                  experiment,
                                  intervals=CF.intervals,
                                  name=model,
                                  shift_lon=shift_lon,
                                  start_time=start_time,
                                  stop_time=stop_time)
        elif CF.dtypes[i].upper() == 'JSBACH_RAW2':
            themodel = JSBACH_RAW2(data_dir,
                                   varmethods,
                                   experiment,
                                   intervals=CF.intervals,
                                   start_time=start_time,
                                   stop_time=stop_time,
                                   name=model,
                                   shift_lon=shift_lon)  # ,
            # model_dict=model_dict)
        elif CF.dtypes[i].upper() == 'JSBACH_SPECIAL':
            themodel = JSBACH_SPECIAL(data_dir,
                                      varmethods,
                                      experiment,
                                      intervals=CF.intervals,
                                      start_time=start_time,
                                      stop_time=stop_time,
                                      name=model,
                                      shift_lon=shift_lon)  # ,
            # model_dict=model_dict)
        elif CF.dtypes[i].upper() == 'CMIP3':
            themodel = CMIP3Data(data_dir,
                                 model,
                                 experiment,
                                 varmethods,
                                 intervals=CF.intervals,
                                 lat_name='lat',
                                 lon_name='lon',
                                 label=model,
                                 start_time=start_time,
                                 stop_time=stop_time,
                                 shift_lon=shift_lon)
        else:
            raise ValueError('Invalid model type: %s' % CF.dtypes[i])

        # read data for current model

        # options that specify regrid options etc.
        themodel._global_configuration = CF
        themodel.plot_options = plot_options
        themodel.get_data()

        # copy current model to a variable named modelXXXX
        cmd = 'model' + str(model_cnt).zfill(4) + ' = ' \
            + 'themodel.copy(); del themodel'
        exec(cmd)  # store copy of cmip5 model in separate variable

        # append model to list of models ---
        proc_models.append('model' + str(model_cnt).zfill(4))
        model_cnt += 1

    ########################################################################
    # MULTIMODEL MEAN
    # here we have now all the model and variables read.
    # The list of all models is contained in the variable proc_models.
    f_mean_model = True
    if f_mean_model:
        # calculate climatological mean values: The models contain already
        # climatological information in the variables[] list. Thus there is
        # not need to take care for the different timesteps here. This
        # should have been handled already in the preprocessing.
        # generate instance of MeanModel to store result
        MEANMODEL = MeanModel(varmethods, intervals=CF.intervals)

        # sum up all models
        for i in range(len(proc_models)):
            exec('actmodel = ' + proc_models[i] + '.copy()')
            MEANMODEL.add_member(actmodel)
            del actmodel

        # calculate ensemble mean
        MEANMODEL.ensmean()

        # save mean model to file
        # include filename of configuration file
        MEANMODEL.save(get_temporary_directory(),
                       prefix='MEANMODEL_' + file[:-4])

        # add mean model to general list of models to process in analysis
        proc_models.append('MEANMODEL')

    ########################################################################
    # END MULTIMODEL MEAN
    ########################################################################

    ########################################################################
    # INIT reporting and plotting and diagnostics
    ########################################################################
    # Gleckler Plot
    global_gleckler = GlecklerPlot()

    # Report
    rep = Report(CF.options['report'],
                 'pyCMBS report - ' + CF.options['report'],
                 CF.options['author'],
                 outdir=outdir,
                 dpi=300,
                 format=CF.options['report_format'])
    cmd = 'cp ' + os.environ['PYCMBSPATH'] + os.sep + \
        'logo' + os.sep + 'Phytonlogo5.pdf ' + rep.outdir
    os.system(cmd)

    ########################################################################
    ########################################################################
    ########################################################################
    # MAIN ANALYSIS LOOP: perform analysis for each model and variable
    ########################################################################
    ########################################################################
    ########################################################################
    skeys = scripts.keys()
    for variable in variables:

        # register current variable in Gleckler Plot
        global_gleckler.add_variable(variable)

        # call analysis scripts for each variable
        for k in range(len(skeys)):
            if variable == skeys[k]:

                print('Doing analysis for variable ... %s' % variable)
                print('   ... %s' % scripts[variable])
                # model list is reformatted so it can be evaluated properly
                model_list = str(proc_models).replace("'", "")
                cmd = 'analysis.' + scripts[variable] + '(' + model_list \
                    + ',GP=global_gleckler,shift_lon=shift_lon, \
                        use_basemap=use_basemap,report=rep,\
                        interval=CF.intervals[variable],\
                        plot_options=PCFG)'
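                # illustrative only: the assembled string looks roughly like
                # "analysis.<script>([model0000, ..., MEANMODEL], GP=global_gleckler,
                #  shift_lon=shift_lon, use_basemap=use_basemap, report=rep,
                #  interval=..., plot_options=PCFG)" and is executed via eval() below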

                eval(cmd)

    ########################################################################
    # GLECKLER PLOT finalization ...
    ########################################################################
    # generate Gleckler analysis plot for all variables and models analyzed ///
    global_gleckler.plot(vmin=-0.1,
                         vmax=0.1,
                         nclasses=16,
                         show_value=False,
                         ticks=[-0.1, -0.05, 0., 0.05, 0.1])
    oname = outdir + 'gleckler.pkl'
    if os.path.exists(oname):
        os.remove(oname)
    pickle.dump(global_gleckler.models,
                open(outdir + 'gleckler_models.pkl', 'w'))
    pickle.dump(global_gleckler.variables,
                open(outdir + 'gleckler_variables.pkl', 'w'))
    pickle.dump(global_gleckler.data, open(outdir + 'gleckler_data.pkl', 'w'))
    pickle.dump(global_gleckler._raw_data,
                open(outdir + 'gleckler_rawdata.pkl', 'w'))

    rep.section('Summary error statistics')
    rep.subsection('Gleckler metric')
    rep.figure(global_gleckler.fig,
               caption='Gleckler et al. (2008) model performance index',
               width='10cm')
    global_gleckler.fig.savefig(outdir + 'portraet_diagram.png',
                                dpi=200,
                                bbox_inches='tight')
    global_gleckler.fig.savefig(outdir + 'portraet_diagram.pdf',
                                dpi=200,
                                bbox_inches='tight')

    plt.close(global_gleckler.fig.number)

    # generate dictionary with observation labels for each variable
    labels_dict = {}
    for variable in variables:
        if variable not in PCFG.options.keys():
            continue
        varoptions = PCFG.options[variable]
        thelabels = {}
        for k in varoptions.keys():  # keys of observational datasets
            if k == 'OPTIONS':
                continue
            else:
                # only add observation to legend,
                # if option in INI file is set
                if varoptions[k]['add_to_report']:
                    # generate dictionary for GlecklerPlot legend
                    thelabels.update(
                        {int(varoptions[k]['gleckler_position']): k})
        labels_dict.update({variable: thelabels})
        del thelabels

    # legend for gleckler plot ///
    lcnt = 1
    for variable in variables:
        if variable not in PCFG.options.keys():
            continue
        varoptions = PCFG.options[variable]
        thelabels = labels_dict[variable]
        fl = global_gleckler._draw_legend(thelabels, title=variable.upper())
        if fl is not None:
            rep.figure(fl, width='8cm', bbox_inches=None)
            fl.savefig(outdir + 'legend_portraet_' + str(lcnt).zfill(5) +
                       '.png',
                       bbox_inches='tight',
                       dpi=200)
            plt.close(fl.number)
        del fl
        lcnt += 1

    # plot model ranking between different observational datasets ///
    rep.subsection('Model ranking consistency')
    for v in global_gleckler.variables:
        rep.subsubsection(v.upper())
        tmpfig = global_gleckler.plot_model_ranking(v,
                                                    show_text=True,
                                                    obslabels=labels_dict[v])
        if tmpfig is not None:
            rep.figure(tmpfig,
                       width='8cm',
                       bbox_inches=None,
                       caption='Model RANKING for different observational \
                       datasets: ' + v.upper())
            plt.close(tmpfig.number)
        del tmpfig

        # write a table with model ranking
        tmp_filename = outdir + 'ranking_table_' + v + '.tex'
        rep.open_table()
        global_gleckler.write_ranking_table(v,
                                            tmp_filename,
                                            fmt='latex',
                                            obslabels=labels_dict[v])
        rep.input(tmp_filename)
        rep.close_table(caption='Model rankings for variable ' + v.upper())

        # plot absolute model error
        tmpfig = global_gleckler.plot_model_error(v, obslabels=labels_dict[v])
        if tmpfig is not None:
            rep.figure(tmpfig,
                       width='8cm',
                       bbox_inches=None,
                       caption='Model ERROR for different observational \
                       datasets: ' + v.upper())
            plt.close(tmpfig.number)
        del tmpfig

    ########################################################################
    # CLEAN up and finish
    ########################################################################
    plt.close('all')
    rep.close()

    print('##########################################')
    print('# BENCHMARKING FINISHED!                 #')
    print('##########################################')
Code example #43
0
File: connect.py Project: raphaeldussin/pytraj
    def multiplot(self, jd1=730120.0, djd=60, dt=20):

        if not hasattr(self, 'disci'):
            self.generate_regdiscs()
            self.x = self.disci
            self.y = self.discj
        if not hasattr(self, 'lon'):
            self.ijll()

        if USE_FIGPREF: figpref.presentation()
        pl.close(1)
        pl.figure(1, (10, 10))

        conmat = self[jd1 - 730120.0:jd1 - 730120.0 + 60, dt:dt + 10]
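        # the slice above selects a 60-day block of seeding dates starting at
        # jd1 (offsets are relative to datenum 730120, i.e. 2000-01-01) and
        # transit times of dt to dt+10 days (cf. the suptitle below)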
        x, y = self.gcm.mp(self.lon, self.lat)
        self.gcm.mp.merid = []
        self.gcm.mp.paral = []

        pl.subplots_adjust(wspace=0, hspace=0, top=0.95)

        pl.subplot(2, 2, 1)
        pl.pcolormesh(miv(conmat), cmap=cm.hot)
        pl.clim(0, 250)
        pl.plot([0, 800], [0, 800], 'g', lw=2)
        pl.gca().set_aspect(1)
        pl.setp(pl.gca(), yticklabels=[])
        pl.setp(pl.gca(), xticklabels=[])
        pl.colorbar(aspect=40,
                    orientation='horizontal',
                    pad=0,
                    shrink=.8,
                    fraction=0.05,
                    ticks=[0, 50, 100, 150, 200])

        pl.subplot(2, 2, 2)
        colorvec = (np.nansum(conmat, axis=1) - np.nansum(conmat, axis=0))[1:]
        self.gcm.mp.scatter(x, y, 10, 'w', edgecolor='k')
        self.gcm.mp.scatter(x, y, 10, colorvec)
        self.gcm.mp.nice()
        pl.clim(0, 10000)

        pl.subplot(2, 2, 3)
        colorvec = np.nansum(conmat, axis=1)[1:]
        self.gcm.mp.scatter(x, y, 10, 'w', edgecolor='k')
        self.gcm.mp.scatter(x, y, 10, colorvec)
        self.gcm.mp.nice()
        pl.clim(0, 10000)

        pl.subplot(2, 2, 4)
        colorvec = np.nansum(conmat, axis=0)[1:]
        self.gcm.mp.scatter(x, y, 10, 'w', edgecolor='k')
        self.gcm.mp.scatter(x, y, 10, colorvec)
        self.gcm.mp.nice()
        pl.clim(0, 10000)
        if 'mycolor' in sys.modules:
            mycolor.freecbar([0.2, .06, 0.6, 0.020], [2000, 4000, 6000, 8000])
        pl.suptitle("Trajectories seeded from %s to %s, Duration: %i-%i days" %
                    (pl.num2date(jd1).strftime("%Y-%m-%d"),
                     pl.num2date(jd1 + djd).strftime("%Y-%m-%d"), dt, dt + 10))

        pl.savefig('multplot_%i_%03i.png' % (jd1, dt), transparent=True)
Code example #44
0

import numpy as np
import pylab as pl
import scipy.io as sio
import matplotlib.pyplot as plt

data = sio.loadmat(
    "/home/ole/thesis/all_data/emb217/deployments/moorings/TC_Flach/ADCP600/data/EMB217_TC-flach_adcp600_val.mat"
)

#print(data.keys())
data = data["adcpavg"]
substructure = data.dtype
#print(substructure)

rtc = data["rtc"][0][0].flatten()
curr = data["curr"][0][0]
vertical_v = data["vu"][0][0].T
# convert MATLAB datenum to UTC datetimes (MATLAB datenums run 366 days ahead
# of the classic matplotlib date numbers, hence the offset)
utc = np.asarray(pl.num2date(rtc - 366))

path = "dissipation_rate_adcp_emb217_TC_Flach.npz"  #("dissipation_rate_estimation.npz")

npzfile = np.load(path)
print(npzfile.files)
depth = npzfile["depth"]
utc_chunks = npzfile["utc"]
dissipation_rate_up = npzfile["dissipation_rate_up"]
dissipation_rate_down = npzfile["dissipation_rate_down"]
total_dissipation_rate = npzfile["dissipation_total"]

#figure 1 for the measurements
f1, axarr1 = plt.subplots(2, sharex=True, sharey=True)

#figure 2 for the test
Code example #45
0
    def _render_view(self, req, db, milestone):
        milestone_groups = []
        available_groups = []
        component_group_available = False
        ticket_fields = TicketSystem(self.env).get_ticket_fields()

        # collect fields that can be used for grouping
        for field in ticket_fields:
            if field['type'] == 'select' and field['name'] != 'milestone' \
                    or field['name'] in ('owner', 'reporter'):
                available_groups.append({
                    'name': field['name'],
                    'label': field['label']
                })
                if field['name'] == 'component':
                    component_group_available = True

        # determine the field currently used for grouping
        by = None
        if component_group_available:
            by = 'component'
        elif available_groups:
            by = available_groups[0]['name']
        by = req.args.get('by', by)

        tickets = get_tickets_for_milestone(self.env, db, milestone.name, by)
        stat = get_ticket_stats(self.stats_provider, tickets)
        tstat = get_ticket_stats(self.tickettype_stats_provider, tickets)

        # Parse the from date and adjust the timestamp to the last second of
        # the day
        today = to_datetime(None, req.tz)

        # Get milestone start date from session or fall back to the default
        # number of days back.
        # TODO: add logic to remember the start date either in db or session.
        #        if  req.session.get('mdashboard.fromdate') != None:
        #
        #            fromdate = parse_date(req.session.get('mdashboard.fromdate'), req.tz)
        #        else:
        fromdate = today - timedelta(days=self.default_daysback + 1)
        fromdate = fromdate.replace(hour=23, minute=59, second=59)

        # Data for milestone and timeline
        data = {
            'fromdate': fromdate,
            'milestone': milestone,
            'tickethistory': [],
            'dates': [],
            'ticketstat': {},
            'yui_base_url': self.yui_base_url
        }

        data.update(milestone_stats_data(self.env, req, stat, milestone.name))

        ticketstat = {'name': 'ticket type'}
        ticketstat.update(
            milestone_stats_data(self.env, req, tstat, milestone.name))
        data['ticketstat'] = ticketstat

        #self.env.log.info("ticketstat = %s" % (ticketstat,))

        # get list of ticket ids that are in the milestone
        #ctickets = get_tickets_for_milestone(self.env, db, milestone.name, 'type')
        everytickets = get_every_tickets_in_milestone(db, milestone.name)

        if everytickets != []:

            #tkt_history = {}

            #            collect_tickets_status_history(self.env, db, tkt_history, \
            #                                           everytickets, milestone)

            tkt_history = collect_tickets_status_history(
                self.env, db, everytickets, milestone)

            if tkt_history != {}:

                # Sort the keys in the history dict;
                # returns a sorted list of (key, value) tuples
                sorted_events = sorted(tkt_history.items(),
                                       key=lambda item: item[0])

                #debug
                self.env.log.info("sorted_event content")
                for event in sorted_events:
                    self.env.log.info(
                        "date: %s: event: %s" %
                        (format_date(to_datetime(event[0])), event[1]))

                # Get the first date a ticket entered the milestone
                min_time = min(sorted_events)[0]  #in Epoch Seconds
                begin_date = to_datetime(min_time).date()
                end_date = milestone.completed or to_datetime(None).date()

                # numpy array of date numbers covering the milestone period
                numdates = drange(begin_date, end_date + timedelta(days=1),
                                  timedelta(days=1))

                tkt_history_table = make_ticket_history_table(
                    self.env, numdates, sorted_events)

                #debug
                #self.env.log.info("tkt_history_table: %s", (tkt_history_table,))

                #Create data for the cumulative flow chart.
                tkt_cumulative_table = make_cumulative_data(
                    self.env, tkt_history_table)

                #debug
                #self.env.log.info(tkt_cumulative_table)

                # create a list of datetime objects from numdates
                dates = []
                for numdate in numdates:

                    utc_date = num2date(numdate)
                    dates.append(utc_date)
                    #self.env.log.info("%s: %s" % (utc_date, format_date(utc_date, tzinfo=utc)))

                # prepare Yahoo datasource for cumulative flow chart
                dscumulative = ''
                for idx, date in enumerate(dates):
                    dscumulative = dscumulative + '{ date: "%s", enter: %d, leave: %d, finish: %d}, ' \
                          % (format_date(date, tzinfo=utc), tkt_cumulative_table['Enter'][idx], \
                             tkt_cumulative_table['Leave'][idx], tkt_cumulative_table['Finish'][idx])

                data['tickethistory'] = tkt_cumulative_table
                data['dates'] = dates
                data['dscumulative'] = '[ ' + dscumulative + ' ];'
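                # dscumulative ends up as a fragment of JavaScript object literals,
                # e.g. [ { date: "...", enter: 3, leave: 1, finish: 2},  ];
                # (values illustrative), apparently consumed by the YUI chart template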

        return 'mdashboard.html', data, None
Code example #46
0
File: ocean_time.py Project: simion1232006/pyroms
    def get_dates(self):
        return asarray(num2date(self.jd))
Code example #47
0
# imports inferred from the names used below (assumed standard aliases)
import numpy
import pylab
from matplotlib.dates import (DateFormatter, DayLocator, HourLocator,
                              MinuteLocator, MonthLocator, SecondLocator,
                              YearLocator)


def adaptive_date_ticks(axx,
                        dtrange=None,
                        format=None,
                        formatm=None,
                        nticks=0,
                        fit=None,
                        label=True,
                        lformat=None,
                        ltime=None,
                        debug=False):
    '''Take an axis and a time range and choose the optimal time display.

        axx is the xaxis or yaxis object (e.g. ax.xaxis), so dates can be
            shown on either axis
        dtrange is the time range of the data in days,
            defaults to the existing plot data range
        format is the preferred time format for the major ticks
            e.g. '%Y' vs '%y' or '%b %d, %Y' vs '%m/%d'
        formatm is the preferred time format for the minor ticks
            e.g. '%Y' vs '%y' or '%b %d, %Y' vs '%m/%d'
        nticks forces the number of ticks,
            defaults to the number already in the figure
        fit determines whether the image should:
            fit the data range exactly (fit='exact')
            fit the nearest tick mark outside the data range (fit='tick')
            use the default fit (default or fit=None)
        label determines whether to add a label to the axis
        lformat allows the user to specify the format of the label
        ltime is the date number used to generate the label,
            defaults to the middle of the data range
        debug prints which tick scale was chosen'''
    if dtrange is None:
        # (vmin, vmax) of the plotted data, in date numbers; replaces the
        # obsolete getp(..., 'data_interval')/'bounds' access
        bounds = axx.get_data_interval()
        dtrange = pylab.diff(bounds)
        if label and ltime is None:
            # `bounds` only exists in this branch, so derive the label time here
            ltime = pylab.mean(bounds)
    trange = dtrange * 86400
    if nticks == 0:
        nticks = len(axx.get_ticklocs())
    if trange < 60:  #image covers less than 1 minute
        bt = int(60 / nticks)
        tloc = MinuteLocator()
        tlocm = SecondLocator(bysecond=range(bt, 60, bt))
        tfor = ':%S'
        tform = '%M:%S'
        tlabel = '%b %d, %Y %H:%M'
        if debug: print 'seconds'
    elif trange / nticks < 60:
        tloc = MinuteLocator()
        tlocm = SecondLocator(bysecond=range(15, 60, 15))
        tform = ':%S'
        tfor = '%M:00'
        tlabel = '%b %d, %Y %H:'
        if debug: print 'half minutes'
    elif trange / nticks < 90:
        tloc = MinuteLocator(interval=1)
        tlocm = SecondLocator(bysecond=30)
        tform = ''
        tfor = '%M'
        tlabel = '%b %d, %Y %H:'
        if debug: print 'minutes'
    elif trange / nticks < 120:
        tloc = MinuteLocator(byminute=range(0, 60, 2))
        tlocm = SecondLocator(bysecond=(0, 30))
        tfor = '%M'
        tform = ''
        tlabel = '%b %d, %Y %H:'
        if debug: print '2 minutes'
    elif trange / nticks < 240:
        tloc = MinuteLocator(interval=5)
        tlocm = MinuteLocator(interval=1)
        tfor = '%H:%M'
        tform = ''
        tlabel = '%b %d, %Y'
        if debug: print '5 minutes'
    elif trange / nticks < 600:
        tloc = MinuteLocator(interval=10)
        tlocm = MinuteLocator(interval=1)
        tform = ''
        tfor = '%H:%M'
        tlabel = '%b %d, %Y'
        if debug: print '10 minutes'
    elif trange / nticks < 1200:
        tloc = HourLocator(interval=1)
        tlocm = MinuteLocator(byminute=range(15, 60, 15))
        tfor = '%H:%M'
        tform = ':%M'
        tlabel = '%b %d, %Y'
        if debug: print '30 minutes'
    elif trange / nticks < 2400:
        tloc = MinuteLocator(byminute=0)
        tlocm = MinuteLocator(byminute=30)
        tfor = '%H:00'
        tform = ''
        tlabel = '%b %d, %Y'
        if debug: print 'hour'
    elif trange / nticks < 4500:
        tloc = HourLocator(interval=2)
        tlocm = HourLocator(interval=1)
        tform = ''
        tfor = '%H:00'
        tlabel = '%b %d, %Y'
        if debug: print '2 hours'
    elif trange < 86400:
        tloc = HourLocator(byhour=range(0, 24, 3))
        tlocm = HourLocator()
        tform = ''
        tfor = '%H:00'
        tlabel = '%b %d, %Y'
        if debug: print '3 hours'
    elif trange < 86400 * 2:
        tloc = HourLocator(byhour=range(0, 24, 6))
        tlocm = HourLocator(byhour=range(0, 24, 2))
        tform = ''
        tfor = '%H:00\n%m/%d'
        tlabel = '%b %d, %Y'
        if debug: print '6 hours'
    elif dtrange < 2.5:
        tloc = DayLocator()
        tlocm = HourLocator(byhour=range(6, 24, 6))
        tfor = '%m/%d'
        tform = '%H:%M'
        tlabel = '%Y'
        if debug: print '1 day/ 6 hours'
    elif dtrange < 3:
        tloc = DayLocator()
        tlocm = HourLocator(byhour=12)
        tfor = '%m/%d'
        tform = '%H:%M'
        tlabel = '%Y'
        if debug: print '1 day/ 12 hours'
    elif dtrange / nticks < 0.5:
        tloc = DayLocator(interval=1)
        tlocm = HourLocator(byhour=range(0, 24, 12))
        tform = ''
        tfor = '%m/%d'
        tlabel = '%Y'
        if debug: print '1 day'
    elif dtrange < 31 * 6:
        tloc = MonthLocator()
        if dtrange / nticks < 1:
            interv = 1
            endv = 32
        elif dtrange / nticks < 2:
            interv = 2
            endv = 31
        elif dtrange / nticks < 3:
            interv = 3
            endv = 30
        elif dtrange / nticks < 4:
            interv = 5
            endv = 30
        elif dtrange / nticks < 6:
            interv = 7
            endv = 28
        elif dtrange / nticks < 8:
            interv = 10
            endv = 30
        else:
            interv = 15
            endv = 30
        tlocm = MonthLocator(bymonthday=range(interv, endv, interv))
        tfor = "%d\n%b '%y"
        tform = '%d'
        tlabel = 'date'
        if debug: print 'month'
    elif dtrange / nticks < 30:
        tloc = MonthLocator(bymonth=range(1, 13, 2))
        tlocm = MonthLocator(bymonth=range(2, 13, 2))
        tfor = "%b\n%y"
        tform = "%b"
        tlabel = 'date'
        if debug: print '2 month'
    elif dtrange < 366:
        tloc = MonthLocator(bymonth=range(1, 13, 6))
        tlocm = MonthLocator(interval=1)
        tfor = "%b\n'%y"
        tform = "%b"
        tlabel = 'date'
        if debug: print '3 month/month'
    elif dtrange < 730:
        tloc = MonthLocator(bymonth=range(1, 13, 6))
        tlocm = MonthLocator(bymonth=range(1, 13, 2))
        tfor = "%b\n'%y"
        tform = "%b"
        tlabel = 'date'
    elif dtrange < 365 * 3:
        tloc = MonthLocator(bymonth=range(1, 13, 6))
        tlocm = MonthLocator(bymonth=range(1, 13, 2))
        tfor = "%b\n'%y"
        tform = ""
        tlabel = 'date'
        if debug: print '6 month/2 month'
    elif dtrange < 365 * 4:
        tloc = MonthLocator(bymonth=1)
        tlocm = MonthLocator(bymonth=7)
        tfor = "%b\n'%y"
        tform = "%b"
        tlabel = 'date'
        if debug: print '6 month/2 month'
    elif dtrange / nticks < 240:
        tloc = YearLocator()
        tlocm = MonthLocator(bymonth=(1, 7))
        tform = ""
        tfor = "%Y"
        tlabel = 'Year'
        if debug: print 'year/6 month'
    elif dtrange / nticks < 365:
        tloc = YearLocator(base=2)
        tlocm = MonthLocator(bymonth=(1, 7))
        tfor = '%Y'
        tform = ''
        tlabel = 'year'
        if debug: print '12 month'
    elif dtrange / nticks < 580:
        tloc = YearLocator(base=2)
        tlocm = YearLocator()
        tfor = '%Y'
        tform = ''
        tlabel = 'year'
        if debug: print '2 year/year'
    elif dtrange < 20 * 365:
        tloc = YearLocator(base=5)
        tlocm = YearLocator()
        tfor = '%Y'
        tform = ''
        tlabel = 'year'
        if debug: print '5 year/year'
    elif dtrange < 100 * 365:
        tloc = YearLocator(base=10)
        tlocm = YearLocator(base=2)
        tfor = '%Y'
        tform = ''
        tlabel = 'year'
        if debug: print '10 year'
    else:
        interv = 2.7 * dtrange / (nticks * 365.25)
        print interv
        # round the interval to a "nice" value; plain numpy replaces the
        # long-removed pylab.matplotlib.numerix helpers
        exp3 = int(numpy.log10(interv) * 3)
        base = numpy.power(10, exp3 // 3)
        interv = int(numpy.round(numpy.power(10, exp3 / 3.) / base) * base)
        print interv
        tloc = YearLocator(base=interv)
        tlocm = YearLocator(base=int(interv / 5))
        tfor = '%Y'
        tform = ''
        tlabel = 'year'
        if debug: print 'variable long scale'
    if format is None:
        format = tfor
    if formatm is None:
        formatm = tform
    if lformat is None:
        lformat = tlabel
    axx.set_major_locator(tloc)
    axx.set_minor_locator(tlocm)
    axx.set_major_formatter(DateFormatter(format))
    axx.set_minor_formatter(DateFormatter(formatm))
    if label:
        if ltime is not None:  #and lformat.find('%')<0:
            print 'reached here'
            dateform = pylab.DateFormatter(lformat)
            print lformat
            print pylab.num2date(ltime)
            # calling the formatter directly works on old and new matplotlib,
            # unlike the removed DateFormatter.strftime helper
            tlabel = dateform(ltime)
            print tlabel
        elif lformat.find('%') > 0:
            tlabel = 'time'
        #if debug:
        # str = "range %8.4f range per tick %4.4f ticks %d\ntick format %s label %s" %  (dtrange, dtrange/nticks, nticks, tform, tlabel)
        # print str
        # ax.set_title(str)
        pylab.setp(axx.get_label(), text=tlabel)
    enlarge_allticklines(axx, factor=1.5)

    pylab.draw_if_interactive()
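
# ----------------------------------------------------------------------
# Usage sketch (not part of the original source): plot roughly three months
# of data and let adaptive_date_ticks pick locators/formatters for the x axis.
try:
    enlarge_allticklines
except NameError:
    # no-op stand-in so the sketch is self-contained; the original module is
    # assumed to provide the real helper (it apparently rescales tick lines)
    def enlarge_allticklines(axis, factor=1.5):
        pass

if __name__ == '__main__':
    import datetime
    fig = pylab.figure()
    ax = fig.add_subplot(111)
    t0 = pylab.date2num(datetime.datetime(2010, 1, 1))
    days = numpy.arange(0.0, 90.0, 0.25)            # 6-hourly samples
    ax.plot(t0 + days, numpy.sin(days / 7.0), '-')
    ax.xaxis_date()                                 # treat x values as date numbers
    adaptive_date_ticks(ax.xaxis, dtrange=days[-1], label=False)
    pylab.savefig('adaptive_date_ticks_demo.png')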