Example #1
0
 def read(self):
     remainder=None
     base=None
     import lg_dpl_toolbox.dpl.TimeSource as TimeSource
     timesource=TimeSource.CompoundTimeGenerator(self.timesource)
     for f in self.framestream:
         if timesource.isDone:
             break
         if remainder is None:
             remainder=copy.deepcopy(f)
         else:
             remainder.append(f)
         t=getattr(remainder,remainder._timevarname)
         #print t.shape
         if t.shape[0]==0:
             continue
         requestedtimes=hau.T_Array(timesource.getBinsFor(starttime=base,endtime=t[-1]))
         if requestedtimes.size<2:
             continue
         lastTime=requestedtimes[-1]
         retarr=remainder
         remainder=hau.trimTimeInterval(retarr,lastTime,datetime(2200,1,1,0,0,0))
         retarr.trimTimeInterval(base,lastTime)
         retarr.hereGoneBinTimes(requestedtimes,allow_nans=self.allow_nans)
         print 'range',base,lastTime,'returning:',retarr,'remainder',remainder
         yield retarr
         base=lastTime
     if remainder is not None and timesource.end_time is not None:
         requestedtimes=hau.T_Array(timesource.getBinsFor(starttime=base,endtime=timesource.end_time,inclusive=True))
         remainder.hereGoneBinTimes(requestedtimes,allow_nans=self.allow_nans)
         if getattr(remainder,remainder._timevarname).shape[0]>0:
             print 'range',base,timesource.end_time,'returning:',remainder
             yield remainder
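
A hedged sketch of consuming the read() generator in Example #1. The owning class is not named in the source (MeanNarrator is used below purely as a placeholder); each yielded frame has been trimmed to one span of requested bin times and re-binned via hereGoneBinTimes, so a consumer can simply accumulate them.

def collect_chunks(narrator):
    # gather every binned chunk yielded by read() into a list for inspection
    chunks = []
    for frame in narrator.read():
        chunks.append(frame)
    return chunks

# illustrative only: construction follows the __init__ shown in Example #2
# narrator = MeanNarrator(framestream=some_stream, basetime=start, endtime=end,
#                         timeres=timedelta(seconds=30))
# chunks = collect_chunks(narrator)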
Example #2
0
 def __init__(self,framestream,basetime=None,endtime=None,timeres=None,timesource=None,allow_nans=False):
     self.framestream=framestream
     self.base=basetime
     self.endtime=endtime
     self.step=timeres
     self.timesource=timesource
     self.allow_nans=allow_nans
     import lg_dpl_toolbox.dpl.TimeSource as TimeSource
     if self.timesource is None:
         self.timesource=TimeSource.TimeGenerator(start_time=basetime,end_time=endtime,time_resolution=timeres)
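
A hedged sketch of the two ways the constructor in Example #2 can be parameterized: pass an explicit timesource, or provide basetime/endtime/timeres and let it build the TimeSource.TimeGenerator fallback itself. MeanNarrator is a placeholder class name; the TimeGenerator keyword arguments are the ones used in the source.

from datetime import datetime, timedelta
import lg_dpl_toolbox.dpl.TimeSource as TimeSource

start = datetime(2015, 6, 1, 0, 0, 0)
end = datetime(2015, 6, 1, 12, 0, 0)

# explicit timesource
ts = TimeSource.TimeGenerator(start_time=start, end_time=end,
                              time_resolution=timedelta(seconds=30))
# narrator = MeanNarrator(framestream=some_stream, timesource=ts)

# equivalent fallback: __init__ builds the same TimeGenerator internally
# narrator = MeanNarrator(framestream=some_stream, basetime=start, endtime=end,
#                         timeres=timedelta(seconds=30))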
Example #3
0
    def search(self, start_time_datetime=None, end_time_datetime=None,reverse_padding=None,timeres_timedelta=None,min_alt_m=None,max_alt_m=None,altres_m=None,\
        window_width_timedelta=None,forimage=None,inclusive=False,timesource=None,allow_nans=False,*args, **kwargs):
        """
        :param start_time_datetime: start time (optional)
        :type start_time_datetime: datetime.datetime
        :param end_time_datetime: end time (optional) if unspecified, will continue to return frames thru now, unending
        :type end_time_datetime: datetime.datetime
        :param reverse_padding: (optional) in the case of reading up to the current time, this timedelta is always subtracted from the current time to get the most recent moment to read
        :type reverse_padding: datetime.timedelta
        :param timeres_timedelta: (optional) time resolution, or None to optimize for images
        :type timeres_timedelta: datetime.timedelta
        :param min_alt_m: minimum altitude in meters
        :param max_alt_m: maximum altitude in meters
        :param altres_m: (optional) altitude resolution
        :param window_width_timedelta:   used with a start or end time (not both) to imply the missing bound. if width is unspecified, the window runs from the specified bound to now; if neither bound is given, the window ends now
        :type window_width_timedelta: datetime.timedelta
        :param forimage: (optional) if provided, is a dict(x=##,y=##) containing preferred x and y pixel count for an image. if no resolutions are provided, these are used to create an optimal one. if not provided, any unspecified resolutions remain native
        :param inclusive: if true, will expand the window to ensure it includes any data that intersects the requested times
        """
        if len(args):
            print 'Unused dpl_pars.search args = ',args
        if len(kwargs):
            print "Unused dpl_pars.search kwargs = ",kwargs

        # altitude configuration notes: min and max alt are required. resolution is optional; if omitted, the process_control will determine pixel count, and thus resolution

        # time configuration notes: there are many modes to this operation, and two layers
        #   calibration layer: possible parameters are start, end, and window width. (implemented by dpl_calibration.parse_timewindow(), which will return in all cases start and width, and end if will terminate)
        #      specification groups possible: start+end, start+window,start,end+window,window (in order of priority)
        #      start and end specify the range of calibrations to stream
        #      if only one is specified, window is used to roll forward or back the one specified to make the other
        #         if window is not specified, it goes from start to now (defining the window)
        #      if neither are specified, start is set to now-window
        #      if end is not specified, it keeps rolling forward without termination. if it is, it WILL go to that point (even in future) and terminate
        #         if start and window are specified, and start+window is past now, start will be ignored
        #   processing layer:
        #      extra parameter of maxtimeslice specifies the largest volume of actual data to be returned (may be violated if no data is available, and filler piles up)
        #         if it is not specified, natural flow is not interrupted for the sake of data volume
        #      here, it only cares if timeres is not specified.  If it is not specified, it will follow the same steps as calibration to find the preferred window size, and use process_control to determine pixel count, and resolution
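        # Worked examples (a hedged illustration of the rules above; the keys shown match how d is used below):
        #   parse_timewindow(start, end, None)   -> starttime=start,     endtime=end,         windowwidth=end-start
        #   parse_timewindow(start, None, width) -> starttime=start,     endtime=start+width, windowwidth=width
        #   parse_timewindow(None, end, width)   -> starttime=end-width, endtime=end,         windowwidth=width
        #   parse_timewindow(None, None, width)  -> starttime=now-width, endtime=None (keeps rolling forward), windowwidth=width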

        prms={}
        if reverse_padding is not None:
            prms['now']=datetime.datetime.utcnow()-reverse_padding
        d=parse_timewindow(start_time_datetime,end_time_datetime,window_width_timedelta,**prms)

        if forimage is not None:
            if not isinstance(forimage,dict):
                import lg_base.core.canvas_info as ci
                forimage=ci.load_canvas_info()['canvas_pixels']
            if altres_m is None:
                if min_alt_m is None:
                    min_alt_m=0
                if max_alt_m is None:
                    max_alt_m=30
                altres_m=(max_alt_m-min_alt_m)/forimage['y']
            if timeres_timedelta is None:
                timeres_timedelta=datetime.timedelta(seconds=d['windowwidth'].total_seconds()/forimage['x'])
        from lg_dpl_toolbox.filters import time_frame,resample_altitude

        nar=self.lib(start=d['starttime'],end=d['endtime'],firstaltitude=min_alt_m,lastaltitude=max_alt_m)
        #FIXME too constant, and hardcoded
        if timeres_timedelta is not None and nar.parsNativeTimeResolution>timeres_timedelta:
            #dropping time resolution to 'pure native'
            timeres_timedelta=None

        if inclusive:
            #remake with padding
            padAmount=(2*timeres_timedelta) if timeres_timedelta is not None else (5*nar.parsNativeTimeResolution)
            if d['starttime']:
                d['starttime']-=padAmount
            if d['endtime']:
                d['endtime']+=padAmount
            nar=self.lib(start=d['starttime'],end=d['endtime'],firstaltitude=min_alt_m,lastaltitude=max_alt_m)
        elif timeres_timedelta:
            nar=self.lib(start=d['starttime']-timeres_timedelta,end=d['endtime']+timeres_timedelta,firstaltitude=min_alt_m,lastaltitude=max_alt_m)

        if timesource:
            nar=time_frame.MeanNarrator(nar,timesource=timesource,allow_nans=allow_nans)
        elif timeres_timedelta:
            nar=time_frame.MeanNarrator(nar,basetime=d['starttime'],timeres=timeres_timedelta,endtime=d['endtime'],allow_nans=allow_nans)
        #if altres_m:
        #    requested_altitudes=hau.Z_Array(np.arange(min_alt_m,max_alt_m+altres_m*0.1,altres_m))
        #    nar=resample_altitude.ResampleXd(nar,'heights',requested_altitudes,left=np.NaN,right=np.NaN)
        import lg_dpl_toolbox.dpl.TimeSource as TimeSource
        nar=TimeSource.AddPseudoDeltaT(nar,'times','delta_t')

        return nar
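
A hedged usage sketch for the search() above. The librarian class that owns it is not shown here, so its name and construction are assumptions; the helper below only reproduces the resolution arithmetic of the forimage branch (one altitude bin per y pixel, one time bin per x pixel) with illustrative canvas sizes.

from datetime import datetime, timedelta

def implied_resolutions(start, end, min_alt_m, max_alt_m, canvas_pixels):
    # mirrors the forimage branch above: derive altitude and time resolution
    # from the requested window and a pixel budget
    window = end - start
    altres_m = (max_alt_m - min_alt_m) / float(canvas_pixels['y'])
    timeres = timedelta(seconds=window.total_seconds() / canvas_pixels['x'])
    return altres_m, timeres

# illustrative values; canvas_pixels would normally come from
# lg_base.core.canvas_info.load_canvas_info()['canvas_pixels']
altres_m, timeres = implied_resolutions(datetime(2015, 6, 1, 0, 0), datetime(2015, 6, 1, 12, 0),
                                        0.0, 15000.0, dict(x=1600, y=800))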
Example #4
0
def makeArchiveImages(instrument,datetimestart,range_km=15,full24hour=None,filename=None,reprocess=False,attenuated=False,frappe=False,ir1064=False,ismf2ship=False,completeframe=None,*args,**kwargs):
    import lg_dpl_toolbox.filters.substruct as frame_substruct
    frame_substruct.SubstructBrancher.multiprocessable=False
    from lg_dpl_toolbox.dpl.dpl_read_templatenetcdf import dpl_read_templatenetcdf
    from lg_dpl_toolbox.filters.time_frame import FrameCachedConcatenate
    from hsrl.dpl.dpl_hsrl import dpl_hsrl
    from radar.dpl.dpl_radar import dpl_radar
    from lg_dpl_toolbox.dpl.dpl_create_templatenetcdf import dpl_create_templatenetcdf
    import hsrl.dpl.dpl_artists as hsrl_artists
    import radar.dpl.dpl_artists as radar_artists
    import raman.dpl.dpl_artists as raman_artists

    import lg_dpl_toolbox.core.archival as hru
    import hsrl.graphics.hsrl_display as du
    if filename is not None:
        useFile=True
    else:
        useFile=False #True
    if not useFile or not os.access(filename,os.F_OK):
        reprocess=True
    realend=None
    #process_control=None
    if reprocess:
        print datetime
        #instrument='ahsrl'
        if useFile:
            n=Dataset(filename,'w',clobber=True)
            n.instrument=instrument
        print datetime
        realstart=datetimestart
        if full24hour is None:
            realstart=datetimestart.replace(hour=0 if datetimestart.hour<12 else 12,minute=0,second=0,microsecond=0)
        elif frappe:
            realstart=datetimestart.replace(hour=0,minute=0,second=0,microsecond=0)
        elif ismf2ship:
            realend=realstart
            realstart=realstart-timedelta(days=2.0)
        else:
            realstart=realstart-timedelta(days=1.0)
        if 'realstart' in kwargs:
            realstart=kwargs['realstart']
        if realend is None:
            realend=realstart+timedelta(days=.5 if full24hour is None else 1.0)
        if 'realend' in kwargs:
            realend=kwargs['realend']
        isHsrl=False
        isRadar=False
        isRaman=False
        instrumentprefix=None
        if instrument.endswith('hsrl'):
            dpl=dpl_hsrl(instrument=instrument,filetype='data')
            dpl.hsrl_process_control.set_value('quality_assurance','enable',False)
            #dpl.hsrl_process_control.set_value('extinction_processing','enable',False)
            dpl.hsrl_process_control.set_value('wfov_corr','enable',False)
            gen=dpl(start_time_datetime=realstart,end_time_datetime=realend,min_alt_m=0,max_alt_m=range_km*1000,with_profiles=False)
            isHsrl=True
            #process_control=gen.hsrl_process_control
            hsrlinstrument=instrument
            if os.getenv('COMPLETEFRAME',completeframe) is None:
                import lg_dpl_toolbox.filters.substruct as frame_substruct
                dropcontent=['rs_raw','rs_mean']
                gen=frame_substruct.DropFrameContent(gen,dropcontent)

        elif ('kazr' in instrument) or ('mwacr' in instrument) or instrument=='mmcr':
            dpl=dpl_radar(instrument=instrument)
            gen=dpl(start_time_datetime=realstart,end_time_datetime=realend,min_alt_m=0,max_alt_m=range_km*1000,forimage=True,allow_nans=True)
            hsrlinstrument=dpl.instrumentbase
            isRadar=True
            if 'mwacr' in instrument:
                instrumentprefix='mwacr'
            else:
                instrumentprefix='radar'
            #merge=picnicsession.PicnicProgressNarrator(dplc,getLastOf('start'), searchparms['start_time_datetime'],searchparms['end_time_datetime'],session)
            #hasProgress=True
        elif instrument.startswith('rlprof'):
            from raman.dpl.raman_dpl import dpl_raman
            import lg_dpl_toolbox.filters.time_frame as time_slicing
            dpl=dpl_raman('bagohsrl',instrument.replace('rlprof',''))
            gen=dpl(start_time_datetime=realstart,end_time_datetime=realend,min_alt_m=0,max_alt_m=range_km*1000,forimage=True,allow_nans=True,inclusive=True)
            import functools
            import lg_base.core.array_utils as hau
            from dplkit.simple.blender import TimeInterpolatedMerge
            import lg_dpl_toolbox.filters.substruct as frame_substruct
            import lg_dpl_toolbox.dpl.TimeSource as TimeSource
            import lg_base.core.canvas_info as ci
            gen=time_slicing.TimeGinsu(gen,timefield='times',dtfield=None)
            forimage=ci.load_canvas_info()['canvas_pixels']
            timesource=TimeSource.TimeGenerator(realstart,realend,time_step_count=forimage['x'])
            gen=TimeInterpolatedMerge(timesource,[gen], allow_nans=True)
            gen=TimeSource.AddAppendableTime(gen,'times','delta_t')
            gen=frame_substruct.Retyper(gen,functools.partial(hau.Time_Z_Group,timevarname='times',altname='altitudes'))
            hsrlinstrument='bagohsrl'
            instrumentprefix=instrument
            isRaman=True


    if reprocess and useFile:
        v=None

        for i in gen:
            if v is None:
                v=dpl_create_templatenetcdf(locate_file('hsrl_nomenclature.cdl'),n,i)
            v.appendtemplatedata(i)
            #raise TypeError
            #break
            n.sync()
        n.close()

        #fn='outtest.nc'

    if useFile:
        v=dpl_read_templatenetcdf(filename)

        instrument=v.raw_netcdf().instrument[:]
    else:
        v=gen

    defaultjson='archive_plots.json'
    if ismf2ship:
        defaultjson='mf2ship_plots.json'
    (disp,conf)=du.get_display_defaults(os.getenv("PLOTSJSON",defaultjson))
  
    v=FrameCachedConcatenate(v)
    import lg_dpl_toolbox.dpl.TimeSource as TimeSource
    import lg_dpl_toolbox.filters.fill as fill
    import lg_base.core.canvas_info as ci
    forimage=ci.load_canvas_info()['canvas_pixels']
    ts=TimeSource.TimeGenerator(realstart,realend,time_step_count=forimage['x'])
    v=fill.FillIn(v,[np.array([x['start'] for x in ts])],ignoreGroups=ignoreSomeGroups)

    rs=None
    for n in v:
        if rs is None:
            rs=n
        else:
            rs.append(n)
        if isHsrl:
            if attenuated:
                bsfigname='atten_backscat_image'
                bsprefix='attbscat'
                disp.set_value(bsfigname,'enable',1)
                disp.set_value('backscat_image','enable',0)
                disp.set_value('linear_depol_image','figure',bsfigname)
                #disp.set_value('circular_depol_image','figure',bsfigname)
            else:
                bsfigname='backscat_image'
                bsprefix='bscat'
            if ir1064 or (hasattr(rs,'rs_inv') and hasattr(rs.rs_inv,'color_ratio')):
                disp.set_value('raw_color_ratio_image','enable',1)
                disp.set_value('color_ratio_image','enable',1)
            #if hasattr(rs.rs_inv,'circular_depol'):
            #    disp.set_value('linear_depol_image','enable',0) #this modification works because the artist doesn't render until after the yield
            #    field='circular_depol'
            #else:
            #disp.set_value('circular_depol_image','enable',0)
            field='linear_depol'


    if isHsrl:
        #if process_control==None:
        #    print 'loading process control from json'
        #    process_control=jc.json_config(locate_file('process_control.json'),'process_defaults')
        v=hsrl_artists.dpl_images_artist(v,instrument=instrument,max_alt=None,display_defaults=disp)
    elif isRadar:
        v=radar_artists.dpl_radar_images_artist(framestream=v,instrument=v.radarType,display_defaults=disp,subframe=None)
    elif isRaman:
        v=raman_artists.dpl_raman_images_artist(framestream=v,display_defaults=disp)

    for n in v:
        pass #run once more with cached value and actual artists

    usedpi=90

    #rs=Rti('ahsrl',stime.strftime('%d-%b-%y %H:%M'),dtime.total_seconds()/(60*60),minalt_km,maxalt_km,.5,'archive_plots.json')
    imtime=realstart
    imetime=realend
    imtimestr=imtime.strftime('%e-%b-%Y ')
    file_timetag=imtime.strftime("_%Y%m%dT%H%M_")+imetime.strftime("%H%M_")+("%i" % range_km)
    if not full24hour:
        imtimestr+= ('AM' if imtime.hour<12 else 'PM' )
        file_timetag+="_am" if imtime.hour<12 else "_pm"
    filelocation=hru.get_path_to_data(hsrlinstrument,None)
    print filelocation
    filelocation=os.path.join(filelocation,imtime.strftime("%Y/%m/%d/images/"))
    try:
        os.makedirs(filelocation)
    except OSError:
        pass
    print filelocation
    if os.getenv("DEBUG",None):
        filelocation='.'
        print 'DEBUG is on. storing in current directory'
    figs=v.figs
    extensionsList=('.png','.jpg','.gif','.jpeg')
    preferredFormat='jpg'
    preferredExtension='.'+preferredFormat
    if isHsrl:
        #figs=du.show_images(instrument,rs,None,{},process_control,disp,None,None,None)

        #for x in figs:
        #    fig = figs.figure(x)
        #    fig.canvas.draw()
        
        print figs
        f=figs.figure(bsfigname)
        #f.set_size_inches(f.get_size_inches(),forward=True)
        if frappe:
            frappetag=imtime.strftime('upperair.UW_HSRL.%Y%m%d%H%M.BAO_UWRTV_')
            hiresfile=os.path.join('.',frappetag+'backscatter_%ikm%s' %(range_km,preferredExtension))
            ensureOnlyOne(hiresfile,extensionsList)
            f.savefig(hiresfile,format=preferredFormat,bbox_inches='tight')
        else:
            hiresfile=os.path.join(filelocation,bsprefix+"_depol" + file_timetag + preferredExtension)
            ensureOnlyOne(hiresfile,extensionsList)
            f.savefig(hiresfile,format=preferredFormat,bbox_inches='tight')
        if disp.enabled('raw_color_ratio_image'):
            f=figs.figure('color_ratio_image')
            #f.set_size_inches(f.get_size_inches(),forward=True)
            hiresfileir=os.path.join(filelocation,"ratioIR" + file_timetag + preferredExtension)
            ensureOnlyOne(hiresfileir,extensionsList)
            f.savefig(hiresfileir,format=preferredFormat,bbox_inches='tight')
        figs.close()

        if not full24hour and hasattr(rs,'rs_inv'):
            scale = [float(disp.get_value(bsfigname,'lo_color_lmt')),
                     float(disp.get_value(bsfigname,'hi_color_lmt'))]
            depol=100*getattr(rs.rs_inv,field)
            depolscale = [float(disp.get_value(field+'_image', 'lo_color_lmt')),
                          float(disp.get_value(field+'_image', 'hi_color_lmt'))]

            if attenuated:
                backscat=rs.rs_inv.atten_beta_a_backscat
            elif hasattr(rs.rs_inv,'beta_a_backscat'):
                backscat=rs.rs_inv.beta_a_backscat
            else:
                backscat=rs.rs_inv.beta_a_backscat_par + rs.rs_inv.beta_a_backscat_perp 
            print rs.rs_inv.times.shape
            print backscat.shape
            if disp.get_value(bsfigname,"log_linear")=='log':
                scale=np.log10(scale)
                backscat[backscat<=0]=np.NAN;
                backscat=np.log10(backscat)
            if disp.get_value(field+'_image',"log_linear")=='log':
                depol[depol<=0]=np.NAN;
                depolscale=np.log10(depolscale)
                depol=np.log10(depol)
            print backscat.shape
            qc_mask=None
            if hasattr(rs.rs_inv,'qc_mask'):
                qc_mask=np.ones_like(rs.rs_inv.qc_mask)
                qcbits={'mol_lost':64,'mol_sn_ratio':16,'cloud_mask':128,'I2_lock_lost':4}
                for name,maskbit in qcbits.items():
                    if disp.get_value('mask_image',name):
                        qc_mask = np.logical_and(rs.rs_inv.qc_mask & maskbit > 0,qc_mask)
            #print np.sum(backscat<=0)
            f=tinythumb(backscat,scale,imtimestr,dpi=usedpi,qcmask=qc_mask)
            thumbname=os.path.join(filelocation,bsprefix + file_timetag + '_thumb'+preferredExtension)
            print thumbname
            ensureOnlyOne(thumbname,extensionsList)
            f.savefig(thumbname,format=preferredFormat,bbox_inches='tight',dpi=usedpi)
            trimborder(thumbname)
            
            f=tinythumb(depol,depolscale,imtimestr,dpi=usedpi,qcmask=qc_mask)
            thumbname=os.path.join(filelocation,'depol' + file_timetag + '_thumb'+preferredExtension)
            ensureOnlyOne(thumbname,extensionsList)
            f.savefig(thumbname,format=preferredFormat,bbox_inches='tight',dpi=usedpi)
            trimborder(thumbname)

            if ir1064 or hasattr(rs.rs_inv,'color_ratio'):
                if hasattr(rs.rs_inv,'qc_mask'):
                    qc_mask=np.ones_like(rs.rs_inv.qc_mask)
                    qcbits={'mol_lost':64,'mol_sn_ratio':16,'cloud_mask':128,'I2_lock_lost':4,'1064_shutter':0x8000}
                    for name,maskbit in qcbits.items():
                        if disp.get_value('mask_image',name):
                            qc_mask = np.logical_and(rs.rs_inv.qc_mask & maskbit > 0,qc_mask)
                cr=rs.rs_inv.color_ratio
                scale = [float(disp.get_value('color_ratio_image','lo_color_lmt')),
                         float(disp.get_value('color_ratio_image','hi_color_lmt'))]
                if disp.get_value('color_ratio_image',"log_linear")=='log':
                    scale=np.log10(scale)
                    cr[cr<=0]=np.NAN;
                    cr=np.log10(cr)
                f=tinythumb(cr,scale,imtimestr,dpi=usedpi,qcmask=qc_mask)
                thumbname=os.path.join(filelocation,'ratioIR' + file_timetag + '_thumb'+preferredExtension)
                print thumbname
                ensureOnlyOne(thumbname,extensionsList)
                f.savefig(thumbname,format=preferredFormat,bbox_inches='tight',dpi=usedpi)
                trimborder(thumbname)


    elif isRadar:
        f=figs.figure('radar_backscatter_image')
        #f.set_size_inches(f.get_size_inches(),forward=True)
        hiresfile=os.path.join(filelocation,instrumentprefix+"_bscat" + file_timetag + preferredExtension)
        ensureOnlyOne(hiresfile,extensionsList)
        f.savefig(hiresfile,format=preferredFormat,bbox_inches='tight')
        figs.close()
        if not full24hour:
            radarscale = [float(disp.get_value('radar_backscatter_image', 'lo_color_lmt')),
                          float(disp.get_value('radar_backscatter_image', 'hi_color_lmt'))]
            radar=rs.Backscatter
            if disp.get_value('radar_backscatter_image','log_linear')=='log':
                radar[radar<=0]=np.NAN
                radarscale=np.log10(radarscale)
                radar=np.log10(radar)
            f=tinythumb(radar,radarscale,imtimestr,dpi=usedpi)
            thumbname=os.path.join(filelocation,instrumentprefix+'_bscat' + file_timetag + '_thumb'+preferredExtension)
            ensureOnlyOne(thumbname,extensionsList)
            f.savefig(thumbname,format=preferredFormat,bbox_inches='tight',dpi=usedpi)
            trimborder(thumbname)
    elif isRaman:
        if hasattr(rs,'backscatter'):
            f=figs.figure('rl_backscatter_image')
            #f.set_size_inches(f.get_size_inches(),forward=True)
            hiresfile=os.path.join(filelocation,instrumentprefix+"_bscat" + file_timetag + preferredExtension)
            ensureOnlyOne(hiresfile,extensionsList)
            f.savefig(hiresfile,format=preferredFormat,bbox_inches='tight')
        elif hasattr(rs,'beta'):
            f=figs.figure('rl_backscatter_image')
            #f.set_size_inches(f.get_size_inches(),forward=True)
            hiresfile=os.path.join(filelocation,instrumentprefix+"_beta" + file_timetag + preferredExtension)
            ensureOnlyOne(hiresfile,extensionsList)
            f.savefig(hiresfile,format=preferredFormat,bbox_inches='tight')
        if hasattr(rs,'linear_depol'):
            f=figs.figure('rl_depol_image')
            #f.set_size_inches(f.get_size_inches(),forward=True)
            hiresfile=os.path.join(filelocation,instrumentprefix+"_dep" + file_timetag + preferredExtension)
            ensureOnlyOne(hiresfile,extensionsList)
            f.savefig(hiresfile,format=preferredFormat,bbox_inches='tight')
        figs.close()
        if not full24hour:
            if hasattr(rs,'backscatter'):
                ramanscale = [float(disp.get_value('rl_backscatter_image', 'lo_color_lmt')),
                              float(disp.get_value('rl_backscatter_image', 'hi_color_lmt'))]
                raman=rs.backscatter.copy()
                #print np.nanmax(raman),np.nanmin(raman),' is ramans actual range'
                if disp.get_value('rl_backscatter_image','log_linear')=='log':
                    raman[raman<=0]=np.NAN
                    ramanscale=np.log10(ramanscale)
                    raman=np.log10(raman)
                f=tinythumb(raman,ramanscale,imtimestr,dpi=usedpi)
                thumbname=os.path.join(filelocation,instrumentprefix+'_bscat' + file_timetag + '_thumb'+preferredExtension)
                ensureOnlyOne(thumbname,extensionsList)
                f.savefig(thumbname,format=preferredFormat,bbox_inches='tight',dpi=usedpi)
                trimborder(thumbname)
            if hasattr(rs,'beta'):
                ramanscale = [float(disp.get_value('rl_backscatter_image', 'lo_color_lmt')),
                              float(disp.get_value('rl_backscatter_image', 'hi_color_lmt'))]
                raman=rs.beta.copy()
                #print np.nanmax(raman),np.nanmin(raman),' is ramans actual range'
                if disp.get_value('rl_backscatter_image','log_linear')=='log':
                    raman[raman<=0]=np.NAN
                    ramanscale=np.log10(ramanscale)
                    raman=np.log10(raman)
                f=tinythumb(raman,ramanscale,imtimestr,dpi=usedpi)
                thumbname=os.path.join(filelocation,instrumentprefix+'_beta' + file_timetag + '_thumb'+preferredExtension)
                ensureOnlyOne(thumbname,extensionsList)
                f.savefig(thumbname,format=preferredFormat,bbox_inches='tight',dpi=usedpi)
                trimborder(thumbname)
            if hasattr(rs,'linear_depol'):
                ramanscale = [float(disp.get_value('rl_depol_image', 'lo_color_lmt')),
                              float(disp.get_value('rl_depol_image', 'hi_color_lmt'))]
                raman=rs.linear_depol.copy()
                #print np.nanmax(raman),np.nanmin(raman),' is ramans actual range'
                if disp.get_value('rl_depol_image','log_linear')=='log':
                    raman[raman<=0]=np.NAN
                    ramanscale=np.log10(ramanscale)
                    raman=np.log10(raman)
                f=tinythumb(raman,ramanscale,imtimestr,dpi=usedpi)
                thumbname=os.path.join(filelocation,instrumentprefix+'_dep' + file_timetag + '_thumb'+preferredExtension)
                ensureOnlyOne(thumbname,extensionsList)
                f.savefig(thumbname,format=preferredFormat,bbox_inches='tight',dpi=usedpi)
                trimborder(thumbname)

    today=datetime.utcnow()
    if not frappe and not ismf2ship and full24hour and imetime.year==today.year and imetime.month==today.month and imetime.day==today.day:
        destpath=os.getenv("DAYSTORE",os.path.join('/','var','ftp','public_html','hsrl'))
        destfile=os.path.join(destpath,full24hour + '_current'+preferredExtension)
        fulln=hiresfile
        if not fulln:
            return
        outf=open(destfile,'wb')
        inf=open(fulln,'rb')
        ensureOnlyOne(destfile,extensionsList)
        outf.write(inf.read())
        inf.close()
        outf.close()
        os.unlink(fulln)
    elif ismf2ship:
        if not hiresfile:
            return
        destpath='./'#os.getenv("DAYSTORE",os.path.join('/','var','ftp','public_html','hsrl'))
        destfile=os.path.join(destpath,realend.strftime('hsrl_%Y%m%d%H%M')+preferredExtension)
        fulln=hiresfile
        outf=open(destfile,'wb')
        inf=open(fulln,'rb')
        ensureOnlyOne(destfile,extensionsList)
        outf.write(inf.read())
        inf.close()
        outf.close()
        os.unlink(fulln)
        recompressImage(destfile,quality=30,optimize=True)
        varlist=('rs_mean.times','rs_mean.latitude','rs_mean.longitude','rs_mean.transmitted_energy','rs_mean.seedvoltage',\
            'rs_mean.seeded_shots','rs_mean.coolant_temperature','rs_mean.laserpowervalues','rs_mean.opticalbenchairpressure',\
            'rs_mean.humidity','rs_mean.l3locking_stats','rs_mean.l3cavityvoltage','rs_mean.nonfiltered_energy','rs_mean.filtered_energy',\
            'rs_mean.builduptime','rs_mean.one_wire_temperatures')
        skip=0
        while skip<16:
            try:
                print 'skip is ',skip
                addExifComment(destfile,makedict(n,varlist,skip=skip))
                break
            except Exception as e:
                skip+=1
                print e
                traceback.print_exc()
        sendSCPFile(destfile,host='198.129.80.15',path='/ftpdata/outgoing',user='******',keyfile=os.getenv('MF2_HSRL_KEY'))
    elif frappe:
        if not hiresfile:
            return
        sendFTPFile(hiresfile,host='catalog.eol.ucar.edu',path='/pub/incoming/catalog/frappe/',user='******',password='******')
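
A hedged sketch of invoking makeArchiveImages() above. The instrument string comes from the raman branch of this function; the date and keyword values are illustrative, and actually running it requires the full lidar data archive plus the helper functions (tinythumb, ensureOnlyOne, trimborder, ...) assumed to live in this module.

if __name__ == '__main__':
    from datetime import datetime
    # render the AM half-day backscatter/depolarization images for one instrument
    makeArchiveImages('bagohsrl', datetime(2015, 6, 1, 6, 0, 0), range_km=15)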
Example #5
0
    def search(self, start_time_datetime=None, end_time_datetime=None,reverse_padding=None,timeres_timedelta=None,min_alt_m=None,max_alt_m=None,altres_m=None,window_width_timedelta=None,
        corr_adjusts=None,block_when_out_of_data=True,forimage=True,inclusive=None, mol_norm_alt_m=None,timesource=None,raw_only=False,cal_only=False,with_profiles=True,do_inversion=True,calibration_overrides=None,
        requested_altitudes=None,calsrc=None,constsrc=None,sounding_source=None,*args, **kwargs):
        """
        :param start_time_datetime: start time (optional)
        :type start_time_datetime: datetime.datetime
        :param end_time_datetime: end time (optional) if unspecified, will continue to return frames thru now, unending
        :type end_time_datetime: datetime.datetime
        :param reverse_padding: (optional) in the case of reading up to the current time, this timedelta is always subtracted from the current time to get the most recent moment to read
        :type reverse_padding: datetime.timedelta
        :param timeres_timedelta: (optional) time resolution, or None to optimize for images
        :type timeres_timedelta: datetime.timedelta
        :param min_alt_m: minimum altitude in meters
        :param max_alt_m: maximum altitude in meters
        :param altres_m: (optional) altitude resolution
        :param window_width_timedelta:   used with a start or end time (not both) to imply the missing bound. if width is unspecified, the window runs from the specified bound to now; if neither bound is given, the window ends now
        :type window_width_timedelta: datetime.timedelta
        :param corr_adjusts: correction adjustments. if unspecified, will use default   
        :param block_when_out_of_data: (optional) if unspecified or True, will block for 'timeres_timedelta' until trying for more frames. if False, yield None until more data is available. this only affects behavior if end_time_datetime is None/unspecified
        :param forimage: (optional) True (default) will implicitly set *res if not specified to match image configuration. if false, unspecified resolution will result in native resolution
        :param inclusive: if true, will expand window to ensure including any data that intersects the requested times (NOT IMPLEMENTED)
        """
        if len(args):
            print 'Unused dpl_hsrl.search args = ',args
        if len(kwargs):
            print "Unused dpl_hsrl.search kwargs = ",kwargs

        # altitude configuration notes: min and max alt are required. resolution is optional; if omitted, the process_control will determine pixel count, and thus resolution

        # time configuration notes: there are many modes to this operation, and two layers
        #   calibration layer: possible parameters are start, end, and window width. (implemented by dpl_calibration.parse_timewindow(), which will return in all cases start and width, and end if will terminate)
        #      specification groups possible: start+end, start+window,start,end+window,window (in order of priority)
        #      start and end specify the range of calibrations to stream
        #      if only one is specified, window is used to roll forward or back the one specified to make the other
        #         if window is not specified, it goes from start to now (defining the window)
        #      if neither are specified, start is set to now-window
        #      if end is not specified, it keeps rolling forward without termination. if it is, it WILL go to that point (even in future) and terminate
        #         if start and window are specified, and start+window is past now, start will be ignored
        #   processing layer:
        #      here, it only cares if timeres is not specified.  If it is not specified, it will follow the same steps as calibration to find the preferred window size, and use process_control to determine pixel count, and resolution

        params={}
        #params['windowwidth']=window_width_timedelta
        #params['intervalTime']=start_time_datetime#pytz.UTC.localize(start_time_datetime);
        import lg_base.core.canvas_info as ci
        canvas_info=ci.load_canvas_info()
        if timesource is not None:
            ts=timesource
        elif timeres_timedelta is None and not forimage:#NATIVE
            ts=None
        else:
            ts=TimeSource.TimeGenerator(start_time=start_time_datetime,end_time=end_time_datetime,width=window_width_timedelta,
                time_resolution=timeres_timedelta,time_step_count=None if (not forimage) else float(canvas_info['canvas_pixels']['x']))
        if ts is None:
            tmp=TimeSource.TimeGenerator(start_time=start_time_datetime,end_time=end_time_datetime,width=window_width_timedelta,
                time_resolution=timeres_timedelta,time_step_count=None if (not forimage) else float(canvas_info['canvas_pixels']['x']))
            params['realStartTime']=tmp.start_time
            params['intervalTime']=tmp.start_time
            params['finalTime']=tmp.end_time
            params['timeres']=tmp.time_resolution
        elif hasattr(ts,'start_time'):
            params['realStartTime']=ts.start_time
            params['intervalTime']=ts.start_time
            params['finalTime']=ts.end_time
            params['timeres']=ts.time_resolution if hasattr(ts,'time_resolution') else None
            #params['windowwidth']=ts.
        else:
            params['timeres']=None

        params['reverse_padding']=reverse_padding
        #actualStartTime=params['realStartTime']
        #params['finalTime']=end_time_datetime
        #params['timeres']=timeres_timedelta
        params['forimage']=forimage

        params['min_alt']=min_alt_m
        params['max_alt']=max_alt_m
        params['altres']=altres_m
        params['block_when_out_of_data']=block_when_out_of_data
        instrument=self.instrument
        intcount=0
        #fixme this should deprecate the params structure, making only deliberate parts exposed to the narrator as needed, via the calibration (altitude) and timesource (time), and timeslice/blocking (local)
        #ts=None
        ret=None

        const_narr=calsrc or constsrc
        if const_narr is None:
            const_narr=self.cal(interval_start_time=params['intervalTime'],reverse_padding=params['reverse_padding'],interval_end_time=params['finalTime'],
                            corr_adjusts=corr_adjusts,mol_norm_alt=mol_norm_alt_m,nocal=True)
            if cal_only and raw_only:
                return const_narr
        if not cal_only:
            ret=dpl_raw_hsrl_narr(params=params,const_narr=const_narr,lib=self.lib,zoo=self.zoo,inclusive=inclusive)
        if not raw_only:

            params=self.__params__(params,const_narr.hsrl_constants_first)
            cal_narr=calsrc
            if cal_narr is None:
                cal_narr=self.cal(min_alt_m=params['min_alt'],max_alt_m=params['max_alt'],altres_m=params['deriv_altres'],useconsts=const_narr
                    ,calibration_overrides=calibration_overrides,requested_altitudes=requested_altitudes,soundingsource=sounding_source)
            elif sounding_source is not None or requested_altitudes is not None or calibration_overrides is not None:
                warnings.warn("Not using sounding source, altitudes, or calibration overrides as provided.")
            if cal_only:
                return cal_narr
            ret=dpl_hsrl_narr(params=params,cal_narr=cal_narr,rawsrc=ret,timesource=ts)

            if 'rs_mean' in ret.provides:#mean filter
                windowparms=dfe.mean_filter_setup(ret.hsrl_process_control,ret.hsrl_constants_first,ret.provides['rs_mean'],cal_narr.provides)

                if windowparms is not None:
                    import lg_dpl_toolbox.filters.time_frame as time_slicing
                    import lg_dpl_toolbox.filters.substruct as frame_substruct
                    splitter=frame_substruct.SubstructBrancher(ret)
                    sliced=time_slicing.TimeGinsu(splitter.narrateSubstruct('rs_mean'),timefield='times',dtfield='delta_t',omitTime=False) #break it up
                    subslice=splitter.narrateSubstruct('rs_mean')
                    masterslice=splitter.narrateSubstruct(None)

                    if windowparms is not None:
                        if not isinstance(windowparms,(list,tuple)):
                            windowparms=[windowparms]
                        for w in windowparms:
                            sliced=dpl_rolling_window_filter(sliced,w) #run the filters

                    sliced=frame_substruct.CountDeGinsu(frame_substruct.FrameLength(subslice,'times'),sliced) #re-assemble in same chunk size
                    ret=frame_substruct.NestingCompositer(masterslice,dict(rs_mean=sliced))


            if do_inversion:
                ret=dpl_hsrl_inv_process_complex(ret)

                if 'rs_inv' in ret.provides:
                  windowparms=dfe.inv_filter_setup(ret.hsrl_process_control,ret.hsrl_constants_first,ret.provides['rs_inv'],ret.provides['rs_mean'])
                  qa=self.qa
                  if not ret.hsrl_process_control.enabled('quality_assurance',return_if_missing=True): #omitted or explicitly not enabled
                        qa=None

                  if (windowparms is not None or qa is not None):
                    import lg_dpl_toolbox.filters.time_frame as time_slicing
                    import lg_dpl_toolbox.filters.substruct as frame_substruct
                    splitter=frame_substruct.SubstructBrancher(ret)
                    sliced=time_slicing.TimeGinsu(splitter.narrateSubstruct('rs_inv'),timefield='times',dtfield='delta_t',omitTime=False) #break it up
                    subslice=splitter.narrateSubstruct('rs_inv')
                    masterslice=splitter.narrateSubstruct(None)

                    if windowparms is not None:
                        if not isinstance(windowparms,(list,tuple)):
                            windowparms=[windowparms]
                        for w in windowparms:
                            sliced=dpl_rolling_window_filter(sliced,w) #run the filters

                    if qa is not None:
                        sliced=qa(start_time_datetime= params['realStartTime'], end_time_datetime=params['finalTime'],hostsource=sliced,hostsource_newframe='qa_flags')

                    sliced=frame_substruct.CountDeGinsu(frame_substruct.FrameLength(subslice,'times'),sliced) #re-assemble in same chunk size
                    ret=frame_substruct.NestingCompositer(masterslice,dict(rs_inv=sliced))

        if with_profiles:
            ret=dpl_hsrl_profile_filter(ret,subscopename='raw_profiles',useraw=True)
            if not raw_only:
                ret=dpl_hsrl_profile_filter(ret)
        ret=dpl_hsrl_strip_calv(ret)
        return ret
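
A hedged sketch of calling the search() above with an explicit timesource instead of letting it derive one. It assumes the dpl_hsrl instance is callable with search()'s keyword arguments, as the usage in Example #4 suggests; the instrument name, dates, and resolution are illustrative.

from datetime import datetime, timedelta
import lg_dpl_toolbox.dpl.TimeSource as TimeSource
from hsrl.dpl.dpl_hsrl import dpl_hsrl

start = datetime(2015, 6, 1, 0, 0, 0)
end = datetime(2015, 6, 1, 12, 0, 0)

dpl = dpl_hsrl(instrument='bagohsrl', filetype='data')
ts = TimeSource.TimeGenerator(start_time=start, end_time=end,
                              time_resolution=timedelta(seconds=30))
gen = dpl(start_time_datetime=start, end_time_datetime=end,
          min_alt_m=0, max_alt_m=15000, timesource=ts, with_profiles=False)
for frame in gen:
    pass  # each frame carries the processed substructures (rs_mean, rs_inv, ...)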
Example #6
0
    def read(self):
        """ main read generator
        """
        import hsrl.data_stream.processing_utilities as pu
        params=self.params
        firsttimeever=None
        intervalTime=None
        intervalEnd=None
        rawsrc=iter(self.rawsrc)
        #if params['timeres']!=None and params['timeres']<datetime.timedelta(seconds=float(self.cal_narr.hsrl_constants['integration_time'])):
        #    params['timeres']=None #pure native
        end_time_datetime=params['finalTime']
        #timemodoffset=time_mod(params['realStartTime'],params['timeres'])
        noframe='noframe'
        fullrange=False #if this is true, it will pad the start with any missing times.

        remainder=None
        cdf_to_hsrl = None
        preprocess_ave = None
        requested_times=None
        instrument=self.hsrl_instrument
        intcount=0
        rs_mem = None
        #rs=None
        timesource=TimeSource.CompoundTimeGenerator(self.timesource) if self.timesource is not None else None
        
        for calv in self.cal_narr:
            if intervalTime is None:
                firsttimeever=calv['chunk_start_time']
                intervalTime=calv['chunk_start_time']
                intervalEnd=intervalTime
            chunk_end_to_use=calv['chunk_end_time']#-time_mod(calv['chunk_end_time'],params['timeres'],timemodoffset)
            #print 'old end',calv['chunk_end_time'],'vs new end',chunk_end_to_use,'mod base',params['timeres']
            if calv['chunk_end_time']==calv['chunk_start_time'] and end_time_datetime is None:
                if params['block_when_out_of_data']:
                    if 'timeres' not in params or params['timeres'] is None:
                        sleep(calv['rs_constants']['integration_time'])
                    else:
                        sleep(params['timeres'].total_seconds())
                else:
                    yield None #this is done to get out of here, and not get stuck in a tight loop
                continue
            while intervalTime<chunk_end_to_use:
                integration_time = calv['rs_constants']['integration_time']
                doPresample=True
                #END init section
                if intervalEnd>chunk_end_to_use:
                    print 'Breaking calibration on endtime. proc ',intervalEnd,chunk_end_to_use,end_time_datetime
                    break
                else:
                    intervalEnd=chunk_end_to_use
                #print ' Absolute window is ', actualStartTime, ' to ' , params['finalTime']
                print ' prior window was ', intervalTime, ' to ' , intervalEnd, 'terminating at ',chunk_end_to_use,rs_mem
                if True:#requested_times==None or requested_times.shape[0]>0:
                    try:
                            try:
                                while rawsrc is not None:
                                    if rs_mem is not None and rs_mem.times[0]>=chunk_end_to_use  and (end_time_datetime is None or chunk_end_to_use<end_time_datetime):
                                        break
                                    tmp=rawsrc.next()
                                    if hasattr(tmp,'rs_raw'):
                                        if rs_mem is not None:
                                            rs_mem.append(tmp.rs_raw)
                                        else:
                                            rs_mem=copy.deepcopy(tmp.rs_raw)
                                    if rs_mem is not None and rs_mem.times.shape[0]>0:
                                        break
                                    else:
                                        rs_mem=None
                            except StopIteration:
                                print 'Raw HSRL stream is ended'
                                rawsrc=None
                            if rs_mem is None or rs_mem.times.size==0:
                                rs_mem=None
                            elif rs_mem.times[0]>=chunk_end_to_use and (end_time_datetime is None or chunk_end_to_use<end_time_datetime):
                                print 'HSRL RAW skipping to next cal because of times',intervalTime,chunk_end_to_use,end_time_datetime,rs_mem.times[0]
                                break
                            else:
                                intervalEnd=rs_mem.times[-1]
                            print 'read in raw frame to mean',rs_mem,remainder
                            if rawsrc is None:
                                intervalEnd=chunk_end_to_use

                            print 'trimmed ',rs_mem
                            if timesource is not None:
                                if timesource.isDone:
                                    break
                                useMungedTimes=False #reserved for the case where this code needs to start shifting bins (which assumes fixed resolutions and implies interval starts and ends, rather than stating borders explicitly to avoid overlap or underlap)
                                usePrebinnedTimes=True #the opposite of munged times: provided times are time-bin borders, and the last time is the (excluded) end of the last bin, expected to be the first bin of the next window. that is the fully explicit way to describe bins in code, but a standard for describing a bin to the user (a single time when the bin spans a range) is not defined yet
                                inclusive=rawsrc is None and (end_time_datetime is not None and intervalEnd>=end_time_datetime)
                                timevals=hau.T_Array(timesource.getBinsFor(starttime=intervalTime,endtime=intervalEnd,inclusive=inclusive))#,inclusive=(end_time_datetime!=None and intervalEnd>=end_time_datetime)))
                                print 'Now %i intervals %s' % (timevals.size-1, "INC" if inclusive else "NOINC"),intervalTime,intervalEnd
                            elif 'timeres' in params and params['timeres'] is not None:
                                tmp=intervalTime
                                useMungedTimes=False #reserved for bin shifting; see the note in the timesource branch above
                                usePrebinnedTimes=True #provided times are time-bin borders; the last time is the excluded end of the final bin (a standalone sketch of this edge construction follows this example)

                                timevals=[]
                                timevals.append(tmp)
                                while tmp<intervalEnd:# using python datetimes for making the axis is much much more precise than matplotlib floats.
                                        #print tmp, ' = ' , du.date2num(tmp) , ' = ' , (tmp-self.actualStartTime).total_seconds()
                                        tmp+=params['timeres']
                                        timevals.append(tmp)
                                        
                                #intervalEnd=tmp
                                intcount+=len(timevals)
                                if usePrebinnedTimes:
                                    intcount-=1
                                print 'Now %i intervals' % (intcount)
                                timevals=hau.T_Array(timevals)
                            else:

                                print 'Using Native timing'
                                timevals=None

                            print ' new window is ', intervalTime, ' to ' , intervalEnd

                            requested_times=timevals
                           
                            requested_chunk_times= requested_times#requested_times[requested_times >=intervalTime]

                            if requested_chunk_times is not None and len(requested_chunk_times)<2 and rawsrc is not None:
                                #if rawsrc is not None:
                                print "not enough time to process"
                                continue
                            elif rawsrc is None and rs_mem is None and remainder is None:
                                #chunk_end_to_use=intervalTime
                                #continue
                                #print ''
                                break
                 
          
                            rs_chunk,remainder = pu.process_data( instrument, intervalTime, intervalEnd
                                ,params['min_alt'], params['max_alt'], requested_chunk_times
                                , rs_mem, calv['rs_Cxx'], calv['rs_constants'], calv['rs_cal']
                                , None , self.cal_narr.hsrl_corr_adjusts, self.cal_narr.hsrl_process_control
                                , self.compute_stats,remainder=remainder)
                            rs_mem=None
                            if rs_chunk is not None and hasattr(rs_chunk,'rs_mean') and rs_chunk.rs_mean is not None and rs_chunk.rs_mean.times.size==0:
                                rs_chunk=None
                            if rs_chunk is None and rawsrc is None:
                                break
                           #print rs_chunk
                            if rs_chunk is not None and hasattr(rs_chunk,'rs_mean') and rs_chunk.rs_mean is not None and rs_chunk.rs_mean.times.size>0:
                                if fullrange and requested_chunk_times is not None:
                                    v=hau.Time_Z_Group(like=rs_chunk.rs_mean)
                                    v.times=hau.T_Array(requested_chunk_times[requested_chunk_times<rs_chunk.rs_mean.times[0]])
                                    if v.times.size>0:
                                        rs_chunk.rs_mean.prepend(v)
                                rs_chunk.calv=calv

                                yield rs_chunk
                                intervalTime=intervalEnd
                    except Exception as e:
                        print 'Exception occured in update_cal_and_process'
                        print 'Exception = ',e
                        print traceback.format_exc()
                        if isinstance(e,(MemoryError,)):
                            print 'Please Adjust Your Parameters to be more Server-friendly and try again'
                            raise
                        if not isinstance(e,(AttributeError,)):
                            raise
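
The prebinned-times branch of Example #6 builds bin borders by stepping python datetimes; below is a standalone sketch of that edge construction. The function name and values are illustrative, not from the source.

from datetime import datetime, timedelta

def prebinned_edges(interval_start, interval_end, timeres):
    # bin borders, as in the 'timeres' branch above: the last value is the
    # (excluded) end of the final bin and becomes the first border of the next window
    edges = [interval_start]
    tmp = interval_start
    while tmp < interval_end:
        tmp += timeres
        edges.append(tmp)
    return edges

edges = prebinned_edges(datetime(2015, 6, 1, 0, 0, 0),
                        datetime(2015, 6, 1, 0, 5, 0),
                        timedelta(seconds=30))
# 11 borders -> 10 bins of 30 seconds each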