Пример #1
0
 def flush(self, raw=None):
     """Flush the accumulated remainder, optionally appending *raw* first.

     The accumulated Time_Z_Group is replaced with a fresh, like-shaped
     empty one.  When a post_operator is configured the flushed group is
     handed to its flush() and that result is returned; otherwise the
     group itself is returned.
     """
     if raw is not None:
         self.Rem.append(raw)
     raw = self.Rem
     # start a fresh accumulator shaped like the group being flushed
     self.Rem = hau.Time_Z_Group(like=raw)
     # 'is not None' per PEP 8; '!= None' invites elementwise comparison
     # surprises with numpy-backed objects
     if self.post_operator is not None:
         return self.post_operator.flush(raw)
     return raw
Пример #2
0
 def triage(self, *fr):
     """Return a new Time_Z_Group keyed to the first usable time axis in *fr*.

     Scans the given frames in order for one that is not None and carries a
     non-empty time variable (named by the frame's _timevarname attribute).
     Returns a hau.Time_Z_Group built on a copy of that axis
     (timevarname='times', altname='heights'), or None when no frame
     provides a time axis.
     """
     timeaxis = None
     for fram in fr:
         # 'is None' / 'is not None' instead of '=='/'!=': once timeaxis is a
         # numpy time array, '== None' is elementwise and the boolean 'or'
         # raises "truth value of an array is ambiguous"
         if ((timeaxis is None or timeaxis.size == 0) and fram is not None
                 and hasattr(fram, fram._timevarname)):
             timeaxis = getattr(fram, fram._timevarname)
     if timeaxis is None:
         return None
     return hau.Time_Z_Group(timeaxis.copy(), timevarname='times', altname='heights')
Пример #3
0
def ms_sums(diameter, beta, depol, multiple_scatter_parameters, ms_obj):
    '''  ms_sums(diameter,beta,depol,multiple_scatter_parameters,ms_obj):

             diameter = particle diameter profile or profiles (may be None)
             beta     = extinction or backscatter cross section profile or profiles
             depol    = depolarization profile or profiles
             multiple_scatter_parameters = dict with 'h2o_depol_threshold' and,
                        when diameter is None, 'mode_diameter_ice' and
                        'mode_diameter_water'
             ms_obj   = running sums to update in place, or None to create new
             returns:
             ms_obj   = summed profiles
               "       .diameter_ice
               "       .diameter_water
               "       .beta_ice
               "       .beta_water
               "       .n_samples_ice
               "       .n_samples_water  '''
    # phase masks from the depolarization threshold: ice where depol is at or
    # above the threshold, water below it.  NaN depol bins fall in neither
    # class (both comparisons are False for NaN).
    is_ice = np.zeros_like(beta)
    is_water = np.zeros_like(beta)
    threshold = multiple_scatter_parameters['h2o_depol_threshold']
    is_ice[depol >= threshold] = 1.0
    is_water[depol < threshold] = 1.0

    beta_ice = beta * is_ice
    beta_water = beta * is_water

    # compute diameter * beta for ice and water components.
    # 'diameter is not None' replaces 'not diameter == None': '==' on a numpy
    # array is elementwise, so the old test raised "truth value of an array is
    # ambiguous" for multi-element diameters.
    if diameter is not None:
        diameter_ice = diameter * beta * is_ice
        diameter_water = diameter * beta * is_water
    else:
        # diameter not supplied by the lidar-radar retrieval: fall back to the
        # constant mode diameters from multiple_scatter_parameters
        diameter_ice = hau.Z_Array(
            np.ones_like(beta) *
            multiple_scatter_parameters['mode_diameter_ice'] * beta * is_ice)
        diameter_water = hau.Z_Array(
            np.ones_like(beta) *
            multiple_scatter_parameters['mode_diameter_water'] * beta *
            is_water)

    if ms_obj is None:
        # first chunk: create the accumulator and seed it with this chunk's sums
        ms_obj = hau.Time_Z_Group()
        ms_obj.beta_ice = hau.Z_Array(nansum(beta_ice, 0))
        ms_obj.beta_water = hau.Z_Array(nansum(beta_water, 0))
        ms_obj.diameter_ice = hau.Z_Array(nansum(diameter_ice, 0))
        ms_obj.diameter_water = hau.Z_Array(nansum(diameter_water, 0))
        ms_obj.n_samples_ice = hau.Z_Array(
            sum(~np.isnan(beta_ice) * is_ice, 0))
        ms_obj.n_samples_water = hau.Z_Array(
            sum(~np.isnan(beta_water) * is_water, 0))
    else:
        # later chunks: accumulate onto the running sums
        ms_obj.beta_ice += nansum(beta_ice, 0)
        ms_obj.beta_water += nansum(beta_water, 0)
        ms_obj.diameter_ice += nansum(diameter_ice, 0)
        ms_obj.diameter_water += nansum(diameter_water, 0)
        ms_obj.n_samples_ice += sum(~np.isnan(beta) * is_ice, 0)
        ms_obj.n_samples_water += sum(~np.isnan(beta) * is_water, 0)

    return ms_obj
Пример #4
0
 def triage(self, hsrl, radar):
     """Build a Time_Z_Group on the first non-empty time axis of hsrl/radar.

     Prefers hsrl's time variable (named by its _timevarname), falling back
     to radar's.  Returns a hau.Time_Z_Group on a copy of that axis
     (timevarname='times', altname='heights'), or None when neither frame
     has a time axis.
     """
     timeaxis = None
     # 'is None' / 'is not None': '== None' on a numpy time array is
     # elementwise and makes the boolean 'or' raise
     if (timeaxis is None or timeaxis.size == 0) and hsrl is not None and hasattr(hsrl, hsrl._timevarname):
         timeaxis = getattr(hsrl, hsrl._timevarname)
     if (timeaxis is None or timeaxis.size == 0) and radar is not None and hasattr(radar, radar._timevarname):
         timeaxis = getattr(radar, radar._timevarname)
     if timeaxis is not None:
         return hau.Time_Z_Group(timeaxis.copy(), timevarname='times', altname='heights')
     return None
Пример #5
0
 def combine(self):
     """Generator that merges the primary frame stream with secondary streams.

     For every primary frame, each secondary track is consumed either by
     frame count (when its key is in self.countMatch) or by time (up to the
     primary frame's last time), the eaten frames are concatenated into one
     appended frame, and the combined result is yielded — either as an
     OrderedDict or wrapped in self.retclass, depending on configuration.
     """
     tracks = OrderedDict()
     # one iterator + store per secondary stream
     for k in self.sechost.keys():
         tracks[k] = {}
         tracks[k]['iter'] = iter(self.sechost[k])
         tracks[k]['store'] = []
     if True:  #try:
         for pf in self.prim:
             ret = OrderedDict()
             ret[self.primaryname] = pf
             lastTime = self.__getLastTime(pf)
             frameCount = self.__getCount(pf)
             for k in tracks.keys():
                 # pull matching frames from this secondary track, by count
                 # or by time window
                 if k in self.countMatch:
                     frames, timerange = self.__eatFrameByCount(
                         k, tracks[k], frameCount)
                 else:
                     frames, timerange = self.__eatFrameByTime(
                         k, tracks[k], lastTime)
                 if timerange != None and len(frames) > 0 and hasattr(
                         frames[0], 'append'):
                     # concatenate the eaten frames into a single appendable
                     # frame, starting from a deep copy of the first
                     fms = frames
                     frames = copy.deepcopy(fms[0])
                     for x in range(1, len(fms)):
                         if fms[x] is self.noFramePlaceholder:
                             if k in self.countMatch:
                                 # count-matched stream is missing a frame:
                                 # pad with an empty like-shaped group
                                 VERBOSE(
                                     'extending', k,
                                     'FIXME this should have been done by the resampler'
                                 )
                                 frames.append(
                                     hau.Time_Z_Group(
                                         like=frames,
                                         times=self.__getTime(
                                             pf,
                                             self.__getCount(frames),
                                             asArray=True)))
                             else:
                                 pass  #by time, or no extend
                         else:
                             frames.append(fms[x])
                     setattr(frames, 'start', timerange['start'])
                     setattr(frames, 'width', timerange['width'])
                 # drop placeholder-only results from the merged record
                 if not frames is self.noFramePlaceholder:
                     ret[k] = frames
             # package the merged record per configuration: raw dict,
             # dict-in-one-kwarg class, or attribute-per-key class
             if self.retclass == None:
                 retv = ret
             elif self.classdictparm:
                 d = self.classparms.copy()
                 d[self.classdictparm] = ret
                 retv = self.retclass(**d)
             else:
                 retv = self.retclass(**self.classparms)
                 for k, i in ret.items():
                     setattr(retv, k, i)
                 #print 'merged is',retv
             yield retv
    def convert(self, profile, posframe):
        """Convert a raw model/sounding *profile* into a hau.Time_Z_Group.

        profile  : dict-like of arrays/scalars with keys such as 'lat',
                   'lon', 'base_time', 'time_offset', 'alt', 'pres', 'rh',
                   and either 'tkel' or 'tdry'
                   (NOTE(review): schema inferred from the keys read here —
                   confirm against the producing reader)
        posframe : optional dict with 'latitude', 'longitude' and 'start';
                   when None, the sounding's own position/time are used as
                   the sample position/time.

        Returns a sounding group with temps, pressures, dew and frost points
        on the profile's altitude grid; attributes whose length disagrees
        with 'alt' are trimmed (with a warning printed).
        """
        sounding = hau.Time_Z_Group()
        # label the sounding by its source, derived from this reader's name
        if 'cache' in self.name:
            sounding.sounding_type = 'virtual'
            sounding.sounding_id = 'Cached Forecast'
            sounding.station_id = 'Cached Forecast'
        elif 'virtual' in self.name:
            sounding.sounding_type = 'virtual'
            sounding.sounding_id = 'NWP Virt'
            sounding.station_id = 'NWP Virt'
        else:
            sounding.sounding_type = 'model'
            sounding.sounding_id = 'Static GRIB'
            sounding.station_id = 'Static GRIB'
        sounding.latitude = profile['lat'][0]
        sounding.longitude = profile['lon'][0]
        # epoch seconds: base_time plus the first per-record offset
        sounding.times = datetime(1970, 1, 1, 0, 0, 0) + timedelta(
            seconds=profile['base_time']) + timedelta(
                seconds=profile['time_offset'][0])
        if posframe is None:
            sounding.sample_latitude = sounding.latitude
            sounding.sample_longitude = sounding.longitude
            sounding.sample_time = sounding.times
        else:
            sounding.sample_latitude = posframe['latitude']
            sounding.sample_longitude = posframe['longitude']
            sounding.sample_time = posframe['start']
        # first pass: clip over-long attributes to the altitude count and
        # remember the shortest length seen
        minlen = profile['alt'].size
        for k, v in profile.items():
            if hasattr(v, 'shape'):
                if profile['alt'].size != v.size:
                    print 'WARNING SOUNDING ATTRIBUTE ' + k + ' has a length', v.size, 'while alts are', profile[
                        'alt'].size
                    profile[k] = v[:profile['alt'].size]
                    if minlen > v.size:
                        minlen = v.size
        # second pass: if anything was shorter than alt, trim everything to it
        if minlen < profile['alt'].size:
            print "TRIMMING TO SHORTENED SOUNDING ATTRIBUTE of length", minlen
            for k, v in profile.items():
                if hasattr(v, 'shape'):
                    profile[k] = v[:minlen]

        sounding.top = numpy.max(profile['alt'])
        sounding.bot = numpy.min(profile['alt'])
        sounding.altitudes = hau.Z_Array(profile['alt'])
        # 'tkel' is used as-is; otherwise 'tdry' gets +273.15
        # (NOTE(review): presumably tdry is Celsius — confirm)
        if 'tkel' in profile:
            sounding.temps = hau.Z_Array(profile['tkel'])
        else:
            sounding.temps = hau.Z_Array(profile['tdry']) + 273.15
        sounding.pressures = hau.Z_Array(profile['pres'])
        sounding.dew_points = hau.Z_Array(
            self.su.cal_dew_point(hau.Z_Array(profile['rh']), sounding.temps))
        sounding.frost_points = hau.Z_Array(
            self.su.cal_frost_point(sounding.dew_points))
        return sounding
Пример #7
0
 def process(self):
     """Generator that attaches qa flags to each frame of self.timealtsource.

     For each incoming time/altitude frame, the current qa record is pulled
     from a TimeTrickle over self.qasource; range-dependent flags are merged
     against the platform altitude when present, translated to the
     enumerated form, and yielded as a one-time-step Time_Z_Group (either
     one field per flag bit when self.splitFields, or a single 'qaflags'
     array).
     """
     fr = None
     flags = None
     olda = None
     qasource = tf.TimeTrickle(self.qasource, 'time')
     altitudes = hau.Z_Array(self.qaparser.altitudeAxis)
     for f in self.timealtsource:
         #FIXME include angles
         if not isinstance(f, dict):
             f = vars(f)
         t = f[self.timename]
         a = self.constantAltitude
         if fr is None or ((not qasource.atEnd) and t >= qasource.nextTime
                           ):  #if need an update to the qa record
             #print 'Getting qa source for time',t
             fr = qasource(t)
             flags = None  # force a rebuild of the merged flags below
         if 'range_flags' in fr and fr[
                 'range_flags'] is not None:  #if there is a range dependence, and a potentially non-constant altitude
             if self.altname is not None and self.altname in f:
                 a = f[self.altname]
             if a is None:
                 raise RuntimeError(
                     'Need platform altitude to merge in range-dependant qa Flags'
                 )
             # altitude changed since last frame: merged flags are stale
             if olda is None or a != olda:
                 flags = None
                 olda = a
         if flags is None:  #was cleared either because new flags from the qa file, or new altitude from the stream
             if 'range_flags' in fr and fr['range_flags'] is not None:
                 flags = self.qaparser.mergeVectors(fr['flags'],
                                                    fr['range_flags'], a)
             else:
                 flags = fr['flags']
             flags = self.qaparser.translateToEnumeration(flags)
             # add a leading time dimension of 1 so downstream appends work
             flags = hau.TZ_Array(flags.reshape([1] + list(flags.shape)),
                                  dtype='int32',
                                  summode='and')
         ret = hau.Time_Z_Group(timevarname='times', altname='altitudes')
         setattr(ret, 'times', hau.T_Array([t]))
         setattr(ret, 'delta_t', hau.T_Array([f['width'].total_seconds()]))
         setattr(ret, 'altitudes', copy.copy(altitudes))
         setattr(ret, 'start', f['start'])
         setattr(ret, 'width', f['width'])
         if self.splitFields:
             # one decimal digit per flag bit index; NOTE(review): this loop
             # reuses the name 'f', clobbering the frame dict — harmless here
             # because 'f' is not read again this iteration, but fragile
             for f, idx in self.qaparser.flagbits.items():
                 setattr(
                     ret, 'qa_' + f,
                     hau.TZ_Array((flags / (10**idx)) % 10,
                                  dtype='int32',
                                  summode='and'))
         else:
             setattr(ret, 'qaflags', flags)
         yield ret
Пример #8
0
def process_hsrl_raman_profile(hsrl_profile,
                               raman_profile,
                               parameters,
                               entire_frame=None):
    print
    print 'entering process_hsrl_raman_profile in cooperative-------------------'
    rs = hau.Time_Z_Group(like=hsrl_profile)
    rs.hsrl_profile = copy.deepcopy(hsrl_profile)
    rs.raman_profile = copy.deepcopy(raman_profile)
    print 'leaving process_hsrl_raman_profile'
    return rs
Пример #9
0
 def triage(self, hsrl, raman):
     """Build a Time_Z_Group on the first non-empty time axis of hsrl/raman.

     Prefers hsrl's time variable (named by its _timevarname), falling back
     to raman's.  Returns a hau.Time_Z_Group on a copy of that axis
     (timevarname='times', altname='altitudes'), or None when neither frame
     has a time axis.
     """
     timeaxis = None
     # 'is None' / 'is not None': '== None' on a numpy time array is
     # elementwise and makes the boolean 'or' raise
     if ((timeaxis is None or timeaxis.size == 0) and hsrl is not None
             and hasattr(hsrl, hsrl._timevarname)):
         timeaxis = getattr(hsrl, hsrl._timevarname)
     if ((timeaxis is None or timeaxis.size == 0) and raman is not None
             and hasattr(raman, raman._timevarname)):
         timeaxis = getattr(raman, raman._timevarname)
     if timeaxis is not None:
         return hau.Time_Z_Group(timeaxis.copy(),
                                 timevarname='times',
                                 altname='altitudes')
     return None
 def triage(self, hsrl, particle, rs):
     """Build a Time_Z_Group on the first non-empty time axis of hsrl/particle.

     Prefers hsrl's time variable (named by its _timevarname), falling back
     to particle's; rs is accepted but not consulted.  Returns a
     hau.Time_Z_Group on a copy of that axis (timevarname='times',
     altname='heights'), or None when no time axis is found.
     """
     timeaxis = None
     # 'is None' / 'is not None': '== None' on a numpy time array is
     # elementwise and makes the boolean 'or' raise
     if ((timeaxis is None or timeaxis.size == 0) and hsrl is not None
             and hasattr(hsrl, hsrl._timevarname)):
         timeaxis = getattr(hsrl, hsrl._timevarname)
     if ((timeaxis is None or timeaxis.size == 0) and particle is not None
             and hasattr(particle, particle._timevarname)):
         timeaxis = getattr(particle, particle._timevarname)
     if timeaxis is None:
         return None
     return hau.Time_Z_Group(timeaxis.copy(),
                             timevarname='times',
                             altname='heights')
def process_hsrl_radar(process_parameters=None,rs_inv=None,rs_mean=None,rs_mwacr=None,rs_kazrge=None,**kwargs):
    #create timez group and add heights            
    rs_cooperative=hau.Time_Z_Group(rs_inv.times.copy(),timevarname='times',altname='heights')
    setattr(rs_cooperative,'heights',rs_inv.msl_altitudes.copy())



    print 'called process_hsrl_radar'
    print kwargs
    print rs_mwacr
    print rs_kazrge
    #raise RuntimeError('RUNNING HSRLRADAR')



    return rs_cooperative
Пример #12
0
 def render(self):
     """Generator that renders images for each frame, then passes it through.

     On the first renderable frame the set of subframe names is decided:
     either the single self.limit_frame_to, or every public attribute of
     the frame — excluding 'raw'-named subframes when mean/inv data exists,
     or only 'raw' ones otherwise.  Each selected subframe is rendered
     (flat, or wrapped in a like-shaped Time_Z_Group), figures are shown,
     and the original frame is yielded unchanged.
     """
     subframes = None
     for frame in self.framestream:
         if frame != None:
             if self.limit_frame_to != None or self.breakup_nesting:
                 # decide the subframe list once, on the first usable frame
                 if subframes is None:
                     if self.limit_frame_to is not None:
                         subframes = [self.limit_frame_to]
                     else:
                         subframes = []
                         omit = None
                         include = None
                         # prefer processed data: if mean/inv exists (or raw
                         # doesn't), skip 'raw' subframes; otherwise only raw
                         if hasattr(frame, 'rs_mean') or hasattr(
                                 frame,
                                 'rs_inv') or not hasattr(frame, 'rs_raw'):
                             omit = 'raw'
                         else:
                             include = 'raw'
                         for x in vars(frame).keys():
                             if x.startswith('_'):
                                 continue
                             if omit is not None and omit in x:
                                 continue
                             if include is not None and include not in x:
                                 continue
                             subframes.append(x)
                 for k in subframes:
                     if hasattr(frame, k):
                         if self.flat_frame:
                             print 'Showing images for HSRL flat frame ' + k
                             self.renderframe(getattr(frame, k), frame)
                         else:
                             import lg_base.core.array_utils as hau
                             # wrap the subframe so the renderer sees a
                             # like-shaped single-subframe group
                             sendframe = hau.Time_Z_Group(like=frame)
                             setattr(sendframe, k, getattr(frame, k))
                             print 'Showing images for HSRL subframe ' + k
                             # alias raw<->mean so either display path works
                             if k == 'rs_raw':
                                 setattr(sendframe, 'rs_mean',
                                         getattr(frame, k))
                             elif k == 'rs_mean':
                                 setattr(sendframe, 'rs_raw',
                                         getattr(frame, k))
                             self.renderframe(sendframe, frame)
             else:
                 self.renderframe(frame, frame)
             self.figcontainer.shownew()
         yield frame
Пример #13
0
def accumulate_raman_profiles(consts,
                              rs_mean,
                              qc_mask,
                              old_profiles,
                              process_control=None,
                              rs_cal=None,
                              Cxx=None,
                              corr_adjusts=None):
    """Accumulate time-averaged raman profiles across chunks.

    Sums the current chunk (rs_mean) onto the running totals carried in
    old_profiles (or starts fresh when old_profiles is None), tracking
    first/last time and sample count in profiles.hist.  When Cxx is
    supplied and elastic counts are present, the averaged counts are
    inverted and an sg extinction retrieval is attached as profiles.inv.

    NOTE(review): 'processing_defaults', 'energies', 'channel_shorthand',
    'accumulate' and 'nansum' are not parameters or locals — they must be
    module-level names where this function lives; confirm they exist there.
    """
    indices = np.arange(rs_mean.times.shape[0])
    if len(indices) == 0:
        return old_profiles

    if qc_mask is not None and processing_defaults.get_value(
            'averaged_profiles', 'apply_mask'):
        #make mask array with NaN's for array elements where bit[0] of qc_mask==0
        #all other elements of mask = 1
        mask = (np.bitwise_and(qc_mask, 1)).astype(
            'float')  #mask is float to allow use of NaN values
        mask[mask == 0] = np.NaN

        print 'qc_mask applied to time averaged profiles vs altitude'
    else:
        #set mask == 1
        mask = None
        #for sh in channel_shorthand.keys():
        #
        #	if hasattr(rs_mean,sh):
        #        mask = np.ones_like(getattr(rs_mean,sh))
        #        break
        #if mask is None:
        #    mask = np.ones((rs_mean.times.shape[0],0))
        print 'qc_mask has not been applied to time averaged profiles'

    #energies=('transmitted_1064_energy','transmitted_energy')

    profiles = hau.Time_Z_Group(can_append=False, altname='altitudes')

    # hist carries the accumulation bookkeeping across calls:
    # ft = first time, tc = sample count, lt = last time (inclusive of width)
    profiles.hist = hau.Time_Z_Group()
    ft = None
    tc = 0
    #tv=0
    lt = None
    if old_profiles is not None:
        #total_seeded_shots=total_seeded_shots+profiles.hist.total_seeded_shots
        ft = old_profiles.hist.ft
        tc = old_profiles.hist.tc
        #tv=old_profiles.hist.tv
        lt = old_profiles.hist.lt
    if len(indices) > 0:
        if ft is None:
            ft = rs_mean.times[indices][0]
        # last time extends by the final bin's delta_t (0 when NaN)
        lt = rs_mean.times[indices][-1] + timedelta(
            seconds=rs_mean.delta_t[indices][-1]
            if not np.isnan(rs_mean.delta_t[indices][-1]) else 0)
        for x in indices:
            if rs_mean.times[x] is not None:
                tc = tc + 1
                #tv=tv+(rs_mean.times[x]-ft).total_seconds()
    if tc > 0:
        profiles.times = hau.T_Array([ft])  #+timedelta(seconds=tv/tc), ])
        profiles.start = ft
        profiles.width = lt - ft
        profiles.delta_t = hau.T_Array([profiles.width.total_seconds()])
    else:
        profiles.times = hau.T_Array([])
        profiles.start = ft
        profiles.width = timedelta(seconds=0)
        profiles.delta_t = hau.T_Array([])
    profiles.start_time = ft
    profiles.end_time = lt
    profiles.hist.ft = ft
    profiles.hist.tc = tc
    #profiles.hist.tv=tv
    profiles.hist.lt = lt
    #profiles.hist.total_seeded_shots=total_seeded_shots
    if rs_mean is not None and hasattr(rs_mean, 'altitudes'):
        profiles.altitudes = rs_mean.altitudes.copy()

    #elif hasattr(old_profiles,'altitudes'):
    #  profiles.altitudes=old_profiles.altitudes
    elif hasattr(old_profiles, 'heights'):
        profiles.heights = old_profiles.heights

    # accumulate shot counts first — they normalize the channel sums below
    accumulate(profiles,
               old_profiles,
               rs_mean,
               indices,
               'shots',
               pref='mean_',
               filler=hau.T_Array([0]))
    total_shots = profiles.mean_shots
    profiles.shots = total_shots.copy()
    print 'Total shots for profile =', total_shots
    for e in energies:
        accumulate(profiles, old_profiles, rs_mean, indices, e, total_shots)
    # create TZ_Array with time dimension of '1', so hsrl_inversion doesn't choke
    for chan in channel_shorthand.keys():
        accumulate(profiles, old_profiles, rs_mean, indices, chan, total_shots,
                   mask)

    #compute inverted profiles from mean count profiles
    if Cxx is not None and hasattr(profiles, 'elastic_counts'):
        import raman.core.raman_processing_utilities as rpu
        profiles.inv = rpu.process_raman(consts, profiles, process_control,
                                         rs_cal, Cxx, corr_adjusts)
        import lidar.sg_extinction as lsge
        filter_params = lsge.filter_setup(profiles.inv.altitudes,
                                          process_control, consts)
        sg_ext = lsge.sg_extinction(filter_params)
        profiles.inv.extinction,profiles.inv.extinction_aerosol,profiles.inv.p180 \
              = sg_ext(profiles.times,profiles.delta_t,profiles.nitrogen_counts ,profiles.inv.beta_a_backscat,profiles.inv.integ_backscat,beta_r=Cxx.beta_r_355)

    elif hasattr(old_profiles, 'inv'):
        # no new inversion possible: carry the previous one forward
        profiles.inv = old_profiles.inv
    return profiles
Пример #14
0
 def __init__(self, ntime_ave, post_operator=None):
     """Set up a time averager over ntime_ave records.

     post_operator, when given, receives the accumulated group on flush;
     Rem holds the running remainder between flushes.
     """
     self.post_operator = post_operator
     self.ntime_ave = ntime_ave
     self.Rem = hau.Time_Z_Group()
Пример #15
0
    def read(self):
        """ main read generator

        Walks the calibration narrative (self.cal_narr) and, within each
        calibration window, pulls raw HSRL frames from self.rawsrc,
        builds the requested output time axis (from self.timesource, a
        fixed 'timeres', or native timing), hands the raw data to
        pu.process_data, and yields processed chunks (rs_chunk with the
        active calv attached).  Carries unprocessed raw data forward in
        'remainder' between chunks.
        """
        import hsrl.data_stream.processing_utilities as pu
        params=self.params
        firsttimeever=None
        intervalTime=None
        intervalEnd=None
        rawsrc=iter(self.rawsrc)
        #if params['timeres']!=None and params['timeres']<datetime.timedelta(seconds=float(self.cal_narr.hsrl_constants['integration_time'])):
        #    params['timeres']=None #pure native
        end_time_datetime=params['finalTime']
        #timemodoffset=time_mod(params['realStartTime'],params['timeres'])
        noframe='noframe'
        fullrange=False #if this is true, it will pad the start with any missing times.

        remainder=None
        cdf_to_hsrl = None
        preprocess_ave = None
        requested_times=None
        instrument=self.hsrl_instrument
        intcount=0
        rs_mem = None
        #rs=None
        timesource=TimeSource.CompoundTimeGenerator(self.timesource) if self.timesource is not None else None
        
        # one calv per calibration window; each spans chunk_start_time..chunk_end_time
        for calv in self.cal_narr:
            if intervalTime is None:
                firsttimeever=calv['chunk_start_time']
                intervalTime=calv['chunk_start_time']
                intervalEnd=intervalTime
            chunk_end_to_use=calv['chunk_end_time']#-time_mod(calv['chunk_end_time'],params['timeres'],timemodoffset)
            #print 'old end',calv['chunk_end_time'],'vs new end',chunk_end_to_use,'mod base',params['timeres']
            # empty cal window with no fixed end time: wait (or yield None) for live data
            if calv['chunk_end_time']==calv['chunk_start_time'] and end_time_datetime is None:
                if params['block_when_out_of_data']:
                    if 'timeres' not in params or params['timeres'] is None:
                        sleep(calv['rs_constants']['integration_time'])
                    else:
                        sleep(params['timeres'].total_seconds())
                else:
                    yield None #this is done to get out of here, and not get stuck in a tight loop
                continue
            # process this cal window in chunks until its end is reached
            while intervalTime<chunk_end_to_use:
                integration_time = calv['rs_constants']['integration_time']
                doPresample=True
                #END init section
                if intervalEnd>chunk_end_to_use:
                    print 'Breaking calibration on endtime. proc ',intervalEnd,chunk_end_to_use,end_time_datetime
                    break
                else:
                    intervalEnd=chunk_end_to_use
                #print ' Absolute window is ', actualStartTime, ' to ' , params['finalTime']
                print ' prior window was ', intervalTime, ' to ' , intervalEnd, 'terminating at ',chunk_end_to_use,rs_mem
                if True:#requested_times==None or requested_times.shape[0]>0:
                    try:
                            try:
                                # pull raw frames until we have data or cross the window end
                                while rawsrc is not None:
                                    if rs_mem is not None and rs_mem.times[0]>=chunk_end_to_use  and (end_time_datetime is None or chunk_end_to_use<end_time_datetime):
                                        break
                                    tmp=rawsrc.next()
                                    if hasattr(tmp,'rs_raw'):
                                        if rs_mem is not None:
                                            rs_mem.append(tmp.rs_raw)
                                        else:
                                            rs_mem=copy.deepcopy(tmp.rs_raw)
                                    if rs_mem is not None and rs_mem.times.shape>0:
                                        break
                                    else:
                                        rs_mem=None
                            except StopIteration:
                                print 'Raw HSRL stream is ended'
                                rawsrc=None
                            if rs_mem is None or rs_mem.times.size==0:
                                rs_mem=None
                            elif rs_mem.times[0]>=chunk_end_to_use and (end_time_datetime is None or chunk_end_to_use<end_time_datetime):
                                print 'HSRL RAW skipping to next cal because of times',intervalTime,chunk_end_to_use,end_time_datetime,rs_mem.times[0]
                                break
                            else:
                                intervalEnd=rs_mem.times[-1]
                            print 'read in raw frame to mean',rs_mem,remainder
                            if rawsrc is None:
                                intervalEnd=chunk_end_to_use

                            print 'trimmed ',rs_mem
                            # build the requested output time axis for this chunk
                            if timesource is not None:
                                if timesource.isDone:
                                    break
                                useMungedTimes=False #this is in case this code will need to start shifting bins (which assumes resolutions, and implies start and end of intervales, rather than explicitly to avoid overlap or underlap
                                usePrebinnedTimes=True #this goes in the other direction of munged times to say provided times are timebin borders, and the last time is the end of the last, not included, and thus expected to be the first bin on the next window. thats the fully explicit way to describe the bins in code, but standards in describing bins to the user (a single time when the bin spans a range) is not defined yet
                                inclusive=rawsrc is None and (end_time_datetime!=None and intervalEnd>=end_time_datetime)
                                timevals=hau.T_Array(timesource.getBinsFor(starttime=intervalTime,endtime=intervalEnd,inclusive=inclusive))#,inclusive=(end_time_datetime!=None and intervalEnd>=end_time_datetime)))
                                print 'Now %i intervals %s' % (timevals.size-1, "INC" if inclusive else "NOINC"),intervalTime,intervalEnd
                            elif 'timeres' in params and params['timeres'] is not None:
                                tmp=intervalTime
                                useMungedTimes=False #this is in case this code will need to start shifting bins (which assumes resolutions, and implies start and end of intervales, rather than explicitly to avoid overlap or underlap
                                usePrebinnedTimes=True #this goes in the other direction of munged times to say provided times are timebin borders, and the last time is the end of the last, not included, and thus expected to be the first bin on the next window. thats the fully explicit way to describe the bins in code, but standards in describing bins to the user (a single time when the bin spans a range) is not defined yet

                                timevals=[]
                                timevals.append(tmp)
                                while tmp<intervalEnd:# using python datetimes for making the axis is much much more precise than matplotlib floats.
                                        #print tmp, ' = ' , du.date2num(tmp) , ' = ' , (tmp-self.actualStartTime).total_seconds()
                                        tmp+=params['timeres']
                                        timevals.append(tmp)
                                        
                                #intervalEnd=tmp
                                intcount+=len(timevals)
                                if usePrebinnedTimes:
                                    intcount-=1
                                print 'Now %i intervals' % (intcount)
                                timevals=hau.T_Array(timevals)
                            else:

                                print 'Using Native timing'
                                timevals=None

                            print ' new window is ', intervalTime, ' to ' , intervalEnd

                            requested_times=timevals
                           
                            requested_chunk_times= requested_times#requested_times[requested_times >=intervalTime]

                            if requested_chunk_times is not None and len(requested_chunk_times)<2 and rawsrc is not None:
                                #if rawsrc is not None:
                                print "not enough time to process"
                                continue
                            elif rawsrc is None and rs_mem is None and remainder is None:
                                #chunk_end_to_use=intervalTime
                                #continue
                                #print ''
                                break
                 
          
                            rs_chunk,remainder = pu.process_data( instrument, intervalTime, intervalEnd
                                ,params['min_alt'], params['max_alt'], requested_chunk_times
                                , rs_mem, calv['rs_Cxx'], calv['rs_constants'], calv['rs_cal']
                                , None , self.cal_narr.hsrl_corr_adjusts, self.cal_narr.hsrl_process_control
                                , self.compute_stats,remainder=remainder)
                            rs_mem=None
                            if rs_chunk is not None and hasattr(rs_chunk,'rs_mean') and rs_chunk.rs_mean is not None and rs_chunk.rs_mean.times.size==0:
                                rs_chunk=None
                            if rs_chunk is None and rawsrc is None:
                                break
                           #print rs_chunk
                            if rs_chunk is not None and hasattr(rs_chunk,'rs_mean') and rs_chunk.rs_mean is not None and rs_chunk.rs_mean.times.size>0:
                                if fullrange and requested_chunk_times is not None:
                                    v=hau.Time_Z_Group(like=rs_chunk.rs_mean)
                                    v.times=hau.T_Array(requested_chunk_times[requested_chunk_times<rs_chunk.rs_mean.times[0]])
                                    if v.times.size>0:
                                        rs_chunk.rs_mean.prepend(v)
                                rs_chunk.calv=calv

                                yield rs_chunk
                                intervalTime=intervalEnd
                    except Exception, e:
                        print 'Exception occured in update_cal_and_process'
                        print 'Exception = ',e
                        print traceback.format_exc()
                        if isinstance(e,(MemoryError,)):
                            print 'Please Adjust Your Parameters to be more Server-friendly and try again'
                            raise
                        if not isinstance(e,(AttributeError,)):
                            raise
Пример #16
0
def generate_ave_profiles(rs_mean,
                          qc_mask,
                          rs_Cxx,
                          rs_constants,
                          processing_defaults,
                          sel_telescope_dir,
                          corr_adjusts,
                          old_profiles=None):
    """Accumulate time-averaged altitude profiles from rs_mean, restricted
    to records matching the requested telescope pointing direction.

    rs_mean             -- frame of time-resolved count arrays and metadata
    qc_mask             -- quality-control bit mask (bit 0 set == good) or None
    rs_Cxx              -- calibration coefficients; None skips the inversion
    rs_constants        -- instrument constants dictionary
    processing_defaults -- processing configuration object (may be None)
    sel_telescope_dir   -- 'all', 'zenith', or 'nadir'
    corr_adjusts        -- correction adjustments forwarded to the inversion
    old_profiles        -- previously accumulated profiles to extend, or None

    Returns a hau.Time_Z_Group of summed/averaged count profiles, inverted
    products in .inv when calibration is available, and accumulation history
    in .hist so subsequent calls can keep extending the averages.
    Raises RuntimeError for an unrecognized sel_telescope_dir.
    """

    # shape of the mean count arrays; fall back to an empty frame when the
    # molecular channel is absent from rs_mean
    try:
        [ntimes, nalts] = rs_mean.molecular_counts.shape
    except AttributeError:
        #raise RuntimeError, \
        #    "molecular counts missing"
        ntimes = 0
        nalts = 500
    #if rs_mean.times.shape[0] == 0:
    #raise RuntimeError, \
    #  "times missing"
    #    return old_profiles

    if qc_mask is not None and processing_defaults.get_value(
            'averaged_profiles', 'apply_mask'):
        #make mask array with NaN's for array elements where bit[0] of qc_mask==0
        #all other elements of mask = 1
        mask = (np.bitwise_and(qc_mask, 1)).astype('float')
        #mask is float to allow use of NaN values
        mask[mask == 0] = np.NaN

        print 'qc_mask applied to time averaged profiles vs altitude'
    else:
        #set mask == 1
        mask = None
        print 'qc_mask has not been applied to time averaged profiles'
    # most of the time we need to generate some/all of these profiles,
    # because users are plotting them
    # if this is too time-consuming, we'll generate just the ones necessary for the
    # user selected plots.

    # start with all records selected; 'indices' is a boolean mask over the
    # time axis until it is converted back to integer indexes further down
    indices = np.arange(rs_mean.times.shape[0])
    indices = (indices >= 0)  #boolean now
    if ntimes == 0 and len(indices) == 0:
        return old_profiles

    #average only those profiles occuring when locked to the i2 line
    #and not in cal_scan mode
    #op_mode = rs.rs_raw.op_mode.astype(int)
    #not_cal_scan = (~op_mode[:] & 32)/32
    #locked_to_i2 = ((op_mode[:] & 4)/4)

    #bits = np.zeros((len(rs.rs_raw.times),len(bit_value)+1))
    #opmode=rs.rs_raw.op_mode.astype('uint32')
    #for i in range(len(bit_value)):
    #   bits[:,i]=(i+1)*(opmode[:] & bit_value[i])/bit_value[i]

    #this dictionary maps the long name to the shorthand used with dark_counts variables
    # the keys are used here as a list of all possible channels too
    channel_shorthand = dict(molecular_counts='mol',
                             combined_hi_counts='c_hi',
                             combined_lo_counts='c_lo',
                             combined_wfov_counts='c_wfov',
                             combined_1064_counts='combined_1064',
                             molecular_wfov_counts='m_wfov',
                             molecular_i2a_counts='mol_i2a',
                             cross_pol_counts='c_pol',
                             combined_counts='comb')

    #,combined_1064_counts='combined_1064'
    #select desired telescope pointing direction for profiles

    if (rs_constants['installation'] == 'ground'
            or rs_constants['installation'] == 'shipborne'
            or sel_telescope_dir == 'all'):
        print 'Selecting all telescope pointing directions for profiles'

        # drop records where the seed laser was not locked to the i2 line
        if processing_defaults is not None and processing_defaults.get_value(
                'averaged_profiles', 'apply_mask'):
            indices[rs_mean.i2_locked <= 0.99] = False

    elif sel_telescope_dir == 'zenith':
        #if telescope pointing exists, limit to selected pointing direction
        # NOTE(review): this branch also trims rs_mean.telescope_pointing in
        # place while the 'nadir' branch does not -- confirm the asymmetry
        # is intentional
        if rs_mean.telescope_pointing.shape[0]:
            print 'Selecting only zenith pointing data for profiles'
            indices[rs_mean.telescope_pointing != 1] = False
            rs_mean.telescope_pointing = rs_mean.telescope_pointing[indices]
        else:
            print 'Warning--using all shots--no telescope pointing direction in data file'
    elif sel_telescope_dir == 'nadir':
        #if telescope pointing exists, limit to selected pointing direction
        if rs_mean.telescope_pointing.shape[0]:
            print 'Selecting only nadir pointing data for profiles'
            indices[rs_mean.telescope_pointing != 0] = False
        else:
            print 'Warning--using all shots--no telescope pointing direction in data file'
    else:
        raise RuntimeError, \
       "Unrecognized value '%s' for telescope pointing dir--valid(all, zenith,nadir)" \
       % (sel_telescope_dir)
    #this is meant to filter out chunks from rs_mean that are calibration intervals
    if hasattr(rs_mean, 'op_mode'):
        indices[np.bitwise_and(rs_mean.op_mode, 16) != 0] = False
    indices = np.arange(indices.shape[0])[indices]  #back to indexes
    profiles = hau.Time_Z_Group(can_append=False)
    profiles.hist = hau.Time_Z_Group()
    # accumulation history carried across calls:
    #   ft = first record time, tc = count of averaged records,
    #   lt = end time of the last record
    ft = None
    tc = 0
    #tv=0
    lt = None
    if old_profiles is not None:
        #total_seeded_shots=total_seeded_shots+profiles.hist.total_seeded_shots
        ft = old_profiles.hist.ft
        tc = old_profiles.hist.tc
        #tv=old_profiles.hist.tv
        lt = old_profiles.hist.lt
    if len(indices) > 0:
        if ft is None:
            ft = rs_mean.times[indices][0]
        # extend lt to the end of the final selected record (start + delta_t)
        lt = rs_mean.times[indices][-1] + timedelta(
            seconds=rs_mean.delta_t[indices][-1]
            if not np.isnan(rs_mean.delta_t[indices][-1]) else 0)
        for x in indices:
            if rs_mean.times[x] is not None:
                tc = tc + 1
                #tv=tv+(rs_mean.times[x]-ft).total_seconds()
    if tc > 0:
        profiles.times = hau.T_Array([ft])  #+timedelta(seconds=tv/tc), ])
        profiles.start = ft
        profiles.width = lt - ft
        profiles.delta_t = hau.T_Array([profiles.width.total_seconds()])
    else:
        profiles.times = hau.T_Array([])
        profiles.start = ft
        profiles.width = timedelta(seconds=0)
        profiles.delta_t = hau.T_Array([])
    profiles.start_time = ft
    profiles.end_time = lt
    profiles.hist.ft = ft
    profiles.hist.tc = tc
    #profiles.hist.tv=tv
    profiles.hist.lt = lt
    #profiles.hist.total_seeded_shots=total_seeded_shots
    # carry altitude axis and geo correction from the new frame when present,
    # otherwise inherit them from the previous accumulation
    if rs_mean is not None and hasattr(rs_mean, 'msl_altitudes'):
        profiles.msl_altitudes = rs_mean.msl_altitudes
    elif hasattr(old_profiles, 'msl_altitudes'):
        profiles.msl_altitudes = old_profiles.msl_altitudes

    if rs_mean is not None and hasattr(rs_mean, 'geo_corr'):
        profiles.geo_corr = rs_mean.geo_corr
    elif hasattr(old_profiles, 'geo_corr'):
        profiles.geo_corr = old_profiles.geo_corr

    if rs_constants['installation'] == 'airborne' and len(indices) > 0:
        if hasattr(rs_mean, 'GPS_MSL_Alt'):
            profiles.min_GPS_MSL_Alt = np.min(rs_mean.GPS_MSL_Alt[indices])
            profiles.mean_GPS_MSL_Alt = nanmean(rs_mean.GPS_MSL_Alt[indices])
            profiles.max_GPS_MSL_Alt = np.max(rs_mean.GPS_MSL_Alt[indices])
            profiles.telescope_pointing = np.zeros(1)
        # NOTE(review): profiles.telescope_pointing is only created in the
        # GPS_MSL_Alt branch above; if rs_mean has telescope_pointing but no
        # GPS_MSL_Alt this raises AttributeError -- confirm inputs guarantee
        # both attributes together
        if hasattr(rs_mean, 'telescope_pointing'):
            if (rs_mean.telescope_pointing > .95).all():
                profiles.telescope_pointing[0] = 1
            elif (rs_mean.telescope_pointing < .05).all():
                profiles.telescope_pointing[0] = 0
            else:
                profiles.telescope_pointing[0] = np.NaN

    # accumulate shot counts first -- they normalize all later channels
    accumulate(profiles,
               old_profiles,
               rs_mean,
               indices,
               'seeded_shots',
               pref='mean_',
               filler=hau.T_Array([0]))
    total_seeded_shots = profiles.mean_seeded_shots
    profiles.seeded_shots = total_seeded_shots.copy()
    print 'Total seeded shots for profile =', total_seeded_shots
    accumulate(profiles, old_profiles, rs_mean, indices,
               'transmitted_1064_energy', total_seeded_shots)
    accumulate(profiles, old_profiles, rs_mean, indices, 'transmitted_energy',
               total_seeded_shots)
    # create TZ_Array with time dimension of '1', so hsrl_inversion doesn't choke

    for chan in channel_shorthand.keys():
        accumulate(profiles, old_profiles, rs_mean, indices, chan,
                   total_seeded_shots, mask)

    #compute inverted profiles from mean count profiles
    if rs_Cxx is not None and hasattr(profiles, 'molecular_counts'):
        profiles.inv = cu.hsrl_inversion(profiles, rs_Cxx, rs_constants,
                                         corr_adjusts, processing_defaults)

    elif hasattr(old_profiles, 'inv'):
        profiles.inv = old_profiles.inv

    #adds raw_color_ratio to profiles.inv
    if hasattr(profiles,'inv') and hasattr(profiles,'combined_counts') \
               and hasattr(profiles,'combined_1064_counts'):
        if 0:
            print 'profiles'
            print dir(profiles)
            print 'inv'
            print dir(profiles.inv)

            import matplotlib.pylab as plt
            plt.figure(3333)
            plt.plot(
                profiles.combined_counts[0, :], profiles.inv.msl_altitudes,
                'r', profiles.cross_pol_counts[0, :],
                profiles.inv.msl_altitudes, 'g',
                profiles.combined_counts[0, :] +
                rs_constants['combined_to_cross_pol_gain_ratio'] *
                profiles.cross_pol_counts[0, :], 'c',
                profiles.combined_1064_counts[0, :],
                profiles.inv.msl_altitudes, 'k')
            plt.grid(True)
            ax = plt.gca()
            ax.set_xscale('log')

        profiles.inv.raw_color_ratio = cu.compute_raw_color_ratio(
            profiles, rs_Cxx, rs_constants, corr_adjusts)

        if 0:
            plt.figure(3334)
            plt.plot(profiles.inv.raw_color_ratio[0, :],
                     profiles.inv.msl_altitudes, 'c')
            plt.grid(True)
            ax = plt.gca()
            ax.set_xscale('log')

    #generate klett profiles if requested
    # NOTE(review): this block reads profiles.inv unconditionally; it assumes
    # the inversion above ran (or old_profiles supplied .inv) -- confirm
    if processing_defaults is not None and processing_defaults.get_value(
            'klett', 'enable'):
        # klett ref_altitude is configured in km; convert to meters
        ref_altitude = processing_defaults.get_value('klett',
                                                     'ref_altitude') * 1000.0
        if ref_altitude < profiles.inv.msl_altitudes[0] \
                        or ref_altitude > profiles.inv.msl_altitudes[-1] :
            print
            print
            print 'WARNING---klett ref altitutde=', ref_altitude, ' is not in requested altitudes'
            print 'no klett profile retrieval attempted '
            print
        else:
            # 1064 nm Rayleigh backscatter scaled from 532 nm by (532/1064)^4 = 1/16
            if hasattr(profiles, 'combined_1064_counts'):
                profiles.inv.beta_a_1064_backscat_klett = lu.compute_klett_backscatter(
                    profiles.combined_1064_counts,
                    profiles.inv.beta_r_backscat / 16.0,
                    profiles.inv.msl_altitudes,
                    processing_defaults.get_value('klett', 'lidar_ratio_532'),
                    ref_altitude)
            if hasattr(profiles, 'combined_counts'):
                profiles.inv.beta_a_532_backscat_klett = lu.compute_klett_backscatter(
                    profiles.combined_counts, profiles.inv.beta_r_backscat,
                    profiles.msl_altitudes,
                    processing_defaults.get_value('klett', 'lidar_ratio_532'),
                    ref_altitude)

    # accumulate raw and raw-variance channels used for photon statistics
    for chan in channel_shorthand.keys():
        accumulate(profiles, old_profiles, rs_mean, indices, 'var_raw_' + chan,
                   total_seeded_shots, mask)
        accumulate(profiles, old_profiles, rs_mean, indices, 'raw_' + chan,
                   total_seeded_shots, mask)

    if processing_defaults is not None and processing_defaults.get_value(
            'compute_stats', 'enable'):

        #kludge
        # build a temporary frame exposing summed counts under the names
        # compute_photon_statistics expects
        pf = hau.Time_Z_Group()
        for chan in channel_shorthand.keys():
            if hasattr(profiles, 'sum_var_raw_' + chan):
                setattr(pf, chan, getattr(profiles, 'sum_' + chan))
                setattr(pf, 'var_raw_' + chan,
                        getattr(profiles, 'sum_var_raw_' + chan))

        if rs_Cxx is not None and hasattr(profiles, 'inv'):
            pu.compute_photon_statistics(pf, profiles.inv, rs_Cxx,
                                         rs_constants)

    if rs_Cxx is not None and hasattr(profiles, 'inv'):
        [profiles.inv.optical_depth, profiles.inv.optical_depth_aerosol
               , profiles.inv.mol_norm_index,profiles.inv.mol_ref_aod] = \
                   lu.compute_optical_depth(profiles.inv.Nm \
                   ,profiles.inv.beta_r_backscat\
                   ,profiles.msl_altitudes\
                   ,processing_defaults\
                   ,rs_constants
                   ,profiles.inv.telescope_pointing if hasattr(profiles.inv,'telescope_pointing') else None)

        #add 1064 aerosol backscatter and color ratio to profiles
        if hasattr(profiles, 'combined_counts') and hasattr(
                profiles, 'combined_1064_counts'):
            cu.compute_1064_aerosol_backscatter(profiles, profiles.inv,
                                                processing_defaults,
                                                rs_constants, corr_adjusts)
            cu.compute_color_ratio(profiles.inv)

        # extinction: either Savitzky-Golay filtered or simple bin averaging
        if processing_defaults.get_value('extinction_processing',
                                         'filter_type') == 'savitzky_golay':
            od_threshhold = processing_defaults.get_value(
                'extinction_processing', 'od_threshhold')
            z_window_width = processing_defaults.get_value(
                'extinction_processing', 'alt_window_length')
            order = processing_defaults.get_value('extinction_processing',
                                                  'polynomial_order')

            min_filter_alt = processing_defaults.get_value(
                'extinction_processing', 'min_alt')
            if min_filter_alt < profiles.msl_altitudes[0]:
                min_filter_alt = profiles.msl_altitudes[0]

            adaptive = processing_defaults.get_value('extinction_processing',
                                                     'adaptive')

            # profiles are a single averaged record, so no time filtering
            t_window_width = 0.0
            if profiles.inv.times.size == 0:
                pass
            elif hasattr(rs_mean, 'telescope_pointing'):
                profiles.inv = filtered_extinction(
                    profiles.inv, profiles.msl_altitudes, min_filter_alt,
                    od_threshhold, t_window_width, z_window_width, order,
                    adaptive, rs_mean.telescope_pointing)
            else:
                profiles.inv = filtered_extinction(
                    profiles.inv, profiles.msl_altitudes, min_filter_alt,
                    od_threshhold, t_window_width, z_window_width, order,
                    adaptive)

        else:
            bin_delta = processing_defaults.get_value('extinction_processing',
                                                      'bin_delta')
            bin_delta = int(bin_delta)
            pts_to_ave = processing_defaults.get_value('extinction_processing',
                                                       'ext_pts_to_ave')

            if hasattr(rs_mean, 'telescope_pointing'):
                profiles.inv = pu.compute_extinction(
                    profiles.inv, profiles.msl_altitudes, bin_delta,
                    pts_to_ave, rs_mean.telescope_pointing)
            else:
                profiles.inv = pu.compute_extinction(profiles.inv,
                                                     profiles.msl_altitudes,
                                                     bin_delta, pts_to_ave)

    # raw profiles--ave photons/bin/laser_pulse
    #for chan in channel_shorthand.keys():
    #    accumulate(profiles,old_profiles,rs_raw,raw_indices,chan,raw_total_seeded_shots,pref='raw_')

    if 0:
        import matplotlib.pylab as plt
        plt.figure(898989)
        plt.plot(np.nanmean(rs_mean.combined_1064_counts,
                            0), rs_mean.msl_altitudes, 'k',
                 np.nanmean(rs_mean.combined_counts, 0), rs_mean.msl_altitudes,
                 'r')
        ax = plt.gca()
        ax.set_xscale('log')

    # dark corrected raw profiles
    # for each channel: accumulate its dark counts, then subtract them from
    # the accumulated raw counts to produce the 'dc_' corrected profile
    for chan, shorthand in channel_shorthand.items():
        dcchan = shorthand + '_dark_counts'
        correc = 'dc_' + chan
        source = 'raw_' + chan
        if accumulate(profiles,
                      old_profiles,
                      rs_mean,
                      indices,
                      dcchan,
                      total_seeded_shots,
                      extravars=[correc]):
            if hasattr(profiles, source):
                #print 'Applying dark count from raw frame to raw counts*******'
                setattr(profiles, correc,
                        getattr(profiles, source) - getattr(profiles, dcchan))
            elif hasattr(old_profiles, correc):
                setattr(profiles, correc, getattr(old_profiles, correc))
                print 'Copying corrected counts because source doesn\'t exist???? WARNING ***'
                if hasattr(old_profiles, source):
                    setattr(profiles, source, getattr(old_profiles, source))
                else:
                    raise RuntimeError(
                        'Something is wrong with input channels. BUG!')

    if processing_defaults is not None:
        profiles.mol_norm_alt = processing_defaults.get_value(
            'mol_norm_alt', 'meters')
    return profiles
Пример #17
0
def select_raqms_profile(soundings,
                         request_time,
                         requested_altitudes,
                         offset=0):
    """Select the sounding at (or immediately before) request_time from
    soundings and interpolate it onto requested_altitudes.

    soundings           -- Time_Z_Group of raqms model soundings; model level
                           arrays are stored top-down (highest altitude first)
    request_time        -- time for which a sounding is requested
    requested_altitudes -- ascending altitude grid (m) for the output profile
    offset              -- optional index offset from the selected sounding

    Returns a hau.Time_Z_Group holding Z_arrays (temps, dew_points,
    frost_points, pressures, ext_total, ext_salt, ext_dust, wind_spd,
    wind_dir, ...) interpolated onto requested_altitudes, or None when the
    offset pushes the index outside the available soundings.
    Raises RuntimeError when no soundings are available at all.
    """

    if soundings is None or soundings.times.size == 0:
        # message previously said 'select_faqms_profile' (typo)
        raise RuntimeError("select_raqms_profile: No soundings for %s " %
                           request_time)

    import atmospheric_profiles.soundings.sounding_utilities as su
    sounding = hau.Time_Z_Group()

    sounding.altitudes = hau.Z_Array(requested_altitudes)

    # index of the latest sounding at or before request_time, plus offset
    index = sum(soundings.times <= request_time) - 1 + offset
    if index < 0 or index >= len(soundings.times):
        return None

    # NOTE: placeholder zero arrays previously initialized here were
    # unconditionally overwritten by the interpolations below and have been
    # removed as dead code.

    # sounding.times is a single time at this point; it will later be included
    # in a list of all soundings used in this processing request, so it must
    # be defined as a T_Array to be treated properly
    sounding.times = hau.T_Array([soundings.times[index]])
    sounding.latitude = hau.T_Array([soundings.latitude[index]])
    sounding.longitude = hau.T_Array([soundings.longitude[index]])

    # model levels are stored top-down; the [-1::-1] reversal makes the
    # altitude axis ascend as np.interp/splrep require.
    # spline-interpolate pressures onto the lidar bin altitudes
    knots = interpolate.splrep(soundings.model_level_alts[index, -1::-1],
                               soundings.pressures[index, -1::-1])
    sounding.pressures = interpolate.splev(sounding.altitudes, knots, der=0)

    sounding.temps = np.interp(sounding.altitudes,
                               soundings.model_level_alts[index, -1::-1],
                               soundings.temperatures[index, -1::-1])

    # dew/frost points computed at model levels for the selected profile
    dew_pts = su.cal_dew_point(soundings.relative_humidity[index, :],
                               soundings.temperatures[index, :])
    frost_pts = su.cal_frost_point(dew_pts)

    # wind speed and direction from u and v components (bottom-up order)
    u_vel = soundings.u_vel[index, -1::-1]
    v_vel = soundings.v_vel[index, -1::-1]

    wind_spd = np.sqrt(u_vel ** 2 + v_vel ** 2)
    wind_dir = np.arctan(v_vel / u_vel) * 180.0 / np.pi

    # Map the arctan result into compass quadrants. The original code wrote
    # '(u_vel[i] < 0 and v_vel[i]) < 0', which only evaluated to the intended
    # 'u < 0 and v < 0' by a truthiness accident; the parentheses are fixed
    # here without changing behavior.
    for i in range(len(u_vel)):
        if u_vel[i] < 0 and v_vel[i] < 0:
            wind_dir[i] = 180.0 - wind_dir[i]
        elif u_vel[i] > 0 and v_vel[i] > 0:
            wind_dir[i] = 180.0 + wind_dir[i]
        elif u_vel[i] < 0:
            wind_dir[i] = 270.0 - wind_dir[i]
        else:
            # u >= 0 with v < 0 (or u == 0): direction left undefined,
            # matching the original behavior
            wind_dir[i] = np.nan

    # linear interpolation of the remaining fields onto lidar bin altitudes
    sounding.frost_points = np.interp(sounding.altitudes,
                                      soundings.model_level_alts[index, -1::-1],
                                      frost_pts[-1::-1])
    sounding.dew_points = np.interp(sounding.altitudes,
                                    soundings.model_level_alts[index, -1::-1],
                                    dew_pts[-1::-1])
    sounding.ext_total = np.interp(sounding.altitudes,
                                   soundings.model_level_alts[index, -1::-1],
                                   soundings.ext_total[index, -1::-1])
    sounding.ext_salt = np.interp(sounding.altitudes,
                                  soundings.model_level_alts[index, -1::-1],
                                  soundings.ext_salt[index, -1::-1])
    sounding.ext_dust = np.interp(sounding.altitudes,
                                  soundings.model_level_alts[index, -1::-1],
                                  soundings.ext_dust[index, -1::-1])
    sounding.wind_dir = np.interp(sounding.altitudes,
                                  soundings.model_level_alts[index, -1::-1],
                                  wind_dir)
    sounding.wind_spd = np.interp(sounding.altitudes,
                                  soundings.model_level_alts[index, -1::-1],
                                  wind_spd)

    sounding.top = sounding.altitudes[-1]
    sounding.bot = sounding.altitudes[0]

    return sounding
def quick_cal( i2_scan, Cam, Cam_i2a,sounding, wavelength, method_string
               , i2_scan_corr, i2a_scan_corr):
    """quick_cal_stream(i2_scan, Cam, sounding, wavelength, method_string, i2_scan_corr)
       A function which computes hsrl calibration coefficients.
       at the altitudes specified (in meters) within 'alt_vector'
       using a precomputed iodine scan file(i2_scan_file).
       i2_scan(:,:) = input containing i2 scan info
       i2_scan(:,0) = freq (GHz)
       -------(:,1) = combined channel scan
       -------(:,2) = molecular channel scan
       -------(:,3) = theoretical i2 transmission
       -------(:,4) = measured i2/combined
       if bagohsrl with argon buffered i2 cell
       -------(:,5) = molecular i2a/combined
       -------(:,6) = molecular i2a channel scan

       Cam           = aerosol in molecular coefficent
       Cam_i2a       = aerosol in argon-buffered molecular channel (will = None when not present)
       sounding      = return structure from read_sounding_file.py contains temp profile
       wavelength    = laser wavelength (nm)
       method_string = molecular line shape ''maxwellian','tenti_s6','wirtschas'
       i2_scan_corr  = adjustment factor for i2 scan that adjusts
                       i2 molecular channel gain relative to combined channel gain
       i2a_scan_corr = adjustment factor for i2a scan that adjusts
                       i2a molecular channel gain relative to combined channel gain
       rs            = return structure containing calibration coefficents
                       calibration values are returned at altitudes
                       rs_sounding.alititudes[:]
                       
                       1            = particulate in combined channel
                       rs.Cmc[i]    = molecular in combined channel
                       rs.Cmm[i]    = molecular in molecular channel
                       rs.Cam       = particulate in molecular channel
                       rs.beta_r[i] = Rayleigh scattering cross section (1/m)"""

       
    rs = hau.Time_Z_Group()  # calibration structure

    # i2_scan=cal_vec.i2_scan
    i2_scan=i2_scan.copy()

    # if selected use theory*combined as synthetic mol scan
    print method_string
    if method_string.find('i2 theory') >= 0:
        i2_scan[:, 2] = i2_scan[:, 4] * i2_scan[:, 1]
   
    # trim i2 scan to +-4 GHz about line center

    #i2_scan = i2_scan[abs(i2_scan[:, 0]) <= 4, :]

    # rescale i2 molecular component of i2 scan if required
    if i2_scan_corr != 1.0:
        i2_scan[:, 2] = i2_scan[:, 2] * i2_scan_corr
        
    # rescale i2a molecular component of i2 scan if required    
    if i2a_scan_corr != 1.0 and i2_scan.shape[1]>6:
        i2_scan[:, 6] = i2_scan[:, 6] * i2a_scan_corr

    if 0:
       import matplotlib.pylab as plt
       plt.figure(444443)
       plt.plot(i2_scan[:,0],i2_scan[:,1:3],i2_scan[:,0],i2_scan[:,2]/i2_scan[:,1],'k')
       plt.xlabel('freq GHz')
       plt.grid(True)

    # trim i2 scan to +-4 GHz about line center
    i2_scan = i2_scan[abs(i2_scan[:, 0]) <= 4, :]

    if 0:
       
       import matplotlib.pylab as plt
       plt.figure(444444)
       plt.plot(i2_scan[:,0],i2_scan[:,1:3])
       plt.xlabel('freq GHz')
       plt.grid(True)
    
    # compute Rayleigh scattering cross section
    # see R Holz thesis for this equation giving the Rayleigh scatter cross section.
    # beta=3.78e-6*press/temp at a wavelength of 532 nm
    # then rescale to actual wavelength

    nalts = sounding.altitudes.shape[0]
    if not (nalts==sounding.pressures.shape[0] and nalts==sounding.temps.shape[0]):
      print "ERROR: SOMETHIGN BAD ABOUT SOUNDING AT TIME ",sounding.times
      return None
    rs.beta_r = hau.Z_Array(np.zeros( nalts ))
    rs.beta_r[:nalts] = 3.78e-6 * sounding.pressures[:nalts]  / sounding.temps[:nalts]
    rs.beta_r = rs.beta_r * (532.0 / wavelength) ** 4
   
    # spectral width of molecular scattering

    m_bar = 28.97 * 1.65978e-27  # average mass of an air molecule
    sigma_0 = 1 / (wavelength * 1e-9)  # number in 1/meters
    kb = 1.38044e-23  # Boltzmans constant J/(K deg)
    c = 3e8  # speed of light in m/s
    sigma = i2_scan[:, 0] * 1e9 / c  # wavenumber vector

    rs.Cmm     =  hau.Z_Array(np.zeros( nalts ))
    rs.Cmc     =  hau.Z_Array(np.zeros( nalts ))
    sample_altitudes = np.zeros(nalts)   
    #for bagohsrl with argon buffered i2 cell
    if len(i2_scan[0,:]) > 6:   
        rs.Cmm_i2a =  hau.Z_Array(np.zeros( nalts ))
        rs.Cam_i2a =  Cam_i2a
    rs.Cam = Cam 
    
    print 'RBS computed with '+method_string+  ' spectrum'
  
    spectrum_time = datetime.utcnow()        
    
    dz = sounding.altitudes[2]-sounding.altitudes[1]
    delta_i = np.int(np.ceil(300.0/dz))
    nk=int(nalts/delta_i)
    if delta_i>1 and nk<2:# if interpolation is to happen, but not enough to interpolate, force it to the edge
        nk=2
        delta_i=nalts-1
    sample_altitudes = np.zeros(nk)
    rs.msl_altitudes = sounding.altitudes.copy()

    i=0
    k=0
    while k < len(sample_altitudes):
        if not np.isfinite(sounding.temps[i]) or not np.isfinite(sounding.pressures[i]):
          i=i+delta_i
          k=k+1
          continue
        if method_string.find('maxwellian') >= 0:
            norm = m_bar * c ** 2 / (8 * sigma_0 ** 2 * kb
                    * sounding.temps[ i])
            spectrum = np.exp(-norm * sigma ** 2)
        elif method_string.find('tenti_s6') >= 0:
            from tenti_s6 import tenti_s6
            spectrum = tenti_s6(wavelength * 1e-9,sounding.temps[i],
                    sounding.pressures[ i],
                    i2_scan[:, 0] * 1e9)
            
        elif method_string.find('witschas') >= 0:
            spectrum = witschas_spectrum(sounding.temps[i],
                    sounding.pressures[ i], wavelength * 1e-9,
                    i2_scan[:, 0] * 1e9)

        spectrum = spectrum / sum(spectrum)
       
    
        sample_altitudes[k] = sounding.altitudes[i]
        rs.Cmc[ k] = sum(spectrum * i2_scan[:, 1])
        rs.Cmm[ k] = sum(spectrum * i2_scan[:, 2])
        if i2_scan.shape[1]>6:
            rs.Cmm_i2a[ k] = sum(spectrum * i2_scan[:, 6])
        i = i + delta_i
        k = k + 1

    # if Cxx computed at less than full altitude resolution 
    if delta_i >1:
       rs.Cmc = np.interp(sounding.altitudes,sample_altitudes[0:k-1]
                        ,rs.Cmc[0:k-1])
       rs.Cmm = np.interp(sounding.altitudes,sample_altitudes[0:k-1]
                        ,rs.Cmm[0:k-1])
       if hasattr(rs,'Cmm_i2a'):
           rs.Cmm_i2a = np.interp(sounding.altitudes,sample_altitudes[0:k-1]
                        ,rs.Cmm_i2a[0:k-1])
    print method_string, 'computed for ',k-1,' altitudes in '\
          , (datetime.utcnow() - spectrum_time).total_seconds(),' seconds'
    plots = 0
    if plots:
        import matplotlib.pyplot as plt
        plt.figure(600)
        plt.plot(i2_scan[:, 0], spectrum[:])
        fig = plt.grid(True)
        plt.xlabel('Frequency (GHz)')
        plt.ylabel('Intensity')
        # ax=gca()
        # ax.set_yscale('log')

        plt.figure(601)
        plt.plot(rs.Cmm,sounding.altitudes/1000.0,'b'
                 ,rs.Cmm_i2a,sounding.altitudes/1000.0,'r')
        plt.grid(True)
        plt.xlabel('Cmm, Cmm_i2a')
        plt.ylabel('Altitude')
        plt.show()
        
    # add sounding identification to return structure
    rs.sounding_id = sounding.station_id
    rs.sounding_time = sounding.times
  
    return rs
def hsrl_inversion(r_msl, rs_Cxx, rs_constants,corr_adjusts,process_defaults):
    """hsrl_inversion(range_processed_returns,calibration_structure
    ,system_constants)
    
    Invert hsrl raw count data into separate molecular and particulate profiles
    and compute backcatter cross-section and depolarization from the profiles
    returned structure rs always returns:
       times               = times of records
       delta_t             = time seperation between records
       msl_altitudes       = bin altitudes in meters   
       seeded shots        = total number of seeded laser shots in profile
       beta_r_backscat     = Rayleigh scattering cross section from sounding
       Na                  = number of aerosol photon counts
       Nm                  = total number of  molecular counts(including i2a if present)
       Nm_i2               = number of molecular counts in i2 channel
       Na                  = number of particulate counts
       Ncp                 = Ncp photon counts
       linear_depol        = fractional particulate depolarization
       beta_a_backscat     = particulate aerosol backscatter cross-section
       beta_a_backscat_par = par polarization component of backscat cross-section
       beta_a_backscat_perp= perp polarization component of backscat cross-section
       integrated_backscat = cumsum of backscatter cross section in altitude
       
    if i2a channel exists the following are added to rs:
       Nm_i2a = number of molecular photons derived from i2a channel    
       Nm     =  rs.Nm_2i + rs.Nm_i2a
       Ncp_i2a
       beta_a_backscat_par_i2a/Nm
       beta_a_backscat_perp_i2a
       
    if these exist in input file they are added to rs:
       telescope_pointing
       GPS_MSL_Alt
       circular_depol
    """

    # output frame inherits the time-axis conventions of the input frame
    rs = hau.Time_Z_Group(like=r_msl)
    # r_msl.molecular_counts is  hau.TZ_Array (2D)
    nalts = r_msl.molecular_counts.shape[1]
    # calibration profiles must span at least nalts bins; if they are short,
    # pad every calibration array of that length by repeating its last value
    if  rs_Cxx.beta_r.shape[0] < nalts:
        print 'hsrl_inversion(): size too small on calibration arrays : rs_Cxx.beta_r = %d vs nalts = %d. padding cal with last value' % \
            (rs_Cxx.beta_r.shape[0], nalts)
        #assert(rs_Cxx.beta_r.shape[0] != nalts)
        os=rs_Cxx.beta_r.shape[0]
        for k,v in vars(rs_Cxx).items():
          # pad only the arrays that share the (short) calibration length
          if hasattr(v,'size') and v.size==os:
            ns=list(v.shape)
            ns[0]=nalts
            tmp=np.zeros(ns,dtype=v.dtype)
            tmp[:]=v[-1]
            tmp[:os]=v
            setattr(rs_Cxx,k,tmp)
    elif rs_Cxx.beta_r.shape[0] > nalts:
        print 'WARNING hsrl_inversion(): size larger on calibration arrays. may be an error : rs_Cxx.beta_r = %d vs nalts = %d' % \
            (rs_Cxx.beta_r.shape[0], nalts)
    rs.times = r_msl.times.copy()
    rs.delta_t = r_msl.delta_t.copy()
    rs.msl_altitudes = r_msl.msl_altitudes.copy()
    rs.seeded_shots=r_msl.seeded_shots.copy()
    if hasattr(r_msl,'telescope_pointing'):
        rs.telescope_pointing=r_msl.telescope_pointing.copy()

    # Rayleigh backscatter cross section profile
    rs.beta_r_backscat =  hau.Z_Array(np.zeros(nalts))
    rs.beta_r_backscat[:nalts] = rs_Cxx.beta_r[:nalts] * 3.0 / (8.0 * np.pi)
    #for normal i2 channel
    # kk is the common factor of the 2x2 channel-unmixing inversion built
    # from the calibration coefficients Cmm, Cmc and Cam
    kk = 1.0 / (rs_Cxx.Cmm[:nalts] - rs_Cxx.Cmc[:nalts] * rs_Cxx.Cam)
    rs.Na =  hau.TZ_Array( kk[:nalts] * (rs_Cxx.Cmm[:nalts] * r_msl.combined_counts\
               - rs_Cxx.Cmc[ : nalts] * r_msl.molecular_counts) )
    rs.Nm =  hau.TZ_Array( kk[:nalts] * (r_msl.molecular_counts \
               - rs_Cxx.Cam * r_msl.combined_counts) )
    rs.Nm_i2 = rs.Nm.copy()
    
    #if data includes an i2a channel we generation a seperate inversion--bagohsrl only
    #systems with i2a channel record linear depolarization
    if hasattr(r_msl,'molecular_i2a_counts') and hasattr(rs_Cxx,'Cmm_i2a'):

            # repeat the channel-unmixing inversion with the i2a calibration
            kk = 1.0 / (rs_Cxx.Cmm_i2a[:nalts] - rs_Cxx.Cmc[:nalts] * rs_Cxx.Cam_i2a)
            rs.Na_i2a = hau.TZ_Array( kk[:nalts] * (rs_Cxx.Cmm_i2a[:nalts]
                 * r_msl.combined_counts - rs_Cxx.Cmc[ : nalts] * r_msl.molecular_i2a_counts) )
            rs.Nm_i2a =  hau.TZ_Array( kk[:nalts] * (r_msl.molecular_i2a_counts \
                    - rs_Cxx.Cam_i2a * r_msl.combined_counts) )
            # combined molecular count = average of i2 and i2a estimates
            rs.Nm = (rs.Nm_i2 + rs.Nm_i2a)/2.0

            #bagohsrl is only hsrl with i2a channel--it measures linear depolarization
            # 0.0035 appears to be an assumed molecular depolarization term --
            # TODO(review): confirm its meaning and give it a named constant
            rs.Ncp = rs_constants['combined_to_cross_pol_gain_ratio'] \
                    * (r_msl.cross_pol_counts
                    - rs_constants['polarization_cross_talk']*corr_adjusts['pol_x_talk']
                    * r_msl.combined_counts) - rs.Nm_i2 * 0.0035 / (1.0 - 0.0035)
            rs.Ncp_i2a = rs_constants['combined_to_cross_pol_gain_ratio'] \
                    * (r_msl.cross_pol_counts
                    - rs_constants['polarization_cross_talk']*corr_adjusts['pol_x_talk']
                    * r_msl.combined_counts) - rs.Nm_i2a * 0.0035 / (1.0 - 0.0035)
           
            if not rs_constants.has_key('no_depol_channel') or rs_constants['no_depol_channel']==0 :
                rs.linear_depol_i2a = rs.Ncp_i2a / rs.Na_i2a
                #compute the linear depolarization as the average of normal and i2a values
                #note these two componets should be almost identical
                rs.linear_depol = (rs.Ncp + rs.Ncp_i2a)/(rs.Na_i2a + rs.Na)
            
                #when backscatter is small linear_depol can become indeterminate--bound values
                rs.linear_depol[rs.linear_depol < 0.0] = 0.0
                rs.linear_depol[rs.linear_depol > 0.6] = 0.6
            else:
                # depol channel disabled in calvals -- report zero depolarization
                rs.linear_depol = np.zeros_like(rs.Nm)
                rs.linear_depol_i2a = np.zeros_like(rs.Nm)
                
            rs.beta_a_backscat_perp_i2a=rs.Na_i2a/rs.Nm_i2a \
                            *rs.linear_depol_i2a*rs.beta_r_backscat
            rs.beta_a_backscat_perp = rs.Na/rs.Nm_i2 \
                            *rs.linear_depol * rs.beta_r_backscat
            
            rs.beta_a_backscat_par_i2a = rs.Na_i2a / rs.Nm_i2a * rs.beta_r_backscat
            rs.beta_a_backscat_par = rs.Na / rs.Nm_i2 * rs.beta_r_backscat

            # average of the two independent (i2 and i2a) backscatter estimates
            rs.beta_a_backscat = 0.5 *(rs.beta_a_backscat_par +rs.beta_a_backscat_perp \
                       + rs.beta_a_backscat_par_i2a + rs.beta_a_backscat_perp_i2a)

            if rs_constants.has_key('no_i2_channel') and rs_constants['no_i2_channel']==1:
                print
                print 'WARNING--I2 channel is not being used in calculations'
                print "calvals has 'no_i2_channel'== 1"
                print
                # fall back to i2a-only products when the i2 channel is disabled
                rs.linear_depol = rs.Ncp_i2a / rs.Na_i2a 
                rs.beta_a_backscat = rs.beta_a_backscat_par_i2a + rs.beta_a_backscat_perp_i2a 
            
            if not process_defaults.enabled('depolarization_is_aerosol_only'):    
                #user wants bulk depolarization aerosol combined with molecular
                #recompute depolarization
                print 'computing bulk depolarization--aerosol and molecular combined'
                rs.linear_depol = rs_constants['combined_to_cross_pol_gain_ratio'] \
                    * (r_msl.cross_pol_counts\
                    - rs_constants['polarization_cross_talk']*corr_adjusts['pol_x_talk']\
                    * r_msl.combined_counts) / r_msl.combined_counts
            #compute circular polarization from linear--good only for vertical pointing systems.    
            rs.circular_depol = 2 * rs.linear_depol / (1 - rs.linear_depol)

    # NOTE(review): 'and' binds tighter than 'or', so this branch is entered
    # whenever rs_Cxx has Cmm_i2a regardless of 'polarization_is_linear' --
    # confirm the precedence is intentional
    elif hasattr(rs_Cxx,'Cmm_i2a') or rs_constants.has_key('polarization_is_linear') \
                and rs_constants['polarization_is_linear']==1:
       if hasattr(rs_Cxx,'Cmm_i2a'):
            print
            print 'hsrl_inversion(): WARNING  i2a counts found, but no calibration'
            print 'computing without i2a channel'
            print
       rs.Ncp = rs_constants['combined_to_cross_pol_gain_ratio'] \
               * (r_msl.cross_pol_counts
               - rs_constants['polarization_cross_talk']*corr_adjusts['pol_x_talk']
               * r_msl.combined_counts) - rs.Nm_i2 * 0.0035 / (1.0 - 0.0035)
       rs.linear_depol = rs.Ncp/rs.Na
            
            #when backscatter is small linear_depol can become indeterminate--bound values
       rs.linear_depol[rs.linear_depol < 0.0] = 0.0
       rs.linear_depol[rs.linear_depol > 0.6] = 0.6

       if not process_defaults.enabled('depolarization_is_aerosol_only'):    
                #user wants bulk depolarization aerosol combined with molecular
                #recompute depolarization 
                rs.linear_depol = rs_constants['combined_to_cross_pol_gain_ratio'] \
                    * (r_msl.cross_pol_counts\
                    - rs_constants['polarization_cross_talk']*corr_adjusts['pol_x_talk']\
                    * r_msl.combined_counts) / r_msl.combined_counts


       #compute circular from linear--good only for vertical pointing system
       rs.circular_depol = 2*rs.linear_depol /(1 - rs.linear_depol)
       
       rs.beta_a_backscat_perp = rs.Na/rs.Nm_i2 \
                            *rs.linear_depol * rs.beta_r_backscat
       # NOTE(review): par divides by rs.Nm while perp divides by rs.Nm_i2;
       # the two are identical in this branch (no i2a inversion ran), but
       # confirm the asymmetry is intentional
       rs.beta_a_backscat_par = rs.Na / rs.Nm * rs.beta_r_backscat
       rs.beta_a_backscat = rs.beta_a_backscat_par +rs.beta_a_backscat_perp
    else: #instrument with no i2a channel and measures circular polarization
        # 0.0074 appears to be an assumed molecular circular-depolarization
        # term -- TODO(review): confirm and name this constant
        rs.Ncp = rs_constants['combined_to_cross_pol_gain_ratio'] \
                * (r_msl.cross_pol_counts\
                - rs_constants['polarization_cross_talk']*corr_adjusts['pol_x_talk']\
                * r_msl.combined_counts) - rs.Nm_i2 * 0.0074 / (1.0 - 0.0074)
        rs.circular_depol = rs.Ncp / rs.Na

        #when Na becomes small, circular_depol may become indeterminate
        # (and don't fault on Nans)
        #rs.circular_depol = np.nan_to_num(rs.circular_depol)
        rs.circular_depol[rs.circular_depol < 0.0] = 0.0
        rs.circular_depol[rs.circular_depol > 3.0] = 3.0

        rs.beta_a_backscat_perp=rs.Na/rs.Nm_i2*rs.circular_depol*rs.beta_r_backscat
        rs.beta_a_backscat_par = rs.Na / rs.Nm_i2 * rs.beta_r_backscat
        rs.beta_a_backscat = rs.beta_a_backscat_par + rs.beta_a_backscat_perp
    
        if not process_defaults.enabled('depolarization_is_aerosol_only'): 
           #user wants bulk depolarization containing both aerosol and molecular
           print 'computing bulk depolarization--air and aerosol together'
           rs.circular_depol = rs_constants['combined_to_cross_pol_gain_ratio'] \
                    * (r_msl.cross_pol_counts\
                    - rs_constants['polarization_cross_talk']*corr_adjusts['pol_x_talk']\
                    * r_msl.combined_counts) / r_msl.combined_counts
        # convert circular depolarization back to linear
        rs.linear_depol = rs.circular_depol / (2
                    + rs.circular_depol)
    #compute integrated backscatter cross section
    rs.integrated_backscat = rs.beta_a_backscat.copy()
    rs.integrated_backscat[np.isnan(rs.integrated_backscat)] = 0.0
    # da = per-bin altitude thickness (m); last bin reuses the previous width
    da=rs.msl_altitudes.copy()
    da[:-1]=da[1:]-da[:-1]
    da[-1]=da[-2]
    tda=hau.TZ_Array(np.transpose(da[:,np.newaxis]*np.ones((1,rs.times.size))))
    rs.integrated_backscat = np.cumsum(rs.integrated_backscat,1) \
                 *tda               

    if hasattr(r_msl,'GPS_MSL_Alt'):
        rs.GPS_MSL_Alt = r_msl.GPS_MSL_Alt

    if hasattr(r_msl,'combined_1064_counts'):
        # apply IR gain calibration to the 1064 nm combined channel
        rs.combined_1064_counts = r_msl.combined_1064_counts\
                              * rs_constants['IR_combined_hi_gain_ratio']

    if hasattr(r_msl,'wfov_extinction_corr'):
        #transf wfov_extinction correction to output structure
        rs.wfov_extinction_corr = r_msl.wfov_extinction_corr
  
    return rs
Example #20
0
def process_spheroid_particle(rs_inv,
                              rs_radar,
                              particle_parameters,
                              lambda_radar,
                              entire_frame,
                              sounding=None,
                              size_dist=None):
    """
            process_spheroid_particle(rs_inv,rs_radar,particle_parameters,lambda_radar,entire_frame,
                              sounding=None,p180_water=None,size_dist=None):
            generate and return the particle measurements based on a given hsrl inverted data,
            radar (and its lambda), and particle parameters dictionary
            """

    #create timez group and add heights
    rs_particle = hau.Time_Z_Group(rs_inv.times.copy(),
                                   timevarname='times',
                                   altname='heights')
    setattr(rs_particle, 'heights', rs_inv.msl_altitudes.copy())
    setattr(rs_particle, 'delta_t', rs_inv.delta_t.copy())

    #remove points where lidar signal is noise dominated by setting to
    #very small value.
    #clipped_beta_a_back=rs_inv.beta_a_backscat.copy()
    #if 0: #if hasattr(rs_inv,'std_beta_a_backscat'):
    #    clipped_beta_a_back[clipped_beta_a_back<(2*rs_inv.std_beta_a_backscat)]=-np.inf
    #else:
    #    print 'No std_beta_a_backscat statistics to filter particle measurements'
    #clipped_beta_a_back[np.logical_not(np.isfinite(rs_inv.beta_a_backscat))]=-np.inf;

    rs_particle.q_backscatter = np.NaN * np.zeros_like(rs_inv.beta_a_backscat)
    rs_particle.phase = np.zeros_like(rs_inv.beta_a_backscat)
    rs_particle.phase[
        rs_inv.linear_depol > particle_parameters['h2o_depol_threshold']] = 1
    rs_particle.phase[np.isnan(rs_inv.beta_a_backscat)] = np.NaN

    #set aspect ratio parameter for ice filled bins
    rs_particle.zeta = np.ones(rs_inv.beta_a_backscat.shape)
    rs_particle.zeta[rs_inv.linear_depol > particle_parameters['h2o_depol_threshold']] \
                  = particle_parameters['zeta']

    print 'Extinction cross section for particle size calculations derived from ' \
                ,particle_parameters['ext_source']
    print 'Extinction due nonprecipitating aerosols = '\
                ,particle_parameters['background_aerosol_bs'],'1/(m sr)'

    #store the mask field with the particle info
    rs_particle.qc_mask = rs_inv.qc_mask.copy()

    clipped_beta_a_backscat = rs_inv.beta_a_backscat.copy()
    copy_radar_backscatter = rs_radar.Backscatter.copy()
    #clipped_beta_a_backscat = copy_beta_a.copy()
    clipped_beta_a_backscat = clipped_beta_a_backscat \
              - particle_parameters['background_aerosol_bs']
    clipped_beta_a_backscat[clipped_beta_a_backscat < 0] = np.NaN

    #create an empty mode_diameter array
    rs_particle.mode_diameter = np.zeros_like(rs_inv.beta_a_backscat)

    #create an empty array for extinction--used only for particle calculations
    #bs_ratio_to_dmode will return extinction cross section in clipped_beta_a
    clipped_beta_a = np.NaN * np.zeros_like(rs_inv.beta_a_backscat)

    #water
    #compute mode diameter, extinction cross section, and backscatter efficeincy
    #from radar and lidar backscatter cross sections using mie theory and assumed
    #size distribution to predict mode diameter and q_backscatter for points
    #identified as water.
    if particle_parameters['radar_model'] == "Mie":
        rs_particle.mode_diameter, clipped_beta_a, rs_particle.q_backscatter\
                  ,rs_particle.dstar \
              = size_dist.dmode_from_radar_lidar_mie(copy_radar_backscatter\
                  ,clipped_beta_a_backscat)

    else:
        #use only Rayliegh approx solution--particle_parameter['radar_model']=="Rayleigh"
        #mode diameter is computed for all points assuming everything is water
        #subsequent calculation will replace ice phase points.
        if particle_parameters['ext_source'] == 'ext':
            clipped_beta_a = rs_inv.extinction_aerosol.copy()
        elif particle_parameters['ext_source'] == 'bs/p180':
            clipped_beta_a = clipped_beta_a_backscat / particle_parameters[
                'p180_water']
        else:
            print 'particle_parameters=', particle_parameters[
                'ext_source'], ' not supported'
            print 'in spheroid_particle_processing'
            print j
        clipped_beta_a[np.isnan(clipped_beta_a_backscat)] = np.NaN
        phase = np.zeros_like(rs_inv.beta_a_backscat)
        zeta = np.ones_like(rs_inv.beta_a_backscat)

        rs_particle.mode_diameter = size_dist.dmode_from_lidar_radar_rayleigh(
            rs_particle.mode_diameter, clipped_beta_a, copy_radar_backscatter,
            zeta, phase)

    #ice
    #compute extinction cross section for ice points using backscatter phase function
    clipped_beta_a[rs_particle.phase==1] = \
        clipped_beta_a_backscat[rs_particle.phase==1]/particle_parameters['p180_ice']
    zeta = np.zeros_like(clipped_beta_a)
    zeta[rs_particle.phase == 1] = particle_parameters['zeta']

    #derive mode_diameter directly from radar backscatter and lidar extinction
    #cross sections for parts of image populated by ice
    rs_particle.mode_diameter[rs_particle.phase==1] = size_dist.dmode_from_lidar_radar_rayleigh(\
        rs_particle.mode_diameter[rs_particle.phase==1] \
        ,clipped_beta_a[rs_particle.phase==1],copy_radar_backscatter[rs_particle.phase==1]\
        ,zeta[rs_particle.phase==1],rs_particle.phase[rs_particle.phase==1])

    #creates effective_diameter_prime array from mode diameter
    rs_particle.effective_diameter_prime = \
      size_dist.deff_prime(rs_particle.mode_diameter,rs_particle.phase,zeta)

    rs_particle.effective_diameter = size_dist.eff_diameter(\
                          rs_particle.mode_diameter,rs_particle.phase)

    rs_particle.mean_diameter = size_dist.mean_diameter(\
                          rs_particle.mode_diameter,rs_particle.phase)

    #compute liquid water content for bins with phase == 0
    #bins with phase > 0 will return with NaN's
    if particle_parameters['radar_model'] == "Mie":
        rs_particle.LWC = su.liquid_water_content_mie(
            rs_particle.effective_diameter, clipped_beta_a,
            rs_particle.q_backscatter)
        rs_particle.p180_extinction = rs_inv.beta_a_backscat / rs_particle.q_backscatter

    else:
        if particle_parameters['ext_source'] == 'bs/p180':
            rs_particle.extinction_aerosol = rs_inv.beta_a_backscat / particle_parameters[
                'p180_water']
            clipped_beta_a = rs_particle.extinction_aerosol.copy()
        else:
            clipped_beta_a = rs_inv.extinction_aerosol.copy()
        clipped_beta_a[np.isnan(clipped_beta_a_backscat)] = np.NaN
        rs_particle.LWC = np.NaN * np.zeros_like(
            rs_particle.effective_diameter)
        su.liquid_water_content_ext_approx(rs_particle.LWC,
                                           rs_particle.effective_diameter,
                                           clipped_beta_a, rs_particle.phase)
        rs_particle.p180_extinction = rs_inv.beta_a_backscat / particle_parameters[
            'p180_water']

    rs_particle.extinction_aerosol = rs_inv.extinction_aerosol.copy()
    #compute ice water water content for bins with phase > 0 (kg/m^3)
    #return in LWC array bins with phase > 0
    su.ice_water_content(rs_particle.LWC, rs_particle.effective_diameter,
                         clipped_beta_a, rs_particle.phase)

    if hasattr(rs_radar, 'vertically_averaged_doppler'):
        rs_radar.raw_MeanDopplerVelocity = rs_radar.MeanDopplerVelocity.copy()
        motion_correction = np.transpose(rs_radar.vertically_averaged_doppler\
                            *np.transpose(np.ones_like(rs_radar.MeanDopplerVelocity)))
        rs_radar.MeanDopplerVelocity -= motion_correction

    if sounding != None:
        s_time = datetime.utcnow()

        rs_particle.rw_fall_velocity,rs_particle.mw_fall_velocity \
             ,rs_particle.model_spectral_width,rs_particle.nw_fall_velocity\
             = su.weighted_fall_velocity(
             rs_particle.mode_diameter
            ,particle_parameters
            ,rs_particle.zeta
            ,sounding.temps
            ,sounding.pressures
            ,rs_particle.phase,size_dist)
        print 'time for fall_velocity = ', datetime.utcnow() - s_time

    # compute precip rate (m/s) #rain_rate = 1/density
    # (m^3/kg) * LWC (kg/m^3) * fall_velocity (m/s) #using
    # Doppler velocity rs_particle.hsrl_radar_dv_precip_rate =
    # 0.001 * rs_particle.LWC * rs_radar.MeanDopplerVelocity

    #using raw Doppler to give precip rate in m/s
    rs_particle.hsrl_radar_dv_precip_rate = 0.001 * rs_particle.LWC * rs_radar.MeanDopplerVelocity
    #remove points with Doppler folding
    rs_particle.hsrl_radar_dv_precip_rate[
        rs_radar.MeanDopplerVelocity < -2.0] = np.NaN

    if sounding != None:
        #using modeled mass weighted velocity and dividing by the density of water,
        #                              1000 kg/m^3, to give precip_rate in m/s
        rs_particle.hsrl_radar_precip_rate = 0.001 * rs_particle.LWC * rs_particle.mw_fall_velocity

    #retype all these fields to a proper TZ_Array
    #for f in ['effective_diameter_prime']:
    for f in vars(rs_particle).keys():
        v = getattr(rs_particle, f)
        if isinstance(v, hau.Z_Array):
            continue  #properly typed. continue
        elif isinstance(v, np.ndarray):
            if len(v.shape) == 2:
                setattr(rs_particle, f, hau.TZ_Array(v))
            elif len(v.shape) == 1:
                print '1 Dimensional Variable ' + f + ' will be changed to a T_Array. FIXME!!!!'
                setattr(rs_particle, f, hau.T_Array(v))
            else:
                raise RuntimeError(
                    "I don't know what to type particle array " + f +
                    " with dimensions " + repr(v.shape))
        else:
            pass  #not an array type. should be safe to ignore
    """
            #compute fitting error of computed radar weighted fall velocity
            #to measured Doppler veleocity.
            temp = rs_radar.Backscatter.copy()
            temp[np.isnan(rs_radar.Backscatter)]=0.0
            temp[rs_inv.msl_altitudes>400]=0.0
            fitting_error = np.sqrt(nanmean((rs_particle.rw_fall_velocity[temp >1e-9] \
                                  -  rs_radar.MeanDopplerVelocity[temp >1e-9])**2))
            print
            print rs_radar.times[0],'  --->  ' ,rs_radar.times[-1]
            print 'fitting_error (m/s)= ',fitting_error
            print
            """

    'rs_particle--spher'
    print dir(rs_particle)
    return rs_particle
Example #21
0
def accumulate_raman_inverted_profiles(consts,
                                       rs_mean,
                                       qc_mask,
                                       old_profiles,
                                       process_control=None,
                                       corr_adjusts=None):
    indices = np.arange(rs_mean.times.shape[0])
    if len(indices) == 0:
        return old_profiles

    if qc_mask is not None and processing_defaults.get_value(
            'averaged_profiles', 'apply_mask'):
        #make mask array with NaN's for array elements where bit[0] of qc_mask==0
        #all other elements of mask = 1
        mask = (np.bitwise_and(qc_mask, 1)).astype(
            'float')  #mask is float to allow use of NaN values
        mask[mask == 0] = np.NaN

        print 'qc_mask applied to time averaged profiles vs altitude'
    else:
        #set mask == 1
        mask = None
        #for sh in channel_shorthand.keys():
        #
        # if hasattr(rs_mean,sh):
        #        mask = np.ones_like(getattr(rs_mean,sh))
        #        break
        #if mask is None:
        #    mask = np.ones((rs_mean.times.shape[0],0))
        print 'qc_mask has not been applied to time averaged profiles'

    #energies=('transmitted_1064_energy','transmitted_energy')

    profiles = hau.Time_Z_Group(can_append=False, altname='altitudes')

    profiles.hist = hau.Time_Z_Group()
    ft = None
    tc = 0
    #tv=0
    lt = None
    if old_profiles is not None:
        ft = old_profiles.hist.ft
        tc = old_profiles.hist.tc
        #tv=old_profiles.hist.tv
        lt = old_profiles.hist.lt
    if len(indices) > 0:
        if ft is None:
            ft = rs_mean.times[indices][0]
        lt = rs_mean.times[indices][-1] + timedelta(
            seconds=rs_mean.delta_t[indices][-1]
            if not np.isnan(rs_mean.delta_t[indices][-1]) else 0)
        for x in indices:
            if rs_mean.times[x] is not None:
                tc = tc + 1
                #tv=tv+(rs_mean.times[x]-ft).total_seconds()
    if tc > 0:
        profiles.times = hau.T_Array([ft])  #+timedelta(seconds=tv/tc), ])
        profiles.start = ft
        profiles.width = lt - ft
        profiles.delta_t = hau.T_Array([profiles.width.total_seconds()])
    else:
        profiles.times = hau.T_Array([])
        profiles.start = ft
        profiles.width = timedelta(seconds=0)
        profiles.delta_t = hau.T_Array([])
    profiles.start_time = ft
    profiles.end_time = lt
    profiles.hist.ft = ft
    profiles.hist.tc = tc
    #profiles.hist.tv=tv
    profiles.hist.lt = lt
    if rs_mean is not None and hasattr(rs_mean, 'altitudes'):
        profiles.altitudes = rs_mean.altitudes.copy()
    elif hasattr(old_profiles, 'altitudes'):
        profiles.altitudes = old_profiles.altitudes

    #FIXME need to accumulate the inverted products here, so here's a hack
    interval = hau.Time_Z_Group()
    interval.intervals = hau.T_Array(np.ones(rs_mean.times.shape))

    accumulate(profiles, old_profiles, interval, indices, 'intervals')
    interval_count = profiles.intervals
    print 'Total intervals for profile =', interval_count
    for k, v in vars(rs_mean).items():
        if k.startswith('_') or k in ('times', 'start', 'width', 'delta_t'):
            continue
        if not isinstance(v, hau.T_Array):
            continue
        if isinstance(v, hau.TZ_Array):
            continue
        accumulate(profiles, old_profiles, rs_mean, indices, k, interval_count)
    # create TZ_Array with time dimension of '1', so hsrl_inversion doesn't choke
    for k, v in vars(rs_mean).items():
        if k.startswith('_'):
            continue
        if not isinstance(v, hau.TZ_Array):
            continue
        if len(v.shape) != 2:
            continue
        accumulate(profiles, old_profiles, rs_mean, indices, k, interval_count,
                   mask)

    return profiles
Example #22
0
    def read(self, *args, **kwargs):
        """Yield, exactly once, a hau.Time_Z_Group built from the netCDF source.

        Variables are copied out of self.data (descending into self.group if
        set) according to self.xlateTable, which maps netCDF variable names to
        dotted destination paths in the returned structure.  The result is
        cached in self.val, so subsequent calls yield the same object.

        Any positional/keyword arguments are accepted for interface
        compatibility but unused (a message is printed).
        """
        # cached result from a previous call -- yield it again and stop
        if self.val!=None:
            yield self.val
            return
        if len(args):
            print 'Unused args = ',args
        if len(kwargs):
            print "Unused kwargs = ",kwargs
        ret=hau.Time_Z_Group()
        # drop the default 'times' attribute; it is filled in below from
        # whichever variables actually carry a time axis
        delattr(ret,'times')

        # descend into the requested netCDF group path, if any
        data=self.data
        if self.group is not None:
            for g in self.group.split('/'):
                data=data.groups[g]

        # establish the time origin: either epoch-seconds in 'base_time' or an
        # ISO timestamp in 'time_coverage_start' (the latter wins if both exist)
        if 'base_time' in data.variables:
            epochtime=datetime.datetime(1970,1,1,0,0,0)
            basetime=epochtime+datetime.timedelta(seconds=long(data.variables['base_time'].getValue()))
            print 'basetime is ',basetime
        if 'time_coverage_start' in data.variables and 'time_coverage_end' in data.variables:
            basetime=datetime.datetime.strptime(''.join(data.variables['time_coverage_start'][:20]), '%Y-%m-%dT%H:%M:%SZ')
        # copy each translated variable into the destination structure,
        # creating intermediate Time_Z_Groups for dotted paths as needed
        for v in self.xlateTable:
            vari=data.variables[v]
            parts=self.xlateTable[v].split('.')
            finalpart=parts[-1]
            parts=parts[0:(len(parts)-1)]
            base=ret
            for i in parts:
                if not hasattr(base,i):
                    setattr(base,i,hau.Time_Z_Group())
                    delattr(getattr(base,i),'times')
                base=getattr(base,i)
            # read the full extent of the variable, whatever its rank
            idx=[slice(None) for x in range(len(vari.shape))]
            if len(idx)==0:
                newval=vari.getValue()
            elif len(idx)==1:
                newval=numpy.array(vari[idx[0]])
            else:
                newval=numpy.array(vari[tuple(idx)])
            # honor the 'dpl_py_type' attribute: convert seconds-since-basetime
            # into matplotlib date numbers or python datetimes (values >= 1e35
            # are treated as fill and become nan/None)
            if 'dpl_py_type' in vari.ncattrs():
                dpltype=vari.dpl_py_type[:]
                if dpltype=='matplotlib_num2date':
                    newval=numpy.array([(date2num(basetime+datetime.timedelta(seconds=float(d))) if d<1e35 else float('nan')) for d in newval])
                if dpltype=='python_datetime':
                    newval=numpy.array([(basetime+datetime.timedelta(seconds=float(d)) if d<1e35 else None) for d in newval])
            setattr(base,finalpart,newval)
        # collect candidate axis arrays (times / delta_t / altitude axes),
        # indexed by name and by length, so subgroups that lack them can be
        # filled in with a same-length axis found elsewhere
        addpartsbylength=dict()
        fillins=(('times',),
                 ('delta_t',),
                 ('msl_altitudes','heights'))
        for ka,f in vars(ret).items():
            for ks in fillins:
                for k in ks:
                    if hasattr(f,k):
                        t=getattr(f,k)
                        if t.size==0:
                            continue
                        for nk in ks:
                            if nk not in addpartsbylength:
                                addpartsbylength[nk]=dict()
                            if t.size not in addpartsbylength[nk]:
                                addpartsbylength[nk][t.size]=t
        for k,f in vars(ret).items():
            # infer each subgroup's (time, altitude) lengths as the median of
            # the first two dimensions of its 2-D members
            dim_preferredsizes=[[],[]]
            try:
                for ak,av in vars(f).items():
                    if hasattr(av,'shape') and len(av.shape)>=2:
                        for x in range(2):
                            dim_preferredsizes[x].append(av.shape[x])
            except TypeError:
                # f is not a group-like object -- nothing to fill in
                continue
            if len(dim_preferredsizes[0])==0 or len(dim_preferredsizes[1])==0:
                continue
            dim_preferred=[]
            for x in range(2):
                dim_preferred.append(int(numpy.median(dim_preferredsizes[x])))            
            # attach a matching-length axis for any fill-in name the subgroup
            # is missing (self-described in the original as a TOTAL HACK)
            for nk,kv in addpartsbylength.items(): #TOTAL HACK
                x=None if not hasattr(f,nk) else getattr(f,nk)
                if x is None or not hasattr(x,'shape') or x.size==0:
                    try:
                        # time-like names match dimension 0, altitude-like
                        # names match dimension 1
                        if nk in ('times','delta_t'):
                            dimpreferredidx=0
                        else:
                            dimpreferredidx=1
                        count=dim_preferred[dimpreferredidx]
                        if count in kv:
                            setattr(f,nk,kv[count])
                    except AttributeError:
                        pass
        # cache and yield -- this generator runs its body only once
        self.val=ret
        yield ret #this means this operates as an iterator that runs once
Example #23
0
def read_raqms_file(instrument, start_time):
    """Read one day's raqms model file and return profiles bracketing start_time.

    instrument - instrument name used to locate the file (e.g. 'gvhsrl')
    start_time - datetime object; profiles at/after this time are returned,
                 plus the single profile immediately preceding start_time.

    Returns a hau.Time_Z_Group with T_Array/TZ_Array members (pressures,
    temperatures, extinctions, winds, relative_humidity, model_level_alts
    converted to meters, times) or None when no raqms file can be found
    for the requested day.
    """

    raqms = hau.Time_Z_Group(altname='model_level_alts')

    filename = find_raqms_filename(instrument, start_time)
    if not filename:
        return None

    nc = Dataset(filename, 'r')
    times = getAll(nc, 'time')  # seconds from start of day
    pressures = getAll(nc, 'pressure')
    temperatures = getAll(nc, 'temperature')
    model_level_alts = getAll(nc, 'altitude')  # km -- converted below

    relative_humidity = getAll(nc, 'rh')
    latitude = getAll(nc, 'lat')
    longitude = getAll(nc, 'lon')
    u_vel = getAll(nc, 'uvel')
    v_vel = getAll(nc, 'vvel')
    ext_total = getAll(nc, 'ext_tot')
    ext_dust = getAll(nc, 'ext_dust')
    ext_salt = getAll(nc, 'ext_salt')

    #convert raqms seconds from start of day to python datetimes
    base_time = datetime(start_time.year, start_time.month, start_time.day, 0,
                         0, 0)
    times = hau.T_Array(
        [base_time + timedelta(seconds=float(x)) for x in times])

    assert (times.size > 0)

    #select all profiles after start_time plus the one immediately before it
    selectedMask = (times > start_time)
    for i, x in enumerate(selectedMask):
        if x:
            if i > 0:
                selectedMask[i - 1] = True
            break
    #always keep the final profile so the requested interval has an end point
    selectedMask[-1] = True

    selectedTimes = np.arange(times.size)[selectedMask]

    raqms.latitude = hau.T_Array(latitude[selectedTimes])
    raqms.longitude = hau.T_Array(longitude[selectedTimes])
    raqms.pressures = hau.TZ_Array(pressures[selectedTimes, :])
    raqms.temperatures = hau.TZ_Array(temperatures[selectedTimes, :])
    raqms.ext_total = hau.TZ_Array(ext_total[selectedTimes, :])
    raqms.ext_dust = hau.TZ_Array(ext_dust[selectedTimes, :])
    raqms.ext_salt = hau.TZ_Array(ext_salt[selectedTimes, :])
    raqms.relative_humidity = hau.TZ_Array(relative_humidity[selectedTimes, :])
    raqms.u_vel = hau.TZ_Array(u_vel[selectedTimes, :])
    raqms.v_vel = hau.TZ_Array(v_vel[selectedTimes, :])
    #model altitudes arrive in km -- convert to meters
    raqms.model_level_alts = hau.TZ_Array(model_level_alts[selectedTimes, :] *
                                          1000.0)
    raqms.times = times[selectedTimes]

    return raqms
Пример #24
0
    def get_data_at_index(struct,t_index,z_index):
        """Extract single-point values of all retrieval variables at array
           index (t_index, z_index) and return them packed as 1x1 arrays in
           a hau.Time_Z_Group, along with the particle parameter dict."""

        # unpack the sub-frames of the processing structure as dictionaries
        rs_init              = struct['rs_init'].__dict__
        rs_inv               = struct['rs_inv'].__dict__
        rs_mmcr              = struct['rs_mmcr'].__dict__
        rs_spheroid_particle = struct['rs_spheroid_particle'].__dict__

        sounding            = rs_init['sounding']
        particle_parameters = rs_spheroid_particle['particle_parameters']

        ptv = hau.Time_Z_Group()

        def _cell(value):
            # wrap a scalar as a (1,1) float array (NaN-initialized template)
            box = np.NaN * np.zeros((1, 1))
            box[0, 0] = value
            return box

        # lidar inversion quantities
        ptv.beta_ext_lidar  = _cell(rs_inv['beta_a'][t_index, z_index])
        ptv.beta_a_backscat = _cell(rs_inv['beta_a_backscat'][t_index, z_index])

        # radar quantities; extinction from backscatter via 8*pi/3
        ptv.beta_ext_radar       = _cell(rs_mmcr['Backscatter'][t_index, z_index]
                                         * 8 * np.pi / 3.0)
        ptv.radar_spectral_width = _cell(rs_mmcr['SpectralWidth'][t_index, z_index])
        ptv.MeanDopplerVelocity  = _cell(rs_mmcr['MeanDopplerVelocity'][t_index, z_index])

        # spheroid-particle retrieval quantities: (output attr, source key)
        for attr, key in (('zeta', 'zeta'),
                          ('phase', 'phase'),
                          ('eff_diameter_prime', 'effective_diameter_prime'),
                          ('dmode', 'mode_diameter'),
                          ('effective_diameter', 'effective_diameter'),
                          ('mean_diameter', 'mean_diameter'),
                          ('mean_mass_diameter', 'mean_mass_diameter'),
                          ('LWC_ext', 'LWC'),
                          ('hsrl_radar_precip_rate', 'hsrl_radar_precip_rate'),
                          ('rw_fall_velocity', 'rw_fall_velocity'),
                          ('mw_fall_velocity', 'mw_fall_velocity'),
                          ('model_spectral_width', 'model_spectral_width')):
            setattr(ptv, attr, _cell(rs_spheroid_particle[key][t_index, z_index]))

        # placeholder kept for interface compatibility -- never filled here
        ptv.LWC_p180 = np.NaN * np.zeros((1, 1))

        # sounding quantities are 1-element (time-only) arrays
        ptv.temps     = np.NaN * np.zeros(1)
        ptv.pressures = np.NaN * np.zeros(1)
        ptv.temps[0]     = sounding.temps[z_index]
        ptv.pressures[0] = sounding.pressures[z_index]

        return ptv,particle_parameters
def process_mass_dimension_particle(rs_inv, rs_radar, particle_parameters,
                                    lambda_radar, entire_frame):
    """
            generate and return the particle measurements based on a given hsrl inverted data, radar (and its lambda), and particle parameters dictionary
            """

    ParticleParameters = namedtuple('ParticleParameters',
                                    ','.join(particle_parameters.keys()))
    pparm = ParticleParameters(
        **particle_parameters
    )  #pparm is a structure of the particle parameters, instead of a dictionary 'particle_parameters'

    #create timez group and add heights
    rs_particle = hau.Time_Z_Group(rs_inv.times.copy(),
                                   timevarname='times',
                                   altname='heights')
    setattr(rs_particle, 'heights', rs_inv.msl_altitudes.copy())

    #remove points where lidar signal is noise dominated by setting to
    #very small value.
    clipped_beta_a_back = rs_inv.beta_a_backscat.copy()
    if hasattr(rs_inv, 'std_beta_a_backscat'):
        clipped_beta_a_back[clipped_beta_a_back < (
            2 * rs_inv.std_beta_a_backscat)] = -np.inf
    else:
        print 'No std_beta_a_backscat statistics to filter particle measurements'
    clipped_beta_a_back[np.logical_not(np.isfinite(
        rs_inv.beta_a_backscat))] = -np.inf
    used_beta_a = None
    if hasattr(rs_inv, 'beta_a'):
        used_beta_a = rs_inv.beta_a.copy()
        #mask beta_a?

    rs_particle.effective_diameter_prime,used_beta_a = \
        lred.lidar_radar_eff_diameter_prime(
           beta_a_backscat=clipped_beta_a_back
          ,radar_backscat=rs_radar.Backscatter
          ,depol=rs_inv.linear_depol
          ,h2o_depol_threshold=particle_parameters['h2o_depol_threshold']
          ,beta_a=used_beta_a
          ,p180_ice=particle_parameters['p180_ice']
          ,lambda_radar=lambda_radar)

    rs_particle.effective_diameter,rs_particle.num_particles,rs_particle.LWC,rs_particle.mean_diameter=\
         lred.d_eff_from_d_eff_prime(
             rs_particle.effective_diameter_prime
             ,clipped_beta_a_back
             ,rs_inv.linear_depol
             ,used_beta_a
             ,pparm)

    #rs_particle.hsrl_radar_rain_rate = 3600 * 10* .0001 * rs_particle.LWC * rs_radar.MeanDopplerVelocity
    #convert to mks units LWC kg/m^3, Doppler m/s, rain_rate m/s
    rs_particle.hsrl_radar_rain_rate = 0.001 * rs_particle.LWC * rs_radar.MeanDopplerVelocity

    #retype all these fields to a proper TZ_Array
    for f in [
            'hsrl_radar_rain_rate', 'effective_diameter_prime',
            'effective_diameter', 'num_particles', 'LWC', 'mean_diameter'
    ]:
        if hasattr(rs_particle, f):
            setattr(rs_particle, f, hau.TZ_Array(getattr(rs_particle, f)))

    return rs_particle
Пример #26
0
def read_sounding_file(instrument, sounding_type, id, start_time,
                       requested_altitudes):
    """ read_sounding_file([instrument,sounding_type,id,start_time,alt_res,max_alt)   
     returns arrays rs.temps(sounding#, rs.dew_points(sounding#,alt_index),
     rs.wdir(sounding#,alt_index), rs.wspd(sounding#,alt_index) along with several
     scalers with sounding info instrument (e.g. 'ahsrl','gvhsrl','mf2hsrl','nshsrl')
     sounding_type may be radiosonde station id, model identifier, or other instrument
     sounding_type (e.g. 'NOAA','ARM',.......
     sounding id (for sounding_type=NOAA, this a 3-letter e.g. 'MSN')
     start_time first time for which the sounding is needed, provided as matplot time
     requested_altitudes is a vector of altitudes at which sounding values are requested (m) 
     returns temp_sounding(object) with all soundings from the current file after the starting time
     returns the last time at which this sounding can be used as rs.expire_time"""

    import lg_dpl_toolbox.core.archival as hru

    if sounding_type[:].find('NOAA raob') >= 0:
        rs = hau.Time_Z_Group()

        time_struct = start_time
        dir_path = hru.get_path_to_data(instrument, start_time)
        filename = dir_path + '/' + '%4i' % time_struct.year + '/' \
            + '%02i' % time_struct.month + '/sondes.' + id[:] + '.nc'
        print 'sounding file--', filename
        if not os.path.exists(filename):
            return None
            #raise RuntimeError, 'sounding file %s does not exist' \
            #    % filename
        nc = Dataset(filename, 'r')
        times = getAll(nc, 'synTime')

        # combine mandatory and sig height measurements

        heights = np.hstack((getAll(nc, 'htMan'), getAll(nc, 'htSigT')))

        epoch = datetime(1970, 1, 1, 0, 0, 0)
        t_mask = times < 1e36
        for i in range(len(times)):
            t_mask[i] = times[i] < 1e36 and any(heights[i, :] < 1e36)

        times = times[t_mask]

        times = [epoch + timedelta(seconds=soff) for soff in times[:]]

        # select times, one prior to start time --> last profile in file

        #indices = np.arange(len(times))
        #start_index = max(indices[times <= start_time])
        #rs.times = zeros(len(times) - start_index)
        rs.times = np.zeros(len(times))
        #rs.times = times[start_index:]
        #         rs.times = hau.T_Array( rs.times )
        rs.times = times[:]

        wmosta = getAll(nc, 'wmoStat')  # wmo station number
        stalong = getAll(nc, 'staLon')
        stalat = getAll(nc, 'staLat')

        # combine mandatory and sig height measurements

        temps = np.hstack((getAll(nc, 'tpMan'), getAll(nc, 'tpSigT')))
        pressures = np.hstack((getAll(nc, 'prMan'), getAll(nc, 'prSigT')))
        dew_points = np.hstack((getAll(nc, 'tdMan'), getAll(nc, 'tdSigT')))
        wind_dir = np.hstack((getAll(nc, 'wdMan'), getAll(nc, 'wdSigT')))
        wind_spd = np.hstack((getAll(nc, 'wsMan'), getAll(nc, 'wsSigT')))
        heights = heights[t_mask, :]
        temps = temps[t_mask, :]
        pressures = pressures[t_mask, :]
        dew_points = dew_points[t_mask, :]
        wind_dir = wind_dir[t_mask, :]
        wind_spd = wind_spd[t_mask, :]

        [n_soundings, n_heights] = temps.shape

        # defined standard atmosphere climatology for use above highest reported level
        # climate=temp_sounding()

        climate = hau.Time_Z_Group()
        climate.altitudes = np.zeros((n_soundings, 9))
        climate.temps = np.zeros((n_soundings, 9))
        climate.pressures = np.zeros((n_soundings, 9))
        climate.dew_pt = np.zeros((n_soundings, 9))
        climate.wind_spd = np.zeros((n_soundings, 9))
        climate.wind_dir = np.zeros((n_soundings, 9))

        # find the highest valid point in each sounding

        rs.top = np.zeros((n_soundings, ))
        rs.bot = np.zeros((n_soundings, ))

        # climate.altitudes[0,:]=array([10000, 15000, 20000, 25000, 30000, 35000, 40000, 45000, 50000])

        for i in range(n_soundings):
            mask = heights[i, :] <= 50000
            if np.any(mask == True):
                rs.top[i] = max(heights[i, mask])
                rs.bot[i] = min(heights[i, temps[i, :] != 99999])
            else:
                rs.top[i] = 0.0
                rs.bot[i] = 0.0

            rs.top = hau.T_Array(rs.top)
            rs.bot = hau.T_Array(rs.bot)
            climate.altitudes[i, :] = np.array([
                10000,
                15000,
                20000,
                25000,
                30000,
                35000,
                40000,
                45000,
                50000,
            ])
            climate.temps[i, :] = np.array([
                223.1,
                216,
                216,
                221,
                226,
                237,
                251,
                265,
                270,
            ])
            climate.pressures[i, :] = np.array([
                264.3,
                120.45,
                54.75,
                25.11,
                11.71,
                5.58,
                2.77,
                1.43,
                0.759,
            ])
            climate.dew_pt[i, :] = np.NaN

            # don't use climatology lower than 2km above highest valid measurement

            climate.altitudes[climate.altitudes <= rs.top[i] + 2000] = \
                9e36
            climate.temps[climate.altitudes <= rs.top[i] + 2000] = 9e36
            climate.pressures[climate.altitudes <= rs.top[i] + 2000] = \
                9e36

    # stack the climatology on top of the observations

        heights = np.hstack((heights, climate.altitudes))
        temps = np.hstack((temps, climate.temps))
        pressures = np.hstack((pressures, climate.pressures))
        dew_points = np.hstack((dew_points, climate.dew_pt))
        wind_dir = np.hstack((wind_dir, climate.wind_dir))
        wind_spd = np.hstack((wind_spd, climate.wind_spd))
        #print heights.shape
        heights_unsorted = heights.copy()
        temps_unsorted = temps.copy()
        pressures_unsorted = pressures.copy()
        dew_points_unsorted = dew_points.copy()
        wind_dir_unsorted = wind_dir.copy()
        wind_spd_unsorted = wind_spd.copy()

        for i in range(heights_unsorted.shape[0]):
            indices = np.argsort(heights_unsorted[i, :])
            heights[i, :] = heights_unsorted[i, indices]
            temps[i, :] = temps_unsorted[i, indices]
            pressures[i, :] = pressures_unsorted[i, indices]
            dew_points[i, :] = dew_points_unsorted[i, indices]
            wind_dir[i, :] = wind_dir_unsorted[i, indices]
            wind_spd[i, :] = wind_spd_unsorted[i, indices]

    # sort combined file by height and select times of interest
        if 0:
            indices = heights.argsort(axis=1)
            index_a = np.transpose(
                np.transpose(np.ones(heights.shape, dtype=int)) *
                np.arange(heights.shape[0]))
            heights = heights[index_a, indices]
            temps = temps[index_a, indices]
            pressures = pressures[index_a, indices]
            dew_points = dew_points[index_a, indices]
            wind_dir = wind_dir[index_a, indices]
            wind_spd = wind_spd[index_a, indices]

        pressures[heights > 1e5] = np.NaN
        temps[heights > 1e5] = np.NaN
        dew_points[heights > 1e5] = np.NaN

        # interpolate to altitude resolution requested
        # and remove missing data points

        max_alt = requested_altitudes[-1]
        max_bin = requested_altitudes.shape[0]
        alts = requested_altitudes

        n_soundings = len(rs.times)
        #max_bin = round(max_alt / float(alt_res)) + 1

        # create sounding arrays as hau class items

        rs.altitudes = hau.TZ_Array(np.zeros((n_soundings, max_bin)))
        rs.temps = hau.TZ_Array(np.zeros((n_soundings, max_bin)))
        rs.dew_points = hau.TZ_Array(np.zeros((n_soundings, max_bin)))
        rs.pressures = hau.TZ_Array(np.zeros((n_soundings, max_bin)))
        rs.wind_spd = hau.TZ_Array(np.zeros((n_soundings, max_bin)))
        rs.wind_dir = hau.TZ_Array(np.zeros((n_soundings, max_bin)))

        #rs.times =hau.T_Array(np.zeros(n_soundings))
        rs.times = hau.T_Array(times)
        #rs.expire_time =hau.T_Array(np.zeros(n_soundings))
        rs.stalat = hau.T_Array(np.zeros(n_soundings))
        rs.stalong = hau.T_Array(np.zeros(n_soundings))
        rs.wmosta = hau.T_Array(np.zeros(n_soundings))

        rs.stalat[:] = stalat[0]
        rs.stalong[:] = stalong[0]
        rs.wmosta[:] = wmosta[0]
        rs.station_id = id

        # interpolate to lidar altitude scale

        for i in range(n_soundings):

            rs.altitudes[i, :] = alts
            k = i

            rs.temps[i, :] = np.interp(alts, heights[k, temps[k, :] != 99999],
                                       temps[k, temps[k, :] != 99999])

            rs.dew_points[i, :] = np.interp(
                alts, heights[k, dew_points[k, :] != 99999],
                dew_points[k, dew_points[k, :] != 99999])

            #now using spline fit to pressures
            #should use hydrostatic equation to interpolate

            press1 = pressures[k, pressures[k, :] != 99999]
            h1 = heights[k, pressures[k, :] != 99999]
            temp = interpolate.splrep(h1[~np.isnan(press1)],
                                      press1[~np.isnan(press1)])
            rs.pressures[i, :] = interpolate.splev(alts, temp, der=0)

            rs.wind_spd[i, :] = np.interp(alts,
                                          heights[k, wind_spd[k, :] != 99999],
                                          wind_spd[k, wind_spd[k, :] != 99999])
            rs.wind_dir[i, :] = np.interp(alts,
                                          heights[k, wind_dir[k, :] != 99999],
                                          wind_dir[k, wind_dir[k, :] != 99999])
            #if i < n_soundings - 1:
            #    #rs.expire_time[i] = (rs.times[i] + rs.times[i + 1])
            #    rs.expire_time = rs.times[i] + timedelta(seconds=(rs.times[i+1] - rs.times[i]).total_seconds() / 2.0) + timedelta(seconds=60*30)  # add 1/2 hour to point to next sounding
            #else:
            #    #rs.expire_time[i] = rs.times[i] + 0.25 + 1 / 48.0  # new sonde profile expected in 6 1/2 hrs
            #    rs.expire_time = rs.times[i] + timedelta(days=0.25, seconds= 30*60)  # new sonde profile expected in 6 1/2 hrs

    # convert dew point depression to dew point temp

        rs.dew_points = rs.temps - rs.dew_points

        plots = 0  # FIXME
        if plots == 1:
            import matplotlib.pylab as plt
            plt.figure(801)

            plt.plot(temps[0, :], heights[0, :] / 1000, 'r', dew_points[0, :],
                     heights[0, :] / 1000)
            fig = plt.grid(True)
            plt.xlabel('deg-K ')
            plt.ylabel('Altitude MSL (km)')
            plt.title('Temperature, dew point')

            plt.figure(802)
            #set_printoptions(threshold=np.NaN)

            plt.plot(rs.temps[0, :], rs.altitudes[0, :] / 1000, 'r',
                     rs.dew_points[0, :], rs.altitudes[0, :] / 1000)
            fig = plt.grid(True)
            plt.xlabel('deg-K ')
            plt.ylabel('Altitude MSL (km)')
            plt.title('Temperature, dew point')

            plt.figure(803)
            plt.plot(pressures[0, :], heights[0, :] / 1000, 'r')
            fig = plt.grid(True)
            ax = plt.gca()
            #ax.set_xscale('log')
            plt.xlabel('deg-K ')
            plt.ylabel('Altitude MSL (km)')
            plt.title('Pressures')

            plt.figure(804)
            bin_vec = range(len(heights[0, :]))
            heights[heights > 1000] = np.NaN
            plt.plot(heights[0, :] / 1000, bin_vec, 'r')
            fig = plt.grid(True)
            ax = plt.gca()
            #ax.set_xscale('log')
            plt.xlabel('altitudes')
            plt.ylabel('bins')
            plt.title('Heights')

            plt.show()
            raise RuntimeError('deliberate abort')
    else:
        print ' '
        print 'ERROR**************unknown sounding source************'
        print ' '
        rs = []
    return rs
Пример #27
0
    def __call__(self,rs_inv,rs_particle,calvals):#update to process,and return a completed frame
        rs_multiple_scattering=hau.Time_Z_Group(rs_inv.times.copy(),timevarname='times',altname='msl_altitudes')

        N1 = 2  
        N2 = self.multiple_scatter_parameters['highest_order']
        
        start_alt = self.multiple_scatter_parameters['lowest_altitude']
        
        p180_water = self.multiple_scatter_parameters['p180_water']
        p180_ice   = self.multiple_scatter_parameters['p180_ice']
        h2o_depol_threshold = self.multiple_scatter_parameters['h2o_depol_threshold']
       

        p180_ice = self.multiple_scatter_parameters['p180_ice']
        p180_water = self.multiple_scatter_parameters['p180_water']
        second_wavelength =self.multiple_scatter_parameters['second_wavelength']

        wavelength = calvals['wavelength']*1e-9

        #assert(rs_particle!=None or self.multiple_scatter_parameters['particle_info_source'] == 'constant')

       
        
        if 1:   #self.multiple_scatter_parameters['processing_mode'] == '1d':
            if self.multiple_scatter_parameters['particle_info_source'] == 'constant':
                #in this case the mode diameter will be reset to ice or water values from multiple_scattering.json file
                #depending on h2o_depol_threshold
                mode_diameter = None
            else:
                #use lidar-radar retrieved particle sizes
                #print 'particle'
                #print dir(rs_particle)
                mode_diameter = rs_particle.mode_diameter.copy()

            #accumulates sums for 1-d average multiple scatter profile
            self.ms_obj = msu.ms_sums(mode_diameter,rs_inv.beta_a_backscat
                         ,rs_inv.linear_depol,self.multiple_scatter_parameters,self.ms_obj)

            self.ms_obj.beta_water[self.ms_obj.beta_water < 0] = 0.0
            self.ms_obj.beta_ice[self.ms_obj.beta_ice < 0] = 0.0
            
            beta_total = self.ms_obj.beta_water +self.ms_obj.beta_ice
            
            total_samples = self.ms_obj.n_samples_water +self.ms_obj.n_samples_ice
            
            #when no ice or water data points are present averages must be zero
            #compute weighted averages of beta, diameter when ice and water are present
            #ms_obj.beta_water and ms_obj.beta_ice have the sum of beta_backscatter for ice and water
            #self.ms_obj.n_samples_water[self.ms_obj.n_samples_water == 0] = np.infty
            #self.ms_obj.n_samples_ice[self.ms_obj.n_samples_ice == 0] = np.infty
            
            
            #compute ave beta_extinction profile from sum beta_backscat profiles
            extinction_profile = (self.ms_obj.beta_water/p180_water + self.ms_obj.beta_ice/p180_ice)/total_samples
            diameter = (self.ms_obj.diameter_ice + self.ms_obj.diameter_water)/beta_total
            
          
            #convert altitudes into range
            ranges = rs_inv.msl_altitudes.copy()
            zenith_angle = np.abs(calvals['telescope_roll_angle_offset'])*np.pi/180.0
            ranges = ranges/np.cos(zenith_angle)
            start_range = start_alt/np.cos(zenith_angle)
            end_range = rs_inv.msl_altitudes[-1]/np.cos(zenith_angle)
            if start_range >= end_range:
               raise RuntimeError(' start altitude'+str(np.int(start_range))+ ' is above highest data point')
            
            ms_ratios_profile = msu.msinteg(N1,N2,start_range \
                     ,end_range,self.multiple_scatter_parameters['step_size'], extinction_profile, diameter 
                     ,ranges,wavelength,self.multiple_scatter_parameters,calvals) 

            rs_multiple_scattering.ranges = ms_ratios_profile[:,0]
            rs_multiple_scattering.ms_ratios_profile = ms_ratios_profile
            rs_multiple_scattering.extinction_profile = hau.Z_Array(extinction_profile[np.newaxis,:])   
            rs_multiple_scattering.weighted_diameter = hau.Z_Array(diameter[np.newaxis,:])
            rs_multiple_scattering.msl_altitudes = rs_inv.msl_altitudes.copy()
            rs_multiple_scattering.wavelength = wavelength

            #compute multiple scattering for a second wavelength if it is provided
            #assume no change is extinction cross section
            if second_wavelength:
                 ms_ratios_profile_2 = msu.msinteg(N1,N2,start_range \
                     ,end_range,self.multiple_scatter_parameters['step_size'], extinction_profile, diameter 
                     ,ranges,second_wavelength,self.multiple_scatter_parameters,calvals) 

                 rs_multiple_scattering.ms_ratios_profile_2 = ms_ratios_profile_2
                 rs_multiple_scattering.second_wavelength = second_wavelength
           
        if self.multiple_scatter_parameters['processing_mode'] == '2d': #do multiple scatter calculation for all profiles in frame
            print 'begining 2d multiple scatter processing'
            #estimate extinction based on backscatter phase function 
            beta = rs_inv.beta_a_backscat.copy()
            beta = beta/p180_water
            beta[rs_inv.linear_depol>self.multiple_scatter_parameters['h2o_depol_threshold']] \
                 = beta[rs_inv.linear_depol>self.multiple_scatter_parameters['h2o_depol_threshold']]*p180_water/p180_ice
            beta[beta < 0]=0.0
            if self.multiple_scatter_parameters['particle_info_source'] == 'constant':
                mode_diameter = np.ones_like(rs_inv.beta_a_backscat) \
                                * self.multiple_scatter_parameters['mode_diameter_water']
                mode_diameter[rs_inv.linear_depol > self.multiple_scatter_parameters['h2o_depol_threshold']] \
                           = self.multiple_scatter_parameters['mode_diameter_ice']

        
            #convert altitudes into ranges
            ranges = rs_inv.msl_altitudes.copy()
            zenith_angle = np.abs(calvals['telescope_roll_angle_offset'])*np.pi/180.0
            ranges = ranges/np.cos(zenith_angle)
            start_range = start_alt/np.cos(zenith_angle)
            end_range = rs_inv.msl_altitudes[-1]/np.cos(zenith_angle)
            
            for i in range(rs_inv.beta_a_backscat.shape[0]):
                print 'Computing multiple scattering for ' ,rs_inv.times[i]
                if self.multiple_scatter_parameters['particle_info_source'] == 'constant':
                    ratios = msu.msinteg(N1,N2,start_range \
                         ,end_range,self.multiple_scatter_parameters['step_size'],beta[i,:],mode_diameter[i,:] 
                         ,ranges,wavelength,self.multiple_scatter_parameters,calvals)
                else: #get mode diameter from lidar_radar measured values
                   ratios = msu.msinteg(N1,N2,start_range \
                         ,end_range,wavelength,self.multiple_scatter_parameters['step_size']
                         ,beta[i,:],rs_particle.mode_diameter[i,:] 
                         ,ranges,self.multiple_scatter_parameters,calvals)
                   
                #load values into output array 
                if not hasattr(rs_multiple_scattering,'ms_ratio_total'):
                    rs_multiple_scattering.ms_ratio_total = np.zeros((beta.shape[0],ratios.shape[0]))
                
                rs_multiple_scattering.ms_ratio_total[i,:] =nansum(ratios[:,2:],1)
               
                rs_multiple_scattering.msl_altitudes = rs_inv.msl_altitudes.copy()
            rs_multiple_scattering.ms_ratio_total =hau.TZ_Array(rs_multiple_scattering.ms_ratio_total)
                
       
        
        #rs_multiple_scattering.start_altitude = start_alt
       
        return rs_multiple_scattering
Пример #28
0
    def profile(self, request_time, request_lat, request_long, offset=0):
        """returns a profile of temperature,pressure, dew_point, frost point at 
        time, lat, and long extracted from the sounding_archive. If request_lat and
        request_long are empty they are ignored
        request_time = python datetime for reqested sounding
        request_lat  = requested latitude for sounding--ignored if []
        request_lon  = requested longitude for sounding--ignored if []"""

        if self.soundings == None:
            return None

        print 'sounding_type= ', self.sounding_type, request_time
        if self.sounding_type == 'NOAA raob':
            temp_sounding = hau.Time_Z_Group()

            #find correct sounding profile out of archive file
            sounding = hau.selectByTime(self.soundings,
                                        request_time,
                                        offset=offset)
            if sounding is None:
                return None

            sounding.sounding_type = self.sounding_type
            sounding.sounding_id = sounding.station_id
            sounding.latitude = sounding.stalat
            sounding.longitude = sounding.stalong
            sounding.frost_points = cal_frost_point(sounding.dew_points)

            temp_sounding.type = self.sounding_type
            temp_sounding.times = sounding.times
            temp_sounding.altitudes = hau.Z_Array(sounding.altitudes)
            temp_sounding.temps = hau.TZ_Array(sounding.temps[np.newaxis, :])
            temp_sounding.pressures = hau.TZ_Array(
                sounding.pressures[np.newaxis, :])
            temp_sounding.dew_pts = hau.TZ_Array(
                sounding.dew_points[np.newaxis, :])
            temp_sounding.frost_pts = hau.TZ_Array(
                sounding.frost_points[np.newaxis, :])
            temp_sounding.wind_dir = hau.TZ_Array(
                sounding.wind_dir[np.newaxis, :])
            temp_sounding.wind_spd = hau.TZ_Array(
                sounding.wind_spd[np.newaxis, :])
            temp_sounding.wind_spd = hau.TZ_Array(
                sounding.wind_spd[np.newaxis, :])
            temp_sounding.station_id = sounding.station_id
            temp_sounding.top = sounding.top

            sounding.times = sounding.times[0]
            #sounding.expire_time=sounding.expire_time[0]

        elif self.sounding_type == "time curtain" \
                  and self.sounding_id == "raqms":

            temp_sounding = hau.Time_Z_Group()
            sounding = raqms.select_raqms_profile(self.soundings,request_time \
                       ,self.requested_altitudes,offset=offset)
            #                      ,self.max_alt,self.alt_res)
            if sounding == None:
                return None
            sounding.station_id = self.sounding_id

            temp_sounding.type = self.sounding_type
            temp_sounding.times = sounding.times
            temp_sounding.latitude = hau.T_Array(sounding.latitude)
            temp_sounding.longitude = hau.T_Array(sounding.longitude)
            temp_sounding.altitudes = hau.Z_Array(sounding.altitudes)
            temp_sounding.temps = hau.TZ_Array(sounding.temps[np.newaxis, :])
            temp_sounding.pressures = hau.TZ_Array(
                sounding.pressures[np.newaxis, :])
            temp_sounding.dew_pts = hau.TZ_Array(
                sounding.dew_points[np.newaxis, :])
            temp_sounding.frost_pts = hau.TZ_Array(
                sounding.frost_points[np.newaxis, :])
            temp_sounding.wind_dir = hau.TZ_Array(
                sounding.wind_dir[np.newaxis, :])
            temp_sounding.wind_spd = hau.TZ_Array(
                sounding.wind_spd[np.newaxis, :])
            temp_sounding.ext_total = hau.TZ_Array(
                sounding.ext_total[np.newaxis, :])
            temp_sounding.ext_salt = hau.TZ_Array(
                sounding.ext_salt[np.newaxis, :])
            temp_sounding.ext_dust = hau.TZ_Array(
                sounding.ext_dust[np.newaxis, :])
            temp_sounding.wind_spd = hau.TZ_Array(
                sounding.wind_spd[np.newaxis, :])
            temp_sounding.station_id = sounding.station_id
            temp_sounding.top = sounding.top

            sounding.times = sounding.times[0]

            #set up time to read in new sounding (in datetime)
            #sounding.expire_time = sounding.times+timedelta(seconds=5*60)
            #expire time can not be less then request time--raqms file must be missing soundings
            #set expire_time 5 minutes after request time
            #if sounding.expire_time <= request_time:
            #    print "****Warning----missing raqms sounding at ",request_time
            #    print "               using sounding from ",sounding.times
            #    sounding.expire_time = request_time + timedelta(seconds=5*60)

        #remove any negative pressure values
        temp_sounding.pressures[temp_sounding.pressures < 0] = 0.0
        #remove NaN's from pressure and temperature values
        temp_sounding.pressures[np.isnan(temp_sounding.pressures)] = 0.0
        temp_sounding.temps[np.isnan(temp_sounding.temps)] = 1.0
        return sounding
def raman_inversion(mean, consts, Cxx, corr_adjusts, process_control):
    """raman_inversion(mean,consts,Cxx,corr_adjusts,process_control)

    Invert averaged Raman lidar count profiles into aerosol optical
    properties.

    Parameters:
        mean            -- time/altitude group of averaged photon counts;
                           must provide times, delta_t, altitudes and the
                           elastic/nitrogen/depolarization count channels
                           (optionally filtered_nitrogen)
        consts          -- calibration constants dict; reads
                           'elastic_to_n2_gain_ratio' and
                           'wfov_elastic_to_n2_gain_ratio'
        Cxx             -- calibration structure supplying molecular
                           (Rayleigh) cross sections beta_r_355, beta_r_387
        corr_adjusts    -- correction adjustments (unused here; kept for
                           interface compatibility with other inversions)
        process_control -- processing control structure passed to the
                           Savitzky-Golay extinction filter setup

    Returns:
        inv -- hau.Time_Z_Group with aerosol backscatter ratios,
               beta_a_backscat, integrated backscatter, linear_depol,
               extinction, extinction_aerosol and p180 fields.
    """

    inv = hau.Time_Z_Group(like=mean)
    inv.beta_r_355 = Cxx.beta_r_355  #FIXME
    inv.beta_r_387 = Cxx.beta_r_387

    inv.times = mean.times.copy()
    inv.delta_t = mean.delta_t.copy()
    inv.altitudes = mean.altitudes.copy()

    #add backscatter ratios to inv.
    adj = consts['elastic_to_n2_gain_ratio']
    wfov_adj = consts['wfov_elastic_to_n2_gain_ratio']

    #NOTE(review): the combined channel applies no elastic/N2 gain
    #adjustment, unlike the low (wfov_adj) and high (adj) channels below
    #-- confirm this is intentional.
    inv.aerosol_backscatter_ratio = (mean.elastic_counts - mean.nitrogen_counts)\
           /mean.nitrogen_counts
    inv.aerosol_backscatter_ratio_low = (wfov_adj * mean.elastic_counts_low - mean.nitrogen_counts_low)\
          /mean.nitrogen_counts_low
    inv.aerosol_backscatter_ratio_high = (adj * mean.elastic_counts_high - mean.nitrogen_counts_high)\
          / mean.nitrogen_counts_high

    #aerosol backscatter cross section = (3/8pi) * ratio * molecular cross section
    inv.beta_a_backscat_low = 3 * inv.aerosol_backscatter_ratio_low * Cxx.beta_r_355 / (
        8.0 * np.pi)
    inv.beta_a_backscat = 3 * inv.aerosol_backscatter_ratio * Cxx.beta_r_355 / (
        8.0 * np.pi)
    inv.integrated_backscatter = lu.integrated_backscatter(
        inv.beta_a_backscat, inv.altitudes)

    print("")
    print("")
    print('check depol adjustment in raman_inversion***********************************************')
    print("")
    print("")

    inv.linear_depol = mean.depolarization_counts_high / (
        mean.elastic_counts_high - mean.nitrogen_counts_high)

    #raman extinction section

    #filter_params needs to be called only when one of the calling parameters changes
    filter_params = lsge.filter_setup(inv.altitudes, process_control, consts,
                                      mean.delta_t[0])

    #compute range integrated backscatter cross section (NaNs treated as zero)
    temp = inv.beta_a_backscat.copy()
    temp[np.isnan(temp)] = 0.0
    inv.integ_backscat = np.cumsum(temp, 1) * filter_params['dz']

    #preallocate the outputs as all-NaN; filled per time index below
    inv.extinction = np.nan * np.zeros_like(mean.nitrogen_counts)
    ntimes = len(mean.nitrogen_counts[:, 0])

    inv.extinction_aerosol = np.nan * np.ones_like(mean.nitrogen_counts)
    inv.p180 = np.nan * np.ones_like(mean.nitrogen_counts)

    #decide whether the nitrogen profile was already time-filtered upstream
    if ntimes == 1:  #dont care
        alreadyfiltered = hasattr(mean, 'filtered_nitrogen')
        Nm = mean.filtered_nitrogen if alreadyfiltered else mean.nitrogen_counts
    elif hasattr(mean, 'filtered_nitrogen') and not (
            mean.nitrogen_counts == mean.filtered_nitrogen).all():
        alreadyfiltered = True
        Nm = mean.filtered_nitrogen
        print("@@@@@@@@@ SKIPPING ROLLING FILTER. Already done")
    else:
        print('@@@@@ Trying bad ROLLING FILTER. Already done')
        alreadyfiltered = False
        Nm = mean.nitrogen_counts
    if not alreadyfiltered:
        #half width (in time bins) of the rolling filter window;
        #'//' makes the Python-2 truncating division explicit so the value
        #stays an int suitable for range() on either interpreter
        try:
            half_slice = filter_params['t_window_pts'] // 2
        except (KeyError, TypeError):  #was a bare except; only a missing/non-numeric key is expected
            half_slice = 1

    sg_ext = lsge.sg_extinction(filter_params)

    if ntimes == 1:  #for profile call
        sl = np.arange(1)
        inv.extinction[0,:],inv.extinction_aerosol[0,:],inv.p180[0,:] \
                = sg_ext(mean.times[sl],mean.delta_t[sl],Nm[sl,:]
                ,inv.beta_a_backscat[0,:],inv.integ_backscat[0,:],beta_r=Cxx.beta_r_355)
    else:
        #needs to add edge times with padded intervals
        state = []
        if alreadyfiltered:
            fullrange = range(ntimes)
        else:
            fullrange = range(half_slice, ntimes - half_slice - 1)
        for i in fullrange:
            if alreadyfiltered:
                sl = np.arange(i, i + 1)  #already filtered
            else:
                sl = np.arange(i - half_slice, i + half_slice + 1)
            inv.extinction[i,:],inv.extinction_aerosol[i,:],inv.p180[i,:] \
              = sg_ext(mean.times[sl],mean.delta_t[sl],Nm[sl,:]
              ,inv.beta_a_backscat[i,:],inv.integ_backscat[i,:],beta_r=Cxx.beta_r_355,state=state)

    #end of raman extinction section
    return inv
Пример #30
0
class dpl_hsrl_narr(dplkit.role.narrator.aNarrator):
    """ DPL HSRL Narrator Object. should only be created by dpl_hsrl object

        :param params: parameters dictionary
        :param cal_narr: calibration framestream narration object
        :param timesource: time axis generation source (could be synthetic or from another stream)
        :param rawsrc: raw data source. if not provided, will create a lot of it here
        :param lib: raw hsrl reading library object only used if rawsrc is not given
        :param zoo: raw hsrl zookeeper object only used if rawsrc is not given

        exposed attributes:
        - hsrl_cal_stream (calibration stream, can be used for parallel stream collection)

        exposed field type in chain:
        
        - hsrl.dpl.calibration.dpl_calibration_narr
    """
    #def get_process_control(self):
    #    return self.cal_narr.get_process_control()

    @property
    def hsrl_cal_stream(self):
        """calibration framestream narrator, exposed so callers can fork it for parallel collection"""
        return self.cal_narr

    def __init__(self,params,cal_narr,timesource,rawsrc=None,compute_stats=0):
        """store the stream configuration; no data is read until read() is iterated"""
        super(dpl_hsrl_narr,self).__init__(None,cal_narr) #FIXME replace url with some manner of unique path
        #import dpl_calibration
        #self.dpl_calibration=dpl_calibration
        #self.provides=libr.provides
        self.compute_stats=compute_stats
        self.rawsrc=rawsrc
        self.params=params
        self.timesource=timesource
        self.cal_narr=cal_narr

    def __repr__(self):
        return 'DPL HSRL Framestream Narrator (%s)' % (self.params)

    def read(self):
        """ main read generator

        Steps through calibration windows from self.cal_narr, accumulates raw
        HSRL frames from self.rawsrc, builds a requested output time axis
        (from the timesource, a fixed params['timeres'], or native timing),
        and yields processed chunks from processing_utilities.process_data.
        Yields None when blocked waiting for new data in realtime mode.
        """
        import hsrl.data_stream.processing_utilities as pu
        params=self.params
        firsttimeever=None
        intervalTime=None
        intervalEnd=None
        rawsrc=iter(self.rawsrc)
        #if params['timeres']!=None and params['timeres']<datetime.timedelta(seconds=float(self.cal_narr.hsrl_constants['integration_time'])):
        #    params['timeres']=None #pure native
        end_time_datetime=params['finalTime']
        #timemodoffset=time_mod(params['realStartTime'],params['timeres'])
        noframe='noframe'
        fullrange=False #if this is true, it will pad the start with any missing times.

        remainder=None
        cdf_to_hsrl = None
        preprocess_ave = None
        requested_times=None
        instrument=self.hsrl_instrument
        intcount=0
        rs_mem = None
        #rs=None
        #wrap the timesource (if any) so bins can be pulled per interval
        timesource=TimeSource.CompoundTimeGenerator(self.timesource) if self.timesource is not None else None
        
        #outer loop: one iteration per calibration window from the cal narrator
        for calv in self.cal_narr:
            if intervalTime is None:
                firsttimeever=calv['chunk_start_time']
                intervalTime=calv['chunk_start_time']
                intervalEnd=intervalTime
            chunk_end_to_use=calv['chunk_end_time']#-time_mod(calv['chunk_end_time'],params['timeres'],timemodoffset)
            #print 'old end',calv['chunk_end_time'],'vs new end',chunk_end_to_use,'mod base',params['timeres']
            #zero-length cal window with no fixed end time => realtime mode is waiting for data
            if calv['chunk_end_time']==calv['chunk_start_time'] and end_time_datetime is None:
                if params['block_when_out_of_data']:
                    if 'timeres' not in params or params['timeres'] is None:
                        sleep(calv['rs_constants']['integration_time'])
                    else:
                        sleep(params['timeres'].total_seconds())
                else:
                    yield None #this is done to get out of here, and not get stuck in a tight loop
                continue
            #inner loop: consume the cal window in one or more processing intervals
            while intervalTime<chunk_end_to_use:
                integration_time = calv['rs_constants']['integration_time']
                doPresample=True
                #END init section
                if intervalEnd>chunk_end_to_use:
                    print 'Breaking calibration on endtime. proc ',intervalEnd,chunk_end_to_use,end_time_datetime
                    break
                else:
                    intervalEnd=chunk_end_to_use
                #print ' Absolute window is ', actualStartTime, ' to ' , params['finalTime']
                print ' prior window was ', intervalTime, ' to ' , intervalEnd, 'terminating at ',chunk_end_to_use,rs_mem
                if True:#requested_times==None or requested_times.shape[0]>0:
                    try:
                            #accumulate raw frames into rs_mem until the cal window is covered
                            try:
                                while rawsrc is not None:
                                    if rs_mem is not None and rs_mem.times[0]>=chunk_end_to_use  and (end_time_datetime is None or chunk_end_to_use<end_time_datetime):
                                        break
                                    tmp=rawsrc.next()
                                    if hasattr(tmp,'rs_raw'):
                                        if rs_mem is not None:
                                            rs_mem.append(tmp.rs_raw)
                                        else:
                                            rs_mem=copy.deepcopy(tmp.rs_raw)
                                    if rs_mem is not None and rs_mem.times.shape>0:
                                        break
                                    else:
                                        rs_mem=None
                            except StopIteration:
                                print 'Raw HSRL stream is ended'
                                rawsrc=None
                            if rs_mem is None or rs_mem.times.size==0:
                                rs_mem=None
                            elif rs_mem.times[0]>=chunk_end_to_use and (end_time_datetime is None or chunk_end_to_use<end_time_datetime):
                                #raw data starts beyond this cal window; move to the next calv
                                print 'HSRL RAW skipping to next cal because of times',intervalTime,chunk_end_to_use,end_time_datetime,rs_mem.times[0]
                                break
                            else:
                                intervalEnd=rs_mem.times[-1]
                            print 'read in raw frame to mean',rs_mem,remainder
                            if rawsrc is None:
                                intervalEnd=chunk_end_to_use

                            print 'trimmed ',rs_mem
                            #build the requested output time axis for this interval:
                            #from the timesource, from a fixed 'timeres', or native timing
                            if timesource is not None:
                                if timesource.isDone:
                                    break
                                useMungedTimes=False #this is in case this code will need to start shifting bins (which assumes resolutions, and implies start and end of intervales, rather than explicitly to avoid overlap or underlap
                                usePrebinnedTimes=True #this goes in the other direction of munged times to say provided times are timebin borders, and the last time is the end of the last, not included, and thus expected to be the first bin on the next window. thats the fully explicit way to describe the bins in code, but standards in describing bins to the user (a single time when the bin spans a range) is not defined yet
                                inclusive=rawsrc is None and (end_time_datetime!=None and intervalEnd>=end_time_datetime)
                                timevals=hau.T_Array(timesource.getBinsFor(starttime=intervalTime,endtime=intervalEnd,inclusive=inclusive))#,inclusive=(end_time_datetime!=None and intervalEnd>=end_time_datetime)))
                                print 'Now %i intervals %s' % (timevals.size-1, "INC" if inclusive else "NOINC"),intervalTime,intervalEnd
                            elif 'timeres' in params and params['timeres'] is not None:
                                tmp=intervalTime
                                useMungedTimes=False #this is in case this code will need to start shifting bins (which assumes resolutions, and implies start and end of intervales, rather than explicitly to avoid overlap or underlap
                                usePrebinnedTimes=True #this goes in the other direction of munged times to say provided times are timebin borders, and the last time is the end of the last, not included, and thus expected to be the first bin on the next window. thats the fully explicit way to describe the bins in code, but standards in describing bins to the user (a single time when the bin spans a range) is not defined yet

                                timevals=[]
                                timevals.append(tmp)
                                while tmp<intervalEnd:# using python datetimes for making the axis is much much more precise than matplotlib floats.
                                        #print tmp, ' = ' , du.date2num(tmp) , ' = ' , (tmp-self.actualStartTime).total_seconds()
                                        tmp+=params['timeres']
                                        timevals.append(tmp)
                                        
                                #intervalEnd=tmp
                                intcount+=len(timevals)
                                if usePrebinnedTimes:
                                    intcount-=1
                                print 'Now %i intervals' % (intcount)
                                timevals=hau.T_Array(timevals)
                            else:

                                print 'Using Native timing'
                                timevals=None

                            print ' new window is ', intervalTime, ' to ' , intervalEnd

                            requested_times=timevals

                            requested_chunk_times= requested_times#requested_times[requested_times >=intervalTime]

                            if requested_chunk_times is not None and len(requested_chunk_times)<2 and rawsrc is not None:
                                #if rawsrc is not None:
                                print "not enough time to process"
                                continue
                            elif rawsrc is None and rs_mem is None and remainder is None:
                                #chunk_end_to_use=intervalTime
                                #continue
                                #print ''
                                break


                            #process the accumulated raw data; any samples past the
                            #requested times come back in 'remainder' for the next pass
                            rs_chunk,remainder = pu.process_data( instrument, intervalTime, intervalEnd
                                ,params['min_alt'], params['max_alt'], requested_chunk_times
                                , rs_mem, calv['rs_Cxx'], calv['rs_constants'], calv['rs_cal']
                                , None , self.cal_narr.hsrl_corr_adjusts, self.cal_narr.hsrl_process_control
                                , self.compute_stats,remainder=remainder)
                            rs_mem=None
                            if rs_chunk is not None and hasattr(rs_chunk,'rs_mean') and rs_chunk.rs_mean is not None and rs_chunk.rs_mean.times.size==0:
                                rs_chunk=None
                            if rs_chunk is None and rawsrc is None:
                                break
                           #print rs_chunk
                            if rs_chunk is not None and hasattr(rs_chunk,'rs_mean') and rs_chunk.rs_mean is not None and rs_chunk.rs_mean.times.size>0:
                                #optionally pad the front of the chunk with empty times
                                if fullrange and requested_chunk_times is not None:
                                    v=hau.Time_Z_Group(like=rs_chunk.rs_mean)
                                    v.times=hau.T_Array(requested_chunk_times[requested_chunk_times<rs_chunk.rs_mean.times[0]])
                                    if v.times.size>0:
                                        rs_chunk.rs_mean.prepend(v)
                                rs_chunk.calv=calv

                                yield rs_chunk
                                intervalTime=intervalEnd
                    except Exception, e:
                        print 'Exception occured in update_cal_and_process'
                        print 'Exception = ',e
                        print traceback.format_exc()
                        if isinstance(e,(MemoryError,)):
                            print 'Please Adjust Your Parameters to be more Server-friendly and try again'
                            raise
                        if not isinstance(e,(AttributeError,)):
                            raise
        assert(remainder is None or remainder.times.size==0)
        #if requested, emit a final time-axis-only frame covering any span with no data
        if fullrange and end_time_datetime is not None and timesource is not None and (not timesource.isDone or requested_times is not None) and firsttimeever!=intervalTime:#either timesource indicates it wasn't run completely or requested times wasn't cleared
            requested_times=hau.T_Array(timesource.getBinsFor(starttime=intervalTime,endtime=end_time_datetime))
            if requested_times is not None and len(requested_times)>1:
                print 'NO DATA to end from ',intervalTime,' to ',end_time_datetime #FIXME IS THIS USED? JPG 20160504 
                print "times to use are ",requested_times[:-1]
                rs= hau.Time_Z_Group()
                rs.rs_mean=hau.Time_Z_Group()
                rs.rs_mean.times=hau.T_Array(requested_times[:-1]).copy() #causes the time axis to be stored, but all others may be implied MISSING
                setattr(rs.rs_mean,'delta_t',hau.T_Array(np.zeros(rs.rs_mean.times.shape)))
                yield rs