Example 1
 def read(self):
     remainder=None
     base=None
     import lg_dpl_toolbox.dpl.TimeSource as TimeSource
     timesource=TimeSource.CompoundTimeGenerator(self.timesource)
     for f in self.framestream:
         if timesource.isDone:
             break
         if remainder is None:
             remainder=copy.deepcopy(f)
         else:
             remainder.append(f)
         t=getattr(remainder,remainder._timevarname)
         #print t.shape
         if t.shape[0]==0:
             continue
         requestedtimes=hau.T_Array(timesource.getBinsFor(starttime=base,endtime=t[-1]))
         if requestedtimes.size<2:
             continue
         lastTime=requestedtimes[-1]
         retarr=remainder
         remainder=hau.trimTimeInterval(retarr,lastTime,datetime(2200,1,1,0,0,0))
         retarr.trimTimeInterval(base,lastTime)
         retarr.hereGoneBinTimes(requestedtimes,allow_nans=self.allow_nans)
         print 'range',base,lastTime,'returning:',retarr,'remainder',remainder
         yield retarr
         base=lastTime
     if remainder is not None and timesource.end_time is not None:
         requestedtimes=hau.T_Array(timesource.getBinsFor(starttime=base,endtime=timesource.end_time,inclusive=True))
         remainder.hereGoneBinTimes(requestedtimes,allow_nans=self.allow_nans)
         if getattr(remainder,remainder._timevarname).shape[0]>0:
             print 'range',base,timesource.end_time,'returning:',remainder
             yield remainder
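A minimal driver sketch for this read() generator (the FrameResampler wrapper name and its constructor are hypothetical; framestream, timesource, and the hau frame types are assumed from the example above):

    # hypothetical usage sketch: drain the resampling generator
    resampler = FrameResampler(framestream, timesource, allow_nans=True)  # hypothetical constructor
    for chunk in resampler.read():
        t = getattr(chunk, chunk._timevarname)  # frames name their own time axis
        print 'chunk with', t.shape[0], 'records'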
Example 2
 def process(self):
     fr = None
     flags = None
     olda = None
     qasource = tf.TimeTrickle(self.qasource, 'time')
     altitudes = hau.Z_Array(self.qaparser.altitudeAxis)
     for f in self.timealtsource:
         #FIXME include angles
         if not isinstance(f, dict):
             f = vars(f)
         t = f[self.timename]
         a = self.constantAltitude
         if fr is None or ((not qasource.atEnd) and t >= qasource.nextTime):
             #if we need an update to the qa record
             #print 'Getting qa source for time',t
             fr = qasource(t)
             flags = None
         if 'range_flags' in fr and fr['range_flags'] is not None:
             #if there is a range dependence, and a potentially non-constant altitude
             if self.altname is not None and self.altname in f:
                 a = f[self.altname]
             if a is None:
                 raise RuntimeError(
                     'Need platform altitude to merge in range-dependent qa flags'
                 )
             if olda is None or a != olda:
                 flags = None
                 olda = a
         if flags is None:  #was cleared either because new flags from the qa file, or new altitude from the stream
             if 'range_flags' in fr and fr['range_flags'] is not None:
                 flags = self.qaparser.mergeVectors(fr['flags'],
                                                    fr['range_flags'], a)
             else:
                 flags = fr['flags']
             flags = self.qaparser.translateToEnumeration(flags)
             flags = hau.TZ_Array(flags.reshape([1] + list(flags.shape)),
                                  dtype='int32',
                                  summode='and')
         ret = hau.Time_Z_Group(timevarname='times', altname='altitudes')
         setattr(ret, 'times', hau.T_Array([t]))
         setattr(ret, 'delta_t', hau.T_Array([f['width'].total_seconds()]))
         setattr(ret, 'altitudes', copy.copy(altitudes))
         setattr(ret, 'start', f['start'])
         setattr(ret, 'width', f['width'])
         if self.splitFields:
             #use a distinct loop variable so the stream frame 'f' is not shadowed
             for fieldname, idx in self.qaparser.flagbits.items():
                 setattr(
                     ret, 'qa_' + fieldname,
                     hau.TZ_Array((flags / (10**idx)) % 10,
                                  dtype='int32',
                                  summode='and'))
         else:
             setattr(ret, 'qaflags', flags)
         yield ret
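The splitFields branch decodes one decimal digit per flag field from the packed integer flags. A self-contained numpy sketch of that digit extraction (the flag map here is hypothetical):

    import numpy as np
    flags = np.array([[1203]], dtype='int32')  # four decimal digits, one per flag field
    flagbits = {'mol_lost': 0, 'cloud': 1, 'i2_lock': 2, 'qa': 3}  # hypothetical flag map
    for name, idx in flagbits.items():
        print name, (flags // (10**idx)) % 10  # digit idx holds that field's enumeration value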
Example 3
 def process(self):
     priorFrame = None
     priorFrameD = None
     averageDiff = 0.0
     for f in self.s:
         assert (f is not None)
         f = copy.copy(f)
         _f = f
         if not isinstance(_f, dict):
             _f = vars(_f)
         tf = _f[self.timefieldname]
         dtf = hau.T_Array(numpy.zeros(tf.shape), summode='sum')
         _f[self.dtfieldname] = dtf
         for i in range(tf.size - 1):
             dtf[i] = (tf[i + 1] - tf[i]).total_seconds()
         if priorFrame is not None:
             if priorFrameD[self.dtfieldname].size > 0 and tf.size > 0:
                 priorFrameD[self.dtfieldname][-1] = (
                     tf[0] -
                     priorFrameD[self.timefieldname][-1]).total_seconds()
             yield priorFrame
         priorFrame = f
         priorFrameD = _f
         if priorFrameD[self.dtfieldname].size > 1:
             priorFrameD[self.dtfieldname][-1] = priorFrameD[
                 self.dtfieldname][-2]
     if priorFrame is not None:
         yield priorFrame
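The generator above fills dtfieldname with the gap to the next record, patching the prior frame's last gap once the next frame arrives. The core arithmetic, as a standalone sketch:

    import numpy
    from datetime import datetime, timedelta
    times = [datetime(2015, 1, 1) + timedelta(seconds=2.5 * i) for i in range(4)]
    dt = numpy.zeros(len(times))
    for i in range(len(times) - 1):
        dt[i] = (times[i + 1] - times[i]).total_seconds()
    dt[-1] = dt[-2]  # same carry-forward rule applied to the last record above
    print dt         # [ 2.5  2.5  2.5  2.5]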
Example 4
 def process(self):
     for f in self.s:
         f = copy.copy(f)
         _f = f
         if not isinstance(f, dict):
             _f = vars(f)
         #print _f.keys()
         _f[self.timefieldname] = hau.T_Array([_f['start']], summode='first')
         #could use the bin midpoint instead: _f['start']+timedelta(seconds=_f['width'].total_seconds()/2.0)
         _f[self.dtfieldname] = hau.T_Array(
             [_f['width'].total_seconds()
              if _f['width'] is not None and _f['width'].total_seconds() > 0.0
              else numpy.NaN],
             summode='sum')
         yield f
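A sketch of the width guard on a bare dict frame (no hau types needed to see the behavior): zero, negative, or missing widths become NaN rather than 0.

    import numpy
    from datetime import datetime, timedelta
    frame = {'start': datetime(2015, 1, 1), 'width': timedelta(seconds=30)}
    w = frame['width']
    dt = w.total_seconds() if w is not None and w.total_seconds() > 0.0 else numpy.NaN
    print frame['start'], dt  # 2015-01-01 00:00:00 30.0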
Example 5
def dark_count_correction_from_signal(chan_sel_dict, rs, start_index,
                                      end_index):
    """
       dark_count_correction_from_signal(apply_to,rs,start_index,end_index)
       finds nanmean of signal in altitude bins between start and end indices 
       for each profile and subtracts this value for each profile
       Also add a vector T_Array vector of dark counts for each profile.
       
       chan_sel_dict = dict(chanel_names = "dark_count_names" ......) 
                       correct channel_names with corresponding dark_counts
       rs       = structure containing variables to be corrected
       start_index = array index for start of dark average
       end_index = array index for end of dark count average
    """

    #for field in apply_to:
    for channel_name, dark_count_name in chan_sel_dict.iteritems():
        #if hasattr(rs,field):
        if hasattr(rs, channel_name):
            temp_d = getattr(rs, channel_name).copy()
            ones_array = np.transpose(np.ones_like(temp_d))
            dark_corr = hau.T_Array(
                np.nanmean(temp_d[:, start_index:end_index], 1))
            dark_corr_array = np.transpose(ones_array * dark_corr)
            temp_d -= dark_corr_array
            #write corrected counts back to rs
            setattr(rs, channel_name, temp_d)
            #print 'field entering',channel_name

            #add dark count field to rs
            setattr(rs, dark_count_name, dark_corr)

    return rs
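The per-profile mean-subtract above builds a transposed ones array to spread the dark counts across altitude bins; plain numpy broadcasting does the same job, shown here on toy data:

    import numpy as np
    counts = np.arange(12.0).reshape(3, 4)    # 3 profiles x 4 altitude bins
    dark = np.nanmean(counts[:, 2:4], 1)      # mean of bins 2..3 for each profile
    corrected = counts - dark[:, np.newaxis]  # broadcast one dark value per profile
    print corrected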
Example 6
    def realdocall(self, myslice, templaterec):
        """ call the filter with the given slices as input

        :param myslice: list of frames in the window
        :param templaterec: record in the window to base the return off of

        if this filter was given a varlist, this will call the filter once per individual variable, with a 2D array as the content; the return of each call replaces the content of that var in templaterec, which is returned
        if this filter was not given a varlist, the list of frames passed to this function is instead passed to a single call of the filter, and this returns whatever the filter returns

        :returns: simple frame result
        """
        if not self.inMiddle(myslice, templaterec):
            print 'FAILED TO ALIGN WINDOW: len ', len(myslice), \
                ' index ', self.indexOf(myslice, templaterec)
        b = hau.T_Array(np.ones((1, 6)))
        c = len(myslice)
        x = self.indexOf(myslice, templaterec)
        b[0, 0] = c
        b[0, 1] = x
        b[0, 2] = self.delta_t(myslice).total_seconds()
        b[0, 3] = self.delta_t(myslice[:x] if x > 0 else []).total_seconds()
        b[0, 4] = self.delta_t(myslice[x:(x + 1)]).total_seconds()
        b[0, 5] = self.delta_t(myslice[(
            x + 1):] if x < (c - 1) else []).total_seconds()
        # print 'rolling window stats',b
        if self.varlist is None:
            kwcargs = self.kwcargs
            if self.middleframeparametername is not None:
                kwcargs = kwcargs.copy()
                kwcargs[self.middleframeparametername] = copy.deepcopy(
                    templaterec)
            ret = self.callable(myslice, *self.cargs, **kwcargs)
        else:
            ret = copy.deepcopy(templaterec)
            for i, v in enumerate(self.varlist):
                try:
                    av = deepattribute(myslice[0], v)
                except KeyError:
                    print 'Warning: configured key', v, 'not found in frame'
                    raise  #re-raises; the 'continue' that used to follow was unreachable
                av = copy.deepcopy(av)
                for f in myslice[1:]:
                    av.append(deepattribute(f, v))
                setdeepattribute(
                    ret, v, self.callable(av, *self.cargs, **self.kwcargs))
        _ret = ret
        if not isinstance(ret, dict):
            _ret = vars(ret)
        if b is not None and self.windowinfo_perframe is not None:
            if self.windowinfo_perframe not in _ret:
                _ret[self.windowinfo_perframe] = b
            else:
                _ret[self.windowinfo_perframe] = np.concatenate(
                    (_ret[self.windowinfo_perframe], b), 1)
        return ret
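The 1x6 array b built above is a per-frame window-stats row; rows from successive calls are chained on axis 1 under windowinfo_perframe. A sketch of its layout, with made-up numbers:

    import numpy as np
    b = np.ones((1, 6))
    b[0, 0] = 5      # frames in the window
    b[0, 1] = 2      # index of the template record
    b[0, 2] = 10.0   # whole-window span (s)
    b[0, 3] = 4.0    # span before the template record (s)
    b[0, 4] = 2.0    # the record's own span (s)
    b[0, 5] = 4.0    # span after the record (s)
    rows = np.concatenate((b, b), 1)  # accumulation across frames, as in realdocall
    print rows.shape                  # (1, 12)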
Example 7
def addCommonAttributes(x, attrs):
    for attr, dest in attrrep.items():
        if attr not in attrs:
            continue
        v = attrs[attr][:].split()
        val = hau.T_Array(np.ones(x.times.shape) * float(v[0]))
        if len(v) > 1:
            if v[1] in ('(GHz)', 'GHz'):
                val *= 1e9
        setattr(x, dest or attr, val)
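addCommonAttributes parses netCDF-style "value units" attribute strings and scales GHz to Hz; attrrep maps attribute names to destination names. A standalone sketch with a made-up attribute:

    import numpy as np
    attrs = {'frequency': '94.0 GHz'}  # hypothetical "value units" attribute string
    v = attrs['frequency'].split()
    val = np.ones(5) * float(v[0])     # one value per time sample
    if len(v) > 1 and v[1] in ('(GHz)', 'GHz'):
        val *= 1e9                     # store frequency in Hz
    print val[0]                       # 9.4e+10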
Example 8
def radar_masking(rs_radar, processing_defaults, instrument):
    """radar_masking(rs_radar,processing_defaults)
       rs_radar = structure generated by radar processing stream
       processing_defaults = config info from 'radar_processing_defaults.json'
       qc_radar_mask, bit[0] = longical and of all other bits
       qc_radar_mask, bit[1] = cleared if radar refectivity fall below SNR threshhold
       """

    assert rs_radar is not None
    if not hasattr(rs_radar, 'Backscatter'):
        return rs_radar

    threshhold = processing_defaults.get_value('radar_SNR_mask', 'threshhold')
    #radar mask ok when sig_to_noise greater than threshhold in dB
    rs_radar = copy.copy(rs_radar)
    #fix me this has been changed to a single bit because the interpolator is
    #not aware of summode
    rs_radar.qc_radar_mask = \
        hau.TZ_Array(np.ones(rs_radar.Backscatter.shape),summode='or',dtype='uint16')
    #rs_radar.qc_radar_mask[:,:]=65535
    #mask = np.ones_like(rs_radar.Backscatter)
    #mask[rs_radar.SignalToNoiseRatio < threshhold] = 0
    #mask = ~(3*mask.astype('uint16'))
    #rs_radar.qc_radar_mask &= mask

    rs_radar.qc_radar_mask[rs_radar.SignalToNoiseRatio < threshhold] = 0
    rs_radar.qc_radar_mask[np.isnan(rs_radar.SignalToNoiseRatio)] = 0

    print
    print 'instrument', instrument

    if instrument in ('magkazrge', 'magmwacr') \
                and processing_defaults.enabled('ship_motion_correction'):
        temp = rs_radar.MeanDopplerVelocity.copy()
        temp[rs_radar.qc_radar_mask == 0] = np.NaN
        rs_radar.vertically_averaged_doppler = hau.T_Array(nanmean(temp, 1))

    return rs_radar
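The mask logic reduces to two clears on a ones array: gates below the SNR threshold and gates with missing SNR. On toy data:

    import numpy as np
    snr = np.array([[5.0, -2.0, np.NaN]])
    mask = np.ones(snr.shape, dtype='uint16')
    mask[snr < 3.0] = 0       # below-threshold gates cleared
    mask[np.isnan(snr)] = 0   # missing SNR also cleared
    print mask                # [[1 0 0]]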
Example 9
def read_sounding_file(instrument, sounding_type, id, start_time,
                       requested_altitudes):
    """ read_sounding_file([instrument,sounding_type,id,start_time,alt_res,max_alt)   
     returns arrays rs.temps(sounding#, rs.dew_points(sounding#,alt_index),
     rs.wdir(sounding#,alt_index), rs.wspd(sounding#,alt_index) along with several
     scalers with sounding info instrument (e.g. 'ahsrl','gvhsrl','mf2hsrl','nshsrl')
     sounding_type may be radiosonde station id, model identifier, or other instrument
     sounding_type (e.g. 'NOAA','ARM',.......
     sounding id (for sounding_type=NOAA, this a 3-letter e.g. 'MSN')
     start_time first time for which the sounding is needed, provided as matplot time
     requested_altitudes is a vector of altitudes at which sounding values are requested (m) 
     returns temp_sounding(object) with all soundings from the current file after the starting time
     returns the last time at which this sounding can be used as rs.expire_time"""

    import lg_dpl_toolbox.core.archival as hru

    if sounding_type[:].find('NOAA raob') >= 0:
        rs = hau.Time_Z_Group()

        time_struct = start_time
        dir_path = hru.get_path_to_data(instrument, start_time)
        filename = dir_path + '/' + '%4i' % time_struct.year + '/' \
            + '%02i' % time_struct.month + '/sondes.' + id[:] + '.nc'
        print 'sounding file--', filename
        if not os.path.exists(filename):
            return None
            #raise RuntimeError, 'sounding file %s does not exist' \
            #    % filename
        nc = Dataset(filename, 'r')
        times = getAll(nc, 'synTime')

        # combine mandatory and sig height measurements

        heights = np.hstack((getAll(nc, 'htMan'), getAll(nc, 'htSigT')))

        epoch = datetime(1970, 1, 1, 0, 0, 0)
        t_mask = times < 1e36
        for i in range(len(times)):
            t_mask[i] = times[i] < 1e36 and any(heights[i, :] < 1e36)

        times = times[t_mask]

        times = [epoch + timedelta(seconds=soff) for soff in times[:]]

        # select times, one prior to start time --> last profile in file

        #indices = np.arange(len(times))
        #start_index = max(indices[times <= start_time])
        #rs.times = zeros(len(times) - start_index)
        rs.times = np.zeros(len(times))
        #rs.times = times[start_index:]
        #         rs.times = hau.T_Array( rs.times )
        rs.times = times[:]

        wmosta = getAll(nc, 'wmoStat')  # wmo station number
        stalong = getAll(nc, 'staLon')
        stalat = getAll(nc, 'staLat')

        # combine mandatory and sig height measurements

        temps = np.hstack((getAll(nc, 'tpMan'), getAll(nc, 'tpSigT')))
        pressures = np.hstack((getAll(nc, 'prMan'), getAll(nc, 'prSigT')))
        dew_points = np.hstack((getAll(nc, 'tdMan'), getAll(nc, 'tdSigT')))
        wind_dir = np.hstack((getAll(nc, 'wdMan'), getAll(nc, 'wdSigT')))
        wind_spd = np.hstack((getAll(nc, 'wsMan'), getAll(nc, 'wsSigT')))
        heights = heights[t_mask, :]
        temps = temps[t_mask, :]
        pressures = pressures[t_mask, :]
        dew_points = dew_points[t_mask, :]
        wind_dir = wind_dir[t_mask, :]
        wind_spd = wind_spd[t_mask, :]

        [n_soundings, n_heights] = temps.shape

        # defined standard atmosphere climatology for use above highest reported level
        # climate=temp_sounding()

        climate = hau.Time_Z_Group()
        climate.altitudes = np.zeros((n_soundings, 9))
        climate.temps = np.zeros((n_soundings, 9))
        climate.pressures = np.zeros((n_soundings, 9))
        climate.dew_pt = np.zeros((n_soundings, 9))
        climate.wind_spd = np.zeros((n_soundings, 9))
        climate.wind_dir = np.zeros((n_soundings, 9))

        # find the highest valid point in each sounding

        rs.top = np.zeros((n_soundings, ))
        rs.bot = np.zeros((n_soundings, ))

        # climate.altitudes[0,:]=array([10000, 15000, 20000, 25000, 30000, 35000, 40000, 45000, 50000])

        for i in range(n_soundings):
            mask = heights[i, :] <= 50000
            if np.any(mask):
                rs.top[i] = max(heights[i, mask])
                rs.bot[i] = min(heights[i, temps[i, :] != 99999])
            else:
                rs.top[i] = 0.0
                rs.bot[i] = 0.0

            rs.top = hau.T_Array(rs.top)
            rs.bot = hau.T_Array(rs.bot)
            climate.altitudes[i, :] = np.array([
                10000,
                15000,
                20000,
                25000,
                30000,
                35000,
                40000,
                45000,
                50000,
            ])
            climate.temps[i, :] = np.array([
                223.1,
                216,
                216,
                221,
                226,
                237,
                251,
                265,
                270,
            ])
            climate.pressures[i, :] = np.array([
                264.3,
                120.45,
                54.75,
                25.11,
                11.71,
                5.58,
                2.77,
                1.43,
                0.759,
            ])
            climate.dew_pt[i, :] = np.NaN

            # don't use climatology lower than 2km above highest valid measurement

            #compute the mask once; writing 9e36 into altitudes first would
            #invalidate the mask used for temps and pressures
            too_low = climate.altitudes <= rs.top[i] + 2000
            climate.altitudes[too_low] = 9e36
            climate.temps[too_low] = 9e36
            climate.pressures[too_low] = 9e36

        # stack the climatology on top of the observations

        heights = np.hstack((heights, climate.altitudes))
        temps = np.hstack((temps, climate.temps))
        pressures = np.hstack((pressures, climate.pressures))
        dew_points = np.hstack((dew_points, climate.dew_pt))
        wind_dir = np.hstack((wind_dir, climate.wind_dir))
        wind_spd = np.hstack((wind_spd, climate.wind_spd))
        #print heights.shape
        heights_unsorted = heights.copy()
        temps_unsorted = temps.copy()
        pressures_unsorted = pressures.copy()
        dew_points_unsorted = dew_points.copy()
        wind_dir_unsorted = wind_dir.copy()
        wind_spd_unsorted = wind_spd.copy()

        for i in range(heights_unsorted.shape[0]):
            indices = np.argsort(heights_unsorted[i, :])
            heights[i, :] = heights_unsorted[i, indices]
            temps[i, :] = temps_unsorted[i, indices]
            pressures[i, :] = pressures_unsorted[i, indices]
            dew_points[i, :] = dew_points_unsorted[i, indices]
            wind_dir[i, :] = wind_dir_unsorted[i, indices]
            wind_spd[i, :] = wind_spd_unsorted[i, indices]

        # sort combined file by height and select times of interest
        if 0:
            indices = heights.argsort(axis=1)
            index_a = np.transpose(
                np.transpose(np.ones(heights.shape, dtype=int)) *
                np.arange(heights.shape[0]))
            heights = heights[index_a, indices]
            temps = temps[index_a, indices]
            pressures = pressures[index_a, indices]
            dew_points = dew_points[index_a, indices]
            wind_dir = wind_dir[index_a, indices]
            wind_spd = wind_spd[index_a, indices]

        pressures[heights > 1e5] = np.NaN
        temps[heights > 1e5] = np.NaN
        dew_points[heights > 1e5] = np.NaN

        # interpolate to altitude resolution requested
        # and remove missing data points

        max_alt = requested_altitudes[-1]
        max_bin = requested_altitudes.shape[0]
        alts = requested_altitudes

        n_soundings = len(rs.times)
        #max_bin = round(max_alt / float(alt_res)) + 1

        # create sounding arrays as hau class items

        rs.altitudes = hau.TZ_Array(np.zeros((n_soundings, max_bin)))
        rs.temps = hau.TZ_Array(np.zeros((n_soundings, max_bin)))
        rs.dew_points = hau.TZ_Array(np.zeros((n_soundings, max_bin)))
        rs.pressures = hau.TZ_Array(np.zeros((n_soundings, max_bin)))
        rs.wind_spd = hau.TZ_Array(np.zeros((n_soundings, max_bin)))
        rs.wind_dir = hau.TZ_Array(np.zeros((n_soundings, max_bin)))

        #rs.times =hau.T_Array(np.zeros(n_soundings))
        rs.times = hau.T_Array(times)
        #rs.expire_time =hau.T_Array(np.zeros(n_soundings))
        rs.stalat = hau.T_Array(np.zeros(n_soundings))
        rs.stalong = hau.T_Array(np.zeros(n_soundings))
        rs.wmosta = hau.T_Array(np.zeros(n_soundings))

        rs.stalat[:] = stalat[0]
        rs.stalong[:] = stalong[0]
        rs.wmosta[:] = wmosta[0]
        rs.station_id = id

        # interpolate to lidar altitude scale

        for i in range(n_soundings):

            rs.altitudes[i, :] = alts
            k = i

            rs.temps[i, :] = np.interp(alts, heights[k, temps[k, :] != 99999],
                                       temps[k, temps[k, :] != 99999])

            rs.dew_points[i, :] = np.interp(
                alts, heights[k, dew_points[k, :] != 99999],
                dew_points[k, dew_points[k, :] != 99999])

            #now using spline fit to pressures
            #should use hydrostatic equation to interpolate

            press1 = pressures[k, pressures[k, :] != 99999]
            h1 = heights[k, pressures[k, :] != 99999]
            temp = interpolate.splrep(h1[~np.isnan(press1)],
                                      press1[~np.isnan(press1)])
            rs.pressures[i, :] = interpolate.splev(alts, temp, der=0)

            rs.wind_spd[i, :] = np.interp(alts,
                                          heights[k, wind_spd[k, :] != 99999],
                                          wind_spd[k, wind_spd[k, :] != 99999])
            rs.wind_dir[i, :] = np.interp(alts,
                                          heights[k, wind_dir[k, :] != 99999],
                                          wind_dir[k, wind_dir[k, :] != 99999])
            #if i < n_soundings - 1:
            #    #rs.expire_time[i] = (rs.times[i] + rs.times[i + 1])
            #    rs.expire_time = rs.times[i] + timedelta(seconds=(rs.times[i+1] - rs.times[i]).total_seconds() / 2.0) + timedelta(seconds=60*30)  # add 1/2 hour to point to next sounding
            #else:
            #    #rs.expire_time[i] = rs.times[i] + 0.25 + 1 / 48.0  # new sonde profile expected in 6 1/2 hrs
            #    rs.expire_time = rs.times[i] + timedelta(days=0.25, seconds= 30*60)  # new sonde profile expected in 6 1/2 hrs

        # convert dew point depression to dew point temp

        rs.dew_points = rs.temps - rs.dew_points

        plots = 0  # FIXME
        if plots == 1:
            import matplotlib.pylab as plt
            plt.figure(801)

            plt.plot(temps[0, :], heights[0, :] / 1000, 'r', dew_points[0, :],
                     heights[0, :] / 1000)
            fig = plt.grid(True)
            plt.xlabel('deg-K ')
            plt.ylabel('Altitude MSL (km)')
            plt.title('Temperature, dew point')

            plt.figure(802)
            #set_printoptions(threshold=np.NaN)

            plt.plot(rs.temps[0, :], rs.altitudes[0, :] / 1000, 'r',
                     rs.dew_points[0, :], rs.altitudes[0, :] / 1000)
            fig = plt.grid(True)
            plt.xlabel('deg-K ')
            plt.ylabel('Altitude MSL (km)')
            plt.title('Temperature, dew point')

            plt.figure(803)
            plt.plot(pressures[0, :], heights[0, :] / 1000, 'r')
            fig = plt.grid(True)
            ax = plt.gca()
            #ax.set_xscale('log')
            plt.xlabel('deg-K ')
            plt.ylabel('Altitude MSL (km)')
            plt.title('Pressures')

            plt.figure(804)
            bin_vec = range(len(heights[0, :]))
            heights[heights > 1000] = np.NaN
            plt.plot(heights[0, :] / 1000, bin_vec, 'r')
            fig = plt.grid(True)
            ax = plt.gca()
            #ax.set_xscale('log')
            plt.xlabel('altitudes')
            plt.ylabel('bins')
            plt.title('Heights')

            plt.show()
            raise RuntimeError('deliberate abort')
    else:
        print ' '
        print 'ERROR**************unknown sounding source************'
        print ' '
        rs = []
    return rs
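Every interpolation above first drops the 99999 missing-value samples before calling np.interp; a minimal standalone version of that pattern:

    import numpy as np
    heights = np.array([0., 1000., 2000., 3000.])
    temps = np.array([288., 99999., 275., 268.])  # 99999 marks a missing sample
    good = temps != 99999
    alts = np.array([500., 1500., 2500.])
    print np.interp(alts, heights[good], temps[good])  # [ 284.75  278.25  271.5 ]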
Example 10
def read_raqms_file(instrument, start_time):
    """read raqms file between start and end time

    instrument - e.g. 'gvhsrl'
    start_time -  datetime object    
    
    """

    raqms = hau.Time_Z_Group(altname='model_level_alts')

    filename = find_raqms_filename(instrument, start_time)
    if not filename:
        return None

    nc = Dataset(filename, 'r')
    times = getAll(nc, 'time')
    aircraft_alts = getAll(nc, 'alt')
    pressures = getAll(nc, 'pressure')
    temperatures = getAll(nc, 'temperature')
    model_level_alts = getAll(nc, 'altitude')

    relative_humidity = getAll(nc, 'rh')
    latitude = getAll(nc, 'lat')
    longitude = getAll(nc, 'lon')
    u_vel = getAll(nc, 'uvel')
    v_vel = getAll(nc, 'vvel')
    ext_total = getAll(nc, 'ext_tot')
    ext_dust = getAll(nc, 'ext_dust')
    ext_salt = getAll(nc, 'ext_salt')

    base_time = datetime(start_time.year, start_time.month, start_time.day, 0,
                         0, 0)
    #np.fix(start_time)
    #time=times.astype('float64')

    #convert raqms seconds from start of day to python datetimes
    #times=base_time + time/(3600.0*24.0)
    times = hau.T_Array(
        [base_time + timedelta(seconds=float(x)) for x in times])

    assert (times.size > 0)

    #select times after start_time, plus the profile straddling it and the last one
    selectedMask = (times > start_time)
    for i, x in enumerate(selectedMask):
        if x:
            if i > 0:
                selectedMask[i - 1] = True
            break
    selectedMask[-1] = True

    selectedTimes = np.arange(times.size)[selectedMask]

    raqms.latitude = hau.T_Array(latitude[selectedTimes])
    raqms.longitude = hau.T_Array(longitude[selectedTimes])
    raqms.pressures = hau.TZ_Array(pressures[selectedTimes, :])
    raqms.temperatures = hau.TZ_Array(temperatures[selectedTimes, :])
    raqms.ext_total = hau.TZ_Array(ext_total[selectedTimes, :])
    raqms.ext_dust = hau.TZ_Array(ext_dust[selectedTimes, :])
    raqms.ext_salt = hau.TZ_Array(ext_salt[selectedTimes, :])
    raqms.relative_humidity = hau.TZ_Array(relative_humidity[selectedTimes, :])
    raqms.u_vel = hau.TZ_Array(u_vel[selectedTimes, :])
    raqms.v_vel = hau.TZ_Array(v_vel[selectedTimes, :])
    raqms.model_level_alts = hau.TZ_Array(model_level_alts[selectedTimes, :] *
                                          1000.0)
    raqms.times = times[selectedTimes]

    return raqms
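The time-selection mask keeps every profile after start_time, plus the one immediately before it (so the interval containing start_time is covered) and the last profile. A standalone sketch:

    import numpy as np
    from datetime import datetime, timedelta
    times = np.array([datetime(2015, 1, 1) + timedelta(hours=h) for h in range(6)])
    start_time = datetime(2015, 1, 1, 2, 30)
    selectedMask = (times > start_time)
    for i, x in enumerate(selectedMask):
        if x:
            if i > 0:
                selectedMask[i - 1] = True  # keep the profile straddling start_time
            break
    selectedMask[-1] = True
    print np.arange(times.size)[selectedMask]  # [2 3 4 5]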
Example 11
def select_raqms_profile(soundings,
                         request_time,
                         requested_altitudes,
                         offset=0):
    """selects sounding prior to request_time from soundings -- the sounding
       is returned in a Time_Z_Group as Z_arrays"""

    if soundings is None or soundings.times.size == 0:
        raise RuntimeError, "select_raqms_profile: No soundings for %s " %\
              request_time

    import atmospheric_profiles.soundings.sounding_utilities as su
    sounding = hau.Time_Z_Group()

    sounding.altitudes = hau.Z_Array(requested_altitudes)
    max_alt = requested_altitudes[-1]
    max_bin = len(requested_altitudes)
    index = sum(soundings.times <= request_time) - 1 + offset

    if index < 0 or index >= len(soundings.times):
        return None
    #initialize variables for inclusion in T_Z_Group
    sounding.temps = hau.Z_Array(np.zeros((max_bin)))
    sounding.dew_points = hau.Z_Array(np.zeros(max_bin))
    sounding.frost_points = hau.Z_Array(np.zeros(max_bin))
    sounding.pressures = hau.TZ_Array(np.zeros(max_bin))
    sounding.ext_total = hau.TZ_Array(np.zeros(max_bin))
    sounding.ext_salt = hau.TZ_Array(np.zeros(max_bin))
    sounding.wind_spd = hau.TZ_Array(np.zeros(max_bin))
    sounding.wind_dir = hau.TZ_Array(np.zeros(max_bin))

    #sounding.times is a single time at this point, however it will later be included
    #in a list of all the soundings used in this processing request. In order that it
    #be treated properly it must be defined as a T_Array

    sounding.times = hau.T_Array([soundings.times[index]])
    sounding.latitude = hau.T_Array([soundings.latitude[index]])
    sounding.longitude = hau.T_Array([soundings.longitude[index]])

    #sounding.times=hau.T_Array([soundings.times[index]])

    #interpolate model levels to lidar bin altitudes

    #temp=interpolate.splrep(soundings.model_level_alts[index,-1::-1] \
    #     ,soundings.temperatures[index,-1::-1])
    #sounding.temps=interpolate.splev(sounding.altitudes,temp,der=0)

    temp=interpolate.splrep(soundings.model_level_alts[index,-1::-1] \
                            ,soundings.pressures[index,-1::-1])
    sounding.pressures = interpolate.splev(sounding.altitudes, temp, der=0)



    sounding.temps=np.interp(sounding.altitudes \
                             ,soundings.model_level_alts[index,-1::-1] \
                             ,soundings.temperatures[index,-1::-1])

    #calculate dew point at model levels for selected profile
    dew_pts=su.cal_dew_point(soundings.relative_humidity[index,:] \
                             ,soundings.temperatures[index,:])
    frost_pts = su.cal_frost_point(dew_pts)

    #calculate wind speed and direction from u and v
    u_vel = soundings.u_vel[index, -1::-1]
    v_vel = soundings.v_vel[index, -1::-1]

    wind_spd = np.sqrt(u_vel**2 + v_vel**2)
    wind_dir = np.arctan(v_vel / u_vel) * 180.0 / np.pi

    for i in range(len(u_vel)):
        if u_vel[i] < 0 and v_vel[i] < 0:
            wind_dir[i] = 180.0 - wind_dir[i]
        elif u_vel[i] > 0 and v_vel[i] > 0:
            wind_dir[i] = 180.0 + wind_dir[i]
        elif u_vel[i] < 0:
            wind_dir[i] = 270.0 - wind_dir[i]
        else:
            wind_dir[i] = np.nan

    #interpolate to lidar bin altitudes
    sounding.frost_points=np.interp(sounding.altitudes \
                                    ,soundings.model_level_alts[index,-1::-1],frost_pts[-1::-1])
    sounding.dew_points=np.interp(sounding.altitudes \
                                  ,soundings.model_level_alts[index,-1::-1],dew_pts[-1::-1])
    sounding.ext_total=np.interp(sounding.altitudes\
                                 ,soundings.model_level_alts[index,-1::-1]\
                                 ,soundings.ext_total[index,-1::-1])
    sounding.ext_salt=np.interp(sounding.altitudes\
                                ,soundings.model_level_alts[index,-1::-1]\
                                ,soundings.ext_salt[index,-1::-1])
    sounding.ext_dust=np.interp(sounding.altitudes\
                                ,soundings.model_level_alts[index,-1::-1]\
                                ,soundings.ext_dust[index,-1::-1])



    sounding.wind_dir = np.interp(sounding.altitudes \
                                  ,soundings.model_level_alts[index,-1::-1],wind_dir)
    sounding.wind_spd = np.interp(sounding.altitudes \
                                  ,soundings.model_level_alts[index,-1::-1],wind_spd)

    sounding.top = sounding.altitudes[-1]
    sounding.bot = sounding.altitudes[0]

    #plt.figure(1)
    #plt.plot(temperatures,altitudes,dew_points,altitudes)

    #plt.figure(2)
    #plt.plot(ext_total,altitudes)
    #plt.show()
    return sounding
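The branchy quadrant correction above converts the arctan ratio into a meteorological wind direction; np.arctan2 is a standard one-line alternative (not this module's code) that handles all quadrants, shown for one wind vector:

    import numpy as np
    u, v = np.array([-3.0]), np.array([-4.0])         # wind blowing toward the southwest
    met_dir = np.degrees(np.arctan2(-u, -v)) % 360.0  # direction the wind blows FROM
    print met_dir                                     # [ 36.869...] i.e. from the northeast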
Example 12
    def read(self):
        """ main read generator
        """
        import hsrl.data_stream.processing_utilities as pu
        params=self.params
        firsttimeever=None
        intervalTime=None
        intervalEnd=None
        rawsrc=iter(self.rawsrc)
        #if params['timeres']!=None and params['timeres']<datetime.timedelta(seconds=float(self.cal_narr.hsrl_constants['integration_time'])):
        #    params['timeres']=None #pure native
        end_time_datetime=params['finalTime']
        #timemodoffset=time_mod(params['realStartTime'],params['timeres'])
        noframe='noframe'
        fullrange=False #if this is true, it will pad the start with any missing times.

        remainder=None
        cdf_to_hsrl = None
        preprocess_ave = None
        requested_times=None
        instrument=self.hsrl_instrument
        intcount=0
        rs_mem = None
        #rs=None
        timesource=TimeSource.CompoundTimeGenerator(self.timesource) if self.timesource is not None else None
        
        for calv in self.cal_narr:
            if intervalTime is None:
                firsttimeever=calv['chunk_start_time']
                intervalTime=calv['chunk_start_time']
                intervalEnd=intervalTime
            chunk_end_to_use=calv['chunk_end_time']#-time_mod(calv['chunk_end_time'],params['timeres'],timemodoffset)
            #print 'old end',calv['chunk_end_time'],'vs new end',chunk_end_to_use,'mod base',params['timeres']
            if calv['chunk_end_time']==calv['chunk_start_time'] and end_time_datetime is None:
                if params['block_when_out_of_data']:
                    if 'timeres' not in params or params['timeres'] is None:
                        sleep(calv['rs_constants']['integration_time'])
                    else:
                        sleep(params['timeres'].total_seconds())
                else:
                    yield None #this is done to get out of here, and not get stuck in a tight loop
                continue
            while intervalTime<chunk_end_to_use:
                integration_time = calv['rs_constants']['integration_time']
                doPresample=True
                #END init section
                if intervalEnd>chunk_end_to_use:
                    print 'Breaking calibration on endtime. proc ',intervalEnd,chunk_end_to_use,end_time_datetime
                    break
                else:
                    intervalEnd=chunk_end_to_use
                #print ' Absolute window is ', actualStartTime, ' to ' , params['finalTime']
                print ' prior window was ', intervalTime, ' to ' , intervalEnd, 'terminating at ',chunk_end_to_use,rs_mem
                if True:#requested_times==None or requested_times.shape[0]>0:
                    try:
                            try:
                                while rawsrc is not None:
                                    if rs_mem is not None and rs_mem.times[0]>=chunk_end_to_use  and (end_time_datetime is None or chunk_end_to_use<end_time_datetime):
                                        break
                                    tmp=rawsrc.next()
                                    if hasattr(tmp,'rs_raw'):
                                        if rs_mem is not None:
                                            rs_mem.append(tmp.rs_raw)
                                        else:
                                            rs_mem=copy.deepcopy(tmp.rs_raw)
                                    if rs_mem is not None and rs_mem.times.size>0:
                                        break
                                    else:
                                        rs_mem=None
                            except StopIteration:
                                print 'Raw HSRL stream is ended'
                                rawsrc=None
                            if rs_mem is None or rs_mem.times.size==0:
                                rs_mem=None
                            elif rs_mem.times[0]>=chunk_end_to_use and (end_time_datetime is None or chunk_end_to_use<end_time_datetime):
                                print 'HSRL RAW skipping to next cal because of times',intervalTime,chunk_end_to_use,end_time_datetime,rs_mem.times[0]
                                break
                            else:
                                intervalEnd=rs_mem.times[-1]
                            print 'read in raw frame to mean',rs_mem,remainder
                            if rawsrc is None:
                                intervalEnd=chunk_end_to_use

                            print 'trimmed ',rs_mem
                            if timesource is not None:
                                if timesource.isDone:
                                    break
                                useMungedTimes=False #in case this code ever needs to start shifting bins (which assumes resolutions, and implies starts and ends of intervals, rather than stating them explicitly to avoid overlap or underlap)
                                usePrebinnedTimes=True #the opposite of munged times: the provided times are time-bin borders, and the last time is the end of the last bin, not included in it, and thus expected to be the first bin of the next window. That's the fully explicit way to describe the bins in code, but a standard for describing bins to the user (a single time when the bin spans a range) is not defined yet
                                inclusive=rawsrc is None and (end_time_datetime is not None and intervalEnd>=end_time_datetime)
                                timevals=hau.T_Array(timesource.getBinsFor(starttime=intervalTime,endtime=intervalEnd,inclusive=inclusive))#,inclusive=(end_time_datetime!=None and intervalEnd>=end_time_datetime)))
                                print 'Now %i intervals %s' % (timevals.size-1, "INC" if inclusive else "NOINC"),intervalTime,intervalEnd
                            elif 'timeres' in params and params['timeres'] is not None:
                                tmp=intervalTime
                                useMungedTimes=False #in case this code ever needs to start shifting bins (which assumes resolutions, and implies starts and ends of intervals, rather than stating them explicitly to avoid overlap or underlap)
                                usePrebinnedTimes=True #the opposite of munged times: the provided times are time-bin borders, and the last time is the end of the last bin, not included in it, and thus expected to be the first bin of the next window. That's the fully explicit way to describe the bins in code, but a standard for describing bins to the user (a single time when the bin spans a range) is not defined yet

                                timevals=[]
                                timevals.append(tmp)
                                while tmp<intervalEnd:# using python datetimes for making the axis is much much more precise than matplotlib floats.
                                        #print tmp, ' = ' , du.date2num(tmp) , ' = ' , (tmp-self.actualStartTime).total_seconds()
                                        tmp+=params['timeres']
                                        timevals.append(tmp)
                                        
                                #intervalEnd=tmp
                                intcount+=len(timevals)
                                if usePrebinnedTimes:
                                    intcount-=1
                                print 'Now %i intervals' % (intcount)
                                timevals=hau.T_Array(timevals)
                            else:

                                print 'Using Native timing'
                                timevals=None

                            print ' new window is ', intervalTime, ' to ' , intervalEnd

                            requested_times=timevals
                           
                            requested_chunk_times= requested_times#requested_times[requested_times >=intervalTime]

                            if requested_chunk_times is not None and len(requested_chunk_times)<2 and rawsrc is not None:
                                #if rawsrc is not None:
                                print "not enough time to process"
                                continue
                            elif rawsrc is None and rs_mem is None and remainder is None:
                                #chunk_end_to_use=intervalTime
                                #continue
                                #print ''
                                break
                 
          
                            rs_chunk,remainder = pu.process_data( instrument, intervalTime, intervalEnd
                                ,params['min_alt'], params['max_alt'], requested_chunk_times
                                , rs_mem, calv['rs_Cxx'], calv['rs_constants'], calv['rs_cal']
                                , None , self.cal_narr.hsrl_corr_adjusts, self.cal_narr.hsrl_process_control
                                , self.compute_stats,remainder=remainder)
                            rs_mem=None
                            if rs_chunk is not None and hasattr(rs_chunk,'rs_mean') and rs_chunk.rs_mean is not None and rs_chunk.rs_mean.times.size==0:
                                rs_chunk=None
                            if rs_chunk is None and rawsrc is None:
                                break
                           #print rs_chunk
                            if rs_chunk is not None and hasattr(rs_chunk,'rs_mean') and rs_chunk.rs_mean is not None and rs_chunk.rs_mean.times.size>0:
                                if fullrange and requested_chunk_times is not None:
                                    v=hau.Time_Z_Group(like=rs_chunk.rs_mean)
                                    v.times=hau.T_Array(requested_chunk_times[requested_chunk_times<rs_chunk.rs_mean.times[0]])
                                    if v.times.size>0:
                                        rs_chunk.rs_mean.prepend(v)
                                rs_chunk.calv=calv

                                yield rs_chunk
                                intervalTime=intervalEnd
                    except Exception, e:
                        print 'Exception occurred in update_cal_and_process'
                        print 'Exception = ',e
                        print traceback.format_exc()
                        if isinstance(e,(MemoryError,)):
                            print 'Please Adjust Your Parameters to be more Server-friendly and try again'
                            raise
                        if not isinstance(e,(AttributeError,)):
                            raise
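In the 'timeres' branch above, bin borders are built by stepping python datetimes from intervalTime to intervalEnd; with usePrebinnedTimes the last border only closes the final bin. A standalone sketch of that edge construction:

    from datetime import datetime, timedelta
    start = datetime(2015, 1, 1, 0, 0, 0)
    end = datetime(2015, 1, 1, 0, 2, 30)
    res = timedelta(seconds=60)
    timevals = [start]
    while timevals[-1] < end:             # datetimes keep the axis exact, unlike float times
        timevals.append(timevals[-1] + res)
    print len(timevals) - 1, 'intervals'  # 3 intervals; the 4th edge only closes the last bin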
Example 13
    def __call__(self, raw, cdf_attr):
        """input_translator convert raw netcdf variables into form
           used by the hsrl processing code and preforms pileup
           correction on photon counts"""

        if hasattr(raw, 'wfov_counts') \
               and self.constants['wfov_type'] == 'molecular':
            raw.molecular_wfov_counts = raw.wfov_counts.copy()
        elif hasattr(raw, 'wfov_counts'):
            raw.combined_wfov_hi_counts = raw.wfov_counts.copy()

        if hasattr(raw, 'op_mode'):
            #extract i2 lock bit from operating mode
            #this will allow testing of bit even after averaging
            raw.i2_locked = (raw.op_mode[:].astype(int) & 4) / 4

        if hasattr(raw, 'seeded_shots'):
            setattr(
                raw, 'delta_t', raw.seeded_shots[:, 0] /
                float(self.constants['laser_rep_rate']))
        else:
            setattr(raw, 'delta_t', np.zeros([0]))
        #for i in np.arange(raw.times.size):
        #    raw.times[i]-=timedelta(seconds=raw.delta_t[i])

        if hasattr(raw, 'transmitted_energy'):
            # convert to mJ per preaveraged accumulation interval
            raw.transmitted_energy[:] = raw.transmitted_energy \
                *self.constants['transmitted_energy_monitor'][0]\
                +self.constants['transmitted_energy_monitor'][1]\
                *raw.seeded_shots[:,0]
            #compute transmitted power
            setattr(raw, 'transmitted_power',
                    raw.transmitted_energy / raw.delta_t)

        if hasattr(raw, 'transmitted_1064_energy'):
            # convert to mJ per preaveraged accumulation interval
            raw.transmitted_1064_energy[:] = raw.transmitted_1064_energy \
                *self.constants['transmitted_1064_energy_monitor'][0]\
                +self.constants['transmitted_1064_energy_monitor'][1]\
                *raw.seeded_shots[:,0]
            #compute transmitted 1064 power
            setattr(raw, 'transmitted_1064_power',
                    raw.transmitted_1064_energy / raw.delta_t)

        if hasattr(raw, 'filtered_energy'):
            if raw.filtered_energy.dtype == 'int32':
                raw.nonfiltered_energy = raw.nonfiltered_energy.astype(
                    'float64')
                raw.filtered_energy = raw.filtered_energy.astype('float64')
            if len(raw.filtered_energy.shape) == 1:
                raw.filtered_energy = raw.filtered_energy[:, np.newaxis]
                raw.nonfiltered_energy = raw.nonfiltered_energy[:, np.newaxis]
            raw.filtered_energy[raw.filtered_energy > 1e10] = np.NaN
            raw.nonfiltered_energy[raw.nonfiltered_energy > 1e10] = np.NaN

        if hasattr(raw, 'builduptime') and raw.builduptime.size > 0:
            raw.qswitch_buildup_time = raw.builduptime[:, 0]
            raw.min_qswitch_buildup_time = raw.builduptime[:, 1]
            raw.max_qswitch_buildup_time = raw.builduptime[:, 2]

        if hasattr(raw, 'superseedlasercontrollog'):
            raw.superseedlasercontrollog[
                raw.superseedlasercontrollog > 1e10] = np.NaN

        if hasattr(raw,'energyRatioLockPoint') \
               and raw.energyRatioLockPoint.size>0:
            if len(raw.energyRatioLockPoint.shape) == 2:
                raw.filtered_lockpoint = raw.energyRatioLockPoint[:, 0]
                raw.nonfiltered_lockpoint = raw.energyRatioLockPoint[:, 1]
            else:
                clarray = hau.T_Array(np.ones([raw.filtered_energy.shape[0]]))
                raw.filtered_lockpoint = clarray * raw.energyRatioLockPoint[0]
                raw.nonfiltered_lockpoint = clarray * raw.energyRatioLockPoint[
                    1]

        if hasattr(raw, 'raw_analog_interferometertemperature'):
            thermistor_cal = self.constants['interferometer_temp_cal']
            R = np.abs(raw.raw_analog_interferometertemperature / 0.000250)
            raw.interferometer_temp = 1/(thermistor_cal[0] + thermistor_cal[1] \
                    * np.log(R) + thermistor_cal[2] * np.log(R)** 3) - 273.15

            ntemps = len(raw.interferometer_temp)
            if 0:  #ntemps > 5:
                #do eleven point median filter
                temps = np.zeros((ntemps, 11))
                temps[0:ntemps - 5, 0] = raw.interferometer_temp[5:]
                temps[0:ntemps - 4, 1] = raw.interferometer_temp[4:]
                temps[0:ntemps - 3, 2] = raw.interferometer_temp[3:]
                temps[0:ntemps - 2, 3] = raw.interferometer_temp[2:]
                temps[0:ntemps - 1, 4] = raw.interferometer_temp[1:]
                temps[:, 5] = raw.interferometer_temp
                temps[1:, 6] = raw.interferometer_temp[:ntemps - 1]
                temps[2:, 7] = raw.interferometer_temp[:ntemps - 2]
                temps[3:, 8] = raw.interferometer_temp[:ntemps - 3]
                temps[4:, 9] = raw.interferometer_temp[:ntemps - 4]
                temps[5:, 10] = raw.interferometer_temp[:ntemps - 5]
                raw.interferometer_temp = hau.T_Array(np.median(temps, 1))
            else:
                raw.interferometer_temp = hau.T_Array(raw.interferometer_temp)

        if hasattr(raw, 'raw_analog_etalontemperature'):
            # convert etalon thermistor voltage to thermistor resistance
            # T(degC) = 1/(a + b*ln(R) + c*ln(R)^3) - 273.15
            # (Steinhart-Hart equation)
            # where:
            # a = 0.000862448
            # b = 0.000258456
            # c = 0.000000142
            # and
            # R = (Volts ADC Reading)/(0.000250 amps)
            thermistor_cal = self.constants['interferometer_temp_cal']
            R = np.abs(raw.raw_analog_etalontemperature / 0.000250)
            raw.etalon_temp = (hau.T_Array(
                np.array(
                    (1.0 / (thermistor_cal[0] + thermistor_cal[1] * np.log(R) +
                            thermistor_cal[2] * np.log(R)**3) - 273.15),
                    dtype=np.float32,
                    ndmin=1)))
        if hasattr(raw, 'raw_analog_coolanttemperature'):
            # convert coolant thermistor voltage to thermistor resistance
            # T(degC) = 1/(a + b*ln(R) + c*ln(R)^3) - 273.15
            # (Steinhart-Hart equation)
            # where:
            # a = 0.000862448
            # b = 0.000258456
            # c = 0.000000142
            # and
            # R = (Volts ADC Reading)/(0.000250 amps)
            thermistor_cal = self.constants['interferometer_temp_cal']

            R = np.abs(raw.raw_analog_coolanttemperature / 0.000250)


            raw.coolant_temperature = \
                  (hau.T_Array(np.array((1.0 / (thermistor_cal[0] + thermistor_cal[1]
                  * np.log(R) + thermistor_cal[2] * np.log(R)** 3)
                  - 273.15),dtype=np.float32,ndmin=1)))

        if hasattr(raw, 'telescope_pointing'):
            if not hasattr(raw, 'telescope_locked'):
                setattr(raw, 'telescope_locked',
                        np.ones_like(raw.telescope_pointing))
            raw.telescope_pointing = raw.telescope_pointing.astype('float64')
            raw.telescope_pointing[raw.telescope_locked == 0] = .5
            #roll component of telescope mounting angle in degrees measured relative
            #to platform (zero degrees = vertical)
            #roll angle is + in clockwise direction
            if not hasattr(raw, 'telescope_roll_angle_offset'):
                setattr(raw, 'telescope_roll_angle_offset',
                        np.ones_like(raw.telescope_pointing))
            raw.telescope_roll_angle_offset[:] = self.constants[
                'telescope_roll_angle_offset']
            raw.telescope_roll_angle_offset[raw.telescope_pointing == 0] = \
                                                180.0 - self.constants['telescope_roll_angle_offset']

        if hasattr(raw, 'raw_analog_telescope_temperature'):
            # convert telescope thermistor voltage to thermistor resistance
            # T(degC) = 1/(a + b*ln(R) + c*ln(R)^3) - 273.15
            # (Steinhart-Hart equation)
            # where:
            # a = 0.000862448
            # b = 0.000258456
            # c = 0.000000142
            # and
            # R = (Volts ADC Reading)/(0.000250 amps)
            thermistor_cal = self.constants['interferometer_temp_cal']

            R = np.abs(raw.raw_analog_telescope_temperature / 0.000250)


            raw.telescope_temperature = \
                  (hau.T_Array(np.array((1.0 / (thermistor_cal[0] + thermistor_cal[1]
                  * np.log(R) + thermistor_cal[2] * np.log(R)** 3)
                  - 273.15),dtype=np.float32,ndmin=1)))
        if hasattr(raw,'OutgoingBeamPosition_centermass')\
               and raw.OutgoingBeamPosition_centermass.size > 0 :
            raw.cg_xs = raw.OutgoingBeamPosition_centermass[:, 0]
            raw.cg_ys = raw.OutgoingBeamPosition_centermass[:, 1]

        if hasattr(raw,'OutgoingBeamPosition2_centermass')\
                  and raw.OutgoingBeamPosition2_centermass.size > 0 :
            raw.cg_xs2 = raw.OutgoingBeamPosition2_centermass[:, 0]
            raw.cg_ys2 = raw.OutgoingBeamPosition2_centermass[:, 1]

        if hasattr(raw,'interferometer_intensity') \
               and raw.interferometer_intensity.size > 0:
            interf_peak = \
                self.constants['interferometer_spectral_peak']
            phase_to_freq = \
                self.constants['interferometer_phase_to_freq']
            npixels = self.constants['interferometer_fft_npixels']
            xform = np.fft.rfft(raw.interferometer_intensity[:, :npixels],
                                axis=1)
            tmp = np.concatenate(
                ([self.unwrap_firstangle], np.angle(xform[:, interf_peak])))
            newlast = tmp[-1]
            tmp = np.unwrap(tmp)
            tmp = (self.unwrap_firstangle_atmagnitude - tmp[0]) + tmp
            if np.isfinite(tmp[-1]):
                self.unwrap_firstangle_atmagnitude = tmp[-1]
                self.unwrap_firstangle = newlast
            raw.interf_freq = tmp[1:]
            raw.interf_freq = hau.T_Array(-raw.interf_freq * phase_to_freq[0])

        #compute temperature compensated interferometer freq
        if 0 and hasattr(raw,'interferometer_temp') \
               and hasattr(raw,'interf_freq')\
               and self.constants.has_key('interf_temp_coef'):
            raw.tcomp_interf_freq = raw.interf_freq \
                        - (raw.interferometer_temp-raw.interferometer_temp[0])\
                         * self.constants['interf_temp_coef']*1e9
        for imagetime in ('interferometer_snapshot_time',
                          'outgoingbeamalignment_snapshot_time',
                          'overhead_snapshot_time', 'snowscope_snapshot_time'):
            if hasattr(raw, imagetime):
                setattr(
                    raw, imagetime,
                    hru.convert_to_python_times(
                        getattr(raw, imagetime)[np.newaxis, :]))

        #replace missing values with NaN's
        if hasattr(raw, 'seedvoltage'):
            raw.seedvoltage[raw.seedvoltage > 100] = np.NaN
        if hasattr(raw, 'latitude'):
            raw.latitude[raw.latitude > 100] = np.NaN
        if hasattr(raw, 'longitude'):
            raw.longitude[raw.longitude > 200] = np.NaN

        if hasattr(raw, 'laserpowervalues') and raw.laserpowervalues.size > 0:
            raw.laser_current = raw.laserpowervalues[:, 0]
            raw.laser_voltage = raw.laserpowervalues[:, 1]
            if raw.laserpowervalues.shape[1] > 2:
                raw.laser_current_setpoint = raw.laserpowervalues[:, 2]
                raw.laser_diode_temp = raw.laserpowervalues[:, 3]
                raw.laser_diode_temp_setpoint = raw.laserpowervalues[:, 4]
            if raw.laserpowervalues.shape[1] > 6:
                raw.ktp_temp = raw.laserpowervalues[:, 5]
                raw.ktp_temp_setpoint = raw.laserpowervalues[:, 6]

        #remove spikes from tcs records
        for fiel in ('tcsopticstop_', 'tcsoptics_', 'tcstelescope_',
                     'thermal1_', 'thermal2_', 'tcsaft_', 'tcsfore_'):
            for f in vars(raw).keys():
                if f.startswith(fiel):
                    v = getattr(raw, f)
                    v[v > 1000] = np.NaN

        if hasattr(raw,'one_wire_temperatures') \
                and raw.one_wire_temperatures.size >0 :

            #raw.one_wire_attrib = cdf_attr['one_wire_temperatures']
            raw.one_wire_attrib = []
            [ntime, ntemps] = raw.one_wire_temperatures.shape
            for i in range(ntemps):
                string = 'field' + str(i) + '_name'
                try:
                    raw.one_wire_attrib.append(
                        cdf_attr['one_wire_temperatures'][string])
                except KeyError:
                    print "Couldn't find attribute for ", string
                    raw.one_wire_attrib.append(None)
            #remove spikes of 1e37 that appear in temperatures
            raw.one_wire_temperatures[raw.one_wire_temperatures>1000.]\
                         =np.NaN
        if hasattr(raw, 'RemoveLongI2Cell'):
            servo_range = cdf_attr['RemoveLongI2Cell']['range']
            raw.i2_cell_out = np.abs(raw.RemoveLongI2Cell-servo_range[1]) \
                     > np.abs(raw.RemoveLongI2Cell-servo_range[0])

        if hasattr(raw, 'RemoveLongI2ArCell'):
            servo_range = cdf_attr['RemoveLongI2ArCell']['range']
            raw.i2a_cell_out = np.abs(raw.RemoveLongI2ArCell-servo_range[1]) \
                     > np.abs(raw.RemoveLongI2ArCell-servo_range[0])

        if hasattr(raw, 'shot_count'):
            if raw.shot_count.size > 0:
                raw.shot_count = raw.shot_count[:, 0]
            else:
                raw.shot_count = raw.shot_count.reshape([0])

        if hasattr(raw, 'seeded_shots'):
            if raw.seeded_shots.size > 0:
                raw.seeded_shots = raw.seeded_shots[:, 0]
            else:
                raw.seeded_shots = raw.seeded_shots.reshape([0])

        #extract average dark counts from profiles and add dark counts to raw
        #dark count extracted from 'first_bins' or 'last_bins' as specified in constants
        #pu.extract_dark_count(raw,self.constants) #moved to after PILEUP 20140805

        #extract cal pulse from light scattered as laser pulse exits system
        #and place in raw
        #pu.extract_cal_pulse(raw,self.constants)

        if hasattr(raw, 'l3cavityvoltage') and raw.l3cavityvoltage.size > 0:
            raw.piezo_voltage_ave = raw.l3cavityvoltage[:, 0]
            raw.piezo_voltage_min = raw.l3cavityvoltage[:, 1]
            raw.piezo_voltage_max = raw.l3cavityvoltage[:, 2]
        if hasattr(raw, 'l3locking_stats'
                   ) and 'l3slope_to_frequency' in self.constants:
            raw.l3frequency_offset = raw.l3locking_stats.copy()
            for x in range(0, 3):
                raw.l3frequency_offset[:, x] = np.polyval(
                    self.constants['l3slope_to_frequency'],
                    raw.l3locking_stats[:, x])

        if hasattr(raw, 'GPS_MSL_Alt'):
            #replace any spikes in altitude with the base altitude
            #this allows the code to run but produces garbage data
            if np.any(raw.GPS_MSL_Alt > 20000.0):
                raw.GPS_MSL_Alt[raw.GPS_MSL_Alt > 20000.0] = \
                           self.constants['lidar_altitude']
        if hasattr(raw, 'roll_angle'):
            if anynan(raw.roll_angle):
                raw.roll_angle[np.isnan(raw.roll_angle)] = 0.0
        if hasattr(raw, 'pitch_angle'):
            if anynan(raw.pitch_angle):
                raw.pitch_angle[np.isnan(raw.pitch_angle)] = 0.0
        if hasattr(raw, 'opticalbenchairpressure'):
            #convert psi to mb
            #print 'pre--opticalbenchairpressure ',raw.opticalbenchairpressure.shape
            raw.opticalbenchairpressure=hau.T_Array(np.array((raw.opticalbenchairpressure\
                    *self.constants['optical_bench_air_pressure_cal']),ndmin=1,dtype=np.float32))
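            #for reference, 1 psi = 68.948 mb, so the calibration constant is
            #presumably of that magnitude (an assumption--the actual value
            #comes from self.constants)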
            #print 'optical_bench_air_pressure.size',raw.opticalbenchairpressure.size,raw.times.size
        if hasattr(raw,
                   'chillertemperature') and raw.chillertemperature.size > 0:
            raw.chiller_temp = raw.chillertemperature[:, 0]
            raw.chiller_setpt = raw.chillertemperature[:, 1]

        if hasattr(raw, 'etalon_pressure'):
            raw.etalon_pressure = raw.etalon_pressure * self.constants[
                'etalon_pressure']

        if hasattr(raw, 'qw_rotation_angle'):
            #convert gv quarter wave plate rotation angle from radians to deg
            raw.qw_rotation_angle = raw.qw_rotation_angle * 180.0 / np.pi

        if hasattr(raw, 'GPS_MSL_Alt') or self.constants['installation'] in (
                'airborne', 'shipborne'):
            #do quality check on aircraft GPS and INS data
            pu.gps_quality_check(raw, self.constants)

        if hasattr(raw, 'molecular_counts'):
            for k, v in vars(raw).items():
                if '_counts' in k:
                    if raw.molecular_counts.shape[1] != v.shape[1]:
                        print 'raw field ', k, ' is messed up. size difference'
                        tmp = copy.deepcopy(raw.molecular_counts)
                        minidx = min(tmp.shape[1], v.shape[1])
                        tmp[:, :] = 0
                        tmp[:, :minidx] = v[:, :minidx]
                        setattr(raw, k, tmp)

        #do pileup correction on signals before any averaging is done
        pu.pileup_correction(raw, self.constants, self.corr_adjusts)
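        #one common nonparalyzable dead-time model (a sketch only--the model
        #actually used by pu.pileup_correction is not shown in this listing):
        #  N_true = N_obs / (1 - N_obs * tau / t_bin)
        #where tau is the detector dead time and t_bin the bin accumulation time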

        #extract average dark counts from profiles and add dark counts to raw
        #dark count extracted from 'first_bins' or 'last_bins' as specified in constants
        if hasattr(raw, 'molecular_counts'):
            pu.extract_dark_count(
                raw, self.constants)  #relocated from above 20140805

            #extract cal pulse from light scattered as laser pulse exits system
            #and place in raw

            pu.extract_cal_pulse(raw, self.constants)

            if 0:
                import hsrl.simulation.rb_simulation as sim
                #rescale for new energy
                sim.rb_simulation(raw, self.constants)
                #redo dark count
                pu.extract_dark_count(raw, self.constants)
Esempio n. 14
0
def generate_ave_profiles(rs_mean,
                          qc_mask,
                          rs_Cxx,
                          rs_constants,
                          processing_defaults,
                          sel_telescope_dir,
                          corr_adjusts,
                          old_profiles=None):
    """create average of profiles dependent on telescope pointing direction
                     , telescope pointing may be 'all', 'zenith', or 'nadir' """

    try:
        [ntimes, nalts] = rs_mean.molecular_counts.shape
    except AttributeError:
        #raise RuntimeError, \
        #    "molecular counts missing"
        ntimes = 0
        nalts = 500
    #if rs_mean.times.shape[0] == 0:
    #raise RuntimeError, \
    #  "times missing"
    #    return old_profiles

    if qc_mask is not None and processing_defaults.get_value(
            'averaged_profiles', 'apply_mask'):
        #make mask array with NaN's for array elements where bit[0] of qc_mask==0
        #all other elements of mask = 1
        mask = (np.bitwise_and(qc_mask, 1)).astype('float')
        #mask is float to allow use of NaN values
        mask[mask == 0] = np.NaN
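        #e.g. a qc_mask element of 0b...110 (bit 0 clear) gives mask = NaN while
        #0b...101 (bit 0 set) gives mask = 1.0; multiplying counts by mask
        #blanks the flagged bins without changing the rest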

        print 'qc_mask applied to time averaged profiles vs altitude'
    else:
        #no mask applied
        mask = None
        print 'qc_mask has not been applied to time averaged profiles'
    # most of the time we need to generate some/all of these profiles,
    # because users are plotting them
    # if this is too time-consuming, we'll generate just the ones necessary for the
    # user selected plots.

    indices = np.arange(rs_mean.times.shape[0])
    indices = (indices >= 0)  #boolean now
    if ntimes == 0 and len(indices) == 0:
        return old_profiles

    #average only those profiles occurring when locked to the i2 line
    #and not in cal_scan mode
    #op_mode = rs.rs_raw.op_mode.astype(int)
    #not_cal_scan = (~op_mode[:] & 32)/32
    #locked_to_i2 = ((op_mode[:] & 4)/4)

    #bits = np.zeros((len(rs.rs_raw.times),len(bit_value)+1))
    #opmode=rs.rs_raw.op_mode.astype('uint32')
    #for i in range(len(bit_value)):
    #   bits[:,i]=(i+1)*(opmode[:] & bit_value[i])/bit_value[i]

    #this dictionary maps the long name to the shorthand used with dark_counts variables
    # the keys are used here as a list of all possible channels too
    channel_shorthand = dict(molecular_counts='mol',
                             combined_hi_counts='c_hi',
                             combined_lo_counts='c_lo',
                             combined_wfov_counts='c_wfov',
                             combined_1064_counts='combined_1064',
                             molecular_wfov_counts='m_wfov',
                             molecular_i2a_counts='mol_i2a',
                             cross_pol_counts='c_pol',
                             combined_counts='comb')

    #,combined_1064_counts='combined_1064'
    #select desired telescope pointing direction for profiles

    if (rs_constants['installation'] == 'ground'
            or rs_constants['installation'] == 'shipborne'
            or sel_telescope_dir == 'all'):
        print 'Selecting all telescope pointing directions for profiles'

        if processing_defaults is not None and processing_defaults.get_value(
                'averaged_profiles', 'apply_mask'):
            indices[rs_mean.i2_locked <= 0.99] = False

    elif sel_telescope_dir == 'zenith':
        #if telescope pointing exists, limit to selected pointing direction
        if rs_mean.telescope_pointing.shape[0]:
            print 'Selecting only zenith pointing data for profiles'
            indices[rs_mean.telescope_pointing != 1] = False
            rs_mean.telescope_pointing = rs_mean.telescope_pointing[indices]
        else:
            print 'Warning--using all shots--no telescope pointing direction in data file'
    elif sel_telescope_dir == 'nadir':
        #if telescope pointing exists, limit to selected pointing direction
        if rs_mean.telescope_pointing.shape[0]:
            print 'Selecting only nadir pointing data for profiles'
            indices[rs_mean.telescope_pointing != 0] = False
        else:
            print 'Warning--using all shots--no telescope pointing direction in data file'
    else:
        raise RuntimeError(
            "Unrecognized value '%s' for telescope pointing dir--valid(all, zenith, nadir)"
            % (sel_telescope_dir))
    #this is meant to filter out chunks from rs_mean that are calibration intervals
    if hasattr(rs_mean, 'op_mode'):
        indices[np.bitwise_and(rs_mean.op_mode, 16) != 0] = False
    indices = np.arange(indices.shape[0])[indices]  #back to indexes
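    #e.g. indices = array([True, False, True]) yields np.arange(3)[indices]
    #== array([0, 2]), so downstream code can index rs_mean fields directly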
    profiles = hau.Time_Z_Group(can_append=False)
    profiles.hist = hau.Time_Z_Group()
    ft = None
    tc = 0
    #tv=0
    lt = None
    if old_profiles is not None:
        #total_seeded_shots=total_seeded_shots+profiles.hist.total_seeded_shots
        ft = old_profiles.hist.ft
        tc = old_profiles.hist.tc
        #tv=old_profiles.hist.tv
        lt = old_profiles.hist.lt
    if len(indices) > 0:
        if ft is None:
            ft = rs_mean.times[indices][0]
        lt = rs_mean.times[indices][-1] + timedelta(
            seconds=rs_mean.delta_t[indices][-1]
            if not np.isnan(rs_mean.delta_t[indices][-1]) else 0)
        for x in indices:
            if rs_mean.times[x] is not None:
                tc = tc + 1
                #tv=tv+(rs_mean.times[x]-ft).total_seconds()
    if tc > 0:
        profiles.times = hau.T_Array([ft])  #+timedelta(seconds=tv/tc), ])
        profiles.start = ft
        profiles.width = lt - ft
        profiles.delta_t = hau.T_Array([profiles.width.total_seconds()])
    else:
        profiles.times = hau.T_Array([])
        profiles.start = ft
        profiles.width = timedelta(seconds=0)
        profiles.delta_t = hau.T_Array([])
    profiles.start_time = ft
    profiles.end_time = lt
    profiles.hist.ft = ft
    profiles.hist.tc = tc
    #profiles.hist.tv=tv
    profiles.hist.lt = lt
    #profiles.hist.total_seeded_shots=total_seeded_shots
    if rs_mean is not None and hasattr(rs_mean, 'msl_altitudes'):
        profiles.msl_altitudes = rs_mean.msl_altitudes
    elif hasattr(old_profiles, 'msl_altitudes'):
        profiles.msl_altitudes = old_profiles.msl_altitudes

    if rs_mean is not None and hasattr(rs_mean, 'geo_corr'):
        profiles.geo_corr = rs_mean.geo_corr
    elif hasattr(old_profiles, 'geo_corr'):
        profiles.geo_corr = old_profiles.geo_corr

    if rs_constants['installation'] == 'airborne' and len(indices) > 0:
        if hasattr(rs_mean, 'GPS_MSL_Alt'):
            profiles.min_GPS_MSL_Alt = np.min(rs_mean.GPS_MSL_Alt[indices])
            profiles.mean_GPS_MSL_Alt = nanmean(rs_mean.GPS_MSL_Alt[indices])
            profiles.max_GPS_MSL_Alt = np.max(rs_mean.GPS_MSL_Alt[indices])
        if hasattr(rs_mean, 'telescope_pointing'):
            #initialized here so the flag exists even when GPS altitude is absent
            profiles.telescope_pointing = np.zeros(1)
            if (rs_mean.telescope_pointing > .95).all():
                profiles.telescope_pointing[0] = 1
            elif (rs_mean.telescope_pointing < .05).all():
                profiles.telescope_pointing[0] = 0
            else:
                profiles.telescope_pointing[0] = np.NaN

    accumulate(profiles,
               old_profiles,
               rs_mean,
               indices,
               'seeded_shots',
               pref='mean_',
               filler=hau.T_Array([0]))
    total_seeded_shots = profiles.mean_seeded_shots
    profiles.seeded_shots = total_seeded_shots.copy()
    print 'Total seeded shots for profile =', total_seeded_shots
    accumulate(profiles, old_profiles, rs_mean, indices,
               'transmitted_1064_energy', total_seeded_shots)
    accumulate(profiles, old_profiles, rs_mean, indices, 'transmitted_energy',
               total_seeded_shots)
    # create TZ_Array with time dimension of '1', so hsrl_inversion doesn't choke

    for chan in channel_shorthand.keys():
        accumulate(profiles, old_profiles, rs_mean, indices, chan,
                   total_seeded_shots, mask)

    #compute inverted profiles from mean count profiles
    if rs_Cxx is not None and hasattr(profiles, 'molecular_counts'):
        profiles.inv = cu.hsrl_inversion(profiles, rs_Cxx, rs_constants,
                                         corr_adjusts, processing_defaults)

    elif hasattr(old_profiles, 'inv'):
        profiles.inv = old_profiles.inv

    #adds raw_color_ratio to profiles.inv
    if hasattr(profiles,'inv') and hasattr(profiles,'combined_counts') \
               and hasattr(profiles,'combined_1064_counts'):
        if 0:
            print 'profiles'
            print dir(profiles)
            print 'inv'
            print dir(profiles.inv)

            import matplotlib.pylab as plt
            plt.figure(3333)
            plt.plot(
                profiles.combined_counts[0, :], profiles.inv.msl_altitudes,
                'r', profiles.cross_pol_counts[0, :],
                profiles.inv.msl_altitudes, 'g',
                profiles.combined_counts[0, :] +
                rs_constants['combined_to_cross_pol_gain_ratio'] *
                profiles.cross_pol_counts[0, :],
                profiles.inv.msl_altitudes, 'c',
                profiles.combined_1064_counts[0, :],
                profiles.inv.msl_altitudes, 'k')
            plt.grid(True)
            ax = plt.gca()
            ax.set_xscale('log')

        profiles.inv.raw_color_ratio = cu.compute_raw_color_ratio(
            profiles, rs_Cxx, rs_constants, corr_adjusts)

        if 0:
            plt.figure(3334)
            plt.plot(profiles.inv.raw_color_ratio[0, :],
                     profiles.inv.msl_altitudes, 'c')
            plt.grid(True)
            ax = plt.gca()
            ax.set_xscale('log')

    #generate klett profiles if requested
    if processing_defaults is not None and hasattr(profiles, 'inv') \
            and processing_defaults.get_value('klett', 'enable'):
        ref_altitude = processing_defaults.get_value('klett',
                                                     'ref_altitude') * 1000.0
        if ref_altitude < profiles.inv.msl_altitudes[0] \
                        or ref_altitude > profiles.inv.msl_altitudes[-1] :
            print
            print
            print 'WARNING---klett ref altitude=', ref_altitude, ' is not in requested altitudes'
            print 'no klett profile retrieval attempted '
            print
        else:
            if hasattr(profiles, 'combined_1064_counts'):
                profiles.inv.beta_a_1064_backscat_klett = lu.compute_klett_backscatter(
                    profiles.combined_1064_counts,
                    profiles.inv.beta_r_backscat / 16.0,
                    profiles.inv.msl_altitudes,
                    processing_defaults.get_value('klett', 'lidar_ratio_532'),
                    ref_altitude)
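                #the /16.0 above scales the 532 nm molecular backscatter to
                #1064 nm: Rayleigh scattering varies as lambda**-4 and
                #(1064./532.)**4 == 16.0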
            if hasattr(profiles, 'combined_counts'):
                profiles.inv.beta_a_532_backscat_klett = lu.compute_klett_backscatter(
                    profiles.combined_counts, profiles.inv.beta_r_backscat,
                    profiles.msl_altitudes,
                    processing_defaults.get_value('klett', 'lidar_ratio_532'),
                    ref_altitude)

    for chan in channel_shorthand.keys():
        accumulate(profiles, old_profiles, rs_mean, indices, 'var_raw_' + chan,
                   total_seeded_shots, mask)
        accumulate(profiles, old_profiles, rs_mean, indices, 'raw_' + chan,
                   total_seeded_shots, mask)

    if processing_defaults is not None and processing_defaults.get_value(
            'compute_stats', 'enable'):

        #kludge
        pf = hau.Time_Z_Group()
        for chan in channel_shorthand.keys():
            if hasattr(profiles, 'sum_var_raw_' + chan):
                setattr(pf, chan, getattr(profiles, 'sum_' + chan))
                setattr(pf, 'var_raw_' + chan,
                        getattr(profiles, 'sum_var_raw_' + chan))

        if rs_Cxx is not None and hasattr(profiles, 'inv'):
            pu.compute_photon_statistics(pf, profiles.inv, rs_Cxx,
                                         rs_constants)

    if rs_Cxx is not None and hasattr(profiles, 'inv'):
        [profiles.inv.optical_depth, profiles.inv.optical_depth_aerosol,
         profiles.inv.mol_norm_index, profiles.inv.mol_ref_aod] = \
            lu.compute_optical_depth(profiles.inv.Nm,
                                     profiles.inv.beta_r_backscat,
                                     profiles.msl_altitudes,
                                     processing_defaults,
                                     rs_constants,
                                     profiles.inv.telescope_pointing
                                     if hasattr(profiles.inv, 'telescope_pointing')
                                     else None)

        #add 1064 aerosol backscatter and color ratio to profiles
        if hasattr(profiles, 'combined_counts') and hasattr(
                profiles, 'combined_1064_counts'):
            cu.compute_1064_aerosol_backscatter(profiles, profiles.inv,
                                                processing_defaults,
                                                rs_constants, corr_adjusts)
            cu.compute_color_ratio(profiles.inv)

        if processing_defaults.get_value('extinction_processing',
                                         'filter_type') == 'savitzky_golay':
            od_threshhold = processing_defaults.get_value(
                'extinction_processing', 'od_threshhold')
            z_window_width = processing_defaults.get_value(
                'extinction_processing', 'alt_window_length')
            order = processing_defaults.get_value('extinction_processing',
                                                  'polynomial_order')

            min_filter_alt = processing_defaults.get_value(
                'extinction_processing', 'min_alt')
            if min_filter_alt < profiles.msl_altitudes[0]:
                min_filter_alt = profiles.msl_altitudes[0]

            adaptive = processing_defaults.get_value('extinction_processing',
                                                     'adaptive')

            t_window_width = 0.0
            if profiles.inv.times.size == 0:
                pass
            elif hasattr(rs_mean, 'telescope_pointing'):
                profiles.inv = filtered_extinction(
                    profiles.inv, profiles.msl_altitudes, min_filter_alt,
                    od_threshhold, t_window_width, z_window_width, order,
                    adaptive, rs_mean.telescope_pointing)
            else:
                profiles.inv = filtered_extinction(
                    profiles.inv, profiles.msl_altitudes, min_filter_alt,
                    od_threshhold, t_window_width, z_window_width, order,
                    adaptive)

        else:
            bin_delta = processing_defaults.get_value('extinction_processing',
                                                      'bin_delta')
            bin_delta = int(bin_delta)
            pts_to_ave = processing_defaults.get_value('extinction_processing',
                                                       'ext_pts_to_ave')

            if hasattr(rs_mean, 'telescope_pointing'):
                profiles.inv = pu.compute_extinction(
                    profiles.inv, profiles.msl_altitudes, bin_delta,
                    pts_to_ave, rs_mean.telescope_pointing)
            else:
                profiles.inv = pu.compute_extinction(profiles.inv,
                                                     profiles.msl_altitudes,
                                                     bin_delta, pts_to_ave)

    # raw profiles--ave photons/bin/laser_pulse
    #for chan in channel_shorthand.keys():
    #    accumulate(profiles,old_profiles,rs_raw,raw_indices,chan,raw_total_seeded_shots,pref='raw_')

    if 0:
        import matplotlib.pylab as plt
        plt.figure(898989)
        plt.plot(np.nanmean(rs_mean.combined_1064_counts,
                            0), rs_mean.msl_altitudes, 'k',
                 np.nanmean(rs_mean.combined_counts, 0), rs_mean.msl_altitudes,
                 'r')
        ax = plt.gca()
        ax.set_xscale('log')

    # dark corrected raw profiles
    for chan, shorthand in channel_shorthand.items():
        dcchan = shorthand + '_dark_counts'
        correc = 'dc_' + chan
        source = 'raw_' + chan
        if accumulate(profiles,
                      old_profiles,
                      rs_mean,
                      indices,
                      dcchan,
                      total_seeded_shots,
                      extravars=[correc]):
            if hasattr(profiles, source):
                #print 'Applying dark count from raw frame to raw counts*******'
                setattr(profiles, correc,
                        getattr(profiles, source) - getattr(profiles, dcchan))
            elif hasattr(old_profiles, correc):
                setattr(profiles, correc, getattr(old_profiles, correc))
                print 'Copying corrected counts because source doesn\'t exist???? WARNING ***'
                if hasattr(old_profiles, source):
                    setattr(profiles, source, getattr(old_profiles, source))
                else:
                    raise RuntimeError(
                        'Something is wrong with input channels. BUG!')

    if processing_defaults is not None:
        profiles.mol_norm_alt = processing_defaults.get_value(
            'mol_norm_alt', 'meters')
    return profiles
Esempio n. 15
0
def compute_optical_depth(Nm,
                          beta_r_backscat,
                          msl_altitudes,
                          processing_defaults,
                          constants,
                          telescope_pointing=None):
    """uses Nm and beta_r_backscat to compute the optical
       depth profile with the optical depth profile set to zero at
       an alititude given by 'mol_norm_alt' which must provided
       in meters.
       returns:
           od = total optical depth
           od_aerosol = aerosol optical depth
           mol_norm_index = bin number at which optical depth is normalized to zero
           mol_ref_aod = estimated optical depth at altitudes[mol_norm_index]"""

    #mol_od_between_lidar_and_norm_alt = 0.0
    mol_norm_alt = processing_defaults.get_value('mol_norm_alt', 'meters')
    mol_norm_index = len(msl_altitudes[msl_altitudes <= mol_norm_alt])
    if processing_defaults.enabled('molecular_smooth'):
        window_offset = float(
            processing_defaults.get_value('molecular_smooth',
                                          'window_length')) / 2.0
    else:
        window_offset = 0.0
    if ('installation' not in constants or constants['installation'] == 'ground'
               or constants['installation'] == 'shipborne')\
               and mol_norm_alt < (constants['lidar_altitude']+150.0 + window_offset):
        mol_norm_alt = constants['lidar_altitude'] + 150. + window_offset
        mol_norm_index = len(msl_altitudes[msl_altitudes <= mol_norm_alt])
        lidar_alt_index = len(
            msl_altitudes[msl_altitudes <= constants['lidar_altitude']])

        print 'requested molecular norm altitude too low--  reset at ', mol_norm_alt, ' m'
        processing_defaults.set_value('mol_norm_alt', 'meters', mol_norm_alt)
    od = hau.TZ_Array(np.NaN * np.zeros_like(Nm))
    indices = np.arange(len(msl_altitudes))

    if processing_defaults.enabled('molecular_smooth'):
        window = processing_defaults.get_value('molecular_smooth',
                                               'window_length')
        if mol_norm_alt < constants['lidar_altitude'] + 150 + window / 2.0:
            mol_norm_alt = constants['lidar_altitude'] + 150 + window / 2.0
            mol_norm_index = len(msl_altitudes[msl_altitudes <= mol_norm_alt])
            print ' '
            print 'Warning:******requested mol normalization altitude too low for smoothing window'
            print '      setting normalization altitude = ', msl_altitudes[
                mol_norm_index]
            print ' '
    if mol_norm_index >= len(msl_altitudes):
        mol_norm_index = len(msl_altitudes) - 1
        print ' '
        print 'Warning:******requested normalization altitude higher than requested range'
        print '     setting normalization altitude =', msl_altitudes[
            mol_norm_index]

    if mol_norm_index < Nm.shape[1]:
        if 0:
            print
            print
            print 'alts', msl_altitudes.shape
            print 'beta_r', beta_r_backscat.shape
            print 'Nm', Nm.shape
            print 'norm_alt', msl_altitudes[mol_norm_index]
            print
            import matplotlib.pylab as plt
            plt.figure(2000)
            plt.plot(
                msl_altitudes,
                np.nanmean(Nm, 0) * beta_r_backscat[mol_norm_index] /
                np.nanmean(Nm[:, mol_norm_index], 0), msl_altitudes,
                beta_r_backscat)
            plt.ylabel('altitude')
            ax = plt.gca()
            ax.set_yscale('log')
            plt.grid(True)

        time_vec = np.ones_like(Nm[:, 0])
        bin_vec = np.ones_like(beta_r_backscat)
        beta_r_array = (time_vec[:, np.newaxis] *
                        beta_r_backscat[np.newaxis, :])
        Nm_norm_array = Nm[:, mol_norm_index]
        Nm_norm_array = Nm_norm_array[:, np.newaxis] * bin_vec[np.newaxis, :]
        prelog_od = Nm[:, :] / (
            beta_r_array * Nm_norm_array) * beta_r_backscat[mol_norm_index]
        prelog_od[np.isnan(prelog_od)] = 0
        prelog_od[prelog_od <= 0.0] = np.NaN
        od = -0.5 * np.log(prelog_od)
        if telescope_pointing is not None:
            tmpod = od
            od = od.copy()
            od[:, :] = np.NaN
            od[telescope_pointing < 0.1, :] = -1.0 * tmpod[
                telescope_pointing < 0.1, :]
            od[telescope_pointing > 0.9, :] = tmpod[
                telescope_pointing > 0.9, :]
        dz = msl_altitudes[1] - msl_altitudes[0]
        mol_od = 8.0 * np.pi * np.cumsum(beta_r_backscat) * dz / 3.0
        mol_od = mol_od - mol_od[mol_norm_index]
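        #for Rayleigh scattering extinction = (8*pi/3)*backscatter, so the
        #cumulative sum above approximates the integral of molecular extinction
        #with altitude, re-zeroed at the normalization bin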

        mol_od_array = time_vec[:, np.newaxis] * mol_od[np.newaxis, :]
        od_aerosol = od - mol_od_array
    else:
        od[:, :] = np.NaN
        od_aerosol = od.copy()
        #define dz and mol_od_array so the extrapolation below stays well-defined
        dz = msl_altitudes[1] - msl_altitudes[0]
        mol_od_array = np.zeros_like(od)
        print ' '
        print '*******requested od_normalization altitude not within acquired data'
        print ' '

    #compute optical depth below mol_norm_index
    #extrapolate from just above mol_norm_index to estimate unmeasured optical depth
    if dz >= 100:
        n_bins = 1
    else:
        n_bins = int(100.0 / dz + 1)
    norm_alt_range = msl_altitudes[mol_norm_index] - constants['lidar_altitude']
    mol_ref_aod = (od[:, mol_norm_index + n_bins] -
                   od[:, mol_norm_index]) * (norm_alt_range / (dz * n_bins))
    mol_ref_aod -= (mol_od_array[:, mol_norm_index + n_bins] -
                    mol_od_array[:, mol_norm_index]) * norm_alt_range / (dz * n_bins)
    mol_ref_aod = hau.T_Array(mol_ref_aod)
    od = hau.TZ_Array(od)
    od_aerosol = hau.TZ_Array(od_aerosol)
    return (od, od_aerosol, mol_norm_index, mol_ref_aod)
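#A minimal standalone sketch (not part of the original module) of the
#normalization used above: od(z) = -0.5*ln( Nm(z)*beta_r(z_ref) /
#(beta_r(z)*Nm(z_ref)) ), which is zero at the reference bin by construction.
#All names below are illustrative.
import numpy as np

Nm = np.array([[100.0, 80.0, 60.0]])          #molecular counts, 1 time x 3 bins
beta_r = np.array([1.0e-6, 0.8e-6, 0.6e-6])   #molecular backscatter per bin
iref = 0                                      #normalization bin
prelog = Nm / (beta_r[np.newaxis, :] * Nm[:, iref][:, np.newaxis]) * beta_r[iref]
od = -0.5 * np.log(prelog)
print od  #approximately [[ 0.  0.  0.]] -- Nm proportional to beta_r means no aerosol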
Esempio n. 16
0
def process_spheroid_particle(rs_inv,
                              rs_radar,
                              particle_parameters,
                              lambda_radar,
                              entire_frame,
                              sounding=None,
                              size_dist=None):
    """
            process_spheroid_particle(rs_inv,rs_radar,particle_parameters,lambda_radar,entire_frame,
                              sounding=None,p180_water=None,size_dist=None):
            generate and return the particle measurements based on a given hsrl inverted data,
            radar (and its lambda), and particle parameters dictionary
            """

    #create timez group and add heights
    rs_particle = hau.Time_Z_Group(rs_inv.times.copy(),
                                   timevarname='times',
                                   altname='heights')
    setattr(rs_particle, 'heights', rs_inv.msl_altitudes.copy())
    setattr(rs_particle, 'delta_t', rs_inv.delta_t.copy())

    #remove points where lidar signal is noise dominated by setting to
    #very small value.
    #clipped_beta_a_back=rs_inv.beta_a_backscat.copy()
    #if 0: #if hasattr(rs_inv,'std_beta_a_backscat'):
    #    clipped_beta_a_back[clipped_beta_a_back<(2*rs_inv.std_beta_a_backscat)]=-np.inf
    #else:
    #    print 'No std_beta_a_backscat statistics to filter particle measurements'
    #clipped_beta_a_back[np.logical_not(np.isfinite(rs_inv.beta_a_backscat))]=-np.inf;

    rs_particle.q_backscatter = np.NaN * np.zeros_like(rs_inv.beta_a_backscat)
    rs_particle.phase = np.zeros_like(rs_inv.beta_a_backscat)
    rs_particle.phase[
        rs_inv.linear_depol > particle_parameters['h2o_depol_threshold']] = 1
    rs_particle.phase[np.isnan(rs_inv.beta_a_backscat)] = np.NaN
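    #e.g. if particle_parameters['h2o_depol_threshold'] were 0.25 (illustrative
    #value only), a bin with linear_depol of 0.4 is flagged ice (phase 1) while
    #0.05 remains water (phase 0)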

    #set aspect ratio parameter for ice filled bins
    rs_particle.zeta = np.ones(rs_inv.beta_a_backscat.shape)
    rs_particle.zeta[rs_inv.linear_depol > particle_parameters['h2o_depol_threshold']] \
                  = particle_parameters['zeta']

    print 'Extinction cross section for particle size calculations derived from ', \
        particle_parameters['ext_source']
    print 'Extinction due to nonprecipitating aerosols = ', \
        particle_parameters['background_aerosol_bs'], '1/(m sr)'

    #store the mask field with the particle info
    rs_particle.qc_mask = rs_inv.qc_mask.copy()

    clipped_beta_a_backscat = rs_inv.beta_a_backscat.copy()
    copy_radar_backscatter = rs_radar.Backscatter.copy()
    #clipped_beta_a_backscat = copy_beta_a.copy()
    clipped_beta_a_backscat = clipped_beta_a_backscat \
              - particle_parameters['background_aerosol_bs']
    clipped_beta_a_backscat[clipped_beta_a_backscat < 0] = np.NaN

    #create an empty mode_diameter array
    rs_particle.mode_diameter = np.zeros_like(rs_inv.beta_a_backscat)

    #create an empty array for extinction--used only for particle calculations
    #bs_ratio_to_dmode will return extinction cross section in clipped_beta_a
    clipped_beta_a = np.NaN * np.zeros_like(rs_inv.beta_a_backscat)

    #water
    #compute mode diameter, extinction cross section, and backscatter efficiency
    #from radar and lidar backscatter cross sections using Mie theory and an
    #assumed size distribution to predict mode diameter and q_backscatter for
    #points identified as water.
    if particle_parameters['radar_model'] == "Mie":
        rs_particle.mode_diameter, clipped_beta_a, rs_particle.q_backscatter\
                  ,rs_particle.dstar \
              = size_dist.dmode_from_radar_lidar_mie(copy_radar_backscatter\
                  ,clipped_beta_a_backscat)

    else:
        #use only Rayleigh approx solution--particle_parameters['radar_model']=="Rayleigh"
        #mode diameter is computed for all points assuming everything is water;
        #subsequent calculation will replace ice phase points.
        if particle_parameters['ext_source'] == 'ext':
            clipped_beta_a = rs_inv.extinction_aerosol.copy()
        elif particle_parameters['ext_source'] == 'bs/p180':
            clipped_beta_a = clipped_beta_a_backscat / particle_parameters[
                'p180_water']
        else:
            raise RuntimeError(
                "particle_parameters['ext_source'] = %s not supported in spheroid_particle_processing"
                % particle_parameters['ext_source'])
        clipped_beta_a[np.isnan(clipped_beta_a_backscat)] = np.NaN
        phase = np.zeros_like(rs_inv.beta_a_backscat)
        zeta = np.ones_like(rs_inv.beta_a_backscat)

        rs_particle.mode_diameter = size_dist.dmode_from_lidar_radar_rayleigh(
            rs_particle.mode_diameter, clipped_beta_a, copy_radar_backscatter,
            zeta, phase)

    #ice
    #compute extinction cross section for ice points using backscatter phase function
    clipped_beta_a[rs_particle.phase==1] = \
        clipped_beta_a_backscat[rs_particle.phase==1]/particle_parameters['p180_ice']
    zeta = np.zeros_like(clipped_beta_a)
    zeta[rs_particle.phase == 1] = particle_parameters['zeta']

    #derive mode_diameter directly from radar backscatter and lidar extinction
    #cross sections for parts of image populated by ice
    rs_particle.mode_diameter[rs_particle.phase==1] = size_dist.dmode_from_lidar_radar_rayleigh(\
        rs_particle.mode_diameter[rs_particle.phase==1] \
        ,clipped_beta_a[rs_particle.phase==1],copy_radar_backscatter[rs_particle.phase==1]\
        ,zeta[rs_particle.phase==1],rs_particle.phase[rs_particle.phase==1])

    #creates effective_diameter_prime array from mode diameter
    rs_particle.effective_diameter_prime = \
      size_dist.deff_prime(rs_particle.mode_diameter,rs_particle.phase,zeta)

    rs_particle.effective_diameter = size_dist.eff_diameter(\
                          rs_particle.mode_diameter,rs_particle.phase)

    rs_particle.mean_diameter = size_dist.mean_diameter(\
                          rs_particle.mode_diameter,rs_particle.phase)

    #compute liquid water content for bins with phase == 0
    #bins with phase > 0 will return with NaN's
    if particle_parameters['radar_model'] == "Mie":
        rs_particle.LWC = su.liquid_water_content_mie(
            rs_particle.effective_diameter, clipped_beta_a,
            rs_particle.q_backscatter)
        rs_particle.p180_extinction = rs_inv.beta_a_backscat / rs_particle.q_backscatter

    else:
        if particle_parameters['ext_source'] == 'bs/p180':
            rs_particle.extinction_aerosol = rs_inv.beta_a_backscat / particle_parameters[
                'p180_water']
            clipped_beta_a = rs_particle.extinction_aerosol.copy()
        else:
            clipped_beta_a = rs_inv.extinction_aerosol.copy()
        clipped_beta_a[np.isnan(clipped_beta_a_backscat)] = np.NaN
        rs_particle.LWC = np.NaN * np.zeros_like(
            rs_particle.effective_diameter)
        su.liquid_water_content_ext_approx(rs_particle.LWC,
                                           rs_particle.effective_diameter,
                                           clipped_beta_a, rs_particle.phase)
        rs_particle.p180_extinction = rs_inv.beta_a_backscat / particle_parameters[
            'p180_water']

    rs_particle.extinction_aerosol = rs_inv.extinction_aerosol.copy()
    #compute ice water water content for bins with phase > 0 (kg/m^3)
    #return in LWC array bins with phase > 0
    su.ice_water_content(rs_particle.LWC, rs_particle.effective_diameter,
                         clipped_beta_a, rs_particle.phase)

    if hasattr(rs_radar, 'vertically_averaged_doppler'):
        rs_radar.raw_MeanDopplerVelocity = rs_radar.MeanDopplerVelocity.copy()
        motion_correction = np.transpose(rs_radar.vertically_averaged_doppler\
                            *np.transpose(np.ones_like(rs_radar.MeanDopplerVelocity)))
        rs_radar.MeanDopplerVelocity -= motion_correction

    if sounding is not None:
        s_time = datetime.utcnow()

        rs_particle.rw_fall_velocity,rs_particle.mw_fall_velocity \
             ,rs_particle.model_spectral_width,rs_particle.nw_fall_velocity\
             = su.weighted_fall_velocity(
             rs_particle.mode_diameter
            ,particle_parameters
            ,rs_particle.zeta
            ,sounding.temps
            ,sounding.pressures
            ,rs_particle.phase,size_dist)
        print 'time for fall_velocity = ', datetime.utcnow() - s_time

    #compute precip rate (m/s):
    #rain_rate = 1/density (m^3/kg) * LWC (kg/m^3) * fall_velocity (m/s)

    #using raw Doppler to give precip rate in m/s
    rs_particle.hsrl_radar_dv_precip_rate = 0.001 * rs_particle.LWC * rs_radar.MeanDopplerVelocity
    #remove points with Doppler folding
    rs_particle.hsrl_radar_dv_precip_rate[
        rs_radar.MeanDopplerVelocity < -2.0] = np.NaN

    if sounding is not None:
        #using modeled mass weighted velocity and dividing by the density of water,
        #                              1000 kg/m^3, to give precip_rate in m/s
        rs_particle.hsrl_radar_precip_rate = 0.001 * rs_particle.LWC * rs_particle.mw_fall_velocity

    #retype all these fields to a proper TZ_Array
    #for f in ['effective_diameter_prime']:
    for f in vars(rs_particle).keys():
        v = getattr(rs_particle, f)
        if isinstance(v, hau.Z_Array):
            continue  #properly typed. continue
        elif isinstance(v, np.ndarray):
            if len(v.shape) == 2:
                setattr(rs_particle, f, hau.TZ_Array(v))
            elif len(v.shape) == 1:
                print '1 Dimensional Variable ' + f + ' will be changed to a T_Array. FIXME!!!!'
                setattr(rs_particle, f, hau.T_Array(v))
            else:
                raise RuntimeError(
                    "I don't know what to type particle array " + f +
                    " with dimensions " + repr(v.shape))
        else:
            pass  #not an array type. should be safe to ignore
    """
            #compute fitting error of computed radar weighted fall velocity
            #to measured Doppler velocity.
            temp = rs_radar.Backscatter.copy()
            temp[np.isnan(rs_radar.Backscatter)]=0.0
            temp[rs_inv.msl_altitudes>400]=0.0
            fitting_error = np.sqrt(nanmean((rs_particle.rw_fall_velocity[temp >1e-9] \
                                  -  rs_radar.MeanDopplerVelocity[temp >1e-9])**2))
            print
            print rs_radar.times[0],'  --->  ' ,rs_radar.times[-1]
            print 'fitting_error (m/s)= ',fitting_error
            print
            """

    print 'rs_particle--spher'
    print dir(rs_particle)
    return rs_particle
Esempio n. 17
0
    def profile(self, request_time, request_lat, request_long, offset=0):
        """returns a profile of temperature,pressure, dew_point, frost point at 
        time, lat, and long extracted from the sounding_archive. If request_lat and
        request_long are empty they are ignored
        request_time = python datetime for reqested sounding
        request_lat  = requested latitude for sounding--ignored if []
        request_lon  = requested longitude for sounding--ignored if []"""

        if self.soundings is None:
            return None

        print 'sounding_type= ', self.sounding_type, request_time
        if self.sounding_type == 'NOAA raob':
            temp_sounding = hau.Time_Z_Group()

            #find correct sounding profile out of archive file
            sounding = hau.selectByTime(self.soundings,
                                        request_time,
                                        offset=offset)
            if sounding is None:
                return None

            sounding.sounding_type = self.sounding_type
            sounding.sounding_id = sounding.station_id
            sounding.latitude = sounding.stalat
            sounding.longitude = sounding.stalong
            sounding.frost_points = cal_frost_point(sounding.dew_points)

            temp_sounding.type = self.sounding_type
            temp_sounding.times = sounding.times
            temp_sounding.altitudes = hau.Z_Array(sounding.altitudes)
            temp_sounding.temps = hau.TZ_Array(sounding.temps[np.newaxis, :])
            temp_sounding.pressures = hau.TZ_Array(
                sounding.pressures[np.newaxis, :])
            temp_sounding.dew_pts = hau.TZ_Array(
                sounding.dew_points[np.newaxis, :])
            temp_sounding.frost_pts = hau.TZ_Array(
                sounding.frost_points[np.newaxis, :])
            temp_sounding.wind_dir = hau.TZ_Array(
                sounding.wind_dir[np.newaxis, :])
            temp_sounding.wind_spd = hau.TZ_Array(
                sounding.wind_spd[np.newaxis, :])
            temp_sounding.station_id = sounding.station_id
            temp_sounding.top = sounding.top

            sounding.times = sounding.times[0]
            #sounding.expire_time=sounding.expire_time[0]

        elif self.sounding_type == "time curtain" \
                  and self.sounding_id == "raqms":

            temp_sounding = hau.Time_Z_Group()
            sounding = raqms.select_raqms_profile(self.soundings,request_time \
                       ,self.requested_altitudes,offset=offset)
            #                      ,self.max_alt,self.alt_res)
            if sounding is None:
                return None
            sounding.station_id = self.sounding_id

            temp_sounding.type = self.sounding_type
            temp_sounding.times = sounding.times
            temp_sounding.latitude = hau.T_Array(sounding.latitude)
            temp_sounding.longitude = hau.T_Array(sounding.longitude)
            temp_sounding.altitudes = hau.Z_Array(sounding.altitudes)
            temp_sounding.temps = hau.TZ_Array(sounding.temps[np.newaxis, :])
            temp_sounding.pressures = hau.TZ_Array(
                sounding.pressures[np.newaxis, :])
            temp_sounding.dew_pts = hau.TZ_Array(
                sounding.dew_points[np.newaxis, :])
            temp_sounding.frost_pts = hau.TZ_Array(
                sounding.frost_points[np.newaxis, :])
            temp_sounding.wind_dir = hau.TZ_Array(
                sounding.wind_dir[np.newaxis, :])
            temp_sounding.wind_spd = hau.TZ_Array(
                sounding.wind_spd[np.newaxis, :])
            temp_sounding.ext_total = hau.TZ_Array(
                sounding.ext_total[np.newaxis, :])
            temp_sounding.ext_salt = hau.TZ_Array(
                sounding.ext_salt[np.newaxis, :])
            temp_sounding.ext_dust = hau.TZ_Array(
                sounding.ext_dust[np.newaxis, :])
            temp_sounding.station_id = sounding.station_id
            temp_sounding.top = sounding.top

            sounding.times = sounding.times[0]

            #set up time to read in new sounding (in datetime)
            #sounding.expire_time = sounding.times+timedelta(seconds=5*60)
            #expire time can not be less then request time--raqms file must be missing soundings
            #set expire_time 5 minutes after request time
            #if sounding.expire_time <= request_time:
            #    print "****Warning----missing raqms sounding at ",request_time
            #    print "               using sounding from ",sounding.times
            #    sounding.expire_time = request_time + timedelta(seconds=5*60)

        #remove any negative pressure values
        temp_sounding.pressures[temp_sounding.pressures < 0] = 0.0
        #remove NaN's from pressure and temperature values
        temp_sounding.pressures[np.isnan(temp_sounding.pressures)] = 0.0
        temp_sounding.temps[np.isnan(temp_sounding.temps)] = 1.0
        return sounding
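#cal_frost_point is not shown in this listing; below is a standalone
#Magnus-formula sketch of a dew point (deg C) to frost point (deg C)
#conversion. The original implementation may use different coefficients
#or units.
import numpy as np

def frost_point_sketch(dew_point_c):
    #saturation vapor pressure (hPa) at the dew point, Magnus formula over water
    e = 6.112 * np.exp(17.62 * dew_point_c / (243.12 + dew_point_c))
    #invert the Magnus formula over ice to get the frost point
    loge = np.log(e / 6.112)
    return 272.62 * loge / (22.46 - loge)

print frost_point_sketch(np.array([-20.0, -5.0]))  #frost point lies above dew point below 0 C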
Esempio n. 18
0
class dpl_hsrl_narr(dplkit.role.narrator.aNarrator):
    """ DPL HSRL Narrator Object. should only be created by dpl_hsrl object

        :param params: parameters dictionary
        :param cal_narr: calibration framestream narration object
        :param timesource: time axis generation source (could be synthetic or from another stream)
        :param rawsrc: raw data source. if not provided, one will be created here from lib and zoo
        :param lib: raw hsrl reading library object only used if rawsrc is not given
        :param zoo: raw hsrl zookeeper object only used if rawsrc is not given

        exposed attributes:
        - hsrl_cal_stream (calibration stream, can be used for parallel stream collection)

        exposed field type in chain:
        
        - hsrl.dpl.calibration.dpl_calibration_narr
    """
    #def get_process_control(self):
    #    return self.cal_narr.get_process_control()

    @property
    def hsrl_cal_stream(self):
        return self.cal_narr

    def __init__(self,params,cal_narr,timesource,rawsrc=None,compute_stats=0):
        super(dpl_hsrl_narr,self).__init__(None,cal_narr) #FIXME replace url with some manner of unique path
        #import dpl_calibration
        #self.dpl_calibration=dpl_calibration
        #self.provides=libr.provides
        self.compute_stats=compute_stats
        self.rawsrc=rawsrc
        self.params=params
        self.timesource=timesource
        self.cal_narr=cal_narr

    def __repr__(self):
        return 'DPL HSRL Framestream Narrator (%s)' % (self.params)

    def read(self):
        """ main read generator
        """
        import hsrl.data_stream.processing_utilities as pu
        params=self.params
        firsttimeever=None
        intervalTime=None
        intervalEnd=None
        rawsrc=iter(self.rawsrc)
        #if params['timeres']!=None and params['timeres']<datetime.timedelta(seconds=float(self.cal_narr.hsrl_constants['integration_time'])):
        #    params['timeres']=None #pure native
        end_time_datetime=params['finalTime']
        #timemodoffset=time_mod(params['realStartTime'],params['timeres'])
        noframe='noframe'
        fullrange=False #if this is true, it will pad the start with any missing times.

        remainder=None
        cdf_to_hsrl = None
        preprocess_ave = None
        requested_times=None
        instrument=self.hsrl_instrument
        intcount=0
        rs_mem = None
        #rs=None
        timesource=TimeSource.CompoundTimeGenerator(self.timesource) if self.timesource is not None else None
        
        for calv in self.cal_narr:
            if intervalTime is None:
                firsttimeever=calv['chunk_start_time']
                intervalTime=calv['chunk_start_time']
                intervalEnd=intervalTime
            chunk_end_to_use=calv['chunk_end_time']#-time_mod(calv['chunk_end_time'],params['timeres'],timemodoffset)
            #print 'old end',calv['chunk_end_time'],'vs new end',chunk_end_to_use,'mod base',params['timeres']
            if calv['chunk_end_time']==calv['chunk_start_time'] and end_time_datetime is None:
                if params['block_when_out_of_data']:
                    if 'timeres' not in params or params['timeres'] is None:
                        sleep(calv['rs_constants']['integration_time'])
                    else:
                        sleep(params['timeres'].total_seconds())
                else:
                    yield None #this is done to get out of here, and not get stuck in a tight loop
                continue
            while intervalTime<chunk_end_to_use:
                integration_time = calv['rs_constants']['integration_time']
                doPresample=True
                #END init section
                if intervalEnd>chunk_end_to_use:
                    print 'Breaking calibration on endtime. proc ',intervalEnd,chunk_end_to_use,end_time_datetime
                    break
                else:
                    intervalEnd=chunk_end_to_use
                #print ' Absolute window is ', actualStartTime, ' to ' , params['finalTime']
                print ' prior window was ', intervalTime, ' to ' , intervalEnd, 'terminating at ',chunk_end_to_use,rs_mem
                if True:#requested_times==None or requested_times.shape[0]>0:
                    try:
                            try:
                                while rawsrc is not None:
                                    if rs_mem is not None and rs_mem.times[0]>=chunk_end_to_use  and (end_time_datetime is None or chunk_end_to_use<end_time_datetime):
                                        break
                                    tmp=rawsrc.next()
                                    if hasattr(tmp,'rs_raw'):
                                        if rs_mem is not None:
                                            rs_mem.append(tmp.rs_raw)
                                        else:
                                            rs_mem=copy.deepcopy(tmp.rs_raw)
                                    if rs_mem is not None and rs_mem.times.size>0:
                                        break
                                    else:
                                        rs_mem=None
                            except StopIteration:
                                print 'Raw HSRL stream is ended'
                                rawsrc=None
                            if rs_mem is None or rs_mem.times.size==0:
                                rs_mem=None
                            elif rs_mem.times[0]>=chunk_end_to_use and (end_time_datetime is None or chunk_end_to_use<end_time_datetime):
                                print 'HSRL RAW skipping to next cal because of times',intervalTime,chunk_end_to_use,end_time_datetime,rs_mem.times[0]
                                break
                            else:
                                intervalEnd=rs_mem.times[-1]
                            print 'read in raw frame to mean',rs_mem,remainder
                            if rawsrc is None:
                                intervalEnd=chunk_end_to_use

                            print 'trimmed ',rs_mem
                            if timesource is not None:
                                if timesource.isDone:
                                    break
                                #useMungedTimes: in case this code needs to start
                                #shifting bins (which assumes resolutions, and implies
                                #starts and ends of intervals rather than explicit
                                #edges, to avoid overlap or underlap)
                                useMungedTimes=False
                                #usePrebinnedTimes: the provided times are time-bin
                                #borders; the last time is the end of the last bin,
                                #not included, and thus expected to be the first bin
                                #of the next window. that is the fully explicit way
                                #to describe the bins in code, but a standard for
                                #describing bins to the user (a single time when the
                                #bin spans a range) is not defined yet
                                usePrebinnedTimes=True
                                inclusive=rawsrc is None and (end_time_datetime!=None and intervalEnd>=end_time_datetime)
                                timevals=hau.T_Array(timesource.getBinsFor(starttime=intervalTime,endtime=intervalEnd,inclusive=inclusive))#,inclusive=(end_time_datetime!=None and intervalEnd>=end_time_datetime)))
                                print 'Now %i intervals %s' % (timevals.size-1, "INC" if inclusive else "NOINC"),intervalTime,intervalEnd
                            elif 'timeres' in params and params['timeres'] is not None:
                                tmp=intervalTime
                                #useMungedTimes and usePrebinnedTimes: see the
                                #matching comments in the timesource branch above
                                useMungedTimes=False
                                usePrebinnedTimes=True

                                timevals=[]
                                timevals.append(tmp)
                                while tmp<intervalEnd:# using python datetimes for making the axis is much much more precise than matplotlib floats.
                                        #print tmp, ' = ' , du.date2num(tmp) , ' = ' , (tmp-self.actualStartTime).total_seconds()
                                        tmp+=params['timeres']
                                        timevals.append(tmp)
                                        
                                #intervalEnd=tmp
                                intcount+=len(timevals)
                                if usePrebinnedTimes:
                                    intcount-=1
                                print 'Now %i intervals' % (intcount)
                                timevals=hau.T_Array(timevals)
                            else:

                                print 'Using Native timing'
                                timevals=None

                            print ' new window is ', intervalTime, ' to ' , intervalEnd

                            requested_times=timevals
                           
                            requested_chunk_times= requested_times#requested_times[requested_times >=intervalTime]

                            if requested_chunk_times is not None and len(requested_chunk_times)<2 and rawsrc is not None:
                                #if rawsrc is not None:
                                print "not enough time to process"
                                continue
                            elif rawsrc is None and rs_mem is None and remainder is None:
                                #chunk_end_to_use=intervalTime
                                #continue
                                #print ''
                                break
                 
          
                            rs_chunk,remainder = pu.process_data( instrument, intervalTime, intervalEnd
                                ,params['min_alt'], params['max_alt'], requested_chunk_times
                                , rs_mem, calv['rs_Cxx'], calv['rs_constants'], calv['rs_cal']
                                , None , self.cal_narr.hsrl_corr_adjusts, self.cal_narr.hsrl_process_control
                                , self.compute_stats,remainder=remainder)
                            rs_mem=None
                            if rs_chunk is not None and hasattr(rs_chunk,'rs_mean') and rs_chunk.rs_mean is not None and rs_chunk.rs_mean.times.size==0:
                                rs_chunk=None
                            if rs_chunk is None and rawsrc is None:
                                break
                           #print rs_chunk
                            if rs_chunk is not None and hasattr(rs_chunk,'rs_mean') and rs_chunk.rs_mean is not None and rs_chunk.rs_mean.times.size>0:
                                if fullrange and requested_chunk_times is not None:
                                    v=hau.Time_Z_Group(like=rs_chunk.rs_mean)
                                    v.times=hau.T_Array(requested_chunk_times[requested_chunk_times<rs_chunk.rs_mean.times[0]])
                                    if v.times.size>0:
                                        rs_chunk.rs_mean.prepend(v)
                                rs_chunk.calv=calv

                                yield rs_chunk
                                intervalTime=intervalEnd
                    except Exception, e:
                        print 'Exception occurred in update_cal_and_process'
                        print 'Exception = ',e
                        print traceback.format_exc()
                        if isinstance(e,(MemoryError,)):
                            print 'Please Adjust Your Parameters to be more Server-friendly and try again'
                            raise
                        if not isinstance(e,(AttributeError,)):
                            raise
        assert(remainder is None or remainder.times.size==0)
        if fullrange and end_time_datetime is not None and timesource is not None and (not timesource.isDone or requested_times is not None) and firsttimeever!=intervalTime:#either timesource indicates it wasn't run completely or requested times wasn't cleared
            requested_times=hau.T_Array(timesource.getBinsFor(starttime=intervalTime,endtime=end_time_datetime))
            if requested_times is not None and len(requested_times)>1:
                print 'NO DATA to end from ',intervalTime,' to ',end_time_datetime #FIXME IS THIS USED? JPG 20160504 
                print "times to use are ",requested_times[:-1]
                rs= hau.Time_Z_Group()
                rs.rs_mean=hau.Time_Z_Group()
                rs.rs_mean.times=hau.T_Array(requested_times[:-1]).copy() #causes the time axis to be stored, but all others may be implied MISSING
                setattr(rs.rs_mean,'delta_t',hau.T_Array(np.zeros(rs.rs_mean.times.shape)))
                yield rs
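#A minimal standalone sketch of the fixed-resolution bin-edge construction used
#in read() above when no timesource is given (variable names are illustrative):
from datetime import datetime, timedelta

intervalTime = datetime(2015, 6, 1, 0, 0, 0)
intervalEnd = datetime(2015, 6, 1, 0, 1, 0)
timeres = timedelta(seconds=15)
timevals = [intervalTime]
while timevals[-1] < intervalEnd:  #python datetimes keep the axis exact
    timevals.append(timevals[-1] + timeres)
print len(timevals) - 1, 'intervals'  #4 intervals; the last edge closes the final bin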
Esempio n. 19
0
def show_pars(instrument,display_defaults,rs,usetimes,usealts,figs):
            #toplevel
            if usealts is None and hasattr(rs,'heights'):
                usealts=rs.heights
            if usetimes is None and hasattr(rs,'times'):
                usetimes=rs.times
            #delta_t=np.zeros(rs.times.shape)
            #make vector of time differences between data points
            delta_t = hau.T_Array(np.array([(rs.times[x+1]-rs.times[x]).total_seconds() for x in range(rs.times.size-1)]+[0.0]))
            if delta_t.size>1:
                delta_t[-1]=nanmean(delta_t[:-1])
    
            if figs is None:
                figs=gt.figurelist()
          
                
            #start here
           
            if instrument.find('pars2S1') >= 0:
                color = 'r'
                instrument = 'Pars2S1'
            else:
                color = 'b'
                instrument = 'Pars2S2'

            if display_defaults.enabled('parsivel_precip_rate'):                
                if hasattr(rs,'precip_rate'):
                    gt.plot_vs_time('parsivel_precip_rate'
                       ,instrument
                       ,usetimes
                       ,[rs.precip_rate]  
                       ,[color]
                       ,[2]
                       ,[]
                       ,''    
                       ,'Parsivel precip rate (mm/hr)'
                       ,[]
                       ,'Parsivel precip rate vs time'   
                       ,False #FIXME
                       ,display_defaults
                       ,figs)
                    print 'rendering Parsivel precip rate'               
                else:
                    print
                    print 'No Parsivel precip rate image--precip rate field not found'
                    
            if display_defaults.enabled('parsivel_accumulated_precip'):
                if hasattr(rs,'precip_rate'):
                    #precip rate in units of mm/hr--convert seconds to hours
                    temp = (rs.precip_rate * delta_t/3600.0).copy()
                    temp[np.isnan(temp)]=0.0
                    accumulated_precip = np.cumsum(temp,0)   
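                    #unit sketch: precip_rate [mm/hr] * delta_t [s] / 3600 [s/hr] = depth [mm];
                    #NaNs were zeroed above so the running sum never goes NaN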
                    gt.plot_vs_time('parsivel_accumulated_precip'
                       ,instrument
                       ,usetimes
                       ,[accumulated_precip]  
                       ,[color]
                       ,[2]
                       ,[]
                       ,''    
                       ,'Parsivel precip (mm)'
                       ,[]
                       ,'Parsivel precip vs time'   
                       ,False #FIXME
                       ,display_defaults
                       ,figs)
                    print 'rendering Parsivel precip'               
                else:
                    print
                    print 'No Parsivel precip image--precip_rate field not found'

            if display_defaults.enabled('parsivel_median_volume_diameter'):
                if hasattr(rs,'median_volume_diameter'):
                    temp = rs.median_volume_diameter.copy()
                    temp[temp < 0] = np.NaN
                    gt.plot_vs_time('parsivel_median_volume_diameter'
                       ,instrument
                       ,usetimes
                       ,[temp]  
                       ,[color]
                       ,[2]
                       ,None
                       ,''    
                       ,'median volume diameter'
                       ,'mm'
                       ,'parsivel median vol. dia. vs time'   
                       ,False #FIXME
                       ,display_defaults
                       ,figs)
                    print 'rendering Parsivel median volume diameter'               
                else:
                    print
                    print 'No Parsivel median volume diameter plot--field not found'
                    
            if display_defaults.enabled('parsivel_liquid_water_content'):
                if hasattr(rs,'liquid_water_content'):
            
                    gt.plot_vs_time('parsivel_liquid_water_content'
                       ,instrument
                       ,usetimes
                       ,[rs.liquid_water_content * 1e-3]  #convert from mm^3/m^3 to g/m^3
                       ,[color]
                       ,[2]
                       ,None
                       ,''    
                       ,'Liquid water content'
                       ,'g/m^3'
                       ,'Parsivel LWC vs time'   
                       ,False #FIXME
                       ,display_defaults
                       ,figs)
                    print 'rendering Parsivel liquid water content'               
                else:
                    print
                    print 'No Parsivel liquid water content plot--field not found'
                    
            if display_defaults.enabled('parsivel_radar_reflectivity'):
                if hasattr(rs,'equivalent_radar_reflectivity'):
                    temp = rs.equivalent_radar_reflectivity.copy()
                    temp[temp < -50] = np.NaN
                    gt.plot_vs_time('parsivel_radar_reflectivity'
                       ,instrument
                       ,usetimes
                       ,[temp]   
                       ,[color]
                       ,[2]
                       ,None
                       ,''    
                       ,'Radar reflectivity'
                       ,'dBZ'
                       ,'Parsivel radar reflectivity vs time'   
                       ,False #FIXME
                       ,display_defaults
                       ,figs)
                    print 'rendering Parsivel radar reflectivity'               
                else:
                    print
                    print 'No Parsivel equivalent radar reflectivity--field not found'
            if 0: #disabled debug dump of the raw Parsivel size/velocity arrays
              print 'particle size'
              print rs.particle_size.shape
              print rs.particle_size
              print 'particle width'
              print rs.class_size_width.shape
              print rs.class_size_width
              print 'fall velocity'
              print rs.raw_fall_velocity.shape
              print rs.raw_fall_velocity
            
            if 0: #display_defaults.enabled('parsivel_fall_velocity'):
                if hasattr(rs,'raw_fall_velocity'):
            
                    gt.plot_vs_time('parsivel_fall_velocity'
                       ,instrument
                       ,usetimes
                       ,[rs.raw_fall_velocity]   
                       ,[color]
                       ,[2]
                       ,None
                       ,''    
                       ,'Raw fall velocity'
                       ,'m/s'
                       ,'Parsivel fall velocity vs time'   
                       ,False #FIXME
                       ,display_defaults
                       ,figs)
                    print 'rendering Parsivel raw fall velocity'               
                else:
                    print
                    print 'No Parsivel raw fall velocity plot--field not found'
                    
            if 0: # display_defaults.enabled('parsivel_fall_velocity_vs_time'):
                if hasattr(rs,'raw_spectrum'):

                    fall_velocity = rs.raw_spectrum.copy()
                    fall_velocity[fall_velocity < -9000.0] = 0.0
                    #sum over particle size (axis 1, assuming (time, size, velocity) ordering)
                    fall_velocity = np.sum(fall_velocity, 1)
                
                    gt.plot_vs_time('parsivel_fall_velocity_vs_time'
                        ,instrument
                        ,usetimes
                        ,[fall_velocity]  
                        ,[color]
                        ,[2]
                        ,[]
                        ,''    
                        ,'Parsivel fall velocity (m/s)'
                        ,[]
                        ,'Parsivel fall velocity vs time'   
                        ,False #FIXME
                        ,display_defaults
                        ,figs)
                    print 'rendering Parsivel raw fall velocity'               
                else:
                    print
                    print 'No Parsivel raw fall velocity plot--field not found'

            if display_defaults.enabled('parsivel_size_spectrum'):
                if hasattr(rs,'raw_spectrum'):
                    raw_spectrum = rs.raw_spectrum.copy()
                    raw_spectrum[raw_spectrum < -9000] = 0.0
                    #sum over time
                    raw_spectrum = np.sum(raw_spectrum,0)
                    #sum over velocities
                    raw_spectrum = np.sum(raw_spectrum,1)
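                    #assuming raw_spectrum is dimensioned (time, size, velocity): the
                    #axis-0 sum collapses time and the axis-1 sum collapses velocity,
                    #leaving one total count per particle-size class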
                    gt.plot_xy('parsivel_size_spectrum'  #plot name
                        ,'Parsivel'
                        ,rs.times
                        ,[rs.particle_size[:19]]
                        ,[raw_spectrum[:19]]
                        ,[color]
                        ,[]
                        ,[]
                        ,['-']
                        ,[2]
                        ,[]
                        ,'upper right'
                        ,'particle size '
                        ,'mm'
                        ,'number'
                        ,None
                        ,'Raw spectrum'
                        ,'' #text_str
                        ,None #'text_position_x
                        ,None #text_position_y
                        ,None #text_angle
                        ,display_defaults
                        ,figs)
                else:
                    print
                    print 'Parsivel raw spectrum plot not plotted--variable not found'

            if display_defaults.enabled('parsivel_fall_velocity_spectrum'):
                if hasattr(rs,'raw_spectrum'):
                    raw_spectrum = rs.raw_spectrum.copy()
                    raw_spectrum[raw_spectrum < -9000] = 0.0
                    #sum over time
                    raw_spectrum = np.sum(raw_spectrum,0)
                    #sum over sizes (axis 0 after the time sum, assuming (time, size, velocity) ordering)
                    raw_spectrum = np.sum(raw_spectrum,0)
                    gt.plot_xy('parsivel_fall_velocity_spectrum'  #plot name
                        ,'Parsivel'
                        ,rs.times
                        ,[rs.raw_fall_velocity]
                        ,[raw_spectrum]
                        ,[color]
                        ,[]
                        ,[]
                        ,['-']
                        ,[2]
                        ,[]
                        ,'upper right'
                        ,'fall velocity'
                        ,'m/s'
                        ,'number'
                        ,None
                        ,'fall spectrum'
                        ,'' #text_str
                        ,None #'text_position_x
                        ,None #text_position_y
                        ,None #text_angle
                        ,display_defaults
                        ,figs)
                else:
                    print
                    print 'Parsivel fall velocity spectrum not plotted--variable not found'

            if display_defaults.enabled('parsivel_number_detected_particles_vs_time'):
                if hasattr(rs,'number_detected_particles'):
                    print 'rendering parsivel number_detected_particles_vs_time'
                    gt.plot_vs_time('parsivel_number_detected_particles_vs_time'
                        ,instrument
                        ,usetimes
                        ,[rs.number_detected_particles]  
                        ,[color]
                        ,[2]
                        ,[]
                        ,''    
                        ,'Number of particles'
                        ,None
                        ,'Parsivel number of detected particles vs time'   
                        ,False #FIXME
                        ,display_defaults
                        ,figs)
                            
                else:
                    print
                    print 'Parsivel number_detected_particles not plotted--variable not found'
                   
            if display_defaults.enabled('parsivel_number_density'):
                if hasattr(rs,'number_density_drops'):
                    number_density = rs.number_density_drops.copy()
                    number_density[number_density < -9000] = 0.0
                    #average over time
                    number_density = np.mean(number_density,0)
            
                    gt.plot_xy('parsivel_number_density'
                        ,instrument
                        ,rs.times
                        ,[rs.particle_size[:19]]
                        ,[number_density[:19]]
                        ,[color]
                        ,[]
                        ,[]
                        ,['-']
                        ,[2]
                        ,[]
                        ,'upper right'
                        ,'particle size '
                        ,'mm'
                        ,'number density'
                        ,'1/(m^3 *mm)'
                        ,'Number density'
                        ,'' #text_str
                        ,None #'text_position_x
                        ,None #text_position_y
                        ,None #text_angle
                        ,display_defaults
                        ,figs)
                else:
                    print
                    print 'Parsivel number density not plotted--variable not found'
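The spectrum blocks above reduce the 3-D Parsivel histogram to 1-D marginals before plotting. A self-contained sketch of the same reductions on a synthetic array, assuming the (time, size, velocity) axis ordering used above (the shapes are illustrative only):

import numpy as np

# synthetic (time, size, velocity) spectrum: 10 records, 32 size and 32 velocity classes
spectrum = np.random.poisson(2.0, size=(10, 32, 32)).astype(float)
spectrum[spectrum < -9000] = 0.0                 # mirror the missing-value clamp above

size_spectrum = spectrum.sum(axis=0).sum(axis=1)      # collapse time, then velocity
velocity_spectrum = spectrum.sum(axis=0).sum(axis=0)  # collapse time, then size
assert size_spectrum.shape == (32,) and velocity_spectrum.shape == (32,)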
Example n. 20
0
def accumulate_raman_inverted_profiles(consts,
                                       rs_mean,
                                       qc_mask,
                                       old_profiles,
                                       process_control=None,
                                       corr_adjusts=None):
    indices = np.arange(rs_mean.times.shape[0])
    if len(indices) == 0:
        return old_profiles

    #assumes the process_control argument carries the processing defaults here
    if qc_mask is not None and process_control is not None and process_control.get_value(
            'averaged_profiles', 'apply_mask'):
        #make mask array with NaN's for array elements where bit[0] of qc_mask==0
        #all other elements of mask = 1
        mask = (np.bitwise_and(qc_mask, 1)).astype(
            'float')  #mask is float to allow use of NaN values
        mask[mask == 0] = np.NaN

        print 'qc_mask applied to time averaged profiles vs altitude'
    else:
        #set mask == 1
        mask = None
        #for sh in channel_shorthand.keys():
        #
        # if hasattr(rs_mean,sh):
        #        mask = np.ones_like(getattr(rs_mean,sh))
        #        break
        #if mask is None:
        #    mask = np.ones((rs_mean.times.shape[0],0))
        print 'qc_mask has not been applied to time averaged profiles'

    #energies=('transmitted_1064_energy','transmitted_energy')

    profiles = hau.Time_Z_Group(can_append=False, altname='altitudes')

    profiles.hist = hau.Time_Z_Group()
    ft = None
    tc = 0
    #tv=0
    lt = None
    if old_profiles is not None:
        ft = old_profiles.hist.ft
        tc = old_profiles.hist.tc
        #tv=old_profiles.hist.tv
        lt = old_profiles.hist.lt
    if len(indices) > 0:
        if ft is None:
            ft = rs_mean.times[indices][0]
        lt = rs_mean.times[indices][-1] + timedelta(
            seconds=rs_mean.delta_t[indices][-1]
            if not np.isnan(rs_mean.delta_t[indices][-1]) else 0)
        for x in indices:
            if rs_mean.times[x] is not None:
                tc = tc + 1
                #tv=tv+(rs_mean.times[x]-ft).total_seconds()
    if tc > 0:
        profiles.times = hau.T_Array([ft])  #+timedelta(seconds=tv/tc), ])
        profiles.start = ft
        profiles.width = lt - ft
        profiles.delta_t = hau.T_Array([profiles.width.total_seconds()])
    else:
        profiles.times = hau.T_Array([])
        profiles.start = ft
        profiles.width = timedelta(seconds=0)
        profiles.delta_t = hau.T_Array([])
    profiles.start_time = ft
    profiles.end_time = lt
    profiles.hist.ft = ft
    profiles.hist.tc = tc
    #profiles.hist.tv=tv
    profiles.hist.lt = lt
    if rs_mean is not None and hasattr(rs_mean, 'altitudes'):
        profiles.altitudes = rs_mean.altitudes.copy()
    elif hasattr(old_profiles, 'altitudes'):
        profiles.altitudes = old_profiles.altitudes

    #FIXME need to accumulate the inverted products here, so here's a hack
    interval = hau.Time_Z_Group()
    interval.intervals = hau.T_Array(np.ones(rs_mean.times.shape))

    accumulate(profiles, old_profiles, interval, indices, 'intervals')
    interval_count = profiles.intervals
    print 'Total intervals for profile =', interval_count
    for k, v in vars(rs_mean).items():
        if k.startswith('_') or k in ('times', 'start', 'width', 'delta_t'):
            continue
        if not isinstance(v, hau.T_Array):
            continue
        if isinstance(v, hau.TZ_Array):
            continue
        accumulate(profiles, old_profiles, rs_mean, indices, k, interval_count)
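    #the loop above folds in scalar-per-time (T_Array) fields; the loop below
    #handles 2-D time-by-altitude (TZ_Array) fields and also applies the qc mask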
    # create TZ_Array with time dimension of '1', so hsrl_inversion doesn't choke
    for k, v in vars(rs_mean).items():
        if k.startswith('_'):
            continue
        if not isinstance(v, hau.TZ_Array):
            continue
        if len(v.shape) != 2:
            continue
        accumulate(profiles, old_profiles, rs_mean, indices, k, interval_count,
                   mask)

    return profiles
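The masking logic at the top of both accumulate_raman_* examples converts bit 0 of an integer QC mask into a float array of ones and NaNs, so a simple multiply blanks the flagged bins. A minimal sketch, assuming a hypothetical (time, altitude) integer mask where a set bit 0 means 'good':

import numpy as np

qc_mask = np.array([[1, 0, 3],
                    [2, 1, 1]])                    # hypothetical (time, altitude) QC words
mask = np.bitwise_and(qc_mask, 1).astype('float')  # keep only bit 0
mask[mask == 0] = np.NaN                           # flagged bins become NaN
profile = np.ones((2, 3))
masked_profile = profile * mask                    # NaNs out bins where bit 0 was clear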
Example n. 21
0
def accumulate_raman_profiles(consts,
                              rs_mean,
                              qc_mask,
                              old_profiles,
                              process_control=None,
                              rs_cal=None,
                              Cxx=None,
                              corr_adjusts=None):
    indices = np.arange(rs_mean.times.shape[0])
    if len(indices) == 0:
        return old_profiles

    #assumes the process_control argument carries the processing defaults here
    if qc_mask is not None and process_control is not None and process_control.get_value(
            'averaged_profiles', 'apply_mask'):
        #make mask array with NaN's for array elements where bit[0] of qc_mask==0
        #all other elements of mask = 1
        mask = (np.bitwise_and(qc_mask, 1)).astype(
            'float')  #mask is float to allow use of NaN values
        mask[mask == 0] = np.NaN

        print 'qc_mask applied to time averaged profiles vs altitude'
    else:
        #set mask == 1
        mask = None
        #for sh in channel_shorthand.keys():
        #
        #    if hasattr(rs_mean,sh):
        #        mask = np.ones_like(getattr(rs_mean,sh))
        #        break
        #if mask is None:
        #    mask = np.ones((rs_mean.times.shape[0],0))
        print 'qc_mask has not been applied to time averaged profiles'

    #energies=('transmitted_1064_energy','transmitted_energy')

    profiles = hau.Time_Z_Group(can_append=False, altname='altitudes')

    profiles.hist = hau.Time_Z_Group()
    ft = None
    tc = 0
    #tv=0
    lt = None
    if old_profiles is not None:
        #total_seeded_shots=total_seeded_shots+profiles.hist.total_seeded_shots
        ft = old_profiles.hist.ft
        tc = old_profiles.hist.tc
        #tv=old_profiles.hist.tv
        lt = old_profiles.hist.lt
    if len(indices) > 0:
        if ft is None:
            ft = rs_mean.times[indices][0]
        lt = rs_mean.times[indices][-1] + timedelta(
            seconds=rs_mean.delta_t[indices][-1]
            if not np.isnan(rs_mean.delta_t[indices][-1]) else 0)
        for x in indices:
            if rs_mean.times[x] is not None:
                tc = tc + 1
                #tv=tv+(rs_mean.times[x]-ft).total_seconds()
    if tc > 0:
        profiles.times = hau.T_Array([ft])  #+timedelta(seconds=tv/tc), ])
        profiles.start = ft
        profiles.width = lt - ft
        profiles.delta_t = hau.T_Array([profiles.width.total_seconds()])
    else:
        profiles.times = hau.T_Array([])
        profiles.start = ft
        profiles.width = timedelta(seconds=0)
        profiles.delta_t = hau.T_Array([])
    profiles.start_time = ft
    profiles.end_time = lt
    profiles.hist.ft = ft
    profiles.hist.tc = tc
    #profiles.hist.tv=tv
    profiles.hist.lt = lt
    #profiles.hist.total_seeded_shots=total_seeded_shots
    if rs_mean is not None and hasattr(rs_mean, 'altitudes'):
        profiles.altitudes = rs_mean.altitudes.copy()

    #elif hasattr(old_profiles,'altitudes'):
    #  profiles.altitudes=old_profiles.altitudes
    elif hasattr(old_profiles, 'heights'):
        profiles.heights = old_profiles.heights

    accumulate(profiles,
               old_profiles,
               rs_mean,
               indices,
               'shots',
               pref='mean_',
               filler=hau.T_Array([0]))
    total_shots = profiles.mean_shots
    profiles.shots = total_shots.copy()
    print 'Total shots for profile =', total_shots
    #'energies' and 'channel_shorthand' are assumed to be module-level constants
    for e in energies:
        accumulate(profiles, old_profiles, rs_mean, indices, e, total_shots)
    # create TZ_Array with time dimension of '1', so hsrl_inversion doesn't choke
    for chan in channel_shorthand.keys():
        accumulate(profiles, old_profiles, rs_mean, indices, chan, total_shots,
                   mask)

    #compute inverted profiles from mean count profiles
    if Cxx is not None and hasattr(profiles, 'elastic_counts'):
        import raman.core.raman_processing_utilities as rpu
        profiles.inv = rpu.process_raman(consts, profiles, process_control,
                                         rs_cal, Cxx, corr_adjusts)
        import lidar.sg_extinction as lsge
        filter_params = lsge.filter_setup(profiles.inv.altitudes,
                                          process_control, consts)
        sg_ext = lsge.sg_extinction(filter_params)
        profiles.inv.extinction,profiles.inv.extinction_aerosol,profiles.inv.p180 \
              = sg_ext(profiles.times,profiles.delta_t,profiles.nitrogen_counts ,profiles.inv.beta_a_backscat,profiles.inv.integ_backscat,beta_r=Cxx.beta_r_355)

    elif hasattr(old_profiles, 'inv'):
        profiles.inv = old_profiles.inv
    return profiles
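The accumulate() helper used throughout these examples is not shown; a minimal sketch of the shot-weighted averaging it presumably performs when folding new mean count profiles into a running profile (the name and signature below are hypothetical):

import numpy as np

def weighted_mean_profile(old_mean, old_shots, new_counts, new_shots):
    #hypothetical reduction: fold per-record count profiles (new_counts, one row per
    #record) into an existing mean profile, weighting every record by its shot count
    total_shots = old_shots + new_shots.sum()
    combined = old_mean * old_shots + (new_counts * new_shots[:, None]).sum(axis=0)
    return combined / total_shots, total_shots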
Example n. 22
0
def HSRLPolConfig(x, thQWP):
    """
outputs the polarization configuration vectors of all 4 HSRL channels for a provided
    x - a dictionary containing all polarization calibration terms
    thQWP - a range of QWP angles in radians
returns 
[Si,DxV,DtV,DmV] 

"""

    thQWP = thQWP + x['qwp_rot_offset']

    DxV = np.zeros((len(thQWP), 4))  # N x 4
    DtV = np.zeros((len(thQWP), 4))  # N x 4
    DmV = np.zeros((len(thQWP), 4))  # N x 4
    Si = np.zeros((4, len(thQWP)))  # 4 x N

    Stx = np.array((1, cos(2 * x['tx_elip']) * cos(2 * x['tx_rot']),
                    cos(2 * x['tx_elip']) * sin(2 * x['tx_rot']),
                    sin(2 * x['tx_elip'])))  # 4 x 1
    Dx = np.array((1, x['cross_rx_diat'] * cos(2 * x['cross_rx_rot']) *
                   cos(2 * x['cross_rx_elip']), x['cross_rx_diat'] *
                   sin(2 * x['cross_rx_rot']) * cos(2 * x['cross_rx_elip']),
                   x['cross_rx_diat'] * sin(2 * x['cross_rx_elip'])))  # 1 x 4
    Dm = np.array((1, x['mol_rx_diat'] * cos(2 * x['mol_rx_rot']) *
                   cos(2 * x['mol_rx_elip']), x['mol_rx_diat'] *
                   sin(2 * x['mol_rx_rot']) * cos(2 * x['mol_rx_elip']),
                   x['mol_rx_diat'] * sin(2 * x['mol_rx_elip'])))  # 1 x 4
    Dt = np.array((1, x['total_rx_diat'] * cos(2 * x['total_rx_rot']) *
                   cos(2 * x['total_rx_elip']), x['total_rx_diat'] *
                   sin(2 * x['total_rx_rot']) * cos(2 * x['total_rx_elip']),
                   x['total_rx_diat'] * sin(2 * x['total_rx_elip'])))  # 1 x 4
    Dl = np.array(
        (1, x['lowgain_rx_diat'] * cos(2 * x['lowgain_rx_rot']) *
         cos(2 * x['lowgain_rx_elip']), x['lowgain_rx_diat'] *
         sin(2 * x['lowgain_rx_rot']) * cos(2 * x['lowgain_rx_elip']),
         x['lowgain_rx_diat'] * sin(2 * x['lowgain_rx_elip'])))
    # Matt: Changed to Matrix Multiplication
    Mir1 = np.dot(RMueller(x['mirror_rot']),
                  np.dot(
                      VWP(0, x['mirror_phase']),
                      np.dot(DiatU(x['mirror_diat'] * np.array((1, 0, 0))),
                             RMueller(-x['mirror_rot']))))  # 4 x 4
    Mir2 = RevMueller(Mir1)  # 4 x 4
    # Matt: Changed to Matrix Multiplication
    Twin1 = np.dot(RMueller(x['window_diat_rot']),np.dot(DiatU(x['window_diat']*np.array((1,0,0))) ,  \
        np.dot(RMueller(-x['window_diat_rot']),VWP(x['window_phase_rot'],x['window_phase']) ) ) ) # 4 x 4
    Twin2 = RevMueller(Twin1)  # 4 x 4

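    #in the loop below the transmitted Stokes vector passes QWP -> mirror -> window,
    #while the receiver diattenuation rows traverse the reversed (RevMueller) optics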
    for ai in range(len(thQWP)):
        QWP1 = VWP(
            -thQWP[ai] + x['qwp_rot_0'], x['qwp_phase_0'] +
            x['qwp_phase_2'] * np.cos(-2 * (thQWP[ai] - x['qwp_rot_0'])) +
            x['qwp_phase_1'] * np.cos(-thQWP[ai] + x['qwp_rot_1']))  # 4 x 4
        QWP2 = RevMueller(QWP1)  # 4 x 4
        Si[:, ai] = np.dot(np.dot(np.dot(Twin1, Mir1), QWP1), Stx)
        DxV[ai, :] = np.dot(np.dot(np.dot(Dx, QWP2), Mir2), Twin2)
        DtV[ai, :] = np.dot(np.dot(np.dot(Dt, QWP2), Mir2), Twin2)
        DmV[ai, :] = np.dot(np.dot(np.dot(Dm, QWP2), Mir2), Twin2)

    # convert to T_Array, so we can process properly
    Si = hau.T_Array(Si)
    DxV = hau.T_Array(DxV)
    DtV = hau.T_Array(DtV)
    DmV = hau.T_Array(DmV)
    out = { 'Si': Si, 'Dx': Dx, 'DxV': DxV, 'Dt': Dt, 'DtV': DtV,
            'Dm': Dm, 'DmV': DmV,
            'QWP1': QWP1, 'QWP2': QWP2, 'Mir1': Mir1, 'Mir2': Mir2,
            'thQWP': thQWP, 'Twin1': Twin1, 'Twin2': Twin2,
            'Stx': Stx }

    #    return out
    return [Si, DxV, DtV, DmV]
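A usage sketch, assuming the module-level Mueller helpers (RMueller, VWP, DiatU, RevMueller) are available alongside HSRLPolConfig; the all-zero calibration dictionary is a hypothetical placeholder, not a real calibration:

import numpy as np

cal_terms = ['qwp_rot_offset', 'tx_elip', 'tx_rot',
             'cross_rx_diat', 'cross_rx_rot', 'cross_rx_elip',
             'mol_rx_diat', 'mol_rx_rot', 'mol_rx_elip',
             'total_rx_diat', 'total_rx_rot', 'total_rx_elip',
             'lowgain_rx_diat', 'lowgain_rx_rot', 'lowgain_rx_elip',
             'mirror_rot', 'mirror_phase', 'mirror_diat',
             'window_diat_rot', 'window_diat', 'window_phase_rot', 'window_phase',
             'qwp_rot_0', 'qwp_rot_1', 'qwp_phase_0', 'qwp_phase_1', 'qwp_phase_2']
x = dict((k, 0.0) for k in cal_terms)      # hypothetical identity calibration
thQWP = np.linspace(0.0, 2.0 * np.pi, 90)  # one full QWP revolution
Si, DxV, DtV, DmV = HSRLPolConfig(x, thQWP)
# Si is 4 x N (transmitted Stokes vs angle); DxV/DtV/DmV are N x 4 receiver vectors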