示例#1
0
def wyoming_pandas_to_dict(df):
    """Convert a Wyoming sounding pandas DataFrame into a plain dict.

    Args:
        df: DataFrame from the Wyoming sounding reader; ``df._metadata``
            holds a list of dicts with the station/observation metadata.

    Returns:
        dict with the sounding variables; wind speeds are converted from
        knots to base units.
    """
    # flatten the list of metadata dicts into a single dict
    metadata = {k: v for d in df._metadata for k, v in d.items()}
    # 'Observation time' is formatted YYMMDD/HHMM (two-digit year)
    sounding_time = metadata['Observation time']
    # bugfix: the minute was previously passed as the *base* argument of
    # int() for the hour -- int(hh, int(mm)) -- so datetime received only
    # four positional arguments and the hour was parsed in the wrong base
    date_sounding = datetime.datetime(
        int('20' + sounding_time[0:2]), int(sounding_time[2:4]),
        int(sounding_time[4:6]),
        int(sounding_time[7:9]), int(sounding_time[9:11]))
    # build dictionary
    sounding = {}
    sounding['dimlabel'] = ['range']
    sounding['range'] = df['range'].values
    sounding['speed'] = (df['speed'].values *
                         units.units('knots')).to_base_units().magnitude
    sounding['time'] = h.dt_to_ts(date_sounding)
    sounding['u_wind'] = (df['u_wind'].values *
                          units.units('knots')).to_base_units().magnitude
    sounding['v_wind'] = (df['v_wind'].values *
                          units.units('knots')).to_base_units().magnitude
    sounding['dewpoint'] = df['dewpoint']
    sounding['direction'] = df['direction']
    sounding['pressure'] = df['pressure']
    sounding['temperature'] = df['temperature']

    return sounding
示例#2
0
    def retfunc(f, time_interval, *further_intervals):
        """
        function that converts the txt file to larda data container

        Args:
            f: path of the tab-separated txt file; the filename must start
                with a ``%Y%m%d_%H`` timestamp
            time_interval: unused here, kept for the reader interface
            *further_intervals: unused here, kept for the reader interface

        Returns:
            larda data container with sounding data
        """
        import csv
        import datetime
        logger.debug("filename at reader {}".format(f))
        # use a separate name for the file handle instead of shadowing
        # the path argument `f`
        with open(f) as infile:
            reader = csv.reader(infile, delimiter='\t')
            headers = next(reader, None)
            var_index = [
                i for i, j in enumerate(headers)
                if j == paraminfo['variable_name']
            ]
            assert (
                len(var_index) == 1
            ), "mismatch between headers in file and variable name in toml"
            rg_index = [
                i for i, j in enumerate(headers)
                if j == paraminfo['range_variable']
            ]
            assert (
                len(rg_index) == 1
            ), "mismatch between headers in file and range variable name in toml"
            data = {}
            data['dimlabel'] = ['time', 'range']
            # single timestamp parsed from the filename (YYYYmmdd_HH)
            data['ts'] = np.array([
                h.dt_to_ts(
                    datetime.datetime.strptime(
                        infile.name.split('/')[-1][0:11], '%Y%m%d_%H'))
            ])
            data['var'] = []
            data['rg'] = []
            for row in reader:
                try:
                    data['var'].append(float(row[var_index[0]]))
                except ValueError:  # empty line cannot be converted to float
                    data['var'].append(np.nan)
                data['rg'].append(float(row[rg_index[0]]))
            # prepend a singleton time dimension -> shape (1, n_range)
            data['var'] = np.array(data['var'])[np.newaxis, :]
            data['rg'] = np.array(data['rg'])
            data['mask'] = np.isnan(data['var'])
            data['name'] = paraminfo['paramkey']
            data['system'] = paraminfo['system']
            data['var_lims'] = paraminfo['var_lims']
            # fall back to 'jet' if the toml does not specify a colormap
            data['colormap'] = paraminfo['colormap'] \
                if 'colormap' in paraminfo else 'jet'
            data['rg_unit'] = paraminfo['rg_unit']
            data['var_unit'] = paraminfo['var_unit']
            data['paraminfo'] = paraminfo
            data['filename'] = infile.name
            return data
示例#3
0
def get_time_slicer(ts, f, time_interval):
    """get time slicer from the time_interval
    Following options are available

    1. time_interval with [ts_begin, ts_end]
    2. only one timestamp is selected and the found
        right one would be beyond the ts range -> argnearest instead searchsorted
    3. only one is timestamp

    Args:
        ts: 1d array of unix timestamps
        f: filename (only used in the warning message)
        time_interval: list of one or two datetimes

    Returns:
        list containing a single slice, or None when the file does not
        cover the requested interval
    """

    # select first timestamp right of begin (not left if nearer as above)
    it_b = 0 if ts.shape[0] == 1 else np.searchsorted(
        ts, h.dt_to_ts(time_interval[0]), side='right')
    if len(time_interval) == 2:
        it_e = h.argnearest(ts, h.dt_to_ts(time_interval[1]))

        if it_b == ts.shape[0]:
            it_b = it_b - 1
        # tolerate up to three median time steps as gap at the file end
        valid_step = 3 * np.median(np.diff(ts))
        if ts[it_e] < h.dt_to_ts(
                time_interval[0]) - valid_step or ts[it_b] < h.dt_to_ts(
                    time_interval[0]):
            # second condition is to ensure that no timestamp before
            # the selected interval is chosen
            # (problem with limrad after change of sampling frequency)
            # bugfix: the message variable previously shadowed builtin `str`
            msg = 'found last profile of file {}\n at ts[it_e] {} too far ({}s) from {}\n'.format(
                    f, h.ts_to_dt(ts[it_e]), valid_step, time_interval[0]) \
                 + 'or begin too early {} < {}\n returning None'.format(h.ts_to_dt(ts[it_b]), time_interval[0])
            logger.warning(msg)
            return None

        it_e = it_e + 1 if not it_e == ts.shape[0] - 1 else None
        slicer = [slice(it_b, it_e)]
    elif it_b == ts.shape[0]:
        # only one timestamp is selected
        # and the found right one would be beyond the ts range
        it_b = h.argnearest(ts, h.dt_to_ts(time_interval[0]))
        slicer = [slice(it_b, it_b + 1)]
    else:
        slicer = [slice(it_b, it_b + 1)]
    return slicer
示例#4
0
    def collect(self, param, time_interval, *further_intervals, **kwargs):
        """collect the data from a parameter for the given intervals

        Args:
            param (str) identifying the parameter
            time_interval: list of begin and end datetime
            *further_intervals: range, velocity, ...
            **kwargs: additional query parameters forwarded to the backend

        Returns:
            data container from the remote backend with 'ts', 'rg', 'vel',
            'var' and 'mask' converted to numpy arrays

        Raises:
            ConnectionError: if the backend does not answer with status 200
        """
        resp_format = 'msgpack'
        interval = ["-".join([str(h.dt_to_ts(dt)) for dt in time_interval])]
        interval += ["-".join([str(i) for i in pair]) for pair in further_intervals]
        # bugfix: compare strings with '==' instead of identity ('is'),
        # which only worked by accident of CPython string interning
        stream = resp_format == "msgpack"
        params = {"interval": ','.join(interval), 'rformat': resp_format}
        params.update(kwargs)
        resp = requests.get(self.uri + '/api/{}/{}/{}'.format(self.camp_name, self.system, param),
                            params=params, stream=stream)
        logger.debug("fetching data from: {}".format(resp.url))
        if resp_format == "msgpack":
            # stream the body in 1 KiB chunks with a progress bar
            block_size = 1024
            pbar = tqdm(unit="B", total=(int(resp.headers.get('content-length', 0))//block_size)*block_size, unit_divisor=1024, unit_scale=True)
            content = bytearray()
            for data in resp.iter_content(block_size):
                content.extend(data)
                pbar.update(len(data))
            pbar.close()

        if resp.status_code != 200:
            if resp_format == "msgpack":
                print("Error at Backend")
                print(content.decode("unicode_escape"))
            else:
                print(resp.json())
            raise ConnectionError("bad status code of response {}".format(resp.status_code))

        # if resp_format == 'bin':
        #     data_container = cbor2.loads(resp.content)
        if resp_format == 'msgpack':
            logger.info("msgpack version {}".format(msgpack.version))
            # the 'encoding' kwarg was removed with msgpack 1.0
            if msgpack.version[0] < 1:
                data_container = msgpack.loads(content, encoding='utf-8')
            else:
                data_container = msgpack.loads(content, strict_map_key=False)
        elif resp_format == 'json':
            data_container = resp.json()

        # convert the plain lists from the wire format into numpy arrays
        for k in ['ts', 'rg', 'vel', 'var', 'mask']:
            if k in data_container and isinstance(data_container[k], list):
                data_container[k] = np.array(data_container[k])
        logger.info("loaded data container from remote: {}".format(data_container.keys()))
        return data_container
示例#5
0
def plot_range_spectrogram(ZSpec, dt, **font_settings):
    """Draw a range spectrogram of the 30 s window starting at *dt*.

    Args:
        ZSpec: xarray-like spectra with 'ts' and 'rg' coordinates
        dt: begin datetime of the window
        **font_settings: must contain 'range_interval' ([rg_min, rg_max])

    Relies on the module-level ``plt``, ``_FIG_SIZE`` and ``dBZ_lim``.
    """
    t0 = np.float64(h.dt_to_ts(dt))
    rg_lim = font_settings['range_interval']

    # slice out 30 s in time and the requested range window
    spec = ZSpec.sel(ts=slice(t0, t0 + 30.0),
                     rg=slice(rg_lim[0], rg_lim[1]))

    # flatten all profiles of the window into one spectrogram row per gate
    n_bins = 6 * 256
    signal = np.squeeze(spec.values.copy()).reshape((-1, n_bins), order='F')
    bin_axis = np.linspace(0, n_bins, num=n_bins)

    fig, ax = plt.subplots(1, figsize=_FIG_SIZE)
    mesh = ax.pcolormesh(bin_axis,
                         spec.rg,
                         signal[:, :],
                         vmin=dBZ_lim[0],
                         vmax=dBZ_lim[1],
                         cmap='jet')
    ax.set_xlabel(xlabel=r"Doppler velocity bins [-]")
    ax.set_ylim(rg_lim)
    ax.set_ylabel("range [m]")
    ax.grid()

    # color bar mapping values to colors
    cbar = fig.colorbar(mesh,
                        fraction=0.2,
                        shrink=1.,
                        pad=0.01,
                        orientation='vertical')
    cbar.ax.tick_params(axis='both', which='major',
                        labelsize=12, width=2, length=4)
    cbar.ax.tick_params(axis='both', which='minor', width=2, length=3)
    cbar.ax.minorticks_on()
    cbar.ax.set_ylabel("signal normalized")
    plt.tight_layout()
示例#6
0
    def t_r(f, time_interval, *further_intervals):
        """function that converts the trace netCDF to the data container

        Args:
            f: path of the netCDF file
            time_interval: list of one or two datetimes
            *further_intervals: first element is the range interval

        Returns:
            larda data container (dimlabel ['time', 'range', 'cat'])
            or None when the file does not cover the time interval
        """
        logger.debug("filename at reader {}".format(f))
        with netCDF4.Dataset(f, 'r') as ncD:

            times = ncD.variables[paraminfo['time_variable']][:].astype(
                np.float64)

            timeconverter, _ = h.get_converter_array(
                paraminfo['time_conversion'], ncD=ncD)
            ts = timeconverter(times)

            # setup slice to load based on time_interval
            it_b = h.argnearest(ts, h.dt_to_ts(time_interval[0]))
            if len(time_interval) == 2:
                it_e = h.argnearest(ts, h.dt_to_ts(time_interval[1]))
                # tolerate up to three median time steps at the file end
                if ts[it_e] < h.dt_to_ts(
                        time_interval[0]) - 3 * np.median(np.diff(ts)):
                    logger.warning(
                        'last profile of file {}\n at {} too far from {}'.
                        format(f, h.ts_to_dt(ts[it_e]), time_interval[0]))
                    return None

                it_e = it_e + 1 if not it_e == ts.shape[0] - 1 else None
                slicer = [slice(it_b, it_e)]
            else:
                slicer = [slice(it_b, it_b + 1)]
            # was a leftover debug print(slicer)
            logger.debug('time slicer {}'.format(slicer))

            range_interval = further_intervals[0]
            ranges = ncD.variables[paraminfo['range_variable']]
            logger.debug('loader range conversion {}'.format(
                paraminfo['range_conversion']))
            rangeconverter, _ = h.get_converter_array(
                paraminfo['range_conversion'], altitude=paraminfo['altitude'])
            ir_b = h.argnearest(rangeconverter(ranges[:]), range_interval[0])
            if len(range_interval) == 2:
                if not range_interval[1] == 'max':
                    ir_e = h.argnearest(rangeconverter(ranges[:]),
                                        range_interval[1])
                    ir_e = ir_e + 1 if not ir_e == ranges.shape[0] - 1 else None
                else:
                    # 'max' loads up to the topmost range gate
                    ir_e = None
                slicer.append(slice(ir_b, ir_e))
            else:
                slicer.append(slice(ir_b, ir_b + 1))

            varconverter, maskconverter = h.get_converter_array(
                paraminfo['var_conversion'])

            its = np.arange(ts.shape[0])[tuple(slicer)[0]]
            irs = np.arange(ranges.shape[0])[tuple(slicer)[1]]
            # all-False mask of the sliced shape (removed the dead
            # pre-allocation of `var` that was overwritten right below)
            mask = np.zeros((its.shape[0], irs.shape[0]))

            var = ncD.variables[paraminfo['variable_name']][
                tuple(slicer)[0], tuple(slicer)[1], :]

            data = {}
            data['dimlabel'] = ['time', 'range', 'cat']

            data["filename"] = f
            data["paraminfo"] = paraminfo
            data['ts'] = ts[tuple(slicer)[0]]

            data['system'] = paraminfo['system']
            data['name'] = paraminfo['paramkey']
            data['colormap'] = paraminfo['colormap']

            if 'meta' in paraminfo:
                data['meta'] = NcReader.get_meta_from_nc(
                    ncD, paraminfo['meta'], paraminfo['variable_name'])

            variable = ncD.variables[paraminfo['variable_name']]
            var_definition = ast.literal_eval(
                variable.getncattr(paraminfo['identifier_var_def']))
            # normalize the 'forrest' spelling present in some files
            if var_definition[1] == "forrest":
                var_definition[1] = "forest"

            data['var_definition'] = var_definition

            data['rg'] = rangeconverter(ranges[tuple(slicer)[1]])
            data['rg_unit'] = NcReader.get_var_attr_from_nc(
                "identifier_rg_unit", paraminfo, ranges)
            logger.debug('shapes {} {} {}'.format(ts.shape, ranges.shape,
                                                  var.shape))

            data['var_unit'] = NcReader.get_var_attr_from_nc(
                "identifier_var_unit", paraminfo, var)
            data['var_lims'] = [float(e) for e in \
                                NcReader.get_var_attr_from_nc("identifier_var_lims",
                                                    paraminfo, var)]

            data['var'] = varconverter(var)
            data['mask'] = maskconverter(mask)

            return data
示例#7
0
    def pt_ret(f, time_interval, *further_intervals):
        """function that converts the peakTree netCDF to the data container

        Args:
            f: path of the netCDF file
            time_interval: list of one or two datetimes
            *further_intervals: first element is the range interval

        Returns:
            larda data container (dimlabel ['time', 'range', 'dict'])
            or None when the file does not cover the time interval
        """
        logger.debug("filename at reader {}".format(f))
        with netCDF4.Dataset(f, 'r') as ncD:

            times = ncD.variables[paraminfo['time_variable']][:].astype(np.float64)
            # add sub-second resolution if the file provides it
            if 'time_millisec_variable' in paraminfo.keys() and \
                    paraminfo['time_millisec_variable'] in ncD.variables:
                subsec = ncD.variables[paraminfo['time_millisec_variable']][:]/1.0e3
                times += subsec
            if 'time_microsec_variable' in paraminfo.keys() and \
                    paraminfo['time_microsec_variable'] in ncD.variables:
                subsec = ncD.variables[paraminfo['time_microsec_variable']][:]/1.0e6
                times += subsec

            timeconverter, _ = h.get_converter_array(
                paraminfo['time_conversion'], ncD=ncD)
            ts = timeconverter(times)

            # setup slice to load based on time_interval
            it_b = h.argnearest(ts, h.dt_to_ts(time_interval[0]))
            if len(time_interval) == 2:
                it_e = h.argnearest(ts, h.dt_to_ts(time_interval[1]))
                # tolerate up to three median time steps at the file end
                if ts[it_e] < h.dt_to_ts(time_interval[0])-3*np.median(np.diff(ts)):
                    logger.warning(
                            'last profile of file {}\n at {} too far from {}'.format(
                                f, h.ts_to_dt(ts[it_e]), time_interval[0]))
                    return None

                it_e = it_e+1 if not it_e == ts.shape[0]-1 else None
                slicer = [slice(it_b, it_e)]
            else:
                slicer = [slice(it_b, it_b+1)]
            # was a leftover debug print(slicer)
            logger.debug('time slicer {}'.format(slicer))

            # NOTE(review): `ranges` and the range part of `slicer` are only
            # defined inside this branch; other ncreader values would fail
            # below -- presumably this reader is only used with 'peakTree'
            if paraminfo['ncreader'] == 'peakTree':
                range_interval = further_intervals[0]
                ranges = ncD.variables[paraminfo['range_variable']]
                logger.debug('loader range conversion {}'.format(paraminfo['range_conversion']))
                rangeconverter, _ = h.get_converter_array(
                    paraminfo['range_conversion'],
                    altitude=paraminfo['altitude'])
                ir_b = h.argnearest(rangeconverter(ranges[:]), range_interval[0])
                if len(range_interval) == 2:
                    if not range_interval[1] == 'max':
                        ir_e = h.argnearest(rangeconverter(ranges[:]), range_interval[1])
                        ir_e = ir_e+1 if not ir_e == ranges.shape[0]-1 else None
                    else:
                        # 'max' loads up to the topmost range gate
                        ir_e = None
                    slicer.append(slice(ir_b, ir_e))
                else:
                    slicer.append(slice(ir_b, ir_b+1))

            varconverter, maskconverter = h.get_converter_array(
                paraminfo['var_conversion'])

            # (removed dead pre-allocations of `var`/`mask` and the unused
            # `range_tg`/`its`/`irs`; var and mask are produced
            # unconditionally by the tree builders below)
            param_list = [
                ncD.variables['parent'][tuple(slicer)[0],tuple(slicer)[1],:], #0
                ncD.variables['Z'][tuple(slicer)[0],tuple(slicer)[1],:],      #1
                ncD.variables['v'][tuple(slicer)[0],tuple(slicer)[1],:],      #2
                ncD.variables['width'][tuple(slicer)[0],tuple(slicer)[1],:],  #3
                ncD.variables['skew'][tuple(slicer)[0],tuple(slicer)[1],:],   #4
                ncD.variables['threshold'][tuple(slicer)[0],tuple(slicer)[1],:], #5
                ncD.variables['prominence'][tuple(slicer)[0],tuple(slicer)[1],:], #6
                ncD.variables['bound_l'][tuple(slicer)[0],tuple(slicer)[1],:],    #7
                ncD.variables['bound_r'][tuple(slicer)[0],tuple(slicer)[1],:]     #8
            ]
            # LDR is optional; the tree builders switch on its availability
            if 'LDR' in ncD.variables.keys():
                ldr_avail = True
                param_list.append(ncD.variables['LDR'][tuple(slicer)[0],tuple(slicer)[1],:])  #9
                param_list.append(ncD.variables['ldrmax'][tuple(slicer)[0],tuple(slicer)[1],:]) #10
            else:
                ldr_avail = False
            data = np.stack(tuple(param_list), axis=3)
            # was a leftover debug print(data.shape)
            logger.debug('stacked parameter shape {}'.format(data.shape))
            if fastbuilder:
                var, mask = peakTree_fastbuilder.array_to_tree_c(data.astype(float), ldr_avail)
            else:
                var, mask = array_to_tree_py(data, ldr_avail)

            data = {}
            data['dimlabel'] = ['time', 'range', 'dict']

            data["filename"] = f
            data["paraminfo"] = paraminfo
            data['ts'] = ts[tuple(slicer)[0]]

            data['system'] = paraminfo['system']
            data['name'] = paraminfo['paramkey']
            data['colormap'] = paraminfo['colormap']

            data['rg'] = rangeconverter(ranges[tuple(slicer)[1]])
            data['rg_unit'] = NcReader.get_var_attr_from_nc("identifier_rg_unit", 
                                                paraminfo, ranges)
            logger.debug('shapes {} {} {}'.format(ts.shape, ranges.shape, var.shape))

            data['var_unit'] = NcReader.get_var_attr_from_nc("identifier_var_unit", 
                                                    paraminfo, var)
            data['var_lims'] = [float(e) for e in \
                                NcReader.get_var_attr_from_nc("identifier_var_lims", 
                                                    paraminfo, var)]

            data['var'] = varconverter(var)
            data['mask'] = maskconverter(mask)

            return data
示例#8
0
    def retfunc(f, time_interval, range_interval):
        """function that converts the netCDF to the larda-data-format

        Args:
            f: path of the netCDF file
            time_interval: list of one or two datetimes
            range_interval: list of one or two range bounds
                (second element may be 'max')

        Returns:
            larda data container (dimlabel ['time', 'range', 'vel'])
            or None when the file does not cover the time interval
        """
        logger.debug("filename at reader {}".format(f))

        with netCDF4.Dataset(f, 'r') as ncD:
            ranges = ncD.variables[paraminfo['range_variable']]
            times = ncD.variables[paraminfo['time_variable']][:].astype(
                np.float64)
            # bugfix: np.int was deprecated in numpy 1.20 and removed in
            # 1.24; it was just an alias of the builtin int
            locator_mask = ncD.variables[paraminfo['mask_var']][:].astype(
                int)
            # add sub-second / base-time offsets if the file provides them
            if 'time_millisec_variable' in paraminfo.keys() and \
                    paraminfo['time_millisec_variable'] in ncD.variables:
                subsec = ncD.variables[
                    paraminfo['time_millisec_variable']][:] / 1.0e3
                times += subsec
            if 'time_microsec_variable' in paraminfo.keys() and \
                    paraminfo['time_microsec_variable'] in ncD.variables:
                subsec = ncD.variables[
                    paraminfo['time_microsec_variable']][:] / 1.0e6
                times += subsec
            if 'base_time_variable' in paraminfo.keys() and \
                    paraminfo['base_time_variable'] in ncD.variables:
                basetime = ncD.variables[
                    paraminfo['base_time_variable']][:].astype(np.float64)
                times += basetime
            timeconverter, _ = h.get_converter_array(
                paraminfo['time_conversion'], ncD=ncD)
            ts = timeconverter(times)

            # first timestamp right of the interval begin
            it_b = np.searchsorted(ts,
                                   h.dt_to_ts(time_interval[0]),
                                   side='right')
            if len(time_interval) == 2:
                it_e = h.argnearest(ts, h.dt_to_ts(time_interval[1]))
                if it_b == ts.shape[0]:
                    it_b = it_b - 1
                if ts[it_e] < h.dt_to_ts(time_interval[0]) - 3 * np.median(np.diff(ts)) \
                        or ts[it_b] < h.dt_to_ts(time_interval[0]):
                    # second condition is to ensure that no timestamp before
                    # the selected interval is chosen
                    # (problem with limrad after change of sampling frequency)
                    logger.warning(
                        'last profile of file {}\n at {} too far from {}'.
                        format(f, h.ts_to_dt(ts[it_e]), time_interval[0]))
                    return None
                it_e = it_e + 1 if not it_e == ts.shape[0] - 1 else None
                slicer = [slice(it_b, it_e)]
            elif it_b == ts.shape[0]:
                # only one timestamp is selected
                # and the found right one would be beyond the ts range
                it_b = h.argnearest(ts, h.dt_to_ts(time_interval[0]))
                slicer = [slice(it_b, it_b + 1)]
            else:
                slicer = [slice(it_b, it_b + 1)]

            rangeconverter, _ = h.get_converter_array(
                paraminfo['range_conversion'])

            varconverter, _ = h.get_converter_array(
                paraminfo['var_conversion'])

            ir_b = h.argnearest(rangeconverter(ranges[:]), range_interval[0])
            if len(range_interval) == 2:
                if not range_interval[1] == 'max':
                    ir_e = h.argnearest(rangeconverter(ranges[:]),
                                        range_interval[1])
                    ir_e = ir_e + 1 if not ir_e == ranges.shape[0] - 1 else None
                else:
                    # 'max' loads up to the topmost range gate
                    ir_e = None
                slicer.append(slice(ir_b, ir_e))
            else:
                slicer.append(slice(ir_b, ir_b + 1))

            range_out = rangeconverter(ranges[tuple(slicer)[1]])
            # calibration constant attribute of the file
            cal = getattr(ncD, paraminfo['cal_const'])
            var = ncD.variables[paraminfo['variable_name']][:].astype(
                np.float64)
            # expand the compressed spectra via the locator mask
            var = var[locator_mask]
            vel = ncD.variables[paraminfo['vel_variable']][:].astype(
                np.float64)

            data = {}
            data['dimlabel'] = ['time', 'range', 'vel']
            data["filename"] = f
            data["paraminfo"] = paraminfo
            data['ts'] = ts[tuple(slicer)[0]]
            data['rg'] = range_out

            data['system'] = paraminfo['system']
            data['name'] = paraminfo['paramkey']
            data['colormap'] = paraminfo['colormap']

            # also experimental: vis_varconverter
            if 'plot_varconverter' in paraminfo and paraminfo[
                    'plot_varconverter'] != 'none':
                data['plot_varconverter'] = paraminfo['plot_varconverter']
            else:
                data['plot_varconverter'] = ''

            data['rg_unit'] = get_var_attr_from_nc("identifier_rg_unit",
                                                   paraminfo, ranges)
            data['var_unit'] = 'dBZ m-1 s'
            data['var_lims'] = [float(e) for e in \
                                get_var_attr_from_nc("identifier_var_lims",
                                                     paraminfo, var)]
            data['vel'] = vel

            if "identifier_fill_value" in paraminfo.keys() \
                    and "fill_value" not in paraminfo.keys():
                # NOTE(review): `var` is a plain ndarray at this point, so
                # getncattr would raise here -- this branch looks unused
                # with this reader; confirm before relying on it
                fill_value = var.getncattr(paraminfo['identifier_fill_value'])
                data['mask'] = (var[tuple(slicer)].data == fill_value)
            elif "fill_value" in paraminfo.keys():
                fill_value = paraminfo["fill_value"]
                data['mask'] = np.isclose(var[tuple(slicer)], fill_value)
            elif "mask_var" in paraminfo.keys():
                # combine locator mask and mask of infinite values
                mask = locator_mask.mask[tuple(slicer)]
                data["mask"] = np.logical_or(
                    ~np.isfinite(var[tuple(slicer)].data),
                    np.repeat(mask[:, :, np.newaxis], len(data['vel']),
                              axis=2))
            else:
                data['mask'] = ~np.isfinite(var[tuple(slicer)].data)
            if isinstance(times, np.ma.MaskedArray):
                var = varconverter(var[tuple(slicer)].data)
            else:
                var = varconverter(var[tuple(slicer)])

            # to linear units, apply calibration constant (numeric part of
            # the attribute string) and range-square correction
            var2 = h.z2lin(var) * h.z2lin(float(
                cal[:-3])) * (range_out**2)[np.newaxis, :, np.newaxis]
            data['var'] = var2

            return data
示例#9
0
def get_explorer_link(campaign, time_interval, range_interval, params):
    """Build the larda explorer URL for a campaign, interval and parameters.

    Args:
        campaign: campaign name used in the URL path
        time_interval: [begin, end] datetimes
        range_interval: [rg_min, rg_max]
        params: iterable of parameter names

    Returns:
        the explorer URL as a string
    """
    ts_begin = h.dt_to_ts(time_interval[0])
    ts_end = h.dt_to_ts(time_interval[1])
    url = "http://larda.tropos.de/larda3/explorer/{}?interval={}-{}%2C{}-{}&params={}".format(
        campaign, ts_begin, ts_end, *range_interval, ",".join(params))
    return url
示例#10
0
def wrapper(mat_data, **kwargs):
    """Wrap a larda container around .mat file data to use the pyLARDA.Transformations library.

    Args:
        mat_data (.mat) : Matlab .mat file data

    Kwargs:
        var_name (string) : variable name in .mat file
        var_unit (string) : unit of the variable
        var_lims (list) : boundaries of the variable (for plotting)

    Return:
        container (dict) : sliced container
    """
    var_name = kwargs.get('var_name', '')
    var_unit = kwargs.get('var_unit', '')

    assert isinstance(var_name, str) and len(var_name) > 0, 'Error utility.wrapper! Check var_name argument!'

    # pick the time/range variable names by which kind of dict we received
    time_var, range_var, system = '', '', ''
    if 'ts_class_time' in mat_data.keys():  # set variable lists for cloudnet classification dict
        time_var, range_var = 'ts_class_time', 'h_class'
        # bugfix: was misspelled 'clouetnet-classification'
        system = 'cloudnet-classification'
    elif 'ts_cat_time' in mat_data.keys():  # set variable lists for cloudnet categorization dict
        time_var, range_var = 'ts_cat_time', 'h_cat'
        system = 'cloudnet-categorization'
    elif 'ts_polly_time' in mat_data.keys():  # set variable lists for PollyXT dict
        time_var, range_var = 'ts_polly_time', 'h_class'
    elif 'ts_NN_time' in mat_data.keys():  # set variable lists for predicted Ed Luke ANN prediction dict
        time_var, range_var = 'ts_NN_time', 'h_class'
    elif 'ts_sp_time' in mat_data.keys():  # set variable lists for Mira Doppler radar dict
        time_var, range_var = 'ts_sp_time', 'h_class'
        system = 'mira'
    elif 'ts_rs_time' in mat_data.keys():  # set variable lists for radiosonde dict
        time_var, range_var = 'ts_rs_time', 'h_rs'
        system = 'radio-sonde'
    # NOTE(review): the PollyXT and ANN branches leave `system` empty --
    # confirm this is intended

    # choose display name and colormap for the known special variables
    if var_name == 'target_class_ts':
        name = 'CLASS'
        colormap = 'cloudnet_target_new'
    elif var_name == 'combi_liq_mask':
        system = 'ann-vs-cloudnet-cloud-droplet-mask'
        name = 'CLASS'
        colormap = 'four_colors'
    elif var_name == 'ldr_cc_ts':
        system = 'cloudnet-categorization'
        name = var_name
        colormap = 'LDR'
    else:
        name = var_name
        colormap = 'cloudnet_jet'

    # Matlab datenums -> datetimes -> unix timestamps
    dt_list = [datenum2datetime(dn) for dn in mat_data[time_var]]
    time = [h.dt_to_ts(dt) for dt in dt_list]
    if var_name == 'bsc_NN_ts' and 'var_converter' in kwargs and kwargs['var_converter'] == 'log':
        # backscatter was stored as log10; convert back to linear units
        var = np.ma.masked_invalid(mat_data[var_name].T)
        var = np.ma.power(10.0, var)
    else:
        var = mat_data[var_name].T
    var_lims = kwargs['var_lims'] if 'var_lims' in kwargs else [np.nanmin(var), np.nanmax(var)]

    if len(var.shape) == 2:
        dimlabel = ['time', 'range']
    elif len(var.shape) == 1:
        dimlabel = ['time']
    else:
        raise ValueError('Variable must be 1D or 2D!')

    larda_container = {'dimlabel': dimlabel,
                       'filename': 'accept .mat file',
                       'paraminfo': {},
                       'rg_unit': 'm',
                       'colormap': colormap,
                       'var_unit': var_unit,
                       'var_lims': var_lims,
                       'system': system,
                       'name': name,
                       'rg': np.array(mat_data[range_var]),
                       'ts': np.array(time),
                       'mask': np.isnan(var),
                       'var': var}

    return larda_container
示例#11
0
# NOTE(review): `year` and `month` (and datetime, larda, pyLARDA, h) must be
# defined earlier in the file -- not visible in this chunk; confirm.
day = 18
HH0 = 9
MM0 = 24

HH1 = 9
MM1 = 25

# one-minute analysis window on the given day
begin_dt = datetime.datetime(year, month, day, HH0, MM0, 0)
end_dt = datetime.datetime(year, month, day, HH1, MM1, 0)
plot_range = [0, 12000]

# load Doppler spectra of both radars over the full range column
MIRA_Zspec = larda.read("MIRA", "Zspec", [begin_dt, end_dt], [0, 'max'])
LIMRAD_Zspec = larda.read("LIMRAD94", "VSpec", [begin_dt, end_dt], [0, 'max'])

print("slice time-range spectrogram")
# restrict both containers to the same time window and a 100 m range layer
interval = {
    'time': [h.dt_to_ts(begin_dt), h.dt_to_ts(end_dt)],
    'range': [6300, 6400]
}

spectrogram_LIMRAD = pyLARDA.Transformations.slice_container(LIMRAD_Zspec,
                                                             value=interval)
spectrogram_MIRA = pyLARDA.Transformations.slice_container(MIRA_Zspec,
                                                           value=interval)

# compare both radars' spectra in one figure (reflectivity in dBZ)
name = 'plots/PNG/spectra_limrad_mira_'
fig, ax = pyLARDA.Transformations.plot_spectra(spectrogram_LIMRAD,
                                               spectrogram_MIRA,
                                               z_converter='lin2z',
                                               save=name)