Code example #1
def make_hdf():
    datadir = path.join(home(), 'DATA', 'HYY_DenData')
    p200dir = path.join(datadir, 'Pluvio200')
    p400dir = path.join(datadir, 'Pluvio400')
    pipdir = path.join(datadir, 'PIP')
    psddir = path.join(pipdir, 'f_1_4_DSD_Tables_ascii')
    vdir = path.join(pipdir, 'f_2_2_Velocity_Tables')
    h5file = path.join(datadir, 'annakaisa15-18.h5')
    pluv_paths = dict(pluvio200=p200dir, pluvio400=p400dir)
    instr = dict()
    # read pluvio data
    for p in ['pluvio200', 'pluvio400']:
        fnames = glob(path.join(pluv_paths[p], '*.txt'))
        fnames.sort()
        pluv = pluvio.Pluvio(fnames, name=p)
        selection = pluv.data.i_rt.apply(lambda x: type(x) == float)
        pluv.data = pluv.data[selection].astype(float)
        instr[p] = pluv
    # read pip data
    vfiles = glob(path.join(vdir, '*', '*.dat'))
    vfiles.sort()
    psdfiles = glob(path.join(psddir, '*.dat'))
    psdfiles.sort()
    instr['vel'] = pip_v.PipV(vfiles)
    instr['dsd'] = pip_psd.PipPSD(psdfiles)
    for i in instr.values():
        i.to_hdf(filename=h5file)
Code example #2
File: scr_density18.py  Project: juhi24/baecc
def make_hdf():
    datadir = path.join(home(), 'DATA', 'HYY_DenData')
    p200dir = path.join(datadir, 'Pluvio200')
    p400dir = path.join(datadir, 'Pluvio400')
    pipdir = path.join(datadir, 'PIP')
    psddir = path.join(pipdir, 'f_1_4_DSD_Tables_ascii')
    vdir = path.join(pipdir, 'f_2_2_Velocity_Tables')
    h5file = path.join(datadir, 'annakaisa15-18.h5')
    pluv_paths = dict(pluvio200=p200dir, pluvio400=p400dir)
    instr = dict()
    # read pluvio data
    for p in ['pluvio200', 'pluvio400']:
        fnames = glob(path.join(pluv_paths[p], '*.txt'))
        fnames.sort()
        pluv = pluvio.Pluvio(fnames, name=p)
        selection = pluv.data.i_rt.apply(lambda x: type(x) == float)
        pluv.data = pluv.data[selection].astype(float)
        instr[p] = pluv
    # read pip data
    vfiles = glob(path.join(vdir, '*', '*.dat'))
    vfiles.sort()
    psdfiles = glob(path.join(psddir, '*.dat'))
    psdfiles.sort()
    instr['vel'] = pip_v.PipV(vfiles)
    instr['dsd'] = pip_psd.PipPSD(psdfiles)
    for i in instr.values():
        i.to_hdf(filename=h5file)
Code example #3
File: case.py  Project: zzwei1/radcomp
    def fr(self):
        """rime mass fraction"""
        t_end = self.t_end() + pd.Timedelta(minutes=15)
        hdfpath = path.join(home(), 'DATA', 'FR_haoran.h5')
        if not path.exists(hdfpath):
            return pd.Series()
        fr = pd.read_hdf(hdfpath, 'data')[self.t_start():t_end]
        return self.time_weighted_mean(fr, offset_half_delta=False)
Code example #4
def load_pluvio(start=None, end=None, kind='400'):
    """Load Pluvio data from hdf5 database."""
    import baecc.instruments.pluvio as pl
    name = 'pluvio{}'.format(str(kind))
    hdfpath = path.join(home(), 'DATA', 'pluvio14-16.h5')
    data = pd.read_hdf(hdfpath, key=name)[start:end]
    pluv = pl.Pluvio(data=data, name=name)
    return pluv
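
Usage sketch for load_pluvio (the date range is illustrative and the call assumes pluvio14-16.h5 exists under DATA with a DatetimeIndex):

# hypothetical call: load one day of Pluvio400 data and inspect it
pluv = load_pluvio(start='2014-02-21', end='2014-02-22', kind='400')
print(pluv.name)         # 'pluvio400'
print(pluv.data.head())  # data sliced to the requested range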
Code example #5
File: case.py  Project: zzwei1/radcomp
    def t_surface(self, use_arm=False, interp_gaps=True):
        """resampled ground temperature

        Returns:
            Series: resampled temperature
        """
        t_end = self.t_end() + pd.Timedelta(minutes=15)
        if use_arm:
            t = arm.var_in_timerange(self.t_start(), t_end, var='temp_mean')
        else:
            hdfpath = path.join(home(), 'DATA', 't_fmi_14-17.h5')
            if not path.exists(hdfpath):
                return pd.Series()
            t = pd.read_hdf(hdfpath, 'data')['TC'][self.t_start():t_end]
            t.name = 'temp_mean'
        tre = t.resample('15min', base=self.base_minute()).mean()
        if interp_gaps:
            tre = tre.interpolate()
        return tre
Code example #6
File: case.py  Project: zzwei1/radcomp
from os import path

import pandas as pd
import xarray as xr
import matplotlib as mpl
import matplotlib.pyplot as plt
from scipy.io import loadmat

from radcomp.vertical import (filtering, classification, plotting, insitu, ml,
                              deriv, NAN_REPLACEMENT)
from radcomp import arm, azs
from radcomp.tools import strftime_date_range, cloudnet
from j24 import home, daterange2str

USE_LEGACY_DATA = False

if USE_LEGACY_DATA:
    DATA_DIR = path.join(home(), 'DATA', 'vprhi')
    DATA_FILE_FMT = '%Y%m%d_IKA_VP_from_RHI.mat'
else:
    DATA_DIR = path.join(home(), 'DATA', 'vprhi2')
    DATA_FILE_FMT = '%Y%m%d_IKA_vprhi.mat'
DEFAULT_PARAMS = ['zh', 'zdr', 'kdp']


def case_id_fmt(t_start,
                t_end=None,
                dtformat='{year}{month}{day}{hour}',
                day_fmt='%d',
                month_fmt='%m',
                year_fmt='%y',
                hour_fmt='T%H'):
    """daterange2str wrapper for date range based IDs"""
Code example #7
File: scr_plot_rhi.py  Project: zzwei1/radcomp
# coding: utf-8

import pyart
import matplotlib.pyplot as plt
from os import path
from radcomp.vertical import RESULTS_DIR
from j24 import home

if __name__ == '__main__':
    #plt.close('all')
    rawfile = path.join(home(), 'DATA', '201402152010_IKA.RHI_HV.raw')
    outfile = path.join(RESULTS_DIR, 'rhi_sample.png')
    radar = pyart.io.read(rawfile)
    fig, ax = plt.subplots()
    display = pyart.graph.RadarDisplay(radar)
    display.plot('differential_reflectivity', vmin=-0.25, vmax=2)
    ax.set_ylim(bottom=0, top=10)
    ax.set_xlim(left=0, right=80)
    fig.savefig(outfile)
Code example #8
# coding: utf-8

import numpy as np
import pandas as pd
from scipy.io import loadmat
from os import path
from j24 import home, mldatenum2datetime

fpath = path.join(home(), 'DATA', 'FMI_meteo_data_2014_2017.mat')
hdfpath = path.join(home(), 'DATA', 't_fmi_14-17.h5')
data = loadmat(fpath)['meteo_fmi']


class TFMI:
    """FMI temperature mat struct"""
    def __init__(self, data):
        self.data = data  # struct from mat

    def get(self, param):
        return self.data[param][0][0].flatten()  # use the instance's struct, not the module-level variable

    def time(self):
        return np.array(list(map(mldatenum2datetime, self.get('ObsTime'))))

    def fields(self):
        return list(self.data[0].dtype.fields)

    def to_dataframe(self):
        f = self.fields()  # copy
        f.remove('ObsTime')
        index = self.time()
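
The to_dataframe excerpt is truncated by the listing. A hedged usage sketch of the completed accessors (the field name 'TC' is an assumption, borrowed from the HDF key used in code example #5):

tfmi = TFMI(data)
print(tfmi.fields())   # struct field names, e.g. ObsTime, TC, ...
t = tfmi.time()        # observation times as datetimes
tc = tfmi.get('TC')    # flattened values of one field
series = pd.Series(tc, index=t)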
Code example #9
File: scr_density.py  Project: juhi24/baecc
# -*- coding: utf-8 -*-
"""
@author: Jussi Tiira
"""
import matplotlib.pyplot as plt
from os import path
from j24 import home, ensure_join

from scr_snowfall import pip2015events

#plt.close('all')
plt.ion()

basepath = ensure_join(home(), 'results', 'pip2015', 'density')
dtfmt = '%Y%m%d'

e = pip2015events()

rho_label = 'bulk density (kg m$^{-3}$)'
t_label = 'time'

for c in e.events.paper.values:
    savepath = basepath
    rho = c.density()
    rho.to_csv(path.join(savepath, c.dtstr(dtfmt) + '.csv'))
    c.instr['pluvio'].tdelta().to_csv(path.join(savepath, 'timedelta_' + c.dtstr(dtfmt) + '.csv'))
    plt.figure(dpi=120)
    rho.plot(drawstyle='steps')
    plt.title(c.dtstr())
    plt.xlabel(t_label)
    plt.ylabel(rho_label)
Code example #10
File: arm.py  Project: zzwei1/radcomp
# coding: utf-8
import numpy as np
import netCDF4 as nc
import pandas as pd
from glob import glob
from datetime import datetime
from os import path
from j24 import home

arm_dir = path.join(home(), 'DATA', 'arm')
SOUNDING_DIR = path.join(arm_dir, 'sounding')
GROUND_DIR = path.join(arm_dir, 'ground')
MWR_DIR = path.join(arm_dir, 'MWR')
all_soundings_f = 'tmpsondewnpnM1.b1.20140121.125200..20140330.172000.custom.cdf'
sounding_f = 'tmpsondewnpnM1.b1.20140131.115000.cdf'  # sample
all_soundings_path = path.join(SOUNDING_DIR, all_soundings_f)
sounding_path = path.join(SOUNDING_DIR, sounding_f)
SOUNDING_GLOB = path.join(SOUNDING_DIR,
                          'tmpsondewnpnM1.b1.20??????.??????.cdf')
GROUND_GLOB = path.join(GROUND_DIR, 'tmpmetM1.b1.20??????.??????.cdf')
MWR_GLOB = path.join(MWR_DIR, '*.cdf')
#s = nc.Dataset(soundings_f)


def time(ncdata, as_np=False):
    """time from ARM netCDF"""
    t0 = ncdata.variables['base_time'][0]
    if as_np:
        return np.array(t0 + ncdata.variables['time_offset'][:]).astype(
            'datetime64[s]')
    return pd.to_datetime(t0 + ncdata.variables['time_offset'][:], unit='s')
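
A brief usage sketch of the time helper (assumes the sample sounding file defined above is present on disk):

ds = nc.Dataset(sounding_path)
t = time(ds)                 # pandas DatetimeIndex
t_np = time(ds, as_np=True)  # numpy datetime64[s] array
ds.close()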
Code example #11
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import clustering as cl
import sonde
from os import path
from sklearn.cluster import KMeans
from j24 import home, learn
import datetime


np.random.seed(42)
plt.ion()
plt.close('all')

datadir = path.join(home(), 'DATA', 'pangaea', 'sonde')
storage_path = path.join(datadir, '96-16prep.h5')
#data = sonde.read_all_data(datadir)
#data.to_hdf(storage_path, 'data')
data = pd.read_hdf(storage_path, 'data')
#times = sonde.launch_times(data)

ww = cl.wind4clus(data)
tt = cl.t4clus(data, col='t')
hh = cl.t4clus(data, col='rh')
clus_vars = (ww, tt, hh)
km = KMeans(init='k-means++', n_clusters=6, n_init=40, n_jobs=-1)
wtr, isplit = cl.concat(clus_vars)
classes = learn.fit_predict(wtr, km)
cen = learn.centroids(wtr, km)
cw, ct, ch = cl.split(cen, isplit)
Code example #12
# coding: utf-8
import os
import warnings
import pickle
import hashlib
import pandas as pd
from j24 import home, ensure_dir

MSGTLD = '.msg'
PICKLETLD = '.pkl'
CACHE_DIR = os.path.join(home(), '.cache', 'baecc')


def fingerprint(string):
    return hashlib.sha256(string.encode('utf-8')).hexdigest()[-12:]


def hash_dict(d):
    return fingerprint(str(sorted(d.items())))


def combine2str(*identifiers):
    return ''.join(tuple(map(str, identifiers)))


class Cacher:
    """common methods to use msg cache"""
    def __init__(self, use_cache=True, storefilename='store.h5', parent=None):
        self.use_cache = use_cache
        self.storefilename = storefilename
        self.parent = parent
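
Illustrative use of the hashing helpers (the dict contents are made up; fingerprint always returns the last 12 hex characters of a SHA-256 digest):

cache_key = hash_dict({'rule': '15min', 'use_cache': True})
msg_name = combine2str('pluvio400', cache_key) + MSGTLD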
Code example #13
File: debug.py  Project: fagan2888/fmio-server
    border = basemap.border()

    x0 = 1.1e5
    y0 = 6.55e6
    x1 = 6.5e5
    y1 = 7e6
    #urls = fmi.available_maps(**t_range)
    fakeindex = pd.date_range(start=t_range['starttime'],
                              end=t_range['endtime'],
                              freq='5min')
    urls = pd.Series(index=fakeindex, dtype=object)  # fake urls
    #url = fmi.gen_url(timestamp='2017-10-17T07:00:00Z')
    dl = fmi.available_maps().tail(2)
    #fmi.download_maps(urls)
    paths = fmi.download_maps(dl)
    savedir = ensure_join(home(), 'results', 'sataako')

    ### FORECAST AND SAVE LOGIC ###
    rads = paths.apply(rasterio.open)
    crops, tr, meta = raster.crop_rasters(rads, **raster.DEFAULT_CORNERS)
    dtype = meta['dtype']
    rad_crs = rads.iloc[0].read_crs().data
    rads.apply(lambda x: x.close())
    rr = raster.raw2rr(crops)
    fcast = forecast.forecast(rr)
    savepaths = fcast.copy()
    pngpaths = fcast.copy()
    for t, fc in fcast.items():
        savepath = path.join(savedir, t.strftime(fmi.FNAME_FORMAT))
        savepaths.loc[t] = savepath
        raster.write_rr_geotiff(fc, meta, savepath)
Code example #14
File: sounding.py  Project: zzwei1/radcomp
# coding: utf-8

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from os import path
from datetime import timedelta
from j24 import home, ensure_join

CACHE_DIR = ensure_join(home(), '.pysonde', 'cache')
CACHE_KEY_FMT = 'wyo%Y%m%d%H'


def round_hours(timestamp, hres=12):
    """round timestamp to hres hours"""
    tt = timestamp + timedelta(hours=hres / 2)
    dt = timedelta(hours=tt.hour % hres, minutes=tt.minute, seconds=tt.second)
    return tt - dt


def sounding_url(t, dtype='text'):
    out_type = dict(pdf='PDF%3ASTUVE', text='TEXT%3ASIMPLE')
    baseurl = 'http://weather.uwyo.edu/cgi-bin/sounding'
    query = '?region=europe&TYPE={type}&YEAR={year}&MONTH={month:02d}&FROM={day:02d}{hour:02d}&TO={day:02d}{hour:02d}&STNM=02963'
    urlformat = baseurl + query
    return urlformat.format(type=out_type[dtype],
                            year=t.year,
                            month=t.month,
                            day=t.day,
                            hour=t.hour)
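
A quick usage sketch (the timestamp is illustrative; with the default hres=12 the rounding lands on hour 00 or 12):

from datetime import datetime
t = round_hours(datetime(2014, 2, 21, 14, 30))  # -> 2014-02-21 12:00:00
url = sounding_url(t, dtype='text')             # Wyoming sounding query for station 02963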
Code example #15
#import numpy as np
#import pandas as pd
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
import radcomp.visualization as vis
from matplotlib import gridspec
from os import path
from scipy.io import loadmat
from j24 import home
from radcomp.qpe.radar import RADARS

I_RADAR = dict(ker=0, kum=1, van=2, com=3)
NAMES = dict(ker='KER', kum='KUM', van='VAN', com='Composite')
RADAR = dict(ker=RADARS['KER'], kum=RADARS['KUM'], van=RADARS['VAN'])
RESULTSDIR = path.join(home(), 'results', 'radcomp', 'brandon')
PROJECTION = ccrs.Orthographic(central_longitude=25, central_latitude=60)


def datalist4radar(i_radar, data, param='kdp'):
    dat = data['{}_three_radars'.format(param)]
    n_timesteps = dat.shape[3]
    return [dat[:, :, i_radar, i] for i in range(n_timesteps)]


def datalist4timestep(i_timestep, data, param='kdp'):
    if param == 'r':
        sitestrs = ['ker', 'kum', 'van', 'c']
        return [data['rain_{}'.format(site)][:, :, i_timestep] for site in sitestrs]
    dat = data['{}_three_radars'.format(param)]
    n_radars = dat.shape[2]
Code example #16
File: caching.py  Project: juhi24/baecc
# coding: utf-8
import os
import warnings
import pickle
import hashlib
import pandas as pd
from j24 import home, ensure_dir

MSGTLD = '.msg'
PICKLETLD = '.pkl'
CACHE_DIR = os.path.join(home(), '.cache', 'baecc')


def fingerprint(string):
    return hashlib.sha256(string.encode('utf-8')).hexdigest()[-12:]


def hash_dict(d):
    return fingerprint(str(sorted(d.items())))


def combine2str(*identifiers):
    return ''.join(tuple(map(str, identifiers)))


class Cacher:
    """common methods to use msg cache"""
    def __init__(self, use_cache=True, storefilename='store.h5',
                 parent=None):
        self.use_cache = use_cache
        self.storefilename = storefilename
Code example #17
# coding: utf-8
from __future__ import absolute_import, division, print_function, unicode_literals

__metaclass__ = type

import pandas as pd
from os import path
from glob import glob
from baecc.instruments import pluvio, pip_psd, pip_v
from j24 import home, ensure_join

RESULTS_DIR = ensure_join(home(), 'results', 'density18')


def make_hdf():
    datadir = path.join(home(), 'DATA', 'HYY_DenData')
    p200dir = path.join(datadir, 'Pluvio200')
    p400dir = path.join(datadir, 'Pluvio400')
    pipdir = path.join(datadir, 'PIP')
    psddir = path.join(pipdir, 'f_1_4_DSD_Tables_ascii')
    vdir = path.join(pipdir, 'f_2_2_Velocity_Tables')
    h5file = path.join(datadir, 'annakaisa15-18.h5')
    pluv_paths = dict(pluvio200=p200dir, pluvio400=p400dir)
    instr = dict()
    # read pluvio data
    for p in ['pluvio200', 'pluvio400']:
        fnames = glob(path.join(pluv_paths[p], '*.txt'))
        fnames.sort()
        pluv = pluvio.Pluvio(fnames, name=p)
        selection = pluv.data.i_rt.apply(lambda x: type(x) == float)
        pluv.data = pluv.data[selection].astype(float)
Code example #18
File: scr_riming.py  Project: zzwei1/radcomp
# coding: utf-8

import pandas as pd
from glob import glob
from datetime import datetime, timedelta
from os import path
from scipy.io import loadmat
from j24 import home

DATAPATH = path.join(home(), 'DATA')

hdfpath = path.join(DATAPATH, 'FR_haoran.h5')


def path_dstr(fpath, start_char=4):
    fname = path.basename(fpath)
    return fname[start_char:start_char + 8]


def parse_date_from_fpath(fpath, **kws):
    dstr = path_dstr(fpath, **kws)
    return datetime.strptime(dstr, '%Y%m%d')


def times_from_file(fpath, **kws):
    date = parse_date_from_fpath(fpath, **kws)
    data = loadmat(fpath)
    h = data['PIP_time'].flatten()
    return list(map(lambda hh: date + timedelta(hours=hh), h))
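
Illustrative call of the filename parser (the file name below is hypothetical, chosen only to match the default start_char=4 layout):

parse_date_from_fpath('/data/PIP_20140221_FR.mat')  # -> datetime(2014, 2, 21, 0, 0)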

Code example #19
File: scr_density18.py  Project: juhi24/baecc
# coding: utf-8
from __future__ import absolute_import, division, print_function, unicode_literals
__metaclass__ = type

import pandas as pd
from os import path
from glob import glob
from baecc.instruments import pluvio, pip_psd, pip_v
from j24 import home, ensure_join


RESULTS_DIR = ensure_join(home(), 'results', 'density18')


def make_hdf():
    datadir = path.join(home(), 'DATA', 'HYY_DenData')
    p200dir = path.join(datadir, 'Pluvio200')
    p400dir = path.join(datadir, 'Pluvio400')
    pipdir = path.join(datadir, 'PIP')
    psddir = path.join(pipdir, 'f_1_4_DSD_Tables_ascii')
    vdir = path.join(pipdir, 'f_2_2_Velocity_Tables')
    h5file = path.join(datadir, 'annakaisa15-18.h5')
    pluv_paths = dict(pluvio200=p200dir, pluvio400=p400dir)
    instr = dict()
    # read pluvio data
    for p in ['pluvio200', 'pluvio400']:
        fnames = glob(path.join(pluv_paths[p], '*.txt'))
        fnames.sort()
        pluv = pluvio.Pluvio(fnames, name=p)
        selection = pluv.data.i_rt.apply(lambda x: type(x) == float)
        pluv.data = pluv.data[selection].astype(float)
Code example #20
# coding: utf-8

import pandas as pd
from glob import glob
from os import path
from scipy.io import loadmat
from j24 import home, mldatenum2datetime

datadir = path.join(home(), 'DATA', 'BAECC_1308_AVL')
dataset = set(glob(path.join(datadir, 'Snow_*.mat')))
P200SET = set(glob(path.join(datadir, 'Snow_*PL200.mat')))
P400SET = dataset - P200SET


def mat_data(filename):
    dat = loadmat(filename)
    # pick the struct whose key starts with 'Snow_'
    key = next(k for k in dat if 'Snow_' in k)
    data = dat[key][0]
    keys = data.dtype.names
    values = data[0]
    return dict(zip(keys, values))


def mat2series(filename, key='azs'):
    data = mat_data(filename)
    values = data[key].flatten()
    t = list(map(mldatenum2datetime, data['time'].flatten()))
    series = pd.Series(data=values, index=t)
    series.name = key
    return series
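
A hedged usage sketch (assumes the BAECC_1308_AVL directory holds at least one Pluvio400 mat file):

fname = sorted(P400SET)[0]
azs400 = mat2series(fname, key='azs')
print(azs400.head())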
Code example #21
File: __init__.py  Project: juhi24/baecc
# coding: utf-8
import locale
from os import path
from j24 import home

locale.setlocale(locale.LC_ALL, 'C')

# general configuration
DEBUG = False
CGS_UNITS = True  # display units in cgs instead of SI

# CONFIG default paths
HOME = home()
DATA_DIR = path.join(HOME, 'DATA')
USER_DIR = path.join(HOME, '.baecc')
RESULTS_DIR = path.join(HOME, 'results')
H5_PATH = path.join(DATA_DIR, 'baecc.h5')

# constants
if CGS_UNITS:
    RHO_SCALE = 1e-3
    RHO_UNITS = r'g$\,$cm$^{-3}$'
else:
    RHO_SCALE = 1
    RHO_UNITS = r'kg$\,$m$^{-3}$'


from baecc.tools import merge_series, merge_multiseries
from baecc import events
Code example #22
File: __init__.py  Project: juhi24/baecc
# coding: utf-8
import locale
from os import path
from j24 import home

locale.setlocale(locale.LC_ALL, 'C')

# general configuration
DEBUG = False
CGS_UNITS = True  # display units in cgs instead of SI

# CONFIG default paths
HOME = home()
DATA_DIR = path.join(HOME, 'DATA')
USER_DIR = path.join(HOME, '.baecc')
RESULTS_DIR = path.join(HOME, 'results')
H5_PATH = path.join(DATA_DIR, 'baecc.h5')

# constants
if CGS_UNITS:
    RHO_SCALE = 1e-3
    RHO_UNITS = r'g$\,$cm$^{-3}$'
else:
    RHO_SCALE = 1
    RHO_UNITS = r'kg$\,$m$^{-3}$'

from baecc.tools import merge_series, merge_multiseries
from baecc import events
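
A small illustration of the unit scale (values are made up; the conversion 1 kg m^-3 = 10^-3 g cm^-3 is standard):

rho_si = 150.0                    # bulk density in kg m^-3
rho_display = rho_si * RHO_SCALE  # 0.15 when CGS_UNITS is True
label = 'bulk density ({})'.format(RHO_UNITS)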