Example #1
def make_strictly_monotonic_cdf(x, cdf):
    '''Ensure the CDF is strictly monotonic, so there are no duplicate x
    values and only a single y value is kept at either end of the CDF.

    .. Note:: Does not check that the rest of the cdf is monotonic.

    >>> x = np.arange(20)
    >>> mu = 10.
    >>> sigma = 2.
    >>> cdf = get_cdf(x, mu, sigma)
    >>> new_x, new_cdf = make_strictly_monotonic_cdf(x,cdf)

    Does not change answers if already OK.

    >>> np.set_printoptions(formatter={'float':lambda x: '{:5.2f}'.format(x)})
    >>> print(cdf - new_cdf)
    [ 0.00  0.00  0.00  0.00  0.00  0.00  0.00  0.00  0.00  0.00  0.00  0.00
      0.00  0.00  0.00  0.00  0.00  0.00  0.00  0.00]

    If duplicated x or y values:

    >>> x = [ 2., 2., 3., 4., 5., 6., 7., 8., 9.]
    >>> cdf = [0., 0.1, 0.3, 0.5, 0.7,0.99, 1., 1., 1.]
    >>> new_x, new_cdf = make_strictly_monotonic_cdf(x, cdf)
    >>> print(new_x)
    [2.0, 3.0, 4.0, 5.0, 6.0, 7.0]
    >>> print(new_cdf)
    [0.0, 0.3, 0.5, 0.7, 0.99, 1.0]
    >>> np.set_printoptions()
    '''

    imax = len(x)-1
    imin = 0
    #Truncate the cdf as soon as it reaches a y value of 1,
    #or where consecutive x values are equal.
    for i in range(len(x)-1, 0, -1):
        if cdf[i-1] < 1 and (x[i] != x[i-1]):
            imax = i
            break
    #Keep only the first y value of zero, and no duplicate x values at start
    for i in range(len(x)-1):
        if cdf[i+1] > 0 and (x[i] != x[i+1]):
            imin = i
            break

    x_new = x[imin:imax+1]
    cdf_new = cdf[imin:imax+1]

    #Check cdf is monotonically increasing in the middle and fix any problems:
    cdf_new = make_strictly_increasing(cdf_new, x_new)
    cdf_new[0] = min(cdf) #Ensure min cdf value is kept
    cdf_new[-1] = max(cdf) #Ensure max cdf value is kept

    #Check x is monotonic and fix any problems:
    x_new = make_strictly_increasing(x_new, cdf_new)

    return x_new, cdf_new
Example #2
    def _getWindDirectionColumn(self, row):
        # Return the index of the first cell that matches a wind direction.
        for i, cell in enumerate(row):
            if self._directionMatch(cell):
                return i
        raise AssertionError('Incorrect Format')
Example #3
def add_regime_coord(cube, regime_data, data_dims=1):
    """
    Add weather regime as an auxiliary coordinate to the input cube.

    :param cube: Iris cube with a time coordinate, to which the regime
                 coordinate will be added.
    :param regime_data: numpy 2D array of dates and weather regimes.
    :param data_dims: Must point at the time coordinate of the cube.
                      Typically found at 1 for sites_cube_list, 0 for
                      gridded_cube_list.

    >>> import config
    >>> import adaq_functions
    >>> sample_data_path = config.SAMPLE_DATADIR+'weather_regimes/'
    >>> wr = sample_data_path + 'daily_30regimes_since2010.txt'
    >>> regime_dates = read_regime_txt(wr) # doctest: +ELLIPSIS
    Getting regime data from  .../daily_30regimes_since2010.txt
    >>> ini_data, sites_data, od, md_list = adaq_functions.get_exampledata()
    ... # doctest: +ELLIPSIS
    Reading inifile ...example_data_1days.ini
    Number of sites:  5
    >>> for cube in od.sites_cube_list:
    ...     cube = add_regime_coord(cube, regime_dates)
    >>> for coord in od.sites_cube_list[0].coords():
    ...     print(coord.name())
    site_id
    time
    abbrev
    latitude
    longitude
    site_altitude
    site_name
    site_type
    regime
    >>> for md in md_list:
    ...     for cube in md.sites_cube_list:
    ...         cube = add_regime_coord(cube, regime_dates)
    >>> scl0 = md_list[0].sites_cube_list[0]
    >>> names = [str(coord.name()) for coord in scl0.coords()]
    >>> print('regime' in  names)
    True
    >>> print(len(scl0.coord('time').points) == len(scl0.coord('regime').points))
    True
    """

    #Get cube's datetime points
    dtpoints = cube_time.cube_tpoints_dt(cube)

    regime_list = []
    #Add regime number to list where the date in the cube matches the
    #regime date
    for point in dtpoints:
        for i in range(len(regime_data)):
            if point.date() == regime_data[i, 0]:
                regime_list.append(regime_data[i, 1])

    #Add list of regimes to cube as a coordinate
    cube.add_aux_coord(iris.coords.AuxCoord(regime_list, long_name='regime'),
                       data_dims)

    return cube
Example #4
def plot_regime_timeseries(cube, plotdir='./'):
    """
    Produce time series plot illustrating the daily weather regime
    classification.

    :param cube: Iris cube, which has a 'regime' coordinate and a 'time'
                 coordinate.
    :param plotdir: Directory to save resulting plot in.

    :return: :class:`timeseries_plot.TimeSeriesPlot` object

    >>> import config
    >>> wr = config.SAMPLE_DATADIR + \
    'weather_regimes/daily_30regimes_since2010.txt'
    >>> regime_dates = read_regime_txt(wr) # doctest: +ELLIPSIS
    Getting regime data from  .../daily_30regimes_since2010.txt

    Load an example sites cube:

    >>> data_path = config.SAMPLE_DATADIR+'sites_cube_list/'
    >>> cube = iris.load_cube(data_path + 'aurn_5days.nc',
    ... iris.AttributeConstraint(short_name='PM2p5'))

    Give it a regime coordinate:

    >>> cube = add_regime_coord(cube, regime_dates, 1)

    Plot this as a time-series for the first site:

    >>> plotdir = config.CODE_DIR + "/adaqdocs/figures"
    >>> tsp = plot_regime_timeseries(cube[0], plotdir=plotdir)
    ... # doctest: +ELLIPSIS
    Plotting regime time-series
    Saved figure  .../Regime_Timeseries.png

    .. image:: ../adaqdocs/figures/Regime_Timeseries.png
       :scale: 50%

    """

    print('Plotting regime time-series')

    #Set up time-series-plot
    tsp = timeseries_plot.TimeSeriesPlot()

    tsp.add_line(cube, y=cube.coord('regime'), colour='teal')

    #Create a plot and save
    tsp.title = 'Daily Weather Regime Classification'
    tsp.ylabel = 'Weather Regime'
    tsp.legend = False
    tsp.plot()
    ax = plt.gca()
    ax.set_yticks(list(range(2, 31, 2)))
    tsp.save_fig(plotdir=plotdir, filename='Regime_Timeseries.png')

    return tsp
Example #5
def test_to_ice_data_one_sample(X, x_s):
    n_cols = X.shape[1]
    columns = ['x{}'.format(i) for i in range(n_cols)]
    data = pd.DataFrame(X, columns=list(columns))

    ice_data = ice.to_ice_data(data, 'x1', x_s)
    ice_data_expected_values = np.repeat(X, x_s.size, axis=0)
    ice_data_expected_values[:, 1] = x_s
    ice_data_expected = pd.DataFrame(ice_data_expected_values, columns=columns)

    assert compare_with_NaN(ice_data, ice_data_expected).all().all()
Example #6
def trim_zeros(L):
    r"""
    Strips trailing zeros/empty lists from a list.

    EXAMPLES::

        sage: from sage.rings.padics.misc import trim_zeros
        sage: trim_zeros([1,0,1,0])
        [1, 0, 1]
        sage: trim_zeros([[1],[],[2],[],[]])
        [[1], [], [2]]
        sage: trim_zeros([[],[]])
        []
        sage: trim_zeros([])
        []

    Zeros are also trimmed from nested lists (one deep)::

        sage: trim_zeros([[1,0]])
        [[1]]
        sage: trim_zeros([[0],[1]])
        [[], [1]]
    """
    strip_trailing = True
    n = len(L)
    for i, c in zip(reversed(range(len(L))), reversed(L)):
        if strip_trailing and (c == 0 or c == []):
            n = i
        elif isinstance(c, list):
            strip_trailing = False
            m = len(c)
            # strip trailing zeros from the sublists
            for j, d in zip(reversed(range(len(c))), reversed(c)):
                if d == 0:
                    m = j
                else:
                    break
            L[i] = c[:m]
        else:
            break
    return L[:n]
Example #8
def test_to_ice_data_one_test_point(l, x_s):
    X = np.array(l)
    n_cols = X.shape[1]
    columns = ['x{}'.format(i) for i in range(n_cols)]
    data = pd.DataFrame(X, columns=columns)
    x_s = np.array(x_s)

    ice_data = ice.to_ice_data(data, 'x0', x_s)
    ice_data_expected_values = X.copy()
    ice_data_expected_values[:, 0] = x_s
    ice_data_expected = pd.DataFrame(ice_data_expected_values, columns=columns)

    assert compare_with_NaN(ice_data, ice_data_expected).all().all()
Example #9
def _cook_slots(period, increment):
    """
        Prepare slots to be displayed on the left hand side of the calendar.
        Arguments:
        period - time period for the whole series
        increment - slot size in minutes
    """
    tdiff = datetime.timedelta(minutes=increment)
    num = int((period.end - period.start).total_seconds()) \
        // int(tdiff.total_seconds())
    s = period.start
    slots = []
    for i in range(num):
        sl = period.get_time_slot(s, s + tdiff)
        slots.append(sl)
        s = s + tdiff
    return slots
Example #11
def calc_forecast_periods(forecast_day, runtime, day_start_hour=None):
    """
    Basic function to return a list of forecast_periods (leadtimes) for each
    forecast day given the model runtime. Note Day 1 is the first full day of a
    forecast.

    :param forecast_day: Forecast day required
    :param runtime: Hour of day at which forecast run starts
                    (usually 18, 0, or 12)
    :param day_start_hour: Hour of the day to refer to as the start of a day.
                           By default this is set to 1 (1Z, so a day is
                           01Z - 24Z), or 13 for 12Z runtimes
                           (so a day is 13Z - 12Z).

    For example the forecast ranges for Day 1 from an 18Z model:

    >>> print(calc_forecast_periods(1, 18))
    [7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, \
26, 27, 28, 29, 30]

    """
    #Set up hour of day to refer to as the start of a day
    if day_start_hour is None:
        day_start_hour = 1
        if runtime == 12:
            day_start_hour = 13

    #Number of days since first day_start_hour:
    ndays_after_start_of_first_day = int(forecast_day) - 1
    #Number of hours since first day_start_hour:
    nhrs_after_start_of_first_day = ndays_after_start_of_first_day*24
    #Number of hours from runtime to first day_start_hour:
    nhrs_to_start_of_day = (day_start_hour - runtime)%24
    #Number of hours since runtime at start of forecast_day:
    nhrs_to_start_of_fcst_day = nhrs_to_start_of_day + \
                                nhrs_after_start_of_first_day
    #A day is 24 hours:
    forecast_periods = list(range(nhrs_to_start_of_fcst_day,
                                  nhrs_to_start_of_fcst_day+24))

    return forecast_periods
Example #12
def _cook_slots(period, increment, width, height):
    """
        Prepare slots to be displayed on the left hand side
        calculate dimensions (in px) for each slot.
        Arguments:
        period - time period for the whole series
        increment - slot size in minutes
        width - width of the slot column (px)
        height - height of the table (px)
    """
    tdiff = datetime.timedelta(minutes=increment)
    # Note: timedelta.seconds is only the seconds component (0-86399), so
    # for a period spanning one or more whole days it is 0 and we fall
    # through to the 24-slot default below.
    if (period.end - period.start).seconds:
        num = (period.end - period.start).seconds // tdiff.seconds
    else:
        num = 24  # hours in a day
    s = period.start
    slots = []
    for i in range(num):
        sl = period.get_time_slot(s, s + tdiff)
        sl.top = height // num * i
        sl.height = height // num
        slots.append(sl)
        s = s + tdiff
    return slots
Example #14
import datetime
import calendar as standardlib_calendar

from django.conf import settings
from django.utils.encoding import python_2_unicode_compatible
from django.template.defaultfilters import date as date_filter
from django.utils.dates import WEEKDAYS, WEEKDAYS_ABBR
from events.settings import FIRST_DAY_OF_WEEK, SHOW_CANCELLED_OCCURRENCES
from events.models import Occurrence
from django.utils import timezone

weekday_names = []
weekday_abbrs = []
if FIRST_DAY_OF_WEEK == 1:
    # The calendar week starts on Monday
    for i in range(7):
        weekday_names.append(WEEKDAYS[i])
        weekday_abbrs.append(WEEKDAYS_ABBR[i])
else:
    # The calendar week starts on Sunday, not Monday
    weekday_names.append(WEEKDAYS[6])
    weekday_abbrs.append(WEEKDAYS_ABBR[6])
    for i in range(6):
        weekday_names.append(WEEKDAYS[i])
        weekday_abbrs.append(WEEKDAYS_ABBR[i])


class Period(object):
    '''
    This class represents a period of time. It can return a set of occurrences
    based on its events, and its time period (start and end).
Example #15
    def get_fileinfo(self, filename):
        """
        Get information from the filename, such as start and end times and
        forecast_day; returns the information in a dictionary.

        There are four formats of filename currently supported:

          * <model>_yyyymmddhh+hhh.nc, eg HRES_ENS_2014032600+012.nc
            These are distinguished by the '+' in the filename.
            Each of these files is assumed to contain a single time.
            Each file contains multiple species.
          * <model>_<species>_hhH_hhH_yyyymmddHHMM000000.nc
            These are distinguished by not having '+' in the filename.
            Each of these files contains 24 (or 25) hours, with the forecast
            leadtimes covered being given by hhH_hhH.
            Each file contains a single species.
          * <model(part1)>_<model(part2)>_<height>_yyyymmdd.nc
            These are similar to the last format, but all species are in the
            same file. These are as used for hindcast verification.
          * W_fr-meteofrance,MODEL,<modelname>+<datatype>+<levels>+<species>+
            <forecastperiod>_C_LFPW_yyyymmddHHMMSS.nc
            Each file contains a single species for a 24 or 25 hour period,
            where <modelname> is the model name - ENSEMBLE or another CAMS
            member, eg CHIMERE; <datatype> is usually FORECAST (or possibly
            ANALYSIS - not tested); <levels> is SURFACE or ALLLEVELS;
            <species> is O3, CO, NO2, SO2, PM25, PM10, PANS, NMVOC, NO, NH3,
            or BIRCHPOLLEN; and <forecastperiod> is 0H24H, 25H48H, 49H72H or
            73H96H. For full information about this, see
            http://www.regional.atmosphere.copernicus.eu/doc/\
Guide_Numerical_Data_CAMS_new.pdf

        Assumes forecast day 1 runs from 01Z-24Z.

        >>> maccens = MaccEnsData()
        >>> filename = '/path/to/data/HRES_ENS_2014032600+008.nc'
        >>> fileinfo = maccens.get_fileinfo(filename)
        >>> for key in sorted(fileinfo):
        ...     print(key, fileinfo[key])
        data_end 2014-03-26 08:00:00
        data_start 2014-03-26 08:00:00
        day1_date 2014-03-26
        forecast_days [1]
        forecast_periods [8]
        model HRES_ENS

        >>> filename = ('/path/to/data/ENSEMBLE_FORECAST_SURFACE_O3'
        ... '_25H_48H_201606140000000000.nc')
        >>> fileinfo = maccens.get_fileinfo(filename)
        >>> for key in sorted(fileinfo):
        ...     print(key, fileinfo[key])
        data_end 2016-06-16 00:00:00
        data_start 2016-06-15 01:00:00
        day1_date 2016-06-14
        forecast_days [2]
        forecast_periods [25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, \
37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48]
        model ENSEMBLE
        short_name O3

        >>> filename = ('/path/to/data/HRES_ENS_SFC_20150617.nc')
        >>> fileinfo = maccens.get_fileinfo(filename)
        >>> for key in sorted(fileinfo):
        ...     print(key, fileinfo[key])
        data_end 2015-06-18 00:00:00
        data_start 2015-06-17 00:00:00
        day1_date 2015-06-17
        forecast_days [0, 1]
        forecast_periods [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, \
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]
        model HRES_ENS

        >>> filename = ('/path/to/data/W_fr-meteofrance,MODEL,ENSEMBLE+FORECAST'
        ... '+SURFACE+O3+0H24H_C_LFPW_20160914000000.nc')
        >>> fileinfo = maccens.get_fileinfo(filename)
        >>> for key in sorted(fileinfo):
        ...     print(key, fileinfo[key])
        data_end 2016-09-15 00:00:00
        data_start 2016-09-14 00:00:00
        day1_date 2016-09-14
        forecast_days [0, 1]
        forecast_periods [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, \
13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]
        model ENSEMBLE
        short_name O3


        """

        fileinfo = {}
        #Remove path if attached
        filename = os.path.basename(filename)
        split_file = filename.split('_')

        dt_string = split_file[-1][:-3]

        #Flag for CAMS formatted files, as opposed to older MACC data
        cams_format = False

        if split_file[1].split(',')[0] == 'fr-meteofrance':
            #W_fr-meteofrance,MODEL,ENSEMBLE+FORECAST+SURFACE+O3+0H24H
            #_C_LFPW_20160914000000.nc
            #For more info, see
            #http://www.regional.atmosphere.copernicus.eu/doc/
            #Guide_Numerical_Data_CAMS_new.pdf
            cams_format = True
            ref_time_dt = datetime.datetime.strptime(dt_string, "%Y%m%d%H%M%S")
            comma_split = split_file[1].split(',')
            composite = comma_split[2]
            composite_split = composite.split('+')
            leadtime_start = int(composite_split[4].split('H')[0])
            leadtime_end = int(composite_split[4].split('H')[1])

            fileinfo['model'] = composite_split[0]
            fileinfo['short_name'] = composite_split[3]
            if fileinfo['short_name'] == 'PM25':
                fileinfo['short_name'] = 'PM2p5'

        elif '+' in dt_string:
            #Old format MACC_V2014
            #HRES_ENS_2014032600+008.nc'
            ref_time_dt = datetime.datetime.strptime(
                dt_string.split('+')[0], "%Y%m%d%H")
            leadtime = int(dt_string.split('+')[1])
            leadtime_delta = datetime.timedelta(hours=leadtime)

            fileinfo['data_start'] = ref_time_dt + leadtime_delta
            #As only one hour in file, data_end is the same as data_start:
            fileinfo['data_end'] = fileinfo['data_start']
            fileinfo['forecast_periods'] = [leadtime]
            forecast_day = 1 + (fileinfo['data_start'] -
                                ref_time_dt.replace(hour=1)).days
            fileinfo['forecast_days'] = [forecast_day]
            fileinfo['model'] = '_'.join(split_file[:-1])

        elif len(dt_string) == 8:
            #CAMS hindcast format <model>_<levels>_yyyymmdd.nc
            cams_format = True
            ref_time_dt = datetime.datetime.strptime(dt_string, "%Y%m%d")
            #Assumption - all files are T+0 - T+24 (25 hours)
            leadtime_start = 0
            leadtime_end = 24
            fileinfo['model'] = '_'.join(split_file[:2])
        else:
            #CAMS routinely running format
            # <model>_<species>_hhH_hhH_yyyymmddHHMM000000.nc
            cams_format = True
            ref_time_dt = datetime.datetime.strptime(dt_string[:10],
                                                     "%Y%m%d%H")

            leadtime_start = int(split_file[-3][:-1])
            leadtime_end = int(split_file[-2][:-1])

            fileinfo['short_name'] = split_file[-4]
            if fileinfo['short_name'] == 'PM25':
                fileinfo['short_name'] = 'PM2p5'
            fileinfo['model'] = split_file[0]

        if cams_format:

            fileinfo['data_start'] = ref_time_dt + \
                                     datetime.timedelta(hours=leadtime_start)
            fileinfo['data_end'] = ref_time_dt + \
                                   datetime.timedelta(hours=leadtime_end)

            fileinfo['forecast_periods'] = list(
                range(leadtime_start, leadtime_end + 1))
            if leadtime_start == 0:
                #Most files only have a single forecast day in.
                #But the first file has T+0-T+24.
                #T+0 corresponds to day 0, while T+1-T+24 is day 1.
                #So also add day 0 into forecast_days list.
                fileinfo['forecast_days'] = [0, leadtime_end // 24]
            else:
                fileinfo['forecast_days'] = [leadtime_end // 24]

        #Get date of first full day of forecast for this model run.
        if ref_time_dt.hour == 0:
            #First full day is for today
            fileinfo['day1_date'] = ref_time_dt.date()
        else:
            fileinfo['day1_date'] = ref_time_dt.date() \
                                    + datetime.timedelta(days=1)

        return fileinfo
Example #16
    def correct_timecoord(self, cube):
        """
        Convert time coordinate to gregorian calendar, instead of hours, with
        reference time given in long_name.
        Also gives forecast_period coordinate and ensures time coord has bounds.
        Also adds forecast_day coordinate.
        """

        time_unit = cf_units.Unit('hours since epoch', calendar='gregorian')
        tcoord = cube.coord('time')

        #Get reference time, taken from long_name which is
        #eg u'FORECAST time from 2015021900'
        assert tcoord.long_name[:18] == 'FORECAST time from'
        timestr = tcoord.long_name.split()[-1]
        if len(timestr) == 8:
            #CAMS format
            ref_time = datetime.datetime.strptime(timestr, "%Y%m%d")
        else:  #len=10
            #Old MACC_V2014 format
            ref_time = datetime.datetime.strptime(timestr, "%Y%m%d%H")

        tpoints = [
            time_unit.date2num(ref_time + datetime.timedelta(hours=int(time)))
            for time in tcoord.points
        ]
        tcoord_new = iris.coords.DimCoord(tpoints,
                                          standard_name='time',
                                          units=time_unit)
        #Remove old time coordinate
        cube.remove_coord('time')
        #Replace with new time coordinate
        if self.forecast_day in ('forecast', 'latest'):

            #Need to cope with cubes having multiple times in them
            #(unlike pp reading, where each cube only has one field,
            #ie one time).
            #Add an extra coordinate 'itime' with unique points to ensure
            #cubes can be concatenated - this will be removed soon after
            #returning from callback.
            #The itime points will be a strictly monotonic array of
            #integers, starting from zero.
            #itime will become the dim coord and time is left as an aux
            #coord for now.

            cube.add_aux_coord(tcoord_new, 0)

            #Generate a list of integer points, starting from the last value
            #of self.itime+1 and going up by 1 for each point.
            itime_pts = [self.itime + i for i in range(0, len(tpoints))]
            #Now increment self.itime so next time this routine is called,
            #the same selection of integers are not chosen.
            self.itime += len(tpoints)
            cube.add_dim_coord(
                iris.coords.DimCoord(itime_pts, long_name='itime'), 0)

        else:
            #Add time on as the dim coord
            cube.add_dim_coord(tcoord_new, 0)

        #Also add new coordinate, forecast_period, which contains same
        #points as original time coordinate
        cube.add_aux_coord(
            iris.coords.DimCoord(tcoord.points,
                                 standard_name='forecast_period',
                                 units=cf_units.Unit('hours')), 0)

        #Also add forecast_day coordinate
        forecast_days = array_statistics.calc_forecast_day(
            tcoord.points, ref_time.hour)
        cube.add_aux_coord(
            iris.coords.AuxCoord(forecast_days,
                                 long_name='forecast_day',
                                 units=cf_units.Unit('days')), 0)

        return cube
Example #17
def _get_extent_ge(cube, thresh, border=2.0):
    """
    Used in setup_extent function to return the (x0,x1,y0,y1) range
    where the cube exceeds the lowest contour (thresh).
    """

    if len(cube.shape) > 2:
        cube = cube.collapsed(
            _everything_but_coords(cube, ['latitude', 'longitude']),
            iris.analysis.MAX)

    x_dim = cube.shape[-1]
    y_dim = cube.shape[-2]

    # find out which columns and rows contain useful values
    x_ge = np.zeros((x_dim, ))
    y_ge = np.zeros((y_dim, ))

    for x in range(x_dim):
        if np.any(cube.data[:, x] >= thresh):
            x_ge[x] = True
    for y in range(y_dim):
        if np.any(cube.data[y, :] >= thresh):
            y_ge[y] = True

    cols = x_ge.nonzero()[0]
    rows = y_ge.nonzero()[0]

    if cols.size == 0 or rows.size == 0:
        warnings.warn(
            "No data points >= threshold (" + str(thresh) + "), " +
            "a cube extent cannot be calculated, " +
            "defaulting to global extent", RuntimeWarning)
        crs = cube.coord(axis="X").coord_system.as_cartopy_crs()
        # Taken in by 0.1 deg due to a Cartopy bug - Dec 2017
        extent = [-179.9, 179.9, -89.9, 89.9]
        return (extent, crs)

    # get the tight range of interest
    x_min = cols.min()
    x_max = cols.max()
    x_centre = (x_min + x_max) / 2
    x_range = x_max - x_min

    y_min = rows.min()
    y_max = rows.max()
    y_centre = (y_min + y_max) / 2
    y_range = y_max - y_min

    # Assuming roughly square cell sizes, add a border (limited to cube range).
    dist_from_cen = max(x_range, y_range) * border
    dist_from_cen = np.ceil(dist_from_cen)
    x_min = int(max(0, x_centre - dist_from_cen))
    x_max = int(min(x_dim - 1, x_centre + dist_from_cen))
    y_min = int(max(0, y_centre - dist_from_cen))
    y_max = int(min(y_dim - 1, y_centre + dist_from_cen))

    # turn data indices into native coord values
    x_pts = cube.coord(axis="X").points
    y_pts = cube.coord(axis="Y").points
    extent = [x_pts[x_min], x_pts[x_max], y_pts[y_min], y_pts[y_max]]

    crs = cube.coord(axis="X").coord_system.as_cartopy_crs()
    return (extent, crs)
Example #18
def gauss_sum(a, p, f, prec=20, factored=False, algorithm='pari', parent=None):
    r"""
    Return the Gauss sum `g_q(a)` as a `p`-adic number.

    The Gauss sum `g_q(a)` is defined by

    .. MATH::

        g_q(a)= \sum_{u\in F_q^*} \omega(u)^{-a} \zeta_q^u,

    where `q = p^f`, `\omega` is the Teichmüller character and
    `\zeta_q` is some arbitrary choice of primitive `q`-th root of
    unity. The computation is adapted from the main theorem in Alain
    Robert's paper *The Gross-Koblitz formula revisited*,
    Rend. Sem. Mat. Univ. Padova 105 (2001), 157--170.

    Let `p` be a prime, `f` a positive integer, `q=p^f`, and `\pi` be
    the unique root of `f(x) = x^{p-1}+p` congruent to `\zeta_p - 1` modulo
    `(\zeta_p - 1)^2`. Let `0\leq a < q-1`. Then the
    Gross-Koblitz formula gives us the value of the Gauss sum `g_q(a)`
    as a product of `p`-adic Gamma functions as follows:

    .. MATH::

        g_q(a) = -\pi^s \prod_{0\leq i < f} \Gamma_p(a^{(i)}/(q-1)),

    where `s` is the sum of the digits of `a` in base `p` and the
    `a^{(i)}` have `p`-adic expansions obtained from cyclic
    permutations of that of `a`.

    INPUT:

    - ``a`` -- integer

    - ``p`` -- prime

    - ``f`` -- positive integer

    - ``prec`` -- positive integer (optional, 20 by default)

    - ``factored`` -- boolean (optional, False by default)

    - ``algorithm`` -- flag passed to the `p`-adic Gamma function (optional,
      "pari" by default)

    - ``parent`` -- an optional parent ring; by default ``Zp(p, prec)``
      is used

    OUTPUT:

    If ``factored`` is ``False``, returns a `p`-adic number in an Eisenstein extension of `\QQ_p`.
    This number has the form `pi^e * z` where `pi` is as above, `e` is some nonnegative
    integer, and `z` is an element of `\ZZ_p`; if ``factored`` is ``True``, the pair `(e,z)`
    is returned instead, and the Eisenstein extension is not formed.

    .. NOTE::

        This is based on GP code written by Adriana Salerno.

    EXAMPLES:

    In this example, we verify that `g_3(0) = -1`::

        sage: from sage.rings.padics.misc import gauss_sum
        sage: -gauss_sum(0,3,1)
        1 + O(pi^40)

    Next, we verify that `g_5(a) g_5(-a) = 5 (-1)^a`::

        sage: from sage.rings.padics.misc import gauss_sum
        sage: gauss_sum(2,5,1)^2-5
        O(pi^84)
        sage: gauss_sum(1,5,1)*gauss_sum(3,5,1)+5
        O(pi^84)

    Finally, we compute a non-trivial value::

        sage: from sage.rings.padics.misc import gauss_sum
        sage: gauss_sum(2,13,2)
        6*pi^2 + 7*pi^14 + 11*pi^26 + 3*pi^62 + 6*pi^74 + 3*pi^86 + 5*pi^98 +
        pi^110 + 7*pi^134 + 9*pi^146 + 4*pi^158 + 6*pi^170 + 4*pi^194 +
        pi^206 + 6*pi^218 + 9*pi^230 + O(pi^242)
        sage: gauss_sum(2,13,2,prec=5,factored=True)
        (2, 6 + 6*13 + 10*13^2 + O(13^5))

    .. SEEALSO::

        - :func:`sage.arith.misc.gauss_sum` for general finite fields
        - :meth:`sage.modular.dirichlet.DirichletCharacter.gauss_sum`
          for prime finite fields
        - :meth:`sage.modular.dirichlet.DirichletCharacter.gauss_sum_numerical`
          for prime finite fields
    """
    from sage.rings.padics.factory import Zp
    from sage.rings.all import PolynomialRing

    q = p**f
    a = a % (q - 1)
    if parent is None:
        R = Zp(p, prec)
    else:
        R = parent
    out = -R.one()
    if a != 0:
        t = R(1 / (q - 1))
        for i in range(f):
            out *= (a * t).gamma(algorithm)
            a = (a * p) % (q - 1)
    s = sum(a.digits(base=p))
    if factored:
        return s, out
    X = PolynomialRing(R, name='X').gen()
    pi = R.ext(X**(p - 1) + p, names='pi').gen()
    out *= pi**s
    return out
Example #19
    def test_get_months(self):
        months = self.year.get_months()
        self.assertEqual(
            [month.start for month in months],
            [datetime.datetime(2008, i, 1, tzinfo=pytz.utc)
             for i in range(1, 13)])
Example #20
        for coord in cube.coords():
            if coord.name() == 'time':
                ntimes = coord.shape[0]
            elif coord.name() == 'latitude':
                continue
            elif coord.name() == 'longitude':
                continue
            elif coord.name() == 'height':
                # can only have one height level - also assuming the height
                # dim comes before the time, lat, lon dims!
                cube = cube[height_idx]
            else:
                # assuming any other dim will come ahead of the others
                cube = cube[0]
                print('WARNING: too many dimensions. A dimension has been '
                      'removed to proceed with plotting. Results may be '
                      'incorrect.')

        print(cube)
        # sum through each timestep to get one plume over the entire back
        # run duration - we will overwrite the values after
        for t in range(ntimes):
            print(t)
            cube_t = cube[t]
            ndims = cube_t.ndim
            if ndims > 2:
                raise AssertionError('Too many cube dimensions to plot!')
            if t == 0:
                cube_tsum = cube_t
            else:
                # sum the next cube in the list into the running total
                cube_tsum = iris.analysis.maths.add(
                    cube_tsum, cube_t, dim=None, ignore=True, in_place=True)

        # create a copy of the time-summed cube before changing the values
        cube_copy = cube_tsum.copy()

        # overwrite values with 1 or 0 depending on whether the value at that
        # lat-lon point is greater than zero (i.e. if plume exists)
        for i in range(cube_tsum.coord('latitude').shape[0]):
Example #21
def aq_create_rolling_stats(inifilename=None,
                            ini_dict=None,
                            sites_data=None,
                            new_ncfile=True):
    """
    Routine to loop over aq_plot.py code and read data/produce netcdf statistic
    file one day at a time. This statistic file will be appended to after being
    created on the first day.
    The created netcdf file contains a 2D array whose values correspond to
    each statistic at each time (each day).

    :param inifilename: String giving filename of ini file.
    :param ini_dict: Dictionary of a :class:`inifile` object. If this is given,
                     then it is used instead of reading in from inifilename.
    :param sites_data: numpy ndarray containing site information data
                       from a :class:`sites_info.SitesInfo` object.
                       If this is given, then it is used instead of reading in
                       from a file/cube.
    :param new_ncfile: Start a new netcdf file to save to on day1, overwriting
                       any existing file of the same name.

    For example purposes, increase the default range in the aq_plot.ini file,
    and disable aq_plot's own saving:

    >>> ini_dict = adaqcode.inifile.get_inidict(
    ... defaultfilename='adaqscripts/aq_plot.ini') # doctest: +ELLIPSIS
    Reading inifile .../aq_plot.ini
    >>> ini_dict['range_days'] = 3
    >>> ini_dict['save_cubes_nc'] = False

    Now run the code:

    >>> aq_create_rolling_stats(ini_dict=ini_dict) # doctest: +ELLIPSIS
    Date: 2014-03-26
    Number of sites:  5
    Creating observation data at  ...
    Reading obs data files
    Found obs for  .../ABD_20140101_20140818.txt
    ...
    Found obs for  .../YW_20140101_20140818.txt
    Creating obs cubes
    Getting model data for  oper  at  ...
    Getting model data for  name_casestudy  ...
    Removing gridded_cube_list
    Adding missing times
    Converting to DAQI
    Saved to  .../stats_Obs-AURN_Mod-name_casestudy.nc
    Saved to  .../stats_Obs-AURN_Mod-oper.nc
    Statistics saved to  .../stats.csv
    Statistics saved to  .../stats.wiki
    Date: 2014-03-27
    Creating observation data at ...
    Reading obs data files
    Found obs for  .../ABD_20140101_20140818.txt
    ...
    Creating obs cubes
    Getting model data for  oper  at  ...
    Getting model data for  name_casestudy  at ...
    Removing gridded_cube_list
    Adding missing times
    Converting to DAQI
    Saved to  .../stats_Obs-AURN_Mod-name_casestudy.nc
    Saved to  .../stats_Obs-AURN_Mod-oper.nc
    Statistics saved to  .../stats.csv
    Statistics saved to  .../stats.wiki
    Date: 2014-03-28
    Creating observation data at  ...
    Reading obs data files
    Found obs for .../ABD_20140101_20140818.txt
    ...
    Saved to  .../stats_Obs-AURN_Mod-name_casestudy.nc
    Saved to  .../stats_Obs-AURN_Mod-oper.nc
    Statistics saved to  .../stats.csv
    Statistics saved to  .../stats.wiki

    """

    if ini_dict is None:
        ini_dict = adaqcode.inifile.get_inidict(
            inifilename=inifilename, defaultfilename='adaqscripts/aq_plot.ini')
    elif inifilename is not None:
        warnings.warn("Inifile not read in - using data from input ini_dict "
                      "instead")
    #Take a copy of short_name_list, as the list gets added to later
    short_name_list = ini_dict['short_name_list'][:]

    #Ensure netcdf output is enabled
    if 'calc_stats_format_list' not in ini_dict:
        ini_dict['calc_stats_format_list'] = []
    if 'nc' not in ini_dict['calc_stats_format_list']:
        ini_dict['calc_stats_format_list'].append('nc')

    #Don't need to produce contours
    #Turn off to ensure that gridded data is not kept in memory
    ini_dict['contours'] = False

    #Calculate statistics for each day and save to combined netcdf file
    #in ini_dict['plot_dir']
    dates = [
        ini_dict['start_datetime'].date() + datetime.timedelta(days=n)
        for n in range(ini_dict['range_days'])
    ]
    firstdate = True
    for date in dates:
        print('Date:', date)

        #Overwrite value of start_datetime, end_datetime and range_days
        #in ini_dict
        ini_dict['start_datetime'] = datetime.datetime(date.year, date.month,
                                                       date.day, 0)
        ini_dict['end_datetime'] = ini_dict['start_datetime'] + \
                                   datetime.timedelta(days=1)
        ini_dict['range_days'] = 1
        #If need to calculate DAQI, then need to also include previous day
        if ini_dict.get('daqi', False):
            ini_dict['start_datetime'] = ini_dict['start_datetime'] - \
                                         datetime.timedelta(days=1)
            ini_dict['range_days'] = 2

        #Reset values in short_name_list (nb use copy [:], not pointer)
        ini_dict['short_name_list'] = short_name_list[:]

        #Now read in all required data
        ini_dict, sites_data, od, md_list = aq_plot.aq_get_data(
            ini_dict=ini_dict, sites_data=sites_data, keep_all_data=False)

        #Prepare all the data as required
        ini_dict, od, md_list, od_stats, md_list_stats = \
                  aq_plot.aq_prepare_data(ini_dict, od, md_list)

        #If including DAQI, then extract required date only
        #instead of saving both days
        if ini_dict.get('daqi', False):
            #Set up new cubelist to overwrite with
            new_od_scl = iris.cube.CubeList()
            for cube in od_stats.sites_cube_list:
                new_od_cube = cube.extract(
                    iris.Constraint(time=lambda t: t.point.date() == date))
                if new_od_cube is not None:
                    #Only keep this cube in the cube list if the date has
                    #been matched, and it is therefore not None
                    #Nb may have only got data for the previous day
                    new_od_scl.append(new_od_cube)
            od_stats.sites_cube_list = new_od_scl
            for md in md_list_stats:
                #Set up new cubelist to overwrite with
                new_md_scl = iris.cube.CubeList()
                for cube in md.sites_cube_list:
                    new_md_cube = cube.extract(
                        iris.Constraint(time=lambda t: t.point.date() == date))
                    if new_md_cube is not None:
                        #Only keep this cube in the cube list if the date has
                        #been matched, and it is therefore not None
                        #Nb may have only got data for the previous day
                        new_md_scl.append(new_md_cube)
                md.sites_cube_list = new_md_scl

        #Check that we have some data, otherwise no point in continuing
        if not od.sites_cube_list:
            #Nb print rather than warning, as warning would only be
            #issued for first missing date
            print('No observation data for this date')
            continue

        model_data = False
        for md in md_list_stats:
            if len(md.sites_cube_list) >= 1:
                #Some cubes found
                model_data = True
        if not model_data:
            #Nb print rather than warning, as warning would only be
            #issued for first missing date
            print('No model data for this date')
            continue

        #Fix time-coord to have its point in the middle of the expected date
        #(otherwise, if some times at the beginning/end etc are missing, the
        # time chosen will not be as expected, and later, when missing times
        # are added, this adds times in unexpected places and can take a very
        # long time / add in a lot of unneeded nan data!)
        tunit = cf_units.Unit('hours since epoch', calendar='gregorian')
        tpt = datetime.datetime(date.year, date.month, date.day, 12)
        tbounds = [ini_dict['start_datetime'], ini_dict['end_datetime']]
        tcoord = iris.coords.DimCoord(
            [tunit.date2num(tpt)],
            standard_name='time',
            bounds=[tunit.date2num(b) for b in tbounds],
            units=tunit)

        #Calculate statistics and save to file
        nc_append = True
        if firstdate and new_ncfile:
            nc_append = False
        adaqcode.adaq_functions.calc_stats(ini_dict,
                                           od_stats,
                                           md_list_stats,
                                           thresholds=aq_plot.STATS_THRESHOLDS,
                                           nc_append=nc_append,
                                           stats_tcoord=tcoord)
        firstdate = False
Example #22
            for coord in cube.coords():
                if coord.name() == 'height':
                    cube = cube[height_idx]
            if cube.ndim > 2:
                raise AssertionError('Cube has too many dimensions!')

            if timestep_cube is None:
                timestep_cube = cube
            else:
                timestep_cube = timestep_cube + cube

        # set all non-zero values to 1 (binary plume mask)

        binary_cube = timestep_cube.copy()

        for i in range(timestep_cube.coord('latitude').shape[0]):
            for j in range(timestep_cube.coord('longitude').shape[0]):
                if timestep_cube.data[i][j] > 0.0:
                    binary_cube.data[i][j] = 1.0
                else:
                    binary_cube.data[i][j] = 0.0

        # gather all the binary cubes at each timestep
        if overlap_cube is None:
            overlap_cube = binary_cube
            n = 1
        else:
            overlap_cube = overlap_cube + binary_cube
            n += 1
    # n is the number of overlaps, e.g. timesteps for which there is any
    # fields file. n is used to define the colorbar limits - if it is too
    # small, the plot looks bad
Example #23
    def bar(self, dir, var, **kwargs):
        """
        Plot a windrose in bar mode. For each var bins and for each sector,
        a colored bar will be draw on the axes.

        Mandatory:
          * dir : 1D array - directions the wind blows from, North centred
          * var : 1D array - values of the variable to compute. Typically the wind
            speeds

        Optional:
          * nsector: integer - number of sectors used to compute the windrose
            table. If not set, nsectors=16, then each sector will be 360/16=22.5°,
            and the resulting computed table will be aligned with the cardinals
            points.
          * bins : 1D array or integer- number of bins, or a sequence of
            bins variable. If not set, bins=6 between min(var) and max(var).
          * blowto : bool. If True, the windrose will be pi rotated,
            to show where the wind blow to (usefull for pollutant rose).
          * colors : string or tuple - one string color ('k' or 'black'), in this
            case all bins will be plotted in this color; a tuple of matplotlib
            color args (string, float, rgb, etc), different levels will be plotted
            in different colors in the order specified.
          * cmap : a cm Colormap instance from matplotlib.cm.
            - if cmap == None and colors == None, a default Colormap is used.
            edgecolor : string - The string color each edge bar will be plotted.
        
        Default : no edgecolor
          * opening : float - between 0.0 and 1.0, to control the space between
            each sector (1.0 for no space)

        """

        percflag = kwargs['normed']

        bins, nbins, nsector, colors, angles, kwargs = self._init_plot(
            dir, var, **kwargs)
        null = kwargs.pop('facecolor', None)
        edgecolor = kwargs.pop('edgecolor', None)
        if edgecolor is not None:
            if not isinstance(edgecolor, str):
                raise ValueError('edgecolor must be a string color')
        opening = kwargs.pop('opening', None)
        if opening is None:
            opening = 0.8
        dtheta = 2 * np.pi / nsector
        opening = dtheta * opening

        for j in range(nsector):
            offset = 0
            for i in range(nbins):
                if i > 0:
                    offset += self._info['table'][i - 1, j]
                val = self._info['table'][i, j]
                zorder = ZBASE + nbins - i
                patch = Rectangle((angles[j] - old_div(opening, 2), offset),
                                  opening,
                                  val,
                                  facecolor=colors[i],
                                  edgecolor=edgecolor,
                                  zorder=zorder,
                                  **kwargs)
                self.add_patch(patch)
                if j == 0:
                    self.patches_list.append(patch)

        # Plot circle for calms
        table2 = self._info['table']
        # Choose a min value below which the radius of calms circle will not
        # fall so that the number or percentage of calms can be written in
        # the central circle.
        if percflag:
            minVal = 2.7
        else:
            minVal = 0.027 * table2.sum()

        if table2[0, 0] < minVal:
            rv = [minVal] * 360
        else:
            rv = [table2[0, 0]] * 360

        tv = np.linspace(0, 2 * np.pi, num=360)
        vert = np.array([tv, rv])
        vertices = vert.transpose()

        patch = Polygon(vertices,
                        closed=True,
                        facecolor=colors[0],
                        edgecolor=edgecolor,
                        zorder=ZBASE + nbins + 1,
                        **kwargs)
        self.add_patch(patch)

        # Write fraction or percentage of calms in the central circle
        if percflag:
            self.annotate(
                "%.1f%s" %
                ((old_div(table2[0, :].sum(), table2.sum())) * 100, '%'),
                xy=(0, 0),
                xytext=(0, 0),
                ha="center",
                va="center")

        else:
            self.annotate("%.1f" % table2[0, :].sum(),
                          xy=(0, 0),
                          xytext=(0, 0),
                          ha="center",
                          va="center")

        self._update(percflag)
Example #24
def plot_regime_bar(cube, plotdir='./'):
    """
    Plot bar chart of the frequency of weather regime occurrence.

    :param cube: Iris cube, which has a 'regime' coordinate and a 'time'
                 coordinate.
    :param plotdir: Directory to save resulting plot in.

    Get regime data:

    >>> import config
    >>> wr = config.SAMPLE_DATADIR + \
    'weather_regimes/daily_30regimes_since2010.txt'
    >>> regime_dates = read_regime_txt(wr) # doctest: +ELLIPSIS
    Getting regime data from  .../daily_30regimes_since2010.txt

    Load an example sites cube:

    >>> data_path = config.SAMPLE_DATADIR+'sites_cube_list/'
    >>> cube = iris.load_cube(data_path + 'aurn_5days.nc',
    ... iris.AttributeConstraint(short_name='PM2p5'))

    Give it a regime coordinate:

    >>> cube = add_regime_coord(cube, regime_dates)

    Plot this as a bar chart for the first site:

    >>> plotdir = config.CODE_DIR + "/adaqdocs/figures"
    >>> tsp = plot_regime_bar(cube[0], plotdir=plotdir) # doctest: +ELLIPSIS
    Plotting regime bar chart
    Saved figure  .../Regime_Bar.png

    .. image:: ../adaqdocs/figures/Regime_Bar.png
       :scale: 50%

    """

    print("Plotting regime bar chart")

    #List of regime numbers for plotting on x axis
    x = list(range(1, 31))
    #Initialise empty list for plotting on y axis
    y = []

    #Get cube's datetime points
    dtpoints = cube_time.cube_tpoints_dt(cube)

    #Record number of days of occurrence for each regime in y axis list
    for iregime in x:
        try:
            filter_cube = cube.extract(iris.Constraint(regime=iregime))
            nhours = len(filter_cube.coord('time').points)
            y.append(nhours // 24)
        except AttributeError:
            #cube.extract returned None: the regime does not occur in this
            #period, so mark it as 0
            y.append(0)

    #Plot
    plt.bar(x, y, align='center', width=0.75, color='teal', alpha=0.9)
    plt.xticks(list(range(1, 31)), x, ha='center', fontsize='x-small')
    plt.xlabel('Weather Regime')
    plt.ylabel('Number of Days')
    plt.suptitle('Frequency of Weather Regime Occurrence', fontsize=16)
    plt.title(str(dtpoints[0].strftime("%d/%m/%Y %H:%M")) + ' to ' +
              str(dtpoints[-1].strftime("%d/%m/%Y %H:%M")),
              fontsize=12)

    #Save
    if plotdir[-1] != '/':
        plotdir += '/'
    if not os.path.isdir(plotdir):
        print('Creating output directory:', plotdir)
        os.makedirs(plotdir)

    filename = 'Regime_Bar.png'
    plt.savefig(plotdir + filename)
    print('Saved figure ', plotdir + filename)
    plt.close()
Example #25
    def _getWindSpeedText(self, row):
        # Return the text of the first cell that matches a wind speed.
        for cell in row:
            m = self._speedMatch(cell)
            if m:
                return m.group(0)
        raise AssertionError('Incorrect Format')
Example #26
def call_mass(selectfile, massdir, outputdir, file_patterns=None,
              massretries=0, massretrydelay=60, retrieve=True):
    """
    Set up moo select or moo get shell command and call it using shell.
    If any errors with mass, will keep retrying based on massretries
    and mass retrydelay.

    :param selectfile: filename of file containing moo select query
    :param massdir: directory(s) or tar file on mass to retrieve from
    :param outputdir: directory on local machine to place retrieved files
    :param file_patterns: list of file name patterns for using with moo get
                          if selectfile=None
    :param massretries: number of times to retry mass retrieval
    :param massretrydelay: sleep time in seconds between each retry
    :param retrieve: Logical. If set to True retrieves files from mass.
                     If False, sets up all required moo select files and returns
                     the command to be run manually by user.
    :returns: String, containing command submitted to shell

    Example of running the command, but with retrieve set to False so it
    doesn't actually do the retrieval, as this is likely to fail if the user
    does not have MASS access or MASS is currently not working:

    >>> command = call_mass('selectfilename.txt', 'moose:/devfc/suiteid/',
    ... '/output/file/dir', retrieve=False)
    Moose Retrieval Command:
    moo select -f selectfilename.txt moose:/devfc/suiteid/ /output/file/dir

    >>> print(command)
    moo select -f selectfilename.txt moose:/devfc/suiteid/ /output/file/dir

    """

    if selectfile is None:
        if file_patterns is None:
            cmd = 'moo get -f ' + massdir + ' ' + outputdir
        else:
            moo_list = [massdir + '/' + p for p in file_patterns]
            cmd = 'moo get -f ' + ' '.join(moo_list) + ' ' + outputdir
    else:
        cmd = 'moo select -f ' + selectfile + ' ' + massdir + ' ' + outputdir
    print('Moose Retrieval Command:')
    print(cmd)
    if retrieve:
        for retry in range(-1, int(massretries)):
            if retry > 0:
                print('Retry number ', retry)
            #Set shell command going
            process = subprocess.Popen(cmd, shell=True,
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.STDOUT)
            #Print standard output and standard error as command runs
            for line in iter(process.stdout.readline, b''):
                print(line.decode()) #Print as ascii
            #Wait for command to finish running and get return code to check
            returncode = process.wait()
            print('Return code:', returncode)
            if returncode == 0:
                break
            elif retry < int(massretries)-1:
                #Non-zero return code, so try again after a period of time
                print("Error retrieving from mass, sleeping for ", \
                      massretrydelay, "seconds before retrying...")
                time.sleep(int(massretrydelay))

        if returncode != 0:
            #Fatal error
            raise IOError("Mass retrieval failed")
    return cmd
Example #27
import datetime
import calendar as standardlib_calendar

from django.conf import settings
from django.utils.encoding import python_2_unicode_compatible
from django.template.defaultfilters import date as date_filter
from django.utils.dates import WEEKDAYS, WEEKDAYS_ABBR
from schedule.conf.settings import SHOW_CANCELLED_OCCURRENCES
from schedule.models import Occurrence
from django.utils import timezone

weekday_names = []
weekday_abbrs = []
if settings.FIRST_DAY_OF_WEEK == 1:
    # The calendar week starts on Monday
    for i in range(7):
        weekday_names.append(WEEKDAYS[i])
        weekday_abbrs.append(WEEKDAYS_ABBR[i])
else:
    # The calendar week starts on Sunday, not Monday
    weekday_names.append(WEEKDAYS[6])
    weekday_abbrs.append(WEEKDAYS_ABBR[6])
    for i in range(6):
        weekday_names.append(WEEKDAYS[i])
        weekday_abbrs.append(WEEKDAYS_ABBR[i])


class Period(object):
    """
    This class represents a period of time. It can return a set of occurrences
    based on its events, and its time period (start and end).
Example #28
def extract_latest_forecast_days(cube, forecast_day='latest', start_dt=None):
    """
    Extract a cube with a monotonic time dimension which, depending on the
    setting of the 'forecast_day' parameter, may contain a stretch of
    day 1 forecasts followed by a forecast from a single forecast run at
    the end.

    :param cube: Input cube to extract data from. Must have a time and a
                 forecast_day coordinate.
    :param forecast_day: * 'latest' generates a cube which has day 1 forecasts
                           where possible, followed by a multi-day forecast
                           from a single forecast run
                         * 'forecast' generates a cube which only has a
                           multi-day forecast from a single forecast run.
    :param start_dt: datetime formatted date-time to start from. Only used if
                     forecast_day='forecast'. This then represents the start
                     day of the first full day of the forecast.
                     If this is instead set to None, then using
                     forecast_day='forecast' gets data from the final forecast
                     that is available from the input cube.

    Note this routine is tested in doctests of pp_data.py and maccens_data.py

    Get some example data from pp files, which have a forecast_day coordinate:

    >>> import config
    >>> import pp_data
    >>> pp = pp_data.PPData()
    >>> start_dt = datetime.datetime(2014, 4, 3, 00)
    >>> end_dt = datetime.datetime(2014, 4, 7, 00)
    >>> filenames=config.SAMPLE_DATADIR+'aqum_output/oper_forecast/*201404*.pp'
    >>> gcl = pp.readdata(filenames=filenames, short_name_list=['O3'],
    ...    start_datetime=start_dt, end_datetime=end_dt, forecast_day=None)
    >>> cube = gcl[0]
    >>> np.set_printoptions(formatter={'float':lambda x: '{:5.2f}'.format(x)})
    >>> print(cube.coord('forecast_day').points)
    [ 1.00  2.00  2.00  2.00  2.00  2.00  2.00  2.00  2.00  2.00  2.00  2.00
      2.00  2.00  2.00  2.00  2.00  2.00  2.00  2.00  2.00  2.00  2.00  2.00
      2.00  3.00  3.00  3.00  3.00  3.00  3.00  3.00  3.00  3.00  3.00  3.00
      3.00  3.00  3.00  3.00  3.00  3.00  3.00  3.00  3.00  3.00  3.00  3.00
      3.00  4.00  4.00  4.00  4.00  4.00  4.00  4.00  4.00  4.00  4.00  4.00
      4.00  4.00  4.00  4.00  4.00  4.00  4.00  4.00  4.00  4.00  4.00  4.00
      4.00  5.00  5.00  5.00  5.00  5.00  5.00  5.00  5.00  5.00  5.00  5.00
      5.00  5.00  5.00  5.00  5.00  5.00  5.00  5.00  5.00  5.00  5.00  5.00
      5.00  0.00  1.00  1.00  1.00  1.00  1.00  1.00  1.00  1.00  1.00  1.00
      1.00  1.00  1.00  1.00  1.00  1.00  1.00  1.00  1.00  1.00  1.00  1.00
      1.00  1.00  2.00  2.00  2.00  2.00  2.00  2.00  2.00  2.00  2.00  2.00
      2.00  2.00  2.00  2.00  2.00  2.00  2.00  2.00  2.00  2.00  2.00  2.00
      2.00  2.00  3.00  3.00  3.00  3.00  3.00  3.00  3.00  3.00  3.00  3.00
      3.00  3.00  3.00  3.00  3.00  3.00  3.00  3.00  3.00  3.00  3.00  3.00
      3.00  3.00  4.00  4.00  4.00  4.00  4.00  4.00  4.00  4.00  4.00  4.00
      4.00  4.00  4.00  4.00  4.00  4.00  4.00  4.00  4.00  4.00  4.00  4.00
      4.00  4.00  0.00  1.00  1.00  1.00  1.00  1.00  1.00  1.00  1.00  1.00
      1.00  1.00  1.00  1.00  1.00  1.00  1.00  1.00  1.00  1.00  1.00  1.00
      1.00  1.00  1.00  2.00  2.00  2.00  2.00  2.00  2.00  2.00  2.00  2.00
      2.00  2.00  2.00  2.00  2.00  2.00  2.00  2.00  2.00  2.00  2.00  2.00
      2.00  2.00  2.00  3.00  3.00  3.00  3.00  3.00  3.00  3.00  3.00  3.00
      3.00  3.00  3.00  3.00  3.00  3.00  3.00  3.00  3.00  3.00  3.00  3.00
      3.00  3.00  3.00]

    Now try extracting the 'latest' available forecast days:

    >>> latestcube = extract_latest_forecast_days(cube, 'latest')
    >>> print(latestcube.coord('forecast_day').points)
    [ 1.00  1.00  1.00  1.00  1.00  1.00  1.00  1.00  1.00  1.00  1.00  1.00
      1.00  1.00  1.00  1.00  1.00  1.00  1.00  1.00  1.00  1.00  1.00  1.00
      1.00  1.00  1.00  1.00  1.00  1.00  1.00  1.00  1.00  1.00  1.00  1.00
      1.00  1.00  1.00  1.00  1.00  1.00  1.00  1.00  1.00  1.00  1.00  1.00
      1.00  2.00  2.00  2.00  2.00  2.00  2.00  2.00  2.00  2.00  2.00  2.00
      2.00  2.00  2.00  2.00  2.00  2.00  2.00  2.00  2.00  2.00  2.00  2.00
      2.00  3.00  3.00  3.00  3.00  3.00  3.00  3.00  3.00  3.00  3.00  3.00
      3.00  3.00  3.00  3.00  3.00  3.00  3.00  3.00  3.00  3.00  3.00  3.00
      3.00]

    And then a cube with just a single forecast run in:

    >>> fcstcube = extract_latest_forecast_days(cube, 'forecast',
    ... start_dt=start_dt)
    >>> print(fcstcube.coord('forecast_day').points)
    [ 0.00  1.00  1.00  1.00  1.00  1.00  1.00  1.00  1.00  1.00  1.00  1.00
      1.00  1.00  1.00  1.00  1.00  1.00  1.00  1.00  1.00  1.00  1.00  1.00
      1.00  2.00  2.00  2.00  2.00  2.00  2.00  2.00  2.00  2.00  2.00  2.00
      2.00  2.00  2.00  2.00  2.00  2.00  2.00  2.00  2.00  2.00  2.00  2.00
      2.00  3.00  3.00  3.00  3.00  3.00  3.00  3.00  3.00  3.00  3.00  3.00
      3.00  3.00  3.00  3.00  3.00  3.00  3.00  3.00  3.00  3.00  3.00  3.00
      3.00  4.00  4.00  4.00  4.00  4.00  4.00  4.00  4.00  4.00  4.00  4.00
      4.00  4.00  4.00  4.00  4.00  4.00  4.00  4.00  4.00  4.00  4.00  4.00
      4.00]
    >>> np.set_printoptions()

    """

    if forecast_day not in ['latest', 'forecast']:
        return cube

    #This routine requires both time and forecast_day coordinates, so check
    #that these are available before continuing.
    cube_coord_names = [coord.name() for coord in cube.coords()]
    assert 'time' in cube_coord_names
    assert 'forecast_day' in cube_coord_names

    tunits = cube.coord('time').units

    #Set up cubelist to contain partial cubes which will be
    #joined to become single cube at the end
    cubelist = iris.cube.CubeList()

    #Take day 1 where possible
    day1_cube = cube.extract(iris.Constraint(forecast_day=1))

    if forecast_day == 'latest':
        #Convert this day1_cube into a cubelist,
        #to ensure time can be made monotonic
        for cube_slice in day1_cube.slices_over('time'):
            cubelist.append(cube_slice)

    #Get times after final day 1
    #First find the final time from the day 1 cube
    day1_maxt = max(day1_cube.coord('time').points)
    #And convert this to datetime format so can be used in iris constraints
    day1_maxt_dt = tunits.num2date(day1_maxt)

    if forecast_day == 'latest':
        #Extract another cube containing all times after the final time
        #from the day 1 cube, only for forecast_day > 1, since day 1
        #is already covered by day1_cube
        other_days_cube = cube.extract(
            iris.Constraint(forecast_day=lambda fd: fd > 1) &
            iris.Constraint(time=lambda t: t.point > day1_maxt_dt))
    elif forecast_day == 'forecast':
        #Extract a cube containing all times from one day before the
        #end of the last day 1 onwards.
        if start_dt is not None:
            #time at end of first required day
            day1_maxt_dt = start_dt + datetime.timedelta(days=1)

        #Don't limit by forecast day here as we want all forecast days from
        #a single forecast run, including possibly day 0 (if available)
        other_days_cube = cube.extract(
            iris.Constraint(time=lambda t:
                            t.point >= day1_maxt_dt-datetime.timedelta(days=1)))


    #If a cube is found:
    if other_days_cube is not None:
        #Check if multiple points available for each time
        if not iris.util.monotonic(other_days_cube.coord('time').points,
                                   strict=True):

            #Multiple points available for each time,
            # eg from different forecast runs
            #So now loop through each available forecast_day and pick out
            # points in cube for the requested forecast_day and with the time
            # which corresponds to the expected time in relation to the last
            # time from the day 1 cube.
            fd_max = other_days_cube.coord('forecast_day').points.max()
            if forecast_day == 'latest':
                fd_range = range(2, int(fd_max+1))
            elif forecast_day == 'forecast':
                fd_range = range(0, int(fd_max+1))
            for fd in fd_range:
                fd_cube = other_days_cube.extract(
                    iris.Constraint(forecast_day=fd) & \
                    iris.Constraint(time=lambda t:
                                    day1_maxt_dt+datetime.timedelta(days=fd-2) \
                                    < t.point <= day1_maxt_dt + \
                                    datetime.timedelta(days=fd-1)))
                if fd_cube is not None:
                    #Add individual time slices to cubelist to ensure
                    #when merged time will be monotonic
                    for cube_slice in fd_cube.slices_over('time'):
                        cubelist.append(cube_slice)
        else:
            #Only a single point is available for each time,
            # so use this cube in its entirety
            if not cubelist:
                cubelist.append(other_days_cube)
            else:
                #Add individual time slices to cubelist to ensure
                #when merged with day1_cube time will be monotonic
                for cube_slice in other_days_cube.slices_over('time'):
                    cubelist.append(cube_slice)

    #Merge cubelist back into a single cube
    if len(cubelist) == 1:
        #Only one cube, so don't need to merge, just take first cube.
        newcube = cubelist[0]
    elif len(cubelist) > 1:
        newcube = cubelist.merge_cube()
    else:
        newcube = None

    return newcube
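
The window arithmetic in the extraction loop above assigns forecast day fd
the half-open interval (day1_maxt_dt + (fd-2) days, day1_maxt_dt + (fd-1)
days]. A plain-datetime sketch of that selection rule (the function name
here is illustrative):

import datetime

def time_in_window(t, day1_maxt_dt, fd):
    #True if time t belongs to forecast day fd, counted from the
    #last day 1 time
    lower = day1_maxt_dt + datetime.timedelta(days=fd-2)
    upper = day1_maxt_dt + datetime.timedelta(days=fd-1)
    return lower < t <= upper

day1_maxt_dt = datetime.datetime(2014, 4, 4, 0)
print(time_in_window(datetime.datetime(2014, 4, 4, 12), day1_maxt_dt, 2))
#True - the first 24h after the last day 1 time belong to forecast day 2
print(time_in_window(datetime.datetime(2014, 4, 5, 12), day1_maxt_dt, 2))
#False - that time falls in the day 3 window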
Example #29
def gauss_sum(a, p, f, prec=20, factored=False, algorithm='pari', parent=None):
    r"""
    Return the Gauss sum `g_q(a)` as a `p`-adic number.

    The Gauss sum `g_q(a)` is defined by

    .. MATH::

        g_q(a)= \sum_{u\in F_q^*} \omega(u)^{-a} \zeta_q^u,

    where `q = p^f`, `\omega` is the Teichmüller character and
    `\zeta_q` is some arbitrary choice of primitive `q`-th root of
    unity. The computation is adapted from the main theorem in Alain
    Robert's paper *The Gross-Koblitz formula revisited*,
    Rend. Sem. Mat. Univ. Padova 105 (2001), 157--170.

    Let `p` be a prime, `f` a positive integer, `q=p^f`, and `\pi` be
    the unique root of `f(x) = x^{p-1}+p` congruent to `\zeta_p - 1` modulo
    `(\zeta_p - 1)^2`. Let `0\leq a < q-1`. Then the
    Gross-Koblitz formula gives us the value of the Gauss sum `g_q(a)`
    as a product of `p`-adic Gamma functions as follows:

    .. MATH::

        g_q(a) = -\pi^s \prod_{0\leq i < f} \Gamma_p(a^{(i)}/(q-1)),

    where `s` is the sum of the digits of `a` in base `p` and the
    `a^{(i)}` have `p`-adic expansions obtained from cyclic
    permutations of that of `a`.

    INPUT:

    - ``a`` -- integer

    - ``p`` -- prime

    - ``f`` -- positive integer

    - ``prec`` -- positive integer (optional, 20 by default)

    - ``factored`` -- boolean (optional, False by default)

    - ``algorithm`` -- flag passed to the `p`-adic Gamma function (optional,
      "pari" by default)

    - ``parent`` -- optional parent ring in which to compute the unit part;
      if ``None`` (the default), ``Zp(p, prec)`` is used

    OUTPUT:

    If ``factored`` is ``False``, returns a `p`-adic number in an Eisenstein extension of `\QQ_p`.
    This number has the form `pi^e * z` where `pi` is as above, `e` is some nonnegative
    integer, and `z` is an element of `\ZZ_p`; if ``factored`` is ``True``, the pair `(e,z)`
    is returned instead, and the Eisenstein extension is not formed.

    .. NOTE::

        This is based on GP code written by Adriana Salerno.

    EXAMPLES:

    In this example, we verify that `g_3(0) = -1`::

        sage: from sage.rings.padics.misc import gauss_sum
        sage: -gauss_sum(0,3,1)
        1 + O(pi^40)

    Next, we verify that `g_5(a) g_5(-a) = 5 (-1)^a`::

        sage: from sage.rings.padics.misc import gauss_sum
        sage: gauss_sum(2,5,1)^2-5
        O(pi^84)
        sage: gauss_sum(1,5,1)*gauss_sum(3,5,1)+5
        O(pi^84)

    Finally, we compute a non-trivial value::

        sage: from sage.rings.padics.misc import gauss_sum
        sage: gauss_sum(2,13,2)
        6*pi^2 + 7*pi^14 + 11*pi^26 + 3*pi^62 + 6*pi^74 + 3*pi^86 + 5*pi^98 +
        pi^110 + 7*pi^134 + 9*pi^146 + 4*pi^158 + 6*pi^170 + 4*pi^194 +
        pi^206 + 6*pi^218 + 9*pi^230 + O(pi^242)
        sage: gauss_sum(2,13,2,prec=5,factored=True)
        (2, 6 + 6*13 + 10*13^2 + O(13^5))

    .. SEEALSO::

        - :func:`sage.arith.misc.gauss_sum` for general finite fields
        - :meth:`sage.modular.dirichlet.DirichletCharacter.gauss_sum`
          for prime finite fields
        - :meth:`sage.modular.dirichlet.DirichletCharacter.gauss_sum_numerical`
          for prime finite fields
    """
    from sage.rings.padics.factory import Zp
    from sage.rings.all import PolynomialRing

    q = p**f
    a = a % (q-1)
    if parent is None:
        R = Zp(p, prec)
    else:
        R = parent
    out = -R.one()
    if a != 0:
        t = R(1/(q-1))
        for i in range(f):
            out *= (a*t).gamma(algorithm)
            a = (a*p) % (q-1)
    s = sum(a.digits(base=p))
    if factored:
        return s, out
    X = PolynomialRing(R, name='X').gen()
    pi = R.ext(X**(p - 1) + p, names='pi').gen()
    out *= pi**s
    return out
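
The update ``a = (a*p) % (q-1)`` in the loop above realizes the cyclic
permutation of the base-`p` digits of `a` referred to in the docstring.
A plain-Python check of that fact (the helper name is illustrative, not
part of the Sage API):

def base_p_digits(a, p, f):
    #f base-p digits of a, least significant first
    return [(a // p**i) % p for i in range(f)]

a, p, f = 2, 13, 2
q = p**f
for _ in range(f):
    print(base_p_digits(a, p, f))
    a = (a*p) % (q-1)
#Prints [2, 0] then [0, 2]: each step cyclically shifts the digit string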
Example #30
    # Remove data values less than the threshold
    dmask = np.where(cube_data >= threshold)
    new_data = cube_data[dmask]
    gcdist = cube_distance[dmask]

    # Set up projection for range ring calculation
    lons = cubes[0].coord('longitude')
    globe = lons.coord_system.as_cartopy_globe()
    g = Geod(a=globe.semimajor_axis, b=globe.semiminor_axis)

    # Calculate latitudes and longitudes of range ring
    circ_lon = []
    circ_lat = []
    for azimuth in range(0, 361):
        lon, lat, _ = g.fwd(reflong,
                            reflat,
                            azimuth,
                            max(gcdist),
                            radians=False)
        circ_lon.append(lon)
        circ_lat.append(lat)

    # Plot ring
    plt.plot(circ_lon,
             circ_lat,
             label=bin1.attributes['Sources'],
             transform=ccrs.Geodetic())

# Plot country outlines
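
For reference, a minimal standalone call of the same pyproj routine used in
the loop above; here a WGS84 ellipsoid is assumed instead of the globe taken
from the cube's coordinate system:

from pyproj import Geod

g = Geod(ellps='WGS84')
# Longitude/latitude of the point 100 km due east (azimuth 90) of (0, 50)
lon, lat, back_azimuth = g.fwd(0.0, 50.0, 90.0, 100000.0, radians=False)
print(lon, lat)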
Example #31
    # Nested helper: 'self' and 'units' are captured from the enclosing
    # method's scope
    def get_labels():
        labels = np.copy(self._info['bins'])
        # Additionally write units to legend
        labels = ["[%.1f : %.1f %s]" % (labels[i], labels[i+1], units)
                  for i in range(len(labels)-1)]
        return labels
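
A self-contained version of the same label formatting, with illustrative bin
edges and units standing in for the values held on self._info:

import numpy as np

bins = np.array([0.0, 2.5, 5.0, 10.0])
units = 'ug/m3'
labels = ["[%.1f : %.1f %s]" % (bins[i], bins[i+1], units)
          for i in range(len(bins)-1)]
print(labels)
# ['[0.0 : 2.5 ug/m3]', '[2.5 : 5.0 ug/m3]', '[5.0 : 10.0 ug/m3]']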