Example #1
import numpy as np
import scipy.interpolate


def make_strictly_increasing(yarray, xarray=None):
    '''
    Make y strictly increasing.

    This is done by interpolating between surrounding values
    to remove any constant or decreasing values.

    >>> Y = [0,0,0,3,4,5,4,5,6,7,7,7,7]
    >>> print(make_strictly_increasing(Y))
    [0.0, 1.0, 2.0, 3, 4, 4.5, 4.5, 5, 6, 6.25, 6.5, 6.75, 7]
    '''

    if isinstance(yarray, np.ndarray):
        youtput = yarray.copy()
    else:
        youtput = yarray[:]
    if xarray is None:
        xarray = np.arange(len(yarray))

    if len(xarray) > 1 and len(yarray) > 1:
        for i in np.arange(len(yarray)-1):
            fixvalue = False
            if yarray[i] >= yarray[i+1]:
                fixvalue = True
            if i > 0 and yarray[i] <= youtput[i-1]:
                fixvalue = True
            if fixvalue:
                #Need to correct value of yarray[i]

                #Find next different larger value
                for xnext, ynext in zip(xarray[i+1:], yarray[i+1:]):
                    if ynext > yarray[i]:
                        break
                #Find previous different smaller value
                if i > 0:
                    for xprev, yprev in zip(xarray[i-1::-1], youtput[i-1::-1]):
                        if yprev < yarray[i]:
                            break
                else:
                    #Use current value as no previous values available
                    yprev = yarray[i]
                    xprev = xarray[i]

                #Now correct value by linear interpolation
                #between previous and next values
                ftn = scipy.interpolate.interp1d([xprev, xnext],
                                                 [yprev, ynext],
                                                 kind='linear')
                youtput[i] = float(ftn(xarray[i]))

    return youtput
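A minimal sketch of the look-ahead idiom used above, on made-up data: zipping offset slices pairs each remaining x with its y while scanning for the next strictly larger value.

xs = [0, 1, 2, 3, 4]
ys = [5, 5, 5, 7, 9]
i = 1
# Find the next value strictly greater than ys[i], together with its x
for xnext, ynext in zip(xs[i + 1:], ys[i + 1:]):
    if ynext > ys[i]:
        break
print(xnext, ynext)  # 3 7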
Example #2
    def __init__(self, data, columns=None, schema=None, index=None, pandas=True):
        if columns is None and schema is None:
            raise ValueError('Either columns or schema should be provided')

        if columns is None and schema is not None:
            columns = schema.columns

        self._columns = columns
        self._index = index

        if has_pandas and pandas:
            self._values = pd.DataFrame([self._get_values(r) for r in data],
                                        columns=[col.name for col in self._columns],
                                        index=index)
            self._index = self._values.index
            self._pandas = True
        else:
            if self._index is None:
                self._index = []
                self._values = []
                for i, r in zip(itertools.count(0), data):
                    self._values.append(self._get_values(r))
                    self._index.append(i)
            else:
                self._values = list(self._get_values(r) for r in data)
            self._pandas = False

        self._cursor = -1
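Side note: the zip(itertools.count(0), data) loop above is equivalent to enumerate(data); a quick demonstration:

import itertools
data = ['a', 'b', 'c']
assert list(zip(itertools.count(0), data)) == list(enumerate(data))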
Example #3
    def concat(self, frame, axis=0):
        if self._pandas:
            from pandas.tools.merge import concat

            return concat((self, frame), axis=axis)
        else:
            if axis == 0:
                if self._columns != frame._columns:
                    raise ValueError(
                        'Cannot concat two frames with different columns')

                return ResultFrame(self._values + frame._values,
                                   columns=self._columns,
                                   index=self._index + frame._index,
                                   pandas=self._pandas)
            else:
                if self._index != frame._index:
                    raise ValueError(
                        'Cannot concat two frames with different indexes')

                values = [
                    val + other
                    for val, other in zip(self._values, frame._values)
                ]
                return ResultFrame(values,
                                   self._columns + frame._columns,
                                   index=self._index,
                                   pandas=self._pandas)
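The axis=1 branch above relies on zip pairing rows positionally; a standalone sketch with hypothetical rows:

left = [[1, 2], [3, 4]]
right = [['x'], ['y']]
rows = [val + other for val, other in zip(left, right)]
print(rows)  # [[1, 2, 'x'], [3, 4, 'y']]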
Example #4
    def test_get_weeks(self):
        weeks = self.month.get_weeks()
        actuals = [(week.start, week.end) for week in weeks]

        if settings.FIRST_DAY_OF_WEEK == 0:
            expecteds = [
                (datetime.datetime(2008, 1, 27, 0, 0, tzinfo=pytz.utc),
                 datetime.datetime(2008, 2, 3, 0, 0, tzinfo=pytz.utc)),
                (datetime.datetime(2008, 2, 3, 0, 0, tzinfo=pytz.utc),
                 datetime.datetime(2008, 2, 10, 0, 0, tzinfo=pytz.utc)),
                (datetime.datetime(2008, 2, 10, 0, 0, tzinfo=pytz.utc),
                 datetime.datetime(2008, 2, 17, 0, 0, tzinfo=pytz.utc)),
                (datetime.datetime(2008, 2, 17, 0, 0, tzinfo=pytz.utc),
                 datetime.datetime(2008, 2, 24, 0, 0, tzinfo=pytz.utc)),
                (datetime.datetime(2008, 2, 24, 0, 0, tzinfo=pytz.utc),
                 datetime.datetime(2008, 3, 2, 0, 0, tzinfo=pytz.utc))
            ]
        else:
            expecteds = [
                (datetime.datetime(2008, 1, 28, 0, 0, tzinfo=pytz.utc),
                 datetime.datetime(2008, 2, 4, 0, 0, tzinfo=pytz.utc)),
                (datetime.datetime(2008, 2, 4, 0, 0, tzinfo=pytz.utc),
                 datetime.datetime(2008, 2, 11, 0, 0, tzinfo=pytz.utc)),
                (datetime.datetime(2008, 2, 11, 0, 0, tzinfo=pytz.utc),
                 datetime.datetime(2008, 2, 18, 0, 0, tzinfo=pytz.utc)),
                (datetime.datetime(2008, 2, 18, 0, 0, tzinfo=pytz.utc),
                 datetime.datetime(2008, 2, 25, 0, 0, tzinfo=pytz.utc)),
                (datetime.datetime(2008, 2, 25, 0, 0, tzinfo=pytz.utc),
                 datetime.datetime(2008, 3, 3, 0, 0, tzinfo=pytz.utc))
            ]

        for actual, expected in zip(actuals, expecteds):
            self.assertEqual(actual, expected)
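Note that zip() stops at the shorter sequence, so a missing or extra week would pass this loop silently. On Python 3.10+ a stricter variant is available (a sketch):

for actual, expected in zip(actuals, expecteds, strict=True):
    self.assertEqual(actual, expected)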
Example #5
    def mergeWith(self, other):
        if len(self) != len(other):
            raise ValueError(
                'profiles with different lengths cannot be merged')
        self.elements = [
            a.mergedWith(b)
            for a, b in builtins.zip(self.elements, other.elements)
        ]
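builtins.zip is presumably qualified because the surrounding module rebinds the name zip; going through builtins restores the standard one. A small illustration:

import builtins
zip = 'shadowed'  # e.g. a module that redefines zip locally
pairs = list(builtins.zip([1, 2], [3, 4]))
print(pairs)  # [(1, 3), (2, 4)] -- still the stdlib zip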
Example #6
def trim_zeros(L):
    r"""
    Strips trailing zeros/empty lists from a list.

    EXAMPLES::

        sage: from sage.rings.padics.misc import trim_zeros
        sage: trim_zeros([1,0,1,0])
        [1, 0, 1]
        sage: trim_zeros([[1],[],[2],[],[]])
        [[1], [], [2]]
        sage: trim_zeros([[],[]])
        []
        sage: trim_zeros([])
        []

    Zeros are also trimmed from nested lists (one deep)::

        sage: trim_zeros([[1,0]])
        [[1]]
        sage: trim_zeros([[0],[1]])
        [[], [1]]
    """
    strip_trailing = True
    n = len(L)
    for i, c in zip(reversed(range(len(L))), reversed(L)):
        if strip_trailing and (c == 0 or c == []):
            n = i
        elif isinstance(c, list):
            strip_trailing = False
            m = len(c)
            # strip trailing zeros from the sublists
            for j, d in zip(reversed(range(len(c))), reversed(c)):
                if d == 0:
                    m = j
                else:
                    break
            L[i] = c[:m]
        else:
            break
    return L[:n]
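The zip(reversed(range(len(L))), reversed(L)) pattern walks the list from the end while keeping the original indices, equivalent to reversing an enumerate:

L = [10, 20, 30]
assert list(zip(reversed(range(len(L))), reversed(L))) == [(2, 30), (1, 20), (0, 10)]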
Example #7
import numpy as np


def _expand_mask(array):
    """
    Used in mask_data function for 'contour' plots to expand
    mask by one to ensure smooth outer contour
    """

    hgt, wdt = array.shape
    mi = np.where(~array.mask)  # indices of the unmasked points
    for j, i in zip(mi[0], mi[1]):
        i0 = max(0, i - 1)
        j0 = max(0, j - 1)
        i1 = min(wdt - 1, i + 1)
        j1 = min(hgt - 1, j + 1)
        array.mask[j0:j1 + 1, i0:i1 + 1] = False
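A small check of the expansion (a sketch, assumes numpy):

import numpy.ma as ma
a = ma.masked_all((4, 4))
a[1, 1] = 1.0        # one unmasked cell
_expand_mask(a)
print(a.mask.sum())  # 7 -- the 3x3 neighbourhood around (1, 1) is now unmasked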
Example #8
    def add_cube(self, cube):
        '''
        Add entire iris cube of data to the data bin

        >>> import iris
        >>> cube = iris.cube.Cube(np.arange(10)/2.)
        >>> cube.data[3:6] = 0.
        >>> np.set_printoptions(formatter={'float':lambda x: '{:5.2f}'.format(x)})
        >>> print(cube.data)
        [ 0.00  0.50  1.00  0.00  0.00  0.00  3.00  3.50  4.00  4.50]
        >>> np.set_printoptions()
        >>> bd = BinnedData()
        >>> bd.add_cube(cube)
        >>> bd.databin_dict == {'0.0': 4, '1.0': 2, '3.0': 1,
        ... '4.0': 2, '5.0': 1}
        True
        '''


        #for value in cube.data:
        #    self.add_datapt(value)


        mindata = np.nanmin(cube.data)
        maxdata = np.nanmax(cube.data)
        #Figure out which bin these should go in.
        minbin = self.calc_bin(mindata)
        maxbin = self.calc_bin(maxdata)

        #Now generate all possible bins in this range
        bin_edges = np.arange(minbin-self.binsize, maxbin+self.binsize,
                              self.binsize)

        #Now use pandas cut and value_counts to generate binned data histogram
        #where the right hand side value of a bin is included and the left hand side is not.
        out = pd.cut(cube.data.flatten(), bin_edges, labels=bin_edges[1:])
        counts = pd.value_counts(out)
        for index, value in zip(counts.index, counts.values):
            binvalue = str(index)
            #Only store data in bin if actually any counts
            #(values) for this binvalue
            if value > 0:
                if binvalue not in self.databin_dict:
                    self.databin_dict[binvalue] = value
                else:
                    self.databin_dict[binvalue] += value
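A standalone sketch of the binning idiom above, with hypothetical data (assumes numpy and pandas):

import numpy as np
import pandas as pd

data = np.array([0.0, 0.5, 1.0, 0.0, 3.0])
edges = np.arange(-1.0, 4.0, 1.0)               # bin edges -1, 0, 1, 2, 3
binned = pd.cut(data, edges, labels=edges[1:])  # right edge of each bin included
counts = pd.Series(binned).value_counts()
for index, value in zip(counts.index, counts.values):
    if value > 0:
        print(index, value)  # -> 0.0 2, 1.0 2, 3.0 1 (most frequent first)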
Example #9
def backward_pass(jaxpr, consts, freevar_vals, cotangent_in):
    def write_cotangent(v, ct):
        # assert v not in primal_env
        if ct is not None:
            ct_env[v] = add_tangents(ct_env[v], ct) if v in ct_env else ct

    def read_cotangent(v):
        return ct_env.get(v, zero)

    primal_env = {
        v: val
        for v, val in zip(jaxpr.freevars, freevar_vals) if val is not None
    }
    primal_env.update(zip(jaxpr.constvars, consts))
    ct_env = {jaxpr.outvar: cotangent_in}

    for eqn in jaxpr.eqns[::-1]:
        cts_in = map(read_cotangent, eqn.outvars)
        ct_in = TangentTuple(cts_in) if eqn.destructure else cts_in[0]
        invals = map(primal_env.get, eqn.invars)
        if eqn.bound_subjaxprs:
            subjaxprs, sub_consts, sub_freevar_vals = unzip3([
                (subjaxpr, map(primal_env.get,
                               const_vars), map(primal_env.get, bound_vars))
                for subjaxpr, const_vars, bound_vars in eqn.bound_subjaxprs
            ])
            cts_out, ct_free_vars_out = get_primitive_transpose(
                eqn.primitive)(eqn.params, subjaxprs, sub_consts,
                               sub_freevar_vals, invals, ct_in)
            # TODO(dougalm): support cases != 1
            assert (len(eqn.bound_subjaxprs) == 1)
            _, _, bound_vars = eqn.bound_subjaxprs[0]
            map(write_cotangent, bound_vars, ct_free_vars_out)
        else:
            cts_out = get_primitive_transpose(eqn.primitive)(ct_in, *invals,
                                                             **eqn.params)

        if cts_out is zero:
            cts_out = [zero for _ in eqn.invars]
        # TODO(phawkins,dougalm): eqn.invars and cts_out can have different lengths
        for var, ct in builtins.zip(eqn.invars, cts_out):
            write_cotangent(var, ct)

    cotangents_out = map(read_cotangent, jaxpr.invars)
    freevar_cts = map(read_cotangent, jaxpr.freevars)
    return freevar_cts, cotangents_out
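In the JAX source this is excerpted from, map is rebound to a list-returning "safe" variant (which is why expressions like cts_in[0] work); roughly this sketch, not the exact source:

def safe_map(f, *args):
    # Materialise all arguments, check they have equal length, return a list
    args = list(map(list, args))
    n = len(args[0])
    for arg in args[1:]:
        assert len(arg) == n, 'length mismatch: {}'.format(list(map(len, args)))
    return [f(*xs) for xs in zip(*args)]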
Example #10
    def test_get_days(self):
        weeks = self.month.get_weeks()
        week = list(weeks)[0]
        days = week.get_days()
        actuals = [(len(day.occurrences), day.start, day.end) for day in days]

        if settings.FIRST_DAY_OF_WEEK == 0:
            expecteds = [
                (0, datetime.datetime(2008, 1, 27, 0, 0, tzinfo=pytz.utc),
                 datetime.datetime(2008, 1, 28, 0, 0, tzinfo=pytz.utc)),
                (0, datetime.datetime(2008, 1, 28, 0, 0, tzinfo=pytz.utc),
                 datetime.datetime(2008, 1, 29, 0, 0, tzinfo=pytz.utc)),
                (0, datetime.datetime(2008, 1, 29, 0, 0, tzinfo=pytz.utc),
                 datetime.datetime(2008, 1, 30, 0, 0, tzinfo=pytz.utc)),
                (0, datetime.datetime(2008, 1, 30, 0, 0, tzinfo=pytz.utc),
                 datetime.datetime(2008, 1, 31, 0, 0, tzinfo=pytz.utc)),
                (0, datetime.datetime(2008, 1, 31, 0, 0, tzinfo=pytz.utc),
                 datetime.datetime(2008, 2, 1, 0, 0, tzinfo=pytz.utc)),
                (0, datetime.datetime(2008, 2, 1, 0, 0, tzinfo=pytz.utc),
                 datetime.datetime(2008, 2, 2, 0, 0, tzinfo=pytz.utc)),
                (1, datetime.datetime(2008, 2, 2, 0, 0, tzinfo=pytz.utc),
                 datetime.datetime(2008, 2, 3, 0, 0, tzinfo=pytz.utc)),
            ]

        else:
            expecteds = [
                (0, datetime.datetime(2008, 1, 28, 0, 0, tzinfo=pytz.utc),
                 datetime.datetime(2008, 1, 29, 0, 0, tzinfo=pytz.utc)),
                (0, datetime.datetime(2008, 1, 29, 0, 0, tzinfo=pytz.utc),
                 datetime.datetime(2008, 1, 30, 0, 0, tzinfo=pytz.utc)),
                (0, datetime.datetime(2008, 1, 30, 0, 0, tzinfo=pytz.utc),
                 datetime.datetime(2008, 1, 31, 0, 0, tzinfo=pytz.utc)),
                (0, datetime.datetime(2008, 1, 31, 0, 0, tzinfo=pytz.utc),
                 datetime.datetime(2008, 2, 1, 0, 0, tzinfo=pytz.utc)),
                (0, datetime.datetime(2008, 2, 1, 0, 0, tzinfo=pytz.utc),
                 datetime.datetime(2008, 2, 2, 0, 0, tzinfo=pytz.utc)),
                (1, datetime.datetime(2008, 2, 2, 0, 0, tzinfo=pytz.utc),
                 datetime.datetime(2008, 2, 3, 0, 0, tzinfo=pytz.utc)),
                (0, datetime.datetime(2008, 2, 3, 0, 0, tzinfo=pytz.utc),
                 datetime.datetime(2008, 2, 4, 0, 0, tzinfo=pytz.utc))
            ]

        for actual, expected in zip(actuals, expecteds):
            self.assertEqual(actual, expected)
Example #11
    def get_comparablecdfs(self, bdlist):
        '''
        Return cdfs from self, and from a list of other binned data objects,
        with data on the same x axis, i.e. bincentres the same for all.

        >>> bd1 = BinnedData()
        >>> bd1.databin_dict = {'0.0': 2, '3.0':1, '4.0':1}
        >>> bd2 = BinnedData()
        >>> bd2.databin_dict = {'1.0': 1, '2.0':1, '5.0':1}
        >>> bd3 = BinnedData()
        >>> bd3.databin_dict = {'1.0': 2, '2.0':1, '3.0':2}

        >>> x, [cdf1, cdf2, cdf3] = bd1.get_comparablecdfs([bd2,bd3])
        >>> np.set_printoptions(formatter={'float':lambda x: '{:5.2f}'.format(x)})
        >>> print(x)
        [-1.00  0.00  1.00  2.00  3.00  4.00  5.00]
        >>> print(cdf1)
        [ 0.00  0.19  0.38  0.50  0.75  1.00  1.00]
        >>> print(cdf2)
        [ 0.00  0.00  0.33  0.50  0.67  0.83  1.00]
        >>> print(cdf3)
        [ 0.00  0.00  0.40  0.60  1.00  1.00  1.00]
        >>> np.set_printoptions()
        '''

        #First check that they can be compared, i.e. that the reference
        #bin locations are offset by a whole number of bins so that the
        #bin edges for self line up with those for the bdlist.
        for bd in bdlist:
            if (bd.ref_binloc - self.ref_binloc)%self.binsize != 0:
                raise ValueError("BinnedData objects cannot be compared")

        #Calculate bincentres and cdfs for all BinnedData objects
        bdlist_all = bdlist[:]
        bdlist_all.insert(0, self)
        bincentres_all = []
        cdf_all = []
        for bd in bdlist_all:
            bincentres, cdf = bd.get_cdf()
            bincentres_all.append(bincentres)
            cdf_all.append(cdf)

        #Produce a bincentres_final array which contains all required
        #bincentres to cover all data
        bincentres_max = max([max(v) for v in bincentres_all])
        bincentres_min = min([min(v) for v in bincentres_all])
        bincentres_final = np.arange(bincentres_min,
                                     bincentres_max+self.binsize,
                                     self.binsize)

        #Move the cdfs for each binned data object along to match the
        #new bincentres. Fill missing data at the beginning of the cdf
        #with zeros, and data at the end with 1s to ensure a true cdf still.
        cdf_final_all = []
        for bincentres, cdf in zip(bincentres_all, cdf_all):
            cdf_final = np.zeros((len(bincentres_final)))
            #Find where bincentre fits into bincentres_final:
            #Need to convert to integers by dividing by binsize
            #to allow sensible check for equality
            bincentres_final_int = (np.rint(bincentres_final/
                                            self.binsize)).astype(int)
            bincentres_min_int = (np.rint(bincentres[0]/
                                          self.binsize)).astype(int)
            indices = np.where(bincentres_final_int == bincentres_min_int)
            imin = indices[0][0]
            imax = imin+len(cdf)-1
            if imax >= len(bincentres_final):
                imax = len(bincentres_final)-1

            #Now fit cdf into cdf_final:
            cdf_final[imin:imax+1] = cdf
            #Zeros already set up at start of cdf_final, but 1s need to
            #be added to the end to make a sensible cdf
            cdf_final[imax:] = 1.

            cdf_final_all.append(cdf_final)

        return bincentres_final, cdf_final_all
Example #12
import numpy as np
import numpy.ma as ma
import iris


def plume_arrival_time(cube, threshold):
    '''
    Compute the arrival time of a plume at all locations using a threshold
    value to determine when the plume has arrived.
    (written with support from AVD)

    Arrival time is determined relative to the first time step in the cube,
    not the release time. A new cube is created whose data points are the
    hours (or fractions of hours) between this reference time and the
    arrival time.

    .. note:: Currently this subroutine can only be used on cubes with
              three dimensions

    .. note:: This subroutine assumes that the time dimension in the
              cube is in hours and will fail with an error where this
              is not true

    First import name_data and config

    >>> import name_data
    >>> import config

    Use the sample data path to locate data

    >>> sample_data_path = config.SAMPLE_DATADIR+'name/'

    Read in the data

    >>> name = name_data.NAMEData()
    >>> name.readdata(sample_data_path + 'Fields_grid1*',
    ... field_attributes = {'Species': 'CAESIUM-137'})
    [<iris 'Cube' of CAESIUM-137_AIR_CONCENTRATION / (Bq / m^3) \
(time: 9; latitude: 90; longitude: 180)>]

    Set a threshold

    >>> threshold = 1.0e-7

    Compute arrival times

    >>> time_cube = plume_arrival_time(name.gridded_cube_list[0], threshold)
    >>> print(time_cube.data[67, 98])
    96.0

    '''

    shape = cube.data.shape
    if len(shape) != 3:
        raise ValueError("Cube must have 3-dimensions")

    # Locate time coordinate
    try:
        t_coord = cube.coord('time')
        t_dim = cube.coord_dims('time')[0]
    except iris.exceptions.CoordinateNotFoundError:
        raise ValueError('cube does not have a time coordinate')

    # Check that time coordinate units contains hours
    if 'hour' not in str(t_coord.units):
        raise ValueError('time coordinate units need to be hours')

    # Determine location/ name of remaining coordinates
    dims_list = []
    for nind, coord in enumerate(cube.coords()):
        if coord.name() != 'time':
            dims_list.append(nind)

    # Create a masked array
    time_array = ma.array(np.zeros(shape[1:]))
    time_array[:] = ma.masked

    # Loop over values which are greater than the threshold
    coordinates = np.where(cube.data >= threshold)
    ts = coordinates[t_dim]
    coord1 = coordinates[dims_list[0]]
    coord2 = coordinates[dims_list[1]]

    # Extract the first time step
    first_timestep = t_coord.points[0]

    # In the new array add data where the threshold is exceeded.
    for tind, c1, c2 in zip(ts, coord1, coord2):
        if (time_array[c1, c2] is ma.masked) or \
                (time_array[c1, c2] > t_coord.points[tind] - first_timestep):
            time_array[c1, c2] = t_coord.points[tind] - first_timestep

    # Add coordinates and units and create cube
    threshold_coord = iris.coords.AuxCoord(threshold,
                                           long_name='Threshold',
                                           units=cube.units)
    a_coord = cube.coords()[dims_list[0]]
    b_coord = cube.coords()[dims_list[1]]

    start_time = t_coord.units.num2date(first_timestep).isoformat(' ')

    if 'Species' in cube.attributes:
        long_name = 'Time of arrival of {}'.format(cube.attributes['Species'])
    else:
        long_name = 'Time of arrival'

    time_cube = iris.cube.Cube(time_array,
                               long_name=long_name,
                               units='hours since '+start_time,
                               attributes=cube.attributes,
                               dim_coords_and_dims=[(a_coord, 0),
                                                    (b_coord, 1)],
                               aux_coords_and_dims=[(threshold_coord, None)])

    # Add in source term coordinates if present
    names = [coord.name() for coord in cube.coords()]
    if 'source_latitude' in names:
        new_coord = cube.coord('source_latitude')
        time_cube.add_aux_coord(new_coord)
        new_coord = cube.coord('source_longitude')
        time_cube.add_aux_coord(new_coord)

    # Add short name
    time_cube.attributes['short_name'] = 'TimeOfArrival'

    return time_cube
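The core indexing idiom above: np.where on a 3-D condition returns parallel index arrays that zip() walks together. A tiny standalone illustration with hypothetical data:

import numpy as np
data = np.zeros((2, 3, 3))
data[1, 2, 0] = 5.0
ts, c1s, c2s = np.where(data >= 1.0)
for tind, c1, c2 in zip(ts, c1s, c2s):
    print(tind, c1, c2)  # 1 2 0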
Example #13
    def plot(self, legendcols=None):
        """
        Produce plot.
        :param legendcols: Number of columns to include in legend.
        """

        if not self.lines:
            raise ValueError("SoccerPlot: no lines have been added")

        if self.fig is None:
            self.fig = plt.figure()
        ax = self.fig.add_subplot(111)

        #Scatter Plot
        for line in self.lines:
            if line['marker'] is None:
                line['marker'] = 'o'  #Default value for a scatter plot
            #Check has coordinates
            if line['cube'].coords(self.stat_xaxis) and \
               line['cube'].coords(self.stat_yaxis):
                #Check that data are not all NaNs:
                xaxispts = line['cube'].coord(self.stat_xaxis).points
                yaxispts = line['cube'].coord(self.stat_yaxis).points
                if not np.isnan(np.nanmax(xaxispts)) and \
                   not np.isnan(np.nanmax(yaxispts)):
                    iplt.scatter(line['cube'].coord(self.stat_xaxis),
                                 line['cube'].coord(self.stat_yaxis),
                                 color=np.atleast_1d(line['colour']),
                                 label=line['label'],
                                 marker=line['marker'],
                                 edgecolor='k',
                                 s=30)
            else:
                raise ValueError(
                    "Cube does not have statistics coordinates \n" +
                    "May need to run get_stats() first")

        #Set x & y axis limits
        range_x = self.get_range(self.stat_xaxis)
        if range_x is not None:
            ax.set_xlim(range_x)

        range_y = self.get_range(self.stat_yaxis)
        if range_y is not None:
            ax.set_ylim(range_y)

        #Plot goal regions
        if self.stat_xgoal is None:
            #Get goal if not already set
            self.stat_xgoal = self.get_goals(self.stat_xaxis)
        if self.stat_ygoal is None:
            #Get goal if not already set
            self.stat_ygoal = self.get_goals(self.stat_yaxis)
        if self.stat_xgoal is not None and self.stat_ygoal is not None:
            #Can plot goals - plot as a square
            for xgoal, ygoal in zip(self.stat_xgoal, self.stat_ygoal):
                xpoints = [xgoal, -xgoal, -xgoal, xgoal, xgoal]
                ypoints = [ygoal, ygoal, -ygoal, -ygoal, ygoal]
                ax.plot(xpoints, ypoints, 'k--')

        #Add lines through zero
        ax.plot(ax.get_xlim(), [0, 0], 'k')
        ax.plot([0, 0], ax.get_ylim(), 'k')

        #Add legend
        if self.legend:
            if legendcols is None:
                plotting_functions.add_legend_belowaxes(scatterpoints=1)
            else:
                plotting_functions.add_legend_belowaxes(scatterpoints=1,
                                                        ncol=legendcols)

        #Add title
        if self.title is None:
            self.gen_title()
        ax.set_title(self.title)

        #Add x and y labels
        if self.xlabel is None:
            if self.stat_xaxis in timeseries_stats.STATS_INFO:
                self.xlabel = timeseries_stats.STATS_INFO[
                    self.stat_xaxis]['long_name']
            else:
                self.xlabel = self.stat_xaxis
        ax.set_xlabel(self.xlabel)

        if self.ylabel is None:
            if self.stat_yaxis in timeseries_stats.STATS_INFO:
                self.ylabel = timeseries_stats.STATS_INFO[
                    self.stat_yaxis]['long_name']
            else:
                self.ylabel = self.stat_yaxis
        ax.set_ylabel(self.ylabel)

        #Add gridlines
        if self.gridlines:
            plt.grid()

        # Apply branding
        if self.mobrand:
            line_plot.add_mobranding()

        return self.fig
Example #14
                              usecols=(7, ))
    humidity = np.loadtxt(met_filename,
                          unpack=True,
                          dtype=float,
                          delimiter=',',
                          skiprows=38,
                          usecols=(8, ))
    hPressure = np.loadtxt(met_filename,
                           unpack=True,
                           dtype=float,
                           delimiter=',',
                           skiprows=38,
                           usecols=(6, ))
    hPressure = hPressure / 100.

    HeightTemp = list(zip(hPressure, Ctemperature))
    column_titles = [('pressure', 'temperature')]
    tephi.MIN_PRESSURE = 70
    tephi.MAX_PRESSURE = 1000
    tephi.ISOBAR_SPEC = [(50, 0.50), (100, 1.5), (200, None)]
    # tephi.ISOBAR_SPEC = [(25, 0.45), (50, 0.50), (100, 1.5), (200, None)]
    tephi.MIN_THETA = 0
    tephi.MAX_THETA = 700
    tephi.MIN_WET_ADIABAT = 0
    tephi.MAX_WET_ADIABAT = 200
    tephi.WET_ADIABAT_SPEC = [(5, None)]
    tephi.MIXING_RATIO_SPEC = [(1, 0.05), (2, 0.18), (4, 0.3), (8, 1.5)]
    #tephi.MIXING_RATIO_FIXED = [0.001, 0.01, 0.05, 0.1, 0.5, 1.0, 5.0, 10.0, 20.0, 50.0, 75.0,100.0,150.0]
    tpg = tephi.Tephigram(anchor=[(1100, 0), (70, 0)],
                          isotherm_locator=tephi.Locator(25),
                          dry_adiabat_locator=tephi.Locator(25))
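list(zip(...)) matters in the snippet above: in Python 3, zip returns a one-shot iterator, so materialising it lets HeightTemp be indexed and re-iterated:

pressures = [1000.0, 850.0]
temps = [15.0, 5.0]
pairs = zip(pressures, temps)
assert list(pairs) == [(1000.0, 15.0), (850.0, 5.0)]
assert list(pairs) == []  # the iterator is now exhausted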
Example #15
            #For running interactively
            curr_path = './'
        inifilename = curr_path + '/aqum_convert_nc.ini'

    ini = adaq.Inifile(inifilename, setdefaults=True)
    ini_data = ini.get()

    #--------------------------------------------------
    #Get end and start dates. End date must be midnight of current date

    end_datetime = ini_data['end_datetime']
    start_datetime = ini_data['start_datetime']

    #--------------------------------------------------
    # Get model data
    for model, datadir in zip(ini_data['models_list'],
                              ini_data['models_dir_list']):

        print("Getting model data for ", model, " at ",
              datetime.datetime.now())

        #--------------------------------------------------
        # Get a list of pp filenames between the specified times

        if Verbose: print("Generating filenames at ", datetime.datetime.now())

        ppfiles = adaq.PPFiles(datadir,
                               start_datetime,
                               end_datetime,
                               forecast_day=ini_data['forecast_day'])
        filenames = ppfiles.get()
Example #16
import numpy as np
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
import cartopy.feature as cfeature
from matplotlib.transforms import Bbox


def plot_onmap(sitelon, sitelat, sitedata, labels, title):
    '''
    Plot pie charts on a map

    Inputs:
    sitelon - list of longitudes of sites
    sitelat - list of latitudes of sites
    sitedata - 2-d array of PM concentrations at the sites (one row per site)
    labels - list of labels for the pie-chart legend
    title - title for the plot
    '''

    dims = sitedata.shape

    # size of marker in units of lat/lon
    # scaled by total size of the data
    lsize = 7.e-3

    # make a new figure
    plt.figure()

    # set up the axes with cartopy using a Plate Carree projection
    ax = plt.axes(projection=ccrs.PlateCarree())
    fig = plt.gcf()
    ax.coastlines(resolution='50m')
    fig.suptitle(title)

    # Create a feature for countries at 1:50m from Natural Earth
    countries = cfeature.NaturalEarthFeature(category='cultural',
                                             name='admin_0_countries',
                                             scale='50m',
                                             facecolor='none')
    # add countries to the map
    ax.add_feature(countries, edgecolor='gray')

    # choose an extent for the map
    # this extent works well for UK and Ireland
    # leaving space for a legend
    ax.set_extent([-11., 3.5, 49.8, 61.])

    # loop over all the sites with data and plot
    # the pie chart at each
    for x, y, data in zip(sitelon, sitelat, sitedata):

        # calculate the x and y size of the bounding box depending
        # on the total
        piesize = lsize * sum(data)

        # now set up a BBox for the pie chart
        # using bounds so that centred at x, y with
        # a total width of piesize
        bb_data = Bbox.from_bounds(x - piesize / 2., y - piesize / 2., piesize,
                                   piesize)
        # transform these data coordinates to figure coordinates
        disp_coords = ax.transData.transform(bb_data)
        fig_coords = fig.transFigure.inverted().transform(disp_coords)
        # now add axes based on these figure coordinates
        fig.add_axes(Bbox(fig_coords))

        # now do the pie chart on this axes
        # we want the sectors to start at the top and
        # go clockwise (default is to start on X-axis and
        # go anticlockwise - I don't know why!)
        # plt.pie( data,  counterclock=False, startangle=90)
        # unfortunately, the counterclock argument is only
        # supported from matplotlib 1.4 onwards.
        plt.pie(data, startangle=90)

    # finally put a legend on. Anchor in figure co-ordinates
    # Don't want to anchor relative to the last axes
    plt.legend(labels,
               bbox_to_anchor=(0.9, 0.9),
               bbox_transform=plt.gcf().transFigure)

    plt.show()
def plot_windgram(cube):
    '''
    Plot a wind meteogram from a cube.
    Currently assumes that the cube is correctly formatted (i.e.
    contains only wind direction data with two dimensions,
    realization and time). The code also currently assumes
    that there are 8 time steps.

    The code also assumes that the zeroth ensemble member is
    the control and plots this in black.
    '''

    sitelat = [0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25]
    lsize = 5.75

    fig = plt.figure(figsize=[16, 4])
    ax = plt.gca()
    plt.xlim([-3, 45])
    plt.ylim([0, 0.5])
    plt.yticks([])
    plt.xticks([0, 6, 12, 18, 24, 30, 36, 42])
    plt.xlabel('Hours since source start')

    t_coord = cube.coord('time')
    time = t_coord.units.num2date(t_coord.points)
    t_delta = [(t - time[0]).total_seconds()/3600. for t in time]
    t_hours = t_delta[0:48:6]

    for x, y in zip(t_hours, sitelat):
        # calculate the x and y size of the bounding box depending
        # on the total
        piesize = lsize

        # now set up a BBox for the pie chart
        # using bounds so that centred at x, y with
        # a total width of piesize
        bb_data = Bbox.from_bounds(x-piesize/2.,
                                   y-piesize/2., piesize, piesize)
        # transform these data coordinates to figure coordinates
        disp_coords = ax.transData.transform(bb_data)
        fig_coords = fig.transFigure.inverted().transform(disp_coords)
        # now add axes based on these figure coordinates
        ax1 = fig.add_axes(Bbox(fig_coords), projection='polar')

        # now do the polar line plot on this axes
        icube = cube[:, int(x)]
        for ireal, winddir in enumerate(icube.data):
            windrad = winddir * np.pi/180.0
            if ireal == 0:
                ax1.plot([windrad, windrad], [0.0, 1.0],
                         'k', linewidth=2, zorder=4)
            else:
                ax1.plot([windrad, windrad], [0.0, 1.0],
                         'r', linewidth=2)

        ax1.set_theta_zero_location("N")
        ax1.set_theta_direction(-1)
        ax1.set_rticks([])
        ax1.set_xticklabels([])

    plt.show()
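Both functions place inset axes with the same coordinate round-trip: data coordinates -> display pixels -> figure fraction. A minimal sketch of just that step (assumes matplotlib):

import matplotlib.pyplot as plt
from matplotlib.transforms import Bbox

fig, ax = plt.subplots()
bb_data = Bbox.from_bounds(0.2, 0.2, 0.1, 0.1)  # box in data coordinates
disp_coords = ax.transData.transform(bb_data)   # -> display pixels
fig_coords = fig.transFigure.inverted().transform(disp_coords)  # -> figure fraction
inset = fig.add_axes(Bbox(fig_coords))          # inset axes at that position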