Example #1
 def myfunction(d, mx, mn):
     from numpy.ma import maximum, minimum, absolute, greater, count
     try:
         if count(d) == 0: return mx, mn
         mx = float(maximum(mx, float(maximum(d))))
         mn = float(minimum(mn, float(minimum(d))))
     except:
         for i in d:
             mx, mn = myfunction(i, mx, mn)
     return mx, mn
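The single-argument reductions maximum(d) and minimum(d) used above were deprecated in NumPy 1.13 and later removed, so this example only runs as written on older NumPy releases. A rough modern equivalent (a sketch, not the original project's code) keeps the same recursive fallback for ragged input but uses the array methods instead:

import numpy.ma as ma

def minmax_nested(d, mx=-1.0e77, mn=1.0e77):
    """Track the running (max, min) over a possibly nested, possibly masked structure."""
    try:
        arr = ma.asarray(d)
        if ma.count(arr) == 0:
            return mx, mn
        return max(mx, float(arr.max())), min(mn, float(arr.min()))
    except (TypeError, ValueError):
        # ragged input: fall back to recursing over the elements
        for item in d:
            mx, mn = minmax_nested(item, mx, mn)
        return mx, mn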
Example #2
 def myfunction(d,mx,mn):
   from numpy.ma import maximum,minimum,absolute,greater,count
   try:
     if count(d)==0 : return mx,mn
     mx=float(maximum(mx,float(maximum(d))))
     mn=float(minimum(mn,float(minimum(d))))
   except:
     for i in d:
       mx,mn=myfunction(i,mx,mn)
   return mx,mn
Example #3
 def myfunction(d, mx, mn):
     from numpy.ma import maximum, minimum, masked_where, absolute, greater, count
     try:
         d = masked_where(greater(absolute(d), 9.9E19), d)
         if count(d) == 0: return mx, mn
         mx = float(maximum(mx, float(maximum(d))))
         mn = float(minimum(mn, float(minimum(d))))
     except:
         for i in d:
             mx, mn = myfunction(i, mx, mn)
     return mx, mn
Example #4
 def myfunction(d,mx,mn):
   from numpy.ma import maximum,minimum,masked_where,absolute,greater,count
   try:
     d=masked_where(greater(absolute(d),9.9E19),d)
     if count(d)==0 : return mx,mn
     mx=float(maximum(mx,float(maximum(d))))
     mn=float(minimum(mn,float(minimum(d))))
   except:
     for i in d:
       mx,mn=myfunction(i,mx,mn)
   return mx,mn
Example #5
def flux_limiter(
        stage,
        f_old,
        Uf,
        z):
    """Applies the flux (numerical and positivity) limiter to the
    ith nominal flux matrix Uf, shape = (z1.N, z2.N)

    inputs:
    f_old -- (ndarray, dim=2) density from previous time step
    CFL -- (instance) Courant numbers dictating phase space advection
    Uf -- (ndarray, dim=2) the normalized flux (high order or not) used in the CS update

    outputs:
    Uf -- (ndarray, dim=2) final normalized flux for MC originating at prepoints[i,j]
            after numerical and positivity limiter has been applied

    Note: for a 768 x 1536 grid

          the masked implementation here takes about 285 ms
          the looped implementation over all i,j takes 3.85 s for the whole grid

          for a 1536 x 3072 grid

          the masked implementation here takes about 1.44 s
          the looped implementation over all i,j takes 17.9 s for the whole grid

          i.e. the computational savings is at least a factor of 10
    """
    # local masked array (ma) copy of CFL.frac used to leave CFL.frac unchanged
    Uf_ma = ma.array(z.CFL.frac[stage,:,:])

    # mask negative values, keep positives
    Uf_ma[z.CFL.numbers[stage,:,:] < 0] = ma.masked

    # assign the mask to a zero matrix of same size for pairwise ma.minimum/maximum operations below
    zeros = ma.zeros(Uf.shape)
    zeros.mask = Uf_ma.mask

    # operating only on positive values (negatives masked out)
    Uf_pos = ma.minimum(ma.maximum(zeros, Uf), 1.0*f_old)

    # mask positives, keep negatives
    Uf_ma.mask = np.logical_not(Uf_ma.mask)
    zeros.mask = Uf_ma.mask

    #operating only on negative values
    Uf_neg = ma.minimum(ma.maximum(-1.0*f_old, Uf), zeros)

    # combine masked values in a single matrix Uf_final
    Uf = np.where(Uf_neg.mask == False, Uf_neg.data, Uf_pos.data)

    return Uf
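A small self-contained illustration of the same mask-and-recombine pattern (the arrays, signs, and values below are invented for demonstration): positive-moving cells are clipped to [0, f_old], negative-moving cells to [-f_old, 0], and np.where stitches the two limited halves back together.

import numpy as np
import numpy.ma as ma

f_old = np.array([[2.0, 3.0], [1.0, 4.0]])    # previous-step density (>= 0)
Uf    = np.array([[2.5, -0.5], [-3.0, 1.0]])  # unlimited nominal flux
signs = np.array([[1.0, -1.0], [-1.0, 1.0]])  # sign of the Courant numbers

zeros = ma.zeros(Uf.shape)
zeros[signs < 0] = ma.masked                  # mask the negative-flux cells

Uf_pos = ma.minimum(ma.maximum(zeros, Uf), f_old)    # clip positive cells to [0, f_old]

zeros.mask = np.logical_not(zeros.mask)       # flip: now the positive cells are masked
Uf_neg = ma.minimum(ma.maximum(-f_old, Uf), zeros)   # clip negative cells to [-f_old, 0]

Uf_limited = np.where(Uf_neg.mask == False, Uf_neg.data, Uf_pos.data)
print(Uf_limited)                             # limited flux: [[2.0, -0.5], [-1.0, 1.0]]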
Example #6
 def test_testMinMax2(self):
     # Test of minimum, maximum.
     assert_(eq(minimum([1, 2, 3], [4, 0, 9]), [1, 0, 3]))
     assert_(eq(maximum([1, 2, 3], [4, 0, 9]), [4, 2, 9]))
     x = arange(5)
     y = arange(5) - 2
     x[3] = masked
     y[0] = masked
     assert_(eq(minimum(x, y), where(less(x, y), x, y)))
     assert_(eq(maximum(x, y), where(greater(x, y), x, y)))
     assert_(minimum.reduce(x) == 0)
     assert_(maximum.reduce(x) == 4)
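The identity the test relies on can be checked standalone; here is a minimal version with explicit numpy.ma imports in place of the test module's star imports (plain asserts stand in for assert_ and eq):

import numpy as np
import numpy.ma as ma

x = ma.arange(5);      x[3] = ma.masked
y = ma.arange(5) - 2;  y[0] = ma.masked
assert np.all(ma.minimum(x, y) == ma.where(ma.less(x, y), x, y))
assert ma.minimum.reduce(x) == 0 and ma.maximum.reduce(x) == 4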
Example #8
def make_solution(case):
    N, K = case
    increasing = np.array(list(range(N)))
    decreasing = np.array(increasing[::-1])

    # status variables
    free = np.ones(N, dtype=bool)
    left = np.zeros(N, dtype=int)
    right = np.zeros(N, dtype=int)

    # init with initial distances
    left[0:N] = increasing
    right[0:N] = decreasing

    for k in range(K):
        # print("customer {}".format(k))
        # mask with current occupancy status
        left = ma.array(left, mask=~free)
        right = ma.array(right, mask=~free)
        #print("left",left)
        #print("right", right)

        mins = ma.minimum(left, right)
        maxs = ma.maximum(left, right)
        #print("mins", mins)
        #print("maxs", maxs)

        # candidates are all stalls where mins are maximal
        max_mins = mins.max()
        candidates = ma.where(mins == max_mins)[0]
        #print(max_mins, candidates)

        # from those candidates select the one where max is also maximal
        selected = ma.argmax(maxs[candidates])
        selected = candidates[selected]
        #print(selected)

        # occupy stall and update left and right
        free[selected] = False
        if selected > 0:
            # left of selected, right distance has to be updated
            right[0:selected] = ma.minimum(decreasing[-selected:], right[0:selected])
        if selected < N -1:
            # right of selected, left distance has to be updated
            left[selected+1:] = ma.minimum(increasing[:N-selected -1], left[selected+1:])

        if k == K -1:
            return maxs[selected], mins[selected]
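A possible invocation (the case tuple here is made up for illustration; numpy and numpy.ma are assumed to be imported as in the snippet's own module): for N = 4 stalls and K = 2 people, the last person ends up with a farthest free-stall distance of 1 and a nearest distance of 0.

import numpy as np
import numpy.ma as ma

mx, mn = make_solution((4, 2))
print(mx, mn)   # 1 0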
Example #9
    def __init__(self, MetricTable, opts):

        epsilon = 0.0
        if opts.cpu:
            epsilon = 0.01

        # Create empty ratio table
        nprobs = MetricTable.nprobs
        nsolvs = MetricTable.nsolvs
        self.ratios = ma.zeros((nprobs, nsolvs), dtype=numpy.float64)  # numpy.float alias was removed in newer NumPy

        # Compute best relative performance ratios across
        # solvers for each problem
        for prob in range(nprobs):
            metrics  = MetricTable.prob_mets(prob) + epsilon
            best_met = ma.minimum(metrics)
            self.ratios[prob,:] = metrics * (1.0 / best_met)

        # Sort each solvers performance ratios
        for solv in range(nsolvs):
            self.ratios[:,solv] = ma.sort(self.ratios[:,solv])

        # Compute largest ratio and use to replace failure entries
        self.maxrat = ma.maximum(self.ratios)
        self.ratios = ma.filled(self.ratios, 10 * self.maxrat)
Example #10
    def __init__(self, MetricTable):

        # Create empty ratio table
        nprobs = MetricTable.nprobs
        nsolvs = MetricTable.nsolvs
        self.ratios = ma.masked_array(1.0 * ma.zeros((nprobs + 1, nsolvs)))

        # Compute best relative performance ratios across
        # solvers for each problem
        for prob in range(nprobs):
            metrics = MetricTable.prob_mets(prob)
            best_met = ma.minimum(metrics)
            if (ma.count(metrics) == nsolvs
                    and ma.maximum(metrics) <= opts.minlimit):
                self.ratios[prob + 1, :] = 1.0
            else:
                self.ratios[prob + 1, :] = metrics * (1.0 / best_met)

        # Sort each solvers performance ratios
        for solv in range(nsolvs):
            self.ratios[:, solv] = ma.sort(self.ratios[:, solv])

        # Compute largest ratio and use to replace failures entries
        self.maxrat = ma.maximum(self.ratios)
        self.ratios = ma.filled(self.ratios, 1.01 * self.maxrat)
Example #11
    def _breakList(self, inList, index, parameter):
        par = float(parameter)

        array = N.empty(shape=[len(inList),],dtype=N.float64)
        i = 0
        for parameters in inList:
            array[i] = parameters[index]
            i = i + 1 

        greater = MA.masked_less(array, par)
        less = MA.masked_greater(array, par)

        upper = MA.minimum(greater)
        lower = MA.maximum(less)

        upperArray = MA.masked_inside(array, par, upper)
        lowerArray = MA.masked_inside(array, lower, par)

        upperList = []
        lowerList = []
        i = 0
        for parameters in inList:
            if upperArray.mask[i]:
                upperList.append(parameters)
            if lowerArray.mask[i]:
                lowerList.append(parameters)
            i = i + 1

        return upperList, lowerList
Example #12
    def __init__(self, MetricTable):

        # Create empty ratio table
        nprobs = MetricTable.nprobs
        nsolvs = MetricTable.nsolvs
        self.ratios = ma.masked_array(1.0 * ma.zeros((nprobs+1, nsolvs)))

        # Compute best relative performance ratios across
        # solvers for each problem
        for prob in range(nprobs):
            metrics  = MetricTable.prob_mets(prob)
            best_met = ma.minimum(metrics)
            if (ma.count(metrics)==nsolvs and
                ma.maximum(metrics)<=opts.minlimit):
                self.ratios[prob+1,:] = 1.0
            else:
                self.ratios[prob+1,:] = metrics * (1.0 / best_met)

        # Sort each solvers performance ratios
        for solv in range(nsolvs):
            self.ratios[:,solv] = ma.sort(self.ratios[:,solv])

        # Compute largest ratio and use to replace failures entries
        self.maxrat = ma.maximum(self.ratios)
        self.ratios = ma.filled(self.ratios, 1.01 * self.maxrat)
Example #13
    def test_testMinMax(self):
        # Test minimum and maximum.
        (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
        xr = np.ravel(x)  # max doesn't work if shaped
        xmr = ravel(xm)

        # true because of careful selection of data
        self.assertTrue(eq(max(xr), maximum(xmr)))
        self.assertTrue(eq(min(xr), minimum(xmr)))
Example #14
def ln_shifted_auto(v):
    """If 'v' has values <= 0, it is shifted in a way that min(v)=1 before doing log. 
    Otherwise the log is done on the original 'v'."""
    vmin = ma.minimum(v)
    if vmin <= 0:
        values = v - vmin + 1
    else:
        values = v
    return ma.log(values)
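On NumPy versions that no longer accept the single-argument ma.minimum(v), an equivalent shift-then-log can be written with the array method; this is a sketch under that assumption, not the original code:

import numpy.ma as ma

def ln_shifted_auto_v2(v):
    """Shift v so that min(v) becomes 1 before taking the log, only when min(v) <= 0."""
    v = ma.asarray(v)
    vmin = float(v.min())
    return ma.log(v - vmin + 1 if vmin <= 0 else v)

print(ln_shifted_auto_v2([0.0, 1.0, 3.0]))   # [log(1), log(2), log(4)]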
Example #15
 def _train(self, blob_generator):
     for blob in blob_generator:
         if self.min is None or self.max is None:
             self.min = blob.data
             self.max = blob.data
         else:
             self.min = minimum(self.min, blob.data)
             self.max = maximum(self.max, blob.data)
         yield blob
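A rough, self-contained illustration of the running element-wise min/max accumulation above (the Blob type and the data are stand-ins, not from the original pipeline):

from dataclasses import dataclass
import numpy as np
from numpy import minimum, maximum

@dataclass
class Blob:
    data: np.ndarray

running_min = running_max = None
for blob in (Blob(np.array([1.0, 5.0])), Blob(np.array([3.0, 2.0]))):
    if running_min is None or running_max is None:
        running_min = blob.data
        running_max = blob.data
    else:
        running_min = minimum(running_min, blob.data)
        running_max = maximum(running_max, blob.data)
print(running_min, running_max)   # [1. 2.] [3. 5.]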
Example #18
 def _contour_args(self, *args):
     if self.filled: fn = 'contourf'
     else:           fn = 'contour'
     Nargs = len(args)
     if Nargs <= 2:
         z = ma.asarray(args[0], dtype=np.float64)
         x, y = self._initialize_x_y(z)
     elif Nargs <=4:
         x,y,z = self._check_xyz(args[:3])
     else:
         raise TypeError("Too many arguments to %s; see help(%s)" % (fn,fn))
     z = ma.masked_invalid(z, copy=False)
     self.zmax = ma.maximum(z)
     self.zmin = ma.minimum(z)
     if self.logscale and self.zmin <= 0:
         z = ma.masked_where(z <= 0, z)
         warnings.warn('Log scale: values of z <=0 have been masked')
         self.zmin = z.min()
     self._auto = False
     if self.levels is None:
         if Nargs == 1 or Nargs == 3:
             lev = self._autolev(z, 7)
         else:   # 2 or 4 args
             level_arg = args[-1]
             try:
                 if type(level_arg) == int:
                     lev = self._autolev(z, level_arg)
                 else:
                     lev = np.asarray(level_arg).astype(np.float64)
             except:
                 raise TypeError(
                     "Last %s arg must give levels; see help(%s)" % (fn,fn))
         if self.filled and len(lev) < 2:
             raise ValueError("Filled contours require at least 2 levels.")
         self.levels = lev
     self._levels = list(self.levels)
     if self.extend in ('both', 'min'):
         self._levels.insert(0, min(self.levels[0],self.zmin) - 1)
     if self.extend in ('both', 'max'):
         self._levels.append(max(self.levels[-1],self.zmax) + 1)
     self._levels = np.asarray(self._levels)
     self.vmin = np.amin(self.levels)  # alternative would be self.layers
     self.vmax = np.amax(self.levels)
     if self.extend in ('both', 'min'):
         self.vmin = 2 * self.levels[0] - self.levels[1]
     if self.extend in ('both', 'max'):
         self.vmax = 2 * self.levels[-1] - self.levels[-2]
     self.layers = self._levels # contour: a line is a thin layer
     if self.filled:
         self.layers = 0.5 * (self._levels[:-1] + self._levels[1:])
         if self.extend in ('both', 'min'):
             self.layers[0] = 0.5 * (self.vmin + self._levels[1])
         if self.extend in ('both', 'max'):
             self.layers[-1] = 0.5 * (self.vmax + self._levels[-2])
     return (x, y, z)
Example #19
 def test_testScalarArithmetic(self):
     xm = array(0, mask=1)
     #TODO FIXME: Find out what the following raises a warning in r8247
     with np.errstate(divide='ignore'):
         assert_((1 / array(0)).mask)
     assert_((1 + xm).mask)
     assert_((-xm).mask)
     assert_((-xm).mask)
     assert_(maximum(xm, xm).mask)
     assert_(minimum(xm, xm).mask)
     assert_(xm.filled().dtype is xm._data.dtype)
     x = array(0, mask=0)
     assert_(x.filled() == x._data)
     assert_equal(str(xm), str(masked_print_option))
Example #21
 def _process_args(self, *args, **kwargs):
     """
     Process args and kwargs.
     """
     if isinstance(args[0], QuadContourSet):
         C = args[0].Cntr
         if self.levels is None:
             self.levels = args[0].levels
         self.zmin = args[0].zmin
         self.zmax = args[0].zmax
     else:
         x, y, z = self._contour_args(args, kwargs)
         x0 = ma.minimum(x)
         x1 = ma.maximum(x)
         y0 = ma.minimum(y)
         y1 = ma.maximum(y)
         self.ax.update_datalim([(x0,y0), (x1,y1)])
         self.ax.autoscale_view(tight=True)
         _mask = ma.getmask(z)
         if _mask is ma.nomask:
             _mask = None
         C = _cntr.Cntr(x, y, z.filled(), _mask)
     self.Cntr = C
Example #22
 def _contour_args(self, args, kwargs):
     if self.filled: fn = 'contourf'
     else:           fn = 'contour'
     Nargs = len(args)
     if Nargs <= 2:
         z = ma.asarray(args[0], dtype=np.float64)
         x, y = self._initialize_x_y(z)
         args = args[1:]
     elif Nargs <=4:
         x,y,z = self._check_xyz(args[:3], kwargs)
         args = args[3:]
     else:
         raise TypeError("Too many arguments to %s; see help(%s)" % (fn,fn))
     z = ma.masked_invalid(z, copy=False)
     self.zmax = ma.maximum(z)
     self.zmin = ma.minimum(z)
     if self.logscale and self.zmin <= 0:
         z = ma.masked_where(z <= 0, z)
         warnings.warn('Log scale: values of z <= 0 have been masked')
         self.zmin = z.min()
     self._contour_level_args(z, args)
     return (x, y, z)
Example #23
 def _contour_args(self, *args):
     if self.filled: fn = 'contourf'
     else: fn = 'contour'
     Nargs = len(args)
     if Nargs <= 2:
         z = ma.asarray(args[0], dtype=np.float64)
         x, y = self._initialize_x_y(z)
     elif Nargs <= 4:
         x, y, z = self._check_xyz(args[:3])
     else:
         raise TypeError("Too many arguments to %s; see help(%s)" %
                         (fn, fn))
     z = ma.masked_invalid(z, copy=False)
     self.zmax = ma.maximum(z)
     self.zmin = ma.minimum(z)
     if self.logscale and self.zmin <= 0:
         z = ma.masked_where(z <= 0, z)
         warnings.warn('Log scale: values of z <=0 have been masked')
         self.zmin = z.min()
     self._auto = False
     if self.levels is None:
         if Nargs == 1 or Nargs == 3:
             lev = self._autolev(z, 7)
         else:  # 2 or 4 args
             level_arg = args[-1]
             try:
                 if type(level_arg) == int:
                     lev = self._autolev(z, level_arg)
                 else:
                     lev = np.asarray(level_arg).astype(np.float64)
             except:
                 raise TypeError(
                     "Last %s arg must give levels; see help(%s)" %
                     (fn, fn))
         if self.filled and len(lev) < 2:
             raise ValueError("Filled contours require at least 2 levels.")
         self.levels = lev
     return (x, y, z)
Example #24
 def _contour_args(self, *args):
     if self.filled: fn = 'contourf'
     else:           fn = 'contour'
     Nargs = len(args)
     if Nargs <= 2:
         z = ma.asarray(args[0], dtype=np.float64)
         x, y = self._initialize_x_y(z)
     elif Nargs <=4:
         x,y,z = self._check_xyz(args[:3])
     else:
         raise TypeError("Too many arguments to %s; see help(%s)" % (fn,fn))
     z = ma.masked_invalid(z, copy=False)
     self.zmax = ma.maximum(z)
     self.zmin = ma.minimum(z)
     if self.logscale and self.zmin <= 0:
         z = ma.masked_where(z <= 0, z)
         warnings.warn('Log scale: values of z <=0 have been masked')
         self.zmin = z.min()
     self._auto = False
     if self.levels is None:
         if Nargs == 1 or Nargs == 3:
             lev = self._autolev(z, 7)
         else:   # 2 or 4 args
             level_arg = args[-1]
             try:
                 if type(level_arg) == int:
                     lev = self._autolev(z, level_arg)
                 else:
                     lev = np.asarray(level_arg).astype(np.float64)
             except:
                 raise TypeError(
                     "Last %s arg must give levels; see help(%s)" % (fn,fn))
         if self.filled and len(lev) < 2:
             raise ValueError("Filled contours require at least 2 levels.")
         self.levels = lev
     return (x, y, z)
Example #25
def comp_g(
    dert__
):  # cross-comp of g in 2x2 kernels, between derts in ma.stack dert__

    # initialize return variable
    new_dert__ = ma.zeros(
        (dert__.shape[0], dert__.shape[1] - 1, dert__.shape[2] - 1))
    new_dert__.mask = True  # initialize mask
    ig__, idy__, idx__, gg__, dgy__, dgx__, mg__ = new_dert__  # assign 'views'. Use [:] to update views

    # Unpack relevant params
    g__, dy__, dx__ = dert__[[3, 4, 5]]  # g, dy, dx -> local i, idy, idx
    g__.data[np.where(
        g__.data == 0
    )] = 1  # replace 0 values with 1 to avoid error, not needed in high-g blobs?
    ''' 
    for all operations below: only mask kernels with more than one masked dert 
    '''
    majority_mask = (
        g__[:-1, :-1].mask.astype(int) + g__[:-1, 1:].mask.astype(int) +
        g__[1:, 1:].mask.astype(int) + g__[1:, :-1].mask.astype(int)) > 1
    g__.mask = dy__.mask = dx__.mask = majority_mask

    g0__, dy0__, dx0__ = g__[:-1, :
                             -1].data, dy__[:-1, :
                                            -1].data, dx__[:-1, :
                                                           -1].data  # top left
    g1__, dy1__, dx1__ = g__[:-1,
                             1:].data, dy__[:-1,
                                            1:].data, dx__[:-1,
                                                           1:].data  # top right
    g2__, dy2__, dx2__ = g__[1:, 1:].data, dy__[1:, 1:].data, dx__[
        1:, 1:].data  # bottom right
    g3__, dy3__, dx3__ = g__[1:, :-1].data, dy__[1:, :-1].data, dx__[
        1:, :-1].data  # bottom left

    sin0__ = dy0__ / g0__
    cos0__ = dx0__ / g0__
    sin1__ = dy1__ / g1__
    cos1__ = dx1__ / g1__
    sin2__ = dy2__ / g2__
    cos2__ = dx2__ / g2__
    sin3__ = dy3__ / g3__
    cos3__ = dx3__ / g3__
    '''
    cosine of difference between diagonally opposite angles, in vector representation
    print(cos_da1__.shape, type(cos_da1__))
    '''
    cos_da0__ = (cos2__ * cos0__) + (sin2__ * sin0__
                                     )  # top left to bottom right
    cos_da1__ = (cos3__ * cos1__) + (sin3__ * sin1__
                                     )  # top right to bottom left

    dgy__[:] = ((g3__ + g2__) - (g0__ * cos_da0__ + g1__ * cos_da0__))
    # y-decomposed cosine difference between gs
    dgx__[:] = ((g1__ + g2__) - (g0__ * cos_da0__ + g3__ * cos_da1__))
    # x-decomposed cosine difference between gs

    gg__[:] = ma.hypot(dgy__, dgx__)  # gradient of gradient

    mg0__ = ma.minimum(g0__, g2__) * (cos_da1__ + 1)  # +1 to make all positive
    mg1__ = ma.minimum(g1__, g3__) * (cos_da1__ + 1)
    mg__[:] = mg0__ + mg1__  # match of gradient

    ig__[:] = g__[:-1, :
                  -1]  # remove last row and column to align with derived params
    idy__[:] = dy__[:-1, :-1]
    idx__[:] = dx__[:-1, :-1]  # -> idy, idx to compute cos for comp rg
    # unnecessary?:
    gg__.mask = mg__.mask = dgy__.mask = dgx__.mask = majority_mask
    '''
    next comp_rg will use g, dy, dx
    next comp_gg will use gg, dgy, dgx
    '''
    return new_dert__  # new_dert__ has been updated along with 'view' arrays: ig__, idy__, idx__, gg__, dgy__, dgx__, mg__
Example #26
 def autoscale_None(self, A):
     ' autoscale only None-valued vmin or vmax'
     if self.vmin is None: self.vmin = ma.minimum(A)
     if self.vmax is None: self.vmax = ma.maximum(A)
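The single-argument ma.minimum(A) / ma.maximum(A) calls above are the legacy whole-array reductions; on current NumPy the same autoscaling is more naturally written with the array methods. A minimal sketch, assuming a simple stand-in object with vmin/vmax attributes (not the real normalizer class):

import numpy.ma as ma

class Norm:          # hypothetical stand-in with just the two limits
    vmin = None
    vmax = None

def autoscale_none(norm, A):
    """Fill in only the limits that are still None, from the data A."""
    A = ma.masked_invalid(A)
    if norm.vmin is None:
        norm.vmin = float(A.min())
    if norm.vmax is None:
        norm.vmax = float(A.max())

norm = Norm()
autoscale_none(norm, [[1.0, float('nan')], [4.0, -2.0]])
print(norm.vmin, norm.vmax)   # -2.0 4.0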
Example #27
 def __init__(self, ax, *args, **kwargs):
     """
     Draw contour lines or filled regions, depending on
     whether keyword arg 'filled' is False (default) or True.
     The first argument of the initializer must be an axes
     object.  The remaining arguments and keyword arguments
     are described in ContourSet.contour_doc.
     """
     self.ax = ax
     self.levels = kwargs.get('levels', None)
     self.filled = kwargs.get('filled', False)
     self.linewidths = kwargs.get('linewidths', None)
     self.linestyles = kwargs.get('linestyles', None)
     self.alpha = kwargs.get('alpha', 1.0)
     self.origin = kwargs.get('origin', None)
     self.extent = kwargs.get('extent', None)
     cmap = kwargs.get('cmap', None)
     self.colors = kwargs.get('colors', None)
     norm = kwargs.get('norm', None)
     self.extend = kwargs.get('extend', 'neither')
     self.antialiased = kwargs.get('antialiased', True)
     self.nchunk = kwargs.get('nchunk', 0)
     self.locator = kwargs.get('locator', None)
     if (isinstance(norm, colors.LogNorm)
             or isinstance(self.locator, ticker.LogLocator)):
         self.logscale = True
         if norm is None:
             norm = colors.LogNorm()
          if self.extend != 'neither':
             raise ValueError('extend kwarg does not work yet with log scale')
     else:
         self.logscale = False
     if self.origin is not None: assert(self.origin in
                                         ['lower', 'upper', 'image'])
     if self.extent is not None: assert(len(self.extent) == 4)
     if cmap is not None: assert(isinstance(cmap, colors.Colormap))
     if self.colors is not None and cmap is not None:
         raise ValueError('Either colors or cmap must be None')
     if self.origin == 'image': self.origin = mpl.rcParams['image.origin']
     if isinstance(args[0], ContourSet):
         C = args[0].Cntr
         if self.levels is None:
             self.levels = args[0].levels
     else:
         x, y, z = self._contour_args(*args)
         x0 = ma.minimum(x)
         x1 = ma.maximum(x)
         y0 = ma.minimum(y)
         y1 = ma.maximum(y)
         self.ax.update_datalim([(x0,y0), (x1,y1)])
         self.ax.autoscale_view()
         _mask = ma.getmask(z)
         if _mask is ma.nomask:
             _mask = None
         C = _cntr.Cntr(x, y, z.filled(), _mask)
     self.Cntr = C
     self._process_levels()
     if self.colors is not None:
         cmap = colors.ListedColormap(self.colors, N=len(self.layers))
     if self.filled:
         self.collections = cbook.silent_list('collections.PathCollection')
     else:
         self.collections = cbook.silent_list('collections.LineCollection')
     self.labelTexts = []
     self.labelCValues = []
     kw = {'cmap': cmap}
     if norm is not None:
         kw['norm'] = norm
     cm.ScalarMappable.__init__(self, **kw) # sets self.cmap;
     self._process_colors()
     if self.filled:
         if self.linewidths is not None:
             warnings.warn('linewidths is ignored by contourf')
         lowers = self._levels[:-1]
         uppers = self._levels[1:]
         for level, level_upper in zip(lowers, uppers):
             nlist = C.trace(level, level_upper, nchunk = self.nchunk)
             nseg = len(nlist)//2
             segs = nlist[:nseg]
             kinds = nlist[nseg:]
             paths = self._make_paths(segs, kinds)
             col = collections.PathCollection(paths,
                                  antialiaseds = (self.antialiased,),
                                  edgecolors= 'none',
                                  alpha=self.alpha)
             self.ax.add_collection(col)
             self.collections.append(col)
     else:
         tlinewidths = self._process_linewidths()
         self.tlinewidths = tlinewidths
         tlinestyles = self._process_linestyles()
         for level, width, lstyle in zip(self.levels, tlinewidths, tlinestyles):
             nlist = C.trace(level)
             nseg = len(nlist)//2
             segs = nlist[:nseg]
             col = collections.LineCollection(segs,
                                  linewidths = width,
                                  linestyle = lstyle,
                                  alpha=self.alpha)
             col.set_label('_nolegend_')
             self.ax.add_collection(col, False)
             self.collections.append(col)
     self.changed() # set the colors
Example #28
def flux_limiter(
        stage,
        f_old,
        Uf,
        z
        ):

    """Applies the flux (numerical and positivity) limiter to the
    ith nominal flux matrix Uf, shape = (z1.N, z2.N)

    inputs:
    f_old -- (ndarray, dim=2) density from previous time step
    CFL -- (instance) Courant numbers dictating phase space advection
    Uf -- (ndarray, dim=2) the normalized flux (high order or not) used in the CS update

    outputs:
    Uf -- (ndarray, dim=2) final normalized flux for MC originating at prepoints[i,j]
            after numerical and positivity limiter has been applied

    Note: for a 768 x 1536 grid

          the masked implementation here takes about 285 ms
          the looped implementation over all i,j takes 3.85 s for the whole grid

          for a 1536 x 3072 grid

          the masked implementation here takes about 1.44 s
          the looped implementation over all i,j takes 17.9 s for the whole grid

          i.e. the computational savings is at least a factor of 10
    """
    # note that before we apply the limiter, there is no guarantee that sign(Uf) == sign(z.CFL.numbers)
    # this is one of the functions of the limiter, to make these signs agree if the unlimited value of Uf
    # has been unphysically flipped in sign. Recall that since f >= 0 by initial condition, sign(Uf) == sign(U)
    # for all points f > 0, and where f = 0 then Uf = 0.0 and its role in remapping is to add zero contribution.
    #
    # here, U is the high order correction to z.CFL.frac, i.e.
    #
    #     U = z.CFL.frac + high order terms
    #
    # there isn't a restriction on the size of each high order term or the sign. It is possible for
    # the correction to cause sign(U) != sign(z.CFL.frac) which is not physical.
    # Put another way, this is unphysical because if the packet f[i] -> f[i+k] lies between grid points i+k and
    # i+k+1, then after the high order correction df the quantity (f[i] + Uf[i]) should also lie between grid
    # points i+k and i+k+1. If U suffers a sign reversal from the high order term contributions, the density
    # packet is mapped outside the interval we know, to zeroth order, it must lie in (z.CFL.frac gives the
    # remap fraction to zeroth order).

    # Finally, note that z.CFL.frac and z.CFL.number by construction have the same sign. We often prefer doing
    # sign checks on z.CFL.numbers, but this is not of consequence.

    # assign the mask to a zero matrix of same size for pairwise ma.minimum/maximum operations below
    zeros = ma.zeros(Uf.shape)
    zeros[z.CFL.numbers[stage,:,:] < 0] = ma.masked

    # operating only on positive values (negatives masked out), assert the limiter criteria.
    # note: we let NumPy make some of the decisions here (at the cost of slight efficiency, but with the benefit
    # of not storing local copies of the same arrays). Uf and f_old are plain ndarrays (not masked arrays);
    # however, having one masked operand (here, the zeros array) is sufficient for the ma.minimum and
    # ma.maximum operations below to act only on the unmasked entries of zeros, so the pairwise minima and
    # maxima are computed only where the mask permits.
    Uf_pos = ma.minimum(ma.maximum(zeros, Uf), 1.0*f_old)

    # mask positives, keep negatives
    zeros.mask = np.logical_not(zeros.mask)

    # operating only on negative values; see note just before U_pos calculation regarding how this operation
    # plays out given f_old and Uf are unmasked and zeros is masked.
    Uf_neg = ma.minimum(ma.maximum(-1.0*f_old, Uf), zeros)

    # combine masked data that has been limited by the operations above
    # into a final Uf object to return.
    # Here, the np.where call reads: "where Uf_neg is unmasked [i.e. the flux is negative],
    # insert the limited negative data at those entries of Uf; wherever Uf_neg is masked
    # [i.e. the flux is positive], insert the limited positive data at those
    # [i,j] locations of Uf".
    Uf = np.where(Uf_neg.mask == False, Uf_neg.data, Uf_pos.data)

    return Uf
Example #29
    def get_development_constrained_capacity(self,
            constraints,
            dataset_pool,
            index = None,
            recompute_flag = False,
            maximum_commercial_development_capacity = 4000000, ### TODO: Remove this default value and require this parameter.
            maximum_industrial_development_capacity = 1200000, ### TODO: Remove this default value and require this parameter.
            maximum_residential_development_capacity = 2800, ### TODO: Remove this default value and require this parameter.
            ):
        """
        Truncate the development capacity to the range
        min <= development capacity <= max, as defined by the given constraints.
        """
        if (self.development_capacity is not None) and (not recompute_flag):
            if (index is not None) and (index.size == self.development_capacity["index"].size) and \
                    alltrue(self.development_capacity["index"] == index):
                return self.development_capacity
        constraints.load_dataset_if_not_loaded()
        attributes = remove_elements_with_matched_prefix_from_list(
            constraints.get_attribute_names(), ["min", "max"])
        attributes = remove_all(attributes, constraints.get_id_name()[0])
        attributes_with_prefix = map(lambda attr: "urbansim.gridcell." +
                                        attr, attributes)
        self.compute_variables(attributes_with_prefix, dataset_pool=dataset_pool)
        if index is None:
            index = arange(self.size())
        development_constraints_array = ones((constraints.size(),index.size), dtype=bool8)
        for attr in attributes:
            values = self.get_attribute_by_index(attr, index)
            constr = reshape(constraints.get_attribute(attr), (constraints.size(),1))
            constr = repeat(constr, index.size, axis=1)
            tmp = logical_or(constr == values, constr < 0)
            development_constraints_array = logical_and(development_constraints_array, tmp)

        self.development_capacity = {
            "commercial":zeros((index.size,2)),
            "residential":zeros((index.size,2)), 
            "industrial":zeros((index.size,2)),
            "index": index,
        }

        #reasonable maxima
        self.development_capacity["commercial"][:,1] = maximum_commercial_development_capacity
        self.development_capacity["industrial"][:,1] = maximum_industrial_development_capacity
        self.development_capacity["residential"][:,1] = maximum_residential_development_capacity
        for iconstr in range(constraints.size()):
            w = where(development_constraints_array[iconstr,:])[0]
            if w.size > 0:
                self.development_capacity["commercial"][w,0] = \
                    maximum(self.development_capacity["commercial"][w,0], \
                        constraints.get_attribute_by_index("min_commercial_sqft", iconstr))
                self.development_capacity["commercial"][w,1] = \
                    ma.minimum(self.development_capacity["commercial"][w,1], \
                        constraints.get_attribute_by_index("max_commercial_sqft", iconstr))
                self.development_capacity["industrial"][w,0] = \
                    maximum(self.development_capacity["industrial"][w,0], \
                        constraints.get_attribute_by_index("min_industrial_sqft", iconstr))
                self.development_capacity["industrial"][w,1] = \
                    ma.minimum(self.development_capacity["industrial"][w,1], \
                        constraints.get_attribute_by_index("max_industrial_sqft", iconstr))
                self.development_capacity["residential"][w,0] = \
                    maximum(self.development_capacity["residential"][w,0], \
                        constraints.get_attribute_by_index("min_units", iconstr))
                self.development_capacity["residential"][w,1] = \
                    ma.minimum(self.development_capacity["residential"][w,1], \
                        constraints.get_attribute_by_index("max_units", iconstr))

        return self.development_capacity
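The truncation pattern used above, shown in isolation with toy values (all names and numbers here are illustrative only): the lower bound of each selected row is raised with maximum() and the upper bound lowered with ma.minimum(), both element-wise.

import numpy as np
import numpy.ma as ma

capacity = np.zeros((3, 2))        # per-cell [min, max] development capacity (toy data)
capacity[:, 1] = 4000000.0         # start from a generous ceiling
w = np.array([0, 2])               # rows matched by one constraint
capacity[w, 0] = np.maximum(capacity[w, 0], 250.0)     # raise the floor
capacity[w, 1] = ma.minimum(capacity[w, 1], 10000.0)   # lower the ceiling
print(capacity)                    # rows 0 and 2 clamped to [250, 10000]; row 1 untouched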
Example #30
    def specify(self,slab,axes,specification,confined_by,aux):
        ''' First part: confine the slab within a Domain wide enough to do the exact in post'''
        import string,copy
        from numpy.ma import minimum,maximum
        # myconfined is for later, we can't confine a dimension twice with an argument plus a keyword or 2 keywords
        myconfined=[None]*len(axes)
        self.aux=copy.copy(specification)
        # First look at the arguments (i.e not keywords) and confine the dimensions
        # in the order of the arguments
        for i in range(len(self.args)):
            if confined_by[i] is None :  # Check it hasn't been confined by somebody else
                myconfined[i]=1  # dim confined by argument list
                confined_by[i]=self # for cdms I want to confine this dimension
                self.aux[i]=specs=list(self.args[i]) # How do we want to confine this dim ?
                if type(specs)==type(slice(0)):
                    specification[i]=specs  # If it's a slicing nothing to do
                else: # But if it's not...
                    if specs[0] is None:
                        tmp=axes[i].getBounds()
                        if tmp is None:
                            raise ValueError('Region error, axis:'+axes[i].id+' has no bounds')
                        specs[0]=minimum(minimum(tmp[0],tmp[-1]))
                    if specs[1] is None:
                        tmp=axes[i].getBounds()
                        if tmp is None:
                            raise ValueError('Region error, axis:'+axes[i].id+' has no bounds')
                        specs[1]=maximum(maximum(tmp[0],tmp[-1]))
                    if axes[i].isTime(): # Time is as always "Special"
                        import cdtime
                        tc=type(cdtime.comptime(0))  # component time type
                        tr=type(cdtime.reltime(0,'months since 0'))  # relative time type
                        t=type(specs[0]) # my first spec type
                        if t==type(''): #if my first spec is passed as a string
                            specs[0]=cdtime.s2r(specs[0],axes[i].units)
                        elif t==tc or t==tr: #if my first spec is passed as a cdtime object
                            specs[0]=cdtime.torel(specs[0],axes[i].units)
                        else: # If not it has to be that the users knows the time values in the axis
                            pass
                        t=type(specs[1]) # my second spec type
                        if t==type(''): #if my second spec is passed as a string
                            specs[1]=cdtime.s2r(specs[1],axes[i].units)
                        elif t==tc or t==tr: #if my second spec is passed as a cdtime object
                            specs[1]=cdtime.torel(specs[1],axes[i].units)
                    sp=[specs[0],specs[1],'oob']  # Now retrieve the values wide enough for the exact
                    specification[i]=sp  # sets the specifications
            else:
                return 1
        for kw in self.kargs.keys():
            axis=None
            for i in range(len(axes)):
                if axes[i].id==kw : axis=i
            if axis is None:
                if kw=='time' :
                    for i in range(len(axes)):
                        if axes[i].isTime() : axis=i
                elif kw=='level' :
                    for i in range(len(axes)):
                        if axes[i].isLevel() : axis=i
                elif kw=='longitude' :
                    for i in range(len(axes)):
                        if axes[i].isLongitude() : axis=i
                elif kw=='latitude' :
                    for i in range(len(axes)):
                        if axes[i].isLatitude() : axis=i
                elif not kw in ['exact','atol','rtol']: # keyword not a recognised keyword or dimension name
                    raise ValueError('Error, keyword: '+kw+' not recognized')
            # At this point, if axis is None:
            # we are dealing with a keyword for the selector
            # so we'll skip it
            if not axis is None : 
                if confined_by[axis] is None:
                    confined_by[axis]=self
                    myconfined[axis]=1
                    self.aux[axis]=specs=list(self.kargs[kw])
                    if type(specs)!=type(slice(0)):
                        if specs[0] is None:
                            tmp=axes[axis].getBounds()
                            if tmp is None:
                                raise ValueError('Region error, axis:'+axes[axis].id+' has no bounds')
                            specs[0]=minimum(minimum(tmp[0],tmp[-1]))
                        if specs[1] is None:
                            tmp=axes[axis].getBounds()
                            if tmp is None:
                                raise ValueError('Region error, axis:'+axes[axis].id+' has no bounds')
                            specs[1]=maximum(maximum(tmp[0],tmp[-1]))
                        if axes[axis].isTime():
                            import cdtime
                            tc=type(cdtime.comptime(0))
                            tr=type(cdtime.reltime(0,'months since 0'))
                            t=type(specs[0])
                            if t==type(''):
                                specs[0]=cdtime.s2r(specs[0],axes[i].units)
                            elif t==tc or t==tr:
                                specs[0]=cdtime.torel(specs[0],axes[i].units)
                            t=type(specs[1])
                            if t==type(''):
                                specs[1]=cdtime.s2r(specs[1],axes[i].units)
                            elif t==tc or t==tr:
                                specs[1]=cdtime.torel(specs[1],axes[i].units)
                        sp=[specs[0],specs[1],'oob']
                        specification[axis]=sp
                    else:
                        specification[axis]=specs

                else:
                    if myconfined[axis]==1:
                        raise ValueError('Error you are attempting to set the axis: '+str(axes[axis].id)+' more than once')
                    else:
                        return 1
        return 0
Example #31
    def _contour_args(self, *args):
        if self.filled: fn = 'contourf'
        else:           fn = 'contour'
        Nargs = len(args)
        if Nargs <= 2:
            z = ma.asarray(args[0], dtype=np.float64)
            x, y = self._initialize_x_y(z)
        elif Nargs <=4:
            x,y,z = self._check_xyz(args[:3])
        else:
            raise TypeError("Too many arguments to {0!s}; see help({1!s})".format(fn, fn))
        self.zmax = ma.maximum(z)
        self.zmin = ma.minimum(z)
        if self.logscale and self.zmin <= 0:
            z = ma.masked_where(z <= 0, z)
            warnings.warn('Log scale: values of z <=0 have been masked')
            self.zmin = z.min()
        self._auto = False
        if self.levels is None:
            if Nargs == 1 or Nargs == 3:
                lev = self._autolev(z, 7)
            else:   # 2 or 4 args
                level_arg = args[-1]
                try:
                    if type(level_arg) == int:
                        lev = self._autolev(z, level_arg)
                    else:
                        lev = np.asarray(level_arg).astype(np.float64)
                except:
                    raise TypeError(
                        "Last {0!s} arg must give levels; see help({1!s})".format(fn, fn))
            if self.filled and len(lev) < 2:
                raise ValueError("Filled contours require at least 2 levels.")
            # Workaround for cntr.c bug wrt masked interior regions:
            #if filled:
            #    z = ma.masked_array(z.filled(-1e38))
            # It's not clear this is any better than the original bug.
            self.levels = lev
        #if self._auto and self.extend in ('both', 'min', 'max'):
        #    raise TypeError("Auto level selection is inconsistent "
        #                             + "with use of 'extend' kwarg")
        self._levels = list(self.levels)
        if self.extend in ('both', 'min'):
            self._levels.insert(0, min(self.levels[0],self.zmin) - 1)
        if self.extend in ('both', 'max'):
            self._levels.append(max(self.levels[-1],self.zmax) + 1)
        self._levels = np.asarray(self._levels)
        self.vmin = np.amin(self.levels)  # alternative would be self.layers
        self.vmax = np.amax(self.levels)
        if self.extend in ('both', 'min'):
            self.vmin = 2 * self.levels[0] - self.levels[1]
        if self.extend in ('both', 'max'):
            self.vmax = 2 * self.levels[-1] - self.levels[-2]
        self.layers = self._levels # contour: a line is a thin layer
        if self.filled:
            self.layers = 0.5 * (self._levels[:-1] + self._levels[1:])
            if self.extend in ('both', 'min'):
                self.layers[0] = 0.5 * (self.vmin + self._levels[1])
            if self.extend in ('both', 'max'):
                self.layers[-1] = 0.5 * (self.vmax + self._levels[-2])

        return (x, y, z)
Example #32
 def autoscale(self, A):
     '''
     Set vmin, vmax to min, max of A.
     '''
     self.vmin = ma.minimum(A)
     self.vmax = ma.maximum(A)
Example #33
   def create( self, parent=None, min=None, max=None, save_file=None, thread_it = 1, rate=None, bitrate=None, ffmpegoptions='' ):
      from vcs import minmax
      from numpy.ma import maximum,minimum
      ##from tkMessageBox import showerror

      # Cannot "Run" or "Create" an animation while already creating an animation
      if self.run_flg == 1: return
      if self.vcs_self.canvas.creating_animation() == 1: return

      if self.vcs_self.animate_info == []:
         str = "No data found!"
         showerror( "Error Message to User", str )
         return
      finish_queued_X_server_requests( self.vcs_self )
      self.vcs_self.canvas.BLOCK_X_SERVER()

      # Stop the (thread) execution of the X main loop (if it is running).
      self.vcs_self.canvas.stopxmainloop( )

      # Force VCS to update its orientation, needed when the user changes the
      # VCS Canvas size.
      self.vcs_self.canvas.updateorientation()

      # Make sure the animate information is up-to-date for creating images
      if ((self.gui_popup == 1) and (self.create_flg == 0)):
         self.update_animate_display_list( )

      # Save the min and max values for the graphics methods.
      # Will need to restore values back when animation is done.
      self.save_original_min_max()

      # Set up the animation min and max values by changing the graphics method
      # Note: cannot set the min and max values if the default graphics method is set.
      do_min_max = 'yes'
      try:
         if (parent is not None) and (parent.iso_spacing == 'Log'):
            do_min_max = 'no'
      except:
         pass

      # Draw specified continental outlines if needed.
      self.continents_hold_value = self.vcs_self.canvas.getcontinentstype( )
      self.vcs_self.canvas.setcontinentstype( self.continents_value )

      if ( do_min_max == 'yes' ):
         minv = []
         maxv=[]
         if (min is None) or (max is None):
            for i in range(len(self.vcs_self.animate_info)):
               minv.append( 1.0e77 )
               maxv.append( -1.0e77 )
            for i in range(len(self.vcs_self.animate_info)):
               dpy, slab = self.vcs_self.animate_info[i]
               mins, maxs = minmax(slab)
               minv[i] = float(minimum(float(minv[i]), float(mins)))
               maxv[i] = float(maximum(float(maxv[i]), float(maxs)))
         if isinstance(min,list) or isinstance(max,list):
            for i in range(len(self.vcs_self.animate_info)):
               try:
                  minv.append( min[i] )
               except:
                  minv.append( min[-1] )
               try:
                  maxv.append( max[i] )
               except:
                  maxv.append( max[-1] )
         else:
            for i in range(len(self.vcs_self.animate_info)):
                minv.append( min )
                maxv.append( max )

         # Set the min an max for each plot in the page. If the same graphics method is used
         # to display the plots, then the last min and max setting of the data set will be used.
         for i in range(len(self.vcs_self.animate_info)):
            try:
               self.set_animation_min_max( minv[i], maxv[i], i )
            except Exception:
               pass # if it is default, then you cannot set the min and max, so pass.
Example #34
    def create(self, parent=None, min=None, max=None, save_file=None,
               thread_it=1, rate=None, bitrate=None, ffmpegoptions=''):
        from vcs import minmax
        from numpy.ma import maximum, minimum

        # Cannot "Run" or "Create" an animation while already creating an
        # animation
        if self.run_flg == 1:
            return
        if self.vcs_self.canvas.creating_animation() == 1:
            return

        if self.vcs_self.animate_info == []:
            str = "No data found!"
            showerror("Error Message to User", str)
            return

        # Stop the (thread) execution of the X main loop (if it is running).
        self.vcs_self.canvas.stopxmainloop()

        # Force VCS to update its orientation, needed when the user changes the
        # VCS Canvas size.
        self.vcs_self.canvas.updateorientation()

        # Make sure the animate information is up-to-date for creating images
        if ((self.gui_popup == 1) and (self.create_flg == 0)):
            self.update_animate_display_list()

        # Save the min and max values for the graphics methods.
        # Will need to restore values back when animation is done.
        self.save_original_min_max()

        # Set up the animation min and max values by changing the graphics method
        # Note: cannot set the min and max values if the default graphics
        # method is set.
        do_min_max = 'yes'
        try:
            if (parent is not None) and (parent.iso_spacing == 'Log'):
                do_min_max = 'no'
        except:
            pass

        # Draw specified continental outlines if needed.
        self.continents_hold_value = self.vcs_self.canvas.getcontinentstype()
        self.vcs_self.canvas.setcontinentstype(self.continents_value)

        if (do_min_max == 'yes'):
            minv = []
            maxv = []
            if (min is None) or (max is None):
                for i in range(len(self.vcs_self.animate_info)):
                    minv.append(1.0e77)
                    maxv.append(-1.0e77)
                for i in range(len(self.vcs_self.animate_info)):
                    dpy, slab = self.vcs_self.animate_info[i]
                    mins, maxs = minmax(slab)
                    minv[i] = float(minimum(float(minv[i]), float(mins)))
                    maxv[i] = float(maximum(float(maxv[i]), float(maxs)))
            if isinstance(min, list) or isinstance(max, list):
                for i in range(len(self.vcs_self.animate_info)):
                    try:
                        minv.append(min[i])
                    except:
                        minv.append(min[-1])
                    try:
                        maxv.append(max[i])
                    except:
                        maxv.append(max[-1])
            else:
                for i in range(len(self.vcs_self.animate_info)):
                    minv.append(min)
                    maxv.append(max)

            # Set the min an max for each plot in the page. If the same graphics method is used
            # to display the plots, then the last min and max setting of the
            # data set will be used.
            for i in range(len(self.vcs_self.animate_info)):
                try:
                    self.set_animation_min_max(minv[i], maxv[i], i)
                except:
                    # if it is default, then you cannot set the min and max, so
                    # pass.
                    pass

        if save_file is None or save_file.split('.')[-1].lower() == 'ras':
            if thread_it:
                thread.start_new_thread(
                    self.vcs_self.canvas.animate_init, (save_file,))
            else:
                self.vcs_self.canvas.animate_init(save_file)
        else:  # ffmpeg stuff
            save_info = self.vcs_self.animate_info
            animation_info = self.animate_info_from_python()
            slabs = []
            templates = []
            dpys = []
            for i in range(len(self.vcs_self.animate_info)):
                dpy, slab = self.vcs_self.animate_info[i]
                slabs.append(slab)
                dpys.append(dpy)
                templates.append(dpy.template)
            sh = slabs[0].shape
            if dpy.g_type in ['boxfill', 'isofill', 'isoline', 'meshfill',
                              'outfill', 'outline', 'taylordiagram', 'vector', ]:
                r = len(sh) - 2
            else:
                r = len(sh) - 1
            # now create the list of all previous indices to plot
            indices = []
            for i in range(r):
                this = list(range(sh[i]))
                tmp = []
                if indices == []:
                    for k in this:
                        indices.append([k, ])
                else:
                    for j in range(len(indices)):
                        for k in this:
                            tmp2 = copy.copy(indices[j])
                            tmp2.append(k)
                            tmp.append(tmp2)
                    indices = tmp
            count = 1
            white_square = self.vcs_self.createfillarea()
            white_square.color = 240
            white_square.x = [0, 1, 1, 0]
            white_square.y = [0, 0, 1, 1]
            new_vcs = self.vcs_self
            if self.vcs_self.orientation() == 'portrait':
                new_vcs.portrait()
            # self.vcs_self.close()

            for index in indices:
                new_vcs.clear()
                new_vcs.plot(white_square, bg=1)
                for i in range(len(save_info)):
                    slab = slabs[i]
                    template = templates[i]
                    gtype = animation_info["gtype"][i].lower()
                    gname = animation_info["gname"][i]
                    gm = None  # for flake8 to be happy
                    exec("gm = new_vcs.get%s('%s')" % (gtype, gname))
                    for j in index:
                        slab = slab[j]
                    new_vcs.plot(slab, gm, new_vcs.gettemplate(template), bg=1)
                new_vcs.png("tmp_anim_%i" % count)
                count += 1
            new_vcs.ffmpeg(
                save_file,
                "tmp_anim_%d.png",
                bitrate=bitrate,
                rate=rate,
                options=ffmpegoptions)
            for i in range(count - 1):
                os.remove("tmp_anim_%i.png" % (i + 1))
            del(new_vcs)
        self.create_flg = 1
Пример #35
0
    def get_development_constrained_capacity(self,
            constraints,
            dataset_pool,
            index = None,
            recompute_flag = False,
            maximum_commercial_development_capacity = 4000000, ### TODO: Remove this default value and require this parameter.
            maximum_industrial_development_capacity = 1200000, ### TODO: Remove this default value and require this parameter.
            maximum_residential_development_capacity = 2800, ### TODO: Remove this default value and require this parameter.
            ):
        """
        Truncate the development capacity to the range
        min <= development capacity <= max, as defined by the given constraints.
        """
        if (self.development_capacity <> None) and (not recompute_flag):
            if (index <> None) and (index.size == self.development_capacity["index"].size) and \
                    alltrue(self.development_capacity["index"] == index):
                return self.development_capacity
        constraints.load_dataset_if_not_loaded()
        attributes = remove_elements_with_matched_prefix_from_list(
            constraints.get_attribute_names(), ["min", "max"])
        attributes = remove_all(attributes, constraints.get_id_name()[0])
        attributes_with_prefix = map(lambda attr: "urbansim.gridcell." +
                                        attr, attributes)
        self.compute_variables(attributes_with_prefix, dataset_pool=dataset_pool)
        if index == None:
            index = arange(self.size())
        development_constraints_array = ones((constraints.size(),index.size), dtype=bool8)
        for attr in attributes:
            values = self.get_attribute_by_index(attr, index)
            constr = reshape(constraints.get_attribute(attr), (constraints.size(),1))
            constr = repeat(constr, index.size, axis=1)
            tmp = logical_or(constr == values, constr < 0)
            development_constraints_array = logical_and(development_constraints_array, tmp)

        self.development_capacity = {
            "commercial":zeros((index.size,2)),
            "residential":zeros((index.size,2)), 
            "industrial":zeros((index.size,2)),
            "index": index,
        }

        #reasonable maxima
        self.development_capacity["commercial"][:,1] = maximum_commercial_development_capacity
        self.development_capacity["industrial"][:,1] = maximum_industrial_development_capacity
        self.development_capacity["residential"][:,1] = maximum_residential_development_capacity
        for iconstr in range(constraints.size()):
            w = where(development_constraints_array[iconstr,:])[0]
            if w.size > 0:
                self.development_capacity["commercial"][w,0] = \
                    maximum(self.development_capacity["commercial"][w,0], \
                        constraints.get_attribute_by_index("min_commercial_sqft", iconstr))
                self.development_capacity["commercial"][w,1] = \
                    ma.minimum(self.development_capacity["commercial"][w,1], \
                        constraints.get_attribute_by_index("max_commercial_sqft", iconstr))
                self.development_capacity["industrial"][w,0] = \
                    maximum(self.development_capacity["industrial"][w,0], \
                        constraints.get_attribute_by_index("min_industrial_sqft", iconstr))
                self.development_capacity["industrial"][w,1] = \
                    ma.minimum(self.development_capacity["industrial"][w,1], \
                        constraints.get_attribute_by_index("max_industrial_sqft", iconstr))
                self.development_capacity["residential"][w,0] = \
                    maximum(self.development_capacity["residential"][w,0], \
                        constraints.get_attribute_by_index("min_units", iconstr))
                self.development_capacity["residential"][w,1] = \
                    ma.minimum(self.development_capacity["residential"][w,1], \
                        constraints.get_attribute_by_index("max_units", iconstr))

        return self.development_capacity
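
A minimal standalone sketch of the clamping idiom used above: per-cell capacities start at a zero minimum and a default "reasonable" maximum, then each constraint raises the lower bound with elementwise maximum and lowers the upper bound with ma.minimum. All array values and names below are invented for illustration; this is not the UrbanSim API.

import numpy as np
from numpy import maximum
from numpy import ma

n_cells = 5
capacity = np.zeros((n_cells, 2))                 # column 0: min, column 1: max
capacity[:, 1] = 4000000.0                        # "reasonable maximum" default

constraint_min = np.array([0.0, 100.0, 0.0, 50.0, 0.0])
constraint_max = np.array([1.0e6, 2.0e6, 5.0e5, 4.0e6, 3.0e6])

capacity[:, 0] = maximum(capacity[:, 0], constraint_min)      # raise lower bounds
capacity[:, 1] = ma.minimum(capacity[:, 1], constraint_max)   # cut upper bounds
print(capacity)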
Пример #36
0
    def create(self,
               parent=None,
               min=None,
               max=None,
               save_file=None,
               thread_it=1,
               rate=None,
               bitrate=None,
               ffmpegoptions=''):
        from vcs import minmax
        from numpy.ma import maximum, minimum
        ##from tkMessageBox import showerror

        # Cannot "Run" or "Create" an animation while already creating an animation
        if self.run_flg == 1: return
        if self.vcs_self.canvas.creating_animation() == 1: return

        if self.vcs_self.animate_info == []:
            msg = "No data found!"
            showerror("Error Message to User", msg)
            return
        finish_queued_X_server_requests(self.vcs_self)
        self.vcs_self.canvas.BLOCK_X_SERVER()

        # Stop the (thread) execution of the X main loop (if it is running).
        self.vcs_self.canvas.stopxmainloop()

        # Force VCS to update its orientation, needed when the user changes the
        # VCS Canvas size.
        self.vcs_self.canvas.updateorientation()

        # Make sure the animate information is up-to-date for creating images
        if ((self.gui_popup == 1) and (self.create_flg == 0)):
            self.update_animate_display_list()

        # Save the min and max values for the graphics methods.
        # Will need to restore values back when animation is done.
        self.save_original_min_max()

        # Set up the animation min and max values by changing the graphics method
        # Note: cannot set the min and max values if the default graphics method is set.
        do_min_max = 'yes'
        try:
            if (parent is not None) and (parent.iso_spacing == 'Log'):
                do_min_max = 'no'
        except:
            pass

        # Draw specified continental outlines if needed.
        self.continents_hold_value = self.vcs_self.canvas.getcontinentstype()
        self.vcs_self.canvas.setcontinentstype(self.continents_value)

        if (do_min_max == 'yes'):
            minv = []
            maxv = []
            if (min is None) or (max is None):
                for i in range(len(self.vcs_self.animate_info)):
                    minv.append(1.0e77)
                    maxv.append(-1.0e77)
                for i in range(len(self.vcs_self.animate_info)):
                    dpy, slab = self.vcs_self.animate_info[i]
                    mins, maxs = minmax(slab)
                    minv[i] = float(minimum(float(minv[i]), float(mins)))
                    maxv[i] = float(maximum(float(maxv[i]), float(maxs)))
            if isinstance(min, list) or isinstance(max, list):
                for i in range(len(self.vcs_self.animate_info)):
                    try:
                        minv.append(min[i])
                    except:
                        minv.append(min[-1])
                    try:
                        maxv.append(max[i])
                    except:
                        maxv.append(max[-1])
            else:
                for i in range(len(self.vcs_self.animate_info)):
                    minv.append(min)
                    maxv.append(max)

            # Set the min an max for each plot in the page. If the same graphics method is used
            # to display the plots, then the last min and max setting of the data set will be used.
            for i in range(len(self.vcs_self.animate_info)):
                try:
                    self.set_animation_min_max(minv[i], maxv[i], i)
                except Exception as err:
                    pass  # if it is default, then you cannot set the min and max, so pass.
Пример #37
0
 def autoscale(self, A):
     """
     Set *vmin*, *vmax* to min, max of *A*.
     """
     self.vmin = ma.minimum(A)
     self.vmax = ma.maximum(A)
Пример #38
0
 def autoscale(self, A):
     '''
     Set *vmin*, *vmax* to min, max of *A*.
     '''
     self.vmin = ma.minimum(A)
     self.vmax = ma.maximum(A)
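
Note that the two autoscale snippets above rely on the old one-argument form of ma.minimum(A)/ma.maximum(A), which performed a full reduction; current NumPy removed that form, and the reduction is spelled explicitly. A minimal sketch of the equivalent, with an invented data array:

import numpy as np
from numpy import ma

A = ma.masked_greater(np.array([3.0, 9.9e19, -2.0, 7.0]), 9.0e19)

vmin = A.min()   # minimum over unmasked values: -2.0
vmax = A.max()   # maximum over unmasked values: 7.0
print(vmin, vmax)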
Пример #39
0
    def plot_map(self,
                 dataset,
                 attribute_data,
                 min_value=None,
                 max_value=None,
                 file=None,
                 my_title="",
                 filter=None,
                 background=None):
        """    Plots a 2D image of attribute given by 'name'. matplotlib required.
               The dataset must have a method 'get_2d_attribute' defined that returns
               a 2D array that is to be plotted. If min_value/max_value are given, all values
               that are smaller/larger than these values are set to min_value/max_value.
               Argument background is a value to be used for background. If it is not given,
               it is considered as a 1/100 under the minimum value of the array.
               Filter is a 2D array. Points where filter is > 0 are masked out (put into background).
        """
        import matplotlib
        matplotlib.use('Qt4Agg')

        from matplotlib.pylab import jet, imshow, colorbar, show, axis, savefig, close, figure, title, normalize
        from matplotlib.pylab import rot90

        attribute_data = attribute_data[filter]
        coord_2d_data = dataset.get_2d_attribute(attribute_data=attribute_data)
        data_mask = coord_2d_data.mask
        #        if filter is not None:
        #            if isinstance(filter, ndarray):
        #                if not ma.allclose(filter.shape, coord_2d_data.shape):
        #                    raise StandardError, "Argument filter must have the same shape as the 2d attribute."
        #                filter_data = filter
        #            else:
        #                raise TypeError, "The filter type is invalid. A character string or a 2D numpy array allowed."
        #            filter_data = where(ma.filled(filter_data,1) > 0, 1,0)
        #            data_mask = ma.mask_or(data_mask, filter_data)
        nonmaskedmin = ma.minimum(coord_2d_data) - .2 * (
            ma.maximum(coord_2d_data) - ma.minimum(coord_2d_data))
        if max_value is None:
            max_value = ma.maximum(coord_2d_data)
        if min_value is None:
            min_value = nonmaskedmin

        coord_2d_data = ma.filled(coord_2d_data, min_value)
        if background is None:
            value_range = max_value - min_value
            background = min_value - value_range / 100
        coord_2d_data = ma.filled(
            ma.masked_array(coord_2d_data, mask=data_mask), background)

        # Our data uses NW as 0,0, while matplotlib uses SW for 0,0.
        # Rotate the data so the map is oriented correctly.
        coord_2d_data = rot90(coord_2d_data, 1)

        jet()
        figure()
        norm = normalize(min_value, max_value)
        im = imshow(
            coord_2d_data,
            origin='lower',
            aspect='equal',
            interpolation=None,
            norm=norm,
        )

        tickfmt = '%4d'
        if isinstance(min_value, float) or isinstance(max_value, float):
            tickfmt = '%1.4f'
        colorbar(format=tickfmt)

        title(my_title)
        axis('off')
        if file:
            savefig(file)
            close()
        else:
            show()
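
A small sketch of the background handling described in the plot_map docstring, outside of any plotting: masked cells are pushed to a background value just below the data range so they render distinctly. The data and names here are invented.

import numpy as np
from numpy import ma

data = ma.masked_invalid(np.array([[1.0, 2.0], [np.nan, 4.0]]))

min_value = float(data.min())
max_value = float(data.max())
background = min_value - (max_value - min_value) / 100.0   # 1/100 below the minimum

filled = ma.filled(data, background)   # masked cells become the background value
print(filled)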
Пример #40
0
    def _contour_args(self, *args):
        if self.filled: fn = 'contourf'
        else: fn = 'contour'
        Nargs = len(args)
        if Nargs <= 2:
            z = ma.asarray(args[0], dtype=np.float64)
            x, y = self._initialize_x_y(z)
        elif Nargs <= 4:
            x, y, z = self._check_xyz(args[:3])
        else:
            raise TypeError("Too many arguments to %s; see help(%s)" %
                            (fn, fn))
        self.zmax = ma.maximum(z)
        self.zmin = ma.minimum(z)
        if self.logscale and self.zmin <= 0:
            z = ma.masked_where(z <= 0, z)
            warnings.warn('Log scale: values of z <=0 have been masked')
            self.zmin = z.min()
        self._auto = False
        if self.levels is None:
            if Nargs == 1 or Nargs == 3:
                lev = self._autolev(z, 7)
            else:  # 2 or 4 args
                level_arg = args[-1]
                try:
                    if type(level_arg) == int:
                        lev = self._autolev(z, level_arg)
                    else:
                        lev = np.asarray(level_arg).astype(np.float64)
                except:
                    raise TypeError(
                        "Last %s arg must give levels; see help(%s)" %
                        (fn, fn))
            if self.filled and len(lev) < 2:
                raise ValueError("Filled contours require at least 2 levels.")
            # Workaround for cntr.c bug wrt masked interior regions:
            #if filled:
            #    z = ma.masked_array(z.filled(-1e38))
            # It's not clear this is any better than the original bug.
            self.levels = lev
        #if self._auto and self.extend in ('both', 'min', 'max'):
        #    raise TypeError("Auto level selection is inconsistent "
        #                             + "with use of 'extend' kwarg")
        self._levels = list(self.levels)
        if self.extend in ('both', 'min'):
            self._levels.insert(0, min(self.levels[0], self.zmin) - 1)
        if self.extend in ('both', 'max'):
            self._levels.append(max(self.levels[-1], self.zmax) + 1)
        self._levels = np.asarray(self._levels)
        self.vmin = np.amin(self.levels)  # alternative would be self.layers
        self.vmax = np.amax(self.levels)
        if self.extend in ('both', 'min'):
            self.vmin = 2 * self.levels[0] - self.levels[1]
        if self.extend in ('both', 'max'):
            self.vmax = 2 * self.levels[-1] - self.levels[-2]
        self.layers = self._levels  # contour: a line is a thin layer
        if self.filled:
            self.layers = 0.5 * (self._levels[:-1] + self._levels[1:])
            if self.extend in ('both', 'min'):
                self.layers[0] = 0.5 * (self.vmin + self._levels[1])
            if self.extend in ('both', 'max'):
                self.layers[-1] = 0.5 * (self.vmax + self._levels[-2])

        return (x, y, z)
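
A short worked sketch of the 'extend' bookkeeping in _contour_args above: the level list is padded with out-of-range sentinels, and vmin/vmax are widened by one level spacing. The level values are invented, and this is a standalone illustration rather than the Matplotlib API itself.

import numpy as np

levels = np.array([0.0, 1.0, 2.0, 3.0])
zmin, zmax = -0.7, 3.4

# pad with sentinels guaranteed to lie outside the data range
_levels = [min(levels[0], zmin) - 1] + list(levels) + [max(levels[-1], zmax) + 1]

vmin = 2 * levels[0] - levels[1]      # one spacing below the lowest level: -1.0
vmax = 2 * levels[-1] - levels[-2]    # one spacing above the highest level: 4.0
print(_levels, vmin, vmax)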
Пример #41
0
def make_closea_masks(config=None,domcfg_file=None,mask=None):

#=========================
# 1. Read in domcfg file
#=========================

    if config is None:
        raise Exception('configuration must be specified')

    if domcfg_file is None:
        raise Exception('domain_cfg file must be specified')

    if mask is None:
        mask=False

    domcfg = nc.Dataset(domcfg_file,'r+')
    lon = domcfg.variables['nav_lon'][:]
    lat = domcfg.variables['nav_lat'][:]
    top_level = domcfg.variables['top_level'][0][:]

    nx = top_level.shape[1]
    ny = top_level.shape[0]

    # Generate 2D "i" and "j" fields for use in "where" statements.
    # These are the Fortran indices, counting from 1, so we have to
    # add 1 to np.arange because python counts from 0.

    ones_2d = np.ones((ny,nx))
    ii1d = np.arange(nx)+1
    jj1d = np.arange(ny)+1
    ii2d = ii1d * ones_2d
    jj2d = np.transpose(jj1d*np.transpose(ones_2d)) 
 
#=====================================
# 2. Closea definitions (old style)
#=====================================

    # NB. The model i and j indices defined here are Fortran indices, 
    #     ie. counting from 1 as in the NEMO code. Also the indices
    #     of the arrays (ncsi1 etc) count from 1 in order to match
    #     the Fortran code.
    #     This means that you can cut and paste the definitions from
    #     the NEMO code and change round brackets to square brackets. 
    #     But BEWARE: Fortran array(a:b) == Python array[a:b+1] !!!
    #

    # If use_runoff_box = True then specify runoff area as all sea points within
    # a rectangular area. If use_runoff_box = False then specify a list of points
    # as in the old NEMO code. Default to false.
    use_runoff_box = False

    #================================================================
    if config == 'ORCA2':

        num_closea = 4
        max_runoff_points = 4

        ncsnr = np.zeros(num_closea+1,dtype=np.int)                     ; ncstt = np.zeros(num_closea+1,dtype=np.int)
        ncsi1 = np.zeros(num_closea+1,dtype=np.int)                     ; ncsj1 = np.zeros(num_closea+1,dtype=np.int)
        ncsi2 = np.zeros(num_closea+1,dtype=np.int)                     ; ncsj2 = np.zeros(num_closea+1,dtype=np.int)
        ncsir = np.zeros((num_closea+1,max_runoff_points+1),dtype=np.int) ; ncsjr = np.zeros((num_closea+1,max_runoff_points+1),dtype=np.int)

        # Caspian Sea (spread over globe)
        ncsnr[1]   =   1  ;  ncstt[1]   =   0   
        ncsi1[1]   =  11  ;  ncsj1[1]   = 103
        ncsi2[1]   =  17  ;  ncsj2[1]   = 112

        # Great Lakes - North America - put at St Laurent mouth
        ncsnr[2]   =   1  ;  ncstt[2]   =   2 
        ncsi1[2]   =  97  ;  ncsj1[2]   = 107
        ncsi2[2]   = 103  ;  ncsj2[2]   = 111
        ncsir[2,1] = 110  ;  ncsjr[2,1] = 111           

        # Black Sea (crossed by the cyclic boundary condition)
        # put in Med Sea (north of Aegean Sea)
        ncsnr[3:5] =   4  ;  ncstt[3:5] =   2           
        ncsir[3:5,1] = 171;  ncsjr[3:5,1] = 106     
        ncsir[3:5,2] = 170;  ncsjr[3:5,2] = 106 
        ncsir[3:5,3] = 171;  ncsjr[3:5,3] = 105 
        ncsir[3:5,4] = 170;  ncsjr[3:5,4] = 105 
        # west part of the Black Sea      
        ncsi1[3]   = 174  ;  ncsj1[3]   = 107      
        ncsi2[3]   = 181  ;  ncsj2[3]   = 112      
        # east part of the Black Sea      
        ncsi1[4]   =   2  ;  ncsj1[4]   = 107      
        ncsi2[4]   =   6  ;  ncsj2[4]   = 112      

    #================================================================
    elif config == 'eORCA1':

        num_closea = 1
        max_runoff_points = 1

        ncsnr = np.zeros(num_closea+1,dtype=np.int)                     ; ncstt = np.zeros(num_closea+1,dtype=np.int)
        ncsi1 = np.zeros(num_closea+1,dtype=np.int)                     ; ncsj1 = np.zeros(num_closea+1,dtype=np.int)
        ncsi2 = np.zeros(num_closea+1,dtype=np.int)                     ; ncsj2 = np.zeros(num_closea+1,dtype=np.int)
        ncsir = np.zeros((num_closea+1,max_runoff_points+1),dtype=np.int) ; ncsjr = np.zeros((num_closea+1,max_runoff_points+1),dtype=np.int)

        # Caspian Sea  (spread over the globe)
        ncsnr[1]   = 1    ; ncstt[1]   = 0           
        ncsi1[1]   = 332  ; ncsj1[1]   = 243
        ncsi2[1]   = 344  ; ncsj2[1]   = 275

    #================================================================
    elif config == 'eORCA025_UK':

        num_closea = 10
        max_runoff_points = 1

        ncsnr = np.zeros(num_closea+1,dtype=np.int)                     ; ncstt = np.zeros(num_closea+1,dtype=np.int)
        ncsi1 = np.zeros(num_closea+1,dtype=np.int)                     ; ncsj1 = np.zeros(num_closea+1,dtype=np.int)
        ncsi2 = np.zeros(num_closea+1,dtype=np.int)                     ; ncsj2 = np.zeros(num_closea+1,dtype=np.int)
        ncsir = np.zeros((num_closea+1,max_runoff_points+1),dtype=np.int) ; ncsjr = np.zeros((num_closea+1,max_runoff_points+1),dtype=np.int)

        # Caspian Sea
        ncsnr[1]   = 1    ; ncstt[1]   = 0     
        ncsi1[1]   = 1330 ; ncsj1[1]   = 831
        ncsi2[1]   = 1375 ; ncsj2[1]   = 981

        # Aral Sea
        ncsnr[2]   = 1    ; ncstt[2]   = 0     
        ncsi1[2]   = 1376 ; ncsj1[2]   = 900
        ncsi2[2]   = 1400 ; ncsj2[2]   = 981

        # Azov Sea
        ncsnr[3]   = 1    ; ncstt[3]   = 0     
        ncsi1[3]   = 1284 ; ncsj1[3]   = 908
        ncsi2[3]   = 1304 ; ncsj2[3]   = 933

        # Lake Superior
        ncsnr[4]   = 1    ; ncstt[4]   = 0     
        ncsi1[4]   = 781  ; ncsj1[4]   = 905 
        ncsi2[4]   = 815  ; ncsj2[4]   = 926 

        # Lake Michigan
        ncsnr[5]   = 1    ; ncstt[5]   = 0     
        ncsi1[5]   = 795  ; ncsj1[5]   = 871             
        ncsi2[5]   = 813  ; ncsj2[5]   = 905 

        # Lake Huron part 1
        ncsnr[6]   = 1    ; ncstt[6]   = 0     
        ncsi1[6]   = 814  ; ncsj1[6]   = 882             
        ncsi2[6]   = 825  ; ncsj2[6]   = 905 

        # Lake Huron part 2
        ncsnr[7]   = 1    ; ncstt[7]   = 0     
        ncsi1[7]   = 826  ; ncsj1[7]   = 889             
        ncsi2[7]   = 833  ; ncsj2[7]   = 905 

        # Lake Erie
        ncsnr[8]   = 1    ; ncstt[8]   = 0     
        ncsi1[8]   = 816  ; ncsj1[8]   = 871             
        ncsi2[8]   = 837  ; ncsj2[8]   = 881 

        # Lake Ontario
        ncsnr[9]   = 1    ; ncstt[9]   = 0     
        ncsi1[9]   = 831  ; ncsj1[9]   = 882             
        ncsi2[9]   = 847  ; ncsj2[9]   = 889 

        # Lake Victoria
        ncsnr[10]   = 1    ; ncstt[10]   = 0   
        ncsi1[10]   = 1274 ; ncsj1[10]   = 672 
        ncsi2[10]   = 1289 ; ncsj2[10]   = 687 

    #================================================================
    elif config == 'eORCA025_UK_rnf':

        num_closea = 10
        max_runoff_points = 1
        use_runoff_box = True

        ncsnr = np.zeros(num_closea+1,dtype=np.int)                     ; ncstt = np.zeros(num_closea+1,dtype=np.int)
        ncsi1 = np.zeros(num_closea+1,dtype=np.int)                     ; ncsj1 = np.zeros(num_closea+1,dtype=np.int)
        ncsi2 = np.zeros(num_closea+1,dtype=np.int)                     ; ncsj2 = np.zeros(num_closea+1,dtype=np.int)
        ncsir1 = np.zeros(num_closea+1,dtype=np.int)                    ; ncsjr1 = np.zeros(num_closea+1,dtype=np.int)
        ncsir2 = np.zeros(num_closea+1,dtype=np.int)                    ; ncsjr2 = np.zeros(num_closea+1,dtype=np.int)
        ncsir = np.zeros((num_closea+1,max_runoff_points+1),dtype=np.int) ; ncsjr = np.zeros((num_closea+1,max_runoff_points+1),dtype=np.int)

        # Caspian Sea
        ncsnr[1]   = 1    ; ncstt[1]   = 0     
        ncsi1[1]   = 1330 ; ncsj1[1]   = 831
        ncsi2[1]   = 1375 ; ncsj2[1]   = 981

        # Aral Sea
        ncsnr[2]   = 1    ; ncstt[2]   = 0     
        ncsi1[2]   = 1376 ; ncsj1[2]   = 900
        ncsi2[2]   = 1400 ; ncsj2[2]   = 981

        # Azov Sea
        ncsnr[3]   = 1    ; ncstt[3]   = 0     
        ncsi1[3]   = 1284 ; ncsj1[3]   = 908
        ncsi2[3]   = 1304 ; ncsj2[3]   = 933

        # Lake Superior
        ncsnr[4]   = 1    ; ncstt[4]   = 1     
        ncsi1[4]   = 781  ; ncsj1[4]   = 905 
        ncsi2[4]   = 815  ; ncsj2[4]   = 926 
        # runoff points in the St Lawrence Seaway for all Great Lakes
        ncsir1[4:10]   = 873 ; ncsjr1[4:10]   = 909 
        ncsir2[4:10]   = 884 ; ncsjr2[4:10]   = 920 

        # Lake Michigan
        ncsnr[5]   = 1    ; ncstt[5]   = 1     
        ncsi1[5]   = 795  ; ncsj1[5]   = 871             
        ncsi2[5]   = 813  ; ncsj2[5]   = 905 

        # Lake Huron part 1
        ncsnr[6]   = 1    ; ncstt[6]   = 1     
        ncsi1[6]   = 814  ; ncsj1[6]   = 882             
        ncsi2[6]   = 825  ; ncsj2[6]   = 905 

        # Lake Huron part 2
        ncsnr[7]   = 1    ; ncstt[7]   = 1     
        ncsi1[7]   = 826  ; ncsj1[7]   = 889             
        ncsi2[7]   = 833  ; ncsj2[7]   = 905 

        # Lake Erie
        ncsnr[8]   = 1    ; ncstt[8]   = 1     
        ncsi1[8]   = 816  ; ncsj1[8]   = 871             
        ncsi2[8]   = 837  ; ncsj2[8]   = 881 

        # Lake Ontario
        ncsnr[9]   = 1    ; ncstt[9]   = 1     
        ncsi1[9]   = 831  ; ncsj1[9]   = 882             
        ncsi2[9]   = 847  ; ncsj2[9]   = 889 

        # Lake Victoria
        ncsnr[10]   = 1    ; ncstt[10]   = 0   
        ncsi1[10]   = 1274 ; ncsj1[10]   = 672 
        ncsi2[10]   = 1289 ; ncsj2[10]   = 687 

    #================================================================
    elif config == 'eORCA025_UK_empmr':

        num_closea = 10
        max_runoff_points = 1
        use_runoff_box = True

        ncsnr = np.zeros(num_closea+1,dtype=np.int)                     ; ncstt = np.zeros(num_closea+1,dtype=np.int)
        ncsi1 = np.zeros(num_closea+1,dtype=np.int)                     ; ncsj1 = np.zeros(num_closea+1,dtype=np.int)
        ncsi2 = np.zeros(num_closea+1,dtype=np.int)                     ; ncsj2 = np.zeros(num_closea+1,dtype=np.int)
        ncsir1 = np.zeros(num_closea+1,dtype=np.int)                    ; ncsjr1 = np.zeros(num_closea+1,dtype=np.int)
        ncsir2 = np.zeros(num_closea+1,dtype=np.int)                    ; ncsjr2 = np.zeros(num_closea+1,dtype=np.int)
        ncsir = np.zeros((num_closea+1,max_runoff_points+1),dtype=np.int) ; ncsjr = np.zeros((num_closea+1,max_runoff_points+1),dtype=np.int)

        # Caspian Sea
        ncsnr[1]   = 1    ; ncstt[1]   = 0     
        ncsi1[1]   = 1330 ; ncsj1[1]   = 831
        ncsi2[1]   = 1375 ; ncsj2[1]   = 981

        # Aral Sea
        ncsnr[2]   = 1    ; ncstt[2]   = 0     
        ncsi1[2]   = 1376 ; ncsj1[2]   = 900
        ncsi2[2]   = 1400 ; ncsj2[2]   = 981

        # Azov Sea
        ncsnr[3]   = 1    ; ncstt[3]   = 0     
        ncsi1[3]   = 1284 ; ncsj1[3]   = 908
        ncsi2[3]   = 1304 ; ncsj2[3]   = 933

        # Lake Superior
        ncsnr[4]   = 1    ; ncstt[4]   = 2     
        ncsi1[4]   = 781  ; ncsj1[4]   = 905
        ncsi2[4]   = 815  ; ncsj2[4]   = 926 
        # runoff points in the St Lawrence Seaway for all Great Lakes
        ncsir1[4:10]   = 873 ; ncsjr1[4:10]   = 909 
        ncsir2[4:10]   = 884 ; ncsjr2[4:10]   = 920 

        # Lake Michigan
        ncsnr[5]   = 1    ; ncstt[5]   = 2     
        ncsi1[5]   = 795  ; ncsj1[5]   = 871             
        ncsi2[5]   = 813  ; ncsj2[5]   = 905 

        # Lake Huron part 1
        ncsnr[6]   = 1    ; ncstt[6]   = 2     
        ncsi1[6]   = 814  ; ncsj1[6]   = 882             
        ncsi2[6]   = 825  ; ncsj2[6]   = 905 

        # Lake Huron part 2
        ncsnr[7]   = 1    ; ncstt[7]   = 2     
        ncsi1[7]   = 826  ; ncsj1[7]   = 889             
        ncsi2[7]   = 833  ; ncsj2[7]   = 905 

        # Lake Erie
        ncsnr[8]   = 1    ; ncstt[8]   = 2     
        ncsi1[8]   = 816  ; ncsj1[8]   = 871             
        ncsi2[8]   = 837  ; ncsj2[8]   = 881 

        # Lake Ontario
        ncsnr[9]   = 1    ; ncstt[9]   = 2     
        ncsi1[9]   = 831  ; ncsj1[9]   = 882             
        ncsi2[9]   = 847  ; ncsj2[9]   = 889 

        # Lake Victoria
        ncsnr[10]   = 1    ; ncstt[10]   = 0   
        ncsi1[10]   = 1274 ; ncsj1[10]   = 672 
        ncsi2[10]   = 1289 ; ncsj2[10]   = 687 

#=====================================
# 3. Generate mask fields
#=====================================

    rnf_count = 0
    empmr_count = 0

    closea_mask = ma.zeros(top_level.shape,dtype=np.int)
    temp_mask_rnf = ma.zeros(top_level.shape,dtype=np.int)
    temp_mask_empmr = ma.zeros(top_level.shape,dtype=np.int)
    closea_mask_rnf = ma.zeros(top_level.shape,dtype=np.int)
    closea_mask_empmr = ma.zeros(top_level.shape,dtype=np.int)

    for ics in range(num_closea):
        closea_mask = ma.where( ( ii2d[:] >= ncsi1[ics+1] ) & ( ii2d[:] <= ncsi2[ics+1] ) &
                                ( jj2d[:] >= ncsj1[ics+1] ) & ( jj2d[:] <= ncsj2[ics+1] ) &
                                ( top_level == 1 ), ics+1, closea_mask)
        if ncstt[ics+1] == 1:
            rnf_count = rnf_count + 1
            temp_mask_rnf[:] = 0
            if use_runoff_box:
                temp_mask_rnf = ma.where( ( ii2d[:] >= ncsir1[ics+1] ) & ( ii2d[:] <= ncsir2[ics+1] ) &
                                       ( jj2d[:] >= ncsjr1[ics+1] ) & ( jj2d[:] <= ncsjr2[ics+1] ) &
                                       ( top_level == 1 ), rnf_count, 0)
            else:
                for ir in range(ncsnr[ics+1]):
                    # likely intent: index the (j, i) runoff point, 0-based; the original indexed ncsjr twice
                    temp_mask_rnf[ncsjr[ics+1, ir+1] - 1, ncsir[ics+1, ir+1] - 1] = rnf_count
 
            temp_mask_rnf = ma.where( closea_mask_rnf > 0, ma.minimum(temp_mask_rnf,closea_mask_rnf), temp_mask_rnf)
            min_rnf = ma.amin(temp_mask_rnf[ma.where(temp_mask_rnf > 0)])
            max_rnf = ma.amax(temp_mask_rnf[ma.where(temp_mask_rnf > 0)])
            if min_rnf != max_rnf:
                print('min_rnf, max_rnf : ', min_rnf, max_rnf)
                raise Exception('Partially overlapping target rnf areas for two closed seas.')
            else:
                # source area:
                closea_mask_rnf[ma.where(closea_mask==ics+1)] = min_rnf
                # target area:
                closea_mask_rnf[ma.where(temp_mask_rnf>0)] = min_rnf
                # reset rnf_count:
                rnf_count = min_rnf
                    
        if ncstt[ics+1] == 2:
            empmr_count = empmr_count + 1
            temp_mask_empmr[:] = 0
            if use_runoff_box:
                temp_mask_empmr = ma.where( ( ii2d[:] >= ncsir1[ics+1] ) & ( ii2d[:] <= ncsir2[ics+1] ) &
                                          ( jj2d[:] >= ncsjr1[ics+1] ) & ( jj2d[:] <= ncsjr2[ics+1] ) &
                                          ( top_level == 1 ), empmr_count, 0)
            else:
                for ir in range(ncsnr[ics+1]):
                    # same fix as for the rnf mask above: index the (j, i) runoff point, 0-based
                    temp_mask_empmr[ncsjr[ics+1, ir+1] - 1, ncsir[ics+1, ir+1] - 1] = empmr_count

            temp_mask_empmr = ma.where( closea_mask_empmr > 0, ma.minimum(temp_mask_empmr,closea_mask_empmr), temp_mask_empmr)
            min_empmr = ma.amin(temp_mask_empmr[ma.where(temp_mask_empmr > 0)])
            max_empmr = ma.amax(temp_mask_empmr[ma.where(temp_mask_empmr > 0)])
            if min_empmr != max_empmr:
                raise Exception('Partially overlapping target empmr areas for two closed seas.')
            else:
                # source area:
                closea_mask_empmr[ma.where(closea_mask==ics+1)] = min_empmr
                # target area:
                closea_mask_empmr[ma.where(temp_mask_empmr>0)] = min_empmr
                # reset empmr_count:
                empmr_count = min_empmr
                    
    if mask:
        # apply land-sea mask if required
        closea_mask.mask = np.where(top_level==0,True,False)
        closea_mask_rnf.mask = np.where(top_level==0,True,False)
        closea_mask_empmr.mask = np.where(top_level==0,True,False)

#=====================================
# 4. Append masks to domain_cfg file.
#=====================================

    domcfg.createVariable('closea_mask',datatype='i',dimensions=('y','x'),fill_value=closea_mask.fill_value,chunksizes=(1000,1000))
    domcfg.variables['closea_mask'][:]=closea_mask
    if rnf_count > 0:
        domcfg.createVariable('closea_mask_rnf',datatype='i',dimensions=('y','x'),fill_value=closea_mask_rnf.fill_value,chunksizes=(1000,1000))
        domcfg.variables['closea_mask_rnf'][:]=closea_mask_rnf
    if empmr_count > 0:
        domcfg.createVariable('closea_mask_empmr',datatype='i',dimensions=('y','x'),fill_value=closea_mask_empmr.fill_value,chunksizes=(1000,1000))
        domcfg.variables['closea_mask_empmr'][:]=closea_mask_empmr

    domcfg.close()
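
A compact sketch of the box-mask idiom used in make_closea_masks above: build 1-based (Fortran-style) i/j index fields, then mark every sea point inside a rectangular box with the closed-sea number via ma.where. The grid size and box corners below are invented.

import numpy as np
from numpy import ma

ny, nx = 6, 8
top_level = np.ones((ny, nx), dtype=int)          # 1 = sea, 0 = land

ones_2d = np.ones((ny, nx))
ii2d = (np.arange(nx) + 1) * ones_2d                                  # Fortran-style i index
jj2d = np.transpose((np.arange(ny) + 1) * np.transpose(ones_2d))      # Fortran-style j index

closea_mask = ma.zeros((ny, nx), dtype=int)
i1, i2, j1, j2 = 3, 5, 2, 4                        # box corners, 1-based

closea_mask = ma.where((ii2d >= i1) & (ii2d <= i2) &
                       (jj2d >= j1) & (jj2d <= j2) &
                       (top_level == 1), 1, closea_mask)
print(closea_mask)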
Пример #42
0
    def specify(self, slab, axes, specification, confined_by, aux):
        ''' First part: confine the slab within a Domain wide enough to do the exact in post'''
        import string, copy
        from numpy.ma import minimum, maximum
        # myconfined is for later, we can't confine a dimension twice with an argument plus a keyword or 2 keywords
        myconfined = [None] * len(axes)
        self.aux = copy.copy(specification)
        # First look at the arguments (i.e not keywords) and confine the dimensions
        # in the order of the arguments
        for i in range(len(self.args)):
            if confined_by[
                    i] is None:  # Check it hasn't been confined by somebody else
                myconfined[i] = 1  # dim confined by argument list
                confined_by[
                    i] = self  # for cdms I want to confine this dimension
                self.aux[i] = specs = list(
                    self.args[i])  # How do we want to confine this dim ?
                if type(specs) == type(slice(0)):
                    specification[i] = specs  # If it's a slicing nothing to do
                else:  # But if it's not...
                    if specs[0] is None:
                        tmp = axes[i].getBounds()
                        if tmp is None:
                            raise ValueError('Region error, axis: ' +
                                             axes[i].id + ' has no bounds')
                        specs[0] = minimum(minimum(tmp[0], tmp[-1]))
                    if specs[1] is None:
                        tmp = axes[i].getBounds()
                        if tmp is None:
                            raise ValueError('Region error, axis: ' +
                                             axes[i].id + ' has no bounds')
                        specs[1] = maximum(maximum(tmp[0], tmp[-1]))
                    if axes[i].isTime():  # Time is as always "Special"
                        import cdtime
                        tc = type(cdtime.comptime(0))  # component time type
                        tr = type(cdtime.reltime(
                            0, 'months since 0'))  # relative time type
                        t = type(specs[0])  # my first spec type
                        if t == type(
                                ''):  #if my first spec is passed as a string
                            specs[0] = cdtime.s2r(specs[0], axes[i].units)
                        elif t == tc or t == tr:  #if my first spec is passed as a cdtime object
                            specs[0] = cdtime.torel(specs[0], axes[i].units)
                        else:  # If not it has to be that the users knows the time values in the axis
                            pass
                        t = type(specs[1])  # my second spec type
                        if t == type(
                                ''):  #if my second spec is passed as a string
                            specs[1] = cdtime.s2r(specs[1], axes[i].units)
                        elif t == tc or t == tr:  #if my second spec is passed as a cdtime object
                            specs[1] = cdtime.torel(specs[1], axes[i].units)
                    sp = [
                        specs[0], specs[1], 'oob'
                    ]  # Now retrieve the values wide enough to do the exact match in post
                    specification[i] = sp  # sets the specification
            else:
                return 1
        for kw in self.kargs.keys():
            axis = None
            for i in range(len(axes)):
                if axes[i].id == kw: axis = i
            if axis is None:
                if kw == 'time':
                    for i in range(len(axes)):
                        if axes[i].isTime(): axis = i
                elif kw == 'level':
                    for i in range(len(axes)):
                        if axes[i].isLevel(): axis = i
                elif kw == 'longitude':
                    for i in range(len(axes)):
                        if axes[i].isLongitude(): axis = i
                elif kw == 'latitude':
                    for i in range(len(axes)):
                        if axes[i].isLatitude(): axis = i
                elif kw not in [
                        'exact', 'atol', 'rtol'
                ]:  # keyword is neither a recognised keyword nor a dimension name
                    raise ValueError('Error, keyword: ' + kw + ' not recognized')
            # At this point, if axis is None:
            # we are dealing with a keyword for the selector
            # so we'll skip it
            if not axis is None:
                if confined_by[axis] is None:
                    confined_by[axis] = self
                    myconfined[axis] = 1
                    self.aux[axis] = specs = list(self.kargs[kw])
                    if type(specs) != type(slice(0)):
                        if specs[0] is None:
                            tmp = axes[axis].getBounds()
                            if tmp is None:
                                raise ValueError('Region error, axis: ' +
                                                 axes[axis].id + ' has no bounds')
                            specs[0] = minimum(minimum(tmp[0], tmp[-1]))
                        if specs[1] is None:
                            tmp = axes[axis].getBounds()
                            if tmp is None:
                                raise ValueError('Region error, axis: ' +
                                                 axes[axis].id + ' has no bounds')
                            specs[1] = maximum(maximum(tmp[0], tmp[-1]))
                        if axes[axis].isTime():
                            import cdtime
                            tc = type(cdtime.comptime(0))
                            tr = type(cdtime.reltime(0, 'months since 0'))
                            t = type(specs[0])
                            if t == type(''):
                                specs[0] = cdtime.s2r(specs[0], axes[i].units)
                            elif t == tc or t == tr:
                                specs[0] = cdtime.torel(
                                    specs[0], axes[i].units)
                            t = type(specs[1])
                            if t == type(''):
                                specs[1] = cdtime.s2r(specs[1], axes[i].units)
                            elif t == tc or t == tr:
                                specs[1] = cdtime.torel(
                                    specs[1], axes[i].units)
                        sp = [specs[0], specs[1], 'oob']
                        specification[axis] = sp
                    else:
                        specification[axis] = specs

                else:
                    if myconfined[axis] == 1:
                        raise ValueError('Error: you are attempting to set the axis: ' +
                                         str(axes[axis].id) + ' more than once')
                    else:
                        return 1
        return 0
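
A sketch of the bounds-endpoint idiom used in specify above, written against current NumPy: take the elementwise min/max of the first and last bounds pairs, then reduce with .min()/.max(), since the one-argument minimum()/maximum() reduction used above no longer exists. The bounds array is invented.

import numpy as np
from numpy.ma import minimum, maximum

bounds = np.array([[0.0, 10.0], [10.0, 20.0], [20.0, 30.0]])   # per-cell axis bounds

lower = minimum(bounds[0], bounds[-1]).min()   # smallest coordinate covered: 0.0
upper = maximum(bounds[0], bounds[-1]).max()   # largest coordinate covered: 30.0
print(lower, upper)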
Пример #43
0
    def _plot_with_flag(self, scan, showflag=False):
        # total number of panels to plot as a whole
        nptot = scan.nrow()
        # remaining panels to plot
        n = nptot - self._ipanel - 1
        ganged = False
        maxpanel = 25

        if n > 1:
            ganged = rcParams['plotter.ganged']
            if self._rows and self._cols:
                n = min(n,self._rows*self._cols)
                self._plotter.set_panels(rows=self._rows,cols=self._cols,
                                         nplots=n,margin=self._margins,ganged=ganged)
            else:
                n = min(n,maxpanel)
                self._plotter.set_panels(rows=n,cols=0,nplots=n,margin=self._margins,ganged=ganged)
        else:
            self._plotter.set_panels(margin=self._margins)
        #r = 0
        r = self._startrow
        # total row number of scantable
        nr = scan.nrow()
        panelcount = 0
        allylim = []
        allxlim = []
        
        while r < nr:
            # always plot to new panel
            self._plotter.subplot(panelcount)
            self._plotter.palette(0)
            # title and axes labels
            xlab = self._abcissa and self._abcissa[panelcount] \
                       or scan._getabcissalabel()
            if self._offset and not self._abcissa:
                xlab += " (relative)"
            ylab = self._ordinate and self._ordinate[panelcount] \
                   or scan._get_ordinate_label()
            self._plotter.set_axes('xlabel', xlab)
            self._plotter.set_axes('ylabel', ylab)
            lbl = self._get_label(scan, r, mode='title', userlabel=self._title)
            if type(lbl) in (list, tuple):
                if 0 <= panelcount < len(lbl):
                    lbl = lbl[panelcount]
                else:
                    # get default label
                    lbl = self._get_label(scan, r, 'title')
            self._plotter.set_axes('title',lbl)
            panelcount += 1
            # Now get data to plot
            y = scan._getspectrum(r)
            # Check for FLAGROW column
            mr = scan._getflagrow(r)
            from numpy import ma, array
            if mr:
                ys = ma.masked_array(y,mask=mr)
                if showflag:
                    yf = ma.masked_array(y, mask=(not mr))
            else:
                m = scan._getmask(r)
                from numpy import logical_not, logical_and
                if self._maskselection and len(self._usermask) == len(m):
                    if d[self._stacking](r) in self._maskselection[self._stacking]:
                        m = logical_and(m, self._usermask)
                ys = ma.masked_array(y,mask=logical_not(array(m,copy=False)))
                if showflag:
                    yf = ma.masked_array(y,mask=m)

            x = array(scan._getabcissa(r))
            if self._offset:
                x += self._offset
            #llbl = self._get_label(scan, r, mode='legend', userlabel=self._lmap)
            #if type(llbl) in (list, tuple):
            #    llbl = llbl[0]
            #self._plotter.set_line(label=llbl)
            self._plotter.set_line(label="data")
            #plotit = self._plotter.plot
            #if self._hist: plotit = self._plotter.hist
            self._plotter.plot(x,ys)
            if showflag:
                self._plotter.set_line(label="flagged")
                self._plotter.plot(x,yf)
                ylim = self._minmaxy or [min(y),max(y)]
                xlim= self._minmaxx or [min(x),max(x)]
            elif mr or ys.mask.all():
                ylim = self._minmaxy or []
                xlim = self._minmaxx or []
            else:
                ylim = self._minmaxy or [ma.minimum(ys),ma.maximum(ys)]
                xlim= self._minmaxx or [min(x),max(x)]
            allylim += ylim
            allxlim += xlim
            if (panelcount == n) or (r == nr-1):
                break
            r+=1 # next row

        # Set x- and y- limts of subplots
        if ganged:
            xlim = None
            ylim = None
            if len(allylim) > 0:
                allylim.sort()
                ylim = allylim[0],allylim[-1]
            if len(allxlim) > 0:
                allxlim.sort()
                xlim = allxlim[0],allxlim[-1]
            self._plotter.set_limits(xlim=xlim,ylim=ylim)

        # save the current counter for multi-page plotting
        self._startrow = r+1
        self._ipanel += panelcount
        if self.casabar_exists():
            if self._ipanel >= nptot-1:
                self._plotter.figmgr.casabar.disable_next()
            else:
                self._plotter.figmgr.casabar.enable_next()
            if self._ipanel + 1 - panelcount > 0:
                self._plotter.figmgr.casabar.enable_prev()
            else:
                self._plotter.figmgr.casabar.disable_prev()
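
A hedged sketch of the flag-masking step above: a boolean channel mask m marks good channels, so the plotted series masks out logical_not(m) while the "flagged" overlay masks out m itself, and y-limits taken from the masked array ignore the flagged spike. The spectrum values here are invented.

import numpy as np
from numpy import ma, array, logical_not

y = array([1.0, 5.0, 2.0, 40.0, 3.0])
m = array([True, True, True, False, True])      # True = good channel

ys = ma.masked_array(y, mask=logical_not(m))    # good data only
yf = ma.masked_array(y, mask=m)                 # flagged data only

ylim = [ys.min(), ys.max()]                     # [1.0, 5.0]; the flagged spike is ignored
print(ylim)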
Пример #44
0
    def run_model(self):
        """
        Run the model

        :return:
        """

        shape = self._shape
        tew = self._tew
        taw = self._taw

        start_time = datetime.now()

        pkcb = zeros(shape)
        swe = zeros(shape)
        tot_snow = zeros(shape)

        tot_mass = zeros(shape)
        cum_mass = zeros(shape)
        ref_et = zeros(shape)
        a_min = ones(shape) * 0.45
        a_max = ones(shape) * 0.90
        a = a_max
        pA = a_min
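        # NOTE (assumption): kr, ks, et, infil, precip and runoff are referenced
        # before assignment in the daily loop below, presumably carried over from
        # earlier runs; initializing them here keeps the snippet self-contained.
        kr = ones(shape)
        ks = ones(shape)
        et = zeros(shape)
        infil = zeros(shape)
        precip = zeros(shape)
        runoff = zeros(shape)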

        days = list(rrule.rrule(rrule.DAILY, dtstart=self._start, until=self._end))
        nsteps = len(days)
        plt_day = zeros(nsteps)
        plt_rain = zeros(nsteps)
        plt_eta = zeros(nsteps)
        plt_snow_fall = zeros(nsteps)
        plt_ro = zeros(nsteps)
        plt_dr = zeros(nsteps)
        plt_de = zeros(nsteps)
        plt_drew = zeros(nsteps)
        plt_temp = zeros(nsteps)
        plt_dp_r = zeros(nsteps)
        plt_ks = zeros(nsteps)
        plt_pdr = zeros(nsteps)
        plt_etrs = zeros(nsteps)
        plt_kcb = zeros(nsteps)
        plt_ppt = zeros(nsteps)
        plt_ke = zeros(nsteps)
        plt_kr = zeros(nsteps)
        plt_mlt = zeros(nsteps)
        plt_swe = zeros(nsteps)
        plt_tempm = zeros(nsteps)
        plt_fs1 = zeros(nsteps)
        plt_mass = zeros(nsteps)

        p_mo_et = zeros(shape)
        p_mo_precip = zeros(shape)
        p_mo_ro = zeros(shape)
        p_mo_deps = self._dr + self._de + self._drew
        p_mo_infil = zeros(shape)
        p_mo_etrs = zeros(shape)

        p_yr_et = zeros(shape)
        p_yr_precip = zeros(shape)
        p_yr_ro = zeros(shape)
        p_yr_deps = self._dr + self._de + self._drew
        p_yr_infil = zeros(shape)
        p_yr_etrs = zeros(shape)

        start_month = self._start_month
        end_month = self._end_month

        for i, dday in enumerate(days):
            if i > 0:
                pkcb = kcb
            doy = dday.timetuple().tm_yday
            year = dday.year
            month = dday.month
            day = dday.day

            msg = 'Time : {} day {}_{}'.format(datetime.now() - start_time, doy, year)
            logging.debug(msg)

            # --------------  kcb -------------------
            if year == 2000:
                ndvi = self.calculate_ndvi_2000(doy)
            elif year == 2001:
                ndvi = self.calculate_ndvi_2001(doy)
            else:
                ndvi = self.calculate_ndvi(year, doy)

            kcb = ndvi * 1.25
            kcb = maximum(kcb, self._min_val)
            kcb = where(isnan(kcb), pkcb, kcb)

            # -------------- PRISM -------------------
            ppt, ppt_tom, max_temp, min_temp, mid_temp = self.load_prism(dday)

            # -------------- PM -------------------
            # PM data to etrs
            name = os.path.join('PM{}'.format(year),
                                'PM_NM_{}_{:03n}'.format(year, doy))
            etrs = tif_to_array(self._pm_data_root, name)
            etrs = maximum(etrs, self._min_val)

            name = os.path.join('PM{}'.format(year),
                                'RLIN_NM_{}_{:03n}'.format(year, doy))
            rlin = tif_to_array(self._pm_data_root, name)
            rlin = maximum(rlin, zeros(shape))

            name = os.path.join('rad{}'.format(year),
                                'RTOT_{}_{:03n}'.format(year, doy))
            rg = tif_to_array(self._pm_data_root, name)
            rg = maximum(rg, zeros(shape))

            if i == 0:
                #  Total evaporable water is depth of water in the evaporable
                #  soil layer, i.e., the water available to both stage 1 and 2 evaporation
                rew = minimum((2 + (tew / 3.)), 0.8 * tew)
                # del tew1, tew2

                # you should have all these from previous model runs
                pDr = self._dr
                pDe = self._de
                pDrew = self._drew
                dr = self._dr
                de = self._de
                drew = self._drew

            nom = 2 if start_month <= doy <= end_month else 6
            ksat = self._ksat * nom / 24.

            kc_max_1 = kcb + 0.0001
            min_val = ones(shape) * 0.0001
            kc_max = maximum(min_val, kc_max_1)

            self._nlcd_plt_hgt = self._nlcd_plt_hgt * 0.5 + 1
            numr = maximum(kcb - self._kc_min, min_val * 10)
            denom = maximum((kc_max - self._kc_min), min_val * 10)
            fcov_ref = (numr / denom) ** self._nlcd_plt_hgt
            fcov_min = minimum(fcov_ref, ones(shape))
            fcov = maximum(fcov_min, min_val * 10)
            few = maximum(1 - fcov, 0.01)  # exposed ground

            pKr = kr
            kr = minimum(((tew - de) / (tew - rew)), ones(shape))
            kr = where(isnan(kr), pKr, kr)

            pKs = ks
            ks_ref = where(((taw - pDr) / (0.6 * taw)) < zeros(shape), ones(shape) * 0.001,
                           ((taw - pDr) / (0.6 * taw)))
            ks_ref = where(isnan(ks), pKs, ks_ref)
            ks = minimum(ks_ref, ones(shape))

            # Ke evaporation reduction coefficient; stage 1 evaporation
            fsa = where(isnan((rew - drew) / (KE_MAX * etrs)), zeros(shape),
                        (rew - drew) / (KE_MAX * etrs))
            fsb = minimum(fsa, ones(shape))
            fs1 = maximum(fsb, zeros(shape))
            ke = where(drew < rew, minimum((fs1 + (1 - fs1) * kr) * (kc_max - ks * kcb), few * kc_max),
                       zeros(shape))

            transp = (ks * kcb) * etrs
            et_init = (ks * kcb + ke) * etrs
            eta = maximum(et_init, zeros(shape))
            evap_init = ke * etrs
            evap_min = maximum(evap_init, zeros(shape))
            evap = minimum(evap_min, kc_max)

            # Load temp, find swe, melt, and precipitation, load Ksat
            # Use SNOTEL data for precip and temps:
            # df_snow : (stel_date, stel_snow, stel_precip, stel_tobs, stel_tmax, stel_tmin, stel_tavg, stel_snwd)

            snow_fall = where(mid_temp <= 0.0, ppt, zeros(shape))
            rain = where(mid_temp >= 0.0, ppt, zeros(shape))

            pA = a
            a = where(snow_fall > 3.0, ones(shape) * a_max, a)
            a = where(snow_fall <= 3.0, a_min + (pA - a_min) * exp(-0.12), a)
            a = where(snow_fall == 0.0, a_min + (pA - a_min) * exp(-0.05), a)
            a = where(a < a_min, a_min, a)

            swe += snow_fall

            mlt_init = maximum(((1 - a) * rg * 0.2) + (mid_temp - 1.8) * 11.0,
                               zeros(shape))  # use calibrated coefficients
            mlt = minimum(swe, mlt_init)

            swe -= mlt

            # Find depletions
            pDr = dr
            pDe = de
            pDrew = drew
            watr = rain + mlt
            deps = dr + de + drew

            ro = zeros(shape)
            ro = where(watr > ksat + deps, watr - ksat - deps, ro)
            ro = maximum(ro, zeros(shape))

            dp_r = zeros(shape)
            id1 = where(watr > deps, ones(shape), zeros(shape))
            id2 = where(ksat > watr - deps, ones(shape), zeros(shape))
            dp_r = where(id1 + id2 > 1.99, maximum(watr - deps, zeros(shape)), dp_r)

            dp_r = where(watr > ksat + deps, ksat, dp_r)
            dp_r = maximum(dp_r, zeros(shape))

            drew_1 = minimum((pDrew + ro + (evap - (rain + mlt))), rew)
            drew = maximum(drew_1, zeros(shape))
            diff = maximum(pDrew - drew, zeros(shape))

            de_1 = minimum((pDe + (evap - (rain + mlt - diff))), tew)
            de = maximum(de_1, zeros(shape))
            diff = maximum(((pDrew - drew) + (pDe - de)), zeros(shape))

            dr_1 = minimum((pDr + ((transp + dp_r) - (rain + mlt - diff))), taw)
            dr = maximum(dr_1, zeros(shape))

            # Create cumulative rasters to show net over entire run

            infil += dp_r
            infil = maximum(infil, zeros(shape))

            prev_et = et
            ref_et += etrs
            et = et + evap + transp
            et_ind = et / ref_et
            et = where(isnan(et) == True, prev_et, et)
            et = where(et > ref_et, ref_et / 2., et)
            et = maximum(et, ones(shape) * 0.001)

            precip = precip + rain + snow_fall
            precip = maximum(precip, zeros(shape))

            runoff += ro
            runoff = maximum(runoff, zeros(shape))

            snow_ras = swe + snow_fall - mlt
            snow_ras = maximum(snow_ras, zeros(shape))

            tot_snow += snow_fall

            mo_date = calendar.monthrange(year, month)
            if day == mo_date[1]:
                infil_mo = infil - p_mo_infil
                infil_mo = maximum(infil_mo, zeros(shape))

                ref_et_mo = etrs - p_mo_etrs
                et_mo = et - p_mo_et
                et_mo = where(isnan(et_mo) == True, p_mo_et, et_mo)
                et_mo = where(et_mo > ref_et, ref_et / 2., et_mo)
                et_mo = maximum(et_mo, ones(shape) * 0.001)

                precip_mo = precip - p_mo_precip
                precip_mo = maximum(precip_mo, zeros(shape))

                runoff_mo = ro - p_mo_ro
                runoff_mo = maximum(runoff_mo, zeros(shape))

                snow_ras_mo = swe
                snow_ras_mo = maximum(snow_ras_mo, zeros(shape))

                deps_mo = drew + de + dr
                delta_s_mo = p_mo_deps - deps_mo

                outputs = (('infil', infil_mo), ('et', et_mo), ('precip', precip_mo), ('runoff', runoff_mo),
                           ('snow_ras', snow_ras_mo), ('delta_s_mo', delta_s_mo), ('deps_mo', deps_mo))

                self.save_month_step(outputs, month, year)

                p_mo_et = et
                p_mo_precip = precip
                p_mo_ro = ro
                p_mo_deps = deps_mo
                p_mo_infil = infil
                p_mo_etrs = etrs

            if day == 31 and month == 12:
                infil_yr = infil - p_yr_infil
                infil_yr = maximum(infil_yr, zeros(shape))

                ref_et_yr = etrs - p_yr_etrs
                et_yr = et - p_yr_et
                et_yr = where(isnan(et_yr) == True, p_yr_et, et_yr)
                et_yr = where(et_yr > ref_et, ref_et / 2., et_yr)
                et_yr = maximum(et_yr, ones(shape) * 0.001)

                precip_yr = precip - p_yr_precip
                precip_yr = maximum(precip_yr, zeros(shape))

                runoff_yr = ro - p_yr_ro
                runoff_yr = maximum(runoff_yr, zeros(shape))

                snow_ras_yr = swe
                snow_ras_yr = maximum(snow_ras_yr, zeros(shape))

                deps_yr = drew + de + dr
                delta_s_yr = p_yr_deps - deps_yr

                outputs = (('infil', infil_yr), ('et', et_yr), ('precip', precip_yr), ('runoff', runoff_yr),
                           ('snow_ras', snow_ras_yr), ('delta_s_yr', delta_s_yr), ('deps_yr', deps_yr))

                p_yr_et = et
                p_yr_precip = precip
                p_yr_ro = ro
                p_yr_deps = deps_yr  # the original assigned p_mo_deps here, which looks like a typo
                p_yr_infil = infil
                p_yr_etrs = etrs

                self.save_year_step(outputs, month, year)

            # Check MASS BALANCE for the love of WATER!!!
            mass = rain + mlt - (ro + transp + evap + dp_r + ((pDr - dr) + (pDe - de) + (pDrew - drew)))
            tot_mass += abs(mass)
            cum_mass += mass

            plt_day[i] = dday

            plt_rain[i] = rain[S, E]
            plt_eta[i] = eta[S, E]
            plt_snow_fall[i] = snow_fall[S, E]
            plt_ro[i] = ro[S, E]
            plt_dr[i] = dr[S, E]
            plt_de[i] = de[S, E]
            plt_drew[i] = drew[S, E]
            plt_temp[i] = mid_temp[S, E]
            plt_dp_r[i] = dp_r[S, E]
            plt_ks[i] = ks[S, E]
            plt_pdr[i] = pDr[S, E]
            plt_etrs[i] = etrs[S, E]
            plt_kcb[i] = kcb[S, E]
            plt_ppt[i] = ppt[S, E]
            plt_ke[i] = ke[S, E]
            plt_kr[i] = kr[S, E]
            plt_mlt[i] = mlt[S, E]
            plt_swe[i] = swe[S, E]
            plt_tempm[i] = max_temp[S, E]
            plt_fs1[i] = fs1[S, E]
            plt_mass[i] = mass[S, E]
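
A sketch of the clamp pattern that run_model uses throughout: nesting elementwise minimum()/maximum() bounds an array to [0, 1], which is equivalent to np.clip. The input values are arbitrary.

import numpy as np
from numpy import minimum, maximum, zeros, ones

shape = (3,)
fsa = np.array([-0.2, 0.4, 1.7])

fsb = minimum(fsa, ones(shape))      # cap at 1
fs1 = maximum(fsb, zeros(shape))     # floor at 0
print(fs1)                           # [0.  0.4 1. ]
print(np.clip(fsa, 0.0, 1.0))        # same result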
Пример #45
0
    def __init__(self, ax, *args, **kwargs):
        """
        Draw contour lines or filled regions, depending on
        whether keyword arg 'filled' is False (default) or True.

        The first argument of the initializer must be an axes
        object.  The remaining arguments and keyword arguments
        are described in ContourSet.contour_doc.

        """
        self.ax = ax
        self.levels = kwargs.get("levels", None)
        self.filled = kwargs.get("filled", False)
        self.linewidths = kwargs.get("linewidths", None)
        self.linestyles = kwargs.get("linestyles", None)

        self.alpha = kwargs.get("alpha", 1.0)
        self.origin = kwargs.get("origin", None)
        self.extent = kwargs.get("extent", None)
        cmap = kwargs.get("cmap", None)
        self.colors = kwargs.get("colors", None)
        norm = kwargs.get("norm", None)
        self.extend = kwargs.get("extend", "neither")
        self.antialiased = kwargs.get("antialiased", True)
        self.nchunk = kwargs.get("nchunk", 0)
        self.locator = kwargs.get("locator", None)
        if isinstance(norm, colors.LogNorm) or isinstance(self.locator, ticker.LogLocator):
            self.logscale = True
            if norm is None:
                norm = colors.LogNorm()
            if self.extend != "neither":
                raise ValueError("extend kwarg does not work yet with log scale")
        else:
            self.logscale = False

        if self.origin is not None:
            assert self.origin in ["lower", "upper", "image"]
        if self.extent is not None:
            assert len(self.extent) == 4
        if cmap is not None:
            assert isinstance(cmap, colors.Colormap)
        if self.colors is not None and cmap is not None:
            raise ValueError("Either colors or cmap must be None")
        if self.origin == "image":
            self.origin = mpl.rcParams["image.origin"]
        x, y, z = self._contour_args(*args)  # also sets self.levels,
        #  self.layers
        if self.colors is not None:
            cmap = colors.ListedColormap(self.colors, N=len(self.layers))
        if self.filled:
            self.collections = cbook.silent_list("collections.PolyCollection")
        else:
            self.collections = cbook.silent_list("collections.LineCollection")
        # label lists must be initialized here
        self.labelTexts = []
        self.labelCValues = []

        kw = {"cmap": cmap}
        if norm is not None:
            kw["norm"] = norm
        cm.ScalarMappable.__init__(self, **kw)  # sets self.cmap;
        self._process_colors()
        _mask = ma.getmask(z)
        if _mask is ma.nomask:
            _mask = None

        if self.filled:
            if self.linewidths is not None:
                warnings.warn("linewidths is ignored by contourf")
            C = _cntr.Cntr(x, y, z.filled(), _mask)
            lowers = self._levels[:-1]
            uppers = self._levels[1:]
            for level, level_upper in zip(lowers, uppers):
                nlist = C.trace(level, level_upper, points=0, nchunk=self.nchunk)
                col = collections.PolyCollection(
                    nlist, antialiaseds=(self.antialiased,), edgecolors="none", alpha=self.alpha
                )
                self.ax.add_collection(col)
                self.collections.append(col)

        else:
            tlinewidths = self._process_linewidths()
            self.tlinewidths = tlinewidths
            tlinestyles = self._process_linestyles()
            C = _cntr.Cntr(x, y, z.filled(), _mask)
            for level, width, lstyle in zip(self.levels, tlinewidths, tlinestyles):
                nlist = C.trace(level, points=0)
                col = collections.LineCollection(nlist, linewidths=width, linestyle=lstyle, alpha=self.alpha)

                col.set_label("_nolegend_")
                self.ax.add_collection(col, False)
                self.collections.append(col)
        self.changed()  # set the colors
        x0 = ma.minimum(x)
        x1 = ma.maximum(x)
        y0 = ma.minimum(y)
        y1 = ma.maximum(y)
        self.ax.update_datalim([(x0, y0), (x1, y1)])
        self.ax.autoscale_view()
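
A minimal sketch (not part of the example above) of the single-argument reduction used for the data limits: newer NumPy versions deprecate calling ma.minimum(x) / ma.maximum(x) with one argument, but the masked .min()/.max() methods return the same mask-aware bounds.

import numpy.ma as ma

x = ma.array([[0.0, 2.5], [7.0, 1.0]], mask=[[False, True], [False, False]])
y = ma.array([[1.0, 1.0], [3.0, 9.0]], mask=[[False, False], [True, False]])

x0, x1 = float(x.min()), float(x.max())  # 0.0, 7.0 -- the masked 2.5 is ignored
y0, y1 = float(y.min()), float(y.max())  # 1.0, 9.0 -- the masked 3.0 is ignored
datalim = [(x0, y0), (x1, y1)]           # same corner pairs passed to update_datalim
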
Example #46
0
def comp_r(dert__, fig, root_fcr):
    '''
    Cross-comparison of input param (dert[0]) over rng passed from intra_blob.
    This fork is selective for blobs with below-average gradient,
    where input intensity didn't vary much in shorter-range cross-comparison.
    Such input is predictable enough for selective sampling: skipping current
    rim derts as kernel-central derts in following comparison kernels.
    Skipping forms increasingly sparse output dert__ for greater-range cross-comp, hence
    rng (distance between centers of compared derts) increases as 2^n, starting at 0:
    rng = 1: 3x3 kernel,
    rng = 2: 5x5 kernel,
    rng = 4: 9x9 kernel,
    ...
    Due to skipping, configuration of input derts in next-rng kernel will always be 3x3, see:
    https://github.com/boris-kz/CogAlg/blob/master/frame_2D_alg/Illustrations/intra_comp_diagrams.png
    '''
    # initialize new dert structure
    new_dert__ = ma.zeros((dert__.shape[0], (dert__.shape[1] - 1) // 2,
                           (dert__.shape[2] - 1) // 2),
                          dtype=dert__.dtype)
    new_dert__.mask = True
    # extract new_dert__ 'views', use [:] to 'update' views and new_dert__ at the same time
    i__center, idy__, idx__, g__, dy__, dx__, m__ = new_dert__

    i__ = dert__[0]  # i is ig if fig else pixel
    '''
    sparse aligned i__center and i__rim arrays:
    '''
    i__center[:] = i__[1:-1:2, 1:-1:2]  # also assignment to new_dert__[0]
    i__topleft = i__[:-2:2, :-2:2]
    i__top = i__[:-2:2, 1:-1:2]
    i__topright = i__[:-2:2, 2::2]
    i__right = i__[1:-1:2, 2::2]
    i__bottomright = i__[2::2, 2::2]
    i__bottom = i__[2::2, 1:-1:2]
    i__bottomleft = i__[2::2, :-2:2]
    i__left = i__[1:-1:2, :-2:2]
    ''' 
    unmask all derts in kernels with only one masked dert (can be set to any number of masked derts), 
    to avoid extreme blob shrinking and loss of info in other derts of partially masked kernels
    unmasked derts were computed due to extend_dert() in intra_blob   
    '''
    majority_mask = (i__center.mask.astype(int) +
                     i__topleft.mask.astype(int) +
                     i__top.mask.astype(int) +
                     i__topright.mask.astype(int) +
                     i__right.mask.astype(int) +
                     i__bottomright.mask.astype(int) +
                     i__bottom.mask.astype(int) +
                     i__bottomleft.mask.astype(int) +
                     i__left.mask.astype(int)) > 1
    i__center.mask = i__topleft.mask = i__top.mask = i__topright.mask = i__right.mask = i__bottomright.mask = \
    i__bottom.mask = i__bottomleft.mask = i__left.mask = majority_mask  # not only i__center

    idy__[:], idx__[:] = dert__[[1, 2], 1:-1:2, 1:-1:2]

    if root_fcr:  # root fork is comp_r, accumulate derivatives:

        dy__[:] = dert__[4, 1:-1:2, 1:-1:2]  # sparse to align with i__center
        dx__[:] = dert__[5, 1:-1:2, 1:-1:2]
        m__[:] = dert__[6, 1:-1:2, 1:-1:2]

    dy__.mask = dx__.mask = m__.mask = majority_mask

    if not fig:  # compare four diametrically opposed pairs of rim pixels:

        d_tl_br = i__topleft.data - i__bottomright.data
        d_t_b = i__top.data - i__bottom.data
        d_tr_bl = i__topright.data - i__bottomleft.data
        d_r_l = i__right.data - i__left.data

        dy__ += (d_tl_br * YCOEFs[0] + d_t_b * YCOEFs[1] +
                 d_tr_bl * YCOEFs[2] + d_r_l * YCOEFs[3])

        dx__ += (d_tl_br * XCOEFs[0] + d_t_b * XCOEFs[1] +
                 d_tr_bl * XCOEFs[2] + d_r_l * XCOEFs[3])

        g__[:] = ma.hypot(dy__, dx__)  # gradient
        '''
        inverse match = SAD, direction-invariant and more precise measure of variation than g
        (all diagonal derivatives can be imported from prior 2x2 comp)
        '''
        m__ += (abs(i__center.data - i__topleft.data) +
                abs(i__center.data - i__top.data) +
                abs(i__center.data - i__topright.data) +
                abs(i__center.data - i__right.data) +
                abs(i__center.data - i__bottomright.data) +
                abs(i__center.data - i__bottom.data) +
                abs(i__center.data - i__bottomleft.data) +
                abs(i__center.data - i__left.data))

    else:  # fig is TRUE, compare angle and then magnitude of 8 center-rim pairs

        i__[ma.where(i__ == 0)] = 1  # to avoid / 0
        a__ = dert__[[1, 2]] / i__  # sin = idy / i, cos = idx / i, i = ig
        '''
        sparse aligned a__center and a__rim arrays:
        '''
        a__center = a__[:, 1:-1:2, 1:-1:2]
        a__topleft = a__[:, :-2:2, :-2:2]
        a__top = a__[:, :-2:2, 1:-1:2]
        a__topright = a__[:, :-2:2, 2::2]
        a__right = a__[:, 1:-1:2, 2::2]
        a__bottomright = a__[:, 2::2, 2::2]
        a__bottom = a__[:, 2::2, 1:-1:2]
        a__bottomleft = a__[:, 2::2, :-2:2]
        a__left = a__[:, 1:-1:2, :-2:2]
        ''' 
        only mask kernels with more than one masked dert, for all operations below: 
        '''
        majority_mask_a = (a__[:, 1:-1:2, 1:-1:2].mask.astype(int) +
                           a__[:, :-2:2, :-2:2].mask.astype(int) +
                           a__[:, :-2:2, 1:-1:2].mask.astype(int) +
                           a__[:, :-2:2, 2::2].mask.astype(int) +
                           a__[:, 1:-1:2, 2::2].mask.astype(int) +
                           a__[:, 2::2, 2::2].mask.astype(int) +
                           a__[:, 2::2, 1:-1:2].mask.astype(int) +
                           a__[:, 2::2, :-2:2].mask.astype(int) +
                           a__[:, 1:-1:2, :-2:2].mask.astype(int)) > 1
        a__center.mask = a__topleft.mask = a__top.mask = a__topright.mask = a__right.mask = a__bottomright.mask = \
        a__bottom.mask = a__bottomleft.mask = a__left.mask = majority_mask_a

        # sanity check: the sin and cos channels carry identical masks,
        # so majority_mask_a[0] can represent both below
        assert (majority_mask_a[0] == majority_mask_a[1]).all()
        dy__.mask = dx__.mask = m__.mask = majority_mask_a[0]
        '''
        8-tuple of differences between central dert angle and rim dert angle:
        '''
        cos_da = [((a__topleft[1].data * a__center[1].data) +
                   (a__center[0].data * a__topleft[0].data)),
                  ((a__top[1].data * a__center[1].data) +
                   (a__center[0].data * a__top[0].data)),
                  ((a__topright[1].data * a__center[1].data) +
                   (a__center[0].data * a__topright[0].data)),
                  ((a__right[1].data * a__center[1].data) +
                   (a__center[0].data * a__right[0].data)),
                  ((a__bottomright[1].data * a__center[1].data) +
                   (a__center[0].data * a__bottomright[0].data)),
                  ((a__bottom[1].data * a__center[1].data) +
                   (a__center[0].data * a__bottom[0].data)),
                  ((a__bottomleft[1].data * a__center[1].data) +
                   (a__center[0].data * a__bottomleft[0].data)),
                  ((a__left[1].data * a__center[1].data) +
                   (a__center[0].data * a__left[0].data))]
        '''
        8-tuple of cosine matches per direction:
        '''
        m__ += (ma.minimum(i__center.data, i__topleft.data) * cos_da[0] +
                ma.minimum(i__center.data, i__top.data) * cos_da[1] +
                ma.minimum(i__center.data, i__topright.data) * cos_da[2] +
                ma.minimum(i__center.data, i__right.data) * cos_da[3] +
                ma.minimum(i__center.data, i__bottomright.data) * cos_da[4] +
                ma.minimum(i__center.data, i__bottom.data) * cos_da[5] +
                ma.minimum(i__center.data, i__bottomleft.data) * cos_da[6] +
                ma.minimum(i__center.data, i__left.data) * cos_da[7])
        '''
        8-tuple of cosine differences per direction:
        '''
        dt__ = [(i__center.data - i__topleft.data * cos_da[0]),
                (i__center.data - i__top.data * cos_da[1]),
                (i__center.data - i__topright.data * cos_da[2]),
                (i__center.data - i__right.data * cos_da[3]),
                (i__center.data - i__bottomright.data * cos_da[4]),
                (i__center.data - i__bottom.data * cos_da[5]),
                (i__center.data - i__bottomleft.data * cos_da[6]),
                (i__center.data - i__left.data * cos_da[7])]
        for d__, YCOEF, XCOEF in zip(dt__, YCOEFs, XCOEFs):

            dy__ += d__ * YCOEF  # decompose differences into dy and dx,
            dx__ += d__ * XCOEF  # accumulate with prior-rng dy, dx
            '''
            accumulate in prior-range dy, dx: 3x3 -> 5x5 -> 9x9 
            '''
        g__[:] = ma.hypot(dy__, dx__)
    '''
    next comp_r will use full dert       
    next comp_g will use g__, dy__, dx__
    '''
    return new_dert__  # new_dert__ has been updated along with 'view' arrays: i__center, idy__, idx__, g__, dy__, dx__, m__
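
A hedged, self-contained sketch of the majority-mask step above: the integer-cast masks of the nine strided views are summed, and only kernels with more than one masked member are masked in the output. The 5x5 input and the variable names here are illustrative only.

import numpy as np
import numpy.ma as ma

i__ = ma.array(np.arange(25, dtype=float).reshape(5, 5))
i__[0, 0] = ma.masked                      # one masked dert in the top-left kernel

views = (i__[1:-1:2, 1:-1:2],              # center
         i__[:-2:2, :-2:2],  i__[:-2:2, 1:-1:2],  i__[:-2:2, 2::2],
         i__[1:-1:2, 2::2],  i__[2::2, 2::2],     i__[2::2, 1:-1:2],
         i__[2::2, :-2:2],   i__[1:-1:2, :-2:2])

majority_mask = sum(v.mask.astype(int) for v in views) > 1
# a single masked rim dert is tolerated, so nothing is masked here:
assert not majority_mask.any()
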
Example #47
0
    def run_model(self):
        """
        Run the model

        :return:
        """

        shape = self._shape
        tew = self._tew
        taw = self._taw

        start_time = datetime.now()

        pkcb = zeros(shape)
        swe = zeros(shape)
        tot_snow = zeros(shape)

        tot_mass = zeros(shape)
        cum_mass = zeros(shape)
        ref_et = zeros(shape)
        a_min = ones(shape) * 0.45
        a_max = ones(shape) * 0.90
        a = a_max
        pA = a_min

        # the accumulators and coefficients below are used before assignment in
        # the loop but were not initialized in the original snippet; these
        # starting values are assumptions
        kr = ones(shape)
        ks = ones(shape)
        et = zeros(shape)
        precip = zeros(shape)
        runoff = zeros(shape)
        infil = zeros(shape)

        days = list(
            rrule.rrule(rrule.DAILY, dtstart=self._start, until=self._end))
        nsteps = len(days)
        plt_day = zeros(nsteps)
        plt_rain = zeros(nsteps)
        plt_eta = zeros(nsteps)
        plt_snow_fall = zeros(nsteps)
        plt_ro = zeros(nsteps)
        plt_dr = zeros(nsteps)
        plt_de = zeros(nsteps)
        plt_drew = zeros(nsteps)
        plt_temp = zeros(nsteps)
        plt_dp_r = zeros(nsteps)
        plt_ks = zeros(nsteps)
        plt_pdr = zeros(nsteps)
        plt_etrs = zeros(nsteps)
        plt_kcb = zeros(nsteps)
        plt_ppt = zeros(nsteps)
        plt_ke = zeros(nsteps)
        plt_kr = zeros(nsteps)
        plt_mlt = zeros(nsteps)
        plt_swe = zeros(nsteps)
        plt_tempm = zeros(nsteps)
        plt_fs1 = zeros(nsteps)
        plt_mass = zeros(nsteps)

        p_mo_et = zeros(shape)
        p_mo_precip = zeros(shape)
        p_mo_ro = zeros(shape)
        p_mo_deps = self._dr + self._de + self._drew
        p_mo_infil = zeros(shape)
        p_mo_etrs = zeros(shape)

        p_yr_et = zeros(shape)
        p_yr_precip = zeros(shape)
        p_yr_ro = zeros(shape)
        p_yr_deps = self._dr + self._de + self._drew
        p_yr_infil = zeros(shape)
        p_yr_etrs = zeros(shape)

        start_month = self._start_month
        end_month = self._end_month

        for i, dday in enumerate(days):
            if i > 0:
                pkcb = kcb
            doy = dday.timetuple().tm_yday
            year = dday.year
            month = dday.month
            day = dday.day

            msg = 'Time : {} day {}_{}'.format(datetime.now() - start_time,
                                               doy, year)
            logging.debug(msg)

            # --------------  kcb -------------------
            if year == 2000:
                ndvi = self.calculate_ndvi_2000(doy)
            elif year == 2001:
                ndvi = self.calculate_ndvi_2001(doy)
            else:
                ndvi = self.calculate_ndvi(year, doy)

            kcb = ndvi * 1.25
            kcb = maximum(kcb, self._min_val)
            kcb = where(isnan(kcb), pkcb, kcb)

            # -------------- PRISM -------------------
            ppt, ppt_tom, max_temp, min_temp, mid_temp = self.load_prism(dday)

            # -------------- PM -------------------
            # PM data to etrs
            name = os.path.join('PM{}'.format(year),
                                'PM_NM_{}_{:03n}'.format(year, doy))
            etrs = tif_to_array(self._pm_data_root, name)
            etrs = maximum(etrs, self._min_val)

            name = os.path.join('PM{}'.format(year),
                                'RLIN_NM_{}_{:03n}'.format(year, doy))
            rlin = tif_to_array(self._pm_data_root, name)
            rlin = maximum(rlin, zeros(shape))

            name = os.path.join('rad{}'.format(year),
                                'RTOT_{}_{:03n}'.format(year, doy))
            rg = tif_to_array(self._pm_data_root, name)
            rg = maximum(rg, zeros(shape))

            if i == 0:
                #  Total evaporable water is depth of water in the evaporable
                #  soil layer, i.e., the water available to both stage 1 and 2 evaporation
                rew = minimum((2 + (tew / 3.)), 0.8 * tew)
                # del tew1, tew2

                # you should have all these from previous model runs
                pDr = self._dr
                pDe = self._de
                pDrew = self._drew
                dr = self._dr
                de = self._de
                drew = self._drew

            nom = 2 if start_month <= doy <= end_month else 6
            ksat = self._ksat * nom / 24.

            kc_max_1 = kcb + 0.0001
            min_val = ones(shape) * 0.0001
            kc_max = maximum(min_val, kc_max_1)

            self._nlcd_plt_hgt = self._nlcd_plt_hgt * 0.5 + 1
            numr = maximum(kcb - self._kc_min, min_val * 10)
            denom = maximum((kc_max - self._kc_min), min_val * 10)
            fcov_ref = (numr / denom)**self._nlcd_plt_hgt
            fcov_min = minimum(fcov_ref, ones(shape))
            fcov = maximum(fcov_min, min_val * 10)
            few = maximum(1 - fcov, 0.01)  # exposed ground

            pKr = kr
            kr = minimum(((tew - de) / (tew - rew)), ones(shape))
            kr = where(isnan(kr), pKr, kr)

            pKs = ks
            ks_ref = where(((taw - pDr) / (0.6 * taw)) < zeros(shape),
                           ones(shape) * 0.001, ((taw - pDr) / (0.6 * taw)))
            ks_ref = where(isnan(ks), pKs, ks_ref)
            ks = minimum(ks_ref, ones(shape))

            # Ke evaporation reduction coefficient; stage 1 evaporation
            fsa = where(isnan((rew - drew) / (KE_MAX * etrs)), zeros(shape),
                        (rew - drew) / (KE_MAX * etrs))
            fsb = minimum(fsa, ones(shape))
            fs1 = maximum(fsb, zeros(shape))
            ke = where(
                drew < rew,
                minimum((fs1 + (1 - fs1) * kr) * (kc_max - ks * kcb),
                        few * kc_max), zeros(shape))

            transp = (ks * kcb) * etrs
            et_init = (ks * kcb + ke) * etrs
            eta = maximum(et_init, zeros(shape))
            evap_init = ke * etrs
            evap_min = maximum(evap_init, zeros(shape))
            evap = minimum(evap_min, kc_max)

            # Load temp, find swe, melt, and precipitation, load Ksat
            # Use SNOTEL data for precip and temps:
            # df_snow : (stel_date, stel_snow, stel_precip, stel_tobs, stel_tmax, stel_tmin, stel_tavg, stel_snwd)

            snow_fall = where(mid_temp <= 0.0, ppt, zeros(shape))
            rain = where(mid_temp >= 0.0, ppt, zeros(shape))

            pA = a
            a = where(snow_fall > 3.0, ones(shape) * a_max, a)
            a = where(snow_fall <= 3.0, a_min + (pA - a_min) * exp(-0.12), a)
            a = where(snow_fall == 0.0, a_min + (pA - a_min) * exp(-0.05), a)
            a = where(a < a_min, a_min, a)

            swe += snow_fall

            mlt_init = maximum(((1 - a) * rg * 0.2) + (mid_temp - 1.8) * 11.0,
                               zeros(shape))  # use calibrate coefficients
            mlt = minimum(swe, mlt_init)

            swe -= mlt

            # Find depletions
            pDr = dr
            pDe = de
            pDrew = drew
            watr = rain + mlt
            deps = dr + de + drew

            ro = zeros(shape)
            ro = where(watr > ksat + deps, watr - ksat - deps, ro)
            ro = maximum(ro, zeros(shape))

            dp_r = zeros(shape)
            id1 = where(watr > deps, ones(shape), zeros(shape))
            id2 = where(ksat > watr - deps, ones(shape), zeros(shape))
            dp_r = where(id1 + id2 > 1.99, maximum(watr - deps, zeros(shape)),
                         dp_r)

            dp_r = where(watr > ksat + deps, ksat, dp_r)
            dp_r = maximum(dp_r, zeros(shape))

            drew_1 = minimum((pDrew + ro + (evap - (rain + mlt))), rew)
            drew = maximum(drew_1, zeros(shape))
            diff = maximum(pDrew - drew, zeros(shape))

            de_1 = minimum((pDe + (evap - (rain + mlt - diff))), tew)
            de = maximum(de_1, zeros(shape))
            diff = maximum(((pDrew - drew) + (pDe - de)), zeros(shape))

            dr_1 = minimum((pDr + ((transp + dp_r) - (rain + mlt - diff))),
                           taw)
            dr = maximum(dr_1, zeros(shape))

            # Create cumulative rasters to show net over entire run

            infil += dp_r
            infil = maximum(infil, zeros(shape))

            prev_et = et
            ref_et += etrs
            et = et + evap + transp
            et_ind = et / ref_et
            et = where(isnan(et), prev_et, et)
            et = where(et > ref_et, ref_et / 2., et)
            et = maximum(et, ones(shape) * 0.001)

            precip = precip + rain + snow_fall
            precip = maximum(precip, zeros(shape))

            runoff += ro
            runoff = maximum(runoff, zeros(shape))

            snow_ras = swe + snow_fall - mlt
            snow_ras = maximum(snow_ras, zeros(shape))

            tot_snow += snow_fall

            mo_date = calendar.monthrange(year, month)
            if day == mo_date[1]:
                infil_mo = infil - p_mo_infil
                infil_mo = maximum(infil_mo, zeros(shape))

                ref_et_mo = etrs - p_mo_etrs
                et_mo = et - p_mo_et
                et_mo = where(isnan(et_mo), p_mo_et, et_mo)
                et_mo = where(et_mo > ref_et, ref_et / 2., et_mo)
                et_mo = maximum(et_mo, ones(shape) * 0.001)

                precip_mo = precip - p_mo_precip
                precip_mo = maximum(precip_mo, zeros(shape))

                runoff_mo = ro - p_mo_ro
                runoff_mo = maximum(runoff_mo, zeros(shape))

                snow_ras_mo = swe
                snow_ras_mo = maximum(snow_ras_mo, zeros(shape))

                deps_mo = drew + de + dr
                delta_s_mo = p_mo_deps - deps_mo

                outputs = (('infil', infil_mo), ('et', et_mo), ('precip',
                                                                precip_mo),
                           ('runoff', runoff_mo), ('snow_ras', snow_ras_mo),
                           ('delta_s_mo', delta_s_mo), ('deps_mo', deps_mo))

                self.save_month_step(outputs, month, year)

                p_mo_et = et
                p_mo_precip = precip
                p_mo_ro = ro
                p_mo_deps = deps_mo
                p_mo_infil = infil
                p_mo_etrs = etrs

            if day == 31 and month == 12:
                infil_yr = infil - p_yr_infil
                infil_yr = maximum(infil_yr, zeros(shape))

                ref_et_yr = etrs - p_yr_etrs
                et_yr = et - p_yr_et
                et_yr = where(isnan(et_yr), p_yr_et, et_yr)
                et_yr = where(et_yr > ref_et, ref_et / 2., et_yr)
                et_yr = maximum(et_yr, ones(shape) * 0.001)

                precip_yr = precip - p_yr_precip
                precip_yr = maximum(precip_yr, zeros(shape))

                runoff_yr = ro - p_yr_ro
                runoff_yr = maximum(runoff_yr, zeros(shape))

                snow_ras_yr = swe
                snow_ras_yr = maximum(snow_ras_yr, zeros(shape))

                deps_yr = drew + de + dr
                delta_s_yr = p_yr_deps - deps_yr

                outputs = (('infil', infil_yr), ('et', et_yr), ('precip',
                                                                precip_yr),
                           ('runoff', runoff_yr), ('snow_ras', snow_ras_yr),
                           ('delta_s_yr', delta_s_yr), ('deps_yr', deps_yr))

                p_yr_et = et
                p_yr_precip = precip
                p_yr_ro = ro
                p_yr_deps = deps_yr  # originally read p_mo_deps = deps_yr; assumed to be a typo
                p_yr_infil = infil
                p_yr_etrs = etrs

                self.save_year_step(outputs, month, year)

            # Check MASS BALANCE for the love of WATER!!!
            mass = rain + mlt - (ro + transp + evap + dp_r +
                                 ((pDr - dr) + (pDe - de) + (pDrew - drew)))
            tot_mass += abs(mass)
            cum_mass += mass

            plt_day[i] = dday

            plt_rain[i] = rain[S, E]
            plt_eta[i] = eta[S, E]
            plt_snow_fall[i] = snow_fall[S, E]
            plt_ro[i] = ro[S, E]
            plt_dr[i] = dr[S, E]
            plt_de[i] = de[S, E]
            plt_drew[i] = drew[S, E]
            plt_temp[i] = mid_temp[S, E]
            plt_dp_r[i] = dp_r[S, E]
            plt_ks[i] = ks[S, E]
            plt_pdr[i] = pDr[S, E]
            plt_etrs[i] = etrs[S, E]
            plt_kcb[i] = kcb[S, E]
            plt_ppt[i] = ppt[S, E]
            plt_ke[i] = ke[S, E]
            plt_kr[i] = kr[S, E]
            plt_mlt[i] = mlt[S, E]
            plt_swe[i] = swe[S, E]
            plt_tempm[i] = max_temp[S, E]
            plt_fs1[i] = fs1[S, E]
            plt_mass[i] = mass[S, E]
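
A small sketch (illustrative values, not from the model) of the clamp pattern that run_model applies throughout: pairing maximum(x, zeros(shape)) with minimum(x, ones(shape)) bounds an array elementwise, which np.clip expresses in a single call.

import numpy as np
from numpy import zeros, ones, maximum, minimum

shape = (2, 3)
ks_ref = np.array([[-0.2, 0.4, 1.7],
                   [ 0.9, 1.0, -3.0]])

# clamp every element to the [0, 1] range
clamped = minimum(maximum(ks_ref, zeros(shape)), ones(shape))
assert np.array_equal(clamped, np.clip(ks_ref, 0.0, 1.0))
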
Example #48
0
    def specify(self, slab, axes, specification, confined_by, aux):
        ''' First part: confine the slab within a Domain wide enough to do the exact in post'''
        import string, copy
        from numpy.ma import minimum, maximum
        # myconfined is for later, we can't confine a dimension twice with an argument plus a keyword or 2 keywords
        myconfined = [None] * len(axes)
        self.aux = copy.copy(specification)
        # First look at the arguments (i.e not keywords) and confine the dimensions
        # in the order of the arguments
        for i in range(len(self.args)):
            if confined_by[
                    i] is None:  # Check it hasn't been confined by somebody else
                myconfined[i] = 1  # dim confined by argument list
                confined_by[
                    i] = self  # for cdms I want to confine this dimension
                self.aux[i] = specs = list(
                    self.args[i])  # How do we want to confine this dim ?
                if not (isinstance(specs, list) or isinstance(specs, tuple)):
                    raise Exception, "Error in Selector, you must specify a list or a tuple, you passed:" + str(
                        specs)
                elif type(specs[0]) == type(
                        cdtime.comptime(1999)) or type(specs[0]) == type(
                            cdtime.reltime(0, 'days since 1999')) or type(
                                specs[0]) == type(''):
                    list2 = []
                    for l in specs:
                        if type(l) != type(''):
                            list2.append(l.torel('days since 1900').value)
                        else:
                            list2.append(
                                cdtime.s2r(l, 'days since 1900').value)
                    min = minimum(list2)
                    max = maximum(list2)
                    specification[i] = cdtime.reltime(
                        min, 'days since 1900'), cdtime.reltime(
                            max, 'days since 1900')
                else:  # But if it's not...
                    specification[i] = minimum(specs), maximum(
                        specs)  # sets the specifications
            else:
                return 1
        for kw in self.kargs.keys():
            axis = None
            for i in range(len(axes)):
                if axes[i].id == kw: axis = i
            if axis is None:
                if kw == 'time':
                    for i in range(len(axes)):
                        if axes[i].isTime(): axis = i
                elif kw == 'level':
                    for i in range(len(axes)):
                        if axes[i].isLevel(): axis = i
                elif kw == 'longitude':
                    for i in range(len(axes)):
                        if axes[i].isLongitude(): axis = i
                elif kw == 'latitude':
                    for i in range(len(axes)):
                        if axes[i].isLatitude(): axis = i
                elif not kw in [
                        'match'
                ]:  # keyword not a recognised keyword or dimension name
                    raise Exception, 'Error, keyword: ' + kw + ' not recognized'
            # At this point, if axis is None:
            # we are dealing with a keyword for the selector
            # so we'll skip it
            if not axis is None:
                if confined_by[axis] is None:
                    confined_by[axis] = self
                    myconfined[axis] = 1
                    self.aux[axis] = specs = list(self.kargs[kw])
                    if type(specs[0]) == type(cdtime.comptime(1999)) or type(
                            specs[0]) == type(
                                cdtime.reltime(0, 'days since 1999')) or type(
                                    specs[0]) == type(''):
                        list2 = []
                        for l in specs:
                            if type(l) != type(''):
                                list2.append(l.torel('days since 1900').value)
                            else:
                                list2.append(
                                    cdtime.s2r(l, 'days since 1900').value)
                        min = minimum(list2)
                        max = maximum(list2)
                        specification[axis] = cdtime.reltime(
                            min, 'days since 1900'), cdtime.reltime(
                                max, 'days since 1900')
                    else:  # But if it's not...
                        specification[axis] = minimum(specs), maximum(specs)

                else:
                    if myconfined[axis] == 1:
                        raise Exception, 'Error you are attempting to set the axis: ' + str(
                            axes[axis].id) + ' more than once'
                    else:
                        return 1
        return 0
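
A cdtime-free sketch of what the selector ultimately stores per axis: the bounding interval of the converted coordinate values, i.e. a single-argument minimum/maximum reduction. The float values below are hypothetical "days since 1900" numbers.

import numpy as np

list2 = [36524.0, 36889.0, 36709.5]      # hypothetical converted time values
lo, hi = float(np.min(list2)), float(np.max(list2))
specification_entry = (lo, hi)           # stands in for the (reltime, reltime) pair
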
Example #49
0
    def autoscale(self, A):
        '''
        Set *vmin*, *vmax* to min, max of *A*.
        '''
        self.vmin = ma.minimum(A)
        self.vmax = ma.maximum(A)
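
A short sketch of what autoscale sets up: once vmin/vmax are taken from the masked data, a linear normalization maps the unmasked values to [0, 1] while masked entries stay masked. This is an illustration, not matplotlib's actual Normalize code.

import numpy.ma as ma

A = ma.array([2.0, 5.0, 8.0, 100.0], mask=[False, False, False, True])
vmin = float(A.min())                  # 2.0
vmax = float(A.max())                  # 8.0 -- the masked 100.0 is ignored
normed = (A - vmin) / (vmax - vmin)    # [0.0, 0.5, 1.0, --], mask preserved
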
Example #50
0
    def __init__(self, ax, *args, **kwargs):
        """
        Draw contour lines or filled regions, depending on
        whether keyword arg 'filled' is False (default) or True.

        The first argument of the initializer must be an axes
        object.  The remaining arguments and keyword arguments
        are described in ContourSet.contour_doc.

        """
        self.ax = ax
        self.levels = kwargs.get('levels', None)
        self.filled = kwargs.get('filled', False)
        self.linewidths = kwargs.get('linewidths', None)
        self.linestyles = kwargs.get('linestyles', None)

        self.alpha = kwargs.get('alpha', 1.0)
        self.origin = kwargs.get('origin', None)
        self.extent = kwargs.get('extent', None)
        cmap = kwargs.get('cmap', None)
        self.colors = kwargs.get('colors', None)
        norm = kwargs.get('norm', None)
        self.extend = kwargs.get('extend', 'neither')
        self.antialiased = kwargs.get('antialiased', True)
        self.nchunk = kwargs.get('nchunk', 0)
        self.locator = kwargs.get('locator', None)
        if (isinstance(norm, colors.LogNorm)
                or isinstance(self.locator, ticker.LogLocator)):
            self.logscale = True
            if norm is None:
                norm = colors.LogNorm()
            if self.extend != 'neither':
                raise ValueError(
                    'extend kwarg does not work yet with log scale')
        else:
            self.logscale = False

        if self.origin is not None:
            assert (self.origin in ['lower', 'upper', 'image'])
        if self.extent is not None: assert (len(self.extent) == 4)
        if cmap is not None: assert (isinstance(cmap, colors.Colormap))
        if self.colors is not None and cmap is not None:
            raise ValueError('Either colors or cmap must be None')
        if self.origin == 'image': self.origin = mpl.rcParams['image.origin']

        if isinstance(args[0], ContourSet):
            C = args[0].Cntr
            if self.levels is None:
                self.levels = args[0].levels
        else:
            x, y, z = self._contour_args(*args)

            x0 = ma.minimum(x)
            x1 = ma.maximum(x)
            y0 = ma.minimum(y)
            y1 = ma.maximum(y)
            self.ax.update_datalim([(x0, y0), (x1, y1)])
            self.ax.autoscale_view()
            _mask = ma.getmask(z)
            if _mask is ma.nomask:
                _mask = None
            C = _cntr.Cntr(x, y, z.filled(), _mask)
        self.Cntr = C
        self._process_levels()

        if self.colors is not None:
            cmap = colors.ListedColormap(self.colors, N=len(self.layers))
        if self.filled:
            self.collections = cbook.silent_list('collections.PathCollection')
        else:
            self.collections = cbook.silent_list('collections.LineCollection')
        # label lists must be initialized here
        self.labelTexts = []
        self.labelCValues = []

        kw = {'cmap': cmap}
        if norm is not None:
            kw['norm'] = norm
        cm.ScalarMappable.__init__(self, **kw)  # sets self.cmap;
        self._process_colors()
        if self.filled:
            if self.linewidths is not None:
                warnings.warn('linewidths is ignored by contourf')
            lowers = self._levels[:-1]
            uppers = self._levels[1:]
            for level, level_upper in zip(lowers, uppers):
                nlist = C.trace(level, level_upper, nchunk=self.nchunk)
                nseg = len(nlist) // 2
                segs = nlist[:nseg]
                kinds = nlist[nseg:]

                paths = self._make_paths(segs, kinds)

                col = collections.PathCollection(
                    paths,
                    antialiaseds=(self.antialiased, ),
                    edgecolors='none',
                    alpha=self.alpha)
                self.ax.add_collection(col)
                self.collections.append(col)
        else:
            tlinewidths = self._process_linewidths()
            self.tlinewidths = tlinewidths
            tlinestyles = self._process_linestyles()
            for level, width, lstyle in zip(self.levels, tlinewidths,
                                            tlinestyles):
                nlist = C.trace(level)
                nseg = len(nlist) // 2
                segs = nlist[:nseg]
                #kinds = nlist[nseg:]
                col = collections.LineCollection(segs,
                                                 linewidths=width,
                                                 linestyle=lstyle,
                                                 alpha=self.alpha)

                col.set_label('_nolegend_')
                self.ax.add_collection(col, False)
                self.collections.append(col)
        self.changed()  # set the colors
Example #51
0
    def plot_map(self, dataset, attribute_data, min_value=None, max_value=None, file=None,
                 my_title="", filter=None, background=None):
        """    Plots a 2D image of attribute given by 'name'. matplotlib required.
               The dataset must have a method 'get_2d_attribute' defined that returns
               a 2D array that is to be plotted. If min_value/max_value are given, all values
               that are smaller/larger than these values are set to min_value/max_value.
               Argument background is a value to be used for background. If it is not given,
               it is considered as a 1/100 under the minimum value of the array.
               Filter is a 2D array. Points where filter is > 0 are masked out (put into background).
        """
        import matplotlib
        matplotlib.use('Qt4Agg') 
        
        from matplotlib.pylab import jet,imshow,colorbar,show,axis,savefig,close,figure,title,normalize
        from matplotlib.pylab import rot90
        
        attribute_data = attribute_data[filter]
        coord_2d_data = dataset.get_2d_attribute(attribute_data = attribute_data)
        data_mask = coord_2d_data.mask
#        if filter is not None:
#            if isinstance(filter, ndarray):
#                if not ma.allclose(filter.shape, coord_2d_data.shape):
#                    raise StandardError, "Argument filter must have the same shape as the 2d attribute."
#                filter_data = filter
#            else:
#                raise TypeError, "The filter type is invalid. A character string or a 2D numpy array allowed."
#            filter_data = where(ma.filled(filter_data,1) > 0, 1,0)
#            data_mask = ma.mask_or(data_mask, filter_data)
        nonmaskedmin = ma.minimum(coord_2d_data) - .2 * (ma.maximum(coord_2d_data) - ma.minimum(coord_2d_data))
        if max_value is None:
            max_value = ma.maximum(coord_2d_data)
        if min_value is None:
            min_value = nonmaskedmin

        coord_2d_data = ma.filled(coord_2d_data,min_value)
        if background is None:
            value_range = max_value-min_value
            background = min_value-value_range/100
        coord_2d_data = ma.filled(ma.masked_array(coord_2d_data, mask=data_mask), background)

        # Our data uses NW as 0,0, while matplotlib uses SW for 0,0.
        # Rotate the data so the map is oriented correctly.
        coord_2d_data = rot90(coord_2d_data, 1)

        jet()
        figure()
        norm = normalize(min_value, max_value)
        im = imshow(coord_2d_data,
            origin='lower',
            aspect='equal',
            interpolation=None,
            norm=norm,
            )
        
        tickfmt = '%4d'
        if isinstance(min_value, float) or isinstance(max_value, float):
            tickfmt='%1.4f'
        colorbar(format=tickfmt)

        title(my_title)
        axis('off')
        if file:
            savefig(file)
            close()
        else:
            show()
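
A minimal sketch of the background trick in plot_map: masked cells are filled with a value placed 1/100 of the value range below the data minimum, so they sit just outside the colour range of the real values. Names and values here are illustrative.

import numpy.ma as ma

coord_2d_data = ma.array([[1.0, 3.0], [5.0, 7.0]],
                         mask=[[False, True], [False, False]])
min_value = float(coord_2d_data.min())                 # 1.0
max_value = float(coord_2d_data.max())                 # 7.0
background = min_value - (max_value - min_value) / 100.0
filled = ma.filled(coord_2d_data, background)          # masked cell -> background
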
Example #52
0
    def specify(self, slab, axes, specification, confined_by, aux):
        """ First part: confine the slab within a Domain wide enough to do the exact in post"""
        import string, copy
        from numpy.ma import minimum, maximum

        # myconfined is for later, we can't confine a dimension twice with an argument plus a keyword or 2 keywords
        myconfined = [None] * len(axes)
        self.aux = copy.copy(specification)
        # First look at the arguments (i.e not keywords) and confine the dimensions
        # in the order of the arguments
        for i in range(len(self.args)):
            if confined_by[i] is None:  # Check it hasn't been confined by somebody else
                myconfined[i] = 1  # dim confined by argument list
                confined_by[i] = self  # for cdms I want to confine this dimension
                self.aux[i] = specs = list(self.args[i])  # How do we want to confine this dim ?
                if not (isinstance(specs, list) or isinstance(specs, tuple)):
                    raise Exception, "Error in Selector, you must specify a list or a tuple, you passed:" + str(specs)
                elif (
                    type(specs[0]) == type(cdtime.comptime(1999))
                    or type(specs[0]) == type(cdtime.reltime(0, "days since 1999"))
                    or type(specs[0]) == type("")
                ):
                    list2 = []
                    for l in specs:
                        if type(l) != type(""):
                            list2.append(l.torel("days since 1900").value)
                        else:
                            list2.append(cdtime.s2r(l, "days since 1900").value)
                    min = minimum(list2)
                    max = maximum(list2)
                    specification[i] = cdtime.reltime(min, "days since 1900"), cdtime.reltime(max, "days since 1900")
                else:  # But if it's not...
                    specification[i] = minimum(specs), maximum(specs)  # sets the specifications
            else:
                return 1
        for kw in self.kargs.keys():
            axis = None
            for i in range(len(axes)):
                if axes[i].id == kw:
                    axis = i
            if axis is None:
                if kw == "time":
                    for i in range(len(axes)):
                        if axes[i].isTime():
                            axis = i
                elif kw == "level":
                    for i in range(len(axes)):
                        if axes[i].isLevel():
                            axis = i
                elif kw == "longitude":
                    for i in range(len(axes)):
                        if axes[i].isLongitude():
                            axis = i
                elif kw == "latitude":
                    for i in range(len(axes)):
                        if axes[i].isLatitude():
                            axis = i
                elif not kw in ["match"]:  # keyword not a recognised keyword or dimension name
                    raise Exception, "Error, keyword: " + kw + " not recognized"
            # At this point, if axis is None:
            # we are dealing with a keyword for the selector
            # so we'll skip it
            if not axis is None:
                if confined_by[axis] is None:
                    confined_by[axis] = self
                    myconfined[axis] = 1
                    self.aux[axis] = specs = list(self.kargs[kw])
                    if (
                        type(specs[0]) == type(cdtime.comptime(1999))
                        or type(specs[0]) == type(cdtime.reltime(0, "days since 1999"))
                        or type(specs[0]) == type("")
                    ):
                        list2 = []
                        for l in specs:
                            if type(l) != type(""):
                                list2.append(l.torel("days since 1900").value)
                            else:
                                list2.append(cdtime.s2r(l, "days since 1900").value)
                        min = minimum(list2)
                        max = maximum(list2)
                        specification[axis] = (
                            cdtime.reltime(min, "days since 1900"),
                            cdtime.reltime(max, "days since 1900"),
                        )
                    else:  # But if it's not...
                        specification[axis] = minimum(specs), maximum(specs)

                else:
                    if myconfined[axis] == 1:
                        raise "Error you are attempting to set the axis: " + str(axes[axis].id) + " more than once"
                    else:
                        return 1
        return 0
Example #53
0
    def autoscale_None(self, A):
        'autoscale only None-valued vmin or vmax'
        if self.vmin is None:
            self.vmin = ma.minimum(A)
        if self.vmax is None:
            self.vmax = ma.maximum(A)
Example #54
0
    def create(self,
               parent=None,
               min=None,
               max=None,
               save_file=None,
               thread_it=1,
               rate=None,
               bitrate=None,
               ffmpegoptions=''):
        from vcs import minmax
        from numpy.ma import maximum, minimum

        # Cannot "Run" or "Create" an animation while already creating an
        # animation
        if self.run_flg == 1:
            return
        if self.vcs_self.canvas.creating_animation() == 1:
            return

        if self.vcs_self.animate_info == []:
            str = "No data found!"
            showerror("Error Message to User", str)
            return

        # Stop the (thread) execution of the X main loop (if it is running).
        self.vcs_self.canvas.stopxmainloop()

        # Force VCS to update its orientation, needed when the user changes the
        # VCS Canvas size.
        self.vcs_self.canvas.updateorientation()

        # Make sure the animate information is up-to-date for creating images
        if ((self.gui_popup == 1) and (self.create_flg == 0)):
            self.update_animate_display_list()

        # Save the min and max values for the graphics methods.
        # Will need to restore values back when animation is done.
        self.save_original_min_max()

        # Set up the animation min and max values by changing the graphics method
        # Note: cannot set the min and max values if the default graphics
        # method is set.
        do_min_max = 'yes'
        try:
            if (parent is not None) and (parent.iso_spacing == 'Log'):
                do_min_max = 'no'
        except:
            pass

        # Draw specified continental outlines if needed.
        self.continents_hold_value = self.vcs_self.canvas.getcontinentstype()
        self.vcs_self.canvas.setcontinentstype(self.continents_value)

        if (do_min_max == 'yes'):
            minv = []
            maxv = []
            if (min is None) or (max is None):
                for i in range(len(self.vcs_self.animate_info)):
                    minv.append(1.0e77)
                    maxv.append(-1.0e77)
                for i in range(len(self.vcs_self.animate_info)):
                    dpy, slab = self.vcs_self.animate_info[i]
                    mins, maxs = minmax(slab)
                    minv[i] = float(minimum(float(minv[i]), float(mins)))
                    maxv[i] = float(maximum(float(maxv[i]), float(maxs)))
            if isinstance(min, list) or isinstance(max, list):
                for i in range(len(self.vcs_self.animate_info)):
                    try:
                        minv.append(min[i])
                    except:
                        minv.append(min[-1])
                    try:
                        maxv.append(max[i])
                    except:
                        maxv.append(max[-1])
            else:
                for i in range(len(self.vcs_self.animate_info)):
                    minv.append(min)
                    maxv.append(max)

            # Set the min an max for each plot in the page. If the same graphics method is used
            # to display the plots, then the last min and max setting of the
            # data set will be used.
            for i in range(len(self.vcs_self.animate_info)):
                try:
                    self.set_animation_min_max(minv[i], maxv[i], i)
                except:
                    # if it is default, then you cannot set the min and max, so
                    # pass.
                    pass

        if save_file is None or save_file.split('.')[-1].lower() == 'ras':
            if thread_it:
                thread.start_new_thread(self.vcs_self.canvas.animate_init,
                                        (save_file, ))
            else:
                self.vcs_self.canvas.animate_init(save_file)
        else:  # ffmpeg stuff
            save_info = self.vcs_self.animate_info
            animation_info = self.animate_info_from_python()
            slabs = []
            templates = []
            dpys = []
            for i in range(len(self.vcs_self.animate_info)):
                dpy, slab = self.vcs_self.animate_info[i]
                slabs.append(slab)
                dpys.append(dpy)
                templates.append(dpy.template)
            sh = slabs[0].shape
            if dpy.g_type in [
                    'boxfill',
                    'isofill',
                    'isoline',
                    'meshfill',
                    'outfill',
                    'outline',
                    'taylordiagram',
                    'vector',
            ]:
                r = len(sh) - 2
            else:
                r = len(sh) - 1
            # now create the list of all previous indices to plot
            indices = []
            for i in range(r):
                this = list(range(sh[i]))
                tmp = []
                if indices == []:
                    for k in this:
                        indices.append([
                            k,
                        ])
                else:
                    for j in range(len(indices)):
                        for k in this:
                            tmp2 = copy.copy(indices[j])
                            tmp2.append(k)
                            tmp.append(tmp2)
                    indices = tmp
            count = 1
            white_square = self.vcs_self.createfillarea()
            white_square.color = 240
            white_square.x = [0, 1, 1, 0]
            white_square.y = [0, 0, 1, 1]
            new_vcs = self.vcs_self
            if self.vcs_self.orientation() == 'portrait':
                new_vcs.portrait()
            # self.vcs_self.close()

            for index in indices:
                new_vcs.clear()
                new_vcs.plot(white_square, bg=1)
                for i in range(len(save_info)):
                    slab = slabs[i]
                    template = templates[i]
                    gtype = animation_info["gtype"][i].lower()
                    gname = animation_info["gname"][i]
                    gm = None  # for flake8 to be happy
                    exec("gm = new_vcs.get%s('%s')" % (gtype, gname))
                    for j in index:
                        slab = slab[j]
                    new_vcs.plot(slab, gm, new_vcs.gettemplate(template), bg=1)
                new_vcs.png("tmp_anim_%i" % count)
                count += 1
            new_vcs.ffmpeg(save_file,
                           "tmp_anim_%d.png",
                           bitrate=bitrate,
                           rate=rate,
                           options=ffmpegoptions)
            for i in range(count - 1):
                os.remove("tmp_anim_%i.png" % (i + 1))
            del (new_vcs)
        self.create_flg = 1
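
A sketch of the running-extrema pattern used in create() to find animation-wide bounds: each frame's (mins, maxs) pair widens the limits, starting from the same +/-1.0e77 sentinels; for scalar floats the builtin min/max are equivalent to the pairwise numpy.ma.minimum/maximum calls. The frames list below is hypothetical, standing in for vcs.minmax(slab) results.

frames = [(-1.0, 4.0), (2.0, 9.0), (0.5, 3.0)]   # hypothetical (mins, maxs) per frame

minv, maxv = 1.0e77, -1.0e77                     # same sentinels as in create()
for mins, maxs in frames:
    minv = min(minv, mins)
    maxv = max(maxv, maxs)
# minv == -1.0, maxv == 9.0
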