Example #1
    def __average(self, arr, dims, cross):
        """ arr is a numpy array, dims is an ordered dictionary of lists,
            cross is a dictionary of (variable, values)
        """
        arr_out, dims_out = self.__condition(arr, dims, cross)

        if not cross: return arr.copy(), dims.copy()

        var = cross.keys()[0]
        varidx = dims_out.keys().index(var)

        wts = self.__loadwts(var, dims_out[var])

        sh = arr_out.shape
        slice_arr = [slice(0, size) for size in arr_out.shape]

        wts_full = masked_array(zeros(sh), mask=ones(sh))
        for i in range(len(wts)):
            slice_arr[varidx] = i
            wts_full[slice_arr] = wts[i]
        wts_full = masked_where(arr_out.mask, wts_full)  # mask

        arr_out = (wts_full * arr_out).sum(axis=varidx) / wts_full.sum(
            axis=varidx)  # average over variable

        arr_out = resize(arr_out, arr_out.shape + (1, ))
        dims_out[var] = ['ave']

        return arr_out, dims_out
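
A standalone sketch of the weighted-average pattern used above: broadcast a 1-D weight vector along one axis of a masked array, give the weights the data's mask, and reduce. (Plain numpy.ma with made-up data; an illustration, not the class's own code.)

import numpy as np
import numpy.ma as ma

arr = ma.masked_invalid(np.array([[1.0, 2.0], [np.nan, 4.0], [5.0, 6.0]]))
wts = np.array([0.2, 0.3, 0.5])  # one weight per row (axis 0)

# broadcast the weights to the data's shape and copy its mask, so masked
# cells drop out of both the numerator and the denominator
wts_full = ma.masked_array(np.broadcast_to(wts[:, None], arr.shape), mask=arr.mask)

avg = (wts_full * arr).sum(axis=0) / wts_full.sum(axis=0)
print(avg)  # weighted column means, ignoring the masked cell
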
Example #2
    def areas(self, var, agg, lats, weights = None, calcarea = False, mask = None):
        nt, nlats, nlons = var.shape

        if weights is None: # weights
            weights = ones((nt, nlats, nlons))
        elif len(weights.shape) == 2:
            weights = ma.resize(weights, (nt, nlats, nlons))

        if calcarea: # area
            area = self.area(lats, nlats, nlons)
        else:
            area = ones((nlats, nlons))

        aggvals = self.__uniquevals(agg)
        sz = len(aggvals)

        varmask = logical_not(var.mask) if ma.isMaskedArray(var) else ones(var.shape) # use variable mask
        if mask is not None: varmask = logical_and(varmask, mask) # additional mask

        areas  = ma.masked_array(zeros((sz, nt)), mask = ones((sz, nt)))
        vartmp = zeros((nt, nlats, nlons))
        for i in range(len(aggvals)):
            warea = weights * area * (agg == aggvals[i])
            tidx, latidx, lonidx = ma.where(warea)

            vartmp[:] = 0
            vartmp[tidx, latidx, lonidx] = warea[tidx, latidx, lonidx] * \
                                           varmask[tidx, latidx, lonidx]
            areas[i] = vartmp.sum(axis = 2).sum(axis = 1)

        areas = ma.masked_where(areas == 0, areas)
        areas.mask = resize(areas.mask, areas.shape) # ensure mask is same size as data

        return areas
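
The per-region sums above follow a compact pattern: select each region with a boolean grid, weight it, and total over the spatial axes. A minimal sketch with synthetic data (the region ids in agg are illustrative):

import numpy as np
import numpy.ma as ma

nt, nlats, nlons = 2, 2, 3
agg = np.array([[1, 1, 2], [2, 2, 1]])  # region id of each grid cell
weights = np.ones((nt, nlats, nlons))
area = np.ones((nlats, nlons))

aggvals = np.unique(agg)
areas = ma.masked_all((len(aggvals), nt))  # start fully masked
for i, val in enumerate(aggvals):
    warea = weights * area * (agg == val)  # zero outside region val
    areas[i] = warea.sum(axis=(1, 2))      # total weighted area per time step

areas = ma.masked_where(areas == 0, areas)  # mask regions with no area
print(areas)
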
Example #3
    def __average(self, arr, dims, cross):
        """ arr is a numpy array, dims is an ordered dictionary of lists,
            cross is a dictionary of (variable, values)
        """
        arr_out, dims_out = self.__condition(arr, dims, cross)

        if not cross: return arr.copy(), dims.copy()

        var    = cross.keys()[0]
        varidx = dims_out.keys().index(var)

        wts = self.__loadwts(var, dims_out[var])

        sh = arr_out.shape
        slice_arr = [slice(0, size) for size in arr_out.shape]

        wts_full = masked_array(zeros(sh), mask = ones(sh))
        for i in range(len(wts)):
            slice_arr[varidx] = i
            wts_full[slice_arr] = wts[i]
        wts_full = masked_where(arr_out.mask, wts_full) # mask

        arr_out = (wts_full * arr_out).sum(axis = varidx) / wts_full.sum(axis = varidx) # average over variable

        arr_out = resize(arr_out, arr_out.shape + (1,))
        dims_out[var] = ['ave']

        return arr_out, dims_out
Example #4
    def sum(self, var, agg, lats, weights = None, calcarea = False, mask = None, numchunks = 1):
        nt, nlats, nlons = var.shape

        if weights is None: # weights
            weights = ones((nt, nlats, nlons))
        elif len(weights.shape) == 2:
            weights = ma.resize(weights, (nt, nlats, nlons))

        if calcarea: # area
            area = self.area(lats, nlats, nlons)
        else:
            area = ones((nlats, nlons))

        aggvals = self.__uniquevals(agg)
        sz = len(aggvals)

        if mask is None:
            varmask = ones((nt, nlats, nlons)) # no additional mask
        else:
            varmask = mask

        chunksize = sz / numchunks # chunk data to reduce memory usage

        sumv = ma.masked_array(zeros((sz, nt)), mask = ones((sz, nt)))

        maxchunksize = max(chunksize, chunksize + sz - chunksize * numchunks)

        aselect = ma.zeros((maxchunksize, nlats, nlons), dtype = bool) # preallocate
        vartmp  = ma.zeros((maxchunksize, nlats, nlons))

        cnt = 0
        for i in range(numchunks):
            startidx = cnt
            if i != numchunks - 1:
                endidx = cnt + chunksize
            else:
                endidx = sz

            aggvalsc = aggvals[startidx : endidx] # work on subset of aggregation values
            szc = len(aggvalsc)

            aselect[:] = 0 # clear
            for j in range(szc): aselect[j] = (agg == aggvalsc[j])
            ridx, latidx, lonidx = where(aselect)

            vartmp[:] = 0 # clear
            vartmp.mask = ones(vartmp.shape)
            for t in range(nt):
                vartmp[ridx, latidx, lonidx] = var[t, latidx, lonidx]        * \
                                               varmask[t, latidx, lonidx]    * \
                                               weights[t, latidx, lonidx]    * \
                                               area[latidx, lonidx]          * \
                                               aselect[ridx, latidx, lonidx]
                sumv[startidx : endidx, t] = vartmp.sum(axis = 2).sum(axis = 1)[: szc]

            cnt += chunksize

        return sumv
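
Example #4 chunks the aggregation values so only one chunk's worth of (category, lat, lon) selection masks is in memory at a time, with the last chunk absorbing the remainder. The index bookkeeping in isolation:

import numpy as np

aggvals = np.arange(10)  # category ids to aggregate over
numchunks = 3
chunksize = len(aggvals) // numchunks

start = 0
for i in range(numchunks):
    end = start + chunksize if i != numchunks - 1 else len(aggvals)
    chunk = aggvals[start:end]
    # ... build the (len(chunk), nlats, nlons) selection masks and reduce here ...
    print(i, chunk)
    start = end
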
Example #5
    def get_average_omega(self, omega, probability, index, nsupply, nobs,
                          demand):
        omega_prob = ma.filled(ma.resize(omega, (nobs, 1)) * probability, 0.0)
        average_omega_nom = array(
            ndimage_sum(omega_prob,
                        labels=index + 1,
                        index=arange(nsupply) + 1))

        average_omega = ma.filled(
            average_omega_nom / ma.masked_where(demand == 0, demand), 0.0)
        return average_omega
Example #6
    def test_testCopySize(self):
        # Tests of some subtle points of copying and sizing.
        n = [0, 0, 1, 0, 0]
        m = make_mask(n)
        m2 = make_mask(m)
        assert_(m is m2)
        m3 = make_mask(m, copy=1)
        assert_(m is not m3)

        x1 = np.arange(5)
        y1 = array(x1, mask=m)
        assert_(y1._data is not x1)
        assert_(allequal(x1, y1._data))
        assert_(y1._mask is m)

        y1a = array(y1, copy=0)
        # For copy=False, one might expect that the array would just
        # be passed on, i.e., that it would be "is" instead of "==".
        # See gh-4043 for discussion.
        assert_(y1a._mask.__array_interface__ ==
                y1._mask.__array_interface__)

        y2 = array(x1, mask=m3, copy=0)
        assert_(y2._mask is m3)
        assert_(y2[2] is masked)
        y2[2] = 9
        assert_(y2[2] is not masked)
        assert_(y2._mask is m3)
        assert_(allequal(y2.mask, 0))

        y2a = array(x1, mask=m, copy=1)
        assert_(y2a._mask is not m)
        assert_(y2a[2] is masked)
        y2a[2] = 9
        assert_(y2a[2] is not masked)
        assert_(y2a._mask is not m)
        assert_(allequal(y2a.mask, 0))

        y3 = array(x1 * 1.0, mask=m)
        assert_(filled(y3).dtype is (x1 * 1.0).dtype)

        x4 = arange(4)
        x4[2] = masked
        y4 = resize(x4, (8,))
        assert_(eq(concatenate([x4, x4]), y4))
        assert_(eq(getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0]))
        y5 = repeat(x4, (2, 2, 2, 2), axis=0)
        assert_(eq(y5, [0, 0, 1, 1, 2, 2, 3, 3]))
        y6 = repeat(x4, 2, axis=0)
        assert_(eq(y5, y6))
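
The make_mask copy semantics these tests pin down can be checked directly:

import numpy.ma as ma

m = ma.make_mask([0, 0, 1, 0, 0])
assert ma.make_mask(m) is m                 # an existing mask is passed through
assert ma.make_mask(m, copy=True) is not m  # copy=True forces a new array
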
Example #7
    def test_testCopySize(self):
        # Tests of some subtle points of copying and sizing.
        n = [0, 0, 1, 0, 0]
        m = make_mask(n)
        m2 = make_mask(m)
        assert_(m is m2)
        m3 = make_mask(m, copy=True)
        assert_(m is not m3)

        x1 = np.arange(5)
        y1 = array(x1, mask=m)
        assert_(y1._data is not x1)
        assert_(allequal(x1, y1._data))
        assert_(y1._mask is m)

        y1a = array(y1, copy=0)
        # For copy=False, one might expect that the array would just
        # be passed on, i.e., that it would be "is" instead of "==".
        # See gh-4043 for discussion.
        assert_(y1a._mask.__array_interface__ ==
                y1._mask.__array_interface__)

        y2 = array(x1, mask=m3, copy=0)
        assert_(y2._mask is m3)
        assert_(y2[2] is masked)
        y2[2] = 9
        assert_(y2[2] is not masked)
        assert_(y2._mask is m3)
        assert_(allequal(y2.mask, 0))

        y2a = array(x1, mask=m, copy=1)
        assert_(y2a._mask is not m)
        assert_(y2a[2] is masked)
        y2a[2] = 9
        assert_(y2a[2] is not masked)
        assert_(y2a._mask is not m)
        assert_(allequal(y2a.mask, 0))

        y3 = array(x1 * 1.0, mask=m)
        assert_(filled(y3).dtype is (x1 * 1.0).dtype)

        x4 = arange(4)
        x4[2] = masked
        y4 = resize(x4, (8,))
        assert_(eq(concatenate([x4, x4]), y4))
        assert_(eq(getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0]))
        y5 = repeat(x4, (2, 2, 2, 2), axis=0)
        assert_(eq(y5, [0, 0, 1, 1, 2, 2, 3, 3]))
        y6 = repeat(x4, 2, axis=0)
        assert_(eq(y5, y6))
Example #8
    def test_testCopySize(self):
        # Tests of some subtle points of copying and sizing.
        n = [0, 0, 1, 0, 0]
        m = make_mask(n)
        m2 = make_mask(m)
        assert_(m is m2)
        m3 = make_mask(m, copy=1)
        assert_(m is not m3)

        x1 = np.arange(5)
        y1 = array(x1, mask=m)
        assert_(y1._data is not x1)
        assert_(allequal(x1, y1._data))
        assert_(y1.mask is m)

        y1a = array(y1, copy=0)
        assert_(y1a.mask is y1.mask)

        y2 = array(x1, mask=m3, copy=0)
        assert_(y2.mask is m3)
        assert_(y2[2] is masked)
        y2[2] = 9
        assert_(y2[2] is not masked)
        assert_(y2.mask is m3)
        assert_(allequal(y2.mask, 0))

        y2a = array(x1, mask=m, copy=1)
        assert_(y2a.mask is not m)
        assert_(y2a[2] is masked)
        y2a[2] = 9
        assert_(y2a[2] is not masked)
        assert_(y2a.mask is not m)
        assert_(allequal(y2a.mask, 0))

        y3 = array(x1 * 1.0, mask=m)
        assert_(filled(y3).dtype is (x1 * 1.0).dtype)

        x4 = arange(4)
        x4[2] = masked
        y4 = resize(x4, (8,))
        assert_(eq(concatenate([x4, x4]), y4))
        assert_(eq(getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0]))
        y5 = repeat(x4, (2, 2, 2, 2), axis=0)
        assert_(eq(y5, [0, 0, 1, 1, 2, 2, 3, 3]))
        y6 = repeat(x4, 2, axis=0)
        assert_(eq(y5, y6))
Example #9
    def test_testCopySize(self):
        # Tests of some subtle points of copying and sizing.
        n = [0, 0, 1, 0, 0]
        m = make_mask(n)
        m2 = make_mask(m)
        assert_(m is m2)
        m3 = make_mask(m, copy=1)
        assert_(m is not m3)

        x1 = np.arange(5)
        y1 = array(x1, mask=m)
        assert_(y1._data is not x1)
        assert_(allequal(x1, y1._data))
        assert_(y1.mask is m)

        y1a = array(y1, copy=0)
        assert_(y1a.mask is y1.mask)

        y2 = array(x1, mask=m3, copy=0)
        assert_(y2.mask is m3)
        assert_(y2[2] is masked)
        y2[2] = 9
        assert_(y2[2] is not masked)
        assert_(y2.mask is m3)
        assert_(allequal(y2.mask, 0))

        y2a = array(x1, mask=m, copy=1)
        assert_(y2a.mask is not m)
        assert_(y2a[2] is masked)
        y2a[2] = 9
        assert_(y2a[2] is not masked)
        assert_(y2a.mask is not m)
        assert_(allequal(y2a.mask, 0))

        y3 = array(x1 * 1.0, mask=m)
        assert_(filled(y3).dtype is (x1 * 1.0).dtype)

        x4 = arange(4)
        x4[2] = masked
        y4 = resize(x4, (8, ))
        assert_(eq(concatenate([x4, x4]), y4))
        assert_(eq(getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0]))
        y5 = repeat(x4, (2, 2, 2, 2), axis=0)
        assert_(eq(y5, [0, 0, 1, 1, 2, 2, 3, 3]))
        y6 = repeat(x4, 2, axis=0)
        assert_(eq(y5, y6))
Example #10
    def test_testCopySize(self):
        # Tests of some subtle points of copying and sizing.
        with suppress_warnings() as sup:
            sup.filter(
                np.ma.core.MaskedArrayFutureWarning,
                "setting an item on a masked array which has a "
                "shared mask will not copy")

            n = [0, 0, 1, 0, 0]
            m = make_mask(n)
            m2 = make_mask(m)
            self.assertTrue(m is m2)
            m3 = make_mask(m, copy=1)
            self.assertTrue(m is not m3)

            x1 = np.arange(5)
            y1 = array(x1, mask=m)
            self.assertTrue(y1._data is not x1)
            self.assertTrue(allequal(x1, y1._data))
            self.assertTrue(y1.mask is m)

            y1a = array(y1, copy=0)
            self.assertTrue(y1a.mask is y1.mask)

            y2 = array(x1, mask=m, copy=0)
            self.assertTrue(y2.mask is m)
            self.assertTrue(y2[2] is masked)
            y2[2] = 9
            self.assertTrue(y2[2] is not masked)
            self.assertTrue(y2.mask is not m)
            self.assertTrue(allequal(y2.mask, 0))

            y3 = array(x1 * 1.0, mask=m)
            self.assertTrue(filled(y3).dtype is (x1 * 1.0).dtype)

            x4 = arange(4)
            x4[2] = masked
            y4 = resize(x4, (8,))
            self.assertTrue(eq(concatenate([x4, x4]), y4))
            self.assertTrue(eq(getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0]))
            y5 = repeat(x4, (2, 2, 2, 2), axis=0)
            self.assertTrue(eq(y5, [0, 0, 1, 1, 2, 2, 3, 3]))
            y6 = repeat(x4, 2, axis=0)
            self.assertTrue(eq(y5, y6))
Example #11
def standardize(data,weights,mode=None):
    """
    Standardize data Xnew = (X - mean) / std.
    mode = 'col': use column-wise (time) means and stds.
    mode = 'row': use row-wise (space) means and stds.
    Otherwise use total space-time mean and std of data.
    Assumes data is masked array with shape [ntime,nspace] and
    weights is array with shape [nspace,]

    NOTE on standardization:

    In a temporal EOF, time is your dependent variable. Therefore,
    standardization of your [ntime X nspace] data matrix, should be
    done across space (row-wise): for each time (row) subtract the
    spatial (row-wise) mean and divide it by the spatial (row-wise)
    std. [ntime X ntime] covariance matrix = covariance between time
    slices.

    Conversely, in a spatial EOF, space is your dependent variable,
    and standardization of your [ntime X nspace] data matrix should be
    done across time (column-wise): for each point in space (column)
    subtract the temporal (column-wise) mean and divide it by the
    temporal (column-wise) std. [nspace X nspace] covariance matrix =
    covariance between spatial fields.

    Ivan Lima - Thu Mar 17 16:11:56 EDT 2011

    """
    wght = MA.resize(weights,data.shape)
    if mode == 'row':   # space
        mean = MA.average(data,weights=wght,axis=1)
        std  = MA.sqrt(MA.average((data-mean[:,N.newaxis])**2,weights=wght,
            axis=1))
        norm_data = ((data-mean[:,N.newaxis])/std[:,N.newaxis])
    elif mode == 'col': # time
        mean      = MA.average(data,weights=wght,axis=0)
        std       = MA.sqrt(MA.average((data-mean)**2,weights=wght,axis=0))
        norm_data = (data - mean) / std
    else:               # total space-time
        mean      = MA.average(data,weights=wght)
        std       = MA.sqrt(MA.average((data-mean)**2,weights=wght))
        norm_data = (data - mean) / std

    return norm_data
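
A usage sketch for standardize, assuming the module's MA and N aliases are numpy.ma and numpy and that the function above is in scope (the shapes are illustrative):

import numpy as N
import numpy.ma as MA

ntime, nspace = 4, 6
data = MA.masked_invalid(N.random.randn(ntime, nspace))
weights = N.ones(nspace)  # e.g. grid-cell areas

z_row = standardize(data, weights, mode='row')  # standardize across space
z_col = standardize(data, weights, mode='col')  # standardize across time
z_all = standardize(data, weights)              # total space-time
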
Example #13
    def __optimize(self, arr, dims, cross):
        """ arr is a numpy array, dims is an ordered dictionary of lists,
            cross is a dictionary of (variable, values)
        """
        arr_out, dims_out = self.__condition(arr, dims, cross)

        if not cross: return arr.copy(), dims.copy()

        var    = cross.keys()[0]
        varidx = dims_out.keys().index(var)

        if self.metric in ['tscorr', 'hitrate']: # TODO: make more generic
            arr_out = arr_out.max(axis = varidx) # maximize over variable
        else:
            arr_out = arr_out.min(axis = varidx) # minimize over variable

        arr_out = resize(arr_out, arr_out.shape + (1,))
        dims_out[var] = ['opt']

        return arr_out, dims_out
Example #14
    def __optimize(self, arr, dims, cross):
        """ arr is a numpy array, dims is an ordered dictionary of lists,
            cross is a dictionary of (variable, values)
        """
        arr_out, dims_out = self.__condition(arr, dims, cross)

        if not cross: return arr.copy(), dims.copy()

        var = cross.keys()[0]
        varidx = dims_out.keys().index(var)

        if self.metric in ['tscorr', 'hitrate']:  # TODO: make more generic
            arr_out = arr_out.max(axis=varidx)  # maximize over variable
        else:
            arr_out = arr_out.min(axis=varidx)  # minimize over variable

        arr_out = resize(arr_out, arr_out.shape + (1, ))
        dims_out[var] = ['opt']

        return arr_out, dims_out
Example #15
    def read_values(self, fieldname, slices=None):
        """Read the data of a field.

        Args:
            fieldname (str): name of the field which to read the data from

            slices (list of slice, optional): list of slices for the field if
                subsetting is requested. A slice must then be provided for each
                field dimension. The slices are relative to the opened view
                (see :func:`open`) if a view was set when opening the file.

        Return:
            MaskedArray: array of data read. Array type is the same as the
                storage type.
        """
        native_name = self.__get_native_fieldname(fieldname)
        if fieldname == 'time':
            if slices is not None:
                tslices = [slices[0]]
            else:
                tslices = slices
            time = self.__time_handler.read_values('time_stamp',
                                                   slices=tslices)
            # reshape as a 2D field
            rows = self.get_dimsize('row')
            cols = self.get_dimsize('cell')
            if slices is None:
                shape = (cols, rows)
            else:
                newslices = self._fill_slices(slices, (rows, cols))
                shape = (newslices[1].stop - newslices[1].start,
                         newslices[0].stop - newslices[0].start)
            time = ma.resize(time, shape).transpose()
            return time
        elif fieldname in ['lat', 'lon']:
            return self.__coord_handler.read_values(native_name,
                                                    slices)
        else:
            return self.__fieldlocator[native_name].read_values(native_name,
                                                                slices)
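
The time branch above tiles a per-row timestamp vector into a full 2-D (row, cell) field with ma.resize plus a transpose. The trick in isolation:

import numpy.ma as ma

rows, cols = 3, 4
time = ma.arange(rows)  # one timestamp per scan row
time2d = ma.resize(time, (cols, rows)).transpose()  # (rows, cols); row r holds time[r]
print(time2d)
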
Example #16
    def areas(self, var, agg, lats, weights=None, calcarea=False, mask=None):
        nt, nlats, nlons = var.shape

        if weights is None:  # weights
            weights = ones((nt, nlats, nlons))
        elif len(weights.shape) == 2:
            weights = ma.resize(weights, (nt, nlats, nlons))

        if calcarea:  # area
            area = self.area(lats, nlats, nlons)
        else:
            area = ones((nlats, nlons))

        aggvals = self.__uniquevals(agg)
        sz = len(aggvals)

        varmask = logical_not(var.mask) if ma.isMaskedArray(var) else ones(
            var.shape)  # use variable mask
        if mask is not None:
            varmask = logical_and(varmask, mask)  # additional mask

        areas = ma.masked_array(zeros((sz, nt)), mask=ones((sz, nt)))
        vartmp = zeros((nt, nlats, nlons))
        for i in range(len(aggvals)):
            warea = weights * area * (agg == aggvals[i])
            tidx, latidx, lonidx = ma.where(warea)

            vartmp[:] = 0
            vartmp[tidx, latidx, lonidx] = warea[tidx, latidx, lonidx] * \
                                           varmask[tidx, latidx, lonidx]
            areas[i] = vartmp.sum(axis=2).sum(axis=1)

        areas = ma.masked_where(areas == 0, areas)
        areas.mask = resize(areas.mask,
                            areas.shape)  # ensure mask is same size as data

        return areas
Example #17
def temporal_eof_w(data,weights):
    """
    Compute EOFs in time and Principal Components in space.
    Covariance matrix is computed using weights (area).
    Assumes input data is masked array with shape [ntime,nspace]
    and weights has shape [nspace,].
    """
    wght = MA.filled(MA.array(MA.resize(weights,data.shape),mask=data.mask),0)
    mat1 = N.matrix(MA.filled(data*wght,0))
    mat2 = N.matrix(MA.filled(data,0))
    # compute covariance matrix
    covm = (mat1 * N.transpose(mat2)) / wght[0,...].sum()
    # compute EOFS
    eigval, eigvec = LA.eig(covm)
    # sort by in decreasing order of eigenvalues
    inds = N.argsort(eigval)[::-1]
    eigvec = eigvec[:,inds]
    eigval = eigval[inds]
    # compute percentage of explained variances by each EOF mode
    var = eigval.real / N.sum(eigval.real) * 100.
    # compute principal components
    pc = N.transpose(mat2) * eigvec
    # eigvec and pc are matrices, NOT numpy arrays!
    return eigvec, pc, var
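
A usage sketch for temporal_eof_w, assuming MA, N, and LA are numpy.ma, numpy, and numpy.linalg as the aliases suggest, with the function above in scope:

import numpy as N
import numpy.ma as MA
import numpy.linalg as LA

ntime, nspace = 5, 8
data = MA.masked_invalid(N.random.randn(ntime, nspace))
weights = N.ones(nspace)

eigvec, pc, var = temporal_eof_w(data, weights)
print(var[:3])  # percent variance explained by the leading modes
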
Example #18
    def run(self, probability, resources=None):
        """ Compute choices according to given probability -- Constrain Location Choice procedure.
        'probability' is a 2D numpy array (nobservations x nequations).
        The returned value is a 1D array of choice indices [0, nequations-1] of length nobservations.
        The argument 'resources' must contain an entry 'capacity': a 1D array whose number of elements
        corresponds to the number of choices.
        Optional entry 'index' (1D or 2D array) gives indices of the choices.
        """
        if probability.ndim < 2:
            raise StandardError, "Argument 'probability' must be a 2D numpy array."
            
        resources.check_obligatory_keys(["capacity"])
        supply = resources["capacity"]
        if not isinstance(supply, ndarray):
            supply = array(supply)
        nsupply = supply.size
#        logger.log_status('Supply.shape:',supply.shape)
#        logger.log_status('supply.sum:', supply.sum())
        max_iter = resources.get("max_iterations", None)
        if max_iter is None:
            max_iter = 100 # default
        
        
        index = resources.get("index", None)
        if index is None:
            index = arange(nsupply)
#        logger.log_status('index.shape:',index.shape)

        neqs = probability.shape[1]
        nobs = probability.shape[0]

        if supply.sum() < nobs:
            raise StandardError, "Aggregate Supply Must be Greater than Aggregate Demand."


        if index.ndim <= 1:
            index = repeat(reshape(index, (1,index.shape[0])), nobs, axis=0)
        resources.merge({"index":index})
#        logger.log_status('index.shape:',index.shape)


        flat_index = index.ravel()
        unique_index = unique(flat_index)
#        logger.log_status('flat_index.shape:',flat_index.shape)
#        logger.log_status('unique_index.shape',unique_index.shape)
#        logger.log_status(unique_index)
        l = flat_index + 1
        demand = array(ndimage_sum(probability.ravel(), labels=l, index=arange(nsupply)+1))
#        logger.log_status('demand.shape:',demand.shape)
#        logger.log_status('demand.sum:', demand.sum())
#        logger.log_status('probability.sum:',probability.sum())
        #initial calculations
        
        sdratio = ma.filled(supply/ma.masked_where(demand==0, demand),1.0)
#        logger.log_status('sdratio.shape:',sdratio.shape)
        constrained_locations = where(sdratio<1,1,0)
        unconstrained_locations = 1-constrained_locations
        
        # Compute the iteration zero omegas
        
        sdratio_matrix = sdratio[index]
        constrained_locations_matrix = constrained_locations[index]
        unconstrained_locations_matrix = unconstrained_locations[index]
        prob_sum = 1-(probability*constrained_locations_matrix).sum(axis=1)
        omega = (1-(probability*constrained_locations_matrix*sdratio_matrix).sum(axis=1))/ \
                ma.masked_where(prob_sum ==0, prob_sum)
        pi = sdratio_matrix / ma.resize(omega, (nobs,1)) * constrained_locations_matrix + unconstrained_locations_matrix
        average_omega = ma.filled((ma.resize(omega,(nobs,1))*probability).sum(axis=0)/\
                      ma.masked_where(demand[index]==0, demand[index]),0.0)
        number_constrained_locations=zeros((max_iter,))
        # Iterative Constrained Location Procedure
        for i in range(max_iter):
            logger.log_status('Iteration ',i+1, 'Average Omega:',average_omega[0:4])
            # Recompute the constrained locations using iteration zero value of Omega
            constrained_locations_matrix = where(supply[index]<(average_omega*demand[index]),1,0)
            unconstrained_locations_matrix = 1-constrained_locations_matrix
            # Update values of Omega using new Constrained Locations
            prob_sum = 1-(probability*constrained_locations_matrix).sum(axis=1)
            omega = (1-(probability*constrained_locations_matrix*sdratio_matrix).sum(axis=1))/\
                    ma.masked_where(prob_sum ==0, prob_sum)
#            pi = sdratio_matrix / ma.resize(omega, (nobs,1)) * constrained_locations_matrix + unconstrained_locations_matrix       
#            logger.log_status('sdratio_matrix',sdratio_matrix.shape)
#            logger.log_status('constrained_locations_matrix',constrained_locations_matrix.shape)
#            logger.log_status('omega',omega.shape)
#            logger.log_status('unconstrained_locations_matrix',unconstrained_locations_matrix.shape)
#            pi_ta = (sdratio_matrix*constrained_locations_matrix)
#            logger.log_status('pi+ta',pi_ta.shape)
#            pi_tb = ma.resize(omega,(nobs,neqs))*unconstrained_locations_matrix
#            logger.log_status('pi_tb',pi_tb.shape)
            pi_t = (sdratio_matrix*constrained_locations_matrix)+ma.resize(omega,(nobs,neqs))*unconstrained_locations_matrix
#            logger.log_status('pi_tilde:',pi_t.shape)
            # Update the values of average Omegas per alternative
            average_omega = ma.filled((ma.resize(omega,(nobs,1))*probability).sum(axis=0)/
                          ma.masked_where(demand[index]==0, demand[index]),0.0)
            number_constrained_locations[i]= constrained_locations_matrix.sum()
            # Test for Convergence and if Reached, Exit
            if i > 0:
                if number_constrained_locations[i] == number_constrained_locations[i-1]:
                    break
          
        # update probabilities
#        new_probability = ma.filled(probability*ma.resize(omega,(nobs,1))*pi,0.0)
        new_probability = ma.filled(probability*pi_t,0.0)
        choices = lottery_choices().run(new_probability, resources)
        return choices
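
Throughout this procedure, ma.resize(omega, (nobs, 1)) reshapes the per-observation omega vector into a column so it broadcasts across the nobs x nequations probability matrix. The broadcast in isolation, with made-up numbers:

import numpy as np
import numpy.ma as ma

nobs, neqs = 3, 4
probability = np.full((nobs, neqs), 0.25)
omega = np.array([0.8, 1.0, 1.2])

scaled = ma.resize(omega, (nobs, 1)) * probability  # row i scaled by omega[i]
print(scaled)
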
        
Example #19
    def run(self, probability, resources=None):
        """ Compute choices according to given probability -- Constrain Location Choice procedure.
        'probability' is a 2D numpy array (nobservations x nequations).
        The returned value is a 1D array of choice indices [0, nequations-1] of length nobservations.
        The argument 'resources' must contain an entry 'capacity': a 1D array whose number of elements
        corresponds to the number of choices.
        Optional entry 'index' (1D or 2D array) gives indices of the choices.
        """
        if probability.ndim < 2:
            raise StandardError, "Argument 'probability' must be a 2D numpy array."

        resources.check_obligatory_keys(["capacity"])
        supply = resources["capacity"]
        if not isinstance(supply, ndarray):
            supply = array(supply)
        nsupply = supply.size

        max_iter = resources.get("max_iterations", None)
        if max_iter is None:
            max_iter = 100 # default

        index = resources.get("index", None)
        if index is None:
            index = arange(nsupply)

        neqs = probability.shape[1]
        nobs = probability.shape[0]

        if index.ndim <= 1:
            index = repeat(reshape(index, (1,index.shape[0])), nobs, axis=0)
        resources.merge({"index":index})

        flat_index = index.ravel()
        unique_index = unique(flat_index)
        l = flat_index + 1
        demand = array(ndimage_sum(probability.ravel(), labels=l, index=arange(nsupply)+1))

        #initial calculations
        sdratio = ma.filled(supply/ma.masked_where(demand==0, demand),2.0)
        constrained_locations = logical_and(sdratio<1,demand-supply>0.1).astype("int8")
        unconstrained_locations = 1-constrained_locations
        excess_demand = (demand-supply)*constrained_locations
        global_excess_demand = excess_demand.sum()

        # Compute the iteration zero omegas

        sdratio_matrix = sdratio[index]
        constrained_locations_matrix = constrained_locations[index]
# Would like to include following print statements in debug printing
#        logger.log_status('Total demand:',demand.sum())
#        logger.log_status('Total supply:',supply.sum())
        logger.log_status('Global excess demand:',global_excess_demand)
#        logger.log_status('Constrained locations:',constrained_locations.sum())
        unconstrained_locations_matrix = unconstrained_locations[index]
        prob_sum = 1-(probability*constrained_locations_matrix).sum(axis=1)

        # The recoding of prob_sum and omega are to handle extreme values of omega and zero divide problems
        # A complete solution involves stratifying the choice set in the initialization to ensure that
        # there are always a mixture of constrained and unconstrained alternatives in each choice set.

        prob_sum = where(prob_sum==0,-1,prob_sum)
        omega = (1-(probability*constrained_locations_matrix*sdratio_matrix).sum(axis=1))/prob_sum
        omega = where(omega>5,5,omega)
        omega = where(omega<.5,5,omega)
        omega = where(prob_sum<0,5,omega)

# Debug print statements
#        logger.log_status('Minimum omega',minimum(omega))
#        logger.log_status('Maximum omega',maximum(omega))
#        logger.log_status('Median omega',median(omega))
#        logger.log_status('Omega < 0',(where(omega<0,1,0)).sum())
#        logger.log_status('Omega < 1',(where(omega<1,1,0)).sum())
#        logger.log_status('Omega > 30',(where(omega>30,1,0)).sum())
#        logger.log_status('Omega > 100',(where(omega>100,1,0)).sum())
#        logger.log_status('Omega histogram:',histogram(omega,0,30,30))
#        logger.log_status('Excess demand max:',maximum(excess_demand))
#        logger.log_status('Excess demand 0-1000:',histogram(excess_demand,0,1000,20))
#        logger.log_status('Excess demand 0-10:',histogram(excess_demand,0,10,20))

        pi = sdratio_matrix / ma.resize(omega, (nobs,1)) * constrained_locations_matrix + unconstrained_locations_matrix

        omega_prob = ma.filled(ma.resize(omega,(nobs,1))*probability,0.0)
        average_omega_nom = array(ndimage_sum(omega_prob, labels=index+1, index=arange(nsupply)+1))

        average_omega = ma.filled(average_omega_nom/
                      ma.masked_where(demand==0, demand), 0.0)

#        logger.log_status('Total demand:',new_demand.sum())
#        logger.log_status('Excess demand:',excess_demand)
        number_constrained_locations=zeros((max_iter,))
        # Iterative Constrained Location Procedure
        for i in range(max_iter):
            logger.log_status()
            logger.log_status('Constrained location choice iteration ',i+1)
            # Recompute the constrained locations using preceding iteration value of Omega
            constrained_locations = where((average_omega*demand-supply>0.1),1,0)
            unconstrained_locations = 1-constrained_locations
            constrained_locations_matrix = constrained_locations[index]
            unconstrained_locations_matrix = unconstrained_locations[index]
#            logger.log_status('supply.shape,average_omega.shape,demand.shape',supply.shape,average_omega.shape,demand.shape)
#            logger.log_status('constrained_locations_matrix',constrained_locations_matrix)
#            logger.log_status('constrained_locations_matrix.shape',constrained_locations_matrix.shape)
#            logger.log_status('unconstrained_locations_matrix',unconstrained_locations_matrix)
            # Update values of Omega using new Constrained Locations
            prob_sum = 1-(probability*constrained_locations_matrix).sum(axis=1)
            prob_sum = where(prob_sum==0,-1,prob_sum)
            omega = (1-(probability*constrained_locations_matrix*sdratio_matrix).sum(axis=1))/prob_sum
            omega = where(omega>5,5,omega)
            omega = where(omega<.5,5,omega)
            omega = where(prob_sum<0,5,omega)
            pi = sdratio_matrix / ma.resize(omega, (nobs,1)) * constrained_locations_matrix + unconstrained_locations_matrix
            # Update the values of average Omegas per alternative
            omega_prob = ma.filled(ma.resize(omega,(nobs,1)), 1.0)*probability
            average_omega_num = array(ndimage_sum(omega_prob, labels=index+1, index=arange(nsupply)+1))

            average_omega = ma.filled(average_omega_num/
                      ma.masked_where(demand==0, demand), 0.0)

            number_constrained_locations[i] = constrained_locations.sum()
            new_probability = ma.filled(probability*ma.resize(omega,(nobs,1))*pi,0.0)
            new_demand = array(ndimage_sum(new_probability.ravel(), labels=l, index=arange(nsupply)+1))
            excess_demand = (new_demand-supply)*constrained_locations
            global_excess_demand = excess_demand.sum()
#            logger.log_status('Total demand:',new_demand.sum())
            logger.log_status('Global excess demand:',global_excess_demand)
#            logger.log_status('Constrained locations:', number_constrained_locations[i])
#            logger.log_status('Minimum omega',minimum(omega))
#            logger.log_status('Maximum omega',maximum(omega))
#            logger.log_status('Median omega',median(omega))
#            logger.log_status('Omega < 0',(where(omega<0,1,0)).sum())
#            logger.log_status('Omega < 1',(where(omega<1,1,0)).sum())
#            logger.log_status('Omega > 30',(where(omega>30,1,0)).sum())
#            logger.log_status('Omega > 100',(where(omega>100,1,0)).sum())
#            logger.log_status('Omega histogram:',histogram(omega,0,30,30))
#            logger.log_status('Excess demand max:',maximum(excess_demand))
#            logger.log_status('Excess demand 0-5:',histogram(excess_demand,0,5,20))
#            logger.log_status('Excess demand 0-1:',histogram(excess_demand,0,1,20))
            # Test for Convergence and if Reached, Exit
            if i > 0:
                if number_constrained_locations[i] == number_constrained_locations[i-1]:
                    logger.log_status()
                    logger.log_status('Constrained choices converged.')
                    break

        # update probabilities
        new_probability = ma.filled(probability*ma.resize(omega,(nobs,1))*pi,0.0)
        choices = lottery_choices().run(new_probability, resources)
        return choices
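
The demand step above sums predicted probabilities per alternative with a labeled sum; ndimage_sum is presumably scipy.ndimage.sum, and this sketch reproduces the step with made-up choice indices:

import numpy as np
from scipy.ndimage import sum as ndimage_sum

nsupply = 3
index = np.array([[0, 1], [1, 2], [0, 2]])  # alternative ids per observation
probability = np.full(index.shape, 0.5)

demand = np.array(ndimage_sum(probability.ravel(),
                              labels=index.ravel() + 1,
                              index=np.arange(nsupply) + 1))
print(demand)  # summed probability per alternative
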
Example #20
    def get_average_omega(self, omega, probability, index, nsupply, nobs, demand):
        omega_prob = ma.filled(ma.resize(omega,(nobs,1))*probability,0.0)
        average_omega_nom = array(ndimage_sum(omega_prob, labels=index+1, index=arange(nsupply)+1))

        average_omega = ma.filled(average_omega_nom / ma.masked_where(demand==0, demand), 0.0)
        return average_omega
Example #21
import numpy as np
import numpy.ma as ma

a = ma.array([[1, 2], [3, 4]])
a[0, 1] = ma.masked       # a is now [[1, --], [3, 4]]
a
np.resize(a, (3, 3))      # plain np.resize loses the mask
ma.resize(a, (3, 3))      # ma.resize repeats data and mask together
a = np.array([[1, 2], [3, 4]])
ma.resize(a, (3, 3))      # a plain ndarray still comes back as a MaskedArray
Example #22
    def get_pi(self, sdratio_matrix, omega, constrained_locations_matrix, unconstrained_locations_matrix, nobs):
        pi = sdratio_matrix / ma.resize(omega, (nobs,1)) * constrained_locations_matrix + unconstrained_locations_matrix
        return pi
Example #23
with nc(rcp26file) as f:
    fpu26    = f.variables[variable + '_fpu'][:, :, 0, 0, 0, 0]    # fpu, time, scen, dt, mp, cr
    global26 = f.variables[variable + '_global'][:, :, 0, 0, 0, 0] # global, time, scen, dt, mp, cr

with nc(rcp85file) as f:
    fpu85    = f.variables[variable + '_fpu'][:, :, 0, 0, 0, 0]
    global85 = f.variables[variable + '_global'][:, :, 0, 0, 0, 0]

nt, nf, ng = len(time), len(afpu), len(aglobal)

tidx1, tidx2 = where(time == 1980)[0][0], where(time == 2009)[0][0] + 1

# number of decades
nd = nt / 10

# delta yield
dyfpu26 = reshape(fpu26, (nf, nd, 10)).mean(axis = 2) - resize(fpu26[:, tidx1 : tidx2].mean(axis = 1), (nd, nf)).T
dyfpu85 = reshape(fpu85, (nf, nd, 10)).mean(axis = 2) - resize(fpu85[:, tidx1 : tidx2].mean(axis = 1), (nd, nf)).T

# absolute global yield
global26 = reshape(global26, (ng, nd, 10)).mean(axis = 2)
global85 = reshape(global85, (ng, nd, 10)).mean(axis = 2)

with nc(outfile, 'w') as f:
    f.createDimension('fpu', nf)
    fpuvar = f.createVariable('fpu', 'i4', 'fpu')
    fpuvar[:] = afpu
    fpuvar.units = funits
    fpuvar.long_name = flname

    f.createDimension('global', ng)
    globalvar = f.createVariable('global', 'i4', 'global')
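
The delta-yield lines above reduce each per-FPU series to decadal means and subtract a tiled baseline mean; the reshape/resize pattern standalone (synthetic series, with a first-decade baseline standing in for 1980-2009):

import numpy as np

nf, nt = 2, 30  # series x years, nt divisible by 10
y = np.arange(nf * nt, dtype=float).reshape(nf, nt)
nd = nt // 10

decadal = y.reshape(nf, nd, 10).mean(axis=2)               # (nf, nd) decade means
baseline = np.resize(y[:, 0:10].mean(axis=1), (nd, nf)).T  # per-series baseline, tiled
delta = decadal - baseline
print(delta.shape)  # (nf, nd)
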
Example #24
print 'reading', os.path.basename(infile_first)
fpin = Nio.open_file(infile_first, 'r')
day = fpin.variables['time'][0]
dz = fpin.variables['dz'][:] / 100.  # cm -> m
area = fpin.variables['TAREA'][:] / 1.e-4  # cm2 -> m2
o2 = fpin.variables['O2'][0, ...]
fpin.close()

if POPDIAGPY == 'TRUE':
    yr_first = yroffset + day / 365.
else:
    yr_first = day / 365. - 181 + 1948

# compute ocean volume (m^3)
volume = MA.array(MA.resize(area,o2.shape),mask=o2.mask) * \
        dz[:,N.newaxis,N.newaxis]

# compute volume of ocean where O2 concentration < o2_min
o2_vol_first = N.array([volume[o2 < o2_min].sum() for o2_min in o2_scale])

#------------------------------------------------------------------------------
# read last input file (end of run)

infile_last = file_list[-1]

print 'reading', os.path.basename(infile_last)
fpin = Nio.open_file(infile_last, 'r')
day = fpin.variables['time'][0]
o2 = fpin.variables['O2'][0, ...]
fpin.close()
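
The volume line above tiles a 2-D cell-area field over depth, inherits the O2 mask, and scales by layer thickness. A standalone sketch with synthetic shapes (MA and N standing in for numpy.ma and numpy):

import numpy as N
import numpy.ma as MA

nz, ny, nx = 3, 2, 4
area = N.ones((ny, nx)) * 1.0e6  # cell area, m^2
dz = N.array([10., 20., 50.])    # layer thickness, m
o2 = MA.masked_less(N.random.rand(nz, ny, nx), 0.2)  # mask invalid cells

volume = MA.array(MA.resize(area, o2.shape), mask=o2.mask) * \
        dz[:, N.newaxis, N.newaxis]
print(volume.sum())  # total unmasked ocean volume, m^3
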
Example #25
with nc(rcp26file) as f:
    fpu26 = f.variables[variable + '_fpu'][:, :, 0, 0, 0, 0]  # fpu, time, scen, dt, mp, cr
    global26 = f.variables[variable + '_global'][:, :, 0, 0, 0, 0]  # global, time, scen, dt, mp, cr

with nc(rcp85file) as f:
    fpu85 = f.variables[variable + '_fpu'][:, :, 0, 0, 0, 0]
    global85 = f.variables[variable + '_global'][:, :, 0, 0, 0, 0]

nt, nf, ng = len(time), len(afpu), len(aglobal)

tidx1, tidx2 = where(time == 1980)[0][0], where(time == 2009)[0][0] + 1

# number of decades
nd = nt / 10

# delta yield
dyfpu26 = reshape(fpu26, (nf, nd, 10)).mean(axis=2) - resize(
    fpu26[:, tidx1:tidx2].mean(axis=1), (nd, nf)).T
dyfpu85 = reshape(fpu85, (nf, nd, 10)).mean(axis=2) - resize(
    fpu85[:, tidx1:tidx2].mean(axis=1), (nd, nf)).T

# absolute global yield
global26 = reshape(global26, (ng, nd, 10)).mean(axis=2)
global85 = reshape(global85, (ng, nd, 10)).mean(axis=2)

with nc(outfile, 'w') as f:
    f.createDimension('fpu', nf)
    fpuvar = f.createVariable('fpu', 'i4', 'fpu')
    fpuvar[:] = afpu
    fpuvar.units = funits
    fpuvar.long_name = flname

    f.createDimension('global', ng)
Example #27
    def sum(self,
            var,
            agg,
            lats,
            weights=None,
            calcarea=False,
            mask=None,
            numchunks=1):
        nt, nlats, nlons = var.shape

        if weights is None:  # weights
            weights = ones((nt, nlats, nlons))
        elif len(weights.shape) == 2:
            weights = ma.resize(weights, (nt, nlats, nlons))

        if calcarea:  # area
            area = self.area(lats, nlats, nlons)
        else:
            area = ones((nlats, nlons))

        aggvals = self.__uniquevals(agg)
        sz = len(aggvals)

        if mask is None:
            varmask = ones((nt, nlats, nlons))  # no additional mask
        else:
            varmask = mask

        chunksize = sz / numchunks  # chunk data to reduce memory usage

        sumv = ma.masked_array(zeros((sz, nt)), mask=ones((sz, nt)))

        maxchunksize = max(chunksize, chunksize + sz - chunksize * numchunks)

        aselect = ma.zeros((maxchunksize, nlats, nlons),
                           dtype=bool)  # preallocate
        vartmp = ma.zeros((maxchunksize, nlats, nlons))

        cnt = 0
        for i in range(numchunks):
            startidx = cnt
            if i != numchunks - 1:
                endidx = cnt + chunksize
            else:
                endidx = sz

            aggvalsc = aggvals[startidx:
                               endidx]  # work on subset of aggregation values
            szc = len(aggvalsc)

            aselect[:] = 0  # clear
            for j in range(szc):
                aselect[j] = (agg == aggvalsc[j])
            ridx, latidx, lonidx = where(aselect)

            vartmp[:] = 0  # clear
            vartmp.mask = ones(vartmp.shape)
            for t in range(nt):
                vartmp[ridx, latidx, lonidx] = var[t, latidx, lonidx]        * \
                                               varmask[t, latidx, lonidx]    * \
                                               weights[t, latidx, lonidx]    * \
                                               area[latidx, lonidx]          * \
                                               aselect[ridx, latidx, lonidx]
                sumv[startidx:endidx, t] = vartmp.sum(axis=2).sum(axis=1)[:szc]

            cnt += chunksize

        return sumv
Example #28
with rasterio.open("tif-file-path") as src:
    out_image1, out_transform = mask(src, geoms, crop=True)

shapefile = gpd.read_file("sample2path")
# extract the geometry in GeoJSON format
geoms = shapefile.geometry.values  # list of shapely geometries
geometry = geoms[0]  # shapely geometry
# transform to GeoJSON format
from shapely.geometry import mapping
geoms = [mapping(geoms[0])]
# extract the raster values within the polygon
with rasterio.open("tif-file-path") as src:
    out_image3, out_transform = mask(src, geoms, crop=True)

nonwaterarray = np.transpose(
    (ma.resize(out_image3, (4, out_image3.shape[1] *
                            out_image3.shape[2]))))  # convert 3-D (bands, rows, cols) to 2-D (pixels, bands)
waterarray = np.transpose(
    (ma.resize(out_image1, (4, out_image1.shape[1] * out_image1.shape[2]))))

nonwaterarray = nonwaterarray[~(nonwaterarray == 0).all(1)]
waterarray = waterarray[~(waterarray == 0).all(1)]

xTrain = np.concatenate((nonwaterarray, waterarray)).astype('f')

yTrain = np.concatenate(
    (0 * np.ones(nonwaterarray.shape[0]), np.ones(waterarray.shape[0])))

src_ds = gdal.Open("tif-file-path")
print("Size of X Pixel: {0}".format(src_ds.RasterXSize))
print("Size of Y Pixel: {0}".format(src_ds.RasterYSize))
rb1 = np.array(src_ds.GetRasterBand(1).ReadAsArray())
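
The two np.transpose(ma.resize(...)) calls above flatten a (bands, rows, cols) cube into a (pixels, bands) training matrix. The reshape in isolation:

import numpy as np
import numpy.ma as ma

bands, rows, cols = 4, 3, 5
cube = ma.arange(bands * rows * cols).reshape(bands, rows, cols)

pixels = np.transpose(ma.resize(cube, (bands, rows * cols)))  # one row per pixel
print(pixels.shape)  # (15, 4)
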
Example #29
print 'reading', os.path.basename(infile_first)
fpin = Nio.open_file(infile_first, 'r')
day  = fpin.variables['time'][0]
dz   = fpin.variables['dz'][:]     / 100. # cm -> m
area = fpin.variables['TAREA'][:]  / 1.e-4 # cm2 -> m2
o2   = fpin.variables['O2'][0,...]
fpin.close()

if POPDIAGPY == 'TRUE':
  yr_first = yroffset + day/365.
else:
  yr_first = day/365. - 181 + 1948

# compute ocean volume (m^3)
volume = MA.array(MA.resize(area,o2.shape),mask=o2.mask) * \
        dz[:,N.newaxis,N.newaxis]

# compute volume of ocean where O2 concentration < o2_min
o2_vol_first = N.array([volume[o2<o2_min].sum() for o2_min in o2_scale])

#------------------------------------------------------------------------------
# read last input file (end of run)

infile_last = file_list[-1]

print 'reading', os.path.basename(infile_last)
fpin = Nio.open_file(infile_last, 'r')
day  = fpin.variables['time'][0]
o2   = fpin.variables['O2'][0,...]
fpin.close()
Example #30
year       = options.year
outputfile = options.outputfile

with nc(inputfile) as f:
    lats, lons = f.variables['lat'][:], f.variables['lon'][:]
    areasum1 = f.variables[variable][:]
    aunits = f.variables[variable].units

areasum1 = masked_where(isnan(areasum1), areasum1)

mk = areasum1.mask
sh = areasum1.shape

if aunits == '%': # convert percent to area
    area = 100 * (111.2 / 2) ** 2 * cos(pi * lats / 180)
    area = resize(area, (len(lons), len(lats))).T
    area = resize(area, sh) # resize
    area = masked_where(mk, area) # mask
    areasum1 = areasum1 * area / 100.

with nc(weightfile) as f:
    areair2 = f.variables['irrigated'][:]
    arearf2 = f.variables['rainfed'][:]

areair2  = resize(areair2, sh)
arearf2  = resize(arearf2, sh)
areasum2 = areair2 + arearf2

areair1 = masked_array(zeros(sh), mask = mk)
arearf1 = masked_array(zeros(sh), mask = mk)
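
The percent-to-area conversion above builds a per-latitude cell-area vector and tiles it across longitudes with resize; the same pattern standalone (the grid sizes are made up):

import numpy as np

lats = np.linspace(-89.75, 89.75, 360)
lons = np.linspace(-179.75, 179.75, 720)

# area of a half-degree cell in km^2, constant along each latitude band
area = 100 * (111.2 / 2) ** 2 * np.cos(np.pi * lats / 180)
area2d = np.resize(area, (len(lons), len(lats))).T  # (nlat, nlon)
print(area2d.shape)
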
Example #31
    def get_pi(self, sdratio_matrix, omega, constrained_locations_matrix,
               unconstrained_locations_matrix, nobs):
        pi = sdratio_matrix / ma.resize(omega, (nobs, 1)) * constrained_locations_matrix + unconstrained_locations_matrix
        return pi