def PngAlphaToBinarryArray(filename):
    RGBAarray = readPNG(filename)
    print(RGBAarray.shape)
    alphaarray = RGBAarray[:, :, 3]
    masked_alphaarray = ma.masked_greater_equal(alphaarray, 50)
    bmask = masked_alphaarray.mask
    return bmask
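
The snippets on this page lean on the fact that ma.masked_greater_equal masks every element satisfying the condition, so .mask is a boolean array that is True exactly where the condition holds. A minimal sketch (not from the project above) with a made-up alpha channel:

import numpy as np
import numpy.ma as ma

alpha = np.array([[0, 49], [50, 255]])
bmask = ma.masked_greater_equal(alpha, 50).mask
print(bmask)
# [[False False]
#  [ True  True]]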
Example #2
    def __low_calc(self, img, gauss, threshold_value):
        """ Lower threshold calculations for hysteresis detection functions.

        """
        mask_img = ma.masked_greater_equal(img, threshold_value)

        # # fixed-value masked image saving, for debugging only
        # plt.figure()
        # ax0 = plt.subplot()
        # img0 = ax0.imshow(mask_img)
        # plt.savefig(f'mask_{int(threshold_value)}.png')

        low = self.low_init
        diff = np.size(img)

        while diff > self.mask_diff:
            mask_hyst = filters.apply_hysteresis_threshold(
                gauss, low=low * np.max(gauss), high=self.high * np.max(gauss))
            diff = np.sum(ma.masked_where(~mask_hyst, mask_img) > 0)
            if all([diff < self.mask_diff, low == self.low_init]):
                logging.fatal('Initial lower threshold is too low!')
                break
            low += 0.01
            if low >= self.high:
                logging.fatal('LOW=HIGH, thresholding failed!')
                break
        logging.debug(f'Lower threshold {round(low, 2)}')

        # # final masks difference, for debugging only
        # plt.figure()
        # ax0 = plt.subplot()
        # img0 = ax0.imshow(ma.masked_where(~mask_hyst, mask_img))
        # plt.savefig(f'mask_low_{int(threshold_value)}.png')
        return low
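
A minimal sketch of the comparison inside the loop above, assuming a random image in place of the project's data; skimage.filters.apply_hysteresis_threshold returns a boolean array, and the masked comparison counts the positive pixels that sit below the fixed threshold yet inside the hysteresis region:

import numpy as np
import numpy.ma as ma
from skimage import filters

rng = np.random.default_rng(0)
img = rng.random((64, 64))

mask_img = ma.masked_greater_equal(img, 0.5)  # fixed-value mask
mask_hyst = filters.apply_hysteresis_threshold(img, low=0.3, high=0.7)

# positive, unmasked pixels that the hysteresis mask keeps
diff = np.sum(ma.masked_where(~mask_hyst, mask_img) > 0)
print(diff)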
    def split(self, split_point, ch_to_split):
        img1 = np.ma.array(self.img, mask=False, fill_value=0)
        img2 = np.ma.array(self.img, mask=False, fill_value=0)

        ch_to_split = int(ch_to_split)
        img1[:, :, ch_to_split] = ma.masked_less(img1[:, :, ch_to_split],
                                                 split_point)
        if ch_to_split == 0:
            img1[ma.getmask(img1[:, :, ch_to_split]), 1] = ma.masked
            img1[ma.getmask(img1[:, :, ch_to_split]), 2] = ma.masked
        elif ch_to_split == 1:
            img1[ma.getmask(img1[:, :, ch_to_split]), 0] = ma.masked
            img1[ma.getmask(img1[:, :, ch_to_split]), 2] = ma.masked
        else:
            img1[ma.getmask(img1[:, :, ch_to_split]), 1] = ma.masked
            img1[ma.getmask(img1[:, :, ch_to_split]), 0] = ma.masked

        img2[:, :,
             ch_to_split] = ma.masked_greater_equal(img2[:, :, ch_to_split],
                                                    split_point)
        if ch_to_split == 0:
            img2[ma.getmask(img2[:, :, ch_to_split]), 1] = ma.masked
            img2[ma.getmask(img2[:, :, ch_to_split]), 2] = ma.masked
        elif ch_to_split == 1:
            img2[ma.getmask(img2[:, :, ch_to_split]), 0] = ma.masked
            img2[ma.getmask(img2[:, :, ch_to_split]), 2] = ma.masked
        else:
            img2[ma.getmask(img2[:, :, ch_to_split]), 1] = ma.masked
            img2[ma.getmask(img2[:, :, ch_to_split]), 0] = ma.masked

        #print ('Image 1 after : ' + str(img1))
        #print ('Image 2 after : ' + str(img2))

        return img1, img2
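
A minimal sketch of the mask propagation used in split, assuming a tiny 2x2 RGB array: assigning ma.masked through a boolean index marks those entries masked, and filled() then substitutes the fill value of 0:

import numpy as np
import numpy.ma as ma

img = ma.array(np.arange(12).reshape(2, 2, 3), mask=False, fill_value=0)
img[:, :, 0] = ma.masked_less(img[:, :, 0], 6)  # mask channel 0 below the split point
img[ma.getmask(img[:, :, 0]), 1] = ma.masked    # propagate the mask to channel 1
img[ma.getmask(img[:, :, 0]), 2] = ma.masked    # ... and to channel 2
print(img.filled())  # pixels masked in channel 0 are zeroed in all channels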
def PngToBinarryArray(filename):
    RGBAarray = readPNG(filename)
    print(RGBAarray.shape)
    alphaarray = RGBAarray[:, :]
    masked_alphaarray = ma.masked_greater_equal(alphaarray, 50)
    bmask = masked_alphaarray.filled(1)
    # return the computed binary array rather than the raw input
    return bmask
Example #5
    def _check_geophysical(self, dataset, product_type):
        """

        :type dataset: Dataset
        :type product_type: ProductType
        """
        spec = product_type.get_geophysical_check_spec()
        if len(spec) != 0:
            a = ProductVerifier.__get_data(dataset, spec[0], scale=True)
            b = ProductVerifier.__get_data(dataset, spec[1], scale=True)
            d = a - b
            # count pixels with differences less than the minimum
            suspicious_data = ma.masked_greater_equal(d, spec[2])
            suspicious_data_count = suspicious_data.count()
            self.report['geophysical_minimum_check'] = suspicious_data_count
            if suspicious_data_count > 0:
                filename = os.path.basename(self.source_pathname)
                self.report['geophysical_minimum_check_failed_for'] = filename
            # count pixels with differences greater than the maximum
            suspicious_data = ma.masked_less_equal(d, spec[3])
            suspicious_data_count = suspicious_data.count()
            self.report['geophysical_maximum_check'] = suspicious_data_count
            if suspicious_data_count > 0:
                filename = os.path.basename(self.source_pathname)
                self.report['geophysical_maximum_check_failed_for'] = filename
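
These checks rely on count() returning the number of unmasked values, so masked_greater_equal(d, minimum).count() is the number of differences that stayed below the minimum. A minimal sketch with made-up numbers:

import numpy as np
import numpy.ma as ma

d = np.array([-2.0, 0.5, 1.0, 3.0])
print(ma.masked_greater_equal(d, 1.0).count())  # 2 -> two differences below 1.0
print(ma.masked_less_equal(d, 0.0).count())     # 3 -> three differences above 0.0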
Example #9
    def _getPatternTStrip(self, tsStart, tsStop):
        ntpat = 800
        t0 = np.mod(tsStart, 2.0 * np.pi)
        t1 = np.mod(tsStop, 2.0 * np.pi)

        tpat = np.zeros((2, ntpat))
        tpat[0] = np.linspace(0, 2.0 * np.pi, tpat.shape[1])

        m1 = ma.masked_greater_equal(tpat[0], t0).mask
        m2 = ma.masked_less_equal(tpat[0], t1).mask

        mm = m1 * m2
        if tsStop > 2.0 * np.pi or tsStart < 0.0:
            mm = m1 + m2


#        tpat[1]=mm*(self.AT*np.random.random(ntpat))   #((self.AT)/2.0 + 0.5*self.AT*np.random.random(ntpat))

        st = self.AT * np.ones(ntpat)
        zeroKeys = np.random.randint(0, ntpat, 70)
        st[zeroKeys] = 0.3 * self.AT
        tpat[1] = st * mm

        tpi = np.hstack((tpat, tpat, tpat))
        tpi[0, 0:tpat.shape[1]] = tpat[0] - 2 * np.pi
        tpi[0, 2 * tpat.shape[1]:] = tpat[0] + 2 * np.pi
        return tpi
Example #11
def replace_invalid_crude(x, value_invalid):
    """Impute entries equal to value_invalid by regressing the dirty
    columns on the clean ones."""
    # Mask entries equal to the invalid marker (e.g. -999).
    x_masked = ma.masked_equal(x, value_invalid)
    # Flag columns that contain at least one invalid entry.
    if_clean = np.sum(x_masked.mask, axis=0)
    if_clean = ma.masked_greater_equal(if_clean, 1).mask  # True = not clean
    # Columns without / with invalid values, extracted from x.
    x_clean = (x_masked.T[~if_clean]).T
    x_dirty = (x_masked.T[if_clean]).T
    # Column indices in x that contain invalid values.
    index_dirty = np.where(if_clean)
    for i in range(x_dirty.shape[1]):
        # Build polynomial features from the clean columns, split into the
        # rows where the dirty column is valid (for fitting) and invalid
        # (for estimation).
        tx = build_poly(x_clean[~x_dirty.mask[:, i]].data, 3)
        tx_to_estimate = build_poly(x_clean[x_dirty.mask[:, i]].data, 3)
        # Valid entries of the dirty column are the regression targets.
        y = x_dirty[:, i][~x_dirty.mask[:, i]].data
        # Fit the model, then estimate the invalid entries from it.
        [mse, w] = least_squares(y, tx)
        y_estimated = np.dot(tx_to_estimate, w)
        x_masked[:, index_dirty[0][i]][x_dirty.mask[:, i]] = y_estimated
    return x_masked.data
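
build_poly and least_squares are helpers from the surrounding project. A minimal sketch of the same regression-imputation idea using plain NumPy, with np.polyfit standing in for the project's polynomial fit, on a made-up matrix:

import numpy as np
import numpy.ma as ma

x = np.array([[1.0, 2.0], [2.0, -999.0], [3.0, 6.1]])
col = ma.masked_equal(x[:, 1], -999.0)              # invalid entries masked
good = ~col.mask
coeffs = np.polyfit(x[good, 0], col[good].data, 1)  # fit on the valid rows
x[col.mask, 1] = np.polyval(coeffs, x[col.mask, 0])  # impute the invalid rows
print(x)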
Example #12
def _attvalues(attribute, stacked):
    """Attribute values computed in numpy.ma stack."""
    if attribute == "max":
        attvalues = ma.max(stacked, axis=2)
    elif attribute == "min":
        attvalues = ma.min(stacked, axis=2)
    elif attribute == "rms":
        attvalues = np.sqrt(ma.mean(np.square(stacked), axis=2))
    elif attribute == "var":
        attvalues = ma.var(stacked, axis=2)
    elif attribute == "mean":
        attvalues = ma.mean(stacked, axis=2)
    elif attribute == "maxpos":
        stacked = ma.masked_less(stacked, 0.0, copy=True)
        attvalues = ma.max(stacked, axis=2)
    elif attribute == "maxneg":  # ~ minimum of negative values?
        stacked = ma.masked_greater_equal(stacked, 0.0, copy=True)
        attvalues = ma.min(stacked, axis=2)
    elif attribute == "maxabs":
        attvalues = ma.max(abs(stacked), axis=2)
    elif attribute == "sumpos":
        stacked = ma.masked_less(stacked, 0.0, copy=True)
        attvalues = ma.sum(stacked, axis=2)
    elif attribute == "sumneg":
        stacked = ma.masked_greater_equal(stacked, 0.0, copy=True)
        attvalues = ma.sum(stacked, axis=2)
    elif attribute == "sumabs":
        attvalues = ma.sum(abs(stacked), axis=2)
    elif attribute == "meanabs":
        attvalues = ma.mean(abs(stacked), axis=2)
    elif attribute == "meanpos":
        stacked = ma.masked_less(stacked, 0.0, copy=True)
        attvalues = ma.mean(stacked, axis=2)
    elif attribute == "meanneg":
        stacked = ma.masked_greater_equal(stacked, 0.0, copy=True)
        attvalues = ma.mean(stacked, axis=2)
    else:
        etxt = "Invalid attribute applied: {}".format(attribute)
        raise ValueError(etxt)

    if not attvalues.flags["C_CONTIGUOUS"]:
        mask = ma.getmaskarray(attvalues)
        mask = np.asanyarray(mask, order="C")
        attvalues = np.asanyarray(attvalues, order="C")
        attvalues = ma.array(attvalues, mask=mask, order="C")

    return attvalues
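
A minimal usage sketch, assuming _attvalues is in scope and a random 3-D stack whose last axis holds the samples being reduced:

import numpy as np
import numpy.ma as ma

stacked = ma.masked_invalid(np.random.default_rng(1).normal(size=(5, 5, 10)))
print(_attvalues("rms", stacked).shape)  # (5, 5)
print(_attvalues("maxneg", stacked))     # per-cell minimum of the negative samples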
Example #13
def get_mask_for_unphysical_using_cutoff(U, cutoff=None, mode='less'):
    """Return a boolean mask of the entries of U beyond a fixed cutoff."""
    if mode == 'less' or mode == 'l':
        U_masked = ma.masked_less(U, cutoff)
    elif mode == 'lessequal' or mode == 'leq':
        U_masked = ma.masked_less_equal(U, cutoff)
    elif mode == 'greater' or mode == 'g':
        U_masked = ma.masked_greater(U, cutoff)
    elif mode == 'greaterequal' or mode == 'geq':
        U_masked = ma.masked_greater_equal(U, cutoff)
    else:
        raise ValueError('unknown mode: %s' % mode)
    return U_masked.mask
Example #14
def get_mask_for_unphysical_using_median(U, cutoffratio=0.4, mode='less'):
    """Return a boolean mask of the entries of U beyond a fraction of the median."""
    median = np.median(U)
    if mode == 'less' or mode == 'l':
        U_masked = ma.masked_less(U, median * cutoffratio)
    elif mode == 'lessequal' or mode == 'leq':
        U_masked = ma.masked_less_equal(U, median * cutoffratio)
    elif mode == 'greater' or mode == 'g':
        U_masked = ma.masked_greater(U, median * cutoffratio)
    elif mode == 'greaterequal' or mode == 'geq':
        U_masked = ma.masked_greater_equal(U, median * cutoffratio)
    else:
        raise ValueError('unknown mode: %s' % mode)
    return U_masked.mask
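
A minimal sketch of both getters, assuming they are in scope. One pitfall worth noting: when nothing matches the condition, .mask is the scalar ma.nomask (False) rather than a boolean array:

import numpy as np
import numpy.ma as ma

U = np.array([0.1, 0.5, 2.0, 9.0])
print(get_mask_for_unphysical_using_cutoff(U, cutoff=1.0, mode='geq'))
# [False False  True  True]
print(get_mask_for_unphysical_using_median(U, cutoffratio=0.4, mode='less'))
# [ True False False False]  (the median is 1.25, so the cutoff is 0.5)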
Example #15
def mask_outside_disk(inst_map):
    # Find coordinates and radius
    hpc_coords = all_coordinates_from_map(inst_map)
    r = np.sqrt(hpc_coords.Tx**2 + hpc_coords.Ty**2) / inst_map.rsun_obs

    # Mask everything outside of the solar disk
    mask = ma.masked_greater_equal(r, 1)
    ma.set_fill_value(mask, np.nan)
    where_disk = np.where(mask.mask == 1)

    return where_disk
Example #16
    def _check_variable_limits(self, dataset):
        """

        :type dataset: Dataset
        """
        for variable_name in dataset.variables:
            variable = dataset.variables[variable_name]
            self.report[variable_name + '.count.total'] = variable.size

            data = ProductVerifier.__get_masked_data(variable)
            self.report[variable_name + '.count.valid'] = data.count()

            try:
                valid_max = variable.getncattr('valid_max')
                invalid_data = ma.masked_less_equal(data, valid_max)
                invalid_data_count = invalid_data.count()
                if invalid_data_count == 0:
                    self.report[variable_name +
                                '.valid_max_check'] = invalid_data_count
                else:
                    variable.getncattr('_FillValue')
                    self.report[variable_name +
                                '.valid_max_check'] = invalid_data_count
                    filename = os.path.basename(self.source_pathname)
                    self.report[variable_name +
                                '.valid_max_check_failed_for'] = filename
            except AttributeError:
                pass
            try:
                valid_min = variable.getncattr('valid_min')
                invalid_data = ma.masked_greater_equal(data, valid_min)
                invalid_data_count = invalid_data.count()
                self.report[variable_name +
                            '.valid_min_check'] = invalid_data_count
                if invalid_data_count == 0:
                    self.report[variable_name +
                                '.valid_min_check'] = invalid_data_count
                else:
                    variable.getncattr('_FillValue')
                    self.report[variable_name +
                                '.valid_min_check'] = invalid_data_count
                    filename = os.path.basename(self.source_pathname)
                    self.report[variable_name +
                                '.valid_min_check_failed_for'] = filename
            except AttributeError:
                pass
Example #17
def contourValuesAboveEqual(lons, lats, rawdata, contourAboveValue):
    """
    Contours a grid for lats and lons passed in having values above the given value or equal

    Args: 
            lons : a grid of the longitudes
            lats : a grid of the latitudes
            rawdata : a grid of values
            contourAboveValue : the value of which to contour anything above or equal
            Note : the grids must be the same dimension and size
    Returns:
            A list of polygons  
    """

    masked_grid = mask.masked_greater_equal(rawdata, contourAboveValue)
    if not masked_grid.mask.any():
        return list()

    return contour(lons, lats, masked_grid.mask.astype(float))
Example #19
    def _getPatternZStrip(self, zsStart, zsStop):
        H = self.H
        nzpat = 600

        t0 = np.mod(zsStart, 2.0 * np.pi)
        t1 = np.mod(zsStop, 2.0 * np.pi)

        zpat = np.zeros((2, nzpat))
        zpat[0] = np.linspace(0, H, zpat.shape[1])

        m1 = ma.masked_greater_equal(zpat[0], zsStart).mask
        m2 = ma.masked_less_equal(zpat[0], zsStop).mask
        mm = m1 * m2
        zpat[1] = mm * (self.AZ / 2.0 +
                        0.5 * self.AZ * np.random.random(nzpat))

        zpi = np.hstack((zpat, zpat, zpat))

        zpi[0][0:zpat.shape[1]] = (zpat[0] - H)
        zpi[0][(2 * zpat.shape[1]):] = (zpat[0] + H)
        return zpi
    def _check_corruptness(self, dataset, product_type):
        """

        :type dataset: Dataset
        :type product_type: ProductType
        """
        ok = True
        for variable_name in product_type.get_sst_variable_names():
            if variable_name in dataset.variables:
                variable = dataset.variables[variable_name]

                data = ProductVerifier.__get_masked_data(variable)
                valid_data_count = data.count()
                if valid_data_count == 0:
                    ok = False
                try:
                    valid_max = variable.getncattr('valid_max')
                    invalid_data = ma.masked_less_equal(data, valid_max)
                    valid_data_count = valid_data_count - invalid_data.count()
                except AttributeError:
                    pass
                try:
                    valid_min = variable.getncattr('valid_min')
                    invalid_data = ma.masked_greater_equal(data, valid_min)
                    valid_data_count = valid_data_count - invalid_data.count()
                except AttributeError:
                    pass
                if valid_data_count == 0:
                    ok = False
            else:
                ok = False
        if ok:
            self.report['corruptness_check'] = 0
        else:
            self.report['corruptness_check'] = 1
            filename = os.path.basename(self.source_pathname)
            self.report['corruptness_check_failed_for'] = filename
            raise VerificationError
Example #23
def mass_fraction(rho, temp, bl):
    '''
    Get the mass fractions for a small box of size bl:
    Inputs:
        rho:   Baryon overdensity at each point in an n**3 array (mean is 1).
        temp:  Temperature at each point in an n**3 array.
        bl:    The size of the small box in lattice points (assuming a cube).
    Outputs:
        returns mWHIM, mCond, mDif, mHalo.
    '''
    # Calculate necessary means:
    brho_avg = rho.mean()
    bmass = brho_avg * bl**3  # bl**3 is volume of box.

    # Set up the masks for all four regions:
    rhoMask = ma.masked_greater_equal(rho, rhoMax)
    tMask = ma.masked_less_equal(temp, tMin)

    WHIM_Mask = rhoMask.mask + tMask.mask
    cond_Mask = ~rhoMask.mask + ~tMask.mask
    dif_Mask = rhoMask.mask + ~tMask.mask
    halo_Mask = ~rhoMask.mask + tMask.mask

    # Now make the density arrays corresponding to the masks:
    rhoWHIM = ma.array(rho, mask=WHIM_Mask)
    rhoCond = ma.array(rho, mask=cond_Mask)
    rhoDif = ma.array(rho, mask=dif_Mask)
    rhoHalo = ma.array(rho, mask=halo_Mask)

    # Now calculate the amount of mass in each fraction.
    mWHIM = ma.sum(rhoWHIM) / bmass
    mCond = ma.sum(rhoCond) / bmass
    mDif = ma.sum(rhoDif) / bmass
    mHalo = ma.sum(rhoHalo) / bmass

    return mWHIM, mCond, mDif, mHalo
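
rhoMax and tMin are module-level globals in the source. A minimal sketch of the mask algebra with explicit values, showing that + on boolean masks acts as a logical OR (and ~ as NOT):

import numpy as np
import numpy.ma as ma

rho = np.array([0.5, 5.0, 200.0])
temp = np.array([1e4, 1e6, 1e8])
rhoMax, tMin = 100.0, 1e5

rhoMask = ma.masked_greater_equal(rho, rhoMax)  # True where rho >= rhoMax
tMask = ma.masked_less_equal(temp, tMin)        # True where temp <= tMin
WHIM_Mask = rhoMask.mask + tMask.mask           # OR: points excluded from the WHIM phase
print(WHIM_Mask)  # [ True False  True]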
def process(f, i):
    path = 'time_series_images/' + os.path.basename(f) + '.png'
    if os.path.exists(path):
        print('Exists, skipping ...')
        return

    j = json.loads(open(f).read())

    p = j['features'][0]['properties']

    # fr = p['water_area_filled_fraction']

    t = p['water_area_time']
    v1 = p['water_area_value']
    v2 = p['water_area_filled']

    t_jrc = p['water_area_time_jrc']
    v_jrc = p['water_area_value_jrc']

    filled_fr = list(zip(v1, v2))
    filled_fr = [(o[1] - o[0]) / o[1] for o in filled_fr]

    mask = ma.masked_greater_equal(filled_fr, 0.5)

    # t = list(ma.masked_array(t, mask).compressed())
    # v1 = list(ma.masked_array(v1, mask).compressed())
    # v2 = list(ma.masked_array(v2, mask).compressed())

    if not len(t):
        print('Empty, skipping ...')
        return

    years = mdates.YearLocator()  # every year

    v2_filtered = savitzky_golay(np.array(v2), window_size=15, order=4)
    # v2_filtered = signal.medfilt(v2, 7)
    # v2_filtered = lowess(v2, t)
    # v2_filtered = lowess(v2, t, frac=1./50)

    t = [datetime.datetime.fromtimestamp(tt / 1000) for tt in t]
    t_jrc = [
        datetime.datetime.fromtimestamp(tt_jrc / 1000) for tt_jrc in t_jrc
    ]

    s_scale = 'Scale: {:.2f}'.format(p['scale']) + '$m$'
    s_area = 'Area: {:.2f}'.format(
        p['area'] /
        (1000 * 1000)) + '$km^2$, ' + '{:.2f}'.format(100 * p['area'] /
                                                      (1000 * 1000)) + '$ha$'
    title = s_scale + ', ' + s_area

    fig = plt.figure(figsize=(11, 4))
    ax = fig.add_subplot(111)
    ax.xaxis.set_major_locator(years)

    # fig.autofmt_xdate()
    ax.set_xlim([datetime.date(1985, 1, 1), datetime.date(2019, 1, 1)])

    ax.grid(color='k', linestyle='-', linewidth=1, alpha=0.2)

    plt.title(title)

    plt.xticks(rotation=90)

    ax.plot(t_jrc,
            v_jrc,
            marker='.',
            c='r',
            markersize=2,
            linewidth=0,
            alpha=0.05)

    ax.plot(t, v1, marker='.', c='b', markersize=2, linewidth=0, alpha=0.05)

    ax.plot(t, v2, marker='.', c='k', markersize=3, linewidth=0, alpha=0.8)

    # for SG
    if len(t) != len(v2_filtered):
        print('Bad, shapes are not equal, skipping line plotting ...')
    else:
        ax.plot(t,
                v2_filtered,
                marker='.',
                c='k',
                markersize=0,
                linewidth=2,
                alpha=0.1)

    # for LOWESS
    # v2_filtered_t = [datetime.datetime.fromtimestamp(t / 1000) for t in v2_filtered[:, 0]]
    # ax.plot(v2_filtered_t, v2_filtered[:, 1], marker='.', c='k', markersize=0, linewidth=2, alpha=0.1)

    path = 'time_series_images/' + os.path.basename(f) + '.png'
    print(str(i) + ' ' + path)
    plt.tight_layout()
    plt.savefig(path, dpi=150)
    plt.close()
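
The commented-out compress lines in process() show the intent of the mask: drop samples whose filled fraction is 0.5 or more. A minimal sketch with made-up areas:

import numpy as np
import numpy.ma as ma

v1 = np.array([10.0, 4.0, 9.0])   # observed water area
v2 = np.array([12.0, 10.0, 9.5])  # gap-filled water area
filled_fr = (v2 - v1) / v2        # fraction of each sample that was filled
mask = ma.masked_greater_equal(filled_fr, 0.5)
print(ma.masked_array(v2, mask.mask).compressed())  # [12.   9.5]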
def carbchem(op_swtch,mdi,T,S,TCO2,TALK,Pr=0.0,TB=0.0,Ni=100.0,Tl=1.0e-5):
# This function calculates the inorganic carbon chemistry balance
# according to the method of Peng et al 1987
# The parameters are set in the first few lines

#salinity needs to be converted into psu *1000+35
#TCO2 and TALK must be in mol/kg /(1026.*1000.)
#the ones below here are not needed

# This procedure calculates the inorganic carbon chemistry balance
# according to the method of Peng et al 1987
# The parameters are set in the first few lines
#
#  ops= 0 ;  output is iteration count
#       1 ;            pCO2
#       2 ;            pH
#       3 ;            [H2CO3]
#       4 ;            [HCO3]
#       5 ;            [CO3]
#       6 ;            satn [co3] : calcite
#       7 ;            saturation state: calcite
#       8 ;            satn [CO3] : aragonite
#       9 ;            saturation state: aragonite

    msk=ma.masked_greater_equal(T,mdi+1.0,copy=True).mask
    #create land-sea mask used by sea_msk.mask
    salmin = 1.0
    S2=np.copy(S)
    S2[np.abs(S) < salmin]=salmin

    tol = Tl
    mxiter = Ni

    op_fld = np.empty(T.shape)
    op_fld.fill(np.NAN)

#    TB = np.ones(T.shape)
#    TB[msk] = 4.106e-4*S2[msk]/35.0
    TB = np.empty_like(T)
    TB = np.multiply(S2,4.106e-4/35.0, TB)
    # this boron is from Peng

    #convert to Kelvin
    TK=np.copy(T[:])
    TK[msk] += 273.15

    alpha_s = np.ones(T.shape)
    alpha_s[msk] = np.exp( ( -60.2409 + 9345.17/TK[msk]  + 23.3585*np.log(TK[msk]/100.0) )  + ( 0.023517 - 0.023656*(TK[msk]/100.0) + 0.0047036*np.power((TK[msk]/100.0),2.0) )*S[msk] )
  
    K1 = np.ones(T.shape)
    K1[msk] = np.exp( ( -2307.1266/TK[msk] + 2.83655  - 1.5529413*np.log(TK[msk]) ) - ( 4.0484/TK[msk] + 0.20760841 )*np.sqrt(S[msk]) + 0.08468345*S[msk] - 0.00654208*np.power(S[msk],1.5) + np.log( 1.0 - 0.001005*S[msk] ) )

    if keyword.iskeyword(Pr):
        del_vol = np.ones(T.shape)
        del_com = np.ones(T.shape) 
        pf = np.ones(T.shape) 
        del_vol[msk] = -25.50 + 0.1271*T[msk]
        del_com[msk] = 1.0e-3*( -3.08 + 0.0877*T[msk] )
        pf[msk] = np.exp( ( 0.5*del_com[msk]*Pr[msk] - del_vol[msk] )*Pr[msk] / ( 83.131*TK[msk] ) )
        K1[msk] = K1[msk]*pf[msk]

    K2 = np.ones(T.shape)
    K2[msk] = np.exp( ( -3351.6106/TK[msk] - 9.226508 - 0.2005743*np.log(TK[msk]) ) - ( 23.9722/TK[msk] + 0.106901773 )*np.power(S[msk],0.5) + 0.1130822*S[msk] - 0.00846934*np.power(S[msk],1.5) + np.log( 1.0 - 0.001005*S[msk] ) )

    if keyword.iskeyword(Pr):
        del_vol = np.ones(T.shape)
        del_com = np.ones(T.shape) 
        pf = np.ones(T.shape) 
        del_vol[msk] = -15.82 - 0.0219*T[msk]
        del_com[msk] = 1.0e-3*( 1.13 - 0.1475*T[msk] )
        pf[msk] = np.exp( ( 0.5*del_com[msk]*Pr[msk] - del_vol[msk] )*Pr[msk] / ( 83.131*TK[msk] ) )
        K2[msk] = K2[msk]*pf[msk]

    KB = np.ones(T.shape)
    KB[msk] = np.exp( ( -8966.90 - 2890.53*np.power(S[msk],0.5) - 77.942*S[msk] + 1.728*np.power(S[msk],1.5)- 0.0996*np.power(S[msk],2.0) )/TK[msk] + ( 148.0248 + 137.1942*np.power(S[msk],0.5) + 1.62142*S[msk] ) - ( 24.4344 + 25.085*np.power(S[msk],0.5) + 0.2474*S[msk] )*np.log(TK[msk]) + 0.053105*(np.power(S[msk],0.5))*TK[msk] )

    if keyword.iskeyword(Pr):
        del_vol = np.ones(T.shape)
        del_com = np.ones(T.shape) 
        pf = np.ones(T.shape) 
        del_vol[msk] = -29.48 + 0.1622*T[msk]+ 0.0026080*np.power(T[msk],2.0)
        del_com[msk] = -2.84e-3
        pf[msk] = np.exp( ( 0.5*del_com[msk]*Pr[msk] - del_vol[msk] )*Pr[msk] / ( 83.131*TK[msk] ) )
        KB[msk] = KB[msk]*pf[msk]


    KW = np.ones(T.shape)
    KW[msk] = np.exp( ( -13847.26/TK[msk] + 148.96502 - 23.6521*np.log(TK[msk]) ) + ( 118.67/TK[msk] - 5.977 + 1.0495*np.log(TK[msk]) )*np.power(S[msk],0.5) - 0.01615*S[msk] )

    if keyword.iskeyword(Pr):
        del_vol = np.ones(T.shape)
        del_com = np.ones(T.shape) 
        pf = np.ones(T.shape) 
        del_vol[msk] = -25.60 + 0.2324*T[msk] - 0.0036246*np.power(T[msk],2.0)
        del_com[msk] = 1.0e-3*( -5.13 + 0.0794*T[msk] )
        pf[msk] = np.exp( ( 0.5*del_com[msk]*Pr[msk]- del_vol[msk] )*Pr[msk] / ( 83.131*TK[msk] ) )
        KW[msk] = KW[msk]*pf[msk]

    if ( op_swtch >= 6 and op_swtch <= 9 ):
        ca_conc = np.ones(T.shape)
        ca_conc[msk] = 0.01028*S2[msk]/35.0

    if ( op_swtch == 6 or op_swtch == 7 ):
        K_SP_C = np.ones(T.shape)
        K_SP_C[msk] = np.power(10.0,( ( -171.9065 - 0.077993*TK[msk] + 2839.319/TK[msk] + 71.595*np.log10(TK[msk]) ) + ( -0.77712 + 0.0028426*TK[msk] + 178.34/TK[msk] )*np.power(S[msk],0.5) - 0.07711*S[msk]+ 0.0041249*np.power(S[msk],1.5) ))
        if keyword.iskeyword(Pr):
            del_vol = np.ones(T.shape)
            del_com = np.ones(T.shape) 
            pf = np.ones(T.shape) 
            del_vol[msk] = -48.76 + 0.5304*T[msk]
            del_com[msk] = 1.0e-3*( -11.76 + 0.3692*T[msk] )
            pf[msk] = np.exp( ( 0.5*del_com[msk]*Pr[msk]   - del_vol[msk] )*Pr[msk] / ( 83.131*TK[msk] ) )
            K_SP_C[msk] = K_SP_C[msk]*pf[msk]

    if ( op_swtch == 8 or op_swtch == 9 ):
        K_SP_A = np.ones(T.shape)
        K_SP_A[msk] = np.power(10,( ( -171.945 - 0.077993*TK[msk] + 2903.293/TK[msk] + 71.595*np.log10(TK[msk]) ) + ( -0.068393 + 0.0017276*TK[msk] + 88.135/TK[msk] )*np.power(S[msk],0.5) - 0.10018*S[msk] + 0.0059415*np.power(S[msk],1.5) ))
        if keyword.iskeyword(Pr):
            del_vol = np.ones(T.shape)
            del_com = np.ones(T.shape) 
            pf = np.ones(T.shape) 
            del_vol[msk] = -46.0 + 0.5304*T[msk]
            del_com[msk] = 1.0e-3*( -11.76 + 0.3692*T[msk] )
            pf[msk] = np.exp( ( 0.5*del_com[msk]*Pr[msk]   - del_vol[msk] )*Pr[msk] / ( 83.131*TK[msk] ) )
            K_SP_A[msk] = K_SP_A[msk]*pf[msk]


    # Get first estimate for H+ concentration.

    aH, count = carbiter(T, TCO2, TALK, TB, msk, tol, mxiter, K1, K2, KB, KW)
    
    # now we have aH we can calculate...
    denom = np.zeros(T.shape)
    H2CO3 = np.zeros(T.shape)
    HCO3 = np.zeros(T.shape)
    CO3 = np.zeros(T.shape)
    pH = np.zeros(T.shape)
    pCO2 = np.zeros(T.shape)
    if ( op_swtch == 6 or op_swtch == 7 ):
        sat_CO3_C = np.zeros(T.shape)
    if ( op_swtch == 7 ):
        sat_stat_C = np.zeros(T.shape)
    if ( op_swtch == 8 or op_swtch == 9 ):
        sat_CO3_A = np.zeros(T.shape)
    if ( op_swtch == 9 ):
        sat_stat_A = np.zeros(T.shape)

    denom[msk] = np.power(aH[msk],2.0) + K1[msk]*aH[msk] + K1[msk]*K2[msk]
    H2CO3[msk] = TCO2[msk]*np.power(aH[msk],2.0)/denom[msk]
    HCO3[msk] = TCO2[msk]*K1[msk]*aH[msk]/denom[msk]
    CO3[msk] = TCO2[msk]*K1[msk]*K2[msk]/denom[msk]

    pH[msk] = -np.log10(aH[msk])
    pCO2[msk] = H2CO3[msk]/alpha_s[msk]

    if ( op_swtch == 6 or op_swtch == 7 ):
        sat_CO3_C[msk] = K_SP_C[msk]/ca_conc[msk]
        if ( op_swtch == 7 ):
            sat_stat_C[msk] = CO3[msk]/sat_CO3_C[msk]

    if ( op_swtch == 8 or op_swtch == 9 ):
        sat_CO3_A[msk] = K_SP_A[msk]/ca_conc[msk]
        if ( op_swtch == 9 ):
            sat_stat_A[msk] = CO3[msk]/sat_CO3_A[msk]

    if ( op_swtch == 0 ):
        op_fld = np.zeros(T.shape)
        op_fld[msk] = count[msk]
    elif ( op_swtch == 1 ):
        op_fld[msk] = pCO2[msk]*1.0e6
    elif ( op_swtch == 2 ):
        op_fld[msk] = pH[msk]
    elif ( op_swtch == 3 ):
        op_fld[msk] = H2CO3[msk]
    elif ( op_swtch == 4 ):
        op_fld[msk] = HCO3[msk]
    elif ( op_swtch == 5 ):
        op_fld[msk] = CO3[msk]
    elif ( op_swtch == 6 ):
        op_fld[msk] = sat_CO3_C[msk]
    elif ( op_swtch == 7 ):
        op_fld[msk] = sat_stat_C[msk]
    elif ( op_swtch == 8 ):
        op_fld[msk] = sat_CO3_A[msk]
    elif ( op_swtch == 9 ):
        op_fld[msk] = sat_stat_A[msk]


    return op_fld
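
The land-sea mask above only works if mdi is a very negative missing-data indicator, so that every valid temperature satisfies T >= mdi + 1. A minimal sketch under that assumption:

import numpy as np
import numpy.ma as ma

mdi = -1.0e30                     # missing-data indicator (large negative)
T = np.array([15.2, mdi, 22.8])
msk = ma.masked_greater_equal(T, mdi + 1.0).mask  # True at valid sea points
print(msk)     # [ True False  True]
print(T[msk])  # [15.2 22.8]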
Example #26
def PngToBinarryArray(filename):
    RGBAarray = readPNG(filename)
    alphaarray = RGBAarray[:, :]
    masked_alphaarray = ma.masked_greater_equal(alphaarray, 50)
    bmask = masked_alphaarray.mask
    return bmask
Example #27
File: b14.py  Project: tlinnet/relax
def r2eff_B14(r20a=None,
              r20b=None,
              pA=None,
              dw=None,
              dw_orig=None,
              kex=None,
              ncyc=None,
              inv_tcpmg=None,
              tcp=None,
              back_calc=None):
    """Calculate the R2eff values for the CR72 model.

    See the module docstring for details.


    @keyword r20a:          The R20 parameter value of state A (R2 with no exchange).
    @type r20a:             numpy float array of rank [NE][NS][NM][NO][ND]
    @keyword r20b:          The R20 parameter value of state B (R2 with no exchange).
    @type r20b:             numpy float array of rank [NE][NS][NM][NO][ND]
    @keyword pA:            The population of state A.
    @type pA:               float
    @keyword dw:            The chemical exchange difference between states A and B in rad/s.
    @type dw:               numpy float array of rank [NE][NS][NM][NO][ND]
    @keyword dw_orig:       The chemical exchange difference between states A and B in ppm. This is only for faster checking of zero value, which result in no exchange.
    @type dw_orig:          numpy float array of rank-1
    @keyword kex:           The kex parameter value (the exchange rate in rad/s).
    @type kex:              float
    @keyword ncyc:          The matrix exponential power array. The number of CPMG blocks.
    @type ncyc:             numpy int16 array of rank [NE][NS][NM][NO][ND]
    @keyword inv_tcpmg:     The inverse of the total duration of the CPMG element (in inverse seconds).
    @type inv_tcpmg:        numpy float array of rank [NE][NS][NM][NO][ND]
    @keyword tcp:           The tau_CPMG times (1 / 4.nu1).
    @type tcp:              numpy float array of rank [NE][NS][NM][NO][ND]
    @keyword back_calc:     The array for holding the back calculated R2eff values.  Each element corresponds to one of the CPMG nu1 frequencies.
    @type back_calc:        numpy float array of rank [NE][NS][NM][NO][ND]
    """

    # Flags to tell if values should be replaced when a math function is violated.
    t_dw_zero = False
    t_max_e = False
    t_v3_N_zero = False
    t_log_tog_neg = False
    t_v1c_less_one = False

    # Catch parameter values that will result in no exchange, returning flat R2eff = R20 lines (when kex = 0.0, k_AB = 0.0).
    # Test if pA or kex is zero.
    if kex == 0.0 or pA == 1.0:
        back_calc[:] = r20a
        return

    # Test if dw is zero. Create a mask for the affected spins to replace these with R20 at the end of the calculation. Wait for replacement, since this is spin specific.
    if min(fabs(dw_orig)) == 0.0:
        t_dw_zero = True
        mask_dw_zero = masked_where(dw == 0.0, dw)

    # Parameter conversions.
    pB = 1.0 - pA
    k_BA = pA * kex
    k_AB = pB * kex

    # Repetitive calculations (to speed up calculations).
    deltaR2 = r20a - r20b
    dw2 = dw**2
    two_tcp = 2.0 * tcp

    # The Carver and Richards (1972) alpha_minus short notation.
    alpha_m = deltaR2 + k_AB - k_BA
    zeta = 2.0 * dw * alpha_m
    Psi = alpha_m**2 + 4.0 * k_BA * k_AB - dw2

    # Get the real and imaginary components of the exchange induced shift.
    # Trigonometric functions faster than square roots.
    quad_zeta2_Psi2 = (zeta**2 + Psi**2)**0.25
    fact = 0.5 * arctan2(-zeta, Psi)
    g3 = cos(fact) * quad_zeta2_Psi2
    g4 = sin(fact) * quad_zeta2_Psi2

    # Repetitive calculations (to speed up calculations).
    g32 = g3**2
    g42 = g4**2

    # Time independent factors.
    # N = oG + oE.
    N = g3 + g4 * 1j

    NNc = g32 + g42

    # F0.
    F0 = (dw2 + g32) / NNc

    # F2.
    F2 = (dw2 - g42) / NNc

    # t1 = (-dw + g4) * (complex(-dw, -g3)) / NNc #t1.

    # t2.
    F1b = (dw + g4) * (dw - g3 * 1j) / NNc

    # t1 + t2.
    F1a_plus_b = (2. * dw2 + zeta * 1j) / NNc

    # Derived from relaxation.
    # E0 = -2.0 * tcp * (F00R - f11R).
    E0 = two_tcp * g3

    # Catch math domain error of sinh(val > 710).
    # This is when E0 > 710.
    if max(E0) > 700:
        t_max_e = True
        mask_max_e = masked_greater_equal(E0, 700.0)
        # To prevent math errors, set e_zero to 1.
        E0[mask_max_e.mask] = 1.0

    # Derived from chemical shifts  #E2 = complex(0, -2.0 * tcp * (F00I - f11I)).
    E2 = two_tcp * g4

    # Mixed term (complex) (E0 - iE2)/2.
    E1 = (g3 - g4 * 1j) * tcp

    # Complex.
    v1s = F0 * sinh(E0) - F2 * sin(E2) * 1j

    # -2 * oG * t2.
    v4 = F1b * (-alpha_m - g3) + F1b * (dw - g4) * 1j

    # Complex.
    ex1c = sinh(E1)

    # Off diagonal common factor. sinh functions.
    v5 = (-deltaR2 + kex + dw * 1j) * v1s - 2. * (v4 +
                                                  k_AB * F1a_plus_b) * ex1c

    # Real. The v_1c in paper.
    v1c = F0 * cosh(E0) - F2 * cos(E2)

    # Catch math domain error of sqrt of negative.
    # This is when v1c is less than 1.
    mask_v1c_less_one = v1c < 1.0
    if any(mask_v1c_less_one):
        t_v1c_less_one = True
        v1c[mask_v1c_less_one] = 1.0

    # Exact result for v2v3.
    v3 = sqrt(v1c**2 - 1.)

    y = power((v1c - v3) / (v1c + v3), ncyc)

    Tog_div = 2. * v3 * N

    # Catch math domain error of division with 0.
    # This is when Tog_div is zero.
    mask_v3_N_zero = Tog_div == 0.0
    if any(mask_v3_N_zero):
        t_v3_N_zero = True
        Tog_div[mask_v3_N_zero] = 1.0

    Tog = 0.5 * (1. + y) + (1. - y) * v5 / Tog_div

    ## -1/Trel * log(LpreDyn).
    # Rpre = (r20a + r20b + kex) / 2.0

    ## Carver and Richards (1972)
    # R2eff_CR72 = Rpre - inv_tcpmg * ncyc *  arccosh(v1c.real)

    ## Baldwin final.
    # Estimate R2eff. relax_time = Trel = 1/inv_tcpmg.
    # R2eff = R2eff_CR72 - inv_tcpmg * log(Tog.real)

    # Catch math domain error of log of negative.
    # This is when Tog.real is negative.
    mask_log_tog_neg = Tog.real < 0.0
    if any(mask_log_tog_neg):
        t_log_tog_neg = True
        Tog.real[mask_log_tog_neg] = 1.0

    # Fastest calculation.
    back_calc[:] = (r20a + r20b + kex) / 2.0 - inv_tcpmg * (
        ncyc * arccosh(v1c.real) + log(Tog.real))

    # Replace data in array.
    # If dw is zero.
    if t_dw_zero:
        back_calc[mask_dw_zero.mask] = r20a[mask_dw_zero.mask]

    # If E0 is above 700.
    if t_max_e:
        back_calc[mask_max_e.mask] = r20a[mask_max_e.mask]

    # If v1c is less than 1.
    if t_v1c_less_one:
        back_calc[mask_v1c_less_one] = 1e100

    # If Tog_div is zero.
    if t_v3_N_zero:
        back_calc[mask_v3_N_zero] = 1e100

    # If Tog.real is negative.
    if t_log_tog_neg:
        back_calc[mask_log_tog_neg] = 1e100

    # Catch errors, taking a sum over array is the fastest way to check for
    # +/- inf (infinity) and nan (not a number).
    if not isfinite(sum(back_calc)):
        # Replaces nan, inf, etc. with fill value.
        fix_invalid(back_calc, copy=False, fill_value=1e100)
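
A minimal sketch of the overflow guard used for E0 above: numpy.sinh overflows for arguments above roughly 710, so values at or beyond the 700 cut are clamped and flagged for replacement afterwards:

import numpy as np
import numpy.ma as ma

E0 = np.array([1.0, 350.0, 800.0])
mask_max_e = ma.masked_greater_equal(E0, 700.0)
E0[mask_max_e.mask] = 1.0  # clamp before sinh to avoid overflow
print(np.sinh(E0))         # finite everywhere; flagged entries are fixed up later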
Example #28
import numpy as np
import numpy.ma as ma
import astropy.units as u
import matplotlib.pyplot as plt

import sunpy.map
from sunpy.data.sample import AIA_171_IMAGE

###############################################################################
# We first create the Map using the sample data and import the coordinate
# functionality.

aia = sunpy.map.Map(AIA_171_IMAGE)

###############################################################################
# Now we create a new custom aia with our new mask and
# plot the result using our modified colormap

max_indices = np.unravel_index(aia.data.argmax(), aia.data.shape) * u.pixel
hpc_max = aia.pixel_to_data(max_indices[1], max_indices[0])
r = np.sqrt(hpc_max.Tx ** 2 + hpc_max.Ty ** 2) / aia.rsun_obs
mask = ma.masked_greater_equal(r, 1)
scaled_map = sunpy.map.Map(aia.data, aia.meta, mask=mask.mask)

###############################################################################
# Let's now plot the results. We'll overlay the autogenerated SunPy lon/lat
# grid as well for comparison.

fig = plt.figure()
ax = plt.subplot(projection=aia)

aia.plot()
ax.plot_coord(hpc_max, color='white', marker='x', markersize=10)

plt.show()
Example #29
def carbchem(op_swtch, mdi, T_cube, S_cube, TCO2_cube, TALK_cube, Pr=0.0, TB=0.0, Ni=100.0, Tl=1.0e-5):
    # This function calculates the inorganic carbon chemistry balance
    # according to the method of Peng et al 1987
    # The parameters are set in the first few lines

    # salinity needs to be converted into psu
    # TCO2 and TALK must be in mol/kg
    # the ones below here are not needed

    # This procedure calculates the inorganic carbon chemistry balance
    # according to the method of Peng et al 1987
    # The parameters are set in the first few lines
    #
    #  ops= 0 ;  output is iteration count
    #       1 ;            pCO2
    #       2 ;            pH
    #       3 ;            [H2CO3]
    #       4 ;            [HCO3]
    #       5 ;            [CO3]
    #       6 ;            satn [co3] : calcite
    #       7 ;            saturation state: calcite
    #       8 ;            satn [CO3] : aragonite
    #       9 ;            saturation state: aragonite

    # make sure the grids are the same size
    # make sure the years are the same
    # extract the data from the cubes

    # from iris import *
    # from iris.analysis import *
    # import iris.analysis
    # from numpy import *
    # from matplotlib.pyplot import *
    # from scipy.stats.mstats import *
    # import iris.plot as iplt
    # import seawater
    # import numpy
    # import iris.quickplot as quickplot
    # import iris.analysis.stats as istats
    # temp = iris.load_cube('/home/ph290/tmp/hadgem2es_potential_temperature_historical_regridded.nc').extract(Constraint(depth = 0))
    # sal = iris.load_cube('/home/ph290/tmp/hadgem2es_salinity_historical_regridded.nc').extract(Constraint(depth = 0))
    # carb = iris.load_cube('/home/ph290/tmp/hadgem2es_dissolved_inorganic_carbon_historical_regridded.nc').extract(Constraint(depth = 0))
    # alk = iris.load_cube('/home/ph290/tmp/hadgem2es_total_alkalinity_historical_regridded.nc').extract(Constraint(depth = 0))
    # import carbchem
    # co2 = carbchem.carbchem(1,temp.data.fill_value,temp,sal,carb,alk)
    # T_cube = temp
    # S_cube = sal
    # TCO2_cube = carb
    # TALK_cube = alk
    # mdi = temp.data.fill_value

    t_lat = np.size(T_cube.coord("latitude").points)
    s_lat = np.size(S_cube.coord("latitude").points)
    c_lat = np.size(TCO2_cube.coord("latitude").points)
    a_lat = np.size(TALK_cube.coord("latitude").points)
    lat_test = t_lat == s_lat == c_lat == a_lat

    t_lon = np.size(T_cube.coord("longitude").points)
    s_lon = np.size(S_cube.coord("longitude").points)
    c_lon = np.size(TCO2_cube.coord("longitude").points)
    a_lon = np.size(TALK_cube.coord("longitude").points)
    lon_test = t_lon == s_lon == c_lon == a_lon

    if lat_test and lon_test:
        coord = T_cube.coord("time")
        T_year = np.array([coord.units.num2date(value).year for value in coord.points])
        coord = S_cube.coord("time")
        S_year = np.array([coord.units.num2date(value).year for value in coord.points])
        coord = TCO2_cube.coord("time")
        TCO2_year = np.array([coord.units.num2date(value).year for value in coord.points])
        coord = TALK_cube.coord("time")
        TALK_year = np.array([coord.units.num2date(value).year for value in coord.points])

        common_yrs = np.intersect1d(T_year, S_year)
        common_yrs = np.intersect1d(common_yrs, TCO2_year)
        common_yrs = np.intersect1d(common_yrs, TALK_year)

        if not (
            common_yrs.size == T_year.size
            and common_yrs.size == S_year.size
            and common_yrs.size == TCO2_year.size
            and common_yrs.size == TALK_year.size
        ):
            print("warning: timeseries shortened so all variables have the same years")
            T_cube = T_cube[np.nonzero(np.in1d(T_year, common_yrs))[0]]
            S_cube = S_cube[np.nonzero(np.in1d(S_year, common_yrs))[0]]
            TCO2_cube = TCO2_cube[np.nonzero(np.in1d(TCO2_year, common_yrs))[0]]
            TALK_cube = TALK_cube[np.nonzero(np.in1d(TALK_year, common_yrs))[0]]

        output_cube = T_cube.copy()
        T_cube = T_cube - 273.15
        T = T_cube.data.copy()
        S = S_cube.data.copy()
        TCO2_cube = TCO2_cube / 1026.0
        TCO2 = TCO2_cube.data.copy()
        TALK_cube = TALK_cube / 1026.0
        TALK = TALK_cube.data.copy()

        msk1 = ma.masked_greater_equal(T, mdi - 1.0, copy=True)
        msk2 = ma.masked_greater_equal(S, mdi - 1.0, copy=True)
        msk3 = ma.masked_greater_equal(TCO2, mdi - 1.0, copy=True)
        msk4 = ma.masked_greater_equal(TALK, mdi - 1.0, copy=True)

        msk = msk1.mask | msk2.mask | msk3.mask | msk4.mask

        T[msk] = np.nan
        S[msk] = np.nan
        TALK[msk] = np.nan
        TCO2[msk] = np.nan

        # T = np.array([13.74232016,25.0])
        # S = np.array([33.74096661,35.0])
        # TCO2 = np.array([0.0019863,2.0e-3])
        # TALK = np.array([0.00226763,2.2e-3])
        # msk = ma.masked_greater_equal(T,mdi-1.0,copy=True)

        # create land-sea mask used by sea_msk.mask
        salmin = 1.0
        S2 = np.copy(S)
        S2[np.abs(S) < salmin] = salmin

        tol = Tl
        mxiter = Ni

        op_fld = np.empty(T.shape)
        op_fld.fill(np.NAN)

        #    TB = np.ones(T.shape)
        #    TB = 4.106e-4*S2/35.0
        TB = np.empty_like(T)
        TB = np.multiply(S2, 4.106e-4 / 35.0, TB)
        # this boron is from Peng

        # convert to Kelvin
        TK = np.copy(T[:])
        TK += 273.15

        alpha_s = np.ones(T.shape)
        alpha_s = np.exp(
            (-60.2409 + 9345.17 / TK + 23.3585 * np.log(TK / 100.0))
            + (0.023517 - 0.023656 * (TK / 100.0) + 0.0047036 * np.power((TK / 100.0), 2.0)) * S
        )

        K1 = np.ones(T.shape)
        K1 = np.exp(
            (-2307.1266 / TK + 2.83655 - 1.5529413 * np.log(TK))
            - (4.0484 / TK + 0.20760841) * np.sqrt(S)
            + 0.08468345 * S
            - 0.00654208 * np.power(S, 1.5)
            + np.log(1.0 - 0.001005 * S)
        )

        # Pressure-correction coefficients for K1, K2, KB, KW, K_SP_C and
        # K_SP_A, in that order, mirroring the per-constant del_vol/del_com
        # expressions in the non-cube carbchem above.
        a = np.array([-25.50, -15.82, -29.48, -25.60, -48.76, -46.0])
        b = np.array([0.1271, -0.0219, 0.1622, 0.2324, 0.5304, 0.5304])
        c = np.array([0.0, 0.0, 0.0026080, -0.0036246, 0.0, 0.0])
        d = np.array([-3.08, 1.13, (-2.84e-3) / (1.0e-3), -5.13, -11.76, -11.76])
        e = np.array([0.0877, -0.1475, 0.0, 0.0794, 0.3692, 0.3692])

        if keyword.iskeyword(Pr):
            instance = 0
            pf = pressure_fun(a[instance], b[instance], c[instance], d[instance], e[instance], T)
            K1 = K1 * pf

        K2 = np.ones(T.shape)
        K2 = np.exp(
            (-3351.6106 / TK - 9.226508 - 0.2005743 * np.log(TK))
            - (23.9722 / TK + 0.106901773) * np.power(S, 0.5)
            + 0.1130822 * S
            - 0.00846934 * np.power(S, 1.5)
            + np.log(1.0 - 0.001005 * S)
        )

        if keyword.iskeyword(Pr):
            instance = 1
            pf = pressure_fun(a[instance], b[instance], c[instance], d[instance], e[instance], T)
            K2 = K2 * pf

        KB = np.ones(T.shape)
        KB = np.exp(
            (-8966.90 - 2890.53 * np.power(S, 0.5) - 77.942 * S + 1.728 * np.power(S, 1.5) - 0.0996 * np.power(S, 2.0))
            / TK
            + (148.0248 + 137.1942 * np.power(S, 0.5) + 1.62142 * S)
            - (24.4344 + 25.085 * np.power(S, 0.5) + 0.2474 * S) * np.log(TK)
            + 0.053105 * (np.power(S, 0.5)) * TK
        )

        if keyword.iskeyword(Pr):
            instance = 2
            pf = pressure_fun(a[instance], b[instance], c[instance], d[instance], e[instance], T)
            KB = KB * pf

        KW = np.ones(T.shape)
        KW = np.exp(
            (-13847.26 / TK + 148.96502 - 23.6521 * np.log(TK))
            + (118.67 / TK - 5.977 + 1.0495 * np.log(TK)) * np.power(S, 0.5)
            - 0.01615 * S
        )

        if np.any(Pr != 0.0):
            instance = 3
            pf = pressure_fun(a[instance], b[instance], c[instance], d[instance], e[instance], T)
            KW = KW * pf

        if 6 <= op_swtch <= 9:  # the original "or" condition was always true
            ca_conc = np.ones(T.shape)
            ca_conc = 0.01028 * S2 / 35.0

        if op_swtch == 6 or op_swtch == 7:
            K_SP_C = np.ones(T.shape)
            K_SP_C = np.power(
                10.0,
                (
                    (-171.9065 - 0.077993 * TK + 2839.319 / TK + 71.595 * np.log10(TK))
                    + (-0.77712 + 0.0028426 * TK + 178.34 / TK) * np.power(S, 0.5)
                    - 0.07711 * S
                    + 0.0041249 * np.power(S, 1.5)
                ),
            )
            if np.any(Pr != 0.0):
                instance = 4
                pf = pressure_fun(a[instance], b[instance], c[instance], d[instance], e[instance], T)
                K_SP_C = K_SP_C * pf

        if op_swtch == 8 or op_swtch == 9:
            K_SP_A = np.ones(T.shape)
            K_SP_A = np.power(
                10,
                (
                    (-171.945 - 0.077993 * TK + 2903.293 / TK + 71.595 * np.log10(TK))
                    + (-0.068393 + 0.0017276 * TK + 88.135 / TK) * np.power(S, 0.5)
                    - 0.10018 * S
                    + 0.0059415 * np.power(S, 1.5)
                ),
            )
            if np.any(Pr != 0.0):
                instance = 5
                pf = pressure_fun(a[instance], b[instance], c[instance], d[instance], e[instance], T)
                K_SP_A = K_SP_A * pf

        # Get first estimate for H+ concentration.

        aH, count = carbiter(T, TCO2, TALK, TB, msk, tol, mxiter, K1, K2, KB, KW)

        # now we have aH we can calculate...
        denom = np.zeros(T.shape)
        H2CO3 = np.zeros(T.shape)
        HCO3 = np.zeros(T.shape)
        CO3 = np.zeros(T.shape)
        pH = np.zeros(T.shape)
        pCO2 = np.zeros(T.shape)
        if op_swtch == 6 or op_swtch == 7:
            sat_CO3_C = np.zeros(T.shape)
        if op_swtch == 7:
            sat_stat_C = np.zeros(T.shape)
        if op_swtch == 8 or op_swtch == 9:
            sat_CO3_A = np.zeros(T.shape)
        if op_swtch == 9:
            sat_stat_A = np.zeros(T.shape)

        denom = np.power(aH, 2.0) + K1 * aH + K1 * K2
        H2CO3 = TCO2 * np.power(aH, 2.0) / denom
        HCO3 = TCO2 * K1 * aH / denom
        CO3 = TCO2 * K1 * K2 / denom

        pH = -np.log10(aH)
        pCO2 = H2CO3 / alpha_s

        if op_swtch == 6 or op_swtch == 7:
            sat_CO3_C = K_SP_C / ca_conc
            if op_swtch == 7:
                sat_stat_C = CO3 / sat_CO3_C

        if op_swtch == 8 or op_swtch == 9:
            sat_CO3_A = K_SP_A / ca_conc
            if op_swtch == 9:
                sat_stat_A = CO3 / sat_CO3_A

        output_cube = output_cube * 0.0 + np.nan
        if op_swtch == 0:
            # copy the iteration count into the cube (the original stored it
            # only in the unused op_fld buffer, returning an all-NaN cube)
            output_cube.data = np.zeros(T.shape) + count
        elif op_swtch == 1:
            output_cube.data = pCO2 * 1.0e6
            output_cube.standard_name = "surface_partial_pressure_of_carbon_dioxide_in_sea_water"
            output_cube.long_name = "CO2 concentration"
            output_cube.units = "uatm"
        elif op_swtch == 2:
            output_cube.data = pH
            output_cube.standard_name = "sea_water_ph_reported_on_total_scale"
            output_cube.long_name = "pH"
            output_cube.units = "1"
        elif op_swtch == 3:
            output_cube.data = H2CO3
        elif op_swtch == 4:
            output_cube.data = HCO3
        elif op_swtch == 5:
            output_cube.data = CO3
        elif op_swtch == 6:
            output_cube.data = sat_CO3_C
        elif op_swtch == 7:
            output_cube.data = sat_stat_C
        elif op_swtch == 8:
            output_cube.data = sat_CO3_A
        elif op_swtch == 9:
            output_cube.data = sat_stat_A

        return output_cube
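
The speciation step above follows directly from the carbonate equilibria: with
the hydrogen ion activity aH returned by carbiter and the dissociation
constants K1 and K2,

    H2CO3 = TCO2 * aH**2    / (aH**2 + K1*aH + K1*K2)
    HCO3  = TCO2 * K1 * aH  / (aH**2 + K1*aH + K1*K2)
    CO3   = TCO2 * K1 * K2  / (aH**2 + K1*aH + K1*K2)

with pH = -log10(aH) and pCO2 = H2CO3 / alpha_s (alpha_s being the CO2
solubility), exactly as computed above.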
Example #31
File: cr72.py Project: pombredanne/relax
# Imports needed to run this snippet on its own (a hedged reconstruction; in
# the relax sources these live at the top of lib/dispersion/cr72.py):
from numpy import arccosh, cos, cosh, fabs, isfinite, max, min, multiply, sqrt, subtract, sum
from numpy.ma import fix_invalid, masked_greater_equal, masked_where

# Scaling constant for the eta+/- values (2**(-3/2) in the relax sources).
eta_scale = 2.0**(-3.0 / 2.0)


def r2eff_CR72(r20a=None, r20a_orig=None, r20b=None, r20b_orig=None, pA=None, dw=None, dw_orig=None, kex=None, cpmg_frqs=None, back_calc=None):
    """Calculate the R2eff values for the CR72 model.

    See the module docstring for details.


    @keyword r20a:          The R20 parameter value of state A (R2 with no exchange).
    @type r20a:             numpy float array of rank [NE][NS][NM][NO][ND]
    @keyword r20a_orig:     The R20 parameter value of state A (R2 with no exchange). This is only for faster checking of a zero value, which results in no exchange.
    @type r20a_orig:        numpy float array of rank-1
    @keyword r20b:          The R20 parameter value of state B (R2 with no exchange).
    @type r20b:             numpy float array of rank [NE][NS][NM][NO][ND]
    @keyword r20b_orig:     The R20 parameter value of state B (R2 with no exchange). This is only for faster checking of a zero value, which results in no exchange.
    @type r20b_orig:        numpy float array of rank-1
    @keyword pA:            The population of state A.
    @type pA:               float
    @keyword dw:            The chemical exchange difference between states A and B in rad/s.
    @type dw:               numpy array of rank [NE][NS][NM][NO][ND]
    @keyword dw_orig:       The chemical exchange difference between states A and B in ppm. This is only for faster checking of a zero value, which results in no exchange.
    @type dw_orig:          numpy float array of rank-1
    @keyword kex:           The kex parameter value (the exchange rate in rad/s).
    @type kex:              float
    @keyword cpmg_frqs:     The CPMG nu1 frequencies.
    @type cpmg_frqs:        numpy float array of rank [NE][NS][NM][NO][ND]
    @keyword back_calc:     The array for holding the back calculated R2eff values.  Each element corresponds to one of the CPMG nu1 frequencies.
    @type back_calc:        numpy float array of rank [NE][NS][NM][NO][ND]
    """

    # Flags to tell if values should be replaced when dw is zero or when the max_etapos limit of the cosh function is violated.
    t_dw_zero = False
    t_max_etapos = False

    # Catch parameter values that will result in no exchange, returning flat R2eff = R20 lines (when kex = 0.0, k_AB = 0.0).
    # Test if pA or kex is zero.
    if kex == 0.0 or pA == 1.0:
        back_calc[:] = r20a
        return

    # Test if dw is zero. Create a mask for the affected spins to replace these with R20 at the end of the calculation. Wait for the replacement, since this is spin specific.
    if min(fabs(dw_orig)) == 0.0:
        t_dw_zero = True
        mask_dw_zero = masked_where(dw == 0.0, dw)

    # The B population.
    pB = 1.0 - pA

    # Repetitive calculations (to speed up calculations).
    dw2 = dw**2
    r20_kex = (r20a + r20b + kex) / 2.0
    k_BA = pA * kex
    k_AB = pB * kex

    # The Psi and zeta values.
    if sum(r20a_orig - r20b_orig) != 0.0:
        fact = r20a - r20b - k_BA + k_AB
        Psi = fact**2 - dw2 + 4.0*k_BA*k_AB
        zeta = 2.0*dw * fact
    else:
        Psi = kex**2 - dw2
        zeta = -2.0*dw * (k_BA - k_AB)

    # More repetitive calculations.
    sqrt_psi2_zeta2 = sqrt(Psi**2 + zeta**2)

    # The D+/- values.
    D_part = (0.5*Psi + dw2) / sqrt_psi2_zeta2
    Dpos = 0.5 + D_part
    Dneg = -0.5 + D_part

    # Partial eta+/- values.
    eta_fact = eta_scale / cpmg_frqs
    etapos = eta_fact * sqrt(Psi + sqrt_psi2_zeta2)
    etaneg = eta_fact * sqrt(-Psi + sqrt_psi2_zeta2)

    # Catch math domain error of cosh(val > 710).
    # This is when etapos > 700 (a safety margin below the ~710 overflow limit).
    if max(etapos) > 700:
        t_max_etapos = True
        mask_max_etapos = masked_greater_equal(etapos, 700.0)
        # To prevent math errors, set etapos to 1.
        etapos[mask_max_etapos.mask] = 1.0

    # The arccosh argument - catch invalid values.
    fact = Dpos * cosh(etapos) - Dneg * cos(etaneg)
    if min(fact) < 1.0:
        back_calc[:] = r20_kex
        return

    # Calculate R2eff. This writes directly into the back_calc buffer.
    multiply(cpmg_frqs, arccosh(fact), out=back_calc)
    subtract(r20_kex, back_calc, out=back_calc)

    # Replace data in array.
    # If dw is zero.
    if t_dw_zero:
        back_calc[mask_dw_zero.mask] = r20a[mask_dw_zero.mask]

    # If eta_pos above 700.
    if t_max_etapos:
        back_calc[mask_max_etapos.mask] = r20a[mask_max_etapos.mask]

    # Catch errors, taking a sum over array is the fastest way to check for
    # +/- inf (infinity) and nan (not a number).
    if not isfinite(sum(back_calc)):
        # Replaces nan, inf, etc. with fill value.
        fix_invalid(back_calc, copy=False, fill_value=1e100)
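
A minimal, hypothetical call of the snippet above (shapes and values are
illustrative only; in relax the arrays are rank [NE][NS][NM][NO][ND] and the
call is driven by the dispersion target functions; dw_orig is only checked
for zeros, so the rad/s array is reused for it here):

import numpy as np

n = 5
cpmg_frqs = np.linspace(50.0, 1000.0, n)  # CPMG nu1 frequencies (Hz)
back_calc = np.zeros(n)                   # output buffer, filled in place
r20 = np.full(n, 10.0)                    # R20 of both states (1/s)
dw = np.full(n, 300.0)                    # exchange difference (rad/s)

r2eff_CR72(r20a=r20, r20a_orig=r20, r20b=r20, r20b_orig=r20,
           pA=0.9, dw=dw, dw_orig=dw, kex=1000.0,
           cpmg_frqs=cpmg_frqs, back_calc=back_calc)
print(back_calc)  # R2eff dispersion curve, decreasing with nu1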
Example #32
# Imports needed to run this snippet on its own (a hedged reconstruction of
# the module's header):
import logging

import numpy as np
import numpy.ma as ma
from skimage import filters, segmentation


def hystLow(img,
            img_gauss,
            sd=0,
            mean=0,
            diff=40,
            init_low=0.05,
            gen_high=0.8,
            mode='memb'):
    """ Lower treshold calculations for hysteresis membrane detection function hystMemb.

    diff - int, difference (in px number) between hysteresis mask and img without greater values
    delta_diff - int, tolerance level (in px number) for diff value
    gen_high, sd, mean - see hystMemb

    mode - 'cell': only sd threshold calc, 'memb': both thresholds calc

    """
    if mode == 'memb':
        masks = {
            '2sd': ma.masked_greater_equal(img, 2 * sd),  # values greater than 2 noise sd
            'mean': ma.masked_greater(img, mean)  # values greater than mean cytoplasm intensity
        }
    elif mode == 'cell':
        masks = {'2sd': ma.masked_greater_equal(img, 2 * sd)}

    logging.info('masks: {}'.format(masks.keys()))

    low_val = {}
    control_diff = False
    for mask_name in masks:
        mask_img = masks[mask_name]

        logging.info(
            'Mask {} lower threshold fitting in progress'.format(mask_name))

        mask_hyst = filters.apply_hysteresis_threshold(
            img_gauss,
            low=init_low * np.max(img_gauss),
            high=gen_high * np.max(img_gauss))
        diff_mask = np.sum(ma.masked_where(~mask_hyst, mask_img) > 0)

        if diff_mask < diff:
            raise ValueError('Initial lower threshold is too low!')
        logging.info('Initial masks difference {}'.format(diff_mask))

        low = init_low

        i = 0
        control_diff = 1
        while diff_mask >= diff:
            mask_hyst = filters.apply_hysteresis_threshold(
                img_gauss,
                low=low * np.max(img_gauss),
                high=gen_high * np.max(img_gauss))
            diff_mask = np.sum(ma.masked_where(~mask_hyst, mask_img) > 0)

            low += 0.01

            i += 1
            # is the cytoplasm mean mask closed at the initial lower threshold value? prevents an infinite loop
            if i == 75:
                logging.fatal(
                    'Lower threshold for {} mask {:.2f}, control difference {}px'
                    .format(mask_name, low, control_diff))
                raise RuntimeError(
                    'Membrane not detected in mean mask at the initial lower threshold value!'
                )

        # is the cytoplasm mask closed at the configured difference value?
        if mask_name == 'mean':
            control_diff = np.all((segmentation.flood(mask_hyst,
                                                      (0, 0)) + mask_hyst))
            if control_diff:
                logging.fatal(
                    'Lower threshold for {} mask {:.2f}, masks difference {}px'
                    .format(mask_name, low, diff_mask))
                raise ValueError(
                    'Membrane in {} mask is not closed; membrane cannot be located at this diff value (too low)!'
                    .format(mask_name))

        low_val.update({mask_name: low})
    logging.info('Lower thresholds {}\n'.format(low_val))

    return low_val
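
A hypothetical invocation on synthetic data (sd and mean would normally be
measured from a noise region and from the cytoplasm of a real image; depending
on the data, the function may raise one of its ValueError/RuntimeError checks):

import numpy as np
from scipy import ndimage as ndi

rng = np.random.default_rng(0)
img = rng.normal(0, 10, (128, 128))            # background noise, sd ~ 10
img[32:96, 32:96] += 400                       # bright "cell"
img_gauss = ndi.gaussian_filter(img, sigma=6)  # smoothed copy for hysteresis
low_val = hystLow(img, img_gauss, sd=10, mean=150, diff=40, mode='cell')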
Example #33
# Imports needed to run this snippet on its own (a hedged reconstruction;
# Initialise_model and Initialise_inputs come from the model's own
# initialisation module):
import math
import random

import numpy as np
import numpy.ma as ma

from initialise import Initialise_inputs, Initialise_model


def Stochastic_Process(j):
    Profile, num_profiles = Initialise_model(j)
    peak_enlarg, mu_peak, s_peak, Year_behaviour, User_list = Initialise_inputs(
        j)
    '''
    Calculation of the peak time range, which is used to discriminate between off-peak and on-peak coincident switch-on probability.
    First the overall Peak Window is calculated (taking into account all User classes).
    The peak window is just a time window in which coincident switch-on of multiple appliances assumes a higher probability than off-peak.
    Within the peak window, a random peak time is calculated and then enlarged into a peak_time_range, again following a random procedure.
    '''
    windows_curve = np.zeros(1440)  #creates an empty daily profile
    Tot_curve = np.zeros(1440)  #creates another empty daily profile
    for Us in User_list:
        App_count = 0
        for App in Us.App_list:
            #Calculate windows curve, i.e. the theoretical maximum curve that can be obtained, for each app, by switching-on always all the 'n' apps altogether in any time-step of the functioning windows
            single_wcurve = Us.App_list[App_count].daily_use * np.mean(
                Us.App_list[App_count].POWER
            ) * Us.App_list[
                App_count].number  #this computes the curve for the specific App
            windows_curve = np.vstack(
                [windows_curve, single_wcurve]
            )  #this stacks the specific App curve in an overall curve comprising all the Apps within a User class
            App_count += 1
        Us.windows_curve = windows_curve  #after having iterated for all the Apps within a User class, saves the overall User class theoretical maximum curve
        Us.windows_curve = np.transpose(np.sum(Us.windows_curve,
                                               axis=0)) * Us.num_users
        Tot_curve = Tot_curve + Us.windows_curve  #adds the User's theoretical max profile to the total theoretical max comprising all classes
    peak_window = np.transpose(np.argwhere(Tot_curve == np.amax(
        Tot_curve)))  #Find the peak window within the theoretical max profile
    peak_time = round(
        random.normalvariate(round(np.average(peak_window)),
                             1 / 3 * (peak_window[0, -1] - peak_window[0, 0]))
    )  #Within the peak_window, randomly calculate the peak_time using a gaussian distribution
    peak_time_range = np.arange(
        (peak_time - round(
            math.fabs(peak_time -
                      (random.gauss(peak_time, (peak_enlarg * peak_time)))))),
        (peak_time + round(
            math.fabs(peak_time -
                      random.gauss(peak_time, (peak_enlarg * peak_time)))))
    )  #the peak_time is randomly enlarged based on the calibration parameter peak_enlarg
    '''
    The core stochastic process starts here. For each profile requested by the software user, 
    each Appliance instance within each User instance is separately and stochastically generated
    '''
    for prof_i in range(
            num_profiles
    ):  #the whole code is repeated for each profile that needs to be generated
        Tot_Classes = np.zeros(
            1440
        )  #initialise an empty daily profile that will be filled with the sum of the hourly profiles of each User instance
        for Us in User_list:  #iterates for each User instance (i.e. for each user class)
            Us.load = np.zeros(1440)  #initialise empty load for User instance
            for i in range(
                    Us.num_users
            ):  #iterates for every single user within a User class. Each single user has its own separate randomisation
                if Us.user_preference == 0:
                    rand_daily_pref = 0
                    pass
                else:
                    rand_daily_pref = random.randint(1, Us.user_preference)
                for App in Us.App_list:  #iterates for all the App types in the given User class
                    #initialises variables for the cycle
                    tot_time = 0
                    App.daily_use = np.zeros(1440)
                    if random.uniform(
                            0, 1
                    ) > App.occasional_use:  #evaluates if occasional use happens or not
                        continue
                    else:
                        pass

                    if App.Pref_index == 0:
                        pass
                    else:
                        if rand_daily_pref == App.Pref_index:  #evaluates if daily preference coincides with the randomised daily preference number
                            pass
                        else:
                            continue
                    if App.wd_we == Year_behaviour[
                            prof_i] or App.wd_we == 2:  #checks if the app is allowed in the given yearly behaviour pattern
                        pass
                    else:
                        continue

                    #recalculate windows start and ending times randomly, based on the inputs
                    rand_window_1 = np.array([
                        int(
                            random.uniform(
                                (App.window_1[0] - App.random_var_1),
                                (App.window_1[0] + App.random_var_1))),
                        int(
                            random.uniform(
                                (App.window_1[1] - App.random_var_1),
                                (App.window_1[1] + App.random_var_1)))
                    ])
                    if rand_window_1[0] < 0:
                        rand_window_1[0] = 0
                    if rand_window_1[1] > 1440:
                        rand_window_1[1] = 1440

                    rand_window_2 = np.array([
                        int(
                            random.uniform(
                                (App.window_2[0] - App.random_var_2),
                                (App.window_2[0] + App.random_var_2))),
                        int(
                            random.uniform(
                                (App.window_2[1] - App.random_var_2),
                                (App.window_2[1] + App.random_var_2)))
                    ])
                    if rand_window_2[0] < 0:
                        rand_window_2[0] = 0
                    if rand_window_2[1] > 1440:
                        rand_window_2[1] = 1440

                    rand_window_3 = np.array([
                        int(
                            random.uniform(
                                (App.window_3[0] - App.random_var_3),
                                (App.window_3[0] + App.random_var_3))),
                        int(
                            random.uniform(
                                (App.window_3[1] - App.random_var_3),
                                (App.window_3[1] + App.random_var_3)))
                    ])
                    if rand_window_3[0] < 0:
                        rand_window_3[0] = 0
                    if rand_window_3[1] > 1440:
                        rand_window_3[1] = 1440

                    #redefines functioning windows based on the previous randomisation of the boundaries
                    if App.flat == 'yes':  #if the app is "flat" the code stops right after filling the newly created windows without applying any further stochasticity
                        App.daily_use[
                            rand_window_1[0]:rand_window_1[1]] = np.full(
                                np.diff(rand_window_1),
                                App.POWER[prof_i] * App.number)
                        App.daily_use[
                            rand_window_2[0]:rand_window_2[1]] = np.full(
                                np.diff(rand_window_2),
                                App.POWER[prof_i] * App.number)
                        App.daily_use[
                            rand_window_3[0]:rand_window_3[1]] = np.full(
                                np.diff(rand_window_3),
                                App.POWER[prof_i] * App.number)
                        Us.load = Us.load + App.daily_use
                        continue
                    else:  #otherwise, for "non-flat" apps it puts a mask on the newly defined windows and continues
                        App.daily_use[
                            rand_window_1[0]:rand_window_1[1]] = np.full(
                                np.diff(rand_window_1), 0.001)
                        App.daily_use[
                            rand_window_2[0]:rand_window_2[1]] = np.full(
                                np.diff(rand_window_2), 0.001)
                        App.daily_use[
                            rand_window_3[0]:rand_window_3[1]] = np.full(
                                np.diff(rand_window_3), 0.001)
                    App.daily_use_masked = np.zeros_like(
                        ma.masked_not_equal(App.daily_use, 0.001))

                    App.power = App.POWER[prof_i]

                    #random variability is applied to the total functioning time and to the duration of the duty cycles, if they have been specified
                    random_var_t = random.uniform((1 - App.r_t), (1 + App.r_t))
                    if App.activate == 1:
                        App.p_11 = App.P_11 * (
                            random.uniform((1 - App.Thermal_P_var),
                                           (1 + App.Thermal_P_var))
                        )  #randomly variates the power of thermal apps, otherwise variability is 0
                        App.p_12 = App.P_12 * (
                            random.uniform((1 - App.Thermal_P_var),
                                           (1 + App.Thermal_P_var))
                        )  #randomly variates the power of thermal apps, otherwise variability is 0
                        random_cycle1 = np.concatenate(
                            ((np.ones(
                                int(App.t_11 * (random.uniform(
                                    (1 + App.r_c1),
                                    (1 - App.r_c1))))) * App.p_11),
                             (np.ones(
                                 int(App.t_12 * (random.uniform(
                                     (1 + App.r_c1), (1 - App.r_c1))))) *
                              App.p_12)))  #randomise also the fixed cycle
                        random_cycle2 = random_cycle1
                        random_cycle3 = random_cycle1
                    elif App.activate == 2:
                        App.p_11 = App.P_11 * (
                            random.uniform((1 - App.Thermal_P_var),
                                           (1 + App.Thermal_P_var))
                        )  #randomly variates the power of thermal apps, otherwise variability is 0
                        App.p_12 = App.P_12 * (
                            random.uniform((1 - App.Thermal_P_var),
                                           (1 + App.Thermal_P_var))
                        )  #randomly variates the power of thermal apps, otherwise variability is 0
                        App.p_21 = App.P_21 * (
                            random.uniform((1 - App.Thermal_P_var),
                                           (1 + App.Thermal_P_var))
                        )  #randomly variates the power of thermal apps, otherwise variability is 0
                        App.p_22 = App.P_22 * (
                            random.uniform((1 - App.Thermal_P_var),
                                           (1 + App.Thermal_P_var))
                        )  #randomly variates the power of thermal apps, otherwise variability is 0
                        random_cycle1 = np.concatenate(
                            ((np.ones(
                                int(App.t_11 * (random.uniform(
                                    (1 + App.r_c1),
                                    (1 - App.r_c1))))) * App.p_11),
                             (np.ones(
                                 int(App.t_12 * (random.uniform(
                                     (1 + App.r_c1), (1 - App.r_c1))))) *
                              App.p_12)))  #randomise also the fixed cycle
                        random_cycle2 = np.concatenate(
                            ((np.ones(
                                int(App.t_21 * (random.uniform(
                                    (1 + App.r_c2),
                                    (1 - App.r_c2))))) * App.p_21),
                             (np.ones(
                                 int(App.t_22 * (random.uniform(
                                     (1 + App.r_c2), (1 - App.r_c2))))) *
                              App.p_22)))  #randomise also the fixed cycle
                        random_cycle3 = random_cycle1
                    elif App.activate == 3:
                        App.p_11 = App.P_11 * (
                            random.uniform((1 - App.Thermal_P_var),
                                           (1 + App.Thermal_P_var))
                        )  #randomly variates the power of thermal apps, otherwise variability is 0
                        App.p_12 = App.P_12 * (
                            random.uniform((1 - App.Thermal_P_var),
                                           (1 + App.Thermal_P_var))
                        )  #randomly variates the power of thermal apps, otherwise variability is 0
                        App.p_21 = App.P_12 * (
                            random.uniform((1 - App.Thermal_P_var),
                                           (1 + App.Thermal_P_var))
                        )  #randomly variates the power of thermal apps, otherwise variability is 0
                        App.p_22 = App.P_22 * (
                            random.uniform((1 - App.Thermal_P_var),
                                           (1 + App.Thermal_P_var))
                        )  #randomly variates the power of thermal apps, otherwise variability is 0
                        App.p_31 = App.P_31 * (
                            random.uniform((1 - App.Thermal_P_var),
                                           (1 + App.Thermal_P_var))
                        )  #randomly variates the power of thermal apps, otherwise variability is 0
                        App.p_32 = App.P_32 * (
                            random.uniform((1 - App.Thermal_P_var),
                                           (1 + App.Thermal_P_var))
                        )  #randomly variates the power of thermal apps, otherwise variability is 0
                        random_cycle1 = random.choice([
                            np.concatenate(((np.ones(
                                int(App.t_11 * (random.uniform(
                                    (1 + App.r_c1),
                                    (1 - App.r_c1))))) * App.p_11), (np.ones(
                                        int(App.t_12 * (random.uniform(
                                            (1 + App.r_c1),
                                            (1 - App.r_c1))))) * App.p_12))),
                            np.concatenate(((np.ones(
                                int(App.t_12 * (random.uniform(
                                    (1 + App.r_c1),
                                    (1 - App.r_c1))))) * App.p_12), (np.ones(
                                        int(App.t_11 * (random.uniform(
                                            (1 + App.r_c1),
                                            (1 - App.r_c1))))) * App.p_11)))
                        ])  #randomise also the fixed cycle
                        random_cycle2 = random.choice([
                            np.concatenate(((np.ones(
                                int(App.t_21 * (random.uniform(
                                    (1 + App.r_c2),
                                    (1 - App.r_c2))))) * App.p_21), (np.ones(
                                        int(App.t_22 * (random.uniform(
                                            (1 + App.r_c2),
                                            (1 - App.r_c2))))) * App.p_22))),
                            np.concatenate(((np.ones(
                                int(App.t_22 * (random.uniform(
                                    (1 + App.r_c2),
                                    (1 - App.r_c2))))) * App.p_22), (np.ones(
                                        int(App.t_21 * (random.uniform(
                                            (1 + App.r_c2),
                                            (1 - App.r_c2))))) * App.p_21)))
                        ])
                        random_cycle3 = random.choice([
                            np.concatenate(((np.ones(
                                int(App.t_31 * (random.uniform(
                                    (1 + App.r_c3),
                                    (1 - App.r_c3))))) * App.p_31), (np.ones(
                                        int(App.t_32 * (random.uniform(
                                            (1 + App.r_c3),
                                            (1 - App.r_c3))))) * App.p_32))),
                            np.concatenate(((np.ones(
                                int(App.t_32 * (random.uniform(
                                    (1 + App.r_c3),
                                    (1 - App.r_c3))))) * App.p_32), (np.ones(
                                        int(App.t_31 * (random.uniform(
                                            (1 + App.r_c3),
                                            (1 - App.r_c3))))) * App.p_31)))
                        ])  #this is to avoid all cycles being synchronous
                    else:
                        pass
                    rand_time = round(
                        random.uniform(App.func_time,
                                       int(App.func_time * random_var_t)))
                    #control to check that the total randomised time of use does not exceed the total space available in the windows
                    if rand_time > 0.99 * (np.diff(rand_window_1) +
                                           np.diff(rand_window_2) +
                                           np.diff(rand_window_3)):
                        rand_time = int(
                            0.99 *
                            (np.diff(rand_window_1) + np.diff(rand_window_2) +
                             np.diff(rand_window_3)))
                    max_free_spot = rand_time  #free spots are used to detect if there's still space for switch_ons. Before calculating actual free spots, the max free spot is set equal to the entire randomised func_time

                    while tot_time <= rand_time:  #this is the key cycle, which runs for each App until the switch_ons and their duration equals the randomised total time of use of the App
                        #check how many windows to consider
                        if App.num_windows == 1:
                            switch_on = int(
                                random.choice([
                                    random.uniform(rand_window_1[0],
                                                   (rand_window_1[1]))
                                ]))
                        elif App.num_windows == 2:
                            switch_on = int(
                                random.choice([
                                    random.uniform(rand_window_1[0],
                                                   (rand_window_1[1])),
                                    random.uniform(rand_window_2[0],
                                                   (rand_window_2[1]))
                                ]))
                        else:
                            switch_on = int(
                                random.choice([
                                    random.uniform(rand_window_1[0],
                                                   (rand_window_1[1])),
                                    random.uniform(rand_window_2[0],
                                                   (rand_window_2[1])),
                                    random.uniform(rand_window_3[0],
                                                   (rand_window_3[1]))
                                ]))
                        #Identifies a random switch on time within the available functioning windows
                        if App.daily_use[
                                switch_on] == 0.001:  #control to check if the app is not already on at the randomly selected switch-on time
                            if switch_on in range(rand_window_1[0],
                                                  rand_window_1[1]):
                                if np.any(
                                        App.
                                        daily_use[switch_on:rand_window_1[1]]
                                        != 0.001
                                ):  #control to check if there are any other switch on times after the current one
                                    next_switch = [
                                        switch_on + k[0] for k in np.where(
                                            App.daily_use[switch_on:] != 0.001)
                                    ]  #identifies the position of next switch on time and sets it as a limit for the duration of the current switch on
                                    if (
                                            next_switch[0] - switch_on
                                    ) >= App.func_cycle and max_free_spot >= App.func_cycle:
                                        upper_limit = min(
                                            (next_switch[0] - switch_on),
                                            min(rand_time,
                                                rand_window_1[1] - switch_on))
                                    elif (
                                            next_switch[0] - switch_on
                                    ) < App.func_cycle and max_free_spot >= App.func_cycle:  #if next switch_on event does not allow for a minimum functioning cycle without overlapping, but there are other larger free spots, the cycle tries again from the beginning
                                        continue
                                    else:
                                        upper_limit = next_switch[
                                            0] - switch_on  #if there are no other options to reach the total time of use, empty spaces are filled without minimum cycle restrictions until reaching the limit
                                else:
                                    upper_limit = min(
                                        rand_time, rand_window_1[1] - switch_on
                                    )  #if there are no other switch-on events after the current one, the upper duration limit is set this way

                                if upper_limit >= App.func_cycle:  #if the upper limit is higher than minimum functioning time, an array of indexes is created to be later put in the profile
                                    indexes = np.arange(
                                        switch_on, switch_on + (int(
                                            random.uniform(
                                                App.func_cycle, upper_limit)))
                                    )  #a random duration is chosen between the upper limit and the minimum cycle
                                else:
                                    indexes = np.arange(
                                        switch_on, switch_on + upper_limit
                                    )  #this is the case in which empty spaces need to be filled without constraints to reach the total time goal

                            elif switch_on in range(
                                    rand_window_2[0], rand_window_2[1]
                            ):  #if random switch_on happens in windows2, same code as above is repeated for windows2
                                if np.any(App.
                                          daily_use[switch_on:rand_window_2[1]]
                                          != 0.001):
                                    next_switch = [
                                        switch_on + k[0] for k in np.where(
                                            App.daily_use[switch_on:] != 0.001)
                                    ]
                                    if (
                                            next_switch[0] - switch_on
                                    ) >= App.func_cycle and max_free_spot >= App.func_cycle:
                                        upper_limit = min(
                                            (next_switch[0] - switch_on),
                                            min(rand_time,
                                                rand_window_2[1] - switch_on))
                                    elif (
                                            next_switch[0] - switch_on
                                    ) < App.func_cycle and max_free_spot >= App.func_cycle:
                                        continue
                                    else:
                                        upper_limit = next_switch[0] - switch_on

                                else:
                                    upper_limit = min(
                                        rand_time,
                                        rand_window_2[1] - switch_on)

                                if upper_limit >= App.func_cycle:
                                    indexes = np.arange(
                                        switch_on, switch_on + (int(
                                            random.uniform(
                                                App.func_cycle, upper_limit))))
                                else:
                                    indexes = np.arange(
                                        switch_on, switch_on + upper_limit)

                            else:  #if switch_on is not in window1 nor in window2, it must be in window3. Same code is repeated
                                if np.any(App.
                                          daily_use[switch_on:rand_window_3[1]]
                                          != 0.001):
                                    next_switch = [
                                        switch_on + k[0] for k in np.where(
                                            App.daily_use[switch_on:] != 0.001)
                                    ]
                                    if (
                                            next_switch[0] - switch_on
                                    ) >= App.func_cycle and max_free_spot >= App.func_cycle:
                                        upper_limit = min(
                                            (next_switch[0] - switch_on),
                                            min(rand_time,
                                                rand_window_3[1] - switch_on))
                                    elif (
                                            next_switch[0] - switch_on
                                    ) < App.func_cycle and max_free_spot >= App.func_cycle:
                                        continue
                                    else:
                                        upper_limit = next_switch[0] - switch_on

                                else:
                                    upper_limit = min(
                                        rand_time,
                                        rand_window_3[1] - switch_on)

                                if upper_limit >= App.func_cycle:
                                    indexes = np.arange(
                                        switch_on, switch_on + (int(
                                            random.uniform(
                                                App.func_cycle, upper_limit))))
                                else:
                                    indexes = np.arange(
                                        switch_on, switch_on + upper_limit)

                            tot_time = tot_time + indexes.size  #the count of total time is updated with the size of the indexes array

                            if tot_time > rand_time:  #control to check when the total functioning time is reached. It will be typically overcome, so a correction is applied to avoid this
                                indexes_adj = indexes[:-(
                                    tot_time - rand_time
                                )]  #corrects indexes size to avoid exceeding the total time
                                if np.in1d(peak_time_range, indexes_adj).any(
                                ) and App.fixed == 'no':  #check if indexes are in peak window and if the coincident behaviour is locked by the "fixed" attribute
                                    coincidence = min(
                                        App.number,
                                        max(
                                            1,
                                            math.ceil(
                                                random.gauss(
                                                    math.ceil(App.number *
                                                              mu_peak),
                                                    (s_peak * App.number *
                                                     mu_peak))))
                                    )  #calculates coincident behaviour within the peak time range
                                elif not np.in1d(peak_time_range, indexes_adj).any() and App.fixed == 'no':  #check if indexes are off-peak and if coincident behaviour is locked or not
                                    Prob = random.uniform(
                                        0, (App.number - 1) / App.number
                                    )  #calculates probability of coincident switch_ons off-peak
                                    array = np.arange(0,
                                                      App.number) / App.number
                                    try:
                                        on_number = np.max(
                                            np.where(Prob >= array)) + 1
                                    except ValueError:
                                        on_number = 1
                                    coincidence = on_number  #randomly selects how many apps are on at the same time for each app type based on the above probabilistic algorithm
                                else:
                                    coincidence = App.number  #this is the case when App.fixed is activated. All 'n' apps of an App instance are switched_on altogether
                                if App.activate > 0:  #evaluates if the app has some duty cycles to be considered
                                    if indexes_adj.size > 0:
                                        evaluate = round(
                                            np.mean(indexes_adj)
                                        )  #calculates the mean time position of the current switch_on event, to later select the proper duty cycle
                                    else:
                                        evaluate = 0
                                    #based on the evaluate value, selects the proper duty cycle and puts the corresponding power values in the indexes range
                                    if evaluate in range(
                                            App.cw11[0],
                                            App.cw11[1]) or evaluate in range(
                                                App.cw12[0], App.cw12[1]):
                                        np.put(App.daily_use, indexes_adj,
                                               (random_cycle1 * coincidence))
                                        np.put(App.daily_use_masked,
                                               indexes_adj,
                                               (random_cycle1 * coincidence),
                                               mode='clip')
                                    elif evaluate in range(
                                            App.cw21[0],
                                            App.cw21[1]) or evaluate in range(
                                                App.cw22[0], App.cw22[1]):
                                        np.put(App.daily_use, indexes_adj,
                                               (random_cycle2 * coincidence))
                                        np.put(App.daily_use_masked,
                                               indexes_adj,
                                               (random_cycle2 * coincidence),
                                               mode='clip')
                                    else:
                                        np.put(App.daily_use, indexes_adj,
                                               (random_cycle3 * coincidence))
                                        np.put(App.daily_use_masked,
                                               indexes_adj,
                                               (random_cycle3 * coincidence),
                                               mode='clip')
                                else:  #if no duty cycles are specified, a regular switch_on event is modelled
                                    np.put(
                                        App.daily_use, indexes_adj,
                                        (App.power * (random.uniform(
                                            (1 - App.Thermal_P_var),
                                            (1 + App.Thermal_P_var))) *
                                         coincidence)
                                    )  #randomises also the App Power if Thermal_P_var is on
                                    np.put(App.daily_use_masked,
                                           indexes_adj,
                                           (App.power * (random.uniform(
                                               (1 - App.Thermal_P_var),
                                               (1 + App.Thermal_P_var))) *
                                            coincidence),
                                           mode='clip')
                                App.daily_use_masked = np.zeros_like(
                                    ma.masked_greater_equal(
                                        App.daily_use_masked, 0.001)
                                )  #updates the mask excluding the current switch_on event to identify the free_spots for the next iteration
                                tot_time = (
                                    tot_time - indexes.size
                                ) + indexes_adj.size  #updates the total time correcting the previous value
                                break  #exit cycle and go to next App
                            else:  #if the tot_time has not yet exceeded the App total functioning time, the cycle does the same without applying corrections to indexes size
                                if np.in1d(
                                        peak_time_range,
                                        indexes).any() and App.fixed == 'no':
                                    coincidence = min(
                                        App.number,
                                        max(
                                            1,
                                            math.ceil(
                                                random.gauss(
                                                    math.ceil(App.number *
                                                              mu_peak),
                                                    (s_peak * App.number *
                                                     mu_peak)))))
                                elif not np.in1d(peak_time_range, indexes).any() and App.fixed == 'no':
                                    Prob = random.uniform(
                                        0, (App.number - 1) / App.number)
                                    array = np.arange(0,
                                                      App.number) / App.number
                                    try:
                                        on_number = np.max(
                                            np.where(Prob >= array)) + 1
                                    except ValueError:
                                        on_number = 1
                                    coincidence = on_number
                                else:
                                    coincidence = App.number
                                if App.activate > 0:
                                    if indexes.size > 0:
                                        evaluate = round(np.mean(indexes))
                                    else:
                                        evaluate = 0
                                    if evaluate in range(
                                            App.cw11[0],
                                            App.cw11[1]) or evaluate in range(
                                                App.cw12[0], App.cw12[1]):
                                        np.put(App.daily_use, indexes,
                                               (random_cycle1 * coincidence))
                                        np.put(App.daily_use_masked,
                                               indexes,
                                               (random_cycle1 * coincidence),
                                               mode='clip')
                                    elif evaluate in range(
                                            App.cw21[0],
                                            App.cw21[1]) or evaluate in range(
                                                App.cw22[0], App.cw22[1]):
                                        np.put(App.daily_use, indexes,
                                               (random_cycle2 * coincidence))
                                        np.put(App.daily_use_masked,
                                               indexes,
                                               (random_cycle2 * coincidence),
                                               mode='clip')
                                    else:
                                        np.put(App.daily_use, indexes,
                                               (random_cycle3 * coincidence))
                                        np.put(App.daily_use_masked,
                                               indexes,
                                               (random_cycle3 * coincidence),
                                               mode='clip')
                                else:
                                    np.put(App.daily_use, indexes,
                                           (App.power * (random.uniform(
                                               (1 - App.Thermal_P_var),
                                               (1 + App.Thermal_P_var))) *
                                            coincidence))
                                    np.put(App.daily_use_masked,
                                           indexes,
                                           (App.power * (random.uniform(
                                               (1 - App.Thermal_P_var),
                                               (1 + App.Thermal_P_var))) *
                                            coincidence),
                                           mode='clip')
                                App.daily_use_masked = np.zeros_like(
                                    ma.masked_greater_equal(
                                        App.daily_use_masked, 0.001))
                                tot_time = tot_time  #no correction applied to previously calculated value

                            free_spots = [
                            ]  #calculate how many free spots remain for further switch_ons
                            try:
                                for j in ma.notmasked_contiguous(
                                        App.daily_use_masked):
                                    free_spots.append(j.stop - j.start)
                            except TypeError:
                                free_spots = [0]
                            max_free_spot = max(free_spots)

                        else:
                            continue  #if the random switch_on falls somewhere where the App has been already turned on, tries again from beginning of the while cycle
                    Us.load = Us.load + App.daily_use  #adds the App profile to the User load
            Tot_Classes = Tot_Classes + Us.load  #adds the User load to the total load of all User classes
        Profile.append(
            Tot_Classes
        )  #appends the total load to the list that will contain all the generated profiles
        print('Profile', prof_i + 1, '/', num_profiles,
              'completed')  #screen update about progress of computation
    return (Profile)
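
The off-peak coincidence logic above boils down to sampling how many of the
'n' identical appliances of one type switch on together. A standalone sketch
of just that step (the function name is illustrative, not part of the model):

import random
import numpy as np

def off_peak_coincidence(number):
    # probability of coincident switch-ons off-peak, as in the loop above
    Prob = random.uniform(0, (number - 1) / number)
    array = np.arange(0, number) / number
    try:
        on_number = np.max(np.where(Prob >= array)) + 1
    except ValueError:
        on_number = 1
    return on_number

print(off_peak_coincidence(10))  # an integer between 1 and 10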
Example #34
# Imports needed to run this snippet on its own (a hedged reconstruction; in
# the relax sources these live at the top of the MMQ CR72 module):
from numpy import arccosh, cos, cosh, fabs, isfinite, log, max, min, sin, sqrt, sum
from numpy.ma import fix_invalid, masked_greater_equal, masked_where

# Scaling constant for the eta+/- values (2**(-3/2) in the relax sources).
eta_scale = 2.0**(-3.0 / 2.0)


def r2eff_mmq_cr72(
    r20=None, pA=None, dw=None, dwH=None, kex=None, cpmg_frqs=None, inv_tcpmg=None, tcp=None, back_calc=None
):
    """The CR72 model extended to MMQ CPMG data.

    This function calculates and stores the R2eff values.


    @keyword r20:           The R2 value in the absence of exchange.
    @type r20:              numpy float array of rank [NS][NM][NO][ND]
    @keyword pA:            The population of state A.
    @type pA:               float
    @keyword dw:            The chemical exchange difference between states A and B in rad/s.
    @type dw:               numpy float array of rank [NS][NM][NO][ND]
    @keyword dwH:           The proton chemical exchange difference between states A and B in rad/s.
    @type dwH:              numpy float array of rank [NS][NM][NO][ND]
    @keyword kex:           The kex parameter value (the exchange rate in rad/s).
    @type kex:              float
    @keyword cpmg_frqs:     The CPMG nu1 frequencies.
    @type cpmg_frqs:        numpy float array of rank [NS][NM][NO][ND]
    @keyword inv_tcpmg:     The inverse of the total duration of the CPMG element (in inverse seconds).
    @type inv_tcpmg:        numpy float array of rank [NS][NM][NO][ND]
    @keyword tcp:           The tau_CPMG times (1 / 4.nu1).
    @type tcp:              numpy float array of rank [NS][NM][NO][ND]
    @keyword back_calc:     The array for holding the back calculated R2eff values.  Each element corresponds to one of the CPMG nu1 frequencies.
    @type back_calc:        numpy float array of rank [NS][NM][NO][ND]
    """

    # Once off parameter conversions.
    pB = 1.0 - pA
    k_BA = pA * kex
    k_AB = pB * kex

    # Flags to tell if values should be replaced when dw and dwH are zero or when the max_etapos limit of the cosh function is violated.
    t_dw_dw_H_zero = False
    t_max_etapos = False

    # Test if pA or kex is zero.
    if kex == 0.0 or pA == 1.0:
        back_calc[:] = r20
        return

    # Test if dw and dwH are zero. Create a mask for the affected spins to replace these with R20 at the end of the calculation. Wait for the replacement, since this is spin specific.
    if min(fabs(dw)) == 0.0 and min(fabs(dwH)) == 0.0:
        t_dw_dw_H_zero = True
        mask_dw_zero = masked_where(dw == 0.0, dw)
        mask_dw_H_zero = masked_where(dwH == 0.0, dwH)

    # Repetitive calculations (to speed up calculations).
    dw2 = dw ** 2
    r20_kex = r20 + kex / 2.0
    pApBkex2 = k_AB * k_BA
    isqrt_pApBkex2 = 1.0j * sqrt(pApBkex2)
    sqrt_pBpA = sqrt(pB / pA)
    ikex = 1.0j * kex

    # The d+/- values.
    d = dwH + dw
    dpos = d + ikex
    dneg = d - ikex

    # The z+/- values.
    z = dwH - dw
    zpos = z + ikex
    zneg = z - ikex

    # The Psi and zeta values.
    fact = 1.0j * dwH + k_BA - k_AB
    Psi = fact ** 2 - dw2 + 4.0 * pApBkex2
    zeta = -2.0 * dw * fact

    # More repetitive calculations.
    sqrt_psi2_zeta2 = sqrt(Psi ** 2 + zeta ** 2)

    # The D+/- values.
    D_part = (0.5 * Psi + dw2) / sqrt_psi2_zeta2
    Dpos = 0.5 + D_part
    Dneg = -0.5 + D_part

    # The eta+/- values.
    eta_fact = eta_scale / cpmg_frqs
    etapos = eta_fact * sqrt(Psi + sqrt_psi2_zeta2)
    etaneg = eta_fact * sqrt(-Psi + sqrt_psi2_zeta2)

    # Catch math domain error of cosh(val > 710).
    # This is when etapos > 700 (a safety margin below the ~710 overflow limit).
    if max(etapos) > 700:
        t_max_etapos = True
        mask_max_etapos = masked_greater_equal(etapos, 700.0)
        # To prevent math errors, set etapos to 1.
        etapos[mask_max_etapos.mask] = 1.0

    # The mD value.
    mD = isqrt_pApBkex2 / (dpos * zpos) * (zpos + 2.0 * dw * sin(zpos * tcp) / sin((dpos + zpos) * tcp))

    # The mZ value.
    mZ = -isqrt_pApBkex2 / (dneg * zneg) * (dneg - 2.0 * dw * sin(dneg * tcp) / sin((dneg + zneg) * tcp))

    # The Q value.
    Q = 1 - mD ** 2 + mD * mZ - mZ ** 2 + 0.5 * (mD + mZ) * sqrt_pBpA
    Q = Q.real

    # The first eigenvalue.
    lambda1 = r20_kex - cpmg_frqs * arccosh(Dpos * cosh(etapos) - Dneg * cos(etaneg))

    # The full formula.
    back_calc[:] = lambda1.real - inv_tcpmg * log(Q)

    # Replace data in array.
    # If eta_pos above 700.
    if t_max_etapos:
        back_calc[mask_max_etapos.mask] = r20[mask_max_etapos.mask]

    # Replace data in array.
    # If dw and dwH is zero.
    if t_dw_dw_H_zero:
        back_calc[mask_dw_zero.mask] = r20[mask_dw_zero.mask]
        back_calc[mask_dw_H_zero.mask] = r20[mask_dw_H_zero.mask]

    # Catch errors, taking a sum over array is the fastest way to check for
    # +/- inf (infinity) and nan (not a number).
    if not isfinite(sum(back_calc)):
        # Replaces nan, inf, etc. with fill value.
        fix_invalid(back_calc, copy=False, fill_value=1e100)
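
The guard above is the part of this example that actually exercises masked_greater_equal: arguments that would overflow cosh() are masked, temporarily replaced by a harmless dummy, and restored from r20 afterwards. A minimal, self-contained sketch of that pattern with made-up numbers (not relax's real data layout):

import numpy as np
from numpy.ma import masked_greater_equal

eta = np.array([1.0, 250.0, 800.0, 5.0])   # hypothetical cosh() arguments
r20 = np.full_like(eta, 7.5)               # hypothetical fallback values

guard = masked_greater_equal(eta, 700.0)   # mask entries that would overflow cosh()
eta[guard.mask] = 1.0                      # dummy value keeps the computation finite
result = np.cosh(eta)
result[guard.mask] = r20[guard.mask]       # restore the fallback where masked
print(result)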
def carbchem(op_swtch, mdi, T, S, TCO2, TALK, Pr=0.0, TB=0.0, Ni=100.0, Tl=1.0e-5):
    """Calculate the inorganic carbon chemistry balance by the method of Peng et al. (1987).

    Salinity must be converted into psu (raw field * 1000 + 35), and TCO2 and
    TALK must be in mol/kg (raw field / (1026. * 1000.)); the remaining keyword
    arguments are optional.

    The op_swtch argument selects the output:
        0   iteration count
        1   pCO2
        2   pH
        3   [H2CO3]
        4   [HCO3]
        5   [CO3]
        6   saturation [CO3]: calcite
        7   saturation state: calcite
        8   saturation [CO3]: aragonite
        9   saturation state: aragonite
    """

    msk1 = ma.masked_greater_equal(T, mdi + 1.0, copy=True)
    msk2 = ma.masked_greater_equal(S, mdi + 1.0, copy=True)
    msk3 = ma.masked_greater_equal(TCO2, mdi + 1.0, copy=True)
    msk4 = ma.masked_greater_equal(TALK, mdi + 1.0, copy=True)

    # combine the validity masks of all four input fields
    msk = msk1.mask | msk2.mask | msk3.mask | msk4.mask

    # create land-sea mask used by sea_msk.mask
    salmin = 1.0
    S2 = np.copy(S)
    S2[np.abs(S) < salmin] = salmin

    tol = Tl
    mxiter = Ni

    op_fld = np.empty(T.shape)
    op_fld.fill(np.NAN)

    TB = np.ones(T.shape)
    TB[msk] = 4.106e-4 * S2[msk] / 35.0
    # this boron is from Peng

    # convert to Kelvin
    TK = np.copy(T[:])
    TK[msk] += 273.15

    alpha_s = np.ones(T.shape)
    alpha_s[msk] = np.exp(
        (-60.2409 + 9345.17 / TK[msk] + 23.3585 * np.log(TK[msk] / 100.0))
        + (0.023517 - 0.023656 * (TK[msk] / 100.0) + 0.0047036 * np.power((TK[msk] / 100.0), 2.0)) * S[msk]
    )

    K1 = np.ones(T.shape)
    K1[msk] = np.exp(
        (-2307.1266 / TK[msk] + 2.83655 - 1.5529413 * np.log(TK[msk]))
        - (4.0484 / TK[msk] + 0.20760841) * np.sqrt(S[msk])
        + 0.08468345 * S[msk]
        - 0.00654208 * np.power(S[msk], 1.5)
        + np.log(1.0 - 0.001005 * S[msk])
    )

    if np.any(Pr):  # apply the pressure correction only when a pressure field is supplied
        del_vol = np.ones(T.shape)
        del_com = np.ones(T.shape)
        pf = np.ones(T.shape)
        del_vol[msk] = -25.50 + 0.1271 * T[msk]
        del_com[msk] = 1.0e-3 * (-3.08 + 0.0877 * T[msk])
        pf[msk] = np.exp((0.5 * del_com[msk] * Pr[msk] - del_vol[msk]) * Pr[msk] / (83.131 * TK[msk]))
        K1[msk] = K1[msk] * pf[msk]

    K2 = np.ones(T.shape)
    K2[msk] = np.exp(
        (-3351.6106 / TK[msk] - 9.226508 - 0.2005743 * np.log(TK[msk]))
        - (23.9722 / TK[msk] + 0.106901773) * np.power(S[msk], 0.5)
        + 0.1130822 * S[msk]
        - 0.00846934 * np.power(S[msk], 1.5)
        + np.log(1.0 - 0.001005 * S[msk])
    )

    if np.any(Pr):
        del_vol = np.ones(T.shape)
        del_com = np.ones(T.shape)
        pf = np.ones(T.shape)
        del_vol[msk] = -15.82 - 0.0219 * T[msk]
        del_com[msk] = 1.0e-3 * (1.13 - 0.1475 * T[msk])
        pf[msk] = np.exp((0.5 * del_com[msk] * Pr[msk] - del_vol[msk]) * Pr[msk] / (83.131 * TK[msk]))
        K2[msk] = K2[msk] * pf[msk]

    KB = np.ones(T.shape)
    KB[msk] = np.exp(
        (
            -8966.90
            - 2890.53 * np.power(S[msk], 0.5)
            - 77.942 * S[msk]
            + 1.728 * np.power(S[msk], 1.5)
            - 0.0996 * np.power(S[msk], 2.0)
        )
        / TK[msk]
        + (148.0248 + 137.1942 * np.power(S[msk], 0.5) + 1.62142 * S[msk])
        - (24.4344 + 25.085 * np.power(S[msk], 0.5) + 0.2474 * S[msk]) * np.log(TK[msk])
        + 0.053105 * (np.power(S[msk], 0.5)) * TK[msk]
    )

    if np.any(Pr):
        del_vol = np.ones(T.shape)
        del_com = np.ones(T.shape)
        pf = np.ones(T.shape)
        del_vol[msk] = -29.48 + 0.1622 * T[msk] + 0.0026080 * np.power(T[msk], 2.0)
        del_com[msk] = -2.84e-3
        pf[msk] = np.exp((0.5 * del_com[msk] * Pr[msk] - del_vol[msk]) * Pr[msk] / (83.131 * TK[msk]))
        KB[msk] = KB[msk] * pf[msk]

    KW = np.ones(T.shape)
    KW[msk] = np.exp(
        (-13847.26 / TK[msk] + 148.96502 - 23.6521 * np.log(TK[msk]))
        + (118.67 / TK[msk] - 5.977 + 1.0495 * np.log(TK[msk])) * np.power(S[msk], 0.5)
        - 0.01615 * S[msk]
    )

    if np.any(Pr):
        del_vol = np.ones(T.shape)
        del_com = np.ones(T.shape)
        pf = np.ones(T.shape)
        del_vol[msk] = -25.60 + 0.2324 * T[msk] - 0.0036246 * np.power(T[msk], 2.0)
        del_com[msk] = 1.0e-3 * (-5.13 + 0.0794 * T[msk])
        pf[msk] = np.exp((0.5 * del_com[msk] * Pr[msk] - del_vol[msk]) * Pr[msk] / (83.131 * TK[msk]))
        KW[msk] = KW[msk] * pf[msk]

    if op_swtch >= 6 and op_swtch <= 9:
        ca_conc = np.ones(T.shape)
        ca_conc[msk] = 0.01028 * S2[msk] / 35.0

    if op_swtch == 6 or op_swtch == 7:
        K_SP_C = np.ones(T.shape)
        K_SP_C[msk] = np.power(
            10.0,
            (
                (-171.9065 - 0.077993 * TK[msk] + 2839.319 / TK[msk] + 71.595 * np.log10(TK[msk]))
                + (-0.77712 + 0.0028426 * TK[msk] + 178.34 / TK[msk]) * np.power(S[msk], 0.5)
                - 0.07711 * S[msk]
                + 0.0041249 * np.power(S[msk], 1.5)
            ),
        )
        if np.any(Pr):
            del_vol = np.ones(T.shape)
            del_com = np.ones(T.shape)
            pf = np.ones(T.shape)
            del_vol[msk] = -48.76 + 0.5304 * T[msk]
            del_com[msk] = 1.0e-3 * (-11.76 + 0.3692 * T[msk])
            pf[msk] = np.exp((0.5 * del_com[msk] * Pr[msk] - del_vol[msk]) * Pr[msk] / (83.131 * TK[msk]))
            K_SP_C[msk] = K_SP_C[msk] * pf[msk]

    if op_swtch == 8 or op_swtch == 9:
        K_SP_A = np.ones(T.shape)
        K_SP_A[msk] = np.power(
            10,
            (
                (-171.945 - 0.077993 * TK[msk] + 2903.293 / TK[msk] + 71.595 * np.log10(TK[msk]))
                + (-0.068393 + 0.0017276 * TK[msk] + 88.135 / TK[msk]) * np.power(S[msk], 0.5)
                - 0.10018 * S[msk]
                + 0.0059415 * np.power(S[msk], 1.5)
            ),
        )
        if np.any(Pr):
            del_vol = np.ones(T.shape)
            del_com = np.ones(T.shape)
            pf = np.ones(T.shape)
            del_vol[msk] = -46.0 + 0.5304 * T[msk]
            del_com[msk] = 1.0e-3 * (-11.76 + 0.3692 * T[msk])
            pf[msk] = np.exp((0.5 * del_com[msk] * Pr[msk] - del_vol[msk]) * Pr[msk] / (83.131 * TK[msk]))
            K_SP_A[msk] = K_SP_A[msk] * pf[msk]

    # Get first estimate for H+ concentration.
    aH = np.ones(T.shape)
    aH[msk] = 1.0e-8

    count = np.zeros(T.shape)
    tol_swtch = np.zeros(T.shape)

    AB = np.ones(T.shape)
    AC = np.ones(T.shape)
    AW = np.ones(T.shape)

    iter = 0
    test = 2.0

    while test > 0.5 and iter < mxiter:
        # Compute alkalinity guesses for Boron, Silicon, Phosphorus and Water
        AB[msk] = TB[msk] * KB[msk] / (aH[msk] + KB[msk])

        #  ASi[msk] = TSi[msk]*KSi[msk]/( aH[msk] $
        #    + KSi[msk] )

        #  AP[msk] = TP[msk]*( 1.0/( 1.0 + KP2[msk]/aH[msk] $
        #    + KP2[msk]*KP3[msk]/(aH[msk]^2.0) ) + 2.0/( 1.0 $
        #    + aH[msk]/KP2[msk] + KP3[msk]/aH[msk] ) $
        #    + 3.0/( 1.0 + aH[msk]/KP3[msk] $
        #    + (aH[msk]^2.0)/(KP2[msk]*KP3[msk]) ) )

        AW[msk] = (KW[msk] / aH[msk]) - aH[msk]

        # using the guessed alkalinities and total alkalinity, calculate the
        # alkalinity due to carbon
        #  AC[msk] = TALK[msk] - ( AB[msk] + ASi[msk] $
        #    + AP[msk] + AW[msk] )
        AC[msk] = TALK[msk] - (AB[msk] + AW[msk])

        # and recalculate aH with the new As
        old_aH = np.copy(aH)
        aH[msk] = (0.5 * K1[msk] / AC[msk]) * (
            (TCO2[msk] - AC[msk])
            + np.sqrt(
                (TCO2[msk] - AC[msk]) * (TCO2[msk] - AC[msk])
                + 4.0 * (AC[msk] * K2[msk] / K1[msk]) * (2.0 * TCO2[msk] - AC[msk])
            )
        )

        tol_swtch[msk] = abs((aH[msk] - old_aH[msk]) / old_aH[msk]) > tol
        count[msk] = count[msk] + tol_swtch[msk]

        test = np.sum(tol_swtch)
        iter += 1

    # now we have aH we can calculate...
    denom = np.zeros(T.shape)
    H2CO3 = np.zeros(T.shape)
    HCO3 = np.zeros(T.shape)
    CO3 = np.zeros(T.shape)
    pH = np.zeros(T.shape)
    pCO2 = np.zeros(T.shape)
    if op_swtch == 6 or op_swtch == 7:
        sat_CO3_C = np.zeros(T.shape)
    if op_swtch == 7:
        sat_stat_C = np.zeros(T.shape)
    if op_swtch == 8 or op_swtch == 9:
        sat_CO3_A = np.zeros(T.shape)
    if op_swtch == 9:
        sat_stat_A = np.zeros(T.shape)

    denom[msk] = np.power(aH[msk], 2.0) + K1[msk] * aH[msk] + K1[msk] * K2[msk]
    H2CO3[msk] = TCO2[msk] * np.power(aH[msk], 2.0) / denom[msk]
    HCO3[msk] = TCO2[msk] * K1[msk] * aH[msk] / denom[msk]
    CO3[msk] = TCO2[msk] * K1[msk] * K2[msk] / denom[msk]

    pH[msk] = -np.log10(aH[msk])
    pCO2[msk] = H2CO3[msk] / alpha_s[msk]

    if op_swtch == 6 or op_swtch == 7:
        sat_CO3_C[msk] = K_SP_C[msk] / ca_conc[msk]
        if op_swtch == 7:
            sat_stat_C[msk] = CO3[msk] / sat_CO3_C[msk]

    if op_swtch == 8 or op_swtch == 9:
        sat_CO3_A[msk] = K_SP_A[msk] / ca_conc[msk]
        if op_swtch == 9:
            sat_stat_A[msk] = CO3[msk] / sat_CO3_A[msk]

    if op_swtch == 0:
        op_fld = np.zeros(T.shape)
        op_fld[msk] = count[msk]
    elif op_swtch == 1:
        op_fld[msk] = pCO2[msk] * 1.0e6
    elif op_swtch == 2:
        op_fld[msk] = pH[msk]
    elif op_swtch == 3:
        op_fld[msk] = H2CO3[msk]
    elif op_swtch == 4:
        op_fld[msk] = HCO3[msk]
    elif op_swtch == 5:
        op_fld[msk] = CO3[msk]
    elif op_swtch == 6:
        op_fld[msk] = sat_CO3_C[msk]
    elif op_swtch == 7:
        op_fld[msk] = sat_stat_C[msk]
    elif op_swtch == 8:
        op_fld[msk] = sat_CO3_A[msk]
    elif op_swtch == 9:
        op_fld[msk] = sat_stat_A[msk]

    return op_fld
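
carbchem only uses masked_greater_equal to separate valid sea points from the missing-data indicator before the chemistry starts. A stripped-down sketch of that masking step, assuming mdi is a large negative fill value as in the function above (the function ORs the per-field masks, which matches the & used here whenever all fields share the same missing points):

import numpy as np
import numpy.ma as ma

mdi = -1.0e30                                 # assumed missing-data indicator
T = np.array([mdi, 10.0, 15.0])               # temperature with one missing point
S = np.array([35.0, mdi, 34.0])               # salinity with a different gap

msk1 = ma.masked_greater_equal(T, mdi + 1.0)  # mask is True where T holds real data
msk2 = ma.masked_greater_equal(S, mdi + 1.0)
sea = msk1.mask & msk2.mask                   # valid only where every field is valid
print(sea)                                    # [False False  True]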
Example #36
        mask = np.zeros(img.shape, dtype='uint8')
        cv2.circle(mask, (int(xres / 2), int(yres / 2)), radiusMask, white, -1)

        img = img[..., ::-1]

        masked = cv2.bitwise_and(img, mask)

        cv2.line(masked, (828, 457), (693, 96), black, 35)

        maskValue, threshold, normalizedRatioBR, flatNormalizedRatioBRNoZeros, st_dev = setHYTAThreshold(
            masked, xres, yres)

        sunpixels = ma.masked_less(normalizedRatioBR, threshold,
                                   copy=True).count()
        cldpixels = ma.masked_greater_equal(normalizedRatioBR,
                                            threshold,
                                            copy=True).count()

        cloudCover = cldpixels / (sunpixels + cldpixels)
        cloudCoverL.append(cloudCover)

        # 2 is cloud, 1 is sun, 0 is mask
        skycoverimage = np.where(
            normalizedRatioBR != maskValue,
            np.where(normalizedRatioBR >= threshold, 2, 1), 0)

        print('azimuth:', azimuth, 'altitude:', altitude, 'date', camera.date,
              'cloud cover:', cloudCover)

        # PLOTTING #############################################################
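
The cloud-cover estimate above is just two complementary masks and their counts: masked_less() hides pixels below the threshold so .count() tallies those at or above it, and masked_greater_equal() does the reverse. A toy version with a synthetic ratio image (the names here are illustrative, not from the original script):

import numpy as np
import numpy.ma as ma

ratio = np.random.default_rng(0).random((4, 4))  # stand-in for normalizedRatioBR
threshold = 0.6

above = ma.masked_less(ratio, threshold, copy=True).count()           # pixels >= threshold
below = ma.masked_greater_equal(ratio, threshold, copy=True).count()  # pixels < threshold
print(above, below, below / (above + below))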
Example #37
    def polarization_analysis(self,
                              jobind,
                              foil=(7, ),
                              pre_mask_key=False,
                              to_memory=False,
                              mon_norm=True):
        """
        Fit the polarization per pixel for a given job.
        arguments:
                    jobind[int]:                index of job in self.jobs
                    foil[tuple]:                index/indices of foils used for analysis
                    pre_mask_key[str, bool]:    False, or pre_mask key in maskdict
                    to_memory[bool]:            should be dumped to self.local_memory
                    mon_norm[bool]:             should be normed to mon counts
        """
        if pre_mask_key:
            try:
                contracted_data = zeros(
                    concatenate(
                        ((len(foil), 16),
                         self.maskdict['pre_masks'][pre_mask_key].shape())))
                fitted_data = zeros(
                    concatenate(
                        ((len(foil), ),
                         self.maskdict['pre_masks'][pre_mask_key].shape())))

            except TypeError:
                contracted_data = zeros(
                    concatenate(
                        ((1, 16),
                         self.maskdict['pre_masks'][pre_mask_key].shape())))
                fitted_data = zeros(
                    concatenate(
                        ((len(foil), ),
                         self.maskdict['pre_masks'][pre_mask_key].shape())))

            except AttributeError:
                print 'contracted_data array could not be initialized. Return None!'
                return None

            for ind, foil_ind in enumerate(foil):
                for tc in xrange(16):
                    contracted_data[ind, tc] = self.apply_pre_mask(
                        pre_mask_key, jobind, tc, foil_ind)

        else:
            try:
                contracted_data = array(
                    [self.data_dict[self.jobs[jobind]][list(foil)]])
                fitted_data = zeros((len(foil), 128, 128))

            except KeyError:
                print 'No data contraction. Could not initialize usable data from data_dict. Return None!'
                return None

            except:
                print 'Something went wrong with the data contraction in polarization_analysis! Return None!'
                return None

        # mask contracted data
        contracted_data = ma.masked_less_equal(contracted_data, 0.)
        #        return contracted_data
        # norm contracted data
        # proper error determination

        for i in xrange(len(fitted_data)):
            for j in xrange(len(fitted_data[i])):
                for k in xrange(len(fitted_data[i, j])):
                    out = self.single_sinus_fit(
                        contracted_data[i, :, j, k],
                        sqrt(contracted_data[i, :, j, k]))
                    fitted_data[i, j, k] = out.params[
                        'amp'].value / out.params['offset'].value
                    if fitted_data[i, j, k] < 0. or fitted_data[i, j, k] > 1.:
                        print i, j, k
                        out.params.pretty_print()

#        for i, tc_panel in enumerate(contracted_data):
#            for j in xrange(len(fitted_data[i])):
#                for k in xrange(len(fitted_data[i,j])):
#                    out = self.single_sinus_fit(contracted_data[i,:,j,k], np.sqrt(contracted_data[i,:,j,k]))
#                    fitted_data[i,j,k] = out.params['amp'].value / out.params['offset'].value

# add dump to self.local_memory

        return ma.masked_greater_equal(fitted_data, 1.)
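
Note that the return value is itself a masked array: fits that came out unphysical (polarization >= 1) stay in the array but are masked, so downstream reductions skip them automatically. A small sketch of that behaviour with fabricated fit results:

import numpy as np
import numpy.ma as ma

pol = np.array([0.35, 0.40, 1.20, 0.38])        # hypothetical fitted polarizations
masked_pol = ma.masked_greater_equal(pol, 1.0)
print(masked_pol)         # [0.35 0.4 -- 0.38]
print(masked_pol.mean())  # averages over the three physical values only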
Example #39
    def __init__(self,
                 runs,
                 livetime_per_run,
                 times,
                 grb_runs=None,
                 duration_min=0.,
                 width_n_sigmas=1.,
                 factor=1,
                 averaged_time='per_run'):
        """Construct a :class:`SeasonalVariation` object.

        :type   runs: sequence of ints
        :param  runs: the event run numbers

        :type   livetime_per_run: sequence of floats
        :param  livetime_per_run: the true livetime per run from IceCube Live database.

        :type   times: sequence of datetime.datetime objects
        :param  times: the event trigger times

        :type   width_n_sigmas: float
        :param  width_n_sigmas: the number of standard deviations from the mean
            rate to allow in the rate vs time fit

        :type   factor: float
        :param  factor: return factor * the otherwise best fit rate vs time

        :type   averaged_time: string
        :param  averaged_time: an option to choose between 'per_run' or 'per_month' to calculate
                averaged event rates before the seasonal variation fit.
        """
        from scipy.interpolate import UnivariateSpline
        from icecube.grbllh.fitting import curve_fit
        import numpy.ma as ma
        try:
            from itertools import izip
        except ImportError:  #python3.x
            izip = zip

        time_sort_idx = times.argsort()
        times = times[time_sort_idx]
        runs = runs[time_sort_idx]
        livetime_per_run = livetime_per_run[time_sort_idx]
        start_indices = np.r_[0, np.nonzero(np.diff(runs))[0] + 1]
        # if start_indices[-1] == len (runs) - 2:
        # start_indices = np.r_[start_indices, len (runs) - 1]
        end_indices = np.r_[start_indices[1:] - 1, len(runs) - 1]
        run_unique = runs[start_indices]
        run_starts = times[start_indices]
        run_ends = times[end_indices]
        run_counts = (end_indices - start_indices) + 1
        run_durations = livetime_per_run[start_indices]
        run_rates = run_counts / run_durations
        run_rate_errs = np.sqrt(run_counts) / run_durations
        idx_neg_rates = 0 < run_rates  # despite the name, this keeps runs with positive rates
        #idx_long_run = run_durations > duration_min  # only use runs longer than an hour
        idx_long_run = run_durations > 0.  # SAM CHANGED THIS IN HIS SRC
        idx_nogrbs = np.ones(len(run_unique), dtype=bool)
        if grb_runs is not None:
            for r in grb_runs:
                idx_nogrbs -= (run_unique == r)
        idx_good_runs = idx_neg_rates * idx_long_run * idx_nogrbs

        #print ("len of idx_good_runs: ", len(idx_good_runs))
        #print ("any good runs? ", idx_good_runs.any())

        if averaged_time == 'per_run':
            print(
                "You are using per run rates in the seasonal variation fit... "
            )
            idx_nogrbs = np.ones(len(run_unique), dtype=bool)
            if grb_runs is not None:
                for r in grb_runs:
                    idx_nogrbs -= (run_unique == r)
            idx_neg_rates = 0 < run_rates  # despite the name, this keeps runs with positive rates
            #idx_long_run = run_durations > duration_min  # only use runs longer than an hour
            idx_long_run = run_durations > 0.  # SAM CHANGED THIS IN HIS SRC
            rate_mean = run_rates[idx_neg_rates * idx_nogrbs *
                                  idx_long_run].mean()
            rate_std = run_rates[idx_neg_rates * idx_nogrbs *
                                 idx_long_run].std()

            run_offsets = _timedelta_in_seconds(run_starts - run_starts[0])

            self._best_fit = curve_fit(
                self.rate_vs_offset,
                run_offsets[idx_neg_rates * idx_nogrbs * idx_long_run],
                run_rates[idx_neg_rates * idx_nogrbs * idx_long_run],
                p0=[.1 * rate_std, 0., rate_mean])

            self.t0 = run_starts[0]
            self.run_durations = run_durations
            self.run_rates = run_rates
            self.run_rate_errs = run_rate_errs
            self.run_starts = run_starts
            self.idx = idx_neg_rates * idx_nogrbs * idx_long_run
            self.factor = factor
            self.chisq2 = np.sum([
                (rate - self.__call__(start))**2 / rate_err**2
                for rate, rate_err, start in izip(self.run_rates[
                    self.idx], self.run_rate_errs[self.idx], self.run_starts[
                        self.idx])
            ])
            self.ndof = self.idx.sum() - 3

        if averaged_time == 'per_month':
            print(
                "You are using monthly averaged rates in the seasonal variation fit... "
            )
            #month = 365.25 * 86400 / 12
            #n_months = int(_timedelta_in_seconds(run_ends[-1] - run_starts[0]) / month) + 1
            month = _timedelta_in_seconds(run_ends[-1] - run_starts[0]) / 12
            n_months = int(
                _timedelta_in_seconds(run_ends[-1] - run_starts[0]) / month)
            month_durations = np.zeros(n_months)
            month_counts = np.zeros(n_months)
            month_mask = np.empty([n_months], dtype=np.ndarray)
            month_times = np.array([run_starts[0] for i in range(n_months)])
            month_offsets = np.zeros(n_months)
            for i in range(n_months):
                mask1 = ma.getmask(
                    ma.masked_greater_equal(
                        _timedelta_in_seconds(run_starts - run_starts[0]),
                        i * month))
                mask2 = ma.getmask(
                    ma.masked_less(
                        _timedelta_in_seconds(run_starts - run_starts[0]),
                        (i + 1) * month))
                month_mask[i] = mask1 * mask2
                #print ("month_mask: ", month_mask[i])
                #print ("len of month_mask: ", len(month_mask[i]))
                #print ("any good runs? ", (idx_good_runs * month_mask[i]).any())
                #month_durations[i] = np.sum([run_duration for run_duration in run_durations[idx_good_runs * month_mask[i]]])
                #month_counts[i] = np.sum([run_count for run_count in run_counts[idx_good_runs * month_mask[i]]])
                month_durations[i] = np.sum([
                    run_duration
                    for run_duration in run_durations[month_mask[i]]
                ])
                month_counts[i] = np.sum(
                    [run_count for run_count in run_counts[month_mask[i]]])
                #print "i*month/2: ", i*month/2
                #month_offsets[i] = run_starts[0] + datetime.timedelta(seconds = month/2) + datetime.timedelta(seconds = i * month)
                if i == 0:
                    month_times[0] = run_starts[0] + datetime.timedelta(
                        seconds=0.5 *
                        _timedelta_in_seconds(run_starts[month_mask[0]][-1] -
                                              run_starts[month_mask[0]][0]))
                    month_offsets[0] = 0.5 * _timedelta_in_seconds(
                        run_starts[month_mask[0]][-1] - run_starts[0])
                else:
                    month_offsets[i] = _timedelta_in_seconds(
                        run_ends[month_mask[i - 1]][-1] +
                        datetime.timedelta(seconds=0.5 * _timedelta_in_seconds(
                            run_starts[month_mask[i]][-1] -
                            run_starts[month_mask[i]][0])) - run_starts[0])
                    month_times[i] = run_ends[month_mask[
                        i - 1]][-1] + datetime.timedelta(
                            seconds=0.5 * _timedelta_in_seconds(
                                run_starts[month_mask[i]][-1] -
                                run_starts[month_mask[i]][0]))

            month_rates = month_counts / month_durations
            month_rate_errs = np.sqrt(month_counts) / month_durations

            mrate_mean = month_rates.mean()
            mrate_std = month_rates.std()
            mmin_rate = mrate_mean - width_n_sigmas * mrate_std
            mmax_rate = mrate_mean + width_n_sigmas * mrate_std

            self._best_fit = curve_fit(self.rate_vs_offset,
                                       month_offsets,
                                       month_rates,
                                       p0=[.1 * mrate_std, 0, mrate_mean])

            self.t0 = run_starts[0]
            self.month_durations = month_durations
            self.month_rates = month_rates
            self.month_rate_errs = month_rate_errs
            self.month_offsets = month_offsets
            self.month_times = month_times
            self.mmin_rate = mmin_rate
            self.mmax_rate = mmax_rate
            self.factor = factor
            self.chisq2 = np.sum([
                (rate - self.__call__(start))**2 / rate_err**2
                for rate, rate_err, start in izip(
                    self.month_rates, self.month_rate_errs, self.month_times)
            ])
            self.ndof = n_months - 3
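
In the per-month branch above, each month's runs are selected by intersecting two one-sided masks, built with masked_greater_equal and masked_less and extracted via ma.getmask. A compact sketch with fabricated offsets in seconds (ma.masked_inside would express nearly the same interval in one call, except that its upper bound is inclusive):

import numpy as np
import numpy.ma as ma

offsets = np.array([0., 10., 35., 50., 70.])  # fabricated run start offsets (s)
month = 30.0                                  # fabricated bin width
i = 1

mask1 = ma.getmask(ma.masked_greater_equal(offsets, i * month))  # offsets >= 30
mask2 = ma.getmask(ma.masked_less(offsets, (i + 1) * month))     # offsets < 60
print(offsets[mask1 & mask2])                 # [35. 50.]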
Example #40
import sunpy.map
from sunpy.data.sample import AIA_171_IMAGE

###############################################################################
# We first create the Map using the sample data and import the coordinate
# functionality.

aia = sunpy.map.Map(AIA_171_IMAGE)

###############################################################################
# Now we create a mask for everything beyond the solar limb, attach it to
# a new map, and plot the result

max_indices = np.unravel_index(aia.data.argmax(), aia.data.shape) * u.pixel
hpc_max = aia.pixel_to_data(max_indices[1], max_indices[0])
r = np.sqrt(hpc_max.Tx**2 + hpc_max.Ty**2) / aia.rsun_obs
mask = ma.masked_greater_equal(r, 1)
scaled_map = sunpy.map.Map(aia.data, aia.meta, mask=mask.mask)

###############################################################################
# Let's now plot the results. We'll overlay the autogenerated SunPy lon/lat
# grid as well for comparison.

fig = plt.figure()
ax = plt.subplot(projection=aia)

aia.plot()
ax.plot_coord(hpc_max, color='white', marker='x', markersize=10)

plt.show()
def PngToBinarryArray(filename):
        RGBAarray = readPNG(filename)
        alphaarray = RGBAarray[:,:]
        masked_alphaarray = ma.masked_greater_equal(alphaarray,50)
        bmask = masked_alphaarray.mask
        return bmask
Example #42
print(a.min(), a.max(), a.mean(), a.var())

# 2. Create a masked array from an ordinary array
a = np.arange(5)
print(ma.asarray(a))

a = np.array([1, np.nan, 2, np.inf, 3])  # an array containing special values
print(ma.asarray(a))

# 3. Mask the invalid values in an array
a = np.array([1, np.nan, 2, np.inf, 3])
print(ma.masked_invalid(a))

# 4. Mask a given value in an array
a = np.arange(3).repeat(2)
print(ma.masked_equal(a, 1))  # mask the elements equal to 1

# 5. Mask the values in an array that satisfy a condition
a = np.arange(8)
print(ma.masked_greater(a, 4))  # mask elements greater than 4
print(ma.masked_greater_equal(a, 4))  # mask elements greater than or equal to 4
print(ma.masked_less(a, 4))  # mask elements less than 4
print(ma.masked_less_equal(a, 4))  # mask elements less than or equal to 4
print(ma.masked_inside(a, 2, 5))  # mask elements inside [2, 5]
print(ma.masked_outside(a, 2, 5))  # mask elements outside [2, 5]

# 6. Mask one array using a condition evaluated on another array
a = np.arange(8)
b = np.random.random(8)
print(ma.masked_where(a > 5, b))  # mask b where a > 5
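
The snippet above stops at building masks; as a small addition (not part of the original snippet), the usual next step is getting plain arrays back out of them:

import numpy as np
import numpy.ma as ma

a = ma.masked_greater_equal(np.arange(8), 4)
print(a.mask)          # boolean mask, True where the value was >= 4
print(a.compressed())  # only the unmasked values: [0 1 2 3]
print(a.filled(-1))    # masked entries replaced by a fill value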
Example #43
def replace_invalid(x, value_invalid):

    x_masked = ma.masked_equal(x, value_invalid)
    while np.sum(x_masked.mask) > 0:
        x_masked = ma.masked_equal(
            x, value_invalid
        )  # masked array whose mask is True wherever x equals the invalid value
        if_clean = np.sum(x_masked.mask, axis=0)
        if_clean = ma.masked_greater_equal(
            if_clean, 1
        ).mask  # True for columns that contain at least one invalid value
        x_clean = (
            x_masked.T[~if_clean]
        ).T  # The column which don't contain the -999, extracted from the x
        x_dirty = (
            x_masked.T[if_clean]
        ).T  # The column which do contain the -999, extracted from the x
        index_clean = np.where(
            ~if_clean)  # column indices in x that don't contain -999
        index_dirty = np.where(
            if_clean)  # column indices in x that do contain -999
        correlation = np.zeros([
            x_dirty.shape[1], x_clean.shape[1]
        ])  # initialize the matrix storing the correlation coef
        for i in range(x_dirty.shape[1]):
            for j in range(
                    x_clean.shape[1]
            ):  # drop the invalid rows, then correlate the dirty column with each clean column
                correlation[i, j] = np.corrcoef(
                    x_dirty[:, i][~x_dirty.mask[:, i]],
                    x_clean[:, j][~x_dirty.mask[:, i]])[1, 0]
        most_correlated = np.abs(
            correlation
        ) > 0.5  # get the mask where the correlation is bigger than the threshold
        print(np.sum(most_correlated))
        if np.sum(most_correlated) == 0.:
            for i in range(x_dirty.shape[1]):
                x_masked[:, index_dirty[0][i]][x_dirty.mask[:, i]] = np.mean(
                    x_masked[:, index_dirty[0][i]][~x_dirty.mask[:, i]])
            break
        num_correlated_ = np.sum(
            most_correlated, axis=1
        )  # for each row, compute the total number of the big correlation
        index_target = np.argmax(
            num_correlated_
        )  # pick the row that has more correlation votes. One row here corresponds one column in x_dirty
        index_correlated = np.where(
            most_correlated[index_target] > 0.5
        )  # from the x_clean, pick the index of the column that votes the correlation for the dirty x.
        x_clean_correlated = x_clean.T[most_correlated[
            index_target]].T  # build the matrix of the most correlated clean x, extracted from x_clean
        tx = build_poly(
            x_clean_correlated[~x_dirty.mask[:, index_target]].data, 4
        )  # rows of the correlated clean columns where the dirty column is valid, for the regression
        tx_to_estimate = build_poly(
            x_clean_correlated[x_dirty.mask[:, index_target]].data, 4)
        y = x_dirty[:, index_target][
            ~x_dirty.mask[:, index_target]].data  # the valid rows of the target dirty column
        [mse, w] = least_squares(
            y, tx
        )  # Regression between the valid dirty x and the valid clean x. Get the model w
        y_estimated = np.dot(tx_to_estimate, w)
        x_masked[:, index_dirty[0][index_target]][
            x_dirty.mask[:, index_target]] = y_estimated
        x = x_masked.data

    return x_masked.data
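
The key masking step in replace_invalid is the column flag: mask the sentinel, sum the mask down each column, and mask any count of 1 or more to mark the column dirty. That step in isolation, with a tiny matrix (the sentinel -999.0 mirrors the comments above):

import numpy as np
import numpy.ma as ma

x = np.array([[1.0, -999.0, 3.0],
              [4.0,    5.0, 6.0]])
x_masked = ma.masked_equal(x, -999.0)                # mask the sentinel value
per_column = np.sum(x_masked.mask, axis=0)           # invalid count per column
dirty = ma.masked_greater_equal(per_column, 1).mask  # True where a column holds any sentinel
print(dirty)                                         # [False  True False]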
Example #44
    def test_testOddFeatures(self):
        # Test of other odd features
        x = arange(20)
        x = x.reshape(4, 5)
        x.flat[5] = 12
        assert_(x[1, 0] == 12)
        z = x + 10j * x
        assert_(eq(z.real, x))
        assert_(eq(z.imag, 10 * x))
        assert_(eq((z * conjugate(z)).real, 101 * x * x))
        z.imag[...] = 0.0

        x = arange(10)
        x[3] = masked
        assert_(str(x[3]) == str(masked))
        c = x >= 8
        assert_(count(where(c, masked, masked)) == 0)
        assert_(shape(where(c, masked, masked)) == c.shape)
        z = where(c, x, masked)
        assert_(z.dtype is x.dtype)
        assert_(z[3] is masked)
        assert_(z[4] is masked)
        assert_(z[7] is masked)
        assert_(z[8] is not masked)
        assert_(z[9] is not masked)
        assert_(eq(x, z))
        z = where(c, masked, x)
        assert_(z.dtype is x.dtype)
        assert_(z[3] is masked)
        assert_(z[4] is not masked)
        assert_(z[7] is not masked)
        assert_(z[8] is masked)
        assert_(z[9] is masked)
        z = masked_where(c, x)
        assert_(z.dtype is x.dtype)
        assert_(z[3] is masked)
        assert_(z[4] is not masked)
        assert_(z[7] is not masked)
        assert_(z[8] is masked)
        assert_(z[9] is masked)
        assert_(eq(x, z))
        x = array([1., 2., 3., 4., 5.])
        c = array([1, 1, 1, 0, 0])
        x[2] = masked
        z = where(c, x, -x)
        assert_(eq(z, [1., 2., 0., -4., -5]))
        c[0] = masked
        z = where(c, x, -x)
        assert_(eq(z, [1., 2., 0., -4., -5]))
        assert_(z[0] is masked)
        assert_(z[1] is not masked)
        assert_(z[2] is masked)
        assert_(eq(masked_where(greater(x, 2), x), masked_greater(x, 2)))
        assert_(eq(masked_where(greater_equal(x, 2), x),
                   masked_greater_equal(x, 2)))
        assert_(eq(masked_where(less(x, 2), x), masked_less(x, 2)))
        assert_(eq(masked_where(less_equal(x, 2), x), masked_less_equal(x, 2)))
        assert_(eq(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)))
        assert_(eq(masked_where(equal(x, 2), x), masked_equal(x, 2)))
        assert_(eq(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)))
        assert_(eq(masked_inside(list(range(5)), 1, 3), [0, 199, 199, 199, 4]))
        assert_(eq(masked_outside(list(range(5)), 1, 3), [199, 1, 2, 3, 199]))
        assert_(eq(masked_inside(array(list(range(5)),
                                       mask=[1, 0, 0, 0, 0]), 1, 3).mask,
                   [1, 1, 1, 1, 0]))
        assert_(eq(masked_outside(array(list(range(5)),
                                        mask=[0, 1, 0, 0, 0]), 1, 3).mask,
                   [1, 1, 0, 0, 1]))
        assert_(eq(masked_equal(array(list(range(5)),
                                      mask=[1, 0, 0, 0, 0]), 2).mask,
                   [1, 0, 1, 0, 0]))
        assert_(eq(masked_not_equal(array([2, 2, 1, 2, 1],
                                          mask=[1, 0, 0, 0, 0]), 2).mask,
                   [1, 0, 1, 0, 1]))
        assert_(eq(masked_where([1, 1, 0, 0, 0], [1, 2, 3, 4, 5]),
                   [99, 99, 3, 4, 5]))
        atest = ones((10, 10, 10), dtype=np.float32)
        btest = zeros(atest.shape, MaskType)
        ctest = masked_where(btest, atest)
        assert_(eq(atest, ctest))
        z = choose(c, (-x, x))
        assert_(eq(z, [1., 2., 0., -4., -5]))
        assert_(z[0] is masked)
        assert_(z[1] is not masked)
        assert_(z[2] is masked)
        x = arange(6)
        x[5] = masked
        y = arange(6) * 10
        y[2] = masked
        c = array([1, 1, 1, 0, 0, 0], mask=[1, 0, 0, 0, 0, 0])
        cm = c.filled(1)
        z = where(c, x, y)
        zm = where(cm, x, y)
        assert_(eq(z, zm))
        assert_(getmask(zm) is nomask)
        assert_(eq(zm, [0, 1, 2, 30, 40, 50]))
        z = where(c, masked, 1)
        assert_(eq(z, [99, 99, 99, 1, 1, 1]))
        z = where(c, 1, masked)
        assert_(eq(z, [99, 1, 1, 99, 99, 99]))
Example #45
    def analysis(self,
                 jobind,
                 foil=(7, ),
                 pre_mask_key=False,
                 output='pol_bound',
                 to_memory=False,
                 mon_norm=True):
        """
        Fit the polarization per pixel for a given job.
        arguments:
                    jobind[int]:                index of job in self.jobs
                    foil[tuple]:                index/indices of foils used for analysis
                    pre_mask_key[str, bool]:    False, or pre_mask key in maskdict
                    output[str]:                fit quantity to return: 'pol_bound', 'amp', 'offset' or 'phase'
                    to_memory[bool]:            should be dumped to self.local_memory
                    mon_norm[bool]:             should be normed to mon counts
        """
        if pre_mask_key:
            try:
                contracted_data = zeros(
                    concatenate(
                        ((len(foil), 16),
                         self.maskdict['pre_masks'][pre_mask_key].shape())))
                fitted_data = zeros(
                    concatenate(
                        ((len(foil), ),
                         self.maskdict['pre_masks'][pre_mask_key].shape())))

            except TypeError:
                contracted_data = zeros(
                    concatenate(
                        ((1, 16),
                         self.maskdict['pre_masks'][pre_mask_key].shape())))
                fitted_data = zeros(
                    concatenate(
                        ((len(foil), ),
                         self.maskdict['pre_masks'][pre_mask_key].shape())))

            except AttributeError:
                print 'contracted_data array could not be initialized. Return None!'
                return None

            for ind, foil_ind in enumerate(foil):
                for tc in xrange(16):
                    contracted_data[ind, tc] = self.apply_pre_mask(
                        pre_mask_key, jobind, tc, foil_ind)

        else:
            try:
                contracted_data = array(
                    [self.data_dict[self.jobs[jobind]][list(foil)]])
                fitted_data = zeros((len(foil), 128, 128))

            except KeyError:
                print 'No data contraction. Could not initialize usable data from data_dict. Return None!'
                return None

            except:
                print 'Something went wrong with the data contraction in polarization_analysis! Return None!'
                return None

        # mask contracted data
        contracted_data = ma.masked_less_equal(contracted_data, 0.)

        for i in xrange(len(fitted_data)):
            for j in xrange(len(fitted_data[i])):
                for k in xrange(len(fitted_data[i, j])):
                    # the sine fit happens here
                    out = self.single_sinus_fit(
                        contracted_data[i, :, j, k],
                        sqrt(contracted_data[i, :, j, k]))
                    if output == 'pol_bound':
                        fitted_data[i, j, k] = out.params['pol_bound'].value
                    elif output == 'amp':
                        fitted_data[i, j, k] = out.params['amp'].value
                    elif output == 'offset':
                        fitted_data[i, j, k] = out.params['offset'].value
                    elif output == 'phase':
                        fitted_data[i, j, k] = out.params['phase'].value

        if output == 'pol_bound':
            return ma.masked_greater_equal(fitted_data, 1.)
        elif output == 'phase':
            return fitted_data % (2 * pi) / pi
        else:
            return fitted_data
def carbchem_revelle(op_swtch,mdi,T_cube,S_cube,TCO2_cube,TALK_cube,Pr=0.0,TB=0.0,Ni=100.0,Tl=1.0e-5):
    """Calculate the inorganic carbon chemistry balance by the method of Peng et al. (1987).

    Salinity must be supplied in psu, and TCO2 and TALK in mol/kg.

    The op_swtch argument selects the output:
        0   iteration count
        1   pCO2
        2   pH
        3   [H2CO3]
        4   [HCO3]
        5   [CO3]
        6   saturation [CO3]: calcite
        7   saturation state: calcite
        8   saturation [CO3]: aragonite
        9   saturation state: aragonite
        10  Revelle factor (DIC), calculated from Egleston et al. 2010
        11  alkalinity buffer factor, calculated from Egleston et al. 2010
    """

    # make sure the grids are the same size
    # make sure the years are the same
    # extract the data from the cubes
    
# from iris import *
# from iris.analysis import *
# import iris.analysis
# from numpy import *
# from matplotlib.pyplot import *
# from scipy.stats.mstats import *
# import iris.plot as iplt
# import seawater
# import numpy
# import iris.quickplot as quickplot
# import iris.analysis.stats as istats
# temp = iris.load_cube('/home/ph290/tmp/hadgem2es_potential_temperature_historical_regridded.nc').extract(Constraint(depth = 0))
# sal = iris.load_cube('/home/ph290/tmp/hadgem2es_salinity_historical_regridded.nc').extract(Constraint(depth = 0))
# carb = iris.load_cube('/home/ph290/tmp/hadgem2es_dissolved_inorganic_carbon_historical_regridded.nc').extract(Constraint(depth = 0))
# alk = iris.load_cube('/home/ph290/tmp/hadgem2es_total_alkalinity_historical_regridded.nc').extract(Constraint(depth = 0))
# import carbchem
# co2 = carbchem.carbchem(1,temp.data.fill_value,temp,sal,carb,alk)
# T_cube = temp
# S_cube = sal
# TCO2_cube = carb
# TALK_cube = alk  
# mdi = temp.data.fill_value
	
    t_lat = np.size(T_cube.coord('latitude').points)    
    s_lat = np.size(S_cube.coord('latitude').points)
    c_lat = np.size(TCO2_cube.coord('latitude').points)
    a_lat = np.size(TALK_cube.coord('latitude').points)
    lat_test = t_lat == s_lat == c_lat == a_lat

    t_lon = np.size(T_cube.coord('longitude').points) 
    s_lon = np.size(S_cube.coord('longitude').points)
    c_lon = np.size(TCO2_cube.coord('longitude').points)
    a_lon = np.size(TALK_cube.coord('longitude').points)
    lon_test = t_lon == s_lon == c_lon == a_lon

    if lat_test and lon_test:

		output_cube = T_cube.copy()
		T_cube = T_cube-273.15
		T = T_cube.data.copy()
		S = S_cube.data.copy()
		TCO2_cube = TCO2_cube/1026.0
		TCO2 = np.roll(ma.swapaxes(TCO2_cube.data.copy(),0,1),180)
		# NOTE: this reordering is only required here because GLODAP and WOA are ordered differently; it is not necessary elsewhere
		TALK_cube = TALK_cube/1026.0
		TALK = np.roll(ma.swapaxes(TALK_cube.data.copy(),0,1),180)
		
		print np.mean(T)
		print np.mean(S)
		print np.mean(TCO2)
		print np.mean(TALK)
		
		msk1=ma.masked_greater_equal(T,mdi-1.0,copy=True)
		msk2=ma.masked_greater_equal(S,mdi-1.0,copy=True)
		msk3=ma.masked_greater_equal(TCO2,mdi-1.0,copy=True)
		msk4=ma.masked_greater_equal(TALK,mdi-1.0,copy=True)

		msk=msk1.mask | msk2.mask | msk3.mask | msk4.mask

		T[msk]=np.nan
		S[msk]=np.nan
		TALK[msk]=np.nan
		TCO2[msk]=np.nan
# 		plt.contourf(T)
# 		plt.show()
# 		plt.contourf(TCO2)
# 		plt.show()

		# T = np.array([13.74232016,25.0])
		# S = np.array([33.74096661,35.0])
		# TCO2 = np.array([0.0019863,2.0e-3])
		# TALK = np.array([0.00226763,2.2e-3])
		# msk = ma.masked_greater_equal(T,mdi-1.0,copy=True)

		#create land-sea mask used by sea_msk.mask
		salmin = 1.0
		S2=np.copy(S)
		S2[np.abs(S) < salmin]=salmin

		tol = Tl
		mxiter = Ni

		op_fld = np.empty(T.shape)
		op_fld.fill(np.NAN)

#    TB = np.ones(T.shape)
#    TB = 4.106e-4*S2/35.0
		TB = np.empty_like(T)
		TB = np.multiply(S2,4.106e-4/35.0, TB)
		# this boron is from Peng

		#convert to Kelvin
		TK=np.copy(T[:])
		TK += 273.15

		alpha_s = np.ones(T.shape)
		alpha_s = np.exp( ( -60.2409 + 9345.17/TK  + 23.3585*np.log(TK/100.0) )  + ( 0.023517 - 0.023656*(TK/100.0) + 0.0047036*np.power((TK/100.0),2.0) )*S )

		K1 = np.ones(T.shape)
		K1 = np.exp( ( -2307.1266/TK + 2.83655  - 1.5529413*np.log(TK) ) - ( 4.0484/TK + 0.20760841 )*np.sqrt(S) + 0.08468345*S - 0.00654208*np.power(S,1.5) + np.log( 1.0 - 0.001005*S ) )

		# pressure-correction coefficients for K1, K2, KB, KW, K_SP_C and K_SP_A,
		# following the per-constant expressions in carbchem above
		a = np.array([-25.50, -15.82, -29.48, -25.60, -48.76, -46.0])
		b = np.array([0.1271, -0.0219, 0.1622, 0.2324, 0.5304, 0.5304])
		c = np.array([0.0, 0.0, 0.0026080, -0.0036246, 0.0, 0.0])
		d = np.array([-3.08, 1.13, (-2.84e-3)/(1.0e-3), -5.13, -11.76, -11.76])
		e = np.array([0.0877, -0.1475, 0.0, 0.0794, 0.3692, 0.3692])

		if np.any(Pr):  # apply the pressure correction only when a pressure field is supplied
				instance = 0
				pf = pressure_fun(a[instance],b[instance],c[instance],d[instance],e[instance],T)
				K1 = K1*pf

		K2 = np.ones(T.shape)
		K2 = np.exp( ( -3351.6106/TK - 9.226508 - 0.2005743*np.log(TK) ) - ( 23.9722/TK + 0.106901773 )*np.power(S,0.5) + 0.1130822*S - 0.00846934*np.power(S,1.5) + np.log( 1.0 - 0.001005*S ) )

		if np.any(Pr):
				instance = 1
				pf = pressure_fun(a[instance],b[instance],c[instance],d[instance],e[instance],T)
				K2 = K2*pf

		KB = np.ones(T.shape)
		KB = np.exp( ( -8966.90 - 2890.53*np.power(S,0.5) - 77.942*S + 1.728*np.power(S,1.5)- 0.0996*np.power(S,2.0) )/TK + ( 148.0248 + 137.1942*np.power(S,0.5) + 1.62142*S ) - ( 24.4344 + 25.085*np.power(S,0.5) + 0.2474*S )*np.log(TK) + 0.053105*(np.power(S,0.5))*TK )

		if np.any(Pr):
				instance = 2
				pf = pressure_fun(a[instance],b[instance],c[instance],d[instance],e[instance],T)
				KB = KB*pf

		KW = np.ones(T.shape)
		KW = np.exp( ( -13847.26/TK + 148.96502 - 23.6521*np.log(TK) ) + ( 118.67/TK - 5.977 + 1.0495*np.log(TK) )*np.power(S,0.5) - 0.01615*S )

		if np.any(Pr):
				instance = 3
				pf = pressure_fun(a[instance],b[instance],c[instance],d[instance],e[instance],T)
				KW = KW*pf

		if ( op_swtch >= 6 and op_swtch <= 9 ):
				ca_conc = np.ones(T.shape)
				ca_conc = 0.01028*S2/35.0

		if ( op_swtch == 6 or op_swtch == 7 ):
				K_SP_C = np.ones(T.shape)
				K_SP_C = np.power(10.0,( ( -171.9065 - 0.077993*TK + 2839.319/TK + 71.595*np.log10(TK) ) + ( -0.77712 + 0.0028426*TK + 178.34/TK )*np.power(S,0.5) - 0.07711*S+ 0.0041249*np.power(S,1.5) ))
				if np.any(Pr):
						instance = 4
						pf = pressure_fun(a[instance],b[instance],c[instance],d[instance],e[instance],T)
						K_SP_C = K_SP_C*pf


		if ( op_swtch == 8 or op_swtch == 9 ):
				K_SP_A = np.ones(T.shape)
				K_SP_A = np.power(10,( ( -171.945 - 0.077993*TK + 2903.293/TK + 71.595*np.log10(TK) ) + ( -0.068393 + 0.0017276*TK + 88.135/TK )*np.power(S,0.5) - 0.10018*S + 0.0059415*np.power(S,1.5) ))
				if np.any(Pr):
						instance = 5
						pf = pressure_fun(a[instance],b[instance],c[instance],d[instance],e[instance],T)
						K_SP_A = K_SP_A*pf


		# Get first estimate for H+ concentration.

		AC, AW, AB, aH, count = carbiter(T, TCO2, TALK, TB, msk, tol, mxiter, K1, K2, KB, KW)

# 		plt.contourf(aH)
# 		plt.show()
# 		plt.contourf(AC)
# 		plt.show()
# 		plt.contourf(AW)
# 		plt.show()
# 		plt.contourf(aH)
# 		plt.show()

		# now we have aH we can calculate...
		denom = np.zeros(T.shape)
		H2CO3 = np.zeros(T.shape)
		HCO3 = np.zeros(T.shape)
		CO3 = np.zeros(T.shape)
		pH = np.zeros(T.shape)
		pCO2 = np.zeros(T.shape)
		if ( op_swtch == 6 or op_swtch == 7 ):
				sat_CO3_C = np.zeros(T.shape)
		if ( op_swtch == 7 ):
				sat_stat_C = np.zeros(T.shape)
		if ( op_swtch == 8 or op_swtch == 9 ):
				sat_CO3_A = np.zeros(T.shape)
		if ( op_swtch == 9 ):
				sat_stat_A = np.zeros(T.shape)

		denom = np.power(aH,2.0) + K1*aH + K1*K2
		H2CO3 = TCO2*np.power(aH,2.0)/denom
		HCO3 = TCO2*K1*aH/denom
		CO3 = TCO2*K1*K2/denom
# 		plt.contourf(K1)
# 		plt.show()
# 		plt.contourf(aH) -no
# 		plt.show()
# 		plt.contourf(denom) -no
# 		plt.show()

		pH = -np.log10(aH)
		pCO2 = H2CO3/alpha_s

		if ( op_swtch == 6 or op_swtch == 7 ):
				sat_CO3_C = K_SP_C/ca_conc
				if ( op_swtch == 7 ):
						sat_stat_C = CO3/sat_CO3_C

		if ( op_swtch == 8 or op_swtch == 9 ):
				sat_CO3_A = K_SP_A/ca_conc
				if ( op_swtch == 9 ):
						sat_stat_A = CO3/sat_CO3_A
						
		# carbonate alkalinity and the Egleston et al. 2010 buffer factors
		TALKc = HCO3 + 2.0*CO3
		var1 = HCO3 + 4.0*CO3 + ((aH*AB)/(KB + aH)) - AW
		DIC_buffer = TCO2 - ((TALKc*TALKc)/var1)
		ALK_buffer = ((TALKc*TALKc) - TCO2*var1)/TALKc

		output_cube = output_cube*0.0+np.nan
		if ( op_swtch == 0 ):
				op_fld = np.zeros(T.shape)
				op_fld = count
		elif ( op_swtch == 1 ):
				print np.mean(pCO2)
				output_cube.data = pCO2*1.0e6
				output_cube.standard_name = 'surface_partial_pressure_of_carbon_dioxide_in_sea_water'
				output_cube.long_name = 'CO2 concentration'
				output_cube.units = 'uatm'
		elif ( op_swtch == 2 ):
				output_cube.data = pH
				output_cube.standard_name = 'sea_water_ph_reported_on_total_scale'
				output_cube.long_name = 'pH'
				output_cube.units = '1'
		elif ( op_swtch == 3 ):
				output_cube.data = H2CO3
		elif ( op_swtch == 4 ):
				output_cube.data = HCO3
		elif ( op_swtch == 5 ):
				output_cube.data = CO3
		elif ( op_swtch == 6 ):
				output_cube.data = sat_CO3_C
		elif ( op_swtch == 7 ):
				output_cube.data = sat_stat_C
		elif ( op_swtch == 8 ):
				output_cube.data = sat_CO3_A
		elif ( op_swtch == 9 ):
				output_cube.data = sat_stat_A
		elif ( op_swtch == 10 ):
				output_cube.data = TCO2/DIC_buffer
		elif ( op_swtch == 11 ):
				output_cube.data = ALK_buffer*1000.0

		return output_cube
def carbchem(op_swtch,mdi,T,S,TCO2,TALK,Pr=0.0,TB=0.0,Ni=100.0,Tl=1.0e-5):
    """Calculate the inorganic carbon chemistry balance by the method of Peng et al. (1987).

    Salinity must be converted into psu (raw field * 1000 + 35), and TCO2 and
    TALK must be in mol/kg (raw field / (1026. * 1000.)).

    The op_swtch argument selects the output:
        0   iteration count
        1   pCO2
        2   pH
        3   [H2CO3]
        4   [HCO3]
        5   [CO3]
        6   saturation [CO3]: calcite
        7   saturation state: calcite
        8   saturation [CO3]: aragonite
        9   saturation state: aragonite
    """

    msk1=ma.masked_greater_equal(T,mdi+1.0,copy=True)
    msk2=ma.masked_greater_equal(S,mdi+1.0,copy=True)
    msk3=ma.masked_greater_equal(TCO2,mdi+1.0,copy=True)
    msk4=ma.masked_greater_equal(TALK,mdi+1.0,copy=True)

    msk=msk1.mask | msk2.mask | msk3.mask | msk4.mask

    T[np.invert(msk)]=np.nan
    S[np.invert(msk)]=np.nan
    TALK[np.invert(msk)]=np.nan
    TCO2[np.invert(msk)]=np.nan
    
    #create land-sea mask used by sea_msk.mask
    salmin = 1.0
    S2=np.copy(S)
    S2[np.abs(S) < salmin]=salmin

    tol = Tl
    mxiter = Ni

    op_fld = np.empty(T.shape)
    op_fld.fill(np.NAN)

#    TB = np.ones(T.shape)
#    TB = 4.106e-4*S2/35.0
    TB = np.empty_like(T)
    TB = np.multiply(S2,4.106e-4/35.0, TB)
    # this boron is from Peng

    #convert to Kelvin
    TK=np.copy(T[:])
    TK += 273.15

    alpha_s = np.ones(T.shape)
    alpha_s = np.exp( ( -60.2409 + 9345.17/TK  + 23.3585*np.log(TK/100.0) )  + ( 0.023517 - 0.023656*(TK/100.0) + 0.0047036*np.power((TK/100.0),2.0) )*S )
  
    K1 = np.ones(T.shape)
    K1 = np.exp( ( -2307.1266/TK + 2.83655  - 1.5529413*np.log(TK) ) - ( 4.0484/TK + 0.20760841 )*np.sqrt(S) + 0.08468345*S - 0.00654208*np.power(S,1.5) + np.log( 1.0 - 0.001005*S ) )

    # pressure-correction coefficients for K1, K2, KB, KW, K_SP_C and K_SP_A,
    # following the per-constant expressions in carbchem above
    a = np.array([-25.50, -15.82, -29.48, -25.60, -48.76, -46.0])
    b = np.array([0.1271, -0.0219, 0.1622, 0.2324, 0.5304, 0.5304])
    c = np.array([0.0, 0.0, 0.0026080, -0.0036246, 0.0, 0.0])
    d = np.array([-3.08, 1.13, (-2.84e-3)/(1.0e-3), -5.13, -11.76, -11.76])
    e = np.array([0.0877, -0.1475, 0.0, 0.0794, 0.3692, 0.3692])

    if np.any(Pr):  # apply the pressure correction only when a pressure field is supplied
        instance = 0
        pf = pressure_fun(a[instance],b[instance],c[instance],d[instance],e[instance],T)
        K1 = K1*pf
        
    K2 = np.ones(T.shape)
    K2 = np.exp( ( -3351.6106/TK - 9.226508 - 0.2005743*np.log(TK) ) - ( 23.9722/TK + 0.106901773 )*np.power(S,0.5) + 0.1130822*S - 0.00846934*np.power(S,1.5) + np.log( 1.0 - 0.001005*S ) )

    if np.any(Pr):
        instance = 1
        pf = pressure_fun(a[instance],b[instance],c[instance],d[instance],e[instance],T)
        K2 = K2*pf

    KB = np.ones(T.shape)
    KB = np.exp( ( -8966.90 - 2890.53*np.power(S,0.5) - 77.942*S + 1.728*np.power(S,1.5)- 0.0996*np.power(S,2.0) )/TK + ( 148.0248 + 137.1942*np.power(S,0.5) + 1.62142*S ) - ( 24.4344 + 25.085*np.power(S,0.5) + 0.2474*S )*np.log(TK) + 0.053105*(np.power(S,0.5))*TK )

    if np.any(Pr):
        instance = 2
        pf = pressure_fun(a[instance],b[instance],c[instance],d[instance],e[instance],T)
        KB = KB*pf

    KW = np.ones(T.shape)
    KW = np.exp( ( -13847.26/TK + 148.96502 - 23.6521*np.log(TK) ) + ( 118.67/TK - 5.977 + 1.0495*np.log(TK) )*np.power(S,0.5) - 0.01615*S )

    if np.any(Pr):
        instance = 3
        pf = pressure_fun(a[instance],b[instance],c[instance],d[instance],e[instance],T)
        KW = KW*pf

    if ( op_swtch >= 6 and op_swtch <= 9 ):
        ca_conc = np.ones(T.shape)
        ca_conc = 0.01028*S2/35.0

    if op_swtch == 6 or op_swtch == 7:
        # Calcite solubility product (Mucci-style fit)
        K_SP_C = np.power(10.0, ( -171.9065 - 0.077993*TK + 2839.319/TK + 71.595*np.log10(TK) )
                          + ( -0.77712 + 0.0028426*TK + 178.34/TK )*np.power(S, 0.5)
                          - 0.07711*S + 0.0041249*np.power(S, 1.5))
        if Pr is not None:
            instance = 4
            pf = pressure_fun(a[instance], b[instance], c[instance], d[instance], e[instance], T)
            K_SP_C = K_SP_C*pf


    if op_swtch == 8 or op_swtch == 9:
        # Aragonite solubility product (Mucci-style fit)
        K_SP_A = np.power(10.0, ( -171.945 - 0.077993*TK + 2903.293/TK + 71.595*np.log10(TK) )
                          + ( -0.068393 + 0.0017276*TK + 88.135/TK )*np.power(S, 0.5)
                          - 0.10018*S + 0.0059415*np.power(S, 1.5))
        if Pr is not None:
            instance = 5
            pf = pressure_fun(a[instance], b[instance], c[instance], d[instance], e[instance], T)
            K_SP_A = K_SP_A*pf


    # Solve iteratively for the H+ concentration (carbiter also returns the
    # iteration count).

    aH, count = carbiter(T, TCO2, TALK, TB, msk, tol, mxiter, K1, K2, KB, KW)
    
    # now we have aH we can calculate the individual carbonate species
    denom = np.power(aH, 2.0) + K1*aH + K1*K2
    H2CO3 = TCO2*np.power(aH, 2.0)/denom
    HCO3 = TCO2*K1*aH/denom
    CO3 = TCO2*K1*K2/denom

    pH = -np.log10(aH)
    pCO2 = H2CO3/alpha_s

    if op_swtch == 6 or op_swtch == 7:
        sat_CO3_C = K_SP_C/ca_conc          # saturation [CO3] for calcite
        if op_swtch == 7:
            sat_stat_C = CO3/sat_CO3_C      # calcite saturation state

    if op_swtch == 8 or op_swtch == 9:
        sat_CO3_A = K_SP_A/ca_conc          # saturation [CO3] for aragonite
        if op_swtch == 9:
            sat_stat_A = CO3/sat_CO3_A      # aragonite saturation state

    if op_swtch == 0:
        op_fld = count              # iteration count from the aH solver
    elif op_swtch == 1:
        op_fld = pCO2*1.0e6         # scale to micro-units (uatm if pCO2 is in atm)
    elif op_swtch == 2:
        op_fld = pH
    elif op_swtch == 3:
        op_fld = H2CO3
    elif op_swtch == 4:
        op_fld = HCO3
    elif op_swtch == 5:
        op_fld = CO3
    elif op_swtch == 6:
        op_fld = sat_CO3_C
    elif op_swtch == 7:
        op_fld = sat_stat_C
    elif op_swtch == 8:
        op_fld = sat_CO3_A
    elif op_swtch == 9:
        op_fld = sat_stat_A


    return op_fld
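
A minimal, self-contained sketch of the missing-data idiom the routine above is
built on, assuming a large negative fill value mdi; the array names and values
here are illustrative, not taken from the routine's caller:

import numpy as np
import numpy.ma as ma

mdi = -99999.0                         # assumed missing-data indicator
T = np.array([[mdi, 10.0], [15.0, mdi]])
S = np.array([[mdi, 35.0], [34.5, 34.0]])

# masked_greater_equal flags everything >= mdi + 1.0, i.e. the *valid* points
mskT = ma.masked_greater_equal(T, mdi + 1.0).mask
mskS = ma.masked_greater_equal(S, mdi + 1.0).mask

msk = mskT | mskS                      # True where at least one field is valid
T[np.invert(msk)] = np.nan             # NaN only where every field is missing

print(msk)  # True everywhere except the all-missing top-left point
print(T)    # only the top-left element became NaN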
Example #48
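    # Excerpt from NumPy's legacy MaskedArray tests (numpy/ma/tests/test_old_ma.py).
    # It relies on that module's imports from numpy.ma, on numpy.testing's
    # assert_, and on its local eq() helper (a masked-aware elementwise comparison).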
    def test_testOddFeatures(self):
        # Test of other odd features
        x = arange(20)
        x = x.reshape(4, 5)
        x.flat[5] = 12
        assert_(x[1, 0] == 12)
        z = x + 10j * x
        assert_(eq(z.real, x))
        assert_(eq(z.imag, 10 * x))
        assert_(eq((z * conjugate(z)).real, 101 * x * x))
        z.imag[...] = 0.0

        x = arange(10)
        x[3] = masked
        assert_(str(x[3]) == str(masked))
        c = x >= 8
        assert_(count(where(c, masked, masked)) == 0)
        assert_(shape(where(c, masked, masked)) == c.shape)
        z = where(c, x, masked)
        assert_(z.dtype is x.dtype)
        assert_(z[3] is masked)
        assert_(z[4] is masked)
        assert_(z[7] is masked)
        assert_(z[8] is not masked)
        assert_(z[9] is not masked)
        assert_(eq(x, z))
        z = where(c, masked, x)
        assert_(z.dtype is x.dtype)
        assert_(z[3] is masked)
        assert_(z[4] is not masked)
        assert_(z[7] is not masked)
        assert_(z[8] is masked)
        assert_(z[9] is masked)
        z = masked_where(c, x)
        assert_(z.dtype is x.dtype)
        assert_(z[3] is masked)
        assert_(z[4] is not masked)
        assert_(z[7] is not masked)
        assert_(z[8] is masked)
        assert_(z[9] is masked)
        assert_(eq(x, z))
        x = array([1., 2., 3., 4., 5.])
        c = array([1, 1, 1, 0, 0])
        x[2] = masked
        z = where(c, x, -x)
        assert_(eq(z, [1., 2., 0., -4., -5]))
        c[0] = masked
        z = where(c, x, -x)
        assert_(eq(z, [1., 2., 0., -4., -5]))
        assert_(z[0] is masked)
        assert_(z[1] is not masked)
        assert_(z[2] is masked)
        assert_(eq(masked_where(greater(x, 2), x), masked_greater(x, 2)))
        assert_(eq(masked_where(greater_equal(x, 2), x),
                   masked_greater_equal(x, 2)))
        assert_(eq(masked_where(less(x, 2), x), masked_less(x, 2)))
        assert_(eq(masked_where(less_equal(x, 2), x), masked_less_equal(x, 2)))
        assert_(eq(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)))
        assert_(eq(masked_where(equal(x, 2), x), masked_equal(x, 2)))
        assert_(eq(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)))
        assert_(eq(masked_inside(list(range(5)), 1, 3), [0, 199, 199, 199, 4]))
        assert_(eq(masked_outside(list(range(5)), 1, 3), [199, 1, 2, 3, 199]))
        assert_(eq(masked_inside(array(list(range(5)),
                                       mask=[1, 0, 0, 0, 0]), 1, 3).mask,
                   [1, 1, 1, 1, 0]))
        assert_(eq(masked_outside(array(list(range(5)),
                                        mask=[0, 1, 0, 0, 0]), 1, 3).mask,
                   [1, 1, 0, 0, 1]))
        assert_(eq(masked_equal(array(list(range(5)),
                                      mask=[1, 0, 0, 0, 0]), 2).mask,
                   [1, 0, 1, 0, 0]))
        assert_(eq(masked_not_equal(array([2, 2, 1, 2, 1],
                                          mask=[1, 0, 0, 0, 0]), 2).mask,
                   [1, 0, 1, 0, 1]))
        assert_(eq(masked_where([1, 1, 0, 0, 0], [1, 2, 3, 4, 5]),
                   [99, 99, 3, 4, 5]))
        atest = ones((10, 10, 10), dtype=np.float32)
        btest = zeros(atest.shape, MaskType)
        ctest = masked_where(btest, atest)
        assert_(eq(atest, ctest))
        z = choose(c, (-x, x))
        assert_(eq(z, [1., 2., 0., -4., -5]))
        assert_(z[0] is masked)
        assert_(z[1] is not masked)
        assert_(z[2] is masked)
        x = arange(6)
        x[5] = masked
        y = arange(6) * 10
        y[2] = masked
        c = array([1, 1, 1, 0, 0, 0], mask=[1, 0, 0, 0, 0, 0])
        cm = c.filled(1)
        z = where(c, x, y)
        zm = where(cm, x, y)
        assert_(eq(z, zm))
        assert_(getmask(zm) is nomask)
        assert_(eq(zm, [0, 1, 2, 30, 40, 50]))
        z = where(c, masked, 1)
        assert_(eq(z, [99, 99, 99, 1, 1, 1]))
        z = where(c, 1, masked)
        assert_(eq(z, [99, 1, 1, 99, 99, 99]))
>>> import numpy as np
>>> import numpy.ma as ma
>>> a = np.arange(4)
>>> a
array([0, 1, 2, 3])
>>> ma.masked_greater_equal(a, 2)
masked_array(data=[0, 1, --, --],
             mask=[False, False,  True,  True],
       fill_value=999999)
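
For reference, the call above is equivalent to building the condition by hand
with masked_where, an identity the test suite above also asserts:

>>> ma.masked_where(np.greater_equal(a, 2), a)
masked_array(data=[0, 1, --, --],
             mask=[False, False,  True,  True],
       fill_value=999999)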