def _get_surface_(self, lattice, reconstruction):
     string_split = reconstruction.split('-')
     theta = float(string_split[2][1:])
     periodicities = string_split[1].split('X')
     p1, p2 = tuple([float(periodicities[0]), float(periodicities[1])])
     if string_split[0] == 'p':
         self.Woodsmatrix = ma.masked_inside(
             np.array([[np.cos(theta), -np.sin(theta)],
                       [np.sin(theta), np.cos(theta)]]) *
             np.array([[p1, 0.], [0., p2]]) * np.eye(2), -10**(-15),
             10**(-15)).filled(0.)
     else:
         self.Woodsmatrix = ma.masked_inside(
             np.array([[np.cos(theta), -np.sin(theta)],
                       [np.sin(theta), np.cos(theta)]]) *
             np.array([[p1, 0.], [0., p2]]) *
             np.array([[1., 0.], [0.5, 0.5]]), -10**(-15),
             10**(-15)).filled(0.)
     self.surfAmat = ma.masked_inside(
         self.Woodsmatrix * np.dot(
             np.array([[1., 0., 0.], [0., 1., 0.]]),
             np.dot(self.Amat, np.array([[1., 0.], [0., 1.], [0., 0.]]))),
         -10**(-15), 10**(-15)).filled(0.)
     self.surfBmat = ma.masked_inside(
         2 * np.pi * np.transpose(linalg.inv(self.surfAmat)), -10**(-15),
         10**(-15)).filled(0.)
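The ma.masked_inside(..., -10**(-15), 10**(-15)).filled(0.) pattern used throughout this example simply snaps tiny floating-point residue to exactly zero. A minimal, self-contained sketch of that idiom on made-up values:

import numpy as np
import numpy.ma as ma

noisy = np.array([1.0, 6.1e-17, -2.3e-16, 0.5])
clean = ma.masked_inside(noisy, -10**(-15), 10**(-15)).filled(0.)
print(clean)  # values with magnitude below 1e-15 become exactly 0.0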
Example #2
    def quality_fis(self,fis):
        """
        Count the correct classifications of a given FIS on the check data.

        :param fis: The Fuzzy Inference System to be tested
        :type fis: :class:`rule.ClassifierSet`
        :returns: A tuple containing the number of correct classifications and the total number of classifications
        :rtype: (:class:`int`,:class:`int`)
        """
        if fis.dimension() == self.training_data.shape[1]:
            last_res = 0.0
            count = 0
            for i in range(self.check_data.shape[0]):
                last_res = fis.evaluate(np.hstack((self.check_data[i],last_res)))
                if abs(last_res - self.id) < 0.5:
                    count = count + 1
            return (count,self.check_data.shape[0])
        else:
            rvec = fis.evaluates(self.check_data) - self.id
            rvec = ma.masked_inside(rvec,-0.5,0.5)
            return (ma.count_masked(rvec),self.check_data.shape[0])
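        # NOTE: both branches above return, so the code below this point is never executed.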
        
        if fis.dimension() == self.training_data.shape[1]:
            dat = np.hstack((self.check_data,self.id*np.ones((self.check_data.shape[0],1))))
        else:
            dat = self.check_data
        #if self.check_data.shape[1] == self.training_data.shape[1]:
        #    dat = self.check_data
        #else:
        #    dat = np.hstack((self.check_data,np.zeros((self.check_data.shape[0],1))))
        rvec = fis.evaluates(dat) - self.id
        rvec = ma.masked_inside(rvec,-0.5,0.5)
        return (ma.count_masked(rvec),self.check_data.shape[0])
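A minimal sketch (made-up residuals) of the counting idiom used above: values within +/-0.5 of the target class are masked, and ma.count_masked then gives the number of correct classifications.

import numpy as np
import numpy.ma as ma

residuals = np.array([0.1, -0.3, 0.8, 0.45, -1.2])   # fis output minus class id (assumed values)
within = ma.masked_inside(residuals, -0.5, 0.5)
print(ma.count_masked(within))                       # 3 of the 5 samples fall inside the band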
Example #3
def Fill2ThetaAzimuthMap(masks, TA, tam, image):
    'Apply the ring, arc and intensity-threshold masks and return compressed azimuth, 2-theta, intensity, dist**2/d0**2 and unit arrays'
    Zlim = masks['Thresholds'][1]
    rings = masks['Rings']
    arcs = masks['Arcs']
    TA = np.dstack((ma.getdata(TA[1]), ma.getdata(TA[0]),
                    ma.getdata(TA[2])))  #azimuth, 2-theta, dist
    tax, tay, tad = np.dsplit(TA, 3)  #azimuth, 2-theta, dist**2/d0**2
    for tth, thick in rings:
        tam = ma.mask_or(
            tam.flatten(),
            ma.getmask(
                ma.masked_inside(tay.flatten(), max(0.01, tth - thick / 2.),
                                 tth + thick / 2.)))
    for tth, azm, thick in arcs:
        tamt = ma.getmask(
            ma.masked_inside(tay.flatten(), max(0.01, tth - thick / 2.),
                             tth + thick / 2.))
        tama = ma.getmask(ma.masked_inside(tax.flatten(), azm[0], azm[1]))
        tam = ma.mask_or(tam.flatten(), tamt * tama)
    taz = ma.masked_outside(image.flatten(), int(Zlim[0]), Zlim[1])
    tabs = np.ones_like(taz)
    tam = ma.mask_or(tam.flatten(), ma.getmask(taz))
    tax = ma.compressed(ma.array(tax.flatten(), mask=tam))  #azimuth
    tay = ma.compressed(ma.array(tay.flatten(), mask=tam))  #2-theta
    taz = ma.compressed(ma.array(taz.flatten(), mask=tam))  #intensity
    tad = ma.compressed(ma.array(tad.flatten(), mask=tam))  #dist**2/d0**2
    tabs = ma.compressed(ma.array(
        tabs.flatten(), mask=tam))  #ones - later used for absorption corr.
    return tax, tay, taz, tad, tabs
Example #4
def in_range_hsv(hsv):
    liver_light1 = (-14, 98, 35)
    liver_dark1 = (16, 245, 193)

    # liver_light1 = (-14, 0, 0)
    # liver_dark1 = (255, 255, 255)

    mask1 = ma.masked_inside(hsv, liver_light1, liver_dark1).mask
    mask1 = mask1 * 1
    mask1[:, 0] = mask1[:, 0] * mask1[:, 1] * mask1[:, 2]
    mask1_result = mask1[:, 0]

    #use ma.masked_equal

    liver_light1 = (164, 46, 97)
    liver_dark1 = (192, 150, 247)
    mask2 = ma.masked_inside(hsv, liver_light1, liver_dark1).mask
    mask2 = mask2 * 1
    mask2[:, 0] = mask2[:, 0] * mask2[:, 1] * mask2[:, 2]
    mask2_result = mask2[:, 0]

    # bitwise OR: fold mask1 into mask2
    mask2_result[mask1_result == 1] = 1

    return mask2_result
Example #5
    def _breakList(self, inList, index, parameter):
        par = float(parameter)

        array = N.empty(shape=[len(inList),],dtype=N.float64)
        i = 0
        for parameters in inList:
            array[i] = parameters[index]
            i = i + 1 

        greater = MA.masked_less(array, par)
        less = MA.masked_greater(array, par)

        upper = MA.minimum(greater)
        lower = MA.maximum(less)

        upperArray = MA.masked_inside(array, par, upper)
        lowerArray = MA.masked_inside(array, lower, par)

        upperList = []
        lowerList = []
        i = 0
        for parameters in inList:
            if upperArray.mask[i]:
                upperList.append(parameters)
            if lowerArray.mask[i]:
                lowerList.append(parameters)
            i = i + 1

        return upperList, lowerList
Example #6
class TestHasCyclic:
    """
    Test def has_cyclic(x, axis=-1, cyclic=360, precision=1e-4):
    - variations of x with and without axis keyword
    - different unit of x - cyclic keyword
    - detection of cyclic points - precision keyword
    """

    # 1d lon (6), lat (5)
    lons = np.arange(0, 360, 60)
    lats = np.arange(-90, 90, 180 / 5)
    # 2d lon, lat
    lon2d, lat2d = np.meshgrid(lons, lats)
    # 3d lon
    lon3d = np.repeat(lon2d, 4).reshape((*lon2d.shape, 4))
    # cyclic lon 1d, 2d, 3d
    c_lons = np.concatenate((lons, np.array([360.])))
    c_lon2d = np.concatenate((lon2d, np.full((lon2d.shape[0], 1), 360.)),
                             axis=1)
    c_lon3d = np.concatenate(
        (lon3d, np.full((lon3d.shape[0], 1, lon3d.shape[2]), 360.)), axis=1)

    @pytest.mark.parametrize("lon, clon", [(lons, c_lons), (lon2d, c_lon2d)])
    def test_data(self, lon, clon):
        '''Test lon is not cyclic, clon is cyclic'''
        assert not has_cyclic(lon)
        assert has_cyclic(clon)

    @pytest.mark.parametrize("lon, clon, axis",
                             [(lons, c_lons, 0), (lon2d, c_lon2d, 1),
                              (ma.masked_inside(lon2d, 100, 200),
                               ma.masked_inside(c_lon2d, 100, 200), 1)])
    def test_data_axis(self, lon, clon, axis):
        '''Test lon is not cyclic, clon is cyclic, with axis keyword'''
        assert not has_cyclic(lon, axis=axis)
        assert has_cyclic(clon, axis=axis)

    def test_3d_axis(self):
        '''Test 3d with axis keyword, no keyword name for axis'''
        assert has_cyclic(self.c_lon3d, 1)
        assert not has_cyclic(self.lon3d, 1)

    def test_3d_axis_cyclic(self):
        '''Test 3d with axis and cyclic keywords'''
        new_clons = np.deg2rad(self.c_lon3d)
        new_lons = np.deg2rad(self.lon3d)
        assert has_cyclic(new_clons, axis=1, cyclic=np.deg2rad(360.))
        assert not has_cyclic(new_lons, axis=1, cyclic=np.deg2rad(360.))

    def test_1d_precision(self):
        '''Test 1d with precision keyword'''
        new_clons = np.concatenate((self.lons, np.array([360. + 1e-3])))
        assert has_cyclic(new_clons, precision=1e-2)
        assert not has_cyclic(new_clons, precision=2e-4)
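The tests above target a cartopy-style has_cyclic. A rough stand-in that is consistent with these cases (not the library's actual implementation; it only assumes a cyclic axis ends with the first coordinate shifted by `cyclic`):

import numpy as np

def has_cyclic_sketch(x, axis=-1, cyclic=360, precision=1e-4):
    # Cyclic if the last value along `axis` equals the first value plus `cyclic`
    # to within `precision`.
    x = np.asanyarray(x)
    first = np.take(x, 0, axis=axis)
    last = np.take(x, -1, axis=axis)
    return bool(np.all(np.abs(first + cyclic - last) < precision))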
Example #7
    def _Veggspretting(self):
        a = ma.masked_outside(
            self.pos, 0, self.L)  # Find the particles that are outside the box
        self.hast += -2 * self.hast * a.mask  # Reverse the velocity of those particles (180 degree bounce)

        b = ma.masked_inside(self.pos[:, 0], self.L / 4, 3 * self.L /
                             4)  # Find all particles that hit the hole
        c = ma.masked_inside(self.pos[:, 1], self.L / 4, 3 * self.L / 4)
        e = ma.masked_outside(self.pos[:, 2], 0, self.L)
        #print(Sum(b.mask*c.mask * e.mask))
        self.hullteller = Sum(e.mask * b.mask * c.mask)
        self.bevegelsesmengde = abs(
            Sum(self.m_H * self.hast[:, 0] * b.mask * c.mask * e.mask))
Example #8
 def __init__(self, space_group, lattice_parameters, elements, coordinates,
              miller_indexes, reconstruction):
     space_group_number, a, b, c, alpha, beta, gamma = self._define_parameters_(
         space_group, lattice_parameters)
     lattice = mg.Lattice.from_parameters(a, b, c, alpha, beta, gamma)
     latticeAmat = ma.masked_inside(lattice.matrix, -10**(-15),
                                    10**(-15)).filled(0.)
     latticeBmat = ma.masked_inside(lattice.reciprocal_lattice.matrix,
                                    -10**(-15), 10**(-15)).filled(0.)
     elements_list = self._define_elements_(elements)
     coordinates_list = self._define_coordinates_(coordinates)
     crystal = mg.Structure.from_spacegroup(space_group, lattice,
                                            elements_list, coordinates_list)
     self._define_base_(crystal.sites)
Example #9
    def create_gap_mask(self, wave):
        """
        Use the gap configuration and a wavelength vector, wave, to build a masked array that
        masks out the location of the gap.  Wavelengths between and including both gap endpoints
        will be masked out.

        Parameters
        ----------
        wave: numpy.ndarray
            Wavelength vector to construct mask from

        Returns
        -------
        mask: numpy.ma 1D masked array
            1D array masked at the locations within the configured detector gap and 1.0 elsewhere
        """
        disperser = self.instrument['disperser']
        aperture = self.instrument['aperture']
        filt = self.instrument['filter']
        gap = self.aperture_config[aperture]['gap'][disperser]
        if filt in gap:
            gap_start = gap[filt]['gap_start']
            gap_end = gap[filt]['gap_end']
        else:
            gap_start = gap['gap_start']
            gap_end = gap['gap_end']

        if gap_start is not None and gap_end is not None:
            masked_wave = ma.masked_inside(wave, gap_start, gap_end)
            mask = masked_wave / wave
            mask = ma.filled(mask, 0.0)
        else:
            mask = 1.0
        return mask
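A minimal sketch (made-up wavelengths) of the weight-vector idiom used above: dividing the masked array by the unmasked one yields 1.0 outside the gap, and filling the masked gap entries with 0.0 produces a simple multiplicative mask.

import numpy as np
import numpy.ma as ma

wave = np.array([1.0, 1.2, 1.4, 1.6, 1.8])       # wavelength vector (assumed units)
masked_wave = ma.masked_inside(wave, 1.3, 1.7)   # hypothetical gap between 1.3 and 1.7
mask = ma.filled(masked_wave / wave, 0.0)
print(mask)                                      # [1. 1. 0. 0. 1.]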
Example #11
def compute_temperature_low_altitude(h, tb):
    r"""
    Computes the temperature in the low-altitude region.

    Parameters
    ----------
    h : array
        Geopotential height values [m].

    tb : array-like
        Levels temperature values [K].

    Returns
    -------
    array:
        Temperature [K].
    """
    # we create a mask for each layer
    masks = [ma.masked_inside(h, H[i - 1], H[i]).mask for i in range(1, 8)]

    # for each layer, we evaluate the pressure based on whether the
    # temperature gradient is zero or not
    t = np.empty(len(h))
    for i, mask in enumerate(masks):
        if LK[i] == 0:
            t[mask] = tb[i]
        else:
            t[mask] = tb[i] + LK[i] * (h[mask] - H[i])
    return t
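A hedged sketch of the layer-mask idiom above, using toy layer bounds (the real H, LK and tb come from standard-atmosphere tables that are not shown here):

import numpy as np
import numpy.ma as ma

H_toy = [0., 11000., 20000.]                      # geopotential layer bounds [m] (toy values)
h = np.array([5000., 11000., 15000.])
masks = [ma.masked_inside(h, H_toy[i - 1], H_toy[i]).mask for i in range(1, 3)]
print(masks[0])   # [ True  True False] -> heights belonging to the first layer
print(masks[1])   # [False  True  True] -> note the shared, inclusive boundary at 11000 m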
Example #12
def imshow_rango(v1, imin, imax):

    #Create a masked array containing the values inside the range and another with the values outside, so each can be drawn with a different colormap
    v1b = masked_inside(v1, imin, imax)
    v1a = masked_outside(v1, imin, imax)

    fig, ax = plt.subplots()
    fig.tight_layout()
    pa = ax.imshow(v1a,
                   interpolation='nearest',
                   cmap=matplotlib.cm.jet,
                   vmin=min_range.value,
                   vmax=max_range.value)
    pb = ax.imshow(v1b,
                   interpolation='nearest',
                   cmap=matplotlib.cm.Pastel1,
                   vmax=3,
                   vmin=3)
    cbar = plt.colorbar(pa, shrink=0.25)

    try:
        cbar.set_label(dataset.variables[variables[0][propiedades[0]]].units)
    except:
        cbar.set_label("Unidades no especificadas")
    plt.title(variables[0][propiedades[0]])
    plt.ylabel("Latitude")
    plt.xlabel("Longitude")

    plt.show()

    return fig
Example #13
def compute_pressure_low_altitude(h, pb, tb):
    r"""
    Computes the pressure in the low-altitude region.

    Parameters
    ----------
    h : array
        Geopotential height values [m].

    pb : array-like
        Levels pressure values [Pa].

    tb : array-like
        Levels temperature values [K].

    Returns
    -------
    array:
        Pressure values [Pa].
    """
    # we create a mask for each layer
    masks = [ma.masked_inside(h, H[i - 1], H[i]).mask for i in range(1, 8)]

    # for each layer, we evaluate the pressure based on whether the
    # temperature gradient is zero or not
    p = np.empty(len(h))
    for i, mask in enumerate(masks):
        if LK[i] == 0:
            p[mask] = compute_pressure_low_altitude_zero_gradient(
                h[mask], H[i], pb[i], tb[i])
        else:
            p[mask] = compute_pressure_low_altitude_non_zero_gradient(
                h[mask], H[i], pb[i], tb[i], LK[i])
    return p
Example #14
    def corte_latitud(lat, dim, dimz, imin, imax):
        corte= np.zeros((dimz, dim))
        step=1
        z0=0
        z1=dimz-1
        if tipo==0:
            z0=dimz-1
            z1=0
            step=-1

        if variables[1][propiedades[0]]==-1:
            for i in range (z0,z1,step):
                aux=dataset.variables["R1"][propiedades[1],1,i,:,lat]
                corte[i,:]=aux
        else:
            for i in range (z0,z1,step):
                aux=dataset.variables[variables[0][propiedades[0]]][propiedades[1],i,:,lat]
                corte[i,:]=aux

        v_m= np.nanmin(corte[:])
        try:
            corte[ corte==v_m ] = np.nan
        except:
            print("fallo")


        v1b = masked_inside(corte,imin,imax)
        v1a = masked_outside(corte,imin,imax)

        fig,ax = plt.subplots()
        fig.tight_layout()
        pa = ax.imshow(v1a,interpolation='nearest',cmap = matplotlib.cm.jet, vmin= min_range.value, vmax= max_range.value)
        pb = ax.imshow(v1b,interpolation='nearest',cmap=matplotlib.cm.Pastel1, vmax= 3, vmin= 3)
        cbar = plt.colorbar(pa,shrink=0.25)
Example #15
def isDis(bDis):
    """
    Is discontinuity?

    If the Kepler team identifies a discontinuity, we mask out
    several cadences before and many cadences after it.

    Parameters
    ----------
    bDis - Boolean from Kepler SAP_QUALITY

    Returns
    -------
    Boolean array corresponding to masked out region.

    """
    preCut = 4  # Number of cadences to cut out before discontinuity
    postCut = 50  # Number of cadences to cut after discont

    cad = np.arange(bDis.size)
    cad = ma.masked_array(cad)
    disId = cad[bDis]
    for id in disId:
        cad = ma.masked_inside(cad, id - preCut, id + postCut)

    return cad.mask
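A usage sketch with synthetic flags (hypothetical SAP_QUALITY-style input):

import numpy as np

bDis = np.zeros(200, dtype=bool)
bDis[100] = True              # one flagged discontinuity at cadence 100
bad = isDis(bDis)
print(bad.sum())              # 55 cadences masked: 4 before, the cadence itself, 50 after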
Example #16
 def transform(self, a):
     sign = np.sign(a)
     masked = ma.masked_inside(a, -self.linthresh, self.linthresh, copy=False)
     log = sign * self.linthresh * (1 + ma.log(np.abs(masked) / self.linthresh))
     if masked.mask.any():
         return ma.where(masked.mask, a, log)
     else:
         return log
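A self-contained sketch of the masking idea shared by the symlog-style transforms in these examples (linthresh assumed to be 1.0): values inside the linear band are masked so ma.log never operates on them, and ma.where then splices the untouched linear-band values back in.

import numpy as np
import numpy.ma as ma

a = np.array([-5., -0.5, 0., 0.5, 5.])
linthresh = 1.0
sign = np.sign(a)
masked = ma.masked_inside(a, -linthresh, linthresh, copy=False)
log = sign * linthresh * (1 + ma.log(np.abs(masked) / linthresh))
print(ma.where(masked.mask, a, log))   # linear inside the band, logarithmic outside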
Example #17
 def transform(self, a):
     a = np.asarray(a)
     sign = np.sign(a)
     masked = ma.masked_inside(a, -1.0, 1.0, copy=False)
     result = np.where(
         (a >= -1.0) & (a <= 1.0), a * self.linthresh,
         sign * np.power(self.base, np.abs(a - sign * self._linadjust)))
     return result
Example #18
 def transform(self, a):
     a = np.asarray(a)
     sign = np.sign(a)
     masked = ma.masked_inside(a, -1.0, 1.0, copy=False)
     result = np.where((a >= -1.0) & (a <= 1.0),
                       a * self.linthresh,
                       sign * np.power(self.base, np.abs(a - sign * self._linadjust)))
     return result
Example #19
 def transform(self, a):
     sign = np.sign(a)
     masked = ma.masked_inside(a, -self.linthresh, self.linthresh, copy=False)
     exp = sign * self.linthresh * ma.exp(sign * masked / self.linthresh - 1)
     if masked.mask.any():
         return ma.where(masked.mask, a, exp)
     else:
         return exp
Example #20
 def transform_non_affine(self, a):
     sign = np.sign(a)
     masked = ma.masked_inside(a, -self.linthresh, self.linthresh, copy=False)
     log = sign * self.linthresh * (self._linscale_adj + ma.log(np.abs(masked) / self.linthresh) / self._log_base)
     if masked.mask.any():
         return ma.where(masked.mask, a * self._linscale_adj, log)
     else:
         return log
Example #21
 def transform_non_affine(self, a):
     sign = np.sign(a)
     masked = ma.masked_inside(a, -self.invlinthresh, self.invlinthresh, copy=False)
     exp = sign * self.linthresh * (ma.power(self.base, (sign * (masked / self.linthresh)) - self._linscale_adj))
     if masked.mask.any():
         return ma.where(masked.mask, a / self._linscale_adj, exp)
     else:
         return exp
Example #22
 def transform(self, a):
     sign = np.sign(np.asarray(a))
     masked = ma.masked_inside(a, -self.linthresh, self.linthresh, copy=False)
     log = sign * ma.log(np.abs(masked)) / self._log_base
     if masked.mask.any():
         return np.asarray(ma.where(masked.mask, a * self._linadjust, log))
     else:
         return np.asarray(log)
Example #23
def polysigclip(x, y, porder, sigma, iters):
    mask = np.ones(x.shape, dtype=bool)
    for _ in range(iters):
        pfit = np.polyfit(x[mask], y[mask], porder)
        p = np.poly1d(pfit)
        std = np.std(y[mask] - p(x[mask]))
        mask = ma.getmask(ma.masked_inside(y - p(x), -sigma * std,
                                           sigma * std))
    return mask, p
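A usage sketch with synthetic data and one injected outlier (values assumed):

import numpy as np

rng = np.random.default_rng(0)
x = np.linspace(0., 10., 100)
y = 2.0 * x + 1.0 + rng.normal(0., 0.1, x.size)
y[50] += 5.0                                   # outlier
keep, p = polysigclip(x, y, 1, 3, 3)           # linear fit, 3-sigma clip, 3 iterations
print(keep[50])                                # False: the outlier is excluded from the fit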
Example #24
 def _get_hkl_oriented_base_(self, rotation_matrix, base):
     rotated_base = copy.deepcopy(base)
     for key in base:
         vector = ma.masked_inside(
             np.dot(rotation_matrix, base[key]["Coordinates"]), -10**(-15),
             10**(-15)).filled(0.)
         rotated_base[key]["Coordinates"] = self._check_base_coordinates_(
             vector)
     return rotated_base
Example #25
 def transform(self, a):
     a = np.asarray(a)
     sign = np.sign(a)
     masked = ma.masked_inside(a, -self.linthresh, self.linthresh, copy=False)
     if masked.mask.any():
         log = sign * (ma.log(np.abs(masked)) / self._log_base + self._linadjust)
         return np.asarray(ma.where(masked.mask, a * self._linscale, log))
     else:
         return sign * (np.log(np.abs(a)) / self._log_base + self._linadjust)
Example #26
 def transform(self, a):
     sign = np.sign(np.asarray(a))
     masked = ma.masked_inside(a, -self.linthresh, self.linthresh, copy=False)
     log = sign * ma.log(np.abs(masked)) / self._log_base
     if masked.mask.any():
         return np.asarray(ma.where(masked.mask,
                                     a * self._linadjust,
                                     log))
     else:
         return np.asarray(log)
Example #27
 def transform(self, a):
     sign = np.sign(a)
     masked = ma.masked_inside(a, -self.linthresh, self.linthresh, copy=False)
     log = sign * self.linthresh * (
         self._linscale_adj +
         ma.log(np.abs(masked) / self.linthresh) / self._log_base)
     if masked.mask.any():
         return ma.where(masked.mask, a * self._linscale_adj, log)
     else:
         return log
Example #28
 def transform(self, a):
     sign = np.sign(a)
     masked = ma.masked_inside(a, -self.invlinthresh, self.invlinthresh, copy=False)
     exp = sign * self.linthresh * (
         ma.power(self.base, (sign * (masked / self.linthresh))
         - self._linscale_adj))
     if masked.mask.any():
         return ma.where(masked.mask, a / self._linscale_adj, exp)
     else:
         return exp
Example #29
    def delete_trim_reg(self, event):
        """Delete the whole trim region nearest the click."""
        x = event.xdata
        pick = np.abs(self.spec_trim_points[self.order]-x).argmin()
        points = self.spec_trim_points[self.order]
        row = (pick-(pick%2))//2  # integer row index of the nearest trim region
        print('Deleting trim range '+str(
            self.spec_trim_points[self.order][row,0])+' to '+ str(
            self.spec_trim_points[self.order][row,1]))
        if np.shape(self.spec_trim_points[self.order])[0] >= 1:
            self.spec_trim_points[self.order] = \
                    np.delete(self.spec_trim_points[self.order],row,axis=0)
            print(self.spec_trim_points[self.order])

            self.rawspec[self.order] = ma.masked_array(
                                      self.rawspec[self.order].data, mask=False)
            self.norm[self.order] = ma.masked_array(
                                         self.norm[self.order].data, mask=False)
            self.fit[self.order] = ma.masked_array(
                                          self.fit[self.order].data, mask=False)
        else:
            self.spec_trim_points[self.order] = 0
        if np.shape(self.spec_trim_points[self.order])[0] == 0:
            self.spec_trim_points[self.order] = 0
            self.state['trimmed'][self.order] = False
        else:
            for i in range((np.shape(points)[0]) -1 ):
                start = self.spec_trim_points[self.order][i,0]
                end = self.spec_trim_points[self.order][i,1]
                self.rawspec[self.order] = ma.masked_inside(
                                            self.rawspec[self.order],start,end)
                self.fit[self.order] = ma.masked_inside(
                                                self.fit[self.order],start,end)
                self.norm[self.order] = ma.masked_inside(
                                               self.norm[self.order],start,end)
                self.sm[self.order] = ma.masked_inside(
                                                 self.sm[self.order],start,end)
        self.state['del_trim'] = False
        self.base_draw()
        self.spline_fit_and_plot()
        self.fig1.canvas.draw()
        return
Example #30
    def delete_trim_reg(self, event):
        """Delete the whole trim region nearest the click."""
        x = event.xdata
        pick = np.abs(self.spec_trim_points[self.order] - x).argmin()
        points = self.spec_trim_points[self.order]
        row = (pick - (pick % 2)) // 2  # integer row index of the nearest trim region
        print('Deleting trim range ' +
              str(self.spec_trim_points[self.order][row, 0]) + ' to ' +
              str(self.spec_trim_points[self.order][row, 1]))
        if np.shape(self.spec_trim_points[self.order])[0] >= 1:
            self.spec_trim_points[self.order] = \
                    np.delete(self.spec_trim_points[self.order],row,axis=0)
            print(self.spec_trim_points[self.order])

            self.rawspec[self.order] = ma.masked_array(
                self.rawspec[self.order].data, mask=False)
            self.norm[self.order] = ma.masked_array(self.norm[self.order].data,
                                                    mask=False)
            self.fit[self.order] = ma.masked_array(self.fit[self.order].data,
                                                   mask=False)
        else:
            self.spec_trim_points[self.order] = 0
        if np.shape(self.spec_trim_points[self.order])[0] == 0:
            self.spec_trim_points[self.order] = 0
            self.state['trimmed'][self.order] = False
        else:
            for i in range((np.shape(points)[0]) - 1):
                start = self.spec_trim_points[self.order][i, 0]
                end = self.spec_trim_points[self.order][i, 1]
                self.rawspec[self.order] = ma.masked_inside(
                    self.rawspec[self.order], start, end)
                self.fit[self.order] = ma.masked_inside(
                    self.fit[self.order], start, end)
                self.norm[self.order] = ma.masked_inside(
                    self.norm[self.order], start, end)
                self.sm[self.order] = ma.masked_inside(self.sm[self.order],
                                                       start, end)
        self.state['del_trim'] = False
        self.base_draw()
        self.spline_fit_and_plot()
        self.fig1.canvas.draw()
        return
Example #31
 def _mask_measurements(self,mask):
     ''' Set the mask on the measurements based on noise characteristics.  This is so that
         we don't compute garbage n,G0 where observed ratios are noise divided by noise.
         
        :param mask: Indicate how to mask image observations before computing the density and radiation field. See run().
     '''
     if mask is None: 
         return
     if self._measurementnaxis == 0:
         utils.warn(self,"Ignoring 'mask' parameter for single pixel observations")
         return
     if mask[0] == 'mad':
         for k,v in self._measurements.items():
             sigcut = mask[1]*astats.mad_std(v.data,ignore_nan=True)
             print("Masking %s data between [%.1e,%.1e]"%(k,-sigcut,sigcut))
             masked_data = ma.masked_inside(v.data,-sigcut,sigcut,copy=True)
             # CCDData/NDData do not use MaskArrays underneath but two nddata.arrays. Why??
             # Make a copy so we are keeping references to data copies lying around.
             v.mask = masked_data.mask.copy()
     elif mask[0] == 'data':
         for k,v in self._measurements.items():
             masked_data=ma.masked_inside(v.data,mask[1][0],mask[1][1],copy=True)
             v.mask = masked_data.mask.copy()
             print("Masking %s data between [%.1e,%.1e]"%(k,mask[1][0],mask[1][1]))
     elif mask[0] == 'clip':
         for k,v in self._measurements.items():
             masked_data=ma.masked_outside(v.data,mask[1][0],mask[1][1],copy=True)
             v.mask = masked_data.mask.copy()
             print("Masking %s data outside [%.1e,%.1e]"%(k,mask[1][0],mask[1][1]))
     elif mask[0] == 'error':
         for k,v in self._measurements.items():
             # error is StdDevUncertainty so must use _array to get at raw values
             indices = np.where((v.error <= mask[1][0]) | (v.error >= mask[1][1]))
             if v.mask is not None:
                 v.mask[indices] = True
             else:
                 v.mask = np.full(v.data.shape,False)
                 v.mask[indices] = True
             print("Masking %s data where error outside [%.1e,%.1e]"%(k,mask[1][0],mask[1][1]))
     else:
         raise ValueError("Unrecognized mask parameter %s. Valid values are 'mad','data','clip','error'"%mask[0])
Example #32
 def transform(self, a):
     sign = np.sign(a)
     masked = ma.masked_inside(a,
                               -self.linthresh,
                               self.linthresh,
                               copy=False)
     exp = sign * self.linthresh * ma.exp(sign * masked /
                                          self.linthresh - 1)
     if masked.mask.any():
         return ma.where(masked.mask, a, exp)
     else:
         return exp
Example #33
def Fill2ThetaAzimuthMap(masks,TA,tam,image):
    'Apply the ring, arc and intensity-threshold masks and return compressed azimuth, 2-theta, intensity, dist**2/d0**2 and unit arrays'
    Zlim = masks['Thresholds'][1]
    rings = masks['Rings']
    arcs = masks['Arcs']
    TA = np.dstack((ma.getdata(TA[1]),ma.getdata(TA[0]),ma.getdata(TA[2])))    #azimuth, 2-theta, dist
    tax,tay,tad = np.dsplit(TA,3)    #azimuth, 2-theta, dist**2/d0**2
    for tth,thick in rings:
        tam = ma.mask_or(tam.flatten(),ma.getmask(ma.masked_inside(tay.flatten(),max(0.01,tth-thick/2.),tth+thick/2.)))
    for tth,azm,thick in arcs:
        tamt = ma.getmask(ma.masked_inside(tay.flatten(),max(0.01,tth-thick/2.),tth+thick/2.))
        tama = ma.getmask(ma.masked_inside(tax.flatten(),azm[0],azm[1]))
        tam = ma.mask_or(tam.flatten(),tamt*tama)
    taz = ma.masked_outside(image.flatten(),int(Zlim[0]),Zlim[1])
    tabs = np.ones_like(taz)
    tam = ma.mask_or(tam.flatten(),ma.getmask(taz))
    tax = ma.compressed(ma.array(tax.flatten(),mask=tam))   #azimuth
    tay = ma.compressed(ma.array(tay.flatten(),mask=tam))   #2-theta
    taz = ma.compressed(ma.array(taz.flatten(),mask=tam))   #intensity
    tad = ma.compressed(ma.array(tad.flatten(),mask=tam))   #dist**2/d0**2
    tabs = ma.compressed(ma.array(tabs.flatten(),mask=tam)) #ones - later used for absorption corr.
    return tax,tay,taz,tad,tabs
Example #34
def par_dark_count(par, depth, time, depth_percentile=90):
    """
    Calculates an in situ dark count from the PAR sensor.

    The in situ dark count for the PAR sensor is calculated from the median,
    selecting only observations in the nighttime and in the 90th percentile of
    the depth sampled (i.e. the deepest depths measured)

    Parameters
    ----------

    par: numpy.ndarray or pandas.Series
        The par array after factory calibration in units uE/m2/sec.
    depth: numpy.ndarray or pandas.Series
        The depth array in metres.
    time: numpy.ndarray or pandas.Series
        The date & time array in a numpy.datetime64 format.
    depth_percentile: int
        The depth percentile above which samples are used for the dark estimate (default 90).

    Returns
    -------

    par_dark: numpy.ndarray or pandas.Series
        The par data corrected for the in situ dark value in units uE/m2/sec.
    """
    from numpy import array, isnan, ma, nanmedian, nanpercentile
    import warnings

    par_arr = array(par)
    depth = array(depth)
    time = array(time)

    # DARK CORRECTION FOR PAR
    hrs = time.astype("datetime64[h]") - time.astype("datetime64[D]")
    xi = ma.masked_inside(hrs.astype(int), 21,
                          5)  # find hours between 22:00 and 3:00
    if ma.sum(xi) < 1:
        warnings.warn(
            "There are no reliable night time measurements. This dark count correction cannot be "
            "cannot be trusted",
            UserWarning,
        )

    yi = ma.masked_outside(
        depth, *nanpercentile(depth[~isnan(par_arr)],
                              [depth_percentile, 100]))  # pctl of depth
    i = ~(xi.mask | yi.mask)
    dark = nanmedian(par_arr[i])
    par_dark = par_arr - dark
    par_dark[par_dark < 0] = 0

    return par_dark
Example #35
 def _get_hkl_oriented_lattice_(self,kvec):
     if np.all(np.cross(kvec, np.array([0., 0., 1.])) == np.array([0., 0., 0.])):
         self.rotation_matrix = np.eye(3)
         self.rotAmat = self.Amat
         self.rotBmat = self.Bmat
         self.rotbase = self.base
     else:
         warnings.simplefilter("ignore")
         theta = - np.arctan(kvec[1] / kvec[0])
         phi = - np.arctan(np.sqrt(kvec[0] ** 2 + kvec[1] ** 2) / kvec[2])
         rthetaz = ma.masked_inside(
             ma.masked_equal(
                 np.array([[np.cos(theta), -np.sin(theta), 0.],
                           [np.sin(theta), np.cos(theta), 0.],
                           [0., 0., 1.]]), -0.).filled(0.),
             -10 ** (-15), 10 ** (-15)).filled(0.)
         rphiy = ma.masked_inside(
             ma.masked_equal(
                 np.array([[np.cos(phi), 0., np.sin(phi)],
                           [0., 1., 0.],
                           [-np.sin(phi), 0., np.cos(phi)]]), -0.).filled(0.),
             -10 ** (-15), 10 ** (-15)).filled(0.)
         matrix = np.dot(rphiy, rthetaz)
         rotkvec = ma.masked_inside(np.dot(matrix, kvec),
                                    -10 ** (-15), 10 ** (-15)).filled(0.)
         Amattoreduce = np.dot(self.Amat, matrix)
         Ap, Al, Au = linalg.lu(Amattoreduce)
         Bmattoreduce = np.dot(self.Bmat, matrix)
         Bp, Bl, Bu = linalg.lu(Bmattoreduce)
         if np.all(np.cross(rotkvec, np.array([0., 0., 1.])) == np.array([0., 0., 0.])):
             self.rotation_matrix = matrix
             self.rotAmat = self._correct_u_matrices_(Au)
             self.rotBmat = self._correct_u_matrices_(Bu)
             self.rotbase = self._get_hkl_oriented_base_(matrix,self.base)
Example #36
def getseg(lc):
    seglabel = np.zeros(lc.size) - 1
    t = lc['t']
    tm = ma.masked_array(t)

    for i in range(len(cutList) - 1):
        rng = cutList[i]
        rng1 = cutList[i + 1]

        tm = ma.masked_inside(tm, rng['start'], rng['stop'])

        b = (tm > rng['stop']) & (tm < rng1['start'])
        seglabel[b] = i
    return seglabel
Example #37
 def transform(self, a):
     a = np.asarray(a)
     sign = np.sign(a)
     masked = ma.masked_inside(a,
                               -self.linthresh,
                               self.linthresh,
                               copy=False)
     if masked.mask.any():
         log = sign * (ma.log(np.abs(masked)) / self._log_base +
                       self._linadjust)
         return np.asarray(
             ma.where(masked.mask, a * self._linscale, log))
     else:
         return sign * (np.log(np.abs(a)) / self._log_base +
                        self._linadjust)
Example #38
def dustbuster(mc):
    """
    Read SJI fits files and return Inpaint-corrected fits files.
    Image inpainting involves filling in part of an image or video
    using information from the surrounding area.

    Parameters
    ----------
    mc: `sunpy.map.MapCube`
        Mapcube to read


    Returns
    -------
    mc: `sunpy.map.MapCube`
        Inpaint-corrected Mapcube
    """
    image_result = []
    ndx = len(mc)
    for i, map in enumerate(mc):
        image_orig = map.data
        nx = map.meta.get('NRASTERP')
        firstpos = range(ndx)[0::nx]
        #  Create mask with values < 1, excluding frame (-200)
        m = ma.masked_inside(image_orig, -199, .1)

        if nx <= 50:  # sparse/coarse raster
            skip = 1
            secpos = [-1]
            thirdpos = [-1]
        elif nx > 50:  # dense raster
            skip = 5
            secpos = range(ndx)[1::nx]
            thirdpos = range(ndx)[2::nx]

        if (i in firstpos) or (i in secpos) or (i in thirdpos):
            image_inpaint = mc[i + skip].data.copy()  # grab next frame
        else:
            image_inpaint = mc[i - skip].data.copy()  # grab prev frame

        # Inpaint mask onto image
        image_orig[m.mask] = image_inpaint[m.mask]

        map.data = image_orig

    return mc
Example #39
def par_dark_count(par, dives, depth, time):
    """
    Calculates an in situ dark count from the PAR sensor.

    The in situ dark count for the PAR sensor is calculated from the median,
    selecting only night-time observations in the deepest 10% of the sampled depths.

    Parameters
    ----------

    par: numpy.ndarray or pandas.Series
        The par array after factory calibration in units uE/m2/sec.
    dives: numpy.ndarray or pandas.Series
        The dive count (round is down dives, 0.5 is up dives).
    depth: numpy.ndarray or pandas.Series
        The depth array in metres.
    time: numpy.ndarray or pandas.Series
        The date & time array in a numpy.datetime64 format.

    Returns
    -------

    par_dark: numpy.ndarray or pandas.Series
        The par data corrected for the in situ dark value in units uE/m2/sec.
    """
    from numpy import array, ma, nanmedian, isnan, nanpercentile

    par_arr = array(par)
    dives = array(dives)
    depth = array(depth)
    time = array(time)

    # DARK CORRECTION FOR PAR
    hrs = time.astype('datetime64[h]') - time.astype('datetime64[D]')
    xi = ma.masked_inside(hrs.astype(int), 22, 2)  # find 23:01 hours
    yi = ma.masked_outside(depth,
                           *nanpercentile(depth[~isnan(par)],
                                          [90, 100]))  # 90th pctl of depth
    i = ~(xi.mask | yi.mask)
    dark = nanmedian(par_arr[i])
    par_dark = par_arr - dark
    par_dark[par_dark < 0] = 0

    par_dark = transfer_nc_attrs(getframe(), par, par_dark, '_dark')

    return par_dark
Example #40
def make_guess(data, sampleRate):
    '''Returns a decent starting point for fitting the decaying oscillation
    function.

    '''
    p = np.zeros(5)

    # the first unknown is the shift along the y axis
    p[0] = np.mean(data)

    # work with the mean subtracted data from now on
    data = data - p[0]

    # what is the initial slope of the curve
    if data[10] > data[0]:
        slope = 1
    else:
        slope = -1

    # the second is the amplitude for the sin function
    p[1] = slope * np.max(data) / 2

    # the third is the amplitude for the cos function
    p[2] = slope * np.max(data)

    # the fourth is the damping ratio and is typically small, 0.001 < zeta < 0.02
    p[3] = 0.001

    # the fifth is the undamped natural frequency
    # first remove the data around zero
    dataMasked = ma.masked_inside(data, -0.1, 0.1)
    # find the zero crossings
    zeroCrossings = np.where(np.diff(np.sign(dataMasked)))[0]
    # remove redundant crossings
    zero = []
    for i, v in enumerate(zeroCrossings):
        if abs(v - zeroCrossings[i - 1]) > 20:
            zero.append(v)
    # get the samples per period
    samplesPerPeriod = 2*np.mean(np.diff(zero))
    # now the frequency
    p[4] = (samplesPerPeriod / float(sampleRate) /2. / pi)**-1
    if np.isnan(p[4]):
        p[4] = 4.

    return p
Example #41
def sign(x, tolerance=1e-7):
    """
    Function:  sign_vector
    --------------------
    computes the sign sequence of vector x (within some tolerance):
        sign_vector(x) = element-wise sign function on x

        x: input vector
        tolerance: the bounds around 0 to be considered unsigned

    returns: the sign sequence of vector x
    """
    z = np.copy(x)
    mask = ma.masked_inside(z, -tolerance, tolerance).mask
    z[mask] = 0

    return np.sign(z, out=z)
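A minimal usage sketch:

import numpy as np

v = np.array([2.0e-8, -0.5, 3.0, -1.0e-9])
print(sign(v))   # [ 0. -1.  1.  0.] -> values within the default tolerance are treated as zero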
Example #42
def make_guess(data, sampleRate):
    '''Returns a decent starting point for fitting the decaying oscillation
    function.

    '''
    p = np.zeros(5)

    # the first unknown is the shift along the y axis
    p[0] = np.mean(data)

    # work with the mean subtracted data from now on
    data = data - p[0]

    # what is the initial slope of the curve
    if data[10] > data[0]:
        slope = 1
    else:
        slope = -1

    # the second is the amplitude for the sin function
    p[1] = slope * np.max(data) / 2

    # the third is the amplitude for the cos function
    p[2] = slope * np.max(data)

    # the fourth is the damping ratio and is typically small, 0.001 < zeta < 0.02
    p[3] = 0.001

    # the fifth is the undamped natural frequency
    # first remove the data around zero
    dataMasked = ma.masked_inside(data, -0.1, 0.1)
    # find the zero crossings
    zeroCrossings = np.where(np.diff(np.sign(dataMasked)))[0]
    # remove redundant crossings
    zero = []
    for i, v in enumerate(zeroCrossings):
        if abs(v - zeroCrossings[i - 1]) > 20:
            zero.append(v)
    # get the samples per period
    samplesPerPeriod = 2 * np.mean(np.diff(zero))
    # now the frequency
    p[4] = (samplesPerPeriod / float(sampleRate) / 2. / pi)**-1
    if np.isnan(p[4]):
        p[4] = 4.

    return p
Example #43
def isBadReg(t):
    """
    Cut out the bad regions.

    Parameters
    ---------
    t : time

    Returns
    -------
    mask : mask indicating bad values. True is bad.

    """
    tm = ma.masked_array(t, copy=True)
    for r in cutList:
        tm = ma.masked_inside(tm, r['start'], r['stop'])
    mask = tm.mask
    return mask
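A hedged usage sketch; cutList is assumed to be a module-level list of {'start', 'stop'} ranges, as in the getseg example above.

import numpy as np

cutList = [{'start': 100., 'stop': 110.}]     # hypothetical bad-time range
t = np.linspace(0., 200., 2001)
bad = isBadReg(t)
print(bad.sum())                              # number of samples falling inside the cut range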
Example #44
def getT(time,P,epoch,wd):
    """
    Get Transits

    time : Time series
    P    : Period
    epoch: epoch
    wd   : How much data to return for each slice.

    Returns
    -------
    Time series phase folded with everything but the transits masked out.

    """
    tfold = time - epoch # Slide transits to 0, P, 2P
    tfold = np.mod(tfold,P)
    tfold = ma.masked_inside(tfold,wd/2,P-wd/2)
    tfold = ma.masked_invalid(tfold)
    return tfold
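A usage sketch with synthetic cadence times (units of days assumed):

import numpy as np
import numpy.ma as ma

t = np.arange(0., 30., 0.02)            # 30 days of evenly sampled times
tf = getT(t, P=10., epoch=1., wd=0.5)   # keep only +/-0.25 d around each transit
print(ma.count(tf))                     # number of cadences that survive the mask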
Example #45
def isBadReg(t):
    """
    Cut out the bad regions.

    Parameters
    ---------
    t : time

    Returns
    -------
    mask : mask indicating bad values. True is bad.
    """
    cutpath = os.path.join(os.environ['KEPDIR'],'ranges/cut_time.txt')
    rec = atpy.Table(cutpath,type='ascii').data
    tm = ma.masked_array(t,copy=True)
    for r in rec:
        tm = ma.masked_inside(tm,r['start'],r['stop'])
    mask = tm.mask
    return mask
Example #46
    def _clean_timestamps(self, track, stime, etime):
        '''
        Cut track in time and make array with timestamps monotonic.
        @param track:
        @type track:
        @return:
        @rtype:
        '''
        data = ma.masked_inside(track['timestamp'], stime, etime)
        track = track.compress(data.mask)
        # Eliminate repetitive points.
        times, indices = np.unique(track['timestamp'], return_index=True)
        track = track[indices]
        # Here we can still have points reversed in time. Fix it.
        tdifs = np.ediff1d(track['timestamp'], to_begin=1)
        # At first let's find ends of track's chunks delimited by timeout,
        # if any.
        chunk_end_idxs = np.where(tdifs > self.maxtimediff)[0]
        # Index of timestamp from which no chunks allowed, just track.
        safe_time_idx = np.where(track['timestamp'] <
                                 stime + self.safe_time)[0]
        if len(chunk_end_idxs) > 0:
            track_start_idx = 0
            track_end_idx = chunk_end_idxs[0]
            # Handle the situation where small track chunks exist before the
            # window open time.
            if len(safe_time_idx) > 0:
                safe_time_idx = safe_time_idx[-1]
                for chunk_end_idx in chunk_end_idxs:
                    if chunk_end_idx < safe_time_idx:
                        track_start_idx = chunk_end_idx + 1
                    else:
                        track_end_idx = chunk_end_idx
                        break
            track = track[track_start_idx:track_end_idx]
            tdifs = tdifs[track_start_idx:track_end_idx]

        # Eliminate reverse points.
        data = ma.masked_greater(tdifs, 0)
        track = track.compress(data.mask)
        return track
Example #47
def fit_gauss(x, y, pos_x, fit_width=30, yerr=0, ax=None):
    """ fit gaussian function to given x- and y-data at position pos_x

        @return mean, width and content of gauss
    """
    import matplotlib.pyplot as plt
    import numpy.ma as ma
    mask = ma.masked_inside(x, pos_x - fit_width, pos_x + fit_width).mask
    x = x[mask]
    y = y[mask]
    p0=[np.median(x), pos_x, 1, 5]
    coeff, covar = curve_fit(gauss, x, y, p0=p0)
    errs = np.sqrt(np.diag(covar))
    mu = ufloat(coeff[1], errs[1])
    A = ufloat(coeff[0], errs[0])
    sigma = ufloat(coeff[2], errs[2])
    if ax is not None:
        xs = np.linspace(min(x), max(x), 200)
        ax.plot(xs, gauss(xs, *coeff), label='Gaußscher Fit')
        ax.errorbar(x, y, yerr=np.sqrt(y), fmt='+', label='Datenpunkte')
    return mu, sigma, A
Example #48
    def _trim_spec(self, event):
        """Trim out the section of data between a set of x values."""
        x = event.xdata
        trimline = np.abs(self.rawspec[self.order][:,0]-x).argmin()
        # This section handles the case where there are currently no trim
        # sections defined for the data. This is detected because the list
        # that holds an array of trim points for each order/band is initially
        # set to 0 for a band/order. This trimming algorithm is not especially
        # robust if something unexpected happens between defining the first and
        # second boundary of the region.
        if type(self.spec_trim_points[self.order]) == int:
            self.spec_trim_points[self.order] = np.array(
                                [[self.rawspec[self.order][trimline,0]]])
            self.base_draw()
            self.ax.axvline(self.spec_trim_points[self.order][0,0],color='red')
            self.fig1.canvas.draw()
        elif (np.shape(self.spec_trim_points[self.order])[1] == 1
              and self.state['trimmed'][self.order] != True):
            self.spec_trim_points[self.order]= \
                      np.append(self.spec_trim_points[self.order],
                      [[self.rawspec[self.order][trimline,0]]], axis=1)
            self.base_draw()
            self.ax.axvline(self.spec_trim_points[self.order][0,0],color='red')
            self.ax.axvline(self.spec_trim_points[self.order][0,1],color='red')
            self.ax.axvspan(self.spec_trim_points[self.order][0,0],
                            self.spec_trim_points[self.order][0,1],
                            color='black',alpha=0.5)
            self.fig1.canvas.draw()
            start = int(np.where(self.spec_trim_points[self.order][0,0] ==
                                self.rawspec[self.order][:,0])[0])
            start = self.rawspec[self.order][start,0]
            end = int(np.where(self.spec_trim_points[self.order][0,1] ==
                                self.rawspec[self.order][:,0])[0])
            end = self.rawspec[self.order][end,0]
            del_splice = np.array([start,end])
            self.rawspec[self.order] = ma.masked_inside(
                                            self.rawspec[self.order],start,end)
            self.fit[self.order] = ma.masked_inside(
                                                self.fit[self.order],start,end)
            self.norm[self.order] = ma.masked_inside(
                                               self.norm[self.order],start,end)
            self.sm[self.order] = ma.masked_inside(
                                               self.sm[self.order],start,end)

            T = str(np.shape(self.spec_trim_points[self.order])[0])
            print(T+' Sections Are Trimmed From Data. \n'
                    'Trimming is now finished. To trim another section press'
                    ' T again. \n')
            self.state['trimming'] = False
            self.state['trimmed'][self.order] = True
            return

        # This section handles the case where there is already at least one
        # trim section in the data. The new section is built and appended to
        # the list of regions.
        elif (self.state['trimmed'][self.order] == True and
            self.spec_trim_points[self.order][-1,1] != 0):
            new = np.array([self.rawspec[self.order][trimline,0],0])
            self.spec_trim_points[self.order] = np.vstack(
                                        (self.spec_trim_points[self.order],
                                        new))
            self.base_draw()
            self.ax.axvline(self.spec_trim_points[self.order][-1,0],
                            color='red')
            self.fig1.canvas.draw()
        elif self.spec_trim_points[self.order][-1,1] == 0:
            self.spec_trim_points[self.order][-1,1] = \
                                           self.rawspec[self.order][trimline,0]
            self.base_draw()
            self.ax.axvline(self.spec_trim_points[self.order][-1,0],
                            color='red')
            self.ax.axvline(self.spec_trim_points[self.order][-1,1],
                            color='red')
            self.ax.axvspan(self.spec_trim_points[self.order][-1,0],
                            self.spec_trim_points[self.order][-1,1],
                            color='black',alpha=0.5)
            self.fig1.canvas.draw()
            start = int(np.where(self.spec_trim_points[self.order][-1,0] ==
                                self.rawspec[self.order][:,0])[0])
            start = self.rawspec[self.order][start,0]
            end = int(np.where(self.spec_trim_points[self.order][-1,1] ==
                                self.rawspec[self.order][:,0])[0])
            end = self.rawspec[self.order][end,0]
            self.rawspec[self.order] = ma.masked_inside(
                                            self.rawspec[self.order],start,end)
            self.fit[self.order] = ma.masked_inside(
                                                self.fit[self.order],start,end)
            self.norm[self.order] = ma.masked_inside(
                                               self.norm[self.order],start,end)
            self.sm[self.order] = ma.masked_inside(
                                               self.sm[self.order],start,end)
            T = str(np.shape(self.spec_trim_points[self.order])[0])
            print(T+' Sections Are Trimmed From Data. \n'
                    'Trimming is now finished. To trim another section press'
                    ' T again. \n')
            self.state['trimming'] = False
            return
Example #49
    def __call__(self, varName, sDTime, eDTime, BBox=None, res=None, delT=None, LonCrdCent="A", verbose=True):
        '''
        res     : spa. res. of 2d-array   # not in service
        sDTime  : DTime bound left
        eDTime  : DTime bound right
        LonCrdCent: Center longitude of map coordination.
            "A"(tlantic): longitute takes range of -180 ~ 180 (CloudSat Default)
            "P"(acific) : longitute takes range of 0 ~ 360

        '''

        csData    = CloudSat_data()

        srcDir    = os.path.join( self.dataDir, 
                                  "%s-%s.%s"%(
                                   self.prdLv,
                                   self.prdName,
                                   self.prdVer) )

        assert os.path.exists( srcDir ), "{} does not exist.".format( srcDir)

        try:
          Granule   = self.search_granules( srcDir, sDTime, eDTime, BBox , verbose)
        except IOError:
          print "No granule    by %s"%(__file__.split("/")[-1])
          raise IOError

        nbin      = self.nbin
        #outSize   = sum( [ len(gra[2]) for gra in Granule ] ), Granule[0][2].shape[1], nbin
        #outSize   = sum( [ len(gra[2]) for gra in Granule ] ), nbin
        #Lat       = empty( outSize[:-1], "float32")
        #Lon       = empty( outSize[:-1], "float32")
        #aOut      = empty( outSize,      "float32")
        #DTime     = empty( outSize[:-1], "object" )

        Lat       = deque([]) 
        Lon       = deque([]) 
        aOut      = deque([]) 
        DTime     = deque([]) 

        #prvI      = 0
        for granule in Granule:
            srcPath, dtime, lat, lon, idx   = granule

            '''
            csData.srcPath.append(srcPath)
            csData.recLen.append( len(dtime) )    # number of data record for each file

            nxtI        = prvI + len(dtime)

            aOut[prvI:nxtI] = self.func_read( srcPath, varName, idx.tolist() )
            Lat[prvI:nxtI]  = lat 
            Lon[prvI:nxtI]  = lon
            DTime[prvI:nxtI]= dtime

            """
            if res != None and delT == None:
                csData.griddata.append( granule2map( lat, lon, aOut[prvI:nxtI], BBox, res ) )
                gpmData.grid    = GridCoordinates(mapCode, BBox=BBox)
            """

            prvI  = nxtI
            '''

            mskLat = ma.masked_inside( lat, BBox[0][0], BBox[1][0] ).mask

            [[lllat, lllon],[urlat,urlon]] = BBox
            if ( (lllon<=180) & (urlon<=180) ):
                mskLon  = ma.masked_outside( lon, BBox[0][1], BBox[1][1] ).mask
            elif ( (lllon<=180) & (180<urlon) ):
                mskLon  = ma.masked_inside( lon, urlon-360, lllon ).mask
                atmp  = ma.masked_inside( lon, urlon-360, lllon )
            elif ( (180<lllon) & (180<urlon) ):
                mskLon  = ma.masked_outside( lon, lllon-360, urlon-360 ).mask
            else:
                print "Check BBox",BBox
                print "by: search_granules.py"
                sys.exit()

            #msk    = mskLat * mskLon
            msk    = mskLat + mskLon   # 2018/1/15


            if type(msk)== np.bool_:  # if msk == False
              msk = array([False]*len(lat))

            Lat  .extend(lat  [msk])
            Lon  .extend(lon  [msk])
            aOut .extend(self.func_read( srcPath, varName, idx.tolist() )[msk,:])

            dtime  = dtime[msk]
            DTime.extend(dtime)

            csData.srcPath.append(srcPath)
            csData.recLen.append( len(dtime) )    # number of data record for each file

        if LonCrdCent =="A":
            pass
        elif LonCrdCent=="P":
            Lon =(ma.masked_inside(array(Lon), 0, 180)+360).data

        # Time binning
        if delT != None:
            dtBnd  = dtrange(sDTime, eDTime, delT)
        else:
            dtBnd  = [sDTime, eDTime]

        csData.dtime   = bin_bytbound( DTime, dtBnd, array(DTime) )
        csData.lat     = bin_bytbound( DTime, dtBnd, array(Lat  ) )
        csData.lon     = bin_bytbound( DTime, dtBnd, array(Lon  ) )
        csData.data    = bin_bytbound( DTime, dtBnd, array(aOut ) )

        return csData
Example #50
    def search_granules(self, srcDir, sDTime, eDTime, BBox=[[-90,-180],[90,180]],  verbose=True):
        '''
        BBox    : [[lllat,lllon], [urlat,urlon]]    /* lat: -90 ~ 90 */
                                                    /* lon: -180 ~ 180 */
        '''
        srcPATH = get_path(srcDir, sDTime, eDTime)
        if len(srcPATH)==0:
            print "!"*50
            print "Warning     by %s"%(__file__.split("/")[-1])
            print "No file for the time [%s]-[%s]"%(sDTime,eDTime)
            print "in %s"%(srcDir)
            print "!"*50
            raise IOError
        '''
        gtrkDim = [get_gtrack_dim(path, self.func_read, self.cached, self.cacheDir)
                           for path in srcPATH]
        '''
        gtrkDim = [get_gtrack_dim(path, self.func_read_vs, self.cached, self.cacheDir, verbose=verbose)
                           for path in srcPATH]

        DTime, Lat, Lon   = zip(*gtrkDim)
        Granule           = deque([])
        for dtime, lat, lon, path in map(None, DTime, Lat, Lon, srcPATH):

            [[lllat, lllon],[urlat,urlon]] = BBox
            if ( (lllon<=180) & (urlon<=180) ):
                mskLon  = ma.masked_outside( lon, BBox[0][1], BBox[1][1] ).mask
            elif ( (lllon<=180) & (180<urlon) ):
                mskLon  = ma.masked_inside( lon, urlon-360, lllon ).mask
                atmp  = ma.masked_inside( lon, urlon-360, lllon )
            elif ( (180<lllon) & (180<urlon) ):
                mskLon  = ma.masked_outside( lon, lllon-360, urlon-360 ).mask
            else:
                print "Check BBox",BBox
                print "by: search_granules.py"
                sys.exit()
            #--
            mskLat  = ma.masked_outside( lat, BBox[0][0], BBox[1][0] ).mask
            mskTime = ma.masked_outside( dtime, sDTime, eDTime).mask
            #mask    = (mskLat + mskLon).all(1) + mskTime
            #mask    = (mskLat + mskLon).all(0) + mskTime
            mask    = mskLat + mskLon + mskTime
            if not mask.all():
                idx = ma.array( arange(dtime.size), "int", mask=mask).compressed()
                Granule.append([path,
                                dtime[idx],
                                lat[idx],
                                lon[idx],
                                idx
                                ])

                if verbose==True:
                    print '* [V] ground track dimension (%s): %s'%(self.cached,path)
            else:
                if verbose==False:
                    print '* [_] ground track dimension (%s): %s'%(self.cached,path)

        summary = '| [{}] granules intersects domain {} out of [{}] total between ({}-{}) |\n'    \
                  .format( len(Granule), tuple(BBox), len(srcPATH), sDTime, eDTime )

        line    = '+' + '-'*len(summary[3:]) + '+\n'

        print line + summary + line

        return list(Granule)
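# A minimal standalone sketch of the wrapped-longitude masking used above
# when the bounding box straddles the 180-degree meridian; the sample
# longitudes below are made up for illustration only.
import numpy as np
import numpy.ma as ma

lon = np.array([-175., -60., 0., 60., 175.])
lllon, urlon = 170., 190.                  # box reaching past 180 deg
# Points *outside* the wrapped box lie between urlon-360 and lllon, so
# masking that interval flags exactly the longitudes to be discarded.
mskLon = ma.masked_inside(lon, urlon - 360., lllon).mask
# mskLon -> [False,  True,  True,  True, False]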
Beispiel #51
0
#
#axSpks.set_ylabel('Neuron number', fontsize=fs)
#ax[1].set_ylabel('$V_m\ (mV)$', fontsize=fs)
#ax[0].set_xlabel('$time\ (s)$', fontsize=fs)
#pl.subplots_adjust(left = 0.10, right = 0.9, top = 0.95, bottom = 0.11, hspace = 0.19)
#pl.savefig('cluster_iv_{}.png'.format(iv_seed), dpi=256.0)
#pl.savefig('cluster_iv_{}_zoomed.png'.format(iv_seed), dpi=256.0)
#pl.savefig('nodelay_synchrony.png', dpi=256.0)
#%%
fig = pl.figure(figsize=(8*1.5, 6*1.5))

gs = GridSpec(2, 1, height_ratios=[3, 1])
axSpks = fig.add_subplot(gs[0])
axHist = fig.add_subplot(gs[1], sharex=axSpks)

mask = ma.masked_inside(senders, 0, N - 1)
axHist.hist(spikes[mask.mask]*h/1000, bins=int(SimTime/20.), color='C3', ls='-', histtype='step', lw=3.)

axSpks.plot(spikes[mask.mask]*h/1000, senders[mask.mask], marker='.', ls='', color='k', ms = 5)

axSpks.set_xlim([4.0, 25.1])
axSpks.set_xticks(np.arange(4., 25.1, 3.))
axSpks.set_ylim([0, 100])

axHist.set_yticks([50, 100])
axHist.set_ylim([0, 100])
axSpks.set_yticks([0, 50, 100])

axSpks.set_ylabel('Neuron number', fontsize=fs)
axHist.set_ylabel('Firing rate', fontsize=fs)
axHist.set_xlabel('$time\ (s)$', fontsize=fs)
import numpy.ma as ma

x = [0.31, 1.2, 0.01, 0.2, -0.4, -1.1]
ma.masked_inside(x, -0.3, 0.3)
ma.masked_inside(x, 0.3, -0.3)
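# Both calls mask the values that fall inside the interval [-0.3, 0.3];
# the order in which the two bounds are given does not matter, so each
# call is expected to return
#   masked_array(data=[0.31, 1.2, --, --, -0.4, -1.1],
#                mask=[False, False, True, True, False, False],
#                fill_value=1e+20)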
Beispiel #53
0
    def __call__(self, varName, sDTime, eDTime, BBox=None, res=None, delT=None, verbose=True):
        '''
        res     : spatial resolution of the 2-d array   # not in service
        sDTime  : left (start) DTime bound
        eDTime  : right (end) DTime bound
        '''

        csData    = CloudSat_data()

        srcDir    = os.path.join( self.dataDir, 
                                  "%s-%s.%s"%(
                                   self.prdLv,
                                   self.prdName,
                                   self.prdVer) )

        assert os.path.exists( srcDir ), "{} does not exist.".format( srcDir )

        try:
          Granule   = self.search_granules( srcDir, sDTime, eDTime, BBox , verbose)
        except IOError:
          print "No granule    by %s"%(__file__.split("/")[-1])
          raise IOError

        nbin      = self.nbin
        #outSize   = sum( [ len(gra[2]) for gra in Granule ] ), Granule[0][2].shape[1], nbin
        #outSize   = sum( [ len(gra[2]) for gra in Granule ] ), nbin
        #Lat       = empty( outSize[:-1], "float32")
        #Lon       = empty( outSize[:-1], "float32")
        #aOut      = empty( outSize,      "float32")
        #DTime     = empty( outSize[:-1], "object" )

        Lat       = deque([]) 
        Lon       = deque([]) 
        aOut      = deque([]) 
        DTime     = deque([]) 

        #prvI      = 0
        for granule in Granule:
            srcPath, dtime, lat, lon, idx   = granule

            '''
            csData.srcPath.append(srcPath)
            csData.recLen.append( len(dtime) )    # number of data record for each file

            nxtI        = prvI + len(dtime)

            aOut[prvI:nxtI] = self.func_read( srcPath, varName, idx.tolist() )
            Lat[prvI:nxtI]  = lat 
            Lon[prvI:nxtI]  = lon
            DTime[prvI:nxtI]= dtime

            """
            if res != None and delT == None:
                csData.griddata.append( granule2map( lat, lon, aOut[prvI:nxtI], BBox, res ) )
                gpmData.grid    = GridCoordinates(mapCode, BBox=BBox)
            """

            prvI  = nxtI
            '''

            mskLat = ma.masked_inside( lat, BBox[0][0], BBox[1][0] ).mask
            mskLon = ma.masked_inside( lon, BBox[0][1], BBox[1][1] ).mask
            msk    = mskLat * mskLon


            if type(msk)== np.bool_:  # if msk == False
              msk = array([False]*len(lat))

            Lat  .extend(lat  [msk])
            Lon  .extend(lon  [msk])
            aOut .extend(self.func_read( srcPath, varName, idx.tolist() )[msk,:])

            dtime  = dtime[msk]
            DTime.extend(dtime)

            csData.srcPath.append(srcPath)
            csData.recLen.append( len(dtime) )    # number of data record for each file

        # Time binning
        if delT != None:
            dtBnd  = dtrange(sDTime, eDTime, delT)
        else:
            dtBnd  = [sDTime, eDTime]

        csData.dtime   = bin_bytbound( DTime, dtBnd, array(DTime) )
        csData.lat     = bin_bytbound( DTime, dtBnd, array(Lat  ) )
        csData.lon     = bin_bytbound( DTime, dtBnd, array(Lon  ) )
        csData.data    = bin_bytbound( DTime, dtBnd, array(aOut ) )

        return csData
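# A small sketch of the in-box selection used in the loop above: one mask
# per coordinate from masked_inside, multiplied together to get the points
# inside the BBox.  The coordinates below are toy values.
import numpy as np
import numpy.ma as ma

lat  = np.array([-20., 10., 35., 60.])
lon  = np.array([100., 140., 200., 250.])
BBox = [[0., 120.], [50., 220.]]           # [[lllat, lllon], [urlat, urlon]]

mskLat = ma.masked_inside(lat, BBox[0][0], BBox[1][0]).mask
mskLon = ma.masked_inside(lon, BBox[0][1], BBox[1][1]).mask
msk    = mskLat * mskLon                   # element-wise AND of the two masks
# msk -> [False,  True,  True, False]

# When neither mask flags any point, .mask degenerates to the scalar False
# (an np.bool_) rather than an array, which is why the code above expands
# that case to a full boolean array before using it for indexing.
if type(msk) == np.bool_:
    msk = np.array([False] * len(lat))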
Beispiel #54
0
def HesseMatrix_to_ExactFreqs_wo_zeroFreqs(infile="HesseMatrix",outfile=None,output_as_nparray=False):
    """
    -chef
    """
    ################################
    ### check input file
    ################################
    my.checkfile(infile)
    infile = os.path.abspath(infile)
    #print "infile:",infile
    #print "outfile",outfile

    ## You have: hbar(hartree/(bohrradius^2 u))^(1/2)
    ## You want: meV
    ##    * 637.33912
    dynMatToFreq = 637.339113585464
    infile = str(infile)

    ##########################################
    ### import and extract eigenvalues
    ##########################################
    import numpy as np
    from numpy import linalg
    h = np.loadtxt(infile)
    #### check whether we have just a unary or an alloy (important for masses)
    elements = get.element_real(path = infile)
    elements_amount = len(elements.split(" "))
    #print "elements:", elements,len(elements.split(" "))
    #print "---h---"
    #print h
    #print "---h---"
    if elements_amount == 1:
        mass = db.AtomicWeight()
        d = h/mass

    else:
        #print "infile",infile
        file_w = infile+"_weights_all"
        #print "ww:", file_w
        my.checkfile(file_w)
        massesall = np.loadtxt(file_w)
        d = h/np.sqrt(massesall)
    #print "---d---"
    #print d
    #print "---d---"

    #quit()
#    d_sym = ((np.conjugate(d) - d)/2 + d)/mass
#    print "dc:",dc
#    print d
#    print ""
    eigenvalues,   eigenvectors = linalg.eig(d)
    eigenvalues_realpart = np.sort(eigenvalues.real)
#    print eigenvalues_realpart


#    eigenvalues_sym, eigenvectors_sym = linalg.eig(d_sym)
#    eigenvalues_realpart_sym = np.sort(eigenvalues_sym.real)

    #### Chop vales smaller 1e-8 (this is little)
    tol=1e-8
    from numpy import ma
    eigenvalues_out = ma.masked_inside(eigenvalues_realpart,-tol, tol)
    ev =  eigenvalues_out.filled(0)


    ### check if we have negative parts
    if ev[0] < 0:
        my.exit("ERROR: Negative Eigenvalues in "+infile)

    ### some conversions
    a = np.sqrt(ev)*dynMatToFreq

    ### check if first 3 eigenvalues are 0
    if a[0] != 0 : my.exit("a[0] "+str(a[0])+" is not 0")
    if a[1] != 0 : my.exit("a[1] "+str(a[1])+" is not 0")
    if a[2] != 0 : my.exit("a[2] "+str(a[2])+" is not 0")

    out = np.nan_to_num(a[3:])
    out = out[::-1]

    if output_as_nparray == True: return out

    ## from here: print to screen or print to file
    if outfile != None:
#        from numpy import savetxt
        np.savetxt(str(outfile),out, fmt='%.10f')
        return str(outfile)+" written"
    else:
#        print list(out)
        for i in out:
            print i
    return " "
## Constructing masked arrays
#
# Masked arrays are intended to be able to replace normal np.ndarrays,
# and so the same construction functions work:
x = ma.array([5, 2, 9, -4])
y = ma.array([5, 2., 9, -4.])
o = ma.ones(5)
z = ma.zeros((3, 2))
e = ma.empty(3)
a = ma.arange(6)
# These functions create masked arrays with all elements not masked
# initially. However, masked arrays can be constructed in other ways,
# which automatically set more interesting masks.
mx = ma.masked_equal([5, 2, 9, -4], 2)
my = ma.masked_greater([5, 2., 9, -4.], 3)
mis = ma.masked_inside(x, 1, 6)
miv = ma.masked_invalid([1, np.nan, 5, np.inf])
## And more:
# ma.greater_equal
# ma.masked_less
# ma.masked_less_equal
# ma.masked_not_equal
# ma.masked_object
# ma.masked_outside
# ma.masked_values
# ma.masked_where

# To get a masked array's unmasked data, and fill in masked elements, use
# 'filled':
print "mx, with masked elements filled with 77", mx.filled(77)
print "mx, with masked elements filled with 88", ma.filled(mx, 88)
interpMo_He = RegularGridInterpolator((glist_He,tlist_He), tableMo_He, bounds_error=False, fill_value=None)
interpRo_He = RegularGridInterpolator((glist_He,tlist_He), tableRo_He, bounds_error=False, fill_value=None)

#read in the BSS data and ...

#CREATE WAVELENGTH MASK
#edges of masked regions adjusted by hand in TestMasks.ipynb to
#block out the geocoronal lines.

#w1, f1, err1 = np.loadtxt('lcb201010_x1dsum.txt.bin30.unred', unpack=True)
w1, f1, stdev1, err1 = np.loadtxt('../Natalie/lcb201010_x1dsum.txt.bin30.unred.newerrors', unpack=True)
add_disp = 9.1e-18
#adding intrinsic dispersion
toterr1 = np.sqrt(err1**2.0 + add_disp**2.0)
w1_1 = ma.masked_less(w1, 1141)
w1_2 = ma.masked_inside(w1_1, 1178., 1250.)
w1_3 = ma.masked_inside(w1_2, 1292., 1318.)
w1_4 = ma.masked_inside(w1_3, 1348., 1367.)
w1_5 = ma.masked_inside(w1_4, 1332., 1340.)
dataw1 = ma.masked_greater(w1_5, 1725.)
dataw1c = 1.0*dataw1.compressed()

#w2, f2, err2 = np.loadtxt('lcb202010_x1dsum.txt.bin30.unred', unpack=True)
w2, f2, stdev2, err2 = np.loadtxt('../Natalie/lcb202010_x1dsum.txt.bin30.unred.newerrors', unpack=True)
#adding intrinsic dispersion
toterr2 = np.sqrt(err2**2.0 + add_disp**2.0)
w2_1 = ma.masked_less(w2, 1142)
w2_2 = ma.masked_inside(w2_1, 1182., 1250.)
w2_3 = ma.masked_inside(w2_2, 1290., 1318.)
w2_4 = ma.masked_inside(w2_3, 1348., 1368.)
w2_5 = ma.masked_inside(w2_4, 1330., 1340.)
Beispiel #57
0
#for idx, w_p in enumerate(xrange(Nneur)):
#    pl.plot(np.linspace(0, SimTime/1000, int((Tsim + recInt - 1)/recInt)), Vrec[:, idx], label=str(idx))
#pl.legend()
#%%
# combine all spike
import numpy.ma as ma
#
spikes = spike_times[:num_spikes_neur[0], 0]
senders = np.array([0]*num_spikes_neur[0])
for i, nsp in zip(xrange(1, Nneur), num_spikes_neur[1:]):
    spikes = np.concatenate((spikes, spike_times[:nsp, i]))
    senders = np.concatenate((senders, [i]*nsp))

(f, ax) = pl.subplots(nw, 1, sharex=True, sharey=True)
if type(ax) != np.ndarray:
    ax = [ax]
#(f2, ax2) = pl.subplots(nw, 1, sharex=True, sharey=True)
#for idx, (a, a2) in enumerate(zip(ax, ax2)):
for idx, a in enumerate(ax):
    mask = ma.masked_inside(senders, N*idx, N*(idx + 1) - 1)
    a.hist(spikes[mask.mask]*h/1000, bins=int(SimTime/20), histtype='step')
#    a.plot(spikes[mask.mask]*h/1000, senders[mask.mask] - idx*N, '.')
    a.set_title(w_ps[idx])
    a.set_xlim((0, SimTime/1000))
    
#    a2.plot(np.linspace(0, SimTime/1000, int((Tsim + recInt - 1)/recInt)), Vrec[:, N*idx:N*(idx + 1)])
#    a2.set_xlim((0, SimTime/1000))
#pl.show()

pl.savefig('N_{}_rate_{:.2f}_w_n_{:.3f}_Ie_{:.2f}_pcon_{:.2f}_delayRND.png'.format(N, rate[0], w_n, I0, pcon))
    def return_subregions(self, ra, dec, theta=None, rot_xedges=None,
                          rot_yedges=None):
        """
        Returns the subregion number for each pair of RA and Dec, using
        either the stored parameters or those passed as function arguments.

        Parameters
        ----------
        ra : array-like
            A list of RAs to get subregion numbers for (in degrees)
            
        dec : array-like
            A list of Decs to get subregion numbers for (in degrees)
            
        theta : scalar (optional)
            Rotation angle that the mask's bounding box will be defined at
            with respect to the XY coordinate system of the mask.  If not
            given, the routine will look for a stored theta.  Units are
            degrees
            
        rot_xedges : array-like (optional)
            The x coordinates of the cell boundaries in the rotated
            coordinate system.  If not given, the routine will look for
            stored x edges.
            
        rot_yedges : array-like (optional)
            The y coordinates of the cell boundaries in the rotated
            coordinate system.  If not given, the routine will look for
            stored y edges.

        Returns
        -------
        subregions : numpy ndarray
            The subregion number for each of the points input.  The array
            shape is (len(ra),).  The subregion -1 is outside the bounding
            box (this only happens if you've set negative padding somewhere
            or have asked for things outside the mask).
        """
        #Check to make sure we have what we need, pull to local if we have
        #stored values but no given values
        if (theta is None):
            if (self._subregion_rotation is None):
                raise ValueError("ImageMask.return_subregions says: "
                                 "ERROR!  I don't have the rotation "
                                 "angle.  Please provide one.")
            else:
                theta=self._subregion_rotation
                
        if (rot_xedges is None):
            if (self._subregion_rotated_xedges is None):
                raise ValueError("ImageMask.return_subregions says: "
                                 "ERROR!  I don't have rotated x edges."
                                 "  Please provide them.")
            else:
                rot_xedges=self._subregion_rotated_xedges
                
        if (rot_yedges is None):
            if (self._subregion_rotated_yedges is None):
                raise ValueError("ImageMask.return_subregions says: "
                                 "ERROR!  I don't have rotated y edges.  "
                                 "Please provide them.")
            else:
                rot_yedges=self._subregion_rotated_yedges

        #Now that we know we have everything, put the ra and decs into x
        #and y coords
        x1, y1=self.ra_dec_to_xy(ra, dec)

        #Transform to the rotated coordinate system
        x2, y2=misc.rotate_coords(x1, y1, theta)

        #Now make masks for each row and column
        nx=len(rot_xedges)-1
        ny=len(rot_yedges)-1
        ymasks={}
        xmasks={}
        for i in range(nx):
            xmasks[i]=ma.masked_inside(x2, rot_xedges[i],
                                       rot_xedges[i+1]).mask
        for i in range(ny):
            ymasks[i]=ma.masked_inside(y2, rot_yedges[i],
                                       rot_yedges[i+1]).mask

        #Now use the masks to put numbers to each galaxy
        #No subregion defaults to -1
        subregion=-np.ones(len(ra))
        for ix in range(nx):
            for iy in range(ny):
                bin_number= nx*iy + ix
                thismask = xmasks[ix] & ymasks[iy]
                subregion[thismask]=bin_number

        return subregion
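# A minimal standalone sketch of the row/column binning idea used in
# return_subregions above: masked_inside gives one boolean mask per grid
# column and per grid row, and the cell number is nx*iy + ix.  The
# coordinates and edges below are toy values in the already-rotated frame.
import numpy as np
import numpy.ma as ma

x2 = np.array([0.5, 1.5, 2.5, 3.5])
y2 = np.array([0.5, 1.5, 0.5, 1.5])
rot_xedges = [0., 2., 4.]                  # nx = 2 columns
rot_yedges = [0., 1., 2.]                  # ny = 2 rows

nx, ny = len(rot_xedges) - 1, len(rot_yedges) - 1
subregion = -np.ones(len(x2))
for ix in range(nx):
    xmask = ma.masked_inside(x2, rot_xedges[ix], rot_xedges[ix + 1]).mask
    for iy in range(ny):
        ymask = ma.masked_inside(y2, rot_yedges[iy], rot_yedges[iy + 1]).mask
        subregion[xmask & ymask] = nx * iy + ix
# subregion -> [0., 2., 1., 3.]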
Beispiel #59
0
    def test_testOddFeatures(self):
        # Test of other odd features
        x = arange(20)
        x = x.reshape(4, 5)
        x.flat[5] = 12
        assert_(x[1, 0] == 12)
        z = x + 10j * x
        assert_(eq(z.real, x))
        assert_(eq(z.imag, 10 * x))
        assert_(eq((z * conjugate(z)).real, 101 * x * x))
        z.imag[...] = 0.0

        x = arange(10)
        x[3] = masked
        assert_(str(x[3]) == str(masked))
        c = x >= 8
        assert_(count(where(c, masked, masked)) == 0)
        assert_(shape(where(c, masked, masked)) == c.shape)
        z = where(c, x, masked)
        assert_(z.dtype is x.dtype)
        assert_(z[3] is masked)
        assert_(z[4] is masked)
        assert_(z[7] is masked)
        assert_(z[8] is not masked)
        assert_(z[9] is not masked)
        assert_(eq(x, z))
        z = where(c, masked, x)
        assert_(z.dtype is x.dtype)
        assert_(z[3] is masked)
        assert_(z[4] is not masked)
        assert_(z[7] is not masked)
        assert_(z[8] is masked)
        assert_(z[9] is masked)
        z = masked_where(c, x)
        assert_(z.dtype is x.dtype)
        assert_(z[3] is masked)
        assert_(z[4] is not masked)
        assert_(z[7] is not masked)
        assert_(z[8] is masked)
        assert_(z[9] is masked)
        assert_(eq(x, z))
        x = array([1., 2., 3., 4., 5.])
        c = array([1, 1, 1, 0, 0])
        x[2] = masked
        z = where(c, x, -x)
        assert_(eq(z, [1., 2., 0., -4., -5]))
        c[0] = masked
        z = where(c, x, -x)
        assert_(eq(z, [1., 2., 0., -4., -5]))
        assert_(z[0] is masked)
        assert_(z[1] is not masked)
        assert_(z[2] is masked)
        assert_(eq(masked_where(greater(x, 2), x), masked_greater(x, 2)))
        assert_(eq(masked_where(greater_equal(x, 2), x),
                   masked_greater_equal(x, 2)))
        assert_(eq(masked_where(less(x, 2), x), masked_less(x, 2)))
        assert_(eq(masked_where(less_equal(x, 2), x), masked_less_equal(x, 2)))
        assert_(eq(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)))
        assert_(eq(masked_where(equal(x, 2), x), masked_equal(x, 2)))
        assert_(eq(masked_where(not_equal(x, 2), x), masked_not_equal(x, 2)))
        assert_(eq(masked_inside(list(range(5)), 1, 3), [0, 199, 199, 199, 4]))
        assert_(eq(masked_outside(list(range(5)), 1, 3), [199, 1, 2, 3, 199]))
        assert_(eq(masked_inside(array(list(range(5)),
                                       mask=[1, 0, 0, 0, 0]), 1, 3).mask,
                   [1, 1, 1, 1, 0]))
        assert_(eq(masked_outside(array(list(range(5)),
                                        mask=[0, 1, 0, 0, 0]), 1, 3).mask,
                   [1, 1, 0, 0, 1]))
        assert_(eq(masked_equal(array(list(range(5)),
                                      mask=[1, 0, 0, 0, 0]), 2).mask,
                   [1, 0, 1, 0, 0]))
        assert_(eq(masked_not_equal(array([2, 2, 1, 2, 1],
                                          mask=[1, 0, 0, 0, 0]), 2).mask,
                   [1, 0, 1, 0, 1]))
        assert_(eq(masked_where([1, 1, 0, 0, 0], [1, 2, 3, 4, 5]),
                   [99, 99, 3, 4, 5]))
        atest = ones((10, 10, 10), dtype=np.float32)
        btest = zeros(atest.shape, MaskType)
        ctest = masked_where(btest, atest)
        assert_(eq(atest, ctest))
        z = choose(c, (-x, x))
        assert_(eq(z, [1., 2., 0., -4., -5]))
        assert_(z[0] is masked)
        assert_(z[1] is not masked)
        assert_(z[2] is masked)
        x = arange(6)
        x[5] = masked
        y = arange(6) * 10
        y[2] = masked
        c = array([1, 1, 1, 0, 0, 0], mask=[1, 0, 0, 0, 0, 0])
        cm = c.filled(1)
        z = where(c, x, y)
        zm = where(cm, x, y)
        assert_(eq(z, zm))
        assert_(getmask(zm) is nomask)
        assert_(eq(zm, [0, 1, 2, 30, 40, 50]))
        z = where(c, masked, 1)
        assert_(eq(z, [99, 99, 99, 1, 1, 1]))
        z = where(c, 1, masked)
        assert_(eq(z, [99, 1, 1, 99, 99, 99]))
Beispiel #60
0
def going_toward_bottom(a):
    # True where the angle a lies inside the inclusive interval [91, 269] degrees.
    return np.ma.getmaskarray(ma.masked_inside(a, 91, 269))
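# For example, with heading angles given in degrees, the returned mask is
# True exactly where the angle falls in the inclusive interval [91, 269],
# i.e. the half of the circle pointing toward the bottom (toy values):
import numpy as np
import numpy.ma as ma

angles = np.array([0., 45., 95., 180., 268., 300.])
going_toward_bottom(angles)
# -> array([False, False,  True,  True,  True, False])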