Example #1
def _integrate(data, masks, frequencies, time_axes, ctx):
    """
    Integrate over time and frequency on the TOD plane.

    :param data: TOD
    :param masks: mask
    :param frequencies: frequency axis after integration
    :param time_axes: time axis after integration
    :param ctx: context

    :return: TOD, time axis, frequency axis after integration
    """
    integration_time = ctx.params.integration_time
    integration_frequency = ctx.params.integration_frequency

    frequencies = smooth(np.atleast_2d(frequencies), integration_frequency, axis=1)[0]
    time_axes = smooth(np.atleast_2d(time_axes), integration_time, axis=1)[0]
    
    data = smooth(data, integration_time, axis=1)
    data = smooth(data, integration_frequency, axis=0)

    if masks is not None:
        # smooth the mask like the data, then re-binarize it
        masks = smooth(masks, integration_time, axis=1)
        masks = (smooth(masks, integration_frequency, axis=0) > 0)
    else:
        masks = get_empty_mask(data.shape)

    tod_vx = ma.array(data, mask=masks)
    tod_vy = ma.array(data, mask=masks)
    
    return tod_vx, tod_vy, frequencies, time_axes
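
Note: smooth and get_empty_mask come from the surrounding package and are not shown here. A minimal stand-in for experimenting with the example, assuming smooth simply block-averages along one axis (an illustration, not the package's actual implementation):

import numpy as np

def smooth(data, window, axis=1):
    # Illustrative stand-in: block-average `data` in chunks of `window` along `axis`.
    data = np.asarray(data)
    n = data.shape[axis] // window
    data = np.take(data, range(n * window), axis=axis)
    shape = list(data.shape)
    shape[axis:axis + 1] = [n, window]
    return data.reshape(shape).mean(axis=axis + 1)

def get_empty_mask(shape):
    # Illustrative stand-in: an all-False (nothing masked) boolean mask.
    return np.zeros(shape, dtype=bool)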
Example #2
    def __call__(self, value, clip=None):
        if clip is None:
            clip = self.clip

        if np.iterable(value):  # cbook.iterable was removed from matplotlib; np.iterable is equivalent
            vtype = 'array'
            val = ma.asarray(value).astype(float)
        else:
            vtype = 'scalar'
            val = ma.array([value]).astype(float)

        self.autoscale_None(val)
        vmin, vmax = self.vmin, self.vmax
        if vmin > vmax:
            raise ValueError("minvalue must be less than or equal to maxvalue")
        elif vmin <= 0:
            raise ValueError("values must all be positive")
        elif vmin == vmax:
            return 0.0 * val
        else:
            if clip:
                mask = ma.getmask(val)
                val = ma.array(np.clip(val.filled(vmax), vmin, vmax),
                               mask=mask)
            result = (ma.log(val) - np.log(vmin)) / (np.log(vmax) - np.log(vmin))
        if vtype == 'scalar':
            result = result[0]
        return result
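
The heart of this __call__ is the logarithmic rescaling on its last line. A standalone sketch of that mapping (the function name log_normalize is illustrative, not part of the original class):

import numpy as np
import numpy.ma as ma

def log_normalize(value, vmin, vmax):
    # Map values from [vmin, vmax] to [0, 1] on a log scale, preserving any mask.
    val = ma.asarray(value).astype(float)
    return (ma.log(val) - np.log(vmin)) / (np.log(vmax) - np.log(vmin))

print(log_normalize([1.0, 10.0, 100.0], 1.0, 100.0))  # -> [0.  0.5 1. ]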
Example #3
File: test_old_ma.py Project: numpy/numpy
    def test_testPut2(self):
        # Test of put
        d = arange(5)
        x = array(d, mask=[0, 0, 0, 0, 0])
        z = array([10, 40], mask=[1, 0])
        assert_(x[2] is not masked)
        assert_(x[3] is not masked)
        x[2:4] = z
        assert_(x[2] is masked)
        assert_(x[3] is not masked)
        assert_(eq(x, [0, 1, 10, 40, 4]))

        d = arange(5)
        x = array(d, mask=[0, 0, 0, 0, 0])
        y = x[2:4]
        z = array([10, 40], mask=[1, 0])
        assert_(x[2] is not masked)
        assert_(x[3] is not masked)
        y[:] = z
        assert_(y[0] is masked)
        assert_(y[1] is not masked)
        assert_(eq(y, [10, 40]))
        assert_(x[2] is masked)
        assert_(x[3] is not masked)
        assert_(eq(x, [0, 1, 10, 40, 4]))
Example #4
    def __call__(self, value, clip=None):
        if clip is None:
            clip = self.clip

        if np.iterable(value):
            vtype = 'array'
            val = ma.asarray(value).astype(float)
        else:
            vtype = 'scalar'
            val = ma.array([value]).astype(float)
        
        if self.staticrange is None:
            self.autoscale_None(val)
            vmin, vmax = self.vmin, self.vmax
        else:
            self.vmin, self.vmax = None, None
            self.autoscale_None(val)
            vmin, vmax = self.vmax - self.staticrange, self.vmax
        if vmin > vmax:
            raise ValueError("minvalue must be less than or equal to maxvalue")
        elif vmin == vmax:
            result = 0.0 * val
        else:
            vmin = float(vmin)
            vmax = float(vmax)
            rmin = float(self.rmin)
            rmax = float(self.rmax)
            if clip:
                mask = ma.getmask(val)
                val = ma.array(np.clip(val.filled(vmax), vmin, vmax),
                               mask=mask)
            result = (val - vmin) * ((rmax - rmin) / (vmax - vmin)) + rmin
        if vtype == 'scalar':
            result = result[0]
        return result
Example #5
    def test_pearsonr(self):
        # Tests some computations of Pearson's r
        x = ma.arange(10)
        with warnings.catch_warnings():
            # The tests in this context are edge cases, with perfect
            # correlation or anticorrelation, or totally masked data.
            # None of these should trigger a RuntimeWarning.
            warnings.simplefilter("error", RuntimeWarning)

            assert_almost_equal(mstats.pearsonr(x, x)[0], 1.0)
            assert_almost_equal(mstats.pearsonr(x, x[::-1])[0], -1.0)

            x = ma.array(x, mask=True)
            pr = mstats.pearsonr(x, x)
            assert_(pr[0] is masked)
            assert_(pr[1] is masked)

        x1 = ma.array([-1.0, 0.0, 1.0])
        y1 = ma.array([0, 0, 3])
        r, p = mstats.pearsonr(x1, y1)
        assert_almost_equal(r, np.sqrt(3)/2)
        assert_almost_equal(p, 1.0/3)

        # (x2, y2) have the same unmasked data as (x1, y1).
        mask = [False, False, False, True]
        x2 = ma.array([-1.0, 0.0, 1.0, 99.0], mask=mask)
        y2 = ma.array([0, 0, 3, -1], mask=mask)
        r, p = mstats.pearsonr(x2, y2)
        assert_almost_equal(r, np.sqrt(3)/2)
        assert_almost_equal(p, 1.0/3)
Example #6
 def test_user_missing_values(self):
     datastr ="A, B, C\n0, 0., 0j\n1, N/A, 1j\n-9, 2.2, N/A\n3, -99, 3j"
     data = StringIO.StringIO(datastr)
     basekwargs = dict(dtype=None, delimiter=',', names=True, missing='N/A')
     mdtype = [('A', int), ('B', float), ('C', complex)]
     #
     test = np.mafromtxt(data, **basekwargs)
     control = ma.array([(   0, 0.0,    0j), (1, -999, 1j),
                         (  -9, 2.2, -999j), (3,  -99, 3j)],
                         mask=[(0, 0, 0), (0, 1, 0), (0, 0, 1), (0, 0, 0)],
                         dtype=mdtype)
     assert_equal(test, control)
     #
     data.seek(0)
     test = np.mafromtxt(data,
                         missing_values={0:-9, 1:-99, 2:-999j}, **basekwargs)
     control = ma.array([(   0, 0.0,    0j), (1, -999, 1j),
                         (  -9, 2.2, -999j), (3,  -99, 3j)],
                         mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)],
                         dtype=mdtype)
     assert_equal(test, control)
     #
     data.seek(0)
     test = np.mafromtxt(data,
                         missing_values={0:-9, 'B':-99, 'C':-999j},
                         **basekwargs)
     control = ma.array([(   0, 0.0,    0j), (1, -999, 1j),
                         (  -9, 2.2, -999j), (3,  -99, 3j)],
                         mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)],
                         dtype=mdtype)
     assert_equal(test, control)
Example #7
    def setUp(self):
        self.r1 = Raster('examples/multifact.tif')
        self.r2 = Raster('examples/sites.tif')
        self.r3 = Raster('examples/two_band.tif')

        # r1
        data1 = np.array(
            [
                [1,1,3],
                [3,2,1],
                [0,3,1]
            ])
        # r2
        data2 = np.array(
            [
                [1,2,1],
                [1,2,1],
                [0,1,2]
            ])
        mask = [
            [False, False, False],
            [False, False, False],
            [False, False, False]
        ]
        self.data1 = ma.array(data=data1, mask=mask)
        self.data2 = ma.array(data=data2, mask=mask)
Example #8
    def test_unnamed_and_named_fields(self):
        # Test combination of arrays w/ & w/o named fields
        (_, x, _, z) = self.data

        test = stack_arrays((x, z))
        control = ma.array([(1, -1, -1), (2, -1, -1),
                            (-1, 'A', 1), (-1, 'B', 2)],
                           mask=[(0, 1, 1), (0, 1, 1),
                                 (1, 0, 0), (1, 0, 0)],
                           dtype=[('f0', int), ('A', '|S3'), ('B', float)])
        assert_equal(test, control)
        assert_equal(test.mask, control.mask)

        test = stack_arrays((z, x))
        control = ma.array([('A', 1, -1), ('B', 2, -1),
                            (-1, -1, 1), (-1, -1, 2), ],
                           mask=[(0, 0, 1), (0, 0, 1),
                                 (1, 1, 0), (1, 1, 0)],
                           dtype=[('A', '|S3'), ('B', float), ('f2', int)])
        assert_equal(test, control)
        assert_equal(test.mask, control.mask)

        test = stack_arrays((z, z, x))
        control = ma.array([('A', 1, -1), ('B', 2, -1),
                            ('A', 1, -1), ('B', 2, -1),
                            (-1, -1, 1), (-1, -1, 2), ],
                           mask=[(0, 0, 1), (0, 0, 1),
                                 (0, 0, 1), (0, 0, 1),
                                 (1, 1, 0), (1, 1, 0)],
                           dtype=[('A', '|S3'), ('B', float), ('f2', int)])
        assert_equal(test, control)
Example #9
    def test_matching_named_fields(self):
        # Test combination of arrays w/ matching field names
        (_, x, _, z) = self.data
        zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
                      dtype=[('A', '|S3'), ('B', float), ('C', float)])
        test = stack_arrays((z, zz))
        control = ma.array([('A', 1, -1), ('B', 2, -1),
                            ('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
                           dtype=[('A', '|S3'), ('B', float), ('C', float)],
                           mask=[(0, 0, 1), (0, 0, 1),
                                 (0, 0, 0), (0, 0, 0), (0, 0, 0)])
        assert_equal(test, control)
        assert_equal(test.mask, control.mask)

        test = stack_arrays((z, zz, x))
        ndtype = [('A', '|S3'), ('B', float), ('C', float), ('f3', int)]
        control = ma.array([('A', 1, -1, -1), ('B', 2, -1, -1),
                            ('a', 10., 100., -1), ('b', 20., 200., -1),
                            ('c', 30., 300., -1),
                            (-1, -1, -1, 1), (-1, -1, -1, 2)],
                           dtype=ndtype,
                           mask=[(0, 0, 1, 1), (0, 0, 1, 1),
                                 (0, 0, 0, 1), (0, 0, 0, 1), (0, 0, 0, 1),
                                 (1, 1, 1, 0), (1, 1, 1, 0)])
        assert_equal(test, control)
        assert_equal(test.mask, control.mask)
Example #12
def colicTest():
    frTrain = open('horseColicTraining.txt')
    frTest = open('horseColicTest.txt')
    trainingSet = []
    trainingLabels = []
    for line in frTrain.readlines():
        currLine = line.strip().split('\t')
        lineArr = []
        for i in range(21):
            lineArr.append(float(currLine[i]))
        trainingSet.append(lineArr)
        trainingLabels.append(float(currLine[21]))
    trainWeights = stocGradAscent1(array(trainingSet), trainingLabels, 200)
    errorCount = 0
    numTestVec = 0.0
    for line in frTest.readlines():
        numTestVec += 1.0
        currLine = line.strip().split('\t')
        lineArr = []
        for i in range(21):
            lineArr.append(float(currLine[i]))
        if int(classifyVector(array(lineArr), trainWeights)) != int(currLine[21]):
            errorCount += 1
    errorRate = (float(errorCount) / numTestVec)
    print "the error rate of this test is: %f" % errorRate
    return errorRate
Example #13
File: OMNO2_Trop.py Project: gkuhl/omi
def preprocessing(gridding_method, Time, ColumnAmountNO2Trop,
    ColumnAmountNO2TropStd, FoV75Area, CloudRadianceFraction,
    RootMeanSquareErrorOfFit, SolarZenithAngle, VcdQualityFlags,
    XTrackQualityFlags, **kwargs):

    # mask of bad values
    mask = ColumnAmountNO2Trop.mask | ColumnAmountNO2TropStd.mask

    # mask low quality data
    mask |= RootMeanSquareErrorOfFit > 0.0003
    mask |= SolarZenithAngle > 85
    mask |= VcdQualityFlags % 2 != 0
    mask |= XTrackQualityFlags

    # set invalid cloud cover to 100% -> smallest weight
    CloudRadianceFraction[CloudRadianceFraction.mask] = 1.0

    # values and errors
    values = ma.array(ColumnAmountNO2Trop, mask=mask)
    errors = ma.array(ColumnAmountNO2TropStd, mask=mask)

    # weight based on stddev and pixel area (see Wenig et al., 2008)
    stddev = 1.5e15 * (1.0 + 3.0 * ma.array(CloudRadianceFraction, mask=mask))
    area = FoV75Area.reshape(1, FoV75Area.size)
    area = area.repeat(ColumnAmountNO2Trop.shape[0], axis=0)

    if gridding_method.startswith('psm'):
        weights = ma.array(1.0 / area, mask=mask)
    else:
        weights = ma.array(1.0 / (area * stddev**2), mask=mask)

    return values, errors, stddev, weights
Example #14
def common_ma_setup():
    data2D = ma.array([np.random.rand(25).reshape(5,5),
                       np.random.rand(25).reshape(5,5),
                       np.random.rand(25).reshape(5,5),
                       np.random.rand(25).reshape(5,5),
                       np.random.rand(25).reshape(5,5),],
                       mask=[np.random.rand(25).reshape(5,5)>.5,
                             np.random.rand(25).reshape(5,5)>.5,
                             np.random.rand(25).reshape(5,5)>.5,
                             np.random.rand(25).reshape(5,5)>.5,
                             np.random.rand(25).reshape(5,5)>.5,]
                      ) 
    data1D = ma.array(np.random.rand(25),
                      mask=np.random.rand(25)>0.9,
                      fill_value=-9999)
    dtype5R = [('a', float), ('b', int), ('c', '|S3')]
    data5N = ma.array(list(zip(np.random.rand(5),
                               np.arange(5),
                               'ABCDE')),
                      dtype=dtype5R)
    data5R = mr.fromarrays([np.random.rand(5),
                            np.arange(5),
                            ('A', 'B', 'C', 'D', 'E')],
                           dtype=dtype5R)
    data5R._mask['a'][0] = True
    data5R._mask['b'][2] = True
    data5R._mask['c'][-1] = True
    return dict(data1D=data1D, 
                data2D=data2D,
                data5N=data5N,
                data5R=data5R)
Example #15
File: test_peak.py Project: Jozhogg/iris
    def test_peak_with_mask(self):
        # Single value in column masked.
        latitude = iris.coords.DimCoord(np.arange(0, 5, 1),
                                        standard_name='latitude',
                                        units='degrees')
        cube = iris.cube.Cube(ma.array([1, 4, 2, 3, 2], dtype=np.float32),
                              standard_name='air_temperature',
                              units='kelvin')
        cube.add_dim_coord(latitude, 0)

        cube.data[3] = ma.masked

        collapsed_cube = cube.collapsed('latitude', iris.analysis.PEAK)
        self.assertArrayAlmostEqual(collapsed_cube.data,
                                    np.array([4.024977], dtype=np.float32))
        self.assertTrue(ma.isMaskedArray(collapsed_cube.data))
        self.assertEqual(collapsed_cube.data.shape, (1,))

        # Whole column masked.
        cube.data[:] = ma.masked

        collapsed_cube = cube.collapsed('latitude', iris.analysis.PEAK)
        masked_array = ma.array(ma.masked)
        self.assertTrue(ma.allequal(collapsed_cube.data, masked_array))
        self.assertTrue(ma.isMaskedArray(collapsed_cube.data))
        self.assertEqual(collapsed_cube.data.shape, (1,))
Example #16
    def test_mode(self):
        a1 = [0,0,0,1,1,1,2,3,3,3,3,4,5,6,7]
        a2 = np.reshape(a1, (3,5))
        a3 = np.array([1,2,3,4,5,6])
        a4 = np.reshape(a3, (3,2))
        ma1 = ma.masked_where(ma.array(a1) > 2, a1)
        ma2 = ma.masked_where(a2 > 2, a2)
        ma3 = ma.masked_where(a3 < 2, a3)
        ma4 = ma.masked_where(ma.array(a4) < 2, a4)
        assert_equal(mstats.mode(a1, axis=None), (3,4))
        assert_equal(mstats.mode(a1, axis=0), (3,4))
        assert_equal(mstats.mode(ma1, axis=None), (0,3))
        assert_equal(mstats.mode(a2, axis=None), (3,4))
        assert_equal(mstats.mode(ma2, axis=None), (0,3))
        assert_equal(mstats.mode(a3, axis=None), (1,1))
        assert_equal(mstats.mode(ma3, axis=None), (2,1))
        assert_equal(mstats.mode(a2, axis=0), ([[0,0,0,1,1]], [[1,1,1,1,1]]))
        assert_equal(mstats.mode(ma2, axis=0), ([[0,0,0,1,1]], [[1,1,1,1,1]]))
        assert_equal(mstats.mode(a2, axis=-1), ([[0],[3],[3]], [[3],[3],[1]]))
        assert_equal(mstats.mode(ma2, axis=-1), ([[0],[1],[0]], [[3],[1],[0]]))
        assert_equal(mstats.mode(ma4, axis=0), ([[3,2]], [[1,1]]))
        assert_equal(mstats.mode(ma4, axis=-1), ([[2],[3],[5]], [[1],[1],[1]]))

        a1_res = mstats.mode(a1, axis=None)

        # test for namedtuple attributes
        attributes = ('mode', 'count')
        check_named_results(a1_res, attributes, ma=True)
Example #17
File: sqrt_norm.py Project: Fade89/agpy
    def __call__(self, value, clip=None, midpoint=None):
        if clip is None:
            clip = self.clip

        if np.iterable(value):
            vtype = 'array'
            val = ma.asarray(value).astype(float)
        else:
            vtype = 'scalar'
            val = ma.array([value]).astype(float)

        self.autoscale_None(val)
        vmin, vmax = self.vmin, self.vmax

        if vmin > vmax:
            raise ValueError("minvalue must be less than or equal to maxvalue")
        elif vmin == vmax:
            return 0.0 * val
        else:
            if clip:
                mask = ma.getmask(val)
                val = ma.array(np.clip(val.filled(vmax), vmin, vmax),
                               mask=mask)
            # rescale to [0, 1], then take the nth root
            result = (val - vmin) * (1.0 / (vmax - vmin))
            #result = (ma.arcsinh(val)-np.arcsinh(vmin))/(np.arcsinh(vmax)-np.arcsinh(vmin))
            result = result**(1. / self.nthroot)
        if vtype == 'scalar':
            result = result[0]
        return result
Example #18
    def test_compute_statistics_with_masked_values(self):
        model_values = ma.array(np.arange(1.0, 5.0, 1), mask=np.array([False, False, True, False])) # [1, 2, --, 4]
        ref_values = ma.array([1.1, 2.2, 2.9, 3.7])
        ref_values, model_values = utils.harmonise(ref_values, model_values)
        ref_values = ref_values.compressed()
        model_values = model_values.compressed()
        stats = calculate_statistics(model_values=model_values, reference_values=ref_values, config=self.config, model_name='kate', ref_name='ref')
        self.assertEqual('kate', stats['model_name'])
        self.assertEqual('ref', stats['ref_name'])
        self.assertAlmostEqual(0.216024, stats['unbiased_rmse'], 5)
        self.assertAlmostEqual(0.216024, stats['rmse'], 5)
        self.assertAlmostEqual(6.344131e-15, stats['pbias'], 5)
        self.assertAlmostEqual(0.0, stats['bias'], 5)
        self.assertAlmostEqual(0.99484975, stats['corrcoeff'], 5)
        self.assertAlmostEqual(1.039815, stats['reliability_index'], 5)
        self.assertAlmostEqual(0.9589041, stats['model_efficiency'], 5)
        self.assertAlmostEqual(2.33333, stats['mean'], 5)
        self.assertAlmostEqual(2.33333, stats['ref_mean'], 5)
        self.assertAlmostEqual(1.24722, stats['stddev'], 5)
        self.assertAlmostEqual(1.06562, stats['ref_stddev'], 5)
        self.assertAlmostEqual(1.17041, stats['normalised_stddev'], 5)
        self.assertAlmostEqual(2, stats['median'], 5)
        self.assertAlmostEqual(2.2, stats['ref_median'], 5)
        self.assertAlmostEqual(3.6, stats['p90'], 5)
        self.assertAlmostEqual(3.4, stats['ref_p90'], 5)
        self.assertAlmostEqual(3.8, stats['p95'], 5)
        self.assertAlmostEqual(3.55, stats['ref_p95'], 5)
        self.assertAlmostEqual(1, stats['min'], 5)
        self.assertAlmostEqual(1.1, stats['ref_min'], 5)
        self.assertAlmostEqual(4, stats['max'], 5)
        self.assertAlmostEqual(3.7, stats['ref_max'], 5)

        self.assertAlmostEqual(stats['rmse'] ** 2, stats['bias'] ** 2 + stats['unbiased_rmse'] ** 2, 5)
Example #19
File: masked.py Project: ucfatsg/geogg122
def masked(root="data/", years=[2009], months=[], layers=["BHR_VIS"]):
    """
    Method to read GlobAlbedo files
    """
    file_template = 'NETCDF:"%s":%s'

    if len(months) == 0:
        months = range(1, 13)

    data = []

    for year in years:
        for month in months:
            for layer in layers:

                # build the GlobAlbedo mosaic filename for this year/month
                filename = root + "GlobAlbedo.%d%02d.mosaic.5.nc" % (year, month)

                g = gdal.Open(file_template % (filename, layer))

                if g is None:
                    raise IOError
                band = g.ReadAsArray()
                masked_band = ma.array(band, mask=np.isnan(band))
                data.append(masked_band)
    return ma.array(data)
Example #20
File: test_io.py Project: hector1618/numpy
 def test_user_missing_values(self):
     data = "A, B, C\n0, 0., 0j\n1, N/A, 1j\n-9, 2.2, N/A\n3, -99, 3j"
     basekwargs = dict(dtype=None, delimiter=",", names=True)
     mdtype = [("A", int), ("B", float), ("C", complex)]
     #
     test = np.mafromtxt(StringIO(data), missing_values="N/A", **basekwargs)
     control = ma.array(
         [(0, 0.0, 0j), (1, -999, 1j), (-9, 2.2, -999j), (3, -99, 3j)],
         mask=[(0, 0, 0), (0, 1, 0), (0, 0, 1), (0, 0, 0)],
         dtype=mdtype,
     )
     assert_equal(test, control)
     #
     basekwargs["dtype"] = mdtype
     test = np.mafromtxt(StringIO(data), missing_values={0: -9, 1: -99, 2: -999j}, **basekwargs)
     control = ma.array(
         [(0, 0.0, 0j), (1, -999, 1j), (-9, 2.2, -999j), (3, -99, 3j)],
         mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)],
         dtype=mdtype,
     )
     assert_equal(test, control)
     #
     test = np.mafromtxt(StringIO(data), missing_values={0: -9, "B": -99, "C": -999j}, **basekwargs)
     control = ma.array(
         [(0, 0.0, 0j), (1, -999, 1j), (-9, 2.2, -999j), (3, -99, 3j)],
         mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)],
         dtype=mdtype,
     )
     assert_equal(test, control)
Example #21
File: utils.py Project: cpaulik/cis
def apply_intersection_mask_to_two_arrays(array1, array2):
    """
    Ensure two (optionally) masked arrays have the same mask.
    If both arrays are masked, the masks are combined with ma.mask_or: a point is masked if it is masked in either array, so only the intersection of valid points stays unmasked.
    If one array is masked and the other is not, the mask from the masked array is applied to the unmasked array.
    If neither array is masked then both arrays are returned as masked arrays with an empty mask.

    :param array1: An (optionally masked) array
    :param array2: Another (optionally masked) array
    :return: Two masked arrays with a common mask
    """

    import numpy.ma as ma
    if isinstance(array1, ma.MaskedArray):
        if isinstance(array2, ma.MaskedArray):
            intersection_mask = ma.mask_or(array1.mask, array2.mask)
        else:
            intersection_mask = array1.mask
    else:
        if isinstance(array2, ma.MaskedArray):
            intersection_mask = array2.mask
        else:
            intersection_mask = False

    array1 = ma.array(array1, mask=intersection_mask)
    array2 = ma.array(array2, mask=intersection_mask)

    return array1, array2
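
A minimal usage sketch of the function above (values chosen for illustration):

import numpy.ma as ma

a = ma.array([1.0, 2.0, 3.0, 4.0], mask=[1, 0, 0, 0])
b = ma.array([10.0, 20.0, 30.0, 40.0], mask=[0, 0, 1, 0])

a2, b2 = apply_intersection_mask_to_two_arrays(a, b)
print(a2)  # [-- 2.0 -- 4.0]: masked wherever either input was masked
print(b2)  # [-- 20.0 -- 40.0]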
Example #22
 def test_woe(self):
     wPlus1 = math.log((2.0/3 + EPSILON) / (2.0/5 + EPSILON))  # np.math was removed in NumPy 2.0; use the stdlib math module
     wMinus1 = math.log((1.0/3 + EPSILON) / (3.0/5 + EPSILON))

     wPlus2 = math.log((1.0/3 + EPSILON) / EPSILON)
     wMinus2 = math.log((2.0/3 + EPSILON) / (1.0 + EPSILON))

     wPlus3 = math.log(EPSILON / (3.0/5 + EPSILON))
     wMinus3 = math.log((1.0 + EPSILON) / (2.0/5 + EPSILON))
     
     # Binary classes
     ans = [
         [wPlus1,  wPlus1,  wMinus1,],
         [wMinus1, wMinus1, wPlus1, ],
         [None,   wMinus1,  wPlus1, ]
     ]
     ans = ma.array(data=ans, mask=self.mask)
     np.testing.assert_equal(woe(self.factor, self.sites), ans)
     
     # Multiclass
     w1, w2, w3 = (wPlus1 + wMinus2+wMinus3), (wPlus2 + wMinus1 + wMinus3), (wPlus3 + wMinus1 + wMinus2)
     ans = [
         [w1, w1, w3,],
         [w3, w2, w1,],
         [ 0, w3, w1,]
     ]
     ans = ma.array(data=ans, mask=self.mask)
     weights = woe(self.multifact, self.sites)
     
     np.testing.assert_equal(ans, weights)
Example #23
    def setUp(self):
        self.factor = Raster('../../examples/multifact.tif')
                #~ [1,1,3]
                #~ [3,2,1]
                #~ [0,3,1]

        self.sites  = Raster('../../examples/sites.tif')
                    #~ [1,2,1],
                    #~ [1,2,1],
                    #~ [0,1,2]
        self.sites.resetMask(maskVals=[0])

        self.mask = [
            [False, False, False,],
            [False, False, False,],
            [True,  False, False,]
        ]
        fact = [
            [1, 1, 3,],
            [3, 2, 1,],
            [0, 3, 1,]
        ]
        site = [
            [False, True,  False,],
            [False, True,  False,],
            [False, False, True,]
        ]
        self.factraster = ma.array(data=fact, mask=self.mask, dtype=int)
        self.sitesraster = ma.array(data=site, mask=self.mask, dtype=bool)
Example #24
File: regression.py Project: rainly/armor
def getShiibaVectorField(shiibaCoeffs, phi1, gridSize=25, name="",
                         key="Shiiba vector field", title="UpWind Scheme"):
    """Plot the vector field from shiiba coeffs.

    input: shiiba coeffs (c1, c2, ..., c6) for Ui = c1.I + c2.J + c3, Vj = c4.I + c5.J + c6,
    transformed via I=y, J=x, to Ux = c5.x + c4.y + c6, Vy = c2.x + c1.y + c3.
    """
    # 1. setting the variables
    # 2. setting the stage
    # 3. plotting
    # 4. no need to save or print to screen

    # 1. setting the variables
    c1, c2, c3, c4, c5, c6 = shiibaCoeffs
    c5, c4, c6, c2, c1, c3 = c1, c2, c3, c4, c5, c6     # x,y <- j,i switch
    # 2. setting the stage
    height = phi1.matrix.shape[0]
    width = phi1.matrix.shape[1]
    mask = phi1.matrix.mask
    name = "shiiba vector field for " + phi1.name
    imagePath = phi1.name + "shiibaVectorField.png"
    X, Y    = np.meshgrid(range(width), range(height))
    Ux      = c1*X + c2*Y + c3
    Vy      = c4*X + c5*Y + c6
    Ux      = ma.array(Ux, mask=mask)
    Vy      = ma.array(Vy, mask=mask)
    # constructing the vector field object
    vect    = pattern.VectorField(Ux, Vy, name=name, imagePath=imagePath, key=key,
                                    title=title, gridSize=gridSize)
    return vect
Example #25
File: file_funcs.py Project: tcv/hibiscus
def spike_flag(data,masked,freq,percent):
    """
    Flags out RFI spikes using a 11 bin filter
    Can be used with either time or freq
    percent is a percentage level cut (100 would be twice the 11 bin average)
    Needs to be applied to masked data.
    """
    new_mask = np.zeros(len(data))
    new_array = ma.array(data, mask=masked)
    new_comp = ma.compressed(new_array)
    freq_array = ma.array(freq, mask=masked)
    new_freq = ma.compressed(freq_array)
    for i in range(0, len(data)):
        if masked[i] == 1.0:
            new_mask[i] = 1.0

    for i in range(5, len(new_comp) - 5):
        # 11-bin local mean centred on bin i
        mean_group = np.sum(new_comp[i - 5:i + 6]) / 11.
        ratio = new_comp[i] / mean_group
        # flag bins that deviate from the local mean by more than `percent`
        if ratio >= (1 + percent / 100.) or ratio <= 1 / (1 + percent / 100.):
            comp_freq = new_freq[i]
            for j in range(0, len(freq)):
                if freq[j] == comp_freq:
                    index = j
            new_mask[index] = 1.0

    return new_mask
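
The same flagging rule can be sketched in vectorized form. This operates on the already-compressed data and, like the loop above, leaves the five edge bins on each side unflagged (an illustrative alternative, not the original project's code):

import numpy as np

def spike_flag_vectorized(new_comp, percent):
    # 11-point centred running mean over the unmasked data.
    local_mean = np.convolve(new_comp, np.ones(11) / 11.0, mode='same')
    ratio = new_comp / local_mean
    flags = (ratio >= 1 + percent / 100.0) | (ratio <= 1 / (1 + percent / 100.0))
    flags[:5] = False   # edge bins have no full 11-point window
    flags[-5:] = False
    return flags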
Example #26
File: imaging.py Project: iceseismic/sito
 def __call__(self, value, clip=None):
     if clip is None:
         clip = self.clip
     if np.iterable(value):
         vtype = 'array'
         val = ma.asarray(value).astype(float)
     else:
         vtype = 'scalar'
         val = ma.array([value]).astype(float)
     self.autoscale_None(val)
     vmin, vmax = self.vmin, self.vmax
     cmin, cmax = self.cmin * vmin, self.cmax * vmax
     if vmin > vmax:
         raise ValueError("minvalue must be less than or equal to maxvalue")
     elif vmin == vmax:
         result = 0.0 * val
     else:
         if clip:
             mask = ma.getmask(val)
             val = ma.array(np.clip(val.filled(vmax), vmin, vmax),
                             mask=mask)
         result = 0. * val + 0.5
         result[val > cmax] = (ma.log10(val[val > cmax]) - ma.log10(cmax)) / (np.log10(vmax) - np.log10(cmax)) / 2. + 0.5
         result[val < cmin] = -(ma.log10(-val[val < cmin]) - ma.log10(-cmin)) / (np.log10(-vmin) - np.log10(-cmin)) / 2. + 0.5
     if vtype == 'scalar':
         result = result[0]
     return result
Example #27
def plotCurves(c1, c2):
    name1, t, avg1, top1, bottom1 = c1
    name2, t, avg2, top2, bottom2 = c2
    pl.plot(t, np.zeros(len(t)), 'k-')
    s1 = ma.array(avg1)
    s2 = ma.array(avg2)
    zx1 = np.logical_and(np.greater_equal(top1, 0), np.less_equal(bottom1, 0))
    zx2 = np.logical_and(np.greater_equal(top2, 0), np.less_equal(bottom2, 0))
    ix = np.logical_or(
            np.logical_and(
                np.greater_equal(top1, top2),
                np.less_equal(bottom1, top2)),
            np.logical_and(
                np.greater_equal(top1, bottom2),
                np.less_equal(bottom1, bottom2)))
    mask1 = np.logical_or(zx1, ix)
    mask2 = np.logical_or(zx2, ix)

    print(mask1)
    print(mask2)
    print(zx1)
    print(zx2)
    print(ix)

    pl.plot(t, s1, "k--", linewidth=1)
    pl.plot(t, s2, "k-", linewidth=1)
    s1.mask = ix
    s2.mask = ix
    pl.plot(t, s1, "k--", linewidth=3, label=name1)
    pl.plot(t, s2, "k-", linewidth=3, label=name2)
    pl.xlabel('Time (secs)')
    pl.ylabel("Pearson correlation")
Example #28
File: mlai.py Project: ThomasG77/geogg122
def hooray(year,tile):

	file_pattern = 'files/data/MCD15A2.A%s*.%s.*'%(year,tile)

	filenames = np.sort(glob(file_pattern))


	selected_layers = [  "Lai_1km", "FparLai_QC", "LaiStdDev_1km" ]
	file_template = 'HDF4_EOS:EOS_GRID:"%s":MOD_Grid_MOD15A2:%s'


	lai_all    = []
	lai_sd_all = []

	for filename in filenames:
		data = {}
		for i, layer in enumerate ( selected_layers ):
		    this_file = file_template % ( filename, layer )
		    g = gdal.Open ( this_file )
		    
		    if g is None:
		        raise IOError
		    data[layer] = g.ReadAsArray() 
		lai = data['Lai_1km'] * 0.1
		lai_sd = data['LaiStdDev_1km'] * 0.1
		mask = data['FparLai_QC'] & 1
		laim = ma.array(lai,mask=mask)
		laim_sd = ma.array(lai_sd,mask=mask)

		lai_all.append(laim)
		lai_sd_all.append(laim_sd)

	lai_all    = ma.array(lai_all)
	lai_sd_all = ma.array(lai_sd_all)
	return lai_all, lai_sd_all
Example #29
def fitModel(traj, mode=None, maxMissing=0.9, excludeEdge=None):
    traj = deepcopy(traj)
    if mode == 'onFood':
        traj.excluded = normDistToFood(traj) > 1.1
    if mode == 'offFood':
        traj.excluded = normDistToFood(traj) <= 1.1
    if excludeEdge is not None:
            width, height = (traj.h5ref['cropRegion'][-2:]/
                             traj.pixelsPerMicron)
            xsel = np.logical_or(traj.X[:,0] < excludeEdge,
                                 traj.X[:,0] > width - excludeEdge)
            ysel = np.logical_or(traj.X[:,1] < excludeEdge,
                                 traj.X[:,1] > height - excludeEdge)
            sel = np.logical_or(xsel, ysel)
            traj.excluded = np.logical_or(traj.excluded, sel)
    
    m = wtm.Helms2015CentroidModel()
    # check whether there are sufficient data points to fit the model
    if fractionMissing(traj) > maxMissing:
        p = ma.array(m.toParameterVector()[0], dtype=float)
        p[:] = ma.masked
        return p.filled(np.nan).astype(float)
    else:
        try:
            m.fit(traj, windowSize=100., plotFit=False)
            return ma.array(m.toParameterVector()[0]).filled(np.nan).astype(float)
        except Exception as e:
            print('Error during ' + repr(traj) + repr(e))
            p = ma.array(m.toParameterVector()[0])
            p[:] = ma.masked
            return p.filled(np.nan).astype(float)
Example #30
def autosearch_peaks(dataset,limits,params): 
    """
    Detects peaks in the y axis of a dataset and returns a list of PeakRowUI objects for each peak
    """
    xdata = dataset.data[:, 0]
    #limits = (xdata[0], xdata[-1])
    iBeg = np.searchsorted(xdata, limits[0])
    iFin = np.searchsorted(xdata, limits[1])
    x = xdata[iBeg:iFin]
    y0 = dataset.data[iBeg:iFin, 1]
    y1 = copy.copy(y0)
    ysig = np.std(y1)
    offset = [-1, 1]
    # mask bins below one sigma, then bins that are not strict local maxima
    ymask = ma.array(y0, mask=(y0 < ysig))
    for off in offset:
        ymask = ma.array(ymask, mask=(ymask - np.roll(y0, off) <= 0.))
    indx = ymask.nonzero()
    mags = ymask[indx]
    poss = x[indx]
    iPeak = 0
    max_peaks = 50  # arbitrarily set for now
    if len(poss) > max_peaks:
        return None
    else:
        for pos, mag in zip(poss, mags):
            params.update(setPeakparms(pos, mag, params, iPeak))
            iPeak += 1
        return createPeakRows(params)
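
The masking trick above keeps only bins that exceed one standard deviation and strictly rise above both neighbours. A standalone illustration with made-up data:

import numpy as np
import numpy.ma as ma

y = np.array([0., 1., 5., 1., 0., 2., 7., 2., 0.])
ymask = ma.array(y, mask=(y < np.std(y)))
for off in [-1, 1]:
    # mask points that do not rise above the neighbour on this side
    ymask = ma.array(ymask, mask=(ymask - np.roll(y, off) <= 0.))
print(ymask.nonzero()[0])  # peak indices: [2 6]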
Example #31
    def plotPCA(self, data=None):
        try:
            self.fig.delaxes(self.ax)
        except Exception:
            pass

        # normalize and reorganize data
        n_vars = len(data)
        var_names = list(data)
        x_data = data.values()  # guaranteed, per python docs, to be same order
        x_data = [(d - np.mean(d)) / np.std(d) for d in x_data]
        # synchronize masks
        n_data = ma.array(x_data)
        if isinstance(n_data, ma.masked_array):
            mask = ma.sum(n_data.mask, axis=0)
            mask = np.where(mask, True, False)
            for d in x_data:
                d.mask = mask

        x_data = np.concatenate(
            [np.expand_dims(d.compressed(), axis=0) for d in x_data])
        x_data = x_data.transpose()

        # calculate pca
        pca = skd.PCA()
        model = pca.fit(x_data)
        tx_data = pca.transform(x_data)

        # plot first two components
        self.ax = self.fig.add_subplot(111)
        self.ax.grid()
        self.ax.set_xlabel('Component 1')
        self.ax.set_ylabel('Component 2')

        axis_type = self.axis_type.GetString(self.axis_type.GetSelection())
        if axis_type == 'fixed':
            self.ax.set_xlim(-3, 3)
            self.ax.set_ylim(-3, 3)

        plot_type = self.plot_type.GetString(self.plot_type.GetSelection())
        if plot_type == 'scatterplot':
            # adjust marker size and alpha based on # of points
            marker_size = mpl.rcParams['lines.markersize']**2
            marker_size *= min(1, max(.12, 200 / len(tx_data[:, 0])))
            alpha = min(1, max(.002, 500 / len(tx_data[:, 0])))
            self.ax.scatter(tx_data[:, 0],
                            tx_data[:, 1],
                            s=marker_size,
                            c='b',
                            alpha=alpha)
        else:  # heatmap
            bins = 200
            heatmap, x_edges, y_edges = np.histogram2d(tx_data[:, 0],
                                                       tx_data[:, 1],
                                                       bins=bins)
            x_min, x_max = x_edges[0], x_edges[-1]
            y_min, y_max = y_edges[0], y_edges[-1]
            self.ax.imshow(np.log(heatmap.transpose() + 1),
                           extent=[x_min, x_max, y_min, y_max],
                           cmap='Blues',
                           origin='lower',
                           aspect='auto')

        # plot axes
        color = ['g', 'r', 'm', 'k', 'y']
        for n in range(n_vars):
            adata = np.zeros([2, n_vars])
            adata[0, n] = ma.min(x_data[:, n])
            adata[1, n] = ma.max(x_data[:, n])
            xf = pca.transform(adata)
            self.ax.plot([xf[0, 0], xf[0, 1]], [xf[1, 0], xf[1, 1]],
                         color[n % len(color)] + '-')
            self.ax.text(xf[0, 0],
                         xf[1, 0],
                         var_names[n],
                         color=color[n % len(color)])
        self.fig.tight_layout()

        # show stats in grid
        try:
            self.grid.Destroy()
        except Exception:
            pass
        self.grid = wx.grid.Grid(self.panel, -1)
        self.grid.CreateGrid(n_vars,
                             n_vars + 1)  # extra column for explained variance
        grid_labels = ['Expl. var.'] + var_names
        grid_data = np.concatenate((np.expand_dims(
            pca.explained_variance_ratio_, axis=1), pca.components_),
                                   axis=1)
        for v in range(len(grid_labels)):
            self.grid.SetColLabelValue(v, grid_labels[v])
            # grid.SetColSize(v, cell_width+8)
            self.grid.SetColFormatFloat(v, 6, 3)
        vc = wx.ColourDatabase().Find('Light Blue')
        for row in range(n_vars):
            self.grid.SetCellBackgroundColour(row, 0, vc)
            for col in range(len(grid_labels)):
                self.grid.SetCellValue(row, col, str(grid_data[row, col]))
                self.grid.SetReadOnly(row, col)
        self.grid.AutoSize()
        self.sizer.Add(self.grid, 2, flag=wx.TOP | wx.LEFT, border=10)
        self.sizer.AddStretchSpacer()
        self.panel.Layout()
Example #32
class TestMRecords:

    ilist = [1, 2, 3, 4, 5]
    flist = [1.1, 2.2, 3.3, 4.4, 5.5]
    slist = [b'one', b'two', b'three', b'four', b'five']
    ddtype = [('a', int), ('b', float), ('c', '|S8')]
    mask = [0, 1, 0, 0, 1]
    base = ma.array(list(zip(ilist, flist, slist)), mask=mask, dtype=ddtype)

    def test_byview(self):
        # Test creation by view
        base = self.base
        mbase = base.view(mrecarray)
        assert_equal(mbase.recordmask, base.recordmask)
        assert_equal_records(mbase._mask, base._mask)
        assert_(isinstance(mbase._data, recarray))
        assert_equal_records(mbase._data, base._data.view(recarray))
        for field in ('a', 'b', 'c'):
            assert_equal(base[field], mbase[field])
        assert_equal_records(mbase.view(mrecarray), mbase)

    def test_get(self):
        # TestData fields retrieval
        base = self.base.copy()
        mbase = base.view(mrecarray)
        # As fields..........
        for field in ('a', 'b', 'c'):
            assert_equal(getattr(mbase, field), mbase[field])
            assert_equal(base[field], mbase[field])
        # as elements .......
        mbase_first = mbase[0]
        assert_(isinstance(mbase_first, mrecarray))
        assert_equal(mbase_first.dtype, mbase.dtype)
        assert_equal(mbase_first.tolist(), (1, 1.1, b'one'))
        # Used to be mask, now it's recordmask
        assert_equal(mbase_first.recordmask, nomask)
        assert_equal(mbase_first._mask.item(), (False, False, False))
        assert_equal(mbase_first['a'], mbase['a'][0])
        mbase_last = mbase[-1]
        assert_(isinstance(mbase_last, mrecarray))
        assert_equal(mbase_last.dtype, mbase.dtype)
        assert_equal(mbase_last.tolist(), (None, None, None))
        # Used to be mask, now it's recordmask
        assert_equal(mbase_last.recordmask, True)
        assert_equal(mbase_last._mask.item(), (True, True, True))
        assert_equal(mbase_last['a'], mbase['a'][-1])
        assert_((mbase_last['a'] is masked))
        # as slice ..........
        mbase_sl = mbase[:2]
        assert_(isinstance(mbase_sl, mrecarray))
        assert_equal(mbase_sl.dtype, mbase.dtype)
        # Used to be mask, now it's recordmask
        assert_equal(mbase_sl.recordmask, [0, 1])
        assert_equal_records(mbase_sl.mask,
                             np.array([(False, False, False),
                                       (True, True, True)],
                                      dtype=mbase._mask.dtype))
        assert_equal_records(mbase_sl, base[:2].view(mrecarray))
        for field in ('a', 'b', 'c'):
            assert_equal(getattr(mbase_sl, field), base[:2][field])

    def test_set_fields(self):
        # TestData setting fields.
        base = self.base.copy()
        mbase = base.view(mrecarray)
        mbase = mbase.copy()
        mbase.fill_value = (999999, 1e20, 'N/A')
        # Change the data, the mask should be conserved
        mbase.a._data[:] = 5
        assert_equal(mbase['a']._data, [5, 5, 5, 5, 5])
        assert_equal(mbase['a']._mask, [0, 1, 0, 0, 1])
        # Change the elements, and the mask will follow
        mbase.a = 1
        assert_equal(mbase['a']._data, [1]*5)
        assert_equal(ma.getmaskarray(mbase['a']), [0]*5)
        # Use to be _mask, now it's recordmask
        assert_equal(mbase.recordmask, [False]*5)
        assert_equal(mbase._mask.tolist(),
                     np.array([(0, 0, 0),
                               (0, 1, 1),
                               (0, 0, 0),
                               (0, 0, 0),
                               (0, 1, 1)],
                              dtype=bool))
        # Set a field to mask ........................
        mbase.c = masked
        # Use to be mask, and now it's still mask !
        assert_equal(mbase.c.mask, [1]*5)
        assert_equal(mbase.c.recordmask, [1]*5)
        assert_equal(ma.getmaskarray(mbase['c']), [1]*5)
        assert_equal(ma.getdata(mbase['c']), [b'N/A']*5)
        assert_equal(mbase._mask.tolist(),
                     np.array([(0, 0, 1),
                               (0, 1, 1),
                               (0, 0, 1),
                               (0, 0, 1),
                               (0, 1, 1)],
                              dtype=bool))
        # Set fields by slices .......................
        mbase = base.view(mrecarray).copy()
        mbase.a[3:] = 5
        assert_equal(mbase.a, [1, 2, 3, 5, 5])
        assert_equal(mbase.a._mask, [0, 1, 0, 0, 0])
        mbase.b[3:] = masked
        assert_equal(mbase.b, base['b'])
        assert_equal(mbase.b._mask, [0, 1, 0, 1, 1])
        # Set fields globally..........................
        ndtype = [('alpha', '|S1'), ('num', int)]
        data = ma.array([('a', 1), ('b', 2), ('c', 3)], dtype=ndtype)
        rdata = data.view(MaskedRecords)
        val = ma.array([10, 20, 30], mask=[1, 0, 0])

        rdata['num'] = val
        assert_equal(rdata.num, val)
        assert_equal(rdata.num.mask, [1, 0, 0])

    def test_set_fields_mask(self):
        # TestData setting the mask of a field.
        base = self.base.copy()
        # This one has already a mask....
        mbase = base.view(mrecarray)
        mbase['a'][-2] = masked
        assert_equal(mbase.a, [1, 2, 3, 4, 5])
        assert_equal(mbase.a._mask, [0, 1, 0, 1, 1])
        # This one has not yet
        mbase = fromarrays([np.arange(5), np.random.rand(5)],
                           dtype=[('a', int), ('b', float)])
        mbase['a'][-2] = masked
        assert_equal(mbase.a, [0, 1, 2, 3, 4])
        assert_equal(mbase.a._mask, [0, 0, 0, 1, 0])

    def test_set_mask(self):
        base = self.base.copy()
        mbase = base.view(mrecarray)
        # Set the mask to True .......................
        mbase.mask = masked
        assert_equal(ma.getmaskarray(mbase['b']), [1]*5)
        assert_equal(mbase['a']._mask, mbase['b']._mask)
        assert_equal(mbase['a']._mask, mbase['c']._mask)
        assert_equal(mbase._mask.tolist(),
                     np.array([(1, 1, 1)]*5, dtype=bool))
        # Delete the mask ............................
        mbase.mask = nomask
        assert_equal(ma.getmaskarray(mbase['c']), [0]*5)
        assert_equal(mbase._mask.tolist(),
                     np.array([(0, 0, 0)]*5, dtype=bool))

    def test_set_mask_fromarray(self):
        base = self.base.copy()
        mbase = base.view(mrecarray)
        # Sets the mask w/ an array
        mbase.mask = [1, 0, 0, 0, 1]
        assert_equal(mbase.a.mask, [1, 0, 0, 0, 1])
        assert_equal(mbase.b.mask, [1, 0, 0, 0, 1])
        assert_equal(mbase.c.mask, [1, 0, 0, 0, 1])
        # Yay, once more !
        mbase.mask = [0, 0, 0, 0, 1]
        assert_equal(mbase.a.mask, [0, 0, 0, 0, 1])
        assert_equal(mbase.b.mask, [0, 0, 0, 0, 1])
        assert_equal(mbase.c.mask, [0, 0, 0, 0, 1])

    def test_set_mask_fromfields(self):
        mbase = self.base.copy().view(mrecarray)

        nmask = np.array(
            [(0, 1, 0), (0, 1, 0), (1, 0, 1), (1, 0, 1), (0, 0, 0)],
            dtype=[('a', bool), ('b', bool), ('c', bool)])
        mbase.mask = nmask
        assert_equal(mbase.a.mask, [0, 0, 1, 1, 0])
        assert_equal(mbase.b.mask, [1, 1, 0, 0, 0])
        assert_equal(mbase.c.mask, [0, 0, 1, 1, 0])
        # Reinitialize and redo
        mbase.mask = False
        mbase.fieldmask = nmask
        assert_equal(mbase.a.mask, [0, 0, 1, 1, 0])
        assert_equal(mbase.b.mask, [1, 1, 0, 0, 0])
        assert_equal(mbase.c.mask, [0, 0, 1, 1, 0])

    def test_set_elements(self):
        base = self.base.copy()
        # Set an element to mask .....................
        mbase = base.view(mrecarray).copy()
        mbase[-2] = masked
        assert_equal(
            mbase._mask.tolist(),
            np.array([(0, 0, 0), (1, 1, 1), (0, 0, 0), (1, 1, 1), (1, 1, 1)],
                     dtype=bool))
        # Used to be mask, now it's recordmask!
        assert_equal(mbase.recordmask, [0, 1, 0, 1, 1])
        # Set slices .................................
        mbase = base.view(mrecarray).copy()
        mbase[:2] = (5, 5, 5)
        assert_equal(mbase.a._data, [5, 5, 3, 4, 5])
        assert_equal(mbase.a._mask, [0, 0, 0, 0, 1])
        assert_equal(mbase.b._data, [5., 5., 3.3, 4.4, 5.5])
        assert_equal(mbase.b._mask, [0, 0, 0, 0, 1])
        assert_equal(mbase.c._data,
                     [b'5', b'5', b'three', b'four', b'five'])
        assert_equal(mbase.b._mask, [0, 0, 0, 0, 1])

        mbase = base.view(mrecarray).copy()
        mbase[:2] = masked
        assert_equal(mbase.a._data, [1, 2, 3, 4, 5])
        assert_equal(mbase.a._mask, [1, 1, 0, 0, 1])
        assert_equal(mbase.b._data, [1.1, 2.2, 3.3, 4.4, 5.5])
        assert_equal(mbase.b._mask, [1, 1, 0, 0, 1])
        assert_equal(mbase.c._data,
                     [b'one', b'two', b'three', b'four', b'five'])
        assert_equal(mbase.b._mask, [1, 1, 0, 0, 1])

    def test_setslices_hardmask(self):
        # TestData setting slices w/ hardmask.
        base = self.base.copy()
        mbase = base.view(mrecarray)
        mbase.harden_mask()
        try:
            mbase[-2:] = (5, 5, 5)
            assert_equal(mbase.a._data, [1, 2, 3, 5, 5])
            assert_equal(mbase.b._data, [1.1, 2.2, 3.3, 5, 5.5])
            assert_equal(mbase.c._data,
                         [b'one', b'two', b'three', b'5', b'five'])
            assert_equal(mbase.a._mask, [0, 1, 0, 0, 1])
            assert_equal(mbase.b._mask, mbase.a._mask)
            assert_equal(mbase.b._mask, mbase.c._mask)
        except NotImplementedError:
            # OK, not implemented yet...
            pass
        except AssertionError:
            raise
        else:
            raise Exception("Flexible hard masks should be supported !")
        # Not using a tuple should crash
        try:
            mbase[-2:] = 3
        except (NotImplementedError, TypeError):
            pass
        else:
            raise TypeError("Should have expected a readable buffer object!")

    def test_hardmask(self):
        # Test hardmask
        base = self.base.copy()
        mbase = base.view(mrecarray)
        mbase.harden_mask()
        assert_(mbase._hardmask)
        mbase.mask = nomask
        assert_equal_records(mbase._mask, base._mask)
        mbase.soften_mask()
        assert_(not mbase._hardmask)
        mbase.mask = nomask
        # So, the mask of a field is no longer set to nomask...
        assert_equal_records(mbase._mask,
                             ma.make_mask_none(base.shape, base.dtype))
        assert_(ma.make_mask(mbase['b']._mask) is nomask)
        assert_equal(mbase['a']._mask, mbase['b']._mask)

    def test_pickling(self):
        # Test pickling
        base = self.base.copy()
        mrec = base.view(mrecarray)
        for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
            _ = pickle.dumps(mrec, protocol=proto)
            mrec_ = pickle.loads(_)
            assert_equal(mrec_.dtype, mrec.dtype)
            assert_equal_records(mrec_._data, mrec._data)
            assert_equal(mrec_._mask, mrec._mask)
            assert_equal_records(mrec_._mask, mrec._mask)

    def test_filled(self):
        # Test filling the array
        _a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int)
        _b = ma.array([1.1, 2.2, 3.3], mask=[0, 0, 1], dtype=float)
        _c = ma.array(['one', 'two', 'three'], mask=[0, 0, 1], dtype='|S8')
        ddtype = [('a', int), ('b', float), ('c', '|S8')]
        mrec = fromarrays([_a, _b, _c], dtype=ddtype,
                          fill_value=(99999, 99999., 'N/A'))
        mrecfilled = mrec.filled()
        assert_equal(mrecfilled['a'], np.array((1, 2, 99999), dtype=int))
        assert_equal(mrecfilled['b'], np.array((1.1, 2.2, 99999.),
                                               dtype=float))
        assert_equal(mrecfilled['c'], np.array(('one', 'two', 'N/A'),
                                               dtype='|S8'))

    def test_tolist(self):
        # Test tolist.
        _a = ma.array([1, 2, 3], mask=[0, 0, 1], dtype=int)
        _b = ma.array([1.1, 2.2, 3.3], mask=[0, 0, 1], dtype=float)
        _c = ma.array(['one', 'two', 'three'], mask=[1, 0, 0], dtype='|S8')
        ddtype = [('a', int), ('b', float), ('c', '|S8')]
        mrec = fromarrays([_a, _b, _c], dtype=ddtype,
                          fill_value=(99999, 99999., 'N/A'))

        assert_equal(mrec.tolist(),
                     [(1, 1.1, None), (2, 2.2, b'two'),
                      (None, None, b'three')])

    def test_withnames(self):
        # Test the creation w/ format and names
        x = mrecarray(1, formats=float, names='base')
        x[0]['base'] = 10
        assert_equal(x['base'][0], 10)

    def test_exotic_formats(self):
        # Test that 'exotic' formats are processed properly
        easy = mrecarray(1, dtype=[('i', int), ('s', '|S8'), ('f', float)])
        easy[0] = masked
        assert_equal(easy.filled(1).item(), (1, b'1', 1.))

        solo = mrecarray(1, dtype=[('f0', '<f8', (2, 2))])
        solo[0] = masked
        assert_equal(solo.filled(1).item(),
                     np.array((1,), dtype=solo.dtype).item())

        mult = mrecarray(2, dtype="i4, (2,3)float, float")
        mult[0] = masked
        mult[1] = (1, 1, 1)
        mult.filled(0)
        assert_equal_records(mult.filled(0),
                             np.array([(0, 0, 0), (1, 1, 1)],
                                      dtype=mult.dtype))
Example #33
    x = []
    y = []
    bottom = []
    for row in mcursor:
        delta = (sts - row[0]).days * 86400 + (sts - row[0]).seconds
        delta = delta / (6 * 3600.0)
        if row[1] is not None:
            x.append(0 - delta)
            y.append(row[1] / 25.4)
            bottom.append(ys)
        print(delta, row[0], row[1], precip)
    x.append(0)
    y.append(precip)
    bottom.append(ys)
    x = np.array(x)
    y = npma.array(y) / (npma.max(y) * 1.1)
    bottom = np.array(bottom)
    bars = ax.bar(x - 0.4, y, bottom=bottom)
    bars[-1].set_facecolor('r')
    ylabels.append(" %s\n%s in" % (sts.strftime("%-d %b %Y"), snow))
    
    ys += 1
    
for i in range(-13,1):
    ax.text(i, 1.2, '?', ha='center')
    
ax.grid(True)
ax.set_ylim(1,8)
ax.set_xticks(np.arange(-28,1,4))
ax.set_xticklabels([7,6,5,4,3,2,1,'Event'])
ax.set_xlim(-28.5,0.5)
Example #34
    def _run(self, frame, fgmask):
        """
        Run tracking method on current frame. 
        
        Internal reference - don't call this directly.      
        """

        ## initialize
        self.overlay = np.zeros_like(frame)
        self.overlay_bin = np.zeros(frame.shape[0:2], dtype=np.uint8)
        self.frame_df = pd.DataFrame()

        if self.remove_shadows:
            ret, fgmask = cv2.threshold(fgmask, 128, 255, cv2.THRESH_BINARY)

        ## blur
        fgmask = blur(fgmask, self.blur_kernel)

        ## threshold
        fgmask = threshold(
            fgmask, method="binary", invert=True, value=self.threshold_value
        )

        ## find contours
        ret, contours, hierarchy = cv2.findContours(
            fgmask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE
        )

        ## perform operations on contours
        if len(contours) > 0:
            list_contours, list_area, list_length, list_center_coordinates = (
                [],
                [],
                [],
                [],
            )
            df_list, df_column_names = [], []

            # check if contour matches min/max length provided
            for contour in contours:
                if contour.shape[0] > 4:
                    center, radius = cv2.minEnclosingCircle(contour)
                    length = int(radius * 2)
                    area = int(cv2.contourArea(contour))
                    if all(
                        [
                            length > self.min_length and length < self.max_length,
                            area > self.min_area and area < self.max_area,
                        ]
                    ):
                        list_length.append(length)
                        list_area.append(area)
                        list_contours.append(contour)
                        list_center_coordinates.append(center)

            if len(list_contours) > 0:
                # if single biggest contour:
                if self.mode == "single":
                    if len(contours) == 1:
                        pass
                    elif len(contours) > 1:
                        max_idx = np.argmax(list_length)
                        list_contours = [list_contours[max_idx]]
                        list_length = [list_length[max_idx]]
                        list_area = [list_area[max_idx]]
                        list_center_coordinates = [list_center_coordinates[max_idx]]

                list_x, list_y = [], []
                list_mask_check = []
                list_grayscale, list_grayscale_background = [], []
                list_b, list_g, list_r = [], [], []

                for contour, center in zip(list_contours, list_center_coordinates):

                    # operations
                    x = int(center[0])
                    y = int(center[1])
                    list_x.append(x)
                    list_y.append(y)

                    if "mask_bool" in vars(self):
                        # one row of mask-membership flags per contour
                        # (initialized once before the loop, so all contours are kept)
                        temp_list = []
                        for key, val in self.mask_bool.items():
                            temp_list.append(val[y, x])
                        list_mask_check.append(temp_list)

                    rx, ry, rw, rh = cv2.boundingRect(contour)
                    frame_roi = frame[ry : ry + rh, rx : rx + rw]
                    frame_roi_gray = cv2.cvtColor(frame_roi, cv2.COLOR_BGR2GRAY)
                    mask_roi = fgmask[ry : ry + rh, rx : rx + rw]

                    if any("grayscale" in o for o in self.operations):
                        grayscale = ma.array(
                            data=frame_roi_gray, mask=np.logical_not(mask_roi)
                        )
                        list_grayscale.append(int(np.mean(grayscale)))

                    if any("grayscale_background" in o for o in self.operations):
                        grayscale_background = ma.array(
                            data=frame_roi_gray, mask=mask_roi
                        )
                        if not grayscale_background.mask.all():
                            list_grayscale_background.append(
                                int(np.mean(grayscale_background))
                            )
                        else:
                            list_grayscale_background.append(9999)

                    if any("bgr" in o for o in self.operations):
                        b = ma.array(
                            data=frame_roi[:, :, 0], mask=np.logical_not(mask_roi)
                        )
                        list_b.append(int(np.mean(b)))
                        g = ma.array(
                            data=frame_roi[:, :, 1], mask=np.logical_not(mask_roi)
                        )
                        list_g.append(int(np.mean(g)))
                        r = ma.array(
                            data=frame_roi[:, :, 2], mask=np.logical_not(mask_roi)
                        )
                        list_r.append(int(np.mean(r)))

                    # drawing
                    self.overlay = cv2.drawContours(
                        self.overlay, [contour], 0, self.overlay_colour, -1
                    )  # Draw filled contour in mask
                    self.overlay = cv2.putText(
                        self.overlay,
                        self.label,
                        (x, y),
                        cv2.FONT_HERSHEY_SIMPLEX,
                        1,
                        self.overlay_colour,
                        1,
                        cv2.LINE_AA,
                    )
                    self.overlay = cv2.rectangle(
                        self.overlay,
                        (rx, ry),
                        (int(rx + rw), int(ry + rh)),
                        self.overlay_colour,
                        2,
                    )

                df_list = df_list + [list_x]
                df_list = df_list + [list_y]
                df_column_names = df_column_names + ["x", "y"]

                if any("diameter" in o for o in self.operations):
                    df_list = df_list + [list_length]
                    df_column_names.append("diameter")

                if any("area" in o for o in self.operations):
                    df_list = df_list + [list_area]
                    df_column_names.append("area")

                if any("grayscale" in o for o in self.operations):
                    df_list = df_list + [list_grayscale]
                    df_column_names.append("grayscale")

                if any("grayscale_background" in o for o in self.operations):
                    df_list = df_list + [list_grayscale_background]
                    df_column_names.append("grayscale_background")

                if any("bgr" in o for o in self.operations):
                    df_list = df_list + [list_b]
                    df_list = df_list + [list_g]
                    df_list = df_list + [list_r]
                    df_column_names = df_column_names + ["b", "g", "r"]

                frame_df = pd.DataFrame(data=df_list)
                frame_df = frame_df.transpose()
                frame_df.columns = df_column_names
                frame_df["label"] = self.label
                self.frame_df = frame_df

                if "mask_bool" in vars(self):
                    mask_df = pd.DataFrame(list_mask_check, columns=[*self.mask_bool])
                    self.frame_df = pd.concat(
                        [frame_df.reset_index(drop=True), mask_df], axis=1
                    )

                self.contours = list_contours

                return fgmask, self.overlay, self.contours, self.frame_df

            else:
                return fgmask, self.overlay, [], self.frame_df

        else:
            return fgmask, self.overlay, [], self.frame_df
Example #35
0
File: core.py Project: welterde/astroquery
    def get_catalog(self):
        """
        Download catalog of ALFALFA source properties.

        Notes
        -----
        This catalog has ~15,000 entries, so after it's downloaded, it is made
        global to save some time later.

        Returns
        -------
        result : dict
            Dictionary of results; each element is a masked array.
        """

        if hasattr(self, 'ALFALFACAT'):
            return self.ALFALFACAT

        result = requests.get(self.CATALOG_PREFIX)
        iterable_lines = result.iter_lines()

        # Read header
        cols = next(iterable_lines).rstrip('\n').split(',')

        catalog = {}
        for col in cols:
            catalog[col] = []

        # Parse result
        for line in iterable_lines:
            # skip blank lines or trailing newlines
            if line == "":
                continue
            l = line.rstrip('\n').split(',')
            for i, col in enumerate(cols):
                item = l[i].strip()
                if item == '\"\"':
                    catalog[col].append(self.PLACEHOLDER)
                elif item.isdigit():
                    catalog[col].append(int(item))
                elif item.replace('.', '').isdigit():
                    catalog[col].append(float(item))
                else:
                    catalog[col].append(item)

        result.close()

        # Mask out blank elements
        for col in cols:
            mask = np.zeros(len(catalog[col]), dtype='bool')
            # need to turn list -> array for boolean comparison
            colArr = np.array(catalog[col])
            # placeholder must share Type with the array
            ph = np.array(self.PLACEHOLDER, dtype=colArr.dtype)
            mask[colArr == ph] = True
            catalog[col] = ma.array(catalog[col], mask=mask)

        # Make this globally available so we don't have to re-download it
        # again in this session
        self.ALFALFACAT = catalog

        return catalog
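A self-contained sketch of the placeholder-masking step at the end (the PLACEHOLDER value below is invented; self.PLACEHOLDER is defined elsewhere in the class):

import numpy as np
import numpy.ma as ma

PLACEHOLDER = -999999  # invented sentinel for this sketch
column = [12, PLACEHOLDER, 47, PLACEHOLDER]

colArr = np.array(column)
ph = np.array(PLACEHOLDER, dtype=colArr.dtype)  # placeholder must share dtype with the array
masked_col = ma.array(column, mask=(colArr == ph))
print(masked_col.mean())  # 29.5 -- placeholder entries are ignored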
Example #36
0
def _find_mean_melting_alt(obs: ClassData,
                           melting_layer: np.ndarray) -> np.ndarray:
    assert melting_layer.dtype == bool
    alt_array = np.tile(obs.height, (len(obs.time), 1))
    melting_alts = ma.array(alt_array, mask=~melting_layer)
    return ma.median(melting_alts, axis=1)
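The function keeps only the altitude bins flagged as melting layer and reduces each time step with ma.median. A minimal standalone illustration of the same masking pattern (the 3x4 arrays are invented for the demo):

import numpy as np
import numpy.ma as ma

height = np.array([100., 200., 300., 400.])        # common altitude grid
alt_array = np.tile(height, (3, 1))                # one row per time step
melting_layer = np.array([[False, True, True, False],
                          [False, False, True, False],
                          [False, False, False, False]])

melting_alts = ma.array(alt_array, mask=~melting_layer)
print(ma.median(melting_alts, axis=1))             # [250.0 300.0 --]; the all-False row stays masked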
Example #37
0
 def test_with_masked_column_uniform(self):
     "Test masked column"
     data = StringIO.StringIO('1 2 3\n4 5 6\n')
     test = np.genfromtxt(data, missing='2,5', dtype=None, usemask=True)
     control = ma.array([[1, 2, 3], [4, 5, 6]], mask=[[0, 1, 0], [0, 1, 0]])
     assert_equal(test, control)
Example #38
0
    def __call__(self, X, alpha=None, bytes=False):
        """
        *X* is either a scalar or an array (of any dimension).
        If scalar, a tuple of rgba values is returned, otherwise
        an array with the new shape = oldshape+(4,). If the X-values
        are integers, then they are used as indices into the array.
        If they are floating point, then they must be in the
        interval (0.0, 1.0).
        Alpha must be a scalar between 0 and 1, or None.
        If bytes is False, the rgba values will be floats on a
        0-1 scale; if True, they will be uint8, 0-255.
        """

        if not self._isinit: self._init()
        mask_bad = None
        if not cbook.iterable(X):
            vtype = 'scalar'
            xa = np.array([X])
        else:
            vtype = 'array'
            # force a copy here -- the ma.array and filled functions
            # do force a copy of the data by default - JDH
            xma = ma.array(X, copy=True)
            xa = xma.filled(0)
            mask_bad = ma.getmask(xma)
        if xa.dtype.char in np.typecodes['Float']:
            np.putmask(xa, xa == 1.0,
                       0.9999999)  #Treat 1.0 as slightly less than 1.
            # The following clip is fast, and prevents possible
            # conversion of large positive values to negative integers.

            if NP_CLIP_OUT:
                np.clip(xa * self.N, -1, self.N, out=xa)
            else:
                xa = np.clip(xa * self.N, -1, self.N)

            # ensure that all 'under' values will still have negative
            # value after casting to int
            np.putmask(xa, xa < 0.0, -1)
            xa = xa.astype(int)
        # Set the over-range indices before the under-range;
        # otherwise the under-range values get converted to over-range.
        np.putmask(xa, xa > self.N - 1, self._i_over)
        np.putmask(xa, xa < 0, self._i_under)
        if mask_bad is not None and mask_bad.shape == xa.shape:
            np.putmask(xa, mask_bad, self._i_bad)
        if bytes:
            lut = (self._lut * 255).astype(np.uint8)
        else:
            lut = self._lut.copy()  # Don't let alpha modify original _lut.

        if alpha is not None:
            alpha = min(alpha, 1.0)  # alpha must be between 0 and 1
            alpha = max(alpha, 0.0)
            if (lut[-1] == 0).all():
                lut[:-1, -1] = alpha
                # All zeros is taken as a flag for the default bad
                # color, which is no color--fully transparent.  We
                # don't want to override this.
            else:
                lut[:, -1] = alpha
                # If the bad value is set to have a color, then we
                # override its alpha just as for any other value.

        rgba = np.empty(shape=xa.shape + (4, ), dtype=lut.dtype)
        lut.take(xa, axis=0, mode='clip', out=rgba)
        #  twice as fast as lut[xa];
        #  using the clip or wrap mode and providing an
        #  output array speeds it up a little more.
        if vtype == 'scalar':
            rgba = tuple(rgba[0, :])
        return rgba
Example #39
0
def posterior_function_many_stars_real(changing_parameter, error_list,
                                       error_element_list):
    '''
    This is the actual posterior function for many stars; its functionality is explained in posterior_function_many_stars.
    '''
    import numpy.ma as ma
    from .cem_function import get_prior, posterior_function_returning_predictions
    from .data_to_test import likelihood_evaluation, read_out_wildcard
    from .parameter import ModelParameters

    ## Initialising the model parameters
    a = ModelParameters()

    ## extracting from 'changing_parameters' the global parameters and the local parameters
    global_parameters = changing_parameter[:len(a.SSP_parameters)]
    local_parameters = changing_parameter[len(a.SSP_parameters):]
    local_parameters = local_parameters.reshape(
        (len(a.stellar_identifier_list), len(a.ISM_parameters)))

    ## getting the prior for the global parameters in order to subtract it in the end for each time it was evaluated too much
    a.to_optimize = a.SSP_parameters_to_optimize
    global_parameter_prior = get_prior(global_parameters, a)

    ## Chempy is evaluated one after the other for each stellar identifier with the prescribed parameter combination and the element predictions for each star are stored
    predictions_list = []
    elements_list = []
    log_prior_list = []

    for i, item in enumerate(a.stellar_identifier_list):
        b = ModelParameters()
        b.stellar_identifier = item
        changing_parameter = np.hstack(
            (global_parameters, local_parameters[i]))
        args = (changing_parameter, b)
        abundance_list, element_list = posterior_function_returning_predictions(
            args)
        predictions_list.append(abundance_list)
        elements_list.append(element_list)
        log_prior_list.append(get_prior(changing_parameter, b))

    ## The wildcards are read out so that the predictions can be compared with the observations
    args = zip(a.stellar_identifier_list, predictions_list, elements_list)
    list_of_l_input = []
    for item in args:
        list_of_l_input.append(read_out_wildcard(*item))
        list_of_l_input[-1] = list(list_of_l_input[-1])

    ## Here the predictions and observations are brought into the same array form in order to perform the likelihood calculation fast
    elements = np.unique(np.hstack(elements_list))
    # Masking the elements that are not given for specific stars and preparing the likelihood input
    star_errors = ma.array(np.zeros(
        (len(elements), len(a.stellar_identifier_list))),
                           mask=True)
    star_abundances = ma.array(np.zeros(
        (len(elements), len(a.stellar_identifier_list))),
                               mask=True)
    model_abundances = ma.array(np.zeros(
        (len(elements), len(a.stellar_identifier_list))),
                                mask=True)

    for star_index, item in enumerate(list_of_l_input):
        for element_index, element in enumerate(item[0]):
            assert element in elements, 'observed element is not predicted by Chempy'
            new_element_index = np.where(elements == element)[0][0]
            star_errors[new_element_index, star_index] = item[1][element_index]
            model_abundances[new_element_index,
                             star_index] = item[2][element_index]
            star_abundances[new_element_index,
                            star_index] = item[3][element_index]

    ## given model error from error_list is read out and brought into the same element order (compatibility between python 2 and 3 makes the decode method necessary)
    if not a.error_marginalization:
        error_elements_decoded = []
        for item in error_element_list:
            error_elements_decoded.append(item.decode('utf8'))
        error_element_list = np.hstack(error_elements_decoded)

        error_list = np.hstack(error_list)
        model_error = []
        for element in elements:
            assert element in error_element_list, 'for this element the model error was not given, %s' % (
                element)
            model_error.append(
                error_list[np.where(error_element_list == element)])
        model_error = np.hstack(model_error)

    ## likelihood is calculated (the model error vector is expanded)
    if a.error_marginalization:
        from scipy.stats import beta
        likelihood_list = []
        model_errors = np.linspace(a.flat_model_error_prior[0],
                                   a.flat_model_error_prior[1],
                                   a.flat_model_error_prior[2])
        if a.beta_error_distribution[0]:
            error_weight = beta.pdf(model_errors,
                                    a=a.beta_error_distribution[1],
                                    b=a.beta_error_distribution[2])
            error_weight /= sum(error_weight)
        else:
            error_weight = np.ones_like(model_errors) * 1. / float(
                a.flat_model_error_prior[2])
        for i, item in enumerate(model_errors):
            error_temp = np.ones(len(elements)) * item
            likelihood_list.append(
                likelihood_evaluation(error_temp[:, None], star_errors,
                                      model_abundances, star_abundances))
        likelihood = logsumexp(likelihood_list, b=error_weight)
    else:
        if a.zero_model_error:
            model_error = np.zeros_like(model_error)
        likelihood = likelihood_evaluation(model_error[:, None], star_errors,
                                           model_abundances, star_abundances)

    ## Prior from all stars is added
    prior = sum(log_prior_list)
    ## Prior for global parameters is subtracted
    prior -= (len(a.stellar_identifier_list) - 1) * global_parameter_prior
    posterior = prior + likelihood
    assert not np.isnan(posterior), ('returned posterior = ', posterior,
                                     'prior = ', prior, 'likelihood = ',
                                     likelihood, 'changing parameter = ',
                                     changing_parameter)
    ########
    if a.verbose:
        print('prior = ', prior, 'likelihood = ', likelihood)

    return (posterior, model_abundances)
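The three masked arrays above start out fully masked; writing an observed value into a cell unmasks it, so the likelihood evaluation automatically skips (element, star) combinations with no data. A minimal sketch of that fill pattern:

import numpy as np
import numpy.ma as ma

elements = np.array(['Fe', 'Mg', 'O'])
n_stars = 2
star_abundances = ma.array(np.zeros((len(elements), n_stars)), mask=True)

# star 0 has only Fe and O measured; assigning a value unmasks that cell
for element, value in [('Fe', -0.1), ('O', 0.2)]:
    new_element_index = np.where(elements == element)[0][0]
    star_abundances[new_element_index, 0] = value

print(star_abundances)  # Mg for star 0 and all of star 1 remain masked ('--')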
Example #40
0
    def _binObs(self, q, im, jm):
        """
        Bins observations. Assumes a global GEOS-5 A-Grid: Lons in [-180,180), Lats in [-90,90].
        q can be 2D or 3D. If 3D, channel is first dimension and each channel must be valid to include in binning.
        """

        # detect 3D channel variable
        if q.ndim == 3:
            nc = q.shape[0]
        else:
            nc = None

        # remove any masked values or out-of-range values
        # a. acceptable lat, lon
        mask = self.lon.mask | self.lat.mask
        mask |= np.abs(self.lon) > 180.
        mask |= np.abs(self.lat) > 90.
        # b. apply optional pass filter
        if self.iFilter is not None:
            mask |= np.logical_not(self.iFilter)
        # c. mask out missing obs
        if nc is None:
            mask |= q.mask
        else:
            # all channels must be valid to pass
            for n in range(nc):
                mask |= q[n].mask
        # d. mask out and make 1D
        lon = ma.array(self.lon, mask=mask, keep_mask=False).compressed()
        lat = ma.array(self.lat, mask=mask, keep_mask=False).compressed()
        if nc is None:
            obs = ma.array(q, mask=mask, keep_mask=False).compressed()
        else:
            # form a list of the masked channels, each 1D
            obs = []
            for n in range(nc):
                obs.append(
                    ma.array(q[n], mask=mask, keep_mask=False).compressed())
            # put 1D index first, channels second
            obs = np.array(obs).T

        # bins
        lon[lon >= 180.] -= 360.
        dLon = 360. / im
        dLat = 180. / (jm - 1)
        ivals = np.rint((lon + 180.) / dLon).astype(int)
        jvals = np.rint((lat + 90.) / dLat).astype(int)
        ivals[ivals >= im] -= im
        ivals[ivals < 0] += im

        # do the binning
        cnts = np.zeros((im, jm), dtype=int)
        if nc is None:
            gObs = np.zeros((im, jm))
            for i, j, v in zip(ivals, jvals, obs):
                gObs[i, j] += v
                cnts[i, j] += 1
        else:
            gObs = np.zeros((im, jm, nc))
            for i, j, channels in zip(ivals, jvals, obs):
                gObs[i, j] += channels
                cnts[i, j] += 1

        # normalize & return
        hasData = (cnts > 0)
        if nc is None:
            gObs[hasData] /= cnts[hasData]
            gObs[~hasData] = self.missing
        else:
            gObs = np.transpose(gObs, (2, 0, 1))  # now (nc,im,jm)
            for n in range(nc):
                gObs[n][hasData] /= cnts[hasData]
                gObs[n][~hasData] = self.missing
            gObs = np.transpose(gObs, (1, 2, 0))  # now (im,jm,nc)
        return gObs
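The accumulate-and-normalize step at the end is the heart of the binning. A compact sketch with invented bin indices and a stand-in for self.missing:

import numpy as np

im, jm = 4, 3
ivals = np.array([0, 0, 2])        # invented bin indices
jvals = np.array([1, 1, 2])
obs = np.array([10., 20., 7.])     # invented observations
missing = -999.                    # stand-in for self.missing

gObs = np.zeros((im, jm))
cnts = np.zeros((im, jm), dtype=int)
for i, j, v in zip(ivals, jvals, obs):
    gObs[i, j] += v
    cnts[i, j] += 1

hasData = cnts > 0
gObs[hasData] /= cnts[hasData]     # cell mean where observations exist
gObs[~hasData] = missing           # fill value elsewhere
print(gObs[0, 1], gObs[2, 2])      # 15.0 7.0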
Example #41
0
 def result(traj):
     psi = traj.getMaskedPosture(traj.psi)
     return dotacf(
         ma.array([ma.cos(psi), ma.sin(psi)]).T, lags,
         traj.excluded)
Example #42
0
def global_optimization_real(changing_parameter, result):
    '''
	This function calculates the predictions from several Chempy zones in parallel. It also calculates the likelihood for common model errors
	BEWARE: Model parameters are called as saved in parameters.py!!!

	INPUT:

	   changing_parameter = the global SSP parameters (parameters that all stars share)

	   result = the complete parameter set is handed over as an array of shape(len(stars),len(all parameters)). From those the local ISM parameters are taken
	
	OUTPUT:

	   -posterior = negative log posterior for all stellar zones

	   error_list = the optimal standard deviation of the model error

	   elements = the corresponding element symbols
	'''
    import multiprocessing as mp
    import numpy.ma as ma
    from scipy.stats import beta
    from .cem_function import get_prior, posterior_function_returning_predictions
    from .data_to_test import likelihood_evaluation
    from .parameter import ModelParameters

    ## Calculating the prior
    a = ModelParameters()
    a.to_optimize = a.SSP_parameters_to_optimize
    prior = get_prior(changing_parameter, a)

    ## Handing over to posterior_function_returning_predictions
    parameter_list = []
    p0_list = []
    for i, item in enumerate(a.stellar_identifier_list):
        parameter_list.append(ModelParameters())
        parameter_list[-1].stellar_identifier = item
        p0_list.append(
            np.hstack((changing_parameter, result[i, len(a.SSP_parameters):])))
    args = zip(p0_list, parameter_list)
    p = mp.Pool(len(parameter_list))
    t = p.map(posterior_function_returning_predictions, args)
    p.close()
    p.join()
    z = np.array(t)
    # Predictions including element symbols are returned

    # Reading out the wildcards
    elements = np.unique(np.hstack(z[:, 1]))
    from Chempy.data_to_test import read_out_wildcard
    args = zip(a.stellar_identifier_list, z[:, 0], z[:, 1])
    list_of_l_input = []
    for item in args:
        list_of_l_input.append(read_out_wildcard(*item))
        list_of_l_input[-1] = list(list_of_l_input[-1])
    # Now the input for the likelihood evaluating function is almost ready

    # Masking the elements that are not given for specific stars and preparing the likelihood input
    star_errors = ma.array(np.zeros(
        (len(elements), len(a.stellar_identifier_list))),
                           mask=True)
    star_abundances = ma.array(np.zeros(
        (len(elements), len(a.stellar_identifier_list))),
                               mask=True)
    model_abundances = ma.array(np.zeros(
        (len(elements), len(a.stellar_identifier_list))),
                                mask=True)

    for star_index, item in enumerate(list_of_l_input):
        for element_index, element in enumerate(item[0]):
            assert element in elements, 'observed element is not predicted by Chempy'
            new_element_index = np.where(elements == element)[0][0]
            star_errors[new_element_index, star_index] = item[1][element_index]
            model_abundances[new_element_index,
                             star_index] = item[2][element_index]
            star_abundances[new_element_index,
                            star_index] = item[3][element_index]

    # Brute force testing of a few model errors
    model_errors = np.linspace(a.flat_model_error_prior[0],
                               a.flat_model_error_prior[1],
                               a.flat_model_error_prior[2])
    if a.beta_error_distribution[0]:
        error_weight = beta.pdf(model_errors,
                                a=a.beta_error_distribution[1],
                                b=a.beta_error_distribution[2])
        error_weight /= sum(error_weight)
    else:
        error_weight = np.ones_like(model_errors) * 1. / float(
            a.flat_model_error_prior[2])
    error_list = []
    likelihood_list = []
    for i, element in enumerate(elements):
        error_temp = []
        for item in model_errors:
            error_temp.append(
                likelihood_evaluation(item, star_errors[i],
                                      model_abundances[i], star_abundances[i]))
        error_temp = np.hstack(error_temp)
        cut = int(np.argmax(error_temp))  # best model error; first maximum on ties
        error_list.append(float(model_errors[cut]))
        ## Adding the marginalization over the model error (within the prior borders). Taking the average of the likelihoods (they are log likelihoods so exp needs to be called)
        if a.error_marginalization:
            likelihood_list.append(logsumexp(error_temp, b=error_weight))
        else:
            if a.zero_model_error:
                likelihood_list.append(error_temp[0])
            else:
                likelihood_list.append(np.max(error_temp))

    error_list = np.hstack(error_list)
    likelihood_list = np.hstack(likelihood_list)
    likelihood = np.sum(likelihood_list)

    # returning the best likelihood together with the prior as posterior
    return (-(prior + likelihood), error_list, elements)
Example #43
0
def cost_matrix(tl, tr, t_max=0.5):
    """Given arrays of left and right trajectory objects tl and tr, with nl = len(tl) and nr = len(tr),
    generate an (nl + nr) x (nl + nr) matrix whose upper left nl x nr block C_ij holds the costs of
    linking trajectory tl[i] to trajectory tr[j]. The upper right nl x nl block is a diagonal matrix
    giving each trajectory from the left an opportunity to null link. The bottom left nr x nr block is
    a diagonal matrix giving each trajectory from the right a null link opportunity. The bottom right
    nr x nl block is a grid matrix where every element allows any null link to pair with any other."""

    import time
    t0 = time.time()
    from numpy import ma
    nl = len(tl)
    nr = len(tr)
    l_traj_times = np.fromiter(map(lambda t: t.T, tl), dtype=float)
    r_traj_times = np.fromiter(map(lambda t: t.T, tr), dtype=float)
    # mask flagging pairs whose median times are close enough
    mask = np.abs(l_traj_times.reshape(-1, 1) - r_traj_times) < t_max
    # indices corresponding to nonzero data in the cost matrix
    i1, j1 = ma.MaskedArray.nonzero(ma.array(mask))

    # now make a new sparse matrix with shape of map which is filled with a cost(tl[i],tr[j]) if mask[i,j] is true
    traj_mat = cart_cross(tl, tr)  # matrix of pairs of trajectories

    # this is the set of all costs
    cost_data1 = np.fromiter(map(lambda q: cost(q[0], q[1]), traj_mat[i1, j1]),
                             dtype=float)  # this generates the costs

    # now generate all indices associated with a null link possibility

    # bottom right
    i2, j2 = np.indices((nr, nl))
    i2, j2 = i2.flatten() + nl, j2.flatten() + nr
    cost_data2 = np.ones(i2.size, dtype=float) * cost(None, None, 'dummy')

    # top right
    i3 = np.fromiter(range(nl), dtype=int)
    j3 = np.fromiter(range(nl), dtype=int)
    j3 = j3 + nr
    cost_data3 = np.ones(i3.size, dtype=float) * cost(None, None, 'dummy')

    # bottom left
    i4 = np.fromiter(range(nr), dtype=int)
    j4 = np.fromiter(range(nr), dtype=int)
    i4 = i4 + nl
    cost_data4 = np.ones(i4.size, dtype=float) * cost(None, None, 'dummy')

    # now make the full set of cost data for the matrix generation
    cost_data = np.concatenate(
        (cost_data1, cost_data2, cost_data3, cost_data4))
    i = np.concatenate((i1, i2, i3, i4))
    j = np.concatenate((j1, j2, j3, j4))

    # now you have the cost matrix and all real links are filled with their proper values
    N = nl + nr  # dimension of the output square matrix
    costs = scipy.sparse.csc_matrix(
        (cost_data, (i, j)), shape=(N, N),
        dtype=float)  # and this puts them in a sparse matrix
    print('%d x %d cost matrix generated in %d seconds' %
          (N, N, time.time() - t0))

    costs = costs.toarray()

    return -costs
Example #44
0
 def getBearingAutocorrelation(self, maxT=100):
     n = int(np.round(maxT * self.frameRate))
     tau = np.arange(n) / self.frameRate
     psi = self.getMaskedPosture(self.psi)
     C = dotacf(ma.array([ma.cos(psi), ma.sin(psi)]), n)
     return tau, C
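dotacf is project-specific and not shown here; on the stacked (cos psi, sin psi) columns it amounts to the dot-product autocorrelation <cos(psi(t + tau) - psi(t))>. A rough unmasked stand-in, just to make the quantity concrete:

import numpy as np

def heading_autocorrelation(psi, n):
    """<cos(psi(t + tau) - psi(t))> for lags 0..n-1 (plain-ndarray sketch, not the real dotacf)."""
    v = np.column_stack([np.cos(psi), np.sin(psi)])  # unit heading vectors
    return np.array([np.mean(np.sum(v[k:] * v[:len(v) - k], axis=1))
                     for k in range(n)])

psi = np.cumsum(np.random.normal(0, 0.1, size=500))  # random-walk heading
C = heading_autocorrelation(psi, 50)
print(C[0])  # 1.0 at zero lag by construction; decays with lag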
Example #45
0
###################################################################################################
#                                                                                                 #
#  Figure S1:  Maps of RPSS comparing different CSGD implementations                              #
#                                                                                                 #
###################################################################################################


acfRv1 = np.zeros((3,15),dtype=np.float32)
acfRv2 = np.zeros((3,15),dtype=np.float32)
pvalRv1 = np.zeros((3,nxy),dtype=np.float32)
pvalRv2 = np.zeros((3,nxy),dtype=np.float32)
alphaFDRrv1 = np.zeros(3,dtype=np.float32)
alphaFDRrv2 = np.zeros(3,dtype=np.float32)

rpssMapCSGD = ma.array(np.zeros((3,nxy),dtype=np.float32),mask=True)
rpssMapCSGDrv1 = ma.array(np.zeros((3,nxy),dtype=np.float32),mask=True)
rpssMapCSGDrv2 = ma.array(np.zeros((3,nxy),dtype=np.float32),mask=True)

rpssAvgCSGD = ma.array(np.zeros(3,dtype=np.float32),mask=True)
rpssAvgCSGDrv1 = ma.array(np.zeros(3,dtype=np.float32),mask=True)
rpssAvgCSGDrv2 = ma.array(np.zeros(3,dtype=np.float32),mask=True)

for ilead in range(3):
    f1 = np.load("/home/michael/Desktop/CalifAPCP/results/scores-ann_week"+str(ilead+2)+".npz")
    Bs33Clm = f1['Bs33pClm']
    Bs33CSGD = f1['Bs33pCSGD']
    Bs67Clm = f1['Bs67pClm']
    Bs67CSGD = f1['Bs67pCSGD']
    Bs85Clm = f1['Bs85pClm']
    Bs85CSGD = f1['Bs85pCSGD']
Example #46
0
 def getMaskedCentroid(self, data):
     data = ma.array(data)
     sel = self.badFrames | self.excluded
     data[sel, ...] = ma.masked
     data[np.isnan(data)] = ma.masked
     return data
Example #47
0
File: algo.py Project: sanghack81/KRCIT
def med_except_diag(x):
    D_squared = euclidean_distances(x, squared=True)
    # masking upper triangle and the diagonal.
    mask = np.triu(np.ones(D_squared.shape), 0)
    median_squared_distance = ma.median(ma.array(D_squared, mask=mask))
    return median_squared_distance
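A quick check of the function above; euclidean_distances comes from sklearn.metrics.pairwise in the source project, so this sketch substitutes a plain-numpy equivalent to stay self-contained:

import numpy as np
import numpy.ma as ma

x = np.array([[0.0], [1.0], [3.0]])
D_squared = (x - x.T) ** 2                        # squared pairwise distances
mask = np.triu(np.ones(D_squared.shape), 0)       # mask upper triangle and diagonal
print(ma.median(ma.array(D_squared, mask=mask)))  # median of {1, 9, 4} -> 4.0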
Example #48
0
def random_features_with_nans(shape, nan_proportion):
    features = np.random.random(shape)
    nan_mask = random_split_mask(shape, UnitSplit.from_first(nan_proportion))
    masked_data = ma.array(features, mask=nan_mask)
    return masked_data.filled(np.NAN)
Example #49
0
 def setUp(self):
     self.real_array = np.array(123)
     masked_array = ma.array([0, 1], mask=[0, 1])
     self.lazy_array = as_lazy_data(masked_array)
     self.name = DataManager.__name__
Example #50
0
    def join_many(cls,
                  specs,
                  mk_arr=None,
                  nonlinear=False,
                  maxgap=0,
                  fill=JOIN_REPEAT):
        """Produce new Spectrogram that contains spectrograms
        joined together in time.

        Parameters
        ----------
        specs : list
            List of spectrograms to join together in time.
        nonlinear : bool
            If True, leave out gaps between spectrograms. Else, fill them with
            the value specified in fill.
        maxgap : float, int or None
            Largest gap to allow, in seconds. If None, allow gap of arbitrary
            size.
        fill : float or int
            Value to fill missing values (assuming nonlinear=False) with.
            Can be LinearTimeSpectrogram.JOIN_REPEAT to repeat the values for
            the time just before the gap.
        mk_arr : function
            Function that is called to create the resulting array. Can be set
            to LinearTimeSpectrogram.memap(filename) to create a memory mapped
            result array.
        """
        # XXX: Only load header and load contents of files
        # on demand.
        mask = None

        if mk_arr is None:
            mk_arr = cls.make_array

        specs = sorted(specs, key=lambda x: x.start)

        freqs = specs[0].freq_axis
        if not all(np.array_equal(freqs, sp.freq_axis) for sp in specs):
            raise ValueError("Frequency channels do not match.")

        # Smallest time-delta becomes the common time-delta.
        min_delt = min(sp.t_delt for sp in specs)
        dtype_ = max(sp.dtype for sp in specs)

        specs = [sp.resample_time(min_delt) for sp in specs]
        size = sum(sp.shape[1] for sp in specs)

        data = specs[0]
        start_day = data.start

        xs = []
        last = data
        for elem in specs[1:]:
            e_init = (SECONDS_PER_DAY *
                      (get_day(elem.start) - get_day(start_day)).days +
                      elem.t_init)
            x = int((e_init - last.t_init) / min_delt)
            xs.append(x)
            diff = last.shape[1] - x

            if maxgap is not None and -diff > maxgap / min_delt:
                raise ValueError("Too large gap.")

            # If we leave out undefined values, we do not want to
            # add values here if x > t_res.
            if nonlinear:
                size -= max(0, diff)
            else:
                size -= diff

            last = elem

        # The non existing element after the last one starts after
        # the last one. Needed to keep implementation below sane.
        xs.append(specs[-1].shape[1])

        # We do that here so the user can pass a memory mapped
        # array if they'd like to.
        arr = mk_arr((data.shape[0], size), dtype_)
        time_axis = np.zeros((size, ))
        sx = 0
        # Amount of pixels left out due to non-linearity. Needs to be
        # considered for correct time axes.
        sd = 0
        for x, elem in zip(xs, specs):
            diff = x - elem.shape[1]
            e_time_axis = elem.time_axis

            elem = elem.data

            if x > elem.shape[1]:
                if nonlinear:
                    x = elem.shape[1]
                else:
                    # If we want to stay linear, fill up the missing
                    # pixels with placeholder zeros.
                    filler = np.zeros((data.shape[0], diff))
                    if fill is cls.JOIN_REPEAT:
                        filler[:, :] = elem[:, -1, np.newaxis]
                    else:
                        filler[:] = fill
                    minimum = e_time_axis[-1]
                    e_time_axis = np.concatenate([
                        e_time_axis,
                        np.linspace(minimum + min_delt,
                                    minimum + diff * min_delt, diff)
                    ])
                    elem = np.concatenate([elem, filler], 1)
            arr[:, sx:sx + x] = elem[:, :x]

            if diff > 0:
                if mask is None:
                    mask = np.zeros((data.shape[0], size), dtype=np.uint8)
                mask[:, sx + x - diff:sx + x] = 1
            time_axis[sx:sx + x] = e_time_axis[:x] + data.t_delt * (sx + sd)
            if nonlinear:
                sd += max(0, diff)
            sx += x
        params = {
            'time_axis': time_axis,
            'freq_axis': data.freq_axis,
            'start': data.start,
            'end': specs[-1].end,
            't_delt': data.t_delt,
            't_init': data.t_init,
            't_label': data.t_label,
            'f_label': data.f_label,
            'content': data.content,
            'instruments': _union(spec.instruments for spec in specs),
        }
        if mask is not None:
            arr = ma.array(arr, mask=mask)
        if nonlinear:
            del params['t_delt']
            return Spectrogram(arr, **params)
        return common_base(specs)(arr, **params)
Example #51
0
def average_over_time_then_space(space_time):
    space, weight = average_over_time(space_time)
    space_masked = ma.masked_less(space, NDVI_THRESHOLD)
    weight_masked = ma.array(weight, mask=space_masked.mask)
    return space_masked.mean(), weight_masked.mean()
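The helper masks sub-threshold pixels in one array and reuses that mask for the weights, so both means run over the same cells. A tiny demonstration with an invented NDVI_THRESHOLD:

import numpy as np
import numpy.ma as ma

NDVI_THRESHOLD = 0.2                       # invented for this sketch
space = np.array([0.1, 0.5, 0.3, 0.05])    # e.g. time-averaged NDVI per pixel
weight = np.array([1.0, 2.0, 3.0, 4.0])

space_masked = ma.masked_less(space, NDVI_THRESHOLD)
weight_masked = ma.array(weight, mask=space_masked.mask)  # same cells masked
print(space_masked.mean(), weight_masked.mean())          # 0.4 2.5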
Example #52
0
def analysis_gaussian_histogram(data_array,
                                bin_width,
                                mask_filter=None,
                                run=True):
    """ This generates the dictionary which is the results from the
    analysis of the data to create a heat-map of the data.

    Parameters
    ----------
    data_array : ndarray
        The array of data that will be used to compute the heat-map
        results.
    bin_width : float
        The bin width of the histogram bars.
    mask_filter : ndarray (optional)
        The array of mask and filter values to consider in the 
        calculations. True values denote the mask/filter is applied, 
        False otherwise.
    run : boolean (optional)
        If True, the analysis is run, else it is not and will exit
        with None.

    Returns
    -------
    histogram_results : dictionary
        The results of the histogram analysis.

    """
    # The data object to store the analysis.
    histogram_results = {}

    # See if the analysis should even be run.
    if (not run):
        core.error.ifas_info("The `run` flag is False regarding histogram "
                             "analysis. Nothing is done, the run flag "
                             "in the results is set to False.")
        histogram_results['histogram_run'] = False
        return histogram_results
    else:
        histogram_results['histogram_run'] = True
    # Continue with analysis.

    # Check that the bin-width is a usable value.
    if (bin_width <= 0):
        raise core.error.InputError("The bin width must be a positive "
                                    "non-zero number. It is currently: "
                                    "{bin_val}".format(bin_val=bin_width))

    # Combine the data and the mask for better management.
    masked_data = np_ma.array(data_array, mask=mask_filter)

    # Calculate the histogram values.
    hist_values, hist_bins = np.histogram(
        masked_data.compressed(),
        bins=core.math.generate_numpy_bin_width_array(
            data_array=masked_data.compressed(), bin_width=bin_width))

    # Save the histogram values to the results. The modified keys
    # are needed as Astropy Header cards cannot use both the HIERARCH
    # and CONTINUE cards at the same time.
    histogram_results['HIST_BIN'] = str(hist_bins.tolist())
    histogram_results['HIST_VAL'] = str(hist_values.tolist())

    # Calculate the Gaussian fit values. The compressed function
    # allows the histogram to ignore masked values.
    gauss_funct, gauss_param = core.model.fit_histogram_gaussian_function(
        data_array=masked_data.compressed(), bin_width=bin_width)

    # Save the Gaussian values to the results.
    for keydex, paramdex in gauss_param.items():
        histogram_results.update({''.join(['histogram_', keydex]): paramdex})

    # The bin width is also important information for plotting.
    histogram_results['histogram_bin_width'] = bin_width

    # All done.
    return histogram_results
Example #53
0
pyplot.subplot(1, 3, 3)
pyplot.imshow(growing_seed_Threshold_arr)
pyplot.title('Image Threshold')

pyplot.show()

#%%
# mask Array
maskedArray = growing_seed_Threshold_arr
maskedArray[maskedArray == 1] = 2
maskedArray[maskedArray == 0] = 1
maskedArray[maskedArray == 2] = 0
croppedMaskedImageNDVI_array = numpy.array(croppedMaskedImageNDVI[0][0])
croppedMaskedImageNDRE_array = numpy.array(croppedMaskedImageNDRE[0][0])
maskedArray_ndvi = ma.array(croppedMaskedImageNDVI_array,
                            mask=maskedArray,
                            copy=True)
maskedArray_ndre = ma.array(croppedMaskedImageNDRE_array,
                            mask=maskedArray,
                            copy=True)
#plot the figures

pyplot.figure()
pyplot.subplot(1, 4, 1)
pyplot.imshow(croppedMaskedImageNDVI[0][0])
pyplot.title('NDVI image')

pyplot.subplot(1, 4, 2)
pyplot.imshow(maskedArray_ndvi)
pyplot.title('Masked Image NDVI')
Example #54
0
File: ss2009.py Project: ourobouros/WRed
        #    print 'currfile',currfile
        #    flist.append(currfile) 
            
    
        #flist = SU.ffind(mydirectory, shellglobs=(myfilebaseglob,))
        #SU.printr(flist)
        E,l,counts,counts_err,mon0=readfiles(flist)
        
        #p,perror,pcerror,chisq=fitpeak(tth[0],counts[0],counts_err[0])
        #print 'p',p,perror,pcerror, chisq
        #sys.exit()
        new_tth,new_T,new_counts=regrid2(E,l,counts)
        #x,y,z=grid(new_tth,new_T,new_counts)
        x=N.array(new_tth)
        y=N.array(new_T)
        z=ma.array(new_counts)

        #QX,QZ=N.meshgrid(qx,qz)
        cmap=pylab.cm.jet
        import matplotlib.ticker as ticker
        zmin, zmax = 0, 5000
        locator = ticker.MaxNLocator(10) # if you want no more than 10 contours
        locator.create_dummy_axis()
        #locator.set_bounds(zmin, zmax)
        levs = locator()
        #levs=N.linspace(zmin,zmax,10)
        #levs=N.concatenate((levs,[3000]))
        pylab.subplot(1,2,1)
        mycontour=pylab.contourf(x,y,z,levs)#,
        #levs.set_bounds(zmin, zmax)
        #mycontour=pylab.contourf(x,y,z,35,extent=(17,19.6,y.min(),y.max()))#,cmap=pylab.cm.jet)
Example #55
0
 def __init__(self):
     self.time = np.arange(5)
     self.lwp = ma.array([1, 2, 3, 4, 5], mask=True)
Example #56
0
def merge_arrays(seqarrays,
                 fill_value=-1,
                 flatten=False,
                 usemask=False,
                 asrecarray=False):
    """
    Merge arrays field by field.

    Parameters
    ----------
    seqarrays : sequence of ndarrays
        Sequence of arrays
    fill_value : {float}, optional
        Filling value used to pad missing data on the shorter arrays.
    flatten : {False, True}, optional
        Whether to collapse nested fields.
    usemask : {False, True}, optional
        Whether to return a masked array or not.
    asrecarray : {False, True}, optional
        Whether to return a recarray (MaskedRecords) or not.

    Examples
    --------
    >>> from numpy.lib import recfunctions as rfn
    >>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])))
    masked_array(data = [(1, 10.0) (2, 20.0) (--, 30.0)],
                 mask = [(False, False) (False, False) (True, False)],
           fill_value = (999999, 1e+20),
                dtype = [('f0', '<i4'), ('f1', '<f8')])

    >>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])),
    ...              usemask=False)
    array([(1, 10.0), (2, 20.0), (-1, 30.0)],
          dtype=[('f0', '<i4'), ('f1', '<f8')])
    >>> rfn.merge_arrays((np.array([1, 2]).view([('a', int)]),
    ...               np.array([10., 20., 30.])),
    ...              usemask=False, asrecarray=True)
    rec.array([(1, 10.0), (2, 20.0), (-1, 30.0)],
              dtype=[('a', '<i4'), ('f1', '<f8')])

    Notes
    -----
    * Without a mask, the missing value will be filled with something
      depending on its corresponding type:
            -1      for integers
            -1.0    for floating point numbers
            '-'     for characters
            '-1'    for strings
            True    for boolean values
    * XXX: I just obtained these values empirically
    """
    # Only one item in the input sequence ?
    if (len(seqarrays) == 1):
        seqarrays = np.asanyarray(seqarrays[0])
    # Do we have a single ndarray as input ?
    if isinstance(seqarrays, (ndarray, np.void)):
        seqdtype = seqarrays.dtype
        if (not flatten) or \
           (zip_descr((seqarrays,), flatten=True) == seqdtype.descr):
            # Minimal processing needed: just make sure everything's a-ok
            seqarrays = seqarrays.ravel()
            # Make sure we have named fields
            if not seqdtype.names:
                seqdtype = [('', seqdtype)]
            # Find what type of array we must return
            if usemask:
                if asrecarray:
                    seqtype = MaskedRecords
                else:
                    seqtype = MaskedArray
            elif asrecarray:
                seqtype = recarray
            else:
                seqtype = ndarray
            return seqarrays.view(dtype=seqdtype, type=seqtype)
        else:
            seqarrays = (seqarrays, )
    else:
        # Make sure we have arrays in the input sequence
        seqarrays = map(np.asanyarray, seqarrays)
    # Find the sizes of the inputs and their maximum
    sizes = tuple(a.size for a in seqarrays)
    maxlength = max(sizes)
    # Get the dtype of the output (flattening if needed)
    newdtype = zip_descr(seqarrays, flatten=flatten)
    # Initialize the sequences for data and mask
    seqdata = []
    seqmask = []
    # If we expect some kind of MaskedArray, make a special loop.
    if usemask:
        for (a, n) in itertools.izip(seqarrays, sizes):
            nbmissing = (maxlength - n)
            # Get the data and mask
            data = a.ravel().__array__()
            mask = ma.getmaskarray(a).ravel()
            # Get the filling value (if needed)
            if nbmissing:
                fval = _check_fill_value(fill_value, a.dtype)
                if isinstance(fval, (ndarray, np.void)):
                    if len(fval.dtype) == 1:
                        fval = fval.item()[0]
                        fmsk = True
                    else:
                        fval = np.array(fval, dtype=a.dtype, ndmin=1)
                        fmsk = np.ones((1, ), dtype=mask.dtype)
            else:
                fval = None
                fmsk = True
            # Store an iterator padding the input to the expected length
            seqdata.append(itertools.chain(data, [fval] * nbmissing))
            seqmask.append(itertools.chain(mask, [fmsk] * nbmissing))
        # Create an iterator for the data
        data = tuple(izip_records(seqdata, flatten=flatten))
        output = ma.array(np.fromiter(data, dtype=newdtype, count=maxlength),
                          mask=list(izip_records(seqmask, flatten=flatten)))
        if asrecarray:
            output = output.view(MaskedRecords)
    else:
        # Same as before, without the mask we don't need...
        for (a, n) in itertools.izip(seqarrays, sizes):
            nbmissing = (maxlength - n)
            data = a.ravel().__array__()
            if nbmissing:
                fval = _check_fill_value(fill_value, a.dtype)
                if isinstance(fval, (ndarray, np.void)):
                    if len(fval.dtype) == 1:
                        fval = fval.item()[0]
                    else:
                        fval = np.array(fval, dtype=a.dtype, ndmin=1)
            else:
                fval = None
            seqdata.append(itertools.chain(data, [fval] * nbmissing))
        output = np.fromiter(tuple(izip_records(seqdata, flatten=flatten)),
                             dtype=newdtype,
                             count=maxlength)
        if asrecarray:
            output = output.view(recarray)
    # And we're done...
    return output
Example #57
0
Diag = nc4(FilePath + 'ocean_dia_2014_0005.nc', 'r')

Grd = nc4('/home/ablowe/runs/ncfiles/grids/wc15.a01.b03_grd.nc', 'r')

#create CV mask
RhoMask2D = mt.RhoMask(Avg, latbounds, lonbounds)
RhoMask = np.repeat(np.array(RhoMask2D)[np.newaxis, :, :],
                    Avg.variables['salt'].shape[1],
                    axis=0)

#load time for steping
time = Avg.variables['ocean_time'][:]

#load dA
dA_xy = ma.array(dff.dA_top(Avg), mask=RhoMask)

#integrate time derivative of s_prime^2
SprInt = np.empty(time.shape[0])
SprInt.fill(np.nan)
for t in range(time.shape[0]):
    #avg file variables at t
    salt = ma.array(Avg.variables['salt'][t, :, :, :], mask=RhoMask)
    s_prime = salt - salt.mean()

    #diff and mask are in opposite order
    deltaA = ma.array(ma.diff(dep._set_depth(AvgFile, None, 'w', \
                                             Avg.variables['h'][:], \
                                             Avg.variables['zeta'][t,:,:]),
                              n = 1, axis = 0), \
                      mask = RhoMask)
Example #58
0
    def read(self):
        logger = logging.getLogger('read')
        logger.info('Reading %s', self.fitsfile)

        self.hdulist = fits.open(self.fitsfile)

        numext = len(self.hdulist)
        logger.debug('Numext: %d', numext)

        self.sciext = None
        for i in range(len(self.hdulist)):
            try:
                extname = self.hdulist[i].header['EXTNAME']
            except KeyError:
                extname = None
            logger.debug('Extension %d name: %s', i, extname)
            if extname == 'SCI':
                self.sciext = i
        if self.sciext is None:
            if numext == 1:
                self.sciext = 0
            else:
                self.sciext = 1
        logger.debug('Science extension: %s', self.sciext)

        self.data = self.hdulist[self.sciext].data

        self.instrument = self.hdulist[0].header['INSTRUME']
        logger.debug('Instrument: %s', self.instrument)

        self.naxis1 = self.hdulist[self.sciext].header['naxis1']  # X
        self.naxis2 = self.hdulist[self.sciext].header['naxis2']  # Y
        logger.debug('Image size: %s x %s', self.naxis1, self.naxis2)

        if self.instrument == 'NIRI':
            self.config = self.hdulist[0].header['FPMASK']

        elif self.instrument == 'GNIRS':
            self.config = self.hdulist[0].header['CAMERA'] + self.hdulist[0].header['DECKER']

            logger.debug('Padding GNIRS SCI y-axis by 2 rows')  # ydim must be a multiple of 4
            self.data = numpy.append(self.data, numpy.zeros((2, self.naxis1)), axis=0)
            self.naxis2 += 2
            logger.debug('New image size: %s x %s', self.naxis1, self.naxis2)

        else:
            logger.error('Unsupported instrument: %s', self.instrument)
            raise SystemExit

        # Check that the image is the proper size:
        if self.naxis1 % self.pxsize or self.naxis2 % self.pysize:
            logger.info('Padded image size: %d x %d', self.naxis1, self.naxis2)
            logger.error('Image size is not a multiple of %d x %d', self.pxsize, self.pysize)
            raise SystemExit

        logger.info('Config: %s', self.config)

        self.qxsize = int(self.naxis1 / 2)  # quadrant x size
        self.qysize = int(self.naxis2 / 2)  # quadrant y size

        self.mdata = ma.array(self.data, copy=True)  # masked science data

        if self.instrument == 'GNIRS': # mask the padding
            self.mdata[-2:,] = ma.masked

        return
Example #59
0
File: hic.py Project: uibcdf/ProDy
    def align(self, array, axis=None):
        if not isinstance(array, np.ndarray):
            array = np.array(array)

        ret = array = array.copy()

        if np.isscalar(self.mask):
            return ret

        mask = self.mask.copy()

        l_full = self.getCompleteMap().shape[0]
        l_trim = self.getTrimedMap().shape[0]

        if len(array.shape) == 0:
            raise ValueError('array cannot be empty')
        elif len(array.shape) == 1:
            l = array.shape[0]
            if l == l_trim:
                N = len(mask)
                ret = np.zeros(N, dtype=array.dtype)
                ret[mask] = array
            elif l == l_full:
                ret = array[mask]
            else:
                raise ValueError('The length of array (%d) does not '
                                 'match that of either the full (%d) '
                                 'or trimed (%d).' % (l, l_full, l_trim))
        elif len(array.shape) == 2:
            s = array.shape

            if axis is None:
                if s[0] != s[1]:
                    raise ValueError('The array must be a square matrix '
                                     'if axis is set to None.')
                if s[0] == l_trim:
                    N = len(mask)
                    whole_mat = np.zeros((N, N), dtype=array.dtype)
                    mask = np.outer(mask, mask)
                    whole_mat[mask] = array.flatten()
                    ret = whole_mat
                elif s[0] == l_full:
                    M = ma.array(array)
                    M.mask = np.diag(mask)
                    ret = ma.compress_rowcols(M)
                else:
                    raise ValueError('The size of array (%d) does not '
                                     'match that of either the full (%d) '
                                     'or trimed (%d).' %
                                     (s[0], l_full, l_trim))
            else:
                new_shape = list(s)
                otheraxis = 0 if axis != 0 else 1
                if s[axis] == l_trim:
                    N = len(mask)
                    new_shape[axis] = N
                    whole_mat = np.zeros(new_shape)
                    mask = np.expand_dims(mask, axis=otheraxis)
                    mask = mask.repeat(s[otheraxis], axis=otheraxis)
                    whole_mat[mask] = array.flatten()
                    ret = whole_mat
                elif s[axis] == l_full:
                    mask = np.expand_dims(mask, axis=otheraxis)
                    mask = mask.repeat(s[otheraxis])
                    ret = self._map[mask]
                else:
                    raise ValueError('The size of array (%d) does not '
                                     'match that of either the full (%d) '
                                     'or trimed (%d).' %
                                     (s[0], l_full, l_trim))

        return ret
Example #60
0
    def __call__(self, X, alpha=None, bytes=False):
        """
        *X* is either a scalar or an array (of any dimension).
        If scalar, a tuple of rgba values is returned, otherwise
        an array with the new shape = oldshape+(4,). If the X-values
        are integers, then they are used as indices into the array.
        If they are floating point, then they must be in the
        interval (0.0, 1.0).
        Alpha must be a scalar between 0 and 1, or None.
        If bytes is False, the rgba values will be floats on a
        0-1 scale; if True, they will be uint8, 0-255.
        """

        if not self._isinit:
            self._init()
        mask_bad = None
        if not cbook.iterable(X):
            vtype = 'scalar'
            xa = np.array([X])
        else:
            vtype = 'array'
            xma = ma.array(X, copy=True)  # Copy here to avoid side effects.
            mask_bad = xma.mask  # Mask will be used below.
            xa = xma.filled()  # Fill to avoid infs, etc.
            del xma

        # Calculations with native byteorder are faster, and avoid a
        # bug that otherwise can occur with putmask when the last
        # argument is a numpy scalar.
        if not xa.dtype.isnative:
            xa = xa.byteswap().newbyteorder()

        if xa.dtype.kind == "f":
            # Treat 1.0 as slightly less than 1.
            vals = np.array([1, 0], dtype=xa.dtype)
            almost_one = np.nextafter(*vals)
            cbook._putmask(xa, xa == 1.0, almost_one)
            # The following clip is fast, and prevents possible
            # conversion of large positive values to negative integers.

            xa *= self.N
            if NP_CLIP_OUT:
                np.clip(xa, -1, self.N, out=xa)
            else:
                xa = np.clip(xa, -1, self.N)

            # ensure that all 'under' values will still have negative
            # value after casting to int
            cbook._putmask(xa, xa < 0.0, -1)
            xa = xa.astype(int)
        # Set the over-range indices before the under-range;
        # otherwise the under-range values get converted to over-range.
        cbook._putmask(xa, xa > self.N - 1, self._i_over)
        cbook._putmask(xa, xa < 0, self._i_under)
        if mask_bad is not None:
            if mask_bad.shape == xa.shape:
                cbook._putmask(xa, mask_bad, self._i_bad)
            elif mask_bad:
                xa.fill(self._i_bad)
        if bytes:
            lut = (self._lut * 255).astype(np.uint8)
        else:
            lut = self._lut.copy()  # Don't let alpha modify original _lut.

        if alpha is not None:
            alpha = min(alpha, 1.0)  # alpha must be between 0 and 1
            alpha = max(alpha, 0.0)
            if bytes:
                alpha = int(alpha * 255)
            if (lut[-1] == 0).all():
                lut[:-1, -1] = alpha
                # All zeros is taken as a flag for the default bad
                # color, which is no color--fully transparent.  We
                # don't want to override this.
            else:
                lut[:, -1] = alpha
                # If the bad value is set to have a color, then we
                # override its alpha just as for any other value.

        rgba = np.empty(shape=xa.shape + (4, ), dtype=lut.dtype)
        lut.take(xa, axis=0, mode='clip', out=rgba)
        #  twice as fast as lut[xa];
        #  using the clip or wrap mode and providing an
        #  output array speeds it up a little more.
        if vtype == 'scalar':
            rgba = tuple(rgba[0, :])
        return rgba
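In both versions of __call__, masked input is routed to the 'bad' index before the LUT lookup, which is what set_bad controls in the public API. A short usage sketch of that behavior with a recent matplotlib (this exercises the public interface, not this exact implementation):

import numpy.ma as ma
import matplotlib.pyplot as plt

cmap = plt.get_cmap('viridis').copy()  # copy before mutating the shared colormap
cmap.set_bad(alpha=0.0)                # masked values -> fully transparent
x = ma.array([0.0, 0.5, 1.0], mask=[False, True, False])
rgba = cmap(x)                         # shape (3, 4)
print(rgba[1])                         # the masked entry carries the 'bad' color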