Example #1
    def test_stack_errors(self):
        v = Variable(["x", "y"], [[0, 1], [2, 3]], {"foo": "bar"})

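        # both calls should fail: the first names a dimension that does not
        # exist, the second reuses an existing dimension name for the new dim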
        with self.assertRaisesRegexp(ValueError, "invalid existing dim"):
            v.stack(z=("x1",))
        with self.assertRaisesRegexp(ValueError, "cannot create a new dim"):
            v.stack(x=("x",))
Example #2
 def test_items(self):
     data = np.random.random((10, 11))
     v = Variable(['x', 'y'], data)
     # test slicing
     self.assertVariableIdentical(v, v[:])
     self.assertVariableIdentical(v, v[...])
     self.assertVariableIdentical(Variable(['y'], data[0]), v[0])
     self.assertVariableIdentical(Variable(['x'], data[:, 0]), v[:, 0])
     self.assertVariableIdentical(Variable(['x', 'y'], data[:3, :2]),
                                  v[:3, :2])
     # test array indexing
     x = Variable(['x'], np.arange(10))
     y = Variable(['y'], np.arange(11))
     self.assertVariableIdentical(v, v[x.values])
     self.assertVariableIdentical(v, v[x])
     self.assertVariableIdentical(v[:3], v[x < 3])
     self.assertVariableIdentical(v[:, 3:], v[:, y >= 3])
     self.assertVariableIdentical(v[:3, 3:], v[x < 3, y >= 3])
     self.assertVariableIdentical(v[:3, :2], v[x[:3], y[:2]])
     self.assertVariableIdentical(v[:3, :2], v[range(3), range(2)])
     # test iteration
     for n, item in enumerate(v):
         self.assertVariableIdentical(Variable(['y'], data[n]), item)
     with self.assertRaisesRegexp(TypeError, 'iteration over a 0-d'):
         iter(Variable([], 0))
     # test setting
     v.values[:] = 0
     self.assertTrue(np.all(v.values == 0))
     # test orthogonal setting
     v[range(10), range(11)] = 1
     self.assertArrayEqual(v.values, np.ones((10, 11)))
Example #3
    def test_numpy_same_methods(self):
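        # item() should unwrap a 0-d variable into a native Python scalar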
        v = Variable([], np.float32(0.0))
        self.assertEqual(v.item(), 0)
        self.assertIs(type(v.item()), float)

        v = Coordinate('x', np.arange(5))
        self.assertEqual(2, v.searchsorted(2))
Example #4
    def test_stack_errors(self):
        v = Variable(['x', 'y'], [[0, 1], [2, 3]], {'foo': 'bar'})

        with self.assertRaisesRegexp(ValueError, 'invalid existing dim'):
            v.stack(z=('x1',))
        with self.assertRaisesRegexp(ValueError, 'cannot create a new dim'):
            v.stack(x=('x',))
Example #5
 def test_concat(self):
     u = self.eager_var
     v = self.lazy_var
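     # concatenating lazy pieces along 'x' should rebuild the eager variable
     # while keeping the result backed by dask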
     self.assertLazyAndIdentical(u, Variable.concat([v[:2], v[2:]], 'x'))
     self.assertLazyAndIdentical(u[:2], Variable.concat([v[0], v[1]], 'x'))
     self.assertLazyAndIdentical(
         u[:3], Variable.concat([v[[0, 2]], v[[1]]], 'x', positions=[[0, 2], [1]]))
Example #6
def test_outer_indexer_consistency_with_broadcast_indexes_vectorized():
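    # orthogonal (outer) indexing and the vectorized broadcast path should
    # select exactly the same elements for every combination of indexer types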
    def nonzero(x):
        if isinstance(x, np.ndarray) and x.dtype.kind == 'b':
            x = x.nonzero()[0]
        return x

    original = np.random.rand(10, 20, 30)
    v = Variable(['i', 'j', 'k'], original)
    I = ReturnItem()  # noqa: E741  # allow ambiguous name
    # test orthogonally applied indexers
    indexers = [I[:], 0, -2, I[:3], np.array([0, 1, 2, 3]), np.array([0]),
                np.arange(10) < 5]
    for i, j, k in itertools.product(indexers, repeat=3):

        if isinstance(j, np.ndarray) and j.dtype.kind == 'b':  # match size
            j = np.arange(20) < 4
        if isinstance(k, np.ndarray) and k.dtype.kind == 'b':
            k = np.arange(30) < 8

        _, expected, new_order = v._broadcast_indexes_vectorized((i, j, k))
        expected_data = nputils.NumpyVIndexAdapter(v.data)[expected.tuple]
        if new_order:
            old_order = range(len(new_order))
            expected_data = np.moveaxis(expected_data, old_order,
                                        new_order)

        outer_index = indexing.OuterIndexer((nonzero(i), nonzero(j),
                                             nonzero(k)))
        actual = indexing._outer_to_numpy_indexer(outer_index, v.shape)
        actual_data = v.data[actual]
        np.testing.assert_array_equal(actual_data, expected_data)
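
# ReturnItem is assumed to be the usual indexing helper that just hands back
# whatever key it is indexed with, so I[:3] produces a slice object:
class ReturnItem:
    def __getitem__(self, key):
        return key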
Example #7
    def test_roll_consistency(self):
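        # Variable.roll should agree with np.roll along the matching axis,
        # including shifts larger than the dimension size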
        v = Variable(('x', 'y'), np.random.randn(5, 6))

        for axis, dim in [(0, 'x'), (1, 'y')]:
            for shift in [-3, 0, 1, 7, 11]:
                expected = np.roll(v.values, shift, axis=axis)
                actual = v.roll(**{dim: shift}).values
                self.assertArrayEqual(expected, actual)
Example #8
 def test_expand_dims_object_dtype(self):
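     # the scalar value here is a tuple, so the expanded variable should hold
     # three references to that same tuple rather than broadcasting it apart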
     v = Variable([], ("a", 1))
     actual = v.expand_dims(("x",), (3,))
     exp_values = np.empty((3,), dtype=object)
     for i in range(3):
         exp_values[i] = ("a", 1)
     expected = Variable(["x"], exp_values)
     assert actual.identical(expected)
Example #9
    def test_unstack_2d(self):
        v = Variable(["x", "y"], [[0, 1], [2, 3]])
        actual = v.unstack(y={"z": 2})
        expected = Variable(["x", "z"], v.data)
        self.assertVariableIdentical(actual, expected)

        actual = v.unstack(x={"z": 2})
        expected = Variable(["y", "z"], v.data.T)
        self.assertVariableIdentical(actual, expected)
Example #10
    def test_unstack_2d(self):
        v = Variable(['x', 'y'], [[0, 1], [2, 3]])
        actual = v.unstack(y={'z': 2})
        expected = Variable(['x', 'z'], v.data)
        self.assertVariableIdentical(actual, expected)

        actual = v.unstack(x={'z': 2})
        expected = Variable(['y', 'z'], v.data.T)
        self.assertVariableIdentical(actual, expected)
Example #11
 def test_concat_attrs(self):
     # different or conflicting attributes should be removed
     v = self.cls('a', np.arange(5), {'foo': 'bar'})
     w = self.cls('a', np.ones(5))
     expected = self.cls('a', np.concatenate([np.arange(5), np.ones(5)]))
     self.assertVariableIdentical(expected, Variable.concat([v, w], 'a'))
     w.attrs['foo'] = 2
     self.assertVariableIdentical(expected, Variable.concat([v, w], 'a'))
     w.attrs['foo'] = 'bar'
     expected.attrs['foo'] = 'bar'
     self.assertVariableIdentical(expected, Variable.concat([v, w], 'a'))
Example #12
 def test_concat_attrs(self):
     # different or conflicting attributes should be removed
     v = self.cls("a", np.arange(5), {"foo": "bar"})
     w = self.cls("a", np.ones(5))
     expected = self.cls("a", np.concatenate([np.arange(5), np.ones(5)]))
     self.assertVariableIdentical(expected, Variable.concat([v, w], "a"))
     w.attrs["foo"] = 2
     self.assertVariableIdentical(expected, Variable.concat([v, w], "a"))
     w.attrs["foo"] = "bar"
     expected.attrs["foo"] = "bar"
     self.assertVariableIdentical(expected, Variable.concat([v, w], "a"))
Example #13
 def test_pickle(self):
     # Test that pickling/unpickling does not convert the dask
     # backend to numpy
     a1 = Variable(['x'], build_dask_array('x'))
     a1.compute()
     assert not a1._in_memory
     assert kernel_call_count == 1
     a2 = pickle.loads(pickle.dumps(a1))
     assert kernel_call_count == 1
     assert_identical(a1, a2)
     assert not a1._in_memory
     assert not a2._in_memory
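
 # build_dask_array and kernel_call_count are not shown above; a minimal
 # sketch of what such helpers are assumed to look like (a picklable,
 # module-level kernel whose call count reveals whether chunks were
 # recomputed):
 import dask.array as da
 import numpy as np

 kernel_call_count = 0

 def kernel(name):
     # must be module-level (not a closure) so the dask graph stays picklable
     global kernel_call_count
     kernel_call_count += 1
     return np.ones(1, dtype=np.int64)

 def build_dask_array(name):
     global kernel_call_count
     kernel_call_count = 0  # reset the counter for each test
     return da.Array({(name, 0): (kernel, name)}, name,
                     chunks=((1,),), dtype=np.int64)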
Example #14
 def test_variable_pickle(self):
     # Test that pickling/unpickling does not convert the dask
     # backend to numpy
     a1 = Variable(['x'], build_dask_array())
     a1.compute()
     self.assertFalse(a1._in_memory)
     self.assertEqual(kernel_call_count, 1)
     a2 = pickle.loads(pickle.dumps(a1))
     self.assertEqual(kernel_call_count, 1)
     self.assertVariableIdentical(a1, a2)
     self.assertFalse(a1._in_memory)
     self.assertFalse(a2._in_memory)
Example #15
 def test_transpose(self):
     v = Variable(['time', 'x'], self.d)
     v2 = Variable(['x', 'time'], self.d.T)
     self.assertVariableIdentical(v, v2.transpose())
     self.assertVariableIdentical(v.transpose(), v.T)
     x = np.random.randn(2, 3, 4, 5)
     w = Variable(['a', 'b', 'c', 'd'], x)
     w2 = Variable(['d', 'b', 'c', 'a'], np.einsum('abcd->dbca', x))
     self.assertEqual(w2.shape, (5, 3, 4, 2))
     self.assertVariableIdentical(w2, w.transpose('d', 'b', 'c', 'a'))
     self.assertVariableIdentical(w, w2.transpose('a', 'b', 'c', 'd'))
     w3 = Variable(['b', 'c', 'd', 'a'], np.einsum('abcd->bcda', x))
     self.assertVariableIdentical(w, w3.transpose('a', 'b', 'c', 'd'))
Example #16
 def test_transpose(self):
     v = Variable(["time", "x"], self.d)
     v2 = Variable(["x", "time"], self.d.T)
     self.assertVariableIdentical(v, v2.transpose())
     self.assertVariableIdentical(v.transpose(), v.T)
     x = np.random.randn(2, 3, 4, 5)
     w = Variable(["a", "b", "c", "d"], x)
     w2 = Variable(["d", "b", "c", "a"], np.einsum("abcd->dbca", x))
     self.assertEqual(w2.shape, (5, 3, 4, 2))
     self.assertVariableIdentical(w2, w.transpose("d", "b", "c", "a"))
     self.assertVariableIdentical(w, w2.transpose("a", "b", "c", "d"))
     w3 = Variable(["b", "c", "d", "a"], np.einsum("abcd->bcda", x))
     self.assertVariableIdentical(w, w3.transpose("a", "b", "c", "d"))
Example #17
    def test_unstack(self):
        v = Variable('z', [0, 1, 2, 3], {'foo': 'bar'})
        actual = v.unstack(z=OrderedDict([('x', 2), ('y', 2)]))
        expected = Variable(('x', 'y'), [[0, 1], [2, 3]], v.attrs)
        self.assertVariableIdentical(actual, expected)

        actual = v.unstack(z=OrderedDict([('x', 4), ('y', 1)]))
        expected = Variable(('x', 'y'), [[0], [1], [2], [3]], v.attrs)
        self.assertVariableIdentical(actual, expected)

        actual = v.unstack(z=OrderedDict([('x', 4)]))
        expected = Variable('x', [0, 1, 2, 3], v.attrs)
        self.assertVariableIdentical(actual, expected)
Example #18
    def test_unstack(self):
        v = Variable("z", [0, 1, 2, 3], {"foo": "bar"})
        actual = v.unstack(z=OrderedDict([("x", 2), ("y", 2)]))
        expected = Variable(("x", "y"), [[0, 1], [2, 3]], v.attrs)
        self.assertVariableIdentical(actual, expected)

        actual = v.unstack(z=OrderedDict([("x", 4), ("y", 1)]))
        expected = Variable(("x", "y"), [[0], [1], [2], [3]], v.attrs)
        self.assertVariableIdentical(actual, expected)

        actual = v.unstack(z=OrderedDict([("x", 4)]))
        expected = Variable("x", [0, 1, 2, 3], v.attrs)
        self.assertVariableIdentical(actual, expected)
Example #19
    def test_reduce_keep_attrs(self):
        _attrs = {'units': 'test', 'long_name': 'testing'}

        v = Variable(['x', 'y'], self.d, _attrs)

        # Test dropped attrs
        vm = v.mean()
        self.assertEqual(len(vm.attrs), 0)
        self.assertEqual(vm.attrs, OrderedDict())

        # Test kept attrs
        vm = v.mean(keep_attrs=True)
        self.assertEqual(len(vm.attrs), len(_attrs))
        self.assertEqual(vm.attrs, _attrs)
Example #20
    def test_reduce_keep_attrs(self):
        _attrs = {"units": "test", "long_name": "testing"}

        v = Variable(["x", "y"], self.d, _attrs)

        # Test dropped attrs
        vm = v.mean()
        self.assertEqual(len(vm.attrs), 0)
        self.assertEqual(vm.attrs, OrderedDict())

        # Test kept attrs
        vm = v.mean(keep_attrs=True)
        self.assertEqual(len(vm.attrs), len(_attrs))
        self.assertEqual(vm.attrs, _attrs)
Example #21
 def test_data_and_values(self):
     v = Variable(['time', 'x'], self.d)
     self.assertArrayEqual(v.data, self.d)
     self.assertArrayEqual(v.values, self.d)
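     # values exposes the underlying ndarray itself, not a copy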
     self.assertIs(source_ndarray(v.values), self.d)
     with self.assertRaises(ValueError):
         # wrong size
         v.values = np.random.random(5)
     d2 = np.random.random((10, 3))
     v.values = d2
     self.assertIs(source_ndarray(v.values), d2)
     d3 = np.random.random((10, 3))
     v.data = d3
     self.assertIs(source_ndarray(v.data), d3)
Example #22
    def test_stack(self):
        v = Variable(['x', 'y'], [[0, 1], [2, 3]], {'foo': 'bar'})
        actual = v.stack(z=('x', 'y'))
        expected = Variable('z', [0, 1, 2, 3], v.attrs)
        self.assertVariableIdentical(actual, expected)

        actual = v.stack(z=('x',))
        expected = Variable(('y', 'z'), v.data.T, v.attrs)
        self.assertVariableIdentical(actual, expected)

        actual = v.stack(z=())
        self.assertVariableIdentical(actual, v)

        actual = v.stack(X=('x',), Y=('y',)).transpose('X', 'Y')
        expected = Variable(('X', 'Y'), v.data, v.attrs)
        self.assertVariableIdentical(actual, expected)
Example #23
    def test_stack(self):
        v = Variable(["x", "y"], [[0, 1], [2, 3]], {"foo": "bar"})
        actual = v.stack(z=("x", "y"))
        expected = Variable("z", [0, 1, 2, 3], v.attrs)
        self.assertVariableIdentical(actual, expected)

        actual = v.stack(z=("x",))
        expected = Variable(("y", "z"), v.data.T, v.attrs)
        self.assertVariableIdentical(actual, expected)

        actual = v.stack(z=())
        self.assertVariableIdentical(actual, v)

        actual = v.stack(X=("x",), Y=("y",)).transpose("X", "Y")
        expected = Variable(("X", "Y"), v.data, v.attrs)
        self.assertVariableIdentical(actual, expected)
Example #24
    def test_broadcast_equals(self):
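        # broadcast_equals compares after broadcasting to a common shape, so a
        # scalar NaN matches an all-NaN vector; equals and identical still
        # require matching shapes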
        v1 = Variable((), np.nan)
        v2 = Variable(('x'), [np.nan, np.nan])
        self.assertTrue(v1.broadcast_equals(v2))
        self.assertFalse(v1.equals(v2))
        self.assertFalse(v1.identical(v2))

        v3 = Variable(('x'), [np.nan])
        self.assertTrue(v1.broadcast_equals(v3))
        self.assertFalse(v1.equals(v3))
        self.assertFalse(v1.identical(v3))

        self.assertFalse(v1.broadcast_equals(None))

        v4 = Variable(('x'), [np.nan] * 3)
        self.assertFalse(v2.broadcast_equals(v4))
Example #25
    def test_count(self):
        expected = Variable([], 3)
        actual = Variable(["x"], [1, 2, 3, np.nan]).count()
        self.assertVariableIdentical(expected, actual)

        v = Variable(["x"], np.array(["1", "2", "3", np.nan], dtype=object))
        actual = v.count()
        self.assertVariableIdentical(expected, actual)

        actual = Variable(["x"], [True, False, True]).count()
        self.assertVariableIdentical(expected, actual)
        self.assertEqual(actual.dtype, int)

        expected = Variable(["x"], [2, 3])
        actual = Variable(["x", "y"], [[1, 0, np.nan], [1, 1, 1]]).count("y")
        self.assertVariableIdentical(expected, actual)
Example #26
    def test_count(self):
        expected = Variable([], 3)
        actual = Variable(['x'], [1, 2, 3, np.nan]).count()
        self.assertVariableIdentical(expected, actual)

        v = Variable(['x'], np.array(['1', '2', '3', np.nan], dtype=object))
        actual = v.count()
        self.assertVariableIdentical(expected, actual)

        actual = Variable(['x'], [True, False, True]).count()
        self.assertVariableIdentical(expected, actual)
        self.assertEqual(actual.dtype, int)

        expected = Variable(['x'], [2, 3])
        actual = Variable(['x', 'y'], [[1, 0, np.nan], [1, 1, 1]]).count('y')
        self.assertVariableIdentical(expected, actual)
Example #27
    def test_roll(self):
        v = Variable('x', [1, 2, 3, 4, 5])
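        # a zero shift is a no-op but must still return a new object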

        self.assertVariableIdentical(v, v.roll(x=0))
        self.assertIsNot(v, v.roll(x=0))

        expected = Variable('x', [5, 1, 2, 3, 4])
        self.assertVariableIdentical(expected, v.roll(x=1))
        self.assertVariableIdentical(expected, v.roll(x=-4))
        self.assertVariableIdentical(expected, v.roll(x=6))

        expected = Variable('x', [4, 5, 1, 2, 3])
        self.assertVariableIdentical(expected, v.roll(x=2))
        self.assertVariableIdentical(expected, v.roll(x=-3))

        with self.assertRaisesRegexp(ValueError, 'dimension'):
            v.roll(z=0)
Example #28
 def test_concat_fixed_len_str(self):
     # regression test for #217
     for kind in ["S", "U"]:
         x = self.cls("animal", np.array(["horse"], dtype=kind))
         y = self.cls("animal", np.array(["aardvark"], dtype=kind))
         actual = Variable.concat([x, y], "animal")
         expected = Variable("animal", np.array(["horse", "aardvark"], dtype=kind))
         self.assertVariableEqual(expected, actual)
Example #29
 def test_concat_number_strings(self):
     # regression test for #305
     a = self.cls('x', ['0', '1', '2'])
     b = self.cls('x', ['3', '4'])
     actual = Variable.concat([a, b], dim='x')
     expected = Variable('x', np.arange(5).astype(str).astype(object))
     self.assertVariableIdentical(expected, actual)
     self.assertEqual(expected.dtype, object)
     self.assertEqual(type(expected.values[0]), str)
Example #30
 def test_concat_fixed_len_str(self):
     # regression test for #217
     for kind in ['S', 'U']:
         x = self.cls('animal', np.array(['horse'], dtype=kind))
         y = self.cls('animal', np.array(['aardvark'], dtype=kind))
         actual = Variable.concat([x, y], 'animal')
         expected = Variable(
             'animal', np.array(['horse', 'aardvark'], dtype=kind))
         self.assertVariableEqual(expected, actual)
Example #31
class TestVariable(DaskTestCase):
    def assertLazyAndIdentical(self, expected, actual):
        self.assertLazyAnd(expected, actual, assert_identical)

    def assertLazyAndAllClose(self, expected, actual):
        self.assertLazyAnd(expected, actual, assert_allclose)

    def setUp(self):
        self.values = np.random.RandomState(0).randn(4, 6)
        self.data = da.from_array(self.values, chunks=(2, 2))

        self.eager_var = Variable(('x', 'y'), self.values)
        self.lazy_var = Variable(('x', 'y'), self.data)

    def test_basics(self):
        v = self.lazy_var
        assert self.data is v.data
        assert self.data.chunks == v.chunks
        assert_array_equal(self.values, v)

    def test_copy(self):
        self.assertLazyAndIdentical(self.eager_var, self.lazy_var.copy())
        self.assertLazyAndIdentical(self.eager_var,
                                    self.lazy_var.copy(deep=True))

    def test_chunk(self):
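        # chunks may be None (keep current chunking), a uniform int, or a
        # per-dimension mapping, including explicit chunk-size tuples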
        for chunks, expected in [(None, ((2, 2), (2, 2, 2))),
                                 (3, ((3, 1), (3, 3))),
                                 ({'x': 3, 'y': 3}, ((3, 1), (3, 3))),
                                 ({'x': 3}, ((3, 1), (2, 2, 2))),
                                 ({'x': (3, 1)}, ((3, 1), (2, 2, 2)))]:
            rechunked = self.lazy_var.chunk(chunks)
            assert rechunked.chunks == expected
            self.assertLazyAndIdentical(self.eager_var, rechunked)

    def test_indexing(self):
        u = self.eager_var
        v = self.lazy_var
        self.assertLazyAndIdentical(u[0], v[0])
        self.assertLazyAndIdentical(u[:1], v[:1])
        self.assertLazyAndIdentical(u[[0, 1], [0, 1, 2]], v[[0, 1], [0, 1, 2]])
        with raises_regex(TypeError, 'stored in a dask array'):
            v[:1] = 0

    def test_squeeze(self):
        u = self.eager_var
        v = self.lazy_var
        self.assertLazyAndIdentical(u[0].squeeze(), v[0].squeeze())

    def test_equals(self):
        v = self.lazy_var
        assert v.equals(v)
        assert isinstance(v.data, da.Array)
        assert v.identical(v)
        assert isinstance(v.data, da.Array)

    def test_transpose(self):
        u = self.eager_var
        v = self.lazy_var
        self.assertLazyAndIdentical(u.T, v.T)

    def test_shift(self):
        u = self.eager_var
        v = self.lazy_var
        self.assertLazyAndIdentical(u.shift(x=2), v.shift(x=2))
        self.assertLazyAndIdentical(u.shift(x=-2), v.shift(x=-2))
        assert v.data.chunks == v.shift(x=1).data.chunks

    def test_roll(self):
        u = self.eager_var
        v = self.lazy_var
        self.assertLazyAndIdentical(u.roll(x=2), v.roll(x=2))
        assert v.data.chunks == v.roll(x=1).data.chunks

    def test_unary_op(self):
        u = self.eager_var
        v = self.lazy_var
        self.assertLazyAndIdentical(-u, -v)
        self.assertLazyAndIdentical(abs(u), abs(v))
        self.assertLazyAndIdentical(u.round(), v.round())

    def test_binary_op(self):
        u = self.eager_var
        v = self.lazy_var
        self.assertLazyAndIdentical(2 * u, 2 * v)
        self.assertLazyAndIdentical(u + u, v + v)
        self.assertLazyAndIdentical(u[0] + u, v[0] + v)

    def test_repr(self):
        expected = dedent("""\
        <xarray.Variable (x: 4, y: 6)>
        dask.array<shape=(4, 6), dtype=float64, chunksize=(2, 2)>""")
        assert expected == repr(self.lazy_var)

    def test_pickle(self):
        # Test that pickling/unpickling does not convert the dask
        # backend to numpy
        a1 = Variable(['x'], build_dask_array('x'))
        a1.compute()
        assert not a1._in_memory
        assert kernel_call_count == 1
        a2 = pickle.loads(pickle.dumps(a1))
        assert kernel_call_count == 1
        assert_identical(a1, a2)
        assert not a1._in_memory
        assert not a2._in_memory

    def test_reduce(self):
        u = self.eager_var
        v = self.lazy_var
        self.assertLazyAndAllClose(u.mean(), v.mean())
        self.assertLazyAndAllClose(u.std(), v.std())
        self.assertLazyAndAllClose(u.argmax(dim='x'), v.argmax(dim='x'))
        self.assertLazyAndAllClose((u > 1).any(), (v > 1).any())
        self.assertLazyAndAllClose((u < 1).all('x'), (v < 1).all('x'))
        with raises_regex(NotImplementedError, 'dask'):
            v.median()

    def test_missing_values(self):
        values = np.array([0, 1, np.nan, 3])
        data = da.from_array(values, chunks=(2,))

        eager_var = Variable('x', values)
        lazy_var = Variable('x', data)
        self.assertLazyAndIdentical(eager_var, lazy_var.fillna(lazy_var))
        self.assertLazyAndIdentical(Variable('x', range(4)),
                                    lazy_var.fillna(2))
        self.assertLazyAndIdentical(eager_var.count(), lazy_var.count())

    def test_concat(self):
        u = self.eager_var
        v = self.lazy_var
        self.assertLazyAndIdentical(u, Variable.concat([v[:2], v[2:]], 'x'))
        self.assertLazyAndIdentical(u[:2], Variable.concat([v[0], v[1]], 'x'))
        self.assertLazyAndIdentical(u[:2], Variable.concat([u[0], v[1]], 'x'))
        self.assertLazyAndIdentical(u[:2], Variable.concat([v[0], u[1]], 'x'))
        self.assertLazyAndIdentical(
            u[:3],
            Variable.concat([v[[0, 2]], v[[1]]], 'x', positions=[[0, 2], [1]]))

    def test_missing_methods(self):
        v = self.lazy_var
        try:
            v.argsort()
        except NotImplementedError as err:
            assert 'dask' in str(err)
        try:
            v[0].item()
        except NotImplementedError as err:
            assert 'dask' in str(err)

    def test_univariate_ufunc(self):
        u = self.eager_var
        v = self.lazy_var
        self.assertLazyAndAllClose(np.sin(u), xu.sin(v))

    def test_bivariate_ufunc(self):
        u = self.eager_var
        v = self.lazy_var
        self.assertLazyAndAllClose(np.maximum(u, 0), xu.maximum(v, 0))
        self.assertLazyAndAllClose(np.maximum(u, 0), xu.maximum(0, v))

    @pytest.mark.skipif(LooseVersion(dask.__version__) <= '0.15.4',
                        reason='Need dask 0.16 for new interface')
    def test_compute(self):
        u = self.eager_var
        v = self.lazy_var

        assert dask.is_dask_collection(v)
        (v2,) = dask.compute(v + 1)
        assert not dask.is_dask_collection(v2)

        assert ((u + 1).data == v2.data).all()

    @pytest.mark.skipif(LooseVersion(dask.__version__) <= '0.15.4',
                        reason='Need dask 0.16 for new interface')
    def test_persist(self):
        u = self.eager_var
        v = self.lazy_var + 1
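        # persist evaluates the graph up front, but the result remains a dask
        # collection with the same keys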

        (v2,) = dask.persist(v)
        assert v is not v2
        assert len(v2.__dask_graph__()) < len(v.__dask_graph__())
        assert v2.__dask_keys__() == v.__dask_keys__()
        assert dask.is_dask_collection(v)
        assert dask.is_dask_collection(v2)

        self.assertLazyAndAllClose(u + 1, v)
        self.assertLazyAndAllClose(u + 1, v2)
Example #32
    def add_original_variables(dataset, height, srf_size=None):
        tu.add_geolocation_variables(dataset, SWATH_WIDTH, height)
        tu.add_quality_flags(dataset, SWATH_WIDTH, height)

        # Temperature_misc_housekeeping
        default_array = DefaultData.create_default_array(
            height,
            NUM_THERMISTORS,
            np.float32,
            dims_names=["housekeeping", "y"],
            fill_value=np.NaN)
        variable = Variable(["housekeeping", "y"], default_array)
        tu.add_fill_value(variable, np.NaN)
        variable.attrs["long_name"] = "TODO"
        variable.attrs["units"] = "TODO"
        dataset["Temperature_misc_housekeeping"] = variable

        # ancil_data
        default_array = DefaultData.create_default_array(
            height,
            ANCIL_VAL,
            np.float64,
            dims_names=["ancil_val", "y"],
            fill_value=np.NaN)
        variable = Variable(["ancil_val", "y"], default_array)
        tu.add_fill_value(variable, np.NaN)
        variable.attrs["long_name"] = "Additional per scan information: year, day_of_year, secs_of_day, sat_lat, " \
                                      "sat_long, sat_alt, sat_heading, year, day_of_year, secs_of_day"
        dataset["ancil_data"] = variable

        # channel_quality_flag
        default_array = DefaultData.create_default_array_3d(
            SWATH_WIDTH, height, NUM_CHANNELS, np.float32, np.NaN)
        variable = Variable(["channel", "y", "x"], default_array)
        tu.add_fill_value(variable, np.NaN)
        dataset["channel_quality_flag"] = variable

        # cold_counts
        default_array = DefaultData.create_default_array_3d(
            SWATH_WIDTH, height, CALIB_NUMBER, np.float32, np.NaN)
        variable = Variable(["calib_number", "y", "x"], default_array)
        tu.add_fill_value(variable, np.NaN)
        variable.attrs["long_name"] = "TODO"
        dataset["cold_counts"] = variable

        # counts_to_tb_gain
        default_array = DefaultData.create_default_array(
            height,
            NUM_CHANNELS,
            np.float32,
            dims_names=["channel", "y"],
            fill_value=np.NaN)
        variable = Variable([
            "channel",
            "y",
        ], default_array)
        tu.add_fill_value(variable, np.NaN)
        variable.attrs["long_name"] = "TODO"
        dataset["counts_to_tb_gain"] = variable

        # counts_to_tb_offset
        default_array = DefaultData.create_default_array(
            height,
            NUM_CHANNELS,
            np.float32,
            dims_names=["channel", "y"],
            fill_value=np.NaN)
        variable = Variable([
            "channel",
            "y",
        ], default_array)
        tu.add_fill_value(variable, np.NaN)
        variable.attrs["long_name"] = "TODO"
        dataset["counts_to_tb_offset"] = variable

        # gain_control
        default_array = DefaultData.create_default_array(
            height,
            NUM_CHANNELS,
            np.float32,
            dims_names=["channel", "y"],
            fill_value=np.NaN)
        variable = Variable([
            "channel",
            "y",
        ], default_array)
        tu.add_fill_value(variable, np.NaN)
        variable.attrs["long_name"] = "TODO"
        dataset["gain_control"] = variable

        # tb
        default_array = DefaultData.create_default_array_3d(
            SWATH_WIDTH, height, NUM_CHANNELS, np.float32, np.NaN)
        variable = Variable(["channel", "y", "x"], default_array)
        tu.add_fill_value(variable, np.NaN)
        variable.attrs["long_name"] = "TODO"
        variable.attrs["standard_name"] = "toa_brightness_temperature"
        tu.add_units(variable, "K")
        dataset["tb"] = variable

        # thermal_reference
        default_array = DefaultData.create_default_vector(
            height, np.float32, np.NaN)
        variable = Variable(["y"], default_array)
        tu.add_fill_value(variable, np.NaN)
        variable.attrs["long_name"] = "TODO"
        tu.add_units(variable, "TODO")
        dataset["thermal_reference"] = variable

        # warm_counts
        default_array = DefaultData.create_default_array_3d(
            SWATH_WIDTH, height, CALIB_NUMBER, np.float32, np.NaN)
        variable = Variable(["calib_number", "y", "x"], default_array)
        tu.add_fill_value(variable, np.NaN)
        variable.attrs["long_name"] = "TODO"
        dataset["warm_counts"] = variable
Example #33
    def test_reduce_funcs(self):
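        # skipna defaults to True for float data, so the NaN is ignored unless
        # skipna=False is passed explicitly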
        v = Variable('x', np.array([1, np.nan, 2, 3]))
        self.assertVariableIdentical(v.mean(), Variable([], 2))
        self.assertVariableIdentical(v.mean(skipna=True), Variable([], 2))
        self.assertVariableIdentical(v.mean(skipna=False), Variable([],
                                                                    np.nan))
        self.assertVariableIdentical(np.mean(v), Variable([], 2))

        self.assertVariableIdentical(v.prod(), Variable([], 6))
        self.assertVariableIdentical(v.cumsum(axis=0),
                                     Variable('x', np.array([1, 1, 3, 6])))
        self.assertVariableIdentical(v.cumprod(axis=0),
                                     Variable('x', np.array([1, 1, 2, 6])))
        self.assertVariableIdentical(v.var(), Variable([], 2.0 / 3))

        if LooseVersion(np.__version__) < '1.9':
            with self.assertRaises(NotImplementedError):
                v.median()
        else:
            self.assertVariableIdentical(v.median(), Variable([], 2))

        v = Variable('x', [True, False, False])
        self.assertVariableIdentical(v.any(), Variable([], True))
        self.assertVariableIdentical(v.all(dim='x'), Variable([], False))

        v = Variable('t', pd.date_range('2000-01-01', periods=3))
        with self.assertRaises(NotImplementedError):
            v.max(skipna=True)
        self.assertVariableIdentical(v.max(),
                                     Variable([], pd.Timestamp('2000-01-03')))
Example #34
    def add_full_fcdr_variables(dataset, height):
        # height is ignored - supplied just for interface compatibility tb 2017-02-05

        # count_vis
        default_array = DefaultData.create_default_array(
            FULL_SIZE, FULL_SIZE, np.uint8)
        variable = Variable(["y", "x"], default_array)
        tu.add_fill_value(variable,
                          DefaultData.get_default_fill_value(np.uint8))
        variable.attrs["long_name"] = "Image counts"
        tu.add_units(variable, "count")
        tu.add_chunking(variable, CHUNKSIZES)
        dataset["count_vis"] = variable

        dataset["u_latitude"] = MVIRI._create_angle_variable_int(
            1.5E-05, long_name="Uncertainty in Latitude", unsigned=True)
        MVIRI._add_geo_correlation_attributes(dataset["u_latitude"])

        dataset["u_longitude"] = MVIRI._create_angle_variable_int(
            1.5E-05, long_name="Uncertainty in Longitude", unsigned=True)
        MVIRI._add_geo_correlation_attributes(dataset["u_longitude"])

        # u_time
        default_array = DefaultData.create_default_vector(IR_SIZE,
                                                          np.float32,
                                                          fill_value=np.NaN)
        variable = Variable([IR_Y_DIMENSION], default_array)
        variable.attrs["standard_name"] = "Uncertainty in Time"
        tu.add_units(variable, "s")
        tu.add_encoding(variable, np.uint16,
                        DefaultData.get_default_fill_value(np.uint16),
                        0.009155273)
        variable.attrs["pdf_shape"] = "rectangle"
        dataset["u_time"] = variable

        dataset["u_satellite_zenith_angle"] = MVIRI._create_angle_variable_int(
            7.62939E-05,
            long_name="Uncertainty in Satellite Zenith Angle",
            unsigned=True)
        dataset[
            "u_satellite_azimuth_angle"] = MVIRI._create_angle_variable_int(
                7.62939E-05,
                long_name="Uncertainty in Satellite Azimuth Angle",
                unsigned=True)
        dataset["u_solar_zenith_angle"] = MVIRI._create_angle_variable_int(
            7.62939E-05,
            long_name="Uncertainty in Solar Zenith Angle",
            unsigned=True)
        dataset["u_solar_azimuth_angle"] = MVIRI._create_angle_variable_int(
            7.62939E-05,
            long_name="Uncertainty in Solar Azimuth Angle",
            unsigned=True)

        dataset["a0_vis"] = tu.create_scalar_float_variable(
            "Calibration Coefficient at Launch", units="Wm^-2sr^-1/count")
        dataset["a1_vis"] = tu.create_scalar_float_variable(
            "Time variation of a0", units="Wm^-2sr^-1/count day^-1 10^5")
        dataset["a2_vis"] = tu.create_scalar_float_variable(
            "Time variation of a0, quadratic term",
            units="Wm^-2sr^-1/count year^-2")
        dataset["mean_count_space_vis"] = tu.create_scalar_float_variable(
            "Space count", units="count")

        # u_a0_vis
        variable = tu.create_scalar_float_variable("Uncertainty in a0",
                                                   units="Wm^-2sr^-1/count")
        MVIRI._add_calibration_coeff_correlation_attributes(variable)
        dataset["u_a0_vis"] = variable

        # u_a1_vis
        variable = tu.create_scalar_float_variable(
            "Uncertainty in a1", units="Wm^-2sr^-1/count day^-1 10^5")
        MVIRI._add_calibration_coeff_correlation_attributes(variable)
        dataset["u_a1_vis"] = variable

        # u_a2_vis
        variable = tu.create_scalar_float_variable(
            "Uncertainty in a2", units="Wm^-2sr^-1/count year^-2")
        MVIRI._add_calibration_coeff_correlation_attributes(variable)
        dataset["u_a2_vis"] = variable

        # u_zero_vis
        variable = tu.create_scalar_float_variable("Uncertainty zero term",
                                                   units="Wm^-2sr^-1/count")
        MVIRI._add_calibration_coeff_correlation_attributes(
            variable, image_correlation_scale=[-np.inf, np.inf])
        dataset["u_zero_vis"] = variable

        # covariance_a_vis
        variable = tu.create_float_variable(
            COV_SIZE,
            COV_SIZE,
            long_name=
            "Covariance of calibration coefficients from fit to calibration runs",
            dim_names=["cov_size", "cov_size"],
            fill_value=np.NaN)
        tu.add_fill_value(variable, np.NaN)
        tu.add_units(variable, "Wm^-2sr^-1/count")
        MVIRI._add_calibration_coeff_correlation_attributes(
            variable, image_correlation_scale=[-np.inf, np.inf])
        dataset["covariance_a_vis"] = variable

        dataset["u_electronics_counts_vis"] = tu.create_scalar_float_variable(
            "Uncertainty due to Electronics noise", units="count")
        dataset["u_digitization_counts_vis"] = tu.create_scalar_float_variable(
            "Uncertainty due to digitization", units="count")

        # allan_deviation_counts_space_vis
        variable = tu.create_scalar_float_variable(
            "Uncertainty of space count", units="count")
        variable.attrs[corr.SCAN_CORR_FORM] = corr.RECT_ABS
        variable.attrs[corr.SCAN_CORR_UNIT] = corr.LINE
        variable.attrs[corr.SCAN_CORR_SCALE] = [-np.inf, np.inf]
        variable.attrs["pdf_shape"] = "digitised_gaussian"
        dataset["allan_deviation_counts_space_vis"] = variable

        # u_mean_counts_space_vis
        variable = tu.create_scalar_float_variable(
            "Uncertainty of space count", units="count")
        variable.attrs[corr.PIX_CORR_FORM] = corr.RECT_ABS
        variable.attrs[corr.PIX_CORR_UNIT] = corr.PIXEL
        variable.attrs[corr.PIX_CORR_SCALE] = [-np.inf, np.inf]
        variable.attrs[corr.SCAN_CORR_FORM] = corr.RECT_ABS
        variable.attrs[corr.SCAN_CORR_UNIT] = corr.LINE
        variable.attrs[corr.SCAN_CORR_SCALE] = [-np.inf, np.inf]
        variable.attrs["pdf_shape"] = "digitised_gaussian"
        dataset["u_mean_counts_space_vis"] = variable

        # sensitivity_solar_irradiance_vis
        variable = tu.create_scalar_float_variable()
        variable.attrs["virtual"] = "true"
        variable.attrs["dimension"] = "y, x"
        variable.attrs[
            "expression"] = "distance_sun_earth * distance_sun_earth * PI * (count_vis - mean_count_space_vis) * (a2_vis * years_since_launch * years_since_launch + a1_vis * years_since_launch + a0_vis) / (cos(solar_zenith_angle * PI / 180.0) * solar_irradiance_vis * solar_irradiance_vis)"
        dataset["sensitivity_solar_irradiance_vis"] = variable

        # sensitivity_count_vis
        variable = tu.create_scalar_float_variable()
        variable.attrs["virtual"] = "true"
        variable.attrs["dimension"] = "y, x"
        variable.attrs[
            "expression"] = "distance_sun_earth * distance_sun_earth * PI * (a2_vis * years_since_launch * years_since_launch + a1_vis * years_since_launch + a0_vis) / (cos(solar_zenith_angle * PI / 180.0) * solar_irradiance_vis)"
        dataset["sensitivity_count_vis"] = variable

        # sensitivity_count_space
        variable = tu.create_scalar_float_variable()
        variable.attrs["virtual"] = "true"
        variable.attrs["dimension"] = "y, x"
        variable.attrs[
            "expression"] = "-1.0 * distance_sun_earth * distance_sun_earth * PI * (a2_vis * years_since_launch * years_since_launch + a1_vis * years_since_launch + a0_vis) / (cos(solar_zenith_angle * PI / 180.0) * solar_irradiance_vis)"
        dataset["sensitivity_count_space"] = variable

        # sensitivity_a0_vis
        variable = tu.create_scalar_float_variable()
        variable.attrs["virtual"] = "true"
        variable.attrs["dimension"] = "y, x"
        variable.attrs[
            "expression"] = "distance_sun_earth * distance_sun_earth * PI * (count_vis - mean_count_space_vis) / (cos(solar_zenith_angle * PI / 180.0) * solar_irradiance_vis)"
        dataset["sensitivity_a0_vis"] = variable

        # sensitivity_a1_vis
        variable = tu.create_scalar_float_variable()
        variable.attrs["virtual"] = "true"
        variable.attrs["dimension"] = "y, x"
        variable.attrs[
            "expression"] = "distance_sun_earth * distance_sun_earth * PI * (count_vis - mean_count_space_vis) * years_since_launch / (cos(solar_zenith_angle * PI / 180.0) * solar_irradiance_vis)"
        dataset["sensitivity_a1_vis"] = variable

        # sensitivity_a2_vis
        variable = tu.create_scalar_float_variable()
        variable.attrs["virtual"] = "true"
        variable.attrs["dimension"] = "y, x"
        variable.attrs[
            "expression"] = "distance_sun_earth * distance_sun_earth * PI * (count_vis - mean_count_space_vis) * years_since_launch*years_since_launch / (cos(solar_zenith_angle * PI / 180.0) * solar_irradiance_vis)"
        dataset["sensitivity_a2_vis"] = variable

        effect_names = [
            "u_solar_irradiance_vis", "u_a0_vis", "u_a1_vis", "u_a2_vis",
            "u_zero_vis", "u_solar_zenith_angle", "u_mean_count_space_vis"
        ]
        dataset["Ne"] = Coordinate("Ne", effect_names)

        num_effects = len(effect_names)
        default_array = DefaultData.create_default_array(num_effects,
                                                         num_effects,
                                                         np.float32,
                                                         fill_value=np.NaN)
        variable = Variable(["Ne", "Ne"], default_array)
        tu.add_encoding(variable, np.int16, -32768, 3.05176E-05)
        variable.attrs["valid_min"] = -1
        variable.attrs["valid_max"] = 1
        variable.attrs[
            "long_name"] = "Channel error correlation matrix for structured effects."
        variable.attrs[
            "description"] = "Matrix_describing correlations between errors of the uncertainty_effects due to spectral response function errors (determined using Monte Carlo approach)"
        dataset["effect_correlation_matrix"] = variable
Example #35
 def test_0d_int32_encoding(self):
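     # the declared encoding dtype should take precedence over the in-memory
     # int32 value when the variable is encoded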
     original = Variable((), np.int32(0), encoding={'dtype': 'int64'})
     expected = Variable((), np.int64(0))
     actual = conventions.maybe_encode_dtype(original)
     self.assertDatasetIdentical(expected, actual)
Example #36
    def test_vectorized_lazily_indexed_array(self):
        original = np.random.rand(10, 20, 30)
        x = indexing.NumpyIndexingAdapter(original)
        v_eager = Variable(["i", "j", "k"], x)
        lazy = indexing.LazilyOuterIndexedArray(x)
        v_lazy = Variable(["i", "j", "k"], lazy)
        arr = ReturnItem()
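        # each indexing round below applies the same indexers to the eager and
        # lazy variables, checks the results match, and confirms the lazy side
        # is still wrapped in a lazily indexed array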

        def check_indexing(v_eager, v_lazy, indexers):
            for indexer in indexers:
                actual = v_lazy[indexer]
                expected = v_eager[indexer]
                assert expected.shape == actual.shape
                assert isinstance(
                    actual._data,
                    (
                        indexing.LazilyVectorizedIndexedArray,
                        indexing.LazilyOuterIndexedArray,
                    ),
                )
                assert_array_equal(expected, actual)
                v_eager = expected
                v_lazy = actual

        # test orthogonal indexing
        indexers = [(arr[:], 0, 1), (Variable("i", [0, 1]), )]
        check_indexing(v_eager, v_lazy, indexers)

        # vectorized indexing
        indexers = [
            (Variable("i", [0, 1]), Variable("i", [0, 1]), slice(None)),
            (slice(1, 3, 2), 0),
        ]
        check_indexing(v_eager, v_lazy, indexers)

        indexers = [
            (slice(None, None, 2), 0, slice(None, 10)),
            (Variable("i", [3, 2, 4, 3]), Variable("i", [3, 2, 1, 0])),
            (Variable(["i", "j"], [[0, 1], [1, 2]]), ),
        ]
        check_indexing(v_eager, v_lazy, indexers)

        indexers = [
            (Variable("i", [3, 2, 4, 3]), Variable("i", [3, 2, 1, 0])),
            (Variable(["i", "j"], [[0, 1], [1, 2]]), ),
        ]
        check_indexing(v_eager, v_lazy, indexers)
Example #37
 def test_missing_fillvalue(self):
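     # encoding NaN-containing floats as int16 without a _FillValue should
     # trigger a warning rather than silently corrupting the data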
     v = Variable(['x'], np.array([np.nan, 1, 2, 3]))
     v.encoding = {'dtype': 'int16'}
     with self.assertWarns('floating point data as an integer'):
         conventions.encode_cf_variable(v)
Example #38
 def _create_channel_uncertainty_uint16(height):
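     # per-channel float uncertainties, stored on disk as uint16 with a 0.01
     # scale factor and the default uint16 fill value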
     default_array = DefaultData.create_default_array(NUM_CHANNELS, height, dtype=np.float32, dims_names=["y", "channel"], fill_value=np.NaN)
     variable = Variable(["y", "channel"], default_array)
     tu.add_encoding(variable, np.uint16, DefaultData.get_default_fill_value(np.uint16), 0.01)
     return variable
Example #39
    def add_full_fcdr_variables(dataset, height):
        # c_earth
        default_array = DefaultData.create_default_array_3d(SWATH_WIDTH, height, NUM_RAD_CHANNELS, np.uint16, dims_names=["rad_channel", "y", "x"])
        variable = Variable(["rad_channel", "y", "x"], default_array)
        tu.add_fill_value(variable, DefaultData.get_default_fill_value(np.uint16))
        variable.attrs["long_name"] = "counts_earth"
        tu.add_units(variable, "count")
        variable.attrs["ancilliary_variables"] = "scnlinf quality_scanline_bitmask quality_channel_bitmask mnfrqualflags"
        dataset["c_earth"] = variable

        # L_earth
        default_array = DefaultData.create_default_array_3d(SWATH_WIDTH, height, NUM_RAD_CHANNELS, np.float32, np.NaN, ["rad_channel", "y", "x"])
        variable = Variable(["rad_channel", "y", "x"], default_array)
        tu.add_encoding(variable, np.uint32, DefaultData.get_default_fill_value(np.uint32), 0.0001)
        variable.attrs["standard_name"] = "toa_outgoing_inband_radiance"
        tu.add_units(variable, "W/Hz/m ** 2/sr")
        variable.attrs["long_name"] = "Channel radiance, NOAA/EUMETSAT calibrated"
        variable.attrs["ancilliary_variables"] = "scnlinf quality_scanline_bitmask quality_channel_bitmask mnfrqualflags"
        dataset["L_earth"] = variable

        # u_lat
        variable = HIRS._create_angle_variable(height, "uncertainty_latitude")
        dataset["u_lat"] = variable

        # u_lon
        variable = HIRS._create_angle_variable(height, "uncertainty_longitude")
        dataset["u_lon"] = variable

        # u_time
        variable = tu.create_float_variable(SWATH_WIDTH, height, "uncertainty_time")
        tu.add_encoding(variable, np.uint16, 65535, 0.01)
        tu.add_units(variable, "s")
        dataset["u_time"] = variable

        # u_c_earth
        default_array = DefaultData.create_default_array(NUM_CALIBRATION_CYCLE, NUM_CHANNELS, np.uint16, dims_names=["channel", "calibration_cycle"])
        variable = Variable(["channel", "calibration_cycle"], default_array)
        tu.add_fill_value(variable, DefaultData.get_default_fill_value(np.uint16))
        tu.add_units(variable, "count")
        variable.attrs["long_name"] = "uncertainty counts for Earth views"
        variable.attrs["ancilliary_variables"] = "u_c_earth_chan_corr"
        variable.attrs["channels_affected"] = "all"
        variable.attrs["parameter"] = "C_E"
        variable.attrs["pdf_shape"] = "gaussian"
        dataset["u_c_earth"] = variable

        # u_L_earth_independent
        variable = HIRS._create_3d_rad_uncertainty_variable(height, "uncertainty_radiance_Earth_random")
        tu.add_encoding(variable, np.uint32, DefaultData.get_default_fill_value(np.uint32), 0.01)
        tu.add_units(variable, "mW m^-2 sr^-1 cm")
        dataset["u_L_earth_independent"] = variable

        # u_L_earth_structured
        variable = HIRS._create_3d_rad_uncertainty_variable(height, "uncertainty_radiance_Earth_structured")
        tu.add_encoding(variable, np.uint32, DefaultData.get_default_fill_value(np.uint32), 0.01)
        tu.add_units(variable, "mW m^-2 sr^-1 cm")
        dataset["u_L_earth_structured"] = variable

        # u_L_earth_systematic
        variable = HIRS._create_3d_rad_uncertainty_variable(height, "uncertainty_radiance_Earth_systematic")
        tu.add_encoding(variable, np.uint32, DefaultData.get_default_fill_value(np.uint32), 0.01)
        tu.add_units(variable, "mW m^-2 sr^-1 cm")
        dataset["u_L_earth_systematic"] = variable

        # u_L_earth_total
        variable = HIRS._create_3d_rad_uncertainty_variable(height, "uncertainty_radiance_Earth_total")
        tu.add_encoding(variable, np.uint32, DefaultData.get_default_fill_value(np.uint32), 0.01)
        tu.add_units(variable, "mW m^-2 sr^-1 cm")
        dataset["u_L_earth_total"] = variable

        # S_u_L_earth
        variable = tu.create_float_variable(NUM_RAD_CHANNELS, NUM_RAD_CHANNELS, "covariance_radiance_Earth", dim_names=["rad_channel", "rad_channel"])
        tu.add_encoding(variable, np.uint32, DefaultData.get_default_fill_value(np.uint32), 0.01)
        dataset["S_u_L_earth"] = variable

        # u_bt_random
        variable = HIRS._create_3d_bt_uncertainty_variable(height, "uncertainty_bt_random")
        tu.add_encoding(variable, np.uint16, DefaultData.get_default_fill_value(np.uint16), 0.01)
        tu.add_units(variable, "K")
        dataset["u_bt_random"] = variable

        # u_bt_structured
        variable = HIRS._create_3d_bt_uncertainty_variable(height, "uncertainty_bt_structured")
        tu.add_encoding(variable, np.uint16, DefaultData.get_default_fill_value(np.uint16), 0.01)
        tu.add_units(variable, "K")
        dataset["u_bt_structured"] = variable

        # u_bt_systematic
        variable = HIRS._create_3d_bt_uncertainty_variable(height, "uncertainty_bt_systematic")
        tu.add_encoding(variable, np.uint16, DefaultData.get_default_fill_value(np.uint16), 0.01)
        tu.add_units(variable, "K")
        dataset["u_bt_systematic"] = variable

        # u_bt_total
        variable = HIRS._create_3d_bt_uncertainty_variable(height, "uncertainty_bt_total")
        tu.add_encoding(variable, np.uint16, DefaultData.get_default_fill_value(np.uint16), 0.01)
        tu.add_units(variable, "K")
        dataset["u_bt_total"] = variable

        # S_bt
        variable = tu.create_float_variable(NUM_RAD_CHANNELS, NUM_RAD_CHANNELS, "covariance_brightness_temperature", dim_names=["rad_channel", "rad_channel"])
        tu.add_encoding(variable, np.uint16, DefaultData.get_default_fill_value(np.uint16), 0.01)
        dataset["S_bt"] = variable

        # l1b_calcof
        default_array = DefaultData.create_default_array(height, NUM_COEFFS, np.float32, dims_names=["coeffs", "y"])
        variable = Variable(["coeffs", "y"], default_array)
        tu.add_encoding(variable, np.int32, DefaultData.get_default_fill_value(np.int32), 0.01)
        variable.attrs["standard_name"] = "calibration_coefficients"
        dataset["l1b_calcof"] = variable

        # navigation_status
        variable = HIRS._create_int32_vector(height, standard_name="status_flag", long_name="Navigation status bit field", orig_name="hrs_navstat")
        dataset["navigation_status"] = variable

        # quality_flags
        variable = HIRS._create_int32_vector(height, standard_name="status_flag", long_name="Quality indicator bit field", orig_name="hrs_qualind")
        dataset["quality_flags"] = variable

        variable = HIRS._create_scaled_uint16_vector(height, long_name="Platform altitude", original_name="hrs_scalti")
        tu.add_units(variable, "km")
        dataset["platform_altitude"] = variable

        variable = HIRS._create_scaled_int16_vector(height, long_name="Platform pitch angle", original_name="hrs_pitchang")
        tu.add_units(variable, "degree")
        dataset["platform_pitch_angle"] = variable

        variable = HIRS._create_scaled_int16_vector(height, long_name="Platform roll angle", original_name="hrs_rollang")
        tu.add_units(variable, "degree")
        dataset["platform_roll_angle"] = variable

        variable = HIRS._create_scaled_int16_vector(height, long_name="Platform yaw angle", original_name="hrs_yawang")
        tu.add_units(variable, "degree")
        dataset["platform_yaw_angle"] = variable

        # scan_angles
        default_array = DefaultData.create_default_array(NUM_SCAN_ANGLES, height, np.float32, dims_names=["y", "num_scan_angles"], fill_value=np.NaN)
        variable = Variable(["y", "num_scan_angles"], default_array)
        tu.add_encoding(variable, np.uint16, DefaultData.get_default_fill_value(np.uint16), scale_factor=0.01)
        tu.add_units(variable, "degree")
        variable.attrs["long_name"] = "Scan angles"
        variable.attrs["orig_name"] = "hrs_ang"
        dataset["scan_angles"] = variable

        dataset["l1b_scanline_number"] = HIRS._create_int16_vector(height, long_name="scanline number", orig_name="hrs_scnlin")
        dataset["scanline_position"] = HIRS._create_int8_vector(height, long_name="Scanline position number in 32 second cycle", orig_name="hrs_scnpos")

        # second_original_calibration_coefficients
        default_array = DefaultData.create_default_array(WIDTH_TODO, height, np.float32, fill_value=np.NaN)
        variable = Variable(["y", "width_todo"], default_array)
        tu.add_encoding(variable, np.int32, DefaultData.get_default_fill_value(np.int32), scale_factor=0.01)
        variable.attrs["long_name"] = "Second original calibration coefficients (unsorted)"
        variable.attrs["orig_name"] = "hrs_scalcof"
        dataset["l1b_second_original_calibration_coefficients"] = variable

        dataset["Tc_baseplate"] = HIRS._create_counts_vector(height, "temperature_baseplate_counts")
        dataset["Tc_ch"] = HIRS._create_counts_vector(height, "temperature_coolerhousing_counts")
        dataset["Tc_elec"] = HIRS._create_counts_vector(height, "temperature_electronics_counts")
        dataset["Tc_fsr"] = HIRS._create_counts_vector(height, "temperature_first_stage_radiator_counts")
        dataset["Tc_fwh"] = HIRS._create_counts_vector(height, "temperature_filter_wheel_housing_counts")
        dataset["Tc_fwm"] = HIRS._create_counts_vector(height, "temperature_filter_wheel_monitor_counts")
        dataset["Tc_icct"] = HIRS._create_counts_vector(height, "temperature_internal_cold_calibration_target_counts")
        dataset["Tc_iwct"] = HIRS._create_counts_vector(height, "temperature_internal_warm_calibration_target_counts")
        dataset["Tc_patch_exp"] = HIRS._create_counts_vector(height, "temperature_patch_expanded_scale_counts")
        dataset["Tc_patch_full"] = HIRS._create_counts_vector(height, "temperature_patch_full_range_counts")
        dataset["Tc_tlscp_prim"] = HIRS._create_counts_vector(height, "temperature_telescope_primary_counts")
        dataset["Tc_tlscp_sec"] = HIRS._create_counts_vector(height, "temperature_telescope_secondary_counts")
        dataset["Tc_tlscp_tert"] = HIRS._create_counts_vector(height, "temperature_telescope_tertiary_counts")
        dataset["Tc_scanmirror"] = HIRS._create_counts_vector(height, "temperature_scanmirror_counts")
        dataset["Tc_scanmotor"] = HIRS._create_counts_vector(height, "temperature_scanmotor_counts")

        dataset["u_Tc_baseplate"] = HIRS._create_counts_uncertainty_vector(height, "uncertainty_temperature_baseplate_counts")
        dataset["u_Tc_ch"] = HIRS._create_counts_uncertainty_vector(height, "uncertainty_temperature_coolerhousing_counts")
        dataset["u_Tc_elec"] = HIRS._create_counts_uncertainty_vector(height, "uncertainty_temperature_electronics_counts")
        dataset["u_Tc_fsr"] = HIRS._create_counts_uncertainty_vector(height, "uncertainty_temperature_first_stage_radiator_counts")
        dataset["u_Tc_fwh"] = HIRS._create_counts_uncertainty_vector(height, "uncertainty_temperature_filter_wheel_housing_counts")
        dataset["u_Tc_fwm"] = HIRS._create_counts_uncertainty_vector(height, "uncertainty_temperature_filter_wheel_monitor_counts")
        dataset["u_Tc_icct"] = HIRS._create_counts_uncertainty_vector_uint32(height, "uncertainty_temperature_internal_cold_calibration_target_counts")
        dataset["u_Tc_iwct"] = HIRS._create_counts_uncertainty_vector_uint32(height, "uncertainty_temperature_internal_warm_calibration_target_counts")
        dataset["u_Tc_patch_exp"] = HIRS._create_counts_uncertainty_vector_uint32(height, "uncertainty_temperature_patch_expanded_scale_counts")
        dataset["u_Tc_patch_full"] = HIRS._create_counts_uncertainty_vector_uint32(height, "uncertainty_temperature_patch_full_range_counts")
        dataset["u_Tc_tlscp_prim"] = HIRS._create_counts_uncertainty_vector_uint32(height, "uncertainty_temperature_telescope_primary_counts")
        dataset["u_Tc_tlscp_sec"] = HIRS._create_counts_uncertainty_vector_uint32(height, "uncertainty_temperature_telescope_secondary_counts")
        dataset["u_Tc_tlscp_tert"] = HIRS._create_counts_uncertainty_vector_uint32(height, "uncertainty_temperature_telescope_tertiary_counts")
        dataset["u_Tc_scanmirror"] = HIRS._create_counts_uncertainty_vector_uint32(height, "uncertainty_temperature_scanmirror_counts")
        dataset["u_Tc_scanmotor"] = HIRS._create_counts_uncertainty_vector_uint32(height, "uncertainty_temperature_scanmotor_counts")

        dataset["u_sol_za"] = HIRS._create_geo_angle_uncertainty_variable("uncertainty_solar_zenith_angle", height, FILL_VALUE)
        dataset["u_sol_aa"] = HIRS._create_geo_angle_uncertainty_variable("uncertainty_solar_azimuth_angle", height, FILL_VALUE)
        dataset["u_sat_za"] = HIRS._create_geo_angle_uncertainty_variable("uncertainty_satellite_zenith_angle", height, FILL_VALUE)
        dataset["u_sat_aa"] = HIRS._create_geo_angle_uncertainty_variable("uncertainty_local_azimuth_angle", height, FILL_VALUE)

        # u_c_earth_chan_corr
        dataset["u_c_earth_chan_corr"] = HIRS._create_channel_correlation_variable("u_c_earth channel correlations", np.int16)

        # u_c_space
        default_array = DefaultData.create_default_array(NUM_CALIBRATION_CYCLE, NUM_CHANNELS, np.uint16, dims_names=["channel", "calibration_cycle"])
        variable = Variable(["channel", "calibration_cycle"], default_array)
        tu.add_fill_value(variable, DefaultData.get_default_fill_value(np.uint16))
        tu.add_units(variable, "count")
        tu.add_scale_factor(variable, 0.005)
        variable.attrs["long_name"] = "uncertainty counts for space views"
        variable.attrs["ancilliary_variables"] = "u_c_space_chan_corr"
        variable.attrs["channels_affected"] = "all"
        variable.attrs["parameter"] = "C_s"
        variable.attrs["pdf_shape"] = "gaussian"
        dataset["u_c_space"] = variable

        # u_c_space_chan_corr
        dataset["u_c_space_chan_corr"] = HIRS._create_channel_correlation_variable("u_c_space channel correlations", np.uint16)

        # u_Earthshine
        dataset["u_Earthshine"] = HIRS._create_channel_uncertainty_uint16(height)

        # u_O_Re
        dataset["u_O_Re"] = HIRS._create_channel_uncertainty_uint16(height)

        # u_O_TIWCT
        default_array = DefaultData.create_default_vector(height, np.float32, np.NaN)
        variable = Variable(["y"], default_array)
        tu.add_encoding(variable, np.uint16, DefaultData.get_default_fill_value(np.uint16), 0.01)
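        # Unlike add_fill_value/add_scale_factor above, add_encoding presumably
        # keeps the in-memory data as float32 and only packs it into uint16
        # (scale 0.01) when the dataset is written to disk.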
        dataset["u_O_TIWCT"] = variable

        # u_O_TPRT
        default_array = DefaultData.create_default_vector(height, np.uint16, DefaultData.get_default_fill_value(np.uint16))
        variable = Variable(["y"], default_array)
        tu.add_fill_value(variable, 65535)
        tu.add_scale_factor(variable, 0.01)
        tu.add_units(variable, "K")
        variable.attrs["channels_affected"] = "all"
        variable.attrs[corr.PIX_CORR_FORM] = corr.RECT_ABS
        variable.attrs[corr.PIX_CORR_UNIT] = corr.PIXEL
        variable.attrs[corr.PIX_CORR_SCALE] = [-np.inf, np.inf]
        variable.attrs[corr.SCAN_CORR_FORM] = corr.RECT_ABS
        variable.attrs[corr.SCAN_CORR_UNIT] = corr.LINE
        variable.attrs[corr.SCAN_CORR_SCALE] = [-np.inf, np.inf]
        variable.attrs[corr.IMG_CORR_FORM] = corr.RECT_ABS
        variable.attrs[corr.IMG_CORR_UNIT] = corr.IMG
        variable.attrs[corr.IMG_CORR_SCALE] = [-np.inf, np.inf]
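        # The corr.* attribute triplets describe the error-correlation
        # structure per axis (pixel, scanline, image); a rectangular/absolute
        # form with a (-inf, inf) scale presumably marks the error as fully
        # correlated along that axis.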
        variable.attrs["parameter"] = "O_TPRT"
        variable.attrs["pdf_shape"] = "gaussian"
        variable.attrs["short_name"] = "O_TPRT"
        variable.attrs["ancilliary_variables"] = "u_O_TPRT_chan_corr"
        dataset["u_O_TPRT"] = variable

        dataset["u_Rself"] = HIRS._create_channel_uncertainty_uint16(height)
        dataset["u_SRF_calib"] = HIRS._create_channel_uncertainty_uint16(height)

        default_array = DefaultData.create_default_array(PRT_NUMBER_IWT, PRT_READING, dtype=np.float32, dims_names=["prt_number_iwt", "prt_reading"], fill_value=np.NaN)
        variable = Variable(["prt_number_iwt", "prt_reading"], default_array)
        tu.add_encoding(variable, np.uint16, DefaultData.get_default_fill_value(np.uint16), 0.01)
        dataset["u_d_PRT"] = variable

        dataset["u_electronics"] = HIRS._create_channel_uncertainty_uint16(height)
        dataset["u_periodic_noise"] = HIRS._create_channel_uncertainty_uint16(height)
        dataset["u_nonlinearity"] = HIRS._create_scaled_uint16_vector(NUM_CHANNELS, dimension_name=["channel"], scale_factor=0.01)
        dataset["emissivity"] = tu.create_scalar_float_variable("emissivity", units="1")
        dataset["temp_corr_slope"] = tu.create_scalar_float_variable("Slope for effective temperature correction", units="1")
        dataset["temp_corr_offset"] = tu.create_scalar_float_variable("Offset for effective temperature correction", units="1")

        # mnfrqualflags
        default_array = DefaultData.create_default_array(NUM_MINOR_FRAME, height, np.int32, dims_names=["y", "minor_frame"], fill_value=0)
        variable = Variable(["y", "minor_frame"], default_array)
        variable.attrs["standard_name"] = "status_flag"
        variable.attrs["long_name"] = "minor_frame_quality_flags_bitfield"
        dataset["mnfrqualflags"] = variable

        # scnlintime
        variable = HIRS._create_int32_vector(height, standard_name="time", long_name="Scan line time of day", orig_name="hrs_scnlintime")
        tu.add_units(variable, "ms")
        dataset["scnlintime"] = variable

        # scnlinf
        default_array = DefaultData.create_default_vector(height, np.int16, fill_value=0)
        variable = Variable(["y"], default_array)
        variable.attrs["standard_name"] = "status_flag"
        variable.attrs["long_name"] = "scanline_bitfield"
        variable.attrs["flag_masks"] = "16384, 32768"
        variable.attrs["flag_meanings"] = "clock_drift_correction southbound_data"
        dataset["scnlinf"] = variable

        # scantype
        default_array = DefaultData.create_default_vector(height, np.int8, fill_value=0)
        variable = Variable(["y"], default_array)
        variable.attrs["standard_name"] = "status_flag"
        variable.attrs["long_name"] = "scantype_bitfield"
        variable.attrs["flag_values"] = "0, 1, 2, 3"
        variable.attrs["flag_meanings"] = "earth_view space_view cold_bb_view main_bb_view"
        dataset["scantype"] = variable
Example #40
    def add_original_variables(dataset, height, srf_size=None):
        # height is ignored - supplied just for interface compatibility tb 2017-02-05

        tu.add_quality_flags(dataset,
                             FULL_SIZE,
                             FULL_SIZE,
                             chunksizes=CHUNKSIZES)

        # time
        default_array = DefaultData.create_default_array(
            IR_SIZE, IR_SIZE, np.uint32)
        variable = Variable([IR_Y_DIMENSION, IR_X_DIMENSION], default_array)
        tu.add_fill_value(variable,
                          DefaultData.get_default_fill_value(np.uint32))
        variable.attrs["standard_name"] = "time"
        variable.attrs["long_name"] = "Acquisition time of pixel"
        tu.add_units(variable, "seconds since 1970-01-01 00:00:00")
        tu.add_offset(variable, TIME_FILL_VALUE)
        tu.add_chunking(variable, CHUNKSIZES)
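        # Pixel time is packed as uint32 with CF-style epoch units; the
        # add_offset of TIME_FILL_VALUE presumably shifts the representable
        # range, and chunking is applied since the array spans the full IR grid.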
        dataset["time"] = variable

        dataset["solar_azimuth_angle"] = MVIRI._create_angle_variable_int(
            0.005493164, standard_name="solar_azimuth_angle", unsigned=True)
        dataset["solar_zenith_angle"] = MVIRI._create_angle_variable_int(
            0.005493248, standard_name="solar_zenith_angle")
        dataset["satellite_azimuth_angle"] = MVIRI._create_angle_variable_int(
            0.01,
            standard_name="sensor_azimuth_angle",
            long_name="sensor_azimuth_angle",
            unsigned=True)
        dataset["satellite_zenith_angle"] = MVIRI._create_angle_variable_int(
            0.01, standard_name="platform_zenith_angle", unsigned=True)

        # count_ir
        default_array = DefaultData.create_default_array(
            IR_SIZE, IR_SIZE, np.uint8)
        variable = Variable([IR_Y_DIMENSION, IR_X_DIMENSION], default_array)
        tu.add_fill_value(variable,
                          DefaultData.get_default_fill_value(np.uint8))
        variable.attrs["long_name"] = "Infrared Image Counts"
        tu.add_units(variable, "count")
        tu.add_chunking(variable, CHUNKSIZES)
        dataset["count_ir"] = variable

        # count_wv
        default_array = DefaultData.create_default_array(
            IR_SIZE, IR_SIZE, np.uint8)
        variable = Variable([IR_Y_DIMENSION, IR_X_DIMENSION], default_array)
        tu.add_fill_value(variable,
                          DefaultData.get_default_fill_value(np.uint8))
        variable.attrs["long_name"] = "WV Image Counts"
        tu.add_units(variable, "count")
        tu.add_chunking(variable, CHUNKSIZES)
        dataset["count_wv"] = variable

        default_array = DefaultData.create_default_array(FULL_SIZE,
                                                         FULL_SIZE,
                                                         np.uint8,
                                                         fill_value=0)
        variable = Variable(["y", "x"], default_array)
        variable.attrs["flag_masks"] = "1, 2, 4, 8, 16, 32"
        variable.attrs["flag_meanings"] = ("uncertainty_suspicious uncertainty_too_large "
                                           "space_view_suspicious not_on_earth suspect_time suspect_geo")
        variable.attrs["standard_name"] = "status_flag"
        tu.add_chunking(variable, CHUNKSIZES)
        dataset["data_quality_bitmask"] = variable

        # distance_sun_earth
        dataset["distance_sun_earth"] = tu.create_scalar_float_variable(
            long_name="Sun-Earth distance", units="au")

        # solar_irradiance_vis
        dataset["solar_irradiance_vis"] = tu.create_scalar_float_variable(
            standard_name="solar_irradiance_vis",
            long_name="Solar effective Irradiance",
            units="W*m-2")

        # u_solar_irradiance_vis
        default_array = np.full([], np.NaN, np.float32)
        variable = Variable([], default_array)
        tu.add_fill_value(variable, np.NaN)
        variable.attrs["long_name"] = "Uncertainty in Solar effective Irradiance"
        tu.add_units(variable, "Wm^-2")
        variable.attrs[corr.PIX_CORR_FORM] = corr.RECT_ABS
        variable.attrs[corr.PIX_CORR_UNIT] = corr.PIXEL
        variable.attrs[corr.PIX_CORR_SCALE] = [-np.inf, np.inf]
        variable.attrs[corr.SCAN_CORR_FORM] = corr.RECT_ABS
        variable.attrs[corr.SCAN_CORR_UNIT] = corr.LINE
        variable.attrs[corr.SCAN_CORR_SCALE] = [-np.inf, np.inf]
        variable.attrs[corr.IMG_CORR_FORM] = corr.RECT_ABS
        variable.attrs[corr.IMG_CORR_UNIT] = corr.DAYS
        variable.attrs[corr.IMG_CORR_SCALE] = [-np.inf, np.inf]
        variable.attrs["pdf_shape"] = "rectangle"
        dataset["u_solar_irradiance_vis"] = variable

        if srf_size is None:
            srf_size = SRF_SIZE

        default_array = DefaultData.create_default_array(srf_size,
                                                         NUM_CHANNELS,
                                                         np.float32,
                                                         fill_value=np.NaN)
        variable = Variable(["channel", "n_frequencies"], default_array)
        variable.attrs["long_name"] = 'Spectral Response Function weights'
        variable.attrs["description"] = ('Per channel: weights for the relative '
                                         'spectral response function')
        tu.add_encoding(variable, np.int16, -32768, 0.000033)
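        # With scale factor 0.000033, the int16 range covers roughly
        # [-1.08, 1.08] (32767 * 0.000033 ~= 1.081), which comfortably spans
        # SRF weights that are presumably normalised to [0, 1].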
        dataset['SRF_weights'] = variable

        default_array = DefaultData.create_default_array(srf_size,
                                                         NUM_CHANNELS,
                                                         np.float32,
                                                         fill_value=np.NaN)
        variable = Variable(["channel", "n_frequencies"], default_array)
        variable.attrs["long_name"] = 'Spectral Response Function frequencies'
        variable.attrs["description"] = ('Per channel: frequencies for the relative '
                                         'spectral response function')
        tu.add_encoding(variable, np.int32, -2147483648, 0.0001)
        tu.add_units(variable, "nm")
        variable.attrs["source"] = "Filename of SRF"
        variable.attrs["Valid(YYYYDDD)"] = "datestring"
        dataset['SRF_frequencies'] = variable

        # srf covariance_
        default_array = DefaultData.create_default_array(srf_size,
                                                         srf_size,
                                                         np.float32,
                                                         fill_value=np.NaN)
        variable = Variable([SRF_VIS_DIMENSION, SRF_VIS_DIMENSION],
                            default_array)
        tu.add_fill_value(variable, np.NaN)
        variable.attrs["long_name"] = ("Covariance of the Visible Band "
                                       "Spectral Response Function")
        tu.add_chunking(variable, CHUNKSIZES)
        dataset["covariance_spectral_response_function_vis"] = variable

        # u_srf_ir
        default_array = DefaultData.create_default_vector(srf_size,
                                                          np.float32,
                                                          fill_value=np.NaN)
        variable = Variable([SRF_IR_WV_DIMENSION], default_array)
        tu.add_fill_value(variable, np.NaN)
        variable.attrs["long_name"] = ("Uncertainty in Spectral Response "
                                       "Function for IR channel")
        dataset["u_spectral_response_function_ir"] = variable

        # u_srf_wv
        default_array = DefaultData.create_default_vector(srf_size,
                                                          np.float32,
                                                          fill_value=np.NaN)
        variable = Variable([SRF_IR_WV_DIMENSION], default_array)
        tu.add_fill_value(variable, np.NaN)
        variable.attrs["long_name"] = ("Uncertainty in Spectral Response "
                                       "Function for WV channel")
        dataset["u_spectral_response_function_wv"] = variable

        dataset["a_ir"] = tu.create_scalar_float_variable(
            long_name="Calibration parameter a for IR Band",
            units="mWm^-2sr^-1cm^-1")
        dataset["b_ir"] = tu.create_scalar_float_variable(
            long_name="Calibration parameter b for IR Band",
            units="mWm^-2sr^-1cm^-1/DC")
        dataset["u_a_ir"] = tu.create_scalar_float_variable(
            long_name="Uncertainty of calibration parameter a for IR Band",
            units="mWm^-2sr^-1cm^-1")
        dataset["u_b_ir"] = tu.create_scalar_float_variable(
            long_name="Uncertainty of calibration parameter b for IR Band",
            units="mWm^-2sr^-1cm^-1/DC")
        dataset["a_wv"] = tu.create_scalar_float_variable(
            long_name="Calibration parameter a for WV Band",
            units="mWm^-2sr^-1cm^-1")
        dataset["b_wv"] = tu.create_scalar_float_variable(
            long_name="Calibration parameter b for WV Band",
            units="mWm^-2sr^-1cm^-1/DC")
        dataset["u_a_wv"] = tu.create_scalar_float_variable(
            long_name="Uncertainty of calibration parameter a for WV Band",
            units="mWm^-2sr^-1cm^-1")
        dataset["u_b_wv"] = tu.create_scalar_float_variable(
            long_name="Uncertainty of calibration parameter b for WV Band",
            units="mWm^-2sr^-1cm^-1/DC")
        dataset["bt_a_ir"] = tu.create_scalar_float_variable(
            long_name="IR Band BT conversion parameter A", units="1")
        dataset["bt_b_ir"] = tu.create_scalar_float_variable(
            long_name="IR Band BT conversion parameter B", units="1")
        dataset["bt_a_wv"] = tu.create_scalar_float_variable(
            long_name="WV Band BT conversion parameter A", units="1")
        dataset["bt_b_wv"] = tu.create_scalar_float_variable(
            long_name="WV Band BT conversion parameter B", units="1")
        dataset["years_since_launch"] = tu.create_scalar_float_variable(
            long_name="Fractional year since launch of satellite",
            units="years")

        x_ir_wv_dim = dataset.dims["x_ir_wv"]
        dataset["x_ir_wv"] = Coordinate(
            "x_ir_wv", np.arange(x_ir_wv_dim, dtype=np.uint16))

        y_ir_wv_dim = dataset.dims["y_ir_wv"]
        dataset["y_ir_wv"] = Coordinate(
            "y_ir_wv", np.arange(y_ir_wv_dim, dtype=np.uint16))

        srf_size_dim = dataset.dims["srf_size"]
        dataset["srf_size"] = Coordinate(
            "srf_size", np.arange(srf_size_dim, dtype=np.uint16))
Example #41
    def add_full_fcdr_variables(dataset, height):
        # u_Temperature_misc_housekeeping
        default_array = DefaultData.create_default_array(
            height,
            NUM_THERMISTORS,
            np.float32,
            dims_names=["housekeeping", "y"],
            fill_value=np.NaN)
        variable = Variable(["housekeeping", "y"], default_array)
        tu.add_fill_value(variable, np.NaN)
        variable.attrs["long_name"] = "TODO"
        variable.attrs["units"] = "TODO"
        dataset["u_Temperature_misc_housekeeping"] = variable

        # u_cold_counts
        default_array = DefaultData.create_default_array_3d(
            SWATH_WIDTH, height, CALIB_NUMBER, np.float32, np.NaN)
        variable = Variable(["calib_number", "y", "x"], default_array)
        tu.add_fill_value(variable, np.NaN)
        variable.attrs["long_name"] = "TODO"
        dataset["u_cold_counts"] = variable

        # u_counts_to_tb_gain
        default_array = DefaultData.create_default_array(
            height,
            NUM_CHANNELS,
            np.float32,
            dims_names=["channel", "y"],
            fill_value=np.NaN)
        variable = Variable(["channel", "y"], default_array)
        tu.add_fill_value(variable, np.NaN)
        variable.attrs["long_name"] = "TODO"
        dataset["u_counts_to_tb_gain"] = variable

        # u_counts_to_tb_offset
        default_array = DefaultData.create_default_array(
            height,
            NUM_CHANNELS,
            np.float32,
            dims_names=["channel", "y"],
            fill_value=np.NaN)
        variable = Variable(["channel", "y"], default_array)
        tu.add_fill_value(variable, np.NaN)
        variable.attrs["long_name"] = "TODO"
        dataset["u_counts_to_tb_offset"] = variable

        # u_gain_control
        default_array = DefaultData.create_default_array(
            height,
            NUM_CHANNELS,
            np.float32,
            dims_names=["channel", "y"],
            fill_value=np.NaN)
        variable = Variable(["channel", "y"], default_array)
        tu.add_fill_value(variable, np.NaN)
        variable.attrs["long_name"] = "TODO"
        dataset["u_gain_control"] = variable

        # u_tb
        default_array = DefaultData.create_default_array_3d(
            SWATH_WIDTH, height, NUM_CHANNELS, np.float32, np.NaN)
        variable = Variable(["channel", "y", "x"], default_array)
        tu.add_fill_value(variable, np.NaN)
        variable.attrs["long_name"] = "TODO"
        tu.add_units(variable, "K")
        dataset["u_tb"] = variable

        # u_thermal_reference
        default_array = DefaultData.create_default_vector(
            height, np.float32, np.NaN)
        variable = Variable(["y"], default_array)
        tu.add_fill_value(variable, np.NaN)
        variable.attrs["long_name"] = "TODO"
        tu.add_units(variable, "TODO")
        dataset["u_thermal_reference"] = variable

        # u_warm_counts
        default_array = DefaultData.create_default_array_3d(
            SWATH_WIDTH, height, CALIB_NUMBER, np.float32, np.NaN)
        variable = Variable(["calib_number", "y", "x"], default_array)
        tu.add_fill_value(variable, np.NaN)
        variable.attrs["long_name"] = "TODO"
        dataset["u_warm_counts"] = variable
Example #42
 def _create_channel_correlation_variable(long_name, data_type):
     data_array = DefaultData.create_default_array(NUM_CHANNELS, NUM_CHANNELS, np.float32, fill_value=np.NaN)
     variable = Variable(["channel", "channel"], data_array)
     tu.add_encoding(variable, data_type, DefaultData.get_default_fill_value(data_type), scale_factor=0.01)
     variable.attrs["long_name"] = long_name
     return variable
Example #43
 def test_shift2d(self):
     v = Variable(('x', 'y'), [[1, 2], [3, 4]])
     expected = Variable(('x', 'y'), [[np.nan, np.nan], [np.nan, 1]])
     self.assertVariableIdentical(expected, v.shift(x=1, y=1))
Example #44
 def _create_3d_bt_uncertainty_variable(height, standard_name):
     default_array = DefaultData.create_default_array_3d(SWATH_WIDTH, height, NUM_CHANNELS, np.float32, dims_names=["channel", "y", "x"])
     variable = Variable(["channel", "y", "x"], default_array)
     tu.add_fill_value(variable, DefaultData.get_default_fill_value(np.float32))
     variable.attrs["standard_name"] = standard_name
     return variable
Example #45
 def test_big_endian_reduce(self):
     # regression test for GH489
     data = np.ones(5, dtype='>f4')
     v = Variable(['x'], data)
     expected = Variable([], 5)
     self.assertVariableIdentical(expected, v.sum())
Example #46
    def test_expand_dims(self):
        v = Variable(['x'], [0, 1])
        actual = v.expand_dims(['x', 'y'])
        expected = Variable(['x', 'y'], [[0], [1]])
        self.assertVariableIdentical(actual, expected)

        actual = v.expand_dims(['y', 'x'])
        self.assertVariableIdentical(actual, expected.T)

        actual = v.expand_dims(OrderedDict([('x', 2), ('y', 2)]))
        expected = Variable(['x', 'y'], [[0, 0], [1, 1]])
        self.assertVariableIdentical(actual, expected)
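        # expand_dims with a list of names inserts (or reorders) size-1
        # dimensions, while the OrderedDict form also broadcasts the values
        # to the requested sizes.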

        v = Variable(['foo'], [0, 1])
        actual = v.expand_dims('foo')
        expected = v
        self.assertVariableIdentical(actual, expected)

        with self.assertRaisesRegexp(ValueError, 'must be a superset'):
            v.expand_dims(['z'])
Example #47
    def test_reduce(self):
        v = Variable(['x', 'y'], self.d, {'ignored': 'attributes'})
        self.assertVariableIdentical(v.reduce(np.std, 'x'),
                                     Variable(['y'], self.d.std(axis=0)))
        self.assertVariableIdentical(v.reduce(np.std, axis=0),
                                     v.reduce(np.std, dim='x'))
        self.assertVariableIdentical(v.reduce(np.std, ['y', 'x']),
                                     Variable([], self.d.std(axis=(0, 1))))
        self.assertVariableIdentical(v.reduce(np.std),
                                     Variable([], self.d.std()))
        self.assertVariableIdentical(
            v.reduce(np.mean, 'x').reduce(np.std, 'y'),
            Variable([],
                     self.d.mean(axis=0).std()))
        self.assertVariableAllClose(v.mean('x'), v.reduce(np.mean, 'x'))
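        # reduce accepts either dimension names or positional axes; supplying
        # both at once is rejected, as checked below.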

        with self.assertRaisesRegexp(ValueError, 'cannot supply both'):
            v.mean(dim='x', axis=0)
Example #48
    def test_squeeze(self):
        v = Variable(['x', 'y'], [[1]])
        self.assertVariableIdentical(Variable([], 1), v.squeeze())
        self.assertVariableIdentical(Variable(['y'], [1]), v.squeeze('x'))
        self.assertVariableIdentical(Variable(['y'], [1]), v.squeeze(['x']))
        self.assertVariableIdentical(Variable(['x'], [1]), v.squeeze('y'))
        self.assertVariableIdentical(Variable([], 1), v.squeeze(['x', 'y']))

        v = Variable(['x', 'y'], [[1, 2]])
        self.assertVariableIdentical(Variable(['y'], [1, 2]), v.squeeze())
        self.assertVariableIdentical(Variable(['y'], [1, 2]), v.squeeze('x'))
        with self.assertRaisesRegexp(ValueError, 'cannot select a dimension'):
            v.squeeze('y')
Example #49
 def test_stack_unstack_consistency(self):
     v = Variable(['x', 'y'], [[0, 1], [2, 3]])
     actual = (v.stack(z=('x', 'y'))
               .unstack(z=OrderedDict([('x', 2), ('y', 2)])))
     self.assertVariableIdentical(actual, v)
Example #50
    def add_easy_fcdr_variables(dataset,
                                height,
                                corr_dx=None,
                                corr_dy=None,
                                lut_size=None):
        # height is ignored - supplied just for interface compatibility tb 2017-02-05

        # reflectance
        default_array = DefaultData.create_default_array(FULL_SIZE,
                                                         FULL_SIZE,
                                                         np.float32,
                                                         fill_value=np.NaN)
        variable = Variable(["y", "x"], default_array)
        variable.attrs["standard_name"] = "toa_bidirectional_reflectance_vis"
        variable.attrs["long_name"] = ("top of atmosphere bidirectional reflectance "
                                       "factor per pixel of the visible band with "
                                       "central wavelength 0.7")
        tu.add_units(variable, "1")
        tu.add_encoding(variable,
                        np.uint16,
                        DefaultData.get_default_fill_value(np.uint16),
                        3.05176E-05,
                        chunksizes=CHUNKSIZES)
        dataset["toa_bidirectional_reflectance_vis"] = variable

        # u_independent
        default_array = DefaultData.create_default_array(FULL_SIZE,
                                                         FULL_SIZE,
                                                         np.float32,
                                                         fill_value=np.NaN)
        variable = Variable(["y", "x"], default_array)
        variable.attrs["long_name"] = "independent uncertainty per pixel"
        tu.add_units(variable, "1")
        tu.add_encoding(variable,
                        np.uint16,
                        DefaultData.get_default_fill_value(np.uint16),
                        3.05176E-05,
                        chunksizes=CHUNKSIZES)
        dataset["u_independent_toa_bidirectional_reflectance"] = variable

        # u_structured
        default_array = DefaultData.create_default_array(FULL_SIZE,
                                                         FULL_SIZE,
                                                         np.float32,
                                                         fill_value=np.NaN)
        variable = Variable(["y", "x"], default_array)
        variable.attrs["long_name"] = "structured uncertainty per pixel"
        tu.add_units(variable, "1")
        tu.add_encoding(variable,
                        np.uint16,
                        DefaultData.get_default_fill_value(np.uint16),
                        3.05176E-05,
                        chunksizes=CHUNKSIZES)
        dataset["u_structured_toa_bidirectional_reflectance"] = variable

        # u_common
        dataset["u_common_toa_bidirectional_reflectance"] = tu.create_scalar_float_variable(
            long_name="common uncertainty per slot", units="1")

        dataset["sub_satellite_latitude_start"] = tu.create_scalar_float_variable(
            long_name="Latitude of the sub satellite point at image start",
            units="degrees_north")
        dataset["sub_satellite_longitude_start"] = tu.create_scalar_float_variable(
            long_name="Longitude of the sub satellite point at image start",
            units="degrees_east")
        dataset["sub_satellite_latitude_end"] = tu.create_scalar_float_variable(
            long_name="Latitude of the sub satellite point at image end",
            units="degrees_north")
        dataset["sub_satellite_longitude_end"] = tu.create_scalar_float_variable(
            long_name="Longitude of the sub satellite point at image end",
            units="degrees_east")

        tu.add_correlation_matrices(dataset, NUM_CHANNELS)

        if lut_size is not None:
            tu.add_lookup_tables(dataset, NUM_CHANNELS, lut_size=lut_size)

        if corr_dx is not None and corr_dy is not None:
            tu.add_correlation_coefficients(dataset, NUM_CHANNELS, corr_dx,
                                            corr_dy)

        tu.add_coordinates(dataset, ["vis", "wv", "ir"])
Example #51
    def test_roll(self):
        v = Variable('x', [1, 2, 3, 4, 5])

        self.assertVariableIdentical(v, v.roll(x=0))
        self.assertIsNot(v, v.roll(x=0))

        expected = Variable('x', [5, 1, 2, 3, 4])
        self.assertVariableIdentical(expected, v.roll(x=1))
        self.assertVariableIdentical(expected, v.roll(x=-4))
        self.assertVariableIdentical(expected, v.roll(x=6))
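        # For a length-5 vector, rolling by 1, -4 or 6 produces the same
        # result: the offsets are congruent modulo the dimension size.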

        expected = Variable('x', [4, 5, 1, 2, 3])
        self.assertVariableIdentical(expected, v.roll(x=2))
        self.assertVariableIdentical(expected, v.roll(x=-3))

        with self.assertRaisesRegexp(ValueError, 'dimension'):
            v.roll(z=0)
Example #52
 def test_concat_dim_is_variable(self):
     objs = [Dataset({"x": 0}), Dataset({"x": 1})]
     coord = Variable("y", [3, 4])
     expected = Dataset({"x": ("y", [0, 1]), "y": [3, 4]})
     actual = concat(objs, coord)
     assert_identical(actual, expected)
Example #53
    def test_lazily_indexed_array(self):
        original = np.random.rand(10, 20, 30)
        x = indexing.NumpyIndexingAdapter(original)
        v = Variable(["i", "j", "k"], original)
        lazy = indexing.LazilyOuterIndexedArray(x)
        v_lazy = Variable(["i", "j", "k"], lazy)
        arr = ReturnItem()
        # test orthogonally applied indexers
        indexers = [
            arr[:], 0, -2, arr[:3], [0, 1, 2, 3], [0],
            np.arange(10) < 5
        ]
        for i in indexers:
            for j in indexers:
                for k in indexers:
                    if isinstance(j, np.ndarray) and j.dtype.kind == "b":
                        j = np.arange(20) < 5
                    if isinstance(k, np.ndarray) and k.dtype.kind == "b":
                        k = np.arange(30) < 5
                    expected = np.asarray(v[i, j, k])
                    for actual in [
                            v_lazy[i, j, k],
                            v_lazy[:, j, k][i],
                            v_lazy[:, :, k][:, j][i],
                    ]:
                        assert expected.shape == actual.shape
                        assert_array_equal(expected, actual)
                        assert isinstance(actual._data,
                                          indexing.LazilyOuterIndexedArray)

                        # make sure actual.key is appropriate type
                        if all(
                                isinstance(k, (int, slice))
                                for k in v_lazy._data.key.tuple):
                            assert isinstance(v_lazy._data.key,
                                              indexing.BasicIndexer)
                        else:
                            assert isinstance(v_lazy._data.key,
                                              indexing.OuterIndexer)

        # test sequentially applied indexers
        indexers = [
            (3, 2),
            (arr[:], 0),
            (arr[:2], -1),
            (arr[:4], [0]),
            ([4, 5], 0),
            ([0, 1, 2], [0, 1]),
            ([0, 3, 5], arr[:2]),
        ]
        for i, j in indexers:
            expected = v[i][j]
            actual = v_lazy[i][j]
            assert expected.shape == actual.shape
            assert_array_equal(expected, actual)

            # test transpose
            if actual.ndim > 1:
                order = np.array(actual.dims)
                transposed = actual.transpose(*order)
                assert_array_equal(expected.transpose(*order), transposed)
                assert isinstance(
                    actual._data,
                    (
                        indexing.LazilyVectorizedIndexedArray,
                        indexing.LazilyOuterIndexedArray,
                    ),
                )

            assert isinstance(actual._data, indexing.LazilyOuterIndexedArray)
            assert isinstance(actual._data.array,
                              indexing.NumpyIndexingAdapter)
Example #54
    def test_remap_label_indexers(self):
        def test_indexer(data, x, expected_pos, expected_idx=None):
            pos, idx = indexing.remap_label_indexers(data, {"x": x})
            assert_array_equal(pos.get("x"), expected_pos)
            assert_array_equal(idx.get("x"), expected_idx)

        data = Dataset({"x": ("x", [1, 2, 3])})
        mindex = pd.MultiIndex.from_product([["a", "b"], [1, 2], [-1, -2]],
                                            names=("one", "two", "three"))
        mdata = DataArray(range(8), [("x", mindex)])

        test_indexer(data, 1, 0)
        test_indexer(data, np.int32(1), 0)
        test_indexer(data, Variable([], 1), 0)
        test_indexer(mdata, ("a", 1, -1), 0)
        test_indexer(
            mdata,
            ("a", 1),
            [True, True, False, False, False, False, False, False],
            [-1, -2],
        )
        test_indexer(
            mdata,
            "a",
            slice(0, 4, None),
            pd.MultiIndex.from_product([[1, 2], [-1, -2]]),
        )
        test_indexer(
            mdata,
            ("a", ),
            [True, True, True, True, False, False, False, False],
            pd.MultiIndex.from_product([[1, 2], [-1, -2]]),
        )
        test_indexer(mdata, [("a", 1, -1), ("b", 2, -2)], [0, 7])
        test_indexer(mdata, slice("a", "b"), slice(0, 8, None))
        test_indexer(mdata, slice(("a", 1), ("b", 1)), slice(0, 6, None))
        test_indexer(mdata, {"one": "a", "two": 1, "three": -1}, 0)
        test_indexer(
            mdata,
            {
                "one": "a",
                "two": 1
            },
            [True, True, False, False, False, False, False, False],
            [-1, -2],
        )
        test_indexer(
            mdata,
            {
                "one": "a",
                "three": -1
            },
            [True, False, True, False, False, False, False, False],
            [1, 2],
        )
        test_indexer(
            mdata,
            {"one": "a"},
            [True, True, True, True, False, False, False, False],
            pd.MultiIndex.from_product([[1, 2], [-1, -2]]),
        )
Example #55
    coder = strings.EncodedStringCoder(allows_unicode=True)
    raw = Variable(('x',), raw_data, encoding={'dtype': 'S1'})
    actual = coder.encode(raw)
    expected = Variable(('x',), expected_data, attrs={'_Encoding': 'utf-8'})
    assert_identical(actual, expected)

    raw = Variable(('x',), raw_data)
    assert_identical(coder.encode(raw), raw)

    coder = strings.EncodedStringCoder(allows_unicode=False)
    assert_identical(coder.encode(raw), expected)
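    # With allows_unicode=True, only variables explicitly encoded with dtype
    # 'S1' are converted to UTF-8 bytes; with allows_unicode=False the coder
    # presumably always byte-encodes, yielding the same '_Encoding' result.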


@pytest.mark.parametrize('original', [
    Variable(('x',), [b'ab', b'cdef']),
    Variable((), b'ab'),
    Variable(('x',), [b'a', b'b']),
    Variable((), b'a'),
])
def test_CharacterArrayCoder_roundtrip(original):
    coder = strings.CharacterArrayCoder()
    roundtripped = coder.decode(coder.encode(original))
    assert_identical(original, roundtripped)


@pytest.mark.parametrize('data', [
    np.array([b'a', b'bc']),
    np.array([b'a', b'bc'], dtype=strings.create_vlen_dtype(bytes_type)),
])
def test_CharacterArrayCoder_encode(data):
    # assumed behaviour, inferred from the parametrized inputs above: the
    # coder splits fixed-width bytes into single characters, appending a
    # trailing 'string2' dimension (the maximum byte length is 2)
    coder = strings.CharacterArrayCoder()
    actual = coder.encode(Variable(('x',), data))
    expected = Variable(('x', 'string2'), strings.bytes_to_char(data))
    assert_identical(actual, expected)
Example #56
 def test_concat_dim_is_variable(self):
     objs = [Dataset({'x': 0}), Dataset({'x': 1})]
     coord = Variable('y', [3, 4])
     expected = Dataset({'x': ('y', [0, 1]), 'y': [3, 4]})
     actual = concat(objs, coord)
     assert_identical(actual, expected)
Example #57
 def test_transpose(self):
     v = Variable(['time', 'x'], self.d)
     v2 = Variable(['x', 'time'], self.d.T)
     self.assertVariableIdentical(v, v2.transpose())
     self.assertVariableIdentical(v.transpose(), v.T)
     x = np.random.randn(2, 3, 4, 5)
     w = Variable(['a', 'b', 'c', 'd'], x)
     w2 = Variable(['d', 'b', 'c', 'a'], np.einsum('abcd->dbca', x))
     self.assertEqual(w2.shape, (5, 3, 4, 2))
     self.assertVariableIdentical(w2, w.transpose('d', 'b', 'c', 'a'))
     self.assertVariableIdentical(w, w2.transpose('a', 'b', 'c', 'd'))
     w3 = Variable(['b', 'c', 'd', 'a'], np.einsum('abcd->bcda', x))
     self.assertVariableIdentical(w, w3.transpose('a', 'b', 'c', 'd'))
Example #58
class TestVariable(DaskTestCase):
    def assertLazyAnd(self, expected, actual, test):
        expected_copy = expected.copy(deep=False)
        actual_copy = actual.copy(deep=False)
        with dask.set_options(get=dask.get):
            test(actual_copy, expected_copy)
        var = getattr(actual, 'variable', actual)
        self.assertIsInstance(var.data, da.Array)

    def assertLazyAndIdentical(self, expected, actual):
        self.assertLazyAnd(expected, actual, self.assertVariableIdentical)

    def assertLazyAndAllClose(self, expected, actual):
        self.assertLazyAnd(expected, actual, self.assertVariableAllClose)

    def setUp(self):
        self.values = np.random.randn(4, 6)
        self.data = da.from_array(self.values, chunks=(2, 2))

        self.eager_var = Variable(('x', 'y'), self.values)
        self.lazy_var = Variable(('x', 'y'), self.data)

    def test_basics(self):
        v = self.lazy_var
        self.assertIs(self.data, v.data)
        self.assertEqual(self.data.chunks, v.chunks)
        self.assertArrayEqual(self.values, v)

    def test_copy(self):
        self.assertLazyAndIdentical(self.eager_var, self.lazy_var.copy())
        self.assertLazyAndIdentical(self.eager_var,
                                    self.lazy_var.copy(deep=True))

    def test_chunk(self):
        for chunks, expected in [(None, ((2, 2), (2, 2, 2))),
                                 (3, ((3, 1), (3, 3))),
                                 ({'x': 3, 'y': 3}, ((3, 1), (3, 3))),
                                 ({'x': 3}, ((3, 1), (2, 2, 2))),
                                 ({'x': (3, 1)}, ((3, 1), (2, 2, 2)))]:
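            # chunk() accepts None (keep the existing chunks), one block size
            # for every dimension, or a per-dimension mapping to a size or an
            # explicit tuple of block sizes.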
            rechunked = self.lazy_var.chunk(chunks)
            self.assertEqual(rechunked.chunks, expected)
            self.assertLazyAndIdentical(self.eager_var, rechunked)

    def test_indexing(self):
        u = self.eager_var
        v = self.lazy_var
        self.assertLazyAndIdentical(u[0], v[0])
        self.assertLazyAndIdentical(u[:1], v[:1])
        self.assertLazyAndIdentical(u[[0, 1], [0, 1, 2]], v[[0, 1], [0, 1, 2]])
        with self.assertRaisesRegexp(TypeError, 'stored in a dask array'):
            v[:1] = 0

    def test_squeeze(self):
        u = self.eager_var
        v = self.lazy_var
        self.assertLazyAndIdentical(u[0].squeeze(), v[0].squeeze())

    def test_equals(self):
        v = self.lazy_var
        self.assertTrue(v.equals(v))
        self.assertIsInstance(v.data, da.Array)
        self.assertTrue(v.identical(v))
        self.assertIsInstance(v.data, da.Array)

    def test_transpose(self):
        u = self.eager_var
        v = self.lazy_var
        self.assertLazyAndIdentical(u.T, v.T)

    def test_shift(self):
        u = self.eager_var
        v = self.lazy_var
        self.assertLazyAndIdentical(u.shift(x=2), v.shift(x=2))
        self.assertLazyAndIdentical(u.shift(x=-2), v.shift(x=-2))
        self.assertEqual(v.data.chunks, v.shift(x=1).data.chunks)

    def test_roll(self):
        u = self.eager_var
        v = self.lazy_var
        self.assertLazyAndIdentical(u.roll(x=2), v.roll(x=2))
        self.assertEqual(v.data.chunks, v.roll(x=1).data.chunks)

    def test_unary_op(self):
        u = self.eager_var
        v = self.lazy_var
        self.assertLazyAndIdentical(-u, -v)
        self.assertLazyAndIdentical(abs(u), abs(v))
        self.assertLazyAndIdentical(u.round(), v.round())

    def test_binary_op(self):
        u = self.eager_var
        v = self.lazy_var
        self.assertLazyAndIdentical(2 * u, 2 * v)
        self.assertLazyAndIdentical(u + u, v + v)
        self.assertLazyAndIdentical(u[0] + u, v[0] + v)

    def test_reduce(self):
        u = self.eager_var
        v = self.lazy_var
        self.assertLazyAndAllClose(u.mean(), v.mean())
        self.assertLazyAndAllClose(u.std(), v.std())
        self.assertLazyAndAllClose(u.argmax(dim='x'), v.argmax(dim='x'))
        self.assertLazyAndAllClose((u > 1).any(), (v > 1).any())
        self.assertLazyAndAllClose((u < 1).all('x'), (v < 1).all('x'))
        with self.assertRaisesRegexp(NotImplementedError, 'dask'):
            v.prod()
        with self.assertRaisesRegexp(NotImplementedError, 'dask'):
            v.median()

    def test_missing_values(self):
        values = np.array([0, 1, np.nan, 3])
        data = da.from_array(values, chunks=(2, ))

        eager_var = Variable('x', values)
        lazy_var = Variable('x', data)
        self.assertLazyAndIdentical(eager_var, lazy_var.fillna(lazy_var))
        self.assertLazyAndIdentical(Variable('x', range(4)),
                                    lazy_var.fillna(2))
        self.assertLazyAndIdentical(eager_var.count(), lazy_var.count())

    def test_concat(self):
        u = self.eager_var
        v = self.lazy_var
        self.assertLazyAndIdentical(u, Variable.concat([v[:2], v[2:]], 'x'))
        self.assertLazyAndIdentical(u[:2], Variable.concat([v[0], v[1]], 'x'))
        self.assertLazyAndIdentical(
            u[:3],
            Variable.concat([v[[0, 2]], v[[1]]], 'x', positions=[[0, 2], [1]]))

    def test_missing_methods(self):
        v = self.lazy_var
        try:
            v.argsort()
        except NotImplementedError as err:
            self.assertIn('dask', str(err))
        try:
            v[0].item()
        except NotImplementedError as err:
            self.assertIn('dask', str(err))

    def test_ufuncs(self):
        u = self.eager_var
        v = self.lazy_var
        self.assertLazyAndAllClose(np.sin(u), xu.sin(v))
Example #59
    def test_shift(self):
        v = Variable('x', [1, 2, 3, 4, 5])

        self.assertVariableIdentical(v, v.shift(x=0))
        self.assertIsNot(v, v.shift(x=0))

        expected = Variable('x', [np.nan, 1, 2, 3, 4])
        self.assertVariableIdentical(expected, v.shift(x=1))

        expected = Variable('x', [np.nan, np.nan, 1, 2, 3])
        self.assertVariableIdentical(expected, v.shift(x=2))

        expected = Variable('x', [2, 3, 4, 5, np.nan])
        self.assertVariableIdentical(expected, v.shift(x=-1))

        expected = Variable('x', [np.nan] * 5)
        self.assertVariableIdentical(expected, v.shift(x=5))
        self.assertVariableIdentical(expected, v.shift(x=6))
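        # Shifting by the dimension length or more yields an all-NaN result;
        # attributes survive the shift, as the tail of this test verifies.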

        with self.assertRaisesRegexp(ValueError, 'dimension'):
            v.shift(z=0)

        v = Variable('x', [1, 2, 3, 4, 5], {'foo': 'bar'})
        self.assertVariableIdentical(v, v.shift(x=0))

        expected = Variable('x', [np.nan, 1, 2, 3, 4], {'foo': 'bar'})
        self.assertVariableIdentical(expected, v.shift(x=1))
Example #60
    def setUp(self):
        self.values = np.random.RandomState(0).randn(4, 6)
        self.data = da.from_array(self.values, chunks=(2, 2))

        self.eager_var = Variable(('x', 'y'), self.values)
        self.lazy_var = Variable(('x', 'y'), self.data)