Example #1
    def on_receive(self, message):
        # wrap the incoming message in a fake operand whose execution
        # is expected to yield message + 1
        op = FakeOperand(_num=message)
        chunk = op.new_chunk(None, ())
        # build a single-node chunk graph, run it through the executor
        # and fetch the chunk's result by key
        graph = DirectedGraph()
        graph.add_node(chunk.data)
        res = self._executor.execute_graph(graph, [chunk.key])
        assert res[0] == message + 1
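Example #1 leaves FakeOperand and self._executor undefined; both come from the surrounding test module. The assertion pins down the operand's contract: executing its one-chunk graph yields the stored number plus one. A framework-free sketch of that contract (the class below is a hypothetical stand-in, not the Mars API):

class FakeOperand:
    # toy stand-in mirroring the behavior the assertion above relies on
    def __init__(self, _num):
        self._num = _num

    def evaluate(self):
        return self._num + 1

assert FakeOperand(_num=41).evaluate() == 42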
Example #2
    def testSVD(self):
        # tall-skinny input split into three row chunks
        a = mt.random.rand(9, 6, chunk_size=(3, 6))
        U, s, V = mt.linalg.svd(a)

        self.assertEqual(U.shape, (9, 6))
        self.assertEqual(s.shape, (6,))
        self.assertEqual(V.shape, (6, 6))

        U.tiles()
        self.assertEqual(len(U.chunks), 3)
        self.assertEqual(len(s.chunks), 1)
        self.assertEqual(len(V.chunks), 1)

        self.assertEqual(s.ndim, 1)
        self.assertEqual(len(s.chunks[0].index), 1)

        # single-chunk input: U, s and V each tile to a single chunk
        a = mt.random.rand(9, 6, chunk_size=(9, 6))
        U, s, V = mt.linalg.svd(a)

        self.assertEqual(U.shape, (9, 6))
        self.assertEqual(s.shape, (6,))
        self.assertEqual(V.shape, (6, 6))

        U.tiles()
        self.assertEqual(len(U.chunks), 1)
        self.assertEqual(len(s.chunks), 1)
        self.assertEqual(len(V.chunks), 1)

        self.assertEqual(s.ndim, 1)
        self.assertEqual(len(s.chunks[0].index), 1)

        rs = mt.random.RandomState(1)
        a = rs.rand(9, 6, chunk_size=(3, 6))
        U, s, V = mt.linalg.svd(a)

        # test tensor graph
        graph = DirectedGraph()
        U.build_graph(tiled=False, graph=graph)
        s.build_graph(tiled=False, graph=graph)
        new_graph = DirectedGraph.from_json(graph.to_json())
        self.assertEqual(len(new_graph), 4)
        new_outputs = [n for n in new_graph if new_graph.count_predecessors(n) == 1]
        self.assertEqual(len(new_outputs), 3)
        self.assertEqual(len({o.op for o in new_outputs}), 1)

        # test tensor graph, do some calculation
        graph = DirectedGraph()
        (U + 1).build_graph(tiled=False, graph=graph)
        (s + 1).build_graph(tiled=False, graph=graph)
        new_graph = DirectedGraph.from_json(graph.to_json())
        self.assertEqual(len(new_graph), 6)
        new_outputs = [n for n in new_graph if new_graph.count_predecessors(n) == 1]
        self.assertEqual(len(new_outputs), 5)
        self.assertEqual(len({o.op for o in new_outputs}), 3)

        a = rs.rand(20, 10, chunk_size=10)
        _, s, _ = mt.linalg.svd(a)
        del _
        # the graph built from s alone still holds the input plus all
        # three SVD outputs, since they share one operand
        graph = s.build_graph(tiled=False)
        self.assertEqual(len(graph), 4)
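The chunk counts asserted above follow from chunk_size: a 9x6 tensor with chunk_size=(3, 6) splits into ceil(9/3) * ceil(6/6) = 3 chunks, and (as the assertions encode) the SVD keeps U chunked along the input's rows while s and V collapse to one chunk each. A quick check of that arithmetic in plain Python, independent of Mars:

import math

def n_chunks(shape, chunk_size):
    # how many chunks a dense tensor of `shape` splits into
    return math.prod(math.ceil(s / c) for s, c in zip(shape, chunk_size))

assert n_chunks((9, 6), (3, 6)) == 3  # three row chunks -> len(U.chunks) == 3
assert n_chunks((9, 6), (9, 6)) == 1  # whole tensor in a single chunk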
Example #3
    def testTensorReadCOO(self):
        import shutil
        import tempfile
        import pyarrow as pa
        import pyarrow.parquet as pq
        import pandas as pd
        import numpy as np
        import scipy.sparse as sps

        dir_name = tempfile.mkdtemp(prefix='mars-test-tensor-read')
        try:
            data_src = []
            for x in range(2):
                for y in range(2):
                    mat = sps.random(50, 50, 0.1)
                    data_src.append(mat)
                    df = pd.DataFrame(dict(x=mat.row, y=mat.col, val=mat.data),
                                      columns=['x', 'y', 'val'])
                    pq.write_table(
                        pa.Table.from_pandas(df),
                        dir_name + '/' + 'table@%d,%d.parquet' % (x, y))

            t = read_coo(dir_name + '/*.parquet', ['x', 'y'],
                         'val',
                         shape=(100, 100),
                         chunk_size=50,
                         sparse=True)
            res = self.executor.execute_tensor(t)
            for r, e in zip(res, data_src):
                np.testing.assert_equal(r.toarray(), e.toarray())

            t = read_coo(dir_name + '/*.parquet', ['x', 'y'],
                         'val',
                         shape=(100, 100),
                         chunk_size=50,
                         sparse=True)
            DirectedGraph.from_json(t.build_graph(tiled=False).to_json())
        finally:
            shutil.rmtree(dir_name)
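read_coo reassembles each 50x50 block from its (x, y, val) columns, so the final comparison only works because a COO matrix round-trips losslessly through those three arrays. A minimal sketch of that round trip using scipy and pandas alone, with the Parquet step skipped:

import numpy as np
import pandas as pd
import scipy.sparse as sps

mat = sps.random(50, 50, 0.1)  # COO format by default
df = pd.DataFrame(dict(x=mat.row, y=mat.col, val=mat.data))
rebuilt = sps.coo_matrix(
    (df['val'].to_numpy(), (df['x'].to_numpy(), df['y'].to_numpy())),
    shape=mat.shape)
np.testing.assert_equal(rebuilt.toarray(), mat.toarray())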
Example #4
    def testCompose(self):
        """
        test compose in build graph and optimize
        """
        r"""
        graph(@: node, #: composed_node):

        @ --> @ --> @   ========>    #
        """
        chunks = [
            TensorTreeAdd(_key=str(n)).new_chunk(None, None) for n in range(3)
        ]
        graph = DirectedGraph()
        lmap(graph.add_node, chunks[:3])
        graph.add_edge(chunks[0], chunks[1])
        graph.add_edge(chunks[1], chunks[2])

        composed_nodes = graph.compose()
        self.assertEqual(composed_nodes[0].composed, chunks[:3])

        # mark the middle chunk as a result, so the chain cannot be composed
        composed_nodes = graph.compose(keys=[chunks[1].key])
        self.assertEqual(len(composed_nodes), 0)
        r"""
        graph(@: node, #: composed_node):

        @             @              @       @
          \         /                  \   /
            @ --> @       ========>      #
          /         \                  /   \
        @             @              @       @
        """
        chunks = [
            TensorTreeAdd(_key=str(n)).new_chunk(None, None) for n in range(6)
        ]
        graph = DirectedGraph()
        lmap(graph.add_node, chunks[:6])

        chunks[2].op._inputs = [chunks[0], chunks[1]]
        chunks[3].op._inputs = [chunks[2]]
        chunks[4].op._inputs = [chunks[3]]
        chunks[5].op._inputs = [chunks[3]]

        graph.add_edge(chunks[0], chunks[2])
        graph.add_edge(chunks[1], chunks[2])
        graph.add_edge(chunks[2], chunks[3])
        graph.add_edge(chunks[3], chunks[4])
        graph.add_edge(chunks[3], chunks[5])

        composed_nodes = graph.compose()
        self.assertEqual(composed_nodes[0].composed, chunks[2:4])

        # make sure the predecessors and successors of the composed node are right:
        # the successor of chunks 0 and 1 must be the composed node
        self.assertIn(composed_nodes[0], graph.successors(chunks[0]))
        self.assertIn(composed_nodes[0], graph.successors(chunks[1]))
        # check composed's inputs
        self.assertIn(chunks[0].key, [n.key for n in composed_nodes[0].inputs])
        self.assertIn(chunks[1].key, [n.key for n in composed_nodes[0].inputs])
        # check composed's predecessors
        self.assertIn(chunks[0], graph.predecessors(composed_nodes[0]))
        self.assertIn(chunks[1], graph.predecessors(composed_nodes[0]))
        # check 4 and 5's inputs
        self.assertIn(
            composed_nodes[0].key,
            [n.key for n in graph.successors(composed_nodes[0])[0].inputs])
        self.assertIn(
            composed_nodes[0].key,
            [n.key for n in graph.successors(composed_nodes[0])[1].inputs])
        # check 4 and 5's predecessors
        self.assertIn(composed_nodes[0], graph.predecessors(chunks[4]))
        self.assertIn(composed_nodes[0], graph.predecessors(chunks[5]))

        # test optimizer compose
        r"""
        graph(@: node, S: Slice Chunk, #: composed_node):

        @                   @              @             @
          \               /                  \         /
            @ --> @ --> S      ========>       # --> S
          /               \                  /         \
        @                   @              @             @

        compose stops at S, because numexpr doesn't support the Slice op
        """
        chunks = [
            TensorTreeAdd(_key=str(n)).new_chunk(None, None) for n in range(6)
        ]
        chunk_slice = TensorSlice().new_chunk([None], None)
        graph = DirectedGraph()
        lmap(graph.add_node, chunks[:6])
        graph.add_node(chunk_slice)
        graph.add_edge(chunks[0], chunks[2])
        graph.add_edge(chunks[1], chunks[2])
        graph.add_edge(chunks[2], chunks[3])
        graph.add_edge(chunks[3], chunk_slice)
        graph.add_edge(chunk_slice, chunks[4])
        graph.add_edge(chunk_slice, chunks[5])
        optimizer = NeOptimizer(graph)
        composed_nodes = optimizer.compose()
        self.assertEqual(composed_nodes[0].composed, chunks[2:4])
        r"""
            graph(@: node, S: Slice Chunk, #: composed_node):

            @ --> @ --> S --> @  ========>  # --> S --> @

        compose stops at S, because numexpr doesn't support the Slice op
        """
        chunks = [
            TensorTreeAdd(_key=str(n)).new_chunk(None, None) for n in range(3)
        ]
        graph = DirectedGraph()
        lmap(graph.add_node, chunks[:3])
        graph.add_node(chunk_slice)
        graph.add_edge(chunks[0], chunks[1])
        graph.add_edge(chunks[1], chunk_slice)
        graph.add_edge(chunk_slice, chunks[2])
        optimizer = NeOptimizer(graph)
        composed_nodes = optimizer.compose()
        self.assertEqual(composed_nodes[0].composed, chunks[:2])
        self.assertEqual(len(composed_nodes), 1)
        r"""
            graph(@: node, S: Slice Chunk, #: composed_node):

            @ --> @ --> S --> @ --> @   ========>  # --> S --> #

        compose stops at S, because numexpr doesn't support the Slice op
        """
        chunks = [
            TensorTreeAdd(_key=str(n)).new_chunk(None, None) for n in range(4)
        ]
        graph = DirectedGraph()
        lmap(graph.add_node, chunks[:4])
        graph.add_node(chunk_slice)
        graph.add_edge(chunks[0], chunks[1])
        graph.add_edge(chunks[1], chunk_slice)
        graph.add_edge(chunk_slice, chunks[2])
        graph.add_edge(chunks[2], chunks[3])
        optimizer = NeOptimizer(graph)
        composed_nodes = optimizer.compose()
        self.assertEqual(composed_nodes[0].composed, chunks[:2])
        self.assertEqual(composed_nodes[1].composed, chunks[2:4])
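Every case above follows one fusion rule: an edge u -> v folds into a chain only when u has exactly one successor and v has exactly one predecessor, and a chain never crosses a node the backend cannot fuse (here, the Slice chunk under the numexpr optimizer). The toy model below (plain Python, not the Mars implementation) reproduces the chains from the diagrams:

def fuse_chains(succ, stop=frozenset()):
    # succ maps each node to its list of successors; stop marks nodes
    # that must not be fused (e.g. the Slice chunk for numexpr)
    pred = {n: [] for n in succ}
    for u, vs in succ.items():
        for v in vs:
            pred[v].append(u)

    def fusable(u, v):
        # an edge fuses only if it is u's sole way out and v's sole way in
        return (u not in stop and v not in stop
                and len(succ[u]) == 1 and len(pred[v]) == 1)

    chains = []
    for n in succ:
        if any(fusable(p, n) for p in pred[n]):
            continue  # n sits inside a chain that starts further up
        chain = [n]
        while len(succ[chain[-1]]) == 1 and fusable(chain[-1], succ[chain[-1]][0]):
            chain.append(succ[chain[-1]][0])
        if len(chain) > 1:
            chains.append(chain)
    return chains

# the fan-in/fan-out graph from the second diagram: only 2 -> 3 fuses
assert fuse_chains({0: [2], 1: [2], 2: [3], 3: [4, 5], 4: [], 5: []}) == [[2, 3]]
# the last diagram: the chain breaks at 'S', the Slice stand-in
assert fuse_chains({0: [1], 1: ['S'], 'S': [2], 2: [3], 3: []},
                   stop={'S'}) == [[0, 1], [2, 3]]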