def test_shape_of_variable_tensor_static_shape(self):
    """A Shape op over an input with a fully static shape should fold to a Constant."""
    var = Variable("var", dtype=np.float32, shape=(1, 3, 4))
    # Fixed: the original also executed `graph.inputs = [var]` right after this,
    # which is redundant — the constructor already registers the input.
    graph = Graph(inputs=[var])
    graph.outputs = [graph.shape(var)]

    graph.fold_constants().cleanup()

    # The Shape node is folded away entirely...
    assert not graph.nodes
    # ...and its output is replaced by a Constant holding the static shape values.
    assert isinstance(graph.outputs[0], Constant)
    assert np.all(graph.outputs[0].values == (1, 3, 4))
def test_shape_of_variable_tensor_static_shape_no_fold(self):
    """With fold_shapes disabled, a Shape op must survive constant folding."""
    inp = Variable("var", dtype=np.float32, shape=(1, 3, 4))

    graph = Graph()
    graph.inputs = [inp]
    graph.outputs = [graph.shape(inp)]

    graph.fold_constants(fold_shapes=False).cleanup()

    # The Shape node remains in the graph and its output is still a Variable,
    # since shape folding was explicitly turned off.
    assert len(graph.nodes) == 1
    assert graph.nodes[0].op == "Shape"
    assert isinstance(graph.outputs[0], Variable)
def test_io_cannot_be_sync_list_on_assign(self):
    """Assigning a Node's SynchronizedList I/O to a Graph must not alias the list.

    Graph.inputs/outputs should be plain lists even when assigned from a node's
    synchronized I/O lists, so mutating graph I/O cannot silently mutate node I/O.
    """
    inp = Variable("input0", shape=(1, 3), dtype=np.float32)
    # Fixed misleading tensor name: this is the node's output, so it should not
    # be called "input1". The name is irrelevant to the assertions below.
    out = Variable("output0", shape=(1, 3), dtype=np.float32)
    node = Node("Add", inputs=[inp], outputs=[out])

    # Node I/O lists are synchronized with the tensors they contain.
    assert isinstance(node.inputs, SynchronizedList)
    assert isinstance(node.outputs, SynchronizedList)

    graph = Graph(nodes=[node], inputs=[], outputs=[])
    graph.inputs = node.inputs
    graph.outputs = node.outputs

    # The graph must have converted the synchronized lists into plain lists.
    assert not isinstance(graph.inputs, SynchronizedList)
    assert not isinstance(graph.outputs, SynchronizedList)
def simple_foldable():
    """Yield a graph with one foldable constant subexpression.

    Graph:
        c = (a + b)
        output = input + c
    Should fold to:
        output = input + c    (with c now a Constant)
    """
    weights = np.ones(shape=(1, 3), dtype=np.float32)
    inp = Variable("input", shape=(1, 3), dtype=np.float32)

    graph = Graph()
    c = graph.add(weights, weights, name="c")
    graph.inputs = [inp]
    graph.outputs = [graph.add(inp, c)]
    yield graph
def one_hop_foldable():
    """Yield a graph whose foldable constants span two chained nodes.

    Graph:
        c = (a + b)
        e = (c + d)
        output = input + e
    Should fold to:
        output = input + e    (with e now a Constant)
    """
    weights = np.ones(shape=(1, 3), dtype=np.float32)
    inp = Variable("input", shape=(1, 3), dtype=np.float32)

    graph = Graph()
    c = graph.add(weights, weights, name="c")
    e = graph.add(c, weights, name="e")
    graph.inputs = [inp]
    graph.outputs = [graph.add(inp, e)]
    yield graph
def test_shape_of_variable_tensor_multiple_shapes(self):
    """Shape ops over multiple static inputs (including a scalar) all fold to Constants."""
    tensor = Variable("var", dtype=np.float32, shape=(1, 3, 4))
    scalar = Variable("var2", dtype=np.float32, shape=tuple())  # Scalar

    graph = Graph()
    graph.inputs = [tensor, scalar]
    graph.outputs = [
        graph.shape(tensor),
        graph.identity(tensor),
        graph.shape(scalar),
    ]

    graph.fold_constants().cleanup()

    # Only the Identity node should survive; both Shape ops fold away.
    assert len(graph.nodes) == 1
    assert graph.nodes[0].op == "Identity"

    # The rank-3 input's shape folds to a Constant with its dimensions...
    assert isinstance(graph.outputs[0], Constant)
    assert np.all(graph.outputs[0].values == (1, 3, 4))
    # ...and the scalar's shape folds to an empty Constant.
    assert isinstance(graph.outputs[2], Constant)
    assert np.all(graph.outputs[2].values == tuple())
def test_input_is_output(self):
    """cleanup() must preserve graph I/O even when inputs also appear as outputs."""
    lhs = Variable("A", dtype=np.float32, shape=(1, 1))
    rhs = Variable("B", dtype=np.float32, shape=(1, 1))

    graph = Graph()
    result = graph.add(lhs, rhs)
    graph.inputs = [lhs, rhs]
    # Outputs deliberately listed out of order w/ respect to Add node inputs.
    graph.outputs = [result, rhs, lhs]

    # Graph should remain unchanged after cleanup, including I/O tensors.
    graph.cleanup()

    assert graph.inputs == [lhs, rhs]
    assert graph.outputs == [result, rhs, lhs]
    assert len(graph.nodes) == 1
    assert graph.nodes[0].inputs == [lhs, rhs]
    assert graph.nodes[0].outputs == [result]
def foldable_with_invalid_node():
    """Yield a graph mixing foldable constants with an un-runnable node.

    Graph:
        c = (a + b)
        e = fake(d)
        f = (e + c)
        out = inp + f

    c should be folded even though e is the output of an invalid node.
    """
    weights = np.ones(shape=(1, 3), dtype=np.float32)
    inp = Variable("input", shape=(1, 3), dtype=np.float32)

    graph = Graph()
    c = graph.add(weights, weights, name="c")
    # "fake" is not a real op, so this node can never be constant-folded.
    e = graph.fake(weights, name="e")
    f = graph.add(e, c, name="f")

    graph.inputs = [inp]
    graph.outputs = [graph.add(inp, f, name="output")]
    yield graph