def test_subgraph(add_weight_subgraph: pir.Graph):
    """The add-weight subgraph holds exactly one Add op and no variables."""
    graph = add_weight_subgraph
    assert len(graph.get_tensors()) == 3
    assert len(graph.get_variables()) == 0
    assert contains_op_of_type("Add", _ir.op.AddOp, graph)
    # Rudimentary negative checks: no unrelated op types should be present.
    for op_name, op_type in (("Loop", _ir.op.LoopOp), ("Mul", _ir.op.MulOp)):
        assert not contains_op_of_type(op_name, op_type, graph)
def test_fn(self):
    """host_load should add both a HostLoad op and an Init op to the graph."""
    ir = pir.Ir()
    graph = ir.main_graph()
    with graph:
        stream = pir.h2d_stream((), pir.dtypes.float32)
        loaded = ops.host_load(stream, "x")
    assert len(graph.get_tensors()) == 3
    assert len(graph.get_variables()) == 0
    assert contains_op_of_type("HostLoad", _ir.op.exchange.HostLoadOp, graph)
    assert contains_op_of_type("Init", _ir.op.InitOp, graph)
def test_fn(self, inplace):
    """slice/slice_ should create the matching (inplace or outplace) Slice op."""
    ir = pir.Ir()
    graph = ir.main_graph()
    with graph:
        # NOTE(review): `data` is supplied from outside this function
        # (fixture or module-level value) — not visible here.
        t = pir.variable(data)
        slice_fn = ops.slice_ if inplace else ops.slice
        sliced = slice_fn(t, start=1, stop=3, step=1, axis=0)
    if inplace:
        assert contains_op_of_type("SliceInplace", _ir.op.SliceInplaceOp, graph)
    else:
        assert contains_op_of_type("Slice", _ir.op.SliceOp, graph)
    assert len(graph.get_tensors()) == 2
    assert len(graph.get_variables()) == 1
def test_dunder(self, inplace):
    """detach/detach_ should create the matching Detach op variant."""
    ir = pir.Ir()
    graph = ir.main_graph()
    with graph:
        t = pir.variable(np.ones((1, 2, 3)))
        detached = t.detach_() if inplace else t.detach()
    assert len(graph.get_tensors()) == 2
    assert len(graph.get_variables()) == 1
    if inplace:
        expected_name, expected_type = "DetachInplace", _ir.op.DetachInplaceOp
    else:
        expected_name, expected_type = "Detach", _ir.op.DetachOp
    assert contains_op_of_type(expected_name, expected_type, graph)
def test_flatten(self):
    """flatten should collapse all dims into one via a Reshape op."""
    ir = pir.Ir()
    graph = ir.main_graph()
    with graph:
        t = pir.variable(np.ones((1, 2, 3)))
        flat = ops.flatten(t)
    assert flat.shape == (6, )
    assert contains_op_of_type("Reshape", _ir.op.ReshapeOp, graph)
def test_fn(self):
    """gelu should add a single Gelu op producing one output tensor."""
    ir = pir.Ir()
    graph = ir.main_graph()
    with graph:
        t = pir.variable(np.ones((1, 2, 3)))
        out = ops.gelu(t)
    assert len(graph.get_tensors()) == 2
    assert contains_op_of_type("Gelu", _ir.op.GeluOp, graph)
def test_fn(self):
    """softmax should add a single Softmax op producing one output tensor."""
    ir = pir.Ir()
    graph = ir.main_graph()
    with graph:
        t = pir.variable(np.ones((1, 2, 3)))
        out = ops.softmax(t, axis=1)
    assert len(graph.get_tensors()) == 2
    assert contains_op_of_type("Softmax", _ir.op.SoftmaxOp, graph)
def test_negative(self):
    """reshape with a -1 dim should infer the remaining size (6 here)."""
    ir = pir.Ir()
    graph = ir.main_graph()
    with graph:
        t = pir.variable(np.ones((1, 2, 3)))
        reshaped = t.reshape((-1, 1))
    assert reshaped.shape == (6, 1)
    assert contains_op_of_type("Reshape", _ir.op.ReshapeOp, graph)
def test_scaled_add_1_t(self, inplace):
    """scaled_add(_) with a tensor `b` should create the right op variant."""
    ir = pir.Ir()
    graph = ir.main_graph()
    with graph:
        X = pir.variable(np.ones((2, 2)), dtype=pir.float32)
        Y = pir.variable(np.ones((2, 2)), dtype=pir.float32)
        add_fn = ops.scaled_add_ if inplace else ops.scaled_add
        add_fn(X, Y, b=pir.variable(0.1))
    # 4 tensors / 3 variables: X, Y, the `b` variable, and the op output.
    assert len(graph.get_tensors()) == 4
    assert len(graph.get_variables()) == 3
    if inplace:
        assert contains_op_of_type("ScaledAddLhsInplace",
                                   _ir.op.ScaledAddLhsInplaceOp, graph)
    else:
        assert contains_op_of_type("ScaledAdd", _ir.op.ScaledAddOp, graph)
def test_fn(self):
    """random_normal should add a RandomNormal op fed by the seed variable."""
    ir = pir.Ir()
    graph = ir.main_graph()
    with graph:
        seed = pir.variable(np.array([32, 32]), dtype=dtypes.uint32)
        sample = ops.random_normal(seed, (2, 2))
    assert len(graph.get_tensors()) == 2
    assert len(graph.get_variables()) == 1
    assert contains_op_of_type("RandomNormal", _ir.op.RandomNormalOp, graph)
def test_fn(self):
    """logical_not on a bool tensor should add a single Not op."""
    ir = pir.Ir()
    graph = ir.main_graph()
    with graph:
        flag = pir.variable(True, pir.bool)
        negated = ops.logical_not(flag)
    assert len(graph.get_tensors()) == 2
    assert len(graph.get_variables()) == 1
    assert contains_op_of_type("Not", _ir.op.NotOp, graph)
def test_needs_casting(self):
    """logical_not on an int32 input should insert a cast (3 tensors total)."""
    ir = pir.Ir()
    graph = ir.main_graph()
    with graph:
        value = pir.variable(1, pir.int32)
        negated = ops.logical_not(value)
    # One extra tensor vs. the bool case: the intermediate cast output.
    assert len(graph.get_tensors()) == 3
    assert len(graph.get_variables()) == 1
    assert contains_op_of_type("Not", _ir.op.NotOp, graph)
def test_squeeze_all(self):
    """squeeze with no axes should drop every size-1 dim via a Reshape."""
    ir = pir.Ir()
    graph = ir.main_graph()
    with graph:
        t = pir.variable(np.ones((4, 1, 3, 1)))
        squeezed = ops.squeeze(t)
    assert squeezed.shape == (4, 3)
    assert len(graph.get_tensors()) == 2
    assert contains_op_of_type("Reshape", _ir.op.ReshapeOp, graph)
def test_dunder(self):
    """Unary minus on a tensor should add a Neg op."""
    ir = pir.Ir()
    graph = ir.main_graph()
    with graph:
        value = pir.variable(1)
        negated = -value
    assert len(graph.get_tensors()) == 2
    assert len(graph.get_variables()) == 1
    assert contains_op_of_type("Neg", _ir.op.NegateOp, graph)
def test_fn_with_no_producer(self):
    """ipu_copy on a producer-less tensor should add an IpuCopy op."""
    ir = pir.Ir()
    graph = ir.main_graph()
    with graph:
        src = pir.variable(1)
        copied = ops.ipu_copy(src, 1, 0)
    assert len(graph.get_variables()) == 1
    assert len(graph.get_tensors()) == 2
    assert contains_op_of_type("IpuCopy", _ir.op.IpuCopyOp, graph)
def test_fn(self):
    """init should add an Init op whose output is the graph's only tensor."""
    ir = pir.Ir()
    graph = ir.main_graph()
    with graph:
        out = ops.init((), pir.dtypes.float32)
    assert len(graph.get_tensors()) == 1
    assert len(graph.get_variables()) == 0
    assert contains_op_of_type("Init", _ir.op.InitOp, graph)
def test_fn(self):
    """print_tensor should add a PrintTensor op with one output."""
    ir = pir.Ir()
    graph = ir.main_graph()
    with graph:
        value = pir.variable(1)
        printed = ops.print_tensor(value)
    assert len(graph.get_tensors()) == 2
    assert len(graph.get_variables()) == 1
    assert contains_op_of_type("PrintTensor", _ir.op.PrintTensorOp, graph)
def test_fn(self):
    """increment_mod should add an IncrementMod op with one output."""
    ir = pir.Ir()
    graph = ir.main_graph()
    with graph:
        value = pir.variable(1)
        incremented = ops.increment_mod(value, 1, 3)
    assert len(graph.get_tensors()) == 2
    assert len(graph.get_variables()) == 1
    assert contains_op_of_type("IncrementMod", _ir.op.IncrementModOp, graph)
def test_squeeze_specified_negative(self):
    """squeeze should accept negative axes (axis -4 here is the second 1-dim)."""
    ir = pir.Ir()
    graph = ir.main_graph()
    with graph:
        t = pir.variable(np.ones((4, 1, 3, 1, 4, 5, 1)))
        squeezed = ops.squeeze(t, axes=[-4, 1])
    # Only the requested size-1 dims are removed; the trailing 1 remains.
    assert squeezed.shape == (4, 3, 4, 5, 1)
    assert len(graph.get_tensors()) == 2
    assert contains_op_of_type("Reshape", _ir.op.ReshapeOp, graph)
def test_tensor_method(self):
    """Tensor.reshape should add a Reshape op and report the new shape."""
    ir = pir.Ir()
    graph = ir.main_graph()
    with graph:
        t = pir.variable(np.ones((1, 2, 3)))
        reshaped = t.reshape((2, 3, 1))
    assert reshaped.shape == (2, 3, 1)
    assert len(graph.get_tensors()) == 2
    assert contains_op_of_type("Reshape", _ir.op.ReshapeOp, graph)
def test_add(self):
    """copy_var_update_ should add a CopyVarUpdate op writing into the var."""
    ir = pir.Ir()
    graph = ir.main_graph()
    with graph:
        var = pir.variable(1)
        src = pir.constant(2)
        updated = ops.var_updates.copy_var_update_(var, src)
    assert len(graph.get_tensors()) == 3
    assert len(graph.get_variables()) == 1
    assert contains_op_of_type("CopyVarUpdate", _ir.op.CopyVarUpdateOp, graph)
def test_fn(self):
    """sub should add a Sub op combining the two variables."""
    ir = pir.Ir()
    graph = ir.main_graph()
    with graph:
        lhs = pir.variable(1)
        rhs = pir.variable(2)
        diff = ops.sub(lhs, rhs)
    assert len(graph.get_tensors()) == 3
    assert len(graph.get_variables()) == 2
    assert contains_op_of_type("Sub", _ir.op.SubtractOp, graph)
def test_ensure_tensor(self):
    """Subtracting a Python scalar should auto-wrap it as a constant tensor."""
    ir = pir.Ir()
    graph = ir.main_graph()
    with graph:
        lhs = pir.variable(1)
        diff = lhs - 2
    # The scalar 2 becomes a constant, so: lhs + constant + output = 3 tensors.
    assert len(ir.main_graph().get_tensors()) == 3
    assert len(ir.main_graph().get_variables()) == 1
    assert len(ir.main_graph().get_constants()) == 1
    assert contains_op_of_type("Sub", _ir.op.SubtractOp, graph)
def test_dunder(self):
    """The `-` operator on two tensors should add a Sub op."""
    ir = pir.Ir()
    graph = ir.main_graph()
    with graph:
        lhs = pir.variable(1)
        rhs = pir.variable(2)
        diff = lhs - rhs
    assert len(ir.main_graph().get_tensors()) == 3
    assert len(ir.main_graph().get_variables()) == 2
    assert contains_op_of_type("Sub", _ir.op.SubtractOp, graph)
def test_ensure_tensor(self):
    """Multiplying by a Python scalar should auto-wrap it as a constant."""
    ir = pir.Ir()
    graph = ir.main_graph()
    with graph:
        lhs = pir.variable(1)
        product = lhs * 2
    assert len(graph.get_tensors()) == 3
    assert len(graph.get_variables()) == 1
    assert len(graph.get_constants()) == 1
    assert contains_op_of_type("Mul", _ir.op.MulOp, graph)
def test_dampened_add_square(self):
    """accumulate_square_ should create an Accumulate op configured with the
    DampenedAddSquare accumulation type.

    Bug fix: the accumulation-type comparison was a bare expression whose
    result was silently discarded, so the check never actually ran; it is
    now wrapped in an `assert`.
    """
    ir = pir.Ir()
    g = ir.main_graph()
    with g:
        a = pir.variable(1)
        b = pir.constant(2)
        c = ops.var_updates.accumulate_square_(a, b, 0.999)
    assert contains_op_of_type("Accumulate", _ir.op.AccumulateOp, g)
    op = g._pb_graph.getOps()[0]
    assert op.getAccumulationType() == _ir.AccumulationType.DampenedAddSquare
def test_dunder(self):
    """The `*` operator on two tensors should add a Mul op."""
    ir = pir.Ir()
    graph = ir.main_graph()
    with graph:
        lhs = pir.variable(1)
        rhs = pir.variable(2)
        product = lhs * rhs
    assert len(graph.get_tensors()) == 3
    assert len(graph.get_variables()) == 2
    assert contains_op_of_type("Mul", _ir.op.MulOp, graph)
def test_needs_casting(self):
    """logical_and on int32 inputs should insert casts (5 tensors total)."""
    ir = pir.Ir()
    graph = ir.main_graph()
    with graph:
        lhs = pir.variable(1, pir.int32)
        rhs = pir.variable(0, pir.int32)
        conj = ops.logical_and(lhs, rhs)
    # Two extra tensors vs. the bool case: one cast output per int input.
    assert len(graph.get_tensors()) == 5
    assert len(graph.get_variables()) == 2
    assert contains_op_of_type("And", _ir.op.AndOp, graph)
def test_fn(self):
    """dropout should add a Dropout op fed by the input and seed variables."""
    ir = pir.Ir()
    graph = ir.main_graph()
    with graph:
        value = pir.variable(0)
        seed = pir.variable(np.array([32, 32]), dtype=dtypes.uint32)
        dropped = ops.dropout(value, seed, 0.3)
    assert len(graph.get_tensors()) == 3
    assert len(graph.get_variables()) == 2
    assert contains_op_of_type("Dropout", _ir.op.DropoutOp, graph)
def test_fn(self):
    """logical_and on bool tensors should add an And op with no casts."""
    ir = pir.Ir()
    graph = ir.main_graph()
    with graph:
        lhs = pir.variable(True, pir.bool)
        rhs = pir.variable(False, pir.bool)
        conj = ops.logical_and(lhs, rhs)
    assert len(graph.get_tensors()) == 3
    assert len(graph.get_variables()) == 2
    assert contains_op_of_type("And", _ir.op.AndOp, graph)