Example #1
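The snippets below are quoted without their imports. A minimal import block that would make them self-contained, assuming all of these names are exposed by popart._internal.ir (the docstring in Example #17 and the `_ir.` prefix in Examples #8, #9 and #16 both point at that module; the exact import layout may differ between PopART versions):

import numpy as np
from typing import Iterable, List, Mapping, Tuple

import popart._internal.ir as _ir
from popart._internal.ir import (DataType, Graph, GraphId, Ir, Scope,
                                 TensorInfo, TensorType, Tensors)

Examples #8, #9 and #16 additionally rely on popart.ir helpers (Tensor, get_current_context, set_input_modified) whose import paths are not shown in the source.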
def test_get_of_type():
    ir = Ir()
    gId = GraphId("g")
    graph = Graph(ir, gId)
    ts = Tensors(graph)

    for tid in 'abcd':
        ts.addActGrad(tid)

    data = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).astype(np.float32)
    tinfo = TensorInfo(DataType.FLOAT, data.shape)
    for tid in 'efgh':
        ts.addVarInit(tid, tinfo, data)

    for tid in 'ijkl':
        ts.addStream(tid, tinfo)

    actGrads = ts.getOfType(TensorType.ActGrad)
    assert len(actGrads) == 4
    assert {t.id for t in actGrads} == set('abcd')

    variables = ts.getOfType(TensorType.Variable)
    assert len(variables) == 4
    assert {t.id for t in variables} == set('efgh')

    streams = ts.getOfType(TensorType.Stream)
    assert len(streams) == 4
    assert {t.id for t in streams} == set('ijkl')

    actGradsAndVars = ts.getOfType([TensorType.ActGrad, TensorType.Variable])
    assert len(actGradsAndVars) == 8
    assert {t.id for t in actGradsAndVars} == set('abcdefgh')
Example #2
def test_add_stream():
    ir = Ir()
    gId = GraphId("g")
    graph = Graph(ir, gId)
    ts = Tensors(graph)

    tinfo = TensorInfo(DataType.FLOAT, [10])
    ts.addStream("data", tinfo)
Example #3
def test_add_const_init():
    ir = Ir()
    gId = GraphId("g")
    graph = Graph(ir, gId)
    ts = Tensors(graph)

    data = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).astype(np.float32)
    tinfo = TensorInfo(DataType.FLOAT, data.shape)
    ts.addConstInit("data", tinfo, data)
Example #4
def test_getAllTensorIds():
    ir = Ir()
    gId = GraphId("g")
    graph = Graph(ir, gId)
    ts = Tensors(graph)

    # Add some tensors.
    ids = list("abcdefghi")
    for tid in ids:
        ts.addActGrad(tid)

    # Check ids returned from getAllTensorIds.
    assert set(ts.getAllTensorIds()) == set(ids)
Example #5
def test_adding_actGrads():
    ir = Ir()
    gId = GraphId("g")
    graph = Graph(ir, gId)
    ts = Tensors(graph)

    # Add some tensors.
    ids = list("abcdefghi")
    for tid in ids:
        ts.addActGrad(tid)

    # Check the number of tensors is correct.
    assert ts.n() == len(ids)
Example #6
def test_contains_with_scope():
    ir = Ir()
    gId = GraphId("g")
    graph = Graph(ir, gId)
    ts = Tensors(graph)

    ts.addActGrad('a/b/c/foo')
    ts.addActGrad('a/b/bar')

    scope = Scope() / 'a' / 'b' / 'c'

    assert ts.contains('foo', scope)
    assert ts.contains('bar', scope)
    assert not ts.contains('fizz', scope)
Example #7
def test_remove_all_isolated():
    ir = Ir()
    gId = GraphId("g")
    graph = Graph(ir, gId)
    ts = Tensors(graph)

    # Add some tensors.
    ids = list("abcdefghi")
    for tid in ids:
        ts.addActGrad(tid)

    # All these tensors should be isolated
    ts.removeIsolated(False)
    assert ts.n() == 0
Example #8
def _setup_call_and_repeat(
    pb_ir: _ir.Ir, pb_top_graph: _ir.Graph, pb_bottom_graph: _ir.Graph
) -> Tuple[_ir.Graph, _ir.op.CallOp, _ir.op.LoopOp]:
    """Setup the call and repeat ops, as well as the middle graph that the loop op will loop.

    Args:
        pb_ir (_ir.Ir): The _ir-level Ir.
        pb_top_graph (_ir.Graph): The _ir top level graph that will contain the loop op.
        pb_bottom_graph (_ir.Graph): The _ir user defined subgraph that will be called.

    Returns:
        Tuple[_ir.Graph, _ir.op.CallOp, _ir.op.LoopOp]: The created _ir-level middle graph, call op
            and loop op.
    """
    # This is the graph we will repeat.
    pb_middle_graph = pb_ir.createGraph(
        _ir.GraphId(
            pb_ir.createUniqueSubgraphId(
                f"{pb_bottom_graph.id.str()}__loop_wrapper")))

    opid = _ir.OperatorIdentifier("ai.graphcore", "Call", 1, _ir.NumInputs(),
                                  0)
    op_name = pb_middle_graph.id.str() + '__call__' + pb_bottom_graph.id.str()

    ctx = get_current_context()
    # Call the bottom_graph
    pb_callop = pb_middle_graph.createOp_CallOp(opid, pb_bottom_graph,
                                                ctx._get_op_settings(op_name))

    opid = _ir.OperatorIdentifier("ai.onnx", "Loop", 11, _ir.NumInputs(), 0)
    op_name = pb_top_graph.id.str() + '__loop__' + pb_middle_graph.id.str()

    # Loop the middle_graph
    pb_loop_op = pb_top_graph.createOp_LoopOp(opid,
                                              ctx._get_op_settings(op_name),
                                              pb_middle_graph)

    # Add mandatory loop iterator tensor to subgraph (is not an output)
    repeatIterId = _ir.addScope(pb_middle_graph, "Iterator___")
    pb_middle_graph.addInput(repeatIterId,
                             _ir.TensorInfo(_ir.DataType.INT32, ()))

    # Add mandatory loop condition tensor to subgraph (is also an output)
    repeatCondId = _ir.addScope(pb_middle_graph, "LoopCond___")
    pb_middle_graph.addInput(repeatCondId,
                             _ir.TensorInfo(_ir.DataType.BOOL, ()))
    pb_middle_graph.markAsOutput(repeatCondId)

    return pb_middle_graph, pb_callop, pb_loop_op
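A minimal usage sketch for this helper, assuming `_ir` is popart._internal.ir and that both parent graphs live on the same Ir; the graph names here are illustrative, not taken from the original code:

pb_ir = _ir.Ir()
pb_top_graph = pb_ir.createGraph(_ir.GraphId("top"))
pb_bottom_graph = pb_ir.createGraph(_ir.GraphId("bottom"))

# Builds the middle wrapper graph and the call/loop ops that tie the
# three graphs together.
pb_middle_graph, pb_callop, pb_loop_op = _setup_call_and_repeat(
    pb_ir, pb_top_graph, pb_bottom_graph)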
Example #9
def _setup_outputs(pb_top_graph: _ir.Graph, pb_bottom_graph: _ir.Graph,
                   pb_middle_graph: _ir.Graph, pb_callop: _ir.op.CallOp,
                   pb_loop_op: _ir.op.LoopOp) -> List[str]:
    """3. Connect outputs. We introspect the subgraph to get its outputs then,
         for each one, create an output tensor of the call op in the middle
         graph.
    Args:
        pb_top_graph (_ir.Graph): Top _ir graph
        pb_bottom_graph (_ir.Graph): Bottom _ir Graph
        pb_middle_graph (_ir.Graph): Middle _ir Graph
        pb_callop (_ir.op.CallOp): Previously created Call op
        pb_loop_op (_ir.op.LoopOp): Previously created Loop op

    Returns:
        List[str]: The output tensor ids.
    """

    outnames: List[str] = []

    for pb_repeat_subgraph_out_id in pb_bottom_graph.getOutputIds():
        top_tensor_id = _ir.addScope(
            pb_top_graph,
            _ir.removeScope(pb_bottom_graph, pb_repeat_subgraph_out_id))
        # Already has scope added
        middle_tensor_id = _ir.removeScope(pb_bottom_graph,
                                           pb_repeat_subgraph_out_id)
        bottom_tensor_id = _ir.addScope(
            pb_bottom_graph,
            _ir.removeScope(pb_bottom_graph, pb_repeat_subgraph_out_id))

        sgOutIdx = pb_bottom_graph.getOutputIndex(bottom_tensor_id)
        callOutIdx = pb_callop.subgraphOutToOpOutIndex(sgOutIdx)

        # Avoid tensor name collisions
        middle_tensor_id = pb_middle_graph.getIr().createIntermediateTensorId(
            middle_tensor_id)
        pb_callop.createAndConnectOutTensor(callOutIdx, middle_tensor_id)

        pb_middle_graph.markAsOutput(middle_tensor_id)
        sgOutIdx = pb_middle_graph.getOutputIndex(middle_tensor_id)
        repeatOutIdx = pb_loop_op.subgraphOutToOpOutIndex(sgOutIdx)
        # Avoid tensor name collisions
        top_tensor_id = pb_middle_graph.getIr().createIntermediateTensorId(
            top_tensor_id)
        # We overwrite here as we added the middle_tensor_id as an output above, but we want to make
        # sure the loop op is set up correctly.
        pb_loop_op.addLoopOutput(repeatOutIdx, top_tensor_id, middle_tensor_id,
                                 True)

        outnames.append(top_tensor_id)
    return outnames
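Continuing the sketch from Example #8: once the graphs and ops exist (and the bottom graph has its outputs marked), wiring the outputs is a single call, with each returned id naming a top-graph tensor produced by the loop op:

out_ids = _setup_outputs(pb_top_graph, pb_bottom_graph, pb_middle_graph,
                         pb_callop, pb_loop_op)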
Example #10
def test_get():
    ir = Ir()
    gId = GraphId("g")
    graph = Graph(ir, gId)
    ts = Tensors(graph)

    # Add some tensors.
    ids = list("abcdefghi")
    for tid in ids:
        ts.addActGrad(tid)

    # Get the tensors one by one and confirm we have been returned the correct tensor.
    for tid in ids:
        t = ts.get(tid)
        assert t.id == tid
Example #11
def test_make_const_init():
    data = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).astype(np.float32)

    ir = Ir()
    gId = GraphId("g")
    graph = Graph(ir, gId)
    ts = Tensors(graph)

    # Add a tensor and check the value returned by `tensorType()`.
    ts.addActGrad('foo')
    t = ts.get('foo')
    assert t.tensorType() == TensorType.ActGrad

    # Make the tensor const init and check the value returned by `tensorType()` has changed.
    t.info = TensorInfo(DataType.FLOAT, data.shape)
    ts.makeConstInit('foo', data)
    assert t.tensorType() == TensorType.Const
Example #12
def test_contains():
    ir = Ir()
    gId = GraphId("g")
    graph = Graph(ir, gId)
    ts = Tensors(graph)

    # Add some tensors.
    ids = list("abcdefghi")
    for tid in ids:
        ts.addActGrad(tid)

    # Check all expected tensors are in ts.
    for tid in ids:
        assert ts.contains(tid)

    # Check `ts.contains` is not just returning true.
    for tid in 'xyz':
        assert not ts.contains(tid)
Example #13
def test_find():
    ir = Ir()
    gId = GraphId("g")
    graph = Graph(ir, gId)
    ts = Tensors(graph)

    # Add three tensors called foo with different scopes.
    ts.addActGrad('foo')
    ts.addActGrad('a/foo')
    ts.addActGrad('a/b/c/foo')

    # Make sure we can find all three tensors.
    foo = ts.find('foo', Scope())
    assert foo == 'foo'
    foo = ts.find('foo', Scope() / 'a')
    assert foo == 'a/foo'
    foo = ts.find('foo', Scope() / 'a' / 'b' / 'c')
    assert foo == 'a/b/c/foo'
Example #14
def test_remove():
    ir = Ir()
    gId = GraphId("g")
    graph = Graph(ir, gId)
    ts = Tensors(graph)

    # Add some tensors.
    ids = list("abcdefghi")
    for tid in ids:
        ts.addActGrad(tid)

    # Test removing tensors
    while ids:
        x = ids.pop(0)
        ts.remove(x)
        assert not ts.contains(x)
        assert ts.n() == len(ids)
Example #15
def test_get_ids():
    ir = Ir()
    gId = GraphId("g")
    graph = Graph(ir, gId)
    ts = Tensors(graph)

    for tid in 'abcd':
        ts.addActGrad(tid)

    data = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).astype(np.float32)
    tinfo = TensorInfo(DataType.FLOAT, data.shape)
    for tid in 'efgh':
        ts.addVarInit(tid, tinfo, data)

    actGrads = ts.getIds(TensorType.ActGrad)
    assert len(actGrads) == 4
    assert set(actGrads) == set('abcd')

    variables = ts.getIds(TensorType.Variable)
    assert len(variables) == 4
    assert set(variables) == set('efgh')
Example #16
def _setup_inputs(subgraph_fn_param_inputs: Iterable[Tensor],
                  subgraph_in_to_parent_in: Mapping[Tensor, Tensor],
                  pb_top_graph: _ir.Graph, pb_bottom_graph: _ir.Graph,
                  pb_middle_graph: _ir.Graph, pb_callop: _ir.op.CallOp,
                  pb_loop_op: _ir.op.LoopOp) -> None:
    """Do the following:

    1. Connect explicitly passed inputs. These would have been created first
       by ir.get_graph, so we do them first. ir.get_graph will have created
       the input tensors t_0,...,t_N at input indices 0,..,N, respectively. We
       require that the user has passed the parent tensors that correspond to
       these inputs in the exact same order, so we can trivially reconstruct
       the input indices here.

    2. Connect internally created inputs.

    Args:
        subgraph_fn_param_inputs (Iterable[Tensor]): User-defined explicit inputs.
        subgraph_in_to_parent_in (Mapping[Tensor, Tensor]):
            Mapping of `subgraph tensor -> parent tensor` that corresponds to
            the inputs that the callable defined internally, e.g. by using
            popart.ir.subgraph_input. Defaults to an empty dictionary.
            Works effectively the same as the call op's `subgraph_in_to_parent_in` argument.
        pb_top_graph (_ir.Graph): Top _ir graph
        pb_bottom_graph (_ir.Graph): Bottom _ir Graph
        pb_middle_graph (_ir.Graph): Middle _ir Graph
        pb_callop (_ir.op.CallOp): Previously created Call op
        pb_loop_op (_ir.op.LoopOp): Previously created Loop op
    """

    # Note: Only bottom_graph (which is called) has gone through the ir.get_graph process.
    # middle_graph (intentionally) has not, so we need to add loop inputs/outputs.
    # User-defined indices start at 2 for loop ops.
    for sgInIdx, t in enumerate(subgraph_fn_param_inputs):
        callInIdx = pb_callop.subgraphInToOpInIndex(sgInIdx)
        # Note the + 2 here: user-defined loop inputs start at index 2.
        pb_loop_op.addLoopInput(sgInIdx + 2,
                                _ir.addScope(pb_top_graph, t.name),
                                _ir.addScope(pb_middle_graph, t.name), False)
        pb_callop.connectInTensor(callInIdx,
                                  _ir.addScope(pb_middle_graph, t.name))

    # 2. Connect internally created inputs.
    for sg_tensor, parent_tensor in subgraph_in_to_parent_in.items():
        sgInIdx = pb_bottom_graph.getInputIndex(sg_tensor.id)
        callInIdx = pb_callop.subgraphInToOpInIndex(sgInIdx)

        top_tensor_id = _ir.addScope(pb_top_graph, parent_tensor.id)
        pb_loop_op.addLoopInput(
            sgInIdx + 2, top_tensor_id,
            _ir.addScope(pb_middle_graph,
                         _ir.removeScope(pb_bottom_graph, sg_tensor.id)),
            False)
        set_input_modified(pb_loop_op, pb_loop_op.inTensor(sgInIdx + 2))
        pb_callop.connectInTensor(
            callInIdx,
            _ir.addScope(pb_middle_graph,
                         _ir.removeScope(pb_bottom_graph, sg_tensor.id)))
        set_input_modified(pb_callop, pb_callop.inTensor(callInIdx))
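A tiny self-contained illustration of the `sgInIdx + 2` offset used above: user-defined loop inputs start at index 2 because indices 0 and 1 are taken by the mandatory iterator and loop-condition tensors added in _setup_call_and_repeat. The helper name is hypothetical:

def loop_op_in_index(sg_in_idx: int) -> int:
    # Hypothetical helper: subgraph input i maps to loop op input i + 2.
    return sg_in_idx + 2

assert loop_op_in_index(0) == 2  # first user tensor lands at loop input 2
assert loop_op_in_index(1) == 3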
Example #17
def test_tensors_construction():
    """ Test that we can construct a popart._internal.ir.Graph object. """
    ir = Ir()
    gId = GraphId("g")
    graph = Graph(ir, gId)
    ts = Tensors(graph)