Example #1
0
 def init_builder(builder):
     """Build a sign-op graph; anchor the output and gradients of both ends."""
     grad = popart.reservedGradientPrefix()
     t_in = builder.addInputTensor(d1)
     t_out = builder.aiOnnx.sign([t_in], "test_sign")
     builder.addOutputTensor(t_out)
     return [t_out, grad + t_out, grad + t_in]
Example #2
0
 def init_builder(builder):
     """Reshape via aiGraphcore; anchor output plus input/output gradients."""
     grad = popart.reservedGradientPrefix()
     t_in = builder.addInputTensor(d1)
     t_out = builder.aiGraphcore.reshape(t_in, shape=d2a)
     builder.addOutputTensor(t_out)
     return [t_out, grad + t_in, grad + t_out]
def _get_ir(pingpong_enabled, virtualgraph_enabled, pipeline_enabled):
    """Build and compile a 3-layer matmul training graph; return its IR.

    Each boolean flag optionally annotates the i-th matmul output with a
    ping-pong phase, virtual graph, or pipeline stage. The IR is returned
    as a dict parsed from the session's JSON serialization.

    Fix: removed the inner `add_layer` helper, which was defined but
    never called (dead code); built the anchor dict with a comprehension.
    """
    dsize = 10
    builder = popart.Builder()
    ip = builder.addInputTensor(popart.TensorInfo("FLOAT", [dsize, dsize]))

    # Anchor the gradient of the input and of every layer output.
    anchorIds = [popart.reservedGradientPrefix() + ip]

    x = ip
    for i in range(3):
        w = builder.addInitializedInputTensor(
            np.ones([dsize, dsize], np.float32))
        x = builder.aiOnnx.matmul([x, w])

        if pingpong_enabled:
            builder.pingPongPhase(x, i)
        if virtualgraph_enabled:
            builder.virtualGraph(x, i)
        if pipeline_enabled:
            builder.pipelineStage(x, i)

        anchorIds.append(popart.reservedGradientPrefix() + x)

    out = builder.aiGraphcore.identityloss([x])
    if virtualgraph_enabled:
        # The loss goes on its own virtual graph after the three layers.
        builder.virtualGraph(out, 3)

    device = tu.create_test_device()

    dfAnchors = {a: popart.AnchorReturnType("All") for a in anchorIds}

    opts = popart.SessionOptions()
    # disable outlining to make the ir easier to parse
    opts.enableOutlining = False

    proto = builder.getModelProto()

    session = popart.TrainingSession(fnModel=proto,
                                     dataFlow=popart.DataFlow(1, dfAnchors),
                                     optimizer=popart.ConstSGD(0.1),
                                     loss=out,
                                     patterns=popart.Patterns(
                                         popart.PatternsLevel.All),
                                     userOptions=opts,
                                     deviceInfo=device)

    ir = json.loads(session._serializeIr(popart.IrSerializationFormat.JSON))
    return ir
Example #4
0
    def run(model_file_name, enableOutlining):
        """Train a 3-layer matmul model for one step and save it to tmpdir.

        Anchors the gradient of the input and of every layer output, then
        writes the trained model to `tmpdir / model_file_name`.

        Fix: removed unused locals `ratio` and `d__ip` (dead code); built
        the anchor list/dict with comprehensions.
        """
        dsize = 10
        builder = popart.Builder()
        ip = builder.addInputTensor(popart.TensorInfo("FLOAT", [dsize, dsize]))

        def add_layer(in_id):
            # One layer = matmul against a fresh all-ones weight tensor.
            w = builder.addInitializedInputTensor(
                np.ones([dsize, dsize], np.float32))
            return builder.aiOnnx.matmul([in_id, w])

        m1 = add_layer(ip)
        m2 = add_layer(m1)
        m3 = add_layer(m2)

        anchorIds = [
            popart.reservedGradientPrefix() + t for t in (ip, m1, m2, m3)
        ]

        out = builder.aiGraphcore.identityloss([m3])
        builder.addOutputTensor(out)

        device = tu.create_test_device()

        dfAnchors = {a: popart.AnchorReturnType("All") for a in anchorIds}

        opts = popart.SessionOptions()
        opts.enableOutlining = enableOutlining
        opts.separateCallOpPdfs = False
        opts.subgraphCopyingStrategy = subgraphCopyingStrategy

        proto = builder.getModelProto()

        session = popart.TrainingSession(
            fnModel=proto,
            dataFlow=popart.DataFlow(1, dfAnchors),
            optimizer=popart.ConstSGD(0.1),
            loss=out,
            patterns=popart.Patterns(popart.PatternsLevel.All),
            userOptions=opts,
            deviceInfo=device)

        session.prepareDevice()
        session.weightsFromHost()
        anchors = session.initAnchorArrays()

        ip_data = np.ones((dsize, dsize), dtype=np.float32)
        stepio = popart.PyStepIO({ip: ip_data}, anchors)

        session.run(stepio)

        session.modelToHost(str(tmpdir / model_file_name))
Example #5
0
 def init_builder(builder):
     """Scatter-reduce; anchor output plus grads of the data and output."""
     grad = popart.reservedGradientPrefix()
     t_data = builder.addInputTensor(src.numpy())
     t_idx = builder.addInputTensor(index.numpy().astype(np.uint32))
     t_out = builder.aiGraphcore.scatterreduce([t_data, t_idx],
                                               axis_size=axsz)
     builder.addOutputTensor(t_out)
     return [t_out, grad + t_data, grad + t_out]
Example #6
0
 def init_builder(builder):
     """Reversed cumsum along a constant axis; anchor output and grads."""
     grad = popart.reservedGradientPrefix()
     t_in = builder.addInputTensor(x)
     t_axis = builder.aiOnnxOpset11.constant(axis)
     t_out = builder.aiOnnxOpset11.cumsum([t_in, t_axis], reverse=1)
     builder.addOutputTensor(t_out)
     return [t_out, grad + t_in, grad + t_out]
Example #7
0
 def init_builder(builder):
     """Tile the input by a constant repeats tensor; anchor output/grads."""
     grad = popart.reservedGradientPrefix()
     t_in = builder.addInputTensor(d1)
     reps = builder.aiOnnx.constant(d2)
     t_out = builder.aiOnnx.tile([t_in, reps])
     builder.addOutputTensor(t_out)
     return [t_out, grad + t_in, grad + t_out]
Example #8
0
 def init_builder(builder):
     """Cast then reduce-sum; anchor the sum and the input/output grads."""
     grad = popart.reservedGradientPrefix()
     t_in = builder.addInputTensor(d1)
     casted = builder.aiOnnx.cast([t_in], builderDstType)
     # Add an op that produces a gradient so we can test CastGrad properly
     t_out = builder.aiOnnx.reducesum([casted])
     builder.addOutputTensor(t_out)
     return [t_out, grad + t_in, grad + t_out]
Example #9
0
 def init_builder(builder):
     """Elementwise pow; anchor output and gradients of both inputs."""
     grad = popart.reservedGradientPrefix()
     base = builder.addInputTensor(d1)
     expo = builder.addInputTensor(d2)
     t_out = builder.aiOnnx.pow([base, expo])
     builder.addOutputTensor(t_out)
     return [t_out, grad + base, grad + expo, grad + t_out]
Example #10
0
 def init_builder(builder):
     """NLL loss on log-softmax outputs; anchor loss and its gradients."""
     grad = popart.reservedGradientPrefix()
     probs = builder.addInitializedInputTensor(data)
     tgt = builder.addInputTensor(target.astype(np.int32))
     log_probs = builder.aiOnnx.logsoftmax([probs], axis=1)
     nll = builder.aiGraphcore.nllloss([log_probs, tgt],
                                       inputIsLogProbability=1)
     builder.addOutputTensor(nll)
     return [nll, grad + probs, grad + nll]
Example #11
0
 def init_builder(builder):
     """DepthToSpace in DCR mode; anchor output and grads."""
     grad = popart.reservedGradientPrefix()
     t_in = builder.addInputTensor(d1)
     t_out = builder.aiOnnxOpset11.depthtospace([t_in],
                                                blocksize=blocks,
                                                mode="DCR")
     builder.addOutputTensor(t_out)
     return [t_out, grad + t_in, grad + t_out]
Example #12
0
 def init_builder(builder):
     """One-hot encode; anchor output and grads of output and values."""
     grad = popart.reservedGradientPrefix()
     idx = builder.addInputTensor(indices)
     dep = builder.aiOnnx.constant(depth)  # depth has to be a constant
     vals = builder.addInputTensor(values)
     t_out = builder.aiOnnx.onehot([idx, dep, vals], 0, "test_onehot")
     builder.addOutputTensor(t_out)
     return [t_out, grad + t_out, grad + vals]
Example #13
0
 def init_builder(builder):
     """Scatter updates into data along `axis`; anchor data/updates grads."""
     grad = popart.reservedGradientPrefix()
     t_data = builder.addInputTensor(data)
     t_idx = builder.addInputTensor(indices)
     t_upd = builder.addInputTensor(updates)
     t_out = builder.aiOnnx.scatter([t_data, t_idx, t_upd], axis)
     builder.addOutputTensor(t_out)
     return [t_out, grad + t_data, grad + t_upd]
Example #14
0
 def init_builder(builder):
     """atan2(y, x); anchor output and gradients of both inputs."""
     grad = popart.reservedGradientPrefix()
     t_y = builder.addInputTensor(input_y)
     t_x = builder.addInputTensor(input_x)
     t_out = builder.aiGraphcore.atan2([t_y, t_x])
     builder.addOutputTensor(t_out)
     return [t_out, grad + t_y, grad + t_x, grad + t_out]
Example #15
0
 def init_builder(builder):
     """Swish activation; anchor output, its grad, and the input grad."""
     grad = popart.reservedGradientPrefix()
     t_in = builder.addInputTensor(data)
     t_out = builder.aiGraphcore.swish([t_in],
                                       debugContext='test_swish_grad')
     builder.addOutputTensor(t_out)
     return [t_out, grad + t_out, grad + t_in]
Example #16
0
 def init_builder(builder):
     """Resize by constant scales, then mul; anchor output and grads."""
     grad = popart.reservedGradientPrefix()
     t_d = builder.addInputTensor(data)
     t_x = builder.addInputTensor(x_data)
     t_s = builder.aiOnnx.constant(scales)
     resized = builder.aiOnnx.resize([t_d, t_s])
     t_out = builder.aiOnnx.mul([resized, t_x])
     builder.addOutputTensor(t_out)
     return [t_out, grad + t_d, grad + t_out]
Example #17
0
    def init_builder(builder):
        """Slice with constant starts/ends/axes; anchor output and grads."""
        grad = popart.reservedGradientPrefix()
        t_in = builder.addInputTensor(d1)
        t_axes = builder.aiOnnx.constant(axesV)
        t_starts = builder.aiOnnx.constant(startsV)
        t_ends = builder.aiOnnx.constant(endsV)
        t_out = builder.aiOnnx.slice([t_in, t_starts, t_ends, t_axes])
        builder.addOutputTensor(t_out)
        return [t_out, grad + t_in, grad + t_out]
Example #18
0
    def init_builder(builder):
        """Softmax then mul; anchor both intermediate results and grads."""
        grad = popart.reservedGradientPrefix()
        t_in = builder.addInputTensor(data)
        t_mul = builder.addInputTensor(one_place)
        sm = builder.aiOnnx.softmax([t_in], axis)
        t_out = builder.aiOnnx.mul([sm, t_mul])
        builder.addOutputTensor(t_out)
        return [t_out, sm, grad + t_in, grad + t_out]
Example #19
0
 def init_builder(builder):
     """GEMM over three inputs; anchor output and all input gradients."""
     grad = popart.reservedGradientPrefix()
     t_a = builder.addInputTensor(A)
     t_b = builder.addInputTensor(B)
     t_c = builder.addInputTensor(C)
     t_out = builder.aiOnnx.gemm([t_a, t_b, t_c], alpha, beta, transA,
                                 transB)
     builder.addOutputTensor(t_out)
     return [t_out, grad + t_a, grad + t_b, grad + t_c, grad + t_out]
Example #20
0
    def init_builder(builder):
        """Split into two halves, sum them back; anchor output and grads."""
        grad = popart.reservedGradientPrefix()
        t_in = builder.addInputTensor(data)
        first, second = builder.aiOnnx.split([t_in], 2)
        t_out = builder.aiOnnx.sum([first, second])
        builder.addOutputTensor(t_out)
        return [t_out, grad + t_in, grad + t_out]
Example #21
0
 def init_builder(builder):
     """Softplus with the InPlace pattern enabled; anchor output and grads."""
     grad = popart.reservedGradientPrefix()
     t_in = builder.addInputTensor(input_data)
     t_out = builder.aiOnnx.softplus([t_in])
     builder.addOutputTensor(t_out)
     op_tester.setPatterns(['InPlace'], enableRuntimeAsserts=False)
     # Set the result to
     # ['Softplus:0', 'Gradient___input', 'Gradient___Softplus:0']
     return [t_out, grad + t_in, grad + t_out]
Example #22
0
    def run_test():
        """Train one step of add(z, matmul(lhs, rhs)); return the loss anchor."""
        builder = popart.Builder()

        lhs = builder.addInputTensor(popart.TensorInfo("FLOAT", lhs_shape),
                                     "lhs")
        rhs = builder.addInputTensor(popart.TensorInfo("FLOAT", rhs_shape),
                                     "rhs")
        z = builder.addInputTensor(popart.TensorInfo("FLOAT", [2]), "zero")

        t1 = builder.aiOnnx.matmul([lhs, rhs])
        o = builder.aiGraphcore.identityloss([builder.aiOnnx.add([z, t1])])

        proto = builder.getModelProto()

        # Anchor the loss plus the gradients of both matmul operands.
        grad = popart.reservedGradientPrefix()
        dataFlow = popart.DataFlow(
            1, {
                o: popart.AnchorReturnType("All"),
                grad + lhs: popart.AnchorReturnType("All"),
                grad + rhs: popart.AnchorReturnType("All"),
            })

        opts = popart.SessionOptions()
        opts.reportOptions = {"showExecutionSteps": "true"}

        session = popart.TrainingSession(
            fnModel=proto,
            dataFlow=dataFlow,
            userOptions=opts,
            loss=o,
            optimizer=popart.ConstSGD(0.01),
            patterns=popart.Patterns(popart.PatternsLevel.Default),
            deviceInfo=tu.create_test_device(opts={"compileIPUCode": False}))

        session.prepareDevice()

        anchors = session.initAnchorArrays()

        stepio = popart.PyStepIO({lhs: lhs_data, rhs: rhs_data, z: zero_data},
                                 anchors)
        session.run(stepio)

        return anchors[o]
Example #23
0
 def init_builder(builder):
     """Hardsigmoid op; optionally enable InPlace or anchor gradients.

     Fix: compare strings with `==` instead of identity (`is`) — `is`
     against a string literal only works by accident of CPython string
     interning and emits a SyntaxWarning on modern Pythons.
     """
     i1 = builder.addInputTensor(input_data)
     o = builder.aiOnnx.hardsigmoid([i1], alpha=alpha, beta=beta)
     builder.addOutputTensor(o)
     result = [o]
     if builder_settings == 'InPlace':
         op_tester.setPatterns(['InPlace'], enableRuntimeAsserts=False)
     elif builder_settings == 'backward':
         result = [
             o,
             popart.reservedGradientPrefix() + i1,
             popart.reservedGradientPrefix() + o
         ]
     return result
Example #24
0
    def init_builder(builder):
        """GEMM with trained B/C; anchor weights, their grads, and the
        optimizer scalar tensors."""
        grad = popart.reservedGradientPrefix()
        t_a = builder.addInputTensor(A)
        t_b = builder.addInitializedInputTensor(B)
        t_c = builder.addInitializedInputTensor(C)
        t_out = builder.aiOnnx.gemm([t_a, t_b, t_c], alpha, beta, transA,
                                    transB)
        builder.addOutputTensor(t_out)
        return [
            t_out, grad + t_b, t_b, grad + t_c, t_c,
            "scaledLearningRate0___default___FLOAT",
            "weightDecayScaleFactor0___default___FLOAT"
        ]
Example #25
0
 def init_builder(builder):
     """2x2 max-pool with stride 2 and no padding; anchor output/grads."""
     grad = popart.reservedGradientPrefix()
     t_in = builder.addInputTensor(d1)
     (t_out, ) = builder.aiOnnx.maxpool([t_in],
                                        num_outputs=1,
                                        kernel_shape=[2, 2],
                                        pads=[0, 0, 0, 0],
                                        storage_order=0,
                                        strides=[2, 2])
     builder.addOutputTensor(t_out)
     return [t_out, grad + t_in, grad + t_out]
Example #26
0
 def init_builder(builder):
     """Expand to a constant shape, optionally through an identity op."""
     grad = popart.reservedGradientPrefix()
     t_in = builder.addInputTensor(d1)
     shape_c = builder.aiOnnx.constant(d2)
     expanded = builder.aiOnnx.expand([t_in, shape_c])
     # When exercising in-place behaviour, chase the expand with identity.
     final = builder.aiOnnx.identity([expanded]) if inplace else expanded
     builder.addOutputTensor(final)
     return [final, grad + expanded, grad + t_in]
Example #27
0
 def init_builder(builder):
     """ELU op; optionally enable InPlace or anchor gradients.

     Fix: compare strings with `==` instead of identity (`is`) — `is`
     against a string literal relies on CPython interning and is fragile.
     """
     i1 = builder.addInputTensor(input_data)
     o = builder.aiOnnx.elu([i1], alpha=alpha)
     builder.addOutputTensor(o)
     result = [o]
     if builder_settings == 'InPlace':
         op_tester.patterns = ['InPlace']
     elif builder_settings == 'backward':
         result = [
             o,
             popart.reservedGradientPrefix() + i1,
             popart.reservedGradientPrefix() + o
         ]
     return result
Example #28
0
    def init_builder(builder):
        """LeakyRelu, using the op's default alpha when none is given.

        Fix: test for None with `is None` instead of `==`, which can
        invoke arbitrary `__eq__` overloads (PEP 8 idiom).
        """
        i1 = builder.addInputTensor(input_data)

        if alpha is None:
            o = builder.aiOnnx.leakyrelu([i1])
        else:
            o = builder.aiOnnx.leakyrelu([i1], alpha=alpha)

        builder.addOutputTensor(o)
        return [
            o,
            popart.reservedGradientPrefix() + i1,
            popart.reservedGradientPrefix() + o
        ]
Example #29
0
        def init_builder(builder):
            """Build an aiGraphcore LSTM from ONNX-style weight tensors.

            The input/output weights and biases arrive in per-gate slices;
            they are split, transposed, and concatenated into the single
            weight/bias/initial-state tensors that aiGraphcore.lstm takes.
            Returns the identity loss plus the gradients of the four
            original weight/bias/data inputs.
            """
            tData = builder.addInputTensor(data, data_id)
            tIW = builder.addInitializedInputTensor(input_weights,
                                                    input_weights_id)
            tOW = builder.addInitializedInputTensor(output_weights,
                                                    output_weights_id)
            tBiases = builder.addInitializedInputTensor(biases, biases_id)
            tInitH = builder.addInputTensor(initial_h, init_h_id)
            tInitC = builder.addInputTensor(initial_c, init_c_id)

            def reshape_weights(w):
                # Split into the four per-gate slices, transpose each, and
                # re-concatenate in order (2, 0, 3, 1) — presumably mapping
                # ONNX gate order to aiGraphcore.lstm's order; TODO confirm.
                ws = builder.aiOnnx.split([w], 4, 1, [hidden_size] * 4)
                ws = [builder.aiOnnx.transpose([i], [0, 2, 1]) for i in ws]
                ws = builder.aiOnnx.concat([ws[i] for i in (2, 0, 3, 1)], 0)
                return ws

            tIW = reshape_weights(tIW)
            tOW = reshape_weights(tOW)

            # NB shape inference is not yet possible with aiOnnx.split

            tWeights = builder.aiOnnx.concat([tIW, tOW], 1)

            # The 8 bias slices are reordered per gate and the two halves
            # (input- and recurrence-side) summed into a single bias tensor.
            tBiases = builder.aiOnnx.split([tBiases], 8, 1, [hidden_size] * 8)
            tBiases0 = builder.aiOnnx.concat(
                [tBiases[i] for i in (2, 0, 3, 1)], 0)
            tBiases1 = builder.aiOnnx.concat(
                [tBiases[i] for i in (6, 4, 7, 5)], 0)
            tBiases = builder.aiOnnx.add([tBiases0, tBiases1])

            tInitState = builder.aiOnnx.concat([tInitH, tInitC], 0)

            input_ids = [tData, tWeights, tBiases, tInitState]
            out, cell_state = builder.aiGraphcore.lstm(input_ids)
            assert builder.getTensorDtypeString(out) == "float32"
            assert builder.getTensorDtypeString(cell_state) == "float32"

            if sum_outputs:
                # Fold the cell state into the loss so its grad is exercised.
                out = builder.aiOnnx.add([out, cell_state],
                                         "sum_out_and_cell_state")
            loss = builder.aiGraphcore.identityloss([out])

            return [
                loss,
                popart.reservedGradientPrefix() + data_id,
                popart.reservedGradientPrefix() + input_weights_id,
                popart.reservedGradientPrefix() + output_weights_id,
                popart.reservedGradientPrefix() + biases_id,
            ]
Example #30
0
 def init_builder_manual_padding(builder):
     """3x3 max-pool, stride 2, manual asymmetric pads; anchor grads."""
     grad = popart.reservedGradientPrefix()
     t_in = builder.addInputTensor(d1)
     (t_out, ) = builder.aiOnnxOpset10.maxpool([t_in],
                                               num_outputs=1,
                                               kernel_shape=[3, 3],
                                               ceil_mode=0,
                                               pads=[0, 0, 1, 1],
                                               storage_order=0,
                                               strides=[2, 2])
     builder.addOutputTensor(t_out)
     return [t_out, grad + t_in, grad + t_out]