Example #1
from singa import autograd


def onnx_loss(a, model, target):
    '''
    a: dictionary mapping ONNX node names to already-computed tensors
    model: the loaded ONNX graph model
    target: the label tensor

    Replays the remaining ONNX graph nodes with singa autograd operations
    and returns the cross-entropy loss against target.
    '''
    for i in model.graph.node:
        if i.op_type == 'Constant':
            # constant initializers need no replay
            continue
        elif i.op_type == 'LeakyRelu':
            # approximated with a plain relu here, as in the original snippet
            a[str(i.output[0])] = autograd.relu(a[str(i.input[0])])
        elif i.op_type == 'Relu':
            a[str(i.output[0])] = autograd.relu(a[str(i.input[0])])
        elif i.op_type == 'Softmax':
            a[str(i.output[0])] = autograd.softmax(a[str(i.input[0])])
        elif i.op_type == 'Add':
            # inputs whose name ends in 'b' are treated as bias vectors
            if str(i.input[1])[-1] == 'b':
                a[str(i.output[0])] = autograd.add_bias(a[str(i.input[0])], a[str(i.input[1])])
            else:
                a[str(i.output[0])] = autograd.add(a[str(i.input[0])], a[str(i.input[1])])
        elif i.op_type == 'MatMul':
            a[str(i.output[0])] = autograd.matmul(a[str(i.input[0])], a[str(i.input[1])])

    # 'Y' is assumed by this snippet to be the graph's final output node
    loss = autograd.cross_entropy(a['Y'], target)
    return loss
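onnx_loss expects the dictionary a to be pre-seeded with the graph's input tensor. A minimal hypothetical driver (the input name 'X', output name 'Y', and file 'mlp.onnx' are assumptions, not part of the original):

import numpy as np
import onnx
from singa import tensor
from singa.tensor import Tensor

model = onnx.load('mlp.onnx')                    # assumed model file
inputs = Tensor(data=np.random.randn(4, 2).astype(np.float32))
target = Tensor(data=np.eye(2, dtype=np.float32)[[0, 1, 0, 1]])

a = {'X': inputs}    # seed the node dict with the graph input tensor
loss = onnx_loss(a, model, target)
print('loss =', tensor.to_numpy(loss)[0])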
Example #2
def forward(x, t):
    y = conv1(x)
    y = autograd.relu(y)
    y = conv2(y)
    y = autograd.relu(y)
    y = autograd.max_pool_2d(y)
    y = autograd.flatten(y)
    y = linear(y)
    y = autograd.soft_max(y)
    loss = autograd.cross_entropy(y, t)
    return loss, y
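forward assumes conv1, conv2, and linear are defined at module scope. A plausible setup, assuming the torch-style Conv2d/Linear layer classes in singa.autograd (all channel and feature sizes below are illustrative, not from the original):

from singa import autograd

conv1 = autograd.Conv2d(1, 32, 3, padding=1)     # e.g. a 1-channel 28x28 input
conv2 = autograd.Conv2d(32, 32, 3, padding=1)
linear = autograd.Linear(32 * 14 * 14, 10)       # flattened size after one 2x2 max-pool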
Example #3
    print("train_label_shape:", label.shape)

    inputs = Tensor(data=data)
    target = Tensor(data=label)

    # two-layer MLP parameters: 2 -> 3 -> 2, Gaussian-initialized weights, zero biases
    w0 = Tensor(shape=(2, 3), requires_grad=True, stores_grad=True)
    w0.gaussian(0.0, 0.1)
    b0 = Tensor(shape=(1, 3), requires_grad=True, stores_grad=True)
    b0.set_value(0.0)

    w1 = Tensor(shape=(3, 2), requires_grad=True, stores_grad=True)
    w1.gaussian(0.0, 0.1)
    b1 = Tensor(shape=(1, 2), requires_grad=True, stores_grad=True)
    b1.set_value(0.0)

    sgd = optimizer.SGD(0.05)
    # training process
    for i in range(1001):
        x = autograd.matmul(inputs, w0)
        x = autograd.add_bias(x, b0)
        x = autograd.relu(x)
        x = autograd.matmul(x, w1)
        x = autograd.add_bias(x, b1)
        x = autograd.softmax(x)
        loss = autograd.cross_entropy(x, target)
        # autograd.backward yields (param, gradient) pairs for every trainable tensor
        for p, gp in autograd.backward(loss):
            sgd.apply(0, gp, p, "")

        if i % 100 == 0:
            print("training loss = ", tensor.to_numpy(loss)[0])
Example #4
# imports assumed by this snippet
import numpy as np
import onnx
from singa import autograd, optimizer, sonnx, tensor
from singa.tensor import Tensor


def to_categorical(y, num_classes):
    # one-hot encode the integer label vector y
    n = y.shape[0]
    categorical = np.zeros((n, num_classes))
    categorical[np.arange(n), y] = 1
    return categorical
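A quick worked example of what the helper produces:

# >>> to_categorical(np.array([0, 1, 1]), 2)
# array([[1., 0.],
#        [0., 1.],
#        [0., 1.]])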


# 'data' and 'label' come from an earlier, elided loading step
label = to_categorical(label, 2).astype(np.float32)
print('train_data_shape:', data.shape)
print('train_label_shape:', label.shape)

inputs = Tensor(data=data)
target = Tensor(data=label)

model = onnx.load('mlp.onnx')

print('finish init')
sgd = optimizer.SGD(0.00)   # created but never applied below, so no update actually runs

##### backend prepared once, run multiple times
# training process
rep = sonnx.BackendRep(model)
for epoch in range(1):    # note: range(1) means only epoch 0 runs here
    outputs = rep.run([inputs])
    loss = autograd.cross_entropy(outputs[0], target)
    if epoch % 100 == 0:
        print('training loss = ', tensor.to_numpy(loss)[0])

##### backend run only one time
outputs = sonnx.Backend.run_model(model, [inputs])
loss = autograd.cross_entropy(outputs[0], target)
print('training loss = ', tensor.to_numpy(loss)[0])
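The two paths differ in where graph preparation happens: BackendRep prepares the model once and its run can be called repeatedly, while run_model (following the standard ONNX backend convention) re-prepares the model on every call. A rough, hypothetical timing comparison using only the objects defined above:

import time

# re-using the prepared rep: preparation cost is paid once, outside the loop
start = time.time()
for _ in range(10):
    rep.run([inputs])
print('prepared rep, 10 runs:', time.time() - start)

# one-shot API: the model is prepared again on each call
start = time.time()
for _ in range(10):
    sonnx.Backend.run_model(model, [inputs])
print('run_model, 10 runs:', time.time() - start)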