def onnx_loss(a, model, target):
    '''
    Replay the ONNX graph nodes with singa.autograd operations and
    compute the cross-entropy loss.

    a: graph node dictionary, mapping node names to SINGA tensors
    model: ONNX graph model
    target: label tensor
    '''
    for i in model.graph.node:
        if i.op_type == 'Constant':
            # constants are already present in the node dictionary
            pass
        elif i.op_type == 'LeakyRelu':
            # note: approximated with plain relu here
            a[str(i.output[0])] = autograd.relu(a[str(i.input[0])])
        elif i.op_type == 'Relu':
            a[str(i.output[0])] = autograd.relu(a[str(i.input[0])])
        elif i.op_type == 'Softmax':
            a[str(i.output[0])] = autograd.softmax(a[str(i.input[0])])
        elif i.op_type == 'Add':
            # naming convention: bias tensor names end with 'b'
            if str(i.input[1])[-1] == 'b':
                a[str(i.output[0])] = autograd.add_bias(a[str(i.input[0])],
                                                        a[str(i.input[1])])
            else:
                a[str(i.output[0])] = autograd.add(a[str(i.input[0])],
                                                   a[str(i.input[1])])
        elif i.op_type == 'MatMul':
            a[str(i.output[0])] = autograd.matmul(a[str(i.input[0])],
                                                  a[str(i.input[1])])

    loss = autograd.cross_entropy(a['Y'], target)
    return loss
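For orientation, here is a minimal sketch of how onnx_loss might be driven in a training loop. It assumes the ONNX file was produced by the export snippet at the end of this section, that the graph parameters are stored as initializers, and that the graph input is named 'X'; the file name and loop count are illustrative, and inputs, target and sgd are set up as in the training snippets below.

import onnx
from onnx import numpy_helper
from singa import autograd
from singa.tensor import Tensor

model = onnx.load('linear.onnx')

# assumption: seed the node dictionary with the graph initializers
# (weights and biases) converted to trainable SINGA tensors
a = {}
for init in model.graph.initializer:
    a[init.name] = Tensor(data=numpy_helper.to_array(init),
                          requires_grad=True, stores_grad=True)
a['X'] = inputs  # assumption: the graph input is named 'X'

for epoch in range(100):
    loss = onnx_loss(a, model, target)
    for p, gp in autograd.backward(loss):
        sgd.apply(0, gp, p, '')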
def run(model, modeldic, layer, inputs):
    '''
    Replay the ONNX graph nodes with singa.autograd operations for a
    forward pass and return the graph outputs.

    model: ONNX graph model
    modeldic: graph node dictionary, mapping node names to SINGA tensors
    layer: dictionary of pre-built SINGA layers, keyed by output name
    inputs: input tensors for the SINGA model
    '''
    supportLayer = ['Linear', 'Conv', 'MaxPool', 'AveragePool',
                    'BatchNormalization']
    oper = modeldic
    # bind the graph inputs
    for counter, i in enumerate(model.graph.input):
        oper[i.name] = inputs[counter]
    for i in model.graph.node:
        if i.op_type == 'Relu':
            oper[str(i.output[0])] = autograd.relu(oper[str(i.input[0])])
        elif i.op_type == 'Softmax':
            oper[str(i.output[0])] = autograd.softmax(oper[str(i.input[0])])
        elif i.op_type == 'Add':
            oper[str(i.output[0])] = autograd.add(oper[str(i.input[0])],
                                                  oper[str(i.input[1])])
        elif i.op_type == 'MatMul':
            oper[str(i.output[0])] = autograd.matmul(oper[str(i.input[0])],
                                                     oper[str(i.input[1])])
        elif i.op_type == 'Flatten':
            oper[str(i.output[0])] = autograd.flatten(oper[str(i.input[0])])
        elif i.op_type == 'Concat':
            oper[str(i.output[0])] = autograd.cat(
                (oper[str(i.input[0])], oper[str(i.input[1])]),
                int(i.attribute[0].i))
        elif i.op_type == 'Tanh':
            oper[str(i.output[0])] = autograd.tanh(oper[str(i.input[0])])
        elif i.op_type == 'Sigmoid':
            oper[str(i.output[0])] = autograd.sigmoid(oper[str(i.input[0])])
        elif i.op_type == 'Mul':
            oper[str(i.output[0])] = autograd.mul(oper[str(i.input[0])],
                                                  oper[str(i.input[1])])
        elif i.op_type in supportLayer:
            # stateful ops are dispatched to the pre-built SINGA layers
            oper[str(i.output[0])] = layer[str(i.output[0])](oper[str(i.input[0])])
    out = []
    for counter, i in enumerate(model.graph.output):
        out.append(modeldic[i.name])
    return out
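A heavily hedged sketch of calling run for inference follows. How modeldic and layer are populated depends on how the stateful ops were exported, so the dictionaries below are stand-ins: modeldic is assumed to be pre-filled with parameter tensors keyed by node name, and layer is assumed to map each Linear/Conv/pooling node's output name to a ready-made SINGA layer; the names used are hypothetical.

import onnx

model = onnx.load('linear.onnx')

modeldic = {}  # assumption: pre-filled with parameter Tensors, keyed by name
layer = {}     # assumption: pre-filled, e.g. layer['out0'] = autograd.Linear(3, 2)

y = run(model, modeldic, layer, [inputs])[0]  # forward pass only
print(tensor.to_numpy(y))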
print("train_label_shape:", label.shape) inputs = Tensor(data=data) target = Tensor(data=label) w0 = Tensor(shape=(2, 3), requires_grad=True, stores_grad=True) w0.gaussian(0.0, 0.1) b0 = Tensor(shape=(1, 3), requires_grad=True, stores_grad=True) b0.set_value(0.0) w1 = Tensor(shape=(3, 2), requires_grad=True, stores_grad=True) w1.gaussian(0.0, 0.1) b1 = Tensor(shape=(1, 2), requires_grad=True, stores_grad=True) b1.set_value(0.0) sgd = optimizer.SGD(0.05) # training process for i in range(1001): x = autograd.matmul(inputs, w0) x = autograd.add_bias(x, b0) x = autograd.relu(x) x = autograd.matmul(x, w1) x = autograd.add_bias(x, b1) x = autograd.softmax(x) loss = autograd.cross_entropy(x, target) for p, gp in autograd.backward(loss): sgd.apply(0, gp, p, "") if i % 100 == 0: print("training loss = ", tensor.to_numpy(loss)[0])
label = to_categorical(label, 2).astype(np.float32)
print('train_data_shape:', data.shape)
print('train_label_shape:', label.shape)

inputs = Tensor(data=data)
target = Tensor(data=label)

linear1 = autograd.Linear(3, 2)
linear2 = autograd.Linear(2, 2)
linear3 = autograd.Linear(2, 2)

sgd = optimizer.SGD(0.00)

# a single pass with a zero learning rate: this only builds the
# computation graph so that it can be exported below
for i in range(1):
    x = linear1(inputs)
    x = autograd.relu(x)
    x1 = linear2(x)
    x2 = linear3(x)
    x3 = autograd.add(x1, x2)
    y = autograd.softmax(x3)
    loss = autograd.cross_entropy(y, target)
    gradient = autograd.backward(loss)
    for p, gp in gradient:
        sgd.apply(0, gp, p, '')
    if i % 100 == 0:
        print('training loss = ', tensor.to_numpy(loss)[0])

# export the traced graph to ONNX and save it
model = sonnx.to_onnx_model([inputs], [y])
onnx.save(model, 'linear.onnx')
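Once saved, the exported file can be sanity-checked with the standard onnx tooling; this small check uses only the public onnx API.

import onnx

m = onnx.load('linear.onnx')
onnx.checker.check_model(m)  # raises if the graph is structurally invalid
print(onnx.helper.printable_graph(m.graph))  # human-readable graph dump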