Example #1
    def __init__(self):
        self.conv1 = autograd.Conv2d(1, 20, 5, padding=0)
        self.conv2 = autograd.Conv2d(20, 50, 5, padding=0)
        self.linear1 = autograd.Linear(4 * 4 * 50, 500)
        self.linear2 = autograd.Linear(500, 10)
        self.pooling1 = autograd.MaxPool2d(2, 2, padding=0)
        self.pooling2 = autograd.MaxPool2d(2, 2, padding=0)
Example #2
    def __init__(self, num_classes=10, num_channels=1):
        super(CNN, self).__init__()
        self.num_classes = num_classes
        self.input_size = 28
        self.dimension = 4
        # 28x28 input: 5x5 unpadded conv -> 24, 2x2 pool -> 12,
        # second conv -> 8, second pool -> 4, hence 4 * 4 * 50 below
        self.conv1 = autograd.Conv2d(num_channels, 20, 5, padding=0)
        self.conv2 = autograd.Conv2d(20, 50, 5, padding=0)
        self.linear1 = autograd.Linear(4 * 4 * 50, 500)
        self.linear2 = autograd.Linear(500, num_classes)
        self.pooling1 = autograd.MaxPool2d(2, 2, padding=0)
        self.pooling2 = autograd.MaxPool2d(2, 2, padding=0)
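The snippet omits the forward pass that wires these layers together. A plausible wiring is sketched below; it is an assumption, reusing the relu and flatten helpers that appear in Example #5, and it follows the LeNet-style shape arithmetic noted in the comments:

    def forward(self, x):
        y = autograd.relu(self.conv1(x))   # 1x28x28 -> 20x24x24
        y = self.pooling1(y)               # -> 20x12x12
        y = autograd.relu(self.conv2(y))   # -> 50x8x8
        y = self.pooling2(y)               # -> 50x4x4
        y = autograd.flatten(y)            # -> 800 = 4 * 4 * 50
        y = autograd.relu(self.linear1(y))
        return self.linear2(y)             # class logits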
Example #3
    def __init__(self, vocab_size, hidden_size=32):
        super(CharRNN, self).__init__()
        self.rnn = autograd.LSTM(vocab_size, hidden_size)
        self.dense = autograd.Linear(hidden_size, vocab_size)
        self.optimizer = opt.SGD(0.01)
        self.hidden_size = hidden_size
        self.vocab_size = vocab_size
        # initial hidden and cell states for the LSTM
        self.hx = tensor.Tensor((1, self.hidden_size))
        self.cx = tensor.Tensor((1, self.hidden_size))
Example #4
    def combine_node(model, modeldic):
        '''
        Fuse pairs of graph operators into single layers,
        e.g. a MatMul followed by an Add becomes a Linear layer.
        '''
        for idx, i in enumerate(model.graph.node):
            if i.op_type == 'MatMul':
                addlist = Backend.find_add(model, i.output[0])
                # fuse only when exactly one Add consumes this MatMul's output
                if len(addlist) != 1:
                    continue
                addidx = addlist[0]
                if i.name == "not_requires_grad" and model.graph.node[addidx].name == "not_requires_grad":
                    continue
                # rewire the MatMul node to absorb the Add node
                model.graph.node[idx].output[0] = model.graph.node[addidx].output[0]
                model.graph.node[idx].input.append(model.graph.node[addidx].input[1])
                model.graph.node[idx].op_type = 'Linear'
                model.graph.node[addidx].op_type = 'removed'

        layer = {}
        for i in model.graph.node:
            if i.op_type == 'Linear':
                shape = Backend.find_shape(model, i.input[1])
                layer[str(i.output[0])] = autograd.Linear(shape[0], shape[1])
                layer[str(i.output[0])].set_params(W=tensor.to_numpy(modeldic[str(i.input[1])]))
                layer[str(i.output[0])].set_params(b=tensor.to_numpy(modeldic[str(i.input[2])]))


        for i in model.graph.node:
            if i.op_type == 'Conv':
                shape = Backend.find_shape(model, i.input[1])
                layer[str(i.output[0])] = autograd.Conv2d(shape[1], shape[0], shape[2],
                                                          padding=int(i.attribute[0].ints[0]))
                layer[str(i.output[0])].set_params(W=tensor.to_numpy(modeldic[str(i.input[1])].clone()))
                layer[str(i.output[0])].set_params(b=tensor.to_numpy(modeldic[str(i.input[2])].clone()))

        for i in model.graph.node:
            if i.op_type == 'MaxPool':
                k = (int(i.attribute[0].ints[0]), int(i.attribute[0].ints[0]))
                layer[str(i.output[0])] = autograd.MaxPool2d(k, int(i.attribute[2].ints[0]),
                                                             padding=int(i.attribute[1].ints[0]))

        for i in model.graph.node:
            if i.op_type == 'AveragePool':
                k = (int(i.attribute[0].ints[0]), int(i.attribute[0].ints[0]))
                layer[str(i.output[0])] = autograd.AvgPool2d(k, int(i.attribute[2].ints[0]),
                                                             padding=int(i.attribute[1].ints[0]))

        for i in model.graph.node:
            if i.op_type == 'BatchNormalization':
                shape = Backend.find_shape(model, i.input[1])
                layer[str(i.output[0])] = autograd.BatchNorm2d(shape[0])
                layer[str(i.output[0])].set_params(scale=tensor.to_numpy(modeldic[str(i.input[1])].clone()))
                layer[str(i.output[0])].set_params(bias=tensor.to_numpy(modeldic[str(i.input[2])].clone()))

        return model, modeldic, layer
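Since combine_node returns the rewritten graph together with the parameter dictionary and the constructed layer map, a caller threads all three through. A minimal usage sketch, based only on the signature above:

    model, modeldic, layer = Backend.combine_node(model, modeldic)
    # 'layer' now maps each fused node's output name to a ready-made
    # autograd layer (Linear, Conv2d, MaxPool2d, AvgPool2d or BatchNorm2d)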
Example #5
def singa_to_onnx(epochs, use_cpu=False, batchsize=32):
    sgd = opt.SGD(lr=0.1)

    # operations initialization
    conv1 = autograd.Conv2d(1, 8, 3, 2, padding=1) # 28 - 14
    conv2 = autograd.Conv2d(8, 4, 3, 2, padding=1) # 14 - 7
    pooling = autograd.MaxPool2d(3, 2, padding=1) # 7 - 4
    linear = autograd.Linear(64, 10)

    def forward(x, t):
        y = conv1(x)
        y = autograd.relu(y)
        y = conv2(y)
        y = autograd.relu(y)
        y = pooling(y)
        y = autograd.flatten(y)
        y = linear(y)
        loss = autograd.softmax_cross_entropy(y, t)
        return loss, y

    autograd.training = True
    (x_train, y_train), (x_test, y_test), dev = common(use_cpu)

    niter = 1  # demo: one batch per epoch (full data: x_train.shape[0] // batchsize)
    for epoch in range(epochs):
        accuracy_rate = 0.0
        loss_rate = 0.0
        for i in range(niter):
            inputs = tensor.Tensor(
                device=dev,
                data=x_train[i * batchsize : (i + 1) * batchsize],
                stores_grad=False,
                name="input",
            )
            targets = tensor.Tensor(
                device=dev,
                data=y_train[i * batchsize : (i + 1) * batchsize],
                requires_grad=False,
                stores_grad=False,
                name="target",
            )
            loss, y = forward(inputs, targets)
            accuracy_rate += accuracy(
                tensor.to_numpy(y), y_train[i * batchsize : (i + 1) * batchsize]
            )
            loss_rate += tensor.to_numpy(loss)[0]
            for p, gp in autograd.backward(loss):
                sgd.update(p, gp)
        print( "accuracy is {}, loss is {}".format( accuracy_rate / niter, loss_rate / niter))
    model = sonnx.to_onnx_model([inputs], [y])
    sonnx.save(model, "cnn.onnx")
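The saved file can be driven the same way Example #7 exercises the backend: load the ONNX model and hand it to sonnx.prepare. A sketch, assuming the standard onnx.load entry point (the prepare/run calls mirror Example #7):

    import onnx

    loaded = onnx.load("cnn.onnx")
    sg_ir = sonnx.prepare(loaded, device=dev)  # dev as returned by common()
    y_pred = sg_ir.run([inputs])[0]            # reuse the last input batch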
Example #6
    def __init__(self, block, layers, num_classes=1000):
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = autograd.Conv2d(
            3, 64, kernel_size=7, stride=2, padding=3, bias=False
        )
        self.bn1 = autograd.BatchNorm2d(64)
        self.maxpool = autograd.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = autograd.AvgPool2d(7, stride=1)
        self.fc = autograd.Linear(512 * block.expansion, num_classes)
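The constructor only declares the layers. A forward pass in the usual ResNet order is sketched below; it is an assumption that each self.layerN returned by _make_layer is callable, and the flatten helper is borrowed from Example #5:

    def forward(self, x):
        x = autograd.relu(self.bn1(self.conv1(x)))
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)      # 7x7 feature map -> 1x1
        x = autograd.flatten(x)
        return self.fc(x)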
Example #7
    def test_linear(self):
        x = tensor.Tensor(shape=(2, 20), device=gpu_dev)
        x.gaussian(0.0, 1.0)
        x1 = x.clone()
        y = autograd.Linear(20, 1, bias=False)(x)

        # frontend
        model = sonnx.to_onnx([x], [y])
        # print('The model is:\n{}'.format(model))

        # backend
        sg_ir = sonnx.prepare(model, device=gpu_dev)
        y_t = sg_ir.run([x1])

        np.testing.assert_array_almost_equal(tensor.to_numpy(y),
                                             tensor.to_numpy(y_t[0]),
                                             decimal=5)
Example #8
    def __init__(self, num_classes=1000):
        """ Constructor
        Args:
            num_classes: number of classes
        """
        super(Xception, self).__init__()
        self.num_classes = num_classes

        self.conv1 = autograd.Conv2d(3, 32, 3, 2, 0, bias=False)
        self.bn1 = autograd.BatchNorm2d(32)

        self.conv2 = autograd.Conv2d(32, 64, 3, 1, 1, bias=False)
        self.bn2 = autograd.BatchNorm2d(64)
        # do relu here

        self.block1 = Block(64,
                            128,
                            2,
                            2,
                            padding=0,
                            start_with_relu=False,
                            grow_first=True)
        self.block2 = Block(128,
                            256,
                            2,
                            2,
                            padding=0,
                            start_with_relu=True,
                            grow_first=True)
        self.block3 = Block(256,
                            728,
                            2,
                            2,
                            padding=0,
                            start_with_relu=True,
                            grow_first=True)

        self.block4 = Block(728,
                            728,
                            3,
                            1,
                            start_with_relu=True,
                            grow_first=True)
        self.block5 = Block(728,
                            728,
                            3,
                            1,
                            start_with_relu=True,
                            grow_first=True)
        self.block6 = Block(728,
                            728,
                            3,
                            1,
                            start_with_relu=True,
                            grow_first=True)
        self.block7 = Block(728,
                            728,
                            3,
                            1,
                            start_with_relu=True,
                            grow_first=True)

        self.block8 = Block(728,
                            728,
                            3,
                            1,
                            start_with_relu=True,
                            grow_first=True)
        self.block9 = Block(728,
                            728,
                            3,
                            1,
                            start_with_relu=True,
                            grow_first=True)
        self.block10 = Block(728,
                             728,
                             3,
                             1,
                             start_with_relu=True,
                             grow_first=True)
        self.block11 = Block(728,
                             728,
                             3,
                             1,
                             start_with_relu=True,
                             grow_first=True)

        self.block12 = Block(728,
                             1024,
                             2,
                             2,
                             start_with_relu=True,
                             grow_first=False)

        self.conv3 = autograd.SeparableConv2d(1024, 1536, 3, 1, 1)
        self.bn3 = autograd.BatchNorm2d(1536)

        # do relu here
        self.conv4 = autograd.SeparableConv2d(1536, 2048, 3, 1, 1)
        self.bn4 = autograd.BatchNorm2d(2048)

        self.globalpooling = autograd.MaxPool2d(10, 1)
        self.fc = autograd.Linear(2048, num_classes)
Example #9
    x_train = preprocess(train[0])
    y_train = to_categorical(train[1], num_classes)

    x_test = preprocess(test[0])
    y_test = to_categorical(test[1], num_classes)
    print('the shape of training data is', x_train.shape)
    print('the shape of training label is', y_train.shape)
    print('the shape of testing data is', x_test.shape)
    print('the shape of testing label is', y_test.shape)

    # operations initialization
    conv1 = autograd.Conv2d(1, 32, 3, padding=1)
    conv21 = autograd.Conv2d(32, 16, 3, padding=1)
    conv22 = autograd.Conv2d(32, 16, 3, padding=1)
    linear = autograd.Linear(32 * 28 * 28, 10)

    def forward(x, t):
        y = conv1(x)
        y = autograd.tanh(y)
        y1 = conv21(y)
        y2 = conv22(y)
        y = autograd.cat((y1, y2), 1)
        y = autograd.sigmoid(y)
        y = autograd.mul(y, y)
        y = autograd.flatten(y)
        y = linear(y)
        loss = autograd.softmax_cross_entropy(y, t)
        return loss, y

    autograd.training = True
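The fragment stops before the training loop. A continuation in the style of Example #5 is sketched below; epochs, batchsize, dev and the opt.SGD optimizer are assumptions borrowed from that example rather than part of this snippet:

    sgd = opt.SGD(lr=0.05)  # hypothetical optimizer, not in the original fragment
    for epoch in range(epochs):
        for i in range(x_train.shape[0] // batchsize):
            inputs = tensor.Tensor(device=dev,
                                   data=x_train[i * batchsize:(i + 1) * batchsize])
            targets = tensor.Tensor(device=dev,
                                    data=y_train[i * batchsize:(i + 1) * batchsize])
            loss, y = forward(inputs, targets)
            for p, gp in autograd.backward(loss):
                sgd.update(p, gp)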
Example #10
    '''
    y = np.array(y, dtype='int')
    n = y.shape[0]
    categorical = np.zeros((n, num_classes))
    categorical[np.arange(n), y] = 1
    return categorical


label = to_categorical(label, 2).astype(np.float32)
print('train_data_shape:', data.shape)
print('train_label_shape:', label.shape)

inputs = Tensor(data=data)
target = Tensor(data=label)

linear1 = autograd.Linear(3, 2)
linear2 = autograd.Linear(2, 2)
linear3 = autograd.Linear(2, 2)

sgd = optimizer.SGD(0.00)

# training process
for i in range(1):
    x = linear1(inputs)
    x = autograd.relu(x)
    x1 = linear2(x)
    x2 = linear3(x)
    x3 = autograd.add(x1, x2)
    y = autograd.softmax(x3)
    loss = autograd.cross_entropy(y, target)
    gradient = autograd.backward(loss)
Example #11
    y_train = to_categorical(train[1], num_classes)

    x_test = preprocess(test[0])
    y_test = to_categorical(test[1], num_classes)
    print('the shape of training data is', x_train.shape)
    print('the shape of training label is', y_train.shape)
    print('the shape of testing data is', x_test.shape)
    print('the shape of testing label is', y_test.shape)

    # operations initialization
    conv1 = autograd.Conv2d(1, 1, 3, padding=1)
    conv21 = autograd.Conv2d(1, 5, 3, padding=1)
    conv22 = autograd.Conv2d(1, 5, 3, padding=1)
    pooling1 = autograd.MaxPool2d(3, 1, padding=1)
    pooling2 = autograd.AvgPool2d(28, 1, padding=0)
    linear = autograd.Linear(10, 10)
    bn = autograd.BatchNorm2d(10)

    def forward(x, t):
        y = conv1(x)
        y = autograd.tanh(y)
        y1 = conv21(y)
        y2 = conv22(y)
        y = autograd.cat((y1, y2), 1)
        y = autograd.sigmoid(y)
        y = bn(y)
        y = autograd.relu(y)
        y = autograd.mul(y, y)
        y = pooling1(y)
        y = autograd.sigmoid(y)
Example #12
    def __init__(self, sg_ir, last_layers):
        self.sg_ir = sg_ir
        self.last_layers = last_layers
        self.append_linear1 = autograd.Linear(500, 128, bias=False)
        self.append_linear2 = autograd.Linear(128, 32, bias=False)
        self.append_linear3 = autograd.Linear(32, 10, bias=False)
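The three appended layers form a fresh classification head on top of a graph previously imported through sonnx (the sg_ir argument); how the base graph is cut at last_layers is not shown in this fragment. A sketch of the head alone, assuming x is whatever the truncated base graph emits (a 500-dim feature, to match Linear(500, 128)):

    def forward(self, x):
        y = autograd.relu(self.append_linear1(x))
        y = autograd.relu(self.append_linear2(y))
        return self.append_linear3(y)  # 10-way logits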
Example #13
    '''
    y = np.array(y, dtype='int')
    n = y.shape[0]
    categorical = np.zeros((n, num_classes))
    categorical[np.arange(n), y] = 1
    return categorical


label = to_categorical(label, 3).astype(np.float32)
print('train_data_shape:', data.shape)
print('train_label_shape:', label.shape)

inputs = Tensor(data=data)
target = Tensor(data=label)

linear1 = autograd.Linear(3, 3)
linear2 = autograd.Linear(3, 3)
linear3 = autograd.Linear(3, 3)

sgd = optimizer.SGD(0.00)

# training process
for i in range(1):
    x = linear1(inputs)
    x = autograd.relu(x)
    x1 = linear2(x)
    x2 = linear3(x)
    x3 = autograd.add(x1, x2)
    x3 = autograd.softmax(x3)
    loss = autograd.cross_entropy(x3, target)
    gradient = autograd.backward(loss)
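As in Example #10, the loop ends right after autograd.backward: the returned gradients are never applied (and the learning rate is 0.00, so updates would be no-ops anyway). Applying them is sketched below using the opt.SGD interface from Example #5, since the update signature of the optimizer.SGD object built above is not shown in these snippets:

    sgd = opt.SGD(lr=0.05)  # hypothetical; stands in for optimizer.SGD(0.00) above
    for p, gp in gradient:  # 'gradient' as returned by autograd.backward(loss)
        sgd.update(p, gp)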