Example 1
 def __init__(self):
     self.conv1 = autograd.Conv2d(1, 20, 5, padding=0)
     self.conv2 = autograd.Conv2d(20, 50, 5, padding=0)
     self.linear1 = autograd.Linear(4 * 4 * 50, 500)
     self.linear2 = autograd.Linear(500, 10)
     self.pooling1 = autograd.MaxPool2d(2, 2, padding=0)
     self.pooling2 = autograd.MaxPool2d(2, 2, padding=0)
Example 2
 def __init__(self, num_classes=10, num_channels=1):
     super(CNN, self).__init__()
     self.num_classes = num_classes
     self.input_size = 28
     self.dimension = 4
     self.conv1 = autograd.Conv2d(num_channels, 20, 5, padding=0)
     self.conv2 = autograd.Conv2d(20, 50, 5, padding=0)
     self.linear1 = autograd.Linear(4 * 4 * 50, 500)
     self.linear2 = autograd.Linear(500, num_classes)
     self.pooling1 = autograd.MaxPool2d(2, 2, padding=0)
     self.pooling2 = autograd.MaxPool2d(2, 2, padding=0)
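The `4 * 4 * 50` fed into `linear1` in Examples 1 and 2 follows from a 28x28 input (`self.input_size = 28`): each unpadded 5x5 convolution shaves 4 pixels off each spatial dimension and each 2x2 max pooling halves it, giving 28 -> 24 -> 12 -> 8 -> 4. A minimal forward sketch for this class, assuming the functional ops (`autograd.relu`, `autograd.flatten`, `autograd.softmax_cross_entropy`) used in the other examples on this page:

 def forward(self, x, t):
     y = autograd.relu(self.conv1(x))   # (batch, 20, 24, 24)
     y = self.pooling1(y)               # (batch, 20, 12, 12)
     y = autograd.relu(self.conv2(y))   # (batch, 50, 8, 8)
     y = self.pooling2(y)               # (batch, 50, 4, 4)
     y = autograd.flatten(y)            # (batch, 4 * 4 * 50)
     y = autograd.relu(self.linear1(y))
     y = self.linear2(y)
     return autograd.softmax_cross_entropy(y, t), y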
Example 3
    def combine_node(model, modeldic):
        '''
        Fuse primitive operators into layers (e.g., MatMul + Add into Linear).
        '''

        # fuse each MatMul that feeds exactly one Add into a single Linear node
        for idx, i in enumerate(model.graph.node):
            if i.op_type == 'MatMul':
                addlist = Backend.find_add(model, i.output[0])
                if len(addlist) != 1:
                    continue
                addidx = addlist[0]
                if i.name == "not_requires_grad" and model.graph.node[addidx].name == "not_requires_grad":
                    continue
                # take over the Add node's output name and bias input, then
                # retype this node as Linear and mark the Add as removed
                model.graph.node[idx].output[0] = model.graph.node[addidx].output[0]
                model.graph.node[idx].input.append(model.graph.node[addidx].input[1])
                model.graph.node[idx].op_type = 'Linear'
                model.graph.node[addidx].op_type = 'removed'

        # instantiate an autograd layer for each node, keyed by its output name
        layer = {}
        for i in model.graph.node:
            if i.op_type == 'Linear':
                shape = Backend.find_shape(model, i.input[1])
                layer[str(i.output[0])] = autograd.Linear(shape[0], shape[1])
                layer[str(i.output[0])].set_params(W=tensor.to_numpy(modeldic[str(i.input[1])]))
                layer[str(i.output[0])].set_params(b=tensor.to_numpy(modeldic[str(i.input[2])]))


        for i in model.graph.node:
            if i.op_type == 'Conv':
                shape = Backend.find_shape(model, i.input[1])
                layer[str(i.output[0])] = autograd.Conv2d(shape[1], shape[0], shape[2],
                                                          padding=int(i.attribute[0].ints[0]))
                layer[str(i.output[0])].set_params(W=tensor.to_numpy(modeldic[str(i.input[1])].clone()))
                layer[str(i.output[0])].set_params(b=tensor.to_numpy(modeldic[str(i.input[2])].clone()))

        for i in model.graph.node:
            if i.op_type == 'MaxPool':
                k = (int(i.attribute[0].ints[0]), int(i.attribute[0].ints[0]))
                layer[str(i.output[0])] = autograd.MaxPool2d(k, int(i.attribute[2].ints[0]),
                                                             padding=int(i.attribute[1].ints[0]))

        for i in model.graph.node:
            if i.op_type == 'AveragePool':
                k = (int(i.attribute[0].ints[0]), int(i.attribute[0].ints[0]))
                layer[str(i.output[0])] = autograd.AvgPool2d(k, int(i.attribute[2].ints[0]),
                                                             padding=int(i.attribute[1].ints[0]))

        for i in model.graph.node:
            if i.op_type == 'BatchNormalization':
                shape = Backend.find_shape(model, i.input[1])
                layer[str(i.output[0])] = autograd.BatchNorm2d(shape[0])
                layer[str(i.output[0])].set_params(scale=tensor.to_numpy(modeldic[str(i.input[1])].clone()))
                layer[str(i.output[0])].set_params(bias=tensor.to_numpy(modeldic[str(i.input[2])].clone()))

        return model, modeldic, layer
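The fusion above exploits the identity behind a fully connected layer: `MatMul(x, W)` followed by `Add(·, b)` computes exactly `x @ W + b`, which is what the resulting `Linear` node evaluates with `W` and `b` as its `set_params` weights. A tiny numpy sketch of that identity (hypothetical shapes, numpy used only for illustration):

import numpy as np

x = np.random.randn(2, 3)        # batch of 2 samples, 3 features
W = np.random.randn(3, 4)        # MatMul weight -> Linear W
b = np.random.randn(4)           # Add bias      -> Linear b

two_nodes = np.add(np.matmul(x, W), b)   # original MatMul + Add pattern
fused = x @ W + b                        # single fused Linear computation
assert np.allclose(two_nodes, fused)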
Example 4
def singa_to_onnx(epochs, use_cpu=False, batchsize=32):
    sgd = opt.SGD(lr=0.1)

    # operations initialization
    conv1 = autograd.Conv2d(1, 8, 3, 2, padding=1)  # 28x28 -> 14x14
    conv2 = autograd.Conv2d(8, 4, 3, 2, padding=1)  # 14x14 -> 7x7
    pooling = autograd.MaxPool2d(3, 2, padding=1)   # 7x7 -> 4x4
    linear = autograd.Linear(64, 10)                # 4 channels * 4 * 4 = 64

    def forward(x, t):
        y = conv1(x)
        y = autograd.relu(y)
        y = conv2(y)
        y = autograd.relu(y)
        y = pooling(y)
        y = autograd.flatten(y)
        y = linear(y)
        loss = autograd.softmax_cross_entropy(y, t)
        return loss, y

    autograd.training = True
    (x_train, y_train), (x_test, y_test), dev = common(use_cpu)

    niter = 1  # demo run; x_train.shape[0] // batchsize would cover the full set
    for epoch in range(epochs):
        accuracy_rate = 0.0
        loss_rate = 0.0
        for i in range(niter):
            inputs = tensor.Tensor(
                device=dev,
                data=x_train[i * batchsize : (i + 1) * batchsize],
                stores_grad=False,
                name="input",
            )
            targets = tensor.Tensor(
                device=dev,
                data=y_train[i * batchsize : (i + 1) * batchsize],
                requires_grad=False,
                stores_grad=False,
                name="target",
            )
            loss, y = forward(inputs, targets)
            accuracy_rate += accuracy(
                tensor.to_numpy(y), y_train[i * batchsize : (i + 1) * batchsize]
            )
            loss_rate += tensor.to_numpy(loss)[0]
            for p, gp in autograd.backward(loss):
                sgd.update(p, gp)
        print("accuracy is {}, loss is {}".format(accuracy_rate / niter, loss_rate / niter))
    model = sonnx.to_onnx_model([inputs], [y])
    sonnx.save(model, "cnn.onnx")
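The exported `cnn.onnx` can be read back and executed through the SINGA backend; a minimal round-trip sketch, assuming `onnx.load` from the onnx package and the `sonnx.prepare`/`run` API shown in Example 6 below:

import onnx

loaded = onnx.load("cnn.onnx")              # deserialize the saved graph
sg_ir = sonnx.prepare(loaded, device=dev)   # build SINGA ops from the graph
y_pred = sg_ir.run([inputs])                # same input tensors as above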
Example 5
 def __init__(self, block, layers, num_classes=1000):
     self.inplanes = 64
     super(ResNet, self).__init__()
     self.conv1 = autograd.Conv2d(
         3, 64, kernel_size=7, stride=2, padding=3, bias=False
     )
     self.bn1 = autograd.BatchNorm2d(64)
     self.maxpool = autograd.MaxPool2d(kernel_size=3, stride=2, padding=1)
     self.layer1 = self._make_layer(block, 64, layers[0])
     self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
     self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
     self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
     self.avgpool = autograd.AvgPool2d(7, stride=1)
     self.fc = autograd.Linear(512 * block.expansion, num_classes)
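`layers` holds the block count per stage, so the standard depths come from the constructor arguments alone. A hedged usage sketch, assuming `BasicBlock` (expansion 1) and `Bottleneck` (expansion 4) classes following the canonical ResNet definition (neither is shown in this snippet):

resnet18 = ResNet(BasicBlock, [2, 2, 2, 2])    # fc sees 512 * 1 features
resnet50 = ResNet(Bottleneck, [3, 4, 6, 3])    # fc sees 512 * 4 features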
Example 6
    def test_max_pool(self):
        x = tensor.Tensor(shape=(2, 3, 4, 4), device=gpu_dev)
        x.gaussian(0.0, 1.0)
        y = autograd.MaxPool2d(2, 2, 0)(x)

        # frontend
        model = sonnx.to_onnx([x], [y])
        # print('The model is:\n{}'.format(model))

        # backend
        sg_ir = sonnx.prepare(model, device=gpu_dev)
        y_t = sg_ir.run([x])

        np.testing.assert_array_almost_equal(tensor.to_numpy(y),
                                             tensor.to_numpy(y_t[0]),
                                             decimal=5)
Example 7
    def __init__(self,
                 in_filters,
                 out_filters,
                 reps,
                 strides=1,
                 padding=0,
                 start_with_relu=True,
                 grow_first=True):
        super(Block, self).__init__()

        if out_filters != in_filters or strides != 1:
            self.skip = autograd.Conv2d(in_filters,
                                        out_filters,
                                        1,
                                        stride=strides,
                                        padding=padding,
                                        bias=False)
            self.skipbn = autograd.BatchNorm2d(out_filters)
        else:
            self.skip = None

        self.layers = []

        filters = in_filters
        if grow_first:
            self.layers.append(autograd.ReLU())
            self.layers.append(
                autograd.SeparableConv2d(in_filters,
                                         out_filters,
                                         3,
                                         stride=1,
                                         padding=1,
                                         bias=False))
            self.layers.append(autograd.BatchNorm2d(out_filters))
            filters = out_filters

        for i in range(reps - 1):
            self.layers.append(autograd.ReLU())
            self.layers.append(
                autograd.SeparableConv2d(filters,
                                         filters,
                                         3,
                                         stride=1,
                                         padding=1,
                                         bias=False))
            self.layers.append(autograd.BatchNorm2d(filters))

        if not grow_first:
            self.layers.append(autograd.ReLU())
            self.layers.append(
                autograd.SeparableConv2d(in_filters,
                                         out_filters,
                                         3,
                                         stride=1,
                                         padding=1,
                                         bias=False))
            self.layers.append(autograd.BatchNorm2d(out_filters))

        if not start_with_relu:
            self.layers = self.layers[1:]
        else:
            self.layers[0] = autograd.ReLU()

        if strides != 1:
            self.layers.append(autograd.MaxPool2d(3, strides, padding + 1))
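Since the block keeps its ops in `self.layers` rather than as named attributes, its forward pass presumably chains them and then adds the (optionally projected) skip branch. A hedged sketch of such a `__call__`, assuming `autograd.add` for the residual sum (none of this is shown in the snippet):

    def __call__(self, x):
        y = x
        for layer in self.layers:        # ReLU / SeparableConv2d / BatchNorm2d chain
            y = layer(y)
        if self.skip is not None:        # 1x1 projection when the shape changes
            x = self.skipbn(self.skip(x))
        return autograd.add(y, x)        # residual connection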
Example 8
    def __init__(self, num_classes=1000):
        """ Constructor
        Args:
            num_classes: number of classes
        """
        super(Xception, self).__init__()
        self.num_classes = num_classes

        self.conv1 = autograd.Conv2d(3, 32, 3, 2, 0, bias=False)
        self.bn1 = autograd.BatchNorm2d(32)

        self.conv2 = autograd.Conv2d(32, 64, 3, 1, 1, bias=False)
        self.bn2 = autograd.BatchNorm2d(64)
        # do relu here

        self.block1 = Block(64,
                            128,
                            2,
                            2,
                            padding=0,
                            start_with_relu=False,
                            grow_first=True)
        self.block2 = Block(128,
                            256,
                            2,
                            2,
                            padding=0,
                            start_with_relu=True,
                            grow_first=True)
        self.block3 = Block(256,
                            728,
                            2,
                            2,
                            padding=0,
                            start_with_relu=True,
                            grow_first=True)

        self.block4 = Block(728,
                            728,
                            3,
                            1,
                            start_with_relu=True,
                            grow_first=True)
        self.block5 = Block(728,
                            728,
                            3,
                            1,
                            start_with_relu=True,
                            grow_first=True)
        self.block6 = Block(728,
                            728,
                            3,
                            1,
                            start_with_relu=True,
                            grow_first=True)
        self.block7 = Block(728,
                            728,
                            3,
                            1,
                            start_with_relu=True,
                            grow_first=True)

        self.block8 = Block(728,
                            728,
                            3,
                            1,
                            start_with_relu=True,
                            grow_first=True)
        self.block9 = Block(728,
                            728,
                            3,
                            1,
                            start_with_relu=True,
                            grow_first=True)
        self.block10 = Block(728,
                             728,
                             3,
                             1,
                             start_with_relu=True,
                             grow_first=True)
        self.block11 = Block(728,
                             728,
                             3,
                             1,
                             start_with_relu=True,
                             grow_first=True)

        self.block12 = Block(728,
                             1024,
                             2,
                             2,
                             start_with_relu=True,
                             grow_first=False)

        self.conv3 = autograd.SeparableConv2d(1024, 1536, 3, 1, 1)
        self.bn3 = autograd.BatchNorm2d(1536)

        # do relu here
        self.conv4 = autograd.SeparableConv2d(1536, 2048, 3, 1, 1)
        self.bn4 = autograd.BatchNorm2d(2048)

        self.globalpooling = autograd.MaxPool2d(10, 1)
        self.fc = autograd.Linear(2048, num_classes)
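Why a 10x10 window for `globalpooling`? Assuming the canonical 299x299 Xception input (not stated in the snippet), the stride-2 stem conv and the four stride-2 blocks shrink the feature map to exactly 10x10; the stride-1 convs and middle blocks preserve size. A quick check of the arithmetic:

def out_size(n, k, s, p):
    # spatial size after a conv/pool: (n + 2p - k) // s + 1
    return (n + 2 * p - k) // s + 1

n = 299                       # canonical Xception input resolution
n = out_size(n, 3, 2, 0)      # conv1       -> 149
n = out_size(n, 3, 2, 1)      # block1 pool -> 75
n = out_size(n, 3, 2, 1)      # block2 pool -> 38
n = out_size(n, 3, 2, 1)      # block3 pool -> 19
n = out_size(n, 3, 2, 1)      # block12 pool -> 10
assert n == 10                # matches MaxPool2d(10, 1) above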
Example 9
    x_train = preprocess(train[0])
    y_train = to_categorical(train[1], num_classes)

    x_test = preprocess(test[0])
    y_test = to_categorical(test[1], num_classes)
    print('the shape of training data is', x_train.shape)
    print('the shape of training label is', y_train.shape)
    print('the shape of testing data is', x_test.shape)
    print('the shape of testing label is', y_test.shape)

    # operations initialization
    conv1 = autograd.Conv2d(1, 1, 3, padding=1)
    conv21 = autograd.Conv2d(1, 5, 3, padding=1)
    conv22 = autograd.Conv2d(1, 5, 3, padding=1)
    pooling1 = autograd.MaxPool2d(3, 1, padding=1)
    pooling2 = autograd.AvgPool2d(28, 1, padding=0)
    linear = autograd.Linear(10, 10)
    bn = autograd.BatchNorm2d(10)

    def forward(x, t):
        y = conv1(x)
        y = autograd.tanh(y)
        y1 = conv21(y)
        y2 = conv22(y)
        y = autograd.cat((y1, y2), 1)
        y = autograd.sigmoid(y)
        y = bn(y)
        y = autograd.relu(y)
        y = autograd.mul(y, y)
        y = pooling1(y)