Example #1
    def test_retraining(self):
        # forward
        x = tensor.Tensor(shape=(2, 3, 3, 3), device=gpu_dev)
        x.gaussian(0.0, 1.0)
        x1 = autograd.Conv2d(3, 1, 2)(x)
        x2 = autograd.Conv2d(1, 1, 2)(x1)
        y = autograd.Flatten()(x2)[0]
        y_t = tensor.Tensor(shape=(2, 1), device=gpu_dev)
        y_t.gaussian(0.0, 1.0)
        loss = autograd.MeanSquareError()(y, y_t)[0]
        # backward
        sgd = opt.SGD(lr=0.01)
        for p, gp in autograd.backward(loss):
            sgd.update(p, gp)
        sgd.step()

        # frontend
        model = sonnx.to_onnx([x], [y])
        # print('The model is:\n{}'.format(model))

        # backend
        sg_ir = sonnx.prepare(model, device=gpu_dev)
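        # mark every tensor in the imported graph as trainable so the
        # ONNX-loaded model can be retrained end to end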
        for idx, tens in sg_ir.tensor_map.items():
            tens.requires_grad = True
            tens.stores_grad = True
            sg_ir.tensor_map[idx] = tens
        # forward
        y_o = sg_ir.run([x])[0]
        # backward
        loss = autograd.MeanSquareError()(y_o, y_t)[0]
        sgd = opt.SGD(lr=0.01)
        for p, gp in autograd.backward(loss):
            sgd.update(p, gp)
        sgd.step()
Example #2
 def __init__(self):
     self.conv1 = autograd.Conv2d(1, 20, 5, padding=0)
     self.conv2 = autograd.Conv2d(20, 50, 5, padding=0)
     self.linear1 = autograd.Linear(4 * 4 * 50, 500)
     self.linear2 = autograd.Linear(500, 10)
     self.pooling1 = autograd.MaxPool2d(2, 2, padding=0)
     self.pooling2 = autograd.MaxPool2d(2, 2, padding=0)
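
These layer shapes match the classic MNIST LeNet. The source file's forward pass is not shown here; a minimal sketch of what it plausibly looks like (the method name and ReLU placement are assumptions):

 def forward(self, x):
     y = autograd.relu(self.conv1(x))
     y = self.pooling1(y)
     y = autograd.relu(self.conv2(y))
     y = self.pooling2(y)
     y = autograd.flatten(y)
     y = autograd.relu(self.linear1(y))
     return self.linear2(y)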
Example #3
    def test_transfer_learning(self):
        # forward
        x = tensor.Tensor(shape=(2, 3, 3, 3), device=gpu_dev)
        x.gaussian(0.0, 1.0)
        x1 = autograd.Conv2d(3, 1, 2)(x)
        y = autograd.Flatten()(x1)[0]
        y_t = tensor.Tensor(shape=(2, 4), device=gpu_dev)
        y_t.gaussian(0.0, 1.0)
        loss = autograd.MeanSquareError()(y, y_t)[0]
        # backward
        sgd = opt.SGD(lr=0.01)
        for p, gp in autograd.backward(loss):
            sgd.update(p, gp)
        sgd.step()

        # frontend
        model = sonnx.to_onnx([x], [y])
        # print('The model is:\n{}'.format(model))

        # backend
        sg_ir = sonnx.prepare(model, device=gpu_dev)
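        # last_layers=-1 below runs the imported graph up to, but not including,
        # its final layer; fresh layers are then attached for transfer learning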
        # forward
        x1 = sg_ir.run([x], last_layers=-1)[0]
        x2 = autograd.Conv2d(1, 1, 2)(x1)
        y_o = autograd.Flatten()(x2)[0]
        # backward
        y_ot = tensor.Tensor(shape=(2, 1), device=gpu_dev)
        y_ot.gaussian(0.0, 1.0)
        loss = autograd.MeanSquareError()(y_o, y_ot)[0]
        sgd = opt.SGD(lr=0.01)
        for p, gp in autograd.backward(loss):
            sgd.update(p, gp)
        sgd.step()
Example #4
 def __init__(self, num_classes=10, num_channels=1):
     super(CNN, self).__init__()
     self.num_classes = num_classes
     self.input_size = 28
     self.dimension = 4
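     # shape flow for the 28x28 input (self.input_size): 28 -> conv 5x5 -> 24
     # -> pool 2x2 -> 12 -> conv 5x5 -> 8 -> pool 2x2 -> 4, which yields the
     # 4 * 4 * 50 flatten size used by linear1 below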
     self.conv1 = autograd.Conv2d(num_channels, 20, 5, padding=0)
     self.conv2 = autograd.Conv2d(20, 50, 5, padding=0)
     self.linear1 = autograd.Linear(4 * 4 * 50, 500)
     self.linear2 = autograd.Linear(500, num_classes)
     self.pooling1 = autograd.MaxPool2d(2, 2, padding=0)
     self.pooling2 = autograd.MaxPool2d(2, 2, padding=0)
Example #5
def singa_to_onnx(epochs, use_cpu=False, batchsize=32):
    sgd = opt.SGD(lr=0.1)

    # operations initialization
    conv1 = autograd.Conv2d(1, 8, 3, 2, padding=1) # 28 - 14
    conv2 = autograd.Conv2d(8, 4, 3, 2, padding=1) # 14 - 7
    pooling = autograd.MaxPool2d(3, 2, padding=1) # 7 - 4
    linear = autograd.Linear(64, 10)

    def forward(x, t):
        y = conv1(x)
        y = autograd.relu(y)
        y = conv2(y)
        y = autograd.relu(y)
        y = pooling(y)
        y = autograd.flatten(y)
        y = linear(y)
        loss = autograd.softmax_cross_entropy(y, t)
        return loss, y

    autograd.training = True
    (x_train, y_train), (x_test, y_test), dev = common(use_cpu)

    niter = 1 # x_train.shape[0] // batchsize
    for epoch in range(epochs):
        accuracy_rate = 0.0
        loss_rate = 0.0
        for i in range(niter):
            inputs = tensor.Tensor(
                device=dev,
                data=x_train[i * batchsize : (i + 1) * batchsize],
                stores_grad=False,
                name="input",
            )
            targets = tensor.Tensor(
                device=dev,
                data=y_train[i * batchsize : (i + 1) * batchsize],
                requires_grad=False,
                stores_grad=False,
                name="target",
            )
            loss, y = forward(inputs, targets)
            accuracy_rate += accuracy(
                tensor.to_numpy(y), y_train[i * batchsize : (i + 1) * batchsize]
            )
            loss_rate += tensor.to_numpy(loss)[0]
            for p, gp in autograd.backward(loss):
                sgd.update(p, gp)
        print( "accuracy is {}, loss is {}".format( accuracy_rate / niter, loss_rate / niter))
    model = sonnx.to_onnx_model([inputs], [y])
    sonnx.save(model, "cnn.onnx")
Example #6
    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
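        # Bottleneck.expansion (4 in standard ResNet) is a class attribute
        # defined outside this snippet; self.expansion below depends on it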
        self.conv1 = autograd.Conv2d(
            inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = autograd.BatchNorm2d(planes)
        self.conv2 = autograd.Conv2d(planes, planes, kernel_size=3,
                                     stride=stride,
                                     padding=1, bias=False)
        self.bn2 = autograd.BatchNorm2d(planes)
        self.conv3 = autograd.Conv2d(
            planes, planes * self.expansion, kernel_size=1, bias=False)
        self.bn3 = autograd.BatchNorm2d(planes * self.expansion)

        self.downsample = downsample
        self.stride = stride
Example #7
    def _make_layer(self, block, planes, blocks, stride=1):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            conv = autograd.Conv2d(self.inplanes,
                                   planes * block.expansion,
                                   kernel_size=1,
                                   stride=stride,
                                   bias=False)
            bn = autograd.BatchNorm2d(planes * block.expansion)

            def downsample(x):
                return bn(conv(x))

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        def forward(x):
            for layer in layers:
                x = layer(x)
            return x

        return forward
Example #8
def conv3x3(in_planes, out_planes, stride=1):
    """3x3 convolution with padding"""
    return autograd.Conv2d(in_planes,
                           out_planes,
                           kernel_size=3,
                           stride=stride,
                           padding=1,
                           bias=False)
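
As a usage sketch, conv3x3 is the building block of a ResNet basic block; a minimal (assumed) wiring, with illustrative names not taken from the original file:

# two 3x3 convolutions plus a residual connection
conv1 = conv3x3(64, 64)
bn1 = autograd.BatchNorm2d(64)
conv2 = conv3x3(64, 64)
bn2 = autograd.BatchNorm2d(64)

def basic_block(x):
    residual = x
    y = autograd.relu(bn1(conv1(x)))
    y = bn2(conv2(y))
    return autograd.relu(autograd.add(y, residual))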
Example #9
    def test_inference(self):
        x = tensor.Tensor(shape=(2, 3, 3, 3), device=gpu_dev)
        x.gaussian(0.0, 1.0)
        x1 = autograd.Conv2d(3, 1, 2)(x)
        y = autograd.Conv2d(1, 1, 2)(x1)

        # frontend
        model = sonnx.to_onnx([x], [y])
        # print('The model is:\n{}'.format(model))

        # backend
        sg_ir = sonnx.prepare(model, device=gpu_dev)
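        # last_layers=-1 runs the imported graph up to, but not including, its
        # final layer, so the output should equal x1 (the first conv's result)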
        y_t = sg_ir.run([x], last_layers=-1)

        np.testing.assert_array_almost_equal(tensor.to_numpy(x1),
                                             tensor.to_numpy(y_t[0]),
                                             decimal=5)
Example #10
    def test_conv2d_cpu(self):
        # (in_channels, out_channels, kernel_size)
        conv_1 = autograd.Conv2d(3, 1, 2)
        conv_without_bias_1 = autograd.Conv2d(3, 1, 2, bias=False)

        cpu_input_tensor = tensor.Tensor(shape=(2, 3, 3, 3), device=cpu_dev)
        cpu_input_tensor.gaussian(0.0, 1.0)

        y = conv_1(cpu_input_tensor)  # PyTensor
        # build an upstream gradient before calling backward; the SINGA unit
        # tests construct it as a C++ CTensor (assumed to alias singa_wrap.Tensor)
        dy = CTensor([2, 1, 2, 2])
        dy.SetFloatValue(0.1)
        dx, dW, db = y.creator.backward(dy)  # CTensor

        self.check_shape(y.shape, (2, 1, 2, 2))
        self.check_shape(dx.shape(), (2, 3, 3, 3))
        self.check_shape(dW.shape(), (1, 3, 2, 2))
        self.check_shape(db.shape(), (1, ))

        # forward without bias
        y_without_bias = conv_without_bias_1(cpu_input_tensor)
        self.check_shape(y_without_bias.shape, (2, 1, 2, 2))
Example #11
File: sonnx.py, project: joddiy/singa-onnx
    def combine_node(model, modeldic):
        '''Combine consecutive ONNX operators into SINGA layers.'''

        # fuse each MatMul with its single following Add into a Linear node
        for idx, i in enumerate(model.graph.node):
            if i.op_type == 'MatMul':
                addlist = Backend.find_add(model, i.output[0])
                if len(addlist) != 1:
                    continue
                addidx = addlist[0]
                if i.name == "not_requires_grad" and model.graph.node[addidx].name == "not_requires_grad":
                    continue
                model.graph.node[idx].output[0] = model.graph.node[addidx].output[0]
                model.graph.node[idx].input.append(model.graph.node[addidx].input[1])
                model.graph.node[idx].op_type = 'Linear'
                model.graph.node[addidx].op_type = 'removed'

        # instantiate a SINGA layer for each recognized node and copy its weights
        layer = {}
        for i in model.graph.node:
            if i.op_type == 'Linear':
                shape = Backend.find_shape(model, i.input[1])
                layer[str(i.output[0])] = autograd.Linear(shape[0], shape[1])
                layer[str(i.output[0])].set_params(W=tensor.to_numpy(modeldic[str(i.input[1])]))
                layer[str(i.output[0])].set_params(b=tensor.to_numpy(modeldic[str(i.input[2])]))

        for i in model.graph.node:
            if i.op_type == 'Conv':
                shape = Backend.find_shape(model, i.input[1])
                layer[str(i.output[0])] = autograd.Conv2d(shape[1], shape[0], shape[2],
                                                          padding=int(i.attribute[0].ints[0]))
                layer[str(i.output[0])].set_params(W=tensor.to_numpy(modeldic[str(i.input[1])].clone()))
                layer[str(i.output[0])].set_params(b=tensor.to_numpy(modeldic[str(i.input[2])].clone()))

        for i in model.graph.node:
            if i.op_type == 'MaxPool':
                k = (int(i.attribute[0].ints[0]), int(i.attribute[0].ints[0]))
                layer[str(i.output[0])] = autograd.MaxPool2d(k, int(i.attribute[2].ints[0]),
                                                             padding=int(i.attribute[1].ints[0]))

        for i in model.graph.node:
            if i.op_type == 'AveragePool':
                k = (int(i.attribute[0].ints[0]), int(i.attribute[0].ints[0]))
                layer[str(i.output[0])] = autograd.AvgPool2d(k, int(i.attribute[2].ints[0]),
                                                             padding=int(i.attribute[1].ints[0]))

        for i in model.graph.node:
            if i.op_type == 'BatchNormalization':
                shape = Backend.find_shape(model, i.input[1])
                layer[str(i.output[0])] = autograd.BatchNorm2d(shape[0])
                layer[str(i.output[0])].set_params(scale=tensor.to_numpy(modeldic[str(i.input[1])].clone()))
                layer[str(i.output[0])].set_params(bias=tensor.to_numpy(modeldic[str(i.input[2])].clone()))

        return model, modeldic, layer
Example #12
 def __init__(self, block, layers, num_classes=1000):
     self.inplanes = 64
     super(ResNet, self).__init__()
     self.conv1 = autograd.Conv2d(
         3, 64, kernel_size=7, stride=2, padding=3, bias=False
     )
     self.bn1 = autograd.BatchNorm2d(64)
     self.maxpool = autograd.MaxPool2d(kernel_size=3, stride=2, padding=1)
     self.layer1 = self._make_layer(block, 64, layers[0])
     self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
     self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
     self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
     self.avgpool = autograd.AvgPool2d(7, stride=1)
     self.fc = autograd.Linear(512 * block.expansion, num_classes)
Example #13
    def test_conv2d(self):
        x = tensor.Tensor(shape=(2, 3, 3, 3), device=gpu_dev)
        x.gaussian(0.0, 1.0)
        y = autograd.Conv2d(3, 1, 2)(x)

        # frontend
        model = sonnx.to_onnx([x], [y])
        # backend
        sg_ir = sonnx.prepare(model, device=gpu_dev)
        y_t = sg_ir.run([x])

        np.testing.assert_array_almost_equal(tensor.to_numpy(y),
                                             tensor.to_numpy(y_t[0]),
                                             decimal=5)
Example #14
    epochs = 1

    sgd = opt.SGD(lr=0.01)

    x_train = preprocess(train[0])
    y_train = to_categorical(train[1], num_classes)

    x_test = preprocess(test[0])
    y_test = to_categorical(test[1], num_classes)
    print("the shape of training data is", x_train.shape)
    print("the shape of training label is", y_train.shape)
    print("the shape of testing data is", x_test.shape)
    print("the shape of testing label is", y_test.shape)

    # operations initialization
    conv1 = autograd.Conv2d(1, 32, 3, padding=1, bias=False)
    bn1 = autograd.BatchNorm2d(32)
    conv21 = autograd.Conv2d(32, 16, 3, padding=1)
    conv22 = autograd.Conv2d(32, 16, 3, padding=1)
    bn2 = autograd.BatchNorm2d(32)
    linear = autograd.Linear(32 * 28 * 28, 10)
    pooling1 = autograd.MaxPool2d(3, 1, padding=1)
    pooling2 = autograd.AvgPool2d(3, 1, padding=1)

    def forward(x, t):
        y = conv1(x)
        y = autograd.relu(y)
        y = bn1(y)
        y = pooling1(y)
        y1 = conv21(y)
        y2 = conv22(y)
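        # (snippet truncated here; a plausible continuation, mirroring the
        # concatenate/activate/pool pattern of examples #17 and #18 and using
        # the bn2, pooling2, and linear operations defined above -- an assumption:)
        y = autograd.cat((y1, y2), 1)
        y = autograd.relu(y)
        y = bn2(y)
        y = pooling2(y)
        y = autograd.flatten(y)
        y = linear(y)
        loss = autograd.softmax_cross_entropy(y, t)
        return loss, y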
Example #15
    def __init__(self,
                 in_filters,
                 out_filters,
                 reps,
                 strides=1,
                 padding=0,
                 start_with_relu=True,
                 grow_first=True):
        super(Block, self).__init__()

        if out_filters != in_filters or strides != 1:
            self.skip = autograd.Conv2d(in_filters,
                                        out_filters,
                                        1,
                                        stride=strides,
                                        padding=padding,
                                        bias=False)
            self.skipbn = autograd.BatchNorm2d(out_filters)
        else:
            self.skip = None

        self.layers = []

        filters = in_filters
        if grow_first:
            self.layers.append(autograd.ReLU())
            self.layers.append(
                autograd.SeparableConv2d(in_filters,
                                         out_filters,
                                         3,
                                         stride=1,
                                         padding=1,
                                         bias=False))
            self.layers.append(autograd.BatchNorm2d(out_filters))
            filters = out_filters

        for i in range(reps - 1):
            self.layers.append(autograd.ReLU())
            self.layers.append(
                autograd.SeparableConv2d(filters,
                                         filters,
                                         3,
                                         stride=1,
                                         padding=1,
                                         bias=False))
            self.layers.append(autograd.BatchNorm2d(filters))

        if not grow_first:
            self.layers.append(autograd.ReLU())
            self.layers.append(
                autograd.SeparableConv2d(in_filters,
                                         out_filters,
                                         3,
                                         stride=1,
                                         padding=1,
                                         bias=False))
            self.layers.append(autograd.BatchNorm2d(out_filters))

        if not start_with_relu:
            self.layers = self.layers[1:]
        else:
            self.layers[0] = autograd.ReLU()

        if strides != 1:
            self.layers.append(autograd.MaxPool2d(3, strides, padding + 1))
Example #16
    def __init__(self, num_classes=1000):
        """ Constructor
        Args:
            num_classes: number of classes
        """
        super(Xception, self).__init__()
        self.num_classes = num_classes

        self.conv1 = autograd.Conv2d(3, 32, 3, 2, 0, bias=False)
        self.bn1 = autograd.BatchNorm2d(32)

        self.conv2 = autograd.Conv2d(32, 64, 3, 1, 1, bias=False)
        self.bn2 = autograd.BatchNorm2d(64)
        # do relu here

        self.block1 = Block(64,
                            128,
                            2,
                            2,
                            padding=0,
                            start_with_relu=False,
                            grow_first=True)
        self.block2 = Block(128,
                            256,
                            2,
                            2,
                            padding=0,
                            start_with_relu=True,
                            grow_first=True)
        self.block3 = Block(256,
                            728,
                            2,
                            2,
                            padding=0,
                            start_with_relu=True,
                            grow_first=True)

        self.block4 = Block(728,
                            728,
                            3,
                            1,
                            start_with_relu=True,
                            grow_first=True)
        self.block5 = Block(728,
                            728,
                            3,
                            1,
                            start_with_relu=True,
                            grow_first=True)
        self.block6 = Block(728,
                            728,
                            3,
                            1,
                            start_with_relu=True,
                            grow_first=True)
        self.block7 = Block(728,
                            728,
                            3,
                            1,
                            start_with_relu=True,
                            grow_first=True)

        self.block8 = Block(728,
                            728,
                            3,
                            1,
                            start_with_relu=True,
                            grow_first=True)
        self.block9 = Block(728,
                            728,
                            3,
                            1,
                            start_with_relu=True,
                            grow_first=True)
        self.block10 = Block(728,
                             728,
                             3,
                             1,
                             start_with_relu=True,
                             grow_first=True)
        self.block11 = Block(728,
                             728,
                             3,
                             1,
                             start_with_relu=True,
                             grow_first=True)

        self.block12 = Block(728,
                             1024,
                             2,
                             2,
                             start_with_relu=True,
                             grow_first=False)

        self.conv3 = autograd.SeparableConv2d(1024, 1536, 3, 1, 1)
        self.bn3 = autograd.BatchNorm2d(1536)

        # do relu here
        self.conv4 = autograd.SeparableConv2d(1536, 2048, 3, 1, 1)
        self.bn4 = autograd.BatchNorm2d(2048)

        self.globalpooling = autograd.MaxPool2d(10, 1)
        self.fc = autograd.Linear(2048, num_classes)
Example #17
File: cnn.py, project: joddiy/singa-onnx
    epochs = 1

    sgd = opt.SGD(lr=0.01)

    x_train = preprocess(train[0])
    y_train = to_categorical(train[1], num_classes)

    x_test = preprocess(test[0])
    y_test = to_categorical(test[1], num_classes)
    print('the shape of training data is', x_train.shape)
    print('the shape of training label is', y_train.shape)
    print('the shape of testing data is', x_test.shape)
    print('the shape of testing label is', y_test.shape)

    # operations initialization
    conv1 = autograd.Conv2d(1, 32, 3, padding=1)
    conv21 = autograd.Conv2d(32, 16, 3, padding=1)
    conv22 = autograd.Conv2d(32, 16, 3, padding=1)
    linear = autograd.Linear(32 * 28 * 28, 10)

    def forward(x, t):
        y = conv1(x)
        y = autograd.tanh(y)
        y1 = conv21(y)
        y2 = conv22(y)
        y = autograd.cat((y1, y2), 1)
        y = autograd.sigmoid(y)
        y = autograd.mul(y, y)
        y = autograd.flatten(y)
        y = linear(y)
        loss = autograd.softmax_cross_entropy(y, t)
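        # (snippet truncated here; presumably the function ends by returning the
        # loss and logits, as in example #5:)
        return loss, y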
Example #18
    epochs = 1

    sgd = opt.SGD(lr=0.0)

    x_train = preprocess(train[0])
    y_train = to_categorical(train[1], num_classes)

    x_test = preprocess(test[0])
    y_test = to_categorical(test[1], num_classes)
    print('the shape of training data is', x_train.shape)
    print('the shape of training label is', y_train.shape)
    print('the shape of testing data is', x_test.shape)
    print('the shape of testing label is', y_test.shape)

    # operations initialization
    conv1 = autograd.Conv2d(1, 1, 3, padding=1)
    conv21 = autograd.Conv2d(1, 5, 3, padding=1)
    conv22 = autograd.Conv2d(1, 5, 3, padding=1)
    pooling1 = autograd.MaxPool2d(3, 1, padding=1)
    pooling2 = autograd.AvgPool2d(28, 1, padding=0)
    linear = autograd.Linear(10, 10)
    bn = autograd.BatchNorm2d(10)

    def forward(x, t):
        y = conv1(x)
        y = autograd.tanh(y)
        y1 = conv21(y)
        y2 = conv22(y)
        y = autograd.cat((y1, y2), 1)
        y = autograd.sigmoid(y)
        y = bn(y)
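        # (snippet truncated here; a plausible ending given the operations defined
        # above -- AvgPool2d(28) reduces the 28x28 maps of the 10 channels to 1x1,
        # matching Linear(10, 10); this continuation is an assumption:)
        y = pooling1(y)
        y = pooling2(y)
        y = autograd.flatten(y)
        y = linear(y)
        loss = autograd.softmax_cross_entropy(y, t)
        return loss, y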
Example #19
    epochs = 1

    sgd = optimizer.SGD(0.05)

    x_train = preprocess(train[0])
    y_train = to_categorical(train[1], num_classes)

    x_test = preprocess(test[0])
    y_test = to_categorical(test[1], num_classes)
    print('the shape of training data is', x_train.shape)
    print('the shape of training label is', y_train.shape)
    print('the shape of testing data is', x_test.shape)
    print('the shape of testing label is', y_test.shape)

    # operations initialization
    conv1 = autograd.Conv2d(3, 32)
    conv2 = autograd.Conv2d(32, 32)
    linear = autograd.Linear(32 * 28 * 28, 10)

    def forward(x, t):
        y = conv1(x)
        y = autograd.relu(y)
        y = conv2(y)
        y = autograd.relu(y)
        y = autograd.max_pool_2d(y)
        y = autograd.flatten(y)
        y = linear(y)
        y = autograd.soft_max(y)
        loss = autograd.cross_entropy(y, t)
        return loss, y
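
The snippet stops after forward; the surrounding training loop follows the pattern of example #5. A hedged sketch (it borrows the newer opt.SGD update API from the other examples instead of the legacy optimizer module used above; dev and batchsize are assumed from context):

    batchsize = 32
    sgd = opt.SGD(lr=0.05)
    autograd.training = True
    for epoch in range(epochs):
        for i in range(x_train.shape[0] // batchsize):
            inputs = tensor.Tensor(device=dev, data=x_train[i * batchsize:(i + 1) * batchsize])
            targets = tensor.Tensor(device=dev, data=y_train[i * batchsize:(i + 1) * batchsize])
            loss, y = forward(inputs, targets)
            for p, gp in autograd.backward(loss):
                sgd.update(p, gp)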