Example #1
    def features(self, input):
        x = self.conv1(input)
        x = self.bn1(x)
        x = autograd.relu(x)

        x = self.conv2(x)
        x = self.bn2(x)
        x = autograd.relu(x)

        x = self.block1(x)
        x = self.block2(x)
        x = self.block3(x)
        x = self.block4(x)
        x = self.block5(x)
        x = self.block6(x)
        x = self.block7(x)
        x = self.block8(x)
        x = self.block9(x)
        x = self.block10(x)
        x = self.block11(x)
        x = self.block12(x)

        x = self.conv3(x)
        x = self.bn3(x)
        x = autograd.relu(x)

        x = self.conv4(x)
        x = self.bn4(x)
        return x
Example #3
def onnx_loss(a, model, target):
    '''
    a: dictionary mapping onnx graph node names to singa tensors
    model: onnx graph model
    target: label tensor

    Replays the remaining nodes of the onnx graph and returns the loss.
    '''
    for i in model.graph.node:
        if i.op_type == 'Constant':
            # constants are handled elsewhere; nothing to do here
            continue
        elif i.op_type in ('LeakyRelu', 'Relu'):
            # LeakyRelu is approximated with plain relu here
            a[str(i.output[0])] = autograd.relu(a[str(i.input[0])])
        elif i.op_type == 'Softmax':
            a[str(i.output[0])] = autograd.softmax(a[str(i.input[0])])
        elif i.op_type == 'Add':
            # bias tensors are identified by a trailing 'b' in their name
            if str(i.input[1])[-1] == 'b':
                a[str(i.output[0])] = autograd.add_bias(a[str(i.input[0])], a[str(i.input[1])])
            else:
                a[str(i.output[0])] = autograd.add(a[str(i.input[0])], a[str(i.input[1])])
        elif i.op_type == 'MatMul':
            a[str(i.output[0])] = autograd.matmul(a[str(i.input[0])], a[str(i.input[1])])

    # the graph's final (softmax) output is assumed to be named 'Y'
    loss = autograd.cross_entropy(a['Y'], target)
    return loss
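For context, a minimal sketch of how onnx_loss might be driven. The mlp.onnx file name, the input shapes, and the assumption that the graph's final output is named 'Y' are illustrative, not part of the original:

# Hypothetical driver for onnx_loss; file name and shapes are assumptions.
import onnx
import numpy as np
from singa import autograd
from singa.tensor import Tensor

model = onnx.load("mlp.onnx")                 # assumed onnx model file
x = Tensor(data=np.random.randn(4, 2).astype(np.float32))
t = Tensor(data=np.zeros((4, 2), dtype=np.float32))

a = {i.name: x for i in model.graph.input}    # seed the node dictionary
loss = onnx_loss(a, model, t)
for p, gp in autograd.backward(loss):         # (param, grad) pairs
    pass                                      # apply an optimizer update here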
Example #4
 def forward(self, x):
     y = sg_ir.run([x], last_layers=self.last_layers)[0]
     y = self.append_linear1(y)
     y = autograd.relu(y)
     y = self.append_linear2(y)
     y = autograd.relu(y)
     y = self.append_linear3(y)
     y = autograd.relu(y)
     return y
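For context, sg_ir above is a SINGA ONNX backend graph whose run() output feeds the appended linear layers. A sketch of how it could be created, where the model file name and device choice are assumptions:

# Sketch of the sg_ir setup this forward relies on; names are assumptions.
import onnx
from singa import device, sonnx

onnx_model = onnx.load("pretrained.onnx")      # assumed pretrained model file
dev = device.get_default_device()
sg_ir = sonnx.prepare(onnx_model, device=dev)  # backend whose run() is used above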
Example #5
 def forward(x, t):
     y = conv1(x)
     y = autograd.relu(y)
     y = conv2(y)
     y = autograd.relu(y)
     y = pooling(y)
     y = autograd.flatten(y)
     y = linear(y)
     loss = autograd.softmax_cross_entropy(y, t)
     return loss, y
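Because this forward returns the loss together with the logits, one training step is just a backward pass plus parameter updates. A sketch assuming an SGD optimizer and prepared input tensors x and t:

# One training step around forward(x, t); x, t and the layers are assumed
# to be defined as in the surrounding examples.
from singa import autograd, opt

autograd.training = True    # enable gradient tracking
sgd = opt.SGD(0.05)
loss, y = forward(x, t)
for p, gp in autograd.backward(loss):
    sgd.update(p, gp)       # one SGD step per parameter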
Example #6
    def __call__(self, input):
        x = self.features(input)
        x = self.logits(x)

        x = autograd.relu(x)
        x = self.linear1(x)
        x = autograd.relu(x)
        x = self.linear2(x)

        return x
Example #7
 def forward(x, t):
     y = conv1(x)
     y = autograd.relu(y)
     y1 = conv21(y)
     y2 = conv22(y)
     y = autograd.cat((y1, y2), 1)
     y = autograd.relu(y)
     y = autograd.flatten(y)
     y = linear(y)
     loss = autograd.softmax_cross_entropy(y, t)
     return loss, y
Example #8
 def forward(x, t):
     y = conv1(x)
     y = autograd.relu(y)
     y = conv2(y)
     y = autograd.relu(y)
     y = autograd.max_pool_2d(y)
     y = autograd.flatten(y)
     y = linear(y)
     y = autograd.soft_max(y)
     loss = autograd.cross_entropy(y, t)
     return loss, y
Example #10
 def forward(self, x):
     y = self.conv1(x)
     y = autograd.relu(y)
     y = self.pooling1(y)
     y = self.conv2(y)
     y = autograd.relu(y)
     y = self.pooling2(y)
     y = autograd.flatten(y)
     y = self.linear1(y)
     y = autograd.relu(y)
     y = self.linear2(y)
     return y
Example #11
 def forward(self, inputs):
     x = autograd.matmul(inputs, self.w0)
     x = autograd.add_bias(x, self.b0)
     x = autograd.relu(x)
     x = autograd.matmul(x, self.w1)
     x = autograd.add_bias(x, self.b1)
     return x
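The w0/b0/w1/b1 tensors this forward consumes have to be created as trainable parameters. A sketch of a matching __init__, with shapes borrowed from the 2-3-2 MLP in the SGD examples below:

 def __init__(self):
     # trainable parameters for the two-layer MLP above (shapes are assumptions)
     self.w0 = Tensor(shape=(2, 3), requires_grad=True, stores_grad=True)
     self.w0.gaussian(0.0, 0.1)
     self.b0 = Tensor(shape=(1, 3), requires_grad=True, stores_grad=True)
     self.b0.set_value(0.0)
     self.w1 = Tensor(shape=(3, 2), requires_grad=True, stores_grad=True)
     self.w1.gaussian(0.0, 0.1)
     self.b1 = Tensor(shape=(1, 2), requires_grad=True, stores_grad=True)
     self.b1.set_value(0.0)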
Example #12
 def forward(x, t):
     y = conv1(x)
     y = autograd.relu(y)
     y = bn1(y)
     y = pooling1(y)
     y1 = conv21(y)
     y2 = conv22(y)
     y = autograd.cat((y1, y2), 1)
     y = bn2(y)
     y = autograd.relu(y)
     y = pooling2(y)
     y = autograd.flatten(y)
     y = linear(y)
     loss = autograd.softmax_cross_entropy(y, t)
     return loss, y
Example #13
 def forward(self, x):
     y = self.lstm(x)
     y = autograd.reshape(y, (y.shape[0], -1))
     y = self.l1(y)
     y = autograd.relu(y)
     y = self.l2(y)
     return y
Example #14
    def __call__(self, x):
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = autograd.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)

        if self.downsample is not None:
            residual = self.downsample(x)

        out = autograd.add(out, residual)
        out = autograd.relu(out)

        return out
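A sketch of the layer definitions this residual __call__ expects; autograd.Conv2d and autograd.BatchNorm2d are SINGA's autograd layers, and the channel counts and arguments are illustrative assumptions:

    def __init__(self, in_channels, channels, stride=1, downsample=None):
        # layers consumed by __call__ above; a sketch, not the original definition
        self.conv1 = autograd.Conv2d(in_channels, channels, 3,
                                     stride=stride, padding=1, bias=False)
        self.bn1 = autograd.BatchNorm2d(channels)
        self.conv2 = autograd.Conv2d(channels, channels, 3,
                                     stride=1, padding=1, bias=False)
        self.bn2 = autograd.BatchNorm2d(channels)
        # downsample is typically a strided 1x1 conv when the shapes differ
        self.downsample = downsample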
Example #15
    def __call__(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = autograd.relu(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        x = self.avgpool(x)
        x = autograd.flatten(x)
        x = self.fc(x)

        return x
Example #16
# data and label below are assumed to be module-level numpy arrays
from singa import autograd, device, opt, sonnx, tensor
from singa.tensor import Tensor


def singa_to_onnx(niter, use_cpu=False):
    if use_cpu:
        print("Using CPU")
        dev = device.get_default_device()
    else:
        print("Using GPU")
        dev = device.create_cuda_gpu()
    inputs = Tensor(
        data=data,
        device=dev,
        requires_grad=False,
        stores_grad=False,
        name="input",
    )
    target = Tensor(
        data=label,
        device=dev,
        requires_grad=False,
        stores_grad=False,
        name="target",
    )

    w0 = Tensor(shape=(2, 3), device=dev, requires_grad=True, stores_grad=True)
    w0.gaussian(0.0, 0.1)
    b0 = Tensor(shape=(3,), device=dev, requires_grad=True, stores_grad=True)
    b0.set_value(0.0)

    w1 = Tensor(shape=(3, 2), device=dev, requires_grad=True, stores_grad=True)
    w1.gaussian(0.0, 0.1)
    b1 = Tensor(shape=(2,), device=dev, requires_grad=True, stores_grad=True)
    b1.set_value(0.0)

    sgd = opt.SGD(0.1)
    # training process
    for i in range(niter):  # run the requested number of iterations
        x = autograd.matmul(inputs, w0)
        x = autograd.add_bias(x, b0)
        x = autograd.relu(x)
        x = autograd.matmul(x, w1)
        x = autograd.add_bias(x, b1)
        loss = autograd.softmax_cross_entropy(x, target)
        for p, gp in autograd.backward(loss):
            sgd.update(p, gp)

        print("training loss = ", tensor.to_numpy(loss)[0])
    sonnx.export([inputs], [x], file_path="mlp.onnx")
Example #17
    def forward(x, t):
        y = conv1(x)
        y = autograd.tanh(y)
        y1 = conv21(y)
        y2 = conv22(y)
        y = autograd.cat((y1, y2), 1)
        y = autograd.sigmoid(y)
        y = bn(y)
        y = autograd.relu(y)
        y = autograd.mul(y, y)
        y = pooling1(y)
        y = autograd.sigmoid(y)

        y = pooling2(y)

        print(tensor.to_numpy(y).shape)
        y = autograd.flatten(y)
        y = linear(y)
        print(tensor.to_numpy(y).shape)
        loss = autograd.softmax_cross_entropy(y, t)
        return loss, y
Example #18
    def run(model, modeldic, layer, inputs):
        '''
        inputs: input tensors for the singa model

        Replays the remaining nodes of the onnx graph.
        '''
        supportLayer = ['Linear', 'Conv', 'MaxPool', 'AveragePool', 'BatchNormalization']
        oper = modeldic

        # bind the graph inputs to the supplied tensors
        for counter, i in enumerate(model.graph.input):
            oper[i.name] = inputs[counter]
        for i in model.graph.node:
            if i.op_type == 'Relu':
                oper[str(i.output[0])] = autograd.relu(oper[str(i.input[0])])
            elif i.op_type == 'Softmax':
                oper[str(i.output[0])] = autograd.softmax(oper[str(i.input[0])])
            elif i.op_type == 'Add':
                oper[str(i.output[0])] = autograd.add(oper[str(i.input[0])], oper[str(i.input[1])])
            elif i.op_type == 'MatMul':
                oper[str(i.output[0])] = autograd.matmul(oper[str(i.input[0])], oper[str(i.input[1])])
            elif i.op_type == 'Flatten':
                oper[str(i.output[0])] = autograd.flatten(oper[str(i.input[0])])
            elif i.op_type == 'Concat':
                oper[str(i.output[0])] = autograd.cat((oper[str(i.input[0])], oper[str(i.input[1])]), int(i.attribute[0].i))
            elif i.op_type == 'Tanh':
                oper[str(i.output[0])] = autograd.tanh(oper[str(i.input[0])])
            elif i.op_type == 'Sigmoid':
                oper[str(i.output[0])] = autograd.sigmoid(oper[str(i.input[0])])
            elif i.op_type == 'Mul':
                oper[str(i.output[0])] = autograd.mul(oper[str(i.input[0])], oper[str(i.input[1])])
            elif i.op_type in supportLayer:
                # stateful layers (conv, pooling, batchnorm) are looked up by output name
                oper[str(i.output[0])] = layer[str(i.output[0])](oper[str(i.input[0])])
        out = []
        for counter, i in enumerate(model.graph.output):
            out.append(oper[i.name])
        return out
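A usage sketch for run(), assuming the mlp.onnx exported by the singa_to_onnx example above; since such a graph contains only MatMul/Add/Relu nodes, the layer dictionary can start out empty:

# Hypothetical driver for run(); the file name and shapes are assumptions
# tied to the mlp.onnx exported in the singa_to_onnx example above.
import onnx
import numpy as np
from singa.tensor import Tensor

model = onnx.load("mlp.onnx")
x = Tensor(data=np.random.randn(4, 2).astype(np.float32))
out = run(model, {}, {}, [x])   # modeldic and layer start out empty
print(out[0].shape)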
Example #19
    print("train_label_shape:", label.shape)

    inputs = Tensor(data=data)
    target = Tensor(data=label)

    w0 = Tensor(shape=(2, 3), requires_grad=True, stores_grad=True)
    w0.gaussian(0.0, 0.1)
    b0 = Tensor(shape=(1, 3), requires_grad=True, stores_grad=True)
    b0.set_value(0.0)

    w1 = Tensor(shape=(3, 2), requires_grad=True, stores_grad=True)
    w1.gaussian(0.0, 0.1)
    b1 = Tensor(shape=(1, 2), requires_grad=True, stores_grad=True)
    b1.set_value(0.0)

    sgd = optimizer.SGD(0.05)
    # training process
    for i in range(1001):
        x = autograd.matmul(inputs, w0)
        x = autograd.add_bias(x, b0)
        x = autograd.relu(x)
        x = autograd.matmul(x, w1)
        x = autograd.add_bias(x, b1)
        x = autograd.softmax(x)
        loss = autograd.cross_entropy(x, target)
        for p, gp in autograd.backward(loss):
            sgd.apply(0, gp, p, "")

        if i % 100 == 0:
            print("training loss = ", tensor.to_numpy(loss)[0])
Example #20
 def logits(self, features):
     x = autograd.relu(features)
     x = self.globalpooling(x)
     x = autograd.flatten(x)
     x = self.fc(x)
     return x
Example #21
    print('train_label_shape:', label.shape)

    inputs = Tensor(data=data)
    target = Tensor(data=label)

    w0 = Tensor(shape=(2, 3), requires_grad=True, stores_grad=True)
    w0.gaussian(0.0, 0.1)
    b0 = Tensor(shape=(1, 3), requires_grad=True, stores_grad=True)
    b0.set_value(0.0)

    w1 = Tensor(shape=(3, 2), requires_grad=True, stores_grad=True)
    w1.gaussian(0.0, 0.1)
    b1 = Tensor(shape=(1, 2), requires_grad=True, stores_grad=True)
    b1.set_value(0.0)

    sgd = optimizer.SGD(0.05)
    # training process
    for i in range(1001):
        x = autograd.matmul(inputs, w0)
        x = autograd.add_bias(x, b0)
        x = autograd.relu(x)
        x = autograd.matmul(x, w1)
        x = autograd.add_bias(x, b1)
        x = autograd.softmax(x)
        loss = autograd.cross_entropy(x, target)
        for p, gp in autograd.backward(loss):
            sgd.apply(0, gp, p, '')

        if i % 100 == 0:
            print('training loss = ', tensor.to_numpy(loss)[0])