def __init__(self, inplanes, planes, stride=1, downsample=None):
    """Build a two-convolution residual basic block.

    Args:
        inplanes: number of input channels.
        planes: number of output channels of both 3x3 convolutions.
        stride: stride of the first convolution (and of the block).
        downsample: optional callable applied to the shortcut branch;
            None means an identity shortcut.
    """
    super(BasicBlock, self).__init__()
    # Shortcut configuration is stored first; it does not depend on the
    # convolution layers below.
    self.downsample = downsample
    self.stride = stride
    # First 3x3 conv carries the (possibly striding) spatial reduction.
    self.conv1 = conv3x3(inplanes, planes, stride)
    self.bn1 = autograd.BatchNorm2d(planes)
    # Second 3x3 conv keeps the channel count and spatial size.
    self.conv2 = conv3x3(planes, planes)
    self.bn2 = autograd.BatchNorm2d(planes)
def __init__(self, inplanes, planes, stride=1, downsample=None):
    """Build a 1x1 -> 3x3 -> 1x1 bottleneck residual block.

    Args:
        inplanes: number of input channels.
        planes: bottleneck width; the block outputs
            ``planes * self.expansion`` channels.
        stride: stride of the middle 3x3 convolution.
        downsample: optional callable for the shortcut branch
            (None = identity shortcut).
    """
    super(Bottleneck, self).__init__()
    expanded = planes * self.expansion
    # 1x1 reduction.
    self.conv1 = autograd.Conv2d(inplanes, planes, kernel_size=1,
                                 bias=False)
    self.bn1 = autograd.BatchNorm2d(planes)
    # 3x3 spatial convolution; carries the stride.
    self.conv2 = autograd.Conv2d(planes, planes, kernel_size=3,
                                 stride=stride, padding=1, bias=False)
    self.bn2 = autograd.BatchNorm2d(planes)
    # 1x1 expansion back to the block's output width.
    self.conv3 = autograd.Conv2d(planes, expanded, kernel_size=1,
                                 bias=False)
    self.bn3 = autograd.BatchNorm2d(expanded)
    self.downsample = downsample
    self.stride = stride
def _make_layer(self, block, planes, blocks, stride=1):
    """Build one ResNet stage and return it as a callable.

    Args:
        block: residual block class (e.g. BasicBlock / Bottleneck);
            must expose a ``expansion`` class attribute.
        planes: base channel width of the stage.
        blocks: number of residual blocks in the stage.
        stride: stride of the first block (1 keeps spatial size).

    Returns:
        A ``forward(x)`` closure that applies every block in order.

    Side effects:
        Updates ``self.inplanes`` to the stage's output channel count.
    """
    downsample = None
    if stride != 1 or self.inplanes != planes * block.expansion:
        # Shortcut branch needs a projection when spatial size or channel
        # count changes. NOTE: the name `downsample` is deliberately
        # rebound from None to this closure.
        conv = autograd.Conv2d(self.inplanes, planes * block.expansion,
                               kernel_size=1, stride=stride, bias=False)
        bn = autograd.BatchNorm2d(planes * block.expansion)

        def downsample(x):
            return bn(conv(x))

    layers = []
    # Only the first block may stride / project; the rest keep the shape.
    layers.append(block(self.inplanes, planes, stride, downsample))
    self.inplanes = planes * block.expansion
    for _ in range(1, blocks):
        layers.append(block(self.inplanes, planes))

    def forward(x):
        for layer in layers:
            x = layer(x)
        return x

    return forward
def combine_node(model,modeldic):
    '''
    Fuse ONNX operator nodes into SINGA layer objects.

    First pass rewrites each MatMul node that feeds exactly one Add node
    into a single 'Linear' node (the Add is marked 'removed').  The
    remaining passes instantiate an autograd layer per recognized node
    (Linear, Conv, MaxPool, AveragePool, BatchNormalization) and load its
    weights from `modeldic`.

    Args:
        model: ONNX ModelProto; its graph nodes are MUTATED in place.
        modeldic: mapping from tensor name to SINGA tensor holding the
            initializer values.

    Returns:
        (model, modeldic, layer) where `layer` maps a node's output name
        to the constructed autograd layer.
    '''
    # --- pass 1: fuse MatMul + Add -> Linear ------------------------------
    for idx, i in enumerate(model.graph.node):
        if (i.op_type == 'MatMul'):
            # Candidate Add consumers of this MatMul's output.
            addlist = Backend.find_add(model,i.output[0])
            if (len(addlist) == 0):
                continue
            if (len(addlist) > 1):
                # Ambiguous fan-out: leave the MatMul untouched.
                continue
            addidx = addlist[0]
            if (i.name == "not_requires_grad" and model.graph.node[addidx].name == "not_requires_grad"):
                continue
            # The fused node takes over the Add's output and bias input.
            model.graph.node[idx].output[0] = model.graph.node[addidx].output[0]
            model.graph.node[idx].input.append(model.graph.node[addidx].input[1])
            model.graph.node[idx].op_type = 'Linear'
            model.graph.node[addidx].op_type = 'removed'
    # NOTE(review): the loop variable `i` below shadows the one above and
    # is reused for every subsequent pass over the graph nodes.
    layer = {}
    # --- pass 2: Linear layers, weights W and bias b ----------------------
    for i in model.graph.node:
        if (i.op_type == 'Linear'):
            shape = Backend.find_shape(model,i.input[1])
            layer[str(i.output[0])] = autograd.Linear(shape[0], shape[1])
            # NOTE(review): unlike Conv/BatchNorm below, no .clone() here —
            # presumably safe because to_numpy copies; confirm.
            layer[str(i.output[0])].set_params(W=tensor.to_numpy(modeldic[str(i.input[1])]))
            layer[str(i.output[0])].set_params(b=tensor.to_numpy(modeldic[str(i.input[2])]))
    # --- pass 3: Conv layers ---------------------------------------------
    for i in model.graph.node:
        if (i.op_type == 'Conv'):
            shape = Backend.find_shape(model,i.input[1])
            # shape is assumed (out_channels, in_channels, k, ...) — the
            # first attribute is taken as the padding. TODO confirm against
            # the exporter's attribute ordering.
            layer[str(i.output[0])] = autograd.Conv2d(shape[1], shape[0], shape[2], padding=int(i.attribute[0].ints[0]))
            layer[str(i.output[0])].set_params(W=tensor.to_numpy(modeldic[str(i.input[1])].clone()))
            layer[str(i.output[0])].set_params(b=tensor.to_numpy(modeldic[str(i.input[2])].clone()))
    # --- pass 4: MaxPool --------------------------------------------------
    for i in model.graph.node:
        if (i.op_type == 'MaxPool'):
            # attribute[0]=kernel, attribute[1]=padding, attribute[2]=stride
            # (by position; TODO confirm attribute order is stable).
            k = (int(i.attribute[0].ints[0]), int(i.attribute[0].ints[0]))
            layer[str(i.output[0])] = autograd.MaxPool2d(k, int(i.attribute[2].ints[0]), padding=int(i.attribute[1].ints[0]))
    # --- pass 5: AveragePool ---------------------------------------------
    for i in model.graph.node:
        if (i.op_type == 'AveragePool'):
            k = (int(i.attribute[0].ints[0]), int(i.attribute[0].ints[0]))
            layer[str(i.output[0])] = autograd.AvgPool2d(k, int(i.attribute[2].ints[0]), padding=int(i.attribute[1].ints[0]))
    # --- pass 6: BatchNormalization --------------------------------------
    for i in model.graph.node:
        if (i.op_type == 'BatchNormalization'):
            shape = Backend.find_shape(model,i.input[1])
            layer[str(i.output[0])] = autograd.BatchNorm2d(shape[0])
            layer[str(i.output[0])].set_params(scale=tensor.to_numpy(modeldic[str(i.input[1])].clone()))
            layer[str(i.output[0])].set_params(bias=tensor.to_numpy(modeldic[str(i.input[2])].clone()))
    return model,modeldic,layer
def __init__(self, block, layers, num_classes=1000):
    """Build a ResNet backbone.

    Args:
        block: residual block class (BasicBlock or Bottleneck).
        layers: sequence of four ints — block counts for stages 1-4.
        num_classes: size of the final fully-connected output.
    """
    self.inplanes = 64
    super(ResNet, self).__init__()
    # Stem: 7x7 stride-2 conv + BN + 3x3 stride-2 max-pool.
    self.conv1 = autograd.Conv2d(3, 64, kernel_size=7, stride=2,
                                 padding=3, bias=False)
    self.bn1 = autograd.BatchNorm2d(64)
    self.maxpool = autograd.MaxPool2d(kernel_size=3, stride=2, padding=1)
    # Four stages; every stage after the first halves the spatial size.
    # Order matters: _make_layer mutates self.inplanes.
    for idx, planes in enumerate((64, 128, 256, 512)):
        stage = self._make_layer(block, planes, layers[idx],
                                 stride=1 if idx == 0 else 2)
        setattr(self, 'layer%d' % (idx + 1), stage)
    # Head: global 7x7 average pool and classifier.
    self.avgpool = autograd.AvgPool2d(7, stride=1)
    self.fc = autograd.Linear(512 * block.expansion, num_classes)
def test_batchnorm2d_gpu(self):
    """BatchNorm2d forward/backward on GPU preserves expected shapes."""
    bn = autograd.BatchNorm2d(3)
    # Random NCHW input on the GPU device.
    x = tensor.Tensor(shape=(2, 3, 3, 3), device=gpu_dev)
    x.gaussian(0.0, 1.0)
    # Use a clone of the input as the upstream gradient.
    grad = x.clone().data
    out = bn(x)
    dx, dscale, dbias = out.creator.backward(grad)
    # Output and input-gradient keep the input shape; the scale/bias
    # gradients are per-channel vectors.
    self.check_shape(out.shape, (2, 3, 3, 3))
    self.check_shape(dx.shape(), (2, 3, 3, 3))
    self.check_shape(dscale.shape(), (3, ))
    self.check_shape(dbias.shape(), (3, ))
def test_batch_norm(self):
    """Round-trip a BatchNorm2d op through ONNX export and re-execution."""
    x = tensor.Tensor(shape=(2, 3, 3, 3), device=gpu_dev)
    x.gaussian(0.0, 1.0)
    y = autograd.BatchNorm2d(3)(x)

    # Export the traced graph to an ONNX model.
    model = sonnx.to_onnx([x], [y])
    # print('The model is:\n{}'.format(model))

    # Re-run through the sonnx backend and compare with the original.
    sg_ir = sonnx.prepare(model, device=gpu_dev)
    outputs = sg_ir.run([x])

    np.testing.assert_array_almost_equal(tensor.to_numpy(y),
                                         tensor.to_numpy(outputs[0]),
                                         decimal=5)
def __init__(self, in_filters, out_filters, reps, strides=1, padding=0,
             start_with_relu=True, grow_first=True):
    """Build one Xception block of repeated ReLU/SepConv/BN units.

    Args:
        in_filters: input channel count.
        out_filters: output channel count.
        reps: number of separable-conv units in the block.
        strides: stride of the skip projection / trailing max-pool.
        padding: padding of the skip projection.
        start_with_relu: if False, drop the leading ReLU; if True, the
            first layer is replaced by a fresh ReLU.
        grow_first: grow channels in the first unit (True) or the last
            unit (False).
    """
    super(Block, self).__init__()
    # Projection shortcut only when the shape changes; identity otherwise.
    if out_filters != in_filters or strides != 1:
        self.skip = autograd.Conv2d(in_filters, out_filters, 1,
                                    stride=strides, padding=padding,
                                    bias=False)
        self.skipbn = autograd.BatchNorm2d(out_filters)
    else:
        self.skip = None

    def _unit(cin, cout):
        # One ReLU -> SeparableConv2d -> BatchNorm unit.
        return [autograd.ReLU(),
                autograd.SeparableConv2d(cin, cout, 3, stride=1,
                                         padding=1, bias=False),
                autograd.BatchNorm2d(cout)]

    self.layers = []
    filters = in_filters
    if grow_first:
        self.layers.extend(_unit(in_filters, out_filters))
        filters = out_filters
    for _ in range(reps - 1):
        self.layers.extend(_unit(filters, filters))
    if not grow_first:
        self.layers.extend(_unit(in_filters, out_filters))

    if not start_with_relu:
        self.layers = self.layers[1:]
    else:
        # Replace the first layer with a fresh ReLU instance.
        self.layers[0] = autograd.ReLU()

    if strides != 1:
        self.layers.append(autograd.MaxPool2d(3, strides, padding + 1))
def __init__(self, num_classes=1000):
    """Build the Xception network.

    Args:
        num_classes: number of classifier outputs.
    """
    super(Xception, self).__init__()
    self.num_classes = num_classes

    # Entry stem: two plain convolutions.
    self.conv1 = autograd.Conv2d(3, 32, 3, 2, 0, bias=False)
    self.bn1 = autograd.BatchNorm2d(32)
    self.conv2 = autograd.Conv2d(32, 64, 3, 1, 1, bias=False)
    self.bn2 = autograd.BatchNorm2d(64)
    # do relu here

    # Entry flow: channel growth with stride-2 blocks.
    self.block1 = Block(64, 128, 2, 2, padding=0, start_with_relu=False,
                        grow_first=True)
    self.block2 = Block(128, 256, 2, 2, padding=0, start_with_relu=True,
                        grow_first=True)
    self.block3 = Block(256, 728, 2, 2, padding=0, start_with_relu=True,
                        grow_first=True)

    # Middle flow: eight identical 728-channel blocks (block4..block11).
    for n in range(4, 12):
        setattr(self, 'block%d' % n,
                Block(728, 728, 3, 1, start_with_relu=True,
                      grow_first=True))

    # Exit flow.
    self.block12 = Block(728, 1024, 2, 2, start_with_relu=True,
                         grow_first=False)
    self.conv3 = autograd.SeparableConv2d(1024, 1536, 3, 1, 1)
    self.bn3 = autograd.BatchNorm2d(1536)
    # do relu here
    self.conv4 = autograd.SeparableConv2d(1536, 2048, 3, 1, 1)
    self.bn4 = autograd.BatchNorm2d(2048)

    self.globalpooling = autograd.MaxPool2d(10, 1)
    self.fc = autograd.Linear(2048, num_classes)
# Example training-script fragment: data preparation plus the network
# definition. NOTE(review): `preprocess`, `to_categorical`, `train`,
# `test`, `num_classes`, `x_train` and `y_train` are defined outside
# this fragment, and the `forward` body appears truncated here.
x_test = preprocess(test[0])
y_test = to_categorical(test[1], num_classes)
print('the shape of training data is', x_train.shape)
print('the shape of training label is', y_train.shape)
print('the shape of testing data is', x_test.shape)
print('the shape of testing label is', y_test.shape)
# operations initialization
conv1 = autograd.Conv2d(1, 1, 3, padding=1)
conv21 = autograd.Conv2d(1, 5, 3, padding=1)
conv22 = autograd.Conv2d(1, 5, 3, padding=1)
pooling1 = autograd.MaxPool2d(3, 1, padding=1)
pooling2 = autograd.AvgPool2d(28, 1, padding=0)
linear = autograd.Linear(10, 10)
bn = autograd.BatchNorm2d(10)


def forward(x, t):
    # Single-channel conv followed by two parallel 5-channel branches
    # concatenated along the channel axis (10 channels total).
    y = conv1(x)
    y = autograd.tanh(y)
    y1 = conv21(y)
    y2 = conv22(y)
    y = autograd.cat((y1, y2), 1)
    y = autograd.sigmoid(y)
    y = bn(y)
    y = autograd.relu(y)
    # Elementwise square before pooling.
    y = autograd.mul(y, y)
    y = pooling1(y)
    y = autograd.sigmoid(y)
    # 28x28 average pool collapses the spatial dimensions.
    y = pooling2(y)
# Example training-script fragment: optimizer, data preparation and the
# network definition. NOTE(review): `preprocess`, `to_categorical`,
# `train`, `test` and `num_classes` come from outside this fragment, and
# the `forward` body appears truncated here.
sgd = opt.SGD(lr=0.01)
x_train = preprocess(train[0])
y_train = to_categorical(train[1], num_classes)
x_test = preprocess(test[0])
y_test = to_categorical(test[1], num_classes)
print("the shape of training data is", x_train.shape)
print("the shape of training label is", y_train.shape)
print("the shape of testing data is", x_test.shape)
print("the shape of testing label is", y_test.shape)
# operations initialization
conv1 = autograd.Conv2d(1, 32, 3, padding=1, bias=False)
bn1 = autograd.BatchNorm2d(32)
conv21 = autograd.Conv2d(32, 16, 3, padding=1)
conv22 = autograd.Conv2d(32, 16, 3, padding=1)
bn2 = autograd.BatchNorm2d(32)
linear = autograd.Linear(32 * 28 * 28, 10)
pooling1 = autograd.MaxPool2d(3, 1, padding=1)
pooling2 = autograd.AvgPool2d(3, 1, padding=1)


def forward(x, t):
    # Conv stem with BN, then two parallel 16-channel branches joined
    # back to 32 channels along the channel axis.
    y = conv1(x)
    y = autograd.relu(y)
    y = bn1(y)
    y = pooling1(y)
    y1 = conv21(y)
    y2 = conv22(y)
    y = autograd.cat((y1, y2), 1)
# Example training-script fragment: data preparation plus the network
# definition. NOTE(review): `preprocess`, `to_categorical`, `train`,
# `test`, `num_classes`, `x_train` and `y_train` are defined outside
# this fragment, and the `forward` body appears truncated here.
x_test = preprocess(test[0])
y_test = to_categorical(test[1], num_classes)
print('the shape of training data is', x_train.shape)
print('the shape of training label is', y_train.shape)
print('the shape of testing data is', x_test.shape)
print('the shape of testing label is', y_test.shape)
# operations initialization
conv1 = autograd.Conv2d(1, 32, 3, padding=1)
conv21 = autograd.Conv2d(32, 16, 3, padding=1)
conv22 = autograd.Conv2d(32, 16, 3, padding=1)
linear = autograd.Linear(32 * 28 * 28, 10)
pooling1 = autograd.MaxPool2d(3, 1, padding=1)
pooling2 = autograd.AvgPool2d(3, 1, padding=1)
bn = autograd.BatchNorm2d(32)


def forward(x, t):
    # 32-channel stem, two parallel 16-channel branches concatenated
    # back to 32 channels, then BN and an elementwise square.
    y = conv1(x)
    y = autograd.tanh(y)
    y1 = conv21(y)
    y2 = conv22(y)
    y = autograd.cat((y1, y2), 1)
    y = autograd.sigmoid(y)
    y = bn(y)
    y = autograd.relu(y)
    y = autograd.mul(y,y)