def __init__(self, num_classes=10):
    super(AlexNet, self).__init__()
    # update conv2d with custom kernel from pyinn
    self.conv1 = Conv2dDepthwise(channels=3, outputs=64, kernel_size=11, stride=4, padding=5)
    self.conv2 = Conv2dDepthwise(channels=64, outputs=192, kernel_size=5, padding=2)
    self.conv3 = Conv2dDepthwise(channels=192, outputs=384, kernel_size=3, padding=1)
    self.conv4 = Conv2dDepthwise(channels=384, outputs=256, kernel_size=3, padding=1)
    self.conv5 = Conv2dDepthwise(channels=256, outputs=256, kernel_size=3, padding=1)
    # pyinn has a negative concatenated relu --> need to find equivalent of regular relu
    self.relu = nn.ReLU(inplace=True)
    # pyinn doesn't have maxpool, need to see how to implement this
    self.maxpool2d = nn.MaxPool2d(kernel_size=2, stride=2)
    # pyinn doesn't have a linear (plain matrix multiply), but can this be done as a conv2d?
    self.classifier = nn.Linear(256, num_classes)
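# A minimal sketch (not part of the original snippet) answering the question in the
# last comment above: an nn.Linear on pooled 256-d features can indeed be expressed
# as a 1x1 nn.Conv2d applied to a 1x1 spatial map. Names fc/conv/feat are made up
# for illustration.
import torch
import torch.nn as nn
from torch.autograd import Variable

fc = nn.Linear(256, 10)
conv = nn.Conv2d(256, 10, kernel_size=1)
# copy the linear weights into the 1x1 conv so both layers compute the same mapping
conv.weight.data.copy_(fc.weight.data.view(10, 256, 1, 1))
conv.bias.data.copy_(fc.bias.data)

feat = Variable(torch.randn(4, 256))              # pooled features, batch of 4
out_fc = fc(feat)                                 # shape (4, 10)
out_conv = conv(feat.view(4, 256, 1, 1))          # shape (4, 10, 1, 1)
print((out_fc - out_conv.view(4, 10)).abs().max())  # ~0, identical results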
def run_check_sep_conv2d():
    pytorch_sep_conv = nn.Conv2d(16, 16, kernel_size=3, padding=1, stride=1, groups=16, bias=False).cuda()
    input = Variable(torch.randn(1, 16, 32, 32)).cuda()
    pytorch_output = pytorch_sep_conv(input)

    inn_sep_conv = Conv2dDepthwise(channels=16, kernel_size=3, padding=1, bias=False).cuda()
    inn_sep_conv.weight.data = pytorch_sep_conv.weight.data
    inn_output = inn_sep_conv(input)

    print('pytorch_output----------------------------')
    print(pytorch_output[0, 0])
    print(pytorch_sep_conv.weight.size())  # torch.Size([16, 1, 3, 3])
    print('')
    print('inn_output----------------------------')
    print(inn_output[0, 0])
    print(inn_sep_conv.weight.size())
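# A numeric variant of the check above (a sketch, assuming pyinn's Conv2dDepthwise
# and a CUDA device are available), so the comparison does not depend on eyeballing
# the printed feature maps.
import torch
import torch.nn as nn
from torch.autograd import Variable
from pyinn.modules import Conv2dDepthwise

def run_check_sep_conv2d_numeric():
    pytorch_sep_conv = nn.Conv2d(16, 16, kernel_size=3, padding=1, stride=1, groups=16, bias=False).cuda()
    inn_sep_conv = Conv2dDepthwise(channels=16, kernel_size=3, padding=1, bias=False).cuda()
    inn_sep_conv.weight.data = pytorch_sep_conv.weight.data

    input = Variable(torch.randn(1, 16, 32, 32)).cuda()
    diff = (pytorch_sep_conv(input) - inn_sep_conv(input)).data.abs().max()
    print('max abs diff: %.3e' % diff)  # should be ~0 if the two kernels agree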
def __init__(self, in_planes, kernel_size, stride=1, padding=0, dilation=1, groups=1, relu=True, bn=True, bias=False):
    super(BasicSepConv, self).__init__()
    self.out_channels = in_planes
    self.conv = Conv2dDepthwise(in_planes, kernel_size=kernel_size, stride=stride, padding=padding, dilation=dilation, bias=bias)
    self.bn = nn.BatchNorm2d(in_planes, eps=1e-5, momentum=0.01, affine=True) if bn else None
    self.relu = nn.ReLU(inplace=True) if relu else None
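# The forward pass is not shown in the snippet above; a plausible sketch, assuming
# the usual conv -> bn -> relu ordering and the optional bn/relu members created
# in __init__:
def forward(self, x):
    x = self.conv(x)
    if self.bn is not None:
        x = self.bn(x)
    if self.relu is not None:
        x = self.relu(x)
    return x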
def __init__(self, in_channels, out_channels, kernel_size=3, padding=1, stride=1, is_bn=True):
    super(SeparableConvBn2d, self).__init__()
    # self.conv1 = nn.Conv2d(in_channels, in_channels, kernel_size=kernel_size, padding=padding, stride=stride, groups=in_channels, bias=False)  # depth_wise
    # self.conv2 = nn.Conv2d(in_channels, out_channels, kernel_size=1, padding=0, stride=1, bias=False)  # point_wise
    self.conv1 = Conv2dDepthwise(in_channels, kernel_size=kernel_size, padding=padding, stride=stride, bias=False)
    self.conv2 = nn.Conv2d(in_channels, out_channels, kernel_size=1, padding=0, stride=1, bias=False)
    self.bn = nn.BatchNorm2d(out_channels, eps=BN_EPS)
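# The forward pass is not part of the snippet above; a plausible sketch of the
# depthwise -> pointwise -> batchnorm ordering (note is_bn is not stored by the
# shown __init__, so batchnorm is applied unconditionally here):
def forward(self, x):
    x = self.conv1(x)  # depthwise: one k x k filter per input channel
    x = self.conv2(x)  # pointwise 1x1 conv: mixes channels, sets out_channels
    x = self.bn(x)
    return x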
def conv_dw(inp, oup, stride):
    return nn.Sequential(
        Conv2dDepthwise(inp, kernel_size=3, stride=stride, padding=1, bias=False),
        nn.BatchNorm2d(inp),
        nn.ReLU(inplace=True),

        nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
        nn.BatchNorm2d(oup),
        nn.ReLU(inplace=True),
    )
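# Usage sketch for the MobileNet-style block above (channel counts chosen for
# illustration): the depthwise 3x3 plus pointwise 1x1 pair uses far fewer weights
# than a dense 3x3 convolution with the same in/out channels.
import torch.nn as nn

block = conv_dw(32, 64, stride=1)
dense = nn.Conv2d(32, 64, kernel_size=3, padding=1, bias=False)

def count_params(m):
    return sum(p.numel() for p in m.parameters())

print(count_params(block))  # 3*3*32 depthwise + 32*64 pointwise + batchnorm params
print(count_params(dense))  # 3*3*32*64 = 18432 weights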
def test_modules(self):
    module = Conv2dDepthwise(channels=8, kernel_size=3)
    x = Variable(torch.randn(1, 8, 5, 5))
    y = module(x)
    y_cuda = module.cuda()(x.cuda())
    self.assertLess((y - y_cuda.cpu()).data.abs().max(), 1e-6)