def DoublingConv(in_channels, hidden_channels, out_channels):
    # Upsampling block: the first transposed conv (stride=2) doubles the
    # spatial size; output_padding=1 makes the doubling exact (2H, not 2H - 1).
    module = nn.Sequential(
        nn.ConvTranspose2d(in_channels, hidden_channels, kernel_size=3,
                           padding=1, stride=2, output_padding=1),
        nn.BatchNorm2d(hidden_channels),
        nn.ReLU(),
        nn.ConvTranspose2d(hidden_channels, out_channels, kernel_size=3,
                           padding=1, stride=1),
        nn.BatchNorm2d(out_channels),
        nn.ReLU())
    return module
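# A minimal, hedged sanity check for DoublingConv: it confirms the stride-2
# transposed convolution (with output_padding=1) exactly doubles the spatial
# size. The channel sizes (64, 32, 16) are placeholder assumptions.
import torch
import torch.nn as nn

up = DoublingConv(64, 32, 16)
x = torch.randn(2, 64, 8, 8)
assert up(x).shape == (2, 16, 16, 16)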
def __init__(self, inplanes, planes, stride=1):
    super(BasicBlock, self).__init__()
    # assumes BasicBlock defines the class attribute `expansion = 1`
    self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride,
                           padding=1, bias=False)
    # the second conv keeps stride 1; striding both convs would downsample
    # twice and break the shape match with the shortcut branch
    self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1,
                           padding=1, bias=False)
    self.BN1 = nn.BatchNorm2d(planes)
    self.BN2 = nn.BatchNorm2d(planes)
    self.shortcut = nn.Sequential()
    if stride != 1 or inplanes != self.expansion * planes:
        self.shortcut = nn.Sequential(
            nn.Conv2d(inplanes, self.expansion * planes, kernel_size=1,
                      stride=stride, bias=False),
            nn.BatchNorm2d(self.expansion * planes))
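# A forward sketch for BasicBlock, hedged since the original forward is not
# shown; assumes `import torch.nn.functional as F` at module scope and the
# usual class attribute `expansion = 1`. The shortcut joins the residual
# branch before the final ReLU, as in the standard post-activation ResNet.
def forward(self, x):
    out = F.relu(self.BN1(self.conv1(x)))
    out = self.BN2(self.conv2(out))
    out = out + self.shortcut(x)
    return F.relu(out)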
def HalvingConv(in_channels, hidden_channels, out_channels):
    # Downsampling block: the second conv (stride=2) halves the spatial size.
    module = nn.Sequential(
        nn.Conv2d(in_channels, hidden_channels, kernel_size=3, padding=1, stride=1),
        nn.BatchNorm2d(hidden_channels),
        nn.ReLU(),
        nn.Conv2d(hidden_channels, out_channels, kernel_size=3, padding=1, stride=2),
        nn.BatchNorm2d(out_channels),
        nn.ReLU())
    return module
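# The mirror-image check for HalvingConv (channel sizes again placeholders):
# a stride-2 conv with kernel_size=3, padding=1 maps H to floor((H - 1) / 2) + 1,
# which halves even inputs.
import torch

down = HalvingConv(16, 32, 64)
y = torch.randn(2, 16, 16, 16)
assert down(y).shape == (2, 64, 8, 8)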
def __init__(self):
    super().__init__()
    # The original left every layer's arguments blank; the channel counts,
    # kernel sizes, and pooling windows below are placeholder assumptions.
    self.shared_convnet = nn.Sequential(
        nn.Conv2d(3, 32, kernel_size=3, padding=1), nn.BatchNorm2d(32),
        nn.ReLU(), nn.MaxPool2d(2),
        nn.Conv2d(32, 64, kernel_size=3, padding=1), nn.BatchNorm2d(64),
        nn.ReLU(), nn.MaxPool2d(2),
        nn.Conv2d(64, 128, kernel_size=3, padding=1), nn.BatchNorm2d(128),
        nn.ReLU(), nn.MaxPool2d(2))
    self.current_conv = nn.Sequential(
        nn.Conv2d(128, 256, kernel_size=3, padding=1), nn.BatchNorm2d(256),
        nn.ReLU(), nn.MaxPool2d(2))
    self.current_deconv = nn.Sequential(
        nn.ConvTranspose2d(256, 128, kernel_size=2, stride=2))
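# A hypothetical forward wiring for the three branches above, hedged since
# the original forward is not shown: shared features feed the current conv
# stage, whose output is upsampled by the deconv stage.
def forward(self, x):
    features = self.shared_convnet(x)
    encoded = self.current_conv(features)
    return self.current_deconv(encoded)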
def __init__(self, basic_block, num_blocks, num_classes=100):
    super(ResNet, self).__init__()
    channels = [32, 64, 128, 256]
    self.conv1 = nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1, bias=False)
    self.BN1 = nn.BatchNorm2d(32)
    self.drop = nn.Dropout2d(0.25)
    self.basic1 = self._add_layer(basic_block, channels[0], num_blocks[0], stride=1)
    self.basic2 = self._add_layer(basic_block, channels[1], num_blocks[1], stride=2)
    self.basic3 = self._add_layer(basic_block, channels[2], num_blocks[1], stride=2)
    self.basic4 = self._add_layer(basic_block, channels[3], num_blocks[0], stride=2)
    # classifier expects the final feature map flattened to 256 x 2 x 2
    self.fc = nn.Linear(256 * 2 * 2 * basic_block.expansion, num_classes)
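# _add_layer is referenced above but not defined; this is a sketch of the
# conventional implementation, assuming the class tracks self.inplanes
# (initialized to 32 to match conv1) and that only the first block in each
# stage downsamples.
def _add_layer(self, basic_block, planes, num_blocks, stride):
    strides = [stride] + [1] * (num_blocks - 1)
    layers = []
    for s in strides:
        layers.append(basic_block(self.inplanes, planes, s))
        self.inplanes = planes * basic_block.expansion
    return nn.Sequential(*layers)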
def __init__(self, inplanes, planes, stride=1, downsample=None):
    super(Bottleneck, self).__init__()
    # pre-activation layout: each BatchNorm precedes its convolution
    self.bn1 = nn.BatchNorm2d(inplanes)
    self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
    self.bn2 = nn.BatchNorm2d(planes)
    self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                           padding=1, bias=False)
    self.bn3 = nn.BatchNorm2d(planes)
    self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
    self.relu = nn.ReLU(inplace=True)
    self.downsample = downsample
    self.stride = stride
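# A forward sketch consistent with the pre-activation layout above (each
# BatchNorm precedes its convolution); hedged, since the original forward is
# not shown. When given, downsample adapts the identity branch to the
# planes * 4 output channels.
def forward(self, x):
    identity = x if self.downsample is None else self.downsample(x)
    out = self.conv1(self.relu(self.bn1(x)))
    out = self.conv2(self.relu(self.bn2(out)))
    out = self.conv3(self.relu(self.bn3(out)))
    return out + identity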