def __init__(self, inplanes, planes, stride=1, downsample=None, lv_init=-5, var_p=-1):
    super(Bottleneck, self).__init__()
    self.bn1 = nn.BatchNorm2d(inplanes)
    self.conv1 = vi.BayesConv2d(inplanes, planes, kernel_size=1, bias=False,
                                lv_init=lv_init, var_p=var_p)
    self.bn2 = nn.BatchNorm2d(planes)
    self.conv2 = vi.BayesConv2d(planes, planes, kernel_size=3, stride=stride, padding=1,
                                bias=False, lv_init=lv_init, var_p=var_p)
    self.bn3 = nn.BatchNorm2d(planes)
    self.conv3 = vi.BayesConv2d(planes, planes * 4, kernel_size=1, bias=False,
                                lv_init=lv_init, var_p=var_p)
    self.relu = nn.ReLU(inplace=True)
    self.downsample = downsample
    self.stride = stride
def __init__(self, in_planes, planes, dropout_rate, stride=1, lv_init=-5, var_p=-1):
    super(WideBasic, self).__init__()
    self.bn1 = nn.BatchNorm2d(in_planes)
    self.conv1 = vi.BayesConv2d(in_planes, planes, kernel_size=3, padding=1, bias=True,
                                lv_init=lv_init, var_p=var_p)
    self.dropout = nn.Dropout(p=dropout_rate)
    self.bn2 = nn.BatchNorm2d(planes)
    self.conv2 = vi.BayesConv2d(planes, planes, kernel_size=3, stride=stride, padding=1,
                                bias=True, lv_init=lv_init, var_p=var_p)
    self.shortcut = nn.Sequential()
    if stride != 1 or in_planes != planes:
        self.shortcut = nn.Sequential(
            vi.BayesConv2d(in_planes, planes, kernel_size=1, stride=stride, bias=True,
                           lv_init=lv_init, var_p=var_p),
        )
def conv1x1(in_planes, out_planes, stride=1):
    """1x1 convolution"""
    return vi.BayesConv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
def _make_layer(self, block, planes, blocks, stride=1, lv_init=-5, var_p=-1):
    # Projection shortcut when the spatial resolution or channel count changes.
    downsample = None
    if stride != 1 or self.inplanes != planes * block.expansion:
        downsample = nn.Sequential(
            vi.BayesConv2d(self.inplanes, planes * block.expansion, kernel_size=1,
                           stride=stride, bias=False, lv_init=lv_init, var_p=var_p),
        )

    layers = list()
    layers.append(block(self.inplanes, planes, stride, downsample,
                        lv_init=lv_init, var_p=var_p))
    self.inplanes = planes * block.expansion
    for _ in range(1, blocks):
        layers.append(block(self.inplanes, planes, lv_init=lv_init, var_p=var_p))

    return nn.Sequential(*layers)
def conv3x3(in_planes, out_planes, stride=1, lv_init=-6, var_p=-1):
    """3x3 convolution with padding"""
    return vi.BayesConv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1,
                          bias=True, lv_init=lv_init, var_p=var_p)
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
    """3x3 convolution with padding"""
    return vi.BayesConv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                          padding=dilation, groups=groups, bias=False, dilation=dilation)
def __init__(self, num_classes=10, depth=110, lv_init=-5, var_p=-1):
    super(PreResNet, self).__init__()
    if depth >= 44:
        assert (depth - 2) % 9 == 0, 'depth should be 9n+2'
        n = (depth - 2) // 9
        block = Bottleneck
    else:
        assert (depth - 2) % 6 == 0, 'depth should be 6n+2'
        n = (depth - 2) // 6
        block = BasicBlock

    self.var_p = var_p
    self.inplanes = 16
    self.conv1 = vi.BayesConv2d(3, 16, kernel_size=3, padding=1, bias=False, var_p=var_p)
    self.layer1 = self._make_layer(block, 16, n, lv_init=lv_init, var_p=var_p)
    self.layer2 = self._make_layer(block, 32, n, stride=2, lv_init=lv_init, var_p=var_p)
    self.layer3 = self._make_layer(block, 64, n, stride=2, lv_init=lv_init, var_p=var_p)
    self.bn = nn.BatchNorm2d(64 * block.expansion)
    self.relu = nn.ReLU(inplace=True)
    self.avgpool = nn.AvgPool2d(8)
    self.fc = vi.BayesLinear(64 * block.expansion, num_classes, var_p=var_p)

    # He initialization for the convolutional weight means; standard BatchNorm init.
    for m in self.modules():
        if isinstance(m, vi.BayesConv2d):
            n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            m.weight.data.normal_(0, math.sqrt(2. / n))
        elif isinstance(m, nn.BatchNorm2d):
            m.weight.data.fill_(1)
            m.bias.data.zero_()
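# Usage sketch (illustrative, not from the original source): a PreResNet-110 for CIFAR-10.
# With depth=110 >= 44 the Bottleneck branch is taken, giving n = (110 - 2) // 9 = 12
# blocks per stage. Assumes `torch` is imported and the class defines the usual forward().
model = PreResNet(num_classes=10, depth=110, lv_init=-5, var_p=-1)
logits = model(torch.randn(2, 3, 32, 32))  # CIFAR-sized input: 32x32 -> 8x8 before AvgPool2d(8)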
def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,
             groups=1, width_per_group=64, replace_stride_with_dilation=None,
             norm_layer=None, lv_init=-1, var_p=-1):
    super(ResNet, self).__init__()
    if norm_layer is None:
        norm_layer = nn.BatchNorm2d
    self._norm_layer = norm_layer

    self.inplanes = 64
    self.dilation = 1
    if replace_stride_with_dilation is None:
        # each element in the tuple indicates if we should replace
        # the 2x2 stride with a dilated convolution instead
        replace_stride_with_dilation = [False, False, False]
    if len(replace_stride_with_dilation) != 3:
        raise ValueError("replace_stride_with_dilation should be None "
                         "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
    self.groups = groups
    self.base_width = width_per_group
    self.conv1 = vi.BayesConv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
                                bias=False, lv_init=lv_init, var_p=var_p)
    self.bn1 = norm_layer(self.inplanes)
    self.relu = nn.ReLU(inplace=True)
    self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
    self.layer1 = self._make_layer(block, 64, layers[0], lv_init=lv_init, var_p=var_p)
    self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
                                   dilate=replace_stride_with_dilation[0],
                                   lv_init=lv_init, var_p=var_p)
    self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
                                   dilate=replace_stride_with_dilation[1],
                                   lv_init=lv_init, var_p=var_p)
    self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
                                   dilate=replace_stride_with_dilation[2],
                                   lv_init=lv_init, var_p=var_p)
    self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
    self.fc = vi.BayesLinear(512 * block.expansion, num_classes,
                             lv_init=lv_init, var_p=var_p)

    for m in self.modules():
        if isinstance(m, vi.BayesConv2d):
            nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
        elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(m.weight, 1)
            nn.init.constant_(m.bias, 0)

    # Zero-initialize the last BN in each residual branch, so that the residual
    # branch starts with zeros and each residual block behaves like an identity.
    # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
    if zero_init_residual:
        for m in self.modules():
            if isinstance(m, Bottleneck):
                nn.init.constant_(m.bn3.weight, 0)
            elif isinstance(m, BasicBlock):
                nn.init.constant_(m.bn2.weight, 0)
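# Usage sketch (illustrative, not from the original source): a Bayesian ResNet-50, i.e.
# Bottleneck blocks with the standard [3, 4, 6, 3] stage configuration. Assumes `torch`
# is imported and the class keeps the usual torchvision-style forward().
model = ResNet(Bottleneck, [3, 4, 6, 3], num_classes=1000, lv_init=-1, var_p=-1)
logits = model(torch.randn(1, 3, 224, 224))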
def make_layers(cfg, batch_norm=False, lv_init=-5, var_p=-1):
    layers = list()
    in_channels = 3
    for v in cfg:
        if v == 'M':
            layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
        else:
            conv2d = vi.BayesConv2d(in_channels, v, kernel_size=3, padding=1,
                                    lv_init=lv_init, var_p=var_p)
            if batch_norm:
                layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
            else:
                layers += [conv2d, nn.ReLU(inplace=True)]
            in_channels = v
    return nn.Sequential(*layers)
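# Usage sketch (illustrative, not from the original source): building a VGG-16-style
# Bayesian feature extractor. 'M' entries become 2x2 max-pooling layers; integer entries
# become BayesConv2d (+ optional BatchNorm) + ReLU with that many output channels.
cfg_vgg16 = [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M',
             512, 512, 512, 'M', 512, 512, 512, 'M']
features = make_layers(cfg_vgg16, batch_norm=True, lv_init=-5, var_p=-1)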
def conv1x1(in_planes, out_planes, stride=1, lv_init=-1, var_p=-1):
    """1x1 convolution"""
    return vi.BayesConv2d(in_planes, out_planes, kernel_size=1, stride=stride,
                          bias=False, lv_init=lv_init, var_p=var_p)