import math

import torch.nn as nn
import torch.nn.functional as F

# BBBConv2d, BBBLinearFactorial, conv1x1, BasicBlock and Bottleneck are
# assumed to be defined in this repo's Bayesian layer / block modules.


class ResNet(nn.Module):

    def __init__(self, block, layers, input_channels, num_classes=10,
                 zero_init_residual=False, groups=1, width_per_group=64,
                 replace_stride_with_dilation=None, norm_layer=None):
        super(ResNet, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer

        self.inplanes = 64
        self.dilation = 1
        self.q_logvar_init = 0.05
        self.p_logvar_init = math.log(0.05)
        if replace_stride_with_dilation is None:
            # each element in the tuple indicates if we should replace
            # the 2x2 stride with a dilated convolution instead
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError("replace_stride_with_dilation should be None "
                             "or a 3-element tuple, got {}".format(
                                 replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        self.conv1 = BBBConv2d(self.q_logvar_init, self.p_logvar_init,
                               input_channels, self.inplanes, kernel_size=7,
                               stride=2, padding=3, bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
                                       dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
                                       dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
                                       dilate=replace_stride_with_dilation[2])
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.drop = nn.Dropout(p=0.5)
        self.classifier = BBBLinearFactorial(self.q_logvar_init,
                                             self.p_logvar_init,
                                             512 * block.expansion,
                                             num_classes, flow=False)

        # Register the feature-extraction stages so they appear in
        # self.modules() and the state dict.
        self.layers2 = nn.ModuleList([
            self.conv1, self.bn1, self.relu, self.maxpool,
            self.layer1, self.layer2, self.layer3, self.layer4,
            self.avgpool
        ])

        for m in self.modules():
            if isinstance(m, BBBConv2d):
                m.reset_parameters()
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

        # Zero-initialize the last BN in each residual branch, so that the
        # residual branch starts with zeros and each residual block behaves
        # like an identity. This improves the model by 0.2~0.3% according
        # to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)

    def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                conv1x1(self.inplanes, planes * block.expansion, stride),
                norm_layer(planes * block.expansion),
            )

        layers = []
        layers.append(
            block(self.inplanes, planes, stride, downsample, self.groups,
                  self.base_width, previous_dilation, norm_layer))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(
                block(self.inplanes, planes, groups=self.groups,
                      base_width=self.base_width, dilation=self.dilation,
                      norm_layer=norm_layer))

        return nn.Sequential(*layers)

    def probforward(self, x, dropout=False):
        """Forward pass that also accumulates the KL divergence of every
        Bayesian layer it passes through."""
        kl_total = 0
        out, kl = self.conv1.probforward(x)
        out = self.relu(self.bn1(out))
        kl_total += kl
        out, kl = self.pf(out, self.layer1)
        kl_total += kl
        out, kl = self.pf(out, self.layer2)
        kl_total += kl
        out, kl = self.pf(out, self.layer3)
        kl_total += kl
        out, kl = self.pf(out, self.layer4)
        kl_total += kl
        # Use the adaptive pooling defined above; the previous hard-coded
        # F.avg_pool2d(out, 4) fails once the feature map is smaller than 4x4.
        out = self.avgpool(out)
        out = out.view(out.size(0), -1)
        if dropout:
            # Apply dropout to the pooled features (the original applied it
            # to the network input and then discarded the result).
            out = self.drop(out)
        logits, kl = self.classifier.probforward(out)
        kl_total += kl
        return logits, kl_total

    def pf(self, x, layer):
        """Run x through a sequential layer, summing the KL contribution of
        every sub-module that exposes probforward."""
        kl = 0
        for l in layer:
            if hasattr(l, 'probforward') and callable(l.probforward):
                x, _kl = l.probforward(x)
                kl += _kl
            else:
                x = l(x)
        return x, kl
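
# --- Usage sketch (illustration only, not part of the original file) -------
# A minimal, hypothetical training step showing how the KL sum returned by
# probforward is typically folded into an ELBO-style loss. It assumes
# BasicBlock is this repo's residual block; the beta weighting of the KL
# term is a common convention, not something defined in this file.
def _example_resnet_training_step():
    import torch
    model = ResNet(BasicBlock, [2, 2, 2, 2], input_channels=3, num_classes=10)
    x = torch.randn(8, 3, 224, 224)       # dummy image batch
    y = torch.randint(0, 10, (8,))        # dummy labels
    logits, kl = model.probforward(x)
    beta = 1.0 / 50000                    # e.g. 1 / number of training samples
    loss = F.cross_entropy(logits, y) + beta * kl
    loss.backward()
    return loss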
class BBBAlexNet(nn.Module):
    '''The architecture of AlexNet with Bayesian layers.'''

    def __init__(self, outputs, inputs):
        super(BBBAlexNet, self).__init__()
        flow = False
        self.q_logvar_init = 0.05
        self.p_logvar_init = math.log(0.05)

        self.classifier = BBBLinearFactorial(self.q_logvar_init,
                                             self.p_logvar_init,
                                             1 * 1 * 128, outputs, flow=flow)

        self.conv1 = BBBConv2d(self.q_logvar_init, self.p_logvar_init,
                               inputs, 64, kernel_size=11, stride=4,
                               padding=5, flow=flow)
        self.soft1 = nn.Softplus()
        self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)

        self.conv2 = BBBConv2d(self.q_logvar_init, self.p_logvar_init,
                               64, 192, kernel_size=5, padding=2, flow=flow)
        self.soft2 = nn.Softplus()
        self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)

        self.conv3 = BBBConv2d(self.q_logvar_init, self.p_logvar_init,
                               192, 384, kernel_size=3, padding=1, flow=flow)
        self.soft3 = nn.Softplus()

        self.conv4 = BBBConv2d(self.q_logvar_init, self.p_logvar_init,
                               384, 256, kernel_size=3, padding=1, flow=flow)
        self.soft4 = nn.Softplus()

        self.conv5 = BBBConv2d(self.q_logvar_init, self.p_logvar_init,
                               256, 128, kernel_size=3, padding=1, flow=flow)
        self.soft5 = nn.Softplus()
        self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2)

        layers = [
            self.conv1, self.soft1, self.pool1,
            self.conv2, self.soft2, self.pool2,
            self.conv3, self.soft3,
            self.conv4, self.soft4,
            self.conv5, self.soft5, self.pool3
        ]
        self.layers = nn.ModuleList(layers)

    def probforward(self, x):
        """Forward pass that accumulates the KL divergence of every
        Bayesian layer, including the classifier."""
        kl = 0
        for layer in self.layers:
            if hasattr(layer, 'probforward') and callable(layer.probforward):
                x, _kl = layer.probforward(x)
                kl += _kl  # the original loop dropped this contribution
            else:
                x = layer(x)
        x = x.view(x.size(0), -1)
        logits, _kl = self.classifier.probforward(x)
        kl += _kl
        return logits, kl
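
# --- Usage sketch (illustration only, not part of the original file) -------
# A minimal sketch of Monte Carlo prediction with the Bayesian AlexNet.
# Assuming each probforward call draws a fresh weight sample (as in Bayes
# by Backprop), averaging the softmax outputs of several passes
# approximates the predictive distribution, and their spread gives a rough
# uncertainty estimate. 32x32 inputs match the 1 * 1 * 128 classifier input
# (32 -> 8 -> 4 -> 2 -> 1 through the stride-4 conv and the three pools).
def _example_alexnet_mc_prediction(num_samples=10):
    import torch
    net = BBBAlexNet(outputs=10, inputs=3)
    x = torch.randn(4, 3, 32, 32)         # dummy image batch
    with torch.no_grad():
        probs = torch.stack([F.softmax(net.probforward(x)[0], dim=1)
                             for _ in range(num_samples)])
    mean_probs = probs.mean(dim=0)        # predictive mean over samples
    uncertainty = probs.var(dim=0)        # per-class variance over samples
    return mean_probs, uncertainty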