import torch
import torch.nn as nn


class ResNet18_M(nn.Module):
    def __init__(self, block=creat_residual_block):
        super(ResNet18_M, self).__init__()
        self.block = block
        # Stem: a stride-2 conv, two stride-1 convs, and a max-pool, for an
        # overall stride of 4 before the residual stages.
        self.stage1 = nn.Sequential(
            conv_bn_relu(3, 32, stride=2, kszie=3, pad=3,
                         has_bn=True, has_relu=True, bias=False),
            conv_bn_relu(32, 32, stride=1, kszie=3, pad=1,
                         has_bn=True, has_relu=True, bias=False),
            conv_bn_relu(32, 32, stride=1, kszie=3, pad=1,
                         has_bn=True, has_relu=True, bias=False),
            nn.MaxPool2d(3, 2, 1, ceil_mode=False))
        # Four residual stages of two blocks each, mirroring ResNet-18's
        # layout: strides 1, 2, 2, 2.
        self.stage2 = self.__make_stage(self.block, 32, 64, 2, 1)
        self.stage3 = self.__make_stage(self.block, 64, 128, 2, 2)
        self.stage4 = self.__make_stage(self.block, 128, 256, 2, 2)
        self.stage5 = self.__make_stage(self.block, 256, 256, 2, 2)
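
    # `__make_stage` is called above but not included in this excerpt. A
    # minimal sketch of a plausible implementation, assuming the first block
    # of a stage carries the stride and the shortcut projection (this is an
    # assumption, not the original code):
    def __make_stage(self, block, inplanes, outplanes, num_blocks, stride):
        layers = [block(inplanes, outplanes, stride=stride, has_proj=True)]
        for _ in range(num_blocks - 1):
            layers.append(block(outplanes, outplanes, stride=1,
                                has_proj=False))
        return nn.Sequential(*layers)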

class creat_residual_block(nn.Module):
    def __init__(self, inplanes, outplanes, stride, has_proj=False):
        super(creat_residual_block, self).__init__()
        self.has_proj = has_proj
        if self.has_proj:
            # 1x1 projection on the shortcut path, needed when the stride or
            # channel count changes between input and output.
            self.proj_conv = conv_bn_relu(inplanes, outplanes, stride=stride,
                                          kszie=1, pad=0, has_bn=True,
                                          has_relu=False, bias=False)
        self.conv1 = conv_bn_relu(inplanes, outplanes, stride=stride,
                                  kszie=3, pad=1, has_bn=True, has_relu=True,
                                  bias=False)
        # The second conv has no activation; self.relu is applied after the
        # shortcut addition.
        self.conv2 = conv_bn_relu(outplanes, outplanes, stride=1, kszie=3,
                                  pad=1, has_bn=True, has_relu=False,
                                  bias=False)
        self.relu = nn.ReLU()
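
    # The forward pass is not part of this excerpt; a minimal sketch of the
    # standard basic-block wiring these layers imply (add shortcut, then
    # ReLU):
    def forward(self, x):
        identity = self.proj_conv(x) if self.has_proj else x
        out = self.conv2(self.conv1(x))
        return self.relu(out + identity)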

class TinyConv(nn.Module):
    def __init__(self):
        super(TinyConv, self).__init__()
        self.conv1 = conv_bn_relu(3, 32, stride=2, kszie=3, pad=0)
        self.pool1 = nn.MaxPool2d(3, stride=2, padding=0, ceil_mode=True)
        self.conv2a = conv_bn_relu(32, 64, stride=1, kszie=1, pad=0)
        # Depthwise 7x7 conv (groups == channels): a large receptive field
        # at low cost.
        self.conv2b = conv_bn_relu(64, 64, stride=2, kszie=7, pad=0,
                                   groups=64)
        self.conv3a = conv_bn_relu(64, 64, stride=1, kszie=3, pad=0)
        self.conv3b = conv_bn_relu(64, 64, stride=1, kszie=1, pad=0,
                                   has_relu=False)

        # Initialization: truncated-normal weights for conv/linear layers,
        # unit weight and zero bias for batch-norm layers.
        import scipy.stats as stats
        for m in self.modules():
            if isinstance(m, (nn.Conv2d, nn.Linear)):
                stddev = m.stddev if hasattr(m, 'stddev') else 0.1
                X = stats.truncnorm(-2, 2, scale=stddev)
                values = torch.as_tensor(X.rvs(m.weight.numel()),
                                         dtype=m.weight.dtype)
                values = values.view(m.weight.size())
                with torch.no_grad():
                    m.weight.copy_(values)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)
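
    # The forward pass is not shown in this excerpt; a minimal sketch,
    # assuming the layers are applied in declaration order:
    def forward(self, x):
        x = self.pool1(self.conv1(x))
        x = self.conv2b(self.conv2a(x))
        return self.conv3b(self.conv3a(x))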

class AlexNet(nn.Module):
    def __init__(self):
        super(AlexNet, self).__init__()
        self.conv1 = conv_bn_relu(3, 96, stride=2, kszie=11, pad=0)
        self.pool1 = nn.MaxPool2d(3, 2, 0, ceil_mode=True)
        self.conv2 = conv_bn_relu(96, 256, 1, 5, 0)
        self.pool2 = nn.MaxPool2d(3, 2, 0, ceil_mode=True)
        self.conv3 = conv_bn_relu(256, 384, 1, 3, 0)
        self.conv4 = conv_bn_relu(384, 384, 1, 3, 0)
        # No activation after the final conv.
        self.conv5 = conv_bn_relu(384, 256, 1, 3, 0, has_relu=False)
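
    # The forward pass is not included in this excerpt; a minimal sketch
    # following the declaration order:
    def forward(self, x):
        x = self.pool1(self.conv1(x))
        x = self.pool2(self.conv2(x))
        return self.conv5(self.conv4(self.conv3(x)))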

def _make_convs(self):
    head_width = self._hyper_params['head_width']

    # Feature adjustment: 3x3 convs without activation, one per branch
    # (r_*: regression, c_*: classification) and per input
    # (z: template kernel, x: search-region features).
    self.r_z_k = conv_bn_relu(head_width, head_width, 1, 3, 0,
                              has_relu=False)
    self.c_z_k = conv_bn_relu(head_width, head_width, 1, 3, 0,
                              has_relu=False)
    self.r_x = conv_bn_relu(head_width, head_width, 1, 3, 0,
                            has_relu=False)
    self.c_x = conv_bn_relu(head_width, head_width, 1, 3, 0,
                            has_relu=False)
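
# `conv_bn_relu` is used throughout but not defined in this excerpt. A
# minimal sketch consistent with the call sites above (positional order:
# in_channels, out_channels, stride, kszie, pad; the `kszie` spelling is
# kept to match the call sites). This is an assumed reconstruction, not
# the original helper:
class conv_bn_relu(nn.Module):
    def __init__(self, in_channels, out_channels, stride=1, kszie=3, pad=0,
                 has_bn=True, has_relu=True, bias=True, groups=1):
        super(conv_bn_relu, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=kszie,
                              stride=stride, padding=pad, groups=groups,
                              bias=bias)
        self.bn = nn.BatchNorm2d(out_channels) if has_bn else None
        self.relu = nn.ReLU() if has_relu else None

    def forward(self, x):
        x = self.conv(x)
        if self.bn is not None:
            x = self.bn(x)
        if self.relu is not None:
            x = self.relu(x)
        return x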