class FaceModel(nn.Module):
    """ResNet-18 backbone with a two-layer FC embedding head for face
    recognition trained with softmax + center loss.

    The 512-d embedding produced by ``fc2`` is stashed on ``self.features``
    on every forward pass so the training loop can apply the center loss;
    the ``centers`` buffer holds one 512-d center per class.
    """

    def __init__(self, num_classes, pretrained=False, **kwargs):
        """
        Args:
            num_classes (int): number of identity classes.
            pretrained (bool): load ImageNet resnet18 weights into the backbone.
            **kwargs: forwarded to the ResNet constructor.
        """
        super(FaceModel, self).__init__()
        self.model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
        if pretrained:
            parameters = model_zoo.load_url(model_urls['resnet18'])
            self.model.load_state_dict(parameters)
        # Replace the stock pooling/FC head. The flattened layer4 output is
        # 512*3*4, which implies a fixed non-square input size — TODO confirm
        # against the data pipeline.
        self.model.avgpool = None
        self.model.fc1 = nn.Linear(512 * 3 * 4, 512)
        self.model.fc2 = nn.Linear(512, 512)
        self.model.classifier = nn.Linear(512, num_classes)
        self.register_buffer('centers', torch.zeros(num_classes, 512))
        self.num_classes = num_classes

    def forward(self, x):
        """Return per-class log-probabilities of shape (batch, num_classes)."""
        x = self.model.conv1(x)
        x = self.model.bn1(x)
        x = self.model.relu(x)
        x = self.model.maxpool(x)
        x = self.model.layer1(x)
        x = self.model.layer2(x)
        x = self.model.layer3(x)
        x = self.model.layer4(x)
        x = x.view(x.size(0), -1)
        x = self.model.fc1(x)
        # feature for center loss
        x = self.model.fc2(x)
        self.features = x
        x = self.model.classifier(x)
        # Explicit dim=1: the implicit-dim form is deprecated and emits a
        # warning; class scores live along dim 1.
        return F.log_softmax(x, dim=1)
class MyNet(nn.Module):
    """Truncated ResNet (layer4 removed) with a single sigmoid output,
    e.g. for binary regression/classification on 8x8 layer3 feature maps.
    """

    def __init__(self, pretrained=False, arch=None, **kwargs):
        """
        Args:
            pretrained (bool): load ImageNet weights for the chosen arch.
            arch (str, optional): architecture name such as 'resnet50' or
                'resnet152' (anything after an underscore is ignored).
                Defaults to the global ``args.arch`` for backward
                compatibility — prefer passing it explicitly rather than
                relying on the module-level ``args``.
            **kwargs: forwarded to the ResNet constructor.
        """
        super(MyNet, self).__init__()
        # Fall back to the global CLI args only when no arch was given.
        netname = (arch if arch is not None else args.arch).split('_')[0]
        if netname == 'resnet152':
            self.model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
        else:
            self.model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
        if pretrained:
            parameters = model_zoo.load_url(model_urls[netname])
            self.model.load_state_dict(parameters)
        # Head operates on layer3 output (1024 channels); layer4 is skipped
        # in forward(), hence the 1024-wide FC and the 8x8 average pool.
        self.model.avgpool = nn.AvgPool2d(8)
        self.model.fc = nn.Linear(1024, 1)
        self.model.sig = nn.Sigmoid()

    def forward(self, x):
        """Return a sigmoid score of shape (batch, 1)."""
        x = self.model.conv1(x)
        x = self.model.bn1(x)
        x = self.model.relu(x)
        x = self.model.maxpool(x)
        x = self.model.layer1(x)
        x = self.model.layer2(x)
        x = self.model.layer3(x)
        # layer4 intentionally skipped — the head consumes layer3 features.
        x = self.model.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.model.fc(x)
        return self.model.sig(x)
class OriNet(nn.Module):
    """ResNet-18 backbone emitting ``bits_len`` log-probabilities, with a
    512-d embedding kept on ``self.features`` for a center-style loss.
    """

    def __init__(self, bits_len, pretrained=False, **kwargs):
        """
        Args:
            bits_len (int): number of output units / centers.
            pretrained (bool): load ImageNet resnet18 weights into the backbone.
            **kwargs: forwarded to the ResNet constructor.
        """
        super(OriNet, self).__init__()
        self.bits_len = bits_len
        self.resnetmodel = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
        if pretrained:
            parameters = model_zoo.load_url(model_urls['resnet18'])
            # BUG FIX: was `self.model.load_state_dict(...)` — this class has
            # no `model` attribute, so pretrained=True raised AttributeError.
            self.resnetmodel.load_state_dict(parameters)
        # Replace the stock head; 512*3*4 implies a fixed non-square input
        # size — TODO confirm against the data pipeline.
        self.resnetmodel.avgpool = None
        self.resnetmodel.fc1 = nn.Linear(512 * 3 * 4, 512)
        self.resnetmodel.fc2 = nn.Linear(512, 512)
        self.resnetmodel.classifier = nn.Linear(512, bits_len)
        self.register_buffer('centers', torch.zeros(bits_len, 512))

    def forward(self, input1):
        """Return log-probabilities of shape (batch, bits_len)."""
        input1 = self.resnetmodel.conv1(input1)
        input1 = self.resnetmodel.bn1(input1)
        input1 = self.resnetmodel.relu(input1)
        input1 = self.resnetmodel.maxpool(input1)
        input1 = self.resnetmodel.layer1(input1)
        input1 = self.resnetmodel.layer2(input1)
        input1 = self.resnetmodel.layer3(input1)
        input1 = self.resnetmodel.layer4(input1)
        # BUG FIX: was `input1.view(input1.size(), -1)` — view() needs the
        # batch dimension as an int, not the full torch.Size tuple.
        input1 = input1.view(input1.size(0), -1)
        input1 = self.resnetmodel.fc1(input1)
        # feature for center loss
        input1 = self.resnetmodel.fc2(input1)
        self.features = input1
        input1 = self.resnetmodel.classifier(input1)
        # Explicit dim=1 — the implicit-dim log_softmax form is deprecated.
        output1 = F.log_softmax(input1, dim=1)
        return output1
def __init__(self, in_channels=1, n_classes=2, stride=1, inplanes=64,
             pre_ssl=True, **kwargs):
    """Build a U-Net-style segmentation network on a ResNeXt50-32x4d encoder.

    Args:
        in_channels: number of input image channels; the stem conv is
            sliced (<3) or rebuilt (>3) to match.
        n_classes: number of output segmentation classes.
        stride: multiplier applied to the ASPP dilation rates.
        inplanes: stem width used when rebuilding conv1 for >3 channels.
        pre_ssl: load the semi-supervised ImageNet ResNeXt50 weights from
            torch.hub instead of a randomly initialized backbone.
    """
    super().__init__()
    store_attr('in_channels, n_classes, inplanes, pre_ssl')

    # --- encoder backbone ---
    if pre_ssl:
        backbone = torch.hub.load(
            'facebookresearch/semi-supervised-ImageNet1K-models',
            'resnext50_32x4d_ssl')
    else:
        backbone = ResNet(Bottleneck, [3, 4, 6, 3],
                          groups=32, width_per_group=4)
    backbone.conv1.padding = (0, 0)

    # Adapt the stem to the requested channel count.
    if in_channels < 3:
        with torch.no_grad():
            backbone.conv1.weight = nn.Parameter(
                backbone.conv1.weight[:, :in_channels, ...])
    elif in_channels > 3:
        backbone.conv1 = nn.Conv2d(in_channels, self.inplanes,
                                   kernel_size=7, stride=2, bias=False)

    self.enc0 = nn.Sequential(backbone.conv1, backbone.bn1,
                              nn.ReLU(inplace=True))
    self.enc1 = nn.Sequential(
        nn.MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1),
        backbone.layer1)       # 256 channels
    self.enc2 = backbone.layer2  # 512 channels
    self.enc3 = backbone.layer3  # 1024 channels
    self.enc4 = backbone.layer4  # 2048 channels

    # ASPP bottleneck with dilations scaled by `stride`.
    self.aspp = ASPP(
        2048, 256, out_c=512,
        dilations=[stride * 1, stride * 2, stride * 3, stride * 4])
    self.drop_aspp = nn.Dropout2d(0.5)

    # --- decoder ---
    self.dec4 = UnetBlock(512, 1024, 256, padding=0)
    self.dec3 = UnetBlock(256, 512, 128, padding=0)
    self.dec2 = UnetBlock(128, 256, 64, padding=0)
    self.dec1 = UnetBlock(64, 64, 32, padding=0)
    self.fpn = FPN([512, 256, 128, 64], [16] * 4)
    self.drop = nn.Dropout2d(0.1)
    self.final_conv = ConvLayer(32 + 16 * 4, n_classes, ks=1,
                                norm_type=None, act_cls=None)
class ResNeXt50(BaseNetwork):
    """ResNeXt50-32x4d classifier over groups of input views.

    ``forward`` reshapes the batch into ``num_inputs`` views per sample,
    extracts per-view global features and 7x7 spatial features, averages
    them across views, and classifies the pooled global feature.
    """

    def __init__(self, opt):
        """
        Args:
            opt: options object; reads ``num_classes``, ``output_nc``,
                ``crop_size`` and ``num_inputs``.
        """
        super(ResNeXt50, self).__init__()
        self.model = ResNet(Bottleneck, [3, 4, 6, 3], groups=32, width_per_group=4)
        self.opt = opt
        # self.reduced_id_dim = opt.reduced_id_dim
        self.conv1x1 = nn.Conv2d(512 * Bottleneck.expansion, 512, kernel_size=1, padding=0)
        self.fc = nn.Linear(512 * Bottleneck.expansion, opt.num_classes)
        # self.fc_pre = nn.Sequential(nn.Linear(512 * Bottleneck.expansion, self.reduced_id_dim), nn.ReLU())

    def load_pretrain(self):
        """Load ImageNet weights for the backbone (partial copy allowed)."""
        # BUG FIX: was `torch.load(model_urls['resnext50_32x4d'])`, but
        # model_urls holds download URLs (see the other loaders in this
        # file) and torch.load expects a local file path. Fetch through the
        # hub cache instead.
        check_point = load_state_dict_from_url(model_urls['resnext50_32x4d'])
        util.copy_state_dict(check_point, self.model)

    def forward_feature(self, input):
        """Return (global 2048-d feature, 512-channel spatial feature map)."""
        x = self.model.conv1(input)
        x = self.model.bn1(x)
        x = self.model.relu(x)
        x = self.model.maxpool(x)
        x = self.model.layer1(x)
        x = self.model.layer2(x)
        x = self.model.layer3(x)
        x = self.model.layer4(x)
        net = self.model.avgpool(x)
        net = torch.flatten(net, 1)
        x = self.conv1x1(x)
        # x = self.fc_pre(x)
        return net, x

    def forward(self, input):
        """Classify a batch of grouped views.

        Returns:
            [net, x]: view-averaged global feature (batch, 2048) and
                spatial feature (batch, 512, 7, 7).
            cls_scores: logits of shape (batch, num_classes).
        """
        # Flatten the view dimension into the batch for the backbone pass.
        input_batch = input.view(-1, self.opt.output_nc, self.opt.crop_size, self.opt.crop_size)
        net, x = self.forward_feature(input_batch)
        net = net.view(-1, self.opt.num_inputs, 512 * Bottleneck.expansion)
        x = F.adaptive_avg_pool2d(x, (7, 7))
        x = x.view(-1, self.opt.num_inputs, 512, 7, 7)
        # Average features across the views of each sample.
        net = torch.mean(net, 1)
        x = torch.mean(x, 1)
        cls_scores = self.fc(net)
        return [net, x], cls_scores
def get_efficientnet_encoder(in_channels, out_channels=1024, layers=None,
                             pretrained=None, norm_layer=nn.Identity):
    # TODO: despite the name, this builds a ResNet-Bottleneck encoder.
    """Build a truncated Bottleneck-ResNet feature encoder.

    Args:
        in_channels: input channels; conv1 is rebuilt (with kaiming init)
            when this differs from 3.
        out_channels: 1024 truncates after layer3, 2048 after layer4.
        layers: block counts per stage; defaults to [3, 4, 6, 3].
        pretrained: key into ``model_urls`` for weights, or falsy to skip.
        norm_layer: normalization layer class passed to ResNet.

    Returns:
        A StitchedModel ending in global average pooling and flatten.

    Raises:
        RuntimeError: unknown ``pretrained`` key or unsupported
            ``out_channels``.
    """
    if layers is None:
        layers = [3, 4, 6, 3]
    encoder = ResNet(Bottleneck, layers, norm_layer=norm_layer)

    if pretrained:
        if pretrained not in model_urls:
            raise RuntimeError('No pretrained weights for this model')
        state_dict = load_state_dict_from_url(model_urls[pretrained])
        # strict=False: tolerates the truncated head / replaced stem.
        encoder.load_state_dict(state_dict, strict=False)

    # Rebuild the stem for a non-RGB input (discards pretrained stem weights).
    if in_channels != 3:
        encoder.conv1 = nn.Conv2d(in_channels, 64, kernel_size=7, stride=2,
                                  padding=3, bias=False)
        nn.init.kaiming_normal_(encoder.conv1.weight, mode='fan_out',
                                nonlinearity='relu')

    # Map requested width to how many trailing ResNet children to drop.
    truncation = {1024: -3, 2048: -2}
    if out_channels not in truncation:
        raise RuntimeError('Invalid out_channels value')
    end_layer = truncation[out_channels]

    return StitchedModel((encoder, 0, end_layer),
                         nn.AdaptiveAvgPool2d(1),
                         nn.Flatten(1))
def custom_resnet(layers, pretrained=False, progress=True, arch='resnet',
                  in_channels=200, **kwargs):
    """
    Builds custom ResNet backbone with a configurable-width input stem.

    Arguments:
        layers (list): configuration of layer-blocks (Bottlenecks)
        pretrained (bool): If True, returns a model pre-trained on ImageNet dataset
        progress (bool): If True, shows progress bar while downloading model
        arch (str): give architecture name if pretrained=True to fetch model params
        in_channels (int): number of input channels for the replacement
            conv1 stem (default 200, preserving the previous hard-coded
            behavior for hyperspectral-style inputs)

    Returns:
        ResNet: the constructed model with conv1 replaced.
    """
    model = ResNet(Bottleneck, layers, **kwargs)
    if pretrained:
        state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)
        model.load_state_dict(state_dict)
    # Replace the stem AFTER loading weights so the state dict still matches;
    # the new stem is freshly initialized (kaiming, matching the encoder
    # builder elsewhere in this file) since pretrained weights expect 3 channels.
    model.conv1 = nn.Conv2d(in_channels, 64, kernel_size=7, stride=2,
                            padding=3, bias=False)
    nn.init.kaiming_normal_(model.conv1.weight, mode='fan_out',
                            nonlinearity='relu')
    return model
def resnet50():
    """Return a 10-class ResNet-50 with a CIFAR-style 3x3 stride-1 stem."""
    net = ResNet(Bottleneck, [3, 4, 6, 3], num_classes=10)
    # Swap the ImageNet 7x7/stride-2 stem for one suited to small images.
    net.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1,
                          bias=False)
    return net
def resnet18():
    """Return a 10-class ResNet-18 with a CIFAR-style 3x3 stride-1 stem."""
    net = ResNet(BasicBlock, [2, 2, 2, 2], num_classes=10)
    # Swap the ImageNet 7x7/stride-2 stem for one suited to small images.
    net.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1,
                          bias=False)
    return net