示例#1
0
    def __init__(self, load_weights=False):
        """Build the CSRNet front/back ends and optionally seed the frontend.

        Args:
            load_weights: when False, the frontend is initialised from the
                torchvision ``vgg16_bn`` ImageNet checkpoint.
        """
        super(CSRNet, self).__init__()
        # VGG-16 feature layers up to the third max-pool.
        self.frontend_cfg = [
            64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512
        ]
        # Dilated backend keeps the spatial resolution of the feature map.
        self.backend_cfg = [512, 512, 512, 256, 128, 64]
        self.frontend = make_layers(self.frontend_cfg, batch_norm=True)
        self.backend = make_layers(self.backend_cfg,
                                   in_channels=512,
                                   batch_norm=True,
                                   dilation=2)
        # 1x1 projection down to the single-channel density map.
        self.output_layer = nn.Conv2d(in_channels=64,
                                      out_channels=1,
                                      kernel_size=1)

        if not load_weights:
            pretrained = vgg16_bn(pretrained=True)
            self._initialize_weight()
            source = pretrained.state_dict()
            # Frontend keys mirror torchvision's ``features.*`` layout.
            mapped = OrderedDict(
                (key, source['features.' + key])
                for key in self.frontend.state_dict()
            )
            self.frontend.load_state_dict(mapped)
示例#2
0
    def __init__(self, num_classes, batch_norm=False):
        """Multi-branch VGG feature extractor followed by an MLP classifier.

        Args:
            num_classes: size of the final output layer.
            batch_norm: accepted for API compatibility; every branch is
                built with batch normalisation regardless.
        """
        super(CervixClassificationModel, self).__init__()

        # One VGG16 ('D') branch plus four VGG11 ('A') branches.
        branch_cfgs = ['D', 'A', 'A', 'A', 'A']
        self.features = [
            vgg.make_layers(vgg.cfg[key], batch_norm=True)
            for key in branch_cfgs
        ]
        # Expose each branch as a named attribute so its parameters register.
        self.f0, self.f1, self.f2, self.f3, self.f4 = self.features
        # Classifier over the concatenated branch features.
        self.classifier = torch.nn.Sequential(
            torch.nn.Linear(512 * 7 * 7 + 2048 * 4, 4096 * 2),
            torch.nn.ReLU(inplace=True),
            torch.nn.Dropout(p=0.5),
            torch.nn.Linear(4096 * 2, 4096),
            torch.nn.ReLU(inplace=True),
            torch.nn.Dropout(p=0.5),
            torch.nn.Linear(4096, num_classes))
示例#3
0
def makeextractor(cfg,
                  batch_norm,
                  progress,
                  pretrained=True,
                  arch='vgg16_bn',
                  **kwargs):
    """Build a VGG backbone split at the first max-pool stage.

    Args:
        cfg: key into the ``cfgs`` table selecting a VGG layer configuration.
        batch_norm: insert BatchNorm after each convolution when True.
        progress: show a download progress bar for pretrained weights.
        pretrained: load ImageNet weights (non-strict) when True.
        arch: key into ``model_urls`` for the checkpoint to fetch.

    Returns:
        The constructed ``vggbackbone`` model.
    """
    if pretrained:
        # Pretrained weights make random initialisation redundant.
        kwargs['init_weights'] = False
    # BUG FIX: the original popped from ``cfg`` (the lookup *key*) instead of
    # the layer list, and would also have mutated the shared ``cfgs`` table.
    layersetup = list(cfgs[cfg])
    # Split the configuration at (and including) the first 'M' (max-pool).
    cfg_presub = []
    while layersetup:
        entry = layersetup.pop(0)
        cfg_presub.append(entry)
        if entry == 'M':
            break
    cfg_postsub = layersetup
    layerlist_presub = refvgg.make_layers(cfg_presub, batch_norm=batch_norm)
    layerlist_postsub = refvgg.make_layers(cfg_postsub, batch_norm=batch_norm)

    # NOTE(review): the original fed the two already-built layer modules back
    # into make_layers, which expects a config list; pass the two stages to
    # the backbone directly instead — confirm against vggbackbone's signature.
    model = vggbackbone(layerlist_presub, layerlist_postsub, **kwargs)
    if pretrained:
        state_dict = hub.load_state_dict_from_url(model_urls[arch],
                                                  progress=progress)
        # strict=False: the split backbone does not carry the classifier keys.
        model.load_state_dict(state_dict, strict=False)
    return model
示例#4
0
def vgg_face(pretrained=False, **kwargs):
    """VGG-16 classifier for the 2622-identity VGG-Face dataset.

    Args:
        pretrained: load the converted VGG-Face weights when True.

    Returns:
        A ``vgg.VGG`` instance with ``num_classes=2622``.
    """
    if pretrained:
        # Pretrained weights make random initialisation redundant.
        kwargs['init_weights'] = False
    # BUG FIX: the configuration table was renamed ``cfg`` -> ``cfgs`` across
    # torchvision versions; the original keyed this choice on CUDA
    # availability, which is unrelated to which attribute exists.  Resolve
    # whichever this installation provides instead.
    cfg_table = getattr(vgg, 'cfgs', None)
    if cfg_table is None:
        cfg_table = vgg.cfg
    model = vgg.VGG(vgg.make_layers(cfg_table['D'], batch_norm=False),
                    num_classes=2622,
                    **kwargs)
    if pretrained:
        model.load_state_dict(vgg_face_state_dict())
    return model
    def __init__(self):
        """VGG-16-BN adapted to 10-class STL-10 with 3x3 final feature maps."""
        super(STL10_VGG, self).__init__()

        # Input normalisation constants based on the ImageNet statistics.
        self.offset = 0.44900
        self.multiplier = 4.42477

        # Standard VGG16 configuration ('M' marks a max-pool layer).
        self.cfg = [
            64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M',
            512, 512, 512, 'M'
        ]
        self.model = VGG.VGG(VGG.make_layers(self.cfg, batch_norm=True),
                             num_classes=10)
        # Replace the stock classifier: these inputs yield 3x3x512 feature
        # maps rather than ImageNet's 7x7x512.
        head = [nn.Linear(512 * 3 * 3, 4096), nn.ReLU(True), nn.Dropout()]
        head += [nn.Linear(4096, 4096), nn.ReLU(True), nn.Dropout()]
        head.append(nn.Linear(4096, 10))
        self.model.classifier = nn.Sequential(*head)
        self.model._initialize_weights()
示例#6
0
    def __init__(self, in_channels: int = 1, base_channel_size: int = 64, bilinear=True, depth=4,
                 second_encoder='vgg16'):
        """U-Net whose bottleneck is fused with features from a VGG encoder.

        Args:
            in_channels: channels of the input image.
            base_channel_size: channel width of the first encoder stage.
            bilinear: upsample bilinearly instead of with transposed convs.
            depth: number of down/up stages.
            second_encoder: name tag recorded for the auxiliary encoder.
        """
        super(UNetDualEncoder, self).__init__()
        self.name = 'UNet'
        self.n_channels = in_channels
        self.base_channel_size = base_channel_size
        self.bilinear = bilinear
        self.depth = depth
        self.second_encoder_name = second_encoder
        # Auxiliary VGG-16 ('D') encoder; weights presumably loaded later.
        self.second_encoder = VGG(make_layers(cfgs['D'], False), init_weights=False)

        self.inc = DoubleConv(in_channels, base_channel_size)
        self.downs = nn.ModuleList()
        self.ups = nn.ModuleList()
        # Fuse 512-channel VGG features with the 1024-channel bottleneck.
        self.fuse = nn.Sequential(nn.Conv2d(512 + 1024, 1024, kernel_size=1), nn.ReLU(inplace=True),
                                  DoubleConv(1024, 1024))
        self.pad_to = PadToX(32)
        factor = 2 if bilinear else 1
        # Encoder: 64 -> 128 -> 256 -> 512 -> 1024 (last stage shrunk by `factor`).
        width = base_channel_size
        for _ in range(self.depth - 1):
            self.downs.append(Down(width, width * 2))
            width *= 2
        self.downs.append(Down(width, width * 2 // factor))
        # Decoder mirrors the encoder back down to `base_channel_size`.
        for _ in range(self.depth - 1):
            self.ups.append(Up(width * 2, width // factor, bilinear))
            width //= 2
        self.ups.append(Up(width * 2, base_channel_size, bilinear))
示例#7
0
    def __init__(self, config, layer_num=16):
        """Fully convolutional VGG head producing keypoint heatmaps.

        Args:
            config: experiment configuration; ``config.num_keypoints`` sets
                the number of output channels.
            layer_num: 16 selects the full VGG-16 feature stack (stride 16);
                any other value keeps only the first three stages (stride 8).
        """
        super(VGG_FCN, self).__init__()
        self.config = config
        if layer_num == 16:
            vgg16_cfg = [
                64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512,
                'M', 512, 512, 512
            ]
            scale = 16
        else:
            vgg16_cfg = [
                64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512
            ]
            scale = 8
        vgg16_url = 'https://download.pytorch.org/models/vgg16-397923af.pth'
        self.feature = vgg.make_layers(vgg16_cfg, batch_norm=False)
        feature_dict = self.feature.state_dict()
        pretrained_dict = model_zoo.load_url(vgg16_url)
        # Strip the 'features.' prefix (9 chars) from checkpoint keys and keep
        # only as many entries as this (possibly truncated) stack needs.
        pretrained_dict = {
            k[9:]: v
            for idx, (k, v) in enumerate(pretrained_dict.items())
            if idx < len(feature_dict.keys())
        }
        # BUG FIX: ``state_dict()`` returns a plain OrderedDict, which has no
        # ``update_by_rule`` method; use the standard ``update``.
        feature_dict.update(pretrained_dict)

        self.feature.load_state_dict(feature_dict)
        # Single learned upsampling back toward input resolution.
        self.deconv = nn.Sequential(
            nn.ConvTranspose2d(512,
                               self.config.num_keypoints,
                               kernel_size=2 * scale,
                               stride=scale,
                               padding=scale // 2))
示例#8
0
    def __init__(self, opt):
        """VGG captioning backbone split into two feature stages plus an FC head.

        Args:
            opt: options namespace; reads ``cnn_model`` (VGG variant key),
                ``cnn_start_from`` (optional checkpoint path), ``cnn_fc_feat``
                ('fc6'/'fc7'/'fc8'), and ``logger``.
        """
        spec = opt.cnn_model
        self.opt = opt
        flag = False
        super(VggNetModel, self).__init__(make_layers(model_configs[spec]))
        if vars(opt).get('cnn_start_from', None) is not None:
            flag = True # Reorder layers before loading
            #  self.load_state_dict(torch.load(osp.join(opt.start_from, 'model-cnn.pth')))
        else:
            opt.logger.debug('Setting VGG weigths from the models zoo')
            self.load_state_dict(model_zoo.load_url(model_urls[spec]))
        opt.logger.warn('Setting the fc feature as %s' % opt.cnn_fc_feat)
        # Number of leading classifier sub-modules to keep for the FC feature;
        # presumably cuts the standard VGG classifier after fc6/fc7/fc8 — TODO confirm.
        if opt.cnn_fc_feat == 'fc7':
            self.keepdim_fc = 6
        elif opt.cnn_fc_feat == 'fc6':
            self.keepdim_fc = 3
        elif opt.cnn_fc_feat == 'fc8':
            self.keepdim_fc = 7

        # Split point inside ``features`` for the attention stage — magic
        # index; NOTE(review): verify 30 matches the chosen ``spec`` layout.
        self.keepdim_att = 30
        #  print 'PRE:', self._modules
        # Reassemble:
        self.features1 = nn.Sequential(*list(self.features._modules.values())[:self.keepdim_att])
        self.features2 = nn.Sequential(*list(self.features._modules.values())[self.keepdim_att:])
        self.fc = nn.Sequential(*list(self.classifier._modules.values())[:self.keepdim_fc])
        # Blank out the originals so only the reassembled stages hold parameters.
        self.features = nn.Module()
        self.classifier = nn.Module()
        self.norm2 = L2Norm(n_channels=512, scale=True)
        if flag:
            # Checkpoint keys match the reassembled layout, so load after the split.
            self.load_state_dict(torch.load(opt.cnn_start_from))
        self.to_finetune = self._modules.values()
        self.keep_asis = []
示例#9
0
    def __init__(self):
        """SSD-style multi-scale detector head on top of VGG-16 features."""
        super().__init__(make_layers(cfg['D']))

        # Index 30 of the built VGG 'D' feature stack.
        self.pool5 = self.features[30]
        # fc6/fc7 re-expressed as convolutions.
        self.conv_fc6 = self._conv_block(512, 1024)
        self.conv_fc7 = self._conv_block(1024, 1024, kernel=1)

        # Two extra strided stages for the coarsest detection scales.
        self.conv6_1 = self._conv_block(1024, 256, kernel=1)
        self.conv6_2 = self._conv_block(256, 512, stride=2)

        self.conv7_1 = self._conv_block(512, 128, kernel=1)
        self.conv7_2 = self._conv_block(128, 256, stride=2)

        # Learned rescaling of the shallower feature maps.
        self.norm3_3 = Scale(10)
        self.norm4_3 = Scale(8)
        self.norm5_3 = Scale(5)

        # (source suffix, input channels) for each detection scale.
        sources = [('3_3', 256), ('4_3', 512), ('5_3', 512),
                   ('_fc7', 1024), ('6_2', 512), ('7_2', 256)]
        # 4-channel box-regression head per source.
        for suffix, channels in sources:
            setattr(self, 'predict%s_reg' % suffix,
                    nn.Conv2d(channels, 4, kernel_size=3, padding=1))
        # 2-channel classification head per source.
        for suffix, channels in sources:
            setattr(self, 'predict%s_cls' % suffix,
                    nn.Conv2d(channels, 2, kernel_size=3, padding=1))

        # Gaussian init for every convolution, including the VGG trunk.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                m.weight.data.normal_(0.0, 0.02)
示例#10
0
def smallCNNnp(**kwargs):
    """Build a small pooling-free CNN.

    The final filter count is reduced to 8 so the model fits in memory.
    """
    layer_cfg = [64, 64, 128, 64, 8]
    kwargs['init_weights'] = True
    return smallCNN_NoPooling(make_layers(layer_cfg, batch_norm=False), **kwargs)
示例#11
0
 def __init__(self, config, batch_norm=False, *args, **kwargs):
     """Feature-extractor VGG: build the trunk, then drop the classifier.

     Args:
         config: VGG layer configuration list.
         batch_norm: insert BatchNorm after each convolution when True.
     """
     trunk = make_layers(config, batch_norm=batch_norm)
     super().__init__(trunk, *args, **kwargs)
     self.pretrained = False
     # The classifier head is never used; free its parameters.
     del self.classifier
 def __init__(self, config, batch_norm=False, *args, **kwargs):
     """Build the convolutional VGG trunk only; the classifier is discarded.

     Args:
         config: VGG layer configuration list.
         batch_norm: insert BatchNorm after each convolution when True.
     """
     super().__init__(
         make_layers(config, batch_norm=batch_norm), *args, **kwargs)
     self.pretrained = False
     # Encoder use only — remove the fully connected head.
     del self.classifier
示例#13
0
def vgg_face(pretrained=False, **kwargs):
    """VGG-16 classifier over the 2622 VGG-Face identities.

    Args:
        pretrained: load the converted VGG-Face weights when True.

    Returns:
        A ``vgg.VGG`` instance with ``num_classes=2622``.
    """
    if pretrained:
        # Pretrained weights make random initialisation redundant.
        kwargs['init_weights'] = False
    model = vgg.VGG(vgg.make_layers(vgg.cfgs['D'], batch_norm=False), num_classes=2622, **kwargs)
    if pretrained:
        # Removed leftover debug print ("111111111111") from the original.
        model.load_state_dict(vgg_face_state_dict())
    return model
 def __init__(self,
              arch: str,
              num_classes: int = 1000,
              **kwargs: Any) -> None:
     """ArcFace-style VGG with a bias-free final linear layer.

     Args:
         arch: architecture key, mapped through ``arch_to_cfg`` and checked
             against the ``batch_norm`` collection of BN-enabled variants.
         num_classes: size of the final projection.
     """
     use_bn = arch in batch_norm
     trunk = vgg.make_layers(cfgs[arch_to_cfg[arch]], batch_norm=use_bn)
     super(ArcVGG, self).__init__(trunk, num_classes, **kwargs)
     # Margin-based losses expect a bias-free final projection.
     self.classifier[-1] = torch.nn.Linear(4096, num_classes, bias=False)
示例#15
0
 def __init__(self, num_classes, layer_cfg='D'):
     """VGG whose final classifier layer is resized to ``num_classes``.

     Args:
         num_classes: number of output classes.
         layer_cfg: key into ``cfgs`` selecting the VGG variant.
     """
     super(VGGLP,
           self).__init__(make_layers(cfgs[layer_cfg], batch_norm=True))
     num_features = self.classifier[6].in_features
     # Swap only the last linear layer; keep the rest of the head intact.
     head = list(self.classifier.children())
     head[-1] = nn.Linear(num_features, num_classes)
     self.classifier = nn.Sequential(*head)
示例#16
0
File: vgg.py  Project: MoozIiSP/DLToolbox
def _vgg(index, arch, cfg, batch_norm, pretrained, progress, **kwargs):
    """Construct a VGGBackbone, optionally seeded with ImageNet weights."""
    if pretrained:
        # Pretrained weights make random initialisation redundant.
        kwargs['init_weights'] = False
    trunk = make_layers(cfgs[cfg], batch_norm=batch_norm)
    model = VGGBackbone(index, trunk, **kwargs)
    if pretrained:
        # strict=False: the backbone drops the checkpoint's classifier keys.
        weights = load_state_dict_from_url(model_urls[arch], progress=progress)
        model.load_state_dict(weights, strict=False)
    return model
示例#17
0
def my_vgg(arch, cfg, batch_norm, pretrained, progress, **kwargs):
    """Build a MyVGG model, optionally loading ImageNet weights."""
    if pretrained:
        # Pretrained weights make random initialisation redundant.
        kwargs['init_weights'] = False
    trunk = vgg.make_layers(vgg.cfgs[cfg], batch_norm=batch_norm)
    model = MyVGG(trunk, **kwargs)
    if pretrained:
        weights = models_utils.load_state_dict_from_url(
            vgg.model_urls[arch], progress=progress)
        model.load_state_dict(weights)
    return model
示例#18
0
    def __init__(self, batch_norm=True, pretrained=True):
        """Wrap the convolutional part of VGG-16 as a feature extractor.

        Args:
            batch_norm: build (and load) the BN variant when True.
            pretrained: load matching ImageNet weights when True.
        """
        super(Extractor, self).__init__()
        backbone = VGG(make_layers(cfg['D'], batch_norm))

        if pretrained:
            # Checkpoint must match the BN/non-BN build.
            url_key = 'vgg16_bn' if batch_norm else 'vgg16'
            backbone.load_state_dict(model_zoo.load_url(model_urls[url_key]))

        # Keep only the feature layers; the classifier is discarded.
        self.features = backbone.features
示例#19
0
 def __init__(self,
              out_channels,
              config,
              batch_norm=False,
              depth=5,
              **kwargs):
     """VGG trunk used as an encoder (classifier removed).

     Args:
         out_channels: channel counts exposed to downstream consumers.
         config: VGG layer configuration list.
         batch_norm: insert BatchNorm after each convolution when True.
         depth: number of encoder stages exposed downstream.
     """
     super().__init__(make_layers(config, batch_norm=batch_norm), **kwargs)
     self._out_channels = out_channels
     self._depth = depth
     # RGB input.
     self._in_channels = 3
     # Encoder use only: free the fully connected head.
     del self.classifier
示例#20
0
def vgg19_bn(pretrained=False, model_dir=None, **kwargs):
    """VGG 19-layer model (configuration 'E') with batch normalization
    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        model_dir (None, str): If not None, specifies the directory for the model pickle.
    """
    if pretrained:
        # Pretrained weights make random initialisation redundant.
        kwargs['init_weights'] = False
    trunk = vgg.make_layers(vgg.cfg['E'], batch_norm=True)
    model = vgg.VGG(trunk, **kwargs)
    if pretrained:
        weights = model_zoo.load_url(vgg.model_urls['vgg19_bn'],
                                     model_dir=model_dir)
        model.load_state_dict(weights)
    return model
示例#21
0
    def __init__(self, num_classes=1000, init_weights=True):
        """VGG-16 trunk with a compact log-softmax classifier head.

        Args:
            num_classes: accepted for API compatibility; the head size is
                actually taken from the global ``cat_to_name`` mapping.
            init_weights: re-run weight initialisation when True.
        """
        super().__init__(make_layers(cfg['D']))

        # Small classifier emitting log-probabilities (pairs with NLLLoss).
        self.classifier = nn.Sequential(nn.Linear(512 * 7 * 7, 256),
                                        nn.ReLU(),
                                        nn.Dropout(0.2),
                                        nn.Linear(256, len(cat_to_name)),
                                        nn.LogSoftmax(dim=1))

        if init_weights:
            self._initialize_weights()
def feature_vgg16(pretrained=True, **kwargs):
    """VGG 16-layer model (configuration "D")

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    if pretrained:
        # Pretrained weights make random initialisation redundant.
        kwargs['init_weights'] = False
    model = FeatureVGG(make_layers(cfgs['D']), **kwargs)
    if pretrained:
        # Copy weights from a stock torchvision VGG-16.
        reference = models.vgg16(pretrained=True)
        model.load_state_dict(reference.state_dict())
    return model
示例#23
0
def vgg19(vgg_path, pretrained=False, **kwargs):
    """VGG 19-layer model (configuration "E")

    Args:
        vgg_path: path to a locally saved checkpoint.
        pretrained (bool): If True, loads the checkpoint at ``vgg_path``.
    """
    cfg_E = [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M']
    model = vgg.VGG(vgg.make_layers(cfg_E), **kwargs)
    if pretrained:
        # Load from disk rather than the model zoo; map to CPU for portability.
        checkpoint = torch.load(vgg_path, map_location='cpu')
        model.load_state_dict(checkpoint)
    return model
    def __init__(self, num_classes, pretrained=True, **kwargs):
        """VGG-11-BN with its final classifier layer resized to ``num_classes``.

        Args:
            num_classes: number of output classes.
            pretrained: load the torchvision vgg11_bn checkpoint when True.
        """
        super().__init__(make_layers(cfgs['A'], batch_norm=True, **kwargs))
        if pretrained:
            url = 'https://download.pytorch.org/models/vgg11_bn-6002323d.pth'
            self.load_state_dict(load_state_dict_from_url(url, progress=True))

        # Convenience handle running the conv trunk plus average pool.
        self.feature_extractor = nn.Sequential(self.features, self.avgpool)
        in_features = self.classifier[6].in_features
        self.classifier[6] = nn.Linear(in_features, num_classes)
示例#25
0
 def __init__(self,
              num_classes,
              weights_path=None,
              progress=False,
              frozen_layers=None):
     """VGG-19 fine-tuning wrapper.

     Without ``weights_path`` the ImageNet checkpoint is loaded and the head
     resized to ``num_classes``; otherwise the given checkpoint (already
     shaped for ``num_classes``) is loaded as-is.
     """
     if weights_path is None:
         # Start from the 1000-class ImageNet model, then swap the head.
         super().__init__(make_layers(cfgs['E'], batch_norm=False),
                          num_classes=1000,
                          init_weights=False)
         state_dict = load_state_dict_from_url(model_urls['vgg19'],
                                               progress=progress)
         self.load_state_dict(state_dict)
         self.classifier[6] = nn.Linear(4096, num_classes)
         if frozen_layers is not None:
             if not 1 <= frozen_layers <= 16:
                 raise ValueError('frozen_layers must be between 1 and 16')
             self._freeze_layers(frozen_layers)
     else:
         print('Using given weights instead of ImageNet weights')
         super().__init__(make_layers(cfgs['E'], batch_norm=False),
                          num_classes=num_classes,
                          init_weights=False)
         self.load_state_dict(torch.load(weights_path))
示例#26
0
def vgg11_bn(pretrained=False, requires_grad=False, **kwargs):
    """VGG 11-layer model (configuration "A") with batch normalization

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        requires_grad (bool): whether the pretrained parameters stay trainable
    """
    if pretrained:
        kwargs['init_weights'] = False
    model = VGG(make_layers(cfg['A'], batch_norm=True), **kwargs)
    if pretrained:
        pretrained_state_dict = model_zoo.load_url(model_urls['vgg11_bn'])
        pretrained_state_dict = extract_feature_state_dict(
            pretrained_state_dict, model)
        model.load_state_dict(pretrained_state_dict)
        # BUG FIX: ``model.requires_grad = ...`` only creates an inert
        # attribute on the Module; flip the flag on each parameter instead.
        for param in model.parameters():
            param.requires_grad = requires_grad
    return model
示例#27
0
def vgg16(pretrained=False, **kwargs):
    """VGG-16-BN whose final classifier layer lives under ``classifier_final``."""
    if pretrained:
        # Pretrained weights make random initialisation redundant.
        kwargs['init_weights'] = False
    model = VGG16Feature(vgg.make_layers(vgg.cfgs['D'], batch_norm=True),
                         **kwargs)

    if pretrained:
        # Stitching to split the features from predictions
        state_dict = model_zoo.load_url(vgg.model_urls['vgg16_bn'])
        # Move classifier.6.* to classifier_final.0.* to match the split model.
        for suffix in ('weight', 'bias'):
            state_dict['classifier_final.0.' + suffix] = state_dict.pop(
                'classifier.6.' + suffix)
        model.load_state_dict(state_dict)
    return model
示例#28
0
def vgg13(pretrained=False, requires_grad=False, **kwargs):
    """VGG 13-layer model (configuration "B")

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        requires_grad (bool): whether the pretrained parameters stay trainable
    """
    if pretrained:
        kwargs['init_weights'] = False
    model = VGG(make_layers(cfg['B']), **kwargs)
    if pretrained:
        pretrained_state_dict = model_zoo.load_url(model_urls['vgg13'])
        pretrained_state_dict = extract_feature_state_dict(
            pretrained_state_dict, model)
        model.load_state_dict(pretrained_state_dict)
        # BUG FIX: ``model.requires_grad = ...`` only creates an inert
        # attribute on the Module; flip the flag on each parameter instead.
        for param in model.parameters():
            param.requires_grad = requires_grad
    return model
示例#29
0
def load_vgg_from_local(arch='vgg19',
                        cfg='E',
                        batch_norm=False,
                        pretrained=True,
                        vgg_dir=None,
                        parallel=True,
                        **kwargs):
    """Build a VGG, load weights from VGG_URL, and move it to the GPU in eval mode.

    NOTE(review): weights are always fetched regardless of ``pretrained``;
    the cache directory is hard-coded; ``arch`` and ``vgg_dir`` are unused.
    """
    model = vgglib.VGG(vgglib.make_layers(cfgs[cfg], batch_norm=batch_norm),
                       **kwargs)
    weights = model_zoo.load_url(url=VGG_URL,
                                 model_dir='/gpub/temp/imagenet2012/hdf5')
    model.load_state_dict(weights)
    model = model.eval().cuda()
    if parallel:
        print("Parallel VGG model...")
        model = torch.nn.DataParallel(model)
    return model
	def __init__(self, pretrained=True, device='cuda'):
		"""L1 perceptual loss over four VGG-16 ReLU feature maps.

		Args:
			pretrained: load feature weights from a local checkpoint when True.
			device: device the frozen VGG feature stack is moved to.
		"""
		super(VGG_perceptual_loss, self).__init__()
		self.device=device
		self.loss_function=nn.L1Loss()
		# Bare VGG-16 'D' feature stack (no classifier head).
		self.vgg_features = vgg.make_layers(vgg.cfg['D'])
		if pretrained:
			self.vgg_features.load_state_dict(torch.load('utils/vgg16_pretrained_features.pth'))
		self.vgg_features.to(device)
		# freeze parameter update — the loss network is fixed.
		for params in self.vgg_features.parameters():
			params.requires_grad = False
		# Feature-stack indices at which activations are tapped for the loss.
		self.layer_name_mapping = {
			'3': "relu1_2",
			'8': "relu2_2",
			'15': "relu3_3",
			'22': "relu4_3"
		}
示例#31
0
File: net.py  Project: limzh317/ResUnet
    def __init__(self, num_classes=1000):
        """VGG-16 with the standard ImageNet-style classifier head.

        Args:
            num_classes: output size of the final linear layer.
        """
        super(VGG16, self).__init__()
        # Full VGG-16 configuration; 'M' entries are max-pool layers.
        features = [
            64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M',
            512, 512, 512, 'M'
        ]
        self.features = make_layers(features)

        # Classic two-hidden-layer VGG head over 7x7x512 feature maps.
        self.classifier = nn.Sequential(
            nn.Linear(512 * 7 * 7, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, num_classes),
        )