Example #1
0
 def __init__(self, pretrained=True):
     """Build an SE-ResNet50 feature extractor with the classifier removed.

     Args:
         pretrained: when True, construct Cadene's se_resnet50 with its
             default (ImageNet) weights; when False, pass ``pretrained=None``
             so the network is randomly initialized and no download occurs.
     """
     super(SEResNet50, self).__init__()
     import pretrainedmodels
     backbone = (pretrainedmodels.se_resnet50()
                 if pretrained
                 else pretrainedmodels.se_resnet50(pretrained=None))
     # Keep every child module except the last one (the final classifier).
     self.model = nn.Sequential(*list(backbone.children())[:-1])
Example #2
0
def get_softmax_basenet(net):
    """Return the (mostly ImageNet-pretrained) backbone model named by ``net``.

    Args:
        net: architecture name, one of 'ResNet152', 'ResNet50', 'ResNet34',
            'ResNet101', 'ResNext101_32x4d', 'Inc_v3', 'DenseNet161',
            'SEResNet50'.

    Returns:
        The constructed pytorch model.

    Raises:
        NotImplementedError: for the unfinished 'IncRes_v2' branch.
        ValueError: for any unrecognized ``net`` name.
    """
    if net == 'ResNet152':
        model = models.resnet152(pretrained=True)
    elif net == 'ResNet50':
        model = models.resnet50(pretrained=True)
    elif net == 'ResNet34':
        model = models.resnet34(pretrained=True)
    elif net == 'ResNet101':
        model = models.resnet101(pretrained=True)
    elif net == 'ResNext101_32x4d':
        model = pretrainedmodels.resnext101_32x4d(pretrained='imagenet')
    elif net == 'IncRes_v2':
        # Bug fix: this branch used a Python 2 print statement (a syntax
        # error under Python 3, which the rest of the file targets — see the
        # f-string usage elsewhere) and then fell through to `return model`
        # with `model` unbound. Fail loudly and explicitly instead.
        raise NotImplementedError('IncRes_v2 is not finished yet')
    elif net == 'Inc_v3':
        from myNetwork import get_customInceptionV3
        model = get_customInceptionV3(100)
    elif net == 'DenseNet161':
        model = models.densenet161(pretrained=True)
    elif net == 'SEResNet50':
        model = pretrainedmodels.se_resnet50(pretrained='imagenet')
    else:
        # Bug fix: an unknown name previously fell through to an
        # UnboundLocalError on `return model`; raise a clear error instead.
        raise ValueError(f'unknown net: {net!r}')

    return model
 def get_resnet(self, name):
     """Return the backbone network identified by ``name``.

     Bug fix: the original built a dict of already-constructed models, so
     every call instantiated all four networks — including downloading two
     sets of ImageNet weights — just to return one of them. Construction is
     now deferred via zero-argument factories, so only the requested model
     is built.

     Args:
         name: one of "resnet18", "resnet50", "se_resnet50",
             "se_resnext101_32x4d".

     Raises:
         KeyError: if ``name`` is not a supported model name.
     """
     factories = {
         "resnet18": lambda: torchvision.models.resnet18(),
         "resnet50": lambda: torchvision.models.resnet50(),
         "se_resnet50": lambda: pretrainedmodels.se_resnet50(num_classes=1000, pretrained="imagenet"),
         "se_resnext101_32x4d": lambda: pretrainedmodels.se_resnext101_32x4d(num_classes=1000, pretrained="imagenet"),
     }
     if name not in factories:
         raise KeyError(f"{name} is not a valid Model (ResNet) version")
     return factories[name]()
def se_resnet50():
    """Return Cadene's SE-ResNet50 loaded with ImageNet weights.

    From: https://github.com/Cadene/pretrained-models.pytorch
    From: https://github.com/hujie-frank/SENet

    Residual networks with squeeze & excitation blocks
    Squeeze and Excitation: https://arxiv.org/abs/1709.01507

    Params ~25M, size 107MB, Top-1 acc 77.636, Top-5 acc 93.752
    """
    model = pretrainedmodels.se_resnet50(num_classes=1000, pretrained="imagenet")
    return model
Example #5
0
 def test_pretrain_consistency(self, model, input_param):
     """Compare our pretrained network's features against Cadene's senet."""
     batch = torch.randn(1, 3, 64, 64).to(device)
     our_net = test_pretrained_networks(model, input_param, device)
     with eval_mode(our_net):
         ours = our_net.features(batch)
     reference_net = pretrainedmodels.se_resnet50().to(device)
     with eval_mode(reference_net):
         expected = reference_net.features(batch)
     # Cadene's senet uses a kernel-size-1 conv layer as the FC layer while
     # our version uses nn.Linear, which may introduce a tiny numerical
     # difference — hence the tolerances below.
     self.assertTrue(torch.allclose(ours, expected, rtol=1e-5, atol=1e-5))
 def __init__(self):
     """Build a three-headed classifier on an SE-ResNet50 trunk.

     The backbone is constructed without pretrained weights; its layer0
     stem conv is replaced by a single-channel conv, and three linear
     heads (168 / 11 / 7 classes) share the 4096-dim feature vector.
     """
     super(main_model, self).__init__()
     # Renamed from `se_resnet50` to avoid shadowing the library function.
     backbone = pretrainedmodels.se_resnet50(pretrained=None)
     # One-channel input conv replacing the backbone's 3-channel stem conv.
     self.conv1 = torch.nn.Conv2d(1, 64, (3, 3))
     # Skip layer0's first module (the original stem conv) and reuse the rest.
     self.layer0 = backbone.layer0[1:]
     self.layer1 = backbone.layer1
     self.layer2 = backbone.layer2
     self.layer3 = backbone.layer3
     self.layer4 = backbone.layer4
     self.layer5 = torch.nn.Conv2d(2048, 256, (3, 3))
     # Three classification heads.
     self.fc1 = torch.nn.Linear(4096, 168)
     self.fc2 = torch.nn.Linear(4096, 11)
     self.fc3 = torch.nn.Linear(4096, 7)
Example #7
0
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
    """Build a ResNet or SE-ResNet and optionally load pretrained weights.

    Args:
        arch: architecture name; any name containing 'se' selects the
            SE-ResNet path (only 'resnet50_se' has pretrained weights).
        block, layers: plain-ResNet building block and stage depths.
        pretrained: when truthy, load ImageNet weights into the model.
        progress: forwarded to ``load_url`` for the plain-ResNet path.
        **kwargs: extra model options; 'ADL_position' is normalized via
            ``make_cfg``, and 'num_classes' != 1000 drops the pretrained
            'fc' weights and relaxes strict loading.

    Returns:
        The constructed (and possibly weight-loaded) model.
    """
    if 'ADL_position' in kwargs and kwargs['ADL_position'] is not None:
        kwargs['ADL_position'] = make_cfg(kwargs['ADL_position'])
    if 'se' in arch:
        # NOTE(review): `block` and `layers` are ignored on this path — the
        # SE variant is hard-wired to the SE-ResNet50 configuration.
        model = ResNetSE(SEResNetBottleneck, [3, 4, 6, 3],
                         groups=1,
                         reduction=16,
                         dropout_p=None,
                         inplanes=64,
                         input_3x3=False,
                         downsample_kernel_size=1,
                         downsample_padding=0,
                         **kwargs)
    else:
        model = ResNet(block, layers, **kwargs)
    if pretrained:
        strict_rule = True

        if 'se' in arch:
            if arch == 'resnet50_se':
                state_dict = se_resnet50(num_classes=1000,
                                         pretrained='imagenet').state_dict()
            else:
                raise NotImplementedError
        else:
            state_dict = load_url(model_urls[arch], progress=progress)

        if 'ADL_position' in kwargs and kwargs['ADL_position'] is not None:
            state_dict = align_layer(state_dict, kwargs['ADL_position'])

        # Bug fix: kwargs['num_classes'] raised KeyError whenever the caller
        # did not pass num_classes; default to the ImageNet head size (1000),
        # in which case the pretrained 'fc' weights are kept and loading
        # stays strict.
        if kwargs.get('num_classes', 1000) != 1000:
            remove_layer(state_dict, 'fc')
            strict_rule = False
        model.load_state_dict(state_dict, strict=strict_rule)
    return model
Example #8
0
 def __init__(self, num_classes):
     """Fine-tuning wrapper: ImageNet SE-ResNet50 with a task-sized head.

     Args:
         num_classes: output dimension of the replacement classifier.
     """
     super().__init__()
     backbone = pretrainedmodels.se_resnet50(pretrained='imagenet')
     # Adaptive pooling lets the backbone accept arbitrary spatial sizes.
     backbone.avg_pool = torch.nn.AdaptiveAvgPool2d(1)
     # Swap the 1000-way ImageNet classifier for one sized to this task.
     in_features = backbone.last_linear.in_features
     backbone.last_linear = torch.nn.Linear(in_features, num_classes)
     self.resnet = backbone
Example #9
0
def se_resnet50(num_classes=1000, pretrained=None):
    """Thin pass-through wrapper around Cadene's ``se_resnet50`` constructor.

    Args:
        num_classes: size of the final classifier (default 1000).
        pretrained: weight source accepted by pretrainedmodels
            (default None, i.e. random initialization).
    """
    return pretrainedmodels.se_resnet50(num_classes=num_classes,
                                        pretrained=pretrained)
import pretrainedmodels
from pretrainedmodels import se_resnet50

# Smoke test: build the ImageNet-pretrained SE-ResNet50 and print its type.
tmp = se_resnet50(num_classes=1000, pretrained='imagenet')
print('tmp: ', type(tmp))
Example #11
0
def se_resnet50_(pretrained='imagenet', num_classes=41, **kwargs):
    """SE-ResNet50 with a custom pooling layer and a small classifier head.

    Generalization: the head size was hard-coded to 41; it is now the
    ``num_classes`` parameter (default 41, so existing callers are
    unaffected).

    Args:
        pretrained: weight source forwarded to pretrainedmodels
            (default 'imagenet').
        num_classes: output dimension of the replacement classifier.
        **kwargs: accepted for signature compatibility; NOTE(review) the
            original ignored these too — confirm whether they should be
            forwarded to the constructor.
    """
    model = pretrainedmodels.se_resnet50(pretrained=pretrained)
    # Fixed-size pooling tailored to this task's non-square feature maps.
    model.avg_pool = nn.AvgPool2d((2, 5), stride=(2, 5))
    # 512 * 4 = 2048 is SE-ResNet50's final feature width.
    model.last_linear = nn.Linear(512 * 4, num_classes)
    return model