Exemplo n.º 1
0
 def __init__(self,
              compound_coef,
              load_weights=False,
              use_gpu=config['cuda'],
              crop_size=config['crop_size']):
     """Wrap a pretrained EfficientNet backbone as a 128-d feature embedder.

     Args:
         compound_coef: EfficientNet scaling coefficient; selects the
             ``efficientnet-b{compound_coef}`` checkpoint.
         load_weights: forwarded to ``EffNet.from_pretrained``.
         use_gpu: whether CUDA is used (NOTE(review): default is read from
             the global ``config`` once, at function-definition time).
         crop_size: input crop size, also defaulted from global ``config``.
     """
     super(EfficientNet, self).__init__()
     model = EffNet.from_pretrained(f'efficientnet-b{compound_coef}',
                                    load_weights)
     # Drop the final conv head / bn so the backbone ends at the block stack.
     del model._conv_head
     del model._bn1
     # Freeze every pretrained parameter except those of _blocks.10.
     # named_parameters() yields parameters in definition order, so the
     # flag switches on at block 10 and back off at block 11.
     update_weight = False
     for name, param in model.named_parameters():
         if "_blocks.10" in name:
             update_weight = True
         elif "_blocks.11" in name:
             update_weight = False
         if not update_weight:
             param.requires_grad = False
     # Replace the classifier with a fresh (trainable) 128-d projection.
     in_features = model._blocks_args[-1].output_filters
     out_features = 128
     model._fc = nn.Linear(in_features, out_features)
     self.model = model
     self.use_gpu = use_gpu
     self.crop_size = crop_size
Exemplo n.º 2
0
 def __init__(self, compound_coef, load_weights=False):
     """Load a pretrained EfficientNet and strip its classification head,
     keeping only the convolutional feature extractor."""
     super(EfficientNet, self).__init__()
     backbone = EffNet.from_pretrained(f'efficientnet-b{compound_coef}',
                                       load_weights)
     # Remove every head module so only the block stack remains.
     for head_part in ('_conv_head', '_bn1', '_avg_pooling', '_dropout', '_fc'):
         delattr(backbone, head_part)
     self.model = backbone
Exemplo n.º 3
0
 def __init__(self, compound_coef, class_num, load_weights=False):
     """Thin wrapper around a pretrained EfficientNet classifier.

     Args:
         compound_coef: scaling coefficient selecting the
             ``efficientnet-b{compound_coef}`` checkpoint.
         class_num: number of output classes for the final FC layer.
         load_weights: forwarded to ``EffNet.from_pretrained``.
     """
     super(EfficientNet, self).__init__()
     model = EffNet.from_pretrained(f'efficientnet-b{compound_coef}',
                                    load_weights,
                                    num_classes=class_num)
     self.model = model
Exemplo n.º 4
0
 def __init__(self):
     """Single-logit head on top of a pretrained EfficientNet-B1.

     The backbone's default 1000-d classifier output is expected to be
     passed through ``dp1`` and ``ln1`` by the (unseen) forward pass.
     """
     super().__init__()
     # Pretrained backbone; its stock classifier emits 1000 logits.
     self.model = EfficientNet.from_pretrained("efficientnet-b1")
     # Regularization plus a 1000 -> 1 projection for one output logit.
     self.dp1 = nn.Dropout(p=0.4)
     self.ln1 = nn.Linear(in_features=1000, out_features=1, bias=True)
 def __init__(self, compound_coef, load_weights=False, parallel_gpus=None):
     """Feature-extractor EfficientNet with optional model parallelism.

     Strips the classification head from the pretrained backbone and
     records whether it was built for multi-GPU model parallelism.
     """
     super(EfficientNet, self).__init__()
     backbone = EffNet.from_pretrained(f'efficientnet-b{compound_coef}',
                                       load_weights,
                                       parallel_gpus=parallel_gpus)
     # Keep only the convolutional trunk.
     for head_part in ('_conv_head', '_bn1', '_avg_pooling', '_dropout', '_fc'):
         delattr(backbone, head_part)
     self.model = backbone
     self.use_model_parallel = parallel_gpus is not None
Exemplo n.º 6
0
    def get_trunk(self, architecture):
        """Instantiate the backbone named by *architecture* into ``self.trunk``.

        Supported families: any "efficientnet*" name, "resnet18"/"resnet50"
        (the latter mapped to ResNeXt-50 32x4d), and "mobilenet". The final
        classifier is resized to ``self.MLP_neurons`` outputs.

        Raises:
            ValueError: if *architecture* matches no supported family.
        """
        arch = architecture.lower()

        if "efficientnet" in arch:
            self.trunk = EfficientNet.from_pretrained(
                architecture, num_classes=self.MLP_neurons)

        elif "resnet" in arch:
            if "18" in arch:
                self.trunk = models.resnet18(pretrained=True)
                self.trunk.fc = nn.Linear(512, self.MLP_neurons)

            elif "50" in arch:
                self.trunk = models.resnext50_32x4d(pretrained=True)
                self.trunk.fc = nn.Linear(2048, self.MLP_neurons)

            else:
                # Previously fell through silently, leaving self.trunk unset.
                raise ValueError(f"Unsupported resnet variant: {architecture}")

        elif "mobilenet" in arch:
            self.trunk = models.mobilenet_v2(pretrained=True)
            self.trunk.classifier[1] = torch.nn.Linear(1280, self.MLP_neurons)

        else:
            # Previously a silent no-op; fail fast on unknown names instead.
            raise ValueError(f"Unsupported architecture: {architecture}")
Exemplo n.º 7
0
    def __init__(self, arch, pretrained=True):
        """Backbone plus a two-layer MLP head emitting a single logit.

        Args:
            arch: backbone identifier ('se_resnext50_32x4d', 'inceptionv4',
                'inceptionresnetv2', or any name containing 'efficientnet').
            pretrained: load pretrained weights when True.

        Raises:
            ValueError: if *arch* is not one of the supported backbones.
        """
        super().__init__()

        # Select the backbone and record its feature width in self.nc.
        if arch == 'se_resnext50_32x4d':
            self.base = se_resnext50_32x4d() if pretrained else se_resnext50_32x4d(pretrained=None)
            self.nc = self.base.last_linear.in_features
        elif arch == 'inceptionv4':
            self.base = inceptionv4() if pretrained else inceptionv4(pretrained=None)
            self.nc = self.base.last_linear.in_features
        elif arch == 'inceptionresnetv2':
            self.base = inceptionresnetv2() if pretrained else inceptionresnetv2(pretrained=None)
            self.nc = self.base.last_linear.in_features
        elif 'efficientnet' in arch:
            if pretrained:
                self.base = EfficientNet.from_pretrained(model_name=arch)
            else:
                self.base = EfficientNet.from_name(model_name=arch)
            self.nc = self.base._fc.in_features
        else:
            # Previously fell through, crashing later with an opaque
            # NameError on self.nc; fail fast with a clear message instead.
            raise ValueError(f"Unsupported arch: {arch}")

        # Head over concatenated avg+max pooled features (hence 2 * nc).
        self.logit = nn.Sequential(AdaptiveConcatPool2d(1), Flatten(),
                                   nn.BatchNorm1d(2 * self.nc),
                                   nn.Dropout(0.5),
                                   nn.Linear(2 * self.nc, 512), Mish(),
                                   nn.BatchNorm1d(512), nn.Dropout(0.5),
                                   nn.Linear(512, 1))
Exemplo n.º 8
0
# Deterministic CIFAR-10 evaluation loader (no shuffling).
testloader = torch.utils.data.DataLoader(testset,
                                         batch_size=100,
                                         shuffle=False,
                                         num_workers=2)

# Human-readable CIFAR-10 class names, index-aligned with the labels.
classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse',
           'ship', 'truck')

# Model
# Select the network by name; trailing comments note each parameter count.
print('==> Building model..')
if model == 'DenseNetWide':
    net = DenseNet(depth=106, k=13, num_classes=10)  # 992,841
if model == 'DenseNetDeep':
    net = DenseNet(depth=117, k=12, num_classes=10)  # 932,986
if model == 'EfficientNetB0':
    net = EfficientNet.from_pretrained('efficientnet-b0',
                                       num_classes=10)  # 4,020,358
if model == 'EfficientNetB4':
    net = EfficientNet.from_pretrained('efficientnet-b4',
                                       num_classes=10)  # 17,566,546
print('Number of parameters: ', count_parameters(net))
net = net.to(device)

# Loss and optimizer
criterion = nn.CrossEntropyLoss()
if fine_tune:
    lr = args.lr
    # NOTE(review): this SGD call is truncated in this chunk; it builds
    # per-group optimization over the stem, bn0 and block parameters.
    optimizer = optim.SGD(
        [{
            "params":
            chain(net._conv_stem.parameters(), net._bn0.parameters(),
                  net._blocks.parameters()),
Exemplo n.º 9
0
# Test-time preprocessing: tensorize and normalize with CIFAR-10 channel stats.
transform_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])

data_dir = '~/data'
# Train set is wrapped so each sample yields both a big and a small view.
trainset = torchvision.datasets.CIFAR10(root=data_dir, train=True, download=False)
trainset = MultiTransDataset(trainset, transform_big, transform_small)
testset = torchvision.datasets.CIFAR10(root=data_dir, train=False, download=False, transform=transform_test)

trainloader = torch.utils.data.DataLoader(trainset, batch_size=args.batch_size, shuffle=True, num_workers=2)
testloader = torch.utils.data.DataLoader(testset, batch_size=100, shuffle=False, num_workers=2)

classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')

# Large network: EfficientNet-B4 restored from checkpoint and put in eval
# mode (presumably the teacher of a big/small distillation pair — confirm).
net_big = EfficientNet.from_pretrained('efficientnet-b4', num_classes=10).to(device)
net_big = torch.nn.DataParallel(net_big)
cudnn.benchmark = True

checkpoint = torch.load(checkpoint_dir_big)
net_big.load_state_dict(checkpoint['net'])
net_big.eval()


# Small network: a sub-1M-parameter DenseNet variant chosen via CLI arg.
if args.model == 'DenseNetWide': net_small = DenseNet(depth=106, k=13, num_classes=10).to(device) # <1m
if args.model == 'DenseNetDeep': net_small = DenseNet(depth=117, k=12, num_classes=10).to(device) # <1m

if not os.path.isdir(checkpoint_dir):
    os.mkdir(checkpoint_dir)
log_file_name = os.path.join(checkpoint_dir, 'log.txt')
# Opened in append mode so successive runs extend the same log file.
log_file = open(log_file_name, "at")