def __init__(self):
    """Build the OrdNet encoder/decoder.

    Encoder: torchvision's pretrained densenet169; forward hooks capture
    intermediate activations for skip connections, matching
    keras.applications' densenet169 with include_top=False (as in the
    DenseDepth paper). Decoder: a 1x1 bottleneck, four UpBlocks that
    concatenate the captured activations, and a 3x3 single-channel head.
    """
    super(OrdNet, self).__init__()
    self.encoder = densenet169(pretrained=True)
    self.pools = {}
    self.activation = {}

    def make_hook(name, store):
        # Clone the output so later in-place ops on the encoder tensors
        # cannot corrupt the saved skip-connection activations.
        def hook(model, input, output):
            store[name] = output.clone()
        return hook

    # Capture the encoder resolutions the decoder concatenates with.
    self.encoder.features.relu0.register_forward_hook(make_hook("conv0", self.activation))
    self.encoder.features.pool0.register_forward_hook(make_hook("pool0", self.activation))
    self.encoder.features.transition1.pool.register_forward_hook(make_hook("pool1", self.activation))
    self.encoder.features.transition2.pool.register_forward_hook(make_hook("pool2", self.activation))

    # Decoder construction; channel counts come from the encoder probe.
    decoder_channels, conv0_channels, pool0_channels, pool1_channels, pool2_channels = \
        self.get_num_encoder_channels()
    self.conv2 = nn.Conv2d(decoder_channels, decoder_channels, kernel_size=1, stride=1, padding=0)
    self.up1 = UpBlock(decoder_channels + pool2_channels, decoder_channels // 2)
    self.up2 = UpBlock(decoder_channels // 2 + pool1_channels, decoder_channels // 4)
    self.up3 = UpBlock(decoder_channels // 4 + pool0_channels, decoder_channels // 8)
    self.up4 = UpBlock(decoder_channels // 8 + conv0_channels, decoder_channels // 16)
    self.conv3 = nn.Conv2d(decoder_channels // 16, 1, kernel_size=3, padding=1, stride=1)
def __init__(self, subtype='densenet121', out_stages=(2, 3, 4), backbone_path=None):
    """DenseNet backbone split into stages for feature extraction.

    Args:
        subtype: one of 'densenet121', 'densenet161', 'densenet169',
            'densenet201'.
        out_stages: stage indices whose channel counts are exposed via
            ``self.out_channels``. (Default is now a tuple — the original
            mutable list default was shared across instances.)
        backbone_path: optional state-dict path for the feature extractor;
            when given, pretrained torchvision weights are not downloaded.

    Raises:
        ValueError: if ``subtype`` is not a supported variant (the original
            fell through and crashed later with NameError on ``features``).
    """
    super(Densenet, self).__init__()
    self.out_stages = list(out_stages)
    self.backbone_path = backbone_path
    # NOTE(review): this channel table matches densenet121; verify it is
    # intended for the other subtypes as well.
    self.out_channels = [64, 128, 256, 512, 1024]

    constructors = {
        'densenet121': densenet121,
        'densenet161': densenet161,
        'densenet169': densenet169,
        'densenet201': densenet201,
    }
    if subtype not in constructors:
        raise ValueError("Unsupported densenet subtype: %s" % subtype)
    features = constructors[subtype](pretrained=not backbone_path).features

    self.out_channels = self.out_channels[self.out_stages[0]:self.out_stages[-1] + 1]

    self.conv1 = nn.Sequential(features.conv0, features.norm0, features.relu0, features.pool0)
    self.layer1 = nn.Sequential(features.denseblock1, features.transition1)
    self.layer2 = nn.Sequential(features.denseblock2, features.transition2)
    self.layer3 = nn.Sequential(features.denseblock3, features.transition3)
    self.layer4 = features.denseblock4

    if self.backbone_path:
        # Bug fix: the original called self.features.load_state_dict, but
        # self.features is never assigned (AttributeError). The layers above
        # share module objects with ``features``, so loading here updates
        # them in place.
        features.load_state_dict(torch.load(self.backbone_path))
    else:
        self.init_weights()
def recordDense(info):
    """Summarize each torchvision DenseNet variant and dump it to JSON.

    A variant is skipped when the global ``SKIP`` is truthy and its name
    already appears in ``info['name_list']``. (The original ``global SKIP``
    declaration was dropped — reading a global needs no declaration.)

    Args:
        info: dict with a 'name_list' collection of already-recorded names.
    """
    import torchvision.models.densenet as denGen

    # (key used for skip-check and JSON output, human-readable label, builder)
    variants = [
        ('densenet121', 'DenseNet121', denGen.densenet121),
        ('densenet161', 'DenseNet161', denGen.densenet161),
        ('densenet169', 'DenseNet169', denGen.densenet169),
        ('densenet201', 'DenseNet201', denGen.densenet201),
    ]
    for key, label, build in variants:
        if SKIP and key in info['name_list']:
            INFO("Skip %s" % label)
            continue
        INFO("proceeding for %s" % label)
        net = build(pretrained=True).cuda()
        # Renamed from ``sum``, which shadowed the builtin.
        summary = __summary(net, [3, 224, 224], verbose=True)
        __writeInfoJSON(summary, key)
def __init__(self, pretrained=True, progress=True, cfg_dropout2d=None, **kwargs):
    """densenet169 feature extractor followed by spatial dropout.

    Args:
        pretrained: load ImageNet weights for the backbone.
        progress: show a download progress bar.
        cfg_dropout2d: kwargs for nn.Dropout2d; defaults to ``dict(p=0.1)``.
            (A None sentinel replaces the original mutable default dict,
            which was a single object shared across all instances.)
        **kwargs: forwarded to torchvision's densenet169 constructor.
    """
    super(densenet169_dropout2d, self).__init__()
    if cfg_dropout2d is None:
        cfg_dropout2d = dict(p=0.1)
    backbone = densenet.densenet169(pretrained, progress, **kwargs)
    self.backbone = backbone.features
    self.dropout2d = nn.Dropout2d(**cfg_dropout2d)
def create_model(model_name, num_classes=1000, pretrained=False, **kwargs):
    """Factory for DPN / ResNet / DenseNet / Inception classifiers.

    Args:
        model_name: architecture identifier, e.g. 'dpn92' or 'resnet50'.
        num_classes: size of the classifier head.
        pretrained: load pretrained weights when available.
        **kwargs: extra options; 'test_time_pool' and 'extra' are consumed
            here (both default True, DPN-only), the rest is forwarded to
            the non-DPN builders.

    Returns:
        The constructed model.

    Raises:
        ValueError: for an unknown model_name. (The original used
            ``assert False``, which is silently stripped under ``python -O``.)
    """
    # dict.pop with a default replaces the original if/else pop dance.
    test_time_pool = kwargs.pop('test_time_pool', True)
    extra = kwargs.pop('extra', True)

    # Families that share an identical call signature are table-driven.
    dpn_builders = {
        'dpn68': dpn68, 'dpn68b': dpn68b, 'dpn98': dpn98,
        'dpn131': dpn131, 'dpn107': dpn107,
    }
    plain_builders = {
        'resnet18': resnet18, 'resnet34': resnet34, 'resnet50': resnet50,
        'resnet101': resnet101, 'resnet152': resnet152,
        'densenet121': densenet121, 'densenet161': densenet161,
        'densenet169': densenet169, 'densenet201': densenet201,
    }

    if model_name == 'dpn92':
        # dpn92 is the only variant taking the 'extra' flag.
        model = dpn92(num_classes=num_classes, pretrained=pretrained,
                      test_time_pool=test_time_pool, extra=extra)
    elif model_name in dpn_builders:
        model = dpn_builders[model_name](
            num_classes=num_classes, pretrained=pretrained,
            test_time_pool=test_time_pool)
    elif model_name in plain_builders:
        model = plain_builders[model_name](
            num_classes=num_classes, pretrained=pretrained, **kwargs)
    elif model_name == 'inception_v3':
        model = inception_v3(
            num_classes=num_classes, pretrained=pretrained,
            transform_input=False, **kwargs)
    else:
        raise ValueError("Unknown model architecture (%s)" % model_name)
    return model
def black_box_densenet169(cuda=True):
    """Return a frozen pretrained densenet169 wrapped as a black-box callable.

    The returned function maps images in [-1, 1] to model outputs: inputs
    are rescaled to [0, 1] and ImageNet-normalized before the forward pass.

    Args:
        cuda: wrap the model in DataParallel and move it to GPU.
    """
    model = densenet169(pretrained=True)
    model.train(False)
    if cuda:
        model = torch.nn.DataParallel(model).cuda()
    freeze_model(model)

    def black_box_fn(_images):
        rescaled = 0.5 * (_images + 1)
        return model(imagenet_normalize(rescaled))

    return black_box_fn
def get_densenet_model(densenet, num_classes=-1):
    """Build a pretrained DenseNet, optionally replacing the classifier head.

    Args:
        densenet: one of 'densenet121', 'densenet169', 'densenet201',
            'densenet161'.
        num_classes: when positive, swap the classifier for a fresh Linear
            layer with this many outputs.

    Raises:
        RuntimeError: for an unsupported model name.
    """
    builders = {
        "densenet121": densenet121,
        "densenet169": densenet169,
        "densenet201": densenet201,
        "densenet161": densenet161,
    }
    if densenet not in builders:
        raise RuntimeError("Unsupported model, %s" % densenet)
    model = builders[densenet](pretrained=True)

    if num_classes > 0:
        in_features = model.classifier.in_features
        model.classifier = nn.Linear(in_features, num_classes, bias=True)
    return model
def __init__(self, pretrained=True, progress=True, **kwargs):
    """Wrap torchvision's densenet169 and keep only its feature extractor.

    Args:
        pretrained: load ImageNet weights.
        progress: show a download progress bar.
        **kwargs: forwarded to the torchvision constructor.
    """
    super(densenet169, self).__init__()
    net = densenet.densenet169(pretrained, progress, **kwargs)
    self.backbone = net.features
def loss_selector(loss_net): #base if loss_net == "vgg16": from torchvision.models.vgg import vgg16 net = vgg16(pretrained=True) loss_network = nn.Sequential(*list(net.features)[:31]).eval() return loss_network elif loss_net == "vgg16_bn": from torchvision.models.vgg import vgg16_bn net = vgg16_bn(pretrained=True) loss_network = nn.Sequential(*list(net.features)[:44]).eval() return loss_network elif loss_net == "resnet50": from torchvision.models.resnet import resnet50 net=resnet50(pretrained=True) loss_network=nn.Sequential(*[child_module for child_module in net.children()][:-2]).eval() return loss_network elif loss_net == "resnet101": from torchvision.models.resnet import resnet101 net=resnet101(pretrained=True) loss_network=nn.Sequential(*[child_module for child_module in net.children()][:-2]).eval() return loss_network elif loss_net == "resnet152": from torchvision.models.resnet import resnet152 net=resnet152(pretrained=True) loss_network=nn.Sequential(*[child_module for child_module in net.children()][:-2]).eval() return loss_network elif loss_net == "squeezenet1_1": from torchvision.models.squeezenet import squeezenet1_1 net=squeezenet1_1(pretrained=True) classifier=[item for item in net.classifier.modules()][1:-1] loss_network=nn.Sequential(*[net.features,*classifier]).eval() return loss_network elif loss_net == "densenet121": from torchvision.models.densenet import densenet121 net=densenet121(pretrained=True) loss_network=nn.Sequential(*[net.features,nn.ReLU()]).eval() return loss_network elif loss_net == "densenet169": from torchvision.models.densenet import densenet169 net=densenet169(pretrained=True) loss_network=nn.Sequential(*[net.features,nn.ReLU()]).eval() return loss_network elif loss_net == "densenet201": from torchvision.models.densenet import densenet201 net=densenet201(pretrained=True) loss_network=nn.Sequential(*[net.features,nn.ReLU()]).eval() return loss_network elif loss_net == "mobilenet_v2": from torchvision.models.mobilenet import 
mobilenet_v2 net=mobilenet_v2(pretrained=True) loss_network=nn.Sequential(*[net.features]).eval() return loss_network elif loss_net == "resnext50_32x4d": from torchvision.models.resnet import resnext50_32x4d net=resnext50_32x4d(pretrained=True) loss_network=nn.Sequential(*[child_module for child_module in net.children()][:-2]).eval() return loss_network elif loss_net == "resnext101_32x8d": from torchvision.models.resnet import resnext101_32x8d net=resnext101_32x8d(pretrained=True) loss_network=nn.Sequential(*[child_module for child_module in net.children()][:-2]).eval() return loss_network elif loss_net == "wide_resnet50_2": from torchvision.models.resnet import wide_resnet50_2 net=wide_resnet50_2(pretrained=True) loss_network=nn.Sequential(*[child_module for child_module in net.children()][:-2]).eval() return loss_network elif loss_net == "wide_resnet101_2": from torchvision.models.resnet import wide_resnet101_2 net=wide_resnet101_2(pretrained=True) loss_network=nn.Sequential(*[child_module for child_module in net.children()][:-2]).eval() return loss_network elif loss_net == "inception_v3":
test_loss /= size correct /= size # Report it to the the console print( f"Test Error: \n Accuracy: {(100*correct):>0.1f}%, Avg loss: {test_loss:>8f} \n" ) # Return a dictionary containing the results (for use in plotting etc.), list wrapped for dataframe usage return {"loss": [test_loss], "accuracy": [correct]} if __name__ == '__main__': train_epochs = 50 device = "cpu" model = densenet169() training_data = CIFAR100Coarse(root="data", train=True, download=True, transform=data_managers.train_transform) testing_data = CIFAR100Coarse(root="data", train=False, download=True, transform=data_managers.test_transform) # Lock the random seed here to make the batches consistent between tests torch.manual_seed(36246) # Split the training data into sets of 5000 entries training_sets = random_split(training_data, [5000] * 10)
def densenet_bc169(nb_classes=1000, pretrained=False):
    """Thin alias for torchvision's densenet169 (DenseNet-BC-169).

    Args:
        nb_classes: size of the classifier head.
        pretrained: load ImageNet weights.
    """
    return densenet169(pretrained=pretrained, num_classes=nb_classes)
from torchvision.models.densenet import _DenseBlock, _Transition #import code from copy import deepcopy from collections import OrderedDict # Project imports from util.util import print # print with a header for this project # Leveraged Torchvision implementation of DenseNet at: # https://github.com/pytorch/vision/blob/master/torchvision/models/densenet.py _use_pretrained = True name_to_model_fns = { "densenet121": lambda: densenet.densenet121(pretrained=_use_pretrained), "densenet169": lambda: densenet.densenet169(pretrained=_use_pretrained), "densenet201": lambda: densenet.densenet201( pretrained=_use_pretrained) # We're just going to use this one , "densenet161": lambda: densenet.densenet161(pretrained=_use_pretrained) } def get_densenet(name): return name_to_model_fns[name]() # Call the function """ old_model.modules is more accurate of all indiv. layers old_model.features decomposes structure more, but doesn't include final FC classifier (instead under old_model.classifier)