def waveResnext101_32x4d(pretrained='imagenet', num_classes=41):
    """Build a WaveResnext on top of a pretrained ResNeXt-101 32x4d trunk.

    The first module of the base network's feature stack (the stem conv)
    is dropped because the wave model takes 1-channel input.

    NOTE(review): another function with this exact name is defined later
    in this file and shadows this one at import time — confirm which
    definition callers actually use.
    """
    trunk = pretrainedmodels.resnext101_32x4d(pretrained=pretrained)
    # children()[0] is the feature container; slice off its first module
    # so a 1-channel stem can take its place.
    feature_stack = list(trunk.children())[0]
    feature_stack = feature_stack[1:]
    return WaveResnext(feature_stack, num_classes)
def get_softmax_basenet(net):
    """Return a pretrained backbone model for the given architecture name.

    Args:
        net: architecture identifier, e.g. 'ResNet50', 'DenseNet161'.

    Returns:
        A ``torch.nn.Module`` with pretrained weights.

    Raises:
        NotImplementedError: for 'IncRes_v2', which is not supported yet.
        ValueError: for an unrecognized ``net`` value.
    """
    if net == 'ResNet152':
        model = models.resnet152(pretrained=True)
    elif net == 'ResNet50':
        model = models.resnet50(pretrained=True)
    elif net == 'ResNet34':
        model = models.resnet34(pretrained=True)
    elif net == 'ResNet101':
        model = models.resnet101(pretrained=True)
    elif net == 'ResNext101_32x4d':
        model = pretrainedmodels.resnext101_32x4d(pretrained='imagenet')
    elif net == 'IncRes_v2':
        # Bug fix: the original used a Python-2 `print` statement (a
        # SyntaxError under Python 3) and then fell through to
        # `return model` with `model` unbound. Fail loudly instead.
        raise NotImplementedError("IncRes_v2 is not finished yet")
    elif net == 'Inc_v3':
        from myNetwork import get_customInceptionV3
        model = get_customInceptionV3(100)
    elif net == 'DenseNet161':
        model = models.densenet161(pretrained=True)
    elif net == 'SEResNet50':
        model = pretrainedmodels.se_resnet50(pretrained='imagenet')
    else:
        # Bug fix: an unknown name previously raised UnboundLocalError
        # at the return statement; raise a meaningful error instead.
        raise ValueError('Unknown base network: {}'.format(net))
    return model
def waveResnext101_32x4d(pretrained='imagenet', num_classes=16):
    """Build a WaveResnext from a pretrained ResNeXt-101 32x4d backbone.

    Args:
        pretrained: pretrained-weight spec forwarded to ``pretrainedmodels``.
        num_classes: number of output classes for the WaveResnext head.

    Returns:
        A ``WaveResnext`` wrapping the backbone's feature stack.

    NOTE(review): this definition shadows an earlier function of the same
    name in this file — confirm which one callers expect.
    """
    base = pretrainedmodels.resnext101_32x4d(pretrained=pretrained)
    # Non-standard pooling for the (presumably rectangular) input maps.
    base.avg_pool = nn.AvgPool2d((2, 5), stride=(2, 5))
    # Bug fix: the head width was hard-coded to 16, silently ignoring the
    # `num_classes` parameter; wire the parameter through instead
    # (identical for the default value, correct for every other value).
    base.last_linear = nn.Linear(512 * 4, num_classes)
    modules = list(base.children())
    # modules[0] is the feature container; WaveResnext builds its own head.
    model = WaveResnext(modules[0], num_classes)
    return model
def get_features(network):
    """Split a backbone into four feature stages plus their channel widths.

    Args:
        network: one of 'resnet50', 'resnext101_32x4d', 'dpn68b', 'dpn92'.

    Returns:
        (nn.ModuleList of 4 stages, tuple of 4 output channel counts).

    Raises:
        ValueError: for an unsupported network name.
    """
    if network == 'resnet50':
        backbone = resnet50(True)
        stages = nn.ModuleList([
            nn.Sequential(backbone.conv1, backbone.bn1, backbone.relu,
                          backbone.maxpool, backbone.layer1),
            backbone.layer2,
            backbone.layer3,
            backbone.layer4,
        ])
        widths = (256, 512, 1024, 2048)
    elif network == 'resnext101_32x4d':
        layers = list(resnext101_32x4d().features)[:8]
        stages = nn.ModuleList([
            nn.Sequential(*layers[:5]),
            layers[5],
            layers[6],
            layers[7],
        ])
        widths = (256, 512, 1024, 2048)
    elif network == 'dpn68b':
        layers = list(dpn68b().features)
        stages = nn.ModuleList([
            nn.Sequential(*layers[:4]),
            nn.Sequential(*layers[4:8]),
            nn.Sequential(*layers[8:20]),
            nn.Sequential(*layers[20:23]),
        ])
        widths = (144, 320, 704, 832)
    elif network == 'dpn92':
        layers = list(dpn92().features)
        stages = nn.ModuleList([
            nn.Sequential(*layers[:4]),
            nn.Sequential(*layers[4:8]),
            nn.Sequential(*layers[8:28]),
            nn.Sequential(*layers[28:31]),
        ])
        widths = (336, 704, 1552, 2688)
    else:
        raise ValueError(network)
    return stages, widths
def resnext101_32x4d_(pretrained='imagenet', num_classes=10, **kwargs):
    """Pretrained ResNeXt-101 32x4d with a custom pooling and linear head.

    Args:
        pretrained: pretrained-weight spec forwarded to ``pretrainedmodels``.
        num_classes: width of the replacement classification head.
            Generalization: this was hard-coded to 10; the new keyword
            defaults to 10, so existing callers are unaffected.
        **kwargs: accepted for interface compatibility; unused.

    Returns:
        The adapted model.
    """
    model = pretrainedmodels.resnext101_32x4d(pretrained=pretrained)
    # Non-square pooling — presumably matched to a rectangular input
    # resolution used elsewhere in this project; confirm against callers.
    model.avg_pool = nn.AvgPool2d((2, 5), stride=(2, 5))
    model.last_linear = nn.Linear(512 * 4, num_classes)
    return model
def __init__(self, train_fe=True, use_cuda=True, feature_extraction_cnn='vgg', last_layer=''):
    """Build a truncated pretrained CNN to use as a feature extractor.

    Args:
        train_fe: when False, freeze all backbone parameters.
        use_cuda: when True, move the backbone to the GPU.
        feature_extraction_cnn: backbone family ('vgg', 'resnet101',
            'resnext101', 'se_resnext101', 'densenet169').
        last_layer: layer name to truncate at (vgg/resnet only); an empty
            string selects the default ('pool4' / 'layer3').
    """
    super(FeatureExtraction, self).__init__()
    if feature_extraction_cnn == 'vgg':
        # Rewrite the weight URL to plain http (original intent preserved).
        model_urls['vgg16'] = model_urls['vgg16'].replace('https://', 'http://')
        self.model = models.vgg16(pretrained=True)
        # Named layers of torchvision's VGG16 feature stack, in order.
        vgg_layer_names = [
            'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1',
            'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2',
            'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3',
            'relu3_3', 'pool3',
            'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3',
            'relu4_3', 'pool4',
            'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3',
            'relu5_3', 'pool5',
        ]
        target = last_layer if last_layer != '' else 'pool4'
        cut = vgg_layer_names.index(target)
        self.model = nn.Sequential(
            *list(self.model.features.children())[:cut + 1])
    if feature_extraction_cnn == 'resnet101':
        resnet_urls['resnet101'] = resnet_urls['resnet101'].replace(
            'https://', 'http://')
        self.model = models.resnet101(pretrained=True)
        resnet_layer_names = ['conv1', 'bn1', 'relu', 'maxpool',
                              'layer1', 'layer2', 'layer3', 'layer4']
        target = last_layer if last_layer != '' else 'layer3'
        cut = resnet_layer_names.index(target)
        resnet_modules = [
            self.model.conv1, self.model.bn1, self.model.relu,
            self.model.maxpool, self.model.layer1, self.model.layer2,
            self.model.layer3, self.model.layer4,
        ]
        self.model = nn.Sequential(*resnet_modules[:cut + 1])
    if feature_extraction_cnn == 'resnext101':
        self.model = pretrainedmodels.resnext101_32x4d(pretrained='imagenet')
        # children()[0] is the feature container; drop its final module.
        self.model = nn.Sequential(*list(self.model.children())[0][:-1])
    if feature_extraction_cnn == 'se_resnext101':
        self.model = pretrainedmodels.se_resnext101_32x4d(pretrained='imagenet')
        # Drop the last three top-level modules (head components).
        self.model = nn.Sequential(*list(self.model.children())[:-3])
    if feature_extraction_cnn == 'densenet169':
        self.model = models.densenet169(pretrained=True)
        self.model = nn.Sequential(
            *list(self.model.features.children())[:-3])
    if not train_fe:
        # Freeze the backbone: no gradients flow into pretrained weights.
        for param in self.model.parameters():
            param.requires_grad = False
    if use_cuda:
        self.model.cuda()
def Model_builder(configer):
    """Construct a classifier from config, resize its head, place on device.

    Args:
        configer: config object exposing ``model``, ``dataset_cfg`` and
            ``train_cfg`` mappings (name, pretrained flag, parallelism
            flags, class count, GPU ids).

    Returns:
        The model, optionally wrapped in DataParallel/DistributedDataParallel
        and moved to ``device``.

    Raises:
        ImportError: if the configured architecture is not supported.
    """
    model_name = configer.model['name']
    No_classes = configer.dataset_cfg["id_cfg"]["num_classes"]
    model_pretrained = configer.model['pretrained']
    model_dataparallel = configer.model["DataParallel"]
    model_gpu_replica = configer.model["Multi_GPU_replica"]
    gpu_ids = configer.train_cfg["gpu"]

    # All pretrainedmodels backbones share the identical adaptation step,
    # so dispatch through a table instead of eleven copy-pasted branches.
    # Values are attribute names, looked up lazily so only the selected
    # factory is ever touched (matching the original elif behavior).
    pm_factories = {
        "Inceptionv3": "inceptionv3",
        "Xception": "xception",
        "VGG_19": "vgg19",
        "Resnet18": "resnet18",
        "Resnet50": "resnet50",
        "Resnet101": "resnet101",
        "Resnet152": "resnet152",
        "Resnet34": "resnet34",
        "Densenet121": "densenet121",
        "ResNeXt101-32": "resnext101_32x4d",
        "ResNeXt101-64": "resnext101_64x4d",
    }
    if model_name in pm_factories:
        factory = getattr(PM, pm_factories[model_name])
        model = factory(num_classes=1000, pretrained=model_pretrained)
        # Swap the 1000-way ImageNet head for one sized to this dataset.
        d = model.last_linear.in_features
        model.last_linear = nn.Linear(d, No_classes)
    elif model_name == "MobilenetV2":
        model = MobileNetV2(n_class=No_classes)
    else:
        raise ImportError("Model Architecture not supported")

    # Performing Data Parallelism if configured
    if model_dataparallel:
        model = torch.nn.DataParallel(model.to(device), device_ids=gpu_ids)
    elif model_gpu_replica:
        # NOTE(review): world_size=1 with rank=1 looks inconsistent (ranks
        # are 0-based) — kept as-is from the original; confirm before use.
        torch.distributed.init_process_group(backend='nccl',
                                             world_size=1,
                                             rank=1)
        model = torch.nn.DistributedDataParallel(model.to(device),
                                                 device_ids=gpu_ids)
    else:
        model = model.to(device)

    print('---------- Model Loaded')
    return model
# Smoke-test script: load a pretrained ResNeXt-101 32x4d, initialize and
# summarize it, then reshape its first conv layer to accept 15-channel
# input and inspect the result.
import torch
import torch.nn as nn
from torch.nn import init
from torch.optim import lr_scheduler
import torchsummary
import pretrainedmodels
from pretrainedmodels import resnext101_32x4d
from core.networks import (init_net, init_weights, find_first_layer,
                           reshape_input_nc)

model = resnext101_32x4d(num_classes=1000, pretrained='imagenet')
model.cuda()
# Re-initialize weights (xavier) and set up the model across three GPUs.
model = init_net(model, init_type='xavier', gpu_ids=[0, 1, 2])
torchsummary.summary(model, (3, 224, 224))

# Locate the first Conv2d and its containing module before reshaping.
first_conv_layer, container = find_first_layer(model, nn.Conv2d)
print("First Conv2d Layer: ", first_conv_layer)
print("First Conv contained inside: ", type(container))

print("Reshaping Input NC -----------------------------------------")
# Rebuild the stem so the network accepts 15 input channels instead of 3.
model = reshape_input_nc(model, 15)
print("After reshaping model")
torchsummary.summary(model, (15, 224, 224))
first_conv_layer, container = find_first_layer(model, nn.Conv2d)
print("First Conv2d Layer after reshape: ", first_conv_layer)
# NOTE(review): the message says "Shape of ... weight" but this prints the
# full weight tensor; `.shape` may have been intended — confirm.
print("Shape of first conv2d weight after reshape: ", first_conv_layer.weight)